field               stat            min    max
query_id            string length   32     32
query               string length   9      4.01k
positive_passages   list length     1      1
negative_passages   list length     88     101
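
Each record pairs one query with a single positive passage and a pool of mined negative passages; every passage carries a "docid", a retrieval "score", and the code "text" itself. Below is a minimal sketch of iterating records with this schema, assuming they are stored one JSON object per line; the file name is a placeholder, not part of the source.

    import json

    # Assumption: records like the ones below sit one-per-line in a JSONL file;
    # "retrieval_pairs.jsonl" is a hypothetical path used only for illustration.
    with open("retrieval_pairs.jsonl") as f:
        for line in f:
            record = json.loads(line)
            query_id = record["query_id"]            # 32-character id
            query = record["query"]                  # free-text query, 9 to ~4.01k chars
            positives = record["positive_passages"]  # exactly one passage per record
            negatives = record["negative_passages"]  # 88 to 101 mined negatives per record
            # each passage is a dict with "docid", "score", and "text" keys
            print(query_id, query[:60], len(positives), len(negatives))
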
0e9ed4e2ae7a13df9cb4e74d0ee1ec9e
Update's the table from database using query as input use " " at the beginning and end of the query
[ { "docid": "96313f56d509bf954f7d64f086669e44", "score": "0.7863273", "text": "def update_table(self,query):\n query=query\n self._cursor.execute(query)\n self._connection.commit()", "title": "" } ]
[ { "docid": "bb68f0d9403b049569ad037b78d4e87d", "score": "0.7928796", "text": "def update(self, sql):", "title": "" }, { "docid": "f3fa507bf7a6aceb5b4cf35ddd2ba805", "score": "0.7482706", "text": "def make_update_query(self, query: str):\n\n self.create_connection() # sets connection\n cursor = self.connection.cursor() # cursor to execute the query\n res = cursor.execute(query) # result of the query\n self.connection.commit() # commits changes", "title": "" }, { "docid": "0514bd3ad467946597bd0657a018151d", "score": "0.7087283", "text": "def _update_query(self, query, *args):\n cnx = self._get_connection()\n cursor = cnx.cursor()\n cursor.execute(query, args)\n self._clean_up(cnx, cursor)", "title": "" }, { "docid": "22a03dcfefd675cc24ace21bd32bead3", "score": "0.70038104", "text": "def execute_query(self, query):\n stmt = self.__conn.cursor()\n stmt.execute(query)\n self.__conn.commit()\n print(\"Database updated\")", "title": "" }, { "docid": "73b524b52a66bc62ceef52b1ef46c1a1", "score": "0.6827473", "text": "def update_query(self, obj):\n obj.delete('1.0', END)\n all_entries = ENTRY_DB.all()\n for entry in all_entries:\n obj.insert(INSERT, entry)\n obj.insert(INSERT, \"\\n\")", "title": "" }, { "docid": "585ed3b59a82ef7d8538b2c0627fe644", "score": "0.68220824", "text": "def update_database(self, query):\n try:\n database = sqlite3.connect(\n self.sqlite_database,\n detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES\n )\n cursor = database.cursor()\n cursor.execute(query)\n database.commit()\n database.close()\n except sqlite3.Error as db_error:\n return print(\"update_database error: {0}\".format(db_error))\n return", "title": "" }, { "docid": "1f8b2413848e577d6d2cb5d55fba0be9", "score": "0.67925805", "text": "async def db_query_update(\n self, *, table_name: str, data: Dict[str, Any], conditions: Conditions = []\n):\n await self.dbctx.update(\n table_name=table_name, data=data, conditions=conditions\n )", "title": "" }, { "docid": "4acc9c0f3c213e4f703420e325b00c0b", "score": "0.67813605", "text": "def UpdateQuery(self, data, table, where, delayed=None):\n sql = \"UPDATE \"\n\n if delayed:\n sql += \"DELAYED \"\n\n sql += table + \" SET \"\n\n sets = []\n for k, v in data.iteritems():\n sets.append(self.FieldClause(k, v))\n\n sql += \", \".join(sets)\n sql += \" WHERE \" + self.WhereClause(where)\n\n return sql", "title": "" }, { "docid": "7395e417341cfec72c71e3853001002c", "score": "0.6765541", "text": "def update_data(conexion,table,dataSet,whereTab,data):\n cursorTable=conexion.cursor()\n cursorTable.execute(f'UPDATE {table} SET {dataSet} where {whereTab}',data)\n conexion.commit()", "title": "" }, { "docid": "f9fd0ab00402bf94fa20f4c85e8a694d", "score": "0.66853726", "text": "def update(self, insertion, table, logic):\n\n #build the command\n record=''\n exec_command =\"\"\n exec_command +=\"UPDATE \"+table\n exec_command +=\" SET \"+insertion\n if len(logic)>0:\n exec_command +=\" WHERE \"+logic\n\n #execute the command\n cursor = self.db.cursor()\n cursor.execute(exec_command)\n cursor.execute(\"COMMIT\")", "title": "" }, { "docid": "3233b45cb592fb154b86cc48e85bc904", "score": "0.6499966", "text": "def update(self, **data):\n data = data.items()\n update_command_arg = \", \".join(\"{} = ?\".format(entry[0])\n for entry in data)\n cmd = \"update {table} set {update_command_arg} {where_clause}\".format(\n update_command_arg=update_command_arg,\n where_clause=self.where_clause,\n table=self.table_name).rstrip()\n Repo.db.execute(cmd, [entry[1] for entry in data] + 
self.where_values)", "title": "" }, { "docid": "f47040deacce190c3ee074150c616627", "score": "0.6484788", "text": "def update(self, query):\n return self.insert(query)", "title": "" }, { "docid": "47bc42b5b6984437ac707701c7558ec4", "score": "0.6450352", "text": "def update(self, sql, param=None):\n self.__execute(sql, param)", "title": "" }, { "docid": "e18844929277c53025c04eed40073900", "score": "0.64211595", "text": "def update_table(table, newvalue , condition, re_item):\n conn = get_conn()\n cousor = get_cousor(conn)\n updatesql = \"\"\"update {t} set {n} where {c}\"\"\"\n column = []\n for k, v in newvalue.items():\n column.append(str(k)+\"=:\" + str(k)) \n column = \",\".join(column)\n cons = []\n for k, v in condition.items():\n cons.append(str(k)+\"=:\" + str(k))\n cons = \",\".join(cons)\n sql = updatesql.format(t = table, n = column, c = cons)\n merge = dict(condition.items() + newvalue.items())\n cousor.execute(sql,merge)\n conn.commit()\n conn.close()\n return select_something(table, re_item, condition)", "title": "" }, { "docid": "f73def5e82b27c6a20ea42dd2ac0c9a9", "score": "0.64080125", "text": "def execute(self, sql, params = ()):\r\n \r\n # keywords to replace and their replacement values\r\n keywords = {\r\n 'table': self.tablename,\r\n 'columns': self.get_columns_for_select(),\r\n }\r\n \r\n # Access odbc driver doesn't accept unicode strings\r\n sql = str(sql)\r\n \r\n # Replace any keywords in the SQL\r\n for kwd, replacement in keywords.items():\r\n sql = sql.replace('$%s$' % kwd, replacement)\r\n \r\n try:\r\n self.cursor.execute(sql, params)\r\n except Exception, e:\r\n print ' ! Database error: %s' % e\r\n print ' ! Caused by: %s' % sql\r\n import sys\r\n sys.exit()", "title": "" }, { "docid": "8c3826727b2db5cf09f70dccfe77d558", "score": "0.63951504", "text": "def update(table, id_):\n\n # your code\n\n return table", "title": "" }, { "docid": "a7fa4a8486a780e321ab4cdc43294018", "score": "0.632098", "text": "def query(self, sql):", "title": "" }, { "docid": "8c54a77e5a2197551316c492540b036c", "score": "0.6300081", "text": "def update(self, table, where, fields): \n whereClasues = ' AND '.join(where)\n _resolvedFields = []\n for key in fields.keys():\n _resolvedFields.append(key + \" = '\" + fields[key] + \"'\")\n \n _resolvedFieldsToStr = ', '.join(_resolvedFields)\n \n return self.query(\"UPDATE {} SET {} {}\", (_resolvedFieldsToStr, ((\" WHERE \" + whereClasues) if len(where) != 0 else \"\")), table)", "title": "" }, { "docid": "21773ca62a78ad8aac6a7023514fcd82", "score": "0.62838227", "text": "def updatetext():\n sql1 = 'update panellist set panelProfile = replace(panelProfile, \"<br/><br/>\",\"\\n\")'\n sql2 = \"update panellist set panelProfile = replace(panelProfile, '<br/>','\\n')\"\n sql3 = \"\"\"update panellist set panelProfile = replace(panelProfile, \"<92>\",\"'\")\"\"\"\n sql4 = \"\"\"update panellist set panelProfile = replace(panelProfile, '\\xc3\\x82\\xc2\\x92',\"'\")\"\"\"\n cur.execute(sql1)\n cur.execute(sql2)\n cur.execute(sql3)\n cur.execute(sql4)\n\n sql1 = 'update qanda set answers = replace(answers, \"<br/><br/>\",\"\\n\")'\n sql2 = \"update qanda set answers = replace(answers, '<br/>','\\n')\"\n sql3 = \"\"\"update qanda set answers = replace(answers, \"<92>\",\"'\")\"\"\"\n sql4 = \"\"\"update qanda set answers = replace(answers, '\\xc3\\x82\\xc2\\x92',\"'\")\"\"\"\n cur.execute(sql1)\n cur.execute(sql2)\n cur.execute(sql3)\n cur.execute(sql4)\n con.commit()\n cur.close()\n con.close()", "title": "" }, { "docid": 
"41e4568d5a4e2f8ca1f3746380865c02", "score": "0.6281323", "text": "def update(table, id_):\n\n # your code\n # Main Universal update function use\n common.update_universal(table, id_, title_list)\n # Save to file\n data_manager.write_table_to_file(file_name, table)\n return table", "title": "" }, { "docid": "71946ab159e7a745a4749b4ff47182aa", "score": "0.62549007", "text": "def update(self, tables, where, vars=None, _test=False, **values):\r\n if vars is None: vars = {}\r\n where = self._where(where, vars)\r\n\r\n query = (\r\n \"UPDATE \" + sqllist(tables) + \r\n \" SET \" + sqlwhere(values, ', ') + \r\n \" WHERE \" + where)\r\n\r\n if _test: return query\r\n \r\n db_cursor = self._db_cursor()\r\n self._db_execute(db_cursor, query)\r\n if not self.ctx.transactions: \r\n self.ctx.commit()\r\n return db_cursor.rowcount", "title": "" }, { "docid": "b5be4a6092bb5b9773c4cdf24485f14a", "score": "0.62492496", "text": "def edit_data():\n conn = sqlite3.connect(\"africaDB.sqlite3\") # connect to the database\n\n print(\"* Which table would you like to edit?:\")\n table_name = get_table_name()\n\n print(\"* Choose what attribute to udpdate: \")\n attribute = input()\n print(\"* Choose a value for this attribute: \")\n value = input()\n print(\"* For what condition will this be updated? (Ex: Rank = 1): \")\n condition = input()\n\n update_command = \"UPDATE {A} SET {B} = {C} WHERE {D}\".format(A = table_name, B = attribute, C = value, D = condition)\n conn.execute(update_command) # execute the update command\n conn.commit() # commit the changess\n conn.close()\n\n print(\"Updating...\")\n print()", "title": "" }, { "docid": "4980668fe5dff031e2a1795babf842cf", "score": "0.6248963", "text": "async def update(self, ctx: Context, *, _query):\n async with ctx.acquire():\n await ctx.db.execute(_query)\n\n await ctx.send(\"successfully updated.\")", "title": "" }, { "docid": "98683316e4a68fc23e3bcdba87d1a9a5", "score": "0.6233225", "text": "def ReplaceQuery(self, data, table, delayed=None):\n sql = \"REPLACE \"\n\n if delayed:\n sql += \"DELAYED \"\n\n sql += \"INTO \" + table\n\n keys = []\n values = []\n for k, v in data.iteritems():\n keys.append(k)\n values.append(self.escape(v))\n\n sql += \"(\" + self.fieldStr(keys) + \") VALUES (\" + \", \".join(values) + \")\"\n\n return sql", "title": "" }, { "docid": "b01a62be85cc331a62ceedd037a3baec", "score": "0.61497486", "text": "def update(self, table_name, table_data, num_rows=False):\n if len(self._where) == 0:\n return False\n self._query_type = 'update'\n self._query = \"UPDATE `{0}` SET \".format(table_name)\n stmt, data = self._build_query(num_rows=num_rows, table_data=table_data)\n res = self._execute(stmt, data)\n if self._affected_rows > 0:\n res = True\n else:\n res = False\n self._reset()\n return res", "title": "" }, { "docid": "d6bcd14d460e3e4207b97f728007adb5", "score": "0.61223376", "text": "def update(self, table_name, table_data, num_rows = False):\n\t\tif len(self._where) == 0:\n\t\t\treturn False\n\t\tself._query_type = 'update'\n\t\tself._query = \"UPDATE `{0}` SET \".format(table_name)\n\t\tstmt, data = self._build_query(num_rows=num_rows, table_data=table_data)\n\t\tres = self._execute(stmt, data)\n\t\tif self._affected_rows > 0:\n\t\t\tres = True\n\t\telse:\n\t\t\tres = False\n\t\tself._reset()\n\t\treturn res", "title": "" }, { "docid": "9cf484979bd66b4b2442e002dc6d6d3c", "score": "0.60884243", "text": "def update_database(self, data):\n pass", "title": "" }, { "docid": "745e00c4e56b92721340274233f7b243", "score": "0.60852623", 
"text": "def execute_sql_query(self):\n try:\n db = self.retrieve_db_client_by_selected_database()\n query = self.get_sql_query()\n connection = self.get_db_connection_string()\n db_result = db.run(query=query, connection_params=connection)\n # if columns was not provided it means that operations do not\n # return affected rows\n if not db_result.columns:\n rows_affected_msg = (\n f' Rows affected: {db_result.affected_rows}'\n if db_result.affected_rows > 0 else ''\n )\n self.show_info_message(\n f'You have made '\n f'changes to the database.{rows_affected_msg}'\n )\n return\n\n self.set_data_to_table_view(data=db_result.data,\n columns=db_result.columns)\n except DatabaseAppError as e:\n self.show_error(error_msg=e.msg)", "title": "" }, { "docid": "259509255d19b894bad17beb668c9ddd", "score": "0.60831153", "text": "def update_data():\n sql = \"\"\" UPDATE names\n SET name = %s\n WHERE url = %s\"\"\"\n try:\n time.sleep(3)\n params = config()\n connection = psycopg2.connect(**params)\n cursor = connection.cursor()\n postgreSQL_select_Query = \"select * from names;\"\n cursor.execute(postgreSQL_select_Query)\n print(\"Selecting rows from names table using cursor.fetchall\")\n profile_records = cursor.fetchall()\n print(\"Print each row and it's columns values\")\n for row in profile_records:\n profile_name = str(row[1])\n profile_url = str(row[2])\n result_name = re.search(pattern_name, profile_name)\n result_url = re.search(pattern_url, profile_url)\n if result_name:\n cursor.execute(sql, (str(result_name.group(1)), str(result_url.group(1))))\n connection.commit()\n\n except (Exception, psycopg2.Error) as error:\n print(\"Error while fetching data from PostgreSQL\", error)\n\n finally:\n if connection:\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")", "title": "" }, { "docid": "65bbc189e336d56e38ed535e09b6e2be", "score": "0.60466534", "text": "def update(self, conn, table, index, fields, values):\n self.table = table\n query = 'UPDATE {} SET '.format(self.table)\n query += ', '.join([' = '.join(items)\n for items in zip(fields,\n '?' * len(values))])\n query += ' WHERE {} = {}'.format(index.get('field'),\n index.get('value'))\n\n cur = conn.cursor()\n cur.execute(query, values)\n stat = conn.commit()\n cur.close()\n return stat", "title": "" }, { "docid": "a4bfc93687c902d0095862053fdcf2a6", "score": "0.60431284", "text": "def update(table, id_):\n id_storage = common.get_values_from_column(table, 0)\n if id_ in id_storage:\n table = manage_data_from_user(table, id_storage, id_, True)\n # Here u can make changes:\n\n else:\n ui.print_error_message('This option does not exist.')\n\n return table", "title": "" }, { "docid": "d6e7805bc2b0b330766b1fb9de0a1484", "score": "0.5984692", "text": "def update_table(self):\r\n self.cursor.execute(\"\"\"SELECT * FROM clients\"\"\")\r\n result = self.cursor.fetchall()\r\n self.tree.delete(*self.tree.get_children()) #clears table\r\n for item in result:\r\n self.tree.insert('', 'end', text=item[0], values=item[1:]) #updates table\r", "title": "" }, { "docid": "303fba86125f503edae1ce65fd4121bc", "score": "0.59756696", "text": "def update_clause(table_name, fields, where=None):\n set_query = 'SET ' + ', '.join(field + ' = ?' 
for field in fields)\n update_str = 'UPDATE {} {}'.format(table_name, set_query)\n \n if where:\n update_str += ' {}'.format(where_clause(where))\n\n return update_str", "title": "" }, { "docid": "3d7a5d3bb07a14278a9a497a61b0bd40", "score": "0.59582627", "text": "def execute_update(stmt, params):\n with connection.cursor() as cursor:\n return cursor.execute(stmt, params)", "title": "" }, { "docid": "567d8096c56b3ff9c4689ec998d44eed", "score": "0.59488475", "text": "def update_sql(fields):\n r = []\n r.append(\"UPDATE table\")\n r.append(\"<set>\")\n for f,jf in fields:\n r.append(f'\\t<if test=\"{jf} != null\">')\n r.append(f\"\\t\\t{f}=#{{{jf}}},\")\n r.append(\"\\t</if>\")\n r.append(\"</set>\")\n return \"\\r\\n\".join(r)", "title": "" }, { "docid": "c534c71556e89f0b190c8496c7a9ca9c", "score": "0.5936431", "text": "def update_db_from_dict(row_id, update_data, table):\n print(\"Update data is:\", json.dumps(update_data, indent=4, default=str))\n\n for field, value in update_data.items():\n if isinstance(value, str):\n value = None if not value.strip() else value\n update_query = \"UPDATE \" + table + \" SET \" + field + \" = %s WHERE id = %s\"\n update_params = (value, row_id)\n cursor.execute(update_query, update_params)\n\n db_conn.commit()", "title": "" }, { "docid": "24417aef6a76b453aaf5051616cd4f76", "score": "0.5931879", "text": "def update(self, table=\"defaultTable\", item=None, new_item=None):\n self.open()\n ipp = 1\n cmd = \"UPDATE \" + table + \" SET \"\n # First order the values to update\n if new_item is not None and type(new_item) is dict:\n # Extract the key's name to search for\n for key in new_item.keys():\n cmd += key + \"=\\\"\" + new_item[key] + \"\\\"\"\n if ipp != len(new_item):\n cmd += \", \"\n ipp += 1\n # Then order the value used to identify the line(s)\n ipp = 1\n cmd += \" WHERE \"\n if item is not None and type(item) is dict:\n # Extract the key's name to search for\n for key in item.keys():\n cmd += key + \"=\\\"\" + item[key] + \"\\\"\"\n if ipp != len(item):\n cmd += \", \"\n ipp += 1\n else:\n cmd += \";\"\n # Execute the command line and return the status\n self.sql.cursor.execute(cmd)\n return 0", "title": "" }, { "docid": "ffd8325ec4b47eb55f11651017bc864c", "score": "0.59229505", "text": "def form_update_with_all_fields_sql(self):\n sql_before_interpol = '''\n UPDATE %(tablename)s\n SET\n name=?,\n parentpath=?,\n bytesize=?,\n mdatetime=?,\n event_dt=?\n WHERE \n id=?\n '''\n return sql_before_interpol", "title": "" }, { "docid": "335048b748e7e44498774c5138d722b5", "score": "0.5909025", "text": "def execute(self, query):\n con = sqlite3.connect(self.database)\n cur = con.cursor()\n cur.execute(query)\n con.commit()\n con.close()", "title": "" }, { "docid": "2f8efc59992b957b2a05fa4c42eab5b9", "score": "0.5904954", "text": "def update(self, table: str, data: dict, sql_filter: str) -> bool:\n\n values = ','.join(['{}=\"{}\"'.format(k, v) for k, v in data.items()])\n\n t = sqlalchemy.text('UPDATE {} SET {} WHERE {}'.format(table, values, sql_filter))\n t.execution_options(autocommit=True)\n self.conn.execute(t)\n return True", "title": "" }, { "docid": "ee8c1c7cb376514c2f36f83c66a88bba", "score": "0.59023076", "text": "def update(self, id, data):\n sql = 'UPDATE {0[0]} SET {0[1]} = {2[0]}, \\\n {0[2]} = {2[1]}, {0[3]} = {2[2]} WHERE ID = {1[1]}'\n\n self.cursor.execute(sql.format(self.table_cols, id, data))", "title": "" }, { "docid": "9242f6bd86b01bbedd8dcd03cfd10585", "score": "0.5878132", "text": "def replace(self, table_name, condition, value):\n\n 
condition = self.convert_condition(condition)\n\n self.cursor.execute(\"UPDATE \" + table_name + \" SET \" + value[0] + \" = \" + '\"' + value[1] + '\"' + \" WHERE\" +\n condition + \";\")\n\n self.general.update_data_base_gui = True", "title": "" }, { "docid": "4c66c7eed3124cf0cd6892d1d1ce758d", "score": "0.5853233", "text": "def update_database_with_args(self, query, args):\n try:\n database = sqlite3.connect(\n self.sqlite_database,\n detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES\n )\n cursor = database.cursor()\n cursor.execute(query, args)\n database.commit()\n database.close()\n except sqlite3.Error as db_error:\n return print(\"update_database_with_args error: {0}\".format(db_error))\n return", "title": "" }, { "docid": "096b4e9e476f2f8d870ee58f6db0bdc5", "score": "0.5838418", "text": "def update(self):\n update_statement = f\"UPDATE products SET name='{self.name}',price={self.price},quantity={self.quantity} WHERE id={self.id}\"\n with create_connection(database_file()) as connection:\n cursor = connection.cursor()\n cursor.execute(update_statement)\n connection.commit()", "title": "" }, { "docid": "22c30f089e4465b09f7890bdcfc07099", "score": "0.58042717", "text": "def testEditSQLString(self): \n database = \"MAILINGS\" \n val = editSQLStr(\"fName\",\"Mark\",12, database)\n self.assertEqual(val,\"UPDATE MAILINGS SET fName = 'Mark' WHERE crmID ='12'\")", "title": "" }, { "docid": "cb3e89faf07d0e2f6879de8944276e9a", "score": "0.5800867", "text": "def update(self, query, what):\n raise NotImplementedError()", "title": "" }, { "docid": "3b8e9495242e69d0218c35ba6dec7014", "score": "0.5794405", "text": "def _send_update(self, name, **kwargs):\r\n # sql_query = \"Update tbl_scripts SET {} WHERE batch_id={} AND ScriptName='{}'\".format(\r\n # \",\".join([str(key) + \"=\" + str(value) for (key, value) in kwargs.items()]), self._batch_ID, name)\r\n\r\n sql_query = \"Update tbl_testruns SET {} WHERE tbl_testruns.Run_ID={}\".format(\r\n \",\".join([str(key) + \"=\" + str(value) for (key, value) in kwargs.items()]),\r\n self._run_ID)\r\n self.logger.debug('Sending Updates Query:%s', sql_query)\r\n try:\r\n db_con = m.Robo_Executor_SQLLiteDB()\r\n db_con.open_connection()\r\n\r\n db_con.run_sql(sql_query, commit=True)\r\n\r\n finally:\r\n db_con.close_connection()", "title": "" }, { "docid": "f51967d0a335df8170b1b3ae2e8fd3ec", "score": "0.575969", "text": "def update(table, id_):\n\n for i in table:\n if i[0] == id_:\n i[1] = ui.get_inputs([\"What should i update the name to: \"],\"\")\n i[2] = ui.get_inputs([\"What should I update the year of birth? 
\"],\"\")\n data_manager.write_table_to_file(\"hr/persons.csv\", table)\n\n return table", "title": "" }, { "docid": "9301928aa814a32250beea460e29115c", "score": "0.5759237", "text": "def execute_sql_query(conn: sqlite3.Connection, sql_query: str) -> None:\n get_cursor(conn).execute(sql_query)\n conn.commit()", "title": "" }, { "docid": "4a3dc124b25f137375989da8f31da109", "score": "0.57551146", "text": "def execute(self, sql):\n self.cursor.execute(sql)\n self.commit()", "title": "" }, { "docid": "a200ffd7e158f2f4247344ea6e5ee788", "score": "0.57484007", "text": "def __checkUpdate( self, table, param, paramValue, selectDict = {}, connection = False ):\n req = \"UPDATE %s SET %s = '%s'\" % ( table, param, paramValue )\n if selectDict:\n req = \"%s %s\" % ( req, self.buildCondition( selectDict ) )\n return self._update( req, connection )", "title": "" }, { "docid": "e0f213044e759d54b0d6396e97b89995", "score": "0.5723369", "text": "def execute_query(self, query):\n conn = self.dbconn\n try:\n cursor = conn.cursor()\n cursor.execute(query)\n conn.commit()\n #result = cursor.fetchall()\n cursor.close()\n except pyodbc.ProgrammingError as e:\n raise(\"db error occured\", e)", "title": "" }, { "docid": "331b3fc8cb97512aa0a0fe72485d615f", "score": "0.57228065", "text": "def update_records():\n conn = None\n try:\n # read connection parameters\n params = config()\n\n # connect to the PostgreSQL server\n # print('Connecting to the PostgreSQL database...')\n conn = psycopg2.connect(**params, sslmode=\"require\")\n\t\t\n # create a cursor\n cur = conn.cursor()\n\n getRowsSql = \"SELECT jobkey, descrip FROM jobs;\" #where basicreq IS NULL and bonusreq IS NULL\n\n cur.execute(getRowsSql)\n allRows = cur.fetchall()\n \n numUpdates = 0\n\n for row in allRows:\n key = str(row[0])\n html = str(row[1])\n\n print(key)\n\n arr = parse_html(key, html)\n sql1 = \"update jobs set basicreq = %s where jobkey = %s\"\n sql2 = \"update jobs set bonusreq = %s where jobkey = %s\"\n data1 = (str(arr[0]), key)\n data2 = (str(arr[1]), key)\n\n # print(data1)\n # print(data2)\n\n cur.execute(sql1, data1)\n cur.execute(sql2, data2)\n \n \n\t# close the communication with the PostgreSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.commit()\n conn.close()\n print('Database connection closed.')", "title": "" }, { "docid": "a3b2434619e77290a61da0fb35bc71f3", "score": "0.57140934", "text": "def update_query(self, query_id, data):\n path = \"api/queries/{}\".format(query_id)\n return self._post(path, json=data)", "title": "" }, { "docid": "65279b476bdb40cb55177ea1ff644640", "score": "0.5711265", "text": "def update_database(cur, table_erc):\n tx= \"\"\" INSERT INTO tx VALUES (:block_number, :block_hash, :log_index, :from, :to, :amount, :transaction_hash, :transaction_index); \"\"\"\n erc=\"\"\" INSERT INTO erc VALUES (:token_address, :balance)\"\"\"\n for x in table_erc.keys():\n cur.execute(erc,(x,table_erc[x]['balance']))\n cur.executemany(tx,table_erc[x]['transactions'])", "title": "" }, { "docid": "1e4a81b2da3b470f7b6f84b370a82906", "score": "0.570453", "text": "def Update(self,platform : str,updated : dict) -> str:\n\n\n #Make query to get length of data and the data in case a field is blank\n RESP = connection.execute(\n \"\"\"\n SELECT * FROM DATA\n WHERE platform = ?\n \"\"\"\n ,(platform,))\n\n RESP = RESP.fetchall()\n\n if not len(RESP) > 0:\n msg = f'Specified row \"{platform}\" could not be found.'\n logging.log(LEVELS[2],msg)\n return 
f'[{LEVELS[2]}] {msg}'\n\n RESP = RESP[0]\n BEFORE = {\n \"password\" : RESP[1],\n \"username\" : RESP[2],\n \"email\" : RESP[3],\n \"name\" : RESP[4],\n \"first_name\" : RESP[5],\n \"last_name\" : RESP[6],\n \"full_name\" : RESP[7] \n }\n\n connection.execute(\n \"\"\"\n UPDATE DATA\n SET password = :password,username = :username,email = :email,name = :name,first_name = :first_name,last_name = :last_name, full_name = :full_name\n WHERE platform = :platform\n \"\"\"\n ,{\n \"platform\" : platform,\n \"password\" : updated['password'] if 'password' in updated else BEFORE[\"password\"],\n \"username\" : updated['username'] if 'username' in updated else BEFORE[\"username\"],\n \"email\" : updated['email'] if 'email' in updated else BEFORE[\"email\"],\n \"name\" : updated['name'] if 'name' in updated else BEFORE[\"name\"],\n \"first_name\" : updated['first_name'] if 'first_name' in updated else BEFORE[\"first_name\"],\n \"last_name\" : updated['last_name'] if 'last_name' in updated else BEFORE[\"last_name\"],\n \"full_name\" : updated['full_name'] if 'full_name' in updated else BEFORE[\"full_name\"],\n }) \n\n connection.commit()\n data = \",\".join([f'{item.upper()}' for item in updated])\n msg = f'{blue}[INFO]{reset} Successfully updated rows for platofrm {blue}{RESP[0]}{reset} : {green}{data}{reset}.'\n logging.log(LEVELS[0],msg)\n return f'{msg}'", "title": "" }, { "docid": "b070e5e24b5e7af6f310aa51967ae62c", "score": "0.5702393", "text": "def save_query(self):\n\n ssql = self.ui.textEdit_sql.toPlainText()\n ui_save = DialogSaveSql(self.app)\n ui_save.ui.label.hide()\n ui_save.ui.lineEdit_group.hide()\n ui_save.exec()\n title = ui_save.name\n if title == \"\":\n msg = _(\"The query must have a name\")\n Message(self.app, _(\"Cannot save\"), msg).exec()\n return\n grouper = ui_save.grouper\n description = ui_save.description\n cur = self.app.conn.cursor()\n sql = \"insert into stored_sql (title, description, grouper, ssql) values (?,?,?,?)\"\n try:\n cur.execute(sql, [title, description, grouper, ssql])\n self.app.conn.commit()\n except Exception as e:\n Message(self.app, _(\"Cannot save\"), str(e)).exec()\n self.get_schema_update_tree_widget()", "title": "" }, { "docid": "dedbd392d68414dfb429fc5b15862450", "score": "0.5694485", "text": "def testEditSQLString(self): \n database = \"CRM_DATA\" \n val = editSQLStr(\"fName\",\"George\",2, database)\n self.assertEqual(val,\"UPDATE CRM_DATA SET fName = 'George' WHERE crmID ='2'\")", "title": "" }, { "docid": "bd9b0986cf2521b8b94c88be87dda78c", "score": "0.5691728", "text": "def update(self, row_count=None, **data):\n join_tables, where, _paras = self._construct_where()\n if where is None:\n return\n elif where.strip():\n where = 'WHERE ' + where\n\n cdata = self._filter_fields(data)\n\n record = cdata\n set_sqls, paras = [], []\n for field, value in record.items():\n if isinstance(value, expr):\n set_sqls.append('%s = %s' % (field, str(value)))\n paras.extend(value.get_param())\n else:\n set_sqls.append('%s = %%s' % field)\n paras.append(value)\n\n sql = 'UPDATE %s SET %s %s' % (\n self._table_name,\n ', '.join(set_sqls),\n where)\n paras.extend(_paras)\n\n if row_count:\n sql = sql + ' LIMIT %d ' % row_count\n\n c = self.cursor()\n return c.execute(sql, paras)", "title": "" }, { "docid": "4773a6ab61bfcaa65736289e81eef151", "score": "0.5686867", "text": "def _update(table, r, columns=None):\n if not columns:\n columns = [c for c in list(r.keys()) if c != 'id']\n\n columns_str = ', '.join(['%s=?' 
% (c) for c in columns])\n\n query = \"update %s set %s where id=%d\" % (table, columns_str, r['id'])\n\n vals = [r[c] for c in columns]\n\n connect_db().execute(query, vals)", "title": "" }, { "docid": "102ef827d059f4c6c56c1e94594008fd", "score": "0.56680995", "text": "def dbUpdateTask():\n\n do = dbOperator(FotocasaHouse, FotocasaScrapper, FotocasaDataProcessor)\n do.dbUpdate(\"barcelona\")", "title": "" }, { "docid": "7d143673304782533a4210278e8a6b00", "score": "0.56333804", "text": "def update(conn, id, title, author, year, isbn):\n cur = conn.cursor()\n cur.execute(\"UPDATE book SET title=?, author=?, year=?, isbn=? WHERE id=?\", (title, author, year, isbn, id))\n conn.commit()", "title": "" }, { "docid": "5c71d6cb774fe20a16be86946d9ef053", "score": "0.56263614", "text": "def update_row(table_name: str,\n old_value: dict[str: Union[str, int, float, bool, dict]],\n new_value: dict[str: Union[str, int, float, bool, dict]]):\n\n command = f\"\"\"UPDATE \"{table_name}\"\n SET {list(old_value.keys())[0]} = '{list(new_value.values())[0]}'\n WHERE {list(new_value.keys())[0]} = '{list(old_value.values())[0]}'\n ;\"\"\"\n\n execute_sql(command)", "title": "" }, { "docid": "8b4c2b6dcd9e03466c8edd0b1f16e095", "score": "0.5625186", "text": "def executeSqlUpdates(fileData, con, sql):\n if not fileData or not con:\n return\n \n for tradeData in fileData: \n print \"sql\", sql\n print \"tradeData\", tradeData\n resolvedSql = sql % tradeData\n print \"Executing: %s\" % resolvedSql\n con.execute(resolvedSql)\n \n con.commit()\n print \"All records committed successfully\"", "title": "" }, { "docid": "fe70623d263590e01d50e29d541aaf5b", "score": "0.5623756", "text": "def update(self):\n _LOGGER.debug(\"Rendering query: %s\", self.query)\n try:\n rendered_query = self.query.render()\n except TemplateError as ex:\n _LOGGER.error(\"Could not render query template: %s\", ex)\n return\n\n self.full_query = f\"{self.query_prefix} {rendered_query} {self.query_postfix}\"\n\n _LOGGER.info(\"Running query: %s\", self.full_query)\n\n try:\n tables = self.query_api.query(self.full_query)\n except ApiException as exc:\n _LOGGER.error(\n \"Could not execute query '%s' due to '%s', \"\n \"Check the syntax of your query\",\n self.full_query,\n exc,\n )\n self.value = None\n return\n\n if not tables:\n _LOGGER.warning(\n \"Query returned no results, sensor state set to UNKNOWN: %s\",\n self.full_query,\n )\n self.value = None\n else:\n if len(tables) > 1:\n _LOGGER.warning(\n \"Query returned multiple tables, only value from first one is shown: %s\",\n self.full_query,\n )\n self.value = tables[0].records[0].values[\"_value\"]", "title": "" }, { "docid": "6b1204385cad5f0bc9bc796807a5cd61", "score": "0.5620481", "text": "def update_record(self, id, **kwargs):\n sql = 'UPDATE %s SET (' % (self.table)\n for key in kwargs:\n sql += '%s, ' % (key)\n sql = sql[:-2]\n sql += ') = ('\n for key in kwargs:\n sql += '\"%s\", ' % (kwargs[key])\n sql = sql[:-2]\n sql += ') WHERE id=%s' % (id)\n print(sql)\n self.curs.execute(sql)\n self.conn.commit()", "title": "" }, { "docid": "71b44eb43305ced618243d54f04e2694", "score": "0.5608139", "text": "def updateName(cursor, taxID, taxName):\n taxName = taxName.replace(\"'\", \"''\")\n sql = \"UPDATE \" + dbDef.tblTaxonomy.name + \" SET \" + dbDef.tblTaxonomy_col_tax_name.name + \" = '\" + taxName + \"' WHERE \" + dbDef.tblTaxonomy_col_tax_id.name + \" = '\" + taxID + \"'\"\n print sql;\n cursor.execute(sql)", "title": "" }, { "docid": "a027fe194a2a4f090298482344fbfbe1", "score": 
"0.5607194", "text": "def update_data_for(username, data):\n con = sql.connect(database_user) \n cur = con.cursor()\n cur.execute(f\"\"\"\n UPDATE UserDatabase \n SET color = '{data['color']}', weight = '{data['weight']}'\n WHERE username='{username}';\n \"\"\")\n con.commit()\n cur.close()\n con.close()", "title": "" }, { "docid": "2f72284620bbd8537255aab4b6365526", "score": "0.5607063", "text": "def update(db, table, updates, cond='', mash=False, commit=True, con=False):\n con = do_con(db, con)\n cur = con.cursor()\n\n # stop mutability\n updates = dict(updates)\n\n column_names = table_columns(cur, table)\n column_updates, mash_updates = seperate_mash(updates, column_names)\n\n if mash:\n query = 'select MashConfig from ' + table + ' ' + cond\n cur.execute(query)\n current_mash = cur.fetchone()[0]\n if current_mash:\n mash_updates = dict(objectify(current_mash), **updates)\n\n # do the queries\n if mash_updates:\n all_updates = dict(column_updates, **{'MashConfig': mash_updates})\n else:\n all_updates = column_updates\n tupled = [(k, v) for k, v in all_updates.items()]\n placeholders = ', '.join(k + ' =?' for k, v in tupled)\n query = 'UPDATE {} SET {} {}'.format(table, placeholders, cond)\n values = []\n for k, v in tupled:\n if isinstance(v, (tuple, list, dict)):\n values.append(stringify(v))\n else:\n values.append(v)\n\n cur.execute(query, values)\n\n if commit:\n con.commit()\n else:\n return con", "title": "" }, { "docid": "17604f63491b7fabe4144f23089ea30b", "score": "0.56068367", "text": "def _execute_raw_sql_query(self, table, sql, start=None, end=None, bind_params=None, operation=\"UPDATE\"):\n LOG.info(log_json(msg=f\"triggering {operation}\", table=table))\n with connection.cursor() as cursor:\n cursor.db.set_schema(self.schema)\n t1 = time.time()\n try:\n cursor.execute(sql, params=bind_params)\n except OperationalError as exc:\n db_exc = get_extended_exception_by_type(exc)\n LOG.error(log_json(os.getpid(), msg=str(db_exc), context=db_exc.as_dict()))\n raise db_exc from exc\n\n running_time = time.time() - t1\n LOG.info(log_json(msg=f\"finished {operation}\", table=table, running_time=running_time))", "title": "" }, { "docid": "ea79644e3eb2ea794a68641e23a0c162", "score": "0.56018007", "text": "def update():\r\n print(\"give the id of the employee:\")\r\n o = int(input(\">\"))\r\n while check(o)!=True:\r\n print(\"give the id of the employee:\")\r\n o = int(input(\">\"))\r\n print(\"this id doesn't exists\")\r\n print(\"type the new name or type -1 if you don't want to update it: \")\r\n k = input(\">\")\r\n if k != \"-1\":\r\n cr.execute(f\"update emp set name == '{k}' where id_num == {o} \")\r\n else:\r\n pass\r\n\r\n\r\n print(\"type the new position or type -1 if you don't want to update it: \")\r\n k = input(\">\")\r\n if k != \"-1\":\r\n cr.execute(f\"update emp set position == '{k}' where id_num == {o} \")\r\n else:\r\n pass\r\n print(\"type the new salary or type -1 if you don't want to update it: \")\r\n k = input(\">\")\r\n if k != \"-1\":\r\n k = float(k)\r\n cr.execute(f\"update emp set salary == '{k}' where id_num == {o} \")\r\n else:\r\n pass\r\n db.commit()", "title": "" }, { "docid": "da3de630bc4a6d16ed31e591f6b568bb", "score": "0.55974066", "text": "def __execute_query(self, query):\r\n\t\ttry:\r\n\t\t\tcursor = self.cnx.cursor()\r\n\t\t\t#execute the SQL change\r\n\t\t\tif self.debug == True:\r\n\t\t\t\tprint(\"Executing following SQL command : \" + query + \"on db :\" + self.dbname)\r\n\t\t\tlines = cursor.execute(query)\r\n\t\t\tdata = 
cursor.fetchall()\r\n\t\t\treturn data\r\n\t\texcept:\r\n\t\t\tif self.debug == True:\r\n\t\t\t\tprint(\"Error executing : \" + query + \" on db :\" + self.dbname)\r\n\t\t\treturn \"Error\"", "title": "" }, { "docid": "67604ad84ba1637223cc66676621b77d", "score": "0.5590353", "text": "def update(table, id_):\n searched_record = [record for record in table if id_ in record]\n ui.print_table(searched_record, title_list)\n searched_record = searched_record[0] # unpack from list of lists\n id_place = 1\n # due to id in on the 0 position in list\n\n i = 0\n while i < 1:\n user_input = input(\"What do you want change?\").lower()\n if user_input in update_options:\n chosen_option = update_options.index(user_input) + id_place\n new_data = input(\"Actual \" + user_input + \": \"\n + searched_record[chosen_option]\n + \"\\nEnter new: \")\n\n if chosen_option == 1:\n searched_record[chosen_option] = new_data\n i += 1\n elif common.check_if_input_is_number(new_data) and common.check_if_data_is_in_range(\n chosen_option - + id_place, new_data, border_conditions):\n searched_record[chosen_option] = new_data\n i += 1\n else:\n ui.print_error_message(\"some kind of error, to wide range for day month year etc\")\n else:\n ui.print_error_message(\"Provide correct value\")\n data_manager.write_table_to_file(file_name, table=table)\n ui.print_table([searched_record], title_list)\n return table", "title": "" }, { "docid": "227f7314315f6d7ee2952b76fa833209", "score": "0.55768156", "text": "def update(self):\n status = \"<p>Not yet updated</p>\"\n needed = UPDATE_FIELDS.intersection(self.data)\n needed = [key for key in needed if self.old_data[key] != self.data[key]]\n if needed:\n args = tuple(str(self.data[x]) for x in needed)\n updates = ', '.join(\"%s = ?\" % x for x in needed)\n\n line = \"UPDATE course set %s where id = %s\" % (updates,\n self.data['id'])\n\n # Want to self.cursor.execute(xline, *needed stuff)\n # Want to print lline % * needed stuff\n self.cursor.execute(line, args)\n print(dict(command=line, args=args), file=self.log_file)\n\n # Update the data to reflect changes\n self.old_data.update({k: self.data.get(k) for k in needed})\n\n status = (\"<p>Dish updated at %s</p>\" %\n datetime.now().time().strftime(\"%-I:%M:%S %p\"))\n\n return status", "title": "" }, { "docid": "4fa7342ede1842265f9a3becc1f9faf8", "score": "0.55620116", "text": "def update(table, id_):\n\n ID_LIST_INDEX = 0\n iterate = 0\n for row in table:\n if row[ID_LIST_INDEX] == id_[ID_LIST_INDEX]:\n updated_record = ui.get_inputs(['title: ', 'price: ', 'month: ', 'day: ', 'year: '], row)\n updated_record.insert(ID_LIST_INDEX, id_[ID_LIST_INDEX])\n table[iterate] = updated_record\n data_manager.write_table_to_file('sales/sales.csv', table)\n break\n iterate += 1\n return table", "title": "" }, { "docid": "3a10b2eab2fb77ba74e20b053fba81e7", "score": "0.5552898", "text": "def update_table(currency):\n input = ExchangeRate(currency)\n conn = None\n updated_rows = 0\n ###\n now = datetime.now().strftime('%Y-%m-%d, %H:%M')\n sql = \"\"\"\nINSERT INTO currencies_hourly (date_time, ccy, pair, provider, buy, sell)\n/**/\nVALUES (%s, %s, %s, %s, %s, %s);\n\"\"\".format(currency)\n data = (now, input.ccy(),f'{input.base_ccy()}/{input.ccy()}', input.bank(), float(input.buy()), float(input.sell()))\n try:\n # connect to the PostgreSQL server\n print('Trying to connect to the server')\n conn = psycopg2.connect(database='exchange_rate', user='yurii', password='yurii', host='localhost')\n\n # create a cursor\n cur = conn.cursor()\n\n # Execute a 
statement\n cur.execute(sql, data)\n updated_rows = cur.rowcount\n\n # Commit changes to the DB\n conn.commit()\n\n # Close a communication with SQL\n cur.close()\n print('Closed communication with database')\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n print('Closed Database connection')", "title": "" }, { "docid": "8cd1056cbba04ec8c597295d0f825145", "score": "0.5549005", "text": "def delete_from_sql(db_conn, TABLE_NAME, query):\n sql = text(query)\n db_conn.execute(sql.execution_options(autocommit=True))\n\n print(f\"Records succesfully deleted from {TABLE_NAME}.\")", "title": "" }, { "docid": "68f78aee0e85b35c5a4e94e882d95c10", "score": "0.5541533", "text": "def update(self, docs, table_name):\n return self.action('update', docs, table_name)", "title": "" }, { "docid": "019876dde52ef91cb98ced1f5c04d448", "score": "0.55286545", "text": "def update(self, enrolmentNumber, operator):\n self.connect()\n try:\n sql = \"\"\"update {0} set firstName = \"{1}\", lastName = \"{2}\", dob = \"{3}\", \nfaculty = \"{4}\", email = \"{5}\" where enrolmentNumber = {6}\"\"\".format(\n self.tablename, operator.firstName, operator.lastName, operator.dob ,\n operator.faculty , operator.email, enrolmentNumber\n )\n self.cursor.execute(sql)\n except Exception as err:\n print(err)\n\n finally:\n self.disconnect()", "title": "" }, { "docid": "1475f68dff78cf3038fb6e62e2126cf2", "score": "0.5527197", "text": "def execute_query(self, sql, params):\n try:\n cursor = self.db_connection.cursor()\n cursor.execute(sql, params)\n self.db_connection.commit()\n except psycopg2.DatabaseError:\n raise", "title": "" }, { "docid": "0fa97efb02893f874023be50523518fa", "score": "0.5521419", "text": "def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")", "title": "" }, { "docid": "fa0a1de7d854b17598460bf26ec8798c", "score": "0.552071", "text": "def execute(self, query):\n self.engine.execute(query)", "title": "" }, { "docid": "f130a50e80dc23479757196f0ab28b6a", "score": "0.5514604", "text": "def execute(self, sql):\n\n self.cur.execute(sql)", "title": "" }, { "docid": "a7bd0ee56589d26371967cc5d09e4871", "score": "0.5514484", "text": "def update(self, enrolmentNumber, student):\n self.connect()\n try:\n sql = \"\"\"update {0} set firstName = \"{1}\", lastName = \"{2}\", dob = \"{3}\", \nfaculty = \"{4}\", email = \"{5}\" where enrolmentNumber = {6}\"\"\".format(\n self.tablename, student.firstName, student.lastName, student.dob ,\n student.faculty , student.email, enrolmentNumber\n )\n self.cursor.execute(sql)\n self.db.commit()\n except Exception as err:\n print(err)\n\n finally:\n self.disconnect()", "title": "" }, { "docid": "cd965b975608eef59660ed92895610df", "score": "0.55077684", "text": "def update(self):\n db.session.commit()", "title": "" }, { "docid": "cd965b975608eef59660ed92895610df", "score": "0.55077684", "text": "def update(self):\n db.session.commit()", "title": "" }, { "docid": "cd965b975608eef59660ed92895610df", "score": "0.55077684", "text": "def update(self):\n db.session.commit()", "title": "" }, { "docid": "cd965b975608eef59660ed92895610df", "score": "0.55077684", "text": "def update(self):\n db.session.commit()", "title": "" }, { "docid": "5d8bd6f76e280113bb5db5310adb8bba", "score": "0.5504195", "text": "def update_data(self, table, score, ids):\r\n self.conn = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\r\n self.cursor = self.conn.cursor()\r\n str_update = \"UPDATE 
{} SET SCORE={} WHERE ID={}\".format(table, score, ids)\r\n\r\n try:\r\n self.cursor.execute(str_update)\r\n self.conn.commit()\r\n return True\r\n\r\n except IndexError:\r\n return False\r\n\r\n finally:\r\n self.conn.close()", "title": "" }, { "docid": "1abb954d5649fd6eb9ba99b5d7e60845", "score": "0.5502703", "text": "def update_field(self, table: str, field: tuple, condition: tuple) -> Cursor:\n query = f\"UPDATE {table} SET {field[0]} = :field_value WHERE {condition[0]} = :condition_value;\"\n params = {\n \"field_value\": field[1],\n \"condition_value\": condition[1],\n }\n return self.query(query, params)", "title": "" }, { "docid": "1708f94e114a52a3ac36105157982615", "score": "0.5499106", "text": "def sql(query):\n conn = MySQLdb.connect(conf['dbhost'], conf['dbuser'], \n conf['dbpass'])\n conn.select_db(conf['db'])\n conn.autocommit(True)\n cursor = conn.cursor()\n cursor.execute(query)\n ret = cursor.fetchall()\n conn.close()\n return ret", "title": "" }, { "docid": "7731d40c8d5f2849c108cb5ee06b1fbd", "score": "0.5496133", "text": "def execute(self, new_data):\r\n self.cur.execute(new_data)", "title": "" }, { "docid": "d327ed60920e4411a00c76fd2b68ad61", "score": "0.5485331", "text": "def update(table, id_):\n print(id_)\n while id_ != \"EXIT\":\n title = \"Provide new data:\"\n new_values = ui.get_inputs([x + \": \" for x in list_labels[1:]], title)\n check_id = id_\n for line in table:\n if check_id in line:\n for item in range(1, len(line)):\n line[item] = new_values[item-1]\n return table", "title": "" }, { "docid": "baa452cd2a70b4a958f58d7bbfe10eb5", "score": "0.5476017", "text": "def updatesql(self):\n df_result = pd.DataFrame()\n sql_quarter = \"SELECT DISTINCT `报表日期` FROM `financial` ORDER BY `报表日期` DESC \"\n Quarter = pd.read_sql(sql_quarter, self.conn)['报表日期'].astype('str')\n Quarter = Quarter.values\n for q in Quarter:\n print(q)\n sqli = \"SELECT * FROM `financial` WHERE `报表日期` ='%s'\"%(q)\n df = pd.read_sql(sqli,self.conn)\n df_tmp = df[(df['主营业务收入增长率'] > df['主营业务收入增长率'].median()) &\n (df['净利润增长率'] > df['净利润增长率'].median()) &\n (df['净资产增长率'] > df['净资产增长率'].median()) &\n (df['三年平均净资收益率'] > df['三年平均净资收益率'].median()) &\n ((df['每股资本公积金']+df['每股未分配利润']) > 2) &\n (df['每股净资产']>1)\n ]\n df_result = pd.concat([df_result,df_tmp])\n df_result = df_result[['代码','报表日期']]\n df_result.to_sql('faresult', con=self.conn, if_exists='replace',index=False, dtype=None)\n return print(\"Done!\")", "title": "" }, { "docid": "4a0b7c613dabb596ebbf7e4286a977b5", "score": "0.5464789", "text": "def update_row(self, i_table_name, i_id, i_new_values):\n query = f\"\"\"UPDATE {i_table_name} SET {', '.join(i_new_values)} WHERE id={i_id};\"\"\"\n\n return self._save_execute_query(i_query = query, i_function_called = 'update_row', i_call_at_end = self.save_connection)", "title": "" }, { "docid": "e431e8fd7108d035688283f868715db5", "score": "0.54593223", "text": "def update_db(db='hn_db.db', table_name='hn_users'):\n\n conn = sqlite3.connect(db)\n curs = conn.cursor()\n df = pd.read_sql(\n \"\"\"SELECT comment_author, comment_text, sentiment\n FROM hn_comments\"\"\", conn)\n\n df = preprocessing(df)\n df = sentiment_analysis(df)\n hn_rank = df.copy()\n\n # Make separate columns for df[sentiment] values: Positive and Negative\n df_test = hn_rank.groupby(\n ['comment_author', 'sentiment']).size().unstack(fill_value=0)\n df_test['saltiness'] = df_test['Positive'] - df_test['Negative']\n df_test = df_test.drop(['Positive', 'Negative'], axis=1)\n df_test = df_test.sort_values(by='saltiness', 
ascending=True)\n\n # If this table already exists, drop it before inserting new values\n df_test.to_sql(name=table_name, con=conn, if_exists='replace')\n\n # Feature engineer saltiness rankings(1, 2, 3..)\n rankings = []\n for i, _ in enumerate(df_test['saltiness']):\n rankings.append(i+1)", "title": "" }, { "docid": "dc8257c6188ce033650446f67dad1fbc", "score": "0.54466605", "text": "def update(table, id_, record):\n\n # your code\n return common.common_update(table, id_, record)", "title": "" }, { "docid": "751d8a9d679b742299711d33244e1953", "score": "0.5436997", "text": "def _update_table(self, updater: Callable[[Dict[int, Mapping]], None]):\n\n tables = self._storage.read()\n\n if tables is None:\n # The database is empty\n tables = {}\n\n try:\n raw_table = tables[self.name]\n except KeyError:\n # The table does not exist yet, so it is empty\n raw_table = {}\n\n # Convert the document IDs to the document ID class.\n # This is required as the rest of TinyDB expects the document IDs\n # to be an instance of ``self.document_id_class`` but the storage\n # might convert dict keys to strings.\n table = {\n self.document_id_class(doc_id): doc\n for doc_id, doc in raw_table.items()\n }\n\n # Perform the table update operation\n updater(table)\n\n # Convert the document IDs back to strings.\n # This is required as some storages (most notably the JSON file format)\n # don't require IDs other than strings.\n tables[self.name] = {\n str(doc_id): doc\n for doc_id, doc in table.items()\n }\n\n # Write the newly updated data back to the storage\n self._storage.write(tables)", "title": "" }, { "docid": "9d187fd9342e28865182aa883ffce784", "score": "0.5414468", "text": "def make_insert_query(self, query: str):\n\n self.create_connection() # sets connection\n cursor = self.connection.cursor() # cursor to perform the query\n res = cursor.execute(query) # result of the query\n self.connection.commit() # commits changes", "title": "" }, { "docid": "c449214aa5e31f55a4c30ec0db927e65", "score": "0.54109997", "text": "def update(slug, name):\n network, query = _get_query(slug, name)\n require.query.update(network, query)\n context = ValidationContext(network=network, query=query)\n data = dict(request_content(request).items())\n data = validate_query(dict(data.items()), context)\n query.update(data)\n db.session.commit()\n return jsonify(query, status=202)", "title": "" } ]
e1dcd7079a55f1c787e707ed12f112c4
1. Generate morphed images using face morpher
[ { "docid": "8a4a078ac64b1e5be88467de13ba285e", "score": "0.68224627", "text": "def morph_emotion_images(emotion, subjects):\n \n head_direction = 'Rafd090'\n eye_direction = 'frontal'\n \n for subject in subjects:\n \n print('[INFO] Processing', subject...)\n \n ID = subject.split('_')[0]\n \n output_folder_emotion = os.path.join(output_dir, ID, emotion) # for saving neutral faces\n output_folder_neutral = os.path.join(output_dir, ID, 'neutral')\n check_folder_exists(output_folder_emotion)\n check_folder_exists(output_folder_neutral)\n \n # Generate morphed image pairs\n img_neutral = os.path.join(image_root, f'{head_direction}_{subject}_neutral_{eye_direction}.jpg')\n img_emotion = os.path.join(image_root, f'{head_direction}_{subject}_{emotion}_{eye_direction}.jpg')\n \n # Morph neutral and images with emotions\n imgs = [img_neutral, img_emotion]\n \n \n morpher(imgs, out_frames=output_folder_emotion, num_frames=40)\n rename_target_images(emotion, subject, output_folder_emotion, type='morph')\n remove_unused_images(emotion, output_folder_emotion)\n \n \n # Morph neutral images in order to crop images \n imgs = [img_neutral, img_neutral]\n morpher(imgs, out_frames=output_folder_neutral, num_frames=3)\n rename_target_images(emotion, subject, output_folder_neutral, type='neutral')\n remove_unused_images(emotion, output_folder_neutral)\n \n \n # Morph raw images\n imgs = [img_emotion, img_emotion]\n morpher(imgs, out_frames=output_folder_emotion, num_frames=3)\n rename_target_images(emotion, subject, output_folder_emotion, type='raw')\n remove_unused_images(emotion, output_folder_emotion)", "title": "" } ]
[ { "docid": "cc9e78a261e7f4def8e8801d07438f48", "score": "0.6737589", "text": "def create_mini_images():\n\n emotions = [\"neutral\", \"anger\", \"contempt\", \"disgust\", \"fear\", \"happy\", \"sadness\", \"surprise\"]\n for emotion in emotions:\n source_path = \"ck-sorted/\"+emotion\n write_path = \"mini_images/\"+emotion\n for image in os.listdir(source_path):\n original = cv2.imread(source_path + \"/\" + image)\n try:\n original = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)\n except:\n pass\n casc = \"haarcascade_frontalface_default.xml\"\n cropped = crop_face(original, casc)\n if cropped is not None:\n small = cv2.resize(cropped, (24,24))\n cv2.imwrite(write_path + \"/\" + image, small)", "title": "" }, { "docid": "9eb75ce6e51f961c9d8036c08e26e01b", "score": "0.66232336", "text": "def preprocess(img_dir, fa, face_detector):\n transform_func = torchvision.transforms.Compose(\n [torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])\n img_list = sorted(os.listdir(img_dir))\n\n img_tensors = []\n ori_img_tensors = []\n kpts_tensors = []\n ori_imgs = []\n ss = []\n ts = []\n for image_name in img_list:\n if '.jpg' not in image_name:\n continue\n image_path = os.path.join(img_dir, image_name)\n img_tensor, ori_img_tensor, kpts_tensor, ori_img, s, t = \\\n load_img_2_tensors(image_path, fa, face_detector, transform_func)\n img_tensors.append(img_tensor)\n ori_img_tensors.append(ori_img_tensor)\n kpts_tensors.append(kpts_tensor)\n ori_imgs.append(ori_img)\n ss.append(s)\n ts.append(t)\n img_tensors = torch.stack(img_tensors, dim=0).unsqueeze(0) # (1, V, C, H, W)\n ori_img_tensors = torch.stack(ori_img_tensors, dim=0).unsqueeze(0) # (1, V, C, H, W)\n kpts_tensors = torch.stack(kpts_tensors, dim=0).unsqueeze(0) # (1, V, 68, 2)\n\n return img_tensors.cuda(), ori_img_tensors.cuda(), kpts_tensors.cuda(), ori_imgs, ss, ts", "title": "" }, { "docid": "a287724bbe4822d1b402ba6b47ca01c7", "score": "0.6461356", "text": "def generate_embeddings(face_image):\n try:\n faceBlob = cv2.dnn.blobFromImage(face_image, 1.0 / 255,\n (96, 96), (0, 0, 0), swapRB=True, crop=False)\n embedder_model.setInput(faceBlob)\n vec = embedder_model.forward()\n return vec\n except Exception as e:\n print(str(e))", "title": "" }, { "docid": "b184b6939446fc7759abfd8bd75dd6d7", "score": "0.62335485", "text": "def splitTransform(self):\n\t\tpath_merge = \"deform/deform_norm2\"\n\t\tpath_train = \"deform/train/\"\n\t\tpath_label = \"deform/label/\"\n\t\ttrain_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n\t\tfor imgname in train_imgs:\n\t\t\tmidname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n\t\t\timg = cv2.imread(imgname)\n\t\t\timg_train = img[:,:,2]#cv2 read image rgb->bgr\n\t\t\timg_label = img[:,:,0]\n\t\t\tcv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n\t\t\tcv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "title": "" }, { "docid": "f40dd4063e3b785317832ba4c7480a37", "score": "0.6221786", "text": "def morph(personPic, cartoonPic, numFrames):\n # Read images\n personPath = os.path.join(UPLOAD_FOLDER, personPic)\n cartoonPath = os.path.join(os.path.join(CARTOON_FOLDER, os.path.basename(\"images\")), cartoonPic)\n img1 = cv2.imread(personPath)\n img2 = cv2.imread(cartoonPath)\n\n # Convert Mat to float data type\n img1 = np.float32(img1)\n img2 = np.float32(img2)\n\n # Create an array for the morph images\n imgArray = []\n\n # Create file names for morph video and gif\n videoName = os.path.join(MORPH_FOLDER, 
personPic.split(\".\")[0] + cartoonPic.split(\".\")[0] + \"morph.mp4\")\n gifName = os.path.join(MORPH_FOLDER, personPic.split(\".\")[0] + cartoonPic.split(\".\")[0] + \"morph.gif\")\n quarterName = os.path.join(MORPH_FOLDER, personPic.split(\".\")[0] + cartoonPic.split(\".\")[0] + \"quarter.jpg\")\n halfwayName = os.path.join(MORPH_FOLDER, personPic.split(\".\")[0] + cartoonPic.split(\".\")[0] + \"halfway.jpg\")\n threequarterName = os.path.join(MORPH_FOLDER, personPic.split(\".\")[0] + cartoonPic.split(\".\")[0] + \"threequarter.jpg\")\n\n # Creates mp4 of morphing from person to cartoon\n out = cv2.VideoWriter(videoName, cv2.VideoWriter_fourcc(*'mp4v'), 20, (600, 800))\n\n # Note the halfway point of the frames\n halfwayFrames = numFrames // 2\n\n for i in range(0, numFrames):\n if i < halfwayFrames:\n alpha = i / halfwayFrames\n\n # Read array of corresponding points\n points1 = readPoints(personPath + '.txt')\n points2 = readPoints(os.path.join(os.path.join(CARTOON_FOLDER, os.path.basename(\"text\")), cartoonPic) + '.txt')\n points = []\n\n # Compute weighted average point coordinates\n for j in range(0, len(points2)):\n x = ( 1 - alpha ) * points1[j][0] + alpha * points2[j][0]\n y = ( 1 - alpha ) * points1[j][1] + alpha * points2[j][1]\n points.append((x,y))\n\n # Allocate space for final output\n imgMorph = np.zeros(img1.shape, dtype = img1.dtype)\n\n # Read triangles from tri.txt\n with open(\"tri_orig.txt\") as file :\n for line in file :\n x,y,z = line.split()\n \n x = int(x)\n y = int(y)\n z = int(z)\n\n t1 = [points1[x], points1[y], points1[z]]\n t2 = [points2[x], points2[y], points2[z]]\n t = [ points[x], points[y], points[z] ]\n\n # Morph one triangle at a time.\n morphTriangle(img1, img2, imgMorph, t1, t2, t, alpha)\n\n # Create result and write to output mp4\n finalImage = np.uint8(imgMorph)\n imgArray.append(finalImage)\n\n out.write(finalImage)\n\n # Write the 25% morphed image\n if i == halfwayFrames // 4:\n cv2.imwrite(quarterName, finalImage)\n\n # Write the 50% morphed image\n if i == halfwayFrames // 2:\n cv2.imwrite(halfwayName, finalImage)\n\n # Write the 75% morphed image\n if i == (3 * halfwayFrames) // 4:\n cv2.imwrite(threequarterName, finalImage)\n\n else: \n # Write frames in reverse to output\n out.write(imgArray[numFrames - 1 - i])\n\n # Yield a status update\n yield \"data:\" + str(int((i + 1) / numFrames * 100)) + \"\\n\\n\"\n \n out.release()\n\n # Creates gif of morphing from mp4\n clip = (VideoFileClip(videoName))\n clip.write_gif(gifName)\n\n yield \"data:stop\\n\\n\"", "title": "" }, { "docid": "c7e8e137ad93477349887edbb263e30a", "score": "0.61549586", "text": "def generalised_mosaic(images, ref_id):\n # print(images)\n ref_img = images[ref_id-1]\n # images.pop(ref_id-1)\n h_out = 0\n w_out = 0\n homo = []\n tot_hom = []\n for img in images:\n h_out += img.shape[0]\n w_out += img.shape[1]\n for _, img in enumerate(images):\n homo.append(get_homography(ref_img, img))\n for hom in homo:\n \n prev = None\n image_homo_pair = zip(images, homo)\n for img, h_ in image_homo_pair:\n\n result = cv2.warpPerspective(img, h_, (h_out, w_out))\n if not prev:\n r\n cv2.namedWindow('RESULT', cv2.WINDOW_NORMAL)\n cv2.imshow('RESULT', result)\n cv2.waitKey(0)\n prev = img\n return result", "title": "" }, { "docid": "d5d70c262f0d7e6272a180563fcb75cd", "score": "0.6086025", "text": "def visualize():\n print \"Preparing visualizations...\"\n\n tile_faces(fetch_lfw_people()[\"images\"], constants.LOG_DIR + \"/all_faces_tiled.png\")", "title": "" }, { "docid": 
"cd8afe1968c4bc00e16a809b6bafdb73", "score": "0.59469855", "text": "def extract_album_faces():\n np.random.seed(0)\n os.makedirs(os.path.abspath(FACES_PATH), exist_ok=True)\n mtcnn = MTCNN(select_largest=False, keep_all=True, device='cpu').eval()\n for root, dirs, files in os.walk(ALBUM_PATH):\n for fname in tqdm(files, ascii=True):\n fpath = os.path.join(ALBUM_PATH, fname)\n img = Image.open(fpath)\n with torch.no_grad():\n boxes, probs = mtcnn.detect(img, landmarks=False)\n if boxes is not None:\n for box, prob in zip(boxes, probs):\n if prob > 0.99:\n isfile = True\n while isfile: \n rand_key = np.random.randint(10**5, 10**6)\n save_path = os.path.join(FACES_PATH, '{}.png'.format(rand_key))\n isfile = os.path.isfile(save_path)\n _ = extract_face(img, box, save_path=save_path)", "title": "" }, { "docid": "c42dbe02a8d7d6ca95522ceca9c48f07", "score": "0.59127104", "text": "def splitTransform(self):\n\t\t#path_merge = \"transform\"\n\t\t#path_train = \"transform/data/\"\n\t\t#path_label = \"transform/label/\"\n path_merge = \"deform/deform_norm2\"\n path_train = \"deform/train/\"\n path_label = \"deform/label/\"\n train_imgs = glob.glob(path_merge+\"/*.\"+self.img_type)\n for imgname in train_imgs:\n midname = imgname[imgname.rindex(\"/\")+1:imgname.rindex(\".\"+self.img_type)]\n img = cv2.imread(imgname)\n img_train = img[:,:,2]#cv2 read image rgb->bgr\n img_label = img[:,:,0]\n cv2.imwrite(path_train+midname+\".\"+self.img_type,img_train)\n cv2.imwrite(path_label+midname+\".\"+self.img_type,img_label)", "title": "" }, { "docid": "7b6ac722cafe8fce015303c461ad2421", "score": "0.58913887", "text": "def mark_up_faces(self, image):\n\n pass", "title": "" }, { "docid": "4e16a7e1774a24468c17a12348ac2000", "score": "0.5888573", "text": "def preprocess(image, target_size=None, augmentation=True, mask=None,\n zero_center=False, scale=1., dim_ordering='th',\n to_bgr=False, hflip=False, vflip=False, shift_x=0, shift_y=0,\n rot_range=0, elastic_trans=False, colorize=False):\n image_size = image.shape\n cv2_imsize = (image_size[1], image_size[0])\n\n if target_size is not None:\n cv2_imsize = (target_size[1], target_size[0])\n image = cv2.resize(image, cv2_imsize, interpolation=cv2.INTER_LINEAR)\n \n if to_bgr:\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n \n if mask is not None:\n mask = cv2.resize(mask, cv2_imsize, interpolation=cv2.INTER_NEAREST)\n\n \n if augmentation:\n # histogram normalization\n if colorize:\n file_index = np.random.randint(5)\n files = ['img_07419.txt', 'img_00230.txt', 'img_01384.txt', 'img_02487.txt', 'img_00726.txt']\n target = np.loadtxt('../dataset/color normalization histograms/histogram_' + files[file_index])\n target = target/target.sum()\n target = target * cv2_imsize[1] * cv2_imsize[0] * 3\n \n image = histogram_colorization(target, image)\n \n \n # flips\n if hflip and np.random.randint(2) == 1:\n image = np.fliplr(image)\n if mask is not None:\n mask = np.fliplr(mask)\n\n if vflip and np.random.randint(2) == 1:\n image = np.flipud(image)\n if mask is not None:\n mask = np.flipud(mask)\n \n # translate\n shift_x = np.random.randint(-shift_x, shift_x+1)\n shift_y = np.random.randint(-shift_y, shift_y+1)\n M = np.float32([[1, 0, shift_x], [0, 1, shift_y]])\n image = cv2.warpAffine(image, M, cv2_imsize)\n if mask is not None:\n mask = cv2.warpAffine(mask, M, cv2_imsize)\n \n # elastic transform\n if elastic_trans:\n image = elastic_transform(image)\n \n # rotate\n rot = np.random.uniform(-rot_range, rot_range)\n \n # rotate wrt center\n M = 
cv2.getRotationMatrix2D((cv2_imsize[0]/2, cv2_imsize[1]/2), rot, 1)\n image = cv2.warpAffine(image, M, cv2_imsize)\n if mask is not None:\n mask = cv2.warpAffine(mask, M, cv2_imsize)\n \n\n if zero_center:\n image = image - 127 # naive zero-center\n image = image.astype(np.float32) * scale\n\n if dim_ordering == 'th':\n image = image.transpose(2, 0, 1)\n if mask is not None and mask.ndim == 3:\n mask = mask.transpose(2, 0, 1)\n\n if mask is not None:\n return image, mask\n \n\n return image", "title": "" }, { "docid": "411b7abf23dade88ab77f1cac11b2374", "score": "0.5832482", "text": "def demo_images():\r\n # Hard coded parameters\r\n maxSide = 600 # max length of longer side of Im\r\n lenSeq = 35 # longer seq will be shrinked between [lenSeq/2, lenSeq]\r\n binTh = 0.4 # final thresholding to obtain mask\r\n clearFinalBlobs = True # remove low energy blobs; uses binTh\r\n\r\n # parse commandline parameters\r\n args = parse_args()\r\n np.random.seed(args.seed)\r\n if args.imdir == '':\r\n imagenetVideoList = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \\\r\n 'imagenet_videos/ILSVRC2015/ImageSets/VID/' + \\\r\n 'train_10.txt'\r\n imagenetRoot = '/mnt/vol/gfsai-local/ai-group/users/bharathh/' + \\\r\n 'imagenet_videos/ILSVRC2015/Data/VID/train/'\r\n with open(imagenetVideoList, 'r') as f:\r\n lines = f.readlines()\r\n imdirs = [x.strip().split(' ')[0] for x in lines]\r\n imdirs = imdirs[np.random.randint(len(imdirs))]\r\n args.imdir = os.path.join(imagenetRoot, imdirs)\r\n args.outdir = os.path.join(args.outdir, imdirs)\r\n\r\n # setup input directory\r\n print('InputDir: ', args.imdir)\r\n imPathList = utils.read_r(args.imdir, '*.*')\r\n if len(imPathList) < 2:\r\n print('Not enough images in image directory: \\n%s' % args.imdir)\r\n return\r\n\r\n # setup output directory\r\n suffix = args.imdir.split('/')[-1]\r\n suffix = args.imdir.split('/')[-2] if suffix == '' else suffix\r\n args.outdir = args.outdir + '/' + suffix\r\n utils.mkdir_p(args.outdir)\r\n print('OutputDir: ', args.outdir)\r\n\r\n # load image sequence after adjusting frame gap and imsize\r\n frameGap = args.frameGap\r\n if frameGap <= 0 and len(imPathList) > lenSeq:\r\n frameGap = int(len(imPathList) / lenSeq)\r\n imPathList = imPathList[0:len(imPathList):frameGap + 1]\r\n h, w, c = np.array(Image.open(imPathList[0])).shape\r\n frac = min(min(1. * maxSide / h, 1. 
* maxSide / w), 1.0)\r\n if frac < 1.0:\r\n h, w, c = imresize(np.array(Image.open(imPathList[0])), frac).shape\r\n imSeq = np.zeros((len(imPathList), h, w, c), dtype=np.uint8)\r\n for i in range(len(imPathList)):\r\n if frac < 1.0:\r\n imSeq[i] = imresize(np.array(Image.open(imPathList[i])), frac)\r\n else:\r\n imSeq[i] = np.array(Image.open(imPathList[i]))\r\n print('Total Video Shape: ', imSeq.shape)\r\n\r\n # run the algorithm\r\n maskSeq = nlc(imSeq, maxsp=args.maxsp, iters=args.iters, outdir=args.outdir)\r\n np.save(args.outdir + '/mask_%s.npy' % suffix, maskSeq)\r\n\r\n # save visual results\r\n if clearFinalBlobs:\r\n maskSeq = remove_low_energy_blobs(maskSeq, binTh)\r\n utils.rmdir_f(args.outdir + '/result_%s/' % suffix)\r\n utils.mkdir_p(args.outdir + '/result_%s/' % suffix)\r\n for i in range(maskSeq.shape[0]):\r\n mask = (maskSeq[i] > binTh).astype(np.uint8)\r\n grayscaleimage = (color.rgb2gray(imSeq[i]) * 255.).astype(np.uint8)\r\n imMasked = np.zeros(imSeq[i].shape, dtype=np.uint8)\r\n for c in range(3):\r\n imMasked[:, :, c] = grayscaleimage / 2 + 127\r\n imMasked[mask.astype(np.bool), 1:] = 0\r\n Image.fromarray(imMasked).save(\r\n args.outdir + '/result_%s/' % suffix + imPathList[i].split('/')[-1])\r\n import subprocess\r\n subprocess.call(\r\n ['tar', '-zcf', args.outdir + '/../result_%s.tar.gz' % suffix,\r\n '-C', args.outdir + '/result_%s/' % suffix, '.'])\r\n\r\n return", "title": "" }, { "docid": "ec6f05cd14bd402cf4974306d81c7462", "score": "0.5811773", "text": "def tile_faces(face_data, path):\n print \"\\tTiling faces...\"\n num_images_horiz = 10\n num_images_vert = int(math.ceil(len(face_data) / num_images_horiz))\n output_size = (num_images_horiz * constants.WIDTH, num_images_vert * constants.HEIGHT)\n output_img = Image.new(\"L\", output_size)\n\n idx = 0\n for y in xrange(0, num_images_vert * constants.HEIGHT, constants.HEIGHT):\n for x in xrange(0, num_images_horiz * constants.WIDTH, constants.WIDTH):\n single_face = Image.fromarray(face_data[idx])\n output_img.paste(single_face, (x, y))\n idx += 1\n\n output_img.save(path)\n print \"\\t\\tTiled faces saved to %s\" % path", "title": "" }, { "docid": "0e257141de3b37c53a2fdc65076d3287", "score": "0.5806697", "text": "def splitTransform(self):\n # path_merge = \"transform\"\n # path_train = \"transform\\\\data\\\\\"\n # path_label = \"transform\\\\label\\\\\"\n path_merge = \"deform\\\\deform_norm2\"\n path_train = \"deform\\\\train\\\\\"\n path_label = \"deform\\\\label\\\\\"\n train_imgs = glob.glob(path_merge + \"\\\\*.\" + self.img_type)\n for imgname in train_imgs:\n midname = imgname[imgname.rindex(\"\\\\\") + 1:imgname.rindex(\".\" + self.img_type)]\n img = cv2.imread(imgname)\n img_train = img[:, :, 2] # cv2 read image rgb->bgr\n img_label = img[:, :, 0]\n cv2.imwrite(path_train + midname + \".\" + self.img_type, img_train)\n cv2.imwrite(path_label + midname + \".\" + self.img_type, img_label)", "title": "" }, { "docid": "07382af08d4704af81fed051fc63ee12", "score": "0.5783151", "text": "def sample_images(batches_done):\n #imgs = next(iter(val_dataloader))\n image = cv2.imread(\"0_original.png\")\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n pil_im = Image.fromarray(image)\n pil_im = data_transform(pil_im)\n pil_im = pil_im.unsqueeze(0)\n\n image = cv2.resize(image, (256, 256)) \n image = np.transpose(image, (2, 0, 1))\n image = torch.tensor(image)\n image = image.unsqueeze(0)\n\n print(image.shape)\n print(pil_im.shape)\n #print(imgs['A'].shape)\n\n my_img = Variable(pil_im.type(Tensor))\n my_img_fake 
= G_BA(my_img)\n my_img_fake = my_img_fake.squeeze(0).detach().cpu()\n\n #my_img_fake = transforms.functional.to_pil_image(my_img_fake)\n #my_img_fake = transforms.ToPILImage()(my_img_fake)\n #my_img_fake = transforms.Normalize((-0.5, -0.5 -0.5), (1/0.5, 1/0.5, 1/0.5))(my_img_fake)\n #my_img_fake.show()\n #my_img_fake = transforms.Resize((480, 480), Image.BICUBIC)(my_img_fake)\n #my_img_fake = transforms.ToTensor()(my_img_fake)\n #my_img_fake = transforms.Normalize((-0.5/0.5, -0.5/0.5, -0.5/0.5), (1/0.5, 1/0.5, 1/0.5))(my_img_fake)\n #my_img_fake = transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))(my_img_fake)\n\n #real_A = Variable(imgs['A'].type(Tensor))\n #fake_B = G_AB(real_A)\n #real_B = Variable(imgs['B'].type(Tensor))\n #fake_A = G_BA(real_B)\n #img_sample = torch.cat((real_A.data, fake_B.data,\n # real_B.data, fake_A.data), 0)\n #img_sample = torch.cat((real_A.data, fake_B.data), 0)\n #img_sample = torch.cat((my_img.data, my_img_fake.data), 0)\n #save_image(img_sample, '%s.png' % (batches_done), nrow=5, normalize=True)\n '''mean = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)\n std = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32)\n normalize = transforms.Normalize(mean.tolist(), std.tolist())\n unnormalize = transforms.Normalize((-mean / std).tolist(), (1.0 / std).tolist())\n my_img_fake = unnormalize(my_img_fake)'''\n\n save_image(my_img_fake, 'boxxx.png', nrow=5, normalize=True)", "title": "" }, { "docid": "81fe5866e9b40b5f2ad8b3ffdfc97179", "score": "0.57667947", "text": "def morphTrans(threshold_image, stringType, intensity, iterations, show=False):\n kernel = np.ones((intensity, intensity), np.uint8)\n\n if stringType == \"erosion\" or stringType == \"erode\" or stringType == 0:\n morphed_image = cv.erode(threshold_image, kernel, iterations)\n elif stringType == \"dilate\" or stringType == \"dilation\" or stringType == 1:\n morphed_image = cv.dilate(threshold_image, kernel, iterations)\n elif stringType == \"open\" or stringType == \"opening\" or stringType == 2:\n morphed_image = cv.morphologyEx(threshold_image, cv.MORPH_OPEN, kernel)\n else:\n print(\"Error: Incorrect morphtrans() parameters\")\n\n if show:\n show = cv.resize(morphed_image, (1320, 720))\n cv.imshow('Morphed Image', show)\n\n return morphed_image", "title": "" }, { "docid": "ddaf13d2d77306b7332cba2282e94438", "score": "0.5763481", "text": "def make_augmentation(landmark_file_name=\"/home/nasorokin11/Data/Data/data/train/landmarks.csv\", train_path=\"/home\"\n \"/nasorokin11/Data/Data/data/train\"\n \"/images/\"):\n image_names = []\n new_landmarks = []\n print(\"start...\")\n with open(landmark_file_name, \"rt\") as fp:\n for i, line in tqdm.tqdm(enumerate(fp)):\n if i == 0:\n continue # skip header\n elements = line.strip().split(\"\\t\")\n image_name = elements[0]\n landmarks = list(map(np.int16, elements[1:]))\n landmarks = np.array(landmarks, dtype=np.int16).reshape((len(landmarks) // 2, 2))\n im = Image.open(train_path+\"{}\".format(image_name))\n # To grayscale\n im_gray = im.convert(\"LA\").convert(\"RGB\")\n im_gray.save(train_path+\"gray_{}.jpg\".format(i))\n image_names.append(\"gray_{}.jpg\".format(i))\n\n new_landmarks.append(np.array(landmarks.reshape(landmarks.shape[0] * 2)))\n\n X = im.width\n im_mirror = Image.fromarray(np.array(im)[:, ::-1])\n im_mirror.save(train_path+\"mirror_{}.jpg\".format(i))\n image_names.append(\"mirror_{}.jpg\".format(i))\n landmarks[:, 0] = X - landmarks[:, 0] - 1\n\n new_landmarks.append(np.array(landmarks.reshape(landmarks.shape[0] * 2)))\n\n print(\"save 
images...\")\n with open(landmark_file_name, \"a\") as fl:\n for i in tqdm.tqdm(range(len(image_names))):\n fl.write(image_names[i] + \"\\t\" + \"\\t\".join(map(str, new_landmarks[i].tolist())) + \"\\n\")", "title": "" }, { "docid": "e8321d7d9fb584d197ff032b9cb4b44e", "score": "0.5739911", "text": "def makeDeblendFamilyMosaic(mi, parent, kids, mapperInfo=None,\n background=-10, maskbit=False, imBbox=None):\n\n aa = {}\n if maskbit:\n aa.update(mask=True)\n parent_im = footprintToImage(parent.getFootprint(), mi, **aa)\n bbox = afwGeom.BoxD(parent.getFootprint().getBBox())\n pext = (bbox.getMinX(), bbox.getMaxX(), bbox.getMinY(), bbox.getMaxY())\n\n pks = parent.getFootprint().getPeaks()\n pix = [pk.getIx() for pk in pks]\n piy = [pk.getIy() for pk in pks]\n pfx = [pk.getFx() for pk in pks]\n pfy = [pk.getFy() for pk in pks]\n\n N = 1 + len(kids)\n S = np.ceil(np.sqrt(N))\n C = S\n R = np.ceil(float(N)/C)\n\n Rx,Ry = [],[]\n tts = []\n stys = []\n xys = []\n #\n # Find how large an image we need to display the parent and all the children\n #\n kidImages, kim = {}, None\n for kid in kids:\n kim = footprintToImage(kid.getFootprint(), mi, **aa)\n kidImages[kid] = kim\n\n if not kim:\n kim = parent_im.clone()\n\n if not imBbox:\n imBbox = parent_im.getBBox(afwImage.PARENT)\n for kid in kids:\n imBbox.include(kidImages[kid].getBBox(afwImage.PARENT))\n\n mos = displayUtils.Mosaic(background=background)\n \n bbox = afwGeom.Box2I(afwGeom.Point2I(kim.getX0() - imBbox.getMinX(),\n kim.getY0() - imBbox.getMinY()), kim.getDimensions())\n\n kidImages[parent] = parent_im # not strictly a kid\n\n for kid in [parent] + kids:\n kim = kidImages[kid]\n #\n # Put the child into the correct place in the parent image. We have to do this for\n # the parent too if some of the children extended outside it's BBox\n #\n bbox = afwGeom.Box2I(afwGeom.Point2I(kim.getX0() - imBbox.getMinX(),\n kim.getY0() - imBbox.getMinY()), kim.getDimensions())\n\n _kim = parent_im.Factory(imBbox)\n _kim[bbox] <<= kim\n mos.append(_kim, '%d%s' % (mapperInfo.getId(kid) if mapperInfo else (kid.getId() & 0xfff),\n \"P\" if kid == parent else \"C\"))\n del _kim\n\n return mos", "title": "" }, { "docid": "e397f69a95b530ba2e4fc956e4b2a981", "score": "0.5736514", "text": "def collate_fn(self, batch):\n\n images = list()\n boxes = list()\n labels = list()\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n images = torch.stack(images, dim=0)\n return images, boxes, labels", "title": "" }, { "docid": "a9c08aac29d762283edffaaaad866a9e", "score": "0.5727736", "text": "def make_images(self):\n\n fwhm = self.psfpars['psf_fwhm']\n if self['expand_fac'] > 1:\n #eimage0 = self.get_image0(expand=True, verify=True)\n eimage0 = self.get_image0(expand=True)\n efwhm = fwhm*self['expand_fac']\n eimage,epsf = convolve_turb(eimage0,efwhm,get_psf=True)\n\n image0 = rebin(eimage0, self['expand_fac'])\n image = rebin(eimage, self['expand_fac'])\n psf = rebin(epsf, self['expand_fac'])\n else:\n image0 = self.get_image0()\n image,psf = convolve_turb(self.image0,fwhm,get_psf=True)\n\n self.image0 = image0\n self.image = image\n self.psf = psf", "title": "" }, { "docid": "ddf2dfa1dd941a2ded967ab5c78fb2e5", "score": "0.57211596", "text": "def extract_faces(img, pre_processing_methods, localization_method,\n post_processing_methods=False, face_out_size=(224, 224)):\n global _is_logging, _second_chance_counter\n start_time = None\n if _is_logging:\n import time\n print('STARTING FACE EXTRACTION')\n start_time = time.time()\n total_time = 
time.time()\n\n # PRE PROCESSING\n img = _image_processing(img, pre_processing_methods)\n if _is_logging:\n print('pre-processing:\\t\\t', time.time() - start_time, 's')\n start_time = time.time()\n\n # FACE LOCALISATION\n faces = _face_localization(img, localization_method)\n if _is_logging:\n print('first localisation:\\t', time.time()-start_time, 's')\n if len(faces) > 0:\n print('second localization canceled')\n start_time = time.time()\n\n # POST PROCESS IF NO FACE FOUND\n if post_processing_methods and len(faces) < 1:\n img = _image_processing(img, post_processing_methods)\n if _is_logging:\n print('post-processing:\\t\\t:', time.time()-start_time, 's')\n start_time = time.time()\n faces = _face_localization(img, localization_method)\n if len(faces) > 0:\n print('INFO: Face found in second attempt')\n _second_chance_counter += 1\n if _is_logging:\n print('second localization:\\t:', time.time() - start_time, 's')\n start_time = time.time()\n\n # NO FACE FOUND, RETURN NONE\n if len(faces) < 1:\n print('INFO: FaceExtractor: No face found!')\n if _is_logging:\n print('---------------\\n'\n 'no face found\\n'\n 'total time:', time.time()-total_time, 's',\n '\\n---------------')\n return None\n\n # RESIZE FACES TO GIVEN SIZE AND RETURN\n else:\n faces_resized = list()\n for face in faces:\n faces_resized.append(FaceOperator.scale(face, face_out_size[0], face_out_size[1]))\n if _is_logging:\n print('resize faces:\\t\\t', time.time()-start_time, 's')\n print('---------------\\n'\n 'faces found:', len(faces_resized),\n '\\ntotal time:', time.time() - total_time, 's',\n '\\n---------------')\n return faces_resized", "title": "" }, { "docid": "7f8edb2fa662ca517bfc4b236d234990", "score": "0.5721071", "text": "def collate_fn(self, batch):\n\n images = list()\n boxes = list()\n labels = list()\n # difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n # difficulties.append(b[3])\n\n images = torch.stack(images, dim=0)\n\n # return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each\n return images, boxes, labels", "title": "" }, { "docid": "5e932db3804ee0523d17d1b939047029", "score": "0.56907946", "text": "async def marify(ctx, *args):\n await custom_meme.ify(ctx, face_detection.mar_scale, 'memes/marius/marius-face.png', 'marify.png', args)", "title": "" }, { "docid": "4db3105a8bb9621bed667c9b1c86c316", "score": "0.5678209", "text": "def demo_image(model, spoof_threshold, img_path):\n if not os.path.exists(img_path):\n print(\"The path does not exists, please check again!\")\n return None\n\n frame = cv2.imread(img_path)\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))\n net.setInput(blob)\n detections = net.forward()\n \n count_face = 0\n\n for i in range(0, detections.shape[2]):\n \n confidence = detections[0, 0, i, 2]\n \n if confidence > face_threshold:\n count_face += 1\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n \n if startX <= w and endX <= w and startY <= h and endY <= h:\n face = frame[startY:endY, startX:endX]\n print(face.shape)\n if face.shape[0] + face.shape[1] > 150:\n input_model = model.input_shape\n width , height = input_model[1], input_model[2]\n face = frame[startY:endY, startX:endX]\n face = cv2.resize(face, (width, height))\n\n face = face.astype(\"float\")\n face = np.array(face)\n face = np.expand_dims(face, axis=0)\n\n # pass the face 
ROI through the trained liveness detector\n # model to determine if the face is \"real\" or \"fake\"\n preds = model.predict(face)[0]\n # j = np.argmax(preds)\n if preds[1] > spoof_threshold:\n j = 1\n else:\n j = 0\n if w < 800:\n font_rate = 1\n font_size = 1\n else:\n font_rate = 4\n font_size = 3\n if j == 0:\n cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 3)\n _label = \"Live-Score: {:.4f}\".format(preds[j])\n cv2.putText(frame, _label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, font_rate, (0, 255, 0), font_size)\n else:\n cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 3)\n _label = \"Spoof-Score: {:.4f}\".format(preds[j])\n cv2.putText(frame, _label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, font_rate, (0, 0, 255), font_size)\n else:\n return -1\n if count_face > 0:\n try:\n parts = img_path.split(\".\")\n cv2.imwrite(parts[0] + '_predict.' + parts[1], frame)\n except:\n print('error img: ', img_path)\n return j\n if count_face == 0:\n print(img_path)\n \n # frame = cv2.resize(frame, (640,480))\n\n # img_name = img_path.split(\"/\")[-1]\n # # Display the resulting frame\n # cv2.imshow(img_name, frame)\n \n # cv2.waitKey(0) \n # cv2.destroyAllWindows()\n\n return -1", "title": "" }, { "docid": "e0643615b05a781ef78c4e5a1ef2e188", "score": "0.56554663", "text": "def collate_fn( batch):\n\n images = list()\n boxes = list()\n labels = list()\n y_is_box_label = list()\n num_pos = list()\n y_rpn_regr = list()\n\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n y_is_box_label.append(b[3][0])\n y_rpn_regr.append(b[3][1])\n num_pos.append(b[4])\n \n images = torch.stack(images, dim=0)\n y_is_box_label = torch.cat(y_is_box_label, dim=0)\n y_rpn_regr = torch.cat(y_rpn_regr, dim=0)\n\n return images, boxes, labels , [y_is_box_label, y_rpn_regr] , num_pos", "title": "" }, { "docid": "1063cd07135595ad3b73023fbedd1142", "score": "0.5654001", "text": "def _emojify(self):\n faces = face_recognition.face_locations(face_recognition.load_image_file(self.WORKING_FILE))\n\n if len(faces) == 0:\n return self.WORKING_FILE\n\n imagemagick_cmd = [\"convert\", self.WORKING_FILE]\n\n # compose imagemagick command to add multiple faces\n for face in faces:\n imagemagick_cmd.extend(self._get_params_for_face(face))\n\n output_path = self.WORKING_FILE + \".composite.jpg\"\n\n imagemagick_cmd.append(output_path)\n call(imagemagick_cmd)\n\n copyfile(output_path,self.WORKING_FILE)", "title": "" }, { "docid": "e6b90b4b10ea92da9ab8ed134ec358e3", "score": "0.56537515", "text": "def preprocess_images(self, images):\n raise NotImplementedError", "title": "" }, { "docid": "0e6230e180e9c917ba007c07dfc84b80", "score": "0.56480765", "text": "def createDataSets(smilePath, nonSmilePath,batchSize):\n\n pictures = []\n labels = []\n\n #transform all smiling pictures\n for root, dirs, files in os.walk(smilePath, True):\n i=0\n #static for loop\n for name in files:\n #all images\n #for name in files:\n if name.endswith(\".jpg\") and i<(batchSize/2):\n pictures.append(transformImage(os.path.join(root, name)))\n labels.append(np.array([1], np.int32))\n i=i+1\n\n # transform all non-smiling pictures\n for root, dirs, files in os.walk(nonSmilePath, True):\n k=0\n #all images\n #for name in files:\n #static for loop\n for name in files:\n if name.endswith(\".jpg\") and k<(batchSize/2):\n pictures.append(transformImage(os.path.join(root, name)))\n labels.append(np.array([0], np.int32))\n k=k+1\n\n return np.asarray(pictures), np.asarray(labels)", 
"title": "" }, { "docid": "0d8f08be4e0cf5f5dc8068da23e66746", "score": "0.5642071", "text": "def image_tranforms(input_size):\n # %% Bulding a tranformation pipeline on Pytorch thanks to Compose\n # Image transformations\n img_transforms = transforms.Compose([\n zPad_or_Rescale(input_size),\n # 2) Data augmentation\n RandomRot90(),\n RandomSmallRotation(),\n RandomVFlip(),\n RandomHFlip(),\n transforms.ToTensor(), #Will rescale to 0-1 Float32\n Double_to_Float()\n ])\n return img_transforms", "title": "" }, { "docid": "14b18aa9f34752762e7c3116ac71c4ef", "score": "0.563034", "text": "def TrainImages():\n\trecognizer = cv2.face.LBPHFaceRecognizer_create()#recognizer = cv2.face_LBPHFaceRecognizer.create()#$cv2.createLBPHFaceRecognizer()\n\tif(time_display):print(\"19: \\t\" + str(time_dif()))\n\tharcascadePath = \"haarcascade_frontalface_default.xml\"\n\tdetector = cv2.CascadeClassifier(harcascadePath)\n\tif(time_display):print(\"20: \\t\" + str(time_dif()))\n\tfaces,Id = getImagesAndLabels(\"TrainingImage\")\n\tif(time_display):print(\"20.5: \\t\" + str(time_dif()))\n\trecognizer.train(faces, np.array(Id))\n\tif(time_display):print(\"21: \\t\" + str(time_dif()))\n\tif (not os.path.isdir(\"TrainingImageLabel\")): os.mkdir(\"TrainingImageLabel\")\n\trecognizer.save(\"TrainingImageLabel/Trainner.yml\")\n\tif(time_display):print(\"21.5: \\t\" + str(time_dif()))\n\tres = \"Image Trained\"#+\",\".join(str(f) for f in Id)\n\tupdateStatus(res)\n\tif(time_display):print(\"22: \\t\" + str(time_dif()))", "title": "" }, { "docid": "8b9f42ddc5fb4e83610ea23975eea35a", "score": "0.5625234", "text": "def processImages(imageset):\n rgb_images = []\n np_lab_images = []\n for image, label in imageset:\n rgb_images.append(image)\n\n \n for rgb_image in rgb_images:\n np_rgb_image = np.transpose(rgb_image.numpy(), (1, 2, 0))\n # Convert it to LAB\n\n np_lab_image = cv2.cvtColor(numpy_rgb_image, cv2.COLOR_RGB2LAB)\n np_lab_images.append(np_lab_image)\n\n for np_lab_image in np_lab_images:\n np_lab_image[:, :, 0] *= 255 / 100\n np_lab_image[:, :, 1] += 128\n np_lab_image[:, :, 2] += 128\n np_lab_image /= 255\n torch_lab_image = torch.from_numpy(np.transpose(numpy_lab_image, (2, 0, 1)))\n lab_images.append(torch_lab_image)", "title": "" }, { "docid": "92102c34da65868f3d966e67deebdda7", "score": "0.56188774", "text": "def corp_face(image_array, landmarks, addsize = 0):\r\n x_min1 = np.min(landmarks['chin'], axis = 0)[0]\r\n x_max1 = np.max(landmarks['chin'], axis = 0)[0]\r\n x_min2 = np.min(landmarks['left_eyebrow'], axis = 0)[0]\r\n x_max2 = np.max(landmarks['right_eyebrow'], axis = 0)[0]\r\n x_min = min(x_min1, x_min2)\r\n x_max = max(x_max1, x_max2)\r\n x_center = (x_max - x_min) / 2 + x_min\r\n \r\n y_min1 = np.min(landmarks['chin'], axis = 0)[1]\r\n y_max1 = np.max(landmarks['chin'], axis = 0)[1]\r\n y_min2 = np.min(landmarks['left_eyebrow'], axis = 0)[1]\r\n y_max2 = np.max(landmarks['right_eyebrow'], axis = 0)[1]\r\n y_min = min(y_min1, y_min2)\r\n y_max = max(y_max1, y_max2)\r\n y_center = (y_max - y_min) / 2 + y_min\r\n\r\n width = x_max - x_min\r\n height = y_max - y_min\r\n length = max(width, height) + addsize\r\n left = x_center - length / 2\r\n right = x_center + length / 2\r\n top = y_center - length / 2\r\n bottom = y_center + length / 2\r\n\r\n pil_img = Image.fromarray(image_array)\r\n left, top, right, bottom = [int(i) for i in [left, top, right, bottom]]\r\n cropped_img = pil_img.crop((left, top, right, bottom))\r\n cropped_img = np.array(cropped_img)\r\n return cropped_img, left, top", "title": 
"" }, { "docid": "03e26ee27f7a9278f05e1370f49a81aa", "score": "0.5613755", "text": "def compute_photometric_stereo_impl(lights, images):\n # G= np.empty([9,1])\n # for i,image in enumerate(images):\n # np.insert(G,lights[i] * image, axis=1)\n \n # images = np.array([[[coord for coord in xk] for xk in xj] for xj in xi], ndmin=3)\n # images = np.array(images).reshape(3,9)\n # g1=np.linalg.inv(np.matmul(np.transpose(lights), lights))\n # g2= np.transpose(lights) * images\n\n # G = np.matmul(g1,g2)\n # print(G.shape())\n\n # albedo = np.linalg.norm(G)\n # normals = G/albedo\n\n # return albedo, normals\n\n base_image = images[0]\n n = len(images)\n image_shape = base_image.shape\n h, w, c = image_shape\n\n I = np.array(images).reshape(n, h * w * c)\n l_inv = np.linalg.inv(np.dot(lights.T, lights))\n\n l_t_l = np.dot(l_inv, lights.T)\n\n G = np.dot(l_t_l, I)\n\n G_channels = np.reshape(G.T,(h, w, c, 3))\n albedos = np.linalg.norm(G_channels, axis = 3)\n\n G_grey = np.mean(G_channels, axis=2)\n norm_of_albedos = np.linalg.norm(G_grey, axis = 2)\n\n threshold = 1e-7\n normals = G_grey/np.maximum(threshold, norm_of_albedos[:,:,np.newaxis])\n normals[norm_of_albedos < threshold] = 0\n \n return albedos, normals\n\n #taking array of images with corresponding light directions, and computing map of combined images\n # two different ways, albedo way and normal way\n # red is right direction (+x)\n # green is left direction (+y)\n # blue is normal pointing out of screen (+z)\n #kd is norm of G(square root of sum)\n # combining image and lighting arrays\n #light x image array which gets you G and then decompose to albedo of normals\n # inverse (L transpose * L) \n # multiply ^ by ( l transpose times images)\n # that's G\n # take np.linalg.norm(G) => albedos\n # divide G by ^ to get normals\n # ", "title": "" }, { "docid": "ae1349b7e2b5ebddd3c021f85965d96f", "score": "0.5607941", "text": "def FaceDetectionNew_saveInterestingFaces( faceDetectionNewInfos, img, strDestPath, rScale = 1. 
):\n import cv2\n import numeric\n print( \"FaceDetectionNew_saveInterestingFaces: %s\" % faceDetectionNewInfos );\n for object in faceDetectionNewInfos.objects:\n vertices = object.faceInfo.vertices;\n if( abs( vertices[0][0] ) > 2000 or abs( vertices[2][1] ) > 2000 ): # a stange bug: once the vertices were :[[-540508416, 1825055232], [-1352820352, 1592128512], [-307581664, 1012743168], [-1119893632, 779816448]]\n continue;\n if( object.smile[1] > 0.05 or True ):\n # if the smile in unsure then the face is blurred or ...\n if( object.smile[0] < 0.5 ):\n # we want only neutral face\n xt, yt, xb, yb = numeric.findBoundingBox( vertices );\n rSurroundingBonusX = 0.03*(xb-xt);\n rSurroundingBonusY = 0.30*(yb-yt);\n xt = int(xt-rSurroundingBonusX)*rScale;\n yt = int(yt-rSurroundingBonusY*0.75)*rScale;\n xb = int(xb+rSurroundingBonusX)*rScale;\n yb = int(yb+rSurroundingBonusY*0.25)*rScale;\n print( \"bb: %s, %s, %s, %s\" % (xt, yt, xb, yb ));\n cropped = img[yt:yb, xt:xb];\n strFilenameImageDst = strDestPath + (\"%05d\" % object.faceInfo.id) + \"_\" + filetools.getFilenameFromTime() + \".jpg\";\n print( \"INF: FaceDetectionNew_saveInterestingFaces: saving face to %s\" % strFilenameImageDst );\n bRet = cv2.imwrite( strFilenameImageDst, cropped, [int(cv2.IMWRITE_JPEG_QUALITY), 100] );", "title": "" }, { "docid": "6b6f8af9149a44d7ba8bcefda5242fc5", "score": "0.5598042", "text": "def caption_image_beam_search_awes(encoder, decoder, image_path, word_map, beam_size=3,all_captions = False):\n\n k = beam_size\n vocab_size = len(word_map)\n\n # Read image and process\n img = imread(image_path)\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate([img, img, img], axis=2)\n img = imresize(img, (256, 256))\n img = img.transpose(2, 0, 1)\n img = img / 255.\n img = torch.FloatTensor(img).to(device)\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n transform = transforms.Compose([normalize])\n image = transform(img) # (3, 256, 256)\n\n # Encode\n image = image.unsqueeze(0) # (1, 3, 256, 256)\n\n\n\n encoder_out = encoder(image) # (1, enc_image_size, enc_image_size, encoder_dim)\n enc_image_size = encoder_out.size(1)\n encoder_dim = encoder_out.size(3)\n\n # Flatten encoding\n encoder_out = encoder_out.view(1, -1, encoder_dim) # (1, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n\n # We'll treat the problem as having a batch size of k\n encoder_out = encoder_out.expand(k, num_pixels, encoder_dim) # (k, num_pixels, encoder_dim)\n\n # Tensor to store top k previous words at each step; now they're just <start>\n k_prev_words = torch.LongTensor([[word_map['<start>']]] * k).to(device) # (k, 1)\n\n # Tensor to store top k sequences; now they're just <start>\n seqs = k_prev_words # (k, 1)\n\n # Tensor to store top k sequences' scores; now they're just 0\n top_k_scores = torch.zeros(k, 1).to(device) # (k, 1)\n\n # Tensor to store top k sequences' alphas; now they're just 1s\n seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device) # (k, 1, enc_image_size, enc_image_size)\n\n #LAST LAYER HAS 2048 CHANNELS\n seqs_awe = torch.ones(k, 1, 2048).to(device)\n # Lists to store completed sequences, their alphas and scores\n complete_seqs = list()\n complete_seqs_alpha = list()\n complete_seqs_awe = list()\n complete_seqs_scores = list()\n\n # Start decoding\n step = 1\n h, c = decoder.init_hidden_state(encoder_out)\n\n # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>\n 
while True:\n\n embeddings = decoder.embedding(k_prev_words).squeeze(1) # ( s, embed_dim)\n\n awe, alpha = decoder.attention(encoder_out, h) # (s, encoder_dim), (s, num_pixels)\n\n alpha = alpha.view(-1, enc_image_size, enc_image_size) # (s, enc_image_size, enc_image_size)\n\n gate = decoder.sigmoid(decoder.f_beta(h)) # gating scalar, (s, encoder_dim)\n\n awe = gate * awe\n # print(awe[0])\n # print(np.linalg.norm(awe[0].detach()))\n # print(np.linalg.norm(awe[0].detach(),1))\n h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c)) # (s, decoder_dim)\n\n scores = decoder.fc(h) # (s, vocab_size)\n scores = F.log_softmax(scores, dim=1)\n\n # Add\n scores = top_k_scores.expand_as(scores) + scores # (s, vocab_size)\n\n # For the first step, all k points will have the same scores (since same k previous words, h, c)\n if step == 1:\n top_k_scores, top_k_words = scores[0].topk(k, 0, True, True) # (s)\n else:\n # Unroll and find top scores, and their unrolled indices\n top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True) # (s)\n\n # Convert unrolled indices to actual indices of scores\n prev_word_inds = top_k_words / vocab_size # (s)\n next_word_inds = top_k_words % vocab_size # (s)\n\n # Add new words to sequences, alphas\n seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1) # (s, step+1)\n seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],\n dim=1) # (s, step+1, enc_image_size, enc_image_size)\n\n seqs_awe = torch.cat([seqs_awe[prev_word_inds], awe[prev_word_inds].unsqueeze(1)],dim=1) # (s, step+1, enc_image_size, enc_image_size)\n # Which sequences are incomplete (didn't reach <end>)?\n incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if\n next_word != word_map['<end>']]\n complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))\n\n # Set aside complete sequences\n if len(complete_inds) > 0:\n complete_seqs.extend(seqs[complete_inds].tolist())\n complete_seqs_awe.extend(seqs_awe[complete_inds].tolist())\n complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())\n complete_seqs_scores.extend(top_k_scores[complete_inds])\n k -= len(complete_inds) # reduce beam length accordingly\n\n # Proceed with incomplete sequences\n if k == 0:\n break\n seqs = seqs[incomplete_inds]\n seqs_alpha = seqs_alpha[incomplete_inds]\n h = h[prev_word_inds[incomplete_inds]]\n c = c[prev_word_inds[incomplete_inds]]\n encoder_out = encoder_out[prev_word_inds[incomplete_inds]]\n top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)\n k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)\n\n # Break if things have been going on too long\n if step > 50:\n break\n step += 1\n\n i = complete_seqs_scores.index(max(complete_seqs_scores))\n seq = complete_seqs[i]\n # print(\"scores:\",complete_seqs_scores)\n # print(sorted(range(len(complete_seqs_scores)),key = lambda x: complete_seqs_scores[x]))\n # print(complete_seqs)\n # print(seq)\n # input(\"Press Enter to continue...\")\n alphas = complete_seqs_alpha[i]\n awes = complete_seqs_awe[i]\n normalize = False\n if normalize:\n normalized_awes = [[x*len(awe) / sum(awe) for x in awe] for awe in awes]\n else:\n normalized_awes = awes\n if all_captions:\n top_captions = sorted(range(len(complete_seqs_scores)),key = lambda x: complete_seqs_scores[x],reverse=True)\n sorted_seqs = [complete_seqs[ind] for ind in top_captions]\n return seq, alphas, normalized_awes, sorted_seqs\n return seq, alphas, normalized_awes", "title": "" }, { 
"docid": "1b10a47ae86505a9e4beac4c9fbfdaa8", "score": "0.5591888", "text": "def FaceDetection_drawResultsOnImage( faceDetectionInfo, strFilenameImageSrc, strFilenameImageDst = None ):\n if( strFilenameImageDst == None ):\n strFilenameImageDst = strFilenameImageSrc;\n import cv2\n img = cv2.imread( strFilenameImageSrc );\n if( img == None ):\n print( \"ERR: FaceDetection_drawResultsOnImage: can't open src image file: '%s'\" % strFilenameImageSrc );\n return False;\n w = img.shape[1];\n h = img.shape[0];\n print( \"w: %s\" % w );\n print( \"h: %s\" % h );\n #~ nThickness = 2;\n #~ font = cv2.initFont( cv2.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, nThickness, 8 ); \n for object in faceDetectionInfo.objects:\n shape = object.aShapeInfo[1:];\n print( \"shape: %s\" % shape );\n center = arraytools.convertAngleToImagePixels(shape[0],shape[1],w,h)\n print( \"center: %s\" % str(center) );\n size = arraytools.convertSizeToImagePixels(shape[2],shape[3],w,h)\n print( \"size: %s\" % str(size) );\n pt1 = ( center[0]-size[0]/2, center[1]-size[1] ); # tuple(map(lambda x, y: x + y, pt1, size))\n pt2 = ( center[0]+size[0]/2, center[1]+size[1] );\n cv2.rectangle( img, pt1,pt2, (255,0,0) ); \n if( len(object.aExtraInfos.strFaceLabel) > 0 ):\n strText = object.aExtraInfos.strFaceLabel.split(\"__\")[0];\n strText += \"%6.2f\" % object.aExtraInfos.rScoreReco;\n textSize = cv2.getTextSize( strText, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1 );\n print( str( textSize ) );\n colorText = (255,255,0);\n if( object.aExtraInfos.rScoreReco < 0.30):\n colorText = (80,80,0);\n if( object.aExtraInfos.rScoreReco < 0.20):\n colorText = (40,40,40); \n cv2.putText( img, strText, ((pt1[0]+pt2[0])/2-(textSize[0][0]/2), pt2[1]-4), cv2.FONT_HERSHEY_SIMPLEX, 0.5, colorText );\n \n drawPoints( img, object.aExtraInfos.aLeftEyePoints, (255,0,0) );\n drawPoints( img, object.aExtraInfos.aRightEyePoints, (255,128,0) );\n drawPoints( img, object.aExtraInfos.aNosePoints, (0,255,0) );\n drawPoints( img, object.aExtraInfos.aMouthPoints, (0,0,255) );\n bRet = cv2.imwrite( strFilenameImageDst, img );\n if( not bRet ):\n print( \"ERR: FaceDetection_drawResultsOnImage: can't open dest image file: '%s'\" % strFilenameImageDst ); \n return False;\n return True;", "title": "" }, { "docid": "1efe4dc6f3c2e3af71bcebe8a4107738", "score": "0.5590591", "text": "def AlignIm(path_to_FOV, path_to_masks=[], preprocess=False, diameter=None, templateID=0 ,iterNum=100, method='sift'):\r\n files=get_file_names(path_to_FOV)\r\n generate_summary(templateID, files)\r\n imgs=[]\r\n if preprocess==True:\r\n imgs = Image_enhance_contrast(files)\r\n else:\r\n imgs = [skimage.io.imread(f) for f in files]\r\n\r\n nimg = len(imgs)\r\n\r\n if path_to_masks == []:\r\n model = models.Cellpose(gpu=False, model_type='cyto')\r\n channels = []\r\n for idx in range(nimg):\r\n channels.append([0,0])\r\n\r\n if diameter==None:\r\n masks, flows, styles, diams = model.eval(imgs, diameter=None, channels=channels)\r\n else:\r\n masks, flows, styles, diams = model.eval(imgs, diameter=diameter, channels=channels)\r\n\r\n ROIs_mask = generate_ROIs_mask(masks, imgs)\r\n else:\r\n ROI_files=get_file_names(path_to_masks)\r\n ROIs_mask = [skimage.io.imread(f) for f in ROI_files]\r\n\r\n\r\n if not (os.path.exists(path_to_FOV+'/ROIs_mask/')):\r\n os.makedirs(path_to_FOV+'/ROIs_mask/')\r\n for i in range(len(files)):\r\n skimage.io.imsave(path_to_FOV+'/ROIs_mask/' + os.path.split(files[i])[-1], ROIs_mask[i])\r\n\r\n Template = imgs[templateID] # FOV_template\r\n Template = cv.normalize(Template, Template, 0, 
255, cv.NORM_MINMAX)\r\n Template_ROI = ROIs_mask[templateID]\r\n\r\n Tmatrices=[]\r\n regImages=[]\r\n regROIs=[]\r\n\r\n if method=='akaze':\r\n print('A'+ method.upper() + ' is running')\r\n for j in range(len(imgs)):\r\n if j != templateID:\r\n print('registering ' + os.path.split(files[j])[-1])\r\n Regimage = imgs[j]\r\n Regimage = cv.normalize(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\r\n Regimage_ROI = ROIs_mask[j]\r\n T_matrix, regIm, regROI= Apply_affine_methods(Template, Template_ROI, Regimage, Regimage_ROI, iterNum, 'akaze')\r\n Tmatrices.append(T_matrix)\r\n regImages.append(regIm)\r\n regROIs.append(regROI)\r\n\r\n output_results(path_to_FOV, files, templateID, Template, Template_ROI, Tmatrices, regImages, regROIs, 'akaze')\r\n return Tmatrices, regImages, regROIs\r\n\r\n elif method=='sift':\r\n print('A'+ method.upper() + ' is running')\r\n for j in range(len(imgs)):\r\n if j != templateID:\r\n print('registering ' + os.path.split(files[j])[-1])\r\n Regimage = imgs[j]\r\n Regimage = cv.normalize(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\r\n Regimage_ROI = ROIs_mask[j]\r\n T_matrix, regIm, regROI= Apply_affine_methods(Template, Template_ROI, Regimage, Regimage_ROI, iterNum, 'sift')\r\n Tmatrices.append(T_matrix)\r\n regImages.append(regIm)\r\n regROIs.append(regROI)\r\n\r\n output_results(path_to_FOV, files, templateID, Template, Template_ROI, Tmatrices, regImages, regROIs, 'sift')\r\n return Tmatrices, regImages, regROIs\r\n\r\n elif method=='surf':\r\n print('A'+ method.upper() + ' is running')\r\n for j in range(len(imgs)):\r\n if j != templateID:\r\n print('registering ' + os.path.split(files[j])[-1])\r\n Regimage = imgs[j]\r\n Regimage = cv.normalize(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\r\n Regimage_ROI = ROIs_mask[j]\r\n T_matrix, regIm, regROI= Apply_affine_methods(Template, Template_ROI, Regimage, Regimage_ROI, iterNum, 'surf')\r\n Tmatrices.append(T_matrix)\r\n regImages.append(regIm)\r\n regROIs.append(regROI)\r\n\r\n output_results(path_to_FOV, files, templateID, Template, Template_ROI, Tmatrices, regImages, regROIs, 'surf')\r\n return Tmatrices, regImages, regROIs\r\n\r\n elif method=='brisk':\r\n print('A'+ method.upper() + ' is running')\r\n for j in range(len(imgs)):\r\n if j != templateID:\r\n print('registering ' + os.path.split(files[j])[-1])\r\n Regimage = imgs[j]\r\n Regimage = cv.normalize(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\r\n Regimage_ROI = ROIs_mask[j]\r\n T_matrix, regIm, regROI= Apply_affine_methods(Template, Template_ROI, Regimage, Regimage_ROI, iterNum, 'brisk')\r\n Tmatrices.append(T_matrix)\r\n regImages.append(regIm)\r\n regROIs.append(regROI)\r\n\r\n output_results(path_to_FOV, files, templateID, Template, Template_ROI, Tmatrices, regImages, regROIs, 'brisk')\r\n return Tmatrices, regImages, regROIs\r\n\r\n elif method=='orb':\r\n print('A'+ method.upper() + ' is running')\r\n for j in range(len(imgs)):\r\n if j != templateID:\r\n print('registering ' + os.path.split(files[j])[-1])\r\n Regimage = imgs[j]\r\n Regimage = cv.normalize(Regimage, Regimage, 0, 255, cv.NORM_MINMAX)\r\n Regimage_ROI = ROIs_mask[j]\r\n T_matrix, regIm, regROI= Apply_affine_methods(Template, Template_ROI, Regimage, Regimage_ROI, iterNum, 'orb')\r\n Tmatrices.append(T_matrix)\r\n regImages.append(regIm)\r\n regROIs.append(regROI)\r\n\r\n output_results(path_to_FOV, files, templateID, Template, Template_ROI, Tmatrices, regImages, regROIs, 'orb')\r\n return Tmatrices, regImages, regROIs", "title": "" }, { "docid": "d931c2a7c4ff94305d6e3e7d89d50fe7", 
"score": "0.5586063", "text": "def sample_homography(image_shape, perspective=True, scaling=True, rotation=True, translation=True,\n n_scales=5, n_angles=25, scaling_amplitude=0.1, perspective_amplitude_x=0.1,\n perspective_amplitude_y=0.1, patch_ratio=0.5, max_angle=np.pi/2,\n allow_artifacts=True, translation_overflow=0.1):\n\n def transform_perspective(points):\n t_min, t_max = -points.min(axis=0), 1.0-points.max(axis=0)\n t_max[1] = min(abs(t_min[1]), abs(t_max[1]))\n t_min[1] = -t_max[1]\n if not allow_artifacts:\n perspective_amplitude_min = np.maximum(np.array([-perspective_amplitude_x,-perspective_amplitude_y]), t_min)\n perspective_amplitude_max = np.minimum(np.array([perspective_amplitude_x,perspective_amplitude_y]), t_max)\n else:\n perspective_amplitude_min = np.array([-perspective_amplitude_x,-perspective_amplitude_y])\n perspective_amplitude_max = np.array([perspective_amplitude_x,perspective_amplitude_y])\n\n perspective_displacement = np.random.uniform(perspective_amplitude_min[1], perspective_amplitude_max[1])\n h_displacement_left = np.random.uniform(perspective_amplitude_min[0], perspective_amplitude_max[0])\n h_displacement_right = np.random.uniform(perspective_amplitude_min[0], perspective_amplitude_max[0])\n\n tmp = points.copy()\n points += np.array([[h_displacement_left, perspective_displacement],\n [h_displacement_left, -perspective_displacement],\n [h_displacement_right, perspective_displacement],\n [h_displacement_right, -perspective_displacement]])\n\n return points\n\n def transform_scale(points):\n scales = np.random.uniform(-scaling_amplitude, scaling_amplitude, n_scales) + 1.0\n center = points.mean(axis=0)\n scaled = np.expand_dims(points - center, 0) * np.expand_dims(np.expand_dims(scales, 1), 1) + center\n\n if allow_artifacts:\n valid = np.arange(n_scales) # all scales are valid except scale=1\n else:\n valid = []\n for i in range(n_scales):\n if scaled[i,...].max() < 1.0 and scaled[i,...].min() >= 0.0:\n valid.append(i)\n\n if valid is not None:\n idx = np.random.choice(valid)\n points = scaled[idx]\n else:\n print('sample_homography: No valid scale found')\n\n return points\n\n def transform_translation(points):\n t_min, t_max = -points.min(axis=0), 1.0-points.max(axis=0)\n if allow_artifacts:\n t_min -= translation_overflow\n t_max += translation_overflow\n points += np.array([np.random.uniform(t_min[0], t_max[0]),\n np.random.uniform(t_min[1], t_max[1])])\n\n return points\n\n def transform_rotation(points):\n angles = np.random.uniform(-max_angle, max_angle, n_angles)\n angles = np.append(angles, 0) # in case no rotation is valid\n center = points.mean(axis=0)\n rot_mat = np.reshape(np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)], axis=1), [-1, 2, 2])\n rotated = np.matmul(np.tile(np.expand_dims(points - center, axis=0), [n_angles+1, 1, 1]), rot_mat) + center\n if allow_artifacts:\n valid = np.arange(n_angles) # all angles are valid, except angle=0\n else:\n valid = []\n for i in range(len(angles)):\n if rotated[i,...].max() < 1.0 and rotated[i,...].min() >= 0.0:\n valid.append(i)\n\n idx = np.random.choice(valid)\n points = rotated[idx]\n\n return points\n\n # Corners of the input image\n pts1 = np.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.]])\n\n # Corners of the output patch\n margin = (1 - patch_ratio) * 0.5\n pts2 = margin + patch_ratio * pts1\n\n # Random perspective and affine perturbations\n functions = []\n if perspective:\n functions.append(transform_perspective)\n\n # Random scaling\n if scaling:\n 
functions.append(transform_scale)\n\n # Random translation\n if translation:\n functions.append(transform_translation)\n\n # Random rotation\n # sample several rotations, check collision with borders, randomly pick a valid one\n if rotation:\n functions.append(transform_rotation)\n\n indices = np.arange(len(functions))\n np.random.shuffle(indices)\n\n for i in range(len(functions)):\n idx = indices[i]\n pts2 = functions[idx](pts2)\n\n # Rescale to actual size\n shape = image_shape[::-1] # different convention [y, x]\n pts1 *= shape\n pts2 *= shape\n\n homography = cv2.getPerspectiveTransform(pts1.astype(np.float32), pts2.astype(np.float32))\n return homography", "title": "" }, { "docid": "56a5c0fcdd76a233758120ef904cdcd9", "score": "0.55799705", "text": "def detect_faces_on_image(instance):\n\n if instance.input_image:\n\n classifiers = prepare_classifiers()\n\n with open(instance.input_image.path, 'rb') as img_source:\n img = cv2.imdecode(np.frombuffer(img_source.read(), np.uint8), -1)\n\n face_cascade = cv2.CascadeClassifier(classifiers.get('frontal_face_default', None))\n\n try:\n image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = face_cascade.detectMultiScale(\n image,\n scaleFactor=1.3,\n minNeighbors=5,\n minSize=(30, 30),\n flags=cv2.cv.CV_HAAR_SCALE_IMAGE\n )\n\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n number_of_faces = len(faces) if len(faces) > 0 else u'No faces found!'\n\n instance.processing_output_info = u'Faces found: {number_of_faces}'.format(\n number_of_faces=number_of_faces,\n )\n\n image_file_name = instance.input_image.path.split('/')[-1]\n output_image_file = ContentFile(image)\n\n instance.output_image.save(\n image_file_name,\n output_image_file,\n save=True\n )\n\n cv2.imwrite(os.path.join(instance.output_image.path), image)\n\n instance.save()\n\n except IOError as io:\n print('Error --> {}'.format(io))", "title": "" }, { "docid": "1bc59fb2c6183ea847b49d5ea43bd2fd", "score": "0.5576503", "text": "def morph_imgs(self, set=[], labels=[]):\r\n print('Morphing images...')\r\n out_set = []\r\n out_labels = []\r\n for i in range(0, len(set)):\r\n out_set.append(set[i])\r\n out_labels.append(labels[i])\r\n\r\n img_dilated = dilation(set[i])\r\n out_set.append(img_dilated)\r\n out_labels.append(labels[i])\r\n\r\n img_closed = closing(set[i])\r\n out_set.append(img_closed)\r\n out_labels.append(labels[i])\r\n\r\n img_eroded = erosion(set[i])\r\n out_set.append(img_eroded)\r\n out_labels.append(labels[i])\r\n\r\n img_opened = opening(set[i])\r\n out_set.append(img_opened)\r\n out_labels.append(labels[i])\r\n return np.asarray(out_set), np.asarray(out_labels)", "title": "" }, { "docid": "4672c7d9b71a5a06035bd4fd28ec3c50", "score": "0.5571499", "text": "def mkimage(filename, objs, names, bgs, species, maxobjs, output_dir=\"images_out\", mask_dir=\"mask_out\", single=False):\n log = []\n im = bgs[random.randint(0,len(bgs)-1)].copy()\n # print('bg size='+str(im.size))\n cls0 = random.randint(0,len(objs)-1)\n n_obj = random.randint(1,maxobjs)\n img_list = []\n for c in range(0, n_obj):\n \n if single: cls=cls0\n else: cls = random.randint(0,len(objs)-1)\n obj = random.choice(objs[cls]) \n imx,imy = im.size\n \n # first paste to zero background, or the moment function\n # will return the wrong values\n black_bkg = Image.new('L', obj.size)\n black_bkg.paste(obj,(0,0),obj)\n \n \n \n # horizontally align objects so scaling will be appropriate\n degrees = orientation(moments2e(black_bkg)) \n obj = 
obj.rotate(-1*degrees,expand=True)\n \n # we probably padded it with a lot of black that will interfere with\n # so we need to get rid of this\n bbox = obj.getbbox()\n obj = obj.crop(bbox)\n \n \n sizex,sizey = obj.size\n #print(obj.size)\n if(sizey > sizex):\n #print('taller than wide, rotating')\n obj = obj.rotate(-90,expand=True)\n \n # get size limitations from species dictionary\n mean_size= species[names[cls]][1][0]\n max_size= species[names[cls]][1][1]\n #print (mean_size, max_size)\n obj, distance = scale_obj(obj, mean_size, max_size) # scale to random size\n obj = rot_obj(obj) # rotate to random angle\n sizex,sizey = obj.size # update size after rotation\n obj = flip_obj(obj) # sometimes flip image\n \n imx,imy = im.size\n \n # allow object to be outside horizontally, but must be less than gap value from top and bottom\n top_gap = 25\n bottom_gap = 25\n \n posx = random.randint(-floor(sizex/2),imx-floor(sizex/2))\n \n posy = random.randint(-floor(sizey/2),imy-floor(sizey/2)) \n \n #posy = random.randint(-top_gap,imy-sizey+bottom_gap)\n #im.paste(obj,(posx,posy),obj) # need to do this in the right order.\n mask = get_mask(obj)\n img_list.append([obj, (posx,posy), distance, cls, mask,(sizex,sizey)])\n \n # we need to do this in order as well, to get a mapping btw index and image\n # log = log + ['{},{},{},{},{},{}\\n'.format(names[cls],cls,posy,posx,posy+sizey,posx+sizex)]\n \n # sort images from farthest to closest\n img_list.sort(key=lambda tup: tup[2], reverse=True)\n \n # paste in the correct order, farthest first\n \n \n for idx, item in enumerate(img_list):\n log = log + ['{},{},{},{},{},{}\\n'.format(names[item[3]],item[3],item[1][1],item[1][0],item[1][1]+item[5][1],item[1][0] + item[5][0])]\n im.paste(item[0], item[1], item[0]) \n \n #start with the full mask\n mask_im = Image.new('1', im.size)\n mask_im.paste(item[4], item[1], item[4]) \n \n #print(\"base mask of object no \" + str(idx))\n #display(mask_im.resize(((int)(mask_im.size[0]/4), (int)(mask_im.size[1]/4))))\n \n # If we're not the foremost image we will produce an occlusion mask from the closer images \n \n if ((idx+1)<len(img_list)):\n higher_mask = Image.new('1', im.size)\n for item in range(idx+1, len(img_list)): \n higher_obj = img_list[item][4]\n higher_pos = img_list[item][1]\n higher_mask.paste(higher_obj, higher_pos, higher_obj) \n mask_im = ImageChops.subtract(mask_im, higher_mask)\n #print(\"occluded mask of object no \" + str(idx))\n #display(mask_im.resize(((int)(mask_im.size[0]/4), (int)(mask_im.size[1]/4))))\n #print(\"occlusion mask of object no \" + str(idx))\n #display(higher_mask.resize(((int)(higher_mask.size[0]/4), (int)(higher_mask.size[1]/4)))) \n \n # the one we'll save for this object, with occluded areas removed\n mask_im = mask_im.resize((512,512))\n mask_im.save(os.path.join(mask_dir, filename+'-'+str(idx)+'.png'))\n im = im.resize((512,512))\n im.save(os.path.join(output_dir,filename+'.png'))\n with open(os.path.join(mask_dir,filename+'.csv'),'w') as f:\n for l in log: f.write(l)", "title": "" }, { "docid": "62f8e2b6782c3a5ee6025b0fa16df47b", "score": "0.5570637", "text": "def multiperson(img, func, persons):\n\n scales = [2, 1., 0.5]\n\n height, width = img.shape[0:2]\n center = (width / 2, height / 2)\n dets, tags = None, []\n for idx, i in enumerate(scales):\n scale = max(height, width) / 200\n inp_res = int((i * 512 + 63) // 64 * 64)\n res = (inp_res, inp_res)\n\n mat_ = get_transform(center, scale, res)[:2]\n inp = cv2.warpAffine(img, mat_, res) / 255\n\n def array2dict(tmp):\n 
return {\n 'det': tmp[0][:, :, :17],\n 'tag': tmp[0][:, -1, 17:34]\n }\n\n tmp1 = array2dict(func([inp]))\n tmp2 = array2dict(func([inp[:, ::-1]]))\n\n tmp = {}\n for ii in tmp1:\n tmp[ii] = np.concatenate((tmp1[ii], tmp2[ii]), axis=0)\n\n # === tag: [2, 4, 17, res, res]\n # === det: [2, 17, res, res]\n\n det = tmp['det'][0, -1] + tmp['det'][1, -1, :, :, ::-1][flipRef]\n if det.max() > 10:\n continue\n if dets is None:\n dets = det\n mat_fw = mat_\n mat_bw = np.linalg.pinv(np.array(mat_).tolist() + [[0, 0, 1]])[:2]\n else:\n # === dets of different scales are resized to the largest res\n dets = dets + resize(det, dets.shape[1:3])\n\n if abs(i - 1) < 0.5:\n res = dets.shape[1:3]\n # === only keep tags for scale in (0.5, 1.5)\n tags += [resize(tmp['tag'][0], res), resize(tmp['tag'][1, :, :, ::-1][flipRef], res)]\n\n assert dets is not None\n assert len(tags) != 0\n\n # === tags: [17, res, res, 2]\n # === dets: [17, res, res]\n tags = np.concatenate([i[:, :, :, None] for i in tags], axis=3)\n dets = dets / len(scales) / 2\n\n dets = np.minimum(dets, 1)\n\n refined_persons = []\n for i in range(persons.shape[0]):\n tmp_person = persons[i].copy()\n if np.sum(tmp_person[:, 2] > 0) == 17:\n refined_persons.append(tmp_person)\n continue\n tmp_person[:, :2] = kpt_affine(tmp_person[:, :2], mat_fw) / 4.0\n refined_person = refine(dets, tags, tmp_person)\n refined_person[:, :2] = kpt_affine(refined_person[:, :2] * 4, mat_bw)\n refined_persons.append(refined_person)\n\n refined_persons = np.stack(refined_persons, axis=0)\n\n return refined_persons", "title": "" }, { "docid": "f11429dc92347dedf205bd2e9963073a", "score": "0.5566314", "text": "def inference(self, images):\n pass", "title": "" }, { "docid": "ea590aaff4eab73b88b0a7cf94cfd71e", "score": "0.5563466", "text": "def apply_image_transform(self):\n self.resize()\n \n self.find_primary_colors()\n self.find_borders()\n self.find_structure()\n\n self.create_painting()\n\n self.create_overlay()", "title": "" }, { "docid": "444234d583d491356d8b82913325f0c8", "score": "0.5558589", "text": "def make_faces_chunk(tri_list, mesh, materialDict):\n\n materials = mesh.materials\n if not materials:\n mat = None\n\n face_chunk = _3ds_chunk(OBJECT_FACES)\n face_list = _3ds_array()\n\n ''' FIXME\n if mesh.uv_layers:\n # Gather materials used in this mesh - mat/image pairs\n unique_mats = {}\n for i, tri in enumerate(tri_list):\n\n face_list.add(_3ds_face(tri.vertex_index))\n\n if materials:\n mat = materials[tri.mat]\n if mat:\n mat = mat.name\n\n img = tri.image\n\n try:\n context_mat_face_array = unique_mats[mat, img][1]\n except:\n name_str = mat if mat else \"None\"\n if img:\n name_str += img\n\n context_mat_face_array = _3ds_array()\n unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array\n\n context_mat_face_array.add(_3ds_ushort(i))\n # obj_material_faces[tri.mat].add(_3ds_ushort(i))\n\n face_chunk.add_variable(\"faces\", face_list)\n for mat_name, mat_faces in unique_mats.values():\n obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)\n obj_material_chunk.add_variable(\"name\", mat_name)\n obj_material_chunk.add_variable(\"face_list\", mat_faces)\n face_chunk.add_subchunk(obj_material_chunk)\n\n else:\n '''\n # else branch start\n obj_material_faces = []\n obj_material_names = []\n for m in materials:\n if m:\n obj_material_names.append(_3ds_string(sane_name(m.name)))\n obj_material_faces.append(_3ds_array())\n n_materials = len(obj_material_names)\n\n for i, tri in enumerate(tri_list):\n face_list.add(_3ds_face(tri.vertex_index))\n 
if (tri.mat < n_materials):\n obj_material_faces[tri.mat].add(_3ds_ushort(i))\n\n face_chunk.add_variable(\"faces\", face_list)\n for i in range(n_materials):\n obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)\n obj_material_chunk.add_variable(\"name\", obj_material_names[i])\n obj_material_chunk.add_variable(\"face_list\", obj_material_faces[i])\n face_chunk.add_subchunk(obj_material_chunk)\n # else branch end\n\n smooth_chunk = _3ds_chunk(OBJECT_SMOOTH)\n for i, tri in enumerate(tri_list) :\n smooth_chunk.add_variable(\"face_\" + str(i),_3ds_uint(tri.group))\n face_chunk.add_subchunk(smooth_chunk)\n\n return face_chunk", "title": "" }, { "docid": "3488e68c875d96c6a6132c47eef0fd4f", "score": "0.5557939", "text": "def gen_batch_function(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'merge', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)):path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imread(image_file)\n # image = cv.imread(image_file)\n # print(\"temp.shape\", image.shape)\n gt_image = scipy.misc.imread(gt_image_file)\n # gt_image = cv.imread(gt_image_file)\n\n image2, gt_image2 = crop_image(image, gt_image)\n image3, gt_image3 = flip_image(image, gt_image)\n\n image = scipy.misc.imresize(image, image_shape)\n # image = cv.resize(image, image_shape)\n gt_image = scipy.misc.imresize(gt_image, image_shape)\n # gt_image = cv.resize(gt_image, image_shape)\n\n image2 = scipy.misc.imresize(image2, image_shape)\n # image2 = cv.resize(image2, image_shape)\n gt_image2 = scipy.misc.imresize(gt_image2, image_shape)\n # gt_image2 = cv.resize(gt_image2, image_shape)\n\n image3 = scipy.misc.imresize(image3, image_shape)\n # image3 = cv.resize(image3, image_shape)\n gt_image3 = scipy.misc.imresize(gt_image3, image_shape)\n # gt_image3 = cv.resize(gt_image3, image_shape)\n\n contrast = random.uniform(0.85, 1.15) # Contrast augmentation\n bright = random.randint(-45, 30) # Brightness augmentation\n image = bc_img(image, contrast, bright)\n\n gt_image = process_gt_image(gt_image)\n gt_image2 = process_gt_image(gt_image2)\n gt_image3 = process_gt_image(gt_image3)\n\n images.append(image)\n gt_images.append(gt_image)\n\n images.append(image2)\n gt_images.append(gt_image2)\n\n images.append(image3)\n gt_images.append(gt_image3)\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn", "title": "" }, { "docid": "d1e17c49f07fc49e46bd046a99a502b7", "score": "0.5552038", "text": "def face_preprocess(image, landmark_model_type = 'large', addsize = 0):\r\n # detect landmarks\r\n face_landmarks_dict = detect_landmark(image_array = image, model_type = landmark_model_type)\r\n cropped_face = []\r\n transferred_landmarks = []\r\n for i, face_landmarks_item in enumerate(face_landmarks_dict):\r\n # rotate image array to align face\r\n aligned_face, eye_center, angle = align_face(image_array = image, landmarks = face_landmarks_item)\r\n rotated_landmarks = detect_landmark(image_array = aligned_face, model_type = landmark_model_type)\r\n sub_cropped_face = []\r\n sub_transferred_landmarks = 
[]\r\n for i, face_landmarks_item in enumerate(rotated_landmarks):\r\n # crop face according to landmarks\r\n cropped_face_item, left, top = corp_face(image_array = aligned_face, landmarks = rotated_landmarks[i], addsize = addsize)\r\n # transfer landmarks to fit the cropped face\r\n transferred_landmarks_item = transfer_landmark(landmarks = rotated_landmarks[i], left = left, top = top)\r\n sub_cropped_face.append(cropped_face_item)\r\n sub_transferred_landmarks.append(transferred_landmarks_item)\r\n # add to the end of list\r\n cropped_face.append(sub_cropped_face)\r\n transferred_landmarks.append(sub_transferred_landmarks)\r\n return cropped_face, transferred_landmarks", "title": "" }, { "docid": "896aec642a143c025bee2293bc56cc99", "score": "0.55502534", "text": "def preprocess_images(self, pool, content_image, style_images, content_layers, style_layers,\n tile_size=512):\n # Construct list of layers to visit during the backward pass\n layers = []\n for layer in reversed(self.layers()):\n if layer in content_layers or layer in style_layers:\n layers.append(layer)\n\n # Prepare Gram matrices from style image\n print_('Preprocessing the style image...')\n self.grams = {}\n for layer in style_layers:\n _, ch = self.layer_info(layer)\n self.grams[layer] = np.zeros((ch, ch), np.float32)\n for image in style_images:\n self.set_image(image)\n feats = self.prepare_features(pool, style_layers, tile_size)\n for layer in feats:\n axpy(1 / len(style_images), gram_matrix(feats[layer]), self.grams[layer])\n\n # Prepare feature maps from content image\n print_('Preprocessing the content image...')\n self.set_image(content_image)\n self.prepare_features(pool, content_layers, tile_size)\n\n return layers", "title": "" }, { "docid": "c9fa2be059558dbae4f0443bc1b259d5", "score": "0.5547774", "text": "def visualize_all(imgs, proc_params, all_joints, all_verts, all_cams):\n\n skele = imgs[0]\n overlay = None\n mesh = None\n view1 = None\n view2 = None\n\n for index in range(len(imgs)):\n cam_for_render, vert_shifted, joints_orig = vis_util.get_original(\n proc_params[index], all_verts[index][0], all_cams[index][0],\n all_joints[index][0], img_size=imgs[index].shape[:2])\n\n if index == 0:\n skele = vis_util.draw_skeleton(imgs[index], joints_orig)\n print(skele.shape)\n overlay = renderer(\n vert_shifted, cam=cam_for_render, img=imgs[index], do_alpha=False)\n\t print(overlay.shape)\n mesh = renderer(\n vert_shifted, cam=cam_for_render, img_size=imgs[index].shape[:2])\n\t print(mesh.shape)\n view1 = renderer.rotated(\n vert_shifted, 60, cam=cam_for_render, img_size=imgs[index].shape[:2])\n\t print(view1.shape)\n view2 = renderer.rotated(\n vert_shifted, -60, cam=cam_for_render, img_size=imgs[index].shape[:2])\n\t print(view2.shape)\n else:\n skele = vis_util.draw_skeleton(skele, joints_orig)\n overlay = renderer(\n vert_shifted, cam=cam_for_render, img=overlay, do_alpha=False)\n mesh = renderer(\n vert_shifted, cam=cam_for_render, img=mesh, img_size=imgs[index].shape[:2], do_alpha=False)\n view1 = renderer.rotated(\n vert_shifted, 60, cam=cam_for_render, img=view1, img_size=imgs[index].shape[:2], do_alpha=False)\n view2 = renderer.rotated(\n vert_shifted, -60, cam=cam_for_render, img=view2, img_size=imgs[index].shape[:2], do_alpha=False)\n\n import matplotlib.pyplot as plt\n # plt.ion()\n plt.figure(1)\n plt.clf()\n plt.subplot(231)\n plt.imshow(imgs[0])\n plt.title('input')\n plt.axis('off')\n plt.subplot(232)\n plt.imshow(skele)\n plt.title('joint projection')\n plt.axis('off')\n plt.subplot(233)\n 
plt.imshow(overlay)\n plt.title('3D Mesh overlay')\n plt.axis('off')\n plt.subplot(234)\n plt.imshow(mesh)\n plt.title('3D mesh')\n plt.axis('off')\n plt.subplot(235)\n plt.imshow(view1)\n plt.title('diff vp')\n plt.axis('off')\n plt.subplot(236)\n plt.imshow(view2)\n plt.title('diff vp')\n plt.axis('off')\n plt.draw()\n plt.show()\n # import ipdb\n # ipdb.set_trace()", "title": "" }, { "docid": "a717cdb791b040b1d9c871f48175e24e", "score": "0.55222905", "text": "def _construct_imag_csv(gen_path, subj_names, task_names, face_cascade = face_cascade):\n pic_subj = []\n pic_task = []\n pic_num = []\n result_arr = None\n for i, subj in enumerate(subj_names):\n \n for j, task in enumerate(task_names[i]):\n #print(gen_path+subj+\"\\\\\"+task+\"\\\\\"+\"*.jpg\")\n curr_jpgs = glob.glob(gen_path+subj+\"\\\\\"+task+\"\\\\\"+\"*.jpg\")\n valid_jpgs = []\n for img_file in curr_jpgs:\n img = Image.open(img_file)\n img = np.array(img)\n grayscale_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n detected_faces = face_cascade.detectMultiScale(grayscale_image)\n if len(detected_faces) > 0:\n valid_jpgs.append(img_file)\n #face_landmark = cv2.face.createFacemarkLBF()\n #face_landmark.loadModel('F:/lbfmodel.yaml.txt')\n\n pic_subj += [subj]*len(valid_jpgs)\n pic_task += [task]*len(valid_jpgs)\n pic_num += [i.split(\"\\\\\")[-1].split(\".\")[0] for i in valid_jpgs]\n \n temp_df1 = pd.DataFrame(list(zip(pic_subj, pic_task, pic_num)), \n columns =['Subject', 'Task', 'Number'])\n \n temp_AU1 = pd.read_csv(gen_path + subj+\"_\"+\"label\\\\\" + subj + \"_\" + task + \".csv\")\n temp_AU1.rename(columns={'1':'Number','1.1':'1'}, inplace=True)\n temp_AU1 = temp_AU1.astype({'Number':'str'})\n result = pd.merge(temp_df1, temp_AU1, how = 'inner', on = \"Number\")\n \n if result_arr is None:\n result_arr = result\n else:\n result_arr = pd.concat([result_arr, result], axis=0)\n\n pic_subj = [item for sublist in pic_subj for item in sublist]\n pic_task = [item for sublist in pic_task for item in sublist]\n pic_num = [item for sublist in pic_num for item in sublist]\n \n return(result_arr)", "title": "" }, { "docid": "1845fa88cda31e78b9af8d2c02102397", "score": "0.55153793", "text": "def detect(self, img):\n\n if isinstance(img, str):\n if os.path.exists(img):\n img_name = os.path.basename(img)\n img = Image.open(img)\n else:\n raise FileNotFoundError(\"2\",img)\n elif isinstance(img, np.ndarray):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img)\n elif isinstance(img, Image.Image):\n pass\n\n w, h = img.size\n min_length = min(h, w)\n min_detection_size = 12\n scale_factor = 0.709 #not sure why its .709\n scales = []\n m = min_detection_size/self.min_face_size\n min_length *= m\n factor_count = 0\n \n while min_length > min_detection_size:\n scales += [m * np.power(scale_factor,factor_count)]\n min_length *= scale_factor\n factor_count += 1\n\n ################## Stage 1 #############################\n\n bounding_boxes = []\n\n for s in scales:\n boxes = first_stage(img, s, self.pnet, self.nms_thresh[0])\n bounding_boxes.append(boxes) \n #bounding_boxes has shape [n_scales, n_boxes, 9]\n \n #remove those scales for which bounding boxes were none\n bounding_boxes = [i for i in bounding_boxes if i is not None]\n\n #Add all the boxes for each scale \n if len(bounding_boxes)==0:\n return bounding_boxes\n \n bounding_boxes = np.vstack(bounding_boxes) # returns array of shape [n_boxes, 9]\n\n \n #------------------------- Stage 2 -------------------------------------\n \n img_box = 
get_image_boxes(bounding_boxes, img, size=24) \n img_box = torch.tensor(img_box, dtype=torch.float32, requires_grad=False)\n\n probs, boxes = self.rnet(img_box)\n\n probs = probs.data.numpy() #Shape [boxes, 2]\n boxes = boxes.data.numpy() #Shape [boxes, 4]\n \n ind = np.where(probs[:, 1] >= self.conf_thresh[1])[0]\n\n bounding_boxes = bounding_boxes[ind]\n bounding_boxes[:, 4] = probs[ind, 1].reshape((-1,))\n boxes = boxes[ind]\n \n keep = nms(bounding_boxes, self.nms_thresh[1], mode=\"union\")\n bounding_boxes = bounding_boxes[keep]\n boxes = boxes[keep]\n \n bounding_boxes = calibrate_boxes(bounding_boxes, boxes)\n bounding_boxes = convert_to_square(bounding_boxes)\n bounding_boxes[:, 0:4] = np.round(bounding_boxes[:, 0:4])\n \n #--------------------STAGE 3-------------------------------------------------\n\n img_box = get_image_boxes(bounding_boxes, img, size=48)\n \n if len(img_box) == 0:\n return [], []\n \n img_box = torch.tensor(img_box, dtype=torch.float32, requires_grad=False)\n probs, boxes, landmarks = self.onet(img_box)\n\n probs = probs.data.numpy()\n boxes = boxes.data.numpy()\n landmarks = landmarks.data.numpy()\n\n\n keep = np.where(probs[:,1] > self.conf_thresh[2])[0]\n\n bounding_boxes = bounding_boxes[keep]\n bounding_boxes[:, 4] = probs[keep, 1].reshape((-1,))\n boxes = boxes[keep]\n landmarks = landmarks[keep]\n \n bounding_boxes = calibrate_boxes(bounding_boxes, boxes)\n \n\n keep = nms(bounding_boxes, overlap_thresh=self.nms_thresh[2], mode=\"min\")\n bounding_boxes = bounding_boxes[keep]\n bounding_boxes = convert_to_square(bounding_boxes)\n\n\n return bounding_boxes", "title": "" }, { "docid": "907dd8dff3e3bc008b4313b92dd62f7c", "score": "0.5506991", "text": "def to_automorphism(self):\n if not self.is_invertible():\n raise ValueError(\"the morphism is not invertible\")\n return FreeGroupAutomorphism(dict(\n (a, self.image(a))\n for a in self.domain().alphabet().positive_letters()))\n #,domain=self.domain())", "title": "" }, { "docid": "6d6ee6edb005fd9e5ad946c6eae7607b", "score": "0.55000144", "text": "def extract_faces(\n img,\n target_size=(224, 224),\n detector_backend=\"opencv\",\n grayscale=False,\n enforce_detection=True,\n align=True,\n):\n\n # this is going to store a list of img itself (numpy), it region and confidence\n extracted_faces = []\n\n # img might be path, base64 or numpy array. Convert it to numpy whatever it is.\n img = load_image(img)\n img_region = [0, 0, img.shape[1], img.shape[0]]\n\n if detector_backend == \"skip\":\n face_objs = [(img, img_region, 0)]\n else:\n face_detector = FaceDetector.build_model(detector_backend)\n face_objs = FaceDetector.detect_faces(face_detector, detector_backend, img, align)\n\n # in case of no face found\n if len(face_objs) == 0 and enforce_detection is True:\n raise ValueError(\n \"Face could not be detected. 
Please confirm that the picture is a face photo \"\n + \"or consider to set enforce_detection param to False.\"\n )\n\n if len(face_objs) == 0 and enforce_detection is False:\n face_objs = [(img, img_region, 0)]\n\n for current_img, current_region, confidence in face_objs:\n if current_img.shape[0] > 0 and current_img.shape[1] > 0:\n if grayscale is True:\n current_img = cv2.cvtColor(current_img, cv2.COLOR_BGR2GRAY)\n\n # resize and padding\n if current_img.shape[0] > 0 and current_img.shape[1] > 0:\n factor_0 = target_size[0] / current_img.shape[0]\n factor_1 = target_size[1] / current_img.shape[1]\n factor = min(factor_0, factor_1)\n\n dsize = (\n int(current_img.shape[1] * factor),\n int(current_img.shape[0] * factor),\n )\n current_img = cv2.resize(current_img, dsize)\n\n diff_0 = target_size[0] - current_img.shape[0]\n diff_1 = target_size[1] - current_img.shape[1]\n if grayscale is False:\n # Put the base image in the middle of the padded image\n current_img = np.pad(\n current_img,\n (\n (diff_0 // 2, diff_0 - diff_0 // 2),\n (diff_1 // 2, diff_1 - diff_1 // 2),\n (0, 0),\n ),\n \"constant\",\n )\n else:\n current_img = np.pad(\n current_img,\n (\n (diff_0 // 2, diff_0 - diff_0 // 2),\n (diff_1 // 2, diff_1 - diff_1 // 2),\n ),\n \"constant\",\n )\n\n # double check: if target image is not still the same size with target.\n if current_img.shape[0:2] != target_size:\n current_img = cv2.resize(current_img, target_size)\n\n # normalizing the image pixels\n # what this line doing? must?\n img_pixels = image.img_to_array(current_img)\n img_pixels = np.expand_dims(img_pixels, axis=0)\n img_pixels /= 255 # normalize input in [0, 1]\n\n # int cast is for the exception - object of type 'float32' is not JSON serializable\n region_obj = {\n \"x\": int(current_region[0]),\n \"y\": int(current_region[1]),\n \"w\": int(current_region[2]),\n \"h\": int(current_region[3]),\n }\n\n extracted_face = [img_pixels, region_obj, confidence]\n extracted_faces.append(extracted_face)\n\n if len(extracted_faces) == 0 and enforce_detection == True:\n raise ValueError(\n f\"Detected face shape is {img.shape}. 
Consider to set enforce_detection arg to False.\"\n )\n\n return extracted_faces", "title": "" }, { "docid": "3d6f8f6bab2f49cfbea9d98d77933000", "score": "0.54980004", "text": "def preprocess(self, image):\n return image", "title": "" }, { "docid": "fc33ca0a15db2805b093fae69e6d0473", "score": "0.549515", "text": "def synthesize(\n\t\tdataloader,\n\t\tmodel, base_path_affinity, base_path_character, base_path_bbox, base_path_char, base_path_aff, base_path_json):\n\n\twith torch.no_grad():\n\n\t\tmodel.eval()\n\t\titerator = tqdm(dataloader)\n\n\t\tfor no, (image, image_name, original_dim) in enumerate(iterator):\n\n\t\t\tif config.use_cuda:\n\t\t\t\timage = image.cuda()\n\n\t\t\toutput = model(image)\n\n\t\t\tif type(output) == list:\n\n\t\t\t\t# If using custom DataParallelModel this is necessary to convert the list to tensor\n\t\t\t\toutput = torch.cat(output, dim=0)\n\n\t\t\toutput = output.data.cpu().numpy()\n\t\t\toutput[output < 0] = 0\n\t\t\toutput[output > 1] = 1\n\t\t\toriginal_dim = original_dim.cpu().numpy()\n\n\t\t\tfor i in range(output.shape[0]):\n\n\t\t\t\t# --------- Resizing it back to the original image size and saving it ----------- #\n\n\t\t\t\timage_i = denormalize_mean_variance(image[i].data.cpu().numpy().transpose(1, 2, 0))\n\n\t\t\t\tmax_dim = original_dim[i].max()\n\t\t\t\tresizing_factor = 768/max_dim\n\t\t\t\tbefore_pad_dim = [int(original_dim[i][0]*resizing_factor), int(original_dim[i][1]*resizing_factor)]\n\n\t\t\t\toutput[i, :, :, :] = np.uint8(output[i, :, :, :]*255)\n\n\t\t\t\theight_pad = (768 - before_pad_dim[0])//2\n\t\t\t\twidth_pad = (768 - before_pad_dim[1])//2\n\n\t\t\t\timage_i = cv2.resize(\n\t\t\t\t\timage_i[height_pad:height_pad + before_pad_dim[0], width_pad:width_pad + before_pad_dim[1]],\n\t\t\t\t\t(original_dim[i][1], original_dim[i][0])\n\t\t\t\t)\n\n\t\t\t\tcharacter_bbox = cv2.resize(\n\t\t\t\t\toutput[i, 0, height_pad:height_pad + before_pad_dim[0], width_pad:width_pad + before_pad_dim[1]],\n\t\t\t\t\t(original_dim[i][1], original_dim[i][0])\n\t\t\t\t)/255\n\n\t\t\t\taffinity_bbox = cv2.resize(\n\t\t\t\t\toutput[i, 1, height_pad:height_pad + before_pad_dim[0], width_pad:width_pad + before_pad_dim[1]],\n\t\t\t\t\t(original_dim[i][1], original_dim[i][0])\n\t\t\t\t)/255\n\n\t\t\t\tpredicted_bbox = generate_word_bbox(\n\t\t\t\t\tcharacter_bbox,\n\t\t\t\t\taffinity_bbox,\n\t\t\t\t\tcharacter_threshold=config.threshold_character,\n\t\t\t\t\taffinity_threshold=config.threshold_affinity,\n\t\t\t\t\tword_threshold=config.threshold_word,\n\t\t\t\t\tcharacter_threshold_upper=config.threshold_character_upper,\n\t\t\t\t\taffinity_threshold_upper=config.threshold_affinity_upper,\n\t\t\t\t\tscaling_character=config.scale_character,\n\t\t\t\t\tscaling_affinity=config.scale_affinity\n\t\t\t\t)\n\n\t\t\t\tword_bbox = predicted_bbox['word_bbox']\n\t\t\t\tchar_bbox = np.concatenate(predicted_bbox['characters'], axis=0)\n\t\t\t\taff_bbox = np.concatenate(predicted_bbox['affinity'], axis=0)\n\n\t\t\t\tword_image = image_i.copy()\n\t\t\t\tchar_image = image_i.copy()\n\t\t\t\taff_image = image_i.copy()\n\n\t\t\t\tcv2.drawContours(word_image, word_bbox, -1, (0, 255, 0), 2)\n\t\t\t\tcv2.drawContours(char_image, char_bbox, -1, (0, 255, 0), 2)\n\t\t\t\tcv2.drawContours(aff_image, aff_bbox, -1, (0, 255, 0), 2)\n\n\t\t\t\tplt.imsave(\n\t\t\t\t\tbase_path_char + '/' + '.'.join(image_name[i].split('.')[:-1]) + '.png',\n\t\t\t\t\tchar_image)\n\n\t\t\t\tplt.imsave(\n\t\t\t\t\tbase_path_aff + '/' + '.'.join(image_name[i].split('.')[:-1]) + 
'.png',\n\t\t\t\t\taff_image)\n\n\t\t\t\tplt.imsave(\n\t\t\t\t\tbase_path_bbox + '/' + '.'.join(image_name[i].split('.')[:-1]) + '.png',\n\t\t\t\t\tword_image)\n\n\t\t\t\tplt.imsave(\n\t\t\t\t\tbase_path_character + '/' + '.'.join(image_name[i].split('.')[:-1]) + '.png',\n\t\t\t\t\tnp.float32(character_bbox > config.threshold_character),\n\t\t\t\t\tcmap='gray')\n\n\t\t\t\tplt.imsave(\n\t\t\t\t\tbase_path_affinity+'/'+'.'.join(image_name[i].split('.')[:-1])+'.png',\n\t\t\t\t\tnp.float32(affinity_bbox > config.threshold_affinity),\n\t\t\t\t\tcmap='gray')\n\n\t\t\t\tpredicted_bbox['word_bbox'] = predicted_bbox['word_bbox'].tolist()\n\t\t\t\tpredicted_bbox['characters'] = [_.tolist() for _ in predicted_bbox['characters']]\n\t\t\t\tpredicted_bbox['affinity'] = [_.tolist() for _ in predicted_bbox['affinity']]\n\n\t\t\t\twith open(base_path_json + '/' + '.'.join(image_name[i].split('.')[:-1])+'.json', 'w') as f:\n\t\t\t\t\tjson.dump(predicted_bbox, f)", "title": "" }, { "docid": "d662f9c70959c08c2d27213c6385cec8", "score": "0.5494238", "text": "def preprocess(self, data):\n input_ids = []\n input_mask = []\n segment_ids = []\n token_boxes = []\n ocr_boxes = []\n for row in data:\n val = row.get(\"recognition\") or row.get(\"body\")\n predictions = eval(val.decode())\n df = pd.DataFrame(predictions)\n bboxes = df['bbox'].tolist()\n boxes = np.array(bboxes).astype('float32')\n boxes = [[int(1000 * box[0][0]),int(1000 * box[0][1]),int(1000 * box[2][0]),int(1000 * box[2][1])] for box in bboxes] \n # boxes = []\n # for box in bboxes:\n # box = np.array(box).astype('float32')\n # boxes.append([int(1000 * box[0][0]),int(1000 * box[0][1]),int(1000 * box[2][0]),int(1000 * box[2][1]),])\n words = [w[0] for w in df['ocr'].tolist()] \n features = self.convert_example_to_features(words=words, boxes=boxes)\n input_ids.append(torch.tensor(features[0]))\n input_mask.append(torch.tensor(features[1]))\n segment_ids.append(torch.tensor(features[2]))\n token_boxes.append(torch.tensor(features[3]))\n ocr_boxes.append(features[4])\n return torch.stack(input_ids).to(self.device), torch.stack(input_mask).to(self.device), torch.stack(segment_ids).to(self.device), torch.stack(token_boxes).to(self.device), ocr_boxes", "title": "" }, { "docid": "91c270cb5f1c7b70490ed096c6120588", "score": "0.54923403", "text": "def find_faces_single_scale(img, stride, thresh, params, orientations, wrap180):\n windowsize = 36\n if stride > windowsize:\n stride = windowsize\n\n height, width = img.shape\n probmap = np.zeros([height, width])\n outimg = np.array(img)\n\n # Loop over windowsize x windowsize windows, advancing by stride\n hog_descriptor_size = 100 * orientations\n window_descriptor = np.zeros([hog_descriptor_size + 1, 1])\n\n # // slides down then across the image, by stride\n for i in range(0, width-windowsize, stride):\n for j in range(0, height-windowsize, stride):\n\n # Crop out a windowsize x windowsize window starting at (i,j)\n crop = img[i:i+windowsize,j:j+windowsize] \n\n # Compute a HoG descriptor, and run the classifier\n window_descriptor[0,0] = 1\n window_descriptor[1:,0] = hog36(crop, orientations, wrap180)\n # NEED TO TRAIN AND RUN CLASSIFIER ?? PROB --> FIT () ?? 
or since trained params good \n probability = logistic_prob(window_descriptor, params) #or need to do both fit + prob \n\n # Mark detection probability in probmap\n win_i = i + int((windowsize - stride) / 2)\n win_j = j + int((windowsize - stride) / 2)\n probmap[win_i:win_i+stride, win_j:win_j+stride] = probability\n\n # If probability of a face is below thresh, continue \n # else mark the face on img \n if probability < thresh:\n continue\n \n #print(\"got here\")\n # Mark the face in outimg\n outimg[i, j:j+windowsize] = 255\n outimg[i+windowsize-1, j:j+windowsize] = 255\n outimg[i:i+windowsize, j] = 255\n outimg[i:i+windowsize, j+windowsize-1] = 255\n\n return outimg, probmap", "title": "" }, { "docid": "52ede030495a13719096f9f0e8c9b1bb", "score": "0.54904467", "text": "def data_augment(self, images, labels):\n \n \n augmented_images = []\n augmented_labels = []\n \n for idx, img in enumerate(images):\n \n ##Randomly Adjust Brightness of images \n #\n new_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n brightness_level = (0.2 * np.random.uniform()) + 0.4\n new_img[:,:,2] = new_img[:,:,2] * brightness_level\n new_img = cv2.cvtColor(new_img, cv2.COLOR_HSV2RGB)\n \n # Randomly shift the image virtially and horizontally \n x_shift = 100 * (np.random.rand() - 0.6)\n y_shift = 20 * (np.random.rand() - 0.4)\n new_steer_angle = labels[idx] + x_shift * 0.002\n transition_matrix = np.float32([[1, 0, x_shift],[0, 1, y_shift]])\n height, width = new_img.shape[:2]\n new_img = cv2.warpAffine(new_img, transition_matrix, (width, height))\n \n augmented_images.append(new_img)\n augmented_labels.append(new_steer_angle) \n \n \n #Randomly select images and Flip them and append to main Set\n num_imgs = len(augmented_images)\n random_flip_idx = random.sample(range(num_imgs), num_imgs//2) \n for idx in random_flip_idx:\n new_img = np.fliplr(augmented_images[idx]) \n new_steer_angle = -augmented_labels[idx]\n augmented_images.append(new_img)\n augmented_labels.append(new_steer_angle) \n \n \n images = np.array(augmented_images) \n labels = np.array(augmented_labels) \n \n return images, labels", "title": "" }, { "docid": "1b03cebbce7feb622dcff5b0dd876ecd", "score": "0.54875845", "text": "def getFaceTexture(self, normal):", "title": "" }, { "docid": "91129a176efa928bbefe010e2b7d22db", "score": "0.54741424", "text": "def _preprocess(self):\n self.logger.info(\n \"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if self.imgs_labeled_dir.exists() and \\\n self.imgs_detected_dir.exists() and \\\n self.split_classic_det_json_path.exists() and \\\n self.split_classic_lab_json_path.exists() and \\\n self.split_new_det_json_path.exists() and \\\n self.split_new_lab_json_path.exists():\n return\n\n check_path(self.imgs_detected_dir, create=True)\n check_path(self.imgs_labeled_dir, create=True)\n\n self.logger.info(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n img = Image.fromarray(img, mode='RGB')\n\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of 
view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid + 1, pid + 1, viewid, imgid + 1)\n img_path = osp.join(save_dir, img_name)\n img.save(img_path)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n self.logger.info(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid, :], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid + 1, pid + 1, img_paths))\n self.logger.info(\"done camera pair {} with {} identities\".format(campid + 1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n\n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n self.logger.info(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n DataPacker.dump(splits_classic_det, self.split_classic_det_json_path, self.logger)\n DataPacker.dump(splits_classic_lab, self.split_classic_lab_json_path, self.logger)\n mat.close()\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = 
split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n self.logger.info(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n DataPacker.dump(splits, self.split_new_det_json_path)\n\n self.logger.info(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n DataPacker.dump(splits, self.split_new_lab_json_path)", "title": "" }, { "docid": "c22fff9dc5882be229b49d536e5f0c00", "score": "0.546464", "text": "def isolate_faces(image: np.array, target_size=(224, 224)) -> List[np.array]:\n face_cascade = cv2.CascadeClassifier(PATH_TO_FACEDETECTOR_XML)\n # resize image to max of 800 pixels of width/height for better face detection\n image = reshape_to_max_dimensions(image, max_one_dimension_pixel=800)\n\n image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n faces = face_cascade.detectMultiScale(image_gray, 1.3, 5)\n resized_faces = []\n for (x, y, w, h) in faces:\n face_color = image[\n max(0, y - int(h * FACE_EXTRA_TOP_PERC))\n :min(y + int((1 + FACE_EXTRA_TOP_PERC) * h), image.shape[0]),\n x - int(w * FACE_EXTRA_SIDE_PERC):x + w + int(w * FACE_EXTRA_SIDE_PERC)\n ]\n if target_size is not None:\n face_color = resize_keep_aspect_ratio(face_color, target_size)\n resized_faces.append(face_color)\n return resized_faces", "title": "" }, { "docid": "da1e55e313767fdbceb5a201f3f636ab", "score": "0.54572344", "text": "def extract_and_adjust_faces(self, image):\n\n pass", "title": "" }, { "docid": "ea12107fe87fa0e87212c6d61c063c2b", "score": "0.54557294", "text": "def caption_image_beam_search(encoder, decoder, image_path, word_map, beam_size=3):\n\n k = beam_size\n vocab_size = len(word_map)\n\n # Read image and process\n img = imread(image_path)\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate([img, img, img], axis=2)\n img = imresize(img, (256, 256))\n img = img.transpose(2, 0, 1)\n img = img / 255.\n img = torch.FloatTensor(img).to(device)\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n transform = transforms.Compose([normalize])\n image = 
transform(img) # (3, 256, 256)\n\n # Encode\n image = image.unsqueeze(0) # (1, 3, 256, 256)\n\n\n\n encoder_out = encoder(image) # (1, enc_image_size, enc_image_size, encoder_dim)\n enc_image_size = encoder_out.size(1)\n encoder_dim = encoder_out.size(3)\n\n # Flatten encoding\n encoder_out = encoder_out.view(1, -1, encoder_dim) # (1, num_pixels, encoder_dim)\n num_pixels = encoder_out.size(1)\n\n # We'll treat the problem as having a batch size of k\n encoder_out = encoder_out.expand(k, num_pixels, encoder_dim) # (k, num_pixels, encoder_dim)\n\n # Tensor to store top k previous words at each step; now they're just <start>\n k_prev_words = torch.LongTensor([[word_map['<start>']]] * k).to(device) # (k, 1)\n\n # Tensor to store top k sequences; now they're just <start>\n seqs = k_prev_words # (k, 1)\n\n # Tensor to store top k sequences' scores; now they're just 0\n top_k_scores = torch.zeros(k, 1).to(device) # (k, 1)\n\n # Tensor to store top k sequences' alphas; now they're just 1s\n seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device) # (k, 1, enc_image_size, enc_image_size)\n\n # Lists to store completed sequences, their alphas and scores\n complete_seqs = list()\n complete_seqs_alpha = list()\n complete_seqs_scores = list()\n\n # Start decoding\n step = 1\n h, c = decoder.init_hidden_state(encoder_out)\n\n # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>\n while True:\n\n embeddings = decoder.embedding(k_prev_words).squeeze(1) # ( s, embed_dim)\n\n awe, alpha = decoder.attention(encoder_out, h) # (s, encoder_dim), (s, num_pixels)\n\n alpha = alpha.view(-1, enc_image_size, enc_image_size) # (s, enc_image_size, enc_image_size)\n\n gate = decoder.sigmoid(decoder.f_beta(h)) # gating scalar, (s, encoder_dim)\n\n awe = gate * awe\n h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1), (h, c)) # (s, decoder_dim)\n\n scores = decoder.fc(h) # (s, vocab_size)\n scores = F.log_softmax(scores, dim=1)\n\n # Add\n scores = top_k_scores.expand_as(scores) + scores # (s, vocab_size)\n\n # For the first step, all k points will have the same scores (since same k previous words, h, c)\n if step == 1:\n top_k_scores, top_k_words = scores[0].topk(k, 0, True, True) # (s)\n else:\n # Unroll and find top scores, and their unrolled indices\n top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True) # (s)\n\n # Convert unrolled indices to actual indices of scores\n prev_word_inds = top_k_words / vocab_size # (s)\n next_word_inds = top_k_words % vocab_size # (s)\n\n # Add new words to sequences, alphas\n seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1) # (s, step+1)\n seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],\n dim=1) # (s, step+1, enc_image_size, enc_image_size)\n\n # Which sequences are incomplete (didn't reach <end>)?\n incomplete_inds = [ind for ind, next_word in enumerate(next_word_inds) if\n next_word != word_map['<end>']]\n complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))\n\n # Set aside complete sequences\n if len(complete_inds) > 0:\n complete_seqs.extend(seqs[complete_inds].tolist())\n complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())\n complete_seqs_scores.extend(top_k_scores[complete_inds])\n k -= len(complete_inds) # reduce beam length accordingly\n\n # Proceed with incomplete sequences\n if k == 0:\n break\n seqs = seqs[incomplete_inds]\n seqs_alpha = seqs_alpha[incomplete_inds]\n h = 
h[prev_word_inds[incomplete_inds]]\n c = c[prev_word_inds[incomplete_inds]]\n encoder_out = encoder_out[prev_word_inds[incomplete_inds]]\n top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)\n k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)\n\n # Break if things have been going on too long\n if step > 50:\n break\n step += 1\n\n i = complete_seqs_scores.index(max(complete_seqs_scores))\n seq = complete_seqs[i]\n alphas = complete_seqs_alpha[i]\n\n return seq, alphas", "title": "" }, { "docid": "ea8069cb993d49d662d4297589afc593", "score": "0.5455491", "text": "def forward(self, navigable_img_feat, navigable_obj_feat, navigable_obj_img_feat, object_mask, pre_feat, h_0, c_0, ctx, \n s_0, r_t, navigable_index, ctx_mask, step, landmark_similarity):\n # input of image_feature should be changed\n \n \n batch_size, num_heading, num_object, object_feat_dim = navigable_obj_feat.size()\n\n # object text feature\n navigable_obj_feat = navigable_obj_feat.view(batch_size, num_heading*num_object, object_feat_dim) #4 x 16*36 x 300 \n \n # object image feature\n navigable_obj_img_feat = navigable_obj_img_feat.view(batch_size, num_heading*num_object, 152) # 4 x 48*36 x 152\n index_length = [len(_index)+1 for _index in navigable_index]\n \n navigable_mask = create_mask(batch_size, int(num_heading/3), index_length)\n \n # not add similarity\n proj_navigable_obj_feat = proj_masking(navigable_obj_img_feat, self.proj_navigable_obj_mlp1, object_mask.view(batch_size, num_heading*num_object)) # batch x 48*36 x 152 -> batch x 48*36 x 128\n #proj_navigable_feat = proj_masking(navigable_img_feat, self.proj_navigable_img_mlp, navigable_mask.repeat(1,3))\n \n \n # add similarity with two methods\n proj_navigable_feat = proj_masking(torch.cat([navigable_img_feat, torch.sort(landmark_similarity, dim=-1)[0]],2), self.proj_navigable_img_mlp2, navigable_mask.repeat(1,3)) # batch x 48 x 128\n #proj_navigable_feat = proj_masking(torch.cat([navigable_img_feat, landmark_similarity],2), self.proj_navigable_img_mlp, navigable_mask.repeat(1,3))\n # landmark_similarity: 4 x 48 x 36\n # navigable_img_feat: 4 x 48 x 2176 \n \n #proj_pre_feat = self.proj_navigable_img_mlp(pre_feat)\n\n weighted_img_feat, img_attn = self.soft_attn(self.h0_fc(h_0), proj_navigable_feat, mask=navigable_mask.repeat(1,3))\n\n if r_t is None:\n r_t = self.r_linear(torch.cat((weighted_img_feat, h_0), dim=1))\n r_t = self.sm(r_t)\n \n \n weighted_ctx, ctx_attn = self.state_attention(s_0, r_t, ctx, ctx_mask, step)\n\n conf_obj_feat, conf_obj_attn = self.config_obj_attention(self.config_fc(weighted_ctx), proj_navigable_obj_feat, navigable_mask, object_mask) # 4 x 16 x 128\n weighted_conf_obj_feat, conf_obj_attn = self.soft_attn(self.h0_fc(h_0), conf_obj_feat, mask=navigable_mask.repeat(1,3)) # 4 x 128\n\n new_weighted_img_feat = torch.bmm(conf_obj_attn.unsqueeze(dim=1), self.image_linear(navigable_img_feat)).squeeze(dim=1)# batch x 128\n \n #concat_input = torch.cat((proj_pre_feat, new_weighted_img_feat, weighted_ctx), 1)\n concat_input = torch.cat((new_weighted_img_feat, weighted_ctx), 1)\n\n h_1, c_1 = self.lstm(concat_input, (h_0, c_0))\n h_1_drop = self.dropout(h_1)\n\n # policy network\n h_tilde = self.logit_fc(torch.cat((weighted_ctx, h_1_drop), dim=1))\n logit = torch.bmm(proj_navigable_feat, h_tilde.unsqueeze(2)).squeeze(2)\n logit = logit[:,0:int(num_heading/3)] + logit[:,int(num_heading/3):2*int(num_heading/3)] + logit[:,2*int(num_heading/3):num_heading]\n \n return h_1, c_1, ctx_attn, logit", "title": "" }, { "docid": 
"648f9a797f2e99e72d4fe7c078c675ac", "score": "0.5442549", "text": "def next_batch(self):\n # Start enqueuing and other preparation at the beginning of an epoch.\n cfg = Config()\n pose_root = osp.join(cfg.user_dir,'new_reid/tri_loss/dataset/Dataset/market1501/poses')\n if self.epoch_done and self.shuffle:\n #np.random.shuffle(self.ids)\n self.ids = list(self.ids)\n np.random.shuffle(self.ids)\n samples, self.epoch_done = self.prefetcher.next_batch()\n im_list, im_names, labels, mirrored = zip(*samples)\n # t = time.time()\n # Transform the list into a numpy array with shape [N, ...]\n ims = np.stack(np.concatenate(im_list))\n # print '---stacking time {:.4f}s'.format(time.time() - t)\n\n\n\n im_names = np.concatenate(im_names)\n labels = np.concatenate(labels)\n mirrored = np.concatenate(mirrored)\n\n batch_im_ids = [parse_im_name(name, 'id') for name in im_names]\n im_ids = [parse_im_name(name, 'id') for name in self.im_names]\n number_indx = 0\n batch_maps = []\n target_ims = []\n for batch_im_id in batch_im_ids:\n A = [indx for indx, i in enumerate(im_ids) if i == batch_im_id ]\n b = []\n\n for a in A:\n b.append(self.im_names[a])\n \n if im_names[number_indx] in b :\n b.remove(im_names[number_indx])\n old_pname = random.choice(b)\n t_ims= np.asarray(Image.open(osp.join(self.im_dir, old_pname))) \n\n\n pname = old_pname[0:9]+old_pname[11:12]+str(int(old_pname[12:13])-1)+'_'+old_pname[18:23]+'txt'\n ppath = osp.join(pose_root,pname)\n landmark = self._load_landmark(ppath, 256/128, 128/64)\n maps = self._generate_pose_map(landmark)\n t_ims,_ = self.pre_process_im(t_ims)\n target_ims.append(t_ims)\n batch_maps.append(maps)\n \n number_indx = number_indx+1\n batch_maps = np.array(batch_maps)\n target_ims = np.array(target_ims)\n #pdb.set_trace()\n return ims, im_names, labels, batch_maps, target_ims,mirrored, self.epoch_done", "title": "" }, { "docid": "0581cf1dfccad87492e47668b8a7d7c5", "score": "0.5440455", "text": "def show_faces_batch(sample_batched):\n faces_1_batch, faces_2_batch = sample_batched['face_1'], sample_batched['face_2']\n print (faces_1_batch.shape)\n print (faces_2_batch.shape)\n labels_batch=sample_batched['label']\n print(labels_batch.shape)\n batch_size = len(faces_1_batch)\n im_size = faces_1_batch.size(2)\n grid_1 = utils.make_grid(faces_1_batch)\n grid_2 = utils.make_grid(faces_2_batch)\n plt.imshow(grid_1.numpy().transpose((1, 2, 0)))\n plt.imshow(grid_2.numpy().transpose((1, 2, 0)))", "title": "" }, { "docid": "be66c4f380a1d87fe8359f882a5b907e", "score": "0.5439752", "text": "def group_run(subscription_key):\n\n face_base_url = \"https://{}.api.cognitive.microsoft.com\".format(\n FACE_LOCATION)\n face_client = FaceClient(\n endpoint=face_base_url, credentials=CognitiveServicesCredentials(subscription_key))\n image_url_prefix = \"https://csdx.blob.core.windows.net/resources/Face/Images/\"\n image_file_names = [\n \"Family1-Dad1.jpg\",\n \"Family1-Daughter1.jpg\",\n \"Family1-Mom1.jpg\",\n \"Family1-Son1.jpg\",\n \"Family2-Lady1.jpg\",\n \"Family2-Man1.jpg\",\n \"Family3-Lady1.jpg\",\n \"Family3-Man1.jpg\"\n ]\n\n faces = {}\n\n for image_file_name in image_file_names:\n # Detect faces from target image url.\n detected_faces = _detect_faces_helper(\n face_client=face_client, image_url=image_url_prefix + image_file_name)\n\n # Add detected face id to faces.\n if not detected_faces:\n print(\"No face detected in {}\".format(image_file_name))\n continue\n faces[detected_faces[0].face_id] = image_file_name\n\n # Call grouping, the grouping result is a group collection, 
each group contains similar faces.\n group_result = face_client.face.group(face_ids=list(faces.keys()))\n\n # Face groups containing faces that are similar.\n for i, group in enumerate(group_result.groups):\n print(\"Found face group {}: {}.\".format(\n i + 1,\n \" \".join([faces[face_id] for face_id in group])\n ))\n\n # Messy group contains all faces which are not similar to any other faces.\n if group_result.messy_group:\n print(\"Found messy face group: {}.\".format(\n \" \".join([faces[face_id] for face_id in group_result.messy_group])\n ))", "title": "" }, { "docid": "cc6ad35cc91b3eee100c3361d7df93d5", "score": "0.54369295", "text": "def generate_morphed_corpus(self):\n # Save the language data, use Morfessor CatMAP to morph it, then read in the morphed data\n # The input format of the data is one word per line, with the word frequency proceeding the word. Let's make\n # that map\n\n catmap_input_filename = self._language_dir + 'catmap_input.gz'\n\n self._prepare_words_for_catmap(catmap_input_filename)\n\n self._log.info('Segmenting words...')\n import subprocess\n returncode = subprocess.call('make --makefile=morfessor_catmap0.9.2/train/Makefile GZIPPEDINPUTDATA=%s BINDIR=morfessor_catmap0.9.2/bin' % catmap_input_filename, shell=True)\n if returncode != 0:\n self._log.fatal('Could not generate morphs')\n exit(returncode)\n\n self._log.info('Segmented words')\n word_counts = {}\n\n # Copy the segmentations to a better place, and save them internally\n with gzip.open('segmentation.final.gz', 'rt') as segmentations_file:\n with open(self._language_dir + 'morphemes.txt', 'w') as segmentations_output:\n segments = segmentations_file.read()\n segmentations_output.write(segments)\n\n for line in segments.split('\\n'):\n if len(line) > 0:\n if line[0] == '#':\n continue\n\n count, word, morphs = _extract_info(line)\n self.segmentations[word] = morphs\n word_counts[word] = count\n\n subprocess.call('./cleanup.sh', shell=True)\n self._log.info('Cleaned up intermediate data')\n\n with open(self._language_dir + 'corpus_morphemes.txt', 'w') as morpheme_corpus:\n wordcount = 0\n for line in self._language_data:\n for word in line:\n wordcount += 1\n morphemes = self.segmentations[word]\n morpheme_corpus.write('[')\n morpheme_corpus.write(' '.join(morphemes))\n morpheme_corpus.write('] ')\n\n self._all_morphs += morphemes\n\n morpheme_corpus.write('\\n')\n\n self._created_morphed_corpus = True\n self._log.info('Split corpus into morphemes')", "title": "" }, { "docid": "5338fc0f6af363fbf88343898a067ddf", "score": "0.5434353", "text": "def collate_fn(self, batch):\n\n images = list()\n boxes = list()\n labels = list()\n \n for b in batch:\n images.append(b['img'])\n boxes.append(b['box'])\n labels.append(b['label'])\n \n images = torch.stack(images, dim=0)\n\n return {\n 'imgs': images,\n 'boxes': boxes,\n 'labels': labels} # tensor (N, 3, 300, 300), 3 lists of N tensors each", "title": "" }, { "docid": "9426ad7b9c65d67f53a8126e5ed11695", "score": "0.54296035", "text": "def get_normal_generator(image_list, mask_list, batch_size=2, input_hw=(224, 224, 3), mask_hw=(224, 224, 20),\n preprocess=True, shuffle=True, augment_param=None):\n def norm(x):\n x = x / 127.5\n x -= 1\n return x\n\n assert len(image_list) == len(mask_list)\n\n batch_images = np.empty((batch_size, input_hw[0], input_hw[1], input_hw[2]))\n batch_masks = np.empty((batch_size, mask_hw[0], mask_hw[1], mask_hw[2]))\n batch_id = 0\n\n indexs = list(range(len(image_list)))\n while True:\n if shuffle:\n random.shuffle(indexs)\n\n for id in 
indexs:\n try:\n image = Image.open(image_list[id])\n image = image.resize(input_hw[0:2], Image.NEAREST)\n image_np = np.asarray(image, dtype=np.uint8)\n\n if preprocess:\n image_np = norm(image_np)\n\n if image_np.shape != input_hw: # try to reshape\n image_np = np.stack((image_np,) * input_hw[2], axis=-1)\n\n if image_np.shape != input_hw:\n raise Exception('Image size wrong')\n\n mask = Image.open(mask_list[id])\n mask = mask.resize(mask_hw[0:2], Image.NEAREST)\n mask_np = np.asarray(mask, dtype=np.uint8).copy()\n\n # mask_np[mask_np != 0] = 1\n if augment_param:\n image_np, mask_np = augment_img_mask(image_np, mask_np, augment_param)\n\n mask_np = to_categorical(mask_np, num_classes=mask_hw[2])\n\n batch_images[batch_id] = image_np\n batch_masks[batch_id] = mask_np\n\n batch_id += 1\n if batch_id == batch_size:\n batch_id = 0\n yield batch_images, batch_masks\n except FileNotFoundError:\n print('Image/mask not found, Ignore', image_list[id], mask_list[id])", "title": "" }, { "docid": "8b6c7b84080deabdd5fa04fb0bc4dd8b", "score": "0.54238516", "text": "def generate_embeddings(model, dataloader):\n\n embeddings = []\n filenames = []\n with torch.no_grad():\n for img, label, fnames in dataloader:\n img = img.to(model.device)\n emb = model.backbone(img).flatten(start_dim=1)\n embeddings.append(emb)\n filenames.extend(fnames)\n\n embeddings = torch.cat(embeddings, 0)\n embeddings = normalize(embeddings)\n return embeddings, filenames", "title": "" }, { "docid": "c4f7739feff3a1227bf3c68268939e7f", "score": "0.54226786", "text": "def collate_fn(batch):\n\n images = list()\n boxes = list()\n labels = list()\n difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n difficulties.append(b[3])\n\n for i in range(len(boxes)):\n boxes[i] = boxes[i].squeeze(0)\n labels[i] = labels[i].squeeze(0)\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each", "title": "" }, { "docid": "65ad9302e7e17c57dbc1177aed5481ff", "score": "0.542171", "text": "def forward(ctx,\n feat,\n aspect_ratios=[2, 3],\n image_height=300,\n image_width=300,\n step_height=300,\n step_width=300,\n max_sizes=[300],\n min_sizes=[285],\n offset=0.5,\n step_mmdetection=1):\n num_priors = len(aspect_ratios) * 2 + len(min_sizes) + \\\n len(max_sizes)\n return torch.rand(2, 4 * num_priors * feat.shape[-1] * feat.shape[-2])", "title": "" }, { "docid": "a77e88cd3e4ff6a3e24ebfec77c35834", "score": "0.5418132", "text": "def change_faces(save_path, faces_coordinates):\r\n for (x, y, w, h) in faces_coordinates:\r\n # Choose random template.\r\n template_img = cv2.imread(random.choice(constant.TROLLS), -1)\r\n\r\n # Resize template to width and height of face rectangle and save it.\r\n dim = (w, h)\r\n resized_temp = cv2.resize(template_img, dim,\r\n interpolation=cv2.INTER_AREA)\r\n\r\n # Save every resized template with unique name.\r\n resized_name = str(w) + str(h) + '_resized.png'\r\n cv2.imwrite(os.path.join(constant.PATH_1, resized_name), resized_temp)\r\n\r\n # Reading image and resized template.\r\n image = cv2.cvtColor(cv2.imread(save_path, 1), cv2.COLOR_RGB2RGBA)\r\n template = cv2.imread(os.path.join(constant.PATH_1, resized_name), -1)\r\n template = cv2.cvtColor(template, cv2.COLOR_RGB2RGBA)\r\n\r\n # Replacing faces with templates.\r\n for c in range(0, 3):\r\n image[y: y + template.shape[0], \r\n x: x + template.shape[1], \r\n c] = (template[:, :, c] * \r\n (template[:, :, 3] / 255.0) + \r\n 
image[y: y + template.shape[0], \r\n x: x + template.shape[1], c] * \r\n (1.0 - template[:, :, 3] / 255.0))\r\n\r\n # Saving photo with replaced templates.\r\n cv2.imwrite(os.path.join(constant.PATH, save_path[18:]), image)\r\n\r\n # Deleting \"rubbish\"\r\n os.remove(os.path.join(constant.PATH_1, resized_name))", "title": "" }, { "docid": "16cabdd4a83b7fc9e3f2f54991f26601", "score": "0.54038244", "text": "def _mosaic4(self, labels):\n mosaic_labels = []\n s = self.imgsz\n yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y\n for i in range(4):\n labels_patch = labels if i == 0 else labels['mix_labels'][i - 1]\n # Load image\n img = labels_patch['img']\n h, w = labels_patch.pop('resized_shape')\n\n # Place img in img4\n if i == 0: # top left\n img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)\n elif i == 1: # top right\n x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n elif i == 2: # bottom left\n x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n elif i == 3: # bottom right\n x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n padw = x1a - x1b\n padh = y1a - y1b\n\n labels_patch = self._update_labels(labels_patch, padw, padh)\n mosaic_labels.append(labels_patch)\n final_labels = self._cat_labels(mosaic_labels)\n final_labels['img'] = img4\n return final_labels", "title": "" }, { "docid": "eb07e5aed8cc8aff119ed3f8c981f33c", "score": "0.54006463", "text": "def mtf_image_transformer_base_imagenet_mp():\n hparams = mtf_image_transformer_base_imagenet()\n hparams.mesh_shape = \"model:4;batch:8\"\n hparams.layout = \"batch:batch;d_ff:model;heads:model\"\n hparams.batch_size = 32\n hparams.num_heads = 8\n hparams.d_ff = 8192\n hparams.learning_rate_warmup_steps = 31250\n hparams.unconditional = True\n return hparams", "title": "" }, { "docid": "412a0d9feaf04c3439053723933fb6d9", "score": "0.5398387", "text": "def batch_mouth_detection(self, frames, detections, with_keypoints=True, img_dims= (128, 128)):\n resize_frames = []\n for frame in frames:\n if frame.shape[0] !=self.img_dims[0] or frame.shape[1] != self.img_dims[1]:\n frame = resize(frame, self.img_dims)\n resize_frames.append(frame)\n else:\n resize_frames.append(frame)\n # print(len(resize_frames))\n\n# frames = torch.from_numpy(np.array(resize_frames))\n\n# if isinstance(detections, torch.Tensor):\n# detections = detections.cpu().numpy()\n\n # if len(detections) == 2:\n # detections = np.expand_dims(detections, axis=1)\n\n # print(\"Found %d faces\" % detections.shape[0])\n i = 0 # first face detection\n k = 2 # nose keypoint\n # for i in range(detections.shape[0]): #for all faces\n # print(len(detections))\n # print('########')\n # print(kp_y, kp_x)\n mouth_regions = []\n for index, img in enumerate(frames):\n if len(detections[index]) > 0:\n try:\n ymin = detections[index][i, 0] * img_dims[0]\n xmin = detections[index][i, 1] * img_dims[1]\n ymax = detections[index][i, 2] * img_dims[0]\n xmax = detections[index][i, 3] * img_dims[1]\n\n # 
print(xmin, xmax, ymin, ymax)\n # for k in range(2,3): #for all keypoints\n kp_x = detections[index][i, 4 + k*2 ] * img_dims[1]\n kp_y = detections[index][i, 4 + k*2 + 1] * img_dims[0]\n\n mouth_region = img[int(kp_y):int(ymax), int(xmin):int(xmax)]\n mouth_regions.append(resize(mouth_region.cpu().numpy(), self.mouth_region_size))\n except IndexError:\n flag = True\n break\n else:\n flag = True\n\n if len(frames) == len(mouth_regions):\n flag = False \n else: \n flag = True\n # print(len(mouth_region_size))\n return np.array(mouth_regions), flag", "title": "" }, { "docid": "1bb1a7978489364ce239e7107a264502", "score": "0.5397252", "text": "def apply_augmentation (image):\n #select a random augmentation to apply\n number = random.randint(1,4)\n \n if number == 1:\n image= rotate_images(image, scale =1.0, height=380, width = 380)\n \n if number == 2:\n image= flip(image)\n \n if number ==3:\n image= translation(image)\n \n return image", "title": "" }, { "docid": "404f6810356e6aa8e560bf31d794631c", "score": "0.53971875", "text": "def draw(image, face):\n if face.predicted_landmarks:\n landmarks = face.predicted_landmarks\n # features\n for i in range(1, 68):\n cv2.circle(image, (landmarks.part(i).x, landmarks.part(i).y), 1, (0,0,255), thickness=2)\n # center of mass\n cv2.circle(image, (int(face.xcenter), int(face.ycenter)), 1, (255, 0, 0), thickness = 3)\n \n return image", "title": "" }, { "docid": "44656734255a9720cab43e7fc7c3d0c2", "score": "0.5393925", "text": "def get_batches_fn(batch_size):\n image_paths = glob(os.path.join(data_folder, 'merge', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)):path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imread(image_file)\n # image = cv.imread(image_file)\n # print(\"temp.shape\", image.shape)\n gt_image = scipy.misc.imread(gt_image_file)\n # gt_image = cv.imread(gt_image_file)\n\n image2, gt_image2 = crop_image(image, gt_image)\n image3, gt_image3 = flip_image(image, gt_image)\n\n image = scipy.misc.imresize(image, image_shape)\n # image = cv.resize(image, image_shape)\n gt_image = scipy.misc.imresize(gt_image, image_shape)\n # gt_image = cv.resize(gt_image, image_shape)\n\n image2 = scipy.misc.imresize(image2, image_shape)\n # image2 = cv.resize(image2, image_shape)\n gt_image2 = scipy.misc.imresize(gt_image2, image_shape)\n # gt_image2 = cv.resize(gt_image2, image_shape)\n\n image3 = scipy.misc.imresize(image3, image_shape)\n # image3 = cv.resize(image3, image_shape)\n gt_image3 = scipy.misc.imresize(gt_image3, image_shape)\n # gt_image3 = cv.resize(gt_image3, image_shape)\n\n contrast = random.uniform(0.85, 1.15) # Contrast augmentation\n bright = random.randint(-45, 30) # Brightness augmentation\n image = bc_img(image, contrast, bright)\n\n gt_image = process_gt_image(gt_image)\n gt_image2 = process_gt_image(gt_image2)\n gt_image3 = process_gt_image(gt_image3)\n\n images.append(image)\n gt_images.append(gt_image)\n\n images.append(image2)\n gt_images.append(gt_image2)\n\n images.append(image3)\n gt_images.append(gt_image3)\n yield np.array(images), np.array(gt_images)", "title": "" }, { "docid": "83bf382ad446da1f13cf27882dbbabc3", "score": "0.5392492", "text": "def collate_fn(data):\n\tdata.sort(key=lambda x: 
len(x[1]), reverse=True)\n\timages, poses, homography, poses2 = zip(*data)\n\timages = torch.stack(images, 0)\n\tlengths = [len(pose) for pose in poses]\n\ttargets = torch.zeros(len(poses), max(lengths)).long()\n\tprint(\"target : \" + targets.size())\n\tprint(\"pose : \" + poses.shape)\n\tprint(\"t2 : \" + str(poses[0]) + \" : \" + str(poses.shape))\n\n\tfor i, pose in enumerate(poses):\n\t\tend = lengths[i]\n\t\ttargets[i, :end] = pose[:end]\n\n\thomography = torch.stack(homography, 0)\n\tposes2 = torch.stack(poses2, 0)\n\treturn images, targets, homography, poses2, lengths", "title": "" }, { "docid": "5fc3ec70508a10d4e9e2f08773a90f2c", "score": "0.53920233", "text": "def image_stitching(img_list, blending, gain_comp, drawInliers):\n img_src = img_list[0]\n\n for i in range(1, len(img_list)):\n img_dst = img_list[i]\n\n # point locations for the four vertices of source image\n src_vertices = vertices_location(img_src)\n\n ###################################################\n # Step 1: Detect SIFT points for each of the images\n ###################################################\n # Initiate SIFT detector\n sift = cv2.xfeatures2d.SIFT_create()\n\n # find the keypoints and descriptors with SIFT\n kp1, des1 = sift.detectAndCompute(img_src, None)\n kp2, des2 = sift.detectAndCompute(img_dst, None)\n\n ###############################################################\n # Step 2: Find matching points among those found in two images.\n ###############################################################\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=50)\n\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n\n matches = flann.knnMatch(des1, des2, k=2) # find points that are close to each other\n\n # store all the good matches as per Lowe's ratio test.\n good = []\n for m, n in matches:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n\n ##################################################\n # Step 3: Estimate Homography based on the matches\n ##################################################\n src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n\n # Find homography using RANSAC\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n\n\n ####################################################\n # Step 4: Compute the offset along x and y direction\n ####################################################\n # vertices location for src image after homography\n src_vertices_transform = transform_vertices(src_vertices, M)\n\n # determine the x offset for the final output image based on the dst image\n src_up_left_transform = src_vertices_transform[0]\n src_down_left_transform = src_vertices_transform[3]\n if min(src_up_left_transform[0], src_down_left_transform[0]) < 0:\n offset_x = int((-1) * min(src_up_left_transform[0], src_down_left_transform[0]))\n else:\n offset_x = 0\n\n # determine the y offset and y_expand for the final output image based on the dst image\n src_up_right_transform = src_vertices_transform[1]\n src_down_right_transform = src_vertices_transform[2]\n offset_y = 0\n y_expand = 0\n if min(src_up_left_transform[1], src_up_right_transform[1]) < 0:\n offset_y = int((-1) * min(src_up_left_transform[1], src_up_right_transform[1]))\n\n if max(src_down_right_transform[1], src_down_left_transform[1]) > img_dst.shape[0]:\n y_expand = int(max(src_down_right_transform[1], src_down_left_transform[1]) - 
img_dst.shape[0])\n\n ################################################\n # Step 5: draw the inliers if specified by users\n ################################################\n # visualize inliers if drawInliers is true\n if drawInliers:\n matchesMask = mask.ravel().tolist()\n\n draw_params = dict(matchColor=(0, 255, 0), # draw matches in green color\n singlePointColor=None,\n matchesMask=matchesMask, # draw only inliers\n flags=2)\n\n img3 = cv2.drawMatches(img_src, kp1, img_dst, kp2, good, None, **draw_params)\n plt.figure()\n plt.imshow(img3)\n plt.xticks([]), plt.yticks([])\n\n ##########################################################\n # Step 6: for each matched points of the right image,\n # add the translation from right image to the final image.\n ##########################################################\n\n w = img_dst.shape[1]\n h = img_dst.shape[0]\n\n dst_pts1 = dst_pts.copy() # matched points on the right image\n dst_pts1[:, :, 0] = dst_pts1[:, :, 0] + offset_x\n dst_pts1[:, :, 1] = dst_pts1[:, :, 1] + offset_y\n\n ###########################################\n # Step 7: Compute new homography using the\n # new set of coordinates of the right image\n ###########################################\n newM, mask = cv2.findHomography(src_pts, dst_pts1, cv2.RANSAC, 5.0)\n\n src_vertices_transform = transform_vertices(src_vertices, newM)\n src_up_right_transform = src_vertices_transform[1]\n src_down_right_transform = src_vertices_transform[2]\n #############################\n # Step 8: Warp the left image\n #############################\n img_final = cv2.warpPerspective(img_src, newM, (w + offset_x, h + offset_y + y_expand))\n\n #######################################################\n # Step 9: Adopt gain compensation if requested by users\n #######################################################\n if gain_comp:\n img_final, img_dst = gain_compensation(img_final, img_dst, offset_x, offset_y)\n\n ############################################################\n # Step 10: Add the translated right image to the final image\n # and blend the overlapping region according to the selected\n # blending method\n ############################################################\n if blending == 'avg': # average blending\n img_src = avg_blending(img_final, img_dst, offset_x, offset_y)\n elif blending == 'alpha': # alpha blending\n img_src = alpha_blending(img_final, img_dst, offset_x, offset_y)\n elif blending == 'feathering': # feathering blending\n img_src = feathering(img_final, img_dst, offset_x, offset_y)\n elif blending == 'pyramid': # pyramid blending\n img_src = pyramid_blending(img_src, newM, img_final, img_dst, offset_x, offset_y,\n max(src_up_right_transform[0], src_down_right_transform[0]))\n\n return img_src.astype(np.uint8)", "title": "" }, { "docid": "95cf7838f69f0f5c6e7ebfb87fb72614", "score": "0.5388771", "text": "def generate_random_stereogram(\n im_size: Tuple[int, int, int] = (51, 51, 3), disparity: int = 4\n) -> Tuple[torch.Tensor, torch.Tensor]:\n H, W, C = im_size\n block_size = (H // 2, W // 2)\n H_center = H // 2\n W_center = W // 2\n im_left = torch.zeros(H,W,C) # placeholder, not actual size\n im_right = torch.zeros(H,W,C) # placeholder, not actual size\n\n ###########################################################################\n # Student code begins\n ###########################################################################\n\n random_im_left_2D = torch.randint(0,2,(H,W))\n random_im_right_2D = random_im_left_2D.clone()\n H_start = H_center - block_size[0]//2\n H_end = 
H_center + block_size[0]//2\n W_start = W_center - block_size[1]//2\n W_end = W_center + block_size[1]//2\n random_im_right_2D[H_start:H_end, W_start-disparity:W_end-disparity] = random_im_right_2D[H_start:H_end, W_start:W_end]\n random_im_right_2D[H_start:H_end, W_end-disparity:W_end] = torch.randint(0,2,(H//2-1,disparity))\n\n for i in range(C):\n im_left[:,:,i] = random_im_left_2D\n im_right[:,:,i] = random_im_right_2D\n\n ###########################################################################\n # Student code ends\n ###########################################################################\n return im_left, im_right", "title": "" }, { "docid": "a1f3ff895bff7684e1d5b3f0e50e3ca7", "score": "0.53877574", "text": "def buildRefRenderingPipeline():\n\n # Geometry\n vertices, faces, albedo = loadDemoGeometry()\n\n # Lighting\n light_pos, light_intensity = loadDemoLighting()\n\n # Camera\n cameraIntrinsics = orthographicCameraIntrinsics()\n cameraExtrinsics = extrinsics()\n cameraProjection = cameraIntrinsics.dot(cameraExtrinsics)\n cameraUnprojection = np.linalg.pinv(cameraProjection)[:3,:3]\n\n # Film (Pixel coordinates)\n far = 10\n pixels = np.vstack([m.ravel() for m in np.mgrid[-0.5:0.5:80j,-0.5:0.5:140j]] + [far*np.ones((80, 140)).ravel()]).T\n nb_pixels = pixels.shape[0]\n\n # Slower -- caches / size of intermediate tables?\n # inter_d, inter_pi = intersect_triangle(np.hstack((pixels[:,:2], np.zeros((nb_pixels, 1)))), pixels, vertices[faces[:,0],:],\n # vertices[faces[:,1],:],\n # vertices[faces[:,2],:])\n\n # dist = np.min(inter_d, axis=1)\n # closest = np.argmin(inter_d, axis=1)\n # pi = inter_pi[np.arange(inter_pi.shape[0]),closest,:]\n\n # closest[~np.isfinite(dist)] = -1\n\n dist = np.zeros((nb_pixels,)) + np.inf\n pi = np.zeros((nb_pixels,3))\n closest = np.zeros((nb_pixels,)) - 1\n\n for fidx, face in enumerate(faces):\n # TODO: precompute n, denom, ...\n inter_d, inter_pi = intersect_triangle(np.hstack((pixels[:,:2], np.zeros((nb_pixels, 1)))), pixels, vertices[face[0],:],\n vertices[face[1],:],\n vertices[face[2],:])\n closer_obj = (inter_d < dist) & (inter_d > 0)\n closest[closer_obj] = fidx\n dist[closer_obj] = inter_d[closer_obj]\n pi[closer_obj,:] = inter_pi[closer_obj,:]\n\n\n # output color\n color = np.zeros((nb_pixels, 3))\n\n for pidx, surf in enumerate(closest):\n if surf < 0:\n continue\n\n # print(\"Lighting {}/{}\".format(pidx, closest.shape[0]))\n\n for lidx, light in enumerate(light_pos):\n # Get surface normal (Already done in intersect_triangle, btw)\n u = vertices[faces[surf,1],:] - vertices[faces[surf,0],:]\n v = vertices[faces[surf,2],:] - vertices[faces[surf,0],:]\n n = np.cross(u, v)\n n /= np.sqrt(np.sum(n**2))\n\n l = light - pi[pidx,:]\n l /= np.sqrt(np.sum(l**2))\n\n # check for shadows (occlusions)\n # Technically, we could do them all at once\n inter_d, _ = intersect_triangle(pi[pidx,:], light, vertices[faces[:,0],:], vertices[faces[:,1],:], vertices[faces[:,2],:])\n # print(inter_d[np.isfinite(inter_d)].min(), inter_d[np.isfinite(inter_d)].max())\n if not np.any((inter_d < 1) & (inter_d > 1e-3)):\n color[pidx,:] += albedo[surf,:] * light_intensity[lidx,:] * np.abs(n.dot(l))\n # color[pidx,:] = light_intensity[lidx,:] * np.abs(n.dot(l))\n\n from matplotlib import pyplot as plt\n plt.subplot(121); plt.imshow(dist.reshape(80,140))\n plt.subplot(122); plt.imshow(color.reshape(80,140,3))\n plt.show()\n\n\n import pdb; pdb.set_trace()", "title": "" }, { "docid": "a8ba78988dbf73cb572a214350fc7e57", "score": "0.5387626", "text": "def 
getFaceMapping(self, normal):", "title": "" }, { "docid": "4c7918ec91e5b18957c2ed2602911bcd", "score": "0.5382895", "text": "def transform(self, img, label):\n\n img = img.astype(np.float32)\n\n # Data augmentation for mask\n morphed_label = label.copy()\n if self.params['use_data_augmentation']:\n if np.random.rand() < self.params['rate_of_morphological_transform']:\n morphed_label = data_augmentation.random_morphological_transform(morphed_label, self.params)\n if np.random.rand() < self.params['rate_of_translation']:\n morphed_label = data_augmentation.random_translation(morphed_label, self.params)\n if np.random.rand() < self.params['rate_of_rotation']:\n morphed_label = data_augmentation.random_rotation(morphed_label, self.params)\n\n sample = np.random.rand()\n if sample < self.params['rate_of_label_adding']:\n morphed_label = data_augmentation.random_add(morphed_label, self.params)\n elif sample < self.params['rate_of_label_adding'] + self.params['rate_of_label_cutting']:\n morphed_label = data_augmentation.random_cut(morphed_label, self.params)\n \n if np.random.rand() < self.params['rate_of_ellipses']:\n morphed_label = data_augmentation.random_ellipses(morphed_label, self.params)\n\n # Next, crop the mask with some padding, and resize to 224x224. Make sure to preserve the aspect ratio\n img_crop, morphed_label_crop, label_crop = self.pad_crop_resize(img, morphed_label, label)\n\n # Data augmentation for RGB\n # if self.params['use_data_augmentation']:\n # img_crop = data_augmentation.random_color_warp(img_crop)\n img_crop = data_augmentation.standardize_image(img_crop)\n\n # Turn into torch tensors\n img_crop = data_augmentation.array_to_tensor(img_crop) # Shape: [3 x H x W]\n morphed_label_crop = data_augmentation.array_to_tensor(morphed_label_crop) # Shape: [H x W]\n label_crop = data_augmentation.array_to_tensor(label_crop) # Shape: [H x W]\n\n return img_crop, morphed_label_crop, label_crop", "title": "" }, { "docid": "10e6da7b8b6fcf359bd9af7e2be12e14", "score": "0.5371607", "text": "def makeMap(prefix, faces, years, longitudes, latitudes):\n n, m = faces.shape\n photos = np.arange(n)\n ppl = np.arange(m)\n\n # Bin times and GPS coordinates\n times, places = binValues(years, longitudes, latitudes)\n\n # Choose face clusters\n #TODO we don't want to redo this work every time\n faceClusters = clusterFaces(faces)[:NUM_LINES]\n\n vects = np.hstack([faces, times, places])\n paths = []\n # For each face cluster, get high-coverage coherent path for its photos\n for cl in faceClusters:\n # choose weights for this cluster's line, de-emphasizing faces outside the cluster\n other = np.empty(faces.shape)\n other[:, cl] = faces[:, cl]\n ind = list(set(ppl) - set(cl))\n other[:, ind] = -faces[:,ind]\n weights = getWeights(other, times, places)\n\n # choose a pool of photos containing at least len(cl)*TAU faces within cl\n nonz = np.nonzero(faces[:,cl])[0]\n sumz = dict(zip(nonz, np.apply_along_axis(np.count_nonzero, 1, faces[nonz][:,cl])))\n pool = filter(lambda x: x in sumz.keys() and sumz[x]>len(cl)*TAU, photos)\n\n # choose a path greedily from among the pool\n path = []\n for i in range(NUM_PHOTOS):\n greedy(paths, path, pool, vects, weights, times)\n paths.append(path)\n\n # Fix lines to show overlaps in faces\n for i in range(len(paths)):\n for j in range(len(paths)):\n if i == j:\n continue\n for img in paths[j]:\n if sum(faces[img, faceClusters[i]]) == len(faceClusters[i]): #img includes all the faces in i\n paths[i].append(img)\n\n # Sort and re-order lines to improve layout\n 
paths = [sorted(x, key=lambda x: years[x]) for x in paths]\n orderLines(paths, faceClusters)\n return paths", "title": "" }, { "docid": "a7418d0729ae967771e0ae2f427efd55", "score": "0.5361398", "text": "def smooth_face(cfg, detected_img, bboxes):\n # Get Region Of Interest of each face\n for box_num in range(len(bboxes)):\n print(f'Face detected: {bboxes[box_num]}')\n # Get Region of Interest\n roi_img = get_roi(detected_img, bboxes, box_num)\n # Copy ROI\n temp_img = roi_img.copy()\n # Convert roi_img to HSV colorspace\n hsv_img = cv2.cvtColor(roi_img, cv2.COLOR_BGR2HSV)\n # Get the mask for calculating histogram of the object and remove noise\n hsv_mask = cv2.inRange(hsv_img, \n np.array(cfg['image']['hsv_low']), \n np.array(cfg['image']['hsv_high']))\n # Make a 3 channel mask\n full_mask = cv2.merge((hsv_mask, hsv_mask, hsv_mask))\n # Apply blur on the created image\n blurred_img = cv2.bilateralFilter(roi_img, \n cfg['filter']['diameter'], \n cfg['filter']['sigma_1'], \n cfg['filter']['sigma_2'])\n # Apply mask to image\n masked_img = cv2.bitwise_and(blurred_img, full_mask)\n # Invert mask\n inverted_mask = cv2.bitwise_not(full_mask)\n # Created anti-mask\n masked_img2 = cv2.bitwise_and(temp_img, inverted_mask)\n # Add the masked images together\n smoothed_roi = cv2.add(masked_img2, masked_img)\n # Init smoothed image\n output_img = detected_img.copy()\n # Replace ROI on full image with blurred ROI\n output_img[bboxes[box_num][1]:bboxes[box_num][3], \n bboxes[box_num][0]:bboxes[box_num][2]] = smoothed_roi\n return output_img, roi_img, full_mask, smoothed_roi", "title": "" }, { "docid": "178521f50cc3f51bfe07b1b4667919f7", "score": "0.5361371", "text": "def extract_image_segmentation(model):\n model.extract_torchscript()\n # model.extract_onnx()", "title": "" }, { "docid": "46a47c6743bef452eab8d655f4cb927c", "score": "0.5361178", "text": "def align_images(self, translation_only=False):\n # Extract feature point locations and descriptors.\n points_and_descriptors = []\n for file in self.files:\n image = sol4_utils.read_image(file, 1)\n self.h, self.w = image.shape\n pyramid, _ = sol4_utils.build_gaussian_pyramid(image, 3, 7)\n points_and_descriptors.append(find_features(pyramid))\n\n # Compute homographies between successive pairs of images.\n Hs = []\n for i in range(len(points_and_descriptors) - 1):\n points1, points2 = points_and_descriptors[i][0], points_and_descriptors[i + 1][0]\n desc1, desc2 = points_and_descriptors[i][1], points_and_descriptors[i + 1][1]\n\n # Find matching feature points.\n ind1, ind2 = match_features(desc1, desc2, .7)\n points1, points2 = points1[ind1, :], points2[ind2, :]\n\n # Compute homography using RANSAC.\n H12, inliers = ransac_homography(points1, points2, 100, 6, translation_only)\n\n # Uncomment for debugging: display inliers and outliers among matching points.\n # In the submitted code this function should be commented out!\n # display_matches(self.images[i], self.images[i+1], points1 , points2, inliers)\n\n Hs.append(H12)\n\n # Compute composite homographies from the central coordinate system.\n accumulated_homographies = accumulate_homographies(Hs, (len(Hs) - 1) // 2)\n self.homographies = np.stack(accumulated_homographies)\n self.frames_for_panoramas = filter_homographies_with_translation(self.homographies, minimum_right_translation=5)\n self.homographies = self.homographies[self.frames_for_panoramas]", "title": "" }, { "docid": "37743d9f6669c204e4ba6e81c87cd0bd", "score": "0.53603727", "text": "def main(image_name, joint_dir, out_dir, 
folder_image_suffix):\n if _os.path.isdir(image_name):\n folder_name = image_name[:]\n _LOGGER.info(\"Specified image name is a folder. Processing all images \"\n \"with suffix %s.\", folder_image_suffix)\n print 'path to images: '+ folder_name\n images = sorted(_glob.glob(_os.path.join(folder_name, '*' + folder_image_suffix)))\n\n print 'path to 2D joints: '+joint_dir\n joints = sorted(_glob.glob(_os.path.join(joint_dir, '*[0-9]*.npz')))\n\n process_folder = True\n else:\n images = [image_name]\n process_folder = False\n\n if process_folder and out_dir is not None and not _os.path.exists(out_dir):\n _os.mkdir(out_dir)\n #else:\n #_os.mkdir('/sequoia/data2/zoli/datazml/deepercut_tmp')\n for ite, image_name in enumerate(images):\n print 'iteration ' + str(ite)\n\n # load 2d joints\n joint_path = joints[ite]\n est = np.load(joint_path)['pose']\n pose = est[:3, :]\n\n # set output parameters\n image_name = image_name[:-4]\n if out_dir is None:\n out_name = _os.path.join('/sequoia/data2/zoli/datazml/deepercut_tmp', _os.path.basename(image_name) + '_pose_vis.png')\n elif process_folder:\n out_name = _os.path.join(out_dir, _os.path.basename(image_name) + '_pose_vis.png')\n else:\n out_name = out_dir+'_pose_vis.png'\n _LOGGER.info(\"Generating stickfigures on `%s` (saving to `%s`)\", _os.path.basename(image_name)+folder_image_suffix, out_dir)\n\n image = _scipy.misc.imread(image_name+folder_image_suffix) # in RGB order\n if image.ndim == 2:\n _LOGGER.warn(\"The image is grayscale! This may deteriorate performance!\")\n image = _np.dstack((image, image, image))\n else:\n image = image[:, :, ::-1] # convert to BGR order\n\n debug = False\n circlesize = 4\n stickwidth = 4\n visim = draw_stickfigure(image, pose, circlesize, stickwidth, debug)\n _scipy.misc.imsave(out_name, visim[:,:,::-1])", "title": "" }, { "docid": "6cfe02127e9d4ecde6ac342e424a3d0f", "score": "0.535804", "text": "def convert_smiles_into_2d_structure_images(smile):\n mol = Chem.MolFromSmiles(smile)\n return Draw.MolToImage(mol, size=(300, 300), fitImage=True).convert(mode='L') # L: grayscale", "title": "" }, { "docid": "57088e4b780e7154989fa086322e8a1f", "score": "0.53553665", "text": "def image_faces(img):\n faces = FACE_CASCADE.detectMultiScale(img, 1.3, 5)\n return faces", "title": "" }, { "docid": "ab00e0fd964a92f86039b4380e69c727", "score": "0.5349909", "text": "def addMultiSplats(originalImage, mudSplatObjects):\n imgW = originalImage.shape[0]\n imgH = originalImage.shape[1]\n xOffset = []\n yOffset = []\n finalSplats = []\n for splatObj in mudSplatObjects:\n if splatObj.scale<=0:\n continue\n mudSplatRef = cv2.imread(splatObj.imgPath, cv2.IMREAD_UNCHANGED)\n mudSplatRef[:,:,3][np.sum(mudSplatRef[:,:,:3], axis=2)>600] = 0\n for ch in range(3):\n mudSplatRef[:,:,ch][mudSplatRef[:,:,3]==0] = np.average(mudSplatRef[:,:,ch][mudSplatRef[:,:,3]!=0].ravel())\n xOffset.append(splatObj.xOffset)\n yOffset.append(splatObj.yOffset)\n scaleParam = splatObj.scale\n rotateParam = splatObj.rotate\n # Clip scaleParam value between 0 & 100\n if scaleParam<0:\n scaleParam = 0\n elif scaleParam>100:\n scaleParam = 100\n # Scale the mud splat image - store in the variable newSplat\n sizeSplat = int(scaleParam*min([imgW, imgH])/100)\n if mudSplatRef.shape[0]<mudSplatRef.shape[1]:\n newSplat = cv2.resize(mudSplatRef,\n (int(mudSplatRef.shape[0]*sizeSplat/mudSplatRef.shape[1]), sizeSplat))\n else:\n newSplat = cv2.resize(mudSplatRef,\n (sizeSplat, int(mudSplatRef.shape[1]*sizeSplat/mudSplatRef.shape[0])))\n if rotateParam is not None:\n 
rows,cols,nChannels = newSplat.shape\n M = cv2.getRotationMatrix2D((cols/2,rows/2), rotateParam, 1)\n newSplat = cv2.warpAffine(newSplat, M, (cols,rows))\n # Perform histogram matching of newSplat w.r.t. originalImage - only on V\n # channel in HSV format\n alpha_newSplat = newSplat[:,:,3]\n testSplat = copy.deepcopy(newSplat[:,:,:3])\n testSplat = cv2.cvtColor(testSplat, cv2.COLOR_BGR2HSV)\n img2 = copy.deepcopy(originalImage)\n tempSplat = cv2.cvtColor(newSplat[:,:,:3], cv2.COLOR_BGR2HSV)\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)\n testSplat[:,:,2] = np.uint8(hist_match(tempSplat[:,:,2], originalImage[:,:,2]))\n testSplat = cv2.cvtColor(testSplat, cv2.COLOR_HSV2BGR)\n finSplat = np.dstack((testSplat, alpha_newSplat))\n testSplat = copy.deepcopy(finSplat)\n # Calculate standard deviation for Gaussian Blur using Original Image - to\n # be applied to the mud-splat for overlaying\n ret = cv2.threshold(np.uint8(originalImage[25:75,25:75,1]),0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[0]\n temp_wh_half = originalImage[25:75,25:75,1][originalImage[25:75,25:75,1]>=ret]\n sigma = np.std(temp_wh_half)\n # Apply Gaussian Blur to Mud Splat\n testSplat = cv2.GaussianBlur(testSplat, (5, 5), sigma)\n finalSplats.append(testSplat.astype(\"uint8\"))\n if len(finalSplats)==0:\n return originalImage.astype(\"uint8\")\n allMudSplats = np.zeros((imgW, imgH, 4))\n for i, splat in enumerate(finalSplats):\n allMudSplats[yOffset[i]:yOffset[i]+splat.shape[0],\n xOffset[i]:xOffset[i]+splat.shape[1], :] = np.maximum(splat,\n allMudSplats[yOffset[i]:yOffset[i]+splat.shape[0],\n xOffset[i]:xOffset[i]+splat.shape[1], :])\n alpha_s = allMudSplats[:, :, 3]/255\n alpha_l = 1.0 - alpha_s\n muddyImage = copy.deepcopy(originalImage)\n muddyImage[:, :, :3] = np.moveaxis(np.add(np.multiply(alpha_s, np.moveaxis(allMudSplats[:,:,:3],-1,0)),\n np.multiply(alpha_l, np.moveaxis(originalImage[:, :, :3],-1,0))).astype(int),0,-1)\n return muddyImage.astype(\"uint8\")", "title": "" }, { "docid": "0194fa567d77defd3a4f74397db4e790", "score": "0.5349439", "text": "def app(inimage, indir):\n\n # PARAMS\n nchannels = 3 # RGB\n modelw = int(100)\n modelh = int(100)\n\n outimage = indir + '../blinkfinder_face.png'\n \n # Master Eye Open/Closed Detection Model\n eyemodel_file = 'nn_model_final_face_5layers_th.keras'\n eyemodel_weights_file = 'nn_model_weights_final_face_5layers_th.keras'\n\n tstart = time.time()\n print('reading file')\n #model = load_model(eyemodel_file)\n #model.load_weights(eyemodel_weights_file)\n # Use the global model from the top to decrease load time\n global face_model\n \n tfin = time.time()\n print('Time to load model is %0.4f' % (tfin-tstart))\n \n # Find your faces in the image\n allfaces = findface.idface(inimage)\n\n # If nothing is found don't crash\n if allfaces == None:\n print('no faces found')\n return None\n\n # Read the image file + convert to numpy array + RGB\n treads = time.time()\n img = Image.open(inimage)\n img = img.convert(mode='RGB')\n img = np.asarray(img)\n treadf = time.time()\n print('Time to load image is %0.4f' % (treadf-treads))\n\n # Checking if cv2 is faster == no same time\n #imgt = cv2.imread(inimage)\n #imgt = cv2.cvtColor(imgt, cv2.COLOR_BGR2RGB)\n #treadf2 = time.time()\n #print('Time to load image with cv2 is %0.4f' % (treadf2-treadf))\n \n #img = 255-img # my gif yalefaces are weird colorschemes\n imgplt = plt.figure(1);\n plt.clf();\n plt.imshow(img, cmap='gray');\n #plt.show()\n ax = imgplt.gca()\n \n # Cutout faces\n blinkarr = []\n blink_prob = []\n finalface_arr = []\n for 
(fx,fy,fw,fh) in allfaces:\n \n facecut = img[fy:fy+fw,fx:fx+fh]\n\n # Check if the face is smaller than what the model needs\n if (fw < modelw) | (fh < modelh):\n facecut = cv2.resize(facecut, (modelw, modelh),\n interpolation=cv2.INTER_LINEAR)\n else:\n facecut = cv2.resize(facecut, (modelw, modelh),\n interpolation=cv2.INTER_CUBIC)\n\n # Scale the images to the same as the nn model input\n facecut = facecut/255. \n\n # change channel location for theano\n face_swap = np.zeros((nchannels, modelw, modelh))\n face_swap[0,:,:] = facecut[:,:,2]\n face_swap[1,:,:] = facecut[:,:,0]\n face_swap[2,:,:] = facecut[:,:,1]\n facecut = face_swap\n\n # Add initial dimension size=1\n facecut = np.expand_dims(facecut, axis=0)\n\n tps = time.time()\n prob_closed = face_model.predict(facecut)\n tpf = time.time()\n print('Time to evaluate eyes is %0.4f' % (tpf-tps))\n \n blink_prob.append(prob_closed[0][0])\n #pdb.set_trace()\n if prob_closed > 0.2:\n ax.add_artist(patches.Rectangle((fx,fy),fw,fh,\n fill=False,\n color='r',\n linewidth=3))\n plt.text(fx, fy,'!!!', color='r')\n blinkarr.append(0)\n else:\n blinkarr.append(1)\n ax.add_artist(patches.Rectangle((fx,fy),fw,fh,\n fill=False,\n color='b',\n linewidth=3))\n plt.text(fx, fy,'OK', color='b')\n\n if len(finalface_arr) == 0:\n finalface_arr = np.array([fx,fy,fw,fh,prob_closed])\n else:\n finalface_arr = np.vstack((finalface_arr,\n np.array([fx,fy,fw,fh,prob_closed])))\n print('saving image')\n plt.savefig('app_face_wrap_test.png')\n\n if 0 in blinkarr:\n textmsg = 'has blinks'\n else:\n textmsg = 'has no blinks!'\n\n tfin = time.time()\n print('Total time for image is %0.4f' % (tfin-tstart))\n \n #pdb.set_trace()\n #return textmsg\n return finalface_arr", "title": "" }, { "docid": "8ac5415799d77b4e0620257abe1836d9", "score": "0.53474754", "text": "def embeddings(file, model, tensor=False):\n #model = InceptionResnetV1(pretrained='vggface2').eval() # I am calling this from preProcessPhoto.py\n img = Image.open(file).convert('RGB')\n img_tensor = transforms.functional.to_tensor(img)\n embedding = model(img_tensor.unsqueeze(0))[0]\n if not tensor:\n embedding = embedding.detach().numpy()\n dic = {}\n dic[\"data\"] = embedding\n #print ('Embeddings calculated')\n return dic", "title": "" }, { "docid": "3cbd6c737fda829402b1ebf5f7fee265", "score": "0.53442645", "text": "def transform():\n\n embedding_matrix, word_to_index = resources.create_embedding_matrix()\n\n for partition in ['training','test', 'validation']:\n\n # Transform raw jpegs into numpy arrays\n lib.transform_images(partition=partition, num_frames=10)\n\n # Transform raw audio to feature matrix\n lib.transform_audio(partition=partition, n_mfcc=13)\n\n # Transform text to tokens\n lib.transform_text(partition=partition, word_to_index=word_to_index)\n\n pass", "title": "" }, { "docid": "9c8b4b43185ab98609878550903efdb8", "score": "0.5339812", "text": "def normalizeFaces(self, useSize=True, size=None):\r\n max_size = 0\r\n if size == None:\r\n # use default size if none\r\n size = DEFAULT_FACE_SIZE\r\n \r\n if useSize == False:\r\n # use the maximum face size found\r\n max_size = self.__findMaxSize()\r\n else:\r\n max_size = size\r\n \r\n # loop over the original images\r\n cropped_files = self.getCroppedFaceImages()\r\n self.filecount = len(cropped_files)\r\n\r\n if self.startCallback is not None:\r\n self.startCallback(self.filecount)\r\n \r\n print \"Normalizing \" + str(self.filecount) + \" images\"\r\n for index, fname in enumerate(cropped_files):\r\n image_location = self.cropped_faces_dir 
+ \"\\\\\" + fname\r\n\r\n image = highgui.cvLoadImage(image_location, 1) # a cropped non-normalized image\r\n p = re.compile(CROPFACE_FILENAME_PATTERN)\r\n m = p.match(fname)\r\n prefix = m.group(\"prefix\")\r\n image_index = m.group(\"image_index\")\r\n face_index = m.group(\"face_index\")\r\n \r\n norm_image = self.__normImage(image, max_size) # normalize the image\r\n\r\n norm_filename = prefix + \"_\" + image_index + \"_norm_\" + face_index + \".jpg\"\r\n location = self.norm_dir + \"\\\\\" + norm_filename\r\n highgui.cvSaveImage(location, norm_image) # save the image to file\r\n \r\n if self.iterCallback is not None:\r\n self.iterCallback(index)", "title": "" }, { "docid": "e024e386792a1f41951b98a613a69339", "score": "0.53392315", "text": "def train_face_recognition_model(face_dict):\n faces = []\n labels = []\n index = 1\n labels_dict = {}\n for name, face_photos in face_dict.items():\n labels_dict[index] = name\n for photo in face_photos:\n labels.append(index)\n faces.append(photo)\n index += 1\n face_recognizer = cv2.face.LBPHFaceRecognizer_create()\n face_recognizer.train(faces, np.array(labels))\n return face_recognizer, labels_dict", "title": "" }, { "docid": "8576365a75a3c7921d68ba6229afe37c", "score": "0.53374237", "text": "def create_image(cards, backgrounds, transf, scaleBg):\n\n im1, card_val1 = cards.get_random()\n img1 = np.zeros((imgH, imgW, 4), dtype=np.uint8)\n img1[decalY:decalY + cardH, decalX:decalX + cardW, :] = im1\n seq = transf.to_deterministic()\n im1 = seq.augment_images([img1])[0]\n list_kps_aug_1 = []\n [list_kps_aug_1.append(seq.augment_keypoints([kps])[0]) for kps in list_kps]\n\n im2, card_val2 = cards.get_random()\n img2 = np.zeros((imgH, imgW, 4), dtype=np.uint8)\n img2[decalY:decalY + cardH, decalX:decalX + cardW, :] = im2\n seq = transf.to_deterministic()\n im2 = seq.augment_images([img2])[0]\n list_kps_aug_2 = []\n [list_kps_aug_2.append(seq.augment_keypoints([kps])[0]) for kps in list_kps]\n\n # Remove im2 boxes behind im1\n list_kps_aug_1 = superposed(im2, list_kps_aug_1)\n\n bba = []\n for kps in list_kps_aug_1:\n xmin, xmax, ymin, ymax = kps_rectangle(kps)\n bba.append(BBA(xmin, xmax, ymin, ymax, card_val1))\n\n for kps in list_kps_aug_2:\n xmin, xmax, ymin, ymax = kps_rectangle(kps)\n bba.append(BBA(xmin, xmax, ymin, ymax, card_val2))\n\n bg = backgrounds.get_random()\n bg = scaleBg.augment_image(bg)\n mask1 = im1[:, :, 3]\n mask1 = np.stack([mask1] * 3, -1)\n final = np.where(mask1, im1[:, :, 0:3], bg)\n mask2 = im2[:, :, 3]\n mask2 = np.stack([mask2] * 3, -1)\n final = np.where(mask2, im2[:, :, 0:3], final)\n return final, bba, list_kps_aug_1, list_kps_aug_2", "title": "" } ]
a8670cc1596318191a7492675d7b0937
Initializes a Chirp signal.
[ { "docid": "68205559a42a7f7dd982bb85900d878d", "score": "0.0", "text": "def __init__(self, startf=200, stopf=400, t1=1, method='linear'):\n self.startf = startf\n self.stopf = stopf\n self.t1 = t1\n self.method = method", "title": "" } ]
[ { "docid": "57b079b9520727f56128997987e8372b", "score": "0.6390474", "text": "def define_chirp(self):\n\n sec = 1\n k = 50\n w1 = 100\n w2 = self.chirp_high\n\n t = np.linspace(0, sec, int(self.fs*sec))\n\n chirp = np.sin(2*np.pi * w1 * sec * (np.exp(t *\n (np.log(w2 / w1) / sec)) - 1) / np.log(w2 / w1))\n chirp *= (1-np.exp(-k*t))*(1-np.exp(-k*(sec-t))) / 5\n\n inv_chirp = np.flip(chirp)\n\n return chirp, inv_chirp", "title": "" }, { "docid": "4a9af5dea2d54c6e61760e2a10929b0d", "score": "0.6138426", "text": "def initSignals(self):\n self.signalType = self.config.signalType\n if self.signalType == 'single':\n self.single = self.createSignal(self.config.single)\n elif self.signalType == 'dual':\n self.slowMA = self.createSignal(self.config.slow)\n self.fastMA = self.createSignal(self.config.fast)\n elif self.signalType == 'ribbon':\n start = self.config.ribbonStart\n step = self.config.ribbonSpacing\n self.rib1 = self.createSignal(start)\n self.rib2 = self.createSignal(start + step)\n self.rib3 = self.createSignal(start + step + step)", "title": "" }, { "docid": "925862451245d43ec6145e0978eb5fb5", "score": "0.60119736", "text": "def __init__(self, signal: Callable) -> None:\n super().__init__({}, None, \"\", create_event)\n self.signal = signal", "title": "" }, { "docid": "0be86d2f22a329615216366910564938", "score": "0.56712145", "text": "def __init__(self):\n\n # Create a sender object for emitting signals\n self.sender = SenderObject()", "title": "" }, { "docid": "953bfa52fcbc111b51927ce191a7be3a", "score": "0.5652214", "text": "def initializer():\n signal.signal(signal.SIGINT, signal.SIG_IGN)", "title": "" }, { "docid": "7ba4b07aa53cc920bf32fb5a2e068d06", "score": "0.56466496", "text": "def initialize(self, channel=None):\r\n pass", "title": "" }, { "docid": "6f18350cd99cedc9d1f34061a43d0fc6", "score": "0.5634236", "text": "def __init__(self):\n print 'Initializing sound process.'\n self.freq = 100\n self.p_process = None\n self.playback = True\n self.debug = False", "title": "" }, { "docid": "13650557e1a6216158c9950134946e59", "score": "0.5631929", "text": "def __init__(self):\n\n def left_pulse_amplitude(t):\n if t < self.PAUSE:\n return 0.0\n else:\n return 1.0\n\n self.right_pulse = sine_wave(self.FREQ, self.DURATION, self.RATE)\n self.left_pulse = sine_wave(self.FREQ, self.DURATION, self.RATE, left_pulse_amplitude)\n\n self.waveforms = {}", "title": "" }, { "docid": "c0ed79f91d7fc7cf0b71a21788e68472", "score": "0.55474514", "text": "def __init__(self, name=None, signal=None):\n if not bool(name) and bool(signal):\n print('name and signal must both be non-empty:')\n print('name: %s' % name)\n print('signal: %s' % signal)\n self.name = name\n self.signal = signal", "title": "" }, { "docid": "990a1d4d341703c98f1f1d468745399b", "score": "0.55468553", "text": "def define(cls, first_responder):\n return _SignalDescriptor(first_responder)", "title": "" }, { "docid": "60862b20e3e1ed3d32a75cffc8cc3dc4", "score": "0.5510014", "text": "def __init__(self, channel):\n self.channel = channel", "title": "" }, { "docid": "35eec0804bb2611386d904b5233d9f3b", "score": "0.5507409", "text": "def init(self):\n self._pulseGenerator.clearPulse()\n self._pulseAnalyser.clear()\n self._change = True", "title": "" }, { "docid": "ebea7da6b1542f0f0b49b00ad9c9072f", "score": "0.5503324", "text": "def __init__(__self__, *,\n odata_type: str,\n bitrate: Optional[int] = None,\n channels: Optional[int] = None,\n label: Optional[str] = None,\n sampling_rate: Optional[int] = None):\n pulumi.set(__self__, 
\"odata_type\", '#Microsoft.Media.Audio')\n if bitrate is not None:\n pulumi.set(__self__, \"bitrate\", bitrate)\n if channels is not None:\n pulumi.set(__self__, \"channels\", channels)\n if label is not None:\n pulumi.set(__self__, \"label\", label)\n if sampling_rate is not None:\n pulumi.set(__self__, \"sampling_rate\", sampling_rate)", "title": "" }, { "docid": "2007cc2098dbdd7096152d5d189a33cc", "score": "0.5495673", "text": "def __init__(self):\n self._buzzer = Buzzer(4) # gpiozero Buzzer object on pin 4.\n self._onTime = .01 # beep duration\n self._offTime = .19 # beep silence duration\n self._level = 0 # beep level initialized to 0\n self._active = False # object active state initialized to False\n self.run() # activate object", "title": "" }, { "docid": "d7e360024fb4bdf142ace796c1e1bdd0", "score": "0.54807645", "text": "def __init__(self):\n # create a ConnectionParameters obj that is pass to connection adapter\n self.con_obj = pika.ConnectionParameters(host='localhost')\n\n # create a instance of connection object\n self.connection = pika.BlockingConnection(self.con_obj)\n\n # create a new channel\n self.channel = self.connection.channel()", "title": "" }, { "docid": "87e73d71009f4f432f149258da1365b3", "score": "0.5478781", "text": "def chirp(duration=1.0, from_frequency=100, to_frequency=None, samplerate=None, level=None, kind='quadratic'):\n if scipy is False:\n raise ImportError('Generating chirps requires Scipy.')\n if samplerate is None:\n samplerate = slab.signal._default_samplerate\n duration = Sound.in_samples(duration, samplerate)\n t = numpy.arange(0, duration, 1) / samplerate # generate a time vector\n t.shape = (t.size, 1) # ensures C-order\n if not to_frequency:\n to_frequency = samplerate / 2\n chirp = scipy.signal.chirp(\n t, from_frequency, t[-1], to_frequency, method=kind, vertex_zero=True)\n out = Sound(chirp, samplerate=samplerate)\n out.level = level\n return out", "title": "" }, { "docid": "1e84f6169d1c0f056d02a1c7810d41f4", "score": "0.54725057", "text": "def initialize(signal_number=DEFAULT_TIMER_SIGNAL_NUMBER,\n update_period_s=DEFAULT_UPDATE_PERIOD_S):\n global initialized\n if initialized:\n return\n initialized = True\n uwsgi.add_timer(signal_number, update_period_s)\n uwsgi.register_signal(signal_number, MULE, emit)", "title": "" }, { "docid": "27c6b52ff9c811837d9b3d232ff63b1e", "score": "0.54659927", "text": "def _setup(self):\n self.iq.setup(frequency=self.mod_freq,\n amplitude=self.mod_amp,\n phase=self.mod_phase,\n input=self._input_signal_dsp_module(),\n gain=0,\n bandwidth=self.bandwidth,\n acbandwidth=self.acbandwidth,\n quadrature_factor=self.quadrature_factor,\n output_signal='quadrature',\n output_direct=self.mod_output)", "title": "" }, { "docid": "cf9e1862fbe0c38ab5edf38e497c966e", "score": "0.54647565", "text": "def init(self):\n # IMPLEMENT ME\n self._state = STATE_INACTIVE\n self._message = GLabel(text = 'Click me!', font_size = 75, x = 90, y = 275,\n linecolor = colormodel.GREEN)\n self._model = None\n if self._state != STATE_INACTIVE:\n self._message = None\n self._previoustouch = None\n self._time = 0\n self._lives = 3\n Sound('cup1.wav').play()", "title": "" }, { "docid": "ceb8b0888ea7539ec2491f7d9b61d094", "score": "0.5432263", "text": "def __init__(self):\n super(sppasAudioPCM, self).__init__()\n\n # The audio file pointer\n self._audio_fp = None\n\n # The list of loaded channels of this audio\n self._channels = list()", "title": "" }, { "docid": "3aaaf6ca9ff4e106e0637195aa041fe5", "score": "0.54153746", "text": "def 
__init__(self, callsign: str, ssid: Union[str, int] = None):\n\n # 'ssid' must be set first, since it can be updated if we're passed a callsign with an SSID\n self.ssid = ssid\n self.callsign = callsign", "title": "" }, { "docid": "05c71e52577f301f30b370cb5cb7ba8e", "score": "0.5413643", "text": "def __init__(self, frequency=440, amplitude=1, offset=0):\n self.freq = frequency\n self.amp = amplitude\n self.offset = offset", "title": "" }, { "docid": "8fc765823d3d0a3f09fc0caccefe620a", "score": "0.5409681", "text": "def createSignal(init, width):\n assert width >= 1, \"Invalid width = {0}\".format(width)\n if width > 1:\n return hdl.Signal(hdl.modbv(init)[width:])\n else:\n return hdl.Signal(True if init else False)", "title": "" }, { "docid": "2ad49a80ac4b561356021673b315c74e", "score": "0.54024965", "text": "def __init__(self):\n\n self.receivers = {}\n self.one_time_receivers = {}\n self.verbose = False", "title": "" }, { "docid": "dcafd9b3bbd3873dbd81b7bf8a28ccaa", "score": "0.5389067", "text": "def __init__(self):\r\n\r\n # Variable to store audio input data\r\n self.data = []\r\n\r\n # Initializing variables as required\r\n self.pa = pyaudio.PyAudio()\r\n self.stream = self.pa.open(format = pyaudio.paFloat32,\r\n channels = self.CHANNELS,\r\n rate = self.RATE,\r\n input = True,\r\n output = False,\r\n frames_per_buffer = self.CHUNK)\r\n\r\n # Initializing spectrum graph\r\n self.initializeSpectrumGraph()\r\n\r\n # Looping through the audio input continuously\r\n self.loop()", "title": "" }, { "docid": "862396c2de9646e934d5d1a8272a92fd", "score": "0.5385133", "text": "def __init__(self, clk: str, ext_init: bool = False):\n attrs = [] if not ext_init else [(\"uninitialized\", \"\")]\n\n self.clk = clk\n\n self.d = Signal(16)\n self.n_oe = Signal()\n self.q = Signal(16)\n\n self._q = Signal(16, attrs=attrs)", "title": "" }, { "docid": "58d4ef89144bd24df69b0e851e534745", "score": "0.5364038", "text": "def __init__(self):\n ## Input channel configuration\n self.nchannels = 7\n self.channelnames = ['length', # muscle lever output\n 'force', # force transducer output\n 'stimulation', # sync signal from TI's stim\n 'beam', # bpm diode upstream of sample\n 'exposure', # pilatus trigger signal\n 'psd1', # output from top psd connection\n 'psd2'] # bottom psd connection\n ## Sample rate configuration\n self.samplerate = 10000\n self.nsamples = 10000 # if samplemode = 'continuous', \n # this determines buffer size\n self.samplemode = 'finite' # or 'continuous'\n self.TERMINALEND = 'diff' \n # consider 'rse' (referenced single-ended),'nrse'\n # (non-referenced single ended), differential,\n # for configuration of analog input\n ## Initialize task\n self.configure_task()\n ## Dummy data for now\n self.data = None", "title": "" }, { "docid": "1923c63c0e7c6edaead79a75d6daab97", "score": "0.5359241", "text": "def __init__(self, signal_name):\n self._listeners = []\n self._signal_name = signal_name", "title": "" }, { "docid": "feec130e07bc41bb8e500719c5b22856", "score": "0.53586", "text": "def mySignal(scope=\"module\"):\n\n mySignal = PhysioSignal(\n label='simulated',\n samples_per_second=PHYSIO_SAMPLES_PER_SECOND,\n physiostarttime=PHYSIO_START_TIME,\n neuralstarttime=PHYSIO_START_TIME + SCANNER_DELAY,\n signal=PHYSIO_SAMPLES_COUNT * [0] # fill with zeros\n )\n\n return mySignal", "title": "" }, { "docid": "34831033048b385417063860a7c10882", "score": "0.5337399", "text": "def __init__(self):\r\n\t\tself.sendHeartBeatIntervall = 100\r\n\t\tself.estimateHartBeatIntervall = 
500\r\n\t\tself.heartbeatreciver = 0.0\r\n\t\tself.udpComm = UDPcomm(MCAST_PORT)\r\n\t\tself.numberOfSegments = 0\r\n\t\tself.allWormSegments = []\r\n\t\tself.uniqueId = 0", "title": "" }, { "docid": "9b525258ae2e327d2640b9df57f93f14", "score": "0.5330938", "text": "def init(sen):\n\n sen.tx_mesg(MesgID.command, SubsysID.oi, OICommand.init, None)\n sen.rx_mesg(MesgID.command, SubsysID.oi, OICommand.init, False)", "title": "" }, { "docid": "4a707b59b87eb90754cff549b9574981", "score": "0.5330636", "text": "def __init__(self, subscription_id=None, subject=None, callbackurl=None, channel=None):\n \n super().__init__()\n\n self._subscription_id = None\n self._subject = None\n self._callbackurl = None\n self._channel = None\n self.discriminator = None\n\n if subscription_id is not None:\n self.subscription_id = subscription_id\n if subject is not None:\n self.subject = subject\n if callbackurl is not None:\n self.callbackurl = callbackurl\n if channel is not None:\n self.channel = channel", "title": "" }, { "docid": "b01b5978b4b8672fc5d690691dc72196", "score": "0.5330106", "text": "def __init__(self, channel: Union[AnalogInput, int], fullRange: float = ..., offset: float = ...) -> None:\n self.analog_input = ...\n self.fullRange = ...\n self.offset = ...\n self.init_analog_input = ...\n self.pidSource = ...", "title": "" }, { "docid": "13f506cada3b0c4bbd89eab86a6718b3", "score": "0.53291184", "text": "def __init__(self, pitch, starting_sample, sampling_freq, stretch_factor):\n self.pitch = pitch\n self.starting_sample = starting_sample\n self.sampling_freq = sampling_freq\n self.stretch_factor = stretch_factor\n self.init_wavetable()\n self.current_sample = 0\n self.previous_value = 0", "title": "" }, { "docid": "42461a529ff931a10fd846002ee2765b", "score": "0.53234035", "text": "def __init__(self):\n self.dutycycle = 0 # default duty cycle level\n self.enable = False # default output state\n self.period = 3 # default period in us\n print(\"Opened connection to \\\"Simulated GateSource\\\"\") # informs the user the object has been constructed", "title": "" }, { "docid": "4932b53c89705a88a6390b7cbcb78c86", "score": "0.52993745", "text": "def __init__(self):\n self.bus = smbus.SMBus(1) # 512-MB RPi the bus is 1. 
Otherwise, bus is 0.\n self.bus.write_byte_data(self.ADDRESS, self.LTC_CONTROL_REG, self.RUN_MODE)\n # triggers a conversion by writing any value to the trigger register\n self.bus.write_byte_data(self.ADDRESS, self.LTC_TRIGGER_REG, 0x00)", "title": "" }, { "docid": "4ca0513f8a6ff8daeb57030936a6d06f", "score": "0.52899456", "text": "def __init__(self):\n self.connected = False\n self.tmPacketData = None\n self.sendCyclic = False\n self.cyclicPeriodMs = int(UTIL.SYS.s_configuration.TM_CYCLIC_PERIOD_MS)\n self.obcAck1 = ENABLE_ACK\n self.obcAck2 = ENABLE_ACK\n self.obcAck3 = ENABLE_ACK\n self.obcAck4 = ENABLE_ACK\n self.obqAck1 = ENABLE_ACK\n self.obqAck2 = ENABLE_ACK\n self.obqAck3 = ENABLE_ACK\n self.obqAck4 = ENABLE_ACK", "title": "" }, { "docid": "4f470cde7880c120e604b4815b3e8ab5", "score": "0.52835464", "text": "def __init__(self, *args, **kwargs):\n _misc_.Sound_swiginit(self,_misc_.new_Sound(*args, **kwargs))", "title": "" }, { "docid": "e106b6c480fb24dc35ea5ee7013dfd16", "score": "0.52830124", "text": "def __init__(self):\n super().__init__(\"ccx\", 3, [], num_ctrl_qubits=2)\n self.base_gate = XGate()", "title": "" }, { "docid": "94e59a7164521539d278ea60ca885c6f", "score": "0.5280893", "text": "def __init__(self, pxi):\n super().__init__(pxi, \"AnalogInput\")\n self.groundMode = ''\n self.sampleRate = 0\n self.samplesPerMeasurement = 0\n self.source = ''\n self.minValue = -10.0\n self.maxValue = 10.0\n self.startTrigger = StartTrigger()\n self.task = None", "title": "" }, { "docid": "bb0da83abdcf7c6f93d3368501b17c42", "score": "0.5280766", "text": "def __init__(self, param=1): # only default arguments here\n gr.sync_block.__init__(\n self,\n name='Channel Changer', # will show up in GRC\n in_sig=[],\n out_sig = []\n )\n # if an attribute with the same name as a parameter is found,\n # a callback is registered (properties work, too).\n self.message_port_register_in(pmt.intern(\"ChannelChange\"))\n self.set_msg_handler(pmt.intern(\"ChannelChange\"), self.chanChangeCallback)\n self.top_block = param\n self.channel_data = np.zeros((2,7))\n self.startup = True\n self.next_channel = 3\n\n\n \"\"\"Called when a decoded packet is recieved\"\"\"", "title": "" }, { "docid": "6df77b591e4a76727b0774bebb1a9f8d", "score": "0.52766", "text": "def __init__(self, signals, prior_mean, prior_cov, n_samples, burn_in, adaption=True):\n logger.info('Module - %s, Class - %s ' % (__name__, self.__class__.__name__))\n # numpy.set_printoptions(formatter={'float': lambda x: 'float: ' + str(x)})\n self.signals = signals # object Signal\n self.o_mu = prior_mean # object Mean\n self.o_cov = prior_cov # object CovExponentional\n self.n_samples = n_samples\n self.burn_in = burn_in\n self.adaption = adaption\n #self.runGibbs()", "title": "" }, { "docid": "82be31939e3a666c8a7c4106709f3b40", "score": "0.5263707", "text": "def __init__(self):\n\n pi('__init__')\n\n self._connection = None\n self._connected = False\n self._connecting = False\n self._channel = None\n self._closing = False\n self._closed = False\n self._consumer_tag = None\n self._deliveries = []\n self._acked = 0\n self._nacked = 0\n self._message_number = 0\n self._credentials = pika.PlainCredentials('guest', 'guest')\n self._parameters = pika.ConnectionParameters(host='localhost',\n port=PORT,\n virtual_host='/',\n credentials=self._credentials)\n self._queue = 'queue-' + str(uuid.uuid4())\n self.websocket = None\n self._status = 0\n self._person = None\n self._clientid = None\n self._participants = 0\n\n pp(self, '__INIT__')\n\n pr('__init__')", 
"title": "" }, { "docid": "c6da1e42e2ea3675e364d6bd44dc073d", "score": "0.5259372", "text": "def __init__(__self__, *,\n probe_interval_in_seconds: Optional[int] = None,\n probe_path: Optional[str] = None,\n probe_protocol: Optional[str] = None,\n probe_request_type: Optional[str] = None):\n if probe_interval_in_seconds is not None:\n pulumi.set(__self__, \"probe_interval_in_seconds\", probe_interval_in_seconds)\n if probe_path is not None:\n pulumi.set(__self__, \"probe_path\", probe_path)\n if probe_protocol is not None:\n pulumi.set(__self__, \"probe_protocol\", probe_protocol)\n if probe_request_type is not None:\n pulumi.set(__self__, \"probe_request_type\", probe_request_type)", "title": "" }, { "docid": "bd9b3b99b34c4f67ac6a8b8de71efdd7", "score": "0.52579165", "text": "def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):\n t = asarray(t)\n f0 = float(f0)\n t1 = float(t1)\n f1 = float(f1)\n if method in ['linear', 'lin', 'li']:\n beta = (f1 - f0) / t1\n phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)\n\n elif method in ['quadratic', 'quad', 'q']:\n beta = (f1 - f0) / (t1 ** 2)\n if vertex_zero:\n phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)\n else:\n phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)\n\n elif method in ['logarithmic', 'log', 'lo']:\n if f0 * f1 <= 0.0:\n raise ValueError(\"For a logarithmic chirp, f0 and f1 must be \"\n \"nonzero and have the same sign.\")\n if f0 == f1:\n phase = 2 * pi * f0 * t\n else:\n beta = t1 / log(f1 / f0)\n phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)\n\n elif method in ['hyperbolic', 'hyp']:\n if f0 == 0 or f1 == 0:\n raise ValueError(\"For a hyperbolic chirp, f0 and f1 must be \"\n \"nonzero.\")\n if f0 == f1:\n # Degenerate case: constant frequency.\n phase = 2 * pi * f0 * t\n else:\n # Singular point: the instantaneous frequency blows up\n # when t == sing.\n sing = -f1 * t1 / (f0 - f1)\n phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))\n\n else:\n raise ValueError(\"method must be 'linear', 'quadratic', 'logarithmic',\"\n \" or 'hyperbolic', but a value of %r was given.\"\n % method)\n\n return phase", "title": "" }, { "docid": "3e9e5c0bf2d4e5f7846b33f418286de4", "score": "0.5252838", "text": "def _initialize(self):\n scaled_signal = self.get_read(raw=True, scale=True)\n raw_signal = self.get_read(raw=True, scale=False)\n # add raw signal information to AlignedSignal\n aligned_signal = AlignedSignal(scaled_signal, rna=self.rna)\n aligned_signal.add_raw_signal(raw_signal)\n return aligned_signal", "title": "" }, { "docid": "d1c1df9060022ee00763997ed7cdd0dd", "score": "0.5242748", "text": "def __init__(self, hz=1.0, config_path=None) -> None:\n FourierSeries.__init__(self, config_path)\n self.hz = hz", "title": "" }, { "docid": "d90cb5bb4a6b78d433957e65250f324a", "score": "0.52380836", "text": "def __init__(self, *args, **kwargs):\n\n super(SoundEventHandler, self).__init__(*args, **kwargs)\n\n # Set up the watch manager for the sound driver.\n wm = pyinotify.WatchManager()\n mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_OPEN\n self.notifier = pyinotify.Notifier(wm, self)\n wm.add_watch(SOUND_DRIVER_PATH, mask)\n\n # Set up the GPIO pins and controllers.\n GPIO.setmode(GPIO.BCM)\n self.amp_controller = AmpController(self._power)\n self.lcd_controller = LCDController()\n\n # This holds the current protocol playing sound.\n self.protocol = None\n\n # The display threads. 
These will get the song info and display them.\n self.display_threads = {\n 'airplay': None\n }\n # Hold the last lines displayed or the initial default.\n self.last_lines = {\n 'airplay': ['AirPlay', '']\n }", "title": "" }, { "docid": "bc5383d3a2085075dfbadb1611148326", "score": "0.5236553", "text": "def __init__(self, *args, **kwds):\n if args or kwds:\n super(RCData, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.channels_in is None:\n self.channels_in = [0] * 8\n if self.channels_out is None:\n self.channels_out = [0] * 8\n if self.lock is None:\n self.lock = 0\n else:\n self.header = std_msgs.msg.Header()\n self.channels_in = [0] * 8\n self.channels_out = [0] * 8\n self.lock = 0", "title": "" }, { "docid": "cd66da6e61fb80686a9043581ea717f0", "score": "0.52361673", "text": "def __init__(self, path):\n\n self.path = path\n #read headers\n self.file_header, self.signal_header = headers.read_header(path)\n #determine if file is annotated\n self.annotated = 'EDF Annotations' in self.signal_header['names']\n #get channel counts (non-annotation signals)\n num_signals = self.file_header['num_signals'] \n #build list of channel indices\n ann_idx = self.annotation_index()\n self._channel_idxs = np.delete(np.arange(num_signals), ann_idx)\n #compute signal calibrations and offsets\n self._calibrations, self._offsets = self._signal_scales()\n #Determine if read can be optimized\n self._optimized = self._optimized()\n #record section start, size in samples and size in bytes\n self._rec_start = self.file_header['header_bytes']\n self._rec_samples = sum(self.signal_header['samples_per_record'])\n self._rec_bytes = self._rec_samples * 2", "title": "" }, { "docid": "966e6d06dc5fc7230043c787e5d0ff2c", "score": "0.523183", "text": "def __init__(self):\n self.servo = Servo()\n rospy.Subscriber('servo_steer', JointState, self.steer_state_callback) # Steering\n rospy.Subscriber('suspension', JointState, self.suspension_state_callback) # Suspension\n rospy.Subscriber('set_goal_position', GoalPosition, self.set_goal_position) # Dance goal position\n rospy.Subscriber('change_gripper_state', GripperState, self.change_state_gripper) # Gripper goal position\n\n # self.joint_state_publisher = rospy.Publisher('', JointState, queue_size=1)\n # self.goal_position_service = rospy.Service('goal_position', GoalPosition, self.handle_goal_position)\n\n self.servos = [20, 21]\n\n self.bus = smbus.SMBus(1)\n self.bus.write_byte(0x68, 0x10)\n\n self.set_servo_start() # Center all servo's on start\n # self.unclamp_both_servos()", "title": "" }, { "docid": "e6f7d3ff3223afb07f6eeee1885278d6", "score": "0.521178", "text": "def __init__(self,slot,chassis=0,autoset=True,init_dig=True): \r\n self.__FR_MAX = 200\r\n \r\n self.__SF = 0.5 #GS/s\r\n \r\n tmp = self.sigd.SD_AIN()\r\n self.__check_command(tmp.openWithSlot('',chassis,slot) )\r\n self._dig = tmp\r\n \r\n \r\n \r\n self.__channels = []\r\n \r\n for i in range(4):\r\n self.__channels.append(DIGChannel(self.__FR_MAX,i+1))\r\n \r\n self.__autoset = True\r\n self.__trigger_dir = 1 # 1 in IN\r\n self.__trig_dir_list = ['OUT','IN']\r\n \r\n \r\n if init_dig:\r\n self.set_channel(15)\r\n \r\n self._chassis = chassis\r\n self._slot = slot\r\n self.id = 'DIGKEY'", "title": "" }, { "docid": "54d81887da3b34367f4ad9a19afab0f9", "score": "0.5210268", "text": "def __init__(self, SR=44100.0, SNR_dB=5, dial_ms_range=(300, 500), interval_ms=500, volume=0.5):\n 
self.SR = SR\n self.SNR_dB = SNR_dB\n self.dial_ms_range = dial_ms_range\n self.interval_ms = interval_ms\n self.volume = volume", "title": "" }, { "docid": "0d5302e3f5ef4d12735e7f1371628d77", "score": "0.5207728", "text": "def __init__(self, _piBus, _siAddr=0x40, _readMode=0):\n\t\tself.piBus = int(_piBus)\n\t\tself.siAddr = int(_siAddr)\n\t\tself.pio = pigpio.pi()\n\t\tself.ReadMode = int(_readMode)\n\t\tself.HumiRes = 12\n\t\tself.TempRes = 14\n\t\tself.HeaterOn = 0\n\t\tself.HeaterVal = 0", "title": "" }, { "docid": "72e196c8e5727a00d93e128153f30352", "score": "0.5205544", "text": "def initialize(self):\n self._init_dates()\n md = self.dataset.GetMetadata()\n self.wavelength = float(md[ifc.PYRATE_WAVELENGTH_METRES])\n self.meta_data = md\n # creating code needs to set this flag after 0 -> NaN replacement\n self.nan_converted = False", "title": "" }, { "docid": "90585ba4f380706efd2e2d170af4f175", "score": "0.51964414", "text": "def __init__(self, pubChan: \"PubChan\"):\n self.nxt = pubChan.nxt", "title": "" }, { "docid": "9bf5c4a66eb1baf6c0b396b7c7fa08fe", "score": "0.51927704", "text": "def __init__(self, spectral_data): \n Narrowband.__init__(self, spectral_data)", "title": "" }, { "docid": "293a3f4449e042a0e357e4f1c3223872", "score": "0.51846796", "text": "def __init__(self, channel):\n super().__init__()\n SensorBase.checkPWMChannel(channel)\n self.channel = channel\n \n self._handle = hal.initializePWMPort(hal.getPort(channel))\n self.__finalizer = weakref.finalize(self, _freePWM, self._handle)\n\n self.setDisabled()\n \n hal.setPWMEliminateDeadband(self.handle, False)\n \n hal.report(hal.UsageReporting.kResourceType_PWM, channel)\n self.setName(\"PWM\", channel)\n \n # Python-specific: Need this to free on unit test wpilib reset\n Resource._add_global_resource(self)", "title": "" }, { "docid": "62a6f2f3bb5706d958b289f42b315d2a", "score": "0.51844627", "text": "def _create_signal_socket(self):\n\n if not self.signal_host:\n self.stop()\n raise ConnectionFailed(\"No host to send signal to specified.\")\n\n # time to wait for the sender to give a confirmation of the signal\n # self.signal_socket.RCVTIMEO = self.socket_response_timeout\n\n self.signal_socket = self._start_socket(\n name=\"signal_socket\",\n sock_type=zmq.REQ,\n sock_con=\"connect\",\n endpoint=self._get_endpoint(**self.socket_conf[\"signal\"])\n )\n\n # using a Poller to implement the signal_socket timeout\n self.poller.register(self.signal_socket, zmq.POLLIN)", "title": "" }, { "docid": "19f0c9421da8c7fa4aedc4a87164914f", "score": "0.51842755", "text": "def __init__(self):\n self.connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost'))\n self.channel = self.connection.channel()\n self.declare()\n self.consume()", "title": "" }, { "docid": "bee55e477d7f7fd2b27a149711dab318", "score": "0.5180326", "text": "def __init__(\n self,\n address=settings.PLC_ADDRESS,\n rack=settings.PLC_RACK,\n slot=settings.PLC_SLOT,\n ):\n # Connect\n self.client = snap7.client.Client()\n self.client.connect(address, rack, slot)", "title": "" }, { "docid": "d0f6a6dd8639f46b65d15c7550058ded", "score": "0.5176307", "text": "def __init__(self, samp_rate=1.0, freq=500): # only default arguments here\n gr.sync_block.__init__(\n self,\n name='Phase Measurement', # will show up in GRC\n in_sig=[np.uint8, np.uint8],\n out_sig=[np.float32]\n )\n # np types int32 \n # if an attribute with the same name as a parameter is found,\n # a callback is registered (properties work, too).\n self.samp_rate = samp_rate\n self.freq = 
freq", "title": "" }, { "docid": "ef0d6f36380e7de42da747c210a5d33e", "score": "0.51750183", "text": "def __init__(self, sig_type=None, fs=None, f=None, amp=None, phase=None):\n if sig_type is None:\n sig_type = 'sine'\n if fs is None:\n fs = 48000\n if f is None:\n f = 500\n if amp is None:\n amp = 1\n if phase is None:\n phase = 0\n self._sig_type = sig_type\n self._fs = fs\n self._f = f\n self._amp = amp\n self._phase = phase\n\n if sig_type in ['sine']:\n self.fs = fs\n self.f = f\n self.phase = phase\n self.amp = amp\n self.sig_type = sig_type", "title": "" }, { "docid": "0a1c57249430b832f28006497bb34264", "score": "0.51716125", "text": "def __init__(self):\n self._lib_vscr_ratchet_session = VscrRatchetSession()\n self.ctx = self._lib_vscr_ratchet_session.vscr_ratchet_session_new()", "title": "" }, { "docid": "fb8515b849b02897bde7bddf2439bd4b", "score": "0.5157115", "text": "def __init__(__self__, *,\n odata_type: str,\n bitrate: Optional[int] = None,\n channels: Optional[int] = None,\n label: Optional[str] = None,\n profile: Optional[str] = None,\n sampling_rate: Optional[int] = None):\n pulumi.set(__self__, \"odata_type\", '#Microsoft.Media.AacAudio')\n if bitrate is not None:\n pulumi.set(__self__, \"bitrate\", bitrate)\n if channels is not None:\n pulumi.set(__self__, \"channels\", channels)\n if label is not None:\n pulumi.set(__self__, \"label\", label)\n if profile is not None:\n pulumi.set(__self__, \"profile\", profile)\n if sampling_rate is not None:\n pulumi.set(__self__, \"sampling_rate\", sampling_rate)", "title": "" }, { "docid": "07ebb88ee7282565e387ab55a5f7a16b", "score": "0.51531917", "text": "def __init__(self):\n self.pub = rospy.Publisher('/bicycle/cmd_vel', BicycleCommandMsg, queue_size=10)\n self.sub = rospy.Subscriber('/bicycle/state', BicycleStateMsg, self.subscribe )\n self.rate = rospy.Rate(10) # for turtlebot # this is in Hz\n # self.rate = rospy.Rate(100) # for turtlesim\n self.state = BicycleStateMsg()\n self.actual_state = []\n rospy.on_shutdown(self.shutdown)", "title": "" }, { "docid": "a4f6768a9d803cc46b0538fd525d5aa7", "score": "0.51529664", "text": "def __init__(self,drift=0,sig=0.2,**kwargs):\n super().__init__(**kwargs)\n self.sig = sig\n r=self.r\n self.mu = (r+drift)", "title": "" }, { "docid": "8d6372bc3ea893162ef1e4e325c8e71a", "score": "0.51501995", "text": "def __init__(self):\n self._f = None\n # offset to start of WAVEDESC block\n self._offs = 0\n self._smplFmt = \"int16\"\n self._endi = \"\"", "title": "" }, { "docid": "d2bc0bbbf9506a10ff1e8e7b60674fee", "score": "0.5148728", "text": "def __init__(self, trig_Pin, echo_Pin):\n self._trig = Pin(trig_Pin, Pin.OUT)\n self._echo = Pin(echo_Pin, Pin.IN)\n self._sound_speed = 340.0 # m/s", "title": "" }, { "docid": "434c90cbd2ee108103668cff5e934431", "score": "0.5133889", "text": "def __init__(self, slot):\n\n if slot < 1 or slot > 2:\n raise ValueError('slot is outside of 1 and 2.')\n\n if slot == 1:\n self.pc1 = self.pc1_1\n self.pc2 = self.pc2_1\n elif slot == 2:\n self.pc1 = self.pc1_2\n self.pc2 = self.pc2_2\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.pc1, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n GPIO.setup(self.pc2, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n self.is_initialized = True\n return", "title": "" }, { "docid": "e0b6bce35d4dce5c74659ecf4bf89e91", "score": "0.51299113", "text": "def __init__(self, length, start_angle):\n self.init = False\n try:\n assert (\n ((type(length) is int) or (type(length) is float))\n and length > 0\n and ((type(start_angle) is 
float) or type(start_angle) is int)\n and -90 <= start_angle <= 90\n )\n except AssertionError:\n print(\"Error in initializing crank!\")\n return\n self.init = True\n self.length = length\n self.angle = math.radians(start_angle)\n _connector = STrig.get_xz(length=self.length, theta=self.angle)\n self.connector = [_connector['x'], 0, _connector['z']]", "title": "" }, { "docid": "bc04d890e1b2c9cb965470cc65a77a6c", "score": "0.51215976", "text": "def define_chirp(sec=1):\n \n k = 50\n w1 = 100\n w2 = 10000\n \n t = np.linspace(0, sec, int(fs*sec))\n \n ch = np.sin(2*np.pi*sec*w1*(((w2/w1)**(t/sec)-1)/(np.log(w2/w1))))*(1-np.e**(-k*t))*(1-np.e**(k*(t-sec))) # analytical form\n \n ch /= 5 # scale so roughly same 'height' as OFDM blocks\n \n inv_ch = np.flip(ch)\n \n return ch, inv_ch", "title": "" }, { "docid": "73d0b65301174fc862ba612ee69b0404", "score": "0.51153713", "text": "def __init__(self):\n # Initialize logging\n self._logger = Logger(self.__class__.__name__, LOG_LEVEL)\n # Initialize TCP socket\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Initialize message numerator\n self._message_num = 0", "title": "" }, { "docid": "f36b526438d50bd70fa9913fa6217115", "score": "0.5113535", "text": "def __init__(self, header, data):\n self.header = header\n \n self.samples = data\n\n self.ticks = np.arange(len(data))\n\n # assumes that sideband ends 5% of the way into the waveform\n self.calc_baseline(int(0.05*len(data)))", "title": "" }, { "docid": "fd44045ef008c81ddcc9070faa297257", "score": "0.51118684", "text": "def initialize(self):\n\n # Setup zmq context and remote helper\n ctx = zmq.Context()\n\n # Step 1\n self.pupil_remote = zmq.Socket(ctx, zmq.REQ)\n icp_req_add = \"tcp://{}:{}\".format(self.host, self.port)\n self.pupil_remote.connect(icp_req_add)\n\n # Step 2, 3\n logging.info(\"Waiting for Pupil remote.\")\n pub_port = self._get_port(\"PUB_PORT\")\n icp_pub_add = \"tcp://{}:{}\".format(self.host, pub_port)\n\n # Step 4\n self.msg_streamer = Msg_Streamer(ctx, icp_pub_add, hwm=self.hwm)\n\n # Setting up sub port to subscribe to notification\n sub_port = self._get_port(\"SUB_PORT\")\n self.subscriber = ctx.socket(zmq.SUB)\n icp_sub_add = \"tcp://{}:{}\".format(self.host, sub_port)\n self.subscriber.connect(icp_sub_add)\n self.subscriber.subscribe(\"notify.\")\n logging.info(\"Connection Successful.\")", "title": "" }, { "docid": "3c6832e6302a7efe03bec7aba91655a2", "score": "0.5110126", "text": "def __init__(self):\n self.pub = rospy.Publisher('/bicycle/cmd_vel', BicycleCommandMsg, queue_size=10)\n self.sub = rospy.Subscriber('/bicycle/state', BicycleStateMsg, self.subscribe)\n self.state = BicycleStateMsg()\n rospy.on_shutdown(self.shutdown)", "title": "" }, { "docid": "e142bf588e2612d030851157d36852e2", "score": "0.5108422", "text": "def __init__(self, key, request, client_address, broadcast_port, sensor_type=0):\r\n self.Subs = dict()\r\n self.BroadcastPort = broadcast_port\r\n self.Key = key\r\n self.Socket = request[1]\r\n self.ClientAddress = client_address\r\n self.SensorType = sensor_type\r\n self.TimeoutCount = 0", "title": "" }, { "docid": "0f2c703378b93bc945fd0209e42349b6", "score": "0.5107286", "text": "def __init__(self):\n self.__circuit = None # circuit\n self.__circuit_width = None # circuit width\n self.__circuit_slice = [] # sliced circuit\n self.__wild_pattern = [] # wild pattern\n self.__pattern = None # standard pattern\n self.__measured_qubits = [] # measured qubits\n self.__track = False # progress tracking", "title": "" }, { "docid": 
"2f8bb2af99007fb4b0af7437081a09c3", "score": "0.51011366", "text": "def __init__(self, duty_cycle=0.5, period=0.1, amplitude=1, offset=0):\n self.duty_cycle = duty_cycle\n self.period = period\n self.amplitude = amplitude\n self.offset = offset", "title": "" }, { "docid": "d298fd2d87b9ef1bc6907aeef8d17405", "score": "0.5099189", "text": "def __init__(self, r=0.0, x0=0.0, bch=0.0, x=0.0, b0ch=0.0, r0=0.0, *args, **kw_args):\n #: Positive sequence series resistance of the entire line section.\n self.r = r\n\n #: Zero sequence series reactance of the entire line section.\n self.x0 = x0\n\n #: Positive sequence shunt (charging) susceptance, uniformly distributed, of the entire line section. This value represents the full charging over the full length of the line.\n self.bch = bch\n\n #: Positive sequence series reactance of the entire line section.\n self.x = x\n\n #: Zero sequence shunt (charging) susceptance, uniformly distributed, of the entire line section.\n self.b0ch = b0ch\n\n #: Zero sequence series resistance of the entire line section.\n self.r0 = r0\n\n super(ACLineSegment, self).__init__(*args, **kw_args)", "title": "" }, { "docid": "47967a49674a0bc027048034c0e53b80", "score": "0.5095006", "text": "def __init__(self, audio = None):\n pass", "title": "" }, { "docid": "4f557044c43adc7ecf8ef6859af665b7", "score": "0.5094", "text": "def __init__(self, wavefunction):\n self.wfn = wavefunction", "title": "" }, { "docid": "0b5f31faf00e67e306abc008331358d9", "score": "0.5093199", "text": "def __init__(self, R_shunt, frequency, Q, n_turns_wake=1,\n *args, **kwargs):\n Yokoya_X1 = 1.\n Yokoya_Y1 = 1.\n Yokoya_X2 = 0.\n Yokoya_Y2 = 0.\n switch_Z = False\n\n super(CircularResonator, self).__init__(\n R_shunt, frequency, Q, Yokoya_X1, Yokoya_Y1,\n Yokoya_X2, Yokoya_Y2, switch_Z, n_turns_wake, *args, **kwargs)", "title": "" }, { "docid": "1c30c6c5de461ce9b017cf6e2e14f71e", "score": "0.5086221", "text": "def __init__(self, onset, duration, midiPitch):\n self.MIDIpitch = midiPitch\n self.onset = onset\n self.duration = duration", "title": "" }, { "docid": "bf141385eae43bfc8a90784c7a3f722f", "score": "0.50808406", "text": "def __init__(self, token,\n chat_id, freq=1,\n init_message=None):\n self.bot = Bot(token=token)\n self.chat_id = chat_id\n\n super().__init__(freq=freq,\n init_message=init_message)", "title": "" }, { "docid": "89a001b911b4ec6df8847a9023b6552a", "score": "0.5080179", "text": "def __init__(self, waveform, numberofintervals):\n self._waveform = waveform\n self.numberofintervals = numberofintervals", "title": "" }, { "docid": "82f3c250023852cc4671e56206f16711", "score": "0.5071055", "text": "def __init__(self):\n\n iscconf.ISCConf.__init__(self)", "title": "" }, { "docid": "c782080086b8727b22d4da09c7af4a0d", "score": "0.50673103", "text": "def initialize(self):\n super(CircusAutorestart, self).initialize()\n self.fill_watchers()\n self.periodic = ioloop.PeriodicCallback(self.ping, 1000, self.loop)\n self.periodic.start()\n self.periodic10 = ioloop.PeriodicCallback(self.fill_watchers, 10000,\n self.loop)\n self.periodic10.start()", "title": "" }, { "docid": "b04dd449d8188f49d64ef4b71dd765a0", "score": "0.5064273", "text": "def initialize_sonic_pi():\n # call([\"sonic_pi\", \"set_sched_ahead_time! 
0\"])\n # call([\"sonic_pi\", \"use_debug false\"])\n # call([\"sonic_pi\", \"use_synth :pulse\"])\n # call([\"sonic_pi\", \"use_bpm 100\"])\n\n log_print(\"Sonic Pi Initialized\", 3)", "title": "" }, { "docid": "1178486f38582163d91271b8de30ffee", "score": "0.5062409", "text": "def initialize(self) -> None:\n self.add_event('mycroft.awoken', self.handle_awoken)\n self.platform = self.config_core.get(\n \"enclosure\").get(\"platform\", \"unknown\")\n self.wake_word = Configuration.get()['listener']['wake_word']", "title": "" }, { "docid": "1632fc96133ee9e1c8e1828b9b92e9d1", "score": "0.5060402", "text": "def __init__(self, scaled_signal, rna=False):\n self.scaled_signal = scaled_signal\n self.raw_signal = None\n self._add_scaled_signal(scaled_signal)\n self.signal_length = len(self.scaled_signal)\n self.minus_strand = None\n # label can be used for neural network training with all signal continuously labelled\n self.label = defaultdict()\n # predictions can have multiple labels for different sections of current\n self.prediction = defaultdict()\n # guides are sections that we are confident in (guide alignments)\n self.guide = defaultdict(defaultdict)\n # place to store event starts\n self.raw_starts = None\n self.variant_calls = defaultdict()\n self.rna = rna", "title": "" }, { "docid": "b2de1367401f3176d5de42e7efa9ff0f", "score": "0.5054793", "text": "def __init__(\n self, program: bytes, args: Optional[List[bytes]] = None\n ) -> None:\n self.lsig = LogicSig(program, args)\n self.sigkey: Optional[bytes] = None", "title": "" }, { "docid": "6b626865eb483bee836a4742c95fe135", "score": "0.50522864", "text": "def __init__(self, *args, **kwds):\n if args or kwds:\n super(Feedback, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.drivers is None:\n self.drivers = [jackal_msgs.msg.DriveFeedback() for _ in range(2)]\n if self.pcb_temperature is None:\n self.pcb_temperature = 0.\n if self.mcu_temperature is None:\n self.mcu_temperature = 0.\n if self.commanded_mode is None:\n self.commanded_mode = 0\n if self.actual_mode is None:\n self.actual_mode = 0\n else:\n self.header = std_msgs.msg.Header()\n self.drivers = [jackal_msgs.msg.DriveFeedback() for _ in range(2)]\n self.pcb_temperature = 0.\n self.mcu_temperature = 0.\n self.commanded_mode = 0\n self.actual_mode = 0", "title": "" }, { "docid": "a28f7bf3ca9db52de4de32725c02e70a", "score": "0.5050677", "text": "def init(self):\n RPiGPIOInterface.init()\n\n for relay in self._relays.values():\n RPiGPIOInterface.init_channel(relay.gpio_channel, 'OUT', relay.gpio_initial_state)", "title": "" }, { "docid": "5f2b633211bf9e06e7dfcd5af027c99f", "score": "0.5047368", "text": "def __init__(self, options=None):\n\n super(sigprocbase, self).__init__()\n\n self.original_options = options\n\n if not hasattr(self, 'init_options'):\n self.init_options = attrdict()\n init_options = self.init_options\n\n if not hasattr(self, 'init_args'):\n self.init_args = list()\n init_args = self.init_args\n\n if options is None:\n return\n\n if isinstance(options, str):\n options = options.split()\n\n if isinstance(options, (list, tuple)):\n # put assignments in the dictionary\n init_options.update(option.split('=') for option in options if option.count('=') == 1)\n # append all others onto the list\n init_args.extend(option for option in options if option.count('=') != 1)\n elif isinstance(options, dict):\n init_options.update(options)\n else:\n 
raise ValueError(\"expected options to be an instance of str, tuple, list, or dict, but got %r\" %(type(options).__name__,))\n\n if init_options.has_key('serial_version'):\n v = int(init_options['serial_version'])\n self.check_serial_version(v)", "title": "" }, { "docid": "32d9c3357f534e5c4c9f56ddc1c85074", "score": "0.5046582", "text": "def __init__(self, pi, gpio, carrier_hz):\n\n self.pi = pi\n self.gpio = gpio\n self.carrier_hz = carrier_hz\n self.micros = 1000000 / carrier_hz\n self.on_mics = self.micros / 2\n self.off_mics = self.micros - self.on_mics\n\n self.wf = []\n self.wid = -1\n\n pi.set_mode(gpio, pigpio.OUTPUT)", "title": "" }, { "docid": "e3e5107067e07d1cde8785c435cf0ade", "score": "0.5037671", "text": "def __init__(self):\n signal(SIGTERM, endOfProgramm)\n self.isStart = True\n self.logs = FIFO()\n self.me = getIp()", "title": "" }, { "docid": "a75b1007e4460d47c8dcda481d72ee74", "score": "0.5034243", "text": "def __init__(self, sig0=5, dsigdt=0.1, noiseVar0=1, dnoiseVardt=+0.05):\n self.sig=sig0\n self.noiseVar=noiseVar0\n self.dsigdt=dsigdt\n self.dnoiseVardt=dnoiseVardt", "title": "" }, { "docid": "fbd215c9283f51379690fb72bf80a0e9", "score": "0.5031336", "text": "def __init__(self, *args):\n _misc_.NotificationMessage_swiginit(self,_misc_.new_NotificationMessage(*args))", "title": "" }, { "docid": "cb851718270f2f6ccfaafeb7e7d77d89", "score": "0.502996", "text": "def __init__(self, record_time: int, channels: int, device: int):\n print(\"DEVICE:\", device)\n self.duration = record_time\n if device == -1:\n self.device = None\n else:\n self.device = device\n try:\n self.sounds_stream: sd.InputStream = sd.InputStream(\n samplerate=FS,\n blocksize=BLOCKSIZE,\n channels=channels,\n dtype=np.int32,\n callback=self.__callback,\n device=self.device,\n )\n\n except sd.PortAudioError as e:\n # If we cant open an audio stream, quit the program.\n sys.stderr.write(\"Port Audio Error: %s\\n\" % e)\n raise e\n\n self.queue = queue.Queue()", "title": "" }, { "docid": "f7012ecd3ee39c523edfd0313438762a", "score": "0.50253856", "text": "def __init__(self, can_channel, motor_id):\n self.bus = can.interface.Bus(\n channel=can_channel, bustype='socketcan_ctypes')\n self.motor_id = motor_id\n self.speed = 0\n self.now_speed = 0\n self.alive = False", "title": "" } ]
3b627bf502dda60b5ec37a7e49e885b8
Sets the criteriaboxid of this MdsQuery.
[ { "docid": "ddc5167f97b59c706aae83e78182ca9b", "score": "0.738749", "text": "def criteriaboxid(self, criteriaboxid):\n if criteriaboxid is None:\n raise ValueError(\"Invalid value for `criteriaboxid`, must not be `None`\") # noqa: E501\n\n self._criteriaboxid = criteriaboxid", "title": "" } ]
[ { "docid": "70d3a99d8262de3fefdb7c4708a5b9c7", "score": "0.5326126", "text": "def __init__(self, criteriaboxid=None, handlerclass=None, join=None, label=None, layout=None, properties=None, statement=None, stylename=None, widget=None): # noqa: E501 # noqa: E501\n self._criteriaboxid = None\n self._handlerclass = None\n self._join = None\n self._label = None\n self._layout = None\n self._properties = None\n self._statement = None\n self._stylename = None\n self._widget = None\n self.discriminator = None\n self.criteriaboxid = criteriaboxid\n self.handlerclass = handlerclass\n self.join = join\n self.label = label\n self.layout = layout\n self.properties = properties\n self.statement = statement\n self.stylename = stylename\n self.widget = widget", "title": "" }, { "docid": "a564532420884edb8f893eab6609ef64", "score": "0.51332855", "text": "def setWhereClause(self, where):\n self.__whereCriteria = where", "title": "" }, { "docid": "84cb8f9c088bf3dc769c1df92a466662", "score": "0.48531926", "text": "def req_contactid(self, req_contactid):\n\n self._req_contactid = req_contactid", "title": "" }, { "docid": "6307f1e542c09094354c7f18809ce8d1", "score": "0.47833544", "text": "def restriction_set_definition_id(self, restriction_set_definition_id):\n\n self._restriction_set_definition_id = restriction_set_definition_id", "title": "" }, { "docid": "cefbe82575ea944a05a1044d6e15bd79", "score": "0.4699797", "text": "def set_condition(self, value):\n if value in self.conditions:\n del self['condition']\n self.xml.append(ET.Element(\"{%s}%s\" % (self.condition_ns, value)))\n return self", "title": "" }, { "docid": "e814cecbad036f2b98da9bce720d3bdf", "score": "0.4688223", "text": "def set_condition(self, value):\r\n if value in self.conditions:\r\n del self['condition']\r\n self.xml.append(ET.Element(\"{%s}%s\" % (self.condition_ns, value)))\r\n return self", "title": "" }, { "docid": "3797cba4885c1fa77c488ac3bcb44465", "score": "0.4648649", "text": "def operator_id(self, operator_id):\n\n self._operator_id = operator_id", "title": "" }, { "docid": "b6727673c02da9d160b68e782e0fb05c", "score": "0.45941192", "text": "def odata_id(self, odata_id):\n self._odata_id = odata_id", "title": "" }, { "docid": "b6727673c02da9d160b68e782e0fb05c", "score": "0.45941192", "text": "def odata_id(self, odata_id):\n self._odata_id = odata_id", "title": "" }, { "docid": "937422b55c88700785f7e7d3f82b427c", "score": "0.45799208", "text": "def set_condition(self, value):\r\n if value in self.conditions:\r\n del self['condition']\r\n self.xml.append(ET.Element(\"{%s}%s\" % (self.namespace, value)))\r\n return self", "title": "" }, { "docid": "9b3a6fcd7076620afebdad552db41a84", "score": "0.45340353", "text": "def setCutoffFact(self, x):\r\n self._cutoffFact = x\r\n self._filt.freq = self._env*20000*x", "title": "" }, { "docid": "c408eebe647aceda7412004d3415a354", "score": "0.4503271", "text": "def set_query_search(self, query_search):\n self.query_search = query_search\n return self", "title": "" }, { "docid": "c8dd746ce4884549e59f35261adbc5f7", "score": "0.44402632", "text": "def msclkid(self, msclkid):\n\n self._msclkid = msclkid", "title": "" }, { "docid": "03e2c16109c1b33acbb4194bb08dc610", "score": "0.4434837", "text": "def set_optimizer(self, criterion, optimizer, **kwargs):\n self.criterion = criterion()\n self.optimizer = optimizer(self.parameters(), **kwargs)\n\n return self", "title": "" }, { "docid": "44798699bee77959a27279f3ca08e8dd", "score": "0.44344094", "text": "def attribute_id(self, attribute_id):\n\n 
self._attribute_id = attribute_id", "title": "" }, { "docid": "58c17c34c70641224f7906e469d084c2", "score": "0.44061822", "text": "def company_id(self, company_id):\n\n self._company_id = company_id", "title": "" }, { "docid": "0df0feb4df59ee4f896e158e4c7b6b66", "score": "0.43995398", "text": "def set_condition(self, value):\r\n if value in self.conditions:\r\n del self['condition']\r\n cond = ET.Element(\"{%s}%s\" % (self.condition_ns, value))\r\n self.parent().xml.append(cond)\r\n return self", "title": "" }, { "docid": "bd85add9d18bec10d542e6ddc1c65a38", "score": "0.439197", "text": "def restriction_set_definition_version_id(self, restriction_set_definition_version_id):\n\n self._restriction_set_definition_version_id = restriction_set_definition_version_id", "title": "" }, { "docid": "212c73094cfffdb99a05b671f5688ca7", "score": "0.43825313", "text": "def _set_id(self, value):\n\n self.oid = value\n return self", "title": "" }, { "docid": "36766917433800517de3a2af5c4b8b7c", "score": "0.43804297", "text": "def setCriteria(self, query=None, start=None, end=None, maxTweets=None, username=None):\n if username:\n criteria = self.criteria.setUsername(username)\n else:\n criteria = self.criteria.setQuerySearch(query).setSince(\n start).setUntil(end).setMaxTweets(maxTweets)\n return criteria", "title": "" }, { "docid": "68c6af97af72b9740cc7d4eaeee2daa1", "score": "0.4369123", "text": "def conditions(self, conditions):\n\n self._conditions = conditions", "title": "" }, { "docid": "68c6af97af72b9740cc7d4eaeee2daa1", "score": "0.4369123", "text": "def conditions(self, conditions):\n\n self._conditions = conditions", "title": "" }, { "docid": "81cb8c9d6725531b71faf40c23919e6d", "score": "0.43483627", "text": "def dataset_id(self, dataset_id):\n\n self._dataset_id = dataset_id", "title": "" }, { "docid": "81cb8c9d6725531b71faf40c23919e6d", "score": "0.43483627", "text": "def dataset_id(self, dataset_id):\n\n self._dataset_id = dataset_id", "title": "" }, { "docid": "81cb8c9d6725531b71faf40c23919e6d", "score": "0.43483627", "text": "def dataset_id(self, dataset_id):\n\n self._dataset_id = dataset_id", "title": "" }, { "docid": "9a308a694538cae05ce59e1c6ef577b4", "score": "0.43470934", "text": "def condition(self, condition):\n\n self._condition = condition", "title": "" }, { "docid": "d364552e0ebc201fb18e3c85ca1ba6cb", "score": "0.4342144", "text": "def conid(self, conid):\n\n self._conid = conid", "title": "" }, { "docid": "564266402b85ecc162e809a80a9b5edf", "score": "0.43344662", "text": "def rule_id(self, rule_id):\n self._rule_id = rule_id", "title": "" }, { "docid": "bed3e324a0328aa47300e6bfbe594060", "score": "0.43211803", "text": "def set_association_id(self, assoc_id=None):\n if assoc_id is None:\n self.assoc_id = self.make_association_id(self.definedby, self.sub,\n self.rel, self.obj)\n else:\n self.assoc_id = assoc_id\n\n return", "title": "" }, { "docid": "74a05b041846f6c3bdc18bd89e1d3329", "score": "0.43120515", "text": "def lob_id(self, lob_id):\n self._lob_id = lob_id", "title": "" }, { "docid": "74a05b041846f6c3bdc18bd89e1d3329", "score": "0.43120515", "text": "def lob_id(self, lob_id):\n self._lob_id = lob_id", "title": "" }, { "docid": "a73d40e46a0b6675d5d1ee839388236e", "score": "0.4304622", "text": "def org_id(self, org_id):\n\n self._org_id = org_id", "title": "" }, { "docid": "c5b511409d9aad62e8566369513951e9", "score": "0.42969185", "text": "def set_domain_id(self, domain_id):\n self.dataset['domain'] = domain_id", "title": "" }, { "docid": "c5b511409d9aad62e8566369513951e9", 
"score": "0.42969185", "text": "def set_domain_id(self, domain_id):\n self.dataset['domain'] = domain_id", "title": "" }, { "docid": "12940f2436ce347f9a740fc03dff709c", "score": "0.42902064", "text": "def restrictionmosaic(self, restrictionmosaic):\n\n self._restrictionmosaic = restrictionmosaic", "title": "" }, { "docid": "dff3af3a7c12012a89c9384a3a27d6ef", "score": "0.426527", "text": "def set_physic_mode_id(self, idx):\n self.physic_mode_id = idx", "title": "" }, { "docid": "1579c10e215e6456283bf5dffc3c37f6", "score": "0.42617765", "text": "def condition_index(self, condition_index):\n\n self._condition_index = condition_index", "title": "" }, { "docid": "cc6252e00e4886f43e344ed1a01ff6cd", "score": "0.42615882", "text": "def SetActiveAttribute(self, string, p_int):\n ...", "title": "" }, { "docid": "a52e9d0673e3d24f46d5746aa866fc36", "score": "0.4252366", "text": "def coupon_definition_id(self, coupon_definition_id):\n\n self._coupon_definition_id = coupon_definition_id", "title": "" }, { "docid": "7c0ddd58af5f3e52af6c6088ce8016ce", "score": "0.42373362", "text": "def glcid(self, glcid):\n\n self._glcid = glcid", "title": "" }, { "docid": "a9b94cc8d97613d16af7f6228a6ad413", "score": "0.42283228", "text": "def set_business_object_id(self, name, value):\n\n self.cache[\"business_object_ids\"][name] = value", "title": "" }, { "docid": "e4dbbe547d3a4c367245305cff722b01", "score": "0.42055613", "text": "def set_query(self, query):\n self.query = query\n self._create_choices()", "title": "" }, { "docid": "268d9947becad789115a68ec4443d0ce", "score": "0.42005795", "text": "def set_DatasetID(self, value):\n super(AddRowInputSet, self)._set_input('DatasetID', value)", "title": "" }, { "docid": "78b92c44e22ea44e06c9faa40e340ae4", "score": "0.41822872", "text": "def setACSIndex(self, index):\n\n if self._ag._coords is None:\n raise AttributeError('coordinates are not set')\n\n if not isinstance(index, Integral):\n raise TypeError('index must be an integer')\n\n n_csets = self._ag._n_csets\n if n_csets <= index or n_csets < abs(index):\n raise IndexError('coordinate set index is out of range')\n\n if index < 0:\n index += n_csets\n\n self._acsi = index", "title": "" }, { "docid": "f1198de6bf2f76964bcbea508a88fa57", "score": "0.41789317", "text": "def SetAttribute(self, vtkAbstractArray, p_int):\n ...", "title": "" }, { "docid": "1d757978d919b504ec1786eef25b4b8b", "score": "0.41767102", "text": "def staff_id(self, staff_id):\n\n self._staff_id = staff_id", "title": "" }, { "docid": "f95f9f29cf45853dd44f9c72cc917581", "score": "0.4170205", "text": "def set_id(self, _id):\n raise NotImplementedError(\"Should have implemented this\")", "title": "" }, { "docid": "9eb29763d9b4697416b2b8f0f9f9e1c2", "score": "0.4167659", "text": "def __init__(self):\n\n super(Criterian, self).__init__()", "title": "" }, { "docid": "54e4bfa3007f2de5e6d8f4a9b03b774d", "score": "0.41593596", "text": "def filter_clause(self, value):\n\n self._filter_clause = value", "title": "" }, { "docid": "213bb7afb89b8aa75fa69a8ebc7d876e", "score": "0.414132", "text": "def document_id(self, document_id):\n\n self._document_id = document_id", "title": "" }, { "docid": "213bb7afb89b8aa75fa69a8ebc7d876e", "score": "0.414132", "text": "def document_id(self, document_id):\n\n self._document_id = document_id", "title": "" }, { "docid": "f8291a133c24d6d7d26c51c11d5d59d6", "score": "0.41394237", "text": "def set_id(self, id):\n\n\t\tif id is not None and not isinstance(id, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: id 
EXPECTED TYPE: int', None, None)\n\t\t\n\t\tself.__id = id\n\t\tself.__key_modified['id'] = 1", "title": "" }, { "docid": "19e705e4b518b212ad4df6fd37d81b53", "score": "0.4134466", "text": "def criticMode():\n\t\tself.mode = 'critic'", "title": "" }, { "docid": "02f90552d77378ca727d2114999d28b8", "score": "0.41270217", "text": "def criteria(self) -> pulumi.Input['CriteriaArgs']:\n return pulumi.get(self, \"criteria\")", "title": "" }, { "docid": "ff4a1ecee40b63676460f48f8d0f485f", "score": "0.41177654", "text": "def id_classification(self, id_classification):\n\n self._id_classification = id_classification", "title": "" }, { "docid": "1421ff4ed7adfa6d1eb05757ea3f77a5", "score": "0.4112442", "text": "def domain_id(self, domain_id):\n self._domain_id = domain_id", "title": "" }, { "docid": "1c66acf2638373aefa3343675837796c", "score": "0.4110737", "text": "def ruleset_id(self, ruleset_id):\n if self.local_vars_configuration.client_side_validation and ruleset_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `ruleset_id`, must not be `None`\") # noqa: E501\n\n self._ruleset_id = ruleset_id", "title": "" }, { "docid": "556732bfabce50274d6fb022afbcd8b9", "score": "0.41035601", "text": "def setAtticJobParameter( self, jobID, key, value, rescheduleCounter ):\n ret = self._escapeString( jobID )\n if not ret['OK']:\n return ret\n jobID = ret['Value']\n\n ret = self._escapeString( key )\n if not ret['OK']:\n return ret\n key = ret['Value']\n\n ret = self._escapeString( value )\n if not ret['OK']:\n return ret\n value = ret['Value']\n\n ret = self._escapeString( rescheduleCounter )\n if not ret['OK']:\n return ret\n rescheduleCounter = ret['Value']\n\n cmd = 'INSERT INTO AtticJobParameters (JobID,RescheduleCycle,Name,Value) VALUES(%s,%s,%s,%s)' % \\\n ( jobID, rescheduleCounter, key, value )\n result = self._update( cmd )\n if not result['OK']:\n result = S_ERROR( 'JobDB.setAtticJobParameter: operation failed.' 
)\n\n return result", "title": "" }, { "docid": "bbc078acd4a80cd415f0e2e6568c3cc1", "score": "0.40982753", "text": "def set(self, **kwargs) -> None:\n for kw in kwargs:\n if kw in self.names and isinstance(kwargs[kw], int):\n self.__setattr__(kw, int(kwargs[kw]))", "title": "" }, { "docid": "bbc078acd4a80cd415f0e2e6568c3cc1", "score": "0.40982753", "text": "def set(self, **kwargs) -> None:\n for kw in kwargs:\n if kw in self.names and isinstance(kwargs[kw], int):\n self.__setattr__(kw, int(kwargs[kw]))", "title": "" }, { "docid": "42105ee7fca0915ae765390f6fec58f0", "score": "0.40955722", "text": "def contest_id(self, value):\n assert isinstance(value, (int, str))\n self._contest_id = int(value)", "title": "" }, { "docid": "360f5f8cb3d1d587a7c99404fdc7be00", "score": "0.4089905", "text": "def restrictions(self, restrictions):\n\n self._restrictions = restrictions", "title": "" }, { "docid": "4bc47745a2771b48b9daacbf159fd7ef", "score": "0.40864676", "text": "def setID(self, id):\n DOM.setAttribute(self.getElement(), \"id\", id)", "title": "" }, { "docid": "a0714b5c2d5d0d5085431e67d78d591a", "score": "0.4069319", "text": "def setQids(self, qids):\n \n self.qidlist = [-1 for i in range(self.size)]\n for i in range(len(qids)):\n for j in qids[i]:\n if j >= self.size:\n raise Exception(\"Index %d in query out of training set index bounds\" %j)\n elif j < 0:\n raise Exception(\"Negative index %d in query, query indices must be non-negative\" %j)\n else:\n self.qidlist[j] = i\n if -1 in self.qidlist:\n raise Exception(\"Not all training examples were assigned a query\")\n \n \n self.qidmap = {}\n for i in range(len(self.qidlist)):\n qid = self.qidlist[i]\n if self.qidmap.has_key(qid):\n sameqids = self.qidmap[qid]\n sameqids.append(i)\n else:\n self.qidmap[qid] = [i]\n self.indslist = []\n for qid in self.qidmap.keys():\n self.indslist.append(self.qidmap[qid])", "title": "" }, { "docid": "2646b2e6362f2098e18367928f1887f5", "score": "0.4062654", "text": "def set_requested_by(self,patron_id):\n self._requested_by = patron_id", "title": "" }, { "docid": "7e58b0a9eeca01763b57a4c487480ebf", "score": "0.40540376", "text": "def set_transaction_id(txn_id: str) -> None:\n TangoFilter.transaction_id.set(txn_id)", "title": "" }, { "docid": "d5268d4ca78565a639ce8283c65f79a4", "score": "0.40506887", "text": "def set_association_id(self, assoc_id=None):\n if assoc_id is None:\n self.assoc_id = self.make_association_id(\n self.definedby, self.sub, self.rel, self.obj)\n else:\n self.assoc_id = assoc_id\n\n return self.assoc_id", "title": "" }, { "docid": "1aeee7dc643688186fa2a1e0fdcd9a45", "score": "0.40465385", "text": "def id_status_acordo(self, id_status_acordo):\n self._id_status_acordo = id_status_acordo", "title": "" }, { "docid": "189aba784853c23fb937a49811340590", "score": "0.40351993", "text": "def setStreamID(self, *args):\n return _Math.InPlaceFilterD_setStreamID(self, *args)", "title": "" }, { "docid": "0e4ffb33f6af26a292f0dfd47a182b61", "score": "0.4034066", "text": "def setSetting(self, id, value):\n pass", "title": "" }, { "docid": "4ea7b4b8cba46e82e5c039080ff9fa6c", "score": "0.40300798", "text": "def setC(self, c):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n PolytomyTopoPriorCalculatorBase.setC(self, c)", "title": "" }, { "docid": "7b77a69c9eba7a45a1adb791c782d8f4", "score": "0.40294373", "text": "def _setPropValue(self, id, value):\n self._wrapperCheck(value)\n if self.getPropertyType(id) == 'keyedselection':\n value = int(value)\n setattr(self,id,value)", "title": 
"" }, { "docid": "ff0262c1bb29ad946e499937b96720d0", "score": "0.40263474", "text": "def set_id(self, _id):\n self.id = _id\n return self", "title": "" }, { "docid": "47f00fc59a0db33e531a0665d4a96a2c", "score": "0.40211022", "text": "def setfilter(self, c, bandwidth):\n c['Filter'] = float(bandwidth)", "title": "" }, { "docid": "430f2b141638e8086c541c971f8c5f0e", "score": "0.40195855", "text": "def jobid(self, value):\n self._jobid = value", "title": "" }, { "docid": "23b20ab1e16f5bcfcc8b42c857c23375", "score": "0.40088797", "text": "def setcommname(self, cmname):\n\t\tself.commname = cmname", "title": "" }, { "docid": "c3374546f67151111c430ffa7aad7dc1", "score": "0.4005534", "text": "def data_id(self, data_id):\n\n self._data_id = data_id", "title": "" }, { "docid": "56d070c2d1dd165f176c0cbcb2c731a3", "score": "0.39989066", "text": "def ChangeQuery(self, value):\n \n # check value\n if not value:\n value = \"\"\n \n # update control\n self._query_search.ChangeValue(value)", "title": "" }, { "docid": "336c5e83540160e1ee288e8057351121", "score": "0.39983198", "text": "def position_id(self, position_id):\n\n self._position_id = position_id", "title": "" }, { "docid": "f0b6ff335bd35ad23a88354bdfc05e18", "score": "0.39952934", "text": "def ChangeMask(self, idif, iasic, ich, mask):\n\n for a in self.asics:\n if (a.getInt(\"DIF_ID\") != idif):\n continue\n if (a.getInt(\"HEADER\") != iasic):\n continue\n\n vg = a.getIntVector(\"MASKDISCRITIME\")\n vg[ich] = mask\n print \" Dac changed\", idif, iasic, ich, mask\n try:\n a.setIntVector(\"MASKDISCRITIME\", vg)\n except Exception, e:\n print e.getMessage()\n a.setModified(1)", "title": "" }, { "docid": "e0b5d33c9d00700641910f09b6644233", "score": "0.39932278", "text": "def set_superset_condition(self, superset_condition):\n for key, value in superset_condition.items():\n if not hasattr(self, key):\n setattr(self, key, value)", "title": "" }, { "docid": "0f69c975f7e215ad931f189148730a84", "score": "0.39895585", "text": "def set_avoid_cidrs(self, *cidrs, append=False, chunk_size=5000):\n return self._set_cidrs('avoid', *cidrs, append=append, chunk_size=chunk_size)", "title": "" }, { "docid": "02ddb8760a37736fddea96b22d97167c", "score": "0.3989511", "text": "def set_if(self, condition_, content):\n\n if condition_:\n self.set(content)", "title": "" }, { "docid": "22951159888430fa7a9177a70298f1fe", "score": "0.39874533", "text": "def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id", "title": "" }, { "docid": "22951159888430fa7a9177a70298f1fe", "score": "0.39874533", "text": "def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id", "title": "" }, { "docid": "22951159888430fa7a9177a70298f1fe", "score": "0.39874533", "text": "def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id", "title": "" }, { "docid": "22951159888430fa7a9177a70298f1fe", "score": "0.39874533", "text": "def compartment_id(self, compartment_id):\n self._compartment_id = compartment_id", "title": "" }, { "docid": "e0611af1e57978d50ba43ca7a7adb6f9", "score": "0.3973222", "text": "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "title": "" }, { "docid": "e0611af1e57978d50ba43ca7a7adb6f9", "score": "0.3973222", "text": "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "title": "" }, { "docid": "e0611af1e57978d50ba43ca7a7adb6f9", "score": "0.3973222", "text": "def organization_id(self, organization_id):\n\n 
self._organization_id = organization_id", "title": "" }, { "docid": "55f5a48a94710df8c324c9191d330d12", "score": "0.39659256", "text": "def owner_id(self, value):\n\t\tself._owner_id = value\n\t\tg.db.execute(\"UPDATE critters SET owner_id = ? WHERE id = ?;\", (self._owner_id, self.id))\n\t\tself._owner = None # so owner is reloaded on next access", "title": "" }, { "docid": "3deb35284364dbaff86329f9cfe391a8", "score": "0.3965871", "text": "def cod_id(self, cod_id: int):\n\n self._cod_id = cod_id", "title": "" }, { "docid": "ce5747cb8954fce0337b497e515aa22b", "score": "0.39644256", "text": "def set(id, value):", "title": "" }, { "docid": "396eb7f91b0bbe05379d6602c6eab4e6", "score": "0.39584073", "text": "def batch_id(self, batch_id):\n\n self._batch_id = batch_id", "title": "" }, { "docid": "01d1eeae548564aae05903d7fdb6dc4f", "score": "0.39582947", "text": "def SetFilter ( self, attribute, values, exclude=0 ):\r\n\t\tassert(isinstance(attribute, str))\r\n\t\tassert iter(values)\r\n\r\n\t\tfor value in values:\r\n\t\t\tAssertInt32 ( value )\r\n\r\n\t\tself._filters.append ( { 'type':SPH_FILTER_VALUES, 'attr':attribute, 'exclude':exclude, 'values':values } )", "title": "" }, { "docid": "ce90482ff2093160b8d6fb5b9b3a1de0", "score": "0.39495757", "text": "def restrictionaccount(self, restrictionaccount):\n\n self._restrictionaccount = restrictionaccount", "title": "" }, { "docid": "122ab36fd3287a777de7720a28073933", "score": "0.39454752", "text": "def set_expense_item_id(self,expense_item_id):\n self.expense_item_id = expense_item_id", "title": "" }, { "docid": "c374d4701366a411d46021dbda7cd4f9", "score": "0.39326566", "text": "def id_escritorio_cobranca(self, id_escritorio_cobranca):\n self._id_escritorio_cobranca = id_escritorio_cobranca", "title": "" }, { "docid": "2b7786159783c257b66e5e4653b31d7c", "score": "0.39316884", "text": "def set_abstol(self, objidx, abstol):\n objidx = self._conv(objidx)\n _proc.multiobjchgattribs(self._env._e, self._cplex._lp,\n objidx, abstol=abstol)", "title": "" }, { "docid": "4873c0bf1df6cd7a20cd45d86fe48248", "score": "0.39316446", "text": "def filter(self, criterion):\r\n if isinstance(criterion, basestring):\r\n criterion = sql.text(criterion)\r\n\r\n if criterion is not None and \\\r\n not isinstance(criterion, sql.ClauseElement):\r\n raise sa_exc.ArgumentError(\r\n \"filter() argument must be of type \"\r\n \"sqlalchemy.sql.ClauseElement or string\")\r\n\r\n criterion = self._adapt_clause(criterion, True, True)\r\n\r\n if self._criterion is not None:\r\n self._criterion = self._criterion & criterion\r\n else:\r\n self._criterion = criterion", "title": "" } ]
967191b6464eefb9b6ddcabbbe49ead0
Get All Detectors in Detector Table
[ { "docid": "919673a91f70b9104b20d9b152079b8e", "score": "0.0", "text": "def getAllPartitions(self):\n connection = sqlite3.connect(self.dataBaseFile)\n c = connection.cursor()\n try:\n c.execute(\"SELECT * FROM Partition\")\n res = c.fetchall()\n return DataObjectCollection(res, partitionDataObject)\n except Exception as e:\n return self.handleError(e,\"error getting partitions\")\n finally:\n connection.close()", "title": "" } ]
[ { "docid": "78ac6ae6c2097d86d3ce2e41feca4a72", "score": "0.734863", "text": "def getAllDetectors(self):\n connection = sqlite3.connect(self.dataBaseFile)\n c = connection.cursor()\n try:\n c.execute(\"SELECT * FROM Detector\")\n res = c.fetchall()\n return DataObjectCollection(res,detectorDataObject)\n except Exception as e:\n return self.handleError(e,\"error getting detectors\")\n finally:\n connection.close()", "title": "" }, { "docid": "2e6ca7be31b6591daecdf6a70c06c291", "score": "0.62646914", "text": "def items(self):\n return self.detectors.items()", "title": "" }, { "docid": "c3d49997a4e8bb8f661cf25bff89fb19", "score": "0.6165963", "text": "def getAllUnmappedDetectors(self):\n connection = sqlite3.connect(self.dataBaseFile)\n c = connection.cursor()\n try:\n res = c.execute(\"SELECT * FROM Detector Where Detector.id not in (select DetectorId From Mapping)\").fetchall()\n return DataObjectCollection(res,detectorDataObject)\n except Exception as e:\n return self.handleError(e,\"error getting unmapped detectors\")\n finally:\n connection.close()", "title": "" }, { "docid": "b4119fe4abaf7b4157892ff633cf70e4", "score": "0.61496323", "text": "def get_detectors(self, dither):\n return tuple(self._hdf5_spectra[dither].keys())", "title": "" }, { "docid": "ea1eaa669624ed5b223b668858abdd9c", "score": "0.5938917", "text": "def _get_detectors(self, data):\n my_detectors = set(data.all_local_detectors())\n comm = data.comm.comm_world\n if comm is None:\n all_detectors = my_detectors\n else:\n all_detectors = comm.gather(my_detectors)\n if comm.rank == 0:\n for detectors in all_detectors:\n my_detectors = my_detectors.union(detectors)\n all_detectors = comm.bcast(my_detectors)\n return all_detectors", "title": "" }, { "docid": "aa4eab8ae2ce55f42b020e259a72862e", "score": "0.58777726", "text": "def get_dets(self, detset):\n c = self.conn.execute('select det from detsets where name=?', (detset,))\n return [r[0] for r in c]", "title": "" }, { "docid": "b1afa0f428da1f976840210891306452", "score": "0.5831054", "text": "def find_all():\n return ItopapiPrototype.find_all(ItopapiVirtualMachine)", "title": "" }, { "docid": "cfe231d362921480e9bc022ebbadaaf0", "score": "0.5687561", "text": "def get_v_id_list(self, detector_id):\n raise NotImplementedError", "title": "" }, { "docid": "ce7b1c50869431e679ba816dabe91910", "score": "0.566973", "text": "def getDetectorsForPartition(self,pcaId):\n connection = sqlite3.connect(self.dataBaseFile)\n c = connection.cursor()\n val = (pcaId,)\n try:\n c.execute(\"SELECT * From Detector WHERE Detector.id in (SELECT d.id FROM Detector d JOIN Mapping m ON d.id = m.DetectorId WHERE PartitionId=?)\",val)\n res = c.fetchall()\n return DataObjectCollection(res, detectorDataObject)\n except Exception as e:\n return self.handleError(e,\"error getting detectors for Partition\")\n finally:\n connection.close()", "title": "" }, { "docid": "02ec08703b3fe3b3c4ed0f4347a87dea", "score": "0.5649628", "text": "def get_all_device_agents():\n data = []\n\n # Establish a database session\n database = db.Database()\n session = database.session()\n result = session.query(DeviceAgent)\n\n # Add to the list of device idx values\n for instance in result:\n data_dict = {}\n data_dict['idx_deviceagent'] = instance.idx_deviceagent\n data_dict['idx_agent'] = instance.idx_agent\n data_dict['idx_device'] = instance.idx_device\n data_dict['enabled'] = bool(instance.enabled)\n data.append(data_dict)\n\n # Return the session to the pool after processing\n database.close()\n\n # Return\n return data", 
"title": "" }, { "docid": "f7a74bbbde52e3c33efc3392cf51322a", "score": "0.559258", "text": "def _fetchDetectorInfo(self): \n \n self._log.debug(\"In fetch Detector Info \")\n \n result = self._mainConnector.execute(sqlrequests.SQL_GETDETECTORINFO%(self._sampleID))\n \n # only one row in result set\n rows = result.fetchall()\n \n nbResults = len(rows)\n \n if nbResults != 1:\n raise CTBTOError(-1,\"Expecting to have one result for sample_id %s but got %d either None or more than one. %s\"%(self._sampleID,nbResults,rows))\n \n # update data bag\n self._dataBag.update(rows[0].items())\n \n result.close()", "title": "" }, { "docid": "613f9f0f36d3fc5da5c0240abc957a88", "score": "0.5563643", "text": "def get_detections(self, frame):\r\n _, _, h, w = self.net.get_input_shape().shape\r\n out = self.net.forward(cv.resize(frame, (w, h)))\r\n detections = self.__decode_detections(out, frame.shape)\r\n return detections", "title": "" }, { "docid": "5ac6ba401d48f27acf4e7ed5095fb7ff", "score": "0.5560064", "text": "def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()", "title": "" }, { "docid": "c28612c15a8ecc6f387a3d6bd5de35ea", "score": "0.55354106", "text": "def retrieve_det_persons(self):\n result = []\n for video in self.det_list.values():\n result += [[det['real_id']] + det['ids'] for frames in video.values() for det in frames]\n\n result = [y for x in result for y in x]\n result.sort()\n result = remove_duplicates(result)\n self.person_ids = result\n result = ['Person ' + str(val) for val in result]\n return result", "title": "" }, { "docid": "1cae645d7853a6a7fc4efc3835523f6b", "score": "0.5489765", "text": "def DetectorFn(images):\n boxes, scores, class_indices = model(tf.convert_to_tensor(images))\n\n return boxes.numpy(), scores.numpy(), class_indices.numpy()", "title": "" }, { "docid": "1ca878dd7adb58ed2cb7a65bb3011d24", "score": "0.548494", "text": "def get_detsets(self, obs_id):\n c = self.conn.execute('select distinct detset from files '\n 'where obs_id=?', (obs_id,))\n return [r[0] for r in c]", "title": "" }, { "docid": "86b743adfc85775ec8655f4122f3987b", "score": "0.546478", "text": "def extracluster_all_vic_stats(self):\n return self.extracluster_vic_stats(self.all_well_metrics)", "title": "" }, { "docid": "43665cbc9156e35153346fcb44070694", "score": "0.54464793", "text": "def return_devices(self):\n \n varOut = []\n\n cam_num = ueye.INT()\n ueye.is_GetNumberOfCameras(cam_num)\n for i in range(cam_num.value):\n hid = ueye.HIDS(cam_num)\n s = ueye.is_InitCamera(self.hid, self.hwnd)\n r = ueye.is_GetSensorInfo(self.hid, self.sinfo)\n sname = self.sinfo.strSensorName.decode('UTF-8')\n self.detected_devices[sname] = i+1\n varOut.append(sname)\n ueye.is_ExitCamera(self.hid)\n \n return varOut", "title": "" }, { "docid": "054539d5f0a34ec9311f71f512b78be6", "score": "0.54356194", "text": "def get_all_devices(self):\n pass", "title": "" }, { "docid": "9920829eda0e5e65f0d41fcff3b47e9a", "score": "0.5426963", "text": "def __iter__(self):\n return iter(self.detectors)", "title": "" }, { "docid": "ad52e13917f89bdad86216b3c143fdd5", "score": "0.5420994", "text": "def get_detector_list(self):\n global dop\n #List of the gene values\n sorter = [(self[detnum], detnum) for (detnum, gene) in self.genome.items()]\n #Sort using the values (first thing in the tuple)\n sorter.sort()\n #List of detectors numbers to use, sorted in order.\n det_list = [val[1] for val in sorter[:dop.number_to_keep]]\n #Add the ones we always keep (converted to 
strings too)\n det_list = det_list + [\"%02d\" % val for val in dop.keep_list]\n det_list.sort()\n #Here's the list\n return det_list", "title": "" }, { "docid": "608350ca639607a081de750c82078d61", "score": "0.53979146", "text": "def grabDetList():\n dsList = [0, 1, 2, 3, 4, 5]\n\n l0 = det.getGoodChanList(0, mod=1, detType='Enr')\n cpd0 = ['C{}P{}D{}'.format(*det.getChanCPD(0, chan)) for chan in l0]\n l1 = det.getGoodChanList(1, mod=1, detType='Enr')\n cpd1 = ['C{}P{}D{}'.format(*det.getChanCPD(1, chan)) for chan in l1]\n l2 = det.getGoodChanList(2, mod=1, detType='Enr')\n cpd2 = ['C{}P{}D{}'.format(*det.getChanCPD(2, chan)) for chan in l2]\n l3 = det.getGoodChanList(3, mod=1, detType='Enr')\n cpd3 = ['C{}P{}D{}'.format(*det.getChanCPD(3, chan)) for chan in l3]\n l5 = det.getGoodChanList(5, mod=1, detType='Enr')\n cpd5 = ['C{}P{}D{}'.format(*det.getChanCPD(5, chan)) for chan in l5]\n\n a0 = np.intersect1d(cpd0, cpd1, assume_unique=True)\n a01 = np.intersect1d(cpd0, cpd3, assume_unique=True)\n a02 = np.intersect1d(cpd0, cpd5, assume_unique=True)\n a1 = np.intersect1d(cpd1, cpd3, assume_unique=True)\n a2 = np.intersect1d(cpd1, cpd5, assume_unique=True)\n a3 = np.intersect1d(cpd3, cpd5, assume_unique=True)\n a4 = np.intersect1d(a2, a3, assume_unique=True)\n\n a03 = np.intersect1d(a0, a01, assume_unique=True)\n a04 = np.intersect1d(a01, a02, assume_unique=True)\n a05 = np.intersect1d(a03, a04, assume_unique=True)\n\n print(cpd0, cpd1, cpd2, cpd3, cpd5)\n print(a4.tolist())\n print(a05.tolist())\n\n l4 = det.getGoodChanList(4, mod=2, detType='Enr')\n cpd4 = ['C{}P{}D{}'.format(*det.getChanCPD(4, chan)) for chan in l4]\n\n l52 = det.getGoodChanList(5, mod=2, detType='Enr')\n cpd52 = ['C{}P{}D{}'.format(*det.getChanCPD(5, chan)) for chan in l52]\n\n a52 = np.intersect1d(cpd4, cpd52, assume_unique=True)\n print(a52.tolist())", "title": "" }, { "docid": "d1285a654315026c4a192ea87df3c909", "score": "0.53317094", "text": "def all_detection_points(self):\n result = []\n for a in self.all_analysis:\n result.extend(a.detections)\n for o in self.all_observables:\n result.extend(o.detections)\n\n return result", "title": "" }, { "docid": "761af169e867c15e087f97d1a4c30ca8", "score": "0.5287887", "text": "def retrieve_videos_list(self):\n return self.camera_list.keys()", "title": "" }, { "docid": "19bd0f21f8b4a4387149958262f8fa11", "score": "0.52842283", "text": "def getDetector(self,id):\n connection = sqlite3.connect(self.dataBaseFile)\n c = connection.cursor()\n val = (id,)\n try:\n res = c.execute(\"SELECT * FROM Detector WHERE id = ?\", val).fetchone()\n if not res:\n return codes.idUnknown\n return detectorDataObject(res)\n except Exception as e:\n return self.handleError(e,\"error getting detector\")\n finally:\n connection.close()", "title": "" }, { "docid": "ce70c94125c3f92d424cd54aacae7ffd", "score": "0.52612376", "text": "def get_table(self) -> List[Card]:\n return self._get(self._table)", "title": "" }, { "docid": "613874a1832694b5bac29f263628bcb2", "score": "0.5250427", "text": "def get_detections(self, frames):\n return self.run_async(frames)", "title": "" }, { "docid": "2dad36be431aa05d97b3f429e292878c", "score": "0.5248455", "text": "def getAllVehicles(self):\r\n #with self.connection as con:\r\n with sqlite3.connect(self.databaseFile) as con:\r\n cur = con.cursor()\r\n cur.execute(\"SELECT * FROM vehicles\")\r\n \r\n rows = cur.fetchall()\r\n con.commit()\r\n return rows", "title": "" }, { "docid": "e265e7021374f89700d0c598dad93097", "score": "0.5223709", "text": "def process_tables(self, 
tables):\n result = []\n for vrf in tables['showVRFInterface']:\n temp = {}\n temp['name'] = vrf['name']\n result.append(temp)\n return result", "title": "" }, { "docid": "08bd932d20684f324eba80c47619232d", "score": "0.5212133", "text": "def _get_all_devices(**query):\n\n _initialize()\n\n devices = []\n\n for id in range(pm.lib.Pm_CountDevices()):\n info_ptr = pm.lib.Pm_GetDeviceInfo(id)\n if info_ptr:\n devinfo = info_ptr.contents\n \n dev = iobase.Device(name=devinfo.name,\n input=devinfo.input != 0,\n output=devinfo.output != 0,\n id=id,\n interf=devinfo.interf,\n opened=devinfo.opened != 0)\n devices.append(dev)\n\n return devices", "title": "" }, { "docid": "188b7362d835636125e8e4dfc4a99c58", "score": "0.51915765", "text": "def _fetch_descriptors(\n self, api: ThreatExchangeAPI, td_ids: t.List[int], collab_labels: t.List[str]\n ) -> t.List[ThreatDescriptor]:\n return [\n ThreatDescriptor.from_te_json(ThreatDescriptor.MY_APP_ID, td_json)\n for td_json in api.get_threat_descriptors(td_ids)\n ]", "title": "" }, { "docid": "c2c0e41d0e9567764a9e78d852ddf081", "score": "0.51693386", "text": "def find_detector(self):\r\n self.target_address = None\r\n try:\r\n for device in self.discovered_devices:\r\n self.devices_found[device.address] = device.name\r\n if self.target_name in device.name:\r\n self.target_address = device.address\r\n except Exception as error:\r\n print(str(error))\r\n\r\n return [self.target_name, self.target_address]", "title": "" }, { "docid": "c2794fca83bd532b6c52eae79065855e", "score": "0.5146848", "text": "def get_all_vendors(self):\n pass", "title": "" }, { "docid": "14c43791cfc1f326d4c1f7051e11af32", "score": "0.5141746", "text": "def list_family(table):\n famlist = list({key['device'] for key in table if 'secret' not in key and key['ptcrbid']})\n utilities.lprint(famlist)", "title": "" }, { "docid": "6f0d939fdfcc58754b7456a594250e04", "score": "0.512522", "text": "def getAllVdos(self):\n return self._vdos", "title": "" }, { "docid": "c3efca743199e96c8750444b667eca37", "score": "0.5116909", "text": "def detach_all(self):\n detached = []\n for device in self.udisks.get_all():\n if (device.is_drive and\n device.is_external and\n device.is_detachable and\n self.detach_device(device, force=True)):\n detached.append(device)\n return detached", "title": "" }, { "docid": "3aa9750f97e2129910304eec929f17c0", "score": "0.51041216", "text": "def get_tables(self):\n return self.tables", "title": "" }, { "docid": "e45f317d2eadfaeec1fb6f17480987e0", "score": "0.5103694", "text": "def get_all_drivers():\n driver_dic = []\n cnt = ogr.GetDriverCount()\n for i in range(cnt):\n driver_name = ogr.GetDriver(i).GetName()\n if not driver_name in driver_dic:\n driver_dic.append(driver_name)\n print(driver_dic)", "title": "" }, { "docid": "9b6f0ae57f4c796a3e25234691608860", "score": "0.5098117", "text": "def get_all_devices(self):\n return [self.get_device(device_name) for device_name in sorted(self._device_instances.keys())]", "title": "" }, { "docid": "12e7d1d9ff5a6bdc521f67f19e6f04c4", "score": "0.5096125", "text": "def gensectors(graph, model):\n speer_trees = SpeerTrees(graph, model)\n t_ = list()\n for tree in speer_trees:\n t_ += [x for x in xTreeElement(tree)]\n #print_tree(tree)\n print \"speer_sectors symm:\", len(t_)\n sys.stdout.flush()\n if _ASectors:\n ASectors(speer_trees, graph)\n sys.stdout.flush()\n return speer_trees", "title": "" }, { "docid": "7822df33ad881952743922ecc3ae4a45", "score": "0.5085648", "text": "def get_all_vulnerabilities(self):\n\n resp = 
self.api_get(self.url + \"resource/api/vulnerabilities\")\n return resp", "title": "" }, { "docid": "980d4e74a40cf8096eeab8a82a280c23", "score": "0.5079254", "text": "def get_torchvision_dataset(self):\n pass", "title": "" }, { "docid": "38ced2ed62854d3b76224052b14b8afb", "score": "0.5078638", "text": "def __process_det_file(self, det_file=''):\n file_text = open(det_file, 'r')\n lines = file_text.readlines()\n file_text.close()\n\n result = []\n for line in lines:\n aux = [float(val) for val in line.rstrip('\\n').split(',')]\n result.append(aux)\n\n for i, det in enumerate(result):\n if len(det) > 5:\n detection = Detection(det[0], det[1], det[2], det[3], det[4], det[5], det[6:])\n result[i] = detection.return_dict()\n else:\n detection = Detection(det[0], det[1], det[2], det[3], det[4], 0, [])\n result[i] = detection.return_dict()\n\n return result", "title": "" }, { "docid": "f9a37cacfe9f81d5891057b280d6e004", "score": "0.507549", "text": "def getTableDescriptors(self, tables):\n pass", "title": "" }, { "docid": "c098b41f3e45e45b80ee19ec0fd14162", "score": "0.5073459", "text": "def detect() -> tp.List[Device]:\n devices = []\n SmlReader.detect(devices)\n PlainReader.detect(devices)\n Bme280Reader.detect(devices)\n return devices", "title": "" }, { "docid": "b45f44f9336089e98cf4c42e0a8c29dc", "score": "0.50556993", "text": "def polydispersity_all_vic_stats(self):\n return self.polydispersity_vic_stats(self.all_well_metrics)", "title": "" }, { "docid": "2dae81ebccc9112ddc97d3605e853c96", "score": "0.50491387", "text": "async def get_entities(proxy: RenaultHub) -> List[RenaultDataEntity]:\n entities: List[RenaultDataEntity] = []\n for vehicle in proxy.vehicles.values():\n entities.extend(await get_vehicle_entities(vehicle))\n return entities", "title": "" }, { "docid": "e0fad8daeb72d44662442c177b013a83", "score": "0.5046604", "text": "def get_all_vehicles() -> List[Vehicle]:\n vehicles = list()\n for veh_id in traci.vehicle.getIDList():\n veh_pos = traci.vehicle.getPosition(veh_id)\n vehicle = Vehicle(veh_id, veh_pos)\n vehicle.emissions = compute_vehicle_emissions(veh_id)\n vehicles.append(vehicle)\n return vehicles", "title": "" }, { "docid": "382410280c39db99b26ef0301f753af4", "score": "0.50390184", "text": "def test_detector_instanciate(self):\n for k, v in ALL_DETECTORS.iteritems():\n v()", "title": "" }, { "docid": "3ac46c549959af563fba943bf1f3519b", "score": "0.50379354", "text": "def list_available(self):\n\n df = pd.DataFrame(columns=[\"Sector\", \"Camera\", \"CCD\"])\n idx = 0\n for sector in np.arange(200):\n for camera in np.arange(1, 5):\n for ccd in np.arange(1, 5):\n dir = f\"{PACKAGEDIR}/data/sector{sector:03}/camera{camera:02}/ccd{ccd:02}/\"\n if not os.path.isdir(dir):\n continue\n fname = f\"tessbackdrop_sector{sector}_camera{camera}_ccd{ccd}.fits\"\n if os.path.isfile(dir + fname):\n df.loc[idx] = np.hstack([sector, camera, ccd])\n idx += 1\n return df", "title": "" }, { "docid": "19d08cb533a31fe5123a880d808d02d0", "score": "0.5035193", "text": "def _list_all(self):\n instances = []\n for id in self.ids:\n try:\n instance = self.dbaas.instances.get(id)\n instances.append(instance)\n except exceptions.NotFound:\n pass\n return instances", "title": "" }, { "docid": "214bdea7488dd0ba7c5baa49cdd998db", "score": "0.50279754", "text": "def _get_detection_results(virus_dir):\n scan_results = []\n for root, _, files in os.walk(virus_dir):\n for file in files:\n full_path = os.path.join(root, file)\n rnd = Random(SEED)\n key = rnd.randint(0, 100)\n try:\n packer = 
ELFPacker(full_path)\n all_functions = packer.list_functions()\n packer.encrypt(key, all_functions)\n except Exception as ex:\n print(f\"Error encrypting {file}, skipping\")\n print(type(ex))\n continue\n\n # Test each file and record the result\n original = virus_total.scan_file(full_path)['scans']\n packed = virus_total.scan_file(f\"{full_path}.packed\")['scans']\n scan_results.append(DetectionTestResult(file, original, packed))\n\n return scan_results", "title": "" }, { "docid": "e7dc6c25e9c7adec32835136ed8b0aa1", "score": "0.5007313", "text": "def facts_devices(module):\n if \"cvp_facts\" in module.params:\n if \"devices\" in module.params[\"cvp_facts\"]:\n return module.params[\"cvp_facts\"][\"devices\"]\n return []", "title": "" }, { "docid": "99145ab783a5d389e9eb288253ebd7a7", "score": "0.4999163", "text": "def get_features(self):\n\t\tif len(self.tracks) == 1:\n\t\t\tfeatures = self.tracks[0].features\n\t\telif len(self.tracks) > 1:\n\t\t\tfeatures = torch.cat([t.features for t in self.tracks],0)\n\t\telse:\n\t\t\tfeatures = torch.zeros(0).cuda()\n\t\treturn features", "title": "" }, { "docid": "a29cfd398c76df7f83e4d670a8e21cda", "score": "0.49947482", "text": "def __Retrieve(self):\n self.__FPS = self.__framerate.CalculateFPS()\n return [self.__devices[key].Retrieve() for key in self.__devices]", "title": "" }, { "docid": "4c1c4d3f2338c99af53119d2108b8592", "score": "0.49826524", "text": "def get_host_dvs_vmnics(self, host):\n vmnics = []\n host_obj = self._get_host(host)\n for network in host_obj.config.network.proxySwitch:\n vmnics.append(network.spec.backing.pnicSpec[0].pnicDevice)\n return vmnics", "title": "" }, { "docid": "d7cbdb4b0a57cfd85fa91d1c52223c42", "score": "0.49744132", "text": "def getMatchingDevs(self, regex):\n if len(self._devs)==0:\n self.extractDevs()\n \n correspondingDevs = []\n for el in self._devs:\n if re.search(regex, el):\n correspondingDevs.append(el)\n return correspondingDevs", "title": "" }, { "docid": "bb2c7b2d40b05bd2780d271079a64b04", "score": "0.49687558", "text": "def list_devices(table):\n for key in table:\n hwid = \"NO HWID\" if not key['hwid'] else key['hwid']\n fccid = \"NO FCCID\" if not key['fccid'] else key['fccid']\n print(\"{0} {1} - {2} - {3}\".format(key['device'], key['name'], hwid, fccid))", "title": "" }, { "docid": "dc45ef7b3fd32e145bdd63d8ef820c35", "score": "0.49651736", "text": "def get_descriptors(self):\n return self.descriptors", "title": "" }, { "docid": "cd6a83b5e6272f0f12991c9033cce528", "score": "0.4965081", "text": "def find_all(self) -> []:\n pass", "title": "" }, { "docid": "d6cd91967c5911f95ee972a0a80649ed", "score": "0.49642777", "text": "def get_annotated_videos_table(self, device='all'):\n if device == 'all':\n return_table = self.cursor_vid.execute('''SELECT * FROM videos WHERE annotated = 1''')\n else:\n return_table = self.cursor_vid.execute('''SELECT * FROM videos WHERE annotated = 1 AND camera = ?''',\n (device,))\n return return_table", "title": "" }, { "docid": "fc9a9a17abbcfb85adf73834b8065bd7", "score": "0.49614727", "text": "def get_interface_datanets(context, iface):\n return interface.get_interface_datanets(context, iface)", "title": "" }, { "docid": "cb2e7286424205a9d2bb99b8bd9bf436", "score": "0.4947524", "text": "def get_facts(self):\n return []", "title": "" }, { "docid": "deb0a6d773478e0147c95d46efdfdeeb", "score": "0.49424905", "text": "def features(self):\n\n if not self.feat:\n kp2d = self.feature_detector.detect(self)\n disparities = [self.lookup_disparity(x,y) for (x,y) in kp2d]\n 
self.feat = [ (x,y,z) for ((x,y),z) in zip(kp2d, disparities) if z]\n return self.feat", "title": "" }, { "docid": "f334b2f5936274eadc34c701a50c9a90", "score": "0.49370283", "text": "def GetAllPlugins(self):\n return self.Descriptors", "title": "" }, { "docid": "bb0a23cc91edb1da3b1f3e430e51796d", "score": "0.49366936", "text": "def get_tables(self):\n return self._tables", "title": "" }, { "docid": "bb0a23cc91edb1da3b1f3e430e51796d", "score": "0.49366936", "text": "def get_tables(self):\n return self._tables", "title": "" }, { "docid": "1966780e973a860af15f7146d36cc6c7", "score": "0.4935157", "text": "def all(cls):\n results = []\n for name in FEATURES:\n feat = cls.get_by_name(name)\n if feat is None:\n feat = cls(name=name)\n cls.query.session.add(feat)\n results.append(feat)\n return results", "title": "" }, { "docid": "b17bf3b870b614b627c0ee836590d326", "score": "0.4933535", "text": "def getall():", "title": "" }, { "docid": "393df22978e9217522015b4294c8b4fe", "score": "0.49322718", "text": "def __populate_det__(self):\n for f in listdir(self.detection_dir):\n # For each folder create an dictionary entry with the video name, frame number and detections in it\n if not isfile(join(self.detection_dir, f)):\n self.det_list[f] = {}\n det_list = listdir(join(self.detection_dir, f))\n det_list.sort()\n\n for det in det_list:\n self.det_list[f][det.split('.')[0]] = self.__process_det_file(join(self.detection_dir, f, det))\n return", "title": "" }, { "docid": "d188d09638069a81a26781b0ab784b4d", "score": "0.49307656", "text": "def classicalDetectors(image, method='sift'):\n image = image*255\n round_method = False\n if round_method == True:\n from models.classical_detectors_descriptors import classical_detector_descriptor # with quantization\n points, desc = classical_detector_descriptor(image, **{'method': method})\n y, x = np.where(points)\n # pnts = np.stack((y, x), axis=1)\n pnts = np.stack((x, y), axis=1) # should be (x, y)\n ## collect descriptros\n desc = desc[y, x, :]\n else:\n # sift with subpixel accuracy\n from models.classical_detectors_descriptors import SIFT_det as classical_detector_descriptor\n pnts, desc = classical_detector_descriptor(image, image)\n\n print(\"desc shape: \", desc.shape)\n return pnts, desc", "title": "" }, { "docid": "a398cc07447ad979857375333ea74a85", "score": "0.4927843", "text": "def get_tables(self):\n return [course.get('course_id') for course in self.db.Courses.find()]", "title": "" }, { "docid": "b7c6d6f100a2e44161338d2f7d7adfc5", "score": "0.49222377", "text": "def get_vm_list(self):\n pass", "title": "" }, { "docid": "923b1983c4af68ad85cf03fb11a8f0ea", "score": "0.4901677", "text": "def _get_episodes():\n return list(Episode.select()\n .where(Episode.is_downloaded == 0)\n .order_by(Episode.quality.desc()))", "title": "" }, { "docid": "4a9ea8fe9357642a0cd466327c16aa36", "score": "0.48981166", "text": "def get_all_sensor_active_models(self):\n\n return self.sensor_entity_active_models.values()", "title": "" }, { "docid": "39223f01d276fb51d98b1c32ceb9b6ac", "score": "0.48967618", "text": "def list_rows(db_session, table_id):\n query = text('SELECT * FROM vtable_{}()'.format(table_id))\n return db_session.execute(query).fetchall()", "title": "" }, { "docid": "0693746a010715061e55969624e08fa4", "score": "0.4893917", "text": "def list_devices(cls) -> Iterator[CtapDevice]:", "title": "" }, { "docid": "6c4e9ba35d3f7322c7beef47d1bc9dc1", "score": "0.48886308", "text": "def get_object_ids(self, dither, detector):\n return 
tuple(self._hdf5_spectra[dither][detector].keys())", "title": "" }, { "docid": "7505ceb0a72262cb52c9518d8452bc7e", "score": "0.48863062", "text": "async def discover_entities(hass: HomeAssistant) -> list[Entity]:\n lights = await pyzerproc.discover()\n\n # Filter out already discovered lights\n new_lights = [\n light\n for light in lights\n if light.address not in hass.data[DOMAIN][DATA_ADDRESSES]\n ]\n\n entities = []\n for light in new_lights:\n hass.data[DOMAIN][DATA_ADDRESSES].add(light.address)\n entities.append(ZerprocLight(light))\n\n return entities", "title": "" }, { "docid": "f134b028fdf7146a5b3847172de56d26", "score": "0.48827463", "text": "def get_all(self):", "title": "" }, { "docid": "f134b028fdf7146a5b3847172de56d26", "score": "0.48827463", "text": "def get_all(self):", "title": "" }, { "docid": "c67268d644457925cb5c1395f97acdf6", "score": "0.48748657", "text": "def test_list_with_descriptor(self):\n irg = ImageRetrieveGlobal(\"../data\")\n l = irg.get_list_using_descriptors_on_directory(6)\n assert len(l) == 83426", "title": "" }, { "docid": "4dd073d5b401ef52a6901f422abef15a", "score": "0.48710057", "text": "def verfers(self):\n if \"k\" in self.ked: # establishment event\n keys = self.ked[\"k\"]\n else: # non-establishment event\n keys = []\n\n return [Verfer(qb64=key) for key in keys]", "title": "" }, { "docid": "690a09413156573d2b2e6ea5904fdcd6", "score": "0.48700637", "text": "def getAllTableInFuncDep(self):\n\t\tself.cursor.execute(\"\"\" SELECT DISTINCT FuncDep.'table' FROM FuncDep\"\"\")\n\t\tretour=[]\n\t\tfor item in self.cursor:\n\t\t\tretour.append(item[0])\n\t\treturn retour", "title": "" }, { "docid": "51b7fa31ab6b2405cb992a07c2678f78", "score": "0.48662972", "text": "def get_all(self):\n return self._entities[:]", "title": "" }, { "docid": "ef8effe94a74446c9a900eb5f7b27a3b", "score": "0.4861777", "text": "def get_devices(self):\n raise NotImplementedError(\"FIXME: Implement method get_devices\")", "title": "" }, { "docid": "15cb8ec20bfd20b8fcf0cf255b4bfad8", "score": "0.48581174", "text": "def get_devices(self):\n return", "title": "" }, { "docid": "40bc122412f9e365e7364578132c27d2", "score": "0.48568493", "text": "async def discover_devices(self):\r\n from bleak import discover\r\n\r\n self.discovered_devices = await discover()\r\n if len(self.discovered_devices) > 0:\r\n return self.find_detector()", "title": "" }, { "docid": "ffc2c740499b4aec10cd068a80ddf3f0", "score": "0.48528233", "text": "def vehicle_list(self):\r\n return [Vehicle(v, self) for v in self.api('VEHICLE_LIST')['response']]", "title": "" }, { "docid": "c9e6f011442de936904c889818fb9bc6", "score": "0.48526895", "text": "def _hdulist(self):\n return fits.HDUList([self._make_primary_hdu(),\n self._make_target_extension(),\n self._make_aperture_extension()])", "title": "" }, { "docid": "586f7f8411a79cba72975d8f54cb4547", "score": "0.48496455", "text": "def devices(self):\n return (self.device(device_id) for device_id in self.device_ids)", "title": "" }, { "docid": "3ac099eacb8266f8c6cbaccd43b43f5b", "score": "0.48488736", "text": "def getDescriptors(self): #Returns descriptors used in model\n return self.descrips", "title": "" }, { "docid": "f73b7b69e7566b4c5d1f7ca9764b73fd", "score": "0.48400122", "text": "def extract_detections(self):\n self.mask_model.setInput(self.face_blob)\n self.predictions = self.mask_model.forward()", "title": "" }, { "docid": "e276c170476bceda023797c61bc71bce", "score": "0.48306018", "text": "async def get_vehicle_entities(vehicle: RenaultVehicleProxy) -> 
List[RenaultDataEntity]:\n entities: List[RenaultDataEntity] = []\n if \"battery\" in vehicle.coordinators:\n entities.append(RenaultPluggedInSensor(vehicle, \"Plugged In\"))\n entities.append(RenaultChargingSensor(vehicle, \"Charging\"))\n return entities", "title": "" }, { "docid": "cc8a40b967cdd15847c6079ed343b1db", "score": "0.48295394", "text": "def ls_spice(verbose=False):\n count=spiceypy.ktotal('ALL')\n result=[]\n if verbose:\n print(\"Total of %d kernels loaded\"%count)\n for i in range(count):\n (file,type,source,handle)=spiceypy.kdata(i, 'ALL')\n if verbose:\n print(\"%06s %s\"%(type, file))\n result.append(ls_spice_return(type=type,file=file))\n return result", "title": "" }, { "docid": "912f7161cb7dbf470caa12d35789a633", "score": "0.48294827", "text": "def get_entries(self):\n return self.find_by_device_description(\n {'manufacturer': 'Belkin International Inc.'})", "title": "" }, { "docid": "f3f7d835453b0bc5a4a34f0b3c5d5684", "score": "0.4823547", "text": "def extract_tables(image_filename: str, lang: str) -> List[LocatedTable]:\n img = cv2.imread(image_filename, 0)\n return extract_tables_from_image(img, lang)", "title": "" }, { "docid": "0ace2b40122291420f977d3ad6ebb4fe", "score": "0.48214874", "text": "def set_from_sample_tables(\n samples,\n ):\n detected_features = set()\n for sample in samples:\n detected_features.update(set(samples[sample]['feature_ID']))\n return detected_features", "title": "" }, { "docid": "220f60245b88cc02803fe6fd414262e6", "score": "0.48192805", "text": "def get_host_vmnics(self, host):\n vswitch_vmnics = []\n host_obj = self._get_host(host)\n for pnic in host_obj.config.network.pnic:\n vswitch_vmnics.append(pnic.device)\n return vswitch_vmnics", "title": "" }, { "docid": "d4f1a8767bb8607d9bf852225f7efa4a", "score": "0.4819037", "text": "def get_all_vms(self):\n return self.get_objs(self.datacenter.vmFolder, [vim.VirtualMachine])", "title": "" }, { "docid": "eb6d5ea6a8ca7995184591ac3d0727ec", "score": "0.4817321", "text": "async def discover_entities(hass: HomeAssistant) -> list[ZerprocLight]:\n lights = await pyzerproc.discover()\n\n # Filter out already discovered lights\n new_lights = [\n light\n for light in lights\n if light.address not in hass.data[DOMAIN][DATA_ADDRESSES]\n ]\n\n entities = []\n for light in new_lights:\n hass.data[DOMAIN][DATA_ADDRESSES].add(light.address)\n entities.append(ZerprocLight(light))\n\n return entities", "title": "" } ]
a3724545d34835fb51fbdf6c28e8d045
Takes in original obs and randomly cutout a strip to a 55x55
[ { "docid": "991be187fc1ab739f6239b8152d43f1e", "score": "0.7064706", "text": "def cutout(obs, shape=(32, 32)): ##obs.shape==320,64,64,3\n x = np.random.randint(46)\n # obs[:, :x, :,:] = 0\n obs[:, x:x+4,:,:] = 0\n return obs", "title": "" } ]
[ { "docid": "4daf049d01540af5c66255816ca15792", "score": "0.67266196", "text": "def rand_crop(obs, shape=(32, 32)): ##obs.shape==320,64,64,3\n x, y = np.random.randint(15,size=(2,))\n # obs[:, :x, :,:] = 0\n obs[:, :, :y,:] = 0\n # obs[:,64-x:, :, :] = 0\n obs[:, :, 64-y:, :] = 0\n return obs", "title": "" }, { "docid": "9d3c2bf0448c1e4b264cf5963cbb9c02", "score": "0.63534576", "text": "def crosscut(obs, shape=(32, 32)): ##obs.shape==320,64,64,3\n x = np.random.randint(46)\n y = np.random.randint(64)\n obs[:, :, y:y+3,:] = 0\n obs[:, x:x+4,:,:] = 0\n return obs", "title": "" }, { "docid": "083dfc7b9c6ade18d320a5d7ebb40935", "score": "0.60660076", "text": "def cutout(x, level):\n size = 1 + int(level * min(x.size) * 0.499)\n img_height, img_width = x.size\n height_loc = np.random.randint(low=0, high=img_height)\n width_loc = np.random.randint(low=0, high=img_width)\n upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))\n lower_coord = (min(img_height, height_loc + size // 2), min(img_width, width_loc + size // 2))\n pixels = x.load() # create the pixel map\n for i in range(upper_coord[0], lower_coord[0]): # for every col:\n for j in range(upper_coord[1], lower_coord[1]): # For every row\n pixels[i, j] = (127, 127, 127) # set the color accordingly\n return x", "title": "" }, { "docid": "305b5145df52e970ca34857873bb1eee", "score": "0.6009621", "text": "def crop(self):\r\n h, w = self.h, self.w\r\n p = []\r\n mid_val = self.parts // 2\r\n cols = int(math.sqrt(self.parts))\r\n order = [[j for j in range(cols)] for i in range(cols)]\r\n x = h // cols\r\n pos = 0\r\n for i in range(cols):\r\n if i == cols - 1:\r\n start = i * x\r\n end = h\r\n else:\r\n start = i * x\r\n end = (i + 1) * x\r\n\r\n for j in range(cols):\r\n if j == cols - 1:\r\n s = j * x\r\n e = w\r\n else:\r\n s = j * x\r\n e = (j + 1) * x\r\n\r\n img_slice = self.img[start:end, s:e]\r\n p.append(img_slice)\r\n order[i][j] = [img_slice, pos]\r\n pos += 1\r\n\r\n shuffled_pos = self.store_randomly(order)\r\n return mid_val, shuffled_pos", "title": "" }, { "docid": "0f78be21b0739f7fe3de15892e7e83f4", "score": "0.5943099", "text": "def random_crop(self):\n x, y = randint(0, 239 - self.length), randint(0, 319 - self.width)\n frame = np.array(self.frame)[x:x + self.length, y:y + self.width]\n self.frame = frame.reshape(40 * 40)", "title": "" }, { "docid": "dcd252c6e581f017d16829d25da3d87c", "score": "0.5921398", "text": "def _custom_shoe(cards: List[Card]) -> Shoe:\n shoe = Shoe()\n running_shoe = shoe._running_shoe\n for card in cards:\n position = np.where(running_shoe == card)[0][0]\n running_shoe = np.delete(running_shoe, position)\n shoe._rng.shuffle(running_shoe)\n shoe._running_shoe = np.append(cards, running_shoe)\n return shoe", "title": "" }, { "docid": "d743b5c3eee45f9e89d5b23ce159546b", "score": "0.5837701", "text": "def init(clip_size):\n return np.random.rand() * 2 * clip_size - clip_size", "title": "" }, { "docid": "101eb8c74487c4d15bc1b18f69ddb78c", "score": "0.57974553", "text": "def discretize(sample, grid):\r\n # TODO: Implement this\r\n pass", "title": "" }, { "docid": "4da5914594b79653fe1c1a34c59985f0", "score": "0.57040405", "text": "def tearover(image):\n (width, height) = image.size\n start = 0\n end = random.randint(0,height)\n while end < height:\n if random.random() > .5:\n randheight = random.randint(0,height/6)\n randwidth = random.randint(0,width/2)\n for x in xrange(randwidth, width,2):\n for y in xrange(start, end,2):\n (r,g,b) = image.getpixel((x,y))\n r = int(r * 4)\n 
image.putpixel((x-randwidth,y), (r,g,b))\n start += end\n end += randheight\n else:\n randheight = random.randint(0,height)\n randwidth = random.randint(0,width/2)\n for x in xrange(0, randwidth,2):\n for y in xrange(start, end,2):\n (r,g,b) = image.getpixel((x,y))\n b = int(b * 4)\n image.putpixel((x + randwidth,y), (r,g,b))\n start += end\n end += randheight\n if end >= height:\n end = height\n if random.random() > .5:\n randheight = random.randint(0,height/6)\n randwidth = random.randint(0,width/2)\n for x in xrange(randwidth, width,2):\n for y in xrange(start, end,2):\n (r,g,b) = image.getpixel((x,y))\n r = int(r * 4)\n image.putpixel((x-randwidth,y), (r,g,b))\n start += end\n end += randheight\n else:\n randheight = random.randint(0,height)\n randwidth = random.randint(0,width/2)\n for x in xrange(0, randwidth,2):\n for y in xrange(start, end,2):\n (r,g,b) = image.getpixel((x,y))\n b = int(b * 4)\n image.putpixel((x + randwidth,y), (r,g,b))\n start += end\n end += randheight\n break\n return image", "title": "" }, { "docid": "d0499d4eb70a58c1f90457fc6bce0811", "score": "0.5638282", "text": "def traceRonchiSlice(input):\n\n img = input[0]\n nbin = input[1]\n winRng = input[2]\n winRng2 = int(winRng/2)\n maxPix = input[3] # *** maxPix IS FOR TESTING PURPOSES ONLY ***\n plot = input[4]\n mxWidth = input[5]\n smth = input[6]\n bright = input[7]\n flatSlc = input[8]\n threshold=input[9]\n \n # REMOVE once testing complete \n \n #first bin the image, if requested\n if (nbin > 1):\n \n #tmp = np.zeros((img.shape[0], img.shape[1])) #*** THIS IS THE PROPER CODE ***\n tmp = np.zeros((img.shape[0], maxPix/nbin))\n \n for i in range(tmp.shape[1]-1):\n tmp[:,i] = np.nansum(img[:,nbin*i:nbin*(i+1)],axis=1)#+img[:,nbin*(i+1)]\n else:\n tmp = img\n\n #find column with maximum signal in median along column\n if (flatSlc is None):\n medArray = np.zeros(tmp.shape[1])\n for i in range(medArray.shape[0]):\n y = tmp[:,i]\n medArray[i] = np.nanmedian(y)\n else:\n medArray = np.zeros(tmp.shape[1])\n for i in range(medArray.shape[0]):\n y = flatSlc[:,i]\n medArray[i] = np.nanmedian(y)\n\n #find column with the maximum signal to identify position and number of Ronchi dips\n #m1, m2 = np.unravel_index(np.nanargmax(tmp), tmp.shape)\n m2 = np.nanargmax(medArray)\n\n #extract signal from this column\n y=np.copy(tmp[:,m2])\n x=np.arange(len(y))\n d2 = np.gradient(np.gradient(y))\n \n #only use regions where signal in all pixels > threshold of the median signal (of the max portion)\n if (flatSlc is None):\n mx = np.nanmedian(y[y>0.05*np.nanmax(np.nanmedian(y))])\n whr = np.where(y > threshold*mx)[0]\n else:\n yFlat = flatSlc[:,m2]\n with warnings.catch_warnings():\n warnings.simplefilter('ignore',RuntimeWarning)\n mx = np.nanmedian(yFlat[yFlat>0.05*np.nanmax(np.nanmedian(yFlat))])\n whr = np.where(yFlat > threshold*mx)[0]\n \n strt = whr[0]\n mxPix = whr[-1]\n\n #now start counting the # of dips\n #and get their positions\n\n trace =[]\n traceAmp = []\n xtmp = (np.arange(winRng)+strt).astype('int')\n ytmp = y[xtmp]\n \n if (bright):\n dipPos = xtmp[np.nanargmin(d2[xtmp])] #position of first dip\n else:\n dipPos = xtmp[np.nanargmax(d2[xtmp])] #position of first dip\n\n #stop when dip position\n while (dipPos < mxPix):\n\n xtmp = (np.arange(winRng)+dipPos - winRng2).astype('int')\n xtmp = xtmp[np.where(np.logical_and(xtmp >0,xtmp<len(y)))]\n \n #fit function to region to determine line centre\n if (bright):\n yfit = np.nanmin(d2[xtmp])-d2[xtmp]\n yfit -= np.nanmin(yfit)\n else:\n yfit = d2[xtmp] - 
np.nanmin(d2[xtmp])\n \n fitTmp = getFit2(xtmp, yfit, plot=plot, mxWidth=mxWidth)\n trace.append(fitTmp[0])\n traceAmp.append(fitTmp[1])\n \n if (plot):\n plt.figure()\n plt.plot(y)\n plt.plot([whr[0]-1, mxPix], [0.5*mx, 0.5*mx], 'g--')\n plt.plot(xtmp, y[xtmp], 'ro')\n plt.plot([trace[-1],trace[-1]], [np.nanmin(y),np.nanmax(y)],'--')\n plt.show() \n \n #now start search for next dip\n strt = xtmp[-1]\n xtmp = np.arange(winRng)+strt\n xtmp = xtmp[np.where(xtmp < len(y))]\n if (bright):\n with np.errstate(invalid='raise'):\n try:\n dipPos = xtmp[np.nanargmin(d2[xtmp])]\n except(ValueError):\n dipPos = np.nan\n else:\n with np.errstate(invalid='raise'):\n try:\n dipPos = xtmp[np.nanargmax(d2[xtmp])]\n except(ValueError):\n dipPos=np.nan\n \n #count the number of dips\n nDips = len(trace)\n \n #initialize array to hold positions of each dip across detector\n #allTrace = np.zeros((tmp.shape[1],nDips))\n allTrace = np.empty((nDips, int(maxPix/nbin)))\n allAmp = np.empty((nDips, int(maxPix/nbin)))\n allTrace[:] = np.nan\n allAmp[:] = np.nan\n \n #fill in first set of measurements\n allTrace[:,m2] = trace\n\n #now do the rest of the columns\n #first work backwards from starting position\n for i in range(m2-1,0,-1):\n allTrace[:,i],allAmp[:,i] = fitColumn(i,tmp,allTrace,winRng=winRng, reverse=True,mxWidth=mxWidth, bright=bright)\n \n #now work forwards\n for i in range(int(m2+1),int(maxPix/nbin)):\n allTrace[:,i],allAmp[:,i] = fitColumn(i,tmp,allTrace, winRng=winRng, mxWidth=mxWidth,bright=bright)\n\n #now smooth all traces and fill in missing values due to binning\n\n outTrace = np.empty((allTrace.shape[0], allTrace.shape[1]*nbin))\n outTrace[:] = np.nan\n\n outAmp = np.empty((allAmp.shape[0], allAmp.shape[1]*nbin))\n outAmp[:] = np.nan\n \n xTrace = np.arange(allTrace.shape[1])*nbin\n xOut = np.arange(outTrace.shape[1])\n\n if (smth > 0):\n for j in range(allTrace.shape[0]):\n yTrace = allTrace[j,:]\n aTrace = allAmp[j,:]\n \n #remove badly fit regions to be replaced by smoothed curve\n ytmp = np.empty(xOut.shape)\n ytmp[:] = np.nan\n ytmp[xTrace] = yTrace\n gKern = conv.Gaussian1DKernel(smth)\n outTrace[j,:] = conv.convolve(ytmp, gKern, boundary='extend', normalize_kernel=True)\n\n #remove badly fit regions and replace with linear interpolation\n atmp = np.empty(xOut.shape)\n atmp[:]=np.nan\n atmp[xTrace] = aTrace\n whrBad = np.where(~np.isfinite(atmp))[0]\n\n if(len(whrBad)>0):\n whrGood = np.where(np.isfinite(atmp))[0]\n if len(whrGood)>1:\n finter = interp1d(xOut[whrGood],atmp[whrGood], kind='linear', bounds_error=False)\n atmp[whrBad] =finter(xOut[whrBad])\n outAmp[j,:] = atmp\n else:\n outAmp[j,:] = atmp\n else:\n outAmp[j,:] = atmp\n else:\n outTrace = allTrace\n outAmp = allAmp\n \n if (plot):\n #tmp = np.sqrt(img)\n #mn = np.nanmin(img[np.where(tmp != 0)])\n med = np.nanmedian(img)\n fig = plt.figure()\n #plt.imshow(img, aspect='auto', clim=[mn, np.max(img)])\n \n plt.imshow(img, aspect='auto', clim=[med*0.05, med*1.25], cmap='jet', origin='lower')\n\n for i in range(outTrace.shape[0]):\n if (i%2 == 0):\n plt.plot(outTrace[i,:], 'k')\n else:\n plt.plot(outTrace[i,:], 'k--')\n plt.plot(np.repeat(m2, outTrace.shape[0]),outTrace[:,m2],'ro')\n plt.show()\n\n return outTrace, outAmp", "title": "" }, { "docid": "bb0027f57199cf50b7b16fe76d916f12", "score": "0.55995476", "text": "def CloneAndRandomize(self, observer):\n\t\tst = self.Clone()\n\t\t\n\t\t# The observer can see his own hand and the cards in the current trick, and can remember the cards played in previous tricks\n\t\tseenCards = 
st.playerHands[observer] + st.discards + [card for (player,card) in st.currentTrick]\n\t\t# The observer can't see the rest of the deck\n\t\tunseenCards = [card for card in st.GetCardDeck() if card not in seenCards]\n\t\t\n\t\t# Deal the unseen cards to the other players\n\t\trandom.shuffle(unseenCards)\n\t\tfor p in xrange(1, st.numberOfPlayers+1):\n\t\t\tif p != observer:\n\t\t\t\t# Deal cards to player p\n\t\t\t\t# Store the size of player p's hand\n\t\t\t\tnumCards = len(self.playerHands[p])\n\t\t\t\t# Give player p the first numCards unseen cards\n\t\t\t\tst.playerHands[p] = unseenCards[ : numCards]\n\t\t\t\t# Remove those cards from unseenCards\n\t\t\t\tunseenCards = unseenCards[numCards : ]\n\t\t\n\t\treturn st", "title": "" }, { "docid": "2ec4ea15f6924fc94cc6773f4c7a4ad0", "score": "0.55816954", "text": "def nextract(r, s, outmap, MAX_SAMPLES = 10):\n \n mask = np.sum( outmap == r['id'], axis=0) > 0\n \n # determine masked, region size\n xx = np.arange(mask.shape[1])\n yy = np.arange(mask.shape[0])\n \n X,Y = np.meshgrid(xx,yy)\n minx,maxx = X[mask].min(), X[mask].max() \n miny,maxy = Y[mask].min(), Y[mask].max() \n sx = maxx - minx + 2\n sy = maxy - miny + 2\n nx,ny = mask.shape[1]//sx, mask.shape[0]//sy\n \n #f = plt.figure(figsize = [7,7])\n #plt.imshow(mask)\n \n all_sout = []\n count = 1\n print(\"Sampling noise in {} separate regions\".format( min(nx * ny, MAX_SAMPLES) ) )\n for i in range(nx):\n if count > MAX_SAMPLES:\n break\n print(\"Sampling noise row {}\".format(i))\n for j in range(ny):\n _mask = mask < 0 # empty mask\n #print(sy*(j), sy*(j+1),sx*(i), sx*(i+1), miny, miny+sy, minx, minx+sx )\n m = mask[miny:miny+sy, minx:minx+sx]\n _mask[sy*(j):sy*(j)+m.shape[0],sx*(i):sx*(i)+m.shape[1]] = m\n #f = plt.figure(figsize = [7,7])\n #plt.imshow(_mask)\n\n sout = np.zeros( s.data.shape[0] )\n for k in range(s.data.shape[0]):\n sout[k] = biweight_location( s.data[k][_mask] )\n all_sout.append(sout)\n count += 1\n\n ww = s.grid()\n return ww, np.array(all_sout), mask\n #plt.imshow(mask)", "title": "" }, { "docid": "c65850586e8d0fe7d0237a19462b1866", "score": "0.5558196", "text": "def fit_and_subtract_background(cutout, trace_length = 60, seeing_pix = 4, plotted = False):\n #Define the coordinate such that x starts at the bottom right (small wavelength)\n #and increases toward the upper left. This is the dispersion direction\n \n #y is from bottom left to upper right. This is the cross dispersion direction. 
\n #plt.imshow(cutout, origin = 'lower')\n #plt.show()\n width = len(cutout[0]) #we have square cutout\n #buffer = round((width - trace_length -5)/2) #buffer area on either ends of the trace\n buffer = int(round(0.85*slit_length/2)) #imported from constant\n \n #x = range(width-2*buffer)\n #y = range(buffer)\n\n #Stashed\n x = range(int(width-2*buffer))\n y = range(int(buffer))\n #blank background\n bkg = np.zeros(np.shape(cutout))\n \n all_res_even = []\n all_res_odd = []\n \n flux = []\n var = []\n #fitBkgs = []\n for i in x: #Iterate from right to left of the thumbnail\n\n #cross_section_even = cutout[i:i+2*buffer, width-buffer-i]\n #print(len(cross_section_even))\n cross_section_even = np.zeros(buffer)\n cross_section_odd = np.zeros(buffer)\n for j in y: #diagonal iteration\n cross_section_even[j] = cutout[i+j , width-buffer-i+j]\n cross_section_odd[j] = cutout[i+j+1 , width-buffer-i+j]\n \n \n #Compute the flux fit \n res_even = fitFlux(cross_section_even)\n res_odd = fitFlux(cross_section_odd)\n yy = np.arange( len(cross_section_even))\n #print(yy)\n fitBkg_even = res_even[0](yy) #res[0] is the polynomial component\n fitBkg_odd = res_odd[0](yy)\n \n for k in y:\n #Generate a background frame\n bkg[i+k , width-buffer-i+k] = fitBkg_even[k]\n bkg[i+k+1 , width-buffer-i+k] = fitBkg_odd[k]\n #print(res[xx])\n \n #Plot cross-section\n if plotted:\n plt.plot(yy,cross_section_even)\n plt.plot(yy,res_even(yy), 'r')\n plt.plot(yy, res_even[0](yy), 'b')\n plt.plot(yy, fitBkg_even, 'k')\n plt.show()\n #print(res_even)\n all_res_even = all_res_even + [ res_even ]\n all_res_odd = all_res_odd + [ res_odd ]\n #fitBkgs += [fitBkg]\n flux_even = np.sum(res_even[1](yy))\n flux_odd = np.sum(res_odd[1](yy))\n flux += [flux_even, flux_odd] #sum the gaussian componenta and add as flux\n var += [ np.sum( res_even(yy) * res_even[1](yy)/flux_even ), \\\n np.sum( res_odd(yy) * res_even[1](yy)/flux_odd )]\n \n #just for plotting\n # flux = []\n # for i in all_res:\n # flux += [i(np.array(y))]\n # flux = np.array(flux)\n # print('size = ',np.shape(flux))\n # plt.contour(y, x, flux)\n \n # #Create a blank background array\n # fitBkgs = np.array(fitBkgs)\n # print(np.shape(fitBkgs))\n # plt.imshow(fitBkgs);plt.colorbar()\n # plt.show()\n # background = np.zeros(np.shape(cutout))\n # print('bkg size, fitBkg', np.shape(background), np.shape(fitBkgs))\n # for i in x:\n # for j in y:\n # #print(i+j, width - buffer - i +j)\n # background[i+j, width - buffer - i +j] = fitBkgs[i,j]\n \n #return np.array(all_res_even), np.array(all_res_odd), bkg\n #print(all_res_even)\n #print(np.array(all_res_even))\n \n #calculate angle\n # angle = angCalc(all_res_even)\n #return all_res_even, bkg, flux, var\n return cutout - bkg, bkg", "title": "" }, { "docid": "73f663c11f0ce2ca26c9fb80f63a2dae", "score": "0.5522644", "text": "def cutout(image,xo=0,yo=0,x_size=0,y_size=0,mask=None):\n \n xo = int(xo);\n yo = int(yo);\n x_size = int(x_size);\n y_size = int(y_size);\n\n imagem = image;\n \n # Initialize some variables..\n #\n x_diff = 0; x_edge = 0;\n y_diff = 0; y_edge = 0;\n x_fin = 0; x_ini = 0;\n y_fin = 0; y_ini = 0;\n\n y_img_size, x_img_size = imagem.shape;\n\n # Get the side sides (at least, 1!) 
and transform for the size_unit if necessary..\n #\n x_cut_size = max( 1, int(float(x_size)) );\n y_cut_size = max( 1, int(float(y_size)) );\n\n # And if no side size was given, define a default value correspondig to half of original image..\n #\n if not ( x_size ):\n x_cut_size = int(x_img_size/2);\n\n if not ( y_size ):\n y_cut_size = int(y_img_size/2);\n\n # Verify central coordinates values..\n #\n if (xo != 0):\n x_halo = int(float(xo));\n if (yo != 0):\n y_halo = int(float(yo));\n if (xo == 0):\n x_halo = int(x_img_size/2);\n if (yo == 0):\n y_halo = int(y_img_size/2);\n\n # Define the images (in/out) slices to be copied..\n #\n x_ini = x_halo - int(x_cut_size/2) #-1;\n x_fin = x_ini + x_cut_size;\n y_ini = y_halo - int(y_cut_size/2) #-1;\n y_fin = y_ini + y_cut_size;\n\n x_ini_old = max( 0, x_ini ); x_fin_old = min( x_img_size, x_fin );\n y_ini_old = max( 0, y_ini ); y_fin_old = min( y_img_size, y_fin );\n\n x_ini_new = abs( min( 0, x_ini )); x_fin_new = x_cut_size - (x_fin - x_fin_old);\n y_ini_new = abs( min( 0, y_ini )); y_fin_new = y_cut_size - (y_fin - y_fin_old);\n\n # Initialize new image, and take all index list..\n #\n imagemnova = np.zeros((y_cut_size,x_cut_size), dtype=imagem.dtype );\n ind_z = np.where(imagemnova == 0);\n\n # Copy requested image slice..\n #\n imagemnova[ y_ini_new:y_fin_new, x_ini_new:x_fin_new ] = imagem[ y_ini_old:y_fin_old, x_ini_old:x_fin_old ];\n\n # If 'mask', maintain just \"central\" object on it..\n #\n if ( mask ):\n msk = ( mask[0]-y_ini, mask[1]-x_ini )\n\n zip_m = zip( msk[0], msk[1] );\n zip_z = zip( ind_z[0], ind_z[1] );\n\n L = list(set(zip_z) - set(zip_m));\n\n try:\n ind_0, ind_1 = zip(*L);\n indx = ( np.array(ind_0), np.array(ind_1) );\n imagemnova[ indx ] = 0;\n except:\n pass;\n\n return imagemnova;", "title": "" }, { "docid": "1e16c09b095f851e5b2db655296c409e", "score": "0.5486488", "text": "def _sample_outward_snake(rng):\n return {\n \"dim\": utils.sample_log_int(rng, 3, 100),\n \"bs\": utils.sample_log_int(rng, 1, 200),\n \"n_samples\": utils.sample_log_int(rng, 100, 20000),\n }", "title": "" }, { "docid": "c870abf0b999cc29596ba377d2e4f595", "score": "0.54775673", "text": "def plot_images(X,s=5):\n (n,w,h,c)=X.shape\n ret=np.zeros((w*s,h*s,c))\n for x in range(s):\n for y in range(s):\n n=np.random.randint(low=0,high=X.shape[0])\n ret[x*w:x*w+w,y*h:y*h+h,:] = X[n]\n print(ret.shape)\n plt.imshow(ret.reshape(ret.shape[0],ret.shape[0]));plt.show()", "title": "" }, { "docid": "15b290d300a061f1b319429be5bd1e79", "score": "0.5470336", "text": "def addNoise(chunk):\n chunk = chunk.tolist()\n raw_size = len(chunk)\n counter = raw_size\n while (counter < chunk_size):\n for i in range(raw_size):\n if (counter == chunk_size):\n break\n chunk.append(int(chunk[i]*(random.uniform(.95,1.05))))\n counter += 1\n if (counter == chunk_size):\n break\n return np.array(chunk)", "title": "" }, { "docid": "b188bc2e7c7eccb509cfcf1448dbae7a", "score": "0.54617965", "text": "def shuffler1(self, nnhind, mu, n, shuffl_rate):\n pop = np.zeros((mu, n), dtype=int)\n for i in range(0, mu):\n ind = np.copy(nnhind)\n for j in range(n):\n ind = np.roll(ind, -1)\n if np.random.uniform() < shuffl_rate:\n arclength = np.random.randint(2, 6) # 2 to 6 cities in subpath\n ind[0, 0:arclength] = np.flip(ind[0, 0:arclength])\n pop[i, :] = ind\n return pop", "title": "" }, { "docid": "c0c73bb442ae695e7f407cfc50848b1b", "score": "0.54376024", "text": "def random_crop_roi(mask, shape=[64, 128, 128]):\n roi = mask.nonzero()\n # if len(roi.shape) != 3:\n # print('Invalid 
mask!')\n # return None\n z_min, z_max = roi[:, 0].min(), roi[:, 0].max()\n y_min, y_max = roi[:, 1].min(), roi[:, 1].max()\n x_min, x_max = roi[:, 2].min(), roi[:, 2].max()\n\n z_range = z_max - z_min\n y_range = y_max - y_min\n x_range = x_max - x_min\n\n z_high = max(shape[0] - z_range, 1)\n y_high = max(shape[1] - y_range, 1)\n x_high = max(shape[2] - x_range, 1)\n\n z_split = torch.randint(z_high, (1,), dtype=torch.uint8).item()\n y_split = torch.randint(y_high, (1,), dtype=torch.uint8).item()\n x_split = torch.randint(x_high, (1,), dtype=torch.uint8).item()\n\n # Make sure the crop does not exceed the boundary.\n while z_min - z_split < 0 or z_max + (shape[0] - z_range - z_split) >= mask.shape[0]:\n z_split = torch.randint(z_high, (1,), dtype=torch.uint8).item()\n while y_min - y_split < 0 or y_max + (shape[1] - y_range - y_split) >= mask.shape[1]:\n y_split = torch.randint(y_high, (1,), dtype=torch.uint8).item()\n while x_min - x_split < 0 or x_max + (shape[2] - x_range - x_split) >= mask.shape[2]:\n x_split = torch.randint(x_high, (1,), dtype=torch.uint8).item()\n\n return torch.tensor([[z_min - z_split, z_max + (shape[0] - z_range - z_split)],\n [y_min - y_split, y_max + (shape[1] - y_range - y_split)],\n [x_min - x_split, x_max + (shape[2] - x_range - x_split)]])", "title": "" }, { "docid": "8e63b5f53e195420dc3f40c65ac85087", "score": "0.5428128", "text": "def CloneAndRandomize(self, observer):\n\t\treturn self.Clone()", "title": "" }, { "docid": "4df27d93d4f0bdce03ec4066432ee0fe", "score": "0.53948855", "text": "def add_noise_img(img_row, img_height=64, img_width=64):\n copy = img_row.copy()\n img = np.reshape(copy, (img_height, img_width))\n\n # repeat i times, where 1 <= i <= 5\n for i in range(random.randint(1, 5)):\n # determine short and long edge\n short = random.randint(1, 2)\n long = random.randint(8, 20)\n\n # box is vertical only 30% of the time\n if random.random() > 0.3:\n box_height = short\n box_width = long\n else:\n box_height = long\n box_width = short\n\n # boxes are normally distributed in the image\n x0 = int(random.gauss(img_width/2 - box_width/2, img_width/4))\n y0 = int(random.gauss(img_height/2 - box_height/2, img_height/4))\n\n draw_rect(img, x0, y0, x0 + box_width, y0+box_height)\n\n img = np.reshape(img, (img_height * img_width,))\n\n return img", "title": "" }, { "docid": "3082179c88ba7b247d1382c63ad5c068", "score": "0.5371085", "text": "def make_ridgeline(self): \n\t\tmag_filter = []\n\n\t\tfor num in range(0, self.all_info.size/67):\t\t\t\t#iterate through, only keep data within mag. 
window\n\t\t\tmag = self.all_info[num, 22]\n\n\t\t\tif self.max_mag < mag < self.min_mag:\n\t\t\t\tmag_filter.append(num)\n\n\t\tall_info_sorted = np.empty([len(mag_filter), 2]) \t\t\t#create new array to store colour and mag\n\t\tall_info_sorted[:, 0] = self.all_info[mag_filter, 22] - self.all_info[mag_filter, 11] \n\t\tall_info_sorted[:, 1] = self.all_info[mag_filter, 22]\n\t\tall_info_sorted = all_info_sorted[all_info_sorted[:, 1].argsort()] \t#organize the new array in order of increasing Ks mag.\n\n\t\tgood_colour = []\n\t\tgood_colour_addsigma = []\n\t\tgood_colour_subsigma = []\n\t\tgood_band1_mag = []\n\n\t\tindex1 = 0\n\t\tindex2 = self.cmd_increments\n\n\n\t\tfor num in range(0,\tall_info_sorted.size/2):\t #iterate through, in desired window sizes calculate the mean 70% of the members in the group\n\t\t\tgroup =\tall_info_sorted[index1:index2, :]\n\n\t\t\tsorted_group = group[group[:, 0].argsort()]\n\t\t\tsorted_group_70 = sorted_group[0:((group.size/2) * 0.70), :]\n\n\t\t\ttry:\n\t\t\t\tmean_colour = sorted_group_70[int(sorted_group_70.size/4 - 1), 0]\n\t\t\t\tsigma_colour = 0.63 * math.fabs(sorted_group_70[int(3*sorted_group_70.size/8 - 1), 0] - sorted_group_70[int(sorted_group_70.size/8 - 1), 0]) #interquartial range, sigma may not be classical definition, may change with binary fraction\n\t\t\t\tmean_band1_mag = sorted_group_70[int(sorted_group_70.size/4 - 1), 1]\t\t\t\t\t\t\t\t\t #take 35th data point = median of first 69\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #take the difference between (17th dp and 52 dp)*0.63 = sigma\n\t\t\t\tgood_colour.append(mean_colour)\n\t\t\t\tgood_colour_addsigma.append(mean_colour + sigma_colour)\n\t\t\t\tgood_colour_subsigma.append(mean_colour - sigma_colour)\n\t\t\t\tgood_band1_mag.append(mean_band1_mag)\n\n\t\t\t\tindex1 += 1\n\t\t\t\tindex2 += 1\n\n\t\t\texcept:\n\t\t\t\tbreak\n\n\t\n\t\tfinal_good_colour = np.delete(np.asarray(good_colour), len(good_colour) - 1, 0)\n\t\tfinal_good_col_addsig = np.delete(np.asarray(good_colour_addsigma), len(good_colour_addsigma) - 1, 0)\n\t\tfinal_good_col_subsig = np.delete(np.asarray(good_colour_subsigma), len(good_colour_subsigma) - 1, 0)\n\t\tfinal_good_band1_mag = np.delete(np.asarray(good_band1_mag), len(good_band1_mag) - 1, 0)\n\n\t\tfig = plt.figure()\n\t\tax1 = fig.add_subplot(111)\n\t\tax1.scatter(self.all_info[mag_filter, 22] - self.all_info[mag_filter, 11], self.all_info[mag_filter, 22], marker = \".\", color = \"r\", label=\"PM Cleaned Data\") \n\t\tax1.scatter(final_good_colour, final_good_band1_mag, marker = \".\", label=\"Filtered Data\")\n\t\tax1.scatter(final_good_col_addsig, final_good_band1_mag, marker = \".\", color= \"y\", label=\"Filtered Data + Sigma\")\n\t\tax1.scatter(final_good_col_subsig, final_good_band1_mag, marker = \".\", color=\"b\", label=\"Filtered Data - Sigma\")\n\n\t\tplt.xlim(0, 8)\n\t\tplt.ylim(24, 14)\n\t\tplt.show()\n\n\t\treturn final_good_colour, final_good_band1_mag, final_good_col_addsig, final_good_col_subsig, mag_filter", "title": "" }, { "docid": "39cd41252cb5e2c68f1d53cc56d14a6f", "score": "0.5362825", "text": "def shiftover(image):\n (width, height) = image.size\n start = 0\n end = random.randint(0,height)\n while end < height:\n if random.random() > .5:\n randheight = random.randint(0,height/6)\n randwidth = random.randint(0,width/2)\n for x in xrange(randwidth, width):\n for y in xrange(start, end):\n (r,g,b) = image.getpixel((x,y))\n image.putpixel((x-randwidth,y), (r,g,b))\n start += end\n end += randheight\n else:\n randheight = 
random.randint(0,height/6)\n randwidth = random.randint(0,width/2)\n for x in xrange(0, randwidth):\n for y in xrange(start, end):\n (r,g,b) = image.getpixel((x,y))\n image.putpixel((x + randwidth,y), (r,g,b))\n start += end\n end += randheight\n if end >= height:\n end = height\n if random.random() > .5:\n randheight = random.randint(0,height/6)\n randwidth = random.randint(0,width/2)\n for x in xrange(randwidth, width):\n for y in xrange(start, end):\n (r,g,b) = image.getpixel((x,y))\n image.putpixel((x-randwidth,y), (r,g,b))\n start += end\n end += randheight\n else:\n randheight = random.randint(0,height)\n randwidth = random.randint(0,width/2)\n for x in xrange(0, randwidth):\n for y in xrange(start, end):\n (r,g,b) = image.getpixel((x,y))\n image.putpixel((x + randwidth,y), (r,g,b))\n start += end\n end += randheight\n return image", "title": "" }, { "docid": "2838e34ac8165a6dd0cfbe1e40a485f5", "score": "0.53374803", "text": "def make_cutouts(data_entity, times, cut_before=10, cut_after=10):\n samples = []\n for time in times:\n samples.append(data_entity[time - cut_before:time + cut_after + 1])\n\n return samples", "title": "" }, { "docid": "fd37356c3e5a7d00fda0c0801eda872d", "score": "0.5337473", "text": "def create_croppings(self, numpy_array):\n # Jitter the colour channel\n numpy_array = self.colour_channel_jitter(numpy_array)\n y_dim, x_dim = numpy_array.shape[:2]\n # Have the x & y coordinate of the crop\n crop_x = random.randrange(x_dim - self.cropSize)\n crop_y = random.randrange(y_dim - self.cropSize)\n # Select which image ordering we'll use from the maximum hamming set\n perm_index = random.randrange(self.numPermutations)\n final_crops = np.zeros(\n (self.tileSize, self.tileSize, 3, 9), dtype=np.float32)\n for row in range(3):\n for col in range(3):\n x_start = crop_x + col * self.cellSize + \\\n random.randrange(self.cellSize - self.tileSize)\n y_start = crop_y + row * self.cellSize + \\\n random.randrange(self.cellSize - self.tileSize)\n t=self.maxHammingSet[perm_index, row * 3 + col]\n # Put the crop in the list of pieces randomly according to the\n # number picked\n final_crops[:, :, :, self.maxHammingSet[perm_index, row * 3 + col]\n ] = numpy_array[y_start:y_start + self.tileSize, x_start:x_start + self.tileSize, :]\n #patch level normalization\n x=np.transpose(final_crops,(3,0,1,2))\n for i,img in enumerate(x):\n mean, std = np.mean(x[i,...]), np.std(x[i,...]) #calculate mean and std of patch\n if std==0:\n continue #black patch\n x[i,...]=(x[i,...]-mean)/std #normalize the patch\n final_crops=np.transpose(x,(1,2,3,0))\n return final_crops, perm_index", "title": "" }, { "docid": "69f1df9ceacec25ebdfda0a0b0d8e454", "score": "0.53333646", "text": "def testShort(self):\n rng = np.random.RandomState(12345)\n start = 50\n stop = 200\n self.image.image.array[:start, :] = rng.normal(0.0, self.synth.readnoise, (start, self.synth.width))\n self.image.image.array[stop:, :] = rng.normal(0.0, self.synth.readnoise,\n (self.synth.height - stop, self.synth.width))\n # Expect that all traces are too short, hence pruned\n result = self.task.run(self.makeExposure(self.image), self.identity, detectorMap=self.detMap)\n self.assertNumTraces(result, 0)\n\n # Set pruneMinLength to something smaller than the trace length, and they should all be there again\n self.config.pruneMinLength = int(0.9*(stop - start))\n result = self.task.run(self.makeExposure(self.image), self.identity, detectorMap=self.detMap)\n self.assertNumTraces(result)", "title": "" }, { "docid": 
"03e3f9477e6023a341a0dd211ce61554", "score": "0.52979237", "text": "def random_shark():\n shark.y = random.randint(100, 500)\n shark.x = random.randint((camera.x + 400), (camera.x + 800))", "title": "" }, { "docid": "ce250195873b1342f9ae59486ea64f05", "score": "0.5295921", "text": "def trim28x28(digits, fraction):\n features = np.array(digits.data)\n targets = np.array(digits.target, dtype=np.int)\n #shuffle features and targets with same seed\n order = np.random.permutation(len(features))\n \n digits.update({'features' : features[order]})\n digits.update({'targets' : targets[order]})\n\n length = int(len(digits.target) * fraction)\n \n trimmed_data = digits.data[:length]\n trimmed_targets = digits.target[:length]\n \n digits.update({'data' : trimmed_data})\n digits.update({'target' : trimmed_targets})\n \n return digits", "title": "" }, { "docid": "1814b6080457b354abb3bcd907e6da1e", "score": "0.52771956", "text": "def generate_round(size=35):\n return random.sample(range(1, 49), size)", "title": "" }, { "docid": "805c8375df92efd7e686f08f2e7fe242", "score": "0.52590626", "text": "def draw(self, n):\n if n <= len(self.contents):\n copy_contents = copy.copy(self.contents)\n to_remove = random.sample(copy_contents, n)\n for element in to_remove:\n self.contents.remove(element)\n return to_remove\n else:\n return self.contents", "title": "" }, { "docid": "ced743cc44cdda545933d162b6a881fa", "score": "0.52521425", "text": "def __init__(self):\n self._full_shoe = np.repeat(np.arange(1, 14),\n 4*rule_variation.SHOE_SIZE)\n self._rng = np.random.default_rng()\n self.reshuffle()", "title": "" }, { "docid": "552bb86b6cf4b3b00df879f7d4b3c12b", "score": "0.524329", "text": "def sprinkles(img, size, perc, style='black'):\n x = img.copy()\n number_of_pixels_to_frost = perc * np.ceil((x.shape[0] * x.shape[0]))\n number_of_sprinkles = int(np.ceil(number_of_pixels_to_frost / (size * size)))\n # TODO need to handle RGB channels - multiple arrays\n for sprinkle in range(0, number_of_sprinkles):\n # set boundaries to preven out of index errors\n options = range((size), (x.shape[0] - size))\n # get random index position\n row = np.random.choice(options, replace=False)\n col = np.random.choice(options, replace=False)\n # change initial pixel value\n x[row, col] = np.random.randint(0, 255)\n # randomly determine fill direction\n horizontal_fill_direction = np.random.choice([\"left\", \"right\"])\n vertical_fill_direction = np.random.choice([\"up\", \"down\"])\n if style == 'mean':\n mean = cv2.mean(x)\n # replace pixel values\n if (horizontal_fill_direction == \"left\") & (vertical_fill_direction == \"up\"):\n for i in (range(0, (size - 1))):\n for j in (range(0, (size - 1))):\n for c in [0, 1, 2]:\n if style == 'frosted':\n x[(row - j), (col - i)][c] = np.random.randint(0, 255)\n elif style == 'mean':\n x[(row - j), (col - i)][c] = mean[c]\n else:\n x[(row - j), (col - i)] = 0\n elif (horizontal_fill_direction == \"left\") & (vertical_fill_direction == \"down\"):\n for i in (range(0, (size-1))):\n for j in (range(0, (size-1))):\n for c in [0, 1, 2]:\n if style == 'frosted':\n x[(row - j), (col + i)][c] = np.random.randint(0, 255)\n elif style == 'mean':\n x[(row - j), (col - i)][c] = mean[c]\n else:\n x[(row - j), (col + i)] = 0\n elif (horizontal_fill_direction == \"right\") & (vertical_fill_direction == \"up\"):\n for i in (range(0, (size-1))):\n for j in (range(0, (size-1))):\n for c in [0, 1, 2]:\n if style == 'frosted':\n x[(row + j), (col - i)][c] = np.random.randint(0, 255)\n elif style == 'mean':\n 
x[(row - j), (col - i)][c] = mean[c]\n else:\n x[(row + j), (col - i)] = 0\n elif (horizontal_fill_direction == \"right\") & (vertical_fill_direction == \"down\"):\n for i in (range(0, (size-1))):\n for j in (range(0, (size-1))):\n for c in [0, 1, 2]:\n if style == 'frosted':\n x[(row - j), (col - i)][c] = np.random.randint(0, 255)\n elif style == 'mean':\n x[(row - j), (col - i)][c] = mean[c]\n else:\n x[(row - j), (col - i)] = 0\n return np.array(x)", "title": "" }, { "docid": "83daab1c8aad0da3dd92e703cfa5854d", "score": "0.5242898", "text": "def random_crop2d(s: array, crop_len: int, tempo_axis: int = 0) -> array:\n if tempo_axis >= s.ndim:\n raise ParameterError('axis out of range')\n\n n = s.shape[tempo_axis]\n idx = randint(high=n - crop_len)\n sli = [slice(None) for i in range(s.ndim)]\n sli[tempo_axis] = slice(idx, idx + crop_len)\n out = s[tuple(sli)]\n return out", "title": "" }, { "docid": "4a0138538b290c32230d088e421c931c", "score": "0.5242155", "text": "def crop_noise(noise_tensor, size, block):\n cut = (noise_tensor.shape[1] - size) // 2\n crop = Cropping2D(cut, name=f\"G_Noise_Crop_block_{block}\")(noise_tensor)\n return crop", "title": "" }, { "docid": "83efb9830f737dcbc9d06cf39a6e69e9", "score": "0.52235734", "text": "def chop(Nside, pos):\n # we paint quasar uniformly as long as it is covered by sdss:\n Npix = chealpy.nside2npix(Nside)\n chunkid = sharedmem.empty(len(pos), dtype='intp')\n print len(pos)\n with sharedmem.MapReduce() as pool:\n chunksize = 1024 * 1024\n def work(i):\n sl = slice(i, i + chunksize)\n chunkid[sl] = chealpy.vec2pix_nest(Nside, pos[sl])\n pool.map(work, range(0, len(pos), chunksize))\n arg = sharedmem.argsort(chunkid)\n chunksize = sharedmem.array.bincount(chunkid, minlength=Npix)\n assert (chunksize == numpy.bincount(chunkid, minlength=Npix)).all()\n return sharedmem.array.packarray(arg, chunksize)", "title": "" }, { "docid": "7313cac17c2b1b0a2784400ac3d678b5", "score": "0.5221182", "text": "def sample(self,size):\n return np.reshape(np.array(random.sample(self.buffer,size)),[size,5])", "title": "" }, { "docid": "cfd9fe9fcec556d2c11d36c3be9f1a5c", "score": "0.5219445", "text": "def speckle(img):\n row,col = img.shape\n gauss = np.random.randn(row,col)\n gauss = gauss.reshape(row,col) \n noisy_img = img + (img * gauss)\n return noisy_img", "title": "" }, { "docid": "83d3318d464a14f602c9e2fcf13c5c16", "score": "0.5216314", "text": "def draw_bs_reps(data, func, size=1):\n\n # Initialize the array of replicates\n reps = np.empty(size)\n\n # Draw a bootstrap sample and compute the statistic\n for i in range(size):\n bs_rep = np.random.choice(data, replace=True, size=len(data))\n reps[i] = func(bs_rep)\n\n return reps", "title": "" }, { "docid": "b2060f260fd4ffdc32f3fea74aef445c", "score": "0.5206738", "text": "def shift_and_subtract_background(cutout, obj_slit = 1, slit_gap = 21, masked_slit = None, plot = False):\n if int(obj_slit) not in [0,1,2]:\n print('Object slit must be 0 (top), 1 (middle), or 2 (bottom)')\n print('Object slit provided: {}'.format(obj_slit))\n return None\n else:\n #loop through the trace from right to left\n width = len(cutout[0]) #we have square cutout\n #buffer = round((width - trace_length -5)/2) #buffer area on either ends of the trace\n buffer = int(round(0.85*slit_length/2)) #imported from constant\n \n #dx0, 2 is here for place holder now. 
This is in case the three holes are not vertical\n dx0 = 0\n dx2 = 0\n #trace0 = shift(cutout, [-slit_gap,dx0], order = 4)\n trace0 = np.zeros(np.shape(cutout))\n trace0[:,slit_gap:] = cutout[:,:-slit_gap]\n #trace1 = cutout[slit_gap:-slit_gap]\n trace1 = cutout\n #trace2 = shift(cutout, [slit_gap, dx2], order = 4)\n trace2 = np.zeros(np.shape(cutout))\n trace2[:,:-slit_gap] = cutout[:,slit_gap:]\n \n if plot:\n plt.subplot(131)\n plt.imshow(trace0, origin = 'lower')\n plt.subplot(132)\n plt.imshow(trace1, origin = 'lower')\n plt.subplot(133)\n plt.imshow(trace2, origin = 'lower')\n plt.show()\n #package slit number and traces\n all_slit = [0,1,2]\n all_trace = [trace0, trace1, trace2]\n \n #sky slit contains no object, or contaminant. \n sky_slit = all_slit\n sky_slit.remove(obj_slit) #sky slits\n if masked_slit != None:\n sky_slit.remove(masked_slit)\n \n ####background subtraction\n if len(sky_slit) == 1: #one open slit is contaminated\n sky = all_trace[sky_slit[0]]\n elif len(sky_slit) ==2: # no open slit contaminated\n sky = (all_trace[sky_slit[0]] + all_trace[sky_slit[1]])/2 #Average\n \n sky_sub = all_trace[obj_slit] - sky\n if plot:\n plt.subplot(131)\n plt.imshow(all_trace[obj_slit], origin = 'lower')\n plt.subplot(132)\n plt.imshow( sky, origin = 'lower')\n \n \n plt.subplot(133)\n plt.imshow( sky_sub, origin = 'lower')\n plt.show()\n #sky_sub = np.pad(sky_sub, ( (0, np.shape(cutout)[0]-np.shape(sky_sub)[0]) \\\n # ,(0,np.shape(sky_sub[1]-np.shape(cutout)[1]) ) )\\\n # ,mode = 'constant')\n #sky = np.pad(sky_sub, ( (0, np.shape(cutout)[0]-np.shape(sky_sub)[0]) \\\n # ,(0,np.shape(sky_sub[1]-np.shape(cutout)[1]) ) )\\\n # ,mode = 'constant')\n return sky_sub, sky", "title": "" }, { "docid": "87c56c7862f11d0a614224ea5c06eee2", "score": "0.52053064", "text": "def random_crop(im, random_state):\n image_size = im.shape\n patch_size = (500, 400)\n patch_step = (20, 20)\n bound1 = np.arange(0, image_size[0] - patch_size[0],\n patch_step[0]).astype(\"int32\")\n bound2 = np.arange(patch_size[0], image_size[0],\n patch_step[0]).astype(\"int32\")\n g1 = list(zip(bound1, bound2))\n bound3 = np.arange(0, image_size[1] - patch_size[1],\n patch_step[1]).astype(\"int32\")\n bound4 = np.arange(patch_size[1], image_size[1],\n patch_step[1]).astype(\"int32\")\n g2 = list(zip(bound3, bound4))\n random_state.shuffle(g1)\n random_state.shuffle(g2)\n sel1 = g1[0]\n sel2 = g2[0]\n return im[sel1[0]:sel1[1], sel2[0]:sel2[1], ...]", "title": "" }, { "docid": "ae582b22ba6388aadc13911fade4ed8d", "score": "0.5199513", "text": "def Cutout(img: Image, magnitude: float) -> Image:\n if magnitude == 0.0:\n return img\n w, h = img.size\n xy = get_rand_bbox_coord(w, h, magnitude)\n\n img = img.copy()\n PIL.ImageDraw.Draw(img).rectangle(xy, fill=FILLCOLOR)\n return img", "title": "" }, { "docid": "8a3fd918dcf5dc8b0f370f608335973f", "score": "0.51956", "text": "def cut_vertex_machine(H):", "title": "" }, { "docid": "97ffe1980a21efb2e52a158aa801da57", "score": "0.51937324", "text": "def every_other_cut(strip=bpy.context.scene.sequence_editor.active_strip, interval=1, is_odd=False):\n if not strip or not hasattr(strip, 'frame_start') or interval < 1:\n return\n strips_remaining = []\n toggle = False\n deselect_strips()\n bpy.context.scene.frame_current = strip.frame_start\n\n # subcut strip and checker cut\n # TODO verify range includes cut within final incomplete interval\n # TODO allow reverse (neg) strip traversal and checker cutting\n substrip_count = int(strip.frame_final_duration / interval) # how many frame 
groups\n\n strips = [strip]\n checker_cut = is_odd # T to cut even substrips, F for odd\n print(\"There are {0} substrips to checker cut\".format(substrip_count))\n\n channel = strip.channel\n\n for i in range(substrip_count):\n activate_lone_strip(strip)\n\n # move ahead interval frames\n bpy.context.scene.frame_current += interval\n\n # copy strip as first strip\n substrip = duplicate_strip(strip)\n strips.append(substrip)\n\n # cut first strip along interval\n strip.frame_start = bpy.context.scene.frame_current - interval\n substrip.frame_final_duration = bpy.context.scene.frame_current - substrip.frame_start\n\n # offset internal animation for frames\n strip.animation_offset_start += interval\n strip.frame_start += interval\n\n # place in channel now that strips are cut\n strip.channel = channel\n substrip.channel = channel\n\n # remove second strip\n checker_cut and run_sequencer_op(bpy.ops.sequencer.delete)\n checker_cut = not checker_cut\n\n deselect_strips()\n continue\n\n return strips", "title": "" }, { "docid": "8bfeb2da4a6d65cd0613d2d0a9afe333", "score": "0.5192399", "text": "def draw_bs_sample(data):\n return np.random.choice(data, size=len(data))", "title": "" }, { "docid": "8bfeb2da4a6d65cd0613d2d0a9afe333", "score": "0.5192399", "text": "def draw_bs_sample(data):\n return np.random.choice(data, size=len(data))", "title": "" }, { "docid": "febbc9f34116284953033e15c2aae1f2", "score": "0.51898074", "text": "def unbias_model_set_points(model_from):\n \n # Select a subset of data from the first slice, then use indicies to select\n # the rest of the data\n \n #bin_size = 10\n target_count = 50\n \n # Let's target 50 samples per bin of 10 points\n # I'm being \"clever\" here by mixing a few steps together\n out_indx = np.random.choice(np.where(np.logical_and(0 < model_from[:,0,6], model_from[:,0,6] <= 10))[0], target_count, replace=False).tolist()\n out_indx += np.random.choice(np.where(np.logical_and(10 < model_from[:,0,6], model_from[:,0,6] <= 20))[0], target_count, replace=False).tolist()\n out_indx += np.random.choice(np.where(np.logical_and(20 < model_from[:,0,6], model_from[:,0,6] <= 30))[0], target_count, replace=False).tolist()\n out_indx += np.random.choice(np.where(np.logical_and(30 < model_from[:,0,6], model_from[:,0,6] <= 40))[0], target_count, replace=False).tolist()\n out_indx += np.random.choice(np.where(np.logical_and(40 < model_from[:,0,6], model_from[:,0,6] <= 50))[0], target_count, replace=False).tolist()\n out_indx += np.random.choice(np.where(np.logical_and(50 < model_from[:,0,6], model_from[:,0,6] <= 60))[0], target_count, replace=False).tolist()\n out_indx += np.random.choice(np.where(np.logical_and(60 < model_from[:,0,6], model_from[:,0,6] <= 70))[0], target_count, replace=False).tolist()\n out_indx += np.random.choice(np.where(np.logical_and(70 < model_from[:,0,6], model_from[:,0,6] <= 80))[0], target_count, replace=False).tolist()\n out_indx += np.where(80 < model_from[:,0,6])[0].tolist()\n \n out = model_from[out_indx,:,:]\n \n return out", "title": "" }, { "docid": "fb9d1570742d67bb1ce0311d65de4c6f", "score": "0.5187844", "text": "def rule_smooth(self,amount):\n \n rule = self.rule\n for a in range(amount):\n w = np.random.randint(1,len(rule)//4)\n i = np.random.randint(w,len(rule)-w)\n #print(i)\n #print(w)\n st = rule[i]\n rule[i-w:i+w]=st\n self.rule = rule\n #s_rule = self.rule\n #for x in range(len(self.rule)):\n # n_mean=0\n # for y in range(2*amount+1):\n # n_mean = n_mean+self.rule[(x+y-amount)%len(self.rule)]\n # n_mean = 
(n_mean//(2*amount+1))%self.states\n # s_rule[x] = n_mean\n #self.rule = s_rule", "title": "" }, { "docid": "1efeffef5c5e9533bede4bbf07ee98c8", "score": "0.51851684", "text": "def make_random_wedges():\n depth = (random.randint(600,1200), random.randint(800, 2000), random.randint(400,1200))\n width = (random.randint(600,1200), random.randint(800, 2000), random.randint(400,1200))\n\n mode = random.choice(['linear', 'clinoform'])\n max_layers = random.randint(4, 10)\n\n strata = [random.randint(0, max_layers) for j in range(5, 20)]\n while not contains_all_nums(strata, max_layers):\n strata = [random.randint(0, max_layers) for j in range(5, 20)]\n\n threshold = random.randint(2, max(len(strata)//4,2))\n split_points = calc_split_points(strata).astype('int8')\n while split_points[1] - split_points[0] < threshold:\n split_points = calc_split_points(strata).astype('int8')\n\n upper_strata = tuple(strata[:split_points[0]])\n middle_strata = tuple(strata[split_points[0]:split_points[1]])\n lower_strata = tuple(strata[split_points[1]:])\n starting_thickness = random.uniform(0, 2)\n thickness=(starting_thickness, starting_thickness + random.uniform(0,2))\n wedges, _, _, _ = bg.models.wedge(depth=depth,\n width=width,\n strat=tuple([upper_strata, middle_strata, lower_strata]),\n mode=mode,\n thickness=thickness,\n )\n return wedges", "title": "" }, { "docid": "508d4ee4193205525e474a429349874d", "score": "0.5184769", "text": "def generate(self):\n for y in range(self.height):\n row = []\n for x in range(self.width):\n row.append(seamless.seamless_noise(\n self.generator, x / self.width, y / self.height, self.detail, self.detail, self.offset))\n self.map.append(row)", "title": "" }, { "docid": "ecfbc852c775615ef001b5b69d22b4a2", "score": "0.5179217", "text": "def SparseDropout(slice_x, keep_prob=0.5):\n \n non_zero_row, non_zero_position = np.nonzero(slice_x)\n random_size = int((1-keep_prob)*len(non_zero_row))\n nonzerolist = np.arange(len(non_zero_row))\n random_indices = np.random.choice(nonzerolist, size = random_size,replace=False)\n \n slice_x[non_zero_row[random_indices],non_zero_position[random_indices]] = 0\n \n return slice_x", "title": "" }, { "docid": "8398bfecdf1ae1c2923413b11963475e", "score": "0.5177996", "text": "def rand_bbox(size, lamb, batch_size):\n W = size[0]\n H = size[1]\n cut_rat = np.sqrt(1. 
- lamb)\n # print(cut_rat)\n cut_w = np.array(W * cut_rat).astype('int8')\n print(cut_w)\n cut_h = np.array(H* cut_rat).astype('int8')\n print(cut_h)\n\n result = np.ones(shape=(batch_size,), dtype=np.int8)\n # uniform\n cx = np.random.randint(W*result)\n cy = np.random.randint(H*result)\n # print(cx,cy)\n # print(cx-cut_w//2)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W).reshape(-1,1)\n bby1 = np.clip(cy - cut_h // 2, 0, H).reshape(-1,1)\n bbx2 = np.clip(cx + cut_w // 2, 0, W).reshape(-1,1)\n bby2 = np.clip(cy + cut_h // 2, 0, H).reshape(-1,1)\n\n return np.concatenate([bbx1,bby1,bbx2,bby2], axis=1)", "title": "" }, { "docid": "2870bf77e835c4ab695f3a7f188e34c8", "score": "0.5174749", "text": "def uniform_square(frame_size, elements_number=200):\n square = []\n\n for n in range(elements_number):\n point = random_point_in_borders(frame_size)\n square += [point]\n\n return square", "title": "" }, { "docid": "cbd0c25ef0d43636b108db9d3d455f0a", "score": "0.5172431", "text": "def cut_image(name):\n img = Image.open('screenshots_cropped/' + name)\n\n for i in range(3):\n for j in range(3):\n # (left, upper, right, lower)\n img.crop((j * 200, i * 200, j * 200 + 200, i * 200 + 200)).save('screenshots_split/' + name.split('.')[0] + '_slice_' + str(i) + '_' + str(j) + '.png')", "title": "" }, { "docid": "933ddfcf37b45231e47409a32ed833af", "score": "0.5163682", "text": "def mock_mask(height=100, width=100):\n return np.random.randint(0, 2, size=(height, width))", "title": "" }, { "docid": "0a48967a39b8edd39cb8aeb8d06d7166", "score": "0.51490676", "text": "def apply_noise(size, filters):\n for filt in filters:\n if filt['name'] == ['copy']:\n continue\n elif filt['name'] == 'perfect':\n size *= filt['sample']\n # size = [x * y for x, y in zip(size, filt['sample'])]\n elif filt['name'] == 'poisson':\n size = np.random.poisson(size * filt['sample'])\n # size = [np.random.poisson(x * y) for x, y in zip(size, filt['sample'])]\n elif filt['name'] == 'gauss-multiplicative':\n size = np.round(size*np.random.normal(filt['mean'], filt['sigma'], size.size), 0).astype(int)\n # size = [np.random.normal(filt['mean'], filt['sigma']) * x for x in size]\n elif filt['name'] == 'gauss-additive':\n size += np.random.normal(filt['mean'], filt['sigma'], size.size)\n # size = [np.random.normal(filt['mean'], filt['sigma']) + x for x in size]\n\n return np.round(size)", "title": "" }, { "docid": "d0c5113274718d445056d708720beb34", "score": "0.51465064", "text": "def shuffler2(self, nnhind, mu, n, shuffl_rate):\n pop = np.zeros((mu, n), dtype=int)\n for i in range(0, mu):\n ind = np.copy(nnhind)\n for j in range(n):\n if np.random.uniform() < shuffl_rate:\n ind[0, j - 1], ind[0, j] = ind[0, j], ind[0, j - 1]\n pop[i, :] = ind\n return pop", "title": "" }, { "docid": "c650f9025c02b2e09e4f290f41a6e02f", "score": "0.51453865", "text": "def produce_swap_list(p_mix, radius, shape):\n x_mix = []\n y_mix = []\n for i in range(shape[0]):\n for j in range(shape[1]):\n if random.random() < p_mix:\n x_new = np.clip(i + random.randint(1, radius), 0, shape[0] - 1)\n y_new = np.clip(j + random.randint(1, radius), 0, shape[1] - 1)\n x_mix.append([i, x_new])\n y_mix.append([j, y_new])\n return [x_mix, y_mix]", "title": "" }, { "docid": "722a67c95811007097b2184e72d621ed", "score": "0.5133281", "text": "def test_random_sample_patch_2D_20patch():\n image = numpy.stack([camera()] * 20)\n image0 = numpy.expand_dims(image, -1)\n patch_size = 512\n num_patch = 500\n adoption_rate = 0.5\n input_data = random_sample_patches(image0, patch_size, num_patch, 
adoption_rate)\n print(input_data[0])\n assert len(input_data) == 20\n\n # Extract patched image (which is the image)\n img_patch = image0[tuple(input_data[0])]\n\n assert img_patch.shape == image0[0:1, ...].shape", "title": "" }, { "docid": "4f59753562c1703161262af10b932403", "score": "0.51325506", "text": "def random_crop(img, new_dim):\n\n dim = img.shape[1]\n offset = dim - new_dim\n \n idx = random.randint(0, offset)\n idy = random.randint(0, offset)\n\n new_img = img[:, idx:idx+new_dim, idy:idy+new_dim]\n\n if random.randint(0, 1) == 0:\n new_img = new_img[:, :, ::-1]\n\n return new_img", "title": "" }, { "docid": "5e82761fb054027055841339b79ddb45", "score": "0.5117356", "text": "def create_card_Reasoning(nb_dim, nb_features):\n item_shape = (nb_dim, nb_features)\n item = np.zeros(item_shape, dtype=int)\n\n r = [0,1,2]\n #random 2 different dimensions\n idim1 = random.randint(0,2)\n dim1 = r[idim1]\n r.remove(dim1)\n idim2 = random.randint(0,len(r)-1)\n dim2 = r[idim2]\n r.remove(dim2)\n dim3 = r[0]\n \n feat = random.randint(0,3)\n feat2 = random.randint(0,3)\n while(feat == feat2):\n feat2 = random.randint(0,3)\n\n np.put(item[dim1],[feat], 1)\n np.put(item[dim2],[feat], 1)\n np.put(item[dim3],[feat2], 1)\n\n return item", "title": "" }, { "docid": "a35c04e06bb91ff51a9e7b4d42c9e689", "score": "0.51129574", "text": "def fix_n(n):\n\n keep=numpy.ones(n.size)\n\n w,=numpy.where( (n > 1.049) & (n < 1.051) )\n n_region=w.size\n n_keep = int(n_region*2.0/60.0)\n print(\"keeping %d/%d from n region\" % (n_keep,n_region))\n\n rvals = numpy.random.random(w.size)\n s=rvals.argsort()\n wrand = s[0:n_keep]\n\n keep[w] = 0\n keep[w[wrand]] = 1\n\n wkeep,=numpy.where(keep==1)\n\n print(\"keeping %d/%d overall\" % (wkeep.size,n.size))\n return wkeep", "title": "" }, { "docid": "ea6b37adedc7cf43f813dc20b40614ec", "score": "0.51043105", "text": "def handle_strip_cuts(strips=[], use_selected=True):\n if not strips and not use_selected:\n return\n if use_selected:\n strips = [strip for strip in bpy.context.scene.sequence_editor.sequences if strip.select]\n if len(strips) == 1:\n every_other_frame_cut(bpy.context.scene.sequence_editor.active_strip, interval=3)\n elif len(strips) > 1:\n every_other_group_cut(bpy.context.scene.sequence_editor.sequences)\n return strips", "title": "" }, { "docid": "f8b4ab68574be661ee199dd33b6c610b", "score": "0.5090504", "text": "def generate_shower(initialtype, tvalues, showernumber):\n\n t = 0\n \n Shower0 = Shower(initialtype, showernumber) \n Parton0 = Parton(initialtype, t, 1, None, Shower0) # Initial parton\n Shower0.PartonList.append(Parton0)\n Shower0.FinalList.append(Parton0)\n Shower0.SplittingPartons.append(Parton0)\n \n while len(Shower0.SplittingPartons) > 0:\n SplittingParton, delta_t, vertex = Shower0.select_splitting_parton()\n t = t+delta_t\n \n end_shower = Shower0.loop_status(t, tvalues)\n if end_shower:\n break\n \n Shower0.SplittingPartons.remove(SplittingParton) \n Shower0.FinalList.remove(SplittingParton)\n momfrac, type1, type2 = Parton.split(SplittingParton, vertex)\n\n for j in range(0,2): #Loop for generating the branched partons\n if j==0: # Parton 1.\n initialfrac = SplittingParton.InitialFrac * momfrac\n NewParton = Parton(type1, t, initialfrac, \n SplittingParton, Shower0)\n SplittingParton.Primary = NewParton\n \n elif j==1: # Parton 2.\n initialfrac = SplittingParton.InitialFrac * (1-momfrac)\n NewParton = Parton(type2, t, initialfrac, \n SplittingParton, Shower0)\n SplittingParton.Secondary = NewParton\n \n 
Shower0.PartonList.append(NewParton)\n Shower0.FinalList.append(NewParton) \n \n if initialfrac > z_min: # Limit on how soft gluons can split.\n Shower0.SplittingPartons.append(NewParton)\n \n return Shower0", "title": "" }, { "docid": "eaae4b86ef137ac106812e4ddb853e1d", "score": "0.5088194", "text": "def generate_disc_set(nb):\n input = torch.rand(nb, 2)\n target = torch.zeros((nb, 2))\n target[(input - 0.5).pow(2).sum(1) < 0.5/pi, 1] = 1\n target[(input - 0.5).pow(2).sum(1) >= 0.5/pi, 0] = 1\n return input, target", "title": "" }, { "docid": "8c2a5563ada2cc8c4918aa15e3d4106b", "score": "0.50840014", "text": "def random_crop(im, size, pad_size=0):\n if pad_size > 0:\n im = zero_pad(im=im, pad_size=pad_size)\n h, w = im.shape[1:]\n y = np.random.randint(0, h - size)\n x = np.random.randint(0, w - size)\n im_crop = im[:, y : (y + size), x : (x + size)]\n assert im_crop.shape[1:] == (size, size)\n return im_crop", "title": "" }, { "docid": "a40fa98a3e1b25fba4ad88392750b6ef", "score": "0.50813925", "text": "def _random_sub_images(self, data, w, h, nb_per_images):\n nb_images = len(data)\n data_new = np.zeros((nb_images * nb_per_images, w, h,\n self.nb_channels))\n for i in range(nb_images * nb_per_images):\n #Image we sample from\n image_from = int(i // nb_per_images)\n image_w = data[image_from].shape[0]\n image_h = data[image_from].shape[1]\n #Random size\n random_size = np.random.randint(self.window_min_size,\n min(image_w, image_h))\n rdm_corner_0 = np.random.randint(0, image_w - random_size)\n rdm_corner_1 = np.random.randint(0, image_h - random_size)\n rdm_sub = data[image_from][rdm_corner_0 :\n rdm_corner_0 + random_size, rdm_corner_1 : \n rdm_corner_1 + random_size, :]\n rdm_sub = cv2.resize(rdm_sub, (w, h))\n #This is necessary for black and white images.\n rdm_sub = self._ensure_dimensions(rdm_sub)\n data_new[i,:,:,:] = rdm_sub\n return data_new", "title": "" }, { "docid": "422f4260fe8d4debdfe043696e1d5593", "score": "0.50755787", "text": "def get_random_patch(self):\n\n b = self.border\n height, width = self.X_img.shape\n _x = self.rnd.randint(b, height-b)\n _y = self.rnd.randint(b, width-b)\n return self.X_img[_x-b:_x+b+1, _y-b:_y+b+1], \\\n self.y_img[_x-b,_y-b]", "title": "" }, { "docid": "422d19493ec21d68af72789e6806c9ff", "score": "0.5073514", "text": "def makeSomeNoise(self):\n noiseyRows = random.sample(range(self.N), int(self.N*0.1))\n for i in noiseyRows:\n self.Y[i] = self.Y[i]*-1", "title": "" }, { "docid": "4d721adff70147a47bf7ccf34431d14d", "score": "0.50730944", "text": "def cutEveryNSpectra(self):\n sdr = self.sender()\n if sdr == self.ui.actionCropDarkSpectra:\n preview_spectra = self.dark.data.sum(axis=-1)\n elif sdr == self.ui.actionCropNRBSpectra:\n preview_spectra = self.nrb.data.sum(axis=-1)\n plugin = _widgetCutEveryNSpectra()\n \n winPlotEffect = _DialogPlotEffect.dialogPlotEffect(data=preview_spectra,\n x=_np.arange(preview_spectra.size),\n plugin=plugin)\n\n if winPlotEffect is not None:\n # Do stuff here\n params = _copy.deepcopy(winPlotEffect.parameters)\n params.pop('name')\n params.pop('long_name')\n\n cutter = _CutEveryNSpectra(**params)\n if sdr == self.ui.actionCropDarkSpectra:\n self.dark.data = cutter.calculate(self.dark.data)\n # Backup for Undo\n h_list = ['CutDark']\n for k in params:\n h_list.extend([k, params[k]])\n self.bcpre.add_step(h_list)\n self.updateHistory()\n elif sdr == self.ui.actionCropNRBSpectra:\n self.nrb.data = cutter.calculate(self.nrb.data)\n h_list = ['CutNRB']\n for k in params:\n h_list.extend([k, params[k]])\n 
self.bcpre.add_step(h_list)\n self.updateHistory()\n else:\n pass\n del winPlotEffect", "title": "" }, { "docid": "9d9272bf6a413ab649ba3a9e0e5fde32", "score": "0.5069756", "text": "def generateSample(self):\n sample = []\n for i in range(self.sample_size):\n trial = []\n for j in range(2):\n trial.append((2*random.random()-1)*self.radius)\n sample.append(trial)\n return sample", "title": "" }, { "docid": "01457016c517d002fcb6508ac6621ecf", "score": "0.5068477", "text": "def randomSnack(self):\n # Declarations.\n width = self.controller.getWidth()\n bannerHeight = self.controller.getBannerHeight()\n height = self.controller.getHeight()\n blockSize = self.controller.getBlockSize()\n\n # Generating a random x and y (pos).\n self.x = randrange(0, width, blockSize) # Note : param randrange(begin, max, step)\n self.y = randrange(bannerHeight, height, blockSize)\n\n # Creation of the container to be transferred to the view via the controller.\n rectObject = pygame.Rect(self.x + 1, self.y + 1, blockSize - 2, blockSize - 2)\n # Calls the controller to ask for the drawing of the rect visible on the interface given the pos and the color.\n self.controller.drawSnack(self.color, rectObject)", "title": "" }, { "docid": "126cd2a87a24942a687b8411e55b7a29", "score": "0.5062639", "text": "def img_unbreak(img_pieces, win_len = 100, is_2d = False):\n imgheight, imgwidth = 400, 400 \n index = 0\n if is_2d:\n img = np.zeros((imgheight, imgwidth))\n else:\n img = np.zeros((3, imgheight, imgwidth))\n \n for i in range(0, imgheight, win_len):\n for j in range(0, imgwidth, win_len):\n if is_2d:\n img[j:j + win_len, i:i + win_len] = img_pieces[index,:,:]\n index += 1\n else:\n img[:, j:j + win_len, i:i + win_len] = img_pieces[index*3:(index+1)*3,:,:]\n index += 1\n return img", "title": "" }, { "docid": "ba987323228baefec13512957c9d4795", "score": "0.5054889", "text": "def crop_image(self, obs, augment_frames=False):\n if self.crop_frames:\n crop_size = self.crop_size\n if augment_frames:\n y_margin = np.random.randint(obs.shape[0] - crop_size)\n x_margin = np.random.randint(obs.shape[1] - crop_size)\n else: # central crop\n y_margin = int((obs.shape[0] - crop_size) / 2)\n x_margin = int((obs.shape[1] - crop_size) / 2)\n else:\n y_margin = 0\n x_margin = 0\n crop_size = obs.shape[0]\n obs = obs[y_margin:y_margin + crop_size, x_margin:x_margin + crop_size]\n return obs", "title": "" }, { "docid": "51bdb90b2b22081cae214f14e31c77ca", "score": "0.50459296", "text": "def new_sampling_function(Args, catalog):\n number_of_objects = np.random.randint(1, Args.max_number)\n a = np.hypot(catalog['a_d'], catalog['a_b'])\n cond = (a <= 1.4) & (a > 0.6)\n q_bright, = np.where(cond & (catalog['i_ab'] <= 25.3))\n q, = np.where(cond & (catalog['i_ab'] <= 26))\n blend_catalog = astropy.table.vstack(\n [catalog[np.random.choice(q_bright, size=1)],\n catalog[np.random.choice(q, size=number_of_objects)]])\n blend_catalog['ra'], blend_catalog['dec'] = 0., 0.\n dx, dy = get_random_shift(Args, number_of_objects + 1)\n blend_catalog['ra'] += dx\n blend_catalog['dec'] += dy\n return blend_catalog", "title": "" }, { "docid": "437c30f9148d52b5950af325f39d2bf8", "score": "0.5036847", "text": "def push_sample(self):\n self.outlet.push_sample([random.choice(self.markers)])", "title": "" }, { "docid": "b1665f65e2bbdb9e1d1175ba92186ee4", "score": "0.503428", "text": "def generate_random_valid_tower_untrimmed(self):\n\t\tif np.all(self.coverage):\n\t\t\traise RuntimeError (\"The entire space has been covered\")\n\n\t\ttower = 
self.generate_random_tower()\n\t\twhile np.all(self.coverage[tower.mask]):\n\t\t\ttower = self.generate_random_tower()\n\t\treturn tower", "title": "" }, { "docid": "8a2afeb9a7171bcb2857d475545cf523", "score": "0.5028819", "text": "def undersample(df, n_samples):\n df_under = pd.DataFrame(columns=df.columns)\n\n for s in df.stiffness.unique():\n if (s == \"8.0\") or (s == \"32.0\") :\n df_under = pd.concat([df_under, df[df.stiffness == s]], axis=0)\n else:\n df_under = pd.concat([df_under, df[df.stiffness == s].sample(n_samples)], axis=0)\n \n print(\"Undersampling. The balanced dataset has shape\", df_under.shape)\n return df_under.reset_index(drop=True)", "title": "" }, { "docid": "0883daf94c4b07c9c9d857606bdba6f5", "score": "0.50246024", "text": "def _reaping(self, tolerance):\n nextgen = []\n for bacterium in self._all_bacteria:\n score = 0.0\n for substrate, expected_product in self._conditions:\n score = score + (abs(float(bacterium.feed(substrate)) - float(expected_product))/abs(float(expected_product)))\n if (score/float(len(self._conditions)) <= float(tolerance)) \\\n and random.random()*50 > float(len(bacterium.geneticCode())):\n nextgen.append(bacterium)\n\n self._all_bacteria=nextgen", "title": "" }, { "docid": "7f6cb5cc3c183d8f835097b03b553886", "score": "0.5016824", "text": "def new_piece():\n return random.choice(pieces)", "title": "" }, { "docid": "cce8dd04cb56774b6d8d646e28ab560f", "score": "0.501358", "text": "def random_gen_2d(xmin,xmax,ymin,ymax,zmin=None,zmax=None,step=120,n_points=None,n_trajs=5):\n n_drones = n_trajs\n\n trajs = [[] for i in range(n_drones)]\n if( n_points is None):\n n_constant = False\n else:\n n_constant = True\n\n for i in range(n_drones):\n print(\"Generating traj\",i)\n xs = []\n ys = []\n if(not n_constant):\n n_points = random.randint(10,20)\n if(zmin is not None and zmax is not None):\n z_value = random.randint(zmin, zmax )\n zs = [z_value]*n_points\n else:\n zs = []\n for j in range(n_points):\n\n if xs == []:\n xs.append(random.randint(xmin, xmax ))\n ys.append(random.randint(ymin, ymax ))\n xs.append(xs[-1]+step)\n ys.append(ys[-1])\n\n else:\n dirs=list( range(4) )\n # 0=su,1=dx,2=giu,3=sx\n if(xs[-1]>xs[-2]):\n dirs.remove(3)\n elif(xs[-1]>xs[-2]):\n dirs.remove(1)\n elif(ys[-1]<ys[-2]):\n dirs.remove(0)\n elif(ys[-1]>ys[-2]):\n dirs.remove(2)\n dir = random.choice(dirs)\n \n if dir==0:\n xs.append(xs[-1])\n new_y= ys[-1] + step\n ys.append(new_y)\n elif dir==1:\n ys.append(ys[-1])\n new_x= xs[-1] + step\n xs.append(new_x)\n elif dir==2:\n xs.append(xs[-1])\n new_y= ys[-1] - step\n ys.append(new_y)\n elif dir==3:\n ys.append(ys[-1])\n new_x= xs[-1] - step\n xs.append(new_x)\n\n\n if(zs ==[]):\n trajs[i] = list(zip(xs,ys))\n else: \n trajs[i] = list(zip(xs,ys,zs))\n return trajs", "title": "" }, { "docid": "2bc48eb6d6aa82e499281f2577912c1e", "score": "0.50046515", "text": "def shuffle_and_trim(elements, size):\n import random as rnd\n size = int(Instagram.random(size, size * 0.2))\n size = 0 if size < 0 else size\n rnd.shuffle(elements)\n return elements[:len(elements) if len(elements) < size else size]", "title": "" }, { "docid": "286e64a2c888d90edd5bff0ce3fd0edb", "score": "0.49985287", "text": "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "title": "" }, { "docid": 
"286e64a2c888d90edd5bff0ce3fd0edb", "score": "0.49985287", "text": "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "title": "" }, { "docid": "a91ace1cd001ee28599da1a4ff1be874", "score": "0.49853662", "text": "def dropSlices(img, nth):\n return Views.stack([Views.hyperSlice(img, 2, i)\n for i in xrange(img.dimension(2)) if 0 == (i+1) % nth])", "title": "" }, { "docid": "d76fd38a94a41a09adf31d9bd4844d8a", "score": "0.49792284", "text": "def sampleBreeds( breed, dogs, N ):\n\n return np.random.choice( dogs[breed], N, replace = False)", "title": "" }, { "docid": "02e3afcb54e8f25dd1cb79e2081b4bae", "score": "0.49744552", "text": "def RandomStiffGen(nt, muE, stE,bounds): \n\t#Generating random ties Young's Modulus and poisson ratio for concrete ties\n\t#The working folder is named the mean after the first letter.\n\tmuPr=0.18\n\tstPr=0.05\n\tPr=0.22\n\t## This function generate a sample of random numbers \n\t# nt from of Gaussian distribution \n\t# such that their average and standard deviation are the same as \n\t#the pool fixed mean muE and standard deviation stE\n\tConcrE1=[]\n\tConcrE=[]\n\tkeepsampling=0.0\n\twhile keepsampling<nt:\n\t\tConcrEi=[random.gauss(muE,stE) for i in range(nt)]\n\t\tConcrPr=[random.gauss(muPr,stPr) for i in range(nt)]\n\t\t#Force the mean to be the same as requested\n\t\tsumstiff=sum(ConcrEi)\n\t\tnewmean=sumstiff/nt\n\t\tXdev=(nt*muE-sumstiff)/nt\n\t\tConcrEc=[ConcrEi[i]+Xdev for i in range(nt)] \n\t\t\n\t\t#Compute the standard deviation of the new sample\n\t\tConcrEsq=[(ConcrEc[i])**2 for i in range(nt)]\n\t\tStD=math.sqrt(abs(sum(ConcrEsq)/nt-muE**2))\n\t\t\n\t\t#Deviation from requested variances\n\t\tdStD2=stE**2-StD**2\n\t\t\n\t\t#Force the standard deviation of the sample to be equal to the standard deviation of the whole population\n\t\t#dstD is subtracted from numbers less than the median and and added to numbers greater than the median , that is:\n\t\t#Compute the median\n\t\tnt1=int(nt/2-1)\n\t\tnt2=int(nt/2)\n\t\tListE=ConcrEc\n\t\tListE.sort() #sort numbers in a list from the lowest to greatest\n\t\tXmed=1.0/2.0*(ListE[nt1]+ListE[nt2])\n\t\t##Divide the list into two lists separated by the mean\n\t\tListE1=[ListE[i] for i in range(nt2)]\n\t\tListE2=[ListE[i] for i in range(nt2,nt)]\n\t\tmuE1=(sum(ListE1))/(nt/2.0)\n\t\tmuE2=(sum(ListE2))/(nt/2.0)\n\t\tdmuE=muE2-muE1\n\t\tDeterm=dmuE**2+4.0*dStD2\n\t\t\n\t\tif Determ<0.0:\n\t\t\tkeepsampling=0.0\n\t\telse:\n\t\t\tdStD=0.5*dmuE-0.5*(math.sqrt(dmuE**2+4.0*dStD2))\n\t\t\t#perturb the numbers to have requested standard dev and mean\n\t\t\tConcrE=[]\n\t\t\tfor i in range(nt):\n\t\t\t\tif ConcrEc[i]<Xmed:\n\t\t\t\t\tEc=ConcrEc[i]+dStD\n\t\t\t\t\tConcrE.append(Ec)\n\t\t\t\telif ConcrEc[i]>Xmed:\n\t\t\t\t\tEc=ConcrEc[i]-dStD\n\t\t\t\t\tConcrE.append(Ec)\n\t\t\t\telse:\t\n\t\t\t\t\tEc=ConcrEc[i]\n\t\t\t\t\tConcrE.append(Ec)\n\t\t\trandom.shuffle(ConcrE)\n\t\t\tkeepsample=[]\n\t\t\tfor i in range(nt):\n\t\t\t\tif ConcrE[i]<=bounds:\n\t\t\t\t\t#print (tie modulus error)\n\t\t\t\t\tkeepsample.append(0)\n\t\t\t\telse:\n\t\t\t\t\tkeepsample.append(1)\n\t\t\tkeepsampling=sum(keepsample)\n\t\t# saving:\n\t#dataModulus=ConcrE\n\t#ConcrE=randomstiffness(stE,muE,nt,ConcrE1)\n\t#print ConcrE\n\treturn ConcrE", "title": "" }, { "docid": "62b16faf446b6568baf927b7c719b2ba", 
"score": "0.49740142", "text": "def boatfilt(sample_rate, data,flag=False):\n\n #TO DO:\n #Update the smoothing to take data from the center, instead of only datapoints in front\n\n\n ####################################### what is the original type of data? how do we convert/manipulate it?\n data_sample = data[:,0]\n\n #These are values that we chose to find the boat noises, they may not be perfect\n freq_cap=7000\n freq_floor=2000\n amp_cap=1e1\n\n #take spectrogram for boat noises\n f_s, t_s, Sxx = spectrogram(data_sample, fs=sample_rate)\n\n\n #filter out anything above a certain frequency\n for freq in f_s:\n if freq > freq_cap: \n index1 = int(np.where(f_s == freq)[0])\n break\n\n for freq in f_s:\n if freq > freq_floor:\n index2 = int(np.where(f_s == freq)[0])\n break\n\n # Cuts down data to the range of frequencies we are interested in\n Sxx = Sxx[index2:index1]\n f_s = f_s[index2:index1]\n\n\n #filtering out outliers in amplitude spikes to stop skewing main color plot results\n for line in range(0,len(Sxx)):\n for amp in range(0,len(Sxx[line])):\n if Sxx[line][amp] > amp_cap:\n #index = int(np.where(Sxx == amp)[0])\n Sxx[line][amp]=0\n\n \n #calculate power spectral density (psd)\n psd = np.array([])\n for slice in range(0, len(t_s)):\n psd = np.append(psd, np.trapz(np.abs(Sxx[:, slice]), f_s))\n\n # NEW CONTENT STARTS HERE\n\n #this is really the smoothing coefficient, but we're not using it for smoothing\n smoothness = 100\n\n boat_smooth = []\n f_s_smooth = []\n Sxx_smooth = []\n\n\n #Finding a smoothed max/min curve that can be analyzed later to flag for boat sounds\n for i in range(0, (len(psd)-smoothness)):\n\n boat_smooth.append(max(psd[i:i+smoothness]))\n f_s_smooth.append(psd[i])\n Sxx_smooth.append(psd[i])\n\n t_s = t_s[0:len(psd)-smoothness] \n\n if flag == True:\n return t_s, boat_smooth, f_s_smooth, Sxx_smooth\n else:\n return t_s, boat_smooth", "title": "" }, { "docid": "ee4583114b6451096f5a690a8a11c28a", "score": "0.49739295", "text": "def tenrands(shp):\r\n data = numpy.random.random(shp);\r\n return tensor(data, shp);", "title": "" }, { "docid": "6aee2ed227dfa64a8740ff90ee5df4da", "score": "0.49697247", "text": "def single_test():\n # r = np.random.randint(th_min*10, th_max*10, 2)/10.0\n r = [50, 65]\n # scatter = MieScattering(n_layers=2)\n # spect = calc_spectrum(r)[0, :]\n #\n # return r, spect", "title": "" }, { "docid": "ffa99d4212a34b03a4d73650fcd3529a", "score": "0.49640277", "text": "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "title": "" }, { "docid": "9a315b2cbff61049ecaa7f36553a20d2", "score": "0.49612558", "text": "def shuffle(self):\n sc = self.coordinates\n st = randint(0, len(sc) - 1)\n nd = randint(0, len(sc) - 1)\n sc[st], sc[nd] = sc[nd], sc[st]", "title": "" }, { "docid": "cda36c2e779d5492a38122abcd28d593", "score": "0.4958861", "text": "def gen_border(self, points):\n base = random.randint(0, 5000)\n border = []\n for i in range(points + 1):\n x = float(i) * self.span / points\n y = pnoise1(x + base, self.octaves) * 1\n y += pnoise1(x + base, self.octaves + 4) * 2\n y *= self.scale\n border.append((i, y))\n return border", "title": "" }, { "docid": "bb4292d5933b8a6ee725ac8a17b8007f", "score": "0.49564856", "text": "def stable_income_sample(size: int):\n # the following three percentage 
numbers are averaged from three real datasets(2016-2018 CPS)\n sample_left = np.zeros(math.ceil(0.287 * size)).astype('Int32')\n sample_middle = np.random.triangular(0, 1250, 100000, math.ceil(0.645 * size)).astype('Int32')\n sample_right = np.random.choice([100000, 1000000], math.floor(0.068 * size)).astype('Int32')\n sample = np.concatenate([sample_left,sample_middle,sample_right],axis=0)\n np.random.shuffle(sample)\n return sample", "title": "" }, { "docid": "6ca4dcd9ba0441c67a22bdb3d76a4539", "score": "0.495394", "text": "def draw_bids(data, rounds):\n output = np.zeros(rounds)\n for i in range(rounds):\n choice = np.random.choice(data[:, 1])\n output[i] = choice\n return output", "title": "" }, { "docid": "6fc913d9151f3f5aac0a72fd16f24248", "score": "0.49466342", "text": "def _getmixup(self, idx):\n if not self.train:\n raise RuntimeWarning('Can not use mixup when in test mode.')\n return self._getitem(idx)\n\n img = self.load_image(idx)\n annot = self.load_annotations(idx)\n\n randidx = random.randint(0, len(self.image_ids) - 1)\n if randidx == idx:\n return self._getitem(idx)\n\n randimg = self.load_image(randidx)\n randannot = self.load_annotations(randidx)\n\n # mixup must swap at least 1/8th of the image, at most 2/3\n min_swp = int(img.shape[0] * 0.125)\n max_swp = int(img.shape[0] * 0.66)\n cutsz = random.randint(min_swp, max_swp)\n \n if np.random.rand() <= 0.5:\n # horizontal cut\n img[:cutsz, :, :] = randimg[:cutsz, :, :]\n\n randannot = randannot[(randannot[:, 1] + 45) < cutsz] # ymin too high\n randannot[:, 3] = np.where(randannot[:, 3] >= cutsz, cutsz-1, randannot[:, 3])\n \n annot = annot[(annot[:, 3] + 45) > cutsz] # drop annot w. low ymax\n annot[:, 1] = np.where(annot[:, 1] < cutsz, cutsz, annot[:, 1])\n\n annot = np.concatenate([annot, randannot])\n else:\n # vertical cut\n img[:, :cutsz, :] = randimg[:, :cutsz, :]\n\n randannot = randannot[(randannot[:, 0] + 45) < cutsz] # xmin too high\n randannot[:, 2] = np.where(randannot[:, 2] >= cutsz, cutsz-1, randannot[:, 2])\n \n annot = annot[(annot[:, 2] + 45) > cutsz] # drop annot w. 
low xmax\n annot[:, 0] = np.where(annot[:, 0] < cutsz, cutsz, annot[:, 0])\n\n annot = np.concatenate([annot, randannot])\n\n # hack-fix\n # drop annotations with width/height 25 or fewer pixels\n annot = annot[annot[:, 2] - annot[:, 0] > 25]\n annot = annot[annot[:, 3] - annot[:, 1] > 25]\n\n sample = {'img': img, 'annot': annot}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample", "title": "" }, { "docid": "9f1692e9e212db7bd315474d9240278b", "score": "0.49451122", "text": "def subgen_building(building, build_w, build_h, settings=None):\n pattern = [['ground' for y in range(build_h)] for x in range(build_w)] # fill cells with 'ground'\n if building == 'house': # generate a simple house\n pattern = [['floor' for y in range(build_h)] for x in range(build_w)] # fill cells with 'floor'\n for x in range(0, build_w): # draw horizontal walls\n pattern[x][0] = 'wall'\n pattern[x][-1] = 'wall'\n for y in range(0, build_h): # draw vertical walls\n pattern[0][y] = 'wall'\n pattern[-1][y] = 'wall'\n for i in range(random.randrange(10)): # make some walls inside\n x = random.randrange(build_w)\n y = random.randrange(build_h)\n if pattern[x][y] == 'floor':\n pattern[x][y] = 'wall'\n for i in range(0, random.randrange(3)): # make some furniture\n x = random.randrange(build_w)\n y = random.randrange(build_h)\n if pattern[x][y] == 'floor':\n pattern[x][y] = 'furniture'\n for n in range(1, 8): # make windows\n x = random.randrange(1, build_w - 1)\n y = random.randrange(1, build_h - 1)\n direction = random.randint(1, 4)\n if direction == 1: x = 0\n if direction == 2: x = -1\n if direction == 3: y = 0\n if direction == 4: y = -1\n pattern[x][y] = game_logic.weighted_choice([('small_window', 50), ('large_window', 50)])\n x = random.randrange(1, build_w - 1) # make a door\n y = random.randrange(1, build_h - 1)\n direction = random.randint(1, 4)\n if direction == 1: x = 0\n if direction == 2: x = -1\n if direction == 3: y = 0\n if direction == 4: y = -1\n pattern[x][y] = 'door'\n elif building == 'multiroom_house': # generate a house with multiple rooms\n rooms = []\n if 'room_min' in settings and 'room_max' in settings:\n room_needed = random.randrange(settings['room_min'], settings['room_max'])\n else:\n raise RuntimeError('No minimal and maximal number of rooms specified.')\n x1 = random.randrange(0, build_w) # make first room\n y1 = random.randrange(0, build_h)\n x2 = random.randrange(x1, build_w)\n y2 = random.randrange(y1, build_h)\n first_room = {'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2}\n rooms.append(first_room)\n subgen_multiroom_place_room(x1=x1, y1=y1, x2=x2, y2=y2, pattern=pattern)\n tries = 0\n while len(rooms) < room_needed and tries < 100: # generate more rooms\n tries += 1\n candidates = subgen_multiroom_get_candidates(build_w=build_w, build_h=build_h, pattern=pattern)\n if len(candidates) > 0: # check if there are suitable walls\n candidate = candidates[random.randrange(0, len(candidates))]\n room = subgen_multiroom_fit_room(build_w=build_w, build_h=build_h, pattern=pattern, candidate=candidate)\n if room: # if room is succesifully placed\n rooms.append(room)\n else:\n break\n return pattern", "title": "" }, { "docid": "008fd41a38dfad688fa3accb086a95bf", "score": "0.49446222", "text": "def shrink_matrix(m, cut):\n end = mnist_dim\n start = end - cut\n\n l = [i for i in range(start, end)]\n\n return np.delete(m, l, 1)", "title": "" }, { "docid": "9329e8e92bb462e3115e2db9995151f9", "score": "0.49427566", "text": "def balanced_subsample(y, s):\n sample = []\n # For every 
label in the dataset\n for label in np.unique(y):\n # Get the index of all images with a specific label\n images = np.where(y==label)[0]\n # Draw a random sample from the images\n random_sample = np.random.choice(images, size=s, replace=False)\n # Add the random sample to our subsample list\n sample += random_sample.tolist()\n return sample", "title": "" }, { "docid": "0e5a23d01b088ed664a773dd5903840b", "score": "0.49417973", "text": "def salt_img(self, img):\n n = int(img.shape[0] * img.shape[1] * 0.001)\n ilist = np.random.randint(0, img.shape[1], n)\n jlist = np.random.randint(0, img.shape[0], n)\n for k in range(n):\n i = ilist[k]\n j = jlist[k]\n if img.ndim == 2:\n img[j, i] = 255\n elif img.ndim == 3:\n img[j:j + 1, i:i + 1, :] = 255\n return img", "title": "" }, { "docid": "7fd1ee084edeb17616e75da3c25d75e0", "score": "0.49324667", "text": "def genere_obs(std_o,shape,lobs):\n\tnmin,nmax = lobs\n\tblist = np.empty(shape=np.prod(shape), dtype=bool)\n\tnobs = np.random.randint(nmin,nmax+1)\n\tepso = np.empty(shape=shape)\n\tblist[:nobs] = False\n\tblist[nobs:] = True\n\tmask = np.random.choice(blist, shape, replace=False)\n\tepso[mask] = np.nan\n\tepso[~mask] = np.random.normal(0, std_o, nobs)\n\treturn epso", "title": "" } ]
09eeddc452f5f4cd65017c3764caf1a9
Used to specify whether file notifications are sent to IoT Hub on upload. Defaults to `false`.
[ { "docid": "de9efd70cdd7ac2d1e5c27927577137f", "score": "0.5638011", "text": "def notifications_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"notifications_enabled\")", "title": "" } ]
[ { "docid": "63969bc2dcab7f4ea14e134acdbd3880", "score": "0.6151067", "text": "def requires_file_upload(self):\r\n return self._requires_file_upload", "title": "" }, { "docid": "9a316cf340b732d001f328addd90ea05", "score": "0.60951287", "text": "def may_attach_files(self, user):\n return defaults.PYBB_ATTACHMENT_ENABLE", "title": "" }, { "docid": "f10c3326a67a1e33f70e30d08f08da5f", "score": "0.6032609", "text": "def message_files(self, files=[]):\n\n if len(files) == 0: return True\n try:\n Settings.dev_print(\"uploading files\")\n self.upload_files(files=files)\n Settings.maybe_print(\"successfully began file uploads\")\n Settings.debug_delay_check()\n return True\n except Exception as e:\n Driver.error_checker(e)\n Settings.err_print(\"failure to upload file(s)\")\n return False", "title": "" }, { "docid": "92a3feed257d8b580e5847ae48a12ecd", "score": "0.59631824", "text": "def is_file_upload_initiate(self, message: Message) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "ed54f994b7f9da0bf37aa877d9097556", "score": "0.5947702", "text": "def allow_put_image_file(self):\n return self.get_boolean('allow_put_image_file')", "title": "" }, { "docid": "d0685ce7eeee84236dc82653355c8c40", "score": "0.5928672", "text": "def requires_file_upload(self, value):\r\n self.logger.warn(\"Setting values on requires_file_upload will NOT update the remote Canvas instance.\")\r\n self._requires_file_upload = value", "title": "" }, { "docid": "51397efce2d74b6746716fec3261fd43", "score": "0.5905939", "text": "def sendfile(self):\n return False", "title": "" }, { "docid": "f584ff44098d122e77a858ebb6bade5b", "score": "0.5796519", "text": "def supports_asset_notification(self):\n return 'supports_asset_notification' in profile.SUPPORTS", "title": "" }, { "docid": "bc7d34a9b13bd401deda22cfbba61b20", "score": "0.575488", "text": "def is_file_management_message(self, message: Message) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "9128ae7403f17c0573ad74cccc5298ae", "score": "0.5611806", "text": "def can_register_for_configuration_notifications(self):\n return # boolean", "title": "" }, { "docid": "79bfe647657b0308dde1873c98e75206", "score": "0.5541105", "text": "def notify_shared_with_user(sender, instance, **kwargs):\n message = 'has shared a file with you'\n File = instance.file_uploaded\n user = instance.user\n if kwargs.get('created', False):\n Notification.objects.create(\n message=message, file_shared=File, user_notified=user)", "title": "" }, { "docid": "056db42c44acb3d0589187d4c91528b0", "score": "0.5518553", "text": "def is_param_upload_only(self, foo_param):\n if \"racDownload\" in foo_param.interfaces or \"emDownload\" in foo_param.interfaces:\n return False\n else:\n return True", "title": "" }, { "docid": "8ea1f33fbb1acb20f02a85dde94e5b95", "score": "0.55145085", "text": "async def _is_airshare_file_sender(request):\n return web.Response(text=\"File Sender\")", "title": "" }, { "docid": "636c7f5b4baf3699a172472a40351345", "score": "0.5513571", "text": "def is_uploaded(self) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "3705ec4589ca6cba5e1206d0d0d2bce5", "score": "0.54789054", "text": "def notify_useronly(self) -> ConfigNodePropertyBoolean:\n return self._notify_useronly", "title": "" }, { "docid": "88d67ce293ba1781b1244b760efd89c4", "score": "0.5438019", "text": "def Upload_to_Distribution(self):\n return self.content is not None", "title": "" }, { "docid": "e3b8b775c15e375b9d159bc40adfb5d3", "score": "0.5434449", "text": "def 
supports_repository_notification(self):\n return 'supports_repository_notification' in profile.SUPPORTS", "title": "" }, { "docid": "20066b25b51ed5adf53d7d60be533bf9", "score": "0.542918", "text": "def send_email_notification(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_email_notification\")", "title": "" }, { "docid": "20066b25b51ed5adf53d7d60be533bf9", "score": "0.542918", "text": "def send_email_notification(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_email_notification\")", "title": "" }, { "docid": "f6569ce6e2017556e08bd2dba7ccf977", "score": "0.54291296", "text": "def wants_file(self, file_name: str):\n return False", "title": "" }, { "docid": "64d6df56475f1a5c5cd7d3470c26f8bf", "score": "0.54188013", "text": "def notifications_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"notifications_enabled\")", "title": "" }, { "docid": "04a3ebcea7ef80ceec6d9553842d5f78", "score": "0.5408766", "text": "def photo(self):\n return isinstance(self.action, types.SendMessageUploadPhotoAction)", "title": "" }, { "docid": "f92ffc56a347d3963630f8d00ff18ee7", "score": "0.5400637", "text": "def supports_remote_files(self):\n return False", "title": "" }, { "docid": "425a238360f054a083181d7ed1155938", "score": "0.53810084", "text": "def can_register_for_bin_notifications(self):\n return # boolean", "title": "" }, { "docid": "c2ea496dbb988ac6bd3c1d535e1f676d", "score": "0.53731227", "text": "def supports_notifications(self):\n return self.dmaap_url is not None", "title": "" }, { "docid": "9eee360776b73371b2e08e2f087d11b9", "score": "0.536851", "text": "def can_save(self):\r\n if self.files:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "2b6ddaa00cb19d9e851cce627760e7b3", "score": "0.536421", "text": "def _on_upload_only_changed(self):\n upload_only = self._dialog.get_bool('UPLOAD_ONLY')\n for item_name in self.RENDER_ONLY_SETTINGS:\n self._dialog.enable_widget(item_name, not upload_only)", "title": "" }, { "docid": "4b30eb3f19fdd6b55193f5e13f661296", "score": "0.53349566", "text": "def select_filename(**kwargs: TaskInstance) -> bool:\n filename = random.choice([\"foo\", \"bar\", None]) # nosec for prg usage.\n if filename is not None:\n ti = kwargs[\"ti\"]\n logging.info(f\"Pushing file to workflow: {filename}\")\n ti.xcom_push(key=\"filename\", value=filename)\n else:\n logging.info(\"Chose to skip the downstream.\")\n return True if filename else False", "title": "" }, { "docid": "cbe7c73194558fbae0655bf09565c524", "score": "0.5319611", "text": "def _notify_wanted(self):\n return (\n self.api.on\n and self.api.powerstate == \"On\"\n and self.api.notify_change_supported\n and self.options.get(CONF_ALLOW_NOTIFY, False)\n )", "title": "" }, { "docid": "d4c2d087758e0b7240d3ee4ffbb113ac", "score": "0.530928", "text": "def is_file_url_initiate(self, message: Message) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "cee8bd11015556e1fa7bf950c525d8aa", "score": "0.53072023", "text": "def supports_notifications(self):\n # in derived classes we may use self\n # pylint: disable=no-self-use\n return True", "title": "" }, { "docid": "144d036a53946689c2701b45925120cb", "score": "0.52883667", "text": "def has_files(self):\n return bool(self.files)", "title": "" }, { "docid": "844ff7055ff3ea539f5b051d3965e642", "score": "0.52789885", "text": "def require_attachment_acceptance(self) -> Optional[bool]:\n return pulumi.get(self, \"require_attachment_acceptance\")", "title": "" }, { "docid": 
"f6bba98852f7368d97897c380f8d4c28", "score": "0.5247783", "text": "def supports_item_notification(self):\n return # boolean", "title": "" }, { "docid": "c8aa9bca553d0a1be61a9b19b4f46154", "score": "0.5225759", "text": "def document(self):\n return isinstance(self.action, types.SendMessageUploadDocumentAction)", "title": "" }, { "docid": "417668eaab1fd62d9c99b5247654070d", "score": "0.5181033", "text": "def is_multipart(self):\r\n return True", "title": "" }, { "docid": "94d5fbbce9b4e943e1c858bee63c3003", "score": "0.5170511", "text": "def _is_file_message(message: Dict[Text, Any]) -> bool:\n return (\n \"message\" in message\n and \"attachments\" in message[\"message\"]\n and message[\"message\"][\"attachments\"][0][\"type\"] == \"file\"\n )", "title": "" }, { "docid": "62c8a4ed0fe69ea51808017e18ba96d9", "score": "0.5169221", "text": "def is_file(self) -> bool:\n ...", "title": "" }, { "docid": "a9e886a7b693003c2c93fb1a293aa5e4", "score": "0.51583916", "text": "def notifications_enabled(conf):\n notifications_driver = set(conf.oslo_messaging_notifications.driver)\n return notifications_driver and notifications_driver != {'noop'}", "title": "" }, { "docid": "a9e886a7b693003c2c93fb1a293aa5e4", "score": "0.51583916", "text": "def notifications_enabled(conf):\n notifications_driver = set(conf.oslo_messaging_notifications.driver)\n return notifications_driver and notifications_driver != {'noop'}", "title": "" }, { "docid": "6627725f96569cfcf6e9cbaa42695f21", "score": "0.5151681", "text": "def is_not_file(self):\n return self._tag == 'not_file'", "title": "" }, { "docid": "e1c2d1144534fe3df73a0f7a17b50aaf", "score": "0.5145469", "text": "def allow_get_image_file(self):\n return self.get_boolean('allow_get_image_file')", "title": "" }, { "docid": "647cd66c98551defb7971546fe45bf79", "score": "0.51129884", "text": "def file_check(self, file):\n if config.USE_USER_BLACKLIST:\n user_bool = True\n if str(file.uploader) + '#{}'.format(self.room) in self.user_blacklist:\n user_bool = False\n elif config.USE_USER_WHITELIST:\n user_bool = False\n if str(file.uploader) + '#{}'.format(self.room) in self.user_whitelist:\n user_bool = True\n else:\n user_bool = True\n\n if config.USE_FILENAME_BLACKLIST:\n filename_bool = True\n for item in self.filename_blacklist:\n if item.lower().split('#')[0] in str(file.name).lower() and '#{}'.format(self.room) in item:\n filename_bool = False\n elif config.USE_FILENAME_WHITELIST:\n filename_bool = False\n for item in self.filename_whitelist:\n if item.lower().split('#')[0] in str(file.name).lower() and '#{}'.format(self.room) in item:\n filename_bool = True\n else:\n filename_bool = True\n\n if config.USE_FILETYPE_BLACKLIST:\n filetype_bool = True\n if str(file.filetype) + '#{}'.format(self.room) in self.filetype_blacklist:\n filetype_bool = False\n elif config.USE_FILETYPE_WHITELIST:\n filetype_bool = False\n if str(file.filetype) + '#{}'.format(self.room) in self.filetype_whitelist:\n filetype_bool = True\n else:\n filetype_bool = True\n\n return user_bool and filename_bool and filetype_bool", "title": "" }, { "docid": "3f6b8555186fa75f6fa2d3561c618ac3", "score": "0.5111676", "text": "def large_file_share_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"large_file_share_enabled\")", "title": "" }, { "docid": "3f6b8555186fa75f6fa2d3561c618ac3", "score": "0.5111676", "text": "def large_file_share_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"large_file_share_enabled\")", "title": "" }, { "docid": 
"e673d523ba391d7ea504716ed1d944ca", "score": "0.5110179", "text": "def publish(self):\n if not self.disabled:\n send_files(self.result_xmls,\n self.attachments,\n hwproduct=self.hwproduct,\n testtype=self.testtype,\n target=self.target,\n release_version = self.release_version)", "title": "" }, { "docid": "09e4a0ddb9aa209803267636d7890ca2", "score": "0.5070828", "text": "def is_param_download_only(self, foo_param):\n if \"racUpload\" in foo_param.interfaces or \"emUpload\" in foo_param.interfaces:\n return False\n else:\n return True", "title": "" }, { "docid": "fd9363c43988428bf09480c734d8d0e4", "score": "0.50670296", "text": "def push_notification_setup() -> bool:\n pass", "title": "" }, { "docid": "ffaaa0e5f0c2f036dba52876012a169a", "score": "0.5062816", "text": "def can_register_for_resource_notifications(self):\n return # boolean", "title": "" }, { "docid": "d0e803dff97debabfa12c52b478b346e", "score": "0.5060032", "text": "def check_file(self: \"FileSignatureChecker\") -> bool | str:\n if not self.check_file_extension():\n return False\n return bool(self.check_file_signature() and self.check_mime_type())", "title": "" }, { "docid": "1b9339782fdc343a0f36bfd408b80312", "score": "0.5045944", "text": "def has_notification(self):\n return self._has_notification", "title": "" }, { "docid": "2fb7628738cf61a80fde582f662f2ae9", "score": "0.50350994", "text": "def can_upload(self) -> bool:\n try:\n self.__load()\n except OSError:\n return False\n\n return self.upload", "title": "" }, { "docid": "59fe078fa51f713be745372e1c7406bb", "score": "0.50241506", "text": "def _fileIsStream(self):\n # DOC {{{\n # }}}\n\n # CODE {{{\n return (not isinstance(self.file_, str))\n # }}}", "title": "" }, { "docid": "5b219ecf96ee1e7e75bc5f623b9385c5", "score": "0.5022072", "text": "def _is_new(self, filepath):\n return filepath not in self._watched_files", "title": "" }, { "docid": "d228e4bf8cb66ac607a465aece8e074f", "score": "0.50163734", "text": "def is_file_binary_response(self, message: Message) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "080ac3337b2df3a73ada2233f6efea62", "score": "0.5010879", "text": "def set_follow_filename(self, boolean):\n self.follow_filename = boolean", "title": "" }, { "docid": "16750a60261b40c07aa23d38b4b0ad78", "score": "0.501004", "text": "def is_file_purge_command(self, message: Message) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "b2e7d4097882eadc0558050772764ea8", "score": "0.50087863", "text": "def can_register_for_family_notifications(self):\n return # boolean", "title": "" }, { "docid": "6997e5d6df9c7f85f12e24b05dc99e1a", "score": "0.49956262", "text": "def is_file(self) -> bool:", "title": "" }, { "docid": "bfe51db2b7ee80accf04d4b294a7b6c4", "score": "0.49887088", "text": "def file_allowed(self, storage, basename):\n return self.extension_allowed(extension(basename))", "title": "" }, { "docid": "a18a9199d6095eb26d2afa1eb12ad561", "score": "0.4985833", "text": "def is_pending_files(self):\n return len(self.pending) > 0", "title": "" }, { "docid": "08f36ee8626e97624f1b251815385b9f", "score": "0.49806964", "text": "def is_attachment(payload):\n return 'filename' in payload and payload['filename']", "title": "" }, { "docid": "5761f9d9bddd4807a49612caf8dd1a67", "score": "0.49788478", "text": "def is_file_added(self) -> bool:\n return self._file_added", "title": "" }, { "docid": "aa0557ef766768d5ac7874fa0075da24", "score": "0.49765876", "text": "def _accept_image_upload_alert_(self):\n alert = self._wait_alert_()\n return alert is not 
None", "title": "" }, { "docid": "c01f59d3e35852172716f627d7ba2a9e", "score": "0.4975556", "text": "def is_actionable(media_file):\n return (media_file.file_path == cfg.CONF.tasks.sync_path and\n media_file.synced and\n os.path.exists(os.path.join(cfg.CONF.tasks.sync_path,\n media_file.filename)))", "title": "" }, { "docid": "d06de296ae2bbd50f045645993d0f3c3", "score": "0.49594268", "text": "def is_file(self) -> bool:\n return self._metadata.type is _PathType.FILE", "title": "" }, { "docid": "253d453efa0fe7cf2f2417f4a7e885e2", "score": "0.4959338", "text": "def is_file_list(self, message: Message) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "1ac0521d999f38fb44846c6b3e407bd9", "score": "0.49551624", "text": "def __bool__(self):\n\t\tts = FileStorageManagerSingleton.get(self._id)\n\t\treturn bool(abs(ts))", "title": "" }, { "docid": "68019a8dadcf891c8f75c73ba26d56f3", "score": "0.49335846", "text": "def has_files(self) -> bool:\n return bool(self.files or self.failed_files)", "title": "" }, { "docid": "a52a45701d0319e3f13b16c3acfc023b", "score": "0.4924735", "text": "def shouldAutosave(self):\n return (\n bool(self.fileName) and\n not self.autosaveManuallyDisabled and\n not self.isReadOnly()\n )", "title": "" }, { "docid": "425f7e444126ae9326a6677883fd44a5", "score": "0.49237573", "text": "def force_attach(self) -> bool:\n return pulumi.get(self, \"force_attach\")", "title": "" }, { "docid": "3bbab50c3a1a5f3bcaa83dad885a6b9b", "score": "0.49216694", "text": "def uploadable_object(self, obj):\n\n # Exclude generated files.\n filename = os.path.basename(obj)\n if re.match(\"^.*.pitem$\", filename):\n return False\n\n # Exclude files that match patten defined in config. ie, \"*.pyc\"\n for pattern in self.options[\"ignore_patterns\"]:\n if fnmatch.fnmatch(filename, pattern):\n return False\n\n # Binary overrides match patten defined in config. 
ie, \"*.pyc\"\n for pattern in self.options[\"binary_overrides\"]:\n if fnmatch.fnmatch(filename, pattern):\n return True\n\n # Binary check\n object_path = os.path.abspath(obj)\n if utils.is_binary(object_path):\n return True\n return False", "title": "" }, { "docid": "c5c3c7cda4ae936573aac766e976e194", "score": "0.492109", "text": "def file_disabled(self):\n if not self.lines:\n return False\n return self.lines[0] == (1, 1)", "title": "" }, { "docid": "0badea7596d330da73af2164ae347373", "score": "0.49067765", "text": "def usb_file_system_notification_cb(self, notification_msg):\n self.get_logger().info(\"File system notification:\"\n f\" {notification_msg.path}\"\n f\" {notification_msg.file_name}\"\n f\" {notification_msg.node_name}\"\n f\" {notification_msg.callback_name}\")\n if notification_msg.file_name == software_update_config.UPDATE_SOURCE_DIRECTORY and \\\n notification_msg.callback_name == software_update_config.SCHEDULE_USB_UPDATE_SCAN_CB:\n self.schedule_usb_update_scan(path=notification_msg.path,\n name=notification_msg.file_name,\n node_name=notification_msg.node_name)", "title": "" }, { "docid": "0f045d04176d97659116e1f9d8a4c1f9", "score": "0.4900836", "text": "def test_send_notification_disabled(self) -> None:\n with local_no_notification_app.app_context():\n response = send_notification(\n notification_type=NotificationType.OWNER_ADDED,\n options={},\n recipients=['test@test.com'],\n sender='test2@test.com'\n )\n self.assertEqual(response.status_code, HTTPStatus.ACCEPTED)", "title": "" }, { "docid": "6390ccf126878228b6a30bb87aec956c", "score": "0.4898834", "text": "def is_file(self):\n return self._tag == 'file'", "title": "" }, { "docid": "66222f68fc32d4916ac2fe2131cadfca", "score": "0.4897518", "text": "def email_notify(self):\n if not self.request.cfg.send_email_notify:\n return\n\n email().send(\n from_adress = notifer_email_from_adress,\n to_adress = notifer_email_to_adress,\n subject = \"uploaded: '%s' from '%s'\" % (\n filename, client_info\n ),\n text = email_notify_text % {\n \"client_info\" : client_info,\n \"fileinfo\" : fileinfo,\n \"info\" : \"%s (Python v%s)\" % (\n __info__, sys.version\n ),\n }\n )\n\n self.request.write('<a href=\"?\">continue</a>')", "title": "" }, { "docid": "8361c69425bf011959eb0cd457f33b42", "score": "0.48972294", "text": "def can_register_for_parameter_notifications(self):\n return # boolean", "title": "" }, { "docid": "517422a3c514a7bd243fc097ba22b4c6", "score": "0.48950234", "text": "def is_dax_upload_running():\n if os.path.exists(DAX_UPLOAD_FLAGFILE):\n LOGGER.warn('Upload already running.')\n return True\n else:\n f_obj = open(DAX_UPLOAD_FLAGFILE, 'w')\n today = datetime.now()\n datestr = \"Date: %s%s%s_%s:%s:%s\" % (str(today.year),\n str(today.month),\n str(today.day),\n str(today.hour),\n str(today.minute),\n str(today.second))\n f_obj.write(datestr+'\\n')\n f_obj.close()\n LOGGER.debug('Flagfile created: %s with date: %s\\n' % (DAX_UPLOAD_FLAGFILE,\n datestr))\n return False", "title": "" }, { "docid": "ffb95d2b8ee1874cf53abd2dfaf18469", "score": "0.48910627", "text": "def is_waiting_for_notification(self):\n pass", "title": "" }, { "docid": "a11b3c340e2b552d713fd8126f254b3c", "score": "0.48895785", "text": "def setIgnoreFileTypes(self, ftypes):\n self._ignoreFileTypes = ftypes", "title": "" }, { "docid": "13e5feed314e9fccad8402cfa1b532b2", "score": "0.4885096", "text": "def _is_yal_file_(self,filename) -> bool:\n\n if filename in self.files:return True\n else:return False", "title": "" }, { "docid": 
"87563b5725b92f772ab16310fde2b340", "score": "0.48844552", "text": "def test_no_file_path(self):\n mock_settings_1 = {\n 'ALLOWED_EMAIL_ATTACHMENTS': ('application/java',)\n }\n with patch.dict('sifter.mailsifter.accessors.settings.MAILSIFTER',\n mock_settings_1):\n self.msg.attach(self.java)\n attachment = get_first_attachment(self.msg)\n with patch('sifter.mailsifter.attachments.settings',\n self.mock_settings):\n with LogCapture() as log_capture:\n actual = attachments.save_attachment(attachment)\n expected = None\n self.assertEqual(actual, expected)\n msg = 'The attachment %s is not an allowed file type' \\\n % self.java_file\n log_capture.check(\n ('sifter.mailsifter.attachments', 'WARNING', msg),\n )", "title": "" }, { "docid": "79261d7c7e5fd60e7ac860bdb7e10b20", "score": "0.48828015", "text": "def is_file_upload_abort(self, message: Message) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "ec9c7866627bea47e64218cbdd130f81", "score": "0.48677158", "text": "def is_upload(action):\n return 'r' in action.type._mode and (action.default is None or\n getattr(action.default, 'name') not in (sys.stderr.name, sys.stdout.name))", "title": "" }, { "docid": "6082c6a684d1d1a3eda86a0e624a87d3", "score": "0.4864598", "text": "async def process_extra(self, filename: str) -> bool:\n return True", "title": "" }, { "docid": "18f60f1b1b47f884cc2a5b658bc46a66", "score": "0.48632145", "text": "def ensure_osf_files(settings):\n settings.COPY_GIT_REPOS = True\n if 'osffiles' not in settings.ADDONS_REQUESTED:\n settings.ADDONS_REQUESTED.append('osffiles')", "title": "" }, { "docid": "bb0bdaacac4fe6d2d8946f0b797e3b47", "score": "0.4860689", "text": "def large_file_share_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"large_file_share_enabled\")", "title": "" }, { "docid": "a4b490300b933199300cc7fbe546dff0", "score": "0.4857068", "text": "def send_email_notification(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"send_email_notification\")", "title": "" }, { "docid": "52c6531a0fdc566f1cb04af6c9e74836", "score": "0.48563263", "text": "def isFile(self, field, model):\n\n if field in getattr(self, 'files', \"\").split(\",\"):\n return True\n else:\n return False", "title": "" }, { "docid": "dbd8cb8b478ebc9317ddf902fc0ffa20", "score": "0.48558754", "text": "def status_message(self):\n if self.adapter.has_in_progress_uploads():\n return _(u'Uploading of file data to secure cloud storage for this'\n u' item is currently in progress')\n elif self.adapter.has_uploaded_any_fields():\n return _(u'File data for this item is being served from '\n u'secure cloud storage')\n else:\n return _(u'File data for this item can be uploaded to '\n u'secure cloud storage')", "title": "" }, { "docid": "075da76db3dbf3138bd940252b58a4b3", "score": "0.48492357", "text": "def enable_minimal_download(self):\n return self.properties.get(\"EnableMinimalDownload\", None)", "title": "" }, { "docid": "13a126f5c336193a08ce22187a41647d", "score": "0.48423657", "text": "def set_noti(self, update, context):\n\n chat_user = update.effective_user\n group_chat_id = update.effective_chat.id\n args = self._get_message_arguments(update.message.text)\n\n if not args:\n update.message.reply_text('Missing argument: true or false.')\n return\n\n if update.effective_chat.id < 0: # group\n group = TelegramGroup.objects.filter(chat_id=group_chat_id)\n if not group.exists():\n update.message.reply_text(\"This group has not been registered yet.\")\n\n user = 
TelegramUser.objects.filter(chat_id=chat_user['id'], role=TelegramUser.ROLE.ADMIN)\n if not user.exists():\n update.message.reply_text(f\"You don't have permission to perform this action.\")\n return\n \n if args[0].lower() == 'true':\n group.receive_notification = True\n group.save()\n elif args[0].lower() == 'false':\n group.receive_notification = False\n group.save()\n else:\n update.message.reply_text('Invalid syntax')\n return\n \n else: # user\n user = TelegramUser.objects.filter(chat_id=chat_user['id'], role=TelegramUser.ROLE.ADMIN)\n if not user.exists():\n update.message.reply_text(\"You don't have permission to perform this action.\")\n return\n user = user.first()\n \n if args[0].lower() == 'true':\n user.receive_notification = True\n user.save()\n elif args[0].lower() == 'false':\n user.receive_notification = False\n user.save()\n else:\n update.message.reply_text('Invalid syntax')\n return\n \n update.message.reply_text('OK')", "title": "" }, { "docid": "16820a3af703cc01c5a736f7ecdbd25d", "score": "0.48415658", "text": "def __accept(file: ModelFile) -> bool:\n return file.remote_size is not None and \\\n file.state == ModelFile.State.DEFAULT", "title": "" }, { "docid": "73ef6035b357710d81c264e77865254d", "score": "0.483944", "text": "def notify_useronly(self, notify_useronly: ConfigNodePropertyBoolean):\n\n self._notify_useronly = notify_useronly", "title": "" }, { "docid": "9997680c262c2dc193c3e72f782774e0", "score": "0.48329785", "text": "def multi_download_enabled(self) -> bool:\n return True", "title": "" }, { "docid": "cbdeb558cfad17a85bb92155f73779f2", "score": "0.48297146", "text": "def sniff(self, filename):\n return False", "title": "" }, { "docid": "cbdeb558cfad17a85bb92155f73779f2", "score": "0.48297146", "text": "def sniff(self, filename):\n return False", "title": "" }, { "docid": "8b1810823d7345ad04e8c4f60930d6bd", "score": "0.48293868", "text": "def no_config_send(self, **kwargs):\n self.vprint(1, \"Missing config file, not sending to server\")", "title": "" } ]
fc11d77f35850396f2e9f33e38fe0f03
Clean up dict after converting everything to dict
[ { "docid": "a1475a6c04b4671008aa165a5c686a8f", "score": "0.6964649", "text": "def scrub_dict(d):\n if type(d) is dict:\n return dict((k, scrub_dict(v)) for k, v in d.iteritems() if v and scrub_dict(v))\n else:\n return d", "title": "" } ]
[ { "docid": "031c08b6ca06896819c3fcdb44be9439", "score": "0.72440386", "text": "def clean_dictionary(d: dict) -> dict:\n for key in d:\n if hasattr(d[key], '__call__'):\n d.pop(key)\n return d", "title": "" }, { "docid": "38802b0e6b0715a331a543b5f946e829", "score": "0.71119124", "text": "def clean_dict(data: Dict[str, Any]) -> Dict[str, Any]:\n\n def clean(k, v):\n if SENSITIVE_KEYS.search(k):\n return \"x\" * 10\n elif isinstance(v, list):\n cleaned_list = []\n for item in v:\n if isinstance(item, dict):\n cleaned_list.append(clean_dict(item))\n else:\n cleaned_list.append(item)\n return cleaned_list\n elif isinstance(v, dict):\n return clean_dict(v)\n return v\n\n return {k: clean(k, v) for k, v in data.items()}", "title": "" }, { "docid": "153992d87fd7c7fad980c86823929226", "score": "0.70828474", "text": "def cleanDict(self, d):\n r = {}\n \n for key, value in d.items():\n key = key.lower()\n \n if key in self.key2field:\n key = self.key2field[key]\n \n if key in self.fields2strclean:\n value = self.clean2str(value)\n \n r[key] = value\n\n return r", "title": "" }, { "docid": "f633f8f65d60c363bfe18ad80b260873", "score": "0.7055537", "text": "def _cleanse_dict(original):\n return {k: v for k, v in six.iteritems(original) if \"_pass\" not in k}", "title": "" }, { "docid": "561bd52b4fdc04b353cde1be6a3856e5", "score": "0.69026816", "text": "def strip_dict(d):\n keys = ('_NameableValue__name', '_axes', '_args', 'valfun', 'dtype',\n 'scale', '_tensor', '_send_node')\n for key in keys:\n if key in d:\n del d[key]", "title": "" }, { "docid": "4613195de080c290b235c387a081fa5a", "score": "0.68525684", "text": "def _sanitize_dict(self, obj, trim_strings, ignored, seen):\n if isinstance(obj, FilterDict):\n obj = self.filter_string_values(obj)\n\n clean_dict = {}\n for key, value in obj.items():\n\n clean_value = self._sanitize(value, trim_strings, ignored, seen)\n\n self._sanitize_dict_key_value(clean_dict, key, clean_value)\n\n return clean_dict", "title": "" }, { "docid": "af334ef48f027048ec41768447df7f81", "score": "0.6760022", "text": "def clean(data_set):\n\tfor datum in data_set:\n\t\tfor (keyname, value) in datum.items():\n\t\t\tdatum.pop(keyname)\n\t\t\t\n\t\t\tif keyname in KEYNAME_CONVERTER_DICT:\n\t\t\t\tkeyname = KEYNAME_CONVERTER_DICT[keyname]\n\t\t\t\n\t\t\tif value in VALUE_CONVERTER_DICT:\n\t\t\t\tvalue = VALUE_CONVERTER_DICT[value]\n\t\t\tdatum[keyname] = value\n\t\t\n\t\tdatum = normalize_datetime(datum)\n\n\t\t# last thing is to remove the keynames don't need\n\t\tfor keyname in datum.keys():\n\t\t\tif not keyname in KEYNAME_VALUE_DICTIONARY:\n\t\t\t\tdatum.pop(keyname)\n\treturn data_set", "title": "" }, { "docid": "05d9e793a1ce306bf556add10f0db5ea", "score": "0.6735997", "text": "def _clean_up_dict(obj: Any) -> Any:\n if isinstance(obj, MutableMapping):\n return {key: _clean_up_dict(value) for key, value in obj.items() if key != \"_target_\"}\n elif isinstance(obj, Enum):\n return str(f\"{obj.name}\")\n elif OmegaConf.is_config(obj): # hydra stores lists as omegaconf.ListConfig, so we convert here\n return OmegaConf.to_container(obj, resolve=True, enum_to_str=True)\n return obj", "title": "" }, { "docid": "384c7332879ab256eaf8950633619bb9", "score": "0.67016435", "text": "def strip(d):\n for k, v in d.items():\n if isinstance(v, dict):\n strip(d[k])\n elif v == None:\n del d[k]", "title": "" }, { "docid": "a79dea0b054ad1fc8c9caec52c366fd4", "score": "0.663671", "text": "def unstrip(self, data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:\n result = {}\n for key, value in 
data.items():\n key_path = key.split(DELIM)\n ref, k = self.get_attribute(key_path)\n result[key] = {\"reference\": ref, \"key\": k, \"value\": value}\n return result", "title": "" }, { "docid": "2966bb1943986647f28d2976ac8f8c3d", "score": "0.66207266", "text": "def clean_record(self):\n _dict = {\n key: value for (key, value) in self.record.items() if not key in\n BAMBOO_RESERVED_KEYS\n }\n return remove_mongo_reserved_keys(_dict)", "title": "" }, { "docid": "52b67226c23302c83e53e11e0cb52149", "score": "0.65922636", "text": "def clean_dict(_dict, fields_to_clean=(None, [], {})):\n def _recursive_clean(doc, access):\n for k, v in doc.items():\n if isinstance(v, dict):\n if len(v) == 0:\n del(access[k])\n else:\n _recursive_clean(v, doc[k])\n elif v in fields_to_clean:\n del(access[k])\n _recursive_clean(_dict, _dict)", "title": "" }, { "docid": "9bd59ec4ad011540b8400603aa17459f", "score": "0.6590745", "text": "def clean_up(self):\n self._list = list(filter(lambda x: x is not None, self._list))\n self._dict = {element: key for key, element in enumerate(self._list)}", "title": "" }, { "docid": "ec1c559c8c31ef7010928794668b3e5c", "score": "0.65705", "text": "def clean_doc(doc):\n for key, value in list(doc.items()):\n if value is None:\n del doc[key]\n elif isinstance(value, dict):\n clean_doc(value)\n return doc", "title": "" }, { "docid": "6aba55dd4f15f3773ec9171c70a324c1", "score": "0.6438912", "text": "def restore(self):\n for name, value in self.old_dict.items():\n if value is NotInDict:\n try:\n del self.dict[name]\n except:\n pass\n else:\n self.dict[name] = value", "title": "" }, { "docid": "ebb35d7c35cd6b75fb699782187569a0", "score": "0.6411375", "text": "def pruner(data):\n new_data = {}\n for k, v in data.items():\n if isinstance(v, dict):\n v = pruner(v)\n if not v in (u\"\", None, {}):\n new_data[k] = v\n return new_data", "title": "" }, { "docid": "d0f7e7c3e53e558f1991216b782bcb6a", "score": "0.6401671", "text": "def _coreClean(attribs):\n attribs = copy.deepcopy(attribs)\n if 'Name' in attribs:\n del attribs['Name']\n if 'Description' in attribs:\n del attribs['Description']\n if 'Tags' in attribs:\n del attribs['Tags']\n if 'CreatedAt' in attribs:\n del attribs['CreatedAt']\n # Whether or not the hash should salt future hashes is up for debate\n #if 'UniqueId' in attribs:\n #del attribs['UniqueId']\n return attribs", "title": "" }, { "docid": "e36b9074a873fceef53af60353169ae0", "score": "0.64008045", "text": "def _transform(d):\n for k, v in d.iteritems():\n if isinstance(v, dict):\n d[k] = _transform(v)\n return AttrDict(d)", "title": "" }, { "docid": "fc15aadec109f29f3165b908fc1b9e02", "score": "0.6315103", "text": "def _clean_data(self, data):\n if isinstance(data, list):\n return [self._clean_data(d) for d in data]\n\n if isinstance(data, dict):\n SENSITIVE_FIELDS = {'api', 'token', 'key', 'secret', 'password', 'signature'}\n\n data = dict(data)\n if self.sensitive_fields:\n SENSITIVE_FIELDS = SENSITIVE_FIELDS | {field.lower() for field in self.sensitive_fields}\n\n for key, value in data.items():\n try:\n value = ast.literal_eval(value)\n except ValueError:\n pass\n if isinstance(value, list) or isinstance(value, dict):\n data[key] = self._clean_data(value)\n if key.lower() in SENSITIVE_FIELDS:\n data[key] = self.CLEANED_SUBSTITUTE\n return data", "title": "" }, { "docid": "170d12ca440d4ec485b75869e30a31d7", "score": "0.6304131", "text": "def remove_dict(self, data):\n log.debug(\"NOOP Delete: %s\" % \",\".join(data.keys()))", "title": "" }, { "docid": 
"d920b6f78ae531047d593331aa21d3e6", "score": "0.62970835", "text": "def _fix_dict(input_dict):\n output_dict = dict()\n\n for k in input_dict.keys():\n if isinstance(input_dict[k], dict):\n output_dict[_replace_builtin(k)] = _fix_dict(input_dict[k])\n elif isinstance(input_dict[k], list):\n output_dict[_replace_builtin(k)] = _fix_list(input_dict[k])\n elif isinstance(input_dict[k], tuple):\n output_dict[_replace_builtin(k)] = _fix_tuple(input_dict[k])\n else:\n output_dict[_replace_builtin(k)] = input_dict[k]\n\n return output_dict", "title": "" }, { "docid": "877c5900ec47165d2d060a44cf0b22ca", "score": "0.62840265", "text": "def minify_job_dict(post_dict):\n for strip_key in [\"_links\", \"retries\", \"localId\"]:\n if strip_key in post_dict:\n del post_dict[strip_key]\n return post_dict", "title": "" }, { "docid": "cef925721436d23d3d5f15f4a89b5b53", "score": "0.6263944", "text": "def cleandict(adict):\n\n newdict = {}\n for k, v in adict.items():\n newdict[np2py(k)] = np2py(v)\n return newdict", "title": "" }, { "docid": "f60a84b9f52f1bfad2e86f5a09079e31", "score": "0.6219388", "text": "def clean_config(config: Optional[dict]) -> dict:\n data = {}\n if isinstance(config, dict):\n for k, v in config.items():\n if v is not None:\n data[k] = v\n\n return data", "title": "" }, { "docid": "c208ef0f8fc58ee4de123a88620603bb", "score": "0.62070125", "text": "def clean(data):\n clean_data = {}\n for account, address in data.items():\n for name, info in address.items():\n if info:\n unit_data = []\n unit_name = f\"{account};{name}\"\n records = map(lambda x: x.split(), info)\n for record in records:\n record_data = {}\n start_date = datetime.datetime.strptime(record[0], \"%m/%d/%Y\").date()\n record_data[\"start_date\"] = start_date\n record_data[\"end_date\"] = start_date + timedelta(int(record[2]))\n record_data[\"_usage\"] = record[1]\n # This (below) will cause an error someday\n record_data[\"charge\"] = record[5][1:]\n record_data[\"avg_temp\"] = record[6]\n unit_data.append(record_data)\n clean_data[unit_name] = unit_data\n return clean_data", "title": "" }, { "docid": "89175c1e98bb6ff91a8dd0d1aa8276e4", "score": "0.6191192", "text": "def scrub_data(dic):\n\n scrubbed_dic = {}\n\n if 'Name' in dic:\n if dic['Name'] == '': return {}\n scrubbed_dic['set_num'] = dic['Set number']\n scrubbed_dic['set_name'] = dic['Name']\n if 'Age range' in dic:\n scrubbed_dic['get_age_range'] = bs_scrub_age_range(dic['Age range'])\n if 'Availability' in dic:\n scrubbed_dic['availability'] = dic['Availability']\n if 'LEGO item numbers' in dic:\n scrubbed_dic['lego_item_num'] = dic['LEGO item numbers']\n if 'Minifigs' in dic:\n scrubbed_dic['get_figures'] = syt.scrub_text2int(dic['Minifigs'])\n if 'Dimensions' in dic:\n scrubbed_dic['dimensions'], scrubbed_dic['volume'] = bs_scrub_dimensions(dic['Dimensions'])\n if 'Pieces' in dic:\n scrubbed_dic['pieces'] = syt.scrub_text2int(dic['Pieces'])\n if 'Price per piece' in dic:\n scrubbed_dic['price_per_piece'] = bs_scrub_price(dic['Price per piece'])\n if 'RRP' in dic:\n scrubbed_dic['original_price'] = bs_scrub_price(dic['RRP'])\n if \"Subtheme\" in dic:\n scrubbed_dic['subtheme'] = dic['Subtheme']\n if \"Theme\" in dic:\n scrubbed_dic['theme'] = dic['Theme']\n if 'Weight' in dic:\n scrubbed_dic['weight'] = bs_scrub_weight(dic['Weight'])\n if 'Year released' in dic:\n scrubbed_dic['year_released'] = bs_scrub_year(dic['Year released'])\n\n if 'United Kingdom' in dic:\n scrubbed_dic['available_uk'] = dic['United Kingdom']\n if 'United States' in dic:\n 
scrubbed_dic['available_us'] = dic['United States']\n if 'people own this set' in dic:\n scrubbed_dic['bs_own'] = syt.only_numerics_int(dic['people own this set'])\n if 'want this set' in dic:\n scrubbed_dic['bs_want'] = syt.only_numerics_int(dic['want this set'])\n if 'bs_score' in dic:\n scrubbed_dic['bs_score'] = dic['bs_score']\n\n return scrubbed_dic", "title": "" }, { "docid": "d80b8ed4109991f27d011f6e9fc27516", "score": "0.61590904", "text": "def remove_empty(d):\n return {k: v for k, v in d.items() if v}", "title": "" }, { "docid": "2517b0effef329dd3fdbfba4a9dd0235", "score": "0.6154414", "text": "def prepareuser_dict(user_dict: dict) -> dict:\n d = {}\n for k, v in user_dict.items():\n d[k] = makesafe(v)\n return d", "title": "" }, { "docid": "2f5454716aa9f0062e0dc04c997d3a37", "score": "0.6147722", "text": "def _remove_empty(entry):\n new_entry = copy.deepcopy(entry)\n for k, v in new_entry.items():\n if isinstance(v, dict):\n new_entry[k] = _remove_empty(v)\n\n emptykeys = [k for k, v in new_entry.items() if not v]\n for k in emptykeys:\n del new_entry[k]\n\n return new_entry", "title": "" }, { "docid": "b35139075757fc23363af4770721be0a", "score": "0.6144116", "text": "def _standardize_dct(cls, dct):\n result = copy.deepcopy(dct)\n result = cls._ensure_commands_have_template(result)\n return result", "title": "" }, { "docid": "39ac6467c4fab52d370abfa303cbd7fb", "score": "0.61410284", "text": "def post_process(cls, data):\n def remove_percent(k, v):\n if k == 'df_use_percent':\n v = v.rstrip('%')\n return (k, v)\n return dict([remove_percent(k, v) for k, v in data.iteritems()])", "title": "" }, { "docid": "903961c49c6318d89b5368274253ad8f", "score": "0.61317885", "text": "def clean_null_attrs(self, data: dict, **kwargs: dict) -> dict:\n data = deepcopy(data)\n for key in ExpectationConfigurationSchema.REMOVE_KEYS_IF_NONE:\n if key in data and data[key] is None:\n data.pop(key)\n return data", "title": "" }, { "docid": "3043f17c0adce8452c05eb55313e6aca", "score": "0.6124968", "text": "def split(d):\n g = {}\n for k, v in d.items():\n if isinstance(v, dict):\n t = d.pop(k)\n strip_dict(t)\n if t:\n g[k] = t\n elif v == None:\n del d[k]\n return d, g", "title": "" }, { "docid": "3def3726540d5b283cabade16e13a453", "score": "0.6101342", "text": "def remove_empty_items(obj: Dict) -> Dict:\n return {k: v for k, v in obj.items() if not (v is None or v == [] or v == {})}", "title": "" }, { "docid": "3c9275e618ebaf3a1678db38911b500b", "score": "0.6099305", "text": "def non_empty_values(d):\n clean = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = non_empty_values(v)\n if not is_empty(v):\n clean[k] = v\n return clean", "title": "" }, { "docid": "afa821d5ecdc4730dbfbf0ff14a8227e", "score": "0.60914445", "text": "def cleanup(inp: Dict[str, Any]) -> None:\n if \"inputBinding\" in inp:\n bindings = inp[\"inputBinding\"]\n for field in list(bindings.keys()):\n if field == \"loadContents\":\n inp[field] = bindings.pop(field)", "title": "" }, { "docid": "280549ff5d207ce1c0beba35dbe20420", "score": "0.60726154", "text": "def remove_empty_fields(json_nlp: OrderedDict) -> OrderedDict:\n\n cleaned = OrderedDict()\n for k, v in json_nlp.items():\n if v != '' and v != [] and v != {}:\n cleaned[k] = v\n if 'meta' in cleaned:\n cleaned['meta'] = remove_empty_fields(cleaned['meta'])\n if 'documents' in cleaned:\n for i in range(len(cleaned['documents'])):\n cleaned['documents'][i] = remove_empty_fields(cleaned['documents'][i])\n return cleaned", "title": "" }, { "docid": 
"9738d3acbb9266528ffd93349d85452a", "score": "0.6061647", "text": "def del_dicts(self):\n del(self._statistics_dict)\n del(self._error_dict)\n del(self._evaluation_dict)", "title": "" }, { "docid": "2c3b87d58733c1e9f4fae8c86e0bd249", "score": "0.60585576", "text": "def clean_payload(payload: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:\n return FaustAvroSerializer._clean_item(payload)", "title": "" }, { "docid": "3b6e764af5bf85af95ae80dc03dd4573", "score": "0.6051213", "text": "def _after_from_dict(self) -> None:", "title": "" }, { "docid": "0ed27e1e500a96dc92d733bfb0c97b76", "score": "0.6048629", "text": "def clear_dict(cls):\n cls._ip_dict.clear()\n cls._gpio_dict.clear()\n cls._interrupt_controllers.clear()\n cls._interrupt_pins.clear()\n cls._hierarchy_dict.clear()", "title": "" }, { "docid": "e9c2337c99085cbfc87c5e05040034ed", "score": "0.6042033", "text": "def strip_none(_dict: Union[dict, None]) -> Union[dict, None]:\n if not isinstance(_dict, dict):\n return _dict\n\n return _dict if _dict is None else {k: v for k, v in _dict.items() if v is not None}", "title": "" }, { "docid": "e04a8876fef28bbb8466b4f7fc015f4b", "score": "0.60282856", "text": "def delete_none_values(dictionary: dict) -> dict:\n new_dict = {}\n for key, value in dictionary.items():\n if isinstance(value, dict):\n new_dict[key] = delete_none_values(value)\n elif value not in [[], {}, None]:\n new_dict[key] = value\n return new_dict", "title": "" }, { "docid": "9c1a0d613ad63f9faa9f3387f8df1cb2", "score": "0.60278744", "text": "def _dict_normalize(data):\n if isinstance(data, str):\n return str(data)\n elif isinstance(data, collections.Mapping):\n return dict(map(_dict_normalize, iter(data.items())))\n elif isinstance(data, collections.Iterable):\n return type(data)(map(_dict_normalize, data))\n else:\n return data", "title": "" }, { "docid": "640c98e668b386279a82669250ebd0ad", "score": "0.599666", "text": "def valid_config(data: Dict[str, Any]) -> Dict[str, Any]:\n for key in tuple(data):\n data[key.replace('-', '_')] = data.pop(key)\n return data", "title": "" }, { "docid": "d331501e309e1dd045227dcb2ade49f5", "score": "0.5993498", "text": "def unflex(self, lookup: Dict[str, str], kwargs: Dict[str, Any]) -> Dict[str, Any]:\n clean = {}\n for key in kwargs:\n lookup_key = self._lookup_key(key)\n if lookup_key not in lookup:\n if self.strip_extra:\n continue\n clean[key] = kwargs[key] # don't touch this one, let it explode later\n else:\n clean[lookup[lookup_key]] = kwargs[key]\n\n return clean", "title": "" }, { "docid": "29d9442fe9c2bef81824b7481c674f0d", "score": "0.5992928", "text": "def _clean_config_dict(self, config):\n for key in [\n \"estimator_list\",\n \"early_stopping\",\n \"X_id\",\n \"y_id\",\n \"groups\",\n \"cv\",\n \"fit_params\",\n \"scoring\",\n \"max_iters\",\n \"return_train_score\",\n \"n_jobs\",\n ]:\n config.pop(key, None)\n return {k: numpy_types_to_python(v) for k, v in config.items()}", "title": "" }, { "docid": "e12438e250296dc8c065cf2ed80794c4", "score": "0.59610236", "text": "def clear_empty_strings(data):\n for key in data:\n if data[key] == '':\n data[key] = ' '\n if isinstance(data[key], float):\n data[key] = int(data[key])\n elif isinstance(data, dict):\n if data[key] == '':\n data[key] = ' '\n if isinstance(data[key], float):\n data[key] = int(data[key])\n if isinstance(data[key], (dict, list)):\n data[key] = clear_empty_strings(data[key])\n return data", "title": "" }, { "docid": "8dc8366aee824d5634502e5b3d18898c", "score": "0.5933033", "text": "def 
_trim_model_state_dict(state_dict):\n\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n return new_state_dict", "title": "" }, { "docid": "8dc8366aee824d5634502e5b3d18898c", "score": "0.5933033", "text": "def _trim_model_state_dict(state_dict):\n\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n return new_state_dict", "title": "" }, { "docid": "c0d8440d221019b616e1e8405b3b647e", "score": "0.59312075", "text": "def normalize(record):\n del record['_id']\n del record['simulation_id']\n # Keep a copy of this dict, in case it has a key that would otherwise override it\n resources = record['resources']\n for key, value in resources.items():\n key = re.sub('[^\\w]+', '', key)\n record[key] = value\n del record['resources']\n return record", "title": "" }, { "docid": "37fdbbb3ce2a3dc39c284e2771cda551", "score": "0.59267485", "text": "def clean_recursive(obj):\n if not isinstance(obj, dict):\n return\n for k, v in obj.items():\n if isinstance(v, dict):\n clean_recursive(v)\n elif isinstance(v, list):\n for i in v:\n clean_recursive(i)\n elif not jsonable(v):\n obj[k] = f'{v}'", "title": "" }, { "docid": "40a7e0e7286a0c936ee497ec7a326f7c", "score": "0.59256476", "text": "def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict", "title": "" }, { "docid": "40a7e0e7286a0c936ee497ec7a326f7c", "score": "0.59256476", "text": "def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict", "title": "" }, { "docid": "01a87d46ac6d5d55dcc7fafe21d8613b", "score": "0.5925139", "text": "def _break_down_json_data(self, json_data):\r\n # type: (str) -> dict\r\n temp_dict = {}\r\n self._reverse_json(json_data, temp_dict)\r\n return temp_dict", "title": "" }, { "docid": "47666afa2fefc4b3d396a8de332aeefd", "score": "0.5921576", "text": "def toDict(self):\n\n dct = copy.deepcopy(vars(self))\n removeList = []\n for key, value in dct.items():\n if (value is None) or (value == []):\n removeList.append(key)\n elif (isinstance(value, SkeleYaml)):\n dct[key] = value.toDict()\n elif (isinstance(value, list)):\n dctList = []\n for element in value:\n dctList.append(element.toDict() if isinstance(element, SkeleYaml) else element)\n dct[key] = dctList\n elif (isinstance(value, dict)):\n dctDict = {}\n for item_key, item_value in value.items():\n dctDict[item_key] = item_value.toDict() if (isinstance(item_value, SkeleYaml)) else item_value\n dct[key] = dctDict\n\n for key in removeList:\n del dct[key]\n\n return dct", "title": "" }, { "docid": "bb7f0c11662049ab9fc923329d761a19", "score": "0.5920316", "text": "def _from_dict_transform(cls: Type[PhoneNumber], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'added_timestamp' in data:\n data['created_ts'] = data.pop('added_timestamp')\n\n if 'mobile' in data:\n data['number'] = data.pop('mobile')\n\n if 'csrf' in data:\n del data['csrf']\n\n return data", "title": "" }, { "docid": "f3fc269b5875843bb9a13cf19ec0fa2b", "score": "0.5918261", "text": "def sanitize_json(final_output: FinalOutput) -> Dict:\n final_output_copy = deepcopy(final_output)\n for output in final_output_copy.validation_outputs:\n if output.message is not None:\n output.message = output.message.replace(\"\\n\", \"\")\n return asdict(final_output_copy)", "title": "" 
}, { "docid": "54a0ba3f76a26517ceec93ad78f6bab7", "score": "0.59081584", "text": "def _fixup(d):\n return (\n {(k[1:] if k.startswith(\"_\") else k): v for k, v in d.items()}\n if d\n else None\n )", "title": "" }, { "docid": "80bc7d796b5019a2539a2e77a2f6fa53", "score": "0.5907935", "text": "def _clean_interm_struct(self, input_dict):\n keys_to_remove = ['unique_id_re', 'tasks_slugs', 'ext_attr', 'flags_attr_id',\n 'resources', 'mtime']\n\n # remove schedule attrs\n for key in keys_to_remove:\n if key in input_dict:\n input_dict.pop(key)\n\n # Schedule attrs\n if isinstance(input_dict['dStart'], datetime.datetime):\n input_dict['dStart'] = input_dict['dStart'].replace(**self.replace_time_opts)\n input_dict['dFinish'] = input_dict['dFinish'].replace(**self.replace_time_opts)\n else:\n input_dict['dStart'] = datetime.datetime.strptime(input_dict['dStart'],\n self.datetime_fmt)\n input_dict['dFinish'] = datetime.datetime.strptime(input_dict['dFinish'],\n self.datetime_fmt)\n\n # Task(s) attrs\n for task in input_dict['tasks']:\n self._clear_task_time(task)", "title": "" }, { "docid": "67b8a2a222f548f0002c897bb3ede2e8", "score": "0.5902204", "text": "def clear(self):\n super(Dict, self).clear()\n return self", "title": "" }, { "docid": "c3f077efa3ee52985f9aeaaba6a88f12", "score": "0.5892609", "text": "def dict_strip_unicode_keys(uni_dict):\r\n data = {}\r\n \r\n for key, value in uni_dict.items():\r\n data[str(key)] = value\r\n \r\n return data", "title": "" }, { "docid": "9be9eac7d0bbf553510691d45d3b8b37", "score": "0.588819", "text": "def clean_up_datetime(obj_map):\n clean_map = {}\n for key, value in obj_map.items():\n if isinstance(value, datetime.datetime):\n clean_map[key] = {\n 'year': value.year,\n 'month': value.month,\n 'day': value.day,\n 'hour': value.hour,\n 'minute': value.minute,\n 'second': value.second,\n 'microsecond': value.microsecond,\n 'tzinfo': value.tzinfo\n }\n elif isinstance(value, dict):\n clean_map[key] = clean_up_datetime(value)\n elif isinstance(value, list):\n if key not in clean_map:\n clean_map[key] = []\n if len(value) > 0:\n for index, list_value in enumerate(value):\n if isinstance(list_value, dict):\n clean_map[key].append(clean_up_datetime(list_value))\n else:\n clean_map[key].append(list_value)\n else:\n clean_map[key] = value\n else:\n clean_map[key] = value\n return clean_map", "title": "" }, { "docid": "8742cc173ca7bb8cf14434c1473e4056", "score": "0.58866227", "text": "def simplify(item):\n return dict((k, v) for k, v in item.items() if nonempty(v))", "title": "" }, { "docid": "06b3ce58e3d9fc7875f3103276c87a83", "score": "0.58787936", "text": "def mangle_dictionary(a_dict, curr_dicts, key_to_delete=None):\n curr_dict = a_dict.copy()\n if key_to_delete is not None:\n curr_dict.pop(key_to_delete)\n else:\n curr_dict[\"disallowed_key\"] = \"bogus_value\"\n curr_parent_key, _ = curr_dicts.popitem(True)\n\n q = len(curr_dicts.keys())\n while q > 0:\n next_parent_key, next_parent_dict = curr_dicts.popitem(True)\n next_parent_dict[curr_parent_key] = copy.deepcopy(curr_dict)\n curr_dict = copy.deepcopy(next_parent_dict)\n curr_parent_key = next_parent_key\n q = q - 1\n\n return curr_dict", "title": "" }, { "docid": "5ba44032e11b876f371216a1fdf5d2c6", "score": "0.58744967", "text": "def removereadonlyprops(self, currdict, emptyraise=False, \\\r\n removeunique=True, specify_props=None):\r\n try:\r\n type_str = self.current_client.monolith._typestring\r\n currtype = currdict.get(type_str, None)\r\n oridict = copy.deepcopy(currdict)\r\n if specify_props:\r\n 
templist = specify_props\r\n else:\r\n templist = [\"Modified\", \"Type\", \"Description\", \"Status\",\\\r\n \"links\", \"SettingsResult\", \"Attributes\", \\\r\n \"@odata.context\", \"@odata.type\", \"@odata.id\",\\\r\n \"@odata.etag\", \"Links\", \"Actions\", \\\r\n \"AvailableActions\", \"BiosVersion\"]\r\n #Attributes removed and readded later as a validation workaround\r\n currdict = self.iterateandclear(currdict, templist)\r\n iloversion = self.getiloversion()\r\n if not iloversion:\r\n return currdict\r\n _ = self.get_validation_manager(iloversion)\r\n self.validationmanager.validatedict(currdict, currtype=currtype, \\\r\n monolith=self.monolith, unique=removeunique, searchtype=None)\r\n if oridict.get(\"Attributes\", None):\r\n currdict[\"Attributes\"] = oridict[\"Attributes\"]\r\n return currdict\r\n except:\r\n if emptyraise is True:\r\n raise EmptyRaiseForEAFP()\r\n elif emptyraise == 'pass':\r\n pass\r\n else:\r\n raise", "title": "" }, { "docid": "acb77e1d1e27a93fd4fc51e348a92eb8", "score": "0.58584136", "text": "def strip_keys_with_value_none(d: Dict[str, Any]) -> Dict[str, Any]:\n return {k: v for k, v in d.items() if v is not None}", "title": "" }, { "docid": "393fb9400d5998a6d497ace296dc1aea", "score": "0.5846509", "text": "def _normalize_dynamo_response(response):\n normalized_dict = {}\n for item in response:\n line = item.get('line')\n del item['line']\n normalized_dict[line] = item\n return normalized_dict", "title": "" }, { "docid": "c0938e21973293dd64a83baaca638b50", "score": "0.5814996", "text": "def clean_up(self):\n self.state = {}", "title": "" }, { "docid": "2144ff8e329951134d20b3f7de348ebb", "score": "0.58128834", "text": "def clean_log(log_d: dict, key_blacklist: Tuple[str, str] = ('image', 'result_ix')) -> dict:\n cleaned = {}\n for k, val in log_d.items():\n if k in key_blacklist:\n continue\n clean_v = clean_log(val) if isinstance(val, dict) else clean_value(val)\n cleaned[clean_key(k)] = clean_v\n return cleaned", "title": "" }, { "docid": "0008e06f9b8ce1ff90e872400729e3fc", "score": "0.58124864", "text": "def normalize(data):\n reps = {\n \"name\": \"resourceName\",\n \"uuid\": \"resourceUUID\",\n }\n return {reps.get(k, k): v for k, v in data.items()}", "title": "" }, { "docid": "5bbec6d03f21cb1034fc97267ff4b5df", "score": "0.5801275", "text": "def _prepare_payload(\n payload: dict[str, Any],\n exclude: Collection[str] | None = None,\n ) -> dict[str, Any]:\n payload = payload.copy()\n exclude = exclude or []\n kwargs = payload.pop(\"kwargs\", None)\n if kwargs:\n payload.update(kwargs)\n\n return {\n camel_case(k): _convert_value(v)\n for k, v in payload.items()\n if k not in {\"self\", \"cls\", *exclude} and v is not None\n }", "title": "" }, { "docid": "fefda3f03372c26c6bc94bfdfa41aaac", "score": "0.5801156", "text": "def clean_dict(dictionary):\n from datetime import datetime \n\n newd = {}\n newd.update(dictionary)\n\n # convert num_liked to an int \n newd['num_liked'] = int(dictionary['num_liked'])\n\n # convert datetime to MM-DD-YYYY format\n format_string = \"%a %b %d %H:%M:%S PDT %Y\"\n datetime_obj = datetime.strptime(dictionary['date'], format_string)\n newd['date'] = datetime_obj.strftime(\"%m-%d-%Y\")\n \n return newd", "title": "" }, { "docid": "c7c3a2ff6c90f5922b3626c35fd7c80e", "score": "0.58001924", "text": "def sanitize_data(fields: Iterable[str], data: Dict) -> Dict:\n tmp = {}\n if 'token' in data:\n del data['token']\n\n for column in fields:\n if column in data:\n tmp[column] = data[column]\n return tmp", "title": "" }, { "docid": 
"3f572cd70453568f40a8da8d7482a11f", "score": "0.5789297", "text": "def _asdict(self): # reliably restored by inspect\n pass", "title": "" }, { "docid": "3f572cd70453568f40a8da8d7482a11f", "score": "0.5789297", "text": "def _asdict(self): # reliably restored by inspect\n pass", "title": "" }, { "docid": "3f572cd70453568f40a8da8d7482a11f", "score": "0.5789297", "text": "def _asdict(self): # reliably restored by inspect\n pass", "title": "" }, { "docid": "cd7ba3a2fcca7ef04c48f99860e94620", "score": "0.5787689", "text": "def _normalize(d):\n newd = {}\n if not isinstance(d, dict):\n return d\n # if dictionary. iterate over each element and append to newd\n for k, v in d.iteritems():\n if isinstance(v, dict):\n first_key = next(iter(v.viewkeys()))\n if isinstance(first_key, int):\n temp_new = []\n for k1, v1 in v.items():\n temp_new.append(_normalize(v1))\n newd[k] = temp_new\n elif first_key == '':\n newd[k] = v.values()[0]\n else:\n newd[k] = _normalize(v)\n else:\n newd[k] = v\n return newd", "title": "" }, { "docid": "e958194b89ae00933ddd4793f2c74c29", "score": "0.57820725", "text": "def filter_dict(dict_obj):\n ignore_keys = ['_id', 'currentId']\n new_dict = {key: dict_obj[key] for key in dict_obj if key not in ignore_keys}\n return new_dict", "title": "" }, { "docid": "f0ff7286e0fb9a47a82bfd632565ea7b", "score": "0.5777674", "text": "def __clean_entry(entry):\n for key, value in entry.items():\n if not value:\n entry[key] = None\n else:\n if isinstance(value, str):\n value = value.strip()\n if key == 'price' and not isinstance(value, float):\n entry[key] = float(value)\n if key == 'in_stock' and not isinstance(value, bool):\n if value.lower() in ('n', 'no'):\n entry[key] = False\n elif value.lower() in ('y', 'yes'):\n entry[key] = True\n else:\n entry[key] = None\n return entry", "title": "" }, { "docid": "f3634d56c23b2d9f6298dfd13078d12e", "score": "0.5761571", "text": "def clean_dict_gen(universe_dict, verbose=True):\n cleaned_dict = {}\n if verbose:\n print(\"Included Instrument:\")\n\n for df_name in universe_dict:\n if verbose:\n print(df_name)\n\n cleaned_dict[df_name] = clean_data(universe_dict[df_name])\n\n return cleaned_dict", "title": "" }, { "docid": "52fe9e968e07a1972d1930fc58dfd798", "score": "0.5759035", "text": "def _to_dict_no_nest(self, *, exclude: Container[str] = ()) -> dict:\n out = recursive_to_dict(self, exclude=exclude, members=True)\n # Remove all Nones and empty containers\n return {k: v for k, v in out.items() if v}", "title": "" }, { "docid": "5533e071e8d9cc9680febc2c9c871129", "score": "0.5753526", "text": "def remove_null_values(obj):\n if not isinstance(obj, dict):\n return obj\n\n keys = obj.keys()\n for k in keys:\n _obj = obj[k]\n if _obj is None:\n del obj[k]\n elif isinstance(obj[k], dict):\n remove_null_values(obj[k])\n\n return obj", "title": "" }, { "docid": "cb2b5a5531499d71510c6de97c0160e3", "score": "0.5743841", "text": "def cleanup(self):\n\t\tfor dict in (self.rule2func, self.rules, self.rule2name, self.first):\n\t\t\tfor i in dict.keys():\n\t\t\t\tdict[i] = None\n\t\tfor i in dir(self):\n\t\t\tsetattr(self, i, None)", "title": "" }, { "docid": "ee23f85890360e9b10baffc7c287aeda", "score": "0.5741235", "text": "def trim_state_dict(complete_dict, trim_key):\n trimmed = {\n k.replace(trim_key+'.', ''):complete_dict[k] \n for k in complete_dict if k.startswith(trim_key)\n } \n \n # The resulting dictionary can be used to load a part of a model. 
\n return trimmed", "title": "" }, { "docid": "9cc24f5afdc768e4c8322216814dafd0", "score": "0.5740704", "text": "def break_data_into_smalled_dict(data, parsing_dict):\r\n while type(data) != dict or type(list(data.values())[0]) not in ACCEPTABLE_TYPES:\r\n if type(data) == str and data in parsing_dict and type(parsing_dict[data]) in ACCEPTABLE_TYPES:\r\n data = {data: parsing_dict[data]}\r\n elif type(data) == str and data in parsing_dict and type(parsing_dict[data]) == list:\r\n data = parsing_dict[data][0]\r\n elif type(data) == str and data in parsing_dict and type(parsing_dict[data]) == dict and len(parsing_dict[data]) != 0:\r\n data = parsing_dict[data]\r\n elif type(data) == str and data in parsing_dict and type(parsing_dict[data]) == dict and len(parsing_dict[data]) == 0:\r\n data = {'3h': 0.0}\r\n else:\r\n raise ValueError('Unable to break data into smallest_dict ' +\r\n 'data: ' + str(data))\r\n\r\n return data", "title": "" }, { "docid": "ac1a387b1c394157bad46bd16f7dbbd2", "score": "0.5737467", "text": "def cleanup_key(self, key, value):\n pass", "title": "" }, { "docid": "f305e7677c0920b12432e19a0e0cd593", "score": "0.57356894", "text": "def clean_data(json_data):\n\n return json_data", "title": "" }, { "docid": "4c148e7894e46f56ac7109f8cba15130", "score": "0.5722453", "text": "def dict_strip_unicode_keys(uni_dict):\n if six.PY3:\n return uni_dict\n\n return dict([(smart_bytes(key), value,) for key, value in uni_dict.items()])", "title": "" }, { "docid": "d1f9ca66578b7f8f22ec8118d30c14d8", "score": "0.57197326", "text": "def remove_sensitive_params(data):\n for key, value in data.items():\n if any(x in key for x in SENSITIVE_PARAMS):\n data[key] = None \\\n if isinstance(value, dict) \\\n else SENSITIVE_PARAM_PLACEHOLDER\n elif isinstance(value, dict):\n remove_sensitive_params(value)\n\n return data", "title": "" }, { "docid": "8e2ff5b035529394e557832005d9b718", "score": "0.57107073", "text": "def clean_dict(attrs):\n import numpy as np\n for attr, item in attrs.items():\n if isinstance(attr, (six.binary_type, six.text_type)):\n attr = str(attr)\n if isinstance(item, list):\n clean_item = []\n for a in item:\n if isinstance(a, (six.binary_type, six.text_type)):\n clean_item.append(str(a))\n else:\n clean_item.append(a)\n attrs[attr] = clean_item\n elif isinstance(item, (six.binary_type, six.text_type)):\n attrs[attr] = str(item)\n elif isinstance(item, np.ndarray):\n attrs[attr] = [a for a in item]\n else:\n attrs[attr] = item\n\n return attrs", "title": "" }, { "docid": "761c3051de2c616bd37b021f13b0929c", "score": "0.57083625", "text": "def _clean_args(arg_dict):\n clean_args = {}\n for k, v in arg_dict.iteritems():\n if k not in IGNORED_ARGS:\n clean_args[k] = v\n return clean_args", "title": "" }, { "docid": "7d9eb07eedd34835a2ca8c618718a525", "score": "0.56941676", "text": "def _asdict(self):\n temp_dict = self.actrchunk._asdict()\n dictionary = {re.sub(\"_$\", \"\", key): temp_dict[key] for key in temp_dict}\n return dictionary", "title": "" }, { "docid": "99f3a483a83943d4bfccaccac78a7cc3", "score": "0.569106", "text": "def cleanup_verbaliser():\n verbaliser.qa = {}\n verbaliser.phrases = {}", "title": "" }, { "docid": "000c915bc65dbf1a1dfb627c617316db", "score": "0.5682849", "text": "def iterateandclear(self, dictbody, proplist):\r\n if isinstance(dictbody, dict):\r\n _ = [dictbody.pop(key) for key in proplist if key in dictbody]\r\n for key in dictbody:\r\n dictbody[key] = self.iterateandclear(dictbody[key], proplist)\r\n if isinstance(dictbody, list):\r\n for ind, val 
in enumerate(dictbody):\r\n dictbody[ind] = self.iterateandclear(val, proplist)\r\n return dictbody", "title": "" }, { "docid": "3dd5204d0e3b3ef7e6d9daa4087d6101", "score": "0.56798685", "text": "def filter_out_none_valued_keys(self, d):\n # type: (typing.Dict[K, V]) -> typing.Dict[K, V]\n new_d = {}\n for k, v in d.items():\n if v is not None:\n new_d[k] = v\n return new_d", "title": "" }, { "docid": "a03fedf15c78f8138c0e7d5c20d3b06c", "score": "0.5654078", "text": "def clean_kwargs(kwargs):\n\n return {k: v for k, v in kwargs.iteritems() if not k.startswith(\"__\")}", "title": "" }, { "docid": "2d23451d3d3acae6d7dabdf3dc1c9417", "score": "0.56375706", "text": "def clean_report(report):\n return {\n \"reportID\": report.get('reportID'),\n \"reportname\": report.get('reportname'),\n \"createtime\": report.get('createtime'),\n \"creator\": report.get('creator'),\n \"start_datetime\": report.get('start_datetime'),\n \"end_datetime\": report.get('end_datetime'),\n \"selectedJobs\": report.get('selectedJobs'),\n \"filter_by\" : report.get('filter_by'),\n 'allWords': report.get('allWords'),\n 'anyWords': report.get('anyWords'),\n 'noneWords': report.get('noneWords'),\n 'data': report.get('data')\n }", "title": "" }, { "docid": "dc9006672cdce00becc29337c320ccb8", "score": "0.56286037", "text": "def restore_empty_strings(data):\n for i, key in enumerate(data):\n if isinstance(data, list):\n if key == \" \":\n data[i] = \"\"\n if isinstance(key, Decimal):\n data[i] = int(data[i])\n if isinstance(key, (dict, list)):\n data[i] = restore_empty_strings(data[i])\n elif isinstance(data, dict):\n if data[key] == ' ':\n data[key] = ''\n if isinstance(data[key], Decimal):\n data[key] = int(data[key])\n if isinstance(data[key], (dict, list)):\n data[key] = restore_empty_strings(data[key])\n return data", "title": "" }, { "docid": "4a5ca4de5deb9b0196eeb56c6e42eb54", "score": "0.5614425", "text": "def update_dict(self, _dict):\n check = {}\n flat_headers = self.flat_headers()\n for header in flat_headers:\n try:\n check[header] = _dict[header]\n except KeyError:\n check[header] = None\n return check", "title": "" } ]
09886154c61e5e66c4e93fe553650ee5
Average a list of datapoints. A list with no non-None items has an average of zero.
[ { "docid": "c6931123be8acf7dc94fed2857ccc05c", "score": "0.6805623", "text": "def avg(dpList):\n if not dpList:\n return 0.0\n\n dpList = [x for x in dpList if x is not None]\n if not dpList:\n return 0.0\n\n return sum(dpList) / len(dpList)", "title": "" } ]
[ { "docid": "98ec3fc8563d74ec906d33c57f0247cd", "score": "0.76778173", "text": "def mean(lst):\n lst = [elem for elem in lst if elem is not None]\n return np.mean(np.array(lst)) if len(lst) > 0 else 0", "title": "" }, { "docid": "4fb533ab645d793c19574b8c589dd3a4", "score": "0.7548929", "text": "def avg(l: List[float]):\n if len(l) == 0:\n return None\n else:\n return sum(l)/len(l)", "title": "" }, { "docid": "6a7481b159fa8cf3b8b36d674ca954fc", "score": "0.73499876", "text": "def average(values):\n return sum(values, 0.0) / len(values) if values else None", "title": "" }, { "docid": "55e6dafc41e2f29de18e51c6f65d44d6", "score": "0.7311388", "text": "def average(values: List[float]) -> float:\n count = 0 # The number of values seen so far.\n total = 0 # The sum of the values seen so far.\n for value in values:\n if value is not None:\n total += value\n\n count += 1\n\n return total / count", "title": "" }, { "docid": "9b876e5c3c0478dd0e85f82ebc539119", "score": "0.7225258", "text": "def avg(xs):\n assert len(xs) > 0\n return sum(xs) / len(xs)", "title": "" }, { "docid": "f64975ca1c38ae157fbe426e38e9ae9d", "score": "0.7205635", "text": "def get_average_of_list(inputs, weights=[]):\n ave = 0\n for val in inputs:\n ave += val\n if len(inputs) == 0:\n return None\n return ave/len(inputs)", "title": "" }, { "docid": "1809ec1a63d9359e56277415cb15b91d", "score": "0.7067797", "text": "def average(list):\n if len(list) == 0:\n return 0\n else:\n return sum(list) / len(list)", "title": "" }, { "docid": "fa85bf8209bb32f06a468c498dd6f110", "score": "0.7066334", "text": "def aggregate_avg(values):\n length = len(values)\n if length is 0:\n return None\n length_iter = range(length)\n s = 0\n nones = 0\n for i in length_iter:\n if values[i] is None:\n length -= 1\n nones += 1\n if nones > length:\n return None\n else:\n s += values[i]\n agg = float(s) / length\n return agg", "title": "" }, { "docid": "baba2997cd0a1fc9f91a7c3c0b4f4ff6", "score": "0.7054685", "text": "def avg(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/float(n)", "title": "" }, { "docid": "ba6f073f3ee015d906e6399418ee9c92", "score": "0.7039059", "text": "def mean(l, ignore_nan=False, empty=0):\r\n l = iter(l)\r\n if ignore_nan:\r\n l = filterfalse(np.isnan, l)\r\n try:\r\n n = 1\r\n acc = next(l)\r\n except StopIteration:\r\n if empty == 'raise':\r\n raise ValueError('Empty mean')\r\n return empty\r\n for n, v in enumerate(l, 2):\r\n acc += v\r\n if n == 1:\r\n return acc\r\n return acc / n", "title": "" }, { "docid": "5f0d65e9952c5b91c3518efbce774717", "score": "0.6915473", "text": "def mean(lst):\n length = len(lst)\n return sum(lst)/float(length) if length != 0 else None", "title": "" }, { "docid": "61caf14fd5ce321bbd8f2d7871502f89", "score": "0.6896318", "text": "def _mean_or_nan(xs: Sequence[float]) -> float:\n return typing.cast(float, np.mean(xs)) if xs else np.nan", "title": "" }, { "docid": "343ab1bc11c4c41163005d2fd1f8efb3", "score": "0.68865097", "text": "def average(values):\r\n return sum(values) / len(values) if values else 0", "title": "" }, { "docid": "ab943db4f267f4adf75a9203ccdbc029", "score": "0.68466467", "text": "def mean(l):\n if len(l) == 0:\n return 0\n \n return float(sum(l)) / len(l)", "title": "" }, { "docid": "23a0d6ad40aa87886e3c2a58f4b812ca", "score": "0.67972934", "text": "def mean(data: tuple or list)->float:\n checker(data)\n return sum(data)/len(data)", "title": "" }, { "docid": "bd53c1cfc19a31792046bb3673fbffed", "score": "0.67897373", 
"text": "def average(data):\n return sum(data) / len(data)", "title": "" }, { "docid": "c5aece1f5a47cbed28cea615a8a213a8", "score": "0.6744001", "text": "def mean(values: Iterable[float]) -> float:\n return sum(values) / len(values)", "title": "" }, { "docid": "404dcdfcd72d9fa65d99f4b41526402e", "score": "0.67413205", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/float(n) # in Python 2 use sum(data)/float(n)", "title": "" }, { "docid": "404dcdfcd72d9fa65d99f4b41526402e", "score": "0.67413205", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/float(n) # in Python 2 use sum(data)/float(n)", "title": "" }, { "docid": "5a11efbaeaa595dc25a29bd3e67e00b3", "score": "0.6738899", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n #return sum(data)/n # in Python 2 use sum(data)/float(n)\n return sum(data)/float(n)", "title": "" }, { "docid": "de0d1e030b5f94077b678ee7aa9af9c6", "score": "0.6727487", "text": "def average(list_):\n return sum(list_) / float(len(list_))", "title": "" }, { "docid": "b1ba2246dd0452f7c0a45e982899ed53", "score": "0.67231387", "text": "def _mean_or_nan(xs):\n return np.mean(xs) if xs else np.nan", "title": "" }, { "docid": "e1f3de0321a248153f3cb4e6571e2a75", "score": "0.6691033", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/float(n)", "title": "" }, { "docid": "8e0b9e7213d98006e73f07fa7466f029", "score": "0.6681495", "text": "def compute_average(data):\n sample_size = data.size\n return np.sum(data, axis=0) / len(data) if sample_size != 0 else None", "title": "" }, { "docid": "e5d064ad2c36a38473c5ae7eb29825f6", "score": "0.6678026", "text": "def makeAvgArray(lst):\n arraySize = len(lst[0][0])\n allData = np.array(lst[0][0])\n for i in range(1,len(lst)):\n if arraySize != len(lst[i][0]):\n print(\"Problem with array sizes\")\n else:\n allData = np.dstack((allData,lst[i][0]))\n \n avgData = allData.mean(axis=2,dtype=np.float64)\n return avgData", "title": "" }, { "docid": "1fd7770cbf6790b0cfafd8996ab07d7f", "score": "0.6676062", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)", "title": "" }, { "docid": "1fd7770cbf6790b0cfafd8996ab07d7f", "score": "0.6676062", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)", "title": "" }, { "docid": "1fd7770cbf6790b0cfafd8996ab07d7f", "score": "0.6676062", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)", "title": "" }, { "docid": "1fd7770cbf6790b0cfafd8996ab07d7f", "score": "0.6676062", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data)/n # in Python 2 use sum(data)/float(n)", "title": "" }, { "docid": "3b771d4bd200d4e885f1582b99ba98e8", "score": "0.66725695", "text": "def mean(data):\r\n n = len(data)\r\n if n < 1:\r\n raise ValueError('mean requires at least one data point')\r\n return sum(data)/n # in Python 2 use sum(data)/float(n)\r", "title": "" }, { "docid": 
"4495ea460f357438011a8876cbd9658a", "score": "0.66668016", "text": "def mean(ll, ignore_nan=False, empty=0):\n l = iter(ll)\n if ignore_nan:\n l = ifilterfalse(isnan, l)\n try:\n n = 1\n acc = next(l)\n except StopIteration:\n if empty == 'raise':\n raise ValueError('Empty mean')\n return empty\n for n, v in enumerate(l, 2):\n acc += v\n if n == 1:\n return acc\n return acc / n", "title": "" }, { "docid": "d626ce36ca5b2410fbca5d34e001716b", "score": "0.6661905", "text": "def mean(data_set):\n try:\n # Check to see if list has more than one number\n if len(data_set) < 1:\n return None\n\n else:\n\n # Adds all the numbers in the list and then divides that by the amount of numbers in the list and\n # rounds to two decimals to avoid python rounding errors\n return round(sum(data_set) / len(data_set), 2)\n\n # Raises type error if a list of integers was not found\n except TypeError:\n raise TypeError(\"Error: please input a list of integers\")", "title": "" }, { "docid": "f1738d0d81ce99b736b46c8d84d8d7fd", "score": "0.66603774", "text": "def mean(data):\r\n n = len(data)\r\n if n < 1:\r\n return 0\r\n return sum(data)/float(n)", "title": "" }, { "docid": "b7a8782843544655fa6273ab21d18b23", "score": "0.6657961", "text": "def mean(data):\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data) / n # in Python 2 use sum(data)/float(n)", "title": "" }, { "docid": "4688e27b19b53015062ae0302adcab45", "score": "0.6651893", "text": "def average(values):\n\treturn sum(values)/len(values)", "title": "" }, { "docid": "6ecd78bb01109afe179f2eb76e07a7fe", "score": "0.6632757", "text": "def _listMean(self, key, val):\n if val is None:\n return 0\n else:\n try:\n return float(val)\n except:\n if len(val) > 0:\n return mean(val)\n else:\n return 0", "title": "" }, { "docid": "321a1323e8f1e495cca911bfc6b845ec", "score": "0.6603475", "text": "def calculate_avg(list_of_data: list) -> list:\n longest_row = max([len(row) for row in list_of_data])\n\n performance_sums = [0 for i in range(longest_row)]\n num_of_ratings_per_cell = [0 for i in range(longest_row)]\n\n for row in list_of_data:\n for i in range(len(row)):\n performance_sums[i] += row[i]\n num_of_ratings_per_cell[i] += 1\n\n avgs = [performance_sums[i]/num_of_ratings_per_cell[i] for i in range(longest_row)]\n\n return avgs", "title": "" }, { "docid": "192d12d6b322c0587aefa97f52a76b58", "score": "0.6601502", "text": "def average(values):\n return sum(values) / len(values)", "title": "" }, { "docid": "28d098aaf4a1e7d7684c6040fa89423d", "score": "0.65982014", "text": "def avg(l):\n return sum(l) / float(len(l))", "title": "" }, { "docid": "cfb1b15f63a5ebe945ed6a3945b66f84", "score": "0.65977556", "text": "def avg(lst: list) -> float:\n return sum(lst) / len(lst)", "title": "" }, { "docid": "5ff969cb804b87c31d3374283a187528", "score": "0.6594997", "text": "def average(a):\r\n return sum(a,0.0)/len(a)", "title": "" }, { "docid": "4ce5b650eed38d70a7d12d70da36dc5d", "score": "0.65777886", "text": "def average(list_of_numbers):\n pass", "title": "" }, { "docid": "169ef6060fca7202f5294284892771a5", "score": "0.6568195", "text": "def average(values):\n return sum(values) / len(values)", "title": "" }, { "docid": "169ef6060fca7202f5294284892771a5", "score": "0.6568195", "text": "def average(values):\n return sum(values) / len(values)", "title": "" }, { "docid": "7d16f8b68992d7bcdc4051f445d5a03e", "score": "0.65492606", "text": "def average(ratings):\n if ratings:\n ratings = filter(lambda x: x is not None, ratings)\n 
if ratings:\n total = reduce(operator.add, ratings)\n if total != None:\n return float(total)/len(ratings)\n return None", "title": "" }, { "docid": "5811404f7e1d8c175ea67c539bceddd3", "score": "0.6524418", "text": "def average(l):\r\n return sum(l)/len(l)", "title": "" }, { "docid": "6d09aca4457b5d72aca385c6022eee90", "score": "0.64944243", "text": "def average(lst):\n acc = 0.0\n for i in lst:\n acc += float(i)\n return acc / len(lst)", "title": "" }, { "docid": "5e254c1fd51847d9d4676379efe64cc5", "score": "0.6486464", "text": "def get_weighted_average_of_list(inputs, weights=[]):\n ave = 0\n for i, val in enumnerate(inputs):\n if weights[i] > 1 or weights[i] < 0:\n msg = f\"weight value of {weights[i]} at index {i} \"\n msg += \"is not between 0 and 1\"\n raise ValueError(msg)\n ave += val * (1 - weights[i])\n if len(inputs) == 0:\n return None\n return ave/len(inputs)", "title": "" }, { "docid": "971f885b4664ef792c594493b248595a", "score": "0.6485874", "text": "def average(self):\n if not self:\n raise(\"empty list\")\n \n av = self[0]\n\n for d in self[1:]:\n av.u += d.u\n av.v += d.v\n \n av.u /= len(self)\n av.v /= len(self)\n\n return av", "title": "" }, { "docid": "52de17aa2b2096e1f8a3771420fd93fd", "score": "0.64672613", "text": "def mean(l):\n\treturn sum(l) / len(l)", "title": "" }, { "docid": "e363407b8dd2f4119887a9c4dfebef89", "score": "0.6458074", "text": "def get_average(numbers=[]):\n sum = 0.0\n for number in numbers:\n sum += number\n avg = float(float(sum) / len(numbers))\n return avg", "title": "" }, { "docid": "c450d668056052152bdef86638425127", "score": "0.6454827", "text": "def average(*numbers):\n return sum(numbers) / (0.0 + len(numbers))", "title": "" }, { "docid": "1aedb0955b1954c411b82ff7273a90d3", "score": "0.64274716", "text": "def list_mean(Xs):\n if numexpr is not None:\n return numexpr_list_mean(Xs)\n else:\n return arith_list_mean(Xs)", "title": "" }, { "docid": "f7f7caedae90979a6c5f540b0a461877", "score": "0.6425404", "text": "def avgList(self, listInput):\n\n return sum(listInput)/len(listInput)", "title": "" }, { "docid": "b8aaefcd373f99a5bb1c8e5fcaab1cde", "score": "0.64081883", "text": "def mean(values, default=0):\n if len(values) == 0:\n return default\n return sum(values) / len(values)", "title": "" }, { "docid": "1fecdc5b115f661f59f187511a1abfd0", "score": "0.64041656", "text": "def mean(iterable: Collection[float]) -> float:\n return sum(iterable) / len(iterable)", "title": "" }, { "docid": "ed31d683462a1243ea50698e9d2a83fa", "score": "0.6395621", "text": "def avg(cls, data: Dict[datetime, float]) -> float:\n _filtered_data = cls.get_filtered_data(data)\n return BuiltinUtils.return_if_true(\n sum(_filtered_data.values()) / len(_filtered_data),\n _filtered_data,\n BuiltinUtils.Float.NAN,\n )", "title": "" }, { "docid": "bd9abadcb492fc6cb22e1719cfb872a0", "score": "0.6369179", "text": "def mean(list):\n return float(sum(list)) / max(len(list), 1)", "title": "" }, { "docid": "36ffd574fce726e4a0d98b692fd59ea0", "score": "0.63512534", "text": "def get_mean(l):\n return sum(l) / len(l)", "title": "" }, { "docid": "d8f31ecf9e4a1a7f27fd54f2f56d4660", "score": "0.6326443", "text": "def mean(arrays, axis=-1, ignore_nan=False):\n total_sum, total_count = last(\n _iaverage(arrays, axis, weights=None, ignore_nan=ignore_nan)\n )\n return total_sum / total_count", "title": "" }, { "docid": "847684f23aabd0cc0ac60d4cdf7cfc41", "score": "0.6324834", "text": "def list_avg(l):\n \n l.sort()\n return sum(l[1:-1])/float(len(l[1:-1]))", "title": "" }, { "docid": 
"1e81ad1468f0a21d295a3bec7f7a60fd", "score": "0.63038397", "text": "def average(self, list):\r\n return round(sum(list) / len(list) / 1000, 2)", "title": "" }, { "docid": "710d3f94c35f9fabeaf0fb7e693f15fb", "score": "0.62886286", "text": "def average(iterable):\n return sum(iterable) / float(len(iterable))", "title": "" }, { "docid": "7ed36c45ce039e2338b17a089c5d512a", "score": "0.62834907", "text": "def meanCalculation(data):\n return sum(data)/len(data)", "title": "" }, { "docid": "c27ccbadf4b4f3e747fc96f782367bfc", "score": "0.6279788", "text": "def average(num_list):\r\n sum_of_list = 0\r\n number_count = 0\r\n if num_list == []:\r\n return None\r\n for i in num_list:\r\n sum_of_list += i\r\n number_count += 1\r\n return sum_of_list / number_count", "title": "" }, { "docid": "402527f288c94618bc1ba595e6c3b50a", "score": "0.62768185", "text": "def get_average(data):\n return sum(copy.copy(data))/len(data)", "title": "" }, { "docid": "99200e63a397e00396385bccd5c56364", "score": "0.6276517", "text": "def avg_stats(list_of_stats):\n arr = np.array(list_of_stats)#[N,9]\n N = len(list_of_stats)\n mean = arr.mean(axis = 0)#[9]\n return mean.tolist()", "title": "" }, { "docid": "a6ebc1657032e16206871f6268906d95", "score": "0.6272452", "text": "def average(arrays, axis=-1, weights=None, ignore_nan=False):\n total_sum, total_weight = last(_iaverage(arrays, axis, weights, ignore_nan))\n with catch_warnings():\n simplefilter(\"ignore\", category=RuntimeWarning)\n return np.true_divide(total_sum, total_weight)", "title": "" }, { "docid": "84108f298464424cbe4fe1b453421193", "score": "0.624582", "text": "def test_mean(self):\n items = []\n result = utils.mean(items)\n self.assertEqual(result, 0)\n\n items = [1, 2, 3]\n result = utils.mean(items)\n self.assertEqual(result, 2.0)\n\n items = [-1, 1]\n result = utils.mean(items)\n self.assertEqual(result, 0.0)\n\n items = [-3, -2, -1]\n result = utils.mean(items)\n self.assertEqual(result, -2.0)\n\n items = [1.8, 2.1, 3.7, 4.3]\n result = utils.mean(items)\n self.assertEqual(result, 2.975)", "title": "" }, { "docid": "92b13f1019cdea3e0262b51daea1c7ad", "score": "0.62309474", "text": "def mean(values):\n\treturn (sum(values)/float(len(values)))", "title": "" }, { "docid": "567c0faf0c368ae3ee9b4fe170708a6a", "score": "0.6220864", "text": "def mean(iterable):\n total = 0.0\n qte = 0\n for x in iterable:\n total += x\n qte += 1\n if qte == 0:\n return None\n else:\n return total / qte", "title": "" }, { "docid": "365593ad63825cd999a5a6258d669bc3", "score": "0.62186307", "text": "def mean(list):\r\n total = 0\r\n\r\n for item in list:\r\n total += item\r\n\r\n return total / len(list)", "title": "" }, { "docid": "39fac930bf9fab7cc79dc6b32c4b5cb3", "score": "0.62060416", "text": "def hmean(*args):\n for val in args:\n if val <= 0:\n return float('NaN')\n return len(args) / sum(1. 
/ val for val in args)", "title": "" }, { "docid": "60914ff23b78d8abcb079be51cc8710f", "score": "0.620433", "text": "def average(*args):\n return reduce(lambda x, y: x+y, args) / float(len(args))", "title": "" }, { "docid": "a072c166177699e6c99a9f9574a13e64", "score": "0.6168294", "text": "def mean(x):\n\tif not type(x) == numpy.ndarray:\n\t\treturn None;\n\tsum = 0\n\tfor nb in x:\n\t\tsum += nb\n\treturn float(sum/x.size)", "title": "" }, { "docid": "4f25486b92fac91f8e6d2e2c5d205298", "score": "0.61645216", "text": "def average_value_zero_one(inp):\n return sum([int(i) for i in inp])/len(inp)", "title": "" }, { "docid": "5638cc608c6e3a4dbdffd6bb4a5a0928", "score": "0.616194", "text": "def no_inf_mean(x:torch.Tensor):\n\n no_inf = [a for a in x if torch.isfinite(a)]\n\n if len(no_inf) > 0:\n return sum(no_inf) / len(no_inf)\n else:\n return x.mean()", "title": "" }, { "docid": "0c3deae1819ce29fdcab633e8e0d336e", "score": "0.61579275", "text": "def __getaverage(self, li):\n self.logger.debug(\"Averaging a list of %s elements.\" % len(li))\n return sum(li) / len(li)", "title": "" }, { "docid": "b87bcba046cd01104f534789a2285a2e", "score": "0.6157474", "text": "def arithmetic_average_list(numbers):\n sum = 0\n for item in numbers:\n sum += item\n return sum / len(numbers)", "title": "" }, { "docid": "a5ee98b1672ea61221d41e0d56ec4bc7", "score": "0.6122226", "text": "def average(X):\n res = 0\n for x in X:\n res += x\n res = res / len(X)\n return res", "title": "" }, { "docid": "5ab65366f427662533bf24d4eeac1b50", "score": "0.61163324", "text": "def mean(mylist):\n if not isinstance(mylist, list):\n raise TypeError(\"Mean: %s is not a list!\" % mylist)\n\n return (sum(mylist) / len(mylist))", "title": "" }, { "docid": "64b849438d1c5bd215f2726352a5ee7d", "score": "0.61136794", "text": "def numexpr_list_mean(Xs):\n Xs_sum = numexpr_list_sum(Xs)\n N = len(Xs)\n return numexpr.evaluate('Xs_sum / N')", "title": "" }, { "docid": "f8c7f5d905c873499b7f39c440b9ea68", "score": "0.6112601", "text": "def point_avg(points):\n p_len = len(points)\n center = []\n\n for item in zip(*points):\n center.append(sum(item) / p_len)\n\n return center\n\n raise NotImplementedError()", "title": "" }, { "docid": "648ee86ade60ac140bdcc9f26a82bbfb", "score": "0.61125356", "text": "def compute_average(n):\r\n data = []\r\n start = time()\r\n for _ in range(n):\r\n data.append(None)\r\n end = time()\r\n return (end - start)/n", "title": "" }, { "docid": "d1da55bc470d24b4e3cb3d6dd7491155", "score": "0.6111796", "text": "def mean_points(self, **kwargs):\n if not self._points:\n self.get_points()\n return MathUtils.mean(self._points)", "title": "" }, { "docid": "3c2eac08a3e46bf43dd2a6156d5aac3e", "score": "0.6098111", "text": "def normed_to_average(self, data=None):\n if data is None:\n data = self.get_raw_data()\n series_list = []\n for i in data:\n series_list.append(data[i] / data[i].mean())\n return pandas.concat(series_list, axis=1)", "title": "" }, { "docid": "a5a7af565c5ea4d50fde37eb3060e81a", "score": "0.6087566", "text": "def avg(x):\n \n return(sum(x)/len(x))", "title": "" }, { "docid": "ed57710458d691fd57c037fc8c4ce3c9", "score": "0.6080336", "text": "def mean(L):\n if L == []:\n return []\n return (reduce(add_numerator, L))/len(L)", "title": "" }, { "docid": "bdc7a8a0b344375aa72c6925edb04781", "score": "0.6071401", "text": "def mean(data):\n a = 1.0 * np.array(data)\n return np.mean(a)", "title": "" }, { "docid": "e92bb058fce71125abf58511ad9e2233", "score": "0.60535836", "text": "def find_mean(values):\r\n return 
sum(values)/len(values)", "title": "" }, { "docid": "1679f70dc68375ea6fd6eb660ad0a26f", "score": "0.60489804", "text": "def gmean_points(self, **kwargs):\n if not self._points:\n self.get_points()\n pts = filter(lambda x: x != 0, self.normalize_points())\n return MathUtils.gmean(pts)", "title": "" }, { "docid": "3f4ee351b15fe60f202d8b3909fd0beb", "score": "0.60476166", "text": "def mean(self) -> float:\n return float(np.nanmean(self.items[: self.count]))", "title": "" }, { "docid": "71b12e34ef3d18f2f041b6bd89359da5", "score": "0.6039922", "text": "def sliding_average(value_list: List[float], window: int) -> float:\n if len(value_list) == 0:\n raise ValueError(\"Cannot perform sliding average on an empty list.\")\n return np.asarray(value_list[-window:]).mean()", "title": "" }, { "docid": "18f959aafb0ef2588311a373c896efdc", "score": "0.60232854", "text": "def average(data):\n\n if MEAN:\n return np.ma.mean(data)\n elif MEDIAN:\n return np.ma.median(data)\n\n # average", "title": "" }, { "docid": "9934895dd2c0608bac0dba184af7e33e", "score": "0.6022202", "text": "def avg(*args):\r\n total, N = accumulate(args)\r\n return float(total) / N", "title": "" }, { "docid": "e5d446a5a8ed940b2a7dd90633ca0351", "score": "0.6015123", "text": "def getMean(myList): \n mysum = 0.0\n for i in range(len(myList)):\n mysum = mysum + myList[i]\n mymean = mysum/len(myList)\n return mymean", "title": "" }, { "docid": "c84402348dc4734cc2f42cfff62d03e9", "score": "0.6007178", "text": "def average(numbers):\n return sum(numbers) / len(numbers)", "title": "" }, { "docid": "304a8ed2c9ef58ed766505371508f42a", "score": "0.59993064", "text": "def mean(x):\n return sum(x) / len(x)", "title": "" }, { "docid": "fc0be89b34eca62a1f6b2122a92e8c58", "score": "0.5994471", "text": "def average(lst):\n # def Average(lst):\n return sum(lst) / len(lst)", "title": "" }, { "docid": "904a2721e5e8dee1ce2fe5b5401442e3", "score": "0.59940207", "text": "def mean(arr):\n return sum(arr) / len(arr)", "title": "" } ]
75c9178eeb21d213ee9acfa8e3d59fe1
My customized model function
[ { "docid": "64c8770a14f07d7dcfe1534d34cbc624", "score": "0.0", "text": "def my_model_backward(labels, fc_filters, reg_scale, conv1d_filters, filter_channel_list ):\n \n ##Record the variables before Backwardmodel is created\n BeforeBackCollectionName = \"BeforeBack_Collection\"\n for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n tf.add_to_collection(BeforeBackCollectionName, var)\n print(\"Before Backward Model there is:\",tf.get_collection(BeforeBackCollectionName))\n \n ##Building the model\n with tf.name_scope(\"BackwardModel\"):\n print(\"Before convolution:\", labels)\n preConv = labels\n if conv1d_filters: #If this is not an empty list\n preConv = tf.expand_dims(preConv, axis=2)\n print(\"Your Preconv layer is\", preConv)\n for cnt, (filters_length, filter_channels) in enumerate(zip(conv1d_filters, filter_channel_list)):\n print('window Length {}, Number of Channels: {}'.format(filters_length, filter_channels))\n convf = tf.Variable(tf.random_normal([filters_length, preConv.get_shape().as_list()[-1], filter_channels]))\n preConv = tf.nn.conv1d(preConv, convf, stride = 1, padding='VALID',data_format = \"NWC\")\n print(\"At prev_conV level{} the precoV shape is {}\".format(cnt, preConv.get_shape()))\n backward_fc = tf.squeeze(preConv) #Remove the useless 1 dimension that was caused by the Conv\n print(\"After convolution:\",backward_fc)\n for cnt, filters in enumerate(fc_filters):\n backward_fc = tf.layers.dense(inputs=backward_fc, units=filters, activation=tf.nn.leaky_relu, \n name='backward_fc{}'.format(cnt),\n kernel_initializer=tf.random_normal_initializer(stddev=0.02),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=reg_scale))\n kernel = tf.get_default_graph().get_tensor_by_name('backward_fc{}/kernel:0'.format(cnt))\n tf.summary.histogram('backward_fc{}_weights'.format(cnt), kernel)\n backward_out = backward_fc\n merged_summary_op = tf.summary.merge_all()\n \n ##Take record of the variables that created\n BackCollectionName = \"Backward_Model_Collection\"\n for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n tf.add_to_collection(BackCollectionName, var)\n print(\"Backward_out.shape\", backward_out.shape)\n return backward_out, merged_summary_op, BackCollectionName, BeforeBackCollectionName", "title": "" } ]
[ { "docid": "1542ec9e58a9ab3a9bdbf240d0a86296", "score": "0.7477061", "text": "def model(self):", "title": "" }, { "docid": "89397b8e83794d799dce2718e3b00cd0", "score": "0.7177977", "text": "def inModel(self):\n \n pass", "title": "" }, { "docid": "539ad520dd5224f800c9eae3aee594f0", "score": "0.7173125", "text": "def run(self, model):", "title": "" }, { "docid": "f89b04e69ab1006a54648e9b5345bfcc", "score": "0.6997021", "text": "def custom_model(func):\n return __model_wrapper(func)", "title": "" }, { "docid": "fb47a4c8797e6f5c0f3c5f8adbc6b6de", "score": "0.6782033", "text": "def buildModel():", "title": "" }, { "docid": "33045bbce43564fff4fcbbd17e9a2223", "score": "0.67714113", "text": "def get_model(self):", "title": "" }, { "docid": "2bca7928f158236940703b825fd9df33", "score": "0.6746197", "text": "def learn_value_of(self, model):", "title": "" }, { "docid": "bac0fec10a3616df9b25580d3cc39b28", "score": "0.62356716", "text": "def get_model():\n pass", "title": "" }, { "docid": "c9b188d25dda6d073ee68db475e46974", "score": "0.62272197", "text": "def build_model(self):\n ...", "title": "" }, { "docid": "0534a2bee435a4d5e764fe1195621292", "score": "0.61925673", "text": "def _model_(self, records):\n # model = self.__model__(record)\n # return model\n raise NotImplementedError", "title": "" }, { "docid": "ed67576dc19cae169c77d0ab1c27d5e7", "score": "0.6173107", "text": "def model_fn(train_dataset,\n eval_dataset):\n pass", "title": "" }, { "docid": "e8d6a180cbca16cfd5c389263edf7301", "score": "0.61500317", "text": "def model(cls):", "title": "" }, { "docid": "3da043af3386bd7146cd6a148a05dff5", "score": "0.6094795", "text": "def get_standardize_model(self):\n pass", "title": "" }, { "docid": "529023cbb34f59b17bc1068313d3e0d8", "score": "0.6088377", "text": "def _model_func(self):\n return self._spectrum.copy()", "title": "" }, { "docid": "3ce629ecd385f944745721875477e75b", "score": "0.6084943", "text": "def get_model(self, key):", "title": "" }, { "docid": "ade9691bc3f4e1c4a2a6a9819cdf8692", "score": "0.60831183", "text": "def CalcModel(self,x):\n model = self.model\n par = self.params\n #function expect parameters\n #if self.parstatus == True: #not enough to solve the problem\n return model(x,par)\n #else:\n # return model(x)", "title": "" }, { "docid": "564737a5efa5f6ab5893ead8b3a42b16", "score": "0.6062361", "text": "def __init__(self, x, model, args):", "title": "" }, { "docid": "5982b148fd51beb233aa86443bea831c", "score": "0.60542", "text": "def sample(self):\n #model should be able to be saved\n pass", "title": "" }, { "docid": "4d68b2f7712d1852bde14eaaf16f191a", "score": "0.60192484", "text": "def _decorate_model(self, valuation_str, format):\n ...", "title": "" }, { "docid": "18bb80bc75e217617a7114761bd7dd55", "score": "0.6017008", "text": "def f(self):\n return self.model_params[..., 12]", "title": "" }, { "docid": "d0480affc9978fcbd6ccc8fd7afedc11", "score": "0.60162026", "text": "def __call__(self, model, X, y):\n return model.fit(X, y)", "title": "" }, { "docid": "9127cb99ade15600a2ecad79f9030a08", "score": "0.6007787", "text": "def _build_model(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "4f38863082a888887c1abffea6d7c995", "score": "0.6006629", "text": "def build_model(self, x):\n raise NotImplementedError()", "title": "" }, { "docid": "1e19013eb205284871d1b58f78439f49", "score": "0.5987694", "text": "def eval_model(self, data_object):\n\n print(\"My model is doing pretty well.\")\n\n return data_object", "title": "" }, { "docid": 
"6c6bf1c0857481d5762c667a3d948290", "score": "0.59744173", "text": "def predict(self):", "title": "" }, { "docid": "64f8a9c21b18fd3f6e141de38c323a1b", "score": "0.5949171", "text": "def __init__(self, model):\r\n self.model = model", "title": "" }, { "docid": "cd1471224b2a48b5d2785b2a95bf0c8f", "score": "0.59367883", "text": "def __init__(self, model):\r\n \r\n self.model = model", "title": "" }, { "docid": "18a429db8be036eed8ed12fd2bf257d6", "score": "0.5934938", "text": "def _wrap_model(self):\n raise NotImplementedError", "title": "" }, { "docid": "8212c9a56958e18f2a299b05d5e81179", "score": "0.5919049", "text": "def __call__(self, x):\n return self.model(x).numpy()", "title": "" }, { "docid": "7ca94cd1a4d51989b90a69c1aefb3b79", "score": "0.59134036", "text": "def _update_model(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "35ae811da44ebf915cc56d74d00527b2", "score": "0.590668", "text": "def apply(self):", "title": "" }, { "docid": "83e82ef7e4b538da10436c7fc5836b45", "score": "0.5903701", "text": "def buildModel(self):\n raise NotImplementedError", "title": "" }, { "docid": "e3d3e679df81cf58d7506e2756d38283", "score": "0.58882517", "text": "def MakeModel(self):\n pass", "title": "" }, { "docid": "db165a9a1befa7672fcd738e04088a14", "score": "0.5885082", "text": "def initialize(self, model):", "title": "" }, { "docid": "ae8c422b855525cabc2c8861d4e090a7", "score": "0.5858136", "text": "def _build_model(self):\n pass", "title": "" }, { "docid": "67f81c1742da4a2b1632cee3e68fb588", "score": "0.5839869", "text": "def __init__(self, model):\n super().__init__(model)", "title": "" }, { "docid": "d44be13b6b518ffc1edb50b19885fb99", "score": "0.58328104", "text": "def __call__(self):\n return self.model_search()", "title": "" }, { "docid": "54b38a1942f03317c3aea5350a33b6e3", "score": "0.5771637", "text": "def model_params(self):\n pass", "title": "" }, { "docid": "c490ad8d25d8e605119576ec99a95b3e", "score": "0.57662874", "text": "def _make_model_var(value):\n ...", "title": "" }, { "docid": "5a4c390b4fda8870cea13611f38f825a", "score": "0.57428575", "text": "def _model_(self, **record):\n obj = self.__model__(**record)\n return obj", "title": "" }, { "docid": "941206f97c6382c5ebbb2aaee5eaaac8", "score": "0.57328725", "text": "def add_model(self, function, name):\n self.models[name] = function", "title": "" }, { "docid": "0e95997130e41cc9e695b267cff39179", "score": "0.5729992", "text": "def __init__(self, model):\n\n self.model = model", "title": "" }, { "docid": "0e95997130e41cc9e695b267cff39179", "score": "0.5729992", "text": "def __init__(self, model):\n\n self.model = model", "title": "" }, { "docid": "5c825d1e33460ddd79c2f5b3c741e4f4", "score": "0.5726998", "text": "def __init__(self):\n self.model = None", "title": "" }, { "docid": "a5b835791d1678f106b30c01efd8ea04", "score": "0.57179725", "text": "def get_model_api():\n # 1. initialize model once and for all and reload weights\n print(\"load model\") \n with open('res_model.pkl', 'rb') as fin:\n sc, clf = pickle.load(fin)\n\n\n def model_api(input_data):\n print(\"hello from model_api\")\n # 2. process input with simple tokenization and no punctuation\n new_samples = extract_features(input_data)\n #print(new_samples.shape)\n X_new = sc.transform(new_samples)\n # 3. call model predict function\n X_new_preds = clf.predict(X_new)\n # 4. process the output\n # 5. 
return the output for the api\n \n return X_new_preds\n return model_api", "title": "" }, { "docid": "f522102388664c2202a2ae208c8ab3b8", "score": "0.57162994", "text": "def __build_model(self):\n raise NotImplementedError(\"this method builds the actual model; has to be overwritten\")", "title": "" }, { "docid": "a7a5f1bb11818f66ee6d436a2d4a01b8", "score": "0.5705111", "text": "def build_model(self):\n return", "title": "" }, { "docid": "a7a5f1bb11818f66ee6d436a2d4a01b8", "score": "0.5705111", "text": "def build_model(self):\n return", "title": "" }, { "docid": "1cbfc5ddf583d431b64a41c295a15de1", "score": "0.56986195", "text": "def create_model(self):\r\n pass", "title": "" }, { "docid": "c4b6ea095685321b8625a892df5a35a4", "score": "0.56939", "text": "def _copy_to_model(self, model):", "title": "" }, { "docid": "994065ae7192d859c3bbe9d45580ef54", "score": "0.568314", "text": "def do_BaseModel(self, line):\n if line == \".all()\":\n self.do_all(\"BaseModel\")\n elif line == \".count()\":\n self.printCount(\"BaseModel\")\n elif line[0:5] == \".show\":\n print(\"BaseModel \" + line[7:-2])\n self.do_show(\"BaseModel \" + line[7:-2])\n elif line[0:8] == \".destroy\":\n self.do_destroy(\"BaseModel \" + line[10:-2])\n elif line[0:7] == \".update\":\n self.do_update(\"BaseModel \" + self.reconstructMyArg(line[8:-1]))", "title": "" }, { "docid": "9d697552862a94ecab727a956027931d", "score": "0.5681669", "text": "def predict(self, *args, **kwargs):", "title": "" }, { "docid": "d739036b616a19e6e2942e01f0c9a31a", "score": "0.56654084", "text": "def transform(self, obj: str) -> OscalBaseModel:", "title": "" }, { "docid": "ab3e6dd50b8fa75f4fddc1ecb12051f3", "score": "0.56542367", "text": "def get(self, model_id: Any) -> Any:", "title": "" }, { "docid": "d3ac1a2d57d01304550f7646e697e9ca", "score": "0.56453925", "text": "def compute_p_eval_for(self, model):", "title": "" }, { "docid": "14efcd041592aea2a6ddf326aa85d6ec", "score": "0.5630155", "text": "def _get_model(self):\n return self.__model", "title": "" }, { "docid": "5e80e721ba325c027500f4470898672f", "score": "0.56265914", "text": "def forward(self , x):\n return self.model(x)", "title": "" }, { "docid": "bd9a1ce714e6ac1207bdf5f215121c83", "score": "0.5616488", "text": "def _MODEL_KEY(self):\n raise NotImplementedError", "title": "" }, { "docid": "b8c21c50602fa9ac89a6219cbb70d474", "score": "0.5614811", "text": "def _model_ext(self) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "83f686786ef7c4ed7a5d1c235e56c180", "score": "0.5598555", "text": "def matches_model(model):\n pass", "title": "" }, { "docid": "c31b910b047192a2256b190414dbef72", "score": "0.55924016", "text": "def eval(self):\n self.model.eval()", "title": "" }, { "docid": "97b9b9cd61060dd051d797d42cece90d", "score": "0.55707157", "text": "def forward(self, x):\n return self.model(x)", "title": "" }, { "docid": "54401d808a9e36a8260c75c2c192be9f", "score": "0.5543304", "text": "def model(options, hparams):\n\n raise NotImplementedError()", "title": "" }, { "docid": "60a01e62be4c329bb51d033443e20fd9", "score": "0.55345464", "text": "def model_params(self):\n\n pass", "title": "" }, { "docid": "e59778b4b0c5535074914377dd3a31b4", "score": "0.5530758", "text": "def model_train_process():\n train_data = read.get_train_data(\"../data/ratings.txt\")\n user_vec,item_vec = lfm_train(train_data,10,0.01,0.1,5)\n\n # 全部用户推荐\n # for userid in user_vec:\n # recom_list = give_recom_result(user_vec,item_vec,userid)\n # ana_recom_result(train_data,userid,recom_list)\n\n # 某一用户推荐\n 
recom_list = give_recom_result(user_vec,item_vec,\"38\")\n ana_recom_result(train_data,\"38\",recom_list)", "title": "" }, { "docid": "948fb714f06eb07ec12f737f1eee2952", "score": "0.55304754", "text": "def _build_model(self, **kwargs):\n pass", "title": "" }, { "docid": "035ea6632a5006dc5a559b575c72dcf0", "score": "0.5528899", "text": "def eval_model(self):\n return self._eval_model", "title": "" }, { "docid": "60f0be877013586fcacad68d213f0425", "score": "0.5526905", "text": "def __init__(self):\r\n self.model = None", "title": "" }, { "docid": "f2f521d3323ca80dbe455dde95738b5d", "score": "0.55242556", "text": "def _update_model_after_change_of_control_fun(self):\n pass", "title": "" }, { "docid": "e912115ff66a2bd456706707bb865406", "score": "0.5523734", "text": "def model_fn(model_dir):\n return Predictor(model_dir)", "title": "" }, { "docid": "bd9c4147ab41c95c93428fd4d68f8024", "score": "0.55230135", "text": "def transform(self, obj: OscalBaseModel) -> str:", "title": "" }, { "docid": "96a24e85018eb859e99b8af355a372cf", "score": "0.5514755", "text": "def obs_model(self, _input_states, _param):\n raise TypeError # This has to be overridden in child class.", "title": "" }, { "docid": "d40b9a1b7463a1c6c19ae05b8e11a768", "score": "0.55048436", "text": "def apply(self):\n # override", "title": "" }, { "docid": "31c5dbb2936d26d221b81616ae2ab442", "score": "0.5489001", "text": "def SignalModelClass(request):\n return request.param", "title": "" }, { "docid": "6a5041cebbc91747ba6ffd024cc471e6", "score": "0.5488461", "text": "def __init__(self):\n self.model = None", "title": "" }, { "docid": "b847542b73d8bb2d21f9a27019a5f2c4", "score": "0.54868406", "text": "def get(self, request: GetRequest) -> Model:", "title": "" }, { "docid": "330b6e23d452fbf71d1c36e423e70de4", "score": "0.54848015", "text": "def model_fn():\n model = models.Sequential()\n model.add(layers.Dense(4, input_shape=(FEATURE_SIZE,)))\n model.add(layers.Dense(1))\n\n compile_model(model)\n\n return model", "title": "" }, { "docid": "3a3a794b46c8fd4eda0aa08eb499c19f", "score": "0.546664", "text": "def for_model(typ):\n\n def wrapper(fn):\n def for_model(*args, **kwargs):\n result = fn(*args, **kwargs)\n return typ(**result)\n\n fn.for_model = for_model\n return fn\n\n return wrapper", "title": "" }, { "docid": "006e0eac0c08dd12aaf9b3707b5c0472", "score": "0.5460621", "text": "def tb_model(self):\n pass", "title": "" }, { "docid": "56cbd7eccfcaef54d4036320ac3f3d80", "score": "0.5450559", "text": "def setup_model(model):\n #from setup_model import setup_model\n #setup_model(model)\n pass", "title": "" }, { "docid": "a5a157c61861a6f26f18540cad0d597e", "score": "0.5447376", "text": "def train_model(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "0b587f891d8c36d80a2fb2582d1daca1", "score": "0.54443705", "text": "def evaluate(self, model, obj):\n\n return self.function.run(model, obj)", "title": "" }, { "docid": "c5e8ae2da3fcc4a16ae517cd338cdb7d", "score": "0.54266304", "text": "def predict_fn(input_data, model):\n return model(input_data)", "title": "" }, { "docid": "85f33b74345b3d5e2a2f9a9ebacb21fa", "score": "0.5423329", "text": "def objective(self):", "title": "" }, { "docid": "ddb49b2c600089923351631370185f16", "score": "0.54148304", "text": "def model(request):\n return request.getfixturevalue(request.param)", "title": "" }, { "docid": "09da5ce8461f7a9346a774b43af86d30", "score": "0.5413877", "text": "def do_convertmodel(self, args):\r\n print('Processing..')\r\n 
securicadAdapter.modelData2DB(securicadAdapter.mergeData(con), con)", "title": "" }, { "docid": "74249ca30038721a52eb0326740bfda8", "score": "0.5409809", "text": "def serialize_model(model):", "title": "" }, { "docid": "28efed21d5bb98ffc202886e812f20d1", "score": "0.5404379", "text": "def _unwrap_model(self):\n raise NotImplementedError", "title": "" }, { "docid": "522e98fc83febf49f61aff291cffc6ab", "score": "0.54019475", "text": "def call(self, inputs):\n return self.model(inputs)", "title": "" }, { "docid": "713e0ee0daa5a80d5a16ff7bfd8f86ac", "score": "0.5399866", "text": "def fit(self):\n model = None", "title": "" }, { "docid": "b645ed6bed0b668d56f1f239d55d87bb", "score": "0.5395021", "text": "def model(self):\n return self.m", "title": "" }, { "docid": "8614b955fb923b7fd0f83d95bf8015f5", "score": "0.5384558", "text": "def model_updater_and_returner():\n global communication, queues_and_locks, hyperparameters, dataset_info, placeholders, update_local_vars_op, variables_pack_for_eval_and_save, g1, sess\n ps_fn.update_and_return_model(communication, queues_and_locks, hyperparameters, dataset_info, placeholders, update_local_vars_op, variables_pack_for_eval_and_save, g1, sess)", "title": "" }, { "docid": "11bc04eb8b4ad7c5d9280042d9d39a84", "score": "0.53812194", "text": "def mutate(self, model: Model) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "d62ad59352b579b61feb3e80e7ed533a", "score": "0.53728515", "text": "def valid_wrt_model(self, model, blamelist=False, pairs=True, \\\n all=False, raw=False, **kwds):\n from mystic.math.legacydata import dataset \n data = dataset() \n data.load(self.positions, self.values)\n #data.lipschitz = L\n for i in range(len(data)):\n data[i].id = i\n return data.valid(model, blamelist=blamelist, pairs=pairs, \\\n all=all, raw=raw, **kwds)", "title": "" }, { "docid": "eabbbbedb7989b4eba655650fac566ba", "score": "0.53720176", "text": "def model(self):\n return self._model", "title": "" }, { "docid": "9c4b140ddde293f1151590921da343d1", "score": "0.5371828", "text": "def get_model(self):\n raise NotImplementedError", "title": "" }, { "docid": "acbd21a64ebcb1029f7423c3ef16b7e4", "score": "0.53701913", "text": "def _train(self):\n pass", "title": "" }, { "docid": "36fa7bb734f2056c5d6dc5beee191303", "score": "0.53657174", "text": "def run(self) -> Optional[models.Model]:\n pass", "title": "" }, { "docid": "603331ce934ce4081a0a7c7675cf96ae", "score": "0.53656983", "text": "def predict():", "title": "" }, { "docid": "8882b19f9579d56d402fa0c5dd05c76d", "score": "0.5363932", "text": "def model(self):\r\n if self.retarder == 'quarterwave':\r\n model = partial(quarterwave_model, q=self.q, u=self.u, v=self.v,\r\n zero=self.zero)\r\n else:\r\n model = partial(halfwave_model, q=self.q, u=self.u, zero=self.zero)\r\n return model", "title": "" }, { "docid": "b8525c824e43bade79186f78a3872ec0", "score": "0.5358047", "text": "def __str__(self):\n return \"%s %i\"%(self.__class__.__name__, self.model)", "title": "" } ]
c26ed3725398a4fed9398fe7c1e7c913
Print list of commands to run
[ { "docid": "99898ba4171ce55afb904a8e93227608", "score": "0.7087355", "text": "def print_commands(commands,\n status_update_callback,\n logger):\n #logger.write(\"Printing commands only.\\n\\n\")\n #for c in commands:\n # for e in c:\n # status_update_callback('#%s' % e[0])\n # print '%s' % e\n # logger.write('# %s command\\n%s\\n\\n' % e)", "title": "" } ]
[ { "docid": "b9b7d4eed90f1015a2643fe265875fb2", "score": "0.81645465", "text": "def print_commands(self):\r\n pass", "title": "" }, { "docid": "2a13ba3352fb1ea2bf46156e1906f339", "score": "0.7973929", "text": "def show_commands(self):\n print('-h\\t--lists all commands')\n print('-o filename\\t--opens file for assembling')\n print('-q\\t--terminates program')", "title": "" }, { "docid": "66b98a823c00b31f12f3088a3fd3d0de", "score": "0.78872323", "text": "def print_commands(self):\r\n\r\n print \"<command>\\t<target>\\t<additional>\"\r\n print \"enable_google\\t<empty>\\t<empty>\\tenable google module\"\r\n print \"disable_google\\t<empty>\\t<empty>\\tdisable google module\"\r\n print \"enable_msn\\t<empty>\\t<empty>\\tenable msn spreader module\"\r\n print \"disable_msn\\t<empty>\\t<empty>\\tdisable msn spreader module\"\r\n print \"enable_usb\\t<empty>\\t<empty>\\tenable usb spreader module\"\r\n print \"disable_usb\\t<empty>\\t<empty>\\tdisable usb spreader module\"\r\n print \"silence\\t<empty>\\tchannel\\tsilence a channel\"\r\n print \"send_ip_list\\t<empty>\\t<empty>\\tspreads a list of ip addresses\"\r\n print \"update\\t<empty\\t<empty>\\tupdate the malware\"\r\n print \"download\\t<empty>\\t<empty>\\tdownload and execute\"\r\n print \"download2\\t<empty>\\t<empty>\\tnewer download and execute\"\r\n print \"remove\\t<empty>\\t<empty\\tremove the bot\"", "title": "" }, { "docid": "51eddbdd9bf06a6dc936db49b78dfdd0", "score": "0.7858923", "text": "def cmdlist_print(self):\n cfg, cmdlist = self.read_config()\n print('\\n')\n print(cmdlist)\n print('\\n')\n cmdset = self.cmdlist_filter(cmdlist)\n for cmd in cmdset:\n print(cmd)", "title": "" }, { "docid": "9a3052c83edae065bbd0d421e9c2862d", "score": "0.7826215", "text": "def show_commands(self):\n print(\n '' +\n '\\n\\t' + bc.OKBLUE + 'COMMANDS:' + bc.ENDC +\n '\\n\\t' + '---------' +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'run', 'Run the script')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'info', 'Information')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'help', 'Help')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'pd', 'Predefined arguments for \"runcom\"')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'so', 'Show options')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'sa', 'Show module info')) +\n '\\n\\t' + ('%-*s ->\\t%s' % (9, 'exit', 'Exit')) +\n '\\n'\n )", "title": "" }, { "docid": "ef7d56ab50ae9ed52b1a08bcf02ae5de", "score": "0.7734223", "text": "def help():\n\n print(\"Commands\")\n print(\"--------\")\n for command in commands:\n print(command)", "title": "" }, { "docid": "b2ba571f2e041a5e41acb638c034e522", "score": "0.7585738", "text": "def print_commands(self):\n commands = [[self.color_message(i, 'BOLD') for i in ['Commands', 'Description']],\n ['-a/--add', 'Add a new element to a task list'],\n ['-r/--remove', 'Remove an element from a task list'],\n ['-f/--finish', 'Finish a task in a task list'],\n ['-u/--unfinish', 'Unfinish a task in a task list'],\n ['-c/--change', 'Change parts of an existing task'],\n ['-v/--view', 'View the whole task list']]\n table_data = commands\n table = AsciiTable(table_data)\n table.inner_row_border = True\n\n if not self.check_table_fit(table):\n self.print_message('Try adding a task to your list! just call `python-todo -a`')\n else:\n self.print_message('Try adding a task to your list! 
Here\\'s the available commands:')\n print(table.table)", "title": "" }, { "docid": "3c835093115b670c751006c8a752b9e8", "score": "0.75789744", "text": "def print_command_list(commands):\n print(\n f'''\n ________{Fore.GREEN}LIST OF COMMANDS{Fore.RESET}______________________\n | {Fore.RED}Command{Fore.RESET} | {Fore.RED}Description{Fore.RESET} |\n |-----------|----------------------------------| '''\n )\n for com in commands:\n print(\n f\" |{com.center(11)}|{commands[com].center(34)}|\")\n print(\" |______________________________________________|\")", "title": "" }, { "docid": "0c482faa46512b3481a2e87c15d81fbd", "score": "0.74030817", "text": "def print_commands(self):\n print((\"Command:\\tShortcut:\\tAction:\\n\"\n \"north\\t\\tn\\t\\tMove north\\n\"\n \"south\\t\\ts\\t\\tMove south\\n\"\n \"east\\t\\te\\t\\tMove east\\n\"\n \"west\\t\\tw\\t\\tMove west\\n\"\n \"look\\t\\tl\\t\\tLooks around at current location\\n\"\n \"inventory\\ti\\t\\tShows contents of inventory\\n\"\n \"take\\t\\tt\\t\\tPlaces item at current location in inventory\\n\"\n \"diagnostic\\t\\t\\tChecks current health condition\\n\"\n \"attack [0-9]\\ta [0-9]\\t\\tAttack all monsters with indexed item\\n\"\n \"drop [0-9]\\td [0-9]\\t\\tDrops item at specified inventory index\\n\"\n \"help\\t\\t?\\t\\tDisplays this list of valid commands\\n\"\n \"clear\\t\\t\\t\\tClear the terminal screen\\n\"\n \"quit\\t\\tq\\t\\tQuit game\\n\"\n ))", "title": "" }, { "docid": "0a4ddbdeb3bb9b31145cdbb676cadea5", "score": "0.740064", "text": "def print_commands():\n data = read_helpfiles()\n for item in data:\n s = \"{}\".format(item[\"name\"])\n if not type(item[\"synonyms\"]) == list:\n item[\"synonyms\"] = [ item[\"synonyms\"] ]\n for synonym in item[\"synonyms\"]:\n s += \" / {}\".format(synonym)\n print(s)", "title": "" }, { "docid": "5ee3bea8dd6b0ef1e8059a9b957f54a7", "score": "0.7320315", "text": "def show_commands():\n print(\"Showing all arguments for the Budget application: \")\n print(\" --helpme Shows all the argument options\")\n print(\" --init Initiates a new budget with following commands\")\n print(\" --new <category> <label> Adds a new label to current budget in mentioned category\")\n print(\" --delete <category> <label> Removes label from current budget in mentioned category\")\n print(\" --show Shows budget as a pandas dataframe\")\n print(\" --labels Shos labels, ie Food, Snacks, Cinema, Taxis, Clothing\")\n print(\" --add <amount> <label> Adds <amount> into <label> for current month/date/year\")\n print(\" --sub <amount> <label> Subtracts <amount> from <label> for current month/date/year\")\n print(\" --undo Undos the latest command made\")", "title": "" }, { "docid": "7837523fa6961779f1998fd390d71ecb", "score": "0.7317877", "text": "def list_commands(self, *args):\n command_str = 'List of commands:'\n for command in sorted(self.__class__.commands):\n command_str += '\\n{} - {}'.format(command,\n self.commands[command].__doc__)\n self.send_message(command_str)", "title": "" }, { "docid": "cc2685f44cad2d8c4c45ee09cbdd9081", "score": "0.7262187", "text": "def print_commands(commands):\n for item in commands.items():\n print(item[0])\n for character in item[0]:\n print('-', end='')\n print('\\n')\n for command in item[1]:\n print(' * {}'.format(' '.join(command)))\n print('\\n')", "title": "" }, { "docid": "f965f1e0798d6c6935b712add07da1cf", "score": "0.7261889", "text": "def test_list_commands():\n Console.list_all()", "title": "" }, { "docid": "f965f1e0798d6c6935b712add07da1cf", "score": "0.7261889", "text": "def 
test_list_commands():\n Console.list_all()", "title": "" }, { "docid": "df75dfcd91f608e88eec3f6841bdca43", "score": "0.72294444", "text": "def print_commands(self):\r\n output = \"\\n\"\r\n output += \"-----------------------------------\" + \"\\n\"\r\n column = [\"Service command\", \"Description\"]\r\n data = [[\"change_folder <name>\", \"Move the current working directory\"],\r\n [\"list\", \"Print all files & folders in current directory\"],\r\n [\"read_file <name>\", \"Read 100 char from the file <name> in the current working directory\"],\r\n [\"write_file <name> <input>\", \"Write data in <input> to end of file <name> in current directory\"],\r\n [\"create_folder <name>\", \"Create a new folder with the <name> in the current working directory\"],\r\n [\"register <username> <password> <privileges>\", \"Register a new user with the <privileges> to the server using the <username> and <password> provided\"],\r\n [\"login <username> <password>\", \"Log in the user conforming with <username>\"],\r\n [\"delete <username> <password>\", \"Delete the user conforming with <username> from the server\"],\r\n [\"issued\", \"Check which commands you entered\"],\r\n [\"clear\", \"Clear the issued commands\"]]\r\n output += \"{:<50}\".format(column[0]) + \" --> \" + column[1] + \"\\n\"\r\n for row in data:\r\n output += \"{:<50}\".format(row[0]) + \" --> \" + row[1] + \"\\n\"\r\n output += \"-----------------------------------\" + \"\\n\"\r\n return output", "title": "" }, { "docid": "6ec1d6756133f864ca0f391b4659a441", "score": "0.72091705", "text": "def show_commands(self):\n self.send_status(\"system\", message=u\"The following commands are available:\")\n self.send_status(\"system\", message=u\"!next - connect the next customer. If there is a current chat it is disconnected.\")\n self.send_status(\"system\", message=u\"!end - ends the current chat without connecting the next customer.\")\n self.send_status(\"system\", message=u\"!? 
- displays this command list.\")", "title": "" }, { "docid": "528f105238136c04aa883619f37ebb8a", "score": "0.7179502", "text": "def list_commands(args):\n cmds = list(ext_plugins.get(group=ACMD_GROUPNAME))\n if len(cmds) == 0:\n print \"::: no command found in registry\"\n return 1\n print \"::: found [%i] command%s\" % (len(cmds),\n \"s\" if len(cmds)>1 else \"\")\n cmds.sort(cmp=lambda x,y: cmp(x.name, y.name))\n cmds = [cmd for cmd in cmds if cmd.name != 'list-commands']\n for i, cmd in enumerate(cmds):\n if args.detailed:\n print \"=\"*80\n print \" - %s\" % (' '.join(cmd.name.split('.')),)\n if args.detailed:\n try:\n cmd.load().parser.print_help()\n except Exception,err:\n print \"** could not inspect command [%s]:\\n%s\" % (\n cmd.name,\n err)\n print \"=\"*80\n print \"\"\n return 0", "title": "" }, { "docid": "3464db4b3660c51e7b22470c3ed64d2e", "score": "0.7170034", "text": "def _PrintCommands(self, commandNames):\n maxlen = 0\n for name in commandNames:\n maxlen = max(maxlen, len(name))\n fmt = \" %%-%ds %%s\" % maxlen\n\n for name in commandNames:\n command = all_commands[name]()\n try:\n summary = command.helpSummary.strip()\n except AttributeError:\n summary = \"\"\n print(fmt % (name, summary))", "title": "" }, { "docid": "0928b1f4e26dc01da9567cb46c80d504", "score": "0.7090239", "text": "def help_print():\r\n print(\"------- LIST OF COMMANDS -------\")\r\n print('MIND THE QUOTES')\r\n print('------- ADD COMMANDS -------')\r\n print('ADD USER \"NAME\" \"USERNAME\" PASSWORD')\r\n print('ADD SERIE NAME YYYY-MM-DD \"SYNOPSIS\" CATEGORY_ID')\r\n print('ADD EPISODIO \"NAME\" \"DESCRIPTION\" SERIE_ID')\r\n print(\"ADD USER_ID SERIE_ID CLASSIFICATION_INITIALS\")\r\n print('------- SHOW & REMOVE COMMANDS -------')\r\n print(\"SHOW/REMOVE USER USER_ID\")\r\n print(\"SHOW/REMOVE SERIE SERIE_ID\")\r\n print(\"SHOW/REMOVE EPISODIO EPISODIO_ID\")\r\n print(\"SHOW/REMOVE ALL USERS/SERIE/EPISODIO\")\r\n print(\"SHOW/REMOVE ALL SERIE_U USER_ID\")\r\n print(\"SHOW/REMOVE ALL SERIE_C CATEGORY_ID\")\r\n print(\"SHOW/REMOVE ALL EPISODIO SERIE_ID\")\r\n print(\"------- UPDATE COMMANDS -------\")\r\n print(\"UPDATE SERIE USER_ID SERIE_ID CLASSIFICATION_ID\")\r\n print(\"UPDATE USER USER_ID PASSWORD\")\r\n print(\"exit or EXIT to exit out of the CLI\")", "title": "" }, { "docid": "df538a65d22b07a65bac7957be9cc8be", "score": "0.7079387", "text": "def showCommandWords(self):\n return ['help', 'map', 'go', 'check', 'pickup', 'inventory', 'quit']", "title": "" }, { "docid": "dbadc7f17d40b6245dfb43cf65dee2bf", "score": "0.70467424", "text": "def help(self):\r\n print(\"Available {0}\".format(self.category))\r\n commandList = sorted(list(self.commands.keys()))\r\n for command in commandList:\r\n print(\" {0:<15}{1}\".format(command+\":\", self.commands[command].description))", "title": "" }, { "docid": "d6239615e630cc28780afcf8c9e600c8", "score": "0.7007139", "text": "def print_commands():\n\n print(\"\"\"Available commands:\n vox new <env>\n Create new virtual environment in $VIRTUALENV_HOME\n\n vox activate (workon, enter) <env>\n Activate virtual environment\n\n vox deactivate (exit)\n Deactivate current virtual environment\n\n vox list (ls)\n List all available environments\n\n vox remove (rm, delete, del) <env>\n Remove virtual environment\n\n vox help (-h, --help)\n Show help\n\"\"\")", "title": "" }, { "docid": "cf8addfdc4c593531d03520fe4bd4360", "score": "0.6998943", "text": "def print_commands(prefix: str, obj: CommandClient) -> None:\n prefix += \" -f \"\n\n cmds = obj.call(\"commands\")\n\n 
output = []\n for cmd in cmds:\n doc_args = get_formated_info(obj, cmd)\n\n pcmd = prefix + cmd\n output.append([pcmd, doc_args])\n\n max_cmd = max(len(pcmd) for pcmd, _ in output)\n\n # Print formatted output\n formatting = \"{:<%d}\\t{}\" % (max_cmd + 1)\n for line in output:\n print(formatting.format(line[0], line[1]))", "title": "" }, { "docid": "2c20936fac348d4070124c61366d7d9d", "score": "0.6996808", "text": "def list_command():\n print \"Scrapers:\"\n print \"\"\n\n for scraper in Scraper:\n print \" \", scraper.alias\n\n print \"\"\n print \"Reports:\"\n print \"\"\n for report in Report:\n print \" \", report.alias", "title": "" }, { "docid": "9fdad9684f13d0f19e0064cab80e013c", "score": "0.6982909", "text": "def list_commands(self):\n list_str = \"General usage: \" + Color.BOLD + \"./orchid [COMMAND [OPTIONS] [ARGS]]\" + Color.END\n list_str += \"\\nSupported commands:\"\n for key in self.command_manager.get_command_list():\n description = self.command_manager.get_command(key).get_description()\n list_str += (\"\\n \" + Color.BOLD + \"{0:12}\" + Color.END + \" {1}\").format(key, description)\n list_str += \"\\nType \" + Color.BOLD + \"./orchid help [COMMAND]\" + Color.END + \" for more advanced usage information\"\n return list_str", "title": "" }, { "docid": "0f3d228b21bc1a62c06e8de40309cd71", "score": "0.6916607", "text": "def list_command(client, args):\n print(format_tasks(list_tasks(client)))", "title": "" }, { "docid": "7656246d2c07de81e17351fd97b352c2", "score": "0.6903691", "text": "def print_commands(commands, highlighted_terms=None):\n if highlighted_terms is None:\n highlighted_terms = []\n x = 1\n for command in commands:\n print_command(x, command, highlighted_terms)\n x = x + 1\n return \"\"", "title": "" }, { "docid": "265105bd761c794df91de00d97f0acd7", "score": "0.68916726", "text": "def list_buildtest_commands():\n\n cmds_list = [\n \"build\",\n \"buildspec\",\n \"cd\",\n \"cdash\",\n \"clean\",\n \"config\",\n \"debugreport\",\n \"docs\",\n \"history\",\n \"info\",\n \"inspect\",\n \"path\",\n \"report\",\n \"schema\",\n \"schemadocs\",\n \"show\",\n \"stats\",\n \"stylecheck\",\n \"tutorial-examples\",\n \"unittests\",\n ]\n\n for field in cmds_list:\n print(field)", "title": "" }, { "docid": "10aaf3044a22f773c81119703b6dee1c", "score": "0.6888837", "text": "def usage(self) -> None:\n group = self.get_command_groups()\n\n # Display the commands/dicts\n first = True\n for group, commands in group.items():\n if first:\n first = False\n else:\n print(\"\\n\")\n print(f\"{bcolors.PURPLE}{group}{bcolors.END}\")\n\n # print the DESC if needed\n if \"DESC\" in commands:\n print(f\" {commands['DESC']}\\n\")\n\n # print all commands (and not DESC)\n for command, description in sorted(commands.items()):\n if command != \"DESC\":\n print(f\"{bcolors.YELLOW}{command:20s}{bcolors.END} {description}\")", "title": "" }, { "docid": "0e39c80be81a14cc65b3d0f9a48f2958", "score": "0.6847062", "text": "def list_commands(self, conn):\n commands = sorted(self.commands.keys())\n conn.send(\"Available commands are: %s\\n\" % \" \".join(commands))", "title": "" }, { "docid": "29f0f59429557e24f0e512673a229dd5", "score": "0.684092", "text": "def do_cmd(self,arg):\n try:\n print '=================================='\n print ' command list'\n print '=================================='\n commands = [name for name in dir(self) if name[0:3]=='do_']\n for command in commands:\n doc = eval('self.'+command+'.__doc__')\n if not doc==None:\n doc = doc.strip()\n doc = doc.split('\\n')\n print 
'{cmd:12}: {state}'.format(cmd=command[3:], state=doc[0])\n except:\n print 'error: cannot list commands'", "title": "" }, { "docid": "687aa6c2c18d513938b4207cad8809e2", "score": "0.68222314", "text": "def get_commands():\n out = [\"%prog [OPTIONS] COMMAND [ARGS]\", \"\", \"Commands:\"]\n\n width = max(imap(len, (name[4:] for name in globals()\n if name.startswith('cmd_'))))\n\n for name in globals():\n item = globals()[name]\n if name.startswith('cmd_') and callable(item):\n try:\n doc = item.__doc__.split(\"\\n\")[0]\n except AttributeError:\n doc = \"\"\n\n out.append(\" %*s - %s\" % (width, name[4:], doc))\n\n return \"\\n\".join(out)", "title": "" }, { "docid": "00d91e49a1c853c41494a735620c0e8b", "score": "0.68016857", "text": "def get_commands(self):\n for file_name in self.pdb_log_file_list:\n outString = '{0} {1}'.format(self.com_path,file_name)\n self.commands.append(\"python {}\".format(outString))", "title": "" }, { "docid": "82281f5448538b36c7b5a9eb37e33fc3", "score": "0.68005514", "text": "def _print_help(self):\n endpoints = self._get_endpoints().keys()\n endpoints.sort()\n msg = \"Available commands are:\\n\"\n msg += '\\n'.join([' - %s' % cmd.replace('_', '-')\n for cmd in endpoints])\n errmsg(msg)", "title": "" }, { "docid": "128fc1a55741642c5729db26901bafbf", "score": "0.6739177", "text": "def cli_command_list(args):\n \n filepath = args.get('filename')\n task_name = args.get('task')\n\n if filepath:\n fullpath = os.path.join(os.path.realpath(os.curdir), filepath)\n docstring, callables, default = load_fabfile(fullpath)\n\n if not docstring == None:\n print(docstring.rstrip('\\n'))\n\n print('\\nAvailable commands:\\n')\n\n for task_name in callables:\n task_desc = callables.get(task_name).__doc__\n\n if not task_desc == None:\n description = task_desc.lstrip('\\n').rstrip('\\n')\n else:\n description = '\\n'\n\n print('%s %s' % (green(task_name), description))", "title": "" }, { "docid": "df4645962c624ebd043f2cd698f059de", "score": "0.6730502", "text": "def commands():\n pass", "title": "" }, { "docid": "df4645962c624ebd043f2cd698f059de", "score": "0.6730502", "text": "def commands():\n pass", "title": "" }, { "docid": "df4645962c624ebd043f2cd698f059de", "score": "0.6730502", "text": "def commands():\n pass", "title": "" }, { "docid": "705f127a1138790b1fbe161cb24888f6", "score": "0.6714745", "text": "def commands():\n\n pass", "title": "" }, { "docid": "91081501de5e7f12e6adfa50533bc2e5", "score": "0.6707525", "text": "def print_commands(self, line):\n print('%s%s' % (self.color_commands(), line))", "title": "" }, { "docid": "e3e686e64c4e6930ad05254bc917d764", "score": "0.6688502", "text": "def printList(self, prefix=\"\"):\n for cmd, obj in self.commands.items():\n if isinstance(obj, self.__class__):\n print(prefix + obj.cmd + \": \" + obj.description)\n if prefix == \"\":\n newprefix = \"- \"\n else:\n newprefix = \" \" + prefix\n\n obj.printList(prefix=newprefix)\n\n else:\n print(\"{}{}: {}\".format(prefix, cmd, obj.description))", "title": "" }, { "docid": "17486e5261ac00de8485861db32c9f1a", "score": "0.66829157", "text": "def printValidCommands(self):\n\t\tprint(\"Valid Commands: \")\n\t\tprint(\"\\tmv - move card from Stock to Waste\")\n\t\tprint(\"\\twf - move card from Waste to Foundation\")\n\t\tprint(\"\\twt #T - move card from Waste to Tableau\")\n\t\tprint(\"\\ttf #T - move card from Tableau to Foundation\")\n\t\tprint(\"\\ttt #T1 #T2 - move card from one Tableau column to another\")\n\t\tprint(\"\\th - help\")\n\t\tprint(\"\\tq - 
quit\")\n\t\tprint(\"\\t*NOTE: Hearts/diamonds are red. Spades/clubs are black.\")", "title": "" }, { "docid": "0d1fcd4b0a39fe9a6b6562d5f4bed0cf", "score": "0.6681607", "text": "def list_command(self):\n raise NotImplementedError", "title": "" }, { "docid": "59b054a6a0add253cad4da170a327cdc", "score": "0.66662", "text": "def list(self, all=False):\n commands, command_idx = self.notebook.read_commands()\n if not all:\n commands = commands[command_idx:]\n command_idx = 0\n for i, command in enumerate(commands):\n ui.show_command_with_index(command, i - command_idx)", "title": "" }, { "docid": "1b5a360a3c81d26e58163fbfdb5958dc", "score": "0.6653501", "text": "def cmd_cmd_history(self):\n for cmd in self.ite_cmd[:-1]:\n print \"{c}\".format( c = cmd.strip() )", "title": "" }, { "docid": "e80122d124f6e2823284a7113f91ddd7", "score": "0.66402894", "text": "def list_commands(self, ctx):\n\n # These are the ones we care about having first for usability reasons\n show_at_top = ['start', 'keygen', 'encrypt', 'decrypt', 'stomp', 'shave', 'diff']\n\n # Append extra commands that are not in the priority list to the end.\n all_commands = sorted(self.commands)\n extras = set(all_commands) - set(show_at_top)\n return show_at_top + sorted(list(extras))", "title": "" }, { "docid": "52a45c5e905f38b52cde990eb7e2cca0", "score": "0.6616461", "text": "def help(self, args):\n print \"Commands:\\n\"\n print \"\"\"\\tascii\\tcat\\tcd\\tcdup\\tclose\n delete\\tget\\thelp\\timage\\tlcd\n ls\\tmdelete\\tmget\\tmkdir\\tmput\n prompt\\tput\\tpwd\\trename\\trmdir\n size\\tshell\\ttype\\tuser\\n\"\"\"", "title": "" }, { "docid": "e17b2f90f9689ed0ad3c182252494d76", "score": "0.660913", "text": "def cmd_help(self, tokens):\n\n print(\"usage: {}\\n\".format(self.__name))\n for cmd in self.__commands.items():\n print(cmd[1])", "title": "" }, { "docid": "52fae7e0a735968f0ffa8f791caafad5", "score": "0.65879834", "text": "def _gen_commands_list(self):\n commands = self.enumerate_commands()\n docs = sorted(\n (cmd_name, self._create_command_summary(cmd_name, handler))\n for cmd_name, handler in commands.items())\n # Skip commands without a docstring.\n docs = [i for i in docs if i[1]]\n # Then calculate maximum length for alignment:\n length = max(len(c) for c in commands)\n\n # Look if color is supported.\n colors = _get_color_module()\n green = reset = ''\n if colors:\n green = colors.Fore.GREEN\n reset = colors.Fore.RESET\n return (\n 'Commands are:\\n' +\n ''.join(\n ' %s%-*s%s %s\\n' % (green, length, cmd_name, reset, doc)\n for cmd_name, doc in docs))", "title": "" }, { "docid": "2b36485bca152ad719131825ed8c0549", "score": "0.6582426", "text": "def bash_completion(self):\n print(' '.join(self._commands.keys()), end=' ')\n return 0", "title": "" }, { "docid": "209a96c2b61a5074bfcf45b07ad38d6a", "score": "0.65691847", "text": "def print_base_objects() -> None:\n root = CommandGraphRoot()\n actions = [\"-o cmd\"] + [f\"-o {key}\" for key in root.children]\n print(\"Specify an object on which to execute command\")\n print(\"\\n\".join(actions))", "title": "" }, { "docid": "823b2508e6eecb9b5e4f82192ed7b213", "score": "0.6558183", "text": "def cmd_help(self, args):\r\n print \"Commands supported:\"\r\n for attr in dir(self):\r\n if attr.startswith(\"cmd_\"):\r\n print attr.replace(\"cmd_\", \"\")", "title": "" }, { "docid": "a15d8e14fc9d1efbbbc122b8f32cfe3b", "score": "0.6537306", "text": "def commands(self) -> list:\n raise NotImplementedError", "title": "" }, { "docid": "3f5a79888b393b6c6c999dbf80b6882f", "score": "0.6519308", 
"text": "def list_commands(self, ctx):\n rv_all = []\n for folder in folders_of_interest:\n rv_part = []\n for filename in os.listdir(folder):\n if filename.endswith('.py') and not filename.startswith(\"__init__\"):\n if not has_cli_method(os.path.join(folder, filename)):\n continue\n rv_part.append(filename[:-3])\n cli_files[filename[:-3]] = folder\n rv_part.sort()\n rv_all.extend(rv_part) # to sort pipelines then helpers instead of mixing them when help message is printed\n return rv_all", "title": "" }, { "docid": "069ab5e7265d9dcbab8c16049570000f", "score": "0.65006745", "text": "def printTasks(self):\n\n\t\tif len(self.tasks) == 0:\n\t\t\tprint(' There are currently no registered tasks, use command \"add\" + \"task\", to add a task')\n\t\t\treturn\n\n\t\tfor key, tasks in self.tasks.items():\n\t\t\tprint(\"#{0} {1}\".format(key, tasks))", "title": "" }, { "docid": "d357f3949b13534e2002fe0222357267", "score": "0.64955145", "text": "def print_help():\n\n modules = glob.glob(dirname(__file__) + \"/../controllers/*Controller.py\")\n modules = {basename(f)[:-3]: basename(f)[:-13] for f in modules}\n\n calls = []\n for module_name, controller_name in modules.items():\n ctrl_module = import_module(\"controllers.%s\" % module_name)\n ctrl_class = getattr(ctrl_module, module_name)\n\n methods = inspect.getmembers(ctrl_class(), predicate=inspect.ismethod)\n methods = [f[0] for f in methods if not f[0].startswith('__')]\n\n for method in methods:\n if method==\"default\":\n method = \"\"\n calls.append((\"* %s/%s\" % (controller_name, method)).strip(\"/\"))\n\n print('\\nSupported call are:')\n print('\\n'.join(calls))", "title": "" }, { "docid": "ca3254bc2260cec16ffadb8ae1602d51", "score": "0.6482532", "text": "def showResults(self):\n print \"COMMAND : \\\"%s\\\"\" %self.command\n print \"OUTPUT : \\\"%s\\\"\" %self.output.strip()\n print \"ERROR : \\\"%s\\\"\" %self.error.strip()\n print \"RETURN CODE : %d\" %self.returnCode", "title": "" }, { "docid": "cfa48cfbeb41eb75b0e7d3b3d1f8edde", "score": "0.646045", "text": "def display_output(total_time, commands):\n print round(total_time, 1)\n for command in commands:\n print \"{} ({})\".format(command[0], command[1])", "title": "" }, { "docid": "1c8a6dae4e541ab87d4fed8ecc6b2296", "score": "0.64468867", "text": "def print_help():\n\n print('actions:')\n\n print()\n\n for action in ACTIONS:\n action.prefix = sys.argv[0]\n print(action.get_help_line())", "title": "" }, { "docid": "95c53aa9c48a8d7c4035146b1715c961", "score": "0.6434466", "text": "def print_commands(wait_time=-1):\n delete_extra_screens()\n if len(list_of_coordinate_screens) <= 0:\n return\n for i in range(max(list_of_coordinate_screens.keys()) + 1):\n for command in get_screen_commands(i, wait_time):\n print(command)\n print()", "title": "" }, { "docid": "03fd6b7cef041f9f6899f2fd36a05ccc", "score": "0.64215684", "text": "def show_list(self, commands, raw_text=False):\n return_list = []\n if raw_text:\n response_list = self._cli_command(commands, method=u'cli_ascii')\n for response in response_list:\n if response:\n return_list.append(response[u'msg'].strip())\n else:\n response_list = self._cli_command(commands)\n for response in response_list:\n if response:\n return_list.append(response[u'body'])\n\n log.debug(\"Show commands sent are :\")\n log.debug(commands)\n log.debug(\"Result got was :\")\n log.debug(return_list)\n\n return return_list", "title": "" }, { "docid": "77d0f69cd9f13bfefb9e7e21ce87e08a", "score": "0.6420717", "text": "def display_options(self):\n print()\n options 
= list(self.get_commands().values())\n options.sort(key=lambda op: int(op.name))\n\n for option in options:\n print(f'{\"%3d\" % int(option.name)}. {option.description}')", "title": "" }, { "docid": "7d31fc73e651a8331cfd0d1f6e8681ca", "score": "0.6415283", "text": "def __PrintCommands(self, printer):\n for command_info in self.__command_list:\n arg_list = [arg_info.name for arg_info in command_info.args]\n printer(\n 'class %s(apitools_base_cli.NewCmd):', command_info.class_name)\n with printer.Indent():\n printer('\"\"\"Command wrapping %s.\"\"\"',\n command_info.client_method_path)\n printer()\n printer('usage = \"\"\"%s%s%s\"\"\"',\n command_info.name,\n ' ' if arg_list else '',\n ' '.join('<%s>' % argname for argname in arg_list))\n printer()\n printer('def __init__(self, name, fv):')\n with printer.Indent():\n printer('super(%s, self).__init__(name, fv)',\n command_info.class_name)\n for flag in command_info.flags:\n self.__PrintFlag(printer, flag)\n printer()\n printer('def RunWithArgs(%s):', ', '.join(['self'] + arg_list))\n with printer.Indent():\n self.__PrintCommandDocstring(printer, command_info)\n printer('client = GetClientFromFlags()')\n printer('global_params = GetGlobalParamsFromFlags()')\n printer(\n 'request = messages.%s(', command_info.request_type)\n with printer.Indent(indent=' '):\n for arg in command_info.args:\n rhs = arg.name\n if arg.conversion:\n rhs = arg.conversion % arg.name\n printer('%s=%s,', arg.name, rhs)\n printer(')')\n for flag_info in command_info.flags:\n if flag_info.special:\n continue\n rhs = 'FLAGS.%s' % flag_info.name\n if flag_info.conversion:\n rhs = flag_info.conversion % rhs\n printer('if FLAGS[%r].present:', flag_info.name)\n with printer.Indent():\n printer('request.%s = %s', flag_info.name, rhs)\n call_args = ['request', 'global_params=global_params']\n if command_info.has_upload:\n call_args.append('upload=upload')\n printer('upload = None')\n printer('if FLAGS.upload_filename:')\n with printer.Indent():\n printer('upload = apitools_base.Upload.FromFile(')\n printer(' FLAGS.upload_filename, '\n 'FLAGS.upload_mime_type,')\n printer(' progress_callback='\n 'apitools_base.UploadProgressPrinter,')\n printer(' finish_callback='\n 'apitools_base.UploadCompletePrinter)')\n if command_info.has_download:\n call_args.append('download=download')\n printer('download = None')\n printer('if FLAGS.download_filename:')\n with printer.Indent():\n printer('download = apitools_base.Download.'\n 'FromFile(FLAGS.download_filename, '\n 'overwrite=FLAGS.overwrite,')\n printer(' progress_callback='\n 'apitools_base.DownloadProgressPrinter,')\n printer(' finish_callback='\n 'apitools_base.DownloadCompletePrinter)')\n printer(\n 'result = client.%s(', command_info.client_method_path)\n with printer.Indent(indent=' '):\n printer('%s)', ', '.join(call_args))\n printer('print apitools_base_cli.FormatOutput(result)')\n printer()\n printer()", "title": "" }, { "docid": "70601836fcff72eac982fadacdf041a7", "score": "0.6412335", "text": "def _print_commands(parsed_args: argparse.Namespace,\n argtups: tuple, arg_obj: Args) -> None:\n # Start with the beginning of the command so we can add to it\n prepend = f'{sys.argv[0]} {parsed_args.type} {parsed_args.command}'\n print()\n for argtup in argtups:\n cmd = prepend # Make a copy of the prepend string\n pos_args = argtup[0] # Grab the positional arguments\n named_args = argtup[1] # Grab the named/keyword arguments\n # Iterate the needed positional arguments and the positional\n # argument values simultaneously\n for param, 
value in zip(arg_obj.positionals, pos_args):\n # Generate the CLI argument and add it to the command\n cmd += f\" --{param.name} '{value}'\"\n # If there are variable keyword arguments allowed by the method\n if arg_obj.varkw:\n # Generate a JSON dump of the keyword arguments\n json_data = json.dumps(named_args)\n # And add that JSON dump as a command argument\n cmd += f\"\"\" --{arg_obj.varkw.name} '{json_data}' \"\"\"\n # Print the commands with spaces\n print(cmd)\n print()", "title": "" }, { "docid": "7b8896329c668251225463ee3dd42942", "score": "0.6412302", "text": "def list_modules(modules):\n print(\"\\nRan these modules:\")\n [print(\"- {0} {1}\".format(m.__name__, m.__version__)) for m in modules]", "title": "" }, { "docid": "98c6a4af561086571323557863df851f", "score": "0.64087033", "text": "def cli(ctx):\n default_command(ctx, \"list\")", "title": "" }, { "docid": "98c6a4af561086571323557863df851f", "score": "0.64087033", "text": "def cli(ctx):\n default_command(ctx, \"list\")", "title": "" }, { "docid": "3a6a889ea5baaa4483cd85566841a069", "score": "0.64019275", "text": "def list_commands(self, ctx: Any) -> List:\n commands = []\n\n for filename in os.listdir(CMD_FOLDER):\n if filename.endswith('.py') and filename.startswith(CMD_PREFIX):\n commands.append(filename[4:-3])\n\n commands.sort()\n\n return commands", "title": "" }, { "docid": "68b82d98d11b807debe62b3950e1fe3c", "score": "0.6397142", "text": "def list_commands():\n data = \"\"\n data += \"\\n#users -> List the users currently online\"\n data += \"\\n#voice -> Join the voice chat\"\n data += \"\\n#stop_voice -> Leave the voice chat\"\n data += \"\\n#exit -> Exit the voice chat\"\n data += \"\\n#mute -> Mute and unmute yourself\"\n\n return data", "title": "" }, { "docid": "7c87193bd7ab508ae58ac7e3063d4431", "score": "0.638985", "text": "def get_all_commands():\n return [\n cat, cp, cd, date, grep, ls, mv, pwd, rm, run, sleep, tee, touch, \n wc, whoami, yes\n ]", "title": "" }, { "docid": "c1854130aef88af47a753203f0fae3b7", "score": "0.63853294", "text": "def cli(self, commands):\n ret = self.connection.cli(commands)\n\n return ret", "title": "" }, { "docid": "ba697f765d585e1b6b81ea4320a9de14", "score": "0.6354974", "text": "def ls(self):\n for n,m in self._active():\n print '%-20s %s' % (n,m.__file__.replace('\\\\','/'))", "title": "" }, { "docid": "d5998264c282a64a131b135bf91765d9", "score": "0.63520056", "text": "def help(ctx): \n run(ctx, \"invoke --list\")\n print()\n print(__doc__)", "title": "" }, { "docid": "db76f497fb57b5cbbe3e589d67311a56", "score": "0.6346162", "text": "def help(self):\n print(__name__, \"{command}\", \"additional argv...\")\n print()\n\n for name in dir(self):\n if not name.startswith('_'):\n func = getattr(self, name)\n print(name, '-')\n print(func.__doc__)\n print()", "title": "" }, { "docid": "269d8b103f393d80187c3f3c73b777aa", "score": "0.6344005", "text": "def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")", "title": "" }, { "docid": "c782a81672bb5c69a2a3a113f07552c6", "score": "0.63429916", "text": "def cmdPrint( self, *args):\n return self.cmd( *args, **{ 'verbose': True } )", "title": "" }, { "docid": "1c4e5b2a2c0034920832d010c8c99fc7", "score": "0.63429916", "text": "def do_printCLI(self, arg):\n print(\"TODO\")", "title": "" }, { "docid": "7f84f186588a21c045c45c38a1bb7433", "score": "0.63349795", "text": "def list_commands(self, ctx):\n return self.resource.commands", "title": "" }, { "docid": 
"73e9f387a24d52a2448a26e5dd081411", "score": "0.6334739", "text": "async def help(self, ctx: commands.Context):\n command_names = [command for command in self.commands]\n add_prefix = [\n f\"{self.prefix}{word}, \" for word in command_names if word not in self.ignore_commands]\n message = \"\"\n for command in add_prefix:\n message += command\n await ctx.send(f\"Available commands are: {message}\")", "title": "" }, { "docid": "92d326718a74aec6eb755a38e331b939", "score": "0.63329995", "text": "def print_invoke_commands(invoke_file_paths):\n for out_file_path in invoke_file_paths:\n diags.info(\"invoke file written to: {}\".format(out_file_path))", "title": "" }, { "docid": "62d248cea575a020893eb51a826340f8", "score": "0.6327432", "text": "def display_help(argv, commands):\n print(\"Usage: {} subcommand [options] [args]\"\n .format(os.path.basename(argv[0])))\n print(\"Available subcommands:\")\n print()\n print(\"[borneo]\")\n commands.sort()\n for command in commands:\n print(\" {}\".format(command))", "title": "" }, { "docid": "9802c2c867afaa3470f194f0ba1ca8fd", "score": "0.63163435", "text": "def print_command(cmd, cmd_imagick):\n t_custom.insert(END, cmd + \" \")\n cb_custom_command.current(imagick_commands.index(cmd_imagick))", "title": "" }, { "docid": "3992958856bdbb2a162541457413affe", "score": "0.6313916", "text": "def list_commands(self, ctx):\n return self.commands", "title": "" }, { "docid": "d053a54af9e10bfd11150c4819cad86a", "score": "0.6313201", "text": "def help_print(self):\n for command in self.commands.items():\n termcolor.cprint(command[0], attrs=['bold'])\n print(\" \" + command[1].__doc__ + '\\n')\n self.titlebar = \"Showing documentation\"\n return len(self.commands)*3", "title": "" }, { "docid": "94a3a445ee3b7d1382bfabfa7f2ff519", "score": "0.6311344", "text": "def run_all(self) -> None:\n print('-' * 10)\n for command in self._command_list:\n command.do_it()\n print('All done!')", "title": "" }, { "docid": "c676adf7002e0de13465668b9fa99f90", "score": "0.630065", "text": "def print_aliases(self) -> None:\n aliases = self.get_aliases()\n for alias, command in sorted(aliases.items()):\n print(f\"{alias:>4} {command}\")", "title": "" }, { "docid": "9b01264f85e21d4ef937e97af933a6a2", "score": "0.62954783", "text": "def show_help():\n\n print \"\\n\\n\"\n\n print \"[ifaces] Get Interfaces\"\n print \"[icounters] Get Interface counters\"\n print \"\\n\"\n\n print \"[vlans] Show vlans\"\n print \"[arps] Get ARP table\"\n print \"[macs] Get MAC Table\"\n print \"[facts] Facts about the switch\"\n print \"[env] Get Enviroment\"\n print \"\\n\"\n\n print \"[checkpoint] Checkpoint running and startup config\"\n print \"[merge] Merge candidate\"\n print \"[replace] Replace candidate\"\n print \"[discard] Discard candidate\"\n print \"[rollback] Rollback to previous checkpoint\"\n print \"[compare] Compare candidate config against running-config\"\n print \"[commit] Commit Candidate\"\n print \"\\n\"\n\n print \"[reboot] Reload the switch with startup-config (reboot)\"\n\n print \"[help|?] 
Help (this message)\"\n print \"[quit|q] Quit\"", "title": "" }, { "docid": "2deef78fcb72f25e87c93565ad92e4d8", "score": "0.6284886", "text": "def command_list(self):\n\n text = ''\n\n output_address = 0\n for name, attrs in self.used_commands:\n length = attrs['length']\n address = attrs['address']\n offset = attrs['offset']\n direction = attrs['direction']\n\n text += '{2:03x} {0}: {1}'.format(name, length, output_address)\n text += '\\t' + ' '.join(\n '{:02x}'.format(int(byte))\n for byte in self.lz[ address : address + attrs['cmd_length'] ]\n )\n\n if offset is not None:\n repeated_data = self.output[ offset : offset + length * direction : direction ]\n if name == 'flip':\n repeated_data = map(bit_flipped.__getitem__, repeated_data)\n text += ' [' + ' '.join(map('{:02x}'.format, repeated_data)) + ']'\n\n text += '\\n'\n output_address += length\n\n return text", "title": "" }, { "docid": "84f621da752f28c5e5ee93c8209660b3", "score": "0.6273416", "text": "def cli(ctx, pattern):\n matches = utils.grep_commands(pattern)\n if matches:\n for cmd, desc in matches:\n click.secho(\"$ {} :: {}\".format(cmd, desc), fg='green')\n elif matches == []:\n click.echo('No saved commands matches the pattern {}'.format(pattern))\n else:\n click.echo('No commands to show. Add one by `keep new`.')", "title": "" }, { "docid": "8d2e7f3db82a393f42148c6056910eea", "score": "0.6269045", "text": "def list_commands(self, ctx=None):\n cmds = [\n cmd[:-3]\n for cmd in os.listdir(self.command_dir)\n if cmd.endswith(\".py\") and cmd != \"__init__.py\"\n ]\n cmds.sort()\n return cmds", "title": "" }, { "docid": "75e739fb57b7b6c646ec846e93a707b7", "score": "0.626765", "text": "def help():\r\n methods = []\r\n tab = \"\"\r\n # Determine the active module class names\r\n cls_members = inspect.getmembers(sys.modules[__name__], inspect.isclass) # get the list of current module classes\r\n cls_names = [cls[0] for cls in cls_members] # get the names of each class\r\n\r\n for cls in cls_names:\r\n methods = [m for m in dir(eval(cls)) if not m.startswith('__')]\r\n for method in methods: # display method and its __doc__\r\n # Handle output format\r\n if len(method) > 24: tab = \"\\t\"\r\n elif len(method) > 15: tab = \"\\t\\t\"\r\n else: tab = \"\\t\\t\\t\"\r\n print \"%s%s %s\" %(method, tab, eval(\"%s.%s.__doc__\"%(cls,method)))", "title": "" }, { "docid": "9a52f5f5bfcdce8ad82e0d4db297930b", "score": "0.62663984", "text": "def _help(self):\n print('addFile - Adds a new file to the system')\n print('addDir - Adds a new directory to the system')\n print('delete - Deletes a file/directory in the system')\n print('showFileSystem - Shows the file system hierarchy')\n print('exit - Exits the system')\n print('help - Shows all the commands')", "title": "" }, { "docid": "5c8706049a61ff7e9ec5bd0fc52f939b", "score": "0.62612534", "text": "def execute_commands(self, cmds):\n for cmd in cmds:\n if cmd.startswith(\"#\") or cmd == \"\":\n # used as comment\n continue\n print(\"[*] \" + cmd) \n if self.onecmd(cmd): # Stop -> An error happened in one of the commands!\n self.__exit(1)", "title": "" }, { "docid": "aea83820bd2a810b1462fe56abc7fbf6", "score": "0.62466854", "text": "def showInstructions():\n # print a main menu and the commands\n print('''\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n equip sword\n ''')", "title": "" }, { "docid": "c18abaca692df2c195efd62d6fc0a7df", "score": "0.6243001", "text": "def help(args, message):\n\treturn \"Supported commands:\\n\"+\"\\n\".join(\"- \"+command for command in 
sorted(COMMAND_HANDLERS.keys()))", "title": "" }, { "docid": "3ccae7d0c257b65bd00abfaaeb09b0c4", "score": "0.6238527", "text": "def print_help(options, args):\n if not args:\n print(COMMANDS)\n else:\n command_name = args.pop()\n command = lookup_command(command_name)\n print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})", "title": "" }, { "docid": "18c0576996e2be4faec2a09a00e03fbe", "score": "0.623509", "text": "def print_help():\n print \"\\n# Navigation:\"\n print \" pwd - get path of stratus directory\"\n print \" lpwd - get path of local directory\"\n print \" ls - list files in stratus directory\"\n print \" lls - list files in local directory\"\n print \" cd [path] - navigate to stratus directory at path\"\n print \" lcd [path] - navigate to local directory at path\"\n print \" mkdir [path] - create stratus directory at given path\"\n print \" rmdir [path] - remove stratus directory at given path\"", "title": "" }, { "docid": "b12aa23bc76a12ba4d06d122b9448892", "score": "0.62304467", "text": "def list_options():\n print(\"options:\")\n print(\" 0. exit the program\")\n print(\" 1. insert words\")\n print(\" 2. list topics\")\n print(\" 3. list words from a table\")\n print(\" 4. delete word\")\n print(\" 5. delete topic\")\n print(\" 6. delete all topics\")\n print(\" 7. study easy words\")\n print(\" 8. study hard words\")", "title": "" }, { "docid": "38c47c347195712c55a6df78db20a3ca", "score": "0.6224365", "text": "def command_list(args):\n config = get_config(INI_FILE)\n if config.has_option('DEFAULT', 'key'):\n print_list(config, 'DEFAULT')\n for section in config.sections():\n print_list(config, section)", "title": "" } ]
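Several of the negative passages closing this record share the same command-listing idiom: collect the registered command names, prefix or bullet them, and join them into a single help string. A minimal runnable sketch of that idiom follows; the COMMAND_HANDLERS contents and the "!" prefix are illustrative assumptions, not entries from the dataset.

# Sketch of the command-listing idiom used by the help/command passages above.
# COMMAND_HANDLERS and the "!" prefix are placeholder assumptions.
COMMAND_HANDLERS = {
    "help": lambda: "show this message",
    "status": lambda: "report current status",
}

def build_help(prefix="!"):
    # Sort registered command names and render one bullet per command.
    lines = ["- " + prefix + name for name in sorted(COMMAND_HANDLERS.keys())]
    return "Supported commands:\n" + "\n".join(lines)

if __name__ == "__main__":
    print(build_help())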
503fa88ca6c1c1f072a8e6d8dfb77199
Open communications with EBAM Plus unit
[ { "docid": "4e6ef82766b8cadba65d720055a694ad", "score": "0.0", "text": "def connect(self):\n if not self.cli.isOpen():\n self.cli.open()", "title": "" } ]
[ { "docid": "892e4220778f1b71eee360d1382bda18", "score": "0.64897", "text": "def performOpen(self, options={}):\n\n # connect, either through name or by autodetecting\n if self.comCfg.address == '<autodetect>': #input from labber in \"Communication\" section of the driver\n self.daq = zhinst.utils.autoConnect(api_level=ZI_API)\n self.device = zhinst.utils.autoDetect(self.daq)\n else:\n (self.daq, self.device, _) = zhinst.utils.create_api_session(self.comCfg.address, ZI_API, required_devtype='HDAWG', required_err_msg='This driver requires a HDAWG')\n # keep track of node datatypes\n self.daq.setInt('/{}/system/awg/channelgrouping'.format(self.device), 2)\n self._node_datatypes = dict()\n #internal variables to keep track of when AWGs are in use, waveforms are updated, waveform sizes, etc\n self.n_ch = 8\n self.waveform_updated = [False] * (self.n_ch+1) #+1 is for marker waveform\n self.update_sequencer = False\n self.buffer_sizes = [0] * 4\n self.log('Connected', self.device)", "title": "" }, { "docid": "34ed0b0a4501f14833eec75b52414e90", "score": "0.63069475", "text": "def open_connection(self):\n self._dll.connect(self.devID)", "title": "" }, { "docid": "22ca3a115f7393919c902a68f62810f9", "score": "0.61415625", "text": "def open(self):\n self.ts.log('Gridsim Open')\n try:\n if self.comm == 'GPIB':\n raise NotImplementedError('The driver for plain GPIB is not implemented yet. ' +\n 'Please use VISA which supports also GPIB devices')\n elif self.comm == 'VISA':\n try:\n # sys.path.append(os.path.normpath(self.visa_path))\n import visa\n self.rm = visa.ResourceManager()\n self.conn = self.rm.open_resource(self.visa_device)\n self.ts.log('Gridsim Visa config')\n # TODO : Add the connection for AWG430\n # the default pyvisa write termination is '\\r\\n' work with the ELGAR704 (p.3-2 Manual Addendum)\n #self.conn.write_termination = '\\r\\n'\n\n self.ts.sleep(1)\n\n except Exception, e:\n raise gridsim.GridSimError('Cannot open VISA connection to %s\\n\\t%s' % (self.visa_device,str(e)))\n\n else:\n raise ValueError('Unknown communication type %s. 
Use GPIB or VISA' % self.comm)\n\n self.ts.sleep(2)\n\n except Exception, e:\n raise gridsim.GridSimError(str(e))", "title": "" }, { "docid": "9d3786a2da237b1843dd54d68e1cb731", "score": "0.6125363", "text": "def open(self):\n device_type = 'dell_dnos6'\n if self.transport == 'telnet':\n device_type = 'dell_dnos6_telnet'\n self.device = ConnectHandler(device_type=device_type,\n host=self.hostname,\n username=self.username,\n password=self.password,\n **self.netmiko_optional_args)\n # ensure in enable mode\n self.device.enable()", "title": "" }, { "docid": "f0f90578158b373fe8cd59e804804389", "score": "0.6099459", "text": "def open(self):\n try:\n self.log.info(u\"Try to open {0}\".format(self._device))\n if self._fake_device != None:\n self._ser = testserial.Serial(self._fake_device, baudrate=1200, bytesize=7, \n parity = 'E',stopbits=1)\n else:\n self._ser = serial.Serial(self._device, baudrate=1200, bytesize=7, \n parity = 'E',stopbits=1)\n\n self.log.info(u\"Teleinfo modem successfully opened\")\n except:\n error = \"Error opening Teleinfo modem '{0}' : {1} \".format(self._device, traceback.format_exc())\n self.log.error(u\"{0}\".format(error))\n raise TeleinfoException(error)", "title": "" }, { "docid": "73747074f5cf0d1e94b872db62223a5f", "score": "0.5952297", "text": "def ConnectInstr(instrport): #Connect to instrument with modbus RTU protocol\n print('\\n'+time.strftime(\"%Y-%m-%d %H:%M:%S\")+' : Connecting to instrument...')\n instr_obj = minimalmodbus.Instrument(instrport, 1) #modbus protocols\n time.sleep(5)\n try:\n instr_obj.read_register(48, 1)\n except Exception:\n pass\n instr_obj.serial.baudrate = 9600\n\n return instr_obj", "title": "" }, { "docid": "23a469bac485651482360b0a3d2d175e", "score": "0.59511083", "text": "def connect(self):\n self.serial.open()", "title": "" }, { "docid": "74349dbcd7b275c4380067f75c2ec01d", "score": "0.5846741", "text": "def __init__(self, port):\n self.port = port\n ####connects automatically\n self.cli = serial.Serial(self.port, timeout=1.0)\n ### future: automatically populate w/ EBAM unit data", "title": "" }, { "docid": "a08c268b0e0954af3c86e289651446bc", "score": "0.5845523", "text": "def test_jlink_open_ethernet_and_serial_number_context_manager(self):\n self.dll.JLINKARM_OpenEx.return_value = 0\n\n with jlink.JLink(self.lib, serial_no=123456789, ip_addr='127.0.0.1:80') as jl:\n self.assertTrue(jl.opened()) # Opened in CM.\n self.dll.JLINKARM_Close.assert_called() # Closed on exit.\n\n self.assertEqual(0, self.dll.JLINKARM_EMU_SelectIP.call_count)\n self.assertEqual(1, self.dll.JLINKARM_EMU_SelectIPBySN.call_count)", "title": "" }, { "docid": "e7f35ad3d84f3556e0dc9f5bae7ff073", "score": "0.5834078", "text": "def device_send(self):\n\n # Start the communications objects\n self.comms = communications()\n self.comms.start_sending_device()\n\n # Log that communications have started\n log.info(f\"Sending communications have started\")", "title": "" }, { "docid": "ed49f750f58c8f04057db0024d46425f", "score": "0.5809274", "text": "def identify_connection(self):\n\n # keep looping and wait for an ID response\n count = 0\n while True:\n if ((count % 10) == 0):\n self.log.debug(\"Sending EOM command to port '%s'\",\n self.serial_connection.name)\n count += 1\n self.serial_connection.write(OppRs232Intf.EOM_CMD)\n time.sleep(.01)\n resp = self.serial_connection.read(30)\n if resp.startswith(OppRs232Intf.EOM_CMD):\n break\n if (count == 100):\n self.log.error('No response from OPP hardware: %s' %\n self.serial_connection.name)\n sys.exit()\n\n # Send 
inventory command to figure out number of cards\n msg = []\n msg.append(OppRs232Intf.INV_CMD)\n msg.append(OppRs232Intf.EOM_CMD)\n cmd = ''.join(msg)\n \n self.log.debug(\"Sending inventory command: %s\", \"\".join(\" 0x%02x\" % ord(b) for b in cmd))\n self.serial_connection.write(cmd)\n \n time.sleep(.1)\n resp = self.serial_connection.read(30)\n \n # resp will contain the inventory response.\n self.platform.process_received_message(resp)\n\n # Now send get gen2 configuration message to find populated wing boards\n self.send_get_gen2_cfg_cmd()\n\n time.sleep(.1)\n resp = self.serial_connection.read(30)\n\n # resp will contain the gen2 cfg reponses. That will end up creating all the\n # correct objects.\n self.platform.process_received_message(resp)\n\n # get the version of the firmware\n self.send_vers_cmd()\n time.sleep(.1)\n resp = self.serial_connection.read(30)\n self.platform.process_received_message(resp)\n \n # see if version of firmware is new enough\n if (self.platform.minVersion < MIN_FW):\n self.log.critical(\"Firmware version mismatch. MPF requires\"\n \" the %s processor to be firmware %s, but yours is %s\",\n self.remote_processor, create_vers_str(MIN_FW),\n create_vers_str(self.platform.minVersion))\n sys.exit()\n \n # get initial value for inputs\n self.serial_connection.write(self.platform.read_input_msg)\n time.sleep(.1)\n resp = self.serial_connection.read(100)\n self.log.debug(\"Init get input response: %s\", \"\".join(\" 0x%02x\" % ord(b) for b in resp))\n self.platform.process_received_message(resp)", "title": "" }, { "docid": "bf5c1200f0b38993f2cccb2bc281d190", "score": "0.57963777", "text": "def open(self):\n self.sdk.SBC_Open(self._serial)", "title": "" }, { "docid": "6edafa4159f99287b60c5ba13bac91b0", "score": "0.5785546", "text": "def start_comms(self):\n\n db.set(\"comms_enabled\", 0)\n time.sleep(1)\n db.set(\"comms_enabled\", 1)\n\n # Create the status dictionaries for the alien, engineer\n alien = {\n \"moving\": False,\n \"action\": False\n }\n\n engineer = {\n \"moving\": False,\n \"action\": False\n }\n\n # Create a counter for each device to check their connection status\n db.incr(\"engineer-counter\")\n db.incr(\"alien-counter\")\n db.incr(\"compound-counter\")\n\n # Store the stauses in the database\n db.set(\"alien_current_status\", json.dumps(alien))\n db.set(\"engineer_current_status\", json.dumps(engineer))\n\n # Start the communications objects\n self.comms = communications()\n self.comms.start_recieveing()", "title": "" }, { "docid": "487a8673cca4c4b3f06c03d33edb2fea", "score": "0.5755397", "text": "def begin_send(self):", "title": "" }, { "docid": "bbb0ba8270a634a8b512d54f12da4f49", "score": "0.5754953", "text": "def open(self):\n try:\n self._log.info(\"Try to open Teleinfo modem '%s' with speed '%s'\" % (self._device, self._vitesse))\n self._ser = serial.Serial(self._device, self._vitesse, bytesize=7, parity='E', stopbits=1)\n self._log.info(\"Teleinfo modem successfully opened\")\n except:\n error = \"Error opening Teleinfo modem '%s' : %s\" % (self._device, traceback.format_exc())\n self._log.error(error)\n raise TeleinfoException(error)", "title": "" }, { "docid": "8115471a6bc5591919186abab026728b", "score": "0.57320786", "text": "def open(self):\n self.xmlrpc.connect()", "title": "" }, { "docid": "9f2b41672797380bb4f3305d8821f672", "score": "0.57143813", "text": "def test_frontend_to_kernel(self):\n comm_manager = self.comm_manager\n blocking_client = self.blocking_client\n blocking_client.execute(\n \"class DummyCommHandler():\\n\"\n \" 
def __init__(self):\\n\"\n \" get_ipython().kernel.comm_manager.register_target(\\n\"\n \" 'test_api', self.comm_open)\\n\"\n \" def comm_open(self, comm, msg):\\n\"\n \" comm.on_msg(self.comm_message)\\n\"\n \" comm.on_close(self.comm_message)\\n\"\n \" print(msg['content']['data'])\\n\"\n \" def comm_message(self, msg):\\n\"\n \" print(msg['content']['data'])\\n\"\n \"dummy = DummyCommHandler()\\n\"\n )\n # Get input\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'execute_input'\n # Open comm\n comm = comm_manager.new_comm('test_api', data='open')\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'stream'\n assert msg['content']['text'] == 'open\\n'\n # Get message\n comm.send('message')\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'stream'\n assert msg['content']['text'] == 'message\\n'\n # Get close\n comm.close('close')\n msg = self._get_next_msg()\n\n # Received message has a header and parent header. The parent header has\n # the info about the close message type in Python 3\n assert msg['parent_header']['msg_type'] == 'comm_close'\n assert msg['msg_type'] == 'stream'\n assert msg['content']['text'] == 'close\\n'", "title": "" }, { "docid": "d7e92c2345fdd9c2c17def1fc7a8c014", "score": "0.5711419", "text": "def launchConnection(self):\n\n #try:\n # ask for the connexion\n #print \"enter in launchConnection\"\n self.com = NodeCom(self)\n #print \"NodeCom OK\"\n self.com.start()\n #print \"com start()\"\n\n #except:\n # sys.stderr.write(\"EXCEPTION : Can't join the node for starting connection\\n\")\n #f = file(\"Debug.txt\", 'a')\n #f.write(\"EXCEPTION : Can't join the node for starting connection\\n\")\n #f.close()", "title": "" }, { "docid": "d5070dfe14e33a7c228369d733952edc", "score": "0.5710597", "text": "def open(self):\r\n if self.driver_value is None:\r\n self.set_status(STATE_DRIVER_LIBUSB)\r\n self.set_status(STATE_CONNECTING)\r\n if self.index == -1:\r\n for i in range(0, 16):\r\n if self.try_open(i) == 0:\r\n break # We have our driver.\r\n else:\r\n self.try_open(self.index)\r\n self.set_status(STATE_USB_CONNECTED)\r\n self.set_status(STATE_CH341_PARAMODE)\r\n try:\r\n self.driver.CH341InitParallel(self.driver_index, 1) # 0x40, 177, 0x8800, 0, 0\r\n self.set_status(STATE_CH341_PARAMODE_SUCCESS)\r\n except ConnectionError:\r\n self.set_status(STATE_CH341_PARAMODE_FAIL)\r\n self.driver.CH341CloseDevice(self.driver_index)\r\n raise ConnectionRefusedError\r\n self.set_status(STATE_CONNECTED)", "title": "" }, { "docid": "20d5c4735ec8d369ecc00de25d27dda7", "score": "0.5696786", "text": "def test_jlink_open_serial_number_context_manager_manual(self):\n self.dll.JLINKARM_EMU_SelectByUSBSN.return_value = 0\n self.dll.JLINKARM_OpenEx.return_value = 0\n with jlink.JLink(self.lib, open_tunnel=None) as jl:\n # Requires manual open as open_tunnel=None.\n self.dll.JLINKARM_OpenEx.assert_not_called()\n jl.open(serial_no=123456789)\n self.dll.JLINKARM_OpenEx.assert_called()\n self.assertEqual(1, self.dll.JLINKARM_OpenEx.call_count)\n self.assertEqual(1, self.dll.JLINKARM_Close.call_count)", "title": "" }, { "docid": "a488cfc981216804fc8e57165cb65711", "score": "0.56864864", "text": "def setup(self):\n # setup a driver connection to the server\n\n self.log(\"Connecting to server host = %s:%s\" % (self.host, self.port))\n self.driver = pn_driver();\n self.cxtr = pn_connector(self.driver, self.host, self.port, None)\n assert(self.cxtr)\n\n # Enable SSL if database of trusted CAs given\n if self.ca_database:\n self.log(\"Using SSL, CA 
database = %s\" % self.ca_database)\n\n transport = pn_connector_transport(self.cxtr);\n assert(transport);\n ssl_client = pn_ssl(transport)\n assert(ssl_client)\n rc = pn_ssl_set_trusted_ca_db(ssl_client, self.ca_database)\n assert(rc == 0)\n # we want to fail if the server's certificate is invalid:\n rc = pn_ssl_set_peer_authentication(ssl_client, PN_SSL_VERIFY_PEER, None)\n assert(rc == 0)\n\n # configure SASL\n self.sasl = pn_connector_sasl(self.cxtr)\n pn_sasl_mechanisms(self.sasl, \"ANONYMOUS\")\n pn_sasl_client(self.sasl)\n\n # inform the engine about the connection, and link the driver to it.\n self.conn = pn_connection()\n pn_connector_set_connection(self.cxtr, self.conn)\n\n # create a session, and Link for receiving from the mailbox\n self.log(\"Posting to mailbox = %s\" % self.mailbox)\n self.ssn = pn_session(self.conn)\n self.link = pn_sender(self.ssn, \"sender\")\n dst = pn_link_target(self.link)\n pn_terminus_set_address(dst, self.mailbox)\n\n # now open all the engine endpoints\n pn_connection_open(self.conn)\n pn_session_open(self.ssn)\n pn_link_open(self.link)", "title": "" }, { "docid": "b70a5f7a73c90d4530dd8f58ef4cafdd", "score": "0.56788546", "text": "def open(self):\n _LOGGER.info('Connection opened.')", "title": "" }, { "docid": "cc647425a80c3260aec8b8559ea2b759", "score": "0.5654369", "text": "def test_jlink_open_serial_number_context_manager(self):\n self.dll.JLINKARM_EMU_SelectByUSBSN.return_value = 0\n self.dll.JLINKARM_OpenEx.return_value = 0\n with jlink.JLink(self.lib, serial_no=123456789) as jl:\n self.assertTrue(jl.opened()) # Opened in CM.\n self.dll.JLINKARM_Close.assert_called() # Closed on exit.\n self.assertEqual(1, self.dll.JLINKARM_OpenEx.call_count)", "title": "" }, { "docid": "735fa6e3e0489f635d00646c8db618fe", "score": "0.56512487", "text": "async def do_run():\n devices = await async_connect(device=DEVICE)\n # modem = await async_connect(host=HOST,\n # username=USERNAME,\n # password=PASSWORD)\n modem = devices.modem\n _LOGGER.info('Connected')\n _LOGGER.info('Modem Address: %s', modem.address)\n _LOGGER.info('Loading ALDB')\n await modem.aldb.async_load()\n\n _LOGGER.info('ALDB Load Status: %s', modem.aldb.status)\n for record in modem.aldb:\n _LOGGER.info(modem.aldb[record])\n await async_close()", "title": "" }, { "docid": "ad35bb7f3ff6336a133b41aa2b6f80e4", "score": "0.5636076", "text": "def test_kernel_to_frontend(self):\n comm_manager = self.comm_manager\n blocking_client = self.blocking_client\n\n class DummyCommHandler():\n def __init__(self):\n comm_manager.register_target('test_api', self.comm_open)\n self.last_msg = None\n \n def comm_open(self, comm, msg):\n comm.on_msg(self.comm_message)\n comm.on_close(self.comm_message)\n self.last_msg = msg['content']['data']\n self.comm = comm\n \n def comm_message(self, msg):\n self.last_msg = msg['content']['data']\n \n handler = DummyCommHandler()\n blocking_client.execute(\n \"from ipykernel.comm import Comm\\n\"\n \"comm = Comm(target_name='test_api', data='open')\\n\"\n \"comm.send('message')\\n\"\n \"comm.close('close')\\n\"\n \"del comm\\n\"\n \"print('Done')\\n\"\n )\n # Get input\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'execute_input'\n # Open comm\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'comm_open'\n comm_manager._dispatch(msg)\n assert handler.last_msg == 'open'\n assert handler.comm.comm_id == msg['content']['comm_id']\n # Get message\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'comm_msg'\n 
comm_manager._dispatch(msg)\n assert handler.last_msg == 'message'\n assert handler.comm.comm_id == msg['content']['comm_id']\n # Get close\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'comm_close'\n comm_manager._dispatch(msg)\n assert handler.last_msg == 'close'\n assert handler.comm.comm_id == msg['content']['comm_id']\n # Get close\n msg = self._get_next_msg()\n assert msg['header']['msg_type'] == 'stream'", "title": "" }, { "docid": "fea8e6be2053418c4d29498128afefed", "score": "0.5631055", "text": "def target_func(self, comm, msg):\n logger.info(\"COMM OPENED MESSAGE: \\n %s \\n\", str(msg))\n self.comm = comm\n\n @self.comm.on_msg\n def _recv(msg):\n self.handle_comm_message(msg)\n comm.send({\"msgtype\": \"commopen\"})", "title": "" }, { "docid": "382a09aba7077ef8b46df9e86057539a", "score": "0.5627267", "text": "def startProtocol(self):", "title": "" }, { "docid": "ef5dc7cd7ba785e5826c03359c0c79d2", "score": "0.5623721", "text": "def comm_open(self, stream, ident, msg):\n content = msg['content']\n comm_id = content['comm_id']\n target_name = content['target_name']\n f = self.targets.get(target_name, None)\n comm = Comm(comm_id=comm_id,\n shell=self.shell,\n iopub_socket=self.iopub_socket,\n primary=False,\n )\n if f is None:\n self.log.error(\"No such comm target registered: %s\", target_name)\n comm.close()\n return\n self.register_comm(comm)\n try:\n f(comm, msg)\n except Exception:\n self.log.error(\n \"Exception opening comm with target: %s\", target_name, exc_info=True)\n comm.close()\n self.unregister_comm(comm_id)", "title": "" }, { "docid": "295f2687614e5b9517dcaad2d9468166", "score": "0.559955", "text": "def open(self, data=None, metadata=None, buffers=None):\n if data is None:\n data = self._open_data\n comm_manager = getattr(self.kernel, 'comm_manager', None)\n if comm_manager is None:\n raise RuntimeError(\"Comms cannot be opened without a kernel \"\n \"and a comm_manager attached to that kernel.\")\n\n comm_manager.register_comm(self)\n try:\n self._publish_msg('comm_open',\n data=data, metadata=metadata, buffers=buffers,\n target_name=self.target_name,\n target_module=self.target_module,\n )\n self._closed = False\n except:\n comm_manager.unregister_comm(self)\n raise", "title": "" }, { "docid": "57374f59ddacf79bdbfd9ca68993d37f", "score": "0.5584093", "text": "def connect(self):\n try:\n self.serialConnection.open()\n if AMOEBA_SERIAL_COMMS:\n print \"Connect.\"\n time.sleep(1)\n readStr = self.serialConnection.readline()\n print readStr\n if readStr == \"can init ok!!\\r\\n\":\n if AMOEBA_SERIAL_COMMS:\n print \"Connection successfully.\"\n # Start receive thread.\n self.connected = True\n else:\n if AMOEBA_SERIAL_COMMS:\n print \"Connection failed.\"\n self.connected = False\n except serial.serialutil.SerialException:\n raise Exception, \"Channel did not open\"", "title": "" }, { "docid": "dc5575e5a01ff6efd8ae1558663c23ef", "score": "0.5572862", "text": "def connectDevice(self):\n self.cm.connect()", "title": "" }, { "docid": "7928fffe382c97916fa45e10feb7db36", "score": "0.5572019", "text": "def open(self):\n if self.transport == \"ssh\":\n self.device = self._netmiko_open(\n device_type=\"arista_eos\",\n netmiko_optional_args=self.netmiko_optional_args,\n )\n # let's try to determine if we need to use new EOS cli syntax\n sh_ver = self._run_commands([\"show version\"])\n if EOSVersion(sh_ver[0][\"version\"]) >= EOSVersion(\"4.23.0\"):\n self.cli_version = 2\n else:\n try:\n connection = self.transport_class(\n host=self.hostname,\n 
username=self.username,\n password=self.password,\n timeout=self.timeout,\n **self.eapi_kwargs,\n )\n\n if self.device is None:\n self.device = Node(connection, enablepwd=self.enablepwd)\n # does not raise an Exception if unusable\n\n # let's try to determine if we need to use new EOS cli syntax\n sh_ver = self.device.run_commands([\"show version\"])\n self.cli_version = (\n 2 if EOSVersion(sh_ver[0][\"version\"]) >= EOSVersion(\"4.23.0\") else 1\n )\n\n self.device.update_cli_version(self.cli_version)\n except ConnectionError as ce:\n # and this is raised either if device not avaiable\n # either if HTTP(S) agent is not enabled\n # show management api http-commands\n raise ConnectionException(str(ce))", "title": "" }, { "docid": "770b5904c4539e870a0249e958533a0b", "score": "0.557196", "text": "def open(self):\n print \"TRYING TO OPEN\"\n if not self.get_current_user():\n self.redirect()\n print \"Opening...\"\n # callbacks object instance\n self.connection = connection.connection_state.ConnectionState(messagewriter=self.write_message)\n self.connection.progress.display_func = self.progress_update\n \n self.local_message_queue = []\n self.blocking = False\n \n self.BACKEND_CALLBACKS = {} \n\n # Automatically import packages and get function names, while maintaining naming scope\n for subpackagename, subpackage in {\"analytics\": analytics, \"connection\": connection,\n \"data\": data, \"utilities\": utilities}.iteritems():\n package_contents = getmembers(subpackage, ismodule)\n self.BACKEND_CALLBACKS[subpackagename] = {}\n for modname, mod in package_contents:\n mod_contents = getmembers(mod, isfunction)\n self.BACKEND_CALLBACKS[subpackagename][modname] = {}\n self.BACKEND_CALLBACKS[subpackagename][modname].update([o for o in mod_contents])\n\n #for p in subpackages:\n # import and get the modules\n # from\n\n #for name in extension_names:\n # self.BACKEND_CALLBACK.update(dict([o for o in getmembers()])) \n\n # get all functions from connection_state module\n #self.BACKEND_CALLBACKS.update(dict([o for o in getmembers(data_management, isfunction)]))\n #self.BACKEND_CALLBACKS.update(dict([o for o in getmembers(progress, isfunction)]))\n #self.BACKEND_CALLBACKS.update(dict([o for o in getmembers(sparse_low_rank_approximation, isfunction)]))\n \n #self.connection.progress.on_update = self.progress_update # what to do on progress update\n #self.connection.progress.on_complete = self.progress_complete\n\n #self.connection.messagewriter = self.write_message\n \n print 'Connection opened.'\n #print self.BACKEND_CALLBACKS.keys()\n self.write_message(json.dumps({\"open\": \"websocket is connected\"})) # send an opening message", "title": "" }, { "docid": "3358fb772b2fadf9c9f8fd5705a082bc", "score": "0.5570893", "text": "def connect():", "title": "" }, { "docid": "4abafc42499c8d6f05420b6b17461d23", "score": "0.5564476", "text": "def connect(self):\r\n\t\tpass", "title": "" }, { "docid": "a3f0356ff6abf0ab83ad2d89caef3f7f", "score": "0.556142", "text": "def test_jlink_open_ethernet_context_manager(self):\n self.dll.JLNKARM_SelectIP.return_value = 0\n self.dll.JLINKARM_OpenEx.return_value = 0\n self.dll.JLINKARM_GetSN.return_value = 123456789\n\n with jlink.JLink(self.lib, ip_addr='127.0.0.1:80') as jl:\n self.assertTrue(jl.opened()) # Opened in CM.\n self.dll.JLINKARM_Close.assert_called() # Closed on exit.\n\n self.dll.JLINKARM_SelectIP.assert_called_once()", "title": "" }, { "docid": "cd63b20693b29fa26019b37562b8f423", "score": "0.55481255", "text": "def _open(self):\n self._socket = 
socket.create_connection(\n (self._properties[transport_properties.HOST],\n self._properties[transport_properties.PORT]),\n self._properties[transport_properties.CONNECT_TIMEOUT])", "title": "" }, { "docid": "c7a133fb4562a1534ebc15331fa3e027", "score": "0.55386686", "text": "def initialize_hardware(self):\n rm = visa.ResourceManager()\n self.hardware = rm.open_resource(self.address)\n print('Multimeter server running Made contact with:')\n print(self.hardware.query('*IDN?'))\n print('*'*50)\n self.hardware.timeout=2000", "title": "" }, { "docid": "2e0bdfdd1d52c3b37a68eb3ed04ad30e", "score": "0.55350846", "text": "def open_port(self):\n self.port = serial.Serial(self.port_name, baudrate=500000)", "title": "" }, { "docid": "3f7e7bf2594bc80994ddeaff5ef3a281", "score": "0.55249524", "text": "def connect_ied(self):\n\n com_selection = None\n\n # Find all available COM ports\n port_list = []\n available_ports = list_ports.grep('1c11:b04d')\n for port_no, description, address in available_ports:\n port_list.append(port_no)\n\n # Select port to use\n if len(port_list) > 0:\n if len(port_list) > 1:\n com_selection = gui.choicebox(\n \"Please select COM port to use\", \"Select COM port\",\n port_list)\n else:\n com_selection = port_list[0]\n\n try:\n self.ser = serial.Serial()\n self.ser.baudrate = 115200\n self.ser.timeout = 0.5\n self.ser.port = com_selection\n self.ser.open()\n\n if self.ser.isOpen():\n self.status_box['text'] = 'Connected to: {}'.format(\n com_selection)\n\n self.ser.close()\n except IOError:\n gui.msgbox(\n \"Could not connect to the Infinity Ergodox!\\n\"\n \"If the keyboard was just connected, wait a couple \"\n \"of seconds before trying to connect.\",\n \"Connection error\")\n self.ser = None\n self.status_box['text'] = ''", "title": "" }, { "docid": "31703fdb5fa24671f093f515e60b830b", "score": "0.55199933", "text": "def open(self):\n self.run = True\n if not self.debug:\n self._serial.open()\n t = threading.Thread(target=self.read_data)\n t.daemon = True\n t.start()", "title": "" }, { "docid": "f902841825fd891f6bd36cd8ea45b865", "score": "0.55192024", "text": "def connect(self):\n from labrad.wrappers import connectAsync # imports the connectAsync function from pyLabRAD \n '''\n this is where you connect to whatever computer your device server is on\n the default is the current computer but a connection to the another computer may look like:\n self.cxn = yield connectAsync('10.97.112.13', name = \"client_shell\"). 
Multiple computers may also be \n add, ie \n self.cxn1 = yield connectAsync(name = \"client_shell\")\n self.cxn2 = yield connectAsync('10.97.112.13', name = \"client_shell\")\n '''\n self.cxn = yield connectAsync(name = \"client_shell\") #asynchronously connects to the LabRAD manager with the name \"client_shell\"\n '''\n Here is where you will connect to specific device servers on the computer connected to, for example:\n arduino_TTL = self.cxn.arduinottl\n ''' \n self.initializeGUI() #now we call the initializeGUI function", "title": "" }, { "docid": "3cfe2f740582057ba0746df31012537e", "score": "0.5513057", "text": "def open_ser(self):\n self.powersupply.serial_instance.port = self.ser_port.get()\n\n if not self.powersupply.serial_instance.isOpen():\n self.powersupply.serial_instance.open()", "title": "" }, { "docid": "f74fe211b1d2e12b84a9d02962b4f25e", "score": "0.5501444", "text": "def __init__(self):\r\n self.cmgr = ic.ConnectionMgr()\r\n self.cmgr.connectMRU()\r\n self.debug = ic.CDebugFacade(self.cmgr)", "title": "" }, { "docid": "e723b2dabb004123c50b5f7dd23b2d57", "score": "0.55013734", "text": "def testOpenStream(self):\r\n\r\n events = []\r\n\r\n def on_stream_start(stream):\r\n events.append('ibb_stream_start')\r\n\r\n\r\n self.xmpp.add_event_handler('ibb_stream_start', on_stream_start)\r\n\r\n t = threading.Thread(name='open_stream',\r\n target=self.xmpp['xep_0047'].open_stream,\r\n args=('tester@localhost/receiver',),\r\n kwargs={'sid': 'testing'})\r\n t.start()\r\n\r\n self.send(\"\"\"\r\n <iq type=\"set\" to=\"tester@localhost/receiver\" id=\"1\">\r\n <open xmlns=\"http://jabber.org/protocol/ibb\"\r\n sid=\"testing\"\r\n block-size=\"4096\"\r\n stanza=\"iq\" />\r\n </iq>\r\n \"\"\")\r\n\r\n self.recv(\"\"\"\r\n <iq type=\"result\" id=\"1\"\r\n to=\"tester@localhost\"\r\n from=\"tester@localhost/receiver\" />\r\n \"\"\")\r\n\r\n t.join()\r\n\r\n time.sleep(0.2)\r\n\r\n self.assertEqual(events, ['ibb_stream_start'])", "title": "" }, { "docid": "07cdc6a0192048d486151f6acbfae0cd", "score": "0.5496872", "text": "def basic_connection_test(self):\n if not self.apn:\n self.apn = 4061702\n if not self.holly_server:\n self.holly_server = 'linux5578'\n self.body = textwrap.dedent(\"\"\"\\\n STARTCALL\n REPORT MDTA basic connection test\n IGNORE answer asr_session document_dump document_transition fetch grammar_activation license log note prompt recognition_start recognition_end redux severe sip_session system_response transfer_start transfer_end vxml_event vxml_trace warning\n EXPECT call_start\n ENDCALL\n \"\"\")", "title": "" }, { "docid": "1b9d48b2b6734ac4a2559a4252783fb0", "score": "0.5492484", "text": "def connect(self):\n\t\tpass", "title": "" }, { "docid": "e44b07800882b1b05502c3b6b59d75b7", "score": "0.5487874", "text": "def send_OPC(self):\n self.scpi.OPC.w()", "title": "" }, { "docid": "f2a7cc3ddd1f27ccaeb77feffc6dd149", "score": "0.5469114", "text": "def run(self):\n\n # self.start_logging()\n # self.msgbus_publish('LOG','%s Start mqtt2gpio adapter; Version: %s, %s '%('INFO', __VERSION__ ,__DATE__))\n self.read_config()\n self.start_logging()\n self.start_mqttbroker()\n self.start_socketcan()\n\n # self.msgbus_publish('MQTT_TX','123456')\n self.msgbus_subscribe('MQTT_RX',self.mqttif)\n self.msgbus_subscribe('CAN_RX',self.canif)", "title": "" }, { "docid": "3201aa2dbdded8eebfcb91456692eebe", "score": "0.546679", "text": "def launch (): \n core.registerNew(CentralComponent)\n\n\n #core.openflow.addListenerByName(\"ConnectionUp\", _init_datapath, priority=2, once=False)", 
"title": "" }, { "docid": "ea80d189027601a9ba53f012dd531d68", "score": "0.5461234", "text": "def open(self):\n print(\"Entering OPEN()\")\n if self.ser is None:\n return\n else:\n self.ser.isOpen()\n self.ser.flushInput()\n print(\"Leaving OPEN()\")", "title": "" }, { "docid": "4f0db3f3894f54cecc5e8bb57857b890", "score": "0.54567724", "text": "def __init__(self):\n try:\n self.handle = usbtmc.Instrument(int(self.VID, 16), int(self.PID, 16))\n self.handle.timeout = 5000\n except USBError:\n devlist = usbtmc.list_devices()\n for dev in devlist:\n if dev.idProduct == int(self.PID, 16) and dev.idVendor == int(self.VID, 16):\n dev.reset()\n self.handle = usbtmc.Instrument(int(self.VID, 16), int(self.PID, 16))\n break\n raise USBError('Unable to establish connection with oscilloscope.')\n\n print('Connected to Rigol oscilloscope as ' + repr(self.handle.device))\n self.ch1 = self.Channel(1, self)\n self.ch2 = self.Channel(2, self)", "title": "" }, { "docid": "2b355bcbc909f53dd6b8b3f9baee1b58", "score": "0.5451597", "text": "def open(self):\n\t\tself.verboseMessage('Establishing session...')\n\n\t\tidquery = enum.ViBoolean()\n\t\tresetDevice = enum.ViBoolean()\n\t\tinstrumentHandle = enum.ViSession()\n\t\tstatus = self.library.Open(self.resourceName_c, idquery, resetDevice, byref(instrumentHandle))\n\n\t\tif status==vicons.VI_SUCCESS:\n\t\t\tself.verboseMessage('Done establishing session.')\n\t\t\tself.instrumentHandle = instrumentHandle\n\t\t\tself.idQuery = idquery\n\t\t\tself.resetDevice = resetDevice\n\t\t\tself.setIntegrationTime(self.integrationTime)\n\t\telse:\n\t\t\traise Exception('Failed to establish session with device. Error code: {} : {}.'.format(status))\n\n\t\treturn status", "title": "" }, { "docid": "7407477036bbc24f5b08143db7dd335f", "score": "0.5451442", "text": "def open_device_connection(self):\n try:\n server_address = (self.device_ip_address, 41795)\n print(\"Attempting to connect to %s port %s\" % server_address)\n self.sock.connect(server_address)\n except:\n print(\"Error: Unable to connect to device.\")\n exit()", "title": "" }, { "docid": "f01f53d248dd810287de98bde165314c", "score": "0.5442433", "text": "def start(self):\n if self.connected == True:\n # Start the receive thread.\n self.read = True\n thread.start_new_thread(self.readFromBus,())\n # Start the experiment.\n self.serialConnection.flush()\n self.serialConnection.write(\"Start:\\n\")\n if AMOEBA_SERIAL_COMMS_MONITOR:\n print \"Start:\"\n self.stringFromBus = []", "title": "" }, { "docid": "776e150ab4f3b9262fd6f506b1171e00", "score": "0.5434398", "text": "def connect(self):\n self._discovery()\n\n self.sender = SumoSender(self.host, self.c2d_port)\n self.receiver = SumoReceiver('', self.d2c_port, self.sender)\n self.display = SumoDisplay(self.receiver)\n\n self.receiver.start()\n self.sender.start()\n self.display.start()", "title": "" }, { "docid": "990972e28d0d4e2d687f99e5dc2cec33", "score": "0.54335743", "text": "def Open(self):", "title": "" }, { "docid": "990972e28d0d4e2d687f99e5dc2cec33", "score": "0.54335743", "text": "def Open(self):", "title": "" }, { "docid": "755f6f34825079e4e8d0f7e1252e5f44", "score": "0.54225606", "text": "def main():\n present_online_mids, offline_mids, defective_ports = [], [], []\n for i in range(0,MAX_PORTS):\n path = '/dev/ttyUSB' + str(i)\n if os.path.exists(path):\n try:\n s = serial.Serial(port=path, baudrate=9600, bytesize=8, parity='N', stopbits=1, timeout=2)\n if not s.is_open:\n s.open()\n assert s.is_open\n mid = read_from_port(s, 252) # get machine id\n assert mid > 
0\n write_to_port(s, 253, 100) # set fan speed\n write_to_port(s, 254, 0) # set heater value\n current_temp = read_from_port(s, 255) # get current temperature\n assert current_temp >= MIN_TEMP and current_temp <= MAX_TEMP\n present_online_mids.append(mid)\n except:\n if s.is_open:\n if mid > 0:\n offline_mids.append(mid)\n else:\n defective_ports.append(path)\n if s.is_open:\n s.close()\n check_mids = list(set(online_mids).difference(set(present_online_mids)))\n msg = create_message(check_mids, defective_ports)\n if len(msg) == 0:\n msg = \"Nothing out of order was detected on the server. :)\"\n #thread.start_new_thread(mailer.email, (ADMIN_EMAIL, \"Health report of SBHS Server\", msg))\n mailer.email(ADMIN_EMAIL, \"Health report of SBHS Server\", msg)\n #print msg", "title": "" }, { "docid": "ae47aab37d3aa5f80c98f41e0e50b9c5", "score": "0.5420774", "text": "def __init__(self, dm_file, db_file, net_intf, port=5683, cfg_file_name=\"cfg/agent.json\", debug=False):\n abstract_agent.AbstractAgent.__init__(self, dm_file, db_file, net_intf, cfg_file_name, debug)\n self._can_start = True\n\n # Initialize the underlying Agent DB MTP details for CoAP\n resource_path = 'usp'\n ip_addr = self._get_ip_addr(net_intf)\n if ip_addr is not None:\n url = \"coap://\" + ip_addr + \":\" + str(port) + \"/\" + resource_path\n num_local_agent_coap_mtps = self._init_db_for_mtp(ip_addr, port, resource_path)\n\n # We only support 1 Local Agent CoAP MTP\n if num_local_agent_coap_mtps == 1:\n self._mdns_listener = mdns.Listener()\n self._mdns_listener.listen()\n\n self._binding = coap_usp_binding.CoapUspBinding(ip_addr, self._endpoint_id, port,\n resource_path=resource_path, debug=debug)\n self._binding.listen(url)\n\n self._mdns_announcer = mdns.Announcer(ip_addr, port, resource_path, self._endpoint_id)\n self._mdns_announcer.announce(self._get_friendly_name(), self._get_subtypes())\n\n value_change_notif_poller = CoapValueChangeNotifPoller(self._db)\n value_change_notif_poller.set_mdns_listener(self._mdns_listener)\n value_change_notif_poller.set_binding(self._binding)\n self.set_value_change_notif_poller(value_change_notif_poller)\n\n self.init_subscriptions()\n else:\n self._can_start = False\n self._logger.error(\"The Agent must have 1 and only 1 CoAP Local Agent MTP , %s were found - EXITING\",\n str(num_local_agent_coap_mtps))\n else:\n self._can_start = False\n self._logger.error(\"IP Address could not be found for provided Network Interface [%s] - EXITING\",\n net_intf)", "title": "" }, { "docid": "dfaca6b7bed812c8dbf124c947cf78ab", "score": "0.5420115", "text": "def open ():\n pass", "title": "" }, { "docid": "9326b965557d7fb883995c336f417d5b", "score": "0.5397579", "text": "def start(self, event):\r\n self.send_presence()\r\n self.get_roster()\r\n\r\n # For the purpose of demonstration, we'll set a very small block\r\n # size. The default block size is 4096. 
We'll also use a window\r\n # allowing sending multiple blocks at a time; in this case, three\r\n # block transfers may be in progress at any time.\r\n stream = self['xep_0047'].open_stream(self.receiver)\r\n\r\n with open(self.filename) as f:\r\n data = f.read()\r\n stream.sendall(data)", "title": "" }, { "docid": "ae9ed318543750b5d0310d367b95edc2", "score": "0.5395365", "text": "def connectionSetup(self):\n self.logger.info(\"Create the connection to the mgr....\")\n # Create a connection to Hal driver mgr\n\n # Create a connection to Hal driver mgr\n self.mgrConnection = HalTransport(HalTransport.HalTransportClientMgr, HalTransport.HalClientMode,\n disconnectHandlerCb=self.connectionDisconnectCb)\n self.mgrConnection.connects()\n\n self.HalMsgsHandler[self.mgrConnection.socket] = self.recvRegisterMsgCb\n\n self.HalMsgsHandler[self.mgrConnection.socket] = self.recvRegisterMsgCb\n\n # register the mgr socket\n self.dispatcher.fd_register(self.mgrConnection.socket, zmq.POLLIN, self.ptpdrv_hal_cb)\n self.dispatcher.fd_register(self.mgrConnection.monitor, zmq.POLLIN, self.ptpdrv_hal_cb)", "title": "" }, { "docid": "a445b003c0566e7b9914c9695fb02d2b", "score": "0.53949326", "text": "def open(self):\n\n self.socket.bind(self.address)\n self.socket.listen(self.queue)", "title": "" }, { "docid": "e116a2c6c48c4631aeda752eca45da3d", "score": "0.53895444", "text": "def OpenConnectionManager(connections, mode, inspection_mode, confirm_to_close):", "title": "" }, { "docid": "5d8506570ea0b2bcac896e5f18a7c1b9", "score": "0.53796446", "text": "def send(self):", "title": "" }, { "docid": "cb2acde760113e43f1c96b4d1153176e", "score": "0.5377481", "text": "def connect(self):\n self.conn = BlitzGateway(self.omero_username, self.omero_password,\n host=self.omero_host,\n port=self.omero_port,\n secure=True)\n self.conn.connect()\n if self.group_id is not None:\n self.conn.setGroupForSession(self.group_id)", "title": "" }, { "docid": "4847ed89ade37af53be9b8fc1225eef6", "score": "0.5377238", "text": "def open(self, port):\n #pdb.set_trace()\n self.glovePntr = 0;\n self.glovePntr = self.gloveDLL.fdOpen(port)\n if self.glovePntr == 0:\n raise IOError(\"Could not connect to 5DT glove.\")", "title": "" }, { "docid": "35a98635da02ff1b02e811db69d054e7", "score": "0.5367362", "text": "def test_jlink_open_ethernet_and_serial_number(self):\n self.dll.JLINKARM_OpenEx.return_value = 0\n\n self.jlink.open(serial_no=123456789, ip_addr='127.0.0.1:80')\n\n self.assertEqual(0, self.dll.JLINKARM_EMU_SelectIP.call_count)\n self.assertEqual(1, self.dll.JLINKARM_EMU_SelectIPBySN.call_count)", "title": "" }, { "docid": "bfe08ec5120bb7e79196da965335ff48", "score": "0.5363972", "text": "def open(self):\n print(\"Open\")\n self.PIN.ON()", "title": "" }, { "docid": "a2aa49e778a193736a33e7a3132ed145", "score": "0.5363606", "text": "def OpenConnection(self):\n try:\n self.s = serial.Serial(self.port,baudrate=9600,bytesize=serial.SEVENBITS,parity=serial.PARITY_ODD,stopbits=serial.STOPBITS_ONE) # Open the port and keep the output in self.s\n time.sleep(2) # Wait 2 seconds\n self.s.flushInput() # Remove data from input buffer\n Init=\"1a\\r\" # This str input ask the device to do an auto adressing (First valve=\"a\"; Second valve=\"b\"; ....)\n self.s.write(Init.encode()) # Send the command to the device encoded in UTF-8\n for i in range (0,Config.NbofValve): # We want to initialize every valve adressed \n string2Send=Config.ValveNB[i]+\"LXR\\r\" # This str input initialize the current valve (Config.ValveNB[0]='a';Config.ValveNB[1]='b')\n 
self.s.write(string2Send.encode()) # Send the command to the device encoded in UTF-8\n Valve.WaitForIdle(self) # Use the method WaitForIdle to be sure the initialization is finished before sending new instructions\n self.ValveState[\"ValvePosition\"+str(Config.ValveNB[i])]=1 # Save the position of each valve (After initialization every valve shall be in position 1) \n self.ValveState[\"Outrange\"+str(Config.ValveNB[i])]=\"Values within limits\" # For now all the values are in the limits\n except AttributeError:\n print(\"The port \"+str(self.port)+\"is already opened\")\n except serial.SerialException:\n print(\"Wrong port given, please check the file Config.py\")", "title": "" }, { "docid": "5af61a111b53987b61e5e1340255264f", "score": "0.5354413", "text": "def __init__(self, name):\n self.driver=lldriver.GpibDevice(name) #initialize a generic GPIB device\n \n self.data_mode = self.queryDataMode()\n self.acquire_mode = self.queryAcquireMode()\n self.data_width = self.queryDataWidth()\n self.horizontal_scale = self.queryHorizontalScale()\n self.record_length = self.queryRecordLength()\n self.num_of_data_points = self.queryReadLength()\n self.start_point, self.stop_point = self.queryReadStartStopPoints()\n self.data_source = self.queryReadChannels()\n self.verifyAllVerticalScales()\n self.verifyAllVerticalPositions()\n self.horizontal_position = self.queryHorizontalPosition()\n self.trigger_channel = self.queryTriggerChannel()\n self.trigger_level = self.queryTriggerLevel()\n self.trigger_type = self.queryTriggerType()\n \"\"\"\n self.data_mode = DATA_ENCODING_RIB #set the data to ascii\n self.driver.write(SET_DATA_ENCODING + self.data_mode) #inform the scope\n \n self.data_source = DATA_SOURCE_CH3 # select the channel to read from\n self.number_of_channels = 1\n self.driver.write(SET_DATA_SOURCE + self.data_source) #inform the scope\n \n self.acquire_mode = ACQUIRE_MODE_SAMPLE\n self.driver.write(SET_ACQUIRE_MODE + self.acquire_mode)\n \n self.data_width = DATA_WIDTH_8BIT\n self.driver.write(SET_DATA_WIDTH + self.data_width)\n\n self.driver.write(QUERY_HORIZONTAL_RECORDLENGTH)\n self.acquire_points = self.driver.read(100) # obtain the number of\n #points the scope is sending\n\n #snap the data size to the whole waveform\n self.start_point = '0\\n'\n self.stop_point=self.acquire_points\n self.num_of_data_points = int(self.stop_point) - int(self.start_point)\n self.driver.write(SET_DATA_START + self.start_point)\n self.driver.write(SET_DATA_STOP + self.stop_point)\n \"\"\"", "title": "" }, { "docid": "0371c5070b63835ab0187b59422d6786", "score": "0.53447163", "text": "def open(self, transport):\n raise NotImplementedError(self.open)", "title": "" }, { "docid": "81aa1bb817934e75ef64effbaba2e795", "score": "0.5339862", "text": "def open(self):\r\n try:\r\n if self.use_secret:\r\n secret = self.password\r\n else:\r\n secret = ''\r\n\r\n self.device = ConnectHandler(device_type='ruckus_fastiron',\r\n ip=self.hostname, # saves device parameters\r\n port=self.port,\r\n username=self.username,\r\n password=self.password,\r\n timeout=self.timeout,\r\n secret=secret,\r\n verbose=True)\r\n self.device.session_preparation()\r\n # image_type = self.device.send_command(\"show version\") # find the image type\r\n # if image_type.find(\"SPS\") != -1:\r\n # self.image_type = \"Switch\"\r\n # else:\r\n # self.image_type = \"Router\"\r\n\r\n except Exception:\r\n raise ConnectionException(\"Cannot connect to switch: %s:%s\" % (self.hostname,\r\n self.port))", "title": "" }, { "docid": 
"1ea80cf76fef96f073223cd7d33f6d21", "score": "0.5338131", "text": "def open(self):\n self.open_bit.put(1)", "title": "" }, { "docid": "cf69620355cede30ac2427b65fa2d899", "score": "0.53283995", "text": "def sendLeaveSystemRequestOIE(self):\n #on incrémente le numero de sequence\n self.numeroSequence = self.numeroSequence + 1\n # On construit le message\n longueur = 4\n tipe = 0b001001\n\n z = ( self.numeroSequence << 6 ) + tipe \n deconnexion = struct.pack( '!HH', longueur , z)\n self.transport.write(deconnexion)\n print('je quitte l application')\n self.clientProxy.leaveSystemOKONE()", "title": "" }, { "docid": "87dab1e0ffa840fee13d9d44d54d38b7", "score": "0.53270954", "text": "def __init__(self):\n Device.__init__(self)\n self.__emitter = EventEmitter()\n self.__state = opener_state.CLOSED", "title": "" }, { "docid": "ff07dc148a6dec4425684552ba60d69e", "score": "0.5326324", "text": "def start(self, event):\r\n self.send_presence()\r\n self.get_roster()\r\n\r\n def command_success(iq, session):\r\n print('Command completed')\r\n if iq['command']['form']:\r\n for var, field in iq['command']['form']['fields'].items():\r\n print('%s: %s' % (var, field['value']))\r\n if iq['command']['notes']:\r\n print('Command Notes:')\r\n for note in iq['command']['notes']:\r\n print('%s: %s' % note)\r\n self.disconnect()\r\n\r\n def command_error(iq, session):\r\n print('Error completing command')\r\n print('%s: %s' % (iq['error']['condition'],\r\n iq['error']['text']))\r\n self['xep_0050'].terminate_command(session)\r\n self.disconnect()\r\n\r\n def process_form(iq, session):\r\n form = iq['command']['form']\r\n answers = {}\r\n for var, field in form['fields'].items():\r\n if var != 'FORM_TYPE':\r\n if field['type'] == 'boolean':\r\n answers[var] = raw_input('%s (y/n): ' % field['label'])\r\n if answers[var].lower() in ('1', 'true', 'y', 'yes'):\r\n answers[var] = '1'\r\n else:\r\n answers[var] = '0'\r\n else:\r\n answers[var] = raw_input('%s: ' % field['label'])\r\n else:\r\n answers['FORM_TYPE'] = field['value']\r\n form['type'] = 'submit'\r\n form['values'] = answers\r\n\r\n session['next'] = command_success\r\n session['payload'] = form\r\n\r\n self['xep_0050'].complete_command(session)\r\n\r\n session = {'next': process_form,\r\n 'error': command_error}\r\n\r\n command = self.command.replace('-', '_')\r\n handler = getattr(self['xep_0133'], command, None)\r\n\r\n if handler:\r\n handler(session={\r\n 'next': process_form,\r\n 'error': command_error\r\n })\r\n else:\r\n print('Invalid command name: %s' % self.command)\r\n self.disconnect()", "title": "" }, { "docid": "4ea199ea93ddc63345253fd62c83ac49", "score": "0.5320216", "text": "def open(self, address):\n\t\traise NotImplementedError", "title": "" }, { "docid": "9fba76a8baac7863962cf67fe51af9c5", "score": "0.5310596", "text": "def open(self):\n self.xmlrpc.open()", "title": "" }, { "docid": "e14820a3048f008e6b18886065eb331a", "score": "0.53094405", "text": "def _connect():\n pass", "title": "" }, { "docid": "09a055ddad1ad23b249ca47e737d84c4", "score": "0.53073865", "text": "def setup(self, _port, _pin=\"\", _puk=\"\", _pin2=\"\", _puk2=\"\",\n _baudrate=115200, _parity=serial.PARITY_NONE,\n _stopbits=serial.STOPBITS_ONE, _bytesize=serial.EIGHTBITS,\n _timeout_sec=2):\n # Close potential previous GSM session\n self.__timeout_sec = _timeout_sec\n try:\n self.close()\n except Exception:\n pass\n\n # Create new GSM session\n try:\n self.__serial = serial.Serial(\n port=_port,\n baudrate=_baudrate,\n parity=_parity,\n stopbits=_stopbits,\n 
bytesize=_bytesize,\n timeout=_timeout_sec\n )\n except serial.serialutil.SerialException:\n logging.error(\"Invalid serial port '\"+str(_port)+\"'\")\n return False\n\n # Initialize the GSM module with specific commands\n is_init = True\n if self.__serial.isOpen():\n # Disable echo from GSM device\n if not self.__sendCmdAndCheckResult(GSMTC35.__BASE_AT+\"E0\"):\n logging.warning(\"Can't disable echo mode (ATE0 command)\")\n # Use verbose answer (GSM module will return str like \"OK\\r\\n\" and not like \"0\")\n if not self.__sendCmdAndCheckResult(GSMTC35.__BASE_AT+\"V1\"):\n logging.error(\"Can't set proper answer type from GSM module (ATV command)\")\n is_init = False\n # Use non-verbose error result (\"ERROR\" instead of \"+CME ERROR: (...)\")\n if not self.__sendCmdAndCheckResult(GSMTC35.__NORMAL_AT+\"CMEE=0\"):\n logging.warning(\"Can't set proper error format returned by GSM module (CMEE command)\")\n\n # Enter PIN/PUK/PIN2/PUK2 as long as it is required (and that all goes well)\n # If PIN/PUK/PIN2/PUK2 in not specified but is needed, a warning will be displayed\n # but the function will continue.\n pin_status = \"\"\n while is_init and (pin_status != GSMTC35.eRequiredPin.READY):\n req_pin_result, pin_status = self.getPinStatus()\n if (not req_pin_result) or (len(pin_status) <=0):\n logging.error(\"Failed to get PIN status\")\n is_init = False\n elif pin_status == GSMTC35.eRequiredPin.READY:\n logging.debug(\"No PIN needed\")\n break\n elif pin_status == GSMTC35.eRequiredPin.PIN:\n if len(_pin) > 0:\n if not self.enterPin(_pin):\n logging.error(\"Invalid PIN \\\"\"+str(_pin)+\"\\\" (YOU HAVE A MAXIMUM OF 3 TRY)\")\n is_init = False\n else:\n logging.debug(\"PIN entered with success\")\n else:\n logging.warning(\"Some initialization may not work without PIN activated\")\n break\n elif pin_status == GSMTC35.eRequiredPin.PUK:\n if len(_puk) > 0:\n if not self.enterPin(_puk):\n logging.error(\"Invalid PUK \\\"\"+str(_puk)+\"\\\"\")\n is_init = False\n else:\n logging.debug(\"PUK entered with success\")\n else:\n logging.warning(\"Some initialization may not work without PUK activated\")\n break\n elif pin_status == GSMTC35.eRequiredPin.PIN2:\n if len(_pin2) > 0:\n if not self.enterPin(_pin2):\n logging.error(\"Invalid PIN2 \\\"\"+str(_pin2)+\"\\\" (YOU HAVE A MAXIMUM OF 3 TRY)\")\n is_init = False\n else:\n logging.debug(\"PIN2 entered with success\")\n else:\n logging.warning(\"Some initialization may not work without PIN2 activated\")\n break\n elif pin_status == GSMTC35.eRequiredPin.PUK2:\n if len(_puk2) > 0:\n if not self.enterPin(_puk2):\n logging.error(\"Invalid PUK2 \\\"\"+str(_puk2)+\"\\\"\")\n is_init = False\n else:\n logging.debug(\"PUK2 entered with success\")\n else:\n logging.warning(\"Some initialization may not work without PUK2 activated\")\n break\n\n #Disable asynchronous triggers (SMS, calls, temperature)\n self.__disableAsynchronousTriggers()\n\n # Set to text mode\n if not self.__sendCmdAndCheckResult(GSMTC35.__NORMAL_AT+\"CMGF=1\"):\n logging.error(\"Impossible to set module to text mode (CMGF command)\")\n is_init = False\n # Select fixed baudrate communication\n if not self.__selectBaudrateCommunicationType(_baudrate):\n # Some function will not work if this is not activated (alarm, wake-up ACK, ...)\n logging.warning(\"Impossible to have fixed baudrate communication (IPR command)\")\n\n self.__initialized = is_init\n if not self.__initialized:\n self.__serial.close()\n\n return self.__initialized", "title": "" }, { "docid": "edd8731b7e3bbf7617808a0e35b90b78", 
"score": "0.53048444", "text": "def run(self):\n self.api.log(\"started\")\n if hasattr(self.api,'registerCommand'):\n self.api.registerCommand('desk2','switch_desk.sh',['2'],client='local',iconFile='rpi.png')\n else:\n self.error=\"cannot register command (avnav version too old)\" \n if self.error is not None:\n self.api.setStatue('ERROR',self.error)\n else: \n self.api.setStatus('NMEA','running')\n while not self.api.shouldStopMainThread():\n time.sleep(1)", "title": "" }, { "docid": "e37f40d611f92dd346e7ee505286bee0", "score": "0.53013", "text": "def connect(self):\n self.transport.connect()", "title": "" }, { "docid": "223df39e95a52906cd233f7247ed3c45", "score": "0.52851903", "text": "def Connect(self):\n log.debug(\"Connect to telescope hardware\")\n response = self._SendMsg([\"mount\", \"connect\"])\n log.debug(f\"Telescope says {response}\")\n return response", "title": "" }, { "docid": "85de9c178bfbb661b66948c1a7dc9101", "score": "0.5275225", "text": "def ooo_connect():\n # get the uno component context from the PyUNO runtime\n localContext = uno.getComponentContext()\n\n # create the UnoUrlResolver\n resolver = localContext.ServiceManager.createInstanceWithContext(\"com.sun.star.bridge.UnoUrlResolver\", localContext )\n\n # connect to the running office\n try:\n ctx = resolver.resolve(\"uno:socket,host=localhost,port=%s;urp;StarOffice.ComponentContext\"%(options.port))\n except:\n raise Exception(\"Can't connect to openoffice.org server\")\n\n smgr = ctx.ServiceManager\n\n # get the central desktop object\n desktop = smgr.createInstanceWithContext(\"com.sun.star.frame.Desktop\",ctx)\n d = dict()\n d['ctx'] = ctx\n d['smgr'] = smgr\n d['doc'] = None\n d['desktop'] = desktop\n return d", "title": "" }, { "docid": "bfe306696869e5859158490022c6e142", "score": "0.52710205", "text": "def open(self):\n if self.is_disposed:\n raise ObjectDisposedException(\"I2CBus\")\n\n if self.__isOpen:\n return\n\n try:\n self.__bus = SMBus(self.__busID)\n except OSError or IOError:\n msg = \"Error opening bus '\" + str(self.__busID) + \"'.\"\n raise IOException(msg)\n\n if self.__bus.fd is None:\n msg = \"Error opening bus '\" + str(self.__busID) + \"'.\"\n raise IOException(msg)\n\n self.__isOpen = True", "title": "" }, { "docid": "4164e6b8166547283d521dd81a08f8a2", "score": "0.52655077", "text": "def open_bus(self):\n self.handle = self.pi.bb_i2c_open(self.SDA, self.SCL, self.CF)", "title": "" }, { "docid": "3d6fd473b6411eebcbe21d1898294eee", "score": "0.5263931", "text": "def _start_event_dispatch(self):\n\n # gateway host and port to compose URL:\n # TODO commented\n # host = CFG.get_safe('server.oms.host', \"localhost\")\n # port = CFG.get_safe('server.oms.port', \"5000\")\n # path = CFG.get_safe('server.oms.path', \"/ion-service/oms_event\")\n\n #the above are defined in pyon.cfg\n #we will override local host for debugging inside the VM\n host = \"10.208.79.19\"\n # TODO commented\n # self.listener_url = \"http://%s:%s%s\" % (host, port, path)\n # self._register_event_listener(self.listener_url)\n\n return \"OK\"", "title": "" }, { "docid": "632991bac77cd039b5aac829953e7cb7", "score": "0.5263058", "text": "def test_sdpln_start_up_telescope_mid():", "title": "" }, { "docid": "bc501ca0bc67810ebf40ba5c08c08cb0", "score": "0.52578485", "text": "def run(self,parent, number, name=None):\n # We open a new window for a call\n #logger.info(\"Caller run\")\n \n print \"caller run\"\n print \"in caller: \", name\n self.gsm_service = tichy.Service('GSM')\n \n #print parent\n #print dir(parent)\n main = 
parent\n #main = parent\n #main.etk_obj.visibility_set(1)\n #main = parent\n main.etk_obj.title_set('Paroli Call')\n \n self.edje_file = os.path.join(os.path.dirname(__file__),'paroli-dialer.edj')\n self.edje_obj = gui.edje_gui(main,'tele',self.edje_file)\n self.edje_obj.edj.signal_callback_add(\"func_btn\", \"*\", self.func_btn)\n self.edje_obj.edj.signal_callback_add(\"add_digit\", \"*\", self.add_digit)\n self.edje_obj.edj.signal_callback_add(\"*\", \"*\", self.gui_signals)\n #self.edje_obj.edj.signal_callback_add(\"num_field_pressed\", \"*\", self.num_field_pressed)\n \n \n #self.window = gui.Window(parent)\n #frame = self.view(self.window, title='Dialer')\n #vbox = gui.Box(frame, axis=1)\n\n #text = tichy.Text(\"Initialization\")\n #text.view(vbox)\n\n try:\n # The case when we have an incoming call\n # XXX: we should use an other way to check for a call object !\n if not isinstance(number, (basestring, tichy.Text)):\n call = number\n #print dir(call)\n self.edje_obj.edj.signal_emit('to_incoming_state',\"*\")\n #self.edje_obj.edj.part_text_set('active-call',call.__id)\n self.edje_obj.edj.part_text_set('num_field-text',str(call.number))\n self.edje_obj.edj.layer_set(2)\n self.edje_obj.edj.show()\n \n def make_active(emission, source, param):\n call.activate()\n print \"making active\"\n \n self.edje_obj.edj.signal_callback_add(\"activate_call\", \"*\", make_active)\n \n else: # If not it means we want to initiate the call first\n if name :\n self.edje_obj.edj.part_text_set('num_field-text',str(name))\n else:\n self.edje_obj.edj.part_text_set('num_field-text',str(number))\n self.edje_obj.edj.signal_emit('to_dialing_state',\"*\")\n self.edje_obj.edj.layer_set(2)\n self.edje_obj.edj.show()\n #try:\n call = self.gsm_service.create_call(number)\n call.initiate()\n #except Exception,e :\n #print e\n \n def call_release_pre(emission, source, param):\n try:\n call.release()\n except Exception, e:\n print \"exception here in pre state\"\n call.emit('released')\n \n self.edje_obj.edj.signal_callback_add(\"release_call\", \"*\", call_release_pre)\n\n i, args = yield tichy.WaitFirst(tichy.Wait(call, 'activated'),tichy.Wait(call, 'released'))\n if i == 0: #activated\n print \"call activated\"\n self.edje_obj.edj.signal_emit('to_active_state',\"*\")\n self.edje_obj.edj.part_text_set('num_field-text',str(call.number))\n #print 'should be an active call here: ', self.gsm_service.gsm_call.ListCalls().__contains__('active')\n def call_release(emission, source, param):\n print \"call releasing\"\n try:\n call.release() \n except Exception,e:\n print e\n print \"exception here\"\n call.emit('released')\n #self.edje_obj.edj.signal_emit('notfound',\"*\")\n #yield tichy.Wait(call, 'released')\n \n self.edje_obj.edj.signal_callback_add(\"release_call\", \"*\", call_release)\n yield tichy.WaitFirst(tichy.Wait(call, 'released'))\n\n if call.status not in ['released', 'releasing']:\n #text.value = \"releasing %s\" % call.number\n try:\n call.release()\n yield tichy.Wait(call, 'released')\n except Exception, e:\n print e\n \n except Exception, e:\n print e\n #import traceback\n #logger.error(\"%s, %s\", e, traceback.format_exc())\n #yield tichy.Dialog(self.window, \"Error\", e.message)\n\n self.edje_obj.edj.delete()\n main.etk_obj.visibility_set(0)", "title": "" }, { "docid": "3a7f1efae3f4fe332f02382b6d595621", "score": "0.5257364", "text": "def connected(self):\n self._dll.open(self.devID)", "title": "" }, { "docid": "debcd92160e017f2b38fc97834b49d58", "score": "0.5250276", "text": "def open_connection(self, 
host=\"127.0.0.1\", port=502, timeout_in_sec=5.0):\n try:\n self.logger.debug(\"Opening modbus connection over TCP...\")\n self.master = modbus_tcp.TcpMaster(host=host, port=port, timeout_in_sec=timeout_in_sec)\n self.logger.info(\"Opened modbus connection over TCP.\")\n except (modbus_tk.modbus.ModbusError, OSError) as error:\n self._process_error(except_object=error, msg=\"Could not open connection\")", "title": "" }, { "docid": "4236266b4f77c2bae12d89c14e6fd985", "score": "0.52455777", "text": "def connect(self):\n pass", "title": "" }, { "docid": "4236266b4f77c2bae12d89c14e6fd985", "score": "0.52455777", "text": "def connect(self):\n pass", "title": "" }, { "docid": "46cebdd91af0ffd78c90af41a00d4aa6", "score": "0.52345", "text": "def open_port(self, prefix='/dev/ttyACM', ports=[], baud=19200):\n\n if not ports:\n ports = range(3)\n\n success = False\n for port in ports:\n dev = prefix + str(port)\n try:\n self.serial = serial.Serial(dev, baud, timeout=2)\n self.serial_open = True\n except:\n print \"Failed to open:\", dev, \"Attempting next port...\"\n self.serial_open = False\n else:\n print \"Succesfully opened:\", dev\n break\n\n # #If manual COM port\n # if(serialport == 'COM0'):\n # try:\n # self.serial = serial.Serial(baudrate = 19200, port = serialport)\n # self.serial_open = True\n # except SerialException:\n # self.serial_open = False\n # print \"Failed to open MANUAL gps serial port\"\n # except:\n # print \"Some other shit failed\"\n # #If Automatic\n # else:\n # self.list_serial_ports()\n # if not len(self.portlist):\n # print \"No Ports Detected\"\n # elif len(self.portlist) == 1:\n # self.serial = serial.Serial(baudrate = 19200, port = self.portlist[0])\n # self.serial_open = True\n # print \"Opened port on \" + self.portlist[0]\n # elif len(self.portlist) > 1:\n # print \"Callum hasn't added the code for automatic port swtiching\"\n # self.serial_open = False\n\n if self.serial_open:\n print \"GPS COM port opened\"\n thread = threading.Thread(target=self.read_from_port, args=(self.serial,))\n #thread.daemon = True\n thread.start()\n #print threading.active_count()\n else:\n print \"GPS COM port connect failed\"", "title": "" }, { "docid": "c01192683d84012e76c79c6f74cc8f54", "score": "0.52312654", "text": "async def do_run():\n # devices = await async_connect(device=DEVICE)\n devices = await async_connect(host=HOST, username=USERNAME, password=PASSWORD)\n await devices.async_load(workdir=PATH, id_devices=0)\n devices.subscribe(device_added)\n await async_enter_linking_mode(link_mode=AllLinkMode.EITHER, group=0)\n _LOGGER.info(\"Press device SET button\")\n await done.get()\n await async_close()", "title": "" }, { "docid": "b12b7a542bfc1b6bd4d12fe692f37473", "score": "0.52282083", "text": "def test_sdpln_start_up_telescope_low():", "title": "" }, { "docid": "5f06a8cf5ea57fe8b3f11e4d90741df9", "score": "0.5228137", "text": "def open(self):\n self.device = self.device_descriptor.get_device()\n if not self.device:\n print >> sys.stderr, \"Failed to open NIA device. 
Cable isn't plugged in\"\n return False\n try:\n self.handle = self.device.open()\n # try to detach the interfaces from the kernel, silently ignoring\n # failures if they are already detached\n try:\n self.handle.detachKernelDriver(0)\n except Exception, e:\n pass\n try:\n self.handle.detachKernelDriver(1)\n except Exception, e:\n pass\n self.handle.claimInterface(self.device_descriptor.interface_id)\n except usb.USBError, err:\n print >> sys.stderr, err\n\n return True", "title": "" }, { "docid": "dd074e47c7f1b8dfcfc10e94422c2263", "score": "0.5224411", "text": "def open_port(self):\n answer = self.usbhost.send_command(\"RstTmr\")\n if answer in common_functions.wrong_answers:\n common_functions.error_message(common_functions.answer_translate[answer])\n return\n self.scanning = True", "title": "" }, { "docid": "1bc4f3941ccf5b8f5551e01c1c418f6c", "score": "0.522139", "text": "def connect(self):\n ...", "title": "" } ]
e30dc2df7026bc10e25a72089707317c
Edits a Databricks cluster Policy. The specification for the request json can be found at
[ { "docid": "65a789f753aad72de61c8a890fe8fab4", "score": "0.66613555", "text": "def edit_cli(api_client, json_file, json):\n if not bool(json_file) ^ bool(json):\n raise RuntimeError('Either --json-file or --json should be provided')\n if json_file:\n with open(json_file, 'r') as f:\n json = f.read()\n deser_json = json_loads(json)\n ClusterPolicyApi(api_client).edit_cluster_policy(deser_json)", "title": "" } ]
[ { "docid": "9d1a3637d76fd45b18894681b2229123", "score": "0.67657363", "text": "def cluster_update_policy(self, cluster, policy, **attrs):\n return self.service.update_cluster_policy(cluster, policy, **attrs)", "title": "" }, { "docid": "9304fa1bf5d10e026f38e6220ec3cb3d", "score": "0.6192161", "text": "def update_policy(self, *args):", "title": "" }, { "docid": "d11d14ba0b4f29d2a7fc88e5ba940323", "score": "0.5974644", "text": "def update(self, policy):\n self._save(policy, model=self)", "title": "" }, { "docid": "50e0959690db009b2e745ffafd1cf6f3", "score": "0.58497274", "text": "def update_policy(self, data, id):\n return", "title": "" }, { "docid": "ac80df09c4878556e97d43beb7d106ab", "score": "0.5793478", "text": "def cluster_attach_policy(request, cluster, policy, params):\n return senlinclient(request).cluster_attach_policy(\n cluster, policy, **params)", "title": "" }, { "docid": "97ac4b0ae11806d12020337302534ae5", "score": "0.5788183", "text": "def set(self,\n policy,\n ):\n return self._invoke('set',\n {\n 'policy': policy,\n })", "title": "" }, { "docid": "6fbf8a25a5975b0ecc8a0e9a37aafe15", "score": "0.5638495", "text": "def update_policy(self, policy_id, policy):\n raise exception.NotImplemented() # pragma: no cover", "title": "" }, { "docid": "cbd0fded8ba94b5f90d7ae0e4b7ee424", "score": "0.56381166", "text": "def update_policy(self, sec, ptype, old_rule, new_rule):\n pass", "title": "" }, { "docid": "d7bf81a4ac4216f9b6311036be6cc155", "score": "0.5593901", "text": "def update_policy( p_spec, policy_id ):\n org_client.update_policy(\n PolicyId=policy_id,\n Content=specify_policy_content(p_spec),\n Description=p_spec['Description'],\n )", "title": "" }, { "docid": "7ad4f03cab8907166c538340c274c1bd", "score": "0.5494592", "text": "def update_gcp_cluster(ctx, credentials, cluster):\n from googleapiclient import discovery\n from googleapiclient.errors import HttpError\n from google.cloud import storage\n\n spell_client = ctx.obj[\"client\"]\n\n cloud_service = discovery.build(\"cloudresourcemanager\", \"v1\", credentials=credentials)\n project_id = cluster[\"networking\"][\"gcp\"][\"project\"]\n policy = cloud_service.projects().getIamPolicy(resource=project_id, body={}).execute()\n service_account_id = cluster[\"role_credentials\"][\"gcp\"][\"service_account_id\"]\n\n for binding in policy[\"bindings\"]:\n for member in binding[\"members\"]:\n if member.endswith(service_account_id):\n role_name = binding[\"role\"]\n\n iam_service = discovery.build(\"iam\", \"v1\", credentials=credentials)\n role = iam_service.projects().roles().get(name=role_name).execute()\n\n need_to_add = list(set(required_permissions) - set(role[\"includedPermissions\"]))\n need_to_remove = set(role[\"includedPermissions\"]) - set(required_permissions)\n if len(need_to_add) > 0 or len(need_to_remove) > 0:\n click.echo(\n \"Your cluster needs to be updated to have the most recent set of role permissions.\\n\"\n )\n if len(need_to_add) > 0:\n answer = click.confirm(\n \"Role {} is currently missing these permissions:\\n{}\\n\"\n \"Is it ok to add these permissions?\".format(\n role_name, \"\\n\".join([\"- \" + s for s in need_to_add])\n )\n )\n if not answer:\n raise ExitException(\n \"You will not have the ability to use all of the most up to \"\n \"date Spell features until you update your cluster\"\n )\n\n role[\"includedPermissions\"] = role[\"includedPermissions\"] + need_to_add\n iam_service.projects().roles().patch(name=role_name, body=role).execute()\n # refresh role for removal step\n role = 
iam_service.projects().roles().get(name=role_name).execute()\n click.echo(\"Successfully updated role {}\".format(role_name))\n if len(need_to_remove):\n answer = click.confirm(\n \"Role {} currently has unnecessary permissions:\\n{}\\n\"\n \"Is it ok to remove these permissions?\".format(\n role_name, \"\\n\".join([\"- \" + s for s in need_to_remove])\n )\n )\n if not answer:\n raise ExitException(\n \"You will not have the ability to use all of the most up to \"\n \"date Spell features until you update your cluster\"\n )\n\n role[\"includedPermissions\"] = [\n perm for perm in role[\"includedPermissions\"] if perm not in need_to_remove\n ]\n iam_service.projects().roles().patch(name=role_name, body=role).execute()\n click.echo(\"Successfully updated role {}\".format(role_name))\n\n # verify that S3 key is of service account, not user, fetch otherwise\n storage_client = storage.Client(project=project_id)\n key_id = cluster[\"role_credentials\"][\"gcp\"][\"gs_access_key_id\"]\n service_account_email = cluster[\"role_credentials\"][\"gcp\"][\"service_account_id\"]\n hmac_keys = storage_client.list_hmac_keys(service_account_email=service_account_email)\n hmac_key_ids = [metadata.access_id for metadata in hmac_keys if metadata.state == \"ACTIVE\"]\n if len(hmac_key_ids) == 0 or key_id not in set(hmac_key_ids):\n answer = click.confirm(\n \"Spell previously used the user-specific S3 Interoperable Access Keys for Google Storage\"\n \" access, but now uses the more secure HMAC key of the service account.\"\n \" Is it ok to create these keys and update your cluster?\"\n )\n if answer:\n gs_access_key_id, gs_secret_access_key = get_interoperable_s3_access_keys(\n storage, project_id, service_account_email\n )\n spell_client.update_gcp_cluster_credentials(\n cluster[\"name\"], access_key=gs_access_key_id, secret=gs_secret_access_key\n )\n\n # Add an API key to the external service account, if doesn't already exist.\n gs_service_acct_api_key = create_api_key(iam_service, service_account_id)\n if gs_service_acct_api_key:\n click.echo(\n \"Created a new API key for spell service account {acct}\".format(acct=service_account_id)\n )\n spell_client.update_gcp_cluster_credentials(\n cluster[\"name\"], api_key=gs_service_acct_api_key\n )\n\n # Add firewall rule for internal traffic\n current_version = cluster[\"version\"]\n if current_version < 5:\n click.echo(\"Ensuring firewall rules are up to date...\")\n\n compute_service = discovery.build(\"compute\", \"v1\", credentials=credentials)\n\n region = cluster[\"networking\"][\"gcp\"][\"region\"]\n subnet_name = cluster[\"networking\"][\"gcp\"][\"subnet\"]\n request = compute_service.subnetworks().get(\n project=project_id, region=region, subnetwork=subnet_name\n )\n subnet = request.execute()\n\n cidr = subnet[\"ipCidrRange\"]\n network_url = subnet[\"network\"]\n rule_name = \"{}-internal\".format(cluster[\"name\"])\n body = {\n \"name\": rule_name,\n \"description\": \"Allow traffic between all instances within VPC\",\n \"network\": network_url,\n \"sourceRanges\": [cidr],\n \"allowed\": [{\"IPProtocol\": \"TCP\"}, {\"IPProtocol\": \"UDP\"}],\n }\n\n request = compute_service.firewalls().insert(project=project_id, body=body)\n try:\n response = request.execute()\n global_progress_bar(project_id, compute_service, response)\n click.echo(\"Firewall rules updated!\")\n # If this rule already exists update it because due to a bug in version 4\n # the source IPs are incorrect\n except HttpError as err:\n if err.resp.status != 409:\n raise\n\n request = 
compute_service.firewalls().update(\n project=project_id, firewall=rule_name, body=body\n )\n response = request.execute()\n global_progress_bar(project_id, compute_service, response)\n\n spell_client.update_cluster_version(cluster[\"name\"], cluster_version)\n\n click.echo(\"Congratulations, your cluster {} is up to date!\".format(cluster[\"name\"]))", "title": "" }, { "docid": "720a5bbc57b47aca20c54db8c2e6ee58", "score": "0.54100037", "text": "def test_patch_patch_policies_id_dashboard_post(self):\n pass", "title": "" }, { "docid": "8a7a296d42393ea6147f478f1592ed7c", "score": "0.5405165", "text": "def UpdatePolicy(self, policy, args):\n if not args.value:\n return self._AllowAllValues(policy, args)\n\n if args.remove:\n return utils.RemoveAllowedValuesFromPolicy(policy, args)\n\n return self._AddValues(policy, args)", "title": "" }, { "docid": "77ff3ccf406764c403f97b13484c18a1", "score": "0.5271467", "text": "def setReplicationPolicy(session,id,policy,serialVersion):\n return None", "title": "" }, { "docid": "1a42ba7511c804c42e0462fe90d5461b", "score": "0.5270215", "text": "def update_policy(self, data, policy_id):\n file_path = os.path.join(self.path, policy_id + \".json\")\n if os.path.isfile(file_path):\n with open(file_path, \"w\") as file:\n json.dump(data, file)\n else:\n raise IOError # Raise exception if policy named filename doesn't exist", "title": "" }, { "docid": "b5db8d99cd0916898bf0f4de235eec2d", "score": "0.5257591", "text": "def ModifyDDoSPolicyCase(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyDDoSPolicyCase\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyDDoSPolicyCaseResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "05858aa569513c318667e0a3b56c996e", "score": "0.5257244", "text": "async def host_policy_update(self, table_name, data, id_instance):\n # Check if table name is provided under label of policy_name\n if str(table_name) not in ['CES_POLICIES', 'CES_POLICY_IDENTITY', 'HOST_POLICIES', 'HOST_POLICY_IDENTITY', 'FIREWALL', 'ID']:\n error = \"Table Type not supported = {}\".format(table_name)\n raise API_ERROR(1001, error)\n\n get_function = self._host_policy_functions[table_name]['update']\n query = get_function(data, id_instance)\n #print (query)\n data = await self.db_host.execute(query)\n #await self._update_timestamp(data['fqdn'], policy_name.upper())\n return data", "title": "" }, { "docid": "b49c9905bd5fe21749f9abfd363391e1", "score": "0.5246436", "text": "def test_update_policy(self):\n pass", "title": "" }, { "docid": "b49c9905bd5fe21749f9abfd363391e1", "score": "0.5246436", "text": "def test_update_policy(self):\n pass", "title": "" }, { "docid": "7c2911a80d44f515237fef00c83975a3", "score": "0.5238447", "text": "def ModifyDDoSPolicy(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyDDoSPolicy\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyDDoSPolicyResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "0825b4f098717a94b7c738e93687ee20", "score": "0.5227313", "text": "def edit_cp():\r\n 
global CP_MANAGER\r\n returnJSON = createJSON()\r\n\r\n try:\r\n cp_data = request.get_json()\r\n CP_MANAGER.edit_change_plan(cp_data)\r\n return addMessageToJSON(returnJSON, \"success\")\r\n except InvalidInputsError as e:\r\n return addMessageToJSON(returnJSON, e.message)", "title": "" }, { "docid": "af4786f3203ae90d94ecba428ea09f7b", "score": "0.5181487", "text": "def edit(cm_id, caller_id, cluster_id, name=None, address=None, port=None,):\n try:\n cluster = Cluster.objects.get(pk=cluster_id)\n if name:\n cluster.name = name\n if address:\n cluster.address = address\n if port:\n cluster.port = port\n cluster.save()\n except:\n raise CLMException('cluster_edit')", "title": "" }, { "docid": "e635c0cc390ae2a329bc56cbc8eccc6e", "score": "0.5170183", "text": "async def upsert_distribution_policy(\n self, id: str, patch: JSON, *, content_type: str = \"application/merge-patch+json\", **kwargs: Any\n ) -> JSON:", "title": "" }, { "docid": "afa26c07ced53bd7fd37e279a907c7a2", "score": "0.5155713", "text": "def delete_cli(api_client, policy_id):\n ClusterPolicyApi(api_client).delete_cluster_policy(policy_id)", "title": "" }, { "docid": "488e021f0c8ba114d6f70dd70be9d842", "score": "0.5154467", "text": "def set_policy(self, policy, counters=None):\r\n if isinstance(policy, Policy):\r\n policy = policy.name\r\n self.table.set_policy(self.name, policy, counters)", "title": "" }, { "docid": "d99eeda0dea96bb09a1298d36b6c6872", "score": "0.5150901", "text": "async def upsert_distribution_policy(\n self, id: str, patch: IO, *, content_type: str = \"application/merge-patch+json\", **kwargs: Any\n ) -> JSON:", "title": "" }, { "docid": "d9c49feda7fbba9b1c9276326a955211", "score": "0.51439714", "text": "def cluster_attach_policy(self, cluster, policy, **attrs):\n return self.service.attach_policy_to_cluster(cluster, policy, **attrs)", "title": "" }, { "docid": "ff42a3095b575a58e071cd74a4331816", "score": "0.5130333", "text": "def PUT(self, env):\n req = Request(env)\n conn = HTTPConnection('%s:%s' % (self.mds_ip, self.mds_port))\n headers = req.params\n try:\n info = get_container_info(env, self.app)\n if info:\n stor_policy = info['storage_policy']\n headers['storage_policy'] = stor_policy\n except:\n pass\n conn.request('PUT', req.path, headers=headers)\n resp = conn.getresponse()\n return self.app", "title": "" }, { "docid": "5515a104f4f37e19623fc5db5efb0745", "score": "0.510309", "text": "async def upsert_classification_policy(\n self, id: str, patch: IO, *, content_type: str = \"application/merge-patch+json\", **kwargs: Any\n ) -> JSON:", "title": "" }, { "docid": "9563dae26ceb5e1589370e5f9ce86fb5", "score": "0.50973606", "text": "async def upsert_classification_policy(\n self, id: str, patch: JSON, *, content_type: str = \"application/merge-patch+json\", **kwargs: Any\n ) -> JSON:", "title": "" }, { "docid": "33f3fbd78151bb1a0c614ae85a0cba54", "score": "0.5079021", "text": "def specify_policy_content(p_spec):\n return \"\"\"{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"%s\", \"Action\": %s, \"Resource\": \"*\" } ] }\"\"\" % (p_spec['Effect'], json.dumps(p_spec['Actions']))", "title": "" }, { "docid": "86dcdfe682ba80c8530e11ee5ff4a3c0", "score": "0.50767595", "text": "def install_policy(self, policy):\n self.policy = policy", "title": "" }, { "docid": "30be1151a48873c811839f8b2598753a", "score": "0.5030714", "text": "def update_cluster(ClusterId=None, RoleARN=None, Description=None, Resources=None, AddressId=None, ShippingOption=None, Notification=None, ForwardingAddressId=None):\n 
pass", "title": "" }, { "docid": "877f9fe470114684f58223c55a7b2a36", "score": "0.50152963", "text": "def update(self) -> Resource:\n self.fix_shares()\n data = self._payload(**self.json)\n\n content = self._api.put(f'elasticubes/datasecurity/{self._id}', data=data)\n\n return DataSecurity(self._api, content, self._elasticube)", "title": "" }, { "docid": "89e6471cb94886d760068eab032c68b4", "score": "0.49996296", "text": "def update_policy(self) -> pulumi.Output['outputs.InstanceGroupManagerUpdatePolicyResponse']:\n return pulumi.get(self, \"update_policy\")", "title": "" }, { "docid": "690aad1ecb922c529f7182f504e112f6", "score": "0.4996484", "text": "def test16():\n cr = ClusterRole(\n metadata=ObjectMeta(\n name='create-approve-csr',\n namespace=crud_namespace,\n labels={'kiamol': 'ch17'}\n ),\n rules=[\n PolicyRule(apiGroups=[\"certificates.k8s.io\"],\n resources=[\"certificatesigningrequests\"],\n verbs=[\"create\", \"get\", \"list\", \"watch\"],\n ),\n PolicyRule(apiGroups=[\"certificates.k8s.io\"],\n resources=[\"certificatesigningrequests/approval\"],\n verbs=[\"update\"]),\n PolicyRule(apiGroups=[\"certificates.k8s.io\"],\n resources=[\"signers\"],\n resourceNames=[\"kubernetes.io/kube-apiserver-client\",\n \"kubernetes.io/legacy-unknown\"],\n verbs=['approve'])\n ]\n )\n cr.create()\n try:\n cr.read()\n cr.read()\n cr.metadata.labels['test'] = 'test116'\n cr.update()\n finally:\n cr.delete()", "title": "" }, { "docid": "db9e041ec151a136b0757bf110775806", "score": "0.49927396", "text": "def cluster_policy_list(request, cluster, params):\n policies = senlinclient(request).cluster_policies(\n cluster, **params)\n return [ClusterPolicy(p) for p in policies]", "title": "" }, { "docid": "a47b9f265cd06f61e448830b2c842fc6", "score": "0.49693337", "text": "def patch(self,\n cluster_id,\n compute_cluster_idfw_configuration,\n ):\n return self._invoke('patch',\n {\n 'cluster_id': cluster_id,\n 'compute_cluster_idfw_configuration': compute_cluster_idfw_configuration,\n })", "title": "" }, { "docid": "36355a00293e1185ae8388afcd4bcc4d", "score": "0.49505448", "text": "def test_update_rbac_policy(self):\n policy_id = self.create_rbac_policy(self.tenant_id, self.network_id)\n\n with self.override_role():\n self.ntp_client.update_rbac_policy(\n policy_id, target_tenant=self.tenant_id)", "title": "" }, { "docid": "3e5b1b7ef64f85baee8fbd212ca15bb4", "score": "0.4942969", "text": "def test_update_policy_rule(self):\n pass", "title": "" }, { "docid": "ebdf10656a793bb3f1c1f6f58e591771", "score": "0.49403083", "text": "def apply_acl_on_object(self, acl_str):\n object_acl = json.loads(acl_str)\n # the object_type\n object_id_with_type = object_acl.get('object_id', None)\n api_path = '/permissions' + object_id_with_type\n acl_list = object_acl.get('access_control_list', None)\n api_args = {'access_control_list': acl_list}\n resp = self.patch(api_path, api_args)\n print(resp)\n return resp", "title": "" }, { "docid": "2f402389afbfc419a708261e521d6779", "score": "0.4933602", "text": "def ModifyDDoSPolicyName(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyDDoSPolicyName\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyDDoSPolicyNameResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "e32703da21d5b67654331e5a37629109", "score": 
"0.49041757", "text": "def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy:\n return response", "title": "" }, { "docid": "7cb74e1777deeee600de9b6329028fd9", "score": "0.4898452", "text": "def cluster_detach_policy(request, cluster, policy):\n senlinclient(request).cluster_detach_policy(\n cluster, policy)", "title": "" }, { "docid": "1272131e1c5cfac6dfe01c4fb7ee3483", "score": "0.4889807", "text": "def update_policy(self) -> Optional[pulumi.Input['InstanceGroupManagerUpdatePolicyArgs']]:\n return pulumi.get(self, \"update_policy\")", "title": "" }, { "docid": "25788e99f78d63b038cce9dcb21070f0", "score": "0.48873943", "text": "def set_definition(self, cluster):\n return self.client._perform_json(\n \"PUT\", \"/admin/clusters/%s\" % (self.cluster_id), body=cluster)", "title": "" }, { "docid": "f653aa9a3acec3cc6ceb32139e044db1", "score": "0.48716247", "text": "def test_edit_role_using_put(self):\n pass", "title": "" }, { "docid": "77bcdd129120fdeecba97270754d2099", "score": "0.48617348", "text": "def test_patch_patch_policies_id_dashboard_delete(self):\n pass", "title": "" }, { "docid": "665463c43505ba9054117768e947f2a5", "score": "0.4858382", "text": "def _create_edit(action, payload, logger, session, broker_client):\n core_params = ['cluster_id', 'name', 'is_active', 'job_type', 'service',\n 'start_date', 'extra']\n params = _get_params(payload, core_params, 'data.', default_value='')\n \n job_type = params['job_type']\n cluster_id = params['cluster_id']\n name = params['name']\n service_name = params['service']\n \n if job_type not in ('one_time', 'interval_based', 'cron_style'):\n msg = 'Unrecognized job type [{0}]'.format(job_type)\n logger.error(msg)\n raise ZatoException(msg)\n \n # For finding out if we don't have a job of that name already defined.\n existing_one_base = session.query(Job).\\\n filter(Cluster.id==cluster_id).\\\n filter(Job.name==name)\n \n if action == 'create':\n existing_one = existing_one_base.first()\n else:\n edit_params = _get_params(payload, ['id'], 'data.')\n job_id = edit_params['id']\n existing_one = existing_one_base.filter(Job.id != job_id).first()\n \n if existing_one:\n raise Exception('Job [{0}] already exists on this cluster'.format(\n name))\n \n # Is the service's name correct?\n service = session.query(Service).\\\n filter(Cluster.id==cluster_id).\\\n filter(Service.name==service_name).first()\n \n if not service:\n msg = 'Service [{0}] does not exist on this cluster'.format(service_name)\n logger.error(msg)\n raise Exception(msg)\n \n # We can create/edit a base Job object now and - optionally - another one\n # if the job type's is either interval-based or Cron-style. 
The base\n # instance will be enough if it's a one-time job.\n \n extra = params['extra'].encode('utf-8')\n is_active = is_boolean(params['is_active'])\n start_date = params['start_date']\n \n \n if action == 'create':\n job = Job(None, name, is_active, job_type, \n start_date, extra, \n cluster_id=cluster_id, service=service)\n else:\n job = session.query(Job).filter_by(id=job_id).one()\n old_name = job.name\n job.name = name\n job.is_active = is_active\n job.start_date = start_date\n job.service = service\n job.extra = extra\n \n try:\n # Add but don't commit yet.\n session.add(job)\n\n if job_type == 'interval_based':\n request_params = ['weeks', 'days', 'hours', 'minutes', 'seconds', 'repeats']\n ib_params = _get_params(payload, request_params, 'data.', default_value='')\n\n if not any(ib_params[key] for key in ('weeks', 'days', 'hours', 'minutes', 'seconds')):\n msg = \"At least one of ['weeks', 'days', 'hours', 'minutes', 'seconds'] must be given.\"\n logger.error(msg)\n raise ZatoException(msg)\n \n if action == 'create':\n ib_job = IntervalBasedJob(None, job)\n else:\n ib_job = session.query(IntervalBasedJob).filter_by(\n id=job.interval_based.id).one()\n\n for param, value in ib_params.items():\n if value:\n setattr(ib_job, param, value)\n \n session.add(ib_job)\n \n elif job_type == 'cron_style':\n cs_params = _get_params(payload, ['cron_definition'], 'data.')\n cron_definition = cs_params['cron_definition'].strip()\n \n if cron_definition.startswith('@'):\n if not cron_definition in PREDEFINED_CRON_DEFINITIONS:\n msg = ('If using a predefined definition, it must be '\n 'one of {0} instead of [{1}]').format(\n sorted(PREDEFINED_CRON_DEFINITIONS), \n cron_definition)\n logger.error(msg)\n raise Exception(msg)\n \n cron_definition = PREDEFINED_CRON_DEFINITIONS[cron_definition]\n else:\n splitted = cron_definition.strip().split()\n if not len(splitted) == CRON_EXPRESSION_LEN:\n msg = ('Expression [{0}] in invalid, it needs to contain '\n 'exactly {1} whitespace-separated fields').format(\n cron_definition, CRON_EXPRESSION_LEN)\n logger.error(msg)\n raise Exception(msg)\n cron_definition = ' '.join(splitted)\n \n if action == 'create':\n cs_job = CronStyleJob(None, job)\n else:\n cs_job = session.query(CronStyleJob).filter_by(\n id=job.cron_style.id).one()\n \n cs_job.cron_definition = cron_definition\n session.add(cs_job)\n\n # We can commit it all now.\n session.commit()\n \n # Now send it to the broker, but only if the job is active.\n if is_active:\n msg_action = SCHEDULER.CREATE if action == 'create' else SCHEDULER.EDIT\n msg = {'action': msg_action, 'job_type': job_type,\n 'is_active':is_active, 'start_date':start_date,\n 'extra':extra, 'service': service_name, 'name': name\n }\n if action == 'edit':\n msg['old_name'] = old_name\n\n if job_type == 'interval_based':\n for param, value in ib_params.items():\n msg[param] = int(value) if value else 0\n elif job_type == 'cron_style':\n msg['cron_definition'] = cron_definition\n else:\n msg = {'action': SCHEDULER.DELETE, 'name': name}\n \n broker_client.send_json(msg, MESSAGE_TYPE.TO_SINGLETON)\n \n \n except Exception, e:\n session.rollback()\n msg = 'Could not complete the request, e=[{e}]'.format(e=format_exc(e))\n logger.error(msg)\n \n raise \n else:\n job_elem = Element('job')\n \n if action == 'create':\n job_elem.id = job.id\n \n if job_type == 'cron_style':\n # Needs to be returned because we might've been performing\n # a substitution like changing '@hourly' into '0 * * * *'.\n job_elem.cron_definition = 
cs_job.cron_definition\n\n out = etree.tostring(job_elem)\n \n return ZATO_OK, out", "title": "" }, { "docid": "94ce6a91047c39254cffd51a64698148", "score": "0.48569575", "text": "def create_cli(api_client, json_file, json):\n json_cli_base(json_file, json,\n lambda json: ClusterPolicyApi(api_client).create_cluster_policy(json))", "title": "" }, { "docid": "2471f5e218791f7ae2aa7c4dcec1c923", "score": "0.48379087", "text": "def test_control_acl_update(self):\n with factories.single_commit():\n control = factories.ControlFactory()\n person = factories.PersonFactory()\n control.add_person_with_role_name(person, \"Admin\")\n\n access_control_list = {\n \"Admin\": [\n {\n \"email\": \"user1@example.com\",\n \"name\": \"user1\",\n },\n {\n \"email\": \"user2@example.com\",\n \"name\": \"user2\",\n },\n ]\n }\n self.setup_people(access_control_list)\n\n response = self.api.put(control, {\n \"access_control_list\": access_control_list,\n })\n self.assert200(response)\n control = all_models.Control.query.get(control.id)\n self.assert_obj_acl(control, access_control_list)", "title": "" }, { "docid": "cfc1dc1318e519896e9eb16430e63fd0", "score": "0.48273322", "text": "def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-local_chapter', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "title": "" }, { "docid": "239052fde8f1b047f8876d02fcb50166", "score": "0.48078677", "text": "def test_patch_challenge(self):\n patch_challenge = {\n \"dispatchPolicyId\" : \"DFK768EF053898993DD196397EDFDDFBC81751818B7FD1300124455B07E91CB289F87791D78064ECC93754F19B13D419489F162A150A22DD814CKAF0E\",\n \"name\" : \"Dogs\",\n \"description\" : \"Tag the cats\",\n \"evaluationPolicyId\" : \"DFK768EF053898993DD196397EDFDDFBC81751818B7FD1300124455B07E91CB289F87791D78064ECC93754F19B13D419489F162A150A22DD814CKAF0E\",\n \"budget\" : 3.14\n}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n }\n response = self.client.open(\n '/challenge/{challenge_id}'.format(challenge_id='challenge_id_example'),\n method='PATCH',\n headers=headers,\n data=json.dumps(patch_challenge),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "f97d5c5aa8605fbc9fb50ac87e62800f", "score": "0.4794994", "text": "def set_iam_policy(self, policy):\n api = self._instance._client.database_admin_api\n metadata = _metadata_with_prefix(self.name)\n\n request = iam_policy_pb2.SetIamPolicyRequest(\n resource=self.name,\n policy=policy,\n )\n response = api.set_iam_policy(request=request, metadata=metadata)\n return response", "title": "" }, { "docid": "f04adf6b4f6a139383426c4034923203", "score": "0.47882545", "text": "def _update_cluster_config(session, updater, cluster, **kwargs):\n is_cluster_editable(session, cluster, updater)\n return utils.update_db_object(\n session, cluster, **kwargs\n )", "title": "" }, { "docid": "86b0003e1f4a17dc97aea0b62a399090", "score": "0.47716886", "text": "def change_zone(self, zone_name=None, data=None):\n new_data = 
{\"comment\": \"A comment for this update\", \"actions\": []}\n new_data['actions'].append(data)\n return self.main_session.patch(url=self.base_url + 'api/v2/zones/' + zone_name, data=json.dumps(new_data))", "title": "" }, { "docid": "98569b02ef2b0b7d0f5697330006e3ca", "score": "0.47692096", "text": "def RunWithArgs(self, bucket, object, entity):\n client = GetClientFromFlags()\n global_params = GetGlobalParamsFromFlags()\n request = messages.StorageObjectAccessControlsPatchRequest(\n bucket=bucket.decode('utf8'),\n object=object.decode('utf8'),\n entity=entity.decode('utf8'),\n )\n if FLAGS['generation'].present:\n request.generation = int(FLAGS.generation)\n if FLAGS['objectAccessControl'].present:\n request.objectAccessControl = apitools_base.JsonToMessage(messages.ObjectAccessControl, FLAGS.objectAccessControl)\n result = client.objectAccessControls.Patch(\n request, global_params=global_params)\n print apitools_base_cli.FormatOutput(result)", "title": "" }, { "docid": "f0eece28e7c49ed7e9bb71282ea3f818", "score": "0.47655374", "text": "def update(challenge, request):\n data = request.form or request.get_json()\n for attr, value in data.items():\n setattr(challenge, attr, value)\n\n db.session.commit()\n return challenge", "title": "" }, { "docid": "08d6d99fc2a2c4108a5ab9487e0c72a9", "score": "0.47572854", "text": "def test_update_es_cluster_plan(self):\n pass", "title": "" }, { "docid": "e5f6387fd63a2b97f0bed7a4d2ae8cbb", "score": "0.47566372", "text": "def put(self):\n data = request.json\n val_alpha_num_special(data.get('name'))\n val_alpha_num_special(data.get('description'))\n validate_privilege(self, 'edit')\n result = create_checklist_category(data)\n return result, 200, security_headers()", "title": "" }, { "docid": "682c2858d783364a98a3fbf806bac5fe", "score": "0.4755202", "text": "def update_service_policy(self, execution_type=None, service_name=None,\n **kwargs):\n pass", "title": "" }, { "docid": "667ebe764be366480a4a264806e15ff3", "score": "0.4753683", "text": "def update_policies(self: object, body: dict) -> dict:\n # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#\n # ... 
/device-control-policies/updateDeviceControlPolicies\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"updateDeviceControlPolicies\",\n body=body\n )", "title": "" }, { "docid": "fb759c1598f26125ed45f1e64a43354f", "score": "0.47327325", "text": "def RunWithArgs(self, bucket, entity):\n client = GetClientFromFlags()\n global_params = GetGlobalParamsFromFlags()\n request = messages.BucketAccessControl(\n bucket=bucket.decode('utf8'),\n entity=entity.decode('utf8'),\n )\n if FLAGS['domain'].present:\n request.domain = FLAGS.domain.decode('utf8')\n if FLAGS['email'].present:\n request.email = FLAGS.email.decode('utf8')\n if FLAGS['entityId'].present:\n request.entityId = FLAGS.entityId.decode('utf8')\n if FLAGS['etag'].present:\n request.etag = FLAGS.etag.decode('utf8')\n if FLAGS['id'].present:\n request.id = FLAGS.id.decode('utf8')\n if FLAGS['kind'].present:\n request.kind = FLAGS.kind.decode('utf8')\n if FLAGS['projectTeam'].present:\n request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)\n if FLAGS['role'].present:\n request.role = FLAGS.role.decode('utf8')\n if FLAGS['selfLink'].present:\n request.selfLink = FLAGS.selfLink.decode('utf8')\n result = client.bucketAccessControls.Patch(\n request, global_params=global_params)\n print apitools_base_cli.FormatOutput(result)", "title": "" }, { "docid": "8d203b3619441a24ea2674ec830a666a", "score": "0.47198913", "text": "def ModifyCCPolicySwitch(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyCCPolicySwitch\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyCCPolicySwitchResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "dff9624d0af273ff7e11afbd5928f289", "score": "0.47174728", "text": "def save(self):\n return self.client._perform_json(\n \"PUT\", \"/admin/clusters/%s\" % (self.cluster_id), body=self.settings)", "title": "" }, { "docid": "06d90b656c5f24e82780df8270d38c1b", "score": "0.47139925", "text": "def clustering_policy(self) -> Optional[pulumi.Input[Union[str, 'ClusteringPolicy']]]:\n return pulumi.get(self, \"clustering_policy\")", "title": "" }, { "docid": "24ed2bb36844736f66cc165820d5f757", "score": "0.47131196", "text": "def test_patch_patch_policies_id_dashboard_get(self):\n pass", "title": "" }, { "docid": "fd55bfaacd8d623dd568e83a449b2593", "score": "0.47103947", "text": "def edit_cp_action():\r\n global CP_ACTION_MANAGER\r\n returnJSON = createJSON()\r\n\r\n try:\r\n cp_action_data = request.get_json()\r\n CP_ACTION_MANAGER.edit_change_plan_action(cp_action_data)\r\n return addMessageToJSON(returnJSON, \"success\")\r\n except InvalidInputsError as e:\r\n return addMessageToJSON(returnJSON, e.message)", "title": "" }, { "docid": "36bafe77a42d5332247fb523a8bad68e", "score": "0.47092623", "text": "def policy_config(self, policy_config):\n self._policy_config = policy_config", "title": "" }, { "docid": "22b90d3128c11f0099e2edef840cda78", "score": "0.47077698", "text": "def changed_policy(self):\n pol = self.policy.currentText()\n if pol in ['Qs', 'Ss', 'RS']:\n self.p3_label.setText('')\n self.p3.hide()\n if pol == 'Qs':\n self.p1_label.setText('Q')\n self.p2_label.setText('s')\n elif pol == 'Ss':\n self.p1_label.setText('S')\n 
self.p2_label.setText('s')\n elif pol == 'RS':\n self.p1_label.setText('R')\n self.p2_label.setText('S')\n elif pol == 'RSs':\n self.p1_label.setText('R')\n self.p2_label.setText('S')\n self.p3_label.setText('s')\n self.p3.show()", "title": "" }, { "docid": "5e446ff2bf61aff028a2ab3f9ffacdd8", "score": "0.4702841", "text": "def RunWithArgs(self, bucket, object, entity):\n client = GetClientFromFlags()\n global_params = GetGlobalParamsFromFlags()\n request = messages.StorageObjectAccessControlsUpdateRequest(\n bucket=bucket.decode('utf8'),\n object=object.decode('utf8'),\n entity=entity.decode('utf8'),\n )\n if FLAGS['generation'].present:\n request.generation = int(FLAGS.generation)\n if FLAGS['objectAccessControl'].present:\n request.objectAccessControl = apitools_base.JsonToMessage(messages.ObjectAccessControl, FLAGS.objectAccessControl)\n result = client.objectAccessControls.Update(\n request, global_params=global_params)\n print apitools_base_cli.FormatOutput(result)", "title": "" }, { "docid": "232b1885448678718ffdec4e2af1d1f3", "score": "0.47018197", "text": "def add_policy(self, lambda_function: _lambda.Function) -> None:\n smt1 = _iam.PolicyStatement(\n resources=[self.db.secret.secret_arn],\n actions=[\"secretsmanager:GetSecretValue\"],\n )\n lambda_function.add_to_role_policy(smt1)\n smt2 = _iam.PolicyStatement(\n resources=[\n f\"arn:aws:rds:{self.region}:{self.account}:cluster:{self.db.db.ref}\"\n ],\n actions=[\n \"rds-data:ExecuteStatement\",\n \"rds-data:BatchExecuteStatement\",\n \"rds-data:BeginTransaction\",\n \"rds-data:CommitTransaction\",\n \"rds-data:ExecuteSql\",\n \"rds-data:RollbackTransaction\",\n \"rds:DescribeDBClusters\",\n ],\n )\n lambda_function.add_to_role_policy(smt2)", "title": "" }, { "docid": "127b6a4a53a30d3c4dfdff53f46e2f56", "score": "0.46917343", "text": "def test_update_ingestion_policy(self):\n pass", "title": "" }, { "docid": "0c215fab693954c0e5c8a0dc4c3f981d", "score": "0.4681245", "text": "def update(self,\n cluster_id,\n compute_cluster_idfw_configuration,\n ):\n return self._invoke('update',\n {\n 'cluster_id': cluster_id,\n 'compute_cluster_idfw_configuration': compute_cluster_idfw_configuration,\n })", "title": "" }, { "docid": "2b71fd5cf0d46ed4dc0c6570ee319511", "score": "0.46703812", "text": "def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'add-new-local_chapter', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "title": "" }, { "docid": "3fd46781cb7a95073db92167f84304e5", "score": "0.4669759", "text": "def get_cluster_policy(self, policy, cluster):\n return self.service.get_cluster_policy(policy, cluster)", "title": "" }, { "docid": "05b312dd1aaa0ff9ce84e2a0ece043cb", "score": "0.46636143", "text": "def test_add_policy(self):\n pass", "title": "" }, { "docid": "c6a583329d43a434de564a92845a0c9a", "score": "0.46587732", "text": "def testSetIsAllowed(self):\n self.namespaces.create([(u'fluiddb/test', u'description')])\n values = [(u'fluiddb/test', Operation.UPDATE_NAMESPACE,\n Policy.CLOSED, [])]\n self.permissions.set(values)\n 
self.namespaces.set({u'fluiddb/test': u'new description'})", "title": "" }, { "docid": "6be7604c83daa7151fd06f3058fc30ac", "score": "0.46536875", "text": "def _save(cls, policy, model):\n policy_json = policy.to_json()\n policy_dict = json.loads(policy_json)\n model.uid = policy_dict['uid']\n model.type = policy_dict['type']\n model.effect = policy_dict['effect'] == ALLOW_ACCESS\n model.description = policy_dict['description']\n model.context = json.dumps(policy_dict['context'])\n model.subjects = [\n PolicySubjectModel(subject=x, subject_string=string, subject_regex=compiled)\n for y in policy_dict['subjects']\n for (x, string, compiled) in cls._policy_element_to_db(policy, y)\n ]\n model.resources = [\n PolicyResourceModel(resource=x, resource_string=string, resource_regex=compiled)\n for y in policy_dict['resources']\n for (x, string, compiled) in cls._policy_element_to_db(policy, y)\n ]\n model.actions = [\n PolicyActionModel(action=x, action_string=string, action_regex=compiled)\n for y in policy_dict['actions']\n for (x, string, compiled) in cls._policy_element_to_db(policy, y)\n ]\n return model", "title": "" }, { "docid": "7a12c463b0df09db9e580676fecacc1c", "score": "0.46514353", "text": "def update_policy(self, context, policy_id, policy):\n plugin_policy = copy.deepcopy(policy)\n policy_dicts = self._core._update_resource('policy', context,\n policy_id, plugin_policy)\n\n LOG.debug(\"update_policy(): \" + pformat(policy_dicts))\n return policy_dicts", "title": "" }, { "docid": "c8564f413935fb93e99f726687fa1de9", "score": "0.4649794", "text": "def ModifyCCSelfDefinePolicy(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyCCSelfDefinePolicy\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyCCSelfDefinePolicyResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "18b38b4c13dede33bfe9cad579c424e4", "score": "0.46412042", "text": "async def update_application_policy(\n self, appId, policyId,\n keep_empty_params=False\n ):\n http_method = \"put\".upper()\n api_url = format_url(f\"\"\"\n {self._base_url}\n /api/v1/apps/{appId}/policies/{policyId}\n \"\"\")\n\n body = {}\n headers = {}\n form = {}\n\n request, error = await self._request_executor.create_request(\n http_method, api_url, body, headers, form, keep_empty_params=keep_empty_params\n )\n\n if error:\n return (None, error)\n\n response, error = await self._request_executor\\\n .execute(request)\n\n if error:\n return (response, error)\n\n return (response, None)", "title": "" }, { "docid": "d8af787b08dc577e5ecac9549917ddef", "score": "0.46364427", "text": "def editnode(ctx, name, ip, ram, cpus, cluster):\n\n switched = True if cluster else False\n if not cluster:\n cluster = ss.svars['cluster']\n\n if ip:\n if not click.confirm('Changing IP adress after cluster provisionning '\n 'will break things. Do you want to continue?'):\n return\n\n if ip or ram or cpus:\n try:\n changed = nodes.edit_node(name, ip, ram, cpus, cluster=cluster)\n if changed:\n click.echo('\\n\"{}\" configurations changed:\\n - {}'.format(\n name,\n '\\n - '.join(\n '{}: {} -> {}'\n .format(c[0], c[1], c[2]) for c in changed))\n )\n except (ex.LoadError, ex.CreationError) as e:\n print_with_color(e.message, 'red')\n else:\n click.echo('Nothing to do. 
Type \"help editnode\" for usage.')\n\n if switched:\n set_context(ctx, cluster)", "title": "" }, { "docid": "4e766b22380c01d287212183c20a10c2", "score": "0.46277577", "text": "def add_policy(self, policy_name, policy_text):\n\n self.policies.add((policy_name, policy_text))", "title": "" }, { "docid": "d90e950fa3c1b13ebe6e1b87ce5ad3d6", "score": "0.46182564", "text": "def update(cls, challenge, request):\n data = request.form or request.get_json()\n for attr, value in data.items():\n setattr(challenge, attr, value)\n\n db.session.commit()\n return challenge", "title": "" }, { "docid": "769567d1c35bc03eefc17461b2bf1b80", "score": "0.46092576", "text": "def change(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tchangeresource = nsaptlicense()\n\t\t\t\tchangeresource.id = resource.id\n\t\t\t\tchangeresource.sessionid = resource.sessionid\n\t\t\t\tchangeresource.bindtype = resource.bindtype\n\t\t\t\tchangeresource.countavailable = resource.countavailable\n\t\t\t\tchangeresource.licensedir = resource.licensedir\n\t\t\t\tchangeresource.useproxy = resource.useproxy\n\t\t\t\treturn changeresource.perform_operation(client,\"update\")\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\tchangeresources = [ nsaptlicense() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tchangeresources[i].id = resource[i].id\n\t\t\t\t\t\tchangeresources[i].sessionid = resource[i].sessionid\n\t\t\t\t\t\tchangeresources[i].bindtype = resource[i].bindtype\n\t\t\t\t\t\tchangeresources[i].countavailable = resource[i].countavailable\n\t\t\t\t\t\tchangeresources[i].licensedir = resource[i].licensedir\n\t\t\t\t\t\tchangeresources[i].useproxy = resource[i].useproxy\n\t\t\t\tresult = cls.perform_operation_bulk_request(client, changeresources,\"update\")\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "de18a628dc0b1e6a4deac601e96ba495", "score": "0.4603846", "text": "def update_policy(self):\n raise UnityTrainerException(\"The update_model method was not implemented.\")", "title": "" }, { "docid": "bb9b6376af9fd6b2ea0a341c491f2765", "score": "0.45919776", "text": "async def upsert_distribution_policy(self, id: str, patch: Union[JSON, IO], **kwargs: Any) -> JSON:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = case_insensitive_dict(kwargs.pop(\"headers\", {}) or {})\n _params = kwargs.pop(\"params\", {}) or {}\n\n content_type: Optional[str] = kwargs.pop(\"content_type\", _headers.pop(\"Content-Type\", None))\n cls: ClsType[JSON] = kwargs.pop(\"cls\", None)\n\n content_type = content_type or \"application/merge-patch+json\"\n _json = None\n _content = None\n if isinstance(patch, (IOBase, bytes)):\n _content = patch\n else:\n _json = patch\n\n request = build_job_router_administration_upsert_distribution_policy_request(\n id=id,\n content_type=content_type,\n api_version=self._config.api_version,\n json=_json,\n content=_content,\n headers=_headers,\n params=_params,\n )\n path_format_arguments = {\n \"endpoint\": self._serialize.url(\"self._config.endpoint\", self._config.endpoint, \"str\", skip_quote=True),\n }\n request.url = self._client.format_url(request.url, **path_format_arguments)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, 
stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200, 201]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n if response.status_code == 200:\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if response.status_code == 201:\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if cls:\n return cls(pipeline_response, cast(JSON, deserialized), {}) # type: ignore\n\n return cast(JSON, deserialized) # type: ignore", "title": "" }, { "docid": "2f065ce7f8ab9d3bc1d3ad0529ad7c76", "score": "0.45850617", "text": "def RunWithArgs(self, bucket, entity):\n client = GetClientFromFlags()\n global_params = GetGlobalParamsFromFlags()\n request = messages.BucketAccessControl(\n bucket=bucket.decode('utf8'),\n entity=entity.decode('utf8'),\n )\n if FLAGS['domain'].present:\n request.domain = FLAGS.domain.decode('utf8')\n if FLAGS['email'].present:\n request.email = FLAGS.email.decode('utf8')\n if FLAGS['entityId'].present:\n request.entityId = FLAGS.entityId.decode('utf8')\n if FLAGS['etag'].present:\n request.etag = FLAGS.etag.decode('utf8')\n if FLAGS['id'].present:\n request.id = FLAGS.id.decode('utf8')\n if FLAGS['kind'].present:\n request.kind = FLAGS.kind.decode('utf8')\n if FLAGS['projectTeam'].present:\n request.projectTeam = apitools_base.JsonToMessage(messages.BucketAccessControl.ProjectTeamValue, FLAGS.projectTeam)\n if FLAGS['role'].present:\n request.role = FLAGS.role.decode('utf8')\n if FLAGS['selfLink'].present:\n request.selfLink = FLAGS.selfLink.decode('utf8')\n result = client.bucketAccessControls.Update(\n request, global_params=global_params)\n print apitools_base_cli.FormatOutput(result)", "title": "" }, { "docid": "24d895c450fcf1e22fb8f1a82a5dc0fa", "score": "0.4582995", "text": "def write_policy(input_file, crud, minimize):\n\n db_session = connect_db(DATABASE_FILE_PATH)\n\n if input_file:\n cfg = read_yaml_file(input_file)\n else:\n try:\n cfg = yaml.safe_load(sys.stdin)\n except yaml.YAMLError as exc:\n print(exc)\n sys.exit()\n\n # User supplies file containing resource-specific access levels\n if crud:\n policy = write_policy_with_access_levels(db_session, cfg, minimize)\n # User supplies file containing a list of IAM actions\n else:\n policy = write_policy_with_actions(db_session, cfg, minimize)\n print(json.dumps(policy, indent=4))\n return policy", "title": "" }, { "docid": "fb3d67b2a22eb80972673520d87b7171", "score": "0.45828155", "text": "def test_update_networkconfig_policy(self):\n pass", "title": "" }, { "docid": "7a0c3167266d5f0fc4449cf92c7fbb22", "score": "0.45826247", "text": "def on_patch(self, external_project_id, **kwargs):\n data = api.load_body(pecan.request, validator=self.validator)\n LOG.debug('Start on_patch...%s', data)\n\n existing_acls_map = {acl.operation: acl for acl in\n self.secret.secret_acls}\n for operation in filter(lambda x: data.get(x),\n validators.ACL_OPERATIONS):\n project_access = data[operation].get('project-access')\n user_ids = data[operation].get('users')\n s_acl = None\n if operation in existing_acls_map: # update if matching acl exists\n s_acl = existing_acls_map[operation]\n if project_access is not None:\n s_acl.project_access = project_access\n else:\n s_acl = models.SecretACL(self.secret.id, operation=operation,\n project_access=project_access)\n self.acl_repo.create_or_replace_from(self.secret, secret_acl=s_acl,\n 
user_ids=user_ids)\n\n acl_ref = '{0}/acl'.format(\n hrefs.convert_secret_to_href(self.secret.id))\n return {'acl_ref': acl_ref}", "title": "" }, { "docid": "4e7cef7a893b2ff62a402d2d1818b837", "score": "0.45777372", "text": "def edit_protocol(self, clusterProtocolObj):\n if isinstance(clusterProtocolObj, ClusterProtocol):\n value = str(clusterProtocolObj.value['tcpClusterPort'])\n elif isinstance(clusterProtocolObj, dict):\n value = json.dumps(clusterProtocolObj)\n else:\n raise AttributeError(\"Invalid Input, must be a ClusterProtocal Object\")\n url = self._url + \"/editProtocol\"\n params = {\n \"f\" : \"json\",\n \"tcpClusterPort\" : value\n }\n return self._con.post(path=url,\n postdata=params)", "title": "" }, { "docid": "242236657abe9ec50b9dee51e19dff53", "score": "0.4575577", "text": "def update(config, hostname, fqdn, username, password):\n\n data = {}\n if fqdn:\n data['fqdn'] = fqdn\n if username:\n data['username'] = username\n if password:\n data['password'] = password\n\n response = make_api_request('PUT', config, '/machines/' + hostname,\n data=json.dumps(data))\n\n print 'Successfully updated', hostname", "title": "" }, { "docid": "da2ecc6f37f780c4446e614d153e15f2", "score": "0.4574489", "text": "def create_policy(self, data):\n return", "title": "" }, { "docid": "199f2d9904e3cdd2a90cc841da654b78", "score": "0.4573477", "text": "def control_nm_policy(self, task, **kwargs):\n _execute_nm_command(task, kwargs, nm_commands.control_policies)", "title": "" }, { "docid": "e12f0aca9a40054d904b78b174fb5f01", "score": "0.4562401", "text": "def modify(self, **kwargs):\n post_dict = {}\n for key, value in kwargs.items():\n if key.startswith(\"schema_\"):\n post_dict.setdefault(\"schema_definition\", {})[\n key.split(\"_\", 1)[1]\n ] = value\n else:\n post_dict[key] = value\n self.system.api.put(\"config/ldap/{}\".format(self.id), data=post_dict)", "title": "" }, { "docid": "bee2c9065131bf36184e9ac052381f50", "score": "0.45583543", "text": "def put(self):\n data = request.json\n return update_category(data=data)", "title": "" }, { "docid": "d1577136c0723478efa9eeff65991056", "score": "0.45566952", "text": "def update_policy_context(self, event, context, request, response, exception, ctx):\n pass", "title": "" }, { "docid": "ea466db0cb28ac7c72ec1ec8f4136272", "score": "0.4552951", "text": "def modify_node(self, lb_id, node_id, node_data):\n\n url = \"%s/loadbalancers/%s/nodes/%s\" % (self.api_user_url, lb_id, node_id)\n node_data = json.dumps(node_data)\n request_result = requests.put(url, data=node_data, headers=self.api_headers, verify=False)\n if self.verbose:\n self.logging.info('http driver modify_node()')\n self.logging.info(request_result.status_code)\n self.logging.info(request_result.text)\n return request_result.status_code", "title": "" }, { "docid": "4639c64caced225fbe6182da80587d9d", "score": "0.45505947", "text": "def update_assessment(self, assessment_form):\n pass", "title": "" } ]
ed6f5b0306438010b938131d2a4e1338
Determines the reaction in which the molecule acts as an acid
[ { "docid": "6ce159e012bddb79bb5caebedafcd836", "score": "0.77446043", "text": "def acid_reaction(self):\r\n return (self + H*H*O > self.conjugate_base() + H*H*H*O*ep)", "title": "" } ]
[ { "docid": "bbb1be8060a4d49a5feb66d2af56c6de", "score": "0.82701194", "text": "def acid_reaction(self):\r\n return Molecule(self.parts, self.count, self.charge).acid_reaction()", "title": "" }, { "docid": "580168c967bde3459afb0f4c7df80b43", "score": "0.79462945", "text": "def acid_reaction(self):\r\n return Reaction(self.acid.copy() + H*H*O, self.base.copy() + H*H*H*O*ep)", "title": "" }, { "docid": "2b63aa55657b68a35a92d521c2cdf655", "score": "0.66009146", "text": "def base_reaction(self):\r\n return (self + H*H*O > self.conjugate_acid() + OH*en)", "title": "" }, { "docid": "2126ab62eeb1a631f25ca8b83a02b9d0", "score": "0.5944158", "text": "def addCimA(model):\n reaccima = Reaction('CIMA')\n reaccima.name = '(R)-Citramalate production'\n reaccima.lower_bound = 0.0\n reaccima.upper_bound = 1000.0\n \n \"\"\"CIMA reaction\"\"\"\n pyr_c = model.metabolites.get_by_id(\"pyr_c\") # Pyruvate\n accoa_c = model.metabolites.get_by_id(\"accoa_c\") # Acetyl-CoA\n h2o_c = model.metabolites.get_by_id(\"h2o_c\") # H2O\n rcitramalate_c = Metabolite(\n 'citramalate_c',\n formula='C5H6O5',\n name='(R)-citramalate',\n charge=-2,\n compartment='c')\n coa_c = model.metabolites.get_by_id(\"coa_c\") # CoA\n \n reaccima.add_metabolites({pyr_c: -1.0,\n accoa_c: -1.0,\n h2o_c: -1.0,\n rcitramalate_c: 1.0,\n coa_c: 1.0})\n reaccima.gene_reaction_rule = 'CimA37' \n# print(reaccima.reaction) \n# print(reaccima.genes) \n model.add_reaction(reaccima)\n reaccima.objective_coefficient = 0.0\n\n \"\"\"Sink for Citramalate\"\"\" \n reaccisink = Reaction('CitraSink')\n reaccisink.name = 'Sink needed to allow (R)-Citramalate to leave the system'\n reaccisink.lower_bound = 0.0\n reaccisink.upper_bound = 1000.0\n \n reaccisink.add_metabolites({rcitramalate_c: -1.0})\n# print(reaccisink.reaction) \n# print(reaccisink.genes) \n model.add_reaction(reaccisink)\n reaccisink.objective_coefficient = 0.0", "title": "" }, { "docid": "f06f9e5f8c77415fe77cb5616766e889", "score": "0.5828262", "text": "def conjugate_acid(self):\r\n return self * H * ep", "title": "" }, { "docid": "2a7724922de999fbb8ec1764a8a0cf82", "score": "0.58022386", "text": "def reaction(self, data):\n\t\tabstract()", "title": "" }, { "docid": "528d4d4dc42c8e1b45cb98fa59512ee9", "score": "0.5790585", "text": "def base_reaction(self):\r\n return Molecule(self.parts, self.count, self.charge).base_reaction()", "title": "" }, { "docid": "165097f2efc6f9b304621f65d8d15d0a", "score": "0.5787457", "text": "async def reaction(self, ctx):\n pass", "title": "" }, { "docid": "41d61146274ab29889df3f8f13e5ec96", "score": "0.57051206", "text": "def base_reaction(self):\r\n return Reaction(self.base.copy() + H*H*O, self.acid.copy() + OH*en)", "title": "" }, { "docid": "39433e51988f89d471e99f538cbe679f", "score": "0.55305785", "text": "def get_reaction(self, reaction_idx: int) -> ChemicalReaction:\n return AllChem.ReactionFromSmarts(self.reactions[reaction_idx])", "title": "" }, { "docid": "e03a3fe3083ce37a3735c5529c17a729", "score": "0.5487207", "text": "def get_reaction(self, name):\n idx = self.dic_enzs.get(name)\n if idx is None:\n return None\n\n return self.enzymes[idx]", "title": "" }, { "docid": "abcdab18e2aed2c8a38ba68e8a606f9b", "score": "0.54684675", "text": "async def custom(self, ctx, *, reaction):\n dictionary = { \"A\" : \"\\U0001f1e6\", \"B\": \"\\U0001f1e7\", \"C\": \"\\U0001f1e8\", \"D\": \"\\U0001f1e9\", \"E\": \"\\U0001f1ea\", \"F\": \"\\U0001f1eb\", \"G\": \"\\U0001f1ec\", \"H\" : \"\\U0001f1ed\", \"I\": \"\\U0001f1ee\", \"J\": \"\\U0001f1ef\", \"K\" : 
\"\\U0001f1f0\", \"L\": \"\\U0001f1f1\", \"M\" : \"\\U0001f1f2\", \"N\" : \"\\U0001f1f3\", \"O\" : \"\\U0001f1f4\", \"P\" : \"\\U0001f1f5\", \"Q\" : \"\\U0001f1f6\", \"R\" : \"\\U0001f1f7\", \"S\" : \"\\U0001f1f8\", \"T\" : \"\\U0001f1f9\", \"U\" : \"\\U0001f1fa\", \"V\" : \"\\U0001f1fb\", \"W\" : \"\\U0001f1fc\", \"X\" : \"\\U0001f1fd\", \"Y\" : \"\\U0001f1fe\", \"Z\" : \"\\U0001f1ff\"}\n a = reaction\n try:\n listr = [dictionary[char] for char in a]\n dontrun = False\n except KeyError:\n\n dontrun = True\n lenstr = len(reaction)\n if lenstr > 8:\n await self.bot.say(\"Length cannot be more than 8 characters\")\n elif dontrun == True:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n elif lenstr == 8:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n await self.bot.add_reaction(x, listr[1])\n await self.bot.add_reaction(x, listr[2])\n await self.bot.add_reaction(x, listr[3])\n await self.bot.add_reaction(x, listr[4])\n await self.bot.add_reaction(x, listr[5])\n await self.bot.add_reaction(x, listr[6])\n await self.bot.add_reaction(x, listr[7])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n elif lenstr == 7:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n await self.bot.add_reaction(x, listr[1])\n await self.bot.add_reaction(x, listr[2])\n await self.bot.add_reaction(x, listr[3])\n await self.bot.add_reaction(x, listr[4])\n await self.bot.add_reaction(x, listr[5])\n await self.bot.add_reaction(x, listr[6])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n\n elif lenstr == 6:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n await self.bot.add_reaction(x, listr[1])\n await self.bot.add_reaction(x, listr[2])\n await self.bot.add_reaction(x, listr[3])\n await self.bot.add_reaction(x, listr[4])\n await self.bot.add_reaction(x, listr[5])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n elif lenstr == 5:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n await self.bot.add_reaction(x, listr[1])\n await self.bot.add_reaction(x, listr[2])\n await self.bot.add_reaction(x, listr[3])\n await self.bot.add_reaction(x, listr[4])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n\n elif lenstr == 4:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n await self.bot.add_reaction(x, listr[1])\n await self.bot.add_reaction(x, listr[2])\n await self.bot.add_reaction(x, listr[3])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n\n elif lenstr == 3:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n await self.bot.add_reaction(x, listr[1])\n await self.bot.add_reaction(x, listr[2])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n\n elif lenstr == 2:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n 
await self.bot.add_reaction(x, listr[1])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n elif lenstr == 1:\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n try:\n await self.bot.add_reaction(x, listr[0])\n except KeyError:\n await self.bot.say(\"Could not find, Letters only and caps.\")\n\n else:\n await self.bot.say(\"Fatal error\")", "title": "" }, { "docid": "f0aa627f08c6c84cdff7bd8c68be2c8f", "score": "0.54097646", "text": "async def amc(self, ctx):\n coins = 1500\n exp = 150\n amc = await get_amc(self.bot.session)\n amc_tex = amc[\"latex\"]\n amc_answer = amc[\"answer\"]\n fn = await generate_image(\"\", amc_tex)\n message = await ctx.send(file=discord.File(fn))\n\n\n os.system(\"rm \" + fn)\n\n for i in letters:\n await message.add_reaction(i)\n\n tried = []\n\n def check(reaction, user):\n return (\n reaction.message.id == message.id\n and reaction.emoji in letters\n and not user.id in tried\n and user.id != message.author.id\n )\n\n while True:\n reaction, user = await self.bot.wait_for(\"reaction_add\", check=check)\n print(\"heyo\")\n print(letters[reaction.emoji], amc_answer)\n if letters[reaction.emoji] == amc_answer:\n if await bitecoin.get_coins(self.bot.session, user.id) < coins // 4:\n await (user.mention + \" you're too poor to play!\")\n else:\n bitecoin_string = user.mention+\" wins! (+{0} bitecoins, +{1} XP)\".format(\n str(coins),\n str(exp)\n )\n problem_string = \"That was problem {0} from the {1} AMC 12{2}\".format(\n str(amc[\"problem\"] + 1),\n amc[\"year\"],\n amc[\"version\"]\n )\n success_string = \"{0}\\n{1}\".format(bitecoin_string, problem_string)\n await ctx.send(success_string)\n await add_coins(self.bot.session, ctx, user, coins)\n await add_exp(self.bot.session, ctx, user, exp)\n break\n else:\n if await bitecoin.get_coins(self.bot.session, user.id) < coins // 4:\n await ctx.send(user.mention + \" you're too poor to play!\")\n else:\n tried.append(user.id)\n await ctx.send(user.mention+\" wrong! 
(-{0} bitecoins)\".format(coins // 4))\n await add_coins(self.bot.session, ctx, user, -1 * (coins // 4))", "title": "" }, { "docid": "372f3c1b372e00c66ee68228e443598d", "score": "0.5403136", "text": "def revReaction(calcium_oxide):\n YIELD = 1\n CaCO3 = calcium_oxide * YIELD\n CO2 = calcium_oxide * YIELD\n req_reactants = {\"CaCO3\": CaCO3}\n byproducts = {\"CO2\": CO2}\n return req_reactants, byproducts", "title": "" }, { "docid": "4dbedebb4237ca6870a3e5c3deab1ffe", "score": "0.5354357", "text": "def produces_accoa(base):\n ra = cobra.Reaction(\"A\")\n rb = cobra.Reaction(\"B\")\n rc = cobra.Reaction(\"C\")\n base.add_reactions([ra, rb, rc])\n ra.reaction = \"a <--> b\"\n rb.reaction = \"b <--> c\"\n rc.reaction = \"accoa_c + h2o_c + a <--> coa_c + c + ac_c + h_c\"\n base.add_boundary(base.metabolites.a, type=\"sink\")\n base.add_boundary(base.metabolites.h_c, type=\"sink\")\n base.add_boundary(base.metabolites.ac_c, type=\"sink\")\n base.add_boundary(base.metabolites.h2o_c, type=\"sink\")\n base.add_boundary(base.metabolites.coa_c, type=\"sink\")\n base.add_boundary(base.metabolites.accoa_c, type=\"sink\")\n base.add_boundary(base.metabolites.c, type=\"demand\")\n for met in base.metabolites:\n met.compartment = \"c\"\n return base", "title": "" }, { "docid": "a1c66d6b477086d0dc30ba4727d5fc0b", "score": "0.53131276", "text": "def interactions(self):\n return mif.Entry( self.root['entrySet']['entry'][0] ).interactions", "title": "" }, { "docid": "76fee8d5934533ee1aaaa420d351e5c4", "score": "0.53117245", "text": "def C_M_AC(self):\r\n b_squared = np.abs(self.a[0]) # Modulus of a1\r\n gamma_a1 = np.angle(self.a[0]) # Half argument of a1\r\n\r\n return -4*np.pi*b_squared/(self.chord**2)*sin(2*(gamma_a1-self.eta_t))", "title": "" }, { "docid": "8e180c27b3381839d377d9dcd64816dc", "score": "0.52855194", "text": "def ace(self):\n return self.rank == \"A\"", "title": "" }, { "docid": "4ecedc6056f326bbf8e94d2198a7753c", "score": "0.52693504", "text": "def amino_acid(self, symbol=True):\n if symbol:\n vals = self.acids.keys()\n return choice(vals)\n return choice(self.acids.keys())", "title": "" }, { "docid": "af6b30deca85361a4cbd52a31b43ce03", "score": "0.52435786", "text": "def ace(self):\n return self.rank == \"A\"", "title": "" }, { "docid": "9a55faf3517883261d731e6fb5631617", "score": "0.5180681", "text": "def InitiateReaction(self):\n if self.reaction_index in self.reactions_Consuming: # Step 10, Consuming delayed\n self.X_matrix += self.N_matrix_transpose_reactants[self.reaction_index] # Updates only reactants\n self.species_to_update = self.parse.reactant_indices[ self.reaction_index ]\n self.add_delay()\n elif self.reaction_index in self.reactions_NonConsuming: # Step 9, Non-consuming delayed \n self.add_delay()\n self.species_to_update = [] \n else: # Step 8, A normal, non-delayed reaction\n self.X_matrix += self.N_matrix_transpose[self.reaction_index]\n self.species_to_update = self.parse.reaction_affects[ self.reaction_index ]", "title": "" }, { "docid": "acfb0edd0eeb562d99f35db39deaf87e", "score": "0.5159582", "text": "def can_react(state, rxns):\n mol1 = state.pop()\n mol2 = state.pop()\n reaction_mask = [int(rxn.run_reaction([mol1, mol2]) is not None) for rxn in rxns]\n return sum(reaction_mask), reaction_mask", "title": "" }, { "docid": "86c57fd1439822e896ce51a5b572f15c", "score": "0.51522136", "text": "def GetReaction(n, model):\n reaction = model.getReaction(n)\n reaction_id = reaction.getId()\n reaction_name = reaction.getName()\n return reaction_id", "title": "" }, { "docid": 
"bc0ed70fbdeb29573dc5dfeb98a3f61b", "score": "0.51307166", "text": "def test_map_abstractions(self):\n # H + CH4 <=> H2 + CH3\n r_1 = ARCSpecies(label='H', smiles='[H]', xyz={'coords': ((0, 0, 0),), 'isotopes': (1,), 'symbols': ('H',)})\n r_2 = ARCSpecies(label='CH4', smiles='C', xyz=self.ch4_xyz)\n p_1 = ARCSpecies(label='H2', smiles='[H][H]', xyz=self.h2_xyz)\n p_2 = ARCSpecies(label='CH3', smiles='[CH3]', xyz=self.ch3_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertIn(atom_map[0], [0, 1])\n self.assertEqual(atom_map[1], 2)\n for index in [2, 3, 4, 5]:\n self.assertIn(atom_map[index], [0, 1, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [0, 1] for r_index in [2, 3, 4, 5]))\n self.assertTrue(check_atom_map(rxn))\n\n # H + CH4 <=> CH3 + H2 (different order)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertIn(atom_map[0], [4, 5])\n self.assertEqual(atom_map[1], 0)\n for index in [2, 3, 4, 5]:\n self.assertIn(atom_map[index], [1, 2, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [4, 5] for r_index in [2, 3, 4, 5]))\n self.assertTrue(check_atom_map(rxn))\n\n # CH4 + H <=> H2 + CH3 (different order)\n rxn = ARCReaction(r_species=[r_2, r_1], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 2)\n for index in [1, 2, 3, 4]:\n self.assertIn(atom_map[index], [0, 1, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [0, 1] for r_index in [1, 2, 3, 4]))\n self.assertIn(atom_map[5], [0, 1])\n self.assertTrue(check_atom_map(rxn))\n\n # CH4 + H <=> CH3 + H2 (different order)\n rxn = ARCReaction(r_species=[r_2, r_1], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 0)\n for index in [1, 2, 3, 4]:\n self.assertIn(atom_map[index], [1, 2, 3, 4, 5])\n self.assertTrue(any(atom_map[r_index] in [4, 5] for r_index in [1, 2, 3, 4]))\n self.assertIn(atom_map[5], [4, 5])\n self.assertTrue(check_atom_map(rxn))\n\n \n # H + CH3NH2 <=> H2 + CH2NH2\n ch3nh2_xyz = {'coords': ((-0.5734111454228507, 0.0203516083213337, 0.03088703933770556),\n (0.8105595891860601, 0.00017446498908627427, -0.4077728757313545),\n (-1.1234549667791063, -0.8123899006368857, -0.41607711106038836),\n (-0.6332220120842996, -0.06381791823047896, 1.1196983583774054),\n (-1.053200912106195, 0.9539501896695028, -0.27567270246542575),\n (1.3186422395164141, 0.7623906284020254, 0.038976118645639976),\n (1.2540872076899663, -0.8606590725145833, -0.09003882710357966)),\n 'isotopes': (12, 14, 1, 1, 1, 1, 1),\n 'symbols': ('C', 'N', 'H', 'H', 'H', 'H', 'H')}\n ch2nh2_xyz = {'coords': ((0.6919493009211066, 0.054389375309083846, 0.02065422596281878),\n (1.3094508022837807, -0.830934909576592, 0.14456347719459348),\n (1.1649142139806816, 1.030396183273415, 0.08526955368597328),\n (-0.7278194451655412, -0.06628299353512612, -0.30657582460750543),\n (-1.2832757211903472, 0.7307667658607352, 0.00177732009031573),\n (-1.155219150829674, -0.9183344213315149, 0.05431124767380799)),\n 'isotopes': (12, 1, 1, 14, 1, 1),\n 'symbols': ('C', 'H', 'H', 'N', 'H', 'H')}\n r_1 = ARCSpecies(label='H', smiles='[H]', xyz={'coords': ((0, 0, 0),), 'isotopes': (1,), 'symbols': ('H',)})\n r_2 = ARCSpecies(label='CH3NH2', smiles='CN', xyz=ch3nh2_xyz)\n p_1 = ARCSpecies(label='H2', smiles='[H][H]', xyz=self.h2_xyz)\n p_2 = ARCSpecies(label='CH2NH2', 
smiles='[CH2]N', xyz=ch2nh2_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertIn(atom_map[0], [0,1])\n self.assertEqual(atom_map[1], 2)\n self.assertEqual(atom_map[2], 5)\n self.assertIn(atom_map[3], [0, 1, 3, 4])\n self.assertIn(atom_map[4], [0, 1, 3, 4])\n self.assertIn(atom_map[5], [0, 1, 3, 4])\n self.assertTrue(any(atom_map[r_index] in [0, 1] for r_index in [3, 4, 5]))\n self.assertIn(atom_map[6], [6, 7])\n self.assertIn(atom_map[7], [6, 7])\n self.assertTrue(check_atom_map(rxn))\n\n # CH4 + OH <=> CH3 + H2O\n r_1 = ARCSpecies(label='CH4', smiles='C', xyz=self.ch4_xyz)\n r_2 = ARCSpecies(label='OH', smiles='[OH]', xyz=self.oh_xyz)\n p_1 = ARCSpecies(label='CH3', smiles='[CH3]', xyz=self.ch3_xyz)\n p_2 = ARCSpecies(label='H2O', smiles='O', xyz=self.h2o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 0)\n self.assertIn(atom_map[1], [1, 2, 3, 5, 6])\n self.assertIn(atom_map[2], [1, 2, 3, 5, 6])\n self.assertIn(atom_map[3], [1, 2, 3, 5, 6])\n self.assertIn(atom_map[4], [1, 2, 3, 5, 6])\n self.assertEqual(atom_map[5], 4)\n self.assertIn(atom_map[6], [5, 6])\n self.assertTrue(any(atom_map[r_index] in [5, 6] for r_index in [1, 2, 3, 4]))\n self.assertTrue(check_atom_map(rxn))\n\n # NH2 + N2H4 <=> NH3 + N2H3\n r_1 = ARCSpecies(label='NH2', smiles='[NH2]', xyz=self.nh2_xyz)\n r_2 = ARCSpecies(label='N2H4', smiles='NN', xyz=self.n2h4_xyz)\n p_1 = ARCSpecies(label='NH3', smiles='N', xyz=self.nh3_xyz)\n p_2 = ARCSpecies(label='N2H3', smiles='N[NH]', xyz=self.n2h3_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 0)\n self.assertIn(atom_map[1], [1, 2, 3])\n self.assertIn(atom_map[2], [1, 2, 3])\n self.assertIn(atom_map[3], [4, 5])\n self.assertIn(atom_map[4], [4, 5])\n self.assertTrue(any(atom_map[r_index] in [1, 2, 3] for r_index in [5, 6, 7, 8]))\n self.assertTrue(check_atom_map(rxn))\n\n # NH2 + N2H4 <=> N2H3 + NH3 (reversed product order compared to the above reaction)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0], 5)\n self.assertIn(atom_map[1], [6, 7, 8])\n self.assertIn(atom_map[2], [6, 7, 8])\n self.assertIn(atom_map[3], [0, 1])\n self.assertIn(atom_map[4], [0, 1])\n self.assertTrue(any(atom_map[r_index] in [6, 7, 8] for r_index in [5, 6, 7, 8]))\n self.assertTrue(check_atom_map(rxn))\n\n\n # CH3OO + CH3CH2OH <=> CH3OOH + CH3CH2O / peroxyl to alkoxyl, modified atom and product order\n r_1 = ARCSpecies(\n label=\"CH3OO\",\n smiles=\"CO[O]\", xyz=\"\"\"C -0.41690000 0.03757000 0.00590000\n O 0.83973000 0.69383000 -0.05239000\n O 1.79663000 -0.33527000 -0.02406000\n H -0.54204000 -0.62249000 -0.85805000\n H -1.20487000 0.79501000 -0.01439000\n H -0.50439000 -0.53527000 0.93431000\"\"\")\n r_2 = ARCSpecies(label='CH3CH2OH', smiles='CCO', xyz=\"\"\"C -0.97459464 0.29181710 0.10303882\n C 0.39565894 -0.35143697 0.10221676\n H -1.68942501 -0.32359616 0.65926091\n H -0.93861751 1.28685508 0.55523033\n H -1.35943743 0.38135479 -0.91822428\n H 0.76858330 -0.46187184 1.12485643\n H 1.10301149 0.25256708 -0.47388355\n O 0.30253309 -1.63748710 -0.49196889\n H 1.19485981 -2.02360458 -0.47786539\"\"\")\n p_1 = ARCSpecies(label='CH3OOH', smiles='COO', xyz=\"\"\"C -0.76039072 
0.01483858 -0.00903344\n H -1.56632337 0.61401630 -0.44251282\n H -1.02943316 -0.30449156 1.00193709\n O 0.16024511 1.92327904 0.86381800\n H -0.60052507 -0.86954495 -0.63086438\n O 0.44475333 0.76952102 0.02291303\n H 0.30391344 2.59629139 0.17435159\"\"\")\n p_2 = ARCSpecies(label='CH3CH2O', smiles='CC[O]', xyz=\"\"\"C 0.79799272 -0.01511040 0.00517437\n H -1.13881231 -0.99286049 0.06963185\n O 1.17260343 -0.72227959 -1.04851579\n H -1.14162013 0.59700303 0.84092854\n H -1.13266865 0.46233725 -0.93283228\n C -0.74046271 0.02568566 -0.00568694\n H 1.11374677 1.03794239 0.06905096\n H 1.06944350 -0.38306117 1.00698657\"\"\")\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual([0,5,3],atom_map[0:3])\n self.assertIn(tuple(atom_map[3:6]), list(permutations([1, 2, 4])))\n self.assertEqual([12, 7], atom_map[6:8])\n self.assertIn(tuple(atom_map[8:11]),list(permutations([8, 10, 11])))\n self.assertIn(tuple(atom_map[11:13]),list(permutations([13, 14])))\n self.assertEqual([9,6], atom_map[13:]) \n self.assertTrue(check_atom_map(rxn))\n\n # C3H6O + OH <=> C3H5O + H2O\n r_1 = ARCSpecies(label='C3H6O', smiles='CCC=O', xyz=self.c3h6o_xyz)\n r_2 = ARCSpecies(label='OH', smiles='[OH]', xyz=self.oh_xyz)\n p_1 = ARCSpecies(label='C3H5O', smiles='C[CH]C=O', xyz=self.c3h5o_xyz)\n p_2 = ARCSpecies(label='H2O', smiles='O', xyz=self.h2o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[:4], [0, 1, 3, 4])\n self.assertIn(atom_map[4], [5,6, 7])\n self.assertIn(atom_map[5], [5, 6, 7])\n self.assertIn(atom_map[6], [5, 6, 7])\n self.assertIn(atom_map[7], [2, 11])\n self.assertIn(atom_map[8], [2, 11])\n self.assertEqual(atom_map[9:], [8, 9, 10])\n\n # C4H10O + OH <=> C4H9O + H2O\n r_1 = ARCSpecies(label='C4H10O', smiles='CC(C)CO', xyz=self.c4h10o_xyz)\n r_2 = ARCSpecies(label='OH', smiles='[OH]', xyz=self.oh_xyz)\n p_1 = ARCSpecies(label='C4H9O', smiles='[CH2]C(C)CO', xyz=self.c4h9o_xyz)\n p_2 = ARCSpecies(label='H2O', smiles='O', xyz=self.h2o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[:5], [0, 3, 4, 5, 6])\n for index in [5, 6, 7]:\n self.assertIn(atom_map[index], [1, 2, 15, 16])\n self.assertEqual(atom_map[8],7)\n for i in atom_map[9:12]:\n self.assertIn(i,[8,9,10])\n for i in atom_map[12:14]:\n self.assertIn(i,[11,12])\n self.assertEqual(atom_map[14],13)\n self.assertEqual(atom_map[15],14)\n self.assertIn(atom_map[16], [15, 16])\n self.assertTrue(check_atom_map(rxn))\n\n # C3H6O + C4H9O <=> C3H5O + C4H10O\n r_1 = ARCSpecies(label='C3H6O', smiles='CCC=O', xyz=self.c3h6o_xyz)\n r_2 = ARCSpecies(label='C4H9O', smiles='[CH2]C(C)CO', xyz=self.c4h9o_xyz)\n p_1 = ARCSpecies(label='C3H5O', smiles='C[CH]C=O', xyz=self.c3h5o_xyz)\n p_2 = ARCSpecies(label='C4H10O', smiles='CC(C)CO', xyz=self.c4h10o_xyz)\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(atom_map[0:4], [0, 1, 3, 4])\n self.assertIn(atom_map[4], [5,6, 7])\n self.assertIn(atom_map[5], [5,6, 7])\n self.assertIn(atom_map[6], [5,6, 7])\n self.assertIn(atom_map[7], [2, 14, 15, 16, 18, 19, 20])\n self.assertIn(atom_map[8], [2, 14, 15, 16, 18, 19, 20])\n self.assertIn(2, atom_map[7:9])\n self.assertEqual(atom_map[9], 8)\n self.assertIn(atom_map[10], [9,11])\n 
self.assertIn(atom_map[11], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[12], [14, 15, 16,18,19,20])\n self.assertEqual(atom_map[13],10)\n self.assertIn(atom_map[14], [9,11])\n self.assertEqual(atom_map[15:17], [12,13])\n self.assertEqual(atom_map[17],17)\n self.assertIn(atom_map[18], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[19], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[20], [14, 15, 16,18,19,20])\n self.assertIn(atom_map[21], [21,22])\n self.assertIn(atom_map[22], [21,22])\n self.assertEqual(atom_map[23],23)\n self.assertTrue(check_atom_map(rxn))\n\n\n # ClCH3 + H <=> CH3 + HCl\n r_1 = ARCSpecies(label=\"ClCH3\", smiles=\"CCl\", xyz=self.ch3cl_xyz)\n r_2 = ARCSpecies(label=\"H\", smiles=\"[H]\", xyz=self.h_rad_xyz)\n p_1 = ARCSpecies(label=\"CH3\", smiles=\"[CH3]\", xyz=self.ch3_xyz_2)\n p_2 = ARCSpecies(label=\"HCl\", smiles=\"[H][Cl]\", xyz=self.hcl_xyz)\n rxn = ARCReaction(r_species=[r_2, r_1], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map = rxn.atom_map\n self.assertEqual(rxn.family.label.lower(),\"cl_abstraction\")\n self.assertEqual(atom_map[:3], [0, 1, 2])\n for i in atom_map[3:]:\n self.assertIn(i, [3, 4, 5])\n self.assertTrue(check_atom_map(rxn))\n # ClCH3 + H <=> CH3 + HCl different order\n rxn_2 = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn_2.determine_family(self.rmgdb)\n atom_map = rxn_2.atom_map\n self.assertEqual(atom_map[:2], [1, 2])\n for index in [2, 3, 4]:\n self.assertIn(atom_map[index], [3, 4, 5])\n self.assertEqual(atom_map[-1], 0)\n self.assertTrue(check_atom_map(rxn))\n\n # [OH] + CC(Cl)C(Cl)Cl <=> OCl + C[CH]C(Cl)Cl\n smiles = []\n for i in '[OH] + CC(Cl)C(Cl)Cl <=> OCl + C[CH]C(Cl)Cl'.split():\n if i != \"<=>\" and i != '+':\n smiles.append(i)\n\n r_1_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1),\n 'coords': ((0.48890386738601, 0.0, 0.0), (-0.48890386738601, 0.0, 0.0))}\n\n r_2_xyz = {'symbols': ('C', 'C', 'Cl', 'C', 'Cl', 'Cl', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 35, 12, 35, 35, 1, 1, 1, 1, 1), 'coords': (\n (1.2438372893135106, 0.40661350465687324, -0.16279018264054892),\n (0.07827324125005171, -0.277154649803216, 0.5482887194488805),\n (-0.1538756923467617, 0.5009471321060629, 2.155037501334864),\n (-1.245183156820767, -0.303306879503286, -0.23533878891899096),\n (-1.1043944712471334, -1.3227416585177485, -1.7010412234762065),\n (-1.8186157680197266, 1.3177860639647956, -0.7221760707038685),\n (2.159163866798944, 0.32583527910226096, 0.4346504778666261),\n (1.056514815021544, 1.471768404816661, -0.33289291962920015),\n (1.4499964728678152, -0.05967057895051073, -1.131013164504492),\n (0.3717352549047681, -1.308596593192221, 0.7750989547682503),\n (-2.0374518517222544, -0.751480024679671, 0.37217669645466245))}\n\n p_1_xyz = {'symbols': ('O', 'Cl', 'H'), 'isotopes': (16, 35, 1), 'coords': (\n (-0.3223044372303026, 0.4343354356368888, 0.0), (1.2650242694442462, -0.12042710381137228, 0.0),\n (-0.9427198322139436, -0.3139083318255167, 0.0))}\n\n p_2_xyz = {'symbols': ('C', 'C', 'C', 'Cl', 'Cl', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 12, 35, 35, 1, 1, 1, 1, 1), 'coords': (\n (-1.3496376883278178, -0.020445981649800302, -0.1995184115269273),\n (-0.051149096449292386, -0.3885500107837139, 0.4222976979623008),\n (1.217696701041357, 0.15947991928242372, -0.1242718714010236),\n (1.7092794464102241, 1.570982412202936, 0.8295196720275746),\n (2.474584210365428, -1.0919019396606517, -0.06869614478411318),\n (-1.6045061896547035, 1.0179450876989615, 0.03024632893682861),\n 
(-1.3137314500783486, -0.14754777860704252, -1.2853589013330937),\n (-2.1459595425475264, -0.6625965540242661, 0.188478021031359),\n (-0.044412318929613885, -0.9093853981117669, 1.373599947353138),\n (1.1078359281702537, 0.47202024365290884, -1.1662963382659064))}\n\n r_1 = ARCSpecies(label='r1', smiles=smiles[0],xyz=r_1_xyz )\n r_2 = ARCSpecies(label='r2', smiles=smiles[1],xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles=smiles[2],xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles=smiles[3],xyz=p_2_xyz)\n\n rxn1 = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn1.determine_family(self.rmgdb)\n atom_map = rxn1.atom_map\n #expected: [0, 2, 3, 4, 1, 5, [6, 7], [6, 7], [8, 9, 10], [8, 9, 10], [8, 9, 10], 11, 12]\n self.assertEqual(atom_map[:6], [0,2,3,4,1,5])\n self.assertIn(atom_map[6],[6,7])\n self.assertIn(atom_map[7], [6, 7])\n self.assertIn(atom_map[8], [8,9,10])\n self.assertIn(atom_map[9], [8,9,10])\n self.assertIn(atom_map[10], [8,9,10])\n self.assertEqual(atom_map[11],11)\n self.assertEqual(atom_map[12], 12)\n self.assertTrue(check_atom_map(rxn))\n\n # Br abstraction\n\n # OH + CH3Br <=> HOBr + CH3\n r_1_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1),\n 'coords': ((0.48890386738601, 0.0, 0.0), (-0.48890386738601, 0.0, 0.0))}\n\n r_2_xyz = {'symbols': ('C', 'Br', 'H', 'H', 'H'), 'isotopes': (12, 79, 1, 1, 1), 'coords': (\n (-0.18386469024502916, -0.0018692264481234688, 0.0013619971891954718),\n (1.7508998155803106, 0.017800204658373744, -0.01296995950979447),\n (-0.5218757573028803, -0.6458197160504338, -0.8118262063895171),\n (-0.5338693855859405, 1.0212985296781085, -0.14294057406667127),\n (-0.5112899824464621, -0.3914097918379277, 0.9663747427767874))}\n\n p_1_xyz = {'symbols': ('O', 'Br', 'H'), 'isotopes': (16, 79, 1), 'coords': (\n (-0.3691040522383542, 0.44403140947953346, 0.0), (1.3490312999095744, -0.1319682267704319, 0.0),\n (-0.9799272476712202, -0.31206318270910166, 0.0))}\n\n p_2_xyz = {'symbols': ('C', 'H', 'H', 'H'), 'isotopes': (12, 1, 1, 1), 'coords': (\n (3.3746019998564553e-09, 5.828827384106545e-09, -4.859105107686622e-09),\n (1.0669051052331406, -0.17519582095514982, 0.05416492980439295),\n (-0.6853171627400634, -0.8375353626879753, -0.028085652887100996),\n (-0.3815879458676787, 1.0127311778142964, -0.026079272058187608))}\n\n r_1 = ARCSpecies(label='r1', smiles='[O][H]', xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles='[CH3]Br', xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles='OBr', xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles='[CH3]', xyz=p_2_xyz)\n\n rxn1 = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn1.determine_family(self.rmgdb)\n atom_map = rxn1.atom_map\n self.assertEqual(atom_map[:4],[0,2,3,1])\n self.assertIn(atom_map[4], [4,5,6])\n self.assertIn(atom_map[5], [4, 5, 6])\n self.assertIn(atom_map[6], [4, 5, 6])\n self.assertTrue(check_atom_map(rxn))\n\n # [H] + CC(=O)Br <=> [H][Br] + C[C](=O)\n r_1_xyz = {'symbols': ('H',), 'isotopes': (1,), 'coords': ((0.0, 0.0, 0.0),)}\n\n r_2_xyz = {'symbols': ('C', 'C', 'O', 'Br', 'H', 'H', 'H'), 'isotopes': (12, 12, 16, 79, 1, 1, 1), 'coords': (\n (-0.7087772076387326, -0.08697184565826255, 0.08295914062572969),\n (0.7238141593293749, 0.2762480677183181, -0.14965326856248656),\n (1.1113560248255752, 1.3624373452907719, -0.554840372311578),\n (2.0636725443687616, -1.041297021241265, 0.20693447296577364),\n (-0.9844931733249197, -0.9305935329026733, -0.5546432084044857),\n (-0.8586221633621384, -0.3455305862905263, 1.134123935245044),\n (-1.3469501841979155, 
0.7657075730836449, -0.16488069955797996))}\n\n p_1_xyz = {'symbols': ('C', 'C', 'O', 'H', 'H', 'H'), 'isotopes': (12, 12, 16, 1, 1, 1), 'coords': (\n (-0.4758624005470258, 0.015865899777425058, -0.11215987340300927),\n (0.9456990856850401, -0.031530842469194666, 0.2228995599390481),\n (2.0897646616994816, -0.06967555524967288, 0.492553667108967),\n (-1.08983188764878, -0.06771143046366379, 0.7892594299969324),\n (-0.7261604551815313, 0.9578749227991876, -0.6086176800339509),\n (-0.7436090040071672, -0.8048229943940851, -0.7839351036079769))}\n\n p_2_xyz = {'symbols': ('Br', 'H'), 'isotopes': (79, 1),\n 'coords': ((0.7644788559644482, 0.0, 0.0), (-0.7644788559644482, 0.0, 0.0))}\n\n r_1 = ARCSpecies(label='r1', smiles='[H]', xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles='CC(=O)Br', xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles='C[C](=O)', xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles='[Br][H]', xyz=p_2_xyz)\n\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_2, p_1])\n rxn.determine_family(self.rmgdb)\n atom_map=rxn.atom_map\n self.assertEqual(atom_map[:5], [1, 2, 3, 4, 0])\n self.assertIn(tuple(atom_map[5:]), permutations([5, 6, 7]))\n self.assertTrue(check_atom_map(rxn))\n\n #Change Order [H] + CC(=O)Br <=> C[C](=O) + [H][Br]\n r_1 = ARCSpecies(label='r1', smiles='[H]', xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles='CC(=O)Br', xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles='C[C](=O)', xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles='[H][Br]', xyz=p_2_xyz)\n\n rxn = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn.determine_family(self.rmgdb)\n atom_map=rxn.atom_map\n self.assertEqual(atom_map[:5], [7, 0, 1, 2, 6])\n self.assertIn(tuple(atom_map[5:]), list(permutations([3, 4, 5])))\n self.assertTrue(check_atom_map(rxn))\n\n # [O] + CC(Cl)(Cl)C(Cl)(Cl)Cl <=> [O][Cl] + C[C](Cl)C(Cl)(Cl)Cl\n smiles = ['[O]', 'CC(Cl)(Cl)C(Cl)(Cl)Cl', '[O][Cl]', 'C[C](Cl)C(Cl)(Cl)Cl']\n r_1_xyz = {'symbols': ('O',), 'isotopes': (16,), 'coords': ((0.0, 0.0, 0.0),)}\n \n r_2_xyz = {'symbols': ('C', 'C', 'Cl', 'Cl', 'C', 'Cl', 'Cl', 'Cl', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 35, 35, 12, 35, 35, 35, 1, 1, 1), 'coords': (\n (-1.3340513332954889, 0.2811635614535751, -0.078045907046801),\n (-0.06460593375936133, -0.5810773314093911, -0.02962891425941322),\n (-0.2609310384494481, -1.7354943987581986, 1.3623405448734305),\n (-0.06523629769352735, -1.6097818007913829, -1.5298182298699716),\n (1.2568349080206898, 0.251354210359208, 0.09596787533379413),\n (2.7373740437547514, -0.7858820942054363, 0.1510602855327231),\n (1.4729373085674606, 1.396702908938121, -1.2920641361183987),\n (1.2776463867390788, 1.2712465700052025, 1.5941477468638563),\n (-1.3327512075949484, 0.9633461541030465, -0.9346702675682734),\n (-2.235286345856216, -0.338363905821591, -0.1659562352150731),\n (-1.45193049043298, 0.886786126126846, 0.8266672374741411))}\n \n p_1_xyz = {'symbols': ('O', 'Cl'), 'isotopes': (16, 35),\n 'coords': ((0.8407400963991551, 0.0, 0.0), (-0.8407400963991551, 0.0, 0.0))}\n \n p_2_xyz = {'symbols': ('C', 'C', 'Cl', 'C', 'Cl', 'Cl', 'Cl', 'H', 'H', 'H'),\n 'isotopes': (12, 12, 35, 12, 35, 35, 35, 1, 1, 1), 'coords': (\n (-1.3826664358998055, -0.04852445131046896, -0.016935550260331302),\n (-0.01984344739858957, 0.5351447284412386, 0.14069644461529232),\n (0.06780252918727915, 2.0178457939896477, 1.0316373428560468),\n (1.240695333262242, -0.22627953918952265, -0.15010504208991474),\n (2.5003017492701316, 0.8385176202279041, -0.8511606324628386),\n (1.8619474142609682, 
-0.9616513146239644, 1.3591396432655138),\n (0.9630230000989414, -1.5484613928720057, -1.3347069863893728),\n (-1.4535219021739985, -1.0095075283181074, 0.502205010423143),\n (-2.1607091682952886, 0.6031752006499635, 0.39420249485619346),\n (-1.6170290723118037, -0.20025911699469934, -1.0749727248137075))}\n \n r_1 = ARCSpecies(label='r1', smiles=smiles[0], xyz=r_1_xyz)\n r_2 = ARCSpecies(label='r2', smiles=smiles[1], xyz=r_2_xyz)\n p_1 = ARCSpecies(label='p1', smiles=smiles[2], xyz=p_1_xyz)\n p_2 = ARCSpecies(label='p2', smiles=smiles[3], xyz=p_2_xyz)\n \n rxn1 = ARCReaction(r_species=[r_1, r_2], p_species=[p_1, p_2])\n rxn1.determine_family(self.rmgdb)\n atom_map = rxn1.atom_map\n self.assertEqual(atom_map[:3],[0,2,3])\n self.assertIn(atom_map[3:5],[[1,4],[4,1]])\n self.assertEqual(atom_map[5],5)\n self.assertIn(atom_map[6], [6,7,8])\n self.assertIn(atom_map[7], [6, 7, 8])\n self.assertIn(atom_map[8], [6, 7, 8])\n self.assertIn(atom_map[9], [9, 10, 11])\n self.assertIn(atom_map[10], [9, 10, 11])\n self.assertIn(atom_map[11], [9, 10, 11])\n self.assertTrue(check_atom_map(rxn1))", "title": "" }, { "docid": "759cf8c8446b559002bd292c7f3c0166", "score": "0.5115621", "text": "def acid_reach_equil(self):\r\n self.acid_reaction().reach_equil_detailed()", "title": "" }, { "docid": "e249c00b60a35ac91cefe739982685ca", "score": "0.5099393", "text": "async def aime(self, ctx):\n coins = 3000\n exp = 300\n aime = await get_aime(self.bot.session)\n aime_tex = aime[\"latex\"]\n aime_answer = aime[\"answer\"]\n fn = await generate_image(\"\", aime_tex)\n message = await ctx.send(file=discord.File(fn))\n\n os.system(\"rm \" + fn)\n\n for i in numbers:\n await message.add_reaction(i)\n\n tried = []\n answers = {}\n\n def check(reaction, user):\n return (\n reaction.message.id == message.id\n and reaction.emoji in numbers\n and not user.id in tried\n and user.id != message.author.id\n )\n\n while True:\n # get number response\n reaction, user = await self.bot.wait_for(\"reaction_add\", check=check)\n print(\"heyo\")\n print(numbers[reaction.emoji], aime_answer)\n if not user.id in answers:\n answers[user.id] = \"\"\n answers[user.id] += numbers[reaction.emoji]\n if len(answers[user.id]) == 3:\n if answers[user.id] == aime_answer:\n bitecoin_string = user.mention+\" wins! 
(+{0} bitecoins, +{1} XP)\".format(\n str(coins),\n str(exp)\n )\n problem_string = \"That was problem {0} from the {1} AIME {2}\".format(\n str(aime[\"problem\"] + 1),\n aime[\"year\"],\n aime[\"version\"]\n )\n success_string = \"{0}\\n{1}\".format(bitecoin_string, problem_string)\n await add_coins(self.bot.session, ctx, user, coins)\n await add_exp(self.bot.session, ctx, user, exp)\n await ctx.send(success_string)\n break\n else:\n await ctx.send(user.mention + \" wrong!\")\n tried.append(user.id)", "title": "" }, { "docid": "a81418e7dcebe4bde8febdff488c58a5", "score": "0.5086605", "text": "def AC(self):\r\n m = np.abs(self.c[0]) # Modulus of c1\r\n delta = np.angle(self.c[0]) # Argument of c1\r\n\r\n b_squared = np.abs(self.a[0]) # Modulus of a1\r\n gamma_a1 = np.angle(self.a[0]) # Half argument of a1\r\n\r\n x = b_squared/exp(self.psi0)*cos(2*gamma_a1 - self.eta_t)\r\n y = b_squared/exp(self.psi0)*sin(2*gamma_a1 - self.eta_t)\r\n\r\n return x/self.chord, y/self.chord", "title": "" }, { "docid": "d7da861f4fe2bb05d34efb0fe67959b7", "score": "0.5078328", "text": "def add_reaction(self, reaction):\n self.add_reacs([reaction])\n self.calcs()", "title": "" }, { "docid": "d932dc7894e413f695bcc8c3ba171b96", "score": "0.5058877", "text": "def try_acceptor(self, acc, donor):\r\n residue = acc.residue\r\n\r\n # Do some error checking\r\n if not donor.hdonor:\r\n return 0\r\n\r\n _LOGGER.debug(\"Working on %s %s (acceptor) to %s %s (donor)\",\r\n acc.residue, acc.name, donor.residue, donor.name)\r\n if self.is_hbond(donor, acc):\r\n residue.fixed = acc.name\r\n self.fix_flip(acc)\r\n acc.hdonor = 0\r\n return 1\r\n return 0", "title": "" }, { "docid": "40e9435df820c963ae07f6cb1f0a29a2", "score": "0.504683", "text": "def cMAC(self):\n return (2/3) * self.chordRoot * (1 + self.taperRatio + self.taperRatio**2) / (1 + self.taperRatio)", "title": "" }, { "docid": "7ee3982f257d2368e119a803e116065b", "score": "0.5044603", "text": "def testReactionConversion(self):\n from rmgpy.tools.canteraModel import checkEquivalentCanteraReaction\n for i in range(len(self.ctReactions)):\n self.assertTrue(checkEquivalentCanteraReaction(self.ctReactions[i],self.rmg_ctReactions[i]))", "title": "" }, { "docid": "97ec35788aeab3d7409027e62efe6b8e", "score": "0.5040818", "text": "async def on_reaction_add(self, reaction: MessageReaction, /) -> None:", "title": "" }, { "docid": "53d248abb5ce6a06687c0456f404ca4c", "score": "0.5040725", "text": "def cMAC(self):\n return (2 / 3) * self.chordRoot * (1 + self.taperRatio + self.taperRatio ** 2) / (1 + self.taperRatio)", "title": "" }, { "docid": "eb99b534c504a7f19e5a8c235f202180", "score": "0.49687058", "text": "def GetTauReaction(self):\n #Tau is relative, not absolute as in normal NRM (!!!)\n self.minimum_reaction = self.tauPairs[0] # Pick first initiation reaction to occur \n self.tau_reaction = self.minimum_reaction[0] # Pick tau of the reaction \n \n self.minimum_delay = self.Tstruct[1] # From sorted Tstruct (on index 0 is the initial (0, np.nan))\n self.tau_delay = self.minimum_delay[0] - self.sim_t # Convert from absolute to relative time \n \n if self.tau_reaction <= self.tau_delay: # Initiate a reaction\n self.tau = self.tau_reaction\n self.reaction_index = self.minimum_reaction[1]\n self._IsCompletionDelayed = False\n else: # A delayed reaction completes (self.tau_delay < self.tau_reaction)\n self.tau = self.tau_delay\n self.reaction_index = self.minimum_delay[1]\n self._IsCompletionDelayed = True", "title": "" }, { "docid": "918b69b5d590476a8be9e54882847980", "score": 
"0.49522075", "text": "def test_quickselect_nucleic_acid(self):\n\n nucleic_acids = list(range(401, 414)) + list(range(421, 434))\n\n self.assertIsInstance(self.pdb.is_nucleic_acid(), TopologySeries)\n self.assertItemsEqual(set(self.pdb[self.pdb.is_nucleic_acid()]['resSeq']), nucleic_acids)", "title": "" }, { "docid": "700bc62c20cf612d50f647f8aa793f28", "score": "0.49376124", "text": "def read_reaction(lr):\r\n r_id = read_rfmt_line(lr)\r\n format_version, reaction_name = read_rxn_header(lr)\r\n reactants, products = read_rxn_block_v2000(lr)\r\n data_fields = {}\r\n field_name = None\r\n field_value = True\r\n while True:\r\n field_name, field_value = read_rdf_data_field(lr)\r\n if not field_name:\r\n break\r\n else:\r\n data_fields[field_name] = field_value\r\n # print('field_name', field_name)\r\n reaction = Reaction(r_id, reaction_name, reactants, products, data_fields)\r\n # field_value will contain True if there is another reaction to be read\r\n # or False if end of file has been reached.\r\n return reaction, field_value", "title": "" }, { "docid": "f233f680b8f297def0955f5d2798c497", "score": "0.4936259", "text": "def anticipation(self):\n # TODO: Implement the agent's anticipation mechanism\n #on donne à l'anticipation la valeur de la mémoire pour l'action faite\n self.anticipated_outcome = self.memoire[self._action]\n \n return self.anticipated_outcome", "title": "" }, { "docid": "201a5fffa633190a5f9ff0441ac44cf0", "score": "0.49083716", "text": "def GetReactionFormula(n, model):\n reaction = model.getReaction(n)\n kinetic_law = reaction.getKineticLaw() # Get the kinetic law\n formula = kinetic_law.getFormula()\n return formula", "title": "" }, { "docid": "be88e250b52938b652380fe69478f5f0", "score": "0.4896578", "text": "def dar_cancion(self):\n\t\treturn self.cancion", "title": "" }, { "docid": "a32ea8c1ffb3e41597e779843128fecc", "score": "0.48824877", "text": "def initial_reaction(reactants, products, ea, additional):\n\n reactants = re.findall('[0-9][0-Z]+', reactants)\n products = re.findall('[0-9][0-Z]+', products)\n\n nu_r = []\n nu_p = []\n\n for react in reactants:\n nu_r.append(int(react[0]))\n\n for prod in products:\n nu_p.append(int(prod[0]))\n\n reactants[:] = [additional[a[1:]] for a in reactants]\n products[:] = [additional[a[1:]] for a in products]\n\n return ChemicalReaction(reactants, products, nu_r, nu_p, ea)", "title": "" }, { "docid": "8c4511da495005970994f0fc7c24c4e5", "score": "0.48683763", "text": "def test_map_isomerization_reaction(self):\n reactant_xyz = \"\"\"C -1.3087 0.0068 0.0318\n C 0.1715 -0.0344 0.0210\n N 0.9054 -0.9001 0.6395\n O 2.1683 -0.5483 0.3437\n N 2.1499 0.5449 -0.4631\n N 0.9613 0.8655 -0.6660\n H -1.6558 0.9505 0.4530\n H -1.6934 -0.0680 -0.9854\n H -1.6986 -0.8169 0.6255\"\"\"\n reactant = ARCSpecies(label='reactant', smiles='C([C]1=[N]O[N]=[N]1)', xyz=reactant_xyz)\n product_xyz = \"\"\"C -1.0108 -0.0114 -0.0610\n C 0.4780 0.0191 0.0139\n N 1.2974 -0.9930 0.4693\n O 0.6928 -1.9845 0.8337\n N 1.7456 1.9701 -0.6976\n N 1.1642 1.0763 -0.3716\n H -1.4020 0.9134 -0.4821\n H -1.3327 -0.8499 -0.6803\n H -1.4329 -0.1554 0.9349\"\"\"\n product = ARCSpecies(label='product', smiles='[N-]=[N+]=C(N=O)C', xyz=product_xyz)\n rxn_1 = ARCReaction(label='reactant <=> product', ts_label='TS0', r_species=[reactant], p_species=[product])\n atom_map = map_isomerization_reaction(rxn_1)\n self.assertEqual(atom_map[:6], [0, 1, 2, 3, 4, 5])\n self.assertIn(atom_map[6], [6, 8])\n self.assertIn(atom_map[7], [6, 7])\n self.assertIn(atom_map[8], [7, 8])", "title": 
"" }, { "docid": "1ad58ddeb08b8ef5cbb5a400c9545369", "score": "0.48480976", "text": "def _observe_reaction_force(self):\n if self.reaction_force_strategy is ReactionForceStrategy.AVG_OVER_MINI_STEPS:\n return self._mini_step_contact['full'].mean(axis=0)\n if self.reaction_force_strategy is ReactionForceStrategy.MEDIAN_OVER_MINI_STEPS:\n median_mini_step = np.argsort(self._mini_step_contact['mag'])[self.mini_steps // 2]\n return self._mini_step_contact['full'][median_mini_step]\n if self.reaction_force_strategy is ReactionForceStrategy.MAX_OVER_MINI_STEPS:\n max_mini_step = np.argmax(self._mini_step_contact['mag'])\n return self._mini_step_contact['full'][max_mini_step]\n else:\n return self._reaction_force[:2]", "title": "" }, { "docid": "8d7a542b4617828c7bf4beb8eeecc947", "score": "0.48460725", "text": "def chorus(self):\n return self._effect_get('EFFECT_MSB', 'CHORUS')", "title": "" }, { "docid": "1b04fd9d531c10402bc1190eef93b8d0", "score": "0.48444307", "text": "def amino_acid_group(self):\n return choice([\n 'Aliphatic',\n 'Aromatic',\n 'Acidic',\n 'Basic',\n 'Hydroxylic',\n 'Sulphur-containing',\n 'Amidic',\n ])", "title": "" }, { "docid": "c811278503fed77289e9b19785fbf3ab", "score": "0.48396912", "text": "def getActions():\r\n return [0,1,2] #Straight, turn right or turn left\r", "title": "" }, { "docid": "f980625d6a35d6264ec01494c09d148c", "score": "0.48359573", "text": "def _parse_reaction(self):\n\n # Split on newlines\n lines = self._input_string.split(\"\\n\")\n\n # Use this pattern to look for reaction lines\n rxn_pattern = re.compile(\"->\")\n\n # Data structures to populate\n species_seen = []\n reactions = {}\n species_list = []\n conc_list = []\n\n # Go through every line\n for line in lines:\n\n # skip blank lines and comments\n if line.strip() == \"\" or line.startswith(\"#\"):\n continue\n\n # reaction line\n if rxn_pattern.search(line):\n\n # Split on ->; should yield exactly two fields\n rxn = line.split(\"->\")\n if len(rxn) != 2:\n err = \"mangled reaction line\\n ({})\\n\".format(line)\n raise ValueError(err)\n\n # reactant is first field\n reactant = rxn[0].strip()\n\n # split second field; should have exactly two outputs\n product_and_rate = rxn[1].split()\n if len(product_and_rate) != 2:\n err = \"mangled reaction line\\n ({})\\n\".format(line)\n raise ValueError(err)\n\n # product is first output\n product = product_and_rate[0].strip()\n\n # rate is second output\n try:\n rate = float(product_and_rate[1])\n except ValueError:\n err = \"mangled reaction line (rate not a float)\\n ({})\\n\".format(line)\n raise ValueError(err)\n\n # Reaction key defines what reaction is specified\n reaction_key = (reactant,product)\n try:\n\n # Make sure this reaction has not been seen before\n reactions[reaction_key]\n err = \"reaction defined more than once ({})\\n\".format(reaction_key)\n raise ValueErro(err)\n\n # Record reaction, rate, and what species have been seen\n except KeyError:\n reactions[reaction_key] = rate\n\n species_seen.append(reactant)\n species_seen.append(product)\n\n else:\n\n # Assume this is a concentration line. split on =. Must have two fields\n conc_line = line.split(\"=\")\n if len(conc_line) != 2:\n err = \"mangled concentration line\\n ({})\\n\".format(line)\n raise ValueError(err)\n\n # First field is species name. Check to see if it has been seen before.\n species = conc_line[0].strip()\n if species in species_list:\n err = \"duplicate species concentration ({})\\n\".format(species)\n raise ValueError(err)\n\n # second field is concentration. 
must be float.\n try:\n conc = float(conc_line[1])\n except ValueError:\n err = \"mangled concentration line\\n ({})\\n\".format(line)\n raise ValueError(err)\n\n # Record the species and concentration in lists (ordered!)\n species_list.append(species)\n conc_list.append(conc)\n\n # Unique set of species observed in reactions\n species_seen = set(species_seen)\n\n # Make sure that there is a concentration specified for every species in\n # a reaction.\n if not species_seen.issubset(set(species_list)):\n err = \"not all species have initial concentrations\\n\"\n raise ValueError(err)\n\n # Final lists of species names\n self._species_list = species_list[:]\n self._conc_list = conc_list[:]\n self._reactions = reactions\n\n # Concentrations as arrays\n self._initial_conc = np.array(self._conc_list)\n self._current_conc = np.array(self._conc_list)\n\n # Keep track of changes\n self._time_steps = [0]\n self._conc_history = [self._current_conc]\n\n # Number of species\n self._n = len(self._species_list)", "title": "" }, { "docid": "4b84599a26d4410b6868ac6dc318a724", "score": "0.483528", "text": "def GetReactionIDs(self):\n return self.reaction_ids", "title": "" }, { "docid": "9f63bc353a3390fdf846864d86564c2e", "score": "0.482154", "text": "async def react(self) -> None:\n await self._message.add_reaction(\"\\U0001F44D\")", "title": "" }, { "docid": "fe94b507abed9e647c720feeb605011c", "score": "0.48128667", "text": "def get_action(self, state):\n player = state.player1 if self.isPlayer1 else state.player2\n ac = np.random.choice(state.get_valid_moves(player, self.isPlayer1))\n return ac", "title": "" }, { "docid": "ce953af653516f0e23531b3dc1263198", "score": "0.4811957", "text": "def action(self, game_state: GameState) -> int:\n from gym_idsgame.envs.util import idsgame_util\n actions = list(range(self.game_config.num_defense_actions))\n legal_actions = list(filter(lambda action: idsgame_util.is_defense_id_legal(action, self.game_config,\n game_state), actions))\n if len(legal_actions) > 0:\n action_id = np.random.choice(legal_actions)\n else:\n action_id = np.random.choice(actions)\n return action_id", "title": "" }, { "docid": "45fb258e4d34566ca8eb17e10eb0c46d", "score": "0.48070347", "text": "def ac(self):\n if self.max_dex is not None:\n return self.base_ac + min(self.max_dex, self.owner.dexterity)\n else:\n return self.base_ac + self.owner.dexterity", "title": "" }, { "docid": "dbd10be6cc5c9770c833c27a96b41c9f", "score": "0.4785677", "text": "def test_error_canceling_reactions(self):\n # Take ethane as the target\n lot = LevelOfTheory('test')\n ethane = ErrorCancelingSpecies(self.molecule1, (100.0, 'kJ/mol'), lot)\n methyl = ErrorCancelingSpecies(self.molecule2, (20.0, 'kcal/mol'), lot, (21000.0, 'cal/mol'))\n\n # This reaction is not an isodesmic reaction, but that does not matter for the unit test\n rxn = ErrorCancelingReaction(ethane, {methyl: 2})\n self.assertAlmostEqual(rxn.calculate_target_thermo().value_si, 2*21000.0*4.184-(2*20.0*4184-100.0*1000))", "title": "" }, { "docid": "ecc502e7d75ea30d3b58eb363dfafd59", "score": "0.47831067", "text": "def missing_energy_partner(base):\n ra = cobra.Reaction(\"A\")\n rb = cobra.Reaction(\"B\")\n rc = cobra.Reaction(\"C\")\n base.add_reactions([ra, rb, rc])\n ra.reaction = \"a <--> b\"\n rb.reaction = \"b <--> c\"\n rc.reaction = \"atp_c + a <--> c \"\n return base", "title": "" }, { "docid": "6b8f41e000c1d1cabe35fe9fb400d872", "score": "0.47813424", "text": "def antecedents(self):\n return self.follow_down_cable(self.frame.caseframe.slots[0])", 
"title": "" }, { "docid": "58a28c8a53ed9b372a320c98646018dc", "score": "0.4777051", "text": "def get_reactions(self):\n reactions = DefaultDictKey(Reaction)\n for species_def in self.species_defs:\n behaviours = species_def.rhs\n for behaviour in behaviours:\n reaction = reactions[behaviour.reaction_name]\n if behaviour.role == \"<<\":\n reaction.reactants.append(behaviour)\n elif behaviour.role == \">>\":\n reaction.products.append(behaviour)\n elif behaviour.role == \"(+)\":\n reaction.activators.append(behaviour)\n elif behaviour.role == \"(-)\":\n reaction.inhibitors.append(behaviour)\n elif behaviour.role == \"(.)\":\n reaction.modifiers.append(behaviour)\n return reactions", "title": "" }, { "docid": "49bff6030e2cc0a301521680e87aa21b", "score": "0.477577", "text": "async def idgaf(self, ctx):\n S = \"\\U0001f1f8\"\n U = \"\\U0001f1fa\"\n D = \"\\U0001f1e9\"\n G = \"\\U0001f1ec\"\n O = \"\\U0001f1f4\"\n R = \"\\U0001f1f7\"\n L = \"\\U0001f1f1\"\n I = \"\\U0001f1ee\"\n T = \"\\U0001f1f9\"\n fire = \"\\U0001f525\"\n A = \"\\U0001f1e6\"\n F = \"\\U0001f1eb\"\n ok = \"\\U0001f44c\"\n clap = \"\\U0001f44f\"\n cool = \"\\U0001f60e\"\n\n async for x in self.bot.logs_from(ctx.message.channel, before=ctx.message.timestamp, limit=1):\n await self.bot.add_reaction(x, I)\n await self.bot.add_reaction(x, D)\n await self.bot.add_reaction(x, G)\n await self.bot.add_reaction(x, A)\n await self.bot.add_reaction(x, F)\n await self.bot.add_reaction(x, cool)", "title": "" }, { "docid": "3ed7f47739a65f616d3d459e6b69e158", "score": "0.47740835", "text": "def type_of_interaction(base_residue, aa_residue, aa_coordinates, standard_aa_center, base_atoms):\n squared_xy_dist_list = []\n\n \"\"\"Defines different sets of amino acids\"\"\"\n planar_aa = set ([\"ARG\", \"ASN\", \"ASP\", \"GLU\", \"GLN\", \"HIS\", \"PHE\", \"TRP\", \"TYR\"])\n stacked_aliphatic = set([\"ALA\", \"CYS\", \"ILE\", \"LEU\", \"MET\", \"PRO\", \"SER\", \"THR\", \"VAL\"])\n # Note: LYS and GLY are not in the previous lists\n\n edge_to_edge_aa = set ([\"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLN\", \"GLU\", \"HIS\", \"LYS\", \"PHE\", \"SER\", \"THR\", \"TYR\", \"TRP\"])\n shb_aa = set ([\"ARG\", \"ASN\", \"ASP\", \"GLU\", \"GLN\", \"HIS\", \"LYS\", \"SER\", \"THR\", \"TYR\"])\n\n # calculate distances from aa atoms to base center\n for aa_atom in aa_residue.atoms(name=aa_fg[aa_residue.sequence]):\n key = aa_atom.name\n aa_x = aa_coordinates[key][0]\n aa_y = aa_coordinates[key][1]\n\n squared_xy_dist = (aa_x**2) + (aa_y**2)\n squared_xy_dist_list.append(squared_xy_dist)\n\n #print base_residue.unit_id(), aa_residue.unit_id(), min(squared_xy_dist_list), mean_z\n\n # for a stacking interaction, the x,y coordinate of at least one atom of the amino acid group needs to be\n # within sqrt(5) = 2.236 of the base center 0,0\n min_dist = np.sqrt(min(squared_xy_dist_list))\n\n if min_dist <= 2.236:\n #print base_residue.unit_id(), aa_residue.unit_id(), min(squared_xy_dist_list), mean_z\n if aa_residue.sequence in planar_aa:\n return stacking_planar_annotation(base_residue, aa_residue, min_dist)\n\n elif aa_residue.sequence in stacked_aliphatic:\n return stacking_non_planar_annotation(aa_residue, aa_coordinates, min_dist)\n\n else:\n return (\"other-stack\",{\"dist-xy-from-center\":min_dist})\n\n # check for interactions in the plane of the base\n mean_z = standard_aa_center[2]\n (num_hydrogen_bonds,hydrogen_bond_list) = count_hydrogen_bonds(base_residue, aa_residue, base_atoms)\n\n if -1.8 <= mean_z < 1.8:\n if aa_residue.sequence in edge_to_edge_aa:\n 
angle = calculate_angle_between_planar_residues(base_residue, aa_residue)\n if angle:\n if 0 <= angle <= 45 and num_hydrogen_bonds >= 2:\n return (\"pseudopair\",{\"hydrogen-bonds\":hydrogen_bond_list,\"angle-between-planes\":angle})\n elif 45 <= angle:\n return (\"perpendicular-edge\",{\"hydrogen-bonds\":hydrogen_bond_list,\"angle-between-planes\":angle})\n\n if aa_residue.sequence in shb_aa:\n# base_seq = base_residue.sequence\n# base_atoms = NAbaseheavyatoms[base_seq]\n if num_hydrogen_bonds >= 1:\n return (\"SHB\",{\"hydrogen-bonds\":hydrogen_bond_list})\n\n return (\"other-edge\",{\"hydrogen-bonds\":hydrogen_bond_list})\n\n return (\"other\",{\"dist-xy-from-center\":min_dist,\"hydrogen-bonds\":hydrogen_bond_list})", "title": "" }, { "docid": "396289b3257c200d4b653dfc0fc034f5", "score": "0.47727296", "text": "def get_reaper_action_by_uuid(self, uuid):", "title": "" }, { "docid": "bf3f4cd19e3ead65754bf502da38b19d", "score": "0.47706288", "text": "def adjoint(self):\n return self.cofactors().trans()", "title": "" }, { "docid": "9e999f44e1fcad3b549dcc2292bfca0b", "score": "0.4747537", "text": "def equivalence_point(self, titrant=None, titrant_conc=None, conc=None, verbose=True):\r\n # Obtain conditions\r\n if titrant is None:\r\n titrant = eval(input(\"Titrant Identity: \"))\r\n if titrant in strong_acids:\r\n acid = True\r\n elif titrant in strong_bases:\r\n acid = False\r\n else:\r\n raise ValueError(\"Titrant was not a strong acid or base. Make sure you are using polyatomics whenever possible!\")\r\n if titrant_conc is None:\r\n titrant_conc = float(eval(input(\"[Titrant]: \")))\r\n if conc is None:\r\n if acid:\r\n conc = float(eval(input(\"[Base]₀: \")))\r\n else:\r\n conc = float(eval(input(\"[Acid]₀: \")))\r\n\r\n # Check for ka/kb\r\n if self.ka is None:\r\n raise ValueError(\"Please set either the ka or kb of the acid/base solution using [self].set_ka or [self].set_kb\")\r\n\r\n # Compute the concentration of the conjugate solution\r\n conjugate_conc = conc / (conc/titrant_conc + 1)\r\n\r\n # Solve for the final concentration of H⁺/OH⁻\r\n if acid:\r\n # Titrant was a strong acid, use ka\r\n # Print information\r\n if verbose:\r\n print(\"Initial Reaction: \", self.add_H())\r\n print(\"Reaction at Equivalence Point: \", self.acid_reaction())\r\n # Solve for unit change\r\n solutions = solve_expr(\"x**2 / ({} - x) - {}\".format(conjugate_conc, self.ka))\r\n if verbose: print(\"Potential solutions for [H⁺]: \", solutions)\r\n else:\r\n # Titrant was a strong base, use kb\r\n # Print information\r\n if verbose:\r\n print(\"Initial Reaction: \", self.add_OH())\r\n print(\"Reaction at Equivalence Point: \", self.base_reaction())\r\n # Solve for unit change\r\n solutions = solve_expr(\"x**2 / ({} - x) - {}\".format(conjugate_conc, self.kb))\r\n if verbose: print(\"Potential solutions for [OH⁻]: \", solutions)\r\n\r\n # Eliminate invalid solutions\r\n for i, j in enumerate(solutions.copy()):\r\n # Solution should not be less than or equal to zero\r\n if j <= 0:\r\n del solutions[i]\r\n # Solution should not be imaginary\r\n elif type(j) is sympyAdd:\r\n del solutions[i]\r\n\r\n # First check if no valid solutions remain\r\n if len(solutions) == 0:\r\n raise Exception(\"An unknown error occured: all solutions were invalid\")\r\n elif len(solutions) > 1:\r\n raise Exception(\"An unknown error occured: too many solutions were valid\")\r\n\r\n # Print final output\r\n if verbose: \r\n print(\"Valid Solution:\", solutions[0])\r\n print(\"pH:\")\r\n if acid:\r\n return 
-log10(solutions[0])\r\n else:\r\n return 14 + log10(solutions[0])", "title": "" }, { "docid": "0bfa739a6b184da654abb160b1eaf8ca", "score": "0.47383246", "text": "def anger(self):\n return self.emotions.anger", "title": "" }, { "docid": "df711a74544b61fad871281523782fe7", "score": "0.47381014", "text": "def try_donor(self, donor, acc):\r\n residue = self.residue\r\n\r\n # Do some error checking\r\n if not acc.hacceptor:\r\n return 0\r\n\r\n _LOGGER.debug(\"Working on %s %s (donor) to %s %s (acceptor)\",\r\n donor.residue, donor.name, acc.residue, acc.name)\r\n\r\n if self.is_hbond(donor, acc):\r\n residue.fixed = donor.name\r\n self.fix_flip(donor)\r\n donor.hacceptor = 0\r\n return 1\r\n return 0", "title": "" }, { "docid": "62fee1e7cbaa2e011d35d402895e3f75", "score": "0.47314417", "text": "def aromaticity(self):\n if not self.amino_acids_percent:\n self.get_amino_acids_percent()\n \n Arom= self.amino_acids_percent['Y']+self.amino_acids_percent['W']+self.amino_acids_percent['F']\n return Arom", "title": "" }, { "docid": "2c4026f51006d3727c1216721a61814e", "score": "0.47301862", "text": "def action_type(self, action):\n action_id=self.action_id(action)\n if action_id in [2,7]:\n return 'crossing'\n elif action_id in [0,4,5,9]:\n return 'clockwise'\n elif action_id in [1,3,6,8]:\n return 'counterclockwise'", "title": "" }, { "docid": "4ee26c6933f6e9b47fa31327d6999184", "score": "0.47184804", "text": "async def getquotereaction(self, ctx):\n r = self.settings.getServerStat(ctx.message.guild, \"QuoteReaction\")\n\n if r:\n await ctx.send(\"Current quote reaction is {}\".format(r))\n return\n else:\n await ctx.send(\"No quote reaction set.\")", "title": "" }, { "docid": "ee47957a2c0ad7963d12a9fcfb6d8faf", "score": "0.47175148", "text": "def curate(self):\n \n # evaluate possible reactions\n reactions = []\n while len(self.reactions) > 0:\n reaction = self.reactions.pop()\n if reaction[2](self) == 0:\n self.excluded_reactions.append(reaction)\n else:\n reactions.append(reaction)\n self.reactions = reactions\n\n # evaluate impossible reactions\n excluded_reactions = []\n while len(self.excluded_reactions) > 0:\n reaction = self.excluded_reactions.pop()\n if reaction[2](self) > 0:\n self.reactions.append(reaction)\n else:\n excluded_reactions.append(reaction)\n self.excluded_reactions = excluded_reactions", "title": "" }, { "docid": "b7cefbb3572743a19f3abca10c402800", "score": "0.47120848", "text": "def amm_chn(self): # IVS1\n return self._amm_chn", "title": "" }, { "docid": "fc9205eeca61db50bf876422a9b9eb35", "score": "0.4707719", "text": "def association_coefficient(self):\n return self.category.association_coefficient", "title": "" }, { "docid": "496abc59ac6940d8c3b1792e1ea66793", "score": "0.47037452", "text": "async def setquotereaction(self, ctx):\n # Check for admin status\n isAdmin = ctx.author.permissions_in(ctx.channel).administrator\n if not isAdmin:\n checkAdmin = self.settings.getServerStat(ctx.guild, \"AdminArray\")\n for role in ctx.author.roles:\n for aRole in checkAdmin:\n # Get the role that corresponds to the id\n if str(aRole['ID']) == str(role.id):\n isAdmin = True\n if not isAdmin:\n await ctx.send(\"You do not have permission to use this command.\")\n return\n\n message = await ctx.send(\"Please react to this message with the desired quote reaction.\")\n # Backup then clear - so we don't trigger quoting during this\n backup_reaction = self.settings.getServerStat(ctx.message.guild, \"QuoteReaction\")\n self.settings.setServerStat(ctx.message.guild, \"QuoteReaction\", None)\n 
# Now we would wait...\n def check(reaction, user):\n return reaction.message.id == message.id and user == ctx.author\n try:\n reaction, user = await self.bot.wait_for('reaction_add', timeout=60, check=check)\n except:\n # Didn't get a reaction\n self.settings.setServerStat(ctx.message.guild, \"QuoteReaction\", backup_reaction)\n await message.edit(content=\"Looks like we ran out of time - run `{}setquotereaction` to try again.\".format(ctx.prefix))\n return\n\n # Got it!\n self.settings.setServerStat(ctx.message.guild, \"QuoteReaction\", str(reaction.emoji))\n\n await message.edit(content=\"Quote reaction set to {}\".format(str(reaction.emoji)))", "title": "" }, { "docid": "e3dbf5a8842ba11526668d5de5f84e21", "score": "0.47022724", "text": "async def reactions(self, ctx):\n await ctx.channel.trigger_typing()\n await self.message_leaderboard(ctx, \"reactions\")", "title": "" }, { "docid": "c26e10a96e9101f758f4ee0ae08a9c6c", "score": "0.46971974", "text": "def cal_formal_charge(atomic_symbol, bonds) -> int:\n if atomic_symbol=='N':\n #if sorted(bonds, key=lambda x: (x[0], x[1])) == [('C', 1), ('O', 1), ('O', 2)]:\n # return 1\n if sum(j for i, j in bonds)==4:\n return 1\n #if atomic_symbol=='O':\n # if sorted(bonds, key=lambda x: (x[0], x[1])) == [('N', 1)]:\n # return -1\n\n return 0", "title": "" }, { "docid": "1062def2df617d6e358d8bf68965764c", "score": "0.46900538", "text": "def reactions(self) -> Set[GitLabReaction]:\n url = self.url + '/award_emoji'\n reactions = get(self._token, url)\n return {GitLabReaction.from_data(r, self._token, self, r['id'])\n for r in reactions}", "title": "" }, { "docid": "fd7ac5e4e26cd6807ed96e5d790915ed", "score": "0.46898758", "text": "def test_ic_character(self):\n self.session.execute_cmd('@ic Char')\n # confirm the menu displayed the next node\n last_msg = self.session.msg.mock_calls[-1][1][0]\n self.assertIn('Select an Archetype by number', last_msg)", "title": "" }, { "docid": "9c4e80bec7d5b726456ab5d7e84abb6f", "score": "0.46850264", "text": "def test_find_error_canceling_reaction(self):\n scheme = IsodesmicScheme(self.propene, [self.propane, self.butane, self.butene, self.caffeine, self.ethyne])\n\n # Note that caffeine and ethyne will not be allowed, so for the full set the indices are [0, 1, 2]\n rxn, _ = scheme._find_error_canceling_reaction([0, 1, 2], milp_software=['lpsolve'])\n self.assertEqual(rxn.species[self.butane], -1)\n self.assertEqual(rxn.species[self.propane], 1)\n self.assertEqual(rxn.species[self.butene], 1)\n\n if self.pyo is not None:\n rxn, _ = scheme._find_error_canceling_reaction([0, 1, 2], milp_software=['pyomo'])\n self.assertEqual(rxn.species[self.butane], -1)\n self.assertEqual(rxn.species[self.propane], 1)\n self.assertEqual(rxn.species[self.butene], 1)", "title": "" }, { "docid": "c41e5d6be2b60b4c1e9785c3857aeae4", "score": "0.46819422", "text": "def try_donor(self, donor, acc):\r\n residue = self.residue\r\n\r\n # Do some error checking\r\n if not acc.hacceptor:\r\n return 0\r\n\r\n # Get the name of the atom to add\r\n if residue.has_atom(\"H2\"):\r\n return 0\r\n elif residue.has_atom(\"H1\"):\r\n newname = \"H2\"\r\n else:\r\n newname = \"H1\"\r\n\r\n _LOGGER.debug(\"Working on %s %s (donor) to %s %s (acceptor)\",\r\n donor.residue, donor.name, acc.residue, acc.name)\r\n\r\n # Act depending on the number of bonds\r\n if len(donor.bonds) == 0:\r\n self.make_atom_with_no_bonds(donor, acc, newname)\r\n if self.is_hbond(donor, acc):\r\n return 1\r\n self.routines.cells.remove_cell(residue.get_atom(newname))\r\n 
residue.remove_atom(newname)\r\n return 0\r\n if len(donor.bonds) == 1:\r\n self.make_water_with_one_bond(donor, newname)\r\n newatom = donor.residue.get_atom(newname)\r\n return self.try_single_alcoholic_h(donor, acc, newatom)\r\n elif len(donor.bonds) == 2:\r\n loc1, loc2 = self.get_positions_with_two_bonds(donor)\r\n return self.try_positions_with_two_bonds_h(donor, acc, newname, loc1, loc2)\r\n elif len(donor.bonds) == 3:\r\n loc = self.get_position_with_three_bonds(donor)\r\n return self.try_positions_three_bonds_h(donor, acc, newname, loc)\r\n return 0", "title": "" }, { "docid": "f3e981723437002801b9fa301372980c", "score": "0.46745753", "text": "def addReactionAttribute(nxGraph: networkx.classes.MultiGraph):\n if not isinstance(nxGraph, networkx.classes.graph.Graph):\n raise NotImplementedError()\n \n # add reaction to edges\n edges = nxGraph.edges(keys = True)\n\n attributeDict = dict()\n for edge in edges:\n try:\n attributeDict[edge] = edge[2].reaction if edge[2].reaction is not None else ''\n except AttributeError:\n continue\n \n networkx.set_edge_attributes(nxGraph, attributeDict, REACTION_NAME)", "title": "" }, { "docid": "f70d79584e0170313bf7e7ed68f0ecef", "score": "0.46740052", "text": "def CA ( self ):\n \"\"\" In care residue is incomplete and does not have CA \"\"\"\n \"\"\" returns N or C \"\"\"\n\n for AtomRecordInstance in self.Content:\n\n Name = AtomRecordInstance. Name\n\n if Name == ' CA ':\n\n CA = AtomRecordInstance\n\n try: \n return CA\n\n except UnboundLocalError:\n\n print 'Incomplete Residue does not contain CA. Will try to go with N'\n\n for AtomRecordInstance in self.Content:\n\n Name = AtomRecordInstance. Name\n\n if Name == ' N ' :\n\n N = AtomRecordInstance\n\n try: return N\n\n except UnboundLocalError:\n\n print 'Incomplete Residue does not contain CA nor N. Will try to go with C'\n\n for AtomRecordInstance in self.Content:\n\n Name = AtomRecordInstance .Name \n\n if Name == ' C ' :\n\n C = AtomRecordInstance\n self. 
Print ()\n return C", "title": "" }, { "docid": "109f6b49ec869b8833f40ffceb140627", "score": "0.4670308", "text": "def __calcDChan__(self,name):\n\t\t\n\t\tif name in self.digital_inputs:\n\t\t\treturn self.digital_inputs.index(name)\n\t\telse:\n\t\t\tself.__print__(' invalid channel',name,' , selecting ID1 instead ')\n\t\t\treturn 0", "title": "" }, { "docid": "76f859d31ca068c80268548f39068754", "score": "0.4659111", "text": "def get_random_initial_molecule(self) -> str:\n sum_reactions_r0 = self.rel_r0_rxns.sum(axis=1)\n possible_molecules_idx = np.nonzero(sum_reactions_r0)[0]\n random_molecule_idx = self.rand_generator.choice(possible_molecules_idx)\n return self.reactants[random_molecule_idx]", "title": "" }, { "docid": "ae3caee7bb3f5a20bff2f6656c70aa5c", "score": "0.46562234", "text": "def DiscretiseAction(self, action):\n steering=action\n disc_action=int(steering*(self.steering_discretisation_steps-1))\n return (disc_action,)", "title": "" }, { "docid": "37ff6b6569d51f7b1ac203bbc62cead2", "score": "0.4645135", "text": "def activate_power(self, charact, game, activables, game_state):\n # check if the power should be used before of after moving\n # this depends on the \"activables\" variable, which is a set.\n if charact.power and charact.color in activables:\n character_color = charact.display()[\"color\"]\n question = {\"question type\": f\"activate {character_color} power\",\n \"data\": [0, 1],\n \"game state\": game_state}\n power_activation = ask_question_json(self.client, self.uuid, question)\n\n # log\n self.logger.info(f\"question : {question['question type']}\")\n if power_activation == 1:\n power_answer = \"yes\"\n else:\n power_answer = \"no\"\n self.logger.info(\"answer : \" + power_answer)\n\n # work\n if power_activation:\n self.logger.info(charact.color + \" power activated\")\n charact.power = False\n\n # red character\n if charact.color == \"red\":\n draw = random.choice(game.alibi_cards)\n game.alibi_cards.remove(draw)\n self.logger.info(str(draw) + \" was drawn\")\n if draw == \"fantom\":\n game.position_carlotta += -1 if self.id == 0 else 1\n elif self.id == 0:\n draw.suspect = False\n\n # black character\n if charact.color == \"black\":\n for q in game.characters:\n if q.position in {x for x in passages[charact.position] if\n x not in game.blocked or q.position not in game.blocked}:\n q.position = charact.position\n self.logger.info(\"new position : \" + str(q))\n\n # white character\n if charact.color == \"white\":\n for moved_character in game.characters:\n if moved_character.position == charact.position and charact != moved_character:\n disp = {\n x for x in passages[charact.position]\n if x not in game.blocked or moved_character.position not in game.blocked}\n\n # edit\n available_positions = list(disp)\n # format the name of the moved character to string\n character_to_move = str(\n moved_character).split(\"-\")[0]\n question = {\"question type\": \"white character power move \" + character_to_move,\n \"data\": available_positions,\n \"game state\": game_state}\n selected_index = ask_question_json(self.client, self.uuid, question)\n\n # test\n if selected_index not in range(len(disp)):\n warning_message = (\n ' ! 
: selected position not available '\n 'Choosing random position.'\n )\n self.logger.warning(warning_message)\n selected_position = disp.pop()\n\n else:\n selected_position = available_positions[selected_index]\n\n self.logger.info(\n f\"question : {question['question type']}\")\n self.logger.info(\"answer : \" +\n str(selected_position))\n moved_character.position = selected_position\n self.logger.info(\"new position : \" +\n str(moved_character))\n\n # purple character\n if charact.color == \"purple\":\n # logger.debug(\"Rappel des positions :\\n\" + str(game))\n\n available_characters = list(colors)\n available_characters.remove(\"purple\")\n question = {\"question type\": \"purple character power\",\n \"data\": available_characters,\n \"game state\": game_state}\n selected_index = ask_question_json(self.client, self.uuid, question)\n\n # test\n if selected_index not in range(len(colors)):\n warning_message = (\n ' ! : selected character not available '\n 'Choosing random character.'\n )\n self.logger.warning(warning_message)\n selected_character = colors.pop()\n\n else:\n selected_character = available_characters[selected_index]\n\n self.logger.info(f\"question : {question['question type']}\")\n self.logger.info(\"answer : \" + selected_character)\n\n # y a pas plus simple ?\n selected_crctr = [x for x in game.characters if x.color\n == selected_character][0]\n charact.position, selected_crctr.position = selected_crctr.position, charact.position\n self.logger.info(\"new position : \" + str(charact))\n self.logger.info(\"new position : \" + str(selected_crctr))\n\n # brown character\n if charact.color == \"brown\":\n # the brown character can take other characters with him\n # when moving.\n return [q for q in game.characters if charact.position == q.position]\n\n # grey character\n if charact.color == \"grey\":\n\n available_rooms = [room for room in range(10)]\n question = {\"question type\": \"grey character power\",\n \"data\": available_rooms,\n \"game state\": game_state}\n selected_index = ask_question_json(self.client, self.uuid, question)\n\n # test\n if selected_index not in range(len(available_rooms)):\n warning_message = (\n ' ! : selected room not available '\n 'Choosing random room.'\n )\n self.logger.warning(warning_message)\n selected_index = random.randint(\n 0, len(available_rooms) - 1)\n selected_room = available_rooms[selected_index]\n\n else:\n selected_room = available_rooms[selected_index]\n\n game.shadow = selected_room\n self.logger.info(f\"question : {question['question type']}\")\n self.logger.info(\"answer : \" + str(game.shadow))\n\n # blue character\n if charact.color == \"blue\":\n\n # choose room\n available_rooms = [room for room in range(10)]\n question = {\"question type\": \"blue character power room\",\n \"data\": available_rooms,\n \"game state\": game_state}\n selected_index = ask_question_json(self.client, self.uuid, question)\n\n # test\n if selected_index not in range(len(available_rooms)):\n warning_message = (\n ' ! 
: selected room not available '\n 'Choosing random room.'\n )\n self.logger.warning(warning_message)\n selected_index = random.randint(\n 0, len(available_rooms) - 1)\n selected_room = available_rooms[selected_index]\n\n else:\n selected_room = available_rooms[selected_index]\n\n # choose exit\n passages_work = passages[selected_room].copy()\n available_exits = list(passages_work)\n question = {\"question type\": \"blue character power exit\",\n \"data\": available_exits,\n \"game state\": game_state}\n selected_index = ask_question_json(self.client, self.uuid, question)\n\n # test\n if selected_index not in range(len(available_exits)):\n warning_message = (\n ' ! : selected exit not available '\n 'Choosing random exit.'\n )\n self.logger.warning(warning_message)\n selected_exit = passages_work.pop()\n\n else:\n selected_exit = available_exits[selected_index]\n\n self.logger.info(f\"question : {question['question type']}\")\n self.logger.info(\"answer : \" +\n str({selected_room, selected_exit}))\n game.blocked = tuple((selected_room, selected_exit))\n return [charact]", "title": "" }, { "docid": "0ed5ca413e3606d24cc7e20d12265f7b", "score": "0.46435612", "text": "def try_acceptor(self, acc, donor):\r\n residue = acc.residue\r\n\r\n # Do some error checking\r\n if not donor.hdonor:\r\n return 0\r\n\r\n _LOGGER.debug(\"Working on %s %s (acceptor) to %s %s (donor)\",\r\n acc.residue, acc.name, donor.residue, donor.name)\r\n\r\n # We want to ignore the Hs on the acceptor\r\n if self.is_carboxylic_hbond(donor, acc):\r\n\r\n # Eliminate the closer hydrogen\r\n hyds = []\r\n dist = None\r\n donorhatom = None\r\n for hatom in self.hlist:\r\n if hatom.is_hydrogen:\r\n hyds.append(hatom)\r\n\r\n if len(hyds) < 2:\r\n return 1\r\n\r\n dist = util.distance(hyds[0].coords, donor.coords)\r\n dist2 = util.distance(hyds[1].coords, donor.coords)\r\n # Eliminate hyds[0]\r\n if dist < dist2:\r\n self.hlist.remove(hyds[0])\r\n self.routines.cells.remove_cell(hyds[0])\r\n residue.remove_atom(hyds[0].name)\r\n donorhatom = residue.get_atom(hyds[1].name)\r\n elif hyds[1] in self.hlist:\r\n self.hlist.remove(hyds[1])\r\n self.routines.cells.remove_cell(hyds[1])\r\n residue.remove_atom(hyds[1].name)\r\n if residue.has_atom(hyds[0].name):\r\n donorhatom = residue.get_atom(hyds[0].name)\r\n elif len(self.hlist) != 0 and residue.has_atom(self.hlist[0].name):\r\n donorhatom = residue.get_atom(self.hlist[0].name)\r\n\r\n # If only one H is left, we're done\r\n if len(self.hlist) == 1:\r\n if donorhatom != None:\r\n self.rename(donorhatom)\r\n residue.fixed = 1\r\n return 1\r\n\r\n else:\r\n return 0", "title": "" }, { "docid": "a1b7c1f4fb341f46f72d07b2088964bf", "score": "0.4633505", "text": "def reaction(smarts_string, mol1, mol2=False):\n products = []\n try: \n \n # Setup the Indigo library.\n # Enable generation of multiple products from a reaction, but don't\n # allow product molecules to be fed back in to be reacted a second\n # time around.\n indigo = indigo_module.Indigo()\n indigo.setOption(\"rpe-multistep-reactions\", \"true\")\n indigo.setOption(\"rpe-max-depth\", \"1\")\n indigo.setOption(\"rpe-self-reaction\", \"false\")\n indigo.setOption(\"rpe-mode\", \"grid\")\n\n # Load the molecules into Indigo\n molecules = indigo.createArray()\n m1 = indigo.loadMolecule(mol1)\n molecules.arrayAdd(m1)\n\n if (mol2):\n m2 = indigo.loadMolecule(mol2)\n molecules.arrayAdd(m2)\n\n # Initialise a 2D array to hold the reactant table\n reactant_table = indigo.createArray()\n\n # Create the reaction Indigo variable\n rxn = 
indigo.loadReactionSmarts(smarts_string)\n # Keep mapped atoms\n rxn.automap(\"keep\")\n\n # Build the Indigo array of reactants\n for i in range(0, rxn.countReactants()):\n array = indigo.createArray()\n if (i < molecules.count()):\n array.arrayAdd(molecules.at(i))\n reactant_table.arrayAdd(array)\n\n # Enumerate the products\n output_reactions = indigo.reactionProductEnumerate(rxn, reactant_table)\n num_products = output_reactions.count()\n\n if (num_products > 0):\n # The reaction was successful. Add the products to products[] \n for i in range(num_products):\n product = output_reactions.at(i)\n for p in product.iterateProducts():\n products.append(p.canonicalSmiles())\n\n except IndigoException as e:\n print(\"Indigo Exception: %s\" % (e))\n\n except Exception as e:\n print(\"Exception: %s\" % (e))\n\n finally:\n return products", "title": "" }, { "docid": "05f79c7a8449d59b1e3257c6c4946d5e", "score": "0.46213487", "text": "def get_action(self, observation, is_exploit_episode):\n\t\tpass", "title": "" }, { "docid": "cea108afdb032c5447ac12a51b42f1a5", "score": "0.46083704", "text": "def is_amino_acid(name):\n return (name in name_to_type)", "title": "" }, { "docid": "6086de0235c750157416bea90ae9172b", "score": "0.46068957", "text": "def get_i_act(self, channel, rc=False):\n if rc:\n self.rc_server.send('{}'.format(self._i_act[channel]))\n\n return self._i_act[channel]", "title": "" }, { "docid": "019111e184920e9e476a849d7310d4e3", "score": "0.46059617", "text": "async def upgrade_character(self, ctx, *args):\n if not character.checkRegistration(str(ctx.author.id)):\n return await ctx.send(\"You are not registered. Please register by using the command `!register`\")\n playerID = str(ctx.author.id)\n battler = db.Player.objects.no_dereference().get(battler_id = playerID)\n if len(battler.characters_list) == 0:\n return await ctx.send(\"You don't have an active character\")\n pCharac = battler.getCharacter()\n embed = mdisplay.display_level_up_details(pCharac)\n msg = await ctx.send(embed= embed)\n willpower_emoji = \"🧠\"\n vitality_emoji = \"💟\"\n agility_emoji = \"👟\"\n strength_emoji = \"💪\"\n success_emoji = \"✅\"\n cancel_emoji = \"❎\"\n loop = True \n used_emojis = [success_emoji,cancel_emoji]\n if pCharac.willpower <= pCharac.unused_points:\n await msg.add_reaction(willpower_emoji)\n used_emojis.append(willpower_emoji)\n if pCharac.vitality <= pCharac.unused_points: \n await msg.add_reaction(vitality_emoji)\n used_emojis.append(vitality_emoji)\n if pCharac.agility <= pCharac.unused_points:\n await msg.add_reaction(agility_emoji)\n used_emojis.append(agility_emoji)\n if pCharac.strength <= pCharac.unused_points:\n await msg.add_reaction(strength_emoji)\n used_emojis.append(strength_emoji)\n await msg.add_reaction(success_emoji)\n await msg.add_reaction(cancel_emoji)\n def reaction_filter(reaction, user):\n return str(user.id) == str(ctx.author.id) and str(reaction.emoji) in used_emojis\n \n while loop:\n try:\n pending_collectors =[self.bot.wait_for('reaction_add', timeout=5, check = reaction_filter),\n self.bot.wait_for('reaction_remove', timeout=5, check = reaction_filter)] \n done_collectors, pending_collectors = await asyncio.wait(pending_collectors, return_when=asyncio.FIRST_COMPLETED)\n for collector in pending_collectors:\n collector.cancel()\n for collector in done_collectors:\n reaction, user = await collector\n if reaction.emoji == willpower_emoji:\n if pCharac.willpower <= pCharac.unused_points:\n pCharac.unused_points -= pCharac.willpower\n pCharac.willpower += 1 \n elif 
reaction.emoji == vitality_emoji:\n if pCharac.vitality <= pCharac.unused_points:\n pCharac.unused_points -= pCharac.vitality\n pCharac.vitality += 1\n elif reaction.emoji == agility_emoji:\n if pCharac.agility <= pCharac.unused_points:\n pCharac.unused_points -= pCharac.agility\n pCharac.agility += 1\n elif reaction.emoji == strength_emoji:\n if pCharac.strength <= pCharac.unused_points:\n pCharac.unused_points -= pCharac.strength\n pCharac.strength += 1\n elif reaction.emoji == success_emoji:\n battler.updateCharacterByName(pCharac.name)\n battler.save()\n await msg.add_reaction('💤')\n loop = False\n elif reaction.emoji == cancel_emoji:\n await msg.add_reaction('💤')\n loop = False\n msg.edit(embed = mdisplay.display_level_up_details(pCharac))\n except asyncio.TimeoutError:\n await msg.add_reaction('💤')\n loop = False", "title": "" }, { "docid": "6f108ad4df68b0605e7ec5607d4a438f", "score": "0.4602062", "text": "def __do_cis_casida(self):\n \n # relation: $ c_{ia} = (\\epsilon_a-\\epsilon_i)^{-1/2}(X_{ia}+Y_{ia}) $\n if self.is_do_cis_casida != \"yes\":\n return\n \n n_state = self.ci['n_state']\n\n for i_state in range(1, n_state): \n one_state = self.ci['state'][i_state]\n noccA = one_state['nocc_allA']\n nvirA = one_state['nvir_allA']\n # add-alpha add-beta sub-alpha sub-beta\n # only use alpha section now. \n alpha_coeffs = one_state['alpha_coeffs']\n omega = self.ci['energy'][i_state] - self.ci['gs_energy'] \n \n for i in range(noccA):\n for j in range(nvirA):\n orb = self.mo['energy']['alpha']\n de = float(orb[j+noccA]) - float(orb[i])\n # page: 76; \n # Nonadiabatic Dynamics of Cis-trans Photoisomerization --- A First Principles ...\n # Benjamin G. Levine\n # \\Omega \\bf F = \\omega \\bf F\n # F = (A-B)^(-1/2) |X+Y>\n # the solved equation in gaussian is F not |X+Y>, so sqrt(de/omega)\n alpha_coeffs[i][j] = alpha_coeffs[i][j] * math.sqrt(de/omega)\n \n # we use $c_{ia} = \\sqrt{\\frac{\\Omega_a}{\\epsilon_a - \\epsilon_i}} |X+Y>$ \n one_state['alpha_coeffs'] = alpha_coeffs\n self.ci['state'][i_state] = one_state\n \n return", "title": "" }, { "docid": "e89210084e2ac89fbdb9b2f67cba6522", "score": "0.4595165", "text": "def get_aid_from_anum(self, num):\n return self.guidance_json['dictAssessNum2AssessId'].get(num)", "title": "" }, { "docid": "ee9f83c990ab09f127436876418f149f", "score": "0.4593632", "text": "async def on_raw_reaction_add(self, payload):\n guild = self.bot.get_guild(payload.guild_id)\n if guild is not None:\n channel = guild.get_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n user = await guild.fetch_member(payload.user_id)\n\n # Update cached leaderboards\n if not payload.member.bot:\n if payload.message_id in self.cached_messages:\n if payload.emoji.name == \"➡️\":\n await self.update_leaderboard_message(message, 1)\n await message.remove_reaction(\"➡️\", user)\n elif payload.emoji.name == \"⬅️\":\n await self.update_leaderboard_message(message, -1)\n await message.remove_reaction(\"⬅️\", user)\n\n # Update reaction leaderboards\n if not payload.member.bot:\n reactions = self.leaderboards[str(payload.guild_id)][\"reaction_leaderboard\"]\n\n if payload.emoji.id is not None:\n for guildEmoji in guild.emojis:\n if payload.emoji.id == guildEmoji.id:\n if (\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\") not in reactions:\n reactions[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] = 1\n else:\n reactions[\"<:\" + str(payload.emoji.name) + \":\" + str(payload.emoji.id) + \">\"] += 
1\n\n break\n else:\n if payload.emoji.name not in reactions:\n reactions[str(payload.emoji.name)] = 1\n else:\n reactions[str(payload.emoji.name)] += 1\n\n if str(payload.emoji.id) in self.leaderboards[str(payload.guild_id)][\"emoji_leaderboard\"]:\n self.leaderboards[str(payload.guild_id)][\"emoji_leaderboard\"][str(payload.emoji.id)] += 1", "title": "" }, { "docid": "e46b6740f0f4104e77f0b0c1254049eb", "score": "0.45886463", "text": "def _assign_oxidation(self):\r\n result = {}\r\n current_charge = self.charge\r\n\r\n # Check for special cases otherwise not covered by the rules\r\n if self == ((C*3)*(H*8)):\r\n return {H: 0, C: 0}\r\n \r\n # Apply rules for elemental atoms and singular ions\r\n ion_list = [i for i, j in self.parts.items()]\r\n \r\n if len(ion_list) == 1:\r\n if self.charge == 0:\r\n return {ion_list[0]: 0}\r\n else:\r\n return {ion_list[0]: self.charge / self.parts[ion_list[0]]}\r\n\r\n # Assume polyatomic ions are correct\r\n for i in ion_list:\r\n if type(i) == Polyatomic:\r\n result[i] = i.oxidation()\r\n current_charge -= i.oxidation() * self.parts[i]\r\n ion_list.remove(i)\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n\r\n # Apply rules for group 1A compounds\r\n for i in ion_list:\r\n if i in [Li, Na, K, Rb, Cs, Fr]:\r\n result[i] = 1\r\n current_charge -= 1 * self.parts[i]\r\n ion_list.remove(i)\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n\r\n # Apply rules for group 2A compounds\r\n for i in ion_list:\r\n if i in [Be, Mg, Ca, Sr, Ba, Ra]:\r\n result[i] = 2\r\n current_charge -= 2 * self.parts[i]\r\n ion_list.remove(i)\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n\r\n # Apply rules for fluorine, hydrogen, and oxygen\r\n if F in ion_list:\r\n result[F] = -1\r\n current_charge += 1 * self.parts[F]\r\n ion_list.remove(F)\r\n\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n \r\n if H in ion_list:\r\n result[H] = 1\r\n current_charge -= 1 * self.parts[H]\r\n ion_list.remove(H)\r\n\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n\r\n if O in ion_list:\r\n result[O] = -2\r\n current_charge += 2 * self.parts[O]\r\n ion_list.remove(O)\r\n\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n\r\n # Apply rules for group 7A ions\r\n for i in ion_list:\r\n if i in [F, Cl, Br, I, At, Ts]:\r\n result[i] = -1\r\n current_charge += 1 * self.parts[i]\r\n ion_list.remove(i)\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n\r\n # Apply rules for group 6A ions\r\n for i in ion_list:\r\n if i in [F, Cl, Br, I, At, Ts]:\r\n result[i] = -2\r\n current_charge += 2 * self.parts[i]\r\n ion_list.remove(i)\r\n if len(ion_list) == 1:\r\n result[ion_list[0]] = current_charge / self.parts[ion_list[0]]\r\n return result\r\n\r\n if len(ion_list) != 1:\r\n raise ValueError(\"Error: insufficient information to assign oxidation states\")", "title": "" }, { "docid": "0642e1db06a8e494ca74961ef0bfa666", "score": "0.45871943", "text": "def get_inhibitor_arc_info(a):\n assert isinstance(a, petrinet.InhibitorArc)\n\n src_id, target_id = str(a.src._id), str(a.target._id)\n _id = '{}->{}'.format(src_id, target_id)\n attribs = {\n 'color': 
'black',\n 'arrowType': 'odot',\n 'dir': 'forward'\n }\n return _id, src_id, target_id, attribs", "title": "" }, { "docid": "207ffd3a4959c646584e3a46c31b9255", "score": "0.45800838", "text": "def produces_atp(base):\n ra = cobra.Reaction(\"A\")\n rb = cobra.Reaction(\"B\")\n rc = cobra.Reaction(\"C\")\n base.add_reactions([ra, rb, rc])\n ra.reaction = \"a <--> b\"\n rb.reaction = \"b <--> c\"\n rc.reaction = \"atp_c + h2o_c + a <--> pi_c + adp_c + c + h_c\"\n base.add_boundary(base.metabolites.a, type=\"sink\")\n base.add_boundary(base.metabolites.h2o_c, type=\"sink\")\n base.add_boundary(base.metabolites.h_c, type=\"sink\")\n base.add_boundary(base.metabolites.adp_c, type=\"sink\")\n base.add_boundary(base.metabolites.atp_c, type=\"sink\")\n base.add_boundary(base.metabolites.pi_c, type=\"sink\")\n base.add_boundary(base.metabolites.c, type=\"demand\")\n for met in base.metabolites:\n met.compartment = \"c\"\n return base", "title": "" }, { "docid": "be5c9634da2930200206e8cbc42050cf", "score": "0.45735207", "text": "def act(self, state, deterministic):\n\n # update epsilon\n eps = self.min_epsilon + (self.max_epsilon - self.min_epsilon)*np.exp(-1*self.n_steps/self.eps_decay)\n self.n_steps += 1\n eps = self.min_epsilon\n\n r = np.random.uniform()\n if deterministic or r > eps:\n # TODO: take greedy action (argmax)\n state = torch.tensor([state]).float().cuda()\n action_id = torch.argmax(self.Q(state), dim=1).cpu().detach().numpy()\n else:\n\n # TODO: sample random action\n # Hint for the exploration in CarRacing: sampling the action from a uniform distribution will probably not work.\n # You can sample the agents actions with different probabilities (need to sum up to 1) so that the agent will prefer to accelerate or going straight.\n # To see how the agent explores, turn the rendering in the training on and look what the agent is doing.\n action_id = np.random.choice(range(self.num_actions), 1)\n\n return action_id[0]", "title": "" }, { "docid": "8cb0dfdd76f0e96fa710134896951ada", "score": "0.4571377", "text": "def get_reactions(self):\n pass", "title": "" }, { "docid": "cfd09d3ca22e360343de2b1b38dc5151", "score": "0.45686388", "text": "def get_jointaction(self, jointstate):\n pass", "title": "" }, { "docid": "703009d05790a51fdce6552aea56ccda", "score": "0.456079", "text": "def get_interaction_energies(self,pKa_center,titration,state):\n print('In get_interaction_energies. 
center: group: %s, titration: %s, state: %s' %(pKa_center,titration,state))\n #\n # Set the calc type and center\n #\n self.apbs_setup.set_type('intene')\n #\n # Run APBS and get the interaction with all other states\n #\n if debug:\n CM.set_calc('IE %s %s' %(pKa_center.residue.resSeq,state))\n potentials=self.getAPBSPotentials(pKa_center,titration,state)\n #\n # construct this side\n #\n energies={}\n #\n # Loop over all groups\n #\n for pKa in self.pKas:\n residue = pKa.residue\n pKaGroup = pKa.pKaGroup\n ambiguity = pKa.amb\n #\n # Loop over each titration\n #\n if pKa not in energies:\n energies[pKa]={}\n #\n for titration in pKaGroup.DefTitrations:\n if titration not in energies[pKa]:\n energies[pKa][titration]={}\n #\n # Get all states\n #\n possiblestates = titration.allstates\n for state in possiblestates:\n #\n # Switch to the particular state\n #\n atomnames = self.getAtomsForPotential(pKa,titration)\n self.hydrogenRoutines.switchstate('pKa', ambiguity, state)\n self.zeroAllRadiiCharges()\n self.setAllRadii()\n self.setCharges(residue, atomnames)\n #\n # Get atoms for potential\n #\n #print 'Atoms for measuring potential',atomnames\n atomlist=[]\n for atomname in atomnames:\n atomlist.append(residue.getAtom(atomname))\n energy=0.0\n count=0\n for atom in protein.getAtoms():\n for atom2 in atomlist:\n if is_sameatom(atom,atom2):\n energy=energy+potentials[1][count]*atom.get(\"ffcharge\")\n #print 'Getting potential',residue.get('name'),atom.name,atom.get('ffcharge')\n count=count+1\n #\n # We set all energies with self to zero\n #\n if pKa==pKa_center:\n energies[pKa][titration][state]=0.0\n else:\n energies[pKa][titration][state]=energy\n return energies", "title": "" }, { "docid": "cb124762efa8b1bc4989782ffb1892ba", "score": "0.45566475", "text": "def getAcqEra(self):\n return getattr(self.data.output, 'acqEra', None)", "title": "" }, { "docid": "4fb8f9c6ce75d38996f05b49d8195b22", "score": "0.45537463", "text": "def acth2omin(self):\n return _min3p.f90wrap_chem__get__acth2omin()", "title": "" }, { "docid": "395dfa372607e3362944aebd0a3ff40e", "score": "0.45465708", "text": "def get_action_id(self, atomic_action_id, object_id):\n idx = None\n for i, ao_pair in enumerate(self.ao_list):\n if ao_pair[0] == atomic_action_id and ao_pair[1] == object_id:\n idx = i + 1 # +1 to allow the NA first action label\n break\n return idx", "title": "" } ]
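One of the negative passages above (the `act` method that mixes an exponentially decaying epsilon with a greedy argmax over Q-values) implements epsilon-greedy action selection. A minimal self-contained sketch of that same pattern is given below as an illustration only; the decay constants and the NumPy-based interface are assumptions and are not taken from the passage.

import numpy as np

def epsilon_greedy_action(q_values, step, eps_min=0.05, eps_max=1.0,
                          eps_decay=1000.0, rng=None):
    """Return an action index for a 1-D array of Q-values.

    Epsilon decays exponentially with the step count, so early steps
    mostly explore (uniform random action) and later steps mostly
    exploit (argmax action).
    """
    rng = rng or np.random.default_rng()
    eps = eps_min + (eps_max - eps_min) * np.exp(-step / eps_decay)
    if rng.random() < eps:
        return int(rng.integers(len(q_values)))  # explore: random action
    return int(np.argmax(q_values))              # exploit: greedy action

Called inside a training loop as `a = epsilon_greedy_action(q_table[state], step)`, this reproduces the explore/exploit split of the snippet without the PyTorch dependency; the constants are placeholders rather than values from the passage.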
9684abc1e5468def436d92ce25f527ae
Multiply every element of self by value. Done in place.
[ { "docid": "9d86fa04561e94c82d8a1531e8cf94cb", "score": "0.7325298", "text": "def scale(self, value:Any):\r\n self._inert = (i*value for i in self)\r\n return self", "title": "" } ]
[ { "docid": "f911b7d4237ef29b024e6616813f694d", "score": "0.727317", "text": "def __mul__(self, scalar):\n # BAD python 2.3 compat change\n return type(self)( [scalar*item for item in self] )", "title": "" }, { "docid": "4c920e9ce16034ec191b54391418f4ff", "score": "0.7248915", "text": "def __mul__(self, value):\n return kernel_arithmetics(self, value, \"mul\")", "title": "" }, { "docid": "132cfb2fa489c59129f6d2cb84bf0ffe", "score": "0.71549714", "text": "def __imul__(self, scalar):\n self._values = [v * scalar for v in self._values]\n return self", "title": "" }, { "docid": "f144b9e7d4dbbaebb267300c795ebae3", "score": "0.7055344", "text": "def __mul__(self, other):\n return self.value * other.value", "title": "" }, { "docid": "24ecbfb2143c1f9b0b161aebdc20bf19", "score": "0.70085025", "text": "def __mul__(self, value):\n assert isinstance(value, (int, float))\n if self._peak_dict[\"raw\"] is not None:\n self.set_peaks(\n np.column_stack(\n (self.peaks(\"raw\")[:, 0], self.peaks(\"raw\")[:, 1] * value)\n ),\n \"raw\",\n )\n if self._peak_dict[\"centroided\"] is not None:\n self.set_peaks(\n np.column_stack(\n (self.centroided_peaks[:, 0], self.centroided_peaks[:, 1] * value)\n ),\n \"centroided\",\n )\n if self._peak_dict[\"reprofiled\"] is not None:\n for mz in self._peak_dict[\"reprofiled\"].keys():\n self._peak_dict[\"reprofiled\"][mz] *= float(value)\n return self", "title": "" }, { "docid": "5a162d8f33fb4a8b8fecf1d75689b00b", "score": "0.68893456", "text": "def multiply_with(self, factor):\n for row in self:\n for j in range(len(row)):\n row[j] *= factor\n return self", "title": "" }, { "docid": "d0a7a682b88cfc95985b5b77c97f6092", "score": "0.68711376", "text": "def __mul__(self, val):\n if isinstance(val, (numbers.Integral, numbers.Real, Decimal)):\n return self._multiply(val)\n else:\n raise TypeError('value must be a number')", "title": "" }, { "docid": "cd83db9b3b130920ea936c443c7e60ff", "score": "0.6852584", "text": "def __imul__(self, value):\n raise_if_of_type(value, int)\n if value < 1:\n self._clear()\n if value > 1:\n current = self\n for i in range(0, value - 1):\n self.extend(current)", "title": "" }, { "docid": "35155faec5333a739d089dd5358a4ab3", "score": "0.68164545", "text": "def __mul__(self, x):", "title": "" }, { "docid": "7f4700d9baddf32e057d1d2b578c4bcb", "score": "0.67458034", "text": "def __init__(self, sequence: Iterable[int]=(), initial_value: int=1):\n self.value = functools.reduce(operator.mul, sequence, initial_value)", "title": "" }, { "docid": "12628b8c77b809f4946da9da399339ce", "score": "0.66897285", "text": "def multiply(self, scalar):\n return self * scalar", "title": "" }, { "docid": "6976f3e7f76213f6079d90595cb5f284", "score": "0.6667253", "text": "def __imul__(self, item):\n self += item\n return self", "title": "" }, { "docid": "c38739cc3d597e415870f8d51e60b59a", "score": "0.6649418", "text": "def __mul__(self, val):\n\n if val < 0:\n raise ValueError(\n error_messages['signed_dup'].format(self.__class__.__name__)\n )\n\n result = []\n\n for i in xrange(val):\n result.append(copy.deepcopy(self))\n\n return result", "title": "" }, { "docid": "2b7ba6b5057718d063e26654e91199a0", "score": "0.6645611", "text": "def evalAt(self, value):\n return sum([self.lst[i] * value **\n (len(self.lst) - i - 1) for i in range(len(self.lst))])", "title": "" }, { "docid": "045de8a2aa3fe02cfb76e957a5475d33", "score": "0.6632394", "text": "def __mul__(self, a : Union[numbers.Number, Iterable[numbers.Real]]) -> 'M3Transformable':\n\t\tres = 
copy.deepcopy(self)\n\t\tres.__imul__(a)\n\t\treturn res", "title": "" }, { "docid": "b6bf98c18ebf2c5e4dc648969050cfb7", "score": "0.66045594", "text": "def product(self):\n cl = self[0]\n for i in range(1, len(self)):\n cl = cl * self[i]\n return cl", "title": "" }, { "docid": "ff6e4daa5918906738ecd7c9eab230d8", "score": "0.659326", "text": "def scale(self, value):\n self._check_mask()\n for blk in self._blocks:\n blk *= value", "title": "" }, { "docid": "ac3f2e73d28c35270ffc77bc8ba25e58", "score": "0.6575594", "text": "def apply(self, values):\n for name, multiplier in self.personality.items():\n if name in values:\n values[name] *= multiplier\n return values", "title": "" }, { "docid": "633dfa08f98b58406842f775e0b7a2ba", "score": "0.65349424", "text": "def __imul__(self, scalar):\n return self*scalar", "title": "" }, { "docid": "6f4b1a262b911013e7db66991f2bd393", "score": "0.6516048", "text": "def mul(self, product, productKey, value):\r\n return round(getattr(product, productKey) * value, 2)", "title": "" }, { "docid": "cf396276fe04de490f14565ca00f856c", "score": "0.6500619", "text": "def mul(value, arg):\n return value * arg", "title": "" }, { "docid": "578def7a5331934384c144f9b12a358e", "score": "0.64892346", "text": "def __mul__(self, other):\n if not isinstance(other, float) and not isinstance(other, int):\n raise TypeError( # pragma: no cover\n \"other should be a number\")\n values = []\n for row in self.values:\n r = []\n for a in row:\n if a is not None:\n x = a * other\n else:\n x = None\n r.append(x)\n values.append(r)\n return self._private_getclass()(self.header, values)", "title": "" }, { "docid": "c154c4e12fb9bee199009d2bb2018ca9", "score": "0.6481746", "text": "def __mul__(self, value:int):\r\n if hasattr(value, '__int__'):\r\n return type(self)(chain.from_iterable(self for i in range(int(value))))\r\n raise TypeError(f'Multiplication is not defined for \"{__regen.tipo(other, True)}\". 
It must have an \"__int__\"')", "title": "" }, { "docid": "13fbde15e1f8deebd11a784dd71818df", "score": "0.64800286", "text": "def __mul__(self, other):\n if type(other) == int or type(other) == float:\n return Vector([i*other for i in self.elements])\n \n elif type(other) == Vector:\n if len(self.elements) != len(other.elements):\n return 0\n return sum([x*y for x, y in zip(self.elements, other.elements)])", "title": "" }, { "docid": "81dfe4bfb45223820dcb9e526750b07d", "score": "0.64541745", "text": "def __mul__(self, *args):\n return _almath.Transform___mul__(self, *args)", "title": "" }, { "docid": "62d1f5f74db438e7f7091a2452c268b9", "score": "0.64508903", "text": "def __mul__(self, other):\n if self.size() != other.size():\n return\n result = 0\n for i in range(self.size()):\n result += (self[i] * other[i])\n return result", "title": "" }, { "docid": "8aebc610b46295a82cca0df7d4c7cbbf", "score": "0.6446316", "text": "def multiply(li):\n prod = 1\n for i in li:\n prod = prod * i\n return prod", "title": "" }, { "docid": "cb13b8cef7810a2519aeec50cd76076f", "score": "0.6444419", "text": "def mult(self, val, target = None):\n\n if not target:\n target = self\n\n target.resize(self.shape)\n\n\n if isinstance(val, CUDAMatrix):\n if target.shape != val.shape:\n raise IncompatibleDimensionsException\n target.numpy_array[:] = self.numpy_array * val.numpy_array\n\n elif isinstance(val, (int, float, __DTYPE__)):\n target.numpy_array[:] = self.numpy_array * val\n else:\n raise ValueError(\"Value must be of type CUDAMatrix, int, or float.\")\n\n\n\n return target", "title": "" }, { "docid": "3fbf2e3934150a14bc70b04313e9c4cd", "score": "0.6421422", "text": "def mul(self, initial: U = 1) -> U:\n return self.reduce(operator.mul, initial = initial)", "title": "" }, { "docid": "f61b97bd9eac0b021c983e5396bab5bf", "score": "0.64139324", "text": "def __mul__(self, other):\r\n if type(other) == type(self):\r\n return self.inner(other)\r\n elif type(other) == type(1) or type(other) == type(1.0):\r\n product = tuple(a * other for a in self)\r\n return Vector(*product)", "title": "" }, { "docid": "6d9d7cd0faecb71b5cdf48c04abfd45e", "score": "0.64095634", "text": "def __mul__(self, scalar):\n return self._apply_operator(self.data, ops.mul, scalar)", "title": "" }, { "docid": "ad82fcadd5f24cc105162fdf4415a3cd", "score": "0.6394821", "text": "def multiply(self, multiplicants):\n pass", "title": "" }, { "docid": "e894bfaf64cdb0145d11c56edd7f11a3", "score": "0.63815904", "text": "def __mul__(self, times):", "title": "" }, { "docid": "d6b4be10af7ca449987bcee417603b4c", "score": "0.63550884", "text": "def __mul__(self, other):\n\n if isinstance(other, int) or isinstance(other, float):\n return type(self)(value=(self.value() * other))\n else:\n raise TypeError", "title": "" }, { "docid": "8d3baf45b08c2c9518fc67694d851e48", "score": "0.6351529", "text": "def scale_values(self, factor):\n if self.is_symmetric:\n self.values = [factor * x for x in self.values]\n else:\n self.values = [(factor * x[0], factor * x[1])\n for x in self.values]", "title": "" }, { "docid": "ab3473236092536008d017b885ba3e3d", "score": "0.6346492", "text": "def __mul__(self,value):\n copy_matrix: list = deepcopy(self.coefficients)\n constant = 0\n if type(value) in (complex, int, float):\n const: Union[float, complex] = self.constant * value\n for term in copy_matrix:\n term[1][0][0] *= value # We Only need to multiply One Term (the coefficient of the term)\n copy_matrix.append([const])\n return Multinomial(copy_matrix)\n elif type(value) == 
self.__class__:\n BASE_ARRAY: list = []\n for term in copy_matrix:\n for item in value.coefficients:\n common = findCommon(term[0], item[0])\n NEW_ARR = [[com for com in common],[[] for com in common]]\n \n # Handle Commonly Shared Values\n for var in common:\n \n # Indexes\n i1 = term[0].index(var)\n i2 = item[0].index(var)\n\n # Values TO Compute\n cAe1 = term[1][i1]\n cAe2 = item[1][i2]\n\n # Computed Operations\n base = cAe1[0] * cAe2[0] # Bases\n expo = cAe1[1] + cAe2[1] # Powers\n\n NEW_ARR[1][NEW_ARR[0].index(var)].append(base)\n NEW_ARR[1][NEW_ARR[0].index(var)].append(expo)\n \n\n VAL_ARRAY : List[list] = [[],[]] # Containing values in item\n VAL2_ARRAY : List[list] = [[],[]] # Containing values in term\n\n\n for __val__ in item[0]: \n if __val__ not in common:\n VAL_ARRAY[0].append(__val__)\n VAL_ARRAY[1].append(item[1][item[0].index(__val__)])\n \n for __val__ in term[0]: \n if __val__ not in common:\n VAL2_ARRAY[0].append(__val__)\n VAL2_ARRAY[1].append(term[1][term[0].index(__val__)])\n\n\n if len(VAL_ARRAY[0]) > 0:\n for cpp in VAL_ARRAY[0]:\n NEW_ARR[0].append(cpp)\n NEW_ARR[1].append(VAL_ARRAY[1][VAL_ARRAY[0].index(cpp)])\n \n if len(VAL2_ARRAY[0]) > 0 :\n for cpp in VAL2_ARRAY[0]:\n NEW_ARR[0].append(cpp)\n NEW_ARR[1].append(VAL2_ARRAY[1][VAL2_ARRAY[0].index(cpp)])\n\n BASE_ARRAY.append(NEW_ARR)\n\n const_val = Multinomial([term]) * value.constant\n\n if isinstance(const_val, int):\n constant += const_val\n else:\n BASE_ARRAY += const_val.coefficients\n \n \n _mul_ = Multinomial(BASE_ARRAY) + self.constant * value\n _mul_.constant += constant\n return _mul_\n return NotImplemented", "title": "" }, { "docid": "203176b9b9c256ccb957c9b45534a3df", "score": "0.634523", "text": "def __mul__(self, other):\n result = Vector(len(self)) + self\n for i in range(len(self)):\n result[i] = result[i] * other[i]\n return result", "title": "" }, { "docid": "960aca8dd0e699272f9bf7e6f1d7e00b", "score": "0.63280064", "text": "def __mul__(self, other):\n return self.product(other)", "title": "" }, { "docid": "e47c4484c23440776caac4f76d73cdcb", "score": "0.631142", "text": "def __mul__(self, other):\n return self._int_value * other", "title": "" }, { "docid": "83bbd8ceb4d4d6aa715233ac3e33a18f", "score": "0.63013387", "text": "def __mul__(self, scalarValue):\n checkScalarValidity(scalarValue, 'multiplication')\n\n return Vector(values=scalarValue*self.vector)", "title": "" }, { "docid": "f3c739d0d7771a78e114e7deb5b9fcc8", "score": "0.6283836", "text": "def mult(\r\n seq, # type: list[float]\r\n scalar # type:\r\n ):\r\n # type: () -> list[float]\r\n return [value * scalar for value in seq]", "title": "" }, { "docid": "7f153dd78a02324292127777ecb03a2a", "score": "0.62827563", "text": "def scalar_multiply(c, v):\n return [c * v_i for v_i in v]", "title": "" }, { "docid": "75990c66bc1a559a81c88e29b2883d62", "score": "0.62713", "text": "def __mul__(self, other):", "title": "" }, { "docid": "678a3b3ef0dee7861285a22ed4571c0a", "score": "0.6261526", "text": "def __mul__(self, other):\r\n if isinstance(other, Vector):\r\n return self.inner(other)\r\n elif isinstance(other, (int, float)):\r\n product = tuple(a * other for a in self)\r\n return self.__class__(*product)\r\n else:\r\n raise ValueError(\r\n \"Multiplication with type {} not supported\".format(type(other)))", "title": "" }, { "docid": "0e706766cd2147c1937eebb27949dccd", "score": "0.62579995", "text": "def __mul__(self, scalar):\n values = [v * scalar for v in self._values]\n return Coordinates(values)", "title": "" }, { "docid": 
"018d607b920d7d63ca45ab2d81cd723f", "score": "0.62574965", "text": "def multiply(self, building, value) -> None:\n mode = ef.get_mode(building)\n objects = self.get_objects(building)\n field_name = ef.convert_format(self.field_name, 'field', mode)\n if mode == 'idf':\n for o in objects:\n setattr(o, field_name, value * getattr(o, field_name))\n if mode == 'json':\n for o in objects:\n assert field_name in o, f'{field_name} not in {repr(o)}'\n o[field_name] = value * o[field_name]", "title": "" }, { "docid": "0172821d1b72e6b6fae82aa0dc33b6df", "score": "0.62537783", "text": "def __setitem__(self, unit, value):\n try:\n dim = self[unit]\n except KeyError:\n dict.__setitem__(self, unit, value)\n else:\n try:\n scale = value / dim\n for k, v in self.items():\n dict.__setitem__(self, k, v * scale)\n except TypeError:\n scale = tuple(v/d for v, d in zip(value, dim))\n for k, v in self.items():\n dict.__setitem__(\n self, k, tuple(v*s for v, s in zip(self[k], scale)))", "title": "" }, { "docid": "f7bed4e361aa4f163295ef707db7be78", "score": "0.6244906", "text": "def multiply(num_list):\n total = 1\n for num in num_list:\n total *= num\n return total", "title": "" }, { "docid": "6685eee1b34f00d782733a5990862bc8", "score": "0.6243078", "text": "def square( self):\n self *= self\n return", "title": "" }, { "docid": "023a85cfe6a9ba65e438f5ab8f8318db", "score": "0.6237205", "text": "def __mul__(self, value):\n from ansys.dpf.core import dpf_operator\n from ansys.dpf.core import operators\n if hasattr(operators, \"math\") and hasattr(operators.math, \"generalized_inner_product\") :\n op= operators.math.generalized_inner_product()\n else :\n op= dpf_operator.Operator(\"generalized_inner_product\")\n op.connect(0,self) \n op.connect(1, value)\n return op", "title": "" }, { "docid": "3921d774598170176b39ebef453679a9", "score": "0.62209487", "text": "def mult_by_scalar(self, alpha, target = None):\n return self.mult(alpha, target)", "title": "" }, { "docid": "db8f076c5cc852f5605e6946da7d0336", "score": "0.62056506", "text": "def __mul__(self, obj):\n if isinstance(obj, self.__class__):\n return sum([a * b for (a, b) in zip(self.coords, obj.coords)])\n\n return self.__class__(*[i * obj for i in self.coords])", "title": "" }, { "docid": "39a98cc28f8a1f70712165bb03e8a6b3", "score": "0.62029", "text": "def _multiply(self, propList, factor_tuple):\n #print \"in _multiply: \", propList, \" ; factor_tuple: \", factor_tuple\n factor = factor_tuple[0]\n return propList * factor", "title": "" }, { "docid": "f3eeed11aaddd130a7f791a0350c8615", "score": "0.61902225", "text": "def __mul__(self,factor):\n return NotImplemented", "title": "" }, { "docid": "d0831655dde553259d19ef674d74e1ce", "score": "0.61716324", "text": "def _mul_scalar(self, c):\n raise NotImplementedError", "title": "" }, { "docid": "d4b383fbb829af865f31feee50f068c5", "score": "0.6164597", "text": "def __mul__(self, other):\r\n try: return Vector(map(lambda x, y: x*y, self, other))\r\n except:\r\n # other is a const\r\n return Vector(map(lambda x: x*other, self))", "title": "" }, { "docid": "a1e7c9107f9f3c32fd9a53defdde9ead", "score": "0.6146156", "text": "def mul(self, x: float):\n check_types([(\"x\", x, [int, float],)])\n return self.apply(func=\"{} * ({})\".format(\"{}\", x))", "title": "" }, { "docid": "c77e73a2fb04faf83e91111609f072eb", "score": "0.6107501", "text": "def __mul__(self, c):\n\n new_vec = Vector(N=self.N, beta=self.beta, empty=1)\n for phase_tuple in self.entries.keys():\n\n print \"multiplying by scalar\", phase_tuple\n\n 
new_vec.entries[phase_tuple] = c*self.entries[phase_tuple]\n\n return new_vec", "title": "" }, { "docid": "8a733036e49a249b2c56473f234945da", "score": "0.6105886", "text": "def __mul__(self: mask, argument: data) -> data:\n return data(oblivious.mul(self, argument))", "title": "" }, { "docid": "c556a2dcf7e37638b2a2182b27d26ac3", "score": "0.6103758", "text": "def __rmul__(self, scalar):\n return self * scalar", "title": "" }, { "docid": "7123f9a8fde310695fee350c5117cc96", "score": "0.6102066", "text": "def multi(lista):\n res = 1\n for i in range(len(lista)):\n res = res * lista[i] \n return res", "title": "" }, { "docid": "796d6e654fe90e7dbd39b1ff10823f66", "score": "0.6084339", "text": "def __mul__(self, scalar):\n return self.__class__(self.x * scalar, self.y * scalar)", "title": "" }, { "docid": "dc08001669049937f21ea55417060c6b", "score": "0.60798705", "text": "def multiplyForLoop(aListing):\n product = 1\n for num in aListing:\n product *= num\n return product", "title": "" }, { "docid": "fb2a4be67e3c7edb3cdc670dba843e4f", "score": "0.6057075", "text": "def __mul__(self: T, other: Union[np.ndarray, float]) -> T:\n pass", "title": "" }, { "docid": "e3c3338bd7c6f622962a59e55bf1ceac", "score": "0.6056605", "text": "def multiply(numbers_list):\n answer = 0\n for i in numbers_list:\n answer *= i \n return answer", "title": "" }, { "docid": "cc263c8978a168173df288bb7fea2403", "score": "0.60451865", "text": "def __mul__(self, other):\n return Element(self.group,\n self.group.operation(self.value,\n other.value))", "title": "" }, { "docid": "fe25aabcdef8187fca3b3b8d205414db", "score": "0.60430783", "text": "def prod(iterable: Iterable) -> float:\n return reduce(operator.mul, iterable, 1)", "title": "" }, { "docid": "81b9ff20719669f962d9377a0f5d30d6", "score": "0.6041998", "text": "def\t__mul__ (self, rhs):", "title": "" }, { "docid": "d5b9b26750fb83a470ab5b51699d0205", "score": "0.6029162", "text": "def prod(it):\n return functools.reduce(operator.mul, it)", "title": "" }, { "docid": "52f8ecd182f08bd72fceecedc038e31a", "score": "0.60243917", "text": "def __imul__(self, other):\n return self.__mul__(other)", "title": "" }, { "docid": "ed25bc7ae8e32e33e0a9df8088032b98", "score": "0.60149646", "text": "def scale(self, scalar):\n return self * scalar", "title": "" }, { "docid": "ed25bc7ae8e32e33e0a9df8088032b98", "score": "0.60149646", "text": "def scale(self, scalar):\n return self * scalar", "title": "" }, { "docid": "00d52be94c16c3146f01827d2f794680", "score": "0.60130674", "text": "def __mul__(self, other):\n\t\tother_values, self_values, self_times = self._valuesSortedByTime(other)\n\t\tif self.__isTimeNone:\n\t\t\tif isinstance(other, numbers.Real):\n\t\t\t\treturn TimeSeries([self.values()[i] * other for i in range(0, len(self.values()))])\n\t\t\telse:\n\t\t\t\treturn TimeSeries([self.values()[i] * other.values()[i] for i in range(0, len(self.values()))])\n\t\tif isinstance(other, numbers.Real):\n\t\t\treturn TimeSeries([self_values[i] * other for i in range(0, len(self_values))], self_times)\n\t\telse:\n\t\t\treturn TimeSeries([self_values[i] * other_values[i] for i in range(0, len(self_values))], self_times)", "title": "" }, { "docid": "923830fa252ad6026d9205a697de1100", "score": "0.60098505", "text": "def scalar_multiply(c: float, v: Vector) -> Vector:\n return [c * v_i for v_i in v]", "title": "" }, { "docid": "81dce151aed67571eda0b7e94b31de8c", "score": "0.60071063", "text": "def __mul__(self, other):\n if other is 1 or other is self.field.one:\n return self\n return self._op(other, 
\"__mul__\")", "title": "" }, { "docid": "426bbd43769e4ab1db58e1254fe9f9a5", "score": "0.59839225", "text": "def product(self):", "title": "" }, { "docid": "23caa44791fcc1a32132a2132460a248", "score": "0.59722257", "text": "def prod(lst):\n p = 1.0\n for i in lst:\n p *= i\n return p", "title": "" }, { "docid": "e287d3146d1aa452c4e16adc611b169d", "score": "0.5951092", "text": "def __mul__(self, scalar):\n return self.__class__(self.x * scalar, self.y * scalar, self.z * scalar)", "title": "" }, { "docid": "eabda672fd639a8883b659b5728e831a", "score": "0.59481096", "text": "def __mul__(self, other: Numeric) -> Numeric:\n if isinstance(other, Array):\n return Array(self * other.type)\n return Float()", "title": "" }, { "docid": "23d6c430fef1aceed53da56bb3a97bcb", "score": "0.59469384", "text": "def __mul__(this,graph):\n g = Graph(this)\n for k in this.nodes.keys():\n g.nodes[k] *= graph.nodes[k]\n return g", "title": "" }, { "docid": "4ed0ad1eb2c1df4e09ff6c91f48978fa", "score": "0.5935585", "text": "def __mul__(self, other):\n return self._sie_binop('*', other)", "title": "" }, { "docid": "f0aed258a8454fe85865c34695c62a81", "score": "0.59342635", "text": "def __mul__(self, other):\n if type(other) == type(self):\n return self.inner(other)\n elif type(other) == type(1) or type(other) == type(1.0):\n product = tuple( a * other for a in self )\n return VectorCartesiano(*product)", "title": "" }, { "docid": "3871ee266c0b1747b57d53cc1974da95", "score": "0.59337074", "text": "def prod(iterable):\n return reduce(mul, iterable)", "title": "" }, { "docid": "d95ea7df137af6425c57b57f0a523ec7", "score": "0.5931689", "text": "def __imul__(self, scalar):\n self.x *= scalar\n self.y *= scalar\n return self", "title": "" }, { "docid": "0a1c6771de233d0856af108837cf8fc0", "score": "0.59236056", "text": "def __mul__(self, other):\n # if self.number_field != other.number_field:\n if self.number_field.polynomial != other.number_field.polynomial:\n raise NotImplementedError(\"use same number field\")\n new_generator = [gen1 * gen2 \n for gen1 in self.generator for gen2 in other.generator]\n return Ideal_with_generator(new_generator)", "title": "" }, { "docid": "400e239eda23c6c73f2aa3bf027b139f", "score": "0.5921915", "text": "def __mul__(self,autre):\n tmp = self.carthesien()\n res = tmp * autre\n return res.polaire()", "title": "" }, { "docid": "5977ee4af00293bee603aab6ba96ed5d", "score": "0.59138966", "text": "def __rmul__(self, scalar):\n return self*scalar", "title": "" }, { "docid": "822f97e6cc15905a28fc19212d5842f6", "score": "0.5912698", "text": "def add_in_place(self, v, scale = 1):\r\n for b in v:\r\n self[b] += scale * v[b]\r\n return None", "title": "" }, { "docid": "15e70aa1a0062dcce2f133960cdd3581", "score": "0.59030384", "text": "def __mul__(self, other):\n if isinstance(other, (int, float)):\n result = Vector(len(self))\n for j in range(len(self)):\n result[j] = self[j] * other\n return result\n elif isinstance(other,Vector):\n if len(self) != len(other):\n raise ValueError('dimensions must agree')\n else:\n dot_product = 0\n for j in range(len(self)):\n dot_product += self[j]*other[j]\n return dot_product", "title": "" }, { "docid": "bac9c2241eec3422a153661496371888", "score": "0.5895575", "text": "def _op_mult(self):\n val1 = self.v_stack.pop()\n val2 = self.v_stack.pop()\n val1 = int(val1) * int(val2)\n self.v_stack.append(val1)", "title": "" }, { "docid": "75e481a624720dd081af16446e55221b", "score": "0.5894684", "text": "def _mul_(self,other):\n\n res = 
self.parent().rat_field()(self._rat*other._rat)\n new_parent = self.parent().extend_type(ring=True)\n # The product of two homogeneous elements is homogeneous\n return new_parent(res).reduce()", "title": "" }, { "docid": "3db820d8a7498143724c5f86bb1f3740", "score": "0.5889069", "text": "def prod(xs):\n p = 1\n for x in xs:\n p *= x\n return p", "title": "" }, { "docid": "3db820d8a7498143724c5f86bb1f3740", "score": "0.5889069", "text": "def prod(xs):\n p = 1\n for x in xs:\n p *= x\n return p", "title": "" }, { "docid": "587b570e0dc4d7b9fd8f1371e5175bc3", "score": "0.58833605", "text": "def mul(self, e, inplace=False):\n return self.__operate(operator.mul, e, inplace)", "title": "" }, { "docid": "9e779187ec5c95fe22643c244bb12959", "score": "0.5882546", "text": "def prod(vals, start=1):\n return reduce(mul, chain([start], vals))", "title": "" }, { "docid": "72dd1590685d3bb46718b54fa9fe1690", "score": "0.58773047", "text": "def __mul__(self, *args):\n return _almath.Velocity6D___mul__(self, *args)", "title": "" }, { "docid": "fcb79c53145b72627d5d17613be09b0d", "score": "0.58688617", "text": "def __mul__(self, other):\n\n\t\tif isinstance(other, np.ndarray):\n\t\t\treturn self.inv_transform(other, np.zeros(self.nb_dim))\n\n\t\tassert all([self.lmbda is not None, other.lmbda is not None]), \"Precision not defined\"", "title": "" }, { "docid": "29afbdd43a3108fd9b1a15a188d85d40", "score": "0.58670753", "text": "def __mul__(*args):\n return _almath.__mul__(*args)", "title": "" }, { "docid": "e08db251e103e63274e43b615ff3a484", "score": "0.5865868", "text": "def __mul__(self, other):\n\n return self._mul_div(other, div=False)", "title": "" }, { "docid": "6fb7abfec66ce623b150c0211de8415e", "score": "0.58607495", "text": "def product(self, start=1):\n if self.__isnd():\n return reduce(\n lambda a, b: (a * b.product()) if isinstance(b, Array) else a * b,\n self,\n start,\n )\n return reduce(lambda a, b: a * b, self, start)", "title": "" } ]
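The record above pairs the query "Multiply every element of self by value. Done in place." with a positive passage and a set of `__mul__`/`__imul__` implementations. A minimal sketch of a genuinely in-place element-wise scale, assuming a simple list-backed container (the class name and attribute are illustrative and not taken from any of the passages), is:

class ScalableSeq:
    """Tiny sequence wrapper whose elements can be scaled in place."""

    def __init__(self, values):
        self._values = list(values)

    def scale(self, factor):
        """Multiply every stored element by factor in place; return self."""
        for i, v in enumerate(self._values):
            self._values[i] = v * factor
        return self

    def __imul__(self, factor):
        # "seq *= 3" mutates the existing object instead of building a new one.
        return self.scale(factor)

For example, `s = ScalableSeq([1, 2, 3]); s *= 2` leaves `s._values` as `[2, 4, 6]` while `s` keeps its original identity, which is the "done in place" behaviour the query asks for.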
aef59dbb3900ae0a18478a838687c9a8
Creates a new patient.
[ { "docid": "9a6f2fd99b6165685488448bbb96c4b1", "score": "0.7603765", "text": "def new_patient(patient_data: dict) -> Patients:\n database = get_connection()\n patient = Patients(id=str(uuid.uuid4()), created_at=datetime.now(), clinical_information=patient_data)\n database.patients.insert(\n {\n \"patient_data\": patient.dict(exclude_unset=True)\n }\n )\n return patient", "title": "" } ]
[ { "docid": "b89d660e229974b4ffde32f91bc4ad23", "score": "0.76125675", "text": "def createPatient(self):\n p = Prescription()\n p.patient_id = self.patients.data\n p.medication = self.medication.data\n p.frequency = self.frequency.data\n p.start_dt = self.start_dt.data\n p.end_dt = self.end_dt.data\n p.noti_type = self.noti_type.data\n return p", "title": "" }, { "docid": "487120195b03651d8961547ec60581dd", "score": "0.7443344", "text": "def test_creating_new_patient(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"jill23@gmail.com\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n patient_id = create_new_patient_account(form_data)\n\n self.assertEqual(3, patient_id)", "title": "" }, { "docid": "b387fd68db5fb74468a85d52f68733e9", "score": "0.7229287", "text": "def create_patient(firstname, lastname, age, gender, tsh_data):\n new_patient = {\"First\": firstname,\n \"Last\": lastname,\n \"Age\": age,\n \"Gender\": gender,\n \"TSH Data\": tsh_data,\n \"TSH Result\": \"unknown\"}\n return new_patient", "title": "" }, { "docid": "7d98be2cf3572df461df94e50ed64983", "score": "0.7098241", "text": "def create_patient(patient):\n\n pat = {\n 'resourceType': 'Patient',\n 'identifier': [\n {\n 'hosp_no': patient.pat_no\n }\n ],\n 'name': [\n {\n 'family': patient.sname,\n 'given': patient.fname\n }\n ],\n 'birthDate': patient.dob,\n 'gender': patient.gender,\n 'estimatedDischarge': patient.edd\n }\n\n return pat", "title": "" }, { "docid": "e5282a5d5b712cbdda5f8b44e4e14962", "score": "0.687557", "text": "def test_patient_creation(self):\n node = self.create_xml_patient({'Mobile_Number': '12223334444'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n payload = reminders.PatientDataPayload.objects.all()[0]\n self.assertEqual(payload.status, 'success')\n patients = reminders.Patient.objects.all()\n self.assertEqual(patients.count(), 1)\n self.assertEqual(patients[0].mobile_number, '+12223334444')\n self.assertEqual(patients[0].raw_data.pk, payload.pk)\n self.assertTrue(patients[0].contact is not None)", "title": "" }, { "docid": "3aceec366d854fac0a4b28bd186eb9e9", "score": "0.68335414", "text": "def test_payload_patient_creation(self):\n self._authorize()\n data = {\n 'Subject_Number': '000-1111',\n 'Pin_Code': '1234',\n 'Date_Enrolled': datetime.datetime.now().strftime('%b %d %Y '),\n 'Mobile_Number': '12223334444',\n }\n patient = self.create_xml_patient(data)\n payload = self.create_xml_payload([patient])\n response = self._post(payload)\n self.assertEqual(response.status_code, 200)\n patients = reminders.Patient.objects.all()\n self.assertEqual(patients.count(), 1)", "title": "" }, { "docid": "e60cee902a390667ad513cc59ebc2683", "score": "0.6803739", "text": "def test_create_patient(self):\n requests = mock.Mock()\n requests.post.side_effect = RequestException()\n info = mock.Mock(\n updates={'sex': 'M', 'dob': '1918-07-18'},\n extra_fields={},\n )\n case_config = copy.deepcopy(CASE_CONFIG)\n case_config['patient_identifiers'] = {}\n case_config['person_preferred_name'] = {}\n case_config['person_preferred_address'] = {}\n case_config['person_attributes'] = {}\n\n case_config['person_properties']['gender']['direction'] = DIRECTION_IMPORT\n case_config['person_properties']['birthdate']['direction'] = DIRECTION_EXPORT\n case_config = OpenmrsCaseConfig(case_config)\n\n with 
self.assertRaises(RequestException):\n create_patient(requests, info, case_config)\n requests.post.assert_called_with(\n '/ws/rest/v1/patient/',\n json={'person': {'birthdate': '1918-07-18'}},\n raise_for_status=True,\n )", "title": "" }, { "docid": "ef64440a38dfefe093f8a44005e624df", "score": "0.67077416", "text": "def create_medical_record_for_patient():\n if request.method == 'POST':\n patient_id = request.form['patient_id']\n description = request.form['description']\n\n response_create_medical_record = requests.post(server_url + 'doctor/create_medical_record', json={\n 'patient_id': patient_id,\n 'description': description\n })\n response_create_medical_record = response_create_medical_record.json()\n if response_create_medical_record.get('Status') == \"INVALID_PATIENT_ID\":\n return render_template('doctors/create_record_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('/doctors/patient_profile.html')", "title": "" }, { "docid": "e314bf87f3749fef115e019e55d68e49", "score": "0.6517233", "text": "def create_doctor():\n first_name = request.json['first_name']\n second_name = request.json['second_name']\n last_name = request.json['last_name']\n email = request.json['email']\n specialization = request.json['specialization']\n calendar_id = request.json['calendar_id']\n\n new_doctor = Doctor(first_name, second_name, last_name, email, calendar_id, specialization)\n db.session.add(new_doctor)\n db.session.commit()\n return doctor_schema.jsonify(new_doctor)", "title": "" }, { "docid": "93165dab28e078ff237c2008c8e28866", "score": "0.6482036", "text": "def test_create_patient(self):\n url = reverse('patient:patient-list')\n data = {\n \"birth_date\": \"1980-05-21\",\n \"patient_name\": \"testpatient2\",\n \"status\": \"A\",\n \"gender\": \"M\",\n \"patient_contact\" : \"+12342134523\"\n }\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Patient.objects.count(), 2)", "title": "" }, { "docid": "7cc4369f460c563fcc769db8d52b6962", "score": "0.63703495", "text": "def test_adding_new_patient(self):\n\n data = {\"dietitian_id\": 1, \"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"jill237@gmail.com\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n result = self.client.post(\"/patient/new-patient\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"registered new patient\", result.data)\n\n data = {\"dietitian_id\": 1, \"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"jsmith@gmail.com\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n result = self.client.post(\"/patient/new-patient\", data=data,\n follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"email address already exists\", result.data)", "title": "" }, { "docid": "96d6731982f1f1941200e0c74ce358de", "score": "0.6301111", "text": "def __init__(self, patient_number):\n logging.info(\"Creating patient {}...\".format(patient_number))\n self.patient_number = patient_number\n self.signals, self.additional_fields = self.get_raw_signals()\n self.mit_bih_labels_str, self.labels_locations, 
self.labels_descriptions = self.get_annotations()\n self.heartbeats = self.slice_heartbeats()\n logging.info(\"Completed patient {}.\\n\\n\".format(patient_number))", "title": "" }, { "docid": "cab55d2eacd496ccc0c080858903f201", "score": "0.62336093", "text": "def create():\n data = request.get_json()\n print(\"DATA: \", data)\n db_helper.insert_new_record(data['first_name'], data['last_name'], data['class_period'], data['current_belt'], data['student_teacher_id'])\n result = {'success': True, 'response': 'Done'}\n return jsonify(result)", "title": "" }, { "docid": "4ec670052ca9691cee066be2bb08823c", "score": "0.621039", "text": "def save(self):\n db = DBStorage()\n p = self.createPatient()\n db.add_prescription(p)", "title": "" }, { "docid": "8a014cc7f3a64198ddd10f514a76ca7a", "score": "0.6187116", "text": "def test_pacient_creation(self):\n url = '/api/v1/pacientes/'\n data = {\n \"name\": \"Victor\",\n \"last_name\": \"Herver\",\n \"mother_name\": \"Segura\",\n \"father_name\": \"Guadalupe Segura Delgado\",\n \"phone\": \"5515336643\",\n \"birthdate\": \"2016-07-16\",\n \"gender\": \"M\",\n \"email\": \"vicherve000r@gmail.com\"\n }\n request = self.client.post(url, data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)", "title": "" }, { "docid": "c77a34d569c06b87d270592f3f825949", "score": "0.61572254", "text": "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "title": "" }, { "docid": "76df14ddb2fac497da0ec3d2febc454b", "score": "0.60350746", "text": "def test_post_patient(client):\n\n access_token = get_access(client, roles=[\"staging\"])\n\n admission = \"5\"\n data = {\"height\": \"15.0\"}\n url = \"patient/\" + admission\n\n response = client.post(\n url, data=json.dumps(data), headers=make_headers(access_token)\n )\n responseData = json.loads(response.data)[\"data\"]\n patient = session.query(Patient).get(admission)\n assert response.status_code == 200\n assert data[\"height\"] == str(patient.height)\n assert admission == str(responseData)", "title": "" }, { "docid": "65c717dbedbb71fca9f2b37c0c2e0b29", "score": "0.60070324", "text": "def post_patient_add(form):\n # Extract data from the form and add updated info.\n LastName = form.LastName.data\n SSN = form.SSN.data\n UpdatedBy = get_current_user()\n UpdatedDateTime = get_current_datetime()\n\n # Get cursor.\n conn = engine.raw_connection()\n cursor = conn.cursor()\n\n # Insert a new row into the crosswalk if one doesn't already exist. The\n # locks are used to ensure there are no race conditions. Cf\n # https://stackoverflow.com/questions/3407857/only-inserting-a-row-if-its-not-already-there\n q = (\"insert into {schema}.PersonCrosswalk \".format(schema=app_schema) +\n \"(PersonID, PatientLastName, PatientSSN, UpdatedBy, UpdatedDateTime) \" +\n \"select newid(), ?, ?, ?, ? \" +\n \"where \" +\n \" not exists \" +\n \" (select 0 \" +\n \" from {schema}.PersonCrosswalk with (updlock, holdlock) \".format(schema=app_schema) +\n \" where PatientLastName = ? and PatientSSN = ?)\")\n params = [LastName, SSN, UpdatedBy, UpdatedDateTime,\n LastName, SSN]\n cursor.execute(q, params)\n\n # Determine the PersonID for the patient.\n q = (\"select PersonID \" +\n \"from {schema}.PersonCrosswalk \".format(schema=app_schema) +\n \"where PatientLastName = ? 
and PatientSSN = ?\")\n params = [LastName, SSN]\n cursor.execute(q, params)\n x = cursor.fetchone()\n print(\"x\", x, file=sys.stderr)\n person_id = x[0]\n cursor.commit()\n\n return person_id", "title": "" }, { "docid": "9b6f2df71668eb47a547c84682cd2dc7", "score": "0.59759456", "text": "def put(self, patient_id):\n args = self.parser.parse_args()\n\n # Convert data_time and validate\n date_of_birth = utils.str2date(args.date_of_birth)\n if date_of_birth is None:\n abort()\n\n return_id = None\n with open_session() as session:\n record = Patient(args.first_name,\n args.last_name,\n args.gender,\n date_of_birth)\n\n session.add(record)\n session.flush()\n session.commit()\n\n return_id = record.id\n\n # Check for error handling\n if return_id is None:\n return gen_response(\"internal server error\")\n else:\n return gen_response({\"id\": return_id})", "title": "" }, { "docid": "ca125f6250c0f555d79fec3760adb2b0", "score": "0.59172237", "text": "def create_patient_appointment():\n if request.method == 'POST':\n patient_email = request.form['patient_email']\n doctor_email = request.form['doctor_email']\n date = request.form['date']\n time = request.form['time']\n\n response = requests.post(server_url + 'patient/create_appointment', json={\n 'patient_email': patient_email,\n 'doctor_email': doctor_email,\n 'date': date,\n 'time': time\n })\n\n response = response.json()\n\n if response.get('Status') == \"DOCTOR_HAS_AN_APPOINTMENT_SELECTED_TIME_SLOT\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"DOCTOR_IS_NOT_AVAILABLE_AT_THAT_TIME\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_PATIENT_EMAIL\":\n return render_template('patients/appointment_failed.html')\n elif response.get('Status') == \"INVALID_DOCTOR_EMAIL\":\n return render_template('patients/appointment_failed.html')\n else:\n referer = request.referrer\n return redirect(referer, code=302)\n else:\n return render_template('patients/dashboard.html')", "title": "" }, { "docid": "740ed662443e34cd8316a6c8f01db05d", "score": "0.5900429", "text": "def post(request, id_patient):\n assoc_physio_with_patient_request = json.loads(request.body.decode('utf-8'))\n\n AssocPhysioWithPatientView.validate_assoc_physio_with_patient_request(\n assoc_physio_with_patient_request)\n\n association_info = PatientPhysioService.assoc_physio_with_patient(\n assoc_physio_with_patient_request, id_patient)\n\n return JsonResponse(association_info, safe=False)", "title": "" }, { "docid": "e697a0f2a2eec25a73639f88edfa676f", "score": "0.5893371", "text": "def createobj(self, firstname='', middlename='', lasttname='', email='', phno='', gender='', address='',\n peronjson={}, notes='', tenantid=''):\n personobj = {'firstname': firstname, 'middlename': middlename, 'lasttname': lasttname, 'email': email, 'phno':\n phno, 'gender': gender, 'address': address, 'peronjson': peronjson, 'notes': notes, 'tenantid': tenantid}\n self.persontdetailscoll.insert(tenantobj, safe=True)", "title": "" }, { "docid": "3c86753b2f43b91a9d500e8818a392f8", "score": "0.58394384", "text": "def save_patient(self, patient_record):\n drf = self._get_drivers_factory(self.patients_repository)\n with drf.get_driver() as driver:\n patient_record.record_id = driver.add_record(driver.encode_record(patient_record))\n return patient_record", "title": "" }, { "docid": "0c23a71098ef096dc76b76488c569e64", "score": "0.58058333", "text": "def create():", "title": "" }, { "docid": 
"0c23a71098ef096dc76b76488c569e64", "score": "0.58058333", "text": "def create():", "title": "" }, { "docid": "7cdff79b8dbeb263b12a04d35aa72409", "score": "0.579701", "text": "def create(request: PlantRequestCreate) -> Plant:\n logger.debug(f'Executing Plant create with request:{request}')\n return Plant(request.name, request.bed_id)", "title": "" }, { "docid": "af1c1de6c1dfab9d6152ccd964410630", "score": "0.5783346", "text": "def create_appointment():\n\n form = AppointmentForm()\n\n if form.validate_on_submit():\n\n appointment = Appointment(\n title = form.title.data,\n description = form.description.data,\n location = form.location.data,\n start = form.start.data,\n client = form.client.data,\n user = current_user\n )\n\n try:\n db.session.add(appointment)\n db.session.commit()\n\n flash('Successfully created the appointment.')\n\n return redirect(url_for('appointment.read_appointments'))\n except:\n flash('Error creating the appointment')\n\n return render_template('appointments/form.html.j2', form=form, title='Create appointment')", "title": "" }, { "docid": "4414fe1f0cef6b7a36f83cdd4ce70885", "score": "0.576393", "text": "def create_person(self):", "title": "" }, { "docid": "ad6bb0d4f8d82eb135bab88ebc136229", "score": "0.57421815", "text": "def allocate_patient(self, patient):\n \n # Reset patient allocation status\n patient.session = 'none'\n patient.unallocated_to_session = True\n \n # Define dictionary to call function based on patient's cov status\n func_dict = {'negative': self.allocate_cov_neg_patient,\n 'positive': self.allocate_cov_pos_patient,\n 'recovered': self.allocate_cov_neg_patient}\n \n function_to_call = func_dict[patient.status]\n function_to_call(patient)\n \n # Unallocated patients\n if patient.unallocated_to_session:\n self._pop.unallocated_patients.append(patient)\n \n # Add allocated unit location\n if patient.current_unit != 'HOME':\n patient.current_unit_location = self.unit_location_lookup[patient.current_unit]\n \n # Add travel times to non-home for allocated patients:\n if patient.unallocated_to_session == False and (\n patient.current_unit_location != patient.default_unit_location):\n patient.displaced = True\n patient.current_travel_time = \\\n self.travel_times.loc[patient.location][patient.current_unit_location]\n patient.displaced_additional_time = \\\n patient.current_travel_time - patient.default_time\n self._pop.displaced_patients.append(patient)", "title": "" }, { "docid": "a1fe6627b29eaf5969824992dc67e555", "score": "0.57205594", "text": "def create_new_record(account,userName,password):\n new_record = Records(account,userName,password)\n return new_record", "title": "" }, { "docid": "12ad0efea84938006e8961ef865be097", "score": "0.5698703", "text": "def create():\n pass", "title": "" }, { "docid": "5d39fa23872046396e973473da280b65", "score": "0.5686724", "text": "def create(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "0d8a7cf1ae0368c972e2113cae987d96", "score": "0.56644845", "text": "def create(self, **attributes):\n return self.save(self.model(**attributes))", "title": "" }, { "docid": "4522695e46ef2885944d70d26059bae8", "score": "0.56620395", "text": "def create_appointment_form(request, post):\n # string_date = \"{0}-{1}-{2}\".format(year, month, day)\n # date = datetime.datetime.strptime(string_date, '%Y-%m-%d').date()\n new_appointment = None\n date_string = post.get(\"date\") + \"-\" + post.get(\"time\")\n date = datetime.datetime.strptime(date_string, '%Y-%m-%d-%H:%M')\n the_user = request.user\n notes = 
post.get(\"notes\")\n\n if the_user.userprofile.is_doctor():\n patient_id = int(post.get(\"patient\", the_user.pk))\n patient = User.objects.get(pk=patient_id)\n doctor = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n elif request.user.userprofile.is_patient():\n doctor_id = int(post.get(\"doctor\", the_user.pk))\n doctor = User.objects.get(pk=doctor_id)\n patient = User.objects.get(pk=the_user.id)\n new_appointment = Appointment.objects.create(date=date, doctor=doctor, patient=patient, notes=notes)\n\n return new_appointment", "title": "" }, { "docid": "6cb850bf97fb36360675f023675146d4", "score": "0.5661561", "text": "def create(self):\n\n pass", "title": "" }, { "docid": "40504c0e789b0c415a9f838b5c50adf8", "score": "0.56577677", "text": "def create(self, **kwargs):\n return self.save(self.new(**kwargs))", "title": "" }, { "docid": "a319f4ffcad6bcc36310ae73f9f04cd9", "score": "0.56198823", "text": "def insert_patient(service, command_params, user_name, cc_events):\n event = current_events(service, command_params[0])\n begin = event['start'][\"dateTime\"]\n begin = begin.split(\"T\")\n date = begin[0]\n time = begin[1][:5]\n\n slots = user_pre_slotted(cc_events, user_name)\n if already_booked(slots, event[\"attendees\"], user_name) == False:\n print(f\"You have already joined a slot on '{date}' at '{time}'.\")\n return\n\n if fully_booked(slots, event[\"attendees\"], user_name) == False:\n print(f\"Sorry this event is fully booked.\")\n return\n\n event['attendees'].append({'email': f'{user_name}@student.wethinkcode.co.za'})\n\n if do_you_have_meetings(service, date, time, user_name) == False:\n print(\"You already have a meeting at this time in your calendar.\")\n return\n \n try:\n service.events().update(calendarId='teamtwotesting@gmail.com', eventId=event['id'], body=event, maxAttendees=2, sendUpdates='all', sendNotifications=True, alwaysIncludeEmail=True).execute()\n print(\"You have successfully joined the meeting\")\n except:\n print(\"No event with that name was found\")\n\n return", "title": "" }, { "docid": "e4d7ac130cf3757f9d9db8f347279353", "score": "0.56083864", "text": "def test_creating_new_dietitian(self):\n\n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"jill23@gmail.com\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\"}\n\n dietitian_id = create_new_dietitian_account(form_data)\n\n self.assertEqual(2, dietitian_id)", "title": "" }, { "docid": "1021941e90f16ced6e983b6c1e983c22", "score": "0.55837417", "text": "def create(cls, *args, **kwargs):\r\n return cls(*args, **kwargs).save()", "title": "" }, { "docid": "b3c114fcb8daeae4f40d1d0e21dfc6f0", "score": "0.5576966", "text": "def create_an_incident(self):\n sql = \"\"\"INSERT INTO incidences (createdOn,\\\n createdBy,\\\n type,\\\n location,\\\n status,\\\n comment)\\\n VALUES(\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\',\\'%s\\')\n RETURNING id\"\"\" % (\n self.createdOn,\n self.createdBy,\n self.incidence_type,\n self.location,\n self.status,\n self.comment\n )\n conn = self.db_obj.con\n curr = conn.cursor()\n curr.execute(sql, self)\n conn.commit()", "title": "" }, { "docid": "5f52b757fb2f30f9b11ea22ffcab9b14", "score": "0.55596465", "text": "def create(self):\n pass", "title": "" }, { "docid": "5f52b757fb2f30f9b11ea22ffcab9b14", "score": "0.55596465", "text": "def create(self):\n pass", "title": "" }, { 
"docid": "5f52b757fb2f30f9b11ea22ffcab9b14", "score": "0.55596465", "text": "def create(self):\n pass", "title": "" }, { "docid": "6b23fec7eb7cf40c4c3184aa14fe7d78", "score": "0.555189", "text": "def create(self, datastore, **kwargs):\n return self.save(datastore, (self.new(**kwargs)))", "title": "" }, { "docid": "31241969aa422b8a83d6fa901e546bbc", "score": "0.5540743", "text": "def create(self, validated_data):\n return Speaker.objects.create(**validated_data)", "title": "" }, { "docid": "49f567c8ea0bb3af8adf67b97b5edded", "score": "0.55387104", "text": "def create(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "11419b75365852c18264965196e23eb2", "score": "0.5531", "text": "def create(self, validated_data):\n place_data = validated_data.pop('place')\n place = Place.objects.create(**place_data)\n place.save()\n animal_report = AnimalReport.objects.create(place=place,\n **validated_data)\n animal_report.save()\n return animal_report", "title": "" }, { "docid": "69fcffcbc8e5fc486efca4fdcfeff892", "score": "0.55293274", "text": "def create(cls, **kwargs):\r\n return cls().fill(**kwargs).save()", "title": "" }, { "docid": "9c72f9659a4aef4b2c89225d5b506b9e", "score": "0.5509934", "text": "def create_patron(email=None):\n email = \"patron-email\" if email is None else email\n return Patron.objects.create(name=\"patron-name\", email=email, comments=\"\")", "title": "" }, { "docid": "2c335031ca91a67113f396d588b2b746", "score": "0.55095816", "text": "def insert_person():\r\n body = request.get_json()\r\n\r\n try:\r\n INSERT_PERSON_SCHEMA.validate(body)\r\n except SchemaError as err:\r\n raise ServiceBodyError(str(err))\r\n\r\n with sqlite_client:\r\n person = (body.get('name'), body.get('cpf'))\r\n message = add_person(sqlite_client, person)\r\n\r\n return jsonify({'id': message})", "title": "" }, { "docid": "af49e2f9490f8c309a518f7752cac0bd", "score": "0.5497627", "text": "def create(self, validated_data):\n\n new_courier = Courier(\n courier_id=validated_data['courier_id'],\n courier_type=validated_data['courier_type']\n )\n new_courier.save()\n self.save_hours(validated_data)\n self.save_regions(validated_data)\n return new_courier", "title": "" }, { "docid": "2e8c0b3ee15c14bc7ef7469d0c8a8593", "score": "0.549733", "text": "def test_adding_patient_goals(self):\n\n data = {\"goal-body\": \"New goal body.\"}\n result = self.client.post(\"/patient/1/add-goal.json\", data=data)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"New goal\", result.data)", "title": "" }, { "docid": "eef8237396d694be2edc07d95832de95", "score": "0.54894924", "text": "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "title": "" }, { "docid": "eef8237396d694be2edc07d95832de95", "score": "0.54894924", "text": "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "title": "" }, { "docid": "eef8237396d694be2edc07d95832de95", "score": "0.54894924", "text": "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "title": "" }, { "docid": "def2eddc3b73f0fb016ccfc6b5d94a8b", "score": "0.54849267", "text": "def create(self):\n resource_name = self.__class__.__name__.lower()\n payload = {resource_name: self.to_dict()}\n new_attributes = self.api.post(self.path, payload, self.http_headers())\n self.error = None\n self.merge(new_attributes)\n return self.success()", "title": "" }, { "docid": "4eadea0472035c475807cab0beb97773", "score": "0.548148", "text": "def create(self, validated_data):\n return 
Prospect.objects.create(**validated_data)", "title": "" }, { "docid": "a58069ea12dd142c76f0332ee738adab", "score": "0.5471179", "text": "def create(cls, **kwargs):\n instance = cls(**kwargs)\n instance.save()\n return instance", "title": "" }, { "docid": "35687ca9bed37badd5f61106c8cf0cba", "score": "0.54706717", "text": "def create_report():\n\n LocalCreateReportForm = CreateReportForm.get_instance()\n for department in Department.query.all():\n if len(department.fields) > 0:\n LocalCreateReportForm.add_department(department)\n\n form = LocalCreateReportForm()\n form.user_id.data = current_user.id\n if form.validate_on_submit():\n # Add the new report to the database\n db.session.add(form.report)\n db.session.commit()\n\n return redirect(url_for('reports.my_reports'))\n else:\n flash_form_errors(form)\n return render_template('reports/create.html', form=form)", "title": "" }, { "docid": "8b2ede49ec741691be9b119f27c08dc0", "score": "0.5467763", "text": "def post(self, request):\n data = request.data\n try:\n career_planning = CareerPlanning(**data)\n career_planning.save()\n LOGGER.info(\"CareerPlanning created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})", "title": "" }, { "docid": "d325e6226fca58aac3dfe33ac133a6e9", "score": "0.5455954", "text": "def writePatientData(self):\n print \"adding SMART data to data profile %s: %s\"%(self.pid, self.fullname)\n \n if not self.populated_p:\n self._populate()\n \n # create a directory for the patient\n OUTPUT_DIR = os.path.join(self.output_dir, \"patient_%s\"%self.pid)\n try:\n os.mkdir(OUTPUT_DIR)\n\n # create a demographics file\n with open(os.path.join(OUTPUT_DIR, \"Demographics.xml\"), 'w') as demo:\n demo.write(self.demographics_doc)\n\n # create the rest of the data:\n for i, doc in enumerate(self.data):\n with open(os.path.join(OUTPUT_DIR, \"doc_%s.xml\"%i), 'w') as d:\n d.write(doc)\n\n except OSError:\n print \"Patient with id %s already exists, skipping...\"%self.pid", "title": "" }, { "docid": "4b9ff88366afd0ec0c1514f517338ecc", "score": "0.5440148", "text": "def participant_create(request):\n if request.method == 'POST':\n serializer = ParticipantSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n participant = get_object_or_404(Participant, email=serializer.data['email'])\n serializer = ParticipantSerializer(participant, data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)\n else:\n content = {'detail': config.PARTICIPANT_ALREADY_REGISTERED_OR_BAD_REQUEST}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "35242e0e489ab69e4c79e983b89dee1a", "score": "0.54116076", "text": "def create(self, validated_data):", "title": "" }, { "docid": "6e4ad9344befa97fa4a57064d9821bb1", "score": "0.5406388", "text": "def create(self, person):\n current_id = self._generate_person_id()\n self._timeline.add_event({\n 'type': EventTimeLine.PERSON_CREATION,\n 'personId': current_id,\n 'status': person.status,\n 'address': person.address.to_dict(),\n 'name': person.name.to_dict()\n })\n return current_id", "title": "" }, { "docid": "c1891742d6089d4a99caa548e3b5b530", "score": "0.5405952", "text": "def test_perform_create(self):\n 
data = {\n 'name': 'Jane Joe',\n 'crm': 1234,\n 'email': 'jane@joe.com',\n 'phone': '+55998754128'\n }\n response = self.unath_client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "title": "" }, { "docid": "4969a9259b982dfb2ada20d4565d7c11", "score": "0.5395102", "text": "def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)", "title": "" }, { "docid": "d0c7225a48a887cbe3154085510e961e", "score": "0.5390685", "text": "def createDonor():\n\n form = CreateDonorForm()\n if form.validate_on_submit():\n donor = Donor(first_name=form.first_name.data.lower(), last_name=form.last_name.data.lower(), email=form.email.data.lower(),\n age=form.age.data, blood_type=form.blood_type.data)\n db.session.add(donor)\n db.session.commit()\n send_donor_email(donor)\n flash(f'Donor Added To Database', category='Success')\n return redirect(url_for('DonorPage', donor_id=donor.id))\n return render_template('new_donor.html', title=\"Register\", form=form)", "title": "" }, { "docid": "72a2046eb08dd4a8db3d7bf4eb7fb3b5", "score": "0.53826076", "text": "def test_adding_patient_posts(self):\n\n data = {\"meal-time\": \"2020-02-25 08:00:00\", \n \"meal-setting\": \"At home!\", \"TEB\": \"Some thoughts..\",\n \"hunger\": 2, \"fullness\": 8, \"satisfaction\": 5,\n \"meal-notes\": \"Some notes.\"}\n \n result = self.client.post(\"/post/new-post\", data=data,\n follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Post added successfully\", result.data)", "title": "" }, { "docid": "9878a11d0dbfc27991843442696db830", "score": "0.537984", "text": "def create_teacher(username, password, email, preferred_language,skype_id,name, phone_number, country,availability):\n person.create_person(username,password,email,preferred_language,skype_id,name,phone_number,country)\n teacher_account_id = person.get_last()\n query = 'INSERT INTO teacher VALUES( %s,%s );'\n args = (teacher_account_id, availability)\n database.connection.save_data(query, args)", "title": "" }, { "docid": "fe972f2efee63e37d365bcb5f181d381", "score": "0.53771555", "text": "def create_question():\n if request.content_type != \"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201", "title": "" }, { "docid": "819136d8db974e075a470dc40a23a35b", "score": "0.5374074", "text": "def add_patient(self, patient):\n if isinstance(patient, Patient):\n self._patient_list.append(patient)\n self.calculate_avg_cholesterol()", "title": "" }, { "docid": "466db07307fd9f076d1a3bb997db63de", "score": "0.5370152", "text": "def createRoom():\n\n global room_creation_params\n\n # get POST body\n profid = request.args.get(\"pid\")\n questions = request.args.get('q')\n\n # keep params for callback\n room_creation_params[\"profid\"] = profid\n room_creation_params[\"questions\"] = questions\n\n return redirect(stripe_test_URL) # redirect to stripe payment confirmation page", "title": "" }, { "docid": "12626863e4f64f84657c3fa3529286bb", "score": "0.5365115", "text": 
"def test_new_contact_association(self):\n node = self.create_xml_patient({'Mobile_Number': '12223334444',\n 'Pin_Code': '4444'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertTrue(patient.contact is not None)\n self.assertEqual(patient.contact.phone, '+12223334444')\n self.assertEqual(patient.contact.pin, '4444')", "title": "" }, { "docid": "8aa5e3a9ec70ce5e91bdd3d56a9ff9c1", "score": "0.536304", "text": "def post(self):\n\n body = request.get_json(force=True)\n\n # Deserialize\n try:\n d = DiagnosisSchema(strict=True).load(body).data\n # Request body not valid\n except ValidationError as e:\n abort(400, 'could not create diagnosis: {}'.format(e.messages))\n\n # Add to and save in database\n db.session.add(d)\n db.session.commit()\n\n return DiagnosisSchema(201, 'diagnosis {} created'\n .format(d.kf_id)).jsonify(d), 201", "title": "" }, { "docid": "0750b522e7ff24ac243f8a8dfa097533", "score": "0.53591514", "text": "def add_hospital(request):\n if request.POST:\n post = request.POST\n name = post.get(\"name\")\n address = post.get(\"address\")\n city = post.get(\"city\")\n state = post.get(\"state\")\n zip = post.get(\"zip\")\n hospital = Hospital.objects.create(\n name=name,\n address=address,\n city=city,\n state=state,\n zip=zip\n )\n\n if hospital:\n return redirect('add_hospital')\n\n return render(request, 'add_hospital.html')", "title": "" }, { "docid": "0a65c4082a82fdb43155a4248b447c1f", "score": "0.5320365", "text": "def create_participant(name='Not Brian', email='test@email.com') ->\\\n Participant:\n participant = Participant(name=name, email=email)\n return participant", "title": "" }, { "docid": "5afb1f53804af1d44bb2786767658de9", "score": "0.5315677", "text": "def create_new_trial(\n self, study_id: int, template_trial: Optional[\"FrozenTrial\"] = None\n ) -> int:\n raise NotImplementedError", "title": "" }, { "docid": "432184efe3adce4babdb70a90913967a", "score": "0.53069377", "text": "def create(self):\n \n # create the sequence structure by calling the self.project.create\n self.project.create()", "title": "" }, { "docid": "5b4bebc030adb9b66f7f0664378cafd2", "score": "0.5304375", "text": "def create_appointment():\n\n msg = render_template('date')\n return question(msg)", "title": "" }, { "docid": "6c52f85805557fefd67c9f73c86ea6a7", "score": "0.53037214", "text": "def create_question():\n body = request.get_json()\n\n question_text = body.get('question', None)\n answer = body.get('answer', None)\n category = body.get('category', 1)\n difficulty = body.get('difficulty', 1)\n\n try:\n question = Question(question=question_text,\n answer=answer,\n category=category,\n difficulty=difficulty)\n question.insert()\n\n selection = Question.query.order_by(Question.id).all()\n current_questions = paginate_questions(request, selection)\n\n return jsonify({\n 'success': True,\n 'created': question.id,\n 'questions': current_questions,\n 'total_questions': len(selection)\n })\n\n except Exception:\n abort(422)", "title": "" }, { "docid": "a9f8548cbb4b8e5e89237f9057260871", "score": "0.53035647", "text": "def create_note(self, noteTitle, note, date):\n user = User.objects.create(username='userdemo')\n user.set_password('calnote24')\n user.save()\n Note.objects.create(noteTitle=noteTitle, note=note, date=date, user_id=user.id)", "title": "" }, { "docid": "128cdd786e25e1937d8d3ea0c60959b6", "score": "0.53002936", "text": "def create(self):\n ...", "title": "" }, { "docid": "877de1db4eea8ae13ebfa0130305c3ec", 
"score": "0.5297617", "text": "def create(self, validated_data):\n return Advisor.objects.create(**validated_data)", "title": "" }, { "docid": "d5e816bacad5248f1bae9633fa5b52ef", "score": "0.5293215", "text": "def create_individual(self):\n pass", "title": "" }, { "docid": "6dd98b3467ba44ffc5a0c581eb4a5f2a", "score": "0.528758", "text": "def create_pet(pet_name, pet_type, pet_breed, pet_gender, \n pet_color, pet_status, pet_image, last_address):\n\n pet = Pet(pet_name=pet_name,\n pet_type=pet_type, \n pet_breed=pet_breed, \n pet_gender=pet_gender,\n pet_color=pet_color,\n pet_status=pet_status,\n pet_image=pet_image,\n last_address=last_address)\n\n db.session.add(pet)\n db.session.commit()\n\n return pet", "title": "" }, { "docid": "9b44ed4d80077305b9bb70a93f94c0f7", "score": "0.5281986", "text": "def post(self):\n app.logger.info('Request to Create a Pet')\n content_type = request.headers.get('Content-Type')\n if not content_type:\n abort(status.HTTP_400_BAD_REQUEST, \"No Content-Type set\")\n\n data = {}\n # Check for form submission data\n if content_type == 'application/x-www-form-urlencoded':\n app.logger.info('Processing FORM data')\n data = {\n 'name': request.form['name'],\n 'category': request.form['category'],\n 'available': request.form['available'].lower() in ['true', '1', 't']\n }\n elif content_type == 'application/json':\n app.logger.info('Processing JSON data')\n data = request.get_json()\n else:\n message = 'Unsupported Content-Type: {}'.format(content_type)\n app.logger.info(message)\n abort(status.HTTP_400_BAD_REQUEST, message)\n\n pet = Pet()\n try:\n pet.deserialize(data)\n except DataValidationError as error:\n raise BadRequest(str(error))\n pet.save()\n app.logger.info('Pet with new id [%s] saved!', pet.id)\n location_url = api.url_for(PetResource, pet_id=pet.id, _external=True)\n return pet.serialize(), status.HTTP_201_CREATED, {'Location': location_url}", "title": "" }, { "docid": "93ce04e4315abb616464ac30e70377a6", "score": "0.52749753", "text": "def test_create(self):\n isok, response = self.onep.create(\n self.cik,\n 'client',\n {\n 'writeinterval': 'inherit',\n 'name': 'testclient',\n 'visibility': 'parent',\n 'limits': {\n 'dataport': 'inherit',\n 'datarule': 'inherit',\n 'dispatch': 'inherit',\n 'disk': 'inherit',\n 'io': 'inherit',\n 'share': 'inherit',\n 'client': 'inherit',\n 'sms': 'inherit',\n 'sms_bucket': 'inherit',\n 'email': 'inherit',\n 'email_bucket': 'inherit',\n 'http': 'inherit',\n 'http_bucket': 'inherit',\n 'xmpp': 'inherit',\n 'xmpp_bucket': 'inherit'}\n })\n client_rid = response\n self.assertTrue(isok, 'client creation succeeded')\n self.assertTrue(re.match(\"^[0-9a-f]{40}$\", client_rid), 'rid is formatted correctly')\n\n isok, response = self.onep.info(\n self.cik,\n client_rid,\n {'key': True}\n )\n client_cik = response['key']\n\n # Add a dataport\n isok, response = self.onep.create(\n client_cik,\n 'dataport',\n {\n 'format': 'string',\n 'retention': {\n 'count': 'infinity',\n 'duration': 'infinity',\n },\n 'limits': {\n 'dataport': 'inherit',\n 'datarule': 'inherit',\n 'dispatch': 'inherit',\n 'disk': 'inherit',\n 'io': 'inherit',\n 'share': 'inherit',\n 'client': 'inherit',\n 'sms': 'inherit',\n 'sms_bucket': 'inherit',\n 'email': 'inherit',\n 'email_bucket': 'inherit',\n 'http': 'inherit',\n 'http_bucket': 'inherit',\n 'xmpp': 'inherit',\n 'xmpp_bucket': 'inherit',\n }\n }\n )\n dataport_rid = response\n self.assertTrue(isok, 'dataport creation succeeded')\n self.assertTrue(re.match(\"^[0-9a-f]{40}$\", dataport_rid), 'rid is 
formatted correctly')", "title": "" }, { "docid": "56e59f202986da5bdac4c3ebd10c466b", "score": "0.5271654", "text": "def test_showing_patient_registration(self):\n\n result = self.client.get(\"/patient/new-patient\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Register a New Patient\", result.data)", "title": "" }, { "docid": "71a5155714d37a9621abed6b574f4d90", "score": "0.5255513", "text": "def do_create(self, subject):\n model = self.subject_type.model_class()\n subject = model.objects.create(**dict(self.action_params))\n\n return subject", "title": "" }, { "docid": "511b467ee34a957d47bb26ba040497b4", "score": "0.52541316", "text": "def new_record(request, patient_id):\n if (request.user.patient_username.id != patient_id):\n Logs.objects.create(type='READ', user_id=request.user.uid, interface='PATIENT', status=STATUS_ERROR, details='[New Record] Logged in user does not match ID in URL. URL ID: ' + str(patient_id))\n return redirect('/patient/login/')\n\n patient = patient_does_not_exists(patient_id)\n\n form = CreateNewRecord(request.POST)\n\n if request.method == 'POST':\n if form.is_valid():\n type = form.cleaned_data['type']\n\n if (type == 'Readings'):\n return redirect('new_readings_record', patient_id=patient_id)\n elif (type == 'TimeSeries'):\n return redirect('new_timeseries_record', patient_id=patient_id)\n elif (type == 'Images'):\n return redirect('new_images_record', patient_id=patient_id)\n elif (type == 'Videos'):\n return redirect('new_videos_record', patient_id=patient_id)\n elif (type == 'Documents'):\n return redirect('new_documents_record', patient_id=patient_id)\n\n Logs.objects.create(type='UPDATE', user_id=patient.username.uid, interface='PATIENT', status=STATUS_OK, details='[New Record]')\n else:\n Logs.objects.create(type='UPDATE', user_id=patient.username.uid, interface='PATIENT', status=STATUS_ERROR, details='[New Record] Invalid Form')\n\n context = {\n 'form': form,\n 'patient': patient,\n }\n\n return render(request, 'new_record.html', context)\n\n Logs.objects.create(type='READ', user_id=patient.username.uid, interface='PATIENT', status=STATUS_OK, details='[New Record] Render Form')\n\n context = {\n 'form': form,\n 'patient': patient,\n }\n\n return render(request, 'new_record.html', context)", "title": "" }, { "docid": "2e5c85d299a684ad0c09e05cdd9fd7ec", "score": "0.5247617", "text": "def create(self):\n self.created_date = timezone.now()\n self.save()", "title": "" }, { "docid": "7d1167db4af1891d72fe1301bc799556", "score": "0.52459395", "text": "def test_create_record(self):\n pass", "title": "" }, { "docid": "0ddc0b4655b52d4eec48f1609ba426d1", "score": "0.52403265", "text": "def create_record(self, name, zone, type, data, extra=None):\n params = {\"type\": self.RECORD_TYPE_MAP[type], \"name\": name, \"data\": data}\n if extra:\n try:\n params[\"priority\"] = extra[\"priority\"]\n except KeyError:\n params[\"priority\"] = None\n try:\n params[\"port\"] = extra[\"port\"]\n except KeyError:\n params[\"port\"] = None\n try:\n params[\"weight\"] = extra[\"weight\"]\n except KeyError:\n params[\"weight\"] = None\n\n if \"ttl\" in extra:\n params[\"ttl\"] = extra[\"ttl\"]\n\n res = self.connection.request(\n \"/v2/domains/%s/records\" % zone.id, data=json.dumps(params), method=\"POST\"\n )\n\n return Record(\n id=res.object[\"domain_record\"][\"id\"],\n name=res.object[\"domain_record\"][\"name\"],\n type=type,\n data=data,\n zone=zone,\n ttl=res.object[\"domain_record\"].get(\"ttl\", None),\n driver=self,\n extra=extra,\n )", "title": "" }, 
{ "docid": "41beb21a95f1f5d06eae97b01e60ee33", "score": "0.52390325", "text": "def post(self):\n dao = ClaimDao()\n return dao.create(api.payload)", "title": "" }, { "docid": "2b9df7e11b5430408d46bcc19a2423ef", "score": "0.5236254", "text": "def create_person(params):\n form = dict()\n form['gender'] = params.get('gender', None)\n form['title'] = params.get('title', None)\n\n return form", "title": "" }, { "docid": "b1e5c4834b85415a765e217e4a49ac74", "score": "0.5232018", "text": "def test_meeting_registrant_create(self):\n pass", "title": "" }, { "docid": "657daba58d3bafe9ce9294bd46671b12", "score": "0.52311075", "text": "def post(self):\n app.logger.info('Request to Create a Pet')\n content_type = request.headers.get('Content-Type')\n if not content_type:\n abort(status.HTTP_400_BAD_REQUEST, \"No Content-Type set\")\n\n data = {}\n # Check for form submission data\n if content_type == 'application/x-www-form-urlencoded':\n app.logger.info('Processing FORM data')\n app.logger.info(type(request.form))\n app.logger.info(request.form)\n data = {\n 'name': request.form['name'],\n 'category': request.form['category'],\n 'available': request.form['available'].lower() in ['yes', 'y', 'true', 't', '1']\n }\n elif content_type == 'application/json':\n app.logger.info('Processing JSON data')\n data = request.get_json()\n else:\n message = 'Unsupported Content-Type: {}'.format(content_type)\n app.logger.info(message)\n abort(status.HTTP_400_BAD_REQUEST, message)\n\n pet = Pet()\n try:\n pet.deserialize(data)\n except DataValidationError as error:\n raise BadRequest(str(error))\n pet.create()\n app.logger.info('Pet with new id [%s] saved!', pet.id)\n location_url = api.url_for(PetResource, pet_id=pet.id, _external=True)\n return pet.serialize(), status.HTTP_201_CREATED, {'Location': location_url}", "title": "" }, { "docid": "8fbf4fa9cc7a941c1a3b5e30b2852b66", "score": "0.52303135", "text": "def createNewRecord(eventID=None):\n eventID=cleanRecordID(eventID)\n rec = None\n if eventID > 0:\n #test that countEvent record exits\n cnt = CountEvent.query.filter(CountEvent.ID == eventID).count()\n if cnt > 0:\n rec = Assignment(eventID,getUID())\n db.session.add(rec)\n else:\n flash(printException(\"Invalid countEvent ID during Count Event creation.\",\"error\"))\n \n return rec", "title": "" }, { "docid": "59a21c10cc080aac3e372a3b344bf7c4", "score": "0.52181023", "text": "def create(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"create\"), kwargs)", "title": "" }, { "docid": "59a21c10cc080aac3e372a3b344bf7c4", "score": "0.52181023", "text": "def create(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"create\"), kwargs)", "title": "" } ]
4ea9e42b0e93fcfff85b39a7b86c1d6c
Cross rows and columns on an iteration to obtain the diagonal
[ { "docid": "0d56d91b140566eb4cbc70485a88c8ef", "score": "0.63815534", "text": "def get_inv_diagonal(rows, columns):\n diagonal_boxes = []\n j = len(rows) - 1\n for i in range(len(rows)):\n diagonal_boxes.append(rows[i] + columns[j])\n j = j - 1\n return diagonal_boxes", "title": "" } ]
[ { "docid": "e64b4d30af1573095f8b90fb69ceed39", "score": "0.676744", "text": "def _diags(self):\n indices = jnp.arange(self.dim)[:,jnp.newaxis]\n bin_reps = (indices >> jnp.arange(self.N)[::-1]) & 1\n spins = 1 - 2 * bin_reps\n spins_prime = jnp.hstack( (spins[:,1:] , spins[:,0:1]) )\n self.diag_elements = -(spins * spins_prime).sum(axis=1)", "title": "" }, { "docid": "b09aee24778ba979e29b1d06c4fb54e5", "score": "0.66441387", "text": "def make_diagonal(x):\n m = np.zeros((len(x), len(x)))\n for i in range(len(m[0])):\n m[i, i] = x[i]\n return m", "title": "" }, { "docid": "9cf3968001331f24c2ac9b3f8a3c027a", "score": "0.65205795", "text": "def diag(ctx, diagonal, **kwargs):\n A = ctx.matrix(len(diagonal), **kwargs)\n for i in xrange(len(diagonal)):\n A[i,i] = diagonal[i]\n return A", "title": "" }, { "docid": "28533bdbc1237b812868c90501551066", "score": "0.64974296", "text": "def get_diagonal(rows, columns):\n diagonal_boxes = []\n for i in range(len(rows)):\n diagonal_boxes.append(rows[i] + columns[i])\n return diagonal_boxes", "title": "" }, { "docid": "cfec17374cb56696de509bb8c640e2c1", "score": "0.6473868", "text": "def diagonalize(width, height):\n A = createBoard(width, height)\n\n for row in range(height):\n for col in range(width):\n if row == col:\n A[row][col] = 1\n else:\n A[row][col] = 0\n\n return A", "title": "" }, { "docid": "3225d07e2545b37f771f1afdb38b6abe", "score": "0.64582306", "text": "def spiral_diagonals():\n\n yield 1\n\n spiral = count(2)\n skip = 1\n\n while True:\n for _ in range(4):\n yield next(islice(spiral, skip, skip+1))\n\n skip += 2", "title": "" }, { "docid": "3225d07e2545b37f771f1afdb38b6abe", "score": "0.64582306", "text": "def spiral_diagonals():\n\n yield 1\n\n spiral = count(2)\n skip = 1\n\n while True:\n for _ in range(4):\n yield next(islice(spiral, skip, skip+1))\n\n skip += 2", "title": "" }, { "docid": "4c6c4cc2b0c68524deca1dd0f282821e", "score": "0.64017594", "text": "def test_get_row_col_diag():", "title": "" }, { "docid": "e1831631932374c9a1eac3bf573c8715", "score": "0.62937367", "text": "def _batch_diag(bmat):\r\n return torch.diagonal(bmat, dim1=-2, dim2=-1)", "title": "" }, { "docid": "73cfa9a11b5a5f524ef90c8165edbdc1", "score": "0.6289989", "text": "def get_diags(self):\n b = [None] * (len(self.board) - 1)\n grid_forward = [b[i:] + r + b[:i] for i, r in enumerate(self.get_rows())]\n forwards = [[c for c in r if c is not None] for r in zip(*grid_forward)]\n grid_back = [b[:i] + r + b[i:] for i, r in enumerate(self.get_rows())]\n backs = [[c for c in r if c is not None] for r in zip(*grid_back)]\n return forwards + backs", "title": "" }, { "docid": "e2b59693cbc812601b4027d0c39346e8", "score": "0.6273086", "text": "def createMatrixDiagonal():\n z=np.diag(1+np.arange(4),k=-1)\n print z", "title": "" }, { "docid": "dfe50b9e553488bde1463fe61ca4ec10", "score": "0.6239426", "text": "def _batch_diag(bmat):\n return torch.diagonal(bmat, dim1=-2, dim2=-1)", "title": "" }, { "docid": "4cbae120ca57eab0f73ff4a6e92efdac", "score": "0.62092847", "text": "def diag(*args):\n return _casadi.diag(*args)", "title": "" }, { "docid": "c32cc8b060f7232c2490757ef5199854", "score": "0.6146213", "text": "def diagonal_matrix(diag):\n\n return diag*identity(len(diag))", "title": "" }, { "docid": "9cb756c5d01fff7fd58e52a43b41ef37", "score": "0.6106345", "text": "def diagonalsPos (matrix, cols, rows):\n for di in ([(j, i - j) for j in range(cols)] for i in range(cols + rows -1)):\n yield [matrix[i][j] for i, j in di if i >= 0 and j >= 0 and i < cols and j < rows]", 
"title": "" }, { "docid": "211a4f232045b703b956b9be5dfbeb98", "score": "0.6098331", "text": "def diagonalsPos(self):\n matrix=self.board\n cols=self.COLS\n rows=self.ROWS\n for di in ([(j, i - j) for j in range(rows)] for i in range(rows + cols - 1)):\n yield [matrix[i][j] for i, j in di if i >= 0 and j >= 0 and i < cols and j < rows]", "title": "" }, { "docid": "fe09405d71784007807d76cfd1aae0c0", "score": "0.6059796", "text": "def diag(X):\n d = numpy.zeros(X.shape[0])\n\n for i in range(X.shape[0]):\n d[i] = X[i, i]\n\n return d", "title": "" }, { "docid": "625e04b76f2ffc843787f0821650754c", "score": "0.6001264", "text": "def intersecao_diagonal_col(tab, filas):\n # intersecao_diagonal_col: tabuleiro x tuplo -> tuplo\n pos_livres = ()\n for i in filas[1]:\n for j in filas[2]:\n # verifica se a posicao de intersecao entre a diagonal e a coluna\n # esta livre e se estiver adiciona-a a um tuplo\n if i == 1 and j == 1:\n if eh_posicao_livre(tab, 1):\n pos_livres += (1,)\n if i == 2 and j == 1:\n if eh_posicao_livre(tab, 5):\n pos_livres += (5,)\n if i == 3 and j == 1:\n if eh_posicao_livre(tab, 9):\n pos_livres += (9,)\n if i == 1 and j == 2:\n if eh_posicao_livre(tab, 7):\n pos_livres += (7,)\n if i == 2 and j == 2:\n if eh_posicao_livre(tab, 5):\n pos_livres += (5,)\n if i == 3 and j == 2:\n if eh_posicao_livre(tab, 3):\n pos_livres += (3,)\n return pos_livres", "title": "" }, { "docid": "28032da22342555dbb5e74ee974a9383", "score": "0.5998008", "text": "def diagonals(self):\n first_diagonal = [self.field[i][i] for i in range(3)]\n second_diagonal = [self.field[2][0], self.field[1][1], self.field[0][2]]\n return [first_diagonal, second_diagonal]", "title": "" }, { "docid": "1ac63665e9226b495692b105e74c1aae", "score": "0.5959447", "text": "def diag(self):\n assert self.ndim == 2\n D = np.empty(self.n, dtype=object)\n for i,Ki in enumerate(self.K):\n if hasattr(Ki, \"diag\"):\n D[i] = Ki.diag()\n else:\n D[i] = np.diag(Ki)\n return KronMatrix(D)", "title": "" }, { "docid": "79915c8b8c72615510d66c18806818e2", "score": "0.5948967", "text": "def get_secondary_diagonal(i, j):\n if j < (NUMBERS_IN_SUM - 1):\n return 0\n if i > (GRID_SIZE - NUMBERS_IN_SUM):\n return 0\n return functools.reduce(\n operator.mul,\n (grid[i + x][j - x] for x in range(0, NUMBERS_IN_SUM))\n )", "title": "" }, { "docid": "3dcb9febc04f58d5a177ff15f5b6fe20", "score": "0.59430474", "text": "def kronecker_diag_vector_columns(A: ndarray, X: ndarray):\n return mod.vstack([kronecker_diag_vector(A, X[:, j]) for j in range(X.shape[1])]).T", "title": "" }, { "docid": "fe3a7aaafd9b53588ecd807aefc4608a", "score": "0.5926964", "text": "def diag(self, c, ha):\n return divmod((c[1]-c[0])-(ha[1]-ha[0])-1, self.k+1)[0]", "title": "" }, { "docid": "7e8700ff6df3d58cff6f0c5d87013596", "score": "0.58981705", "text": "def generate_main_diagonals(board: np.ndarray):\n main_diagonals = []\n main_diagonals.append([board[i, i] for i in range(0, 6)])\n main_diagonals.append([board[i, i - 1] for i in range(1, 6)])\n main_diagonals.append([board[i, i - 2] for i in range(2, 6)])\n main_diagonals.append([board[i, i + 1] for i in range(0, 6)])\n main_diagonals.append([board[i, i + 2] for i in range(0, 5)])\n main_diagonals.append([board[i, i + 3] for i in range(0, 4)])\n return main_diagonals", "title": "" }, { "docid": "4cd2ce9b2536cba42d68e6f175858a36", "score": "0.58962685", "text": "def rowdiag(arr: Array, k: int = 0) -> Array:\n return np.diag(arr, k)[:, None]", "title": "" }, { "docid": "2ef016acd5622f843888c8d04332fe44", "score": "0.5894563", 
"text": "def diagonalsNeg(self):\n matrix = self.board\n cols = self.COLS\n rows = self.ROWS\n for di in ([(j, i - cols + j + 1) for j in range(cols)] for i in range(cols + rows - 1)):\n yield [matrix[i][j] for i, j in di if i >= 0 and j >= 0 and i < cols and j < rows]", "title": "" }, { "docid": "9141d12f06c4955ac2a8967185c4f98c", "score": "0.58883375", "text": "def iterated_diagonal(self, times=1, coord=1):\n\n if self.degree is None:\n raise TypeError(f'only for homogeneous elements')\n\n if self.arity < coord:\n raise TypeError(f'arity = {self.arity} < coord = {coord}')\n\n answer = self.zero()\n for k, v in self.items():\n left, spx, right = k[:coord - 1], k[coord - 1], k[coord:]\n for p in combinations_with_replacement(range(self.degree + 1), times):\n p = (0,) + p + (self.degree,)\n new_k = []\n for i, j in pairwise(p):\n new_k.append(Simplex(spx[i:j + 1]))\n answer += self.create({left + tuple(new_k) + right: v})\n return answer", "title": "" }, { "docid": "8f3f391221ad4c0c2acde8c074585416", "score": "0.5879887", "text": "def kronecker_diag_tensor(A: ndarray, X: ndarray):\n return X * A", "title": "" }, { "docid": "2c9c8a8aa1fd8409e0cc7d07b335de70", "score": "0.58752453", "text": "def cross_2_matrix(x):\n\n return np.array([ \\\n [0., -x[2], x[1]], \\\n [x[2], 0., -x[0]], \\\n [-x[1], x[0], 0.]])", "title": "" }, { "docid": "e9d0a32ad6e62c4077ea201f84b4a787", "score": "0.5860487", "text": "def diag(P):\n return vmap(jnp.diag)(P)", "title": "" }, { "docid": "dca3e116566d31328c68004c1b8a9d04", "score": "0.58490616", "text": "def diagonalsNeg (matrix, cols, rows):\n for di in ([(j, i - cols + j + 1) for j in range(cols)] for i in range(cols + rows - 1)):\n yield [matrix[i][j] for i, j in di if i >= 0 and j >= 0 and i < cols and j < rows]", "title": "" }, { "docid": "440d903a960242fd84a6c4ded5938d41", "score": "0.5843311", "text": "def joint_diagonalization(C, V=None, eps=1e-3, max_iter=1000, verbose=-1):\n \n d = C.shape[1]\n list_pairs = list(itertools.combinations(range(d), 2))\n \n if V is None:\n V = np.eye(d) + 1j*np.zeros((d, d))\n\n O_cs = np.sum([off_frobenius(c) for c in C])\n counter = 0\n \n if verbose > 0:\n print('Iter: {:.0f}, Diagonalization: {:.2f}'.format(counter, O_cs))\n \n diff = np.inf\n \n while ((diff > eps) and (counter < max_iter)):\n counter += 1\n for (i,j) in list_pairs:\n V_ = np.eye(d) + 1j*np.zeros((d, d))\n idx = (slice(None), ) + np.ix_([i,j],[i,j])\n R = rotation(C[idx])\n V_[np.ix_([i,j],[i,j])] = V_[np.ix_([i,j],[i,j])].dot(R)\n V = V.dot(V_.T)\n C = np.matmul(np.matmul(V_, C), V_.T)\n\n O_cs_new = np.sum([off_frobenius(c) for c in C])\n diff = np.abs(O_cs - O_cs_new)\n \n if verbose > 0:\n print('Iter: {:.0f}, Diagonalization: {:.2f}'.format(counter, O_cs))\n O_cs = O_cs_new\n \n return V, C", "title": "" }, { "docid": "03567108208b8b6f4376425d758f0424", "score": "0.58382577", "text": "def diagonal_iterator(self):\n return matrix_iterators.DiagonalIterator(self)", "title": "" }, { "docid": "bf076dfb8d2de9bce20ac3eac620808a", "score": "0.5833296", "text": "def diag(self) -> list:\n return [self._v[0], self._v[4], self._v[8]]", "title": "" }, { "docid": "63a93a04f387436d14a6df5de83acb7c", "score": "0.5828457", "text": "def get_diags(self):\n diag_fw = []\n diag_rv = []\n for i in range(self.board_len):\n # Build diagonals\n diag_fw.append(self.board_tiles[i][i])\n diag_rv.append(self.board_tiles[i][self.board_len-1-i])\n\n return diag_fw, diag_rv", "title": "" }, { "docid": "3b402d70bd6021695f10d458c04bf115", "score": "0.58283925", "text": "def 
get_primary_diagonal(i, j):\n if j > (GRID_SIZE - NUMBERS_IN_SUM):\n return 0\n if i > (GRID_SIZE - NUMBERS_IN_SUM):\n return 0\n return functools.reduce(\n operator.mul,\n (grid[i + x][j + x] for x in range(0, NUMBERS_IN_SUM))\n )", "title": "" }, { "docid": "b57a47b219f3dd7b2f981b2dcd72a5e8", "score": "0.58111554", "text": "def diagonal_grid(height, width):\n grid = create_grid(height, width) # initially all 0s\n\n for r in range(height):\n for c in range(width):\n if r == c:\n grid[r][c] = 1\n\n return grid", "title": "" }, { "docid": "b57a47b219f3dd7b2f981b2dcd72a5e8", "score": "0.58111554", "text": "def diagonal_grid(height, width):\n grid = create_grid(height, width) # initially all 0s\n\n for r in range(height):\n for c in range(width):\n if r == c:\n grid[r][c] = 1\n\n return grid", "title": "" }, { "docid": "8e14cd330130aa1dc66eb5506e58dfaa", "score": "0.5793961", "text": "def diag(P):\n return np.diagonal(P, axis1=1, axis2=2)", "title": "" }, { "docid": "9c7e5e770aeee28a7f933238b263c706", "score": "0.578298", "text": "def my_diag(A):\n\n N = np.size(A, 1) # get number of diag elements = num colums\n ii = np.arange(0, N) # create seq of int from 1 to N\n return np.asarray(A[ii, ii]).flatten() # ensure output is 1d array ", "title": "" }, { "docid": "248ae03ea869e00e08c83db865b7390a", "score": "0.57275915", "text": "def side_tri():\n for row in range(1,8):\n print()\n\n for hashNum in range(int(5-fabs(4-row)),1,-1):\n # range starting sequence with 'fabs()' function\n # reduce necessary reverse counting\n print('#',end='')", "title": "" }, { "docid": "0e136d1168b83d357f25dbe08bdcf36f", "score": "0.5727518", "text": "def cross_cancel(a, i, b, j):\n q = (j - i)*cols\n for p in range(i*cols, (i + 1)*cols):\n mat[p] = a*mat[p] - b*mat[p + q]", "title": "" }, { "docid": "598258c5dab8b6e43d8fc6ab45147c4f", "score": "0.5716038", "text": "def makeDiagonalMatrix(a):\r\n sign = 1\r\n n = len(a)\r\n for i in range(n):\r\n startCol = i\r\n while startCol < n and a[i][startCol] == 0:\r\n # try to find not zero factor, and swap\r\n # with current row\r\n notZeroIdx = findNotZeroIndex(a, i)\r\n if notZeroIdx != -1:\r\n # swap two rows\r\n a[i], a[notZeroIdx] = a[notZeroIdx], a[i]\r\n sign *= -1\r\n break\r\n # all rows with zero factors\r\n # step to the right to find column without zeros\r\n startCol += 1\r\n\r\n if startCol == n:\r\n return (sign, a)\r\n \r\n ai = a[i]\r\n for j in range(i+1, n):\r\n aj = a[j]\r\n k = aj[startCol] / ai[startCol]\r\n for idx in range(startCol, n):\r\n aj[idx] -= k * ai[idx]\r\n a[j][i] = 0\r\n \r\n return (sign, a)", "title": "" }, { "docid": "8b45207f22df9d91ae6ca6d80ef4d10a", "score": "0.56690115", "text": "def gen_1d(mat, e, i, d):\n w = mat\n for j in range(i):\n w = tt.kron(e,w)\n for j in range(d-i-1):\n w = tt.kron(w,e)\n return w", "title": "" }, { "docid": "eb349259ba667301d94f5ae66acd76ce", "score": "0.5668876", "text": "def inverse_diagonal_flip(squarelotron, ring):\n \"\"\"i_d_s[row][column] -> i_d_s[4 - column][4 - row]\"\"\"\n i_d_s = deepcopy(squarelotron)\n if (ring == 'outer'):\n for column in range(4):\n i_d_s[0][column], i_d_s[4 - column][4] = i_d_s[4 - column][4], i_d_s[0][column]\n for row in range(1, 4):\n i_d_s[row][0], i_d_s[4][4 - row] = i_d_s[4][4 - row], i_d_s[row][0]\n if (ring == 'inner'):\n for column in range(1,3):\n i_d_s[1][column], i_d_s[4 - column][3] = i_d_s[4 - column][3], i_d_s[1][column]\n i_d_s[2][1], i_d_s[3][2] = i_d_s[3][2], i_d_s[2][1]\n return i_d_s", "title": "" }, { "docid": "913815427afd65695cf7ccd39524c761", 
"score": "0.56141204", "text": "def diag(self, X):\n diagK = np.zeros((X.shape[0], 1))\n for i in range(X.shape[0]):\n diagK[i] = self.function(X[i, :], X[i, :], **self.parameters)\n return diagK", "title": "" }, { "docid": "8f28290785a54ee136e4dcf32ca760c2", "score": "0.5611146", "text": "def _diag_neighbors(self, point):\n return [point - self.NS - 1, \n point - self.NS + 1, \n point + self.NS - 1, \n point + self.NS + 1]", "title": "" }, { "docid": "febddc3f59fa25382621e1a5a3fc2892", "score": "0.56053215", "text": "def diag(self, tensor):\n return np.diag(tensor)", "title": "" }, { "docid": "38600b8cc3405d95bffc8dadebf3a148", "score": "0.5595967", "text": "def diag(cc, k):\n if not cc.imds.made_ip_imds:\n cc.imds.make_ip(cc.ip_partition)\n imds = cc.imds\n\n t1, t2 = cc.t1, cc.t2\n nkpts, nocc, nvir = t1.shape\n kconserv = cc.khelper.kconserv\n\n Hr1 = -np.diag(imds.Loo[k])\n\n Hr2 = np.zeros((nkpts, nkpts, nocc, nocc, nvir), dtype=t1.dtype)\n if cc.ip_partition == 'mp':\n foo = cc.eris.fock[:, :nocc, :nocc]\n fvv = cc.eris.fock[:, nocc:, nocc:]\n for ki in range(nkpts):\n for kj in range(nkpts):\n kb = kconserv[ki, k, kj]\n Hr2[ki, kj] = fvv[kb].diagonal()\n Hr2[ki, kj] -= foo[ki].diagonal()[:, None, None]\n Hr2[ki, kj] -= foo[kj].diagonal()[:, None]\n else:\n idx = np.arange(nocc)\n for ki in range(nkpts):\n for kj in range(nkpts):\n kb = kconserv[ki, k, kj]\n Hr2[ki, kj] = imds.Lvv[kb].diagonal()\n Hr2[ki, kj] -= imds.Loo[ki].diagonal()[:, None, None]\n Hr2[ki, kj] -= imds.Loo[kj].diagonal()[:, None]\n\n if ki == kconserv[ki, kj, kj]:\n Hr2[ki, kj] += np.einsum('ijij->ij', imds.Woooo[ki, kj, ki])[:, :, None]\n\n Hr2[ki, kj] -= np.einsum('jbjb->jb', imds.Wovov[kj, kb, kj])\n\n Wovvo = np.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])\n Hr2[ki, kj] += 2. * Wovvo\n if ki == kj: # and i == j\n Hr2[ki, ki, idx, idx] -= Wovvo\n\n Hr2[ki, kj] -= np.einsum('ibib->ib', imds.Wovov[ki, kb, ki])[:, None, :]\n\n kd = kconserv[kj, k, ki]\n Hr2[ki, kj] -= 2. 
* np.einsum('ijcb,jibc->ijb', t2[ki, kj, k], imds.Woovv[kj, ki, kd])\n Hr2[ki, kj] += np.einsum('ijcb,ijbc->ijb', t2[ki, kj, k], imds.Woovv[ki, kj, kd])\n\n return amplitudes_to_vector(cc, Hr1, Hr2, k)", "title": "" }, { "docid": "b1867ed91032767eadb8e8f124556328", "score": "0.55893636", "text": "def is_diagonal(i, j):\n return 1 if i == j else 0", "title": "" }, { "docid": "b1867ed91032767eadb8e8f124556328", "score": "0.55893636", "text": "def is_diagonal(i, j):\n return 1 if i == j else 0", "title": "" }, { "docid": "bf188b4de8c22daee2aa0f1108622ec6", "score": "0.5584881", "text": "def generate_cross_operator(cross: np.ndarray, bx: np.ndarray, by: np.ndarray) -> np.ndarray:\n diff_op = np.zeros([3, 3, by.shape[1], bx.shape[1]])\n for i in range(3):\n for j in range(3):\n diff_op[i, j] = np.outer(by[i], bx[j]) * cross.T\n\n return diff_op", "title": "" }, { "docid": "275d669977d2b3e9615047f083f7f927", "score": "0.55831826", "text": "def Matrix_cross(a, b):\n return _viso2.Matrix_cross(a, b)", "title": "" }, { "docid": "001ff31f403286a41f8b0c47e67d8133", "score": "0.5582104", "text": "def matrix_2_cross(M):\n\n return np.array([[-M[1,2]], [M[0,2]], [-M[0,1]]])", "title": "" }, { "docid": "53411e7c542a8afde2e6a088dd3e6a31", "score": "0.55803776", "text": "def get_diag2(grid):\n \n if grid == []:\n return grid\n\n return [grid[0][len(grid)-1]] + get_diag2(grid[1:])", "title": "" }, { "docid": "d5eeb122f7ef5197df962c014f0032fc", "score": "0.55750895", "text": "def diagonals(n, display = True):\n\n # First create an id-matrix (diagonal) and its flipped version (anti-diagonal)\n matrix = np.identity(n)\n trans = np.flip(matrix, axis = 1)\n\n # Fuse them together by masking\n mask = (matrix == 0)\n matrix[mask] = trans[mask]\n\n if display:\n print(matrix)\n\n return matrix", "title": "" }, { "docid": "c2919a7830cec37c163a30eea2d07a63", "score": "0.556744", "text": "def generate_second_diagnals(board: np.ndarray):\n second_diagonals = []\n second_diagonals.append([board[i, 3 - i] for i in range(0, 4)])\n second_diagonals.append([board[i, 4 - i] for i in range(0, 5)])\n second_diagonals.append([board[i, 5 - i] for i in range(0, 6)])\n second_diagonals.append([board[i, 6 - i] for i in range(0, 6)])\n second_diagonals.append([board[i, 7 - i] for i in range(1, 6)])\n second_diagonals.append([board[i, 8 - i] for i in range(2, 6)])\n return second_diagonals", "title": "" }, { "docid": "d11a2e20d1f67b1377cc09fc6e1129d1", "score": "0.55620104", "text": "def kronecker_diag_vector(A: ndarray, X: ndarray):\n return kronecker_diag_tensor(A.ravel(), X.squeeze()).reshape(X.shape)", "title": "" }, { "docid": "f25a1b5751aed66f2dd6a88c06be9181", "score": "0.5544281", "text": "def batched_mat_diag(diag):\n N = diag.shape[0] # batch size\n r = diag.size(-1) # square matrix row or column\n diag_mat = torch.stack([torch.diag(m) for m in diag.view(-1, r)])\n return diag_mat.view(N, r, r)", "title": "" }, { "docid": "f8ab010ed0a9e6cbf793b60feea4d2b0", "score": "0.5540476", "text": "def block_diag(*matrices):\n # TODO: Use scipy block_diag once\n # https://github.com/scipy/scipy/issues/4908 is fixed and\n # the fix widespread.\n rows, cols = np.sum([mat.shape for mat in matrices], axis=0)\n b_mat = np.zeros((rows,cols), dtype='complex')\n rows, cols = 0, 0\n for mat in matrices:\n new_rows = rows + mat.shape[0]\n new_cols = cols + mat.shape[1]\n b_mat[rows:new_rows, cols:new_cols] = mat\n rows, cols = new_rows, new_cols\n return b_mat", "title": "" }, { "docid": "3fd02471a30e5f2d2fcfd6fee56016ef", "score": "0.5527801", 
"text": "def diag(self, X):\n return self.kernel.diag(X)", "title": "" }, { "docid": "40ecbe2e29fedd8d6db7a89a1a6b05d8", "score": "0.55277205", "text": "def _matrix_diag(d):\n orig_shape = d.shape\n d = np.reshape(d, (int(np.prod(d.shape[:-1])), d.shape[-1]))\n diag_list = []\n for i in range(d.shape[0]):\n diag_list.append(np.diag(d[i, ...]))\n return np.reshape(diag_list, orig_shape + (d.shape[-1],))", "title": "" }, { "docid": "6fffa23f552107b2a00d1080c68f103d", "score": "0.5519105", "text": "def main_diagonal_flip(squarelotron, ring):\n \"\"\"m_d_s[row][column] -> m_d_s[column][row]\"\"\"\n m_d_s = deepcopy(squarelotron)\n if (ring == 'outer'):\n for column in range(1, 5):\n m_d_s[0][column], m_d_s[column][0] = m_d_s[column][0], m_d_s[0][column]\n for row in range(1, 4):\n m_d_s[row][4], m_d_s[4][row] = m_d_s[4][row], m_d_s[row][4]\n if (ring == 'inner'):\n for column in range(2,4):\n m_d_s[1][column], m_d_s[column][1] = m_d_s[column][1], m_d_s[1][column]\n m_d_s[2][3], m_d_s[3][2] = m_d_s[3][2], m_d_s[2][3]\n return m_d_s", "title": "" }, { "docid": "686e6ca42b76d2c911a2894cc6e243ad", "score": "0.5514092", "text": "def diagonal(state, n):\n\n left_str = [indices[i] + indices[i] for i in range(n)]\n out_str = [indices[:n]]\n einstr = \"\".join(left_str + [\"->\"] + out_str)\n return np.einsum(einstr, state)", "title": "" }, { "docid": "3f112eb3f65e8a3ece59fec8e43c0817", "score": "0.5501681", "text": "def symmetrise(a):\r\n return a + a.T - np.diag(a.diagonal())", "title": "" }, { "docid": "a46838fac23a0ccc90908a3e7773d5ea", "score": "0.5490428", "text": "def sample_diags(self):\n\n eta_diags = np.zeros(self.n_vert ) # eta_diags[v] = eta_z[v]z[v]\n theta = np.zeros(self.n_vert ) # theta_l = n_l * phi_l\n\n for l in range(self.n_comm):\n #eta\n kap_post = self.kap + self.edge_cts[l,l] + np.sum(self.diags[self.comm_idxs[l]])\n # kap_post = self.kap + self.edge_cts[l,l]\n lam_post = self.lam + self.n[l]**2 /2.\n\n # numpy uses a different convention for the parameterization of the gamma distribution\n eta_diags[self.comm_idxs[l]] = np.random.gamma(kap_post, 1./lam_post)\n\n # phi\n gam_post = self.gam + np.sum(self.A[self.comm_idxs[l],:],axis=1) + 2*self.diags[self.comm_idxs[l]]\n theta[self.comm_idxs[l]] = self.n[l] * np.random.dirichlet(gam_post)\n\n self.diags = np.random.poisson(0.5 * theta**2 * eta_diags)", "title": "" }, { "docid": "87dc9aac9e37d2914481723941fdb315", "score": "0.54826766", "text": "def col_clashes(bs,c): # column check\r\n \r\n for i in range(c):\r\n if share_diagonal(i, bs[i], c, bs[c]):\r\n return True\r\n \r\n return False", "title": "" }, { "docid": "1be7f885d4123daec616a68b70935f4e", "score": "0.54758424", "text": "def diag(self, X):\n return np.ones(X.shape[0])", "title": "" }, { "docid": "5a3439bd80905f94979b78bce3672a0a", "score": "0.5472584", "text": "def diag(self, X):\n X1 = np.atleast_2d(X)[:,self.columns]\n return self.kernel.diag(X1)", "title": "" }, { "docid": "8eacfedbd7bc4ec9597a39c9c1814a19", "score": "0.546197", "text": "def intersecao_diagonal_linha(tab, filas):\n # intersecao_diagonal_linha: tabuleiro x tuplo -> tuplo\n pos_livres = ()\n for i in filas[0]:\n for j in filas[2]:\n # verifica se a posicao de intersecao entre a diagonal e a linha\n # esta livre e se estiver adiciona-a a um tuplo\n if i == 1 and j == 1:\n if eh_posicao_livre(tab, 1):\n pos_livres += (1,)\n if i == 2 and j == 1:\n if eh_posicao_livre(tab, 5):\n pos_livres += (5,)\n if i == 3 and j == 1:\n if eh_posicao_livre(tab, 9):\n pos_livres += (9,)\n if i == 1 and j == 2:\n if 
eh_posicao_livre(tab, 3):\n pos_livres += (3,)\n if i == 2 and j == 2:\n if eh_posicao_livre(tab, 5):\n pos_livres += (5,)\n if i == 3 and j == 2:\n if eh_posicao_livre(tab, 7):\n pos_livres += (7,)\n return pos_livres", "title": "" }, { "docid": "467447a741456057fd12ca62b1d809b2", "score": "0.5459449", "text": "def Laplaciano2D(Nx, Ny, diagonal):\n N = Nx * Ny\n A = np.zeros((N,N))\n\n# Primero llena los bloques tridiagonales\n for j in range(0,Ny):\n ofs = Nx * j\n A[ofs, ofs] = diagonal; \n A[ofs, ofs + 1] = 1\n for i in range(1,Nx-1):\n A[ofs + i, ofs + i] = diagonal\n A[ofs + i, ofs + i + 1] = 1\n A[ofs + i, ofs + i - 1] = 1\n A[ofs + Nx - 1, ofs + Nx - 2] = 1; \n A[ofs + Nx - 1, ofs + Nx - 1] = diagonal \n\n# Despues llena las dos diagonales externas\n for k in range(0,N-Nx):\n A[k, Nx + k] = 1\n A[Nx + k, k] = 1\n\n return A", "title": "" }, { "docid": "e930898b0bbc9331c59fccfb205458f3", "score": "0.5448413", "text": "def shift_subdiag(subdiag, v, upper_right_corner=0.0):\n return torch.cat((upper_right_corner * v[[-1]], subdiag * v[:-1]))", "title": "" }, { "docid": "b15d19db4962ed3d96e98d276fd1cc61", "score": "0.54344213", "text": "def _generate_matrix(self):\r\n # Calculate the cross product between the location of each thruster and the direction it points in\r\n rot = np.transpose(np.cross(np.transpose(self.ROTATION), np.transpose(self.thruster_layout), 1))\r\n self.matrix = np.concatenate((self.ROTATION, rot))\r\n for thruster in range(8):\r\n if self.disabled[thruster]:\r\n self.matrix[:, thruster] = 0.0\r\n self.pseudo_inverse_matrix = linalg.pinv(self.matrix)\r\n return self.pseudo_inverse_matrix", "title": "" }, { "docid": "24c379816c1058fa19b6a8f925749622", "score": "0.5427588", "text": "def side_transpose(self):\n t_matrix = []\n counter_row = 0\n for i in reversed(range(self.size[0])):\n t_matrix.append([])\n for j in reversed(range(self.size[1])):\n t_matrix[counter_row].append(self.matrix[j][i])\n counter_row += 1\n\n new_matrix = Matrix(size=[self.size[1], self.size[0]])\n new_matrix.matrix = t_matrix\n return new_matrix", "title": "" }, { "docid": "72ba504b3e986c14ea64fd4bb79b6855", "score": "0.5408212", "text": "def rot_CCW(i, j, matrix_side):\n\n shift = (matrix_side - 1) / 2\n\n # convert matrix notation into Cartesian\n (x, y) = (i, j)\n\n # center at origin\n (x, y) = (x - shift, y - shift)\n\n # rotate\n (x, y) = (-y, x)\n\n # shift back\n (x, y) = (x + shift, y + shift)\n\n # return to matrix notation\n (i, j) = (int(x), int(y))\n\n return (i,j)", "title": "" }, { "docid": "2ef9c79eff818d34486416e838c5efbc", "score": "0.53960687", "text": "def tridiag(N, lower, main, upper):\n return sparse.diags([lower,main,upper],\n offsets=[-1,0,1],\n shape=(N, N),\n format='csr')", "title": "" }, { "docid": "ea43836a28fb32e1f0f1633b410fee32", "score": "0.53944796", "text": "def crossings_r():\n r = empty((crossing_count.value, 3))\n for i, cross in enumerate(iter_crossings()):\n r[i, :] = cross.r[:]\n\n return r", "title": "" }, { "docid": "753e8f34c8acca5e030f62daf6d67643", "score": "0.5387093", "text": "def rotate_2d_matrix(matrix):\n\n c = matrix.copy()\n\n for iter in range(len(matrix[0])):\n aux = []\n for row in reversed(c):\n aux.append(row[iter])\n matrix[iter] = aux", "title": "" }, { "docid": "802c9752821a548960a03e720380bcb1", "score": "0.53832906", "text": "def test_diagonal(matrix1):\n mt = matrix1.return_diagonal_vector()\n assert mt.elements[0,0] == 0\n assert mt.elements[1,0] == -4\n assert mt.elements[2,0] == 1", "title": "" }, { "docid": 
"0b60069f1451687a2fe25e7cc22128fe", "score": "0.53818864", "text": "def rotateBoard_clockwise(board):\n\t\treturn zip(*board[::-1])", "title": "" }, { "docid": "30699c51ce45cd87a3b9e3867a6d998c", "score": "0.53814954", "text": "def _spdiag(val_column):\n if val_column.shape[0] == 0:\n return sps.csr_matrix((1,0))\n return sps.diags(val_column.flat, 0, format=\"csr\")", "title": "" }, { "docid": "271fa350c422387aeca9df2d03f44425", "score": "0.5369987", "text": "def create_diags_cumcon(par,sol):\n\n # unpack\n a_lowdiag = sol.a_lowdiag\n a_updiag = sol.a_updiag\n b_lowdiag = sol.b_lowdiag\n b_updiag = sol.b_updiag\n centdiag = sol.centdiag\n \n for iz in prange(par.Nz):\n for ia in range(par.Na):\n for ib in range(par.Nb):\n\n a = par.grid_a[ia]\n adrift = (par.ra + par.eta)*a - par.ltau0*a**par.ltau + par.xi*par.w\n \n a_low = -np.fmin(sol.d[iz,ia,ib] + adrift,0)/par.dab[ia]\n a_up = np.fmax(sol.d[iz,ia,ib] + adrift,0)/par.daf[ia]\n b_low = -np.fmin(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib],0)/par.dbb[ib]\n b_up = np.fmax(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib],0)/par.dbf[ib]\n \n # correct boundaries\n if ib == par.Nb-1:\n\n a_low = -np.fmin(sol.d[iz,ia,ib-1] + adrift,0)/par.dab[ia]\n a_up = np.fmax(sol.d[iz,ia,ib-1] + adrift,0)/par.daf[ia]\n b_low = -np.fmin(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib-1],0)/par.dbb[ib]\n \n # update\n i = ib*par.Na + ia\n \n a_centdiag = a_low + a_up\n b_centdiag = b_low + b_up\n centdiag[iz,i] = 1 + par.DeltaCUMCON*(a_centdiag + b_centdiag - par.switch_diag[iz])\n \n a_updiag[iz,i] = -par.DeltaCUMCON*a_up\n a_lowdiag[iz,i] = -par.DeltaCUMCON*a_low\n \n b_updiag[iz,i] = -par.DeltaCUMCON*b_up\n b_lowdiag[iz,i] = -par.DeltaCUMCON*b_low", "title": "" }, { "docid": "b23f67f0c81ec6a32977c71190e63b47", "score": "0.5364464", "text": "def batched_diag_vec(mat):\n N = mat.shape[0] # batch size\n r = mat.size(-1) # square matrix row or column\n diag_mat = torch.stack([torch.diag(m) for m in mat.view(-1, r, r)])\n return diag_mat.view(N, r)", "title": "" }, { "docid": "28880bbcab18c1e140f792f1abe4f29b", "score": "0.53596884", "text": "def Diagonal(self, *args):\n return _gp.gp_Mat_Diagonal(self, *args)", "title": "" }, { "docid": "938b25e27369884d0e4724a709331d77", "score": "0.5344848", "text": "def rotate_2d_matrix(matrix):\n\n l = len(matrix)\n for z in range(3):\n for i in range(0, int(l/2)):\n for j in range(i, l - 1 - i):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][l - 1 - i]\n matrix[j][l - 1 - i] = matrix[l - 1 - i][l - 1 - j]\n matrix[l - 1 - i][l - 1 - j] = matrix[l - 1 - j][i]\n matrix[l - 1 - j][i] = temp", "title": "" }, { "docid": "e1373c5f8df83ccee7ebb15d9521adc4", "score": "0.5343749", "text": "def __diag_adj(self, current_block):\n\t\trow = current_block[0]\n\t\tcol = current_block[1]\n\t\t\n\t\trows = self.__list_unique(self.__incr(row, len(self.grid)), self.__decr(row))\n\t\tcols = self.__list_unique(self.__incr(col, len(self.grid[0])), self.__decr(col))\n\t\tadjacent = []\n\t\t\n\t\tfor i in range(len(rows)):\n\t\t\tfor j in range(len(cols)):\n\t\t\t\tif rows[i] != row and cols[j] != col:\n\t\t\t\t\tadjacent.append((rows[i], cols[j]))\n\t\t\n\t\treturn adjacent", "title": "" }, { "docid": "20a48718486221de5bd97ff8694593f0", "score": "0.53401065", "text": "def diagonalSum(mat):\n total = 0\n \n n = len(mat)\n for i in range(0, n): \n for j in range(0, n): \n if (i == j): \n total += mat[i][j] \n elif ((i + j) == (n - 1)): \n total += mat[i][j]\n return total", "title": "" }, { "docid": "467734773513466248aae8262ad8acc5", "score": "0.53368264", "text": "def 
mix_columns_matrix(self):\n\n def D(b):\n \"\"\"\n Return the `e x e` matrix `D` with `b^i` along the\n diagonal.\n\n EXAMPLE::\n\n sage: sr = mq.SR(1, 2, 1, 4)\n sage: sr.mix_columns_matrix() # indirect doctest\n [ a + 1 0 0 0 a 0 0 0]\n [ 0 a^2 + 1 0 0 0 a^2 0 0]\n [ 0 0 a 0 0 0 a + 1 0]\n [ 0 0 0 a^2 0 0 0 a^2 + 1]\n [ a 0 0 0 a + 1 0 0 0]\n [ 0 a^2 0 0 0 a^2 + 1 0 0]\n [ 0 0 a + 1 0 0 0 a 0]\n [ 0 0 0 a^2 + 1 0 0 0 a^2]\n \"\"\"\n D = Matrix(self.base_ring(), self._e, self._e)\n for i in range(self._e):\n D[i, i] = b**(2**i)\n return D\n\n r = self.r\n c = self.c\n e = self.e\n k = self.k\n a = k.gen()\n\n M = Matrix(k, r*e, r*e)\n\n if r == 1:\n self._insert_matrix_into_matrix(M, D(1), 0, 0)\n\n elif r == 2:\n self._insert_matrix_into_matrix(M, D(a+1), 0, 0)\n self._insert_matrix_into_matrix(M, D(a+1), e, e)\n self._insert_matrix_into_matrix(M, D(a), e, 0)\n self._insert_matrix_into_matrix(M, D(a), 0, e)\n\n elif r == 4:\n self._insert_matrix_into_matrix(M, D(a), 0, 0)\n self._insert_matrix_into_matrix(M, D(a), e, e)\n self._insert_matrix_into_matrix(M, D(a), 2*e, 2*e)\n self._insert_matrix_into_matrix(M, D(a), 3*e, 3*e)\n\n self._insert_matrix_into_matrix(M, D(a+1), 0, e)\n self._insert_matrix_into_matrix(M, D(a+1), e, 2*e)\n self._insert_matrix_into_matrix(M, D(a+1), 2*e, 3*e)\n self._insert_matrix_into_matrix(M, D(a+1), 3*e, 0)\n\n self._insert_matrix_into_matrix(M, D(1), 0, 2*e)\n self._insert_matrix_into_matrix(M, D(1), e, 3*e)\n self._insert_matrix_into_matrix(M, D(1), 2*e, 0)\n self._insert_matrix_into_matrix(M, D(1), 3*e, 1*e)\n\n self._insert_matrix_into_matrix(M, D(1), 0, 3*e)\n self._insert_matrix_into_matrix(M, D(1), e, 0)\n self._insert_matrix_into_matrix(M, D(1), 2*e, 1*e)\n self._insert_matrix_into_matrix(M, D(1), 3*e, 2*e)\n\n mix_columns = Matrix(k, r*c*e, r*c*e)\n\n for i in range(c):\n self._insert_matrix_into_matrix(mix_columns, M, r*e*i, r*e*i)\n\n return mix_columns", "title": "" }, { "docid": "9e1012109cb52cfe913cc1199251aea2", "score": "0.5336553", "text": "def diagonal(self):\n # Check if fit had been called\n check_is_fitted(self, [\"X\"])\n try:\n check_is_fitted(self, [\"_X_diag\"])\n if self._is_transformed:\n Y_diag = self.X[0].diagonal()[1]\n for i in range(1, self._h):\n Y_diag += self.X[i].diagonal()[1]\n except NotFittedError:\n # Calculate diagonal of X\n if self._is_transformed:\n X_diag, Y_diag = self.X[0].diagonal()\n # X_diag is considered a mutable and should not affect the kernel matrix itself.\n X_diag.flags.writeable = True\n for i in range(1, self._h):\n x, y = self.X[i].diagonal()\n X_diag += x\n Y_diag += y\n self._X_diag = X_diag\n else:\n # case sub kernel is only fitted\n X_diag = self.X[0].diagonal()\n # X_diag is considered a mutable and should not affect the kernel matrix itself.\n X_diag.flags.writeable = True\n for i in range(1, self._n_iter):\n x = self.X[i].diagonal()\n X_diag += x\n self._X_diag = X_diag\n\n # if self.as_tensor:\n # self._X_diag = torch.tensor(self._X_diag)\n # if Y_diag is not None:\n # Y_diag = torch.tensor(Y_diag)\n if self._is_transformed:\n return self._X_diag, Y_diag\n else:\n return self._X_diag", "title": "" }, { "docid": "90108e1bf8d89772c830bdc24301cd39", "score": "0.5335854", "text": "def compute_diagonals(self) -> np.ndarray:\n num_turns = len(self.speaker_turn_scores)\n constraint_matrix = np.zeros((num_turns, num_turns))\n for i in range(num_turns - 1):\n speaker_turn_score = self.speaker_turn_scores[i + 1]\n if speaker_turn_score != 0:\n if speaker_turn_score > self.threshold:\n 
constraint_matrix[i, i + 1] = -1\n constraint_matrix[i + 1, i] = -1\n else:\n constraint_matrix[i, i + 1] = 1\n constraint_matrix[i + 1, i] = 1\n return constraint_matrix", "title": "" }, { "docid": "aca9e3bd622894a2c768d554cffd6cb6", "score": "0.53193736", "text": "def obter_diagonal(tab, num):\n # obter_diagonal: tabuleiro x inteiro -> vector\n if eh_tabuleiro(tab) and type(num) is int and 1 <= num <= 2:\n diag = ()\n if num == 1:\n for i in range(3):\n diag += (tab[i][i],)\n else:\n for i in range(2, -1, -1): # i vai tomar os valores de 2, 1, 0.\n diag += (tab[i][-i - 1],)\n # tab[2][-3] = pos 7, tab[1][-2] = pos 5, tab[0][-1] = pos 3\n else:\n raise ValueError(\"obter_diagonal: algum dos argumentos e invalido\")\n return diag", "title": "" }, { "docid": "f9b16b13d423a3dc5ac53f4092c25348", "score": "0.5302585", "text": "def diag(*args):\n return _casadi.Sparsity_diag(*args)", "title": "" }, { "docid": "a5d02b8ed81af5e50225bf3294b19cd0", "score": "0.5298932", "text": "def optimise_diagonals(self, directions):\n #consider that square diagonals are 1.4x the width\n pass\n \n return directions", "title": "" }, { "docid": "0a1f9d944a1321db26f4ee89419425c6", "score": "0.5298597", "text": "def _diag(self, X1: ArrayLike, X2: ArrayLike, to_dense=True) -> ArrayLike:\n cov = self(X1, X2)\n if to_dense:\n return cov\n return tf.linalg.diag_part(cov)", "title": "" }, { "docid": "b13f5dda7191cd7ce6de469886463fc4", "score": "0.52920663", "text": "def secondary_diagonal(matrix):\n def get_next_diag_point(coordinates, max_row, max_column):\n # if i can go down do that\n if coordinates[0] < max_row:\n return (coordinates[0] + 1, coordinates[1])\n # elif if can go right do that\n if coordinates[1] < max_column:\n return(coordinates[0], coordinates[1] + 1)\n # else return none\n else:\n return (None, None)\n\n def sort_diag(matrix, diag_coordinates, max_row, max_column):\n\n current_row = diag_coordinates[0]\n current_column = diag_coordinates[1]\n\n diag_values = []\n\n while 0 <= current_row <= max_row and 0 <= current_column <= max_row:\n diag_values.append(matrix[current_row][current_column])\n current_row -= 1\n current_column += 1\n\n current_row = diag_coordinates[0]\n current_column = diag_coordinates[1]\n\n while 0 <= current_row <= max_row and 0 <= current_column <= max_row:\n diag_values = sorted(diag_values)\n matrix[current_row][current_column] = diag_values.pop(0)\n current_row -= 1\n current_column += 1\n\n diag_coordinates = get_next_diag_point(\n diag_coordinates, max_row, max_column)\n\n if diag_coordinates[0] != None:\n return sort_diag(matrix, diag_coordinates, max_row, max_column)\n\n sort_diag(matrix, (0, 0), len(matrix) - 1, len(matrix[0]) - 1)\n\n return matrix", "title": "" }, { "docid": "cef0669796bf9a361ae96bb237e48d7f", "score": "0.52811134", "text": "def create_diagonal_downleft_upright(diagonal, x, y):\n for z in range(7): # To have a 7 pixel list\n # Tries to get values that might be out of bounds, three pixels down\n # left and three pixels up right in a diagonal from the token\n try:\n diagonal.append(sense.get_pixel(x - z + 3, y + z - 3))\n except: # Catches out of bounds errors\n ValueError\n return(diagonal) # Returns the list of pixels", "title": "" }, { "docid": "b15750dc60252f300c89a5326e6c9c6a", "score": "0.52620816", "text": "def diag(cc, k):\n if not cc.imds.made_ea_imds:\n cc.imds.make_ea(cc.ea_partition)\n imds = cc.imds\n\n t1, t2 = cc.t1, cc.t2\n nkpts, nocc, nvir = t1.shape\n kconserv = cc.khelper.kconserv\n\n Hr1 = np.diag(imds.Lvv[k])\n\n Hr2 = 
np.zeros((nkpts, nkpts, nocc, nvir, nvir), dtype=t2.dtype)\n if cc.ea_partition == 'mp':\n foo = cc.eris.fock[:, :nocc, :nocc]\n fvv = cc.eris.fock[:, nocc:, nocc:]\n for kj in range(nkpts):\n for ka in range(nkpts):\n kb = kconserv[k, ka, kj]\n Hr2[kj, ka] -= foo[kj].diagonal()[:, None, None]\n Hr2[kj, ka] += fvv[ka].diagonal()[None, :, None]\n Hr2[kj, ka] += fvv[kb].diagonal()\n else:\n idx = np.eye(nvir, dtype=bool)\n for kj in range(nkpts):\n for ka in range(nkpts):\n kb = kconserv[k, ka, kj]\n Hr2[kj, ka] -= imds.Loo[kj].diagonal()[:, None, None]\n Hr2[kj, ka] += imds.Lvv[ka].diagonal()[None, :, None]\n Hr2[kj, ka] += imds.Lvv[kb].diagonal()\n\n Hr2[kj, ka] += np.einsum('abab->ab', imds.Wvvvv[ka, kb, ka])\n\n Hr2[kj, ka] -= np.einsum('jbjb->jb', imds.Wovov[kj, kb, kj])[:, None, :]\n Wovvo = np.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])\n Hr2[kj, ka] += 2. * Wovvo[:, None, :]\n if ka == kb:\n for a in range(nvir):\n Hr2[kj, ka, :, a, a] -= Wovvo[:, a]\n\n Hr2[kj, ka] -= np.einsum('jaja->ja', imds.Wovov[kj, ka, kj])[:, :, None]\n\n Hr2[kj, ka] -= 2 * np.einsum('ijab,ijab->jab', t2[k, kj, ka], imds.Woovv[k, kj, ka])\n Hr2[kj, ka] += np.einsum('ijab,ijba->jab', t2[k, kj, ka], imds.Woovv[k, kj, kb])\n\n return amplitudes_to_vector(cc, Hr1, Hr2, k)", "title": "" }, { "docid": "ca34d75f5a69fe41e0127442057d3028", "score": "0.52586377", "text": "def Diagonal(self, *args):\n return _gp.gp_Mat2d_Diagonal(self, *args)", "title": "" }, { "docid": "6d1228340755f8765ed5983529a45e05", "score": "0.5254952", "text": "def mask_diagonals(n, k):\n return np.tri(n, k=-(abs(k)+1), dtype=bool) | ~np.tri(n, k=abs(k), dtype=bool)", "title": "" }, { "docid": "06161783732eaf1a32ca3d7255c0e30d", "score": "0.5244569", "text": "def Decomposition_meth():\n A=np.array([[2,1,-1],[4,1,0],[-2,-3,8]]) #Here we are setting up the\n n=len(A) # Matricies\n l=np.identity(n) # set the diagonals of l to 1\n u=np.zeros((n,n))#\n for k in range(n):\n u[k,k]=A[k,k]-np.dot(l[k,:],u[:,k]) #first calculate the diagonals of u\n for j in range(k+1,n):\n u[k,j]=(A[k,j]-np.dot(l[k,:],u[:,j]))/l[k,k] #calculate the rows of u\n for i in range(k+1,n):\n l[i,k]=(A[i,k]-np.dot(l[i,:],u[:,k]))/u[k,k] #calculate the columns\n return l, u", "title": "" } ]
44dc5cc5a85b70e4d2e57f030e7e7e14
Compute the Section, in the coordinate system given by the Location Law. To have the Normal to section equal to the Location Law Normal. If contact between and is forced.
[ { "docid": "3abe8a46cf705b9ace13b235e31319b7", "score": "0.52541476", "text": "def ModifiedSection(self, *args):\n return _GeomFill.GeomFill_SectionPlacement_ModifiedSection(self, *args)", "title": "" } ]
[ { "docid": "6f00126098607b4a21a1e7a81f945b25", "score": "0.58446944", "text": "def ConstantSection(self, *args):\n return _GeomFill.GeomFill_SectionLaw_ConstantSection(self, *args)", "title": "" }, { "docid": "a4ed5004f5f44261468c0151db23d3eb", "score": "0.5688747", "text": "def SectionShape(self, *args):\n return _GeomFill.GeomFill_SectionLaw_SectionShape(self, *args)", "title": "" }, { "docid": "1e8f98b8fd40e6a97ca83c2fe8b901a9", "score": "0.56785524", "text": "def Section(self, *args):\n return _GeomFill.GeomFill_SectionPlacement_Section(self, *args)", "title": "" }, { "docid": "4183c64083fa8302d612b6aa7d5de790", "score": "0.54366714", "text": "def CirclSection(self, *args):\n return _GeomFill.GeomFill_SectionLaw_CirclSection(self, *args)", "title": "" }, { "docid": "d8739136da53a3bd72bf124b21e8b605", "score": "0.5392841", "text": "def section_modulus(self, point=None):\n x_c, y_c = self.center\n if point is None:\n # taking x and y as maximum distances from centroid\n x_min, y_min, x_max, y_max = self.bounds\n y = max(y_c - y_min, y_max - y_c)\n x = max(x_c - x_min, x_max - x_c)\n else:\n # taking x and y as distances of the given point from the center\n point = Point2D(point)\n y = point.y - y_c\n x = point.x - x_c\n\n second_moment = self.second_moment_of_area()\n S_x = second_moment[0]/y\n S_y = second_moment[1]/x\n\n return S_x, S_y", "title": "" }, { "docid": "44bb30788c32639b59a532a270624a81", "score": "0.53315586", "text": "def IsConicalLaw(self, *args):\n return _GeomFill.GeomFill_SectionLaw_IsConicalLaw(self, *args)", "title": "" }, { "docid": "3d0f9286e8379e6db449dae2cd510107", "score": "0.52138305", "text": "def cross_section(formula_like, data, lat_long, cutoff, kernel = 'uniform'):\n y, X = dmatrices(formula_like, data, eval_env = 1, NA_action = 'raise')\n # TODO: handle cases where people provide weird formulas?\n\n lat_long = parse_lat_long(lat_long, data)\n # Raise an exception if the data look funky\n nobs = check_parameters(y, X, lat_long, cutoff)\n\n # TODO: consider a more sophisticated way of calculating residuals (e.g. 
one that\n # allows for fancy fixed effects)\n betahat, _, rank, _ = np.linalg.lstsq(X, y)\n if rank != X.shape[1]:\n raise np.linalg.LinAlgError('X matrix is not full rank!')\n del rank\n residuals = (y - X @ betahat)\n sigma = _cross_section_calculate_sigma(lat_long, residuals, cutoff,\n kernel, metric = 'greatcircle')\n filling = (X.T @ sigma @ X) / nobs\n\n bread = np.linalg.inv(X.T @ X)\n sandwich = nobs * (bread.T @ filling @ bread)\n se = np.sqrt(np.diag(sandwich)).reshape(-1, 1)\n return se", "title": "" }, { "docid": "93514fa5f32a24962c843f92496f7813", "score": "0.5130999", "text": "def Section(self, *args):\n return _GeomFill.GeomFill_SectionGenerator_Section(self, *args)", "title": "" }, { "docid": "60e02612b2e816d15ad93ad3842a52b3", "score": "0.50981796", "text": "def compute_section(k, depth):\n level = InternalNode.compute_level(k, depth)\n relative_depth = InternalNode.compute_relative_depth(k, depth)\n section = math.ceil(relative_depth / math.pow(2, level - 1))\n\n return section", "title": "" }, { "docid": "eb77be2e0c9f81d08792377e709fa0d6", "score": "0.48136702", "text": "def Section_serie(self, upperlst):\n self.Empty_lines() # necessary for the recursion\n if self.sym == 'section':\n self.Section(upperlst)\n self.Empty_lines()\n self.Section_serie(upperlst)", "title": "" }, { "docid": "d6c768e7f4866cd91d41b4d4f1bdeee1", "score": "0.4685902", "text": "def GetInterval(self, *args):\n return _GeomFill.GeomFill_SectionLaw_GetInterval(self, *args)", "title": "" }, { "docid": "609131db03a7fcee9dc8f7c4ddcfc107", "score": "0.4674694", "text": "def get_section(point, sections):\n\tmatching_section = None\n\tfor section in sections:\n\t\tif (\n\t\t\toverlap_check(\n\t\t\t\tpoint[0], section.top_left_point[0], section.top_right_point[0]) and\n\t\t\toverlap_check(\n\t\t\t\tpoint[1], section.top_left_point[1], section.bottom_left_point[1])):\n\t\t\tmatching_section = section\n\t\t\tbreak\n\telse:\n\t\traise Exception(\n\t\t\t'get_section error: no matching section found for point: {}.'.format(point))\n\treturn matching_section", "title": "" }, { "docid": "9a172c5f7a05f2d4ef0ffa5e9d6d3a1a", "score": "0.46630144", "text": "def sector(self):\n return self.occupation.sector", "title": "" }, { "docid": "15bcd8c9a30cc7230b988897b7f6231e", "score": "0.46502638", "text": "def SetInterval(self, *args):\n return _GeomFill.GeomFill_SectionLaw_SetInterval(self, *args)", "title": "" }, { "docid": "26a951dddbe6829b61f173e2d129d003", "score": "0.46413225", "text": "def _make_section(self):\n return Section()", "title": "" }, { "docid": "3be6db8042fd63a2378e0ac5b1b270c1", "score": "0.46383053", "text": "def divide_sections(self):\n # ftxt is the filetxt without whitespace\n ftxt = \"\\n\".join(self._ltxt).lower()\n\n # Get first section\n divide = ftxt.lower().split('masses')\n self.check_len_sect('masses', divide) # error checking\n ind = self.search_in_list_of_str(divide, lambda s: \"xlo xhi\" in s, 1)[0]\n self.params_sect = divide[ind]\n\n # Get masses section\n divide = divide[1-ind]\n divide = divide.split('atoms')\n self.check_len_sect('Masses', divide) # error checking\n # Find the section with 2 columns\n self.masses_sect, divide = self.get_numeric_section(divide, 2, 'masses')\n\n # Get atoms section\n divide = divide.split(\"bonds\")\n self.check_len_sect('Atoms', divide)\n try:\n self.atoms_sect, divide = self.get_numeric_section(divide, 9, 'atoms')\n except SystemExit as e:\n self.atoms_sect, divide = self.get_numeric_section(divide, 10, \"atoms\")\n\n # Get bonds section\n divide = 
divide.split(\"angles\")\n self.check_len_sect('Bonds', divide)\n self.bonds_sect, divide = self.get_numeric_section(divide, 4, 'bonds')\n\n # Get angles and dihedral section\n divide = divide.split(\"dihedrals\")\n self.check_len_sect('Angles', divide)\n self.angles_sect, self.dihedrals_sect = self.get_numeric_section(divide, 5, 'angles')\n\n self.dihedrals_sect = self.dihedrals_sect.replace(\"dihedrals\", \"\")", "title": "" }, { "docid": "ce91844f33e045f76c9eae87f5c6b2c1", "score": "0.4624149", "text": "def cross_section(eps, E, theta):\n\n def beta_func(eps, E, theta):\n\n \"\"\"\n Return the parameter beta (formula 4 from Gould's article)\n\n Parameters:\n eps : energy of the target photon (eV)\n E : energy of the gamma photon (eV)\n theta : angle between the two momenta of the two photons (rad)\n \"\"\"\n\n def parameter_s(eps, E, theta):\n\n \"\"\"\n Return the parameter s (formula 3 from Gould's article)\n\n Parameters:\n eps : energy of the target photon (eV)\n E : energy of the gamma photon (eV)\n theta : angle between the two momenta of the two photons (rad)\n \"\"\"\n\n s = eps*E/(2*(mc2*keV2eV)**2)*(1-np.cos(theta))\n ind = np.where(s>=1) #for pair production to occur, s>=1 and if s=1, it is the threshold condition.\n\n return s, ind\n\n s, ind = parameter_s(eps, E, theta)\n s = s[ind[0]]\n\n return np.sqrt(1-1/s), ind\n\n beta, ind = beta_func(eps, E, theta)\n\n return 1/2.0 * np.pi * r0**2 * (1-beta**2)*((3-beta**4)*np.log((1+beta)/(1-beta))-2*beta*(2-beta**2)), ind", "title": "" }, { "docid": "0fcecda6375d5a06af03811a5d7b1c7c", "score": "0.4623405", "text": "def primary_section(self):\n try:\n t = Section.objects.filter(active = True, extends = None, realm__id = self.id).order_by('display_order')[0]\n except IndexError:\n raise ObjectDoesNotExist(\"no communism.section available\")\n \n return t", "title": "" }, { "docid": "b7728ee3a5c461e2283b32c984672b53", "score": "0.46182197", "text": "def contact_normal(self) -> Vec2d:\n delta: Vec2d = (\n self.world_joint.b.local_to_world(self.world_joint.anchor_b)\n - self.world_joint.a.local_to_world(self.world_joint.anchor_a)\n )\n return delta.normalized()", "title": "" }, { "docid": "6efcdf03384ddb8ca781c1af59f9b583", "score": "0.45624885", "text": "def D0(self, *args):\n return _GeomFill.GeomFill_SectionLaw_D0(self, *args)", "title": "" }, { "docid": "3bf6c3d844f0b4f85c279d0d500ead81", "score": "0.4561176", "text": "def NormalContact(self, contact_type=AbaqusNormalContactType.Exponential, maxStiffness: float = None,\n pressure: float = None, clearance: float = None):\n pass", "title": "" }, { "docid": "188f1357cd25c14f38aba157812a4384", "score": "0.45445007", "text": "def section(self):\n return self._section", "title": "" }, { "docid": "a9bd10fe84ab7e1afbd427f0de071ea9", "score": "0.4516778", "text": "def find_section(self, doc, section_keywords):\n doc = self.docstring.split('\\n')\n section_start = None\n section_end = None\n for idx, txt in enumerate(doc):\n if idx == 0:\n continue\n if re.match(self.numpy_section_regex, txt.strip()) and doc[idx - 1].strip().lower() in section_keywords:\n section_start = idx - 1\n continue\n if section_start is not None and re.match(self.numpy_section_regex ,txt.strip()) is not None:\n section_end = idx - 2\n break\n return section_start, section_end", "title": "" }, { "docid": "9208c5c3feec26f18aa09124bafb6da4", "score": "0.45032695", "text": "def getsection(self):\n return self._section", "title": "" }, { "docid": "9208c5c3feec26f18aa09124bafb6da4", "score": "0.45032695", "text": "def 
getsection(self):\n return self._section", "title": "" }, { "docid": "a00c782159fcfc16684170719f65b923", "score": "0.4485477", "text": "def _get_instructor_sections(person, term):\n if person is None or term is None:\n return None\n return get_sections_by_instructor_and_term(person, term)", "title": "" }, { "docid": "f9565ed57dcbe6ce08726ce90ec3519d", "score": "0.4468013", "text": "def _calculate_section(x, number_of_sections, period_x):\n \n dx = period_x / number_of_sections\n\n return int(np.floor(((x+0.5*dx) % period_x) / dx))", "title": "" }, { "docid": "32abf44ea1c87f3b94ee71d6e020deee", "score": "0.44574255", "text": "def obstruction(self, par):\n\n if self.obs == 'square':\n R = self.R\n cx, cy = int(self.L/4), int(self.W/2)\n\n X,Y = np.meshgrid(np.linspace(0, self.L, self.L_n), np.linspace(0, self.W, self.W_n))\n grid = np.stack((X,Y), axis = -1)\n grid[:,:,0] -= cx\n grid[:,:,1] -= cy\n\n par.indices = np.transpose(abs(grid[:,:,0]) + abs(grid[:,:,1]) <= R)\n \n elif self.obs == 'cylinder':\n R = self.R\n cx, cy = int(self.L/4), int(self.W/2)\n\n X,Y = np.meshgrid(np.linspace(0, self.L, self.L_n), np.linspace(0, self.W, self.W_n))\n grid = np.stack((X,Y), axis = -1)\n grid[:,:,0] -= cx\n grid[:,:,1] -= cy\n\n par.indices = np.transpose((grid[:,:,0]**2 + grid[:,:,1]**2) <= R**2)\n \n return par", "title": "" }, { "docid": "708d8a1b3389d15003c6ea67edab2f4c", "score": "0.44374767", "text": "def Section(self, upperlst):\n assert self.sym == 'section'\n bodylst = [] # body of the section item\n item = [self.sym, self.text, bodylst] # 'section', 'id', body\n upperlst.append(tuple(item)) # ready to be appended\n self.lex()", "title": "" }, { "docid": "f3cae458b83a5507c621a61536d5497a", "score": "0.44321916", "text": "def calc_lateral_area_section(self, sec):\n r = sec.diam/2\n h = sec.L\n lateral_area = 2 * math.pi * r * r * h\n \n return lateral_area", "title": "" }, { "docid": "07d6138ae9981c69b2318b642d5c00ef", "score": "0.44003388", "text": "def initialize_sector_geometry(self, phi):\n # These polar radii bound the region between the inner\n # and outer ellipses that define the sector.\n sma1, sma2 = self.bounding_ellipses()\n eps_ = 1.0 - self.eps\n\n # polar vector at one side of the elliptical sector\n self._phi1 = phi - self.sector_angular_width / 2.0\n r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2\n + (math.sin(self._phi1))**2))\n r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2\n + (math.sin(self._phi1))**2))\n\n # polar vector at the other side of the elliptical sector\n self._phi2 = phi + self.sector_angular_width / 2.0\n r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2\n + (math.sin(self._phi2))**2))\n\n r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2\n + (math.sin(self._phi2))**2))\n\n # sector area\n sa1 = _area(sma1, self.eps, self._phi1, r1)\n sa2 = _area(sma2, self.eps, self._phi1, r2)\n sa3 = _area(sma2, self.eps, self._phi2, r3)\n sa4 = _area(sma1, self.eps, self._phi2, r4)\n self.sector_area = abs((sa3 - sa2) - (sa4 - sa1))\n\n # angular width of sector. 
It is calculated such that the sectors\n # come out with roughly constant area along the ellipse.\n self.sector_angular_width = max(min((self._area_factor / (r3 - r4)\n / r4), self._phi_max),\n self._phi_min)\n\n # compute the 4 vertices that define the elliptical sector.\n vertex_x = np.zeros(shape=4, dtype=float)\n vertex_y = np.zeros(shape=4, dtype=float)\n\n # vertices are labelled in counterclockwise sequence\n vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa)\n vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa)\n vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa)\n vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa)\n vertex_x += self.x0\n vertex_y += self.y0\n\n return vertex_x, vertex_y", "title": "" }, { "docid": "e86017074a6ce52a557e72b16d1d1205", "score": "0.43625197", "text": "def get_or_new_section(self, name, params=None):\n if name not in self.sections:\n self.sections[name.lower()] = SectionTerm(name, term_args=params, doc=self, parent=self.root)\n\n return self.sections[name.lower()]", "title": "" }, { "docid": "27b4a5eea6772d93f98157044fc3f395", "score": "0.43574446", "text": "def section_locs(loc):\n r, c = loc\n return (((r, new_c) for new_c in RAIL_LEGAL_COLUMNS) if is_rail_loc(loc)\n else ((new_r, c) for new_r in body_section_rows(r)))", "title": "" }, { "docid": "eff5fd13eef56abd93a8c9a8947bc9d0", "score": "0.43468818", "text": "def SetLocation(self, *args):\n return _GeomFill.GeomFill_SectionPlacement_SetLocation(self, *args)", "title": "" }, { "docid": "c81b23e474323ed86eb76f44ef18423b", "score": "0.43353423", "text": "def ang_overlap_stark(l_1, l_2, m_1, m_2, field_orientation, dm_allow):\n dl = l_2 - l_1\n dm = m_2 - m_1\n l, m = int(l_1), int(m_1)\n if field_orientation=='parallel':\n if (dm == 0) and (dm in dm_allow):\n if dl == +1:\n return +(((l+1)**2-m**2)/((2*l+3)*(2*l+1)))**0.5\n elif dl == -1:\n return +((l**2-m**2)/((2*l+1)*(2*l-1)))**0.5\n elif (dm == +1) and (dm in dm_allow):\n if dl == +1:\n return -((l+m+2)*(l+m+1)/(2*(2*l+3)*(2*l+1)))**0.5\n elif dl == -1:\n return +((l-m)*(l-m-1)/(2*(2*l+1)*(2*l-1)))**0.5\n elif (dm == -1) and (dm in dm_allow):\n if dl == +1:\n return +((l-m+2)*(l-m+1)/(2*(2*l+3)*(2*l+1)))**0.5\n elif dl == -1:\n return -((l+m)*(l+m-1)/(2*(2*l+1)*(2*l-1)))**0.5\n \n elif field_orientation=='crossed':\n if dm == +1:\n if dl == +1:\n return +(0.5*(-1)**(m-2*l)) * (((l+m+1)*(l+m+2))/((2*l+1)*(2*l+3)))**0.5 \n elif dl == -1:\n return -(0.5*(-1)**(-m+2*l)) * (((l-m-1)*(l-m)) /((2*l-1)*(2*l+1)))**0.5\n elif dm == -1:\n if dl == +1:\n return +(0.5*(-1)**(m-2*l)) * (((l-m+1)*(l-m+2))/((2*l+1)*(2*l+3)))**0.5\n elif dl == -1:\n return -(0.5*(-1)**(-m+2*l)) * (((l+m-1)*(l+m)) /((2*l-1)*(2*l+1)))**0.5\n return 0.0", "title": "" }, { "docid": "96bb3a6f44a7e73b321bd38bb810ea8e", "score": "0.43206578", "text": "def _get_ligand_section(self):\n self._ligand_lines = self._pdb.get_atoms_of_resname(self._ligand_resname)\n self._ligand = ''.join(self._ligand_lines)\n if self._verbose:\n print('Ligand lines: \\n' + self._ligand)", "title": "" }, { "docid": "6a7c2e06783747b13188f7329763492b", "score": "0.43114468", "text": "def well(self):\n return self.sections[\"Well\"]", "title": "" }, { "docid": "b682963086feb6cd51a8fe31ed34ede4", "score": "0.42945835", "text": "def area(self):\n if self.included_angle:\n return (\n self.side_1_length * self.side_2_length *\n math.sin(self.included_angle)\n )\n else:\n raise AngleInformationRequired()", "title": "" }, { "docid": 
"a05c70982b1c7bdaf0ab7eb7fe50758f", "score": "0.42884183", "text": "def IsConstant(self, *args):\n return _GeomFill.GeomFill_SectionLaw_IsConstant(self, *args)", "title": "" }, { "docid": "2678c1ac5bd7a8f6cdfd302647ba2955", "score": "0.4277232", "text": "def getCrossSection(self):\n return self.cross_section", "title": "" }, { "docid": "91268ef48c97304edf0c8b466c12df6a", "score": "0.4257494", "text": "def SectionID():\n return lib.PDElements_Get_SectionID()", "title": "" }, { "docid": "5589e9687ae158f901433f60ffadbc4c", "score": "0.4257107", "text": "def get_section(self, section):\n return self._sections[section]", "title": "" }, { "docid": "3de4ca803e4d97451aff454263694bef", "score": "0.42569566", "text": "def get_section(self, section):\n return self.sections[section].get()", "title": "" }, { "docid": "69c7074d04fd9215e2cad068e6f1d7d0", "score": "0.42519492", "text": "def NormalContact(self, contact_type=AbaqusNormalContactType.ScaleFactor,\n contactStiffnessScaleFactor: float = 1.0, initialStiffnessScaleFactor: float = 1.0,\n overclosure_method: str = 'factor', # or 'measure'\n overclosure: float = 0.0):\n pass", "title": "" }, { "docid": "05da90e8ca90f4ca7e2a3eb39f0713d5", "score": "0.424086", "text": "def containing_cuboid(self,periodicity=None):\n \n dirvec0 = self._dir_vector / self._norm\n tmp = np.cross(dirvec0, np.eye(3, dtype=float))\n projs = np.sqrt(np.sum(tmp**2, axis=1))\n bounds = (np.vstack((\n self._point1 + self._radius1 * projs,\n self._point1 - self._radius1 * projs,\n self._point2 + self._radius2 * projs,\n self._point2 - self._radius2 * projs ))\n + self.shift_vector)\n return np.vstack(( bounds.min(axis=0), bounds.max(axis=0) ))", "title": "" }, { "docid": "40981146b23fea4f69d38554ae8d536b", "score": "0.42243722", "text": "def NormalContact(self, contact_type=AbaqusNormalContactType.Tabular, pressure: typing.Iterable = None,\n overclosure: typing.Iterable = None):\n pass", "title": "" }, { "docid": "dd49ca1b104ab972238362bd2988c613", "score": "0.42174706", "text": "def contracted_cross_section(self, standardized_production_process, standardized_decay_channel, period, mode=0) :\n contracted_cross_section = 0.\n channel = standardized_production_process+\"_\"+standardized_decay_channel\n if mode == 2 :\n ## determine cross section for single-mass mode\n if self.mA<self.hww_cross_point :\n contracted_cross_section = self.signal_channel_to_cross_section[(channel, period)][\"H\"]\n else :\n contracted_cross_section = self.signal_channel_to_cross_section[(channel, period)][\"h\"]\n elif mode == 1 :\n ## determine cross section for degenerate-masses mode\n if abs(self.mA-self.mh) < abs(self.mA-self.mH) :\n contracted_cross_section = self.signal_channel_to_cross_section[(channel, period)][\"A\"]\n contracted_cross_section+= self.signal_channel_to_cross_section[(channel, period)][\"h\"]\n else:\n contracted_cross_section = self.signal_channel_to_cross_section[(channel, period)][\"A\"]\n contracted_cross_section+= self.signal_channel_to_cross_section[(channel, period)][\"H\"]\n if self.mA == 130. 
:\n contracted_cross_section = self.signal_channel_to_cross_section[(channel, period)][\"A\"]\n contracted_cross_section+= self.signal_channel_to_cross_section[(channel, period)][\"H\"]\n contracted_cross_section+= self.signal_channel_to_cross_section[(channel, period)][\"h\"]\n else :\n ## determine cross section for non-degenerate-masses mode\n for higgs in self.signal_channel_to_cross_section[(channel, period)] :\n contracted_cross_section += self.signal_channel_to_cross_section[(channel, period)][higgs]\n return contracted_cross_section", "title": "" }, { "docid": "f7b09782544b35452419675e5654b8ca", "score": "0.42113063", "text": "def __add_section(self) -> None:\n\n logging.debug('Adding section \"%s\"', self.section)\n\n match = self.__get_match()\n\n if match is not None and self.add_after:\n match.add_after.space(self.space).section(self.section)\n elif match is not None and not self.add_after:\n match.add_before.section(self.section)\n else:\n self.updater.add_section(self.section)", "title": "" }, { "docid": "c02d28cc05d0f407d9968bdaeffd690d", "score": "0.42092827", "text": "def sectionPosition(self, p_int): # real signature unknown; restored from __doc__\n return 0", "title": "" }, { "docid": "61193d162f2a37c3a1812bd51e098fc5", "score": "0.4191562", "text": "def getSection(self, section_num=None):\n if section_num is None:\n section_num = self.self.defaultSectionNumber()\n if section_num not in self.sections:\n raise IOError(\"Within block {} there is no section number {:d}\"\\\n .format(self.ident, section_num))\n return self.sections[section_num]", "title": "" }, { "docid": "9b2ff001dafef4fd778ec11c7450c94a", "score": "0.41878837", "text": "def _get_clinic_and_zone(self, contact):\n if contact and contact.location and\\\n contact.location.type.slug in const.ZONE_SLUGS:\n contact_clinic = contact.location.parent\n contact_zone = contact.location\n elif contact and contact.location and\\\n contact.location.type.slug in const.CLINIC_SLUGS:\n contact_clinic = contact.location\n contact_zone = None\n else:\n contact_clinic = None\n contact_zone = None\n return contact_clinic, contact_zone", "title": "" }, { "docid": "2a347255c1ea104b77a0a3c5abeac7f6", "score": "0.41798285", "text": "def contact(init, tspan, a, beta, omega, h):\n return con.contact(init, tspan, h, a, lambda x: x, forcing(beta, omega))", "title": "" }, { "docid": "b603d2d89f06a5e65c71a8371eaa5f80", "score": "0.416543", "text": "def new_section(self, name, params=None):\n self.sections[name.lower()] = SectionTerm(name, term_args=params, doc=self, parent=self.root)\n\n # Set the default arguments\n s = self.sections[name.lower()]\n\n if name.lower() in self.decl_sections:\n s.args = self.decl_sections[name.lower()]['args']\n\n return s", "title": "" }, { "docid": "78ace01f5c6e0e6bf8cab2015ff02428", "score": "0.41519082", "text": "def _reset(self):\n # simulation parameters\n self._step = 0\n self._t = 0\n\n # contact status\n # left half cylinder\n self.pin_in_section_iP = False\n # flat section\n self.pin_in_section_iPiR = False\n # right half cylinder\n self.pin_in_section_iR = False\n\n self.pin_in_section = \"\"\n\n self._contact_point_obj = None\n self._distance_obj = None\n\n # predefined list of contact point on each body (one contact point on a body)\n self.u_P_list_LCS = np.array([np.zeros(2), np.zeros(2)])", "title": "" }, { "docid": "97e702a730e6ca4ee343f658c94fe377", "score": "0.41499963", "text": "def SetTolerance(self, *args):\n return _GeomFill.GeomFill_SectionLaw_SetTolerance(self, *args)", "title": "" 
}, { "docid": "cfe7c7806102056c0ae5c07a84db8833", "score": "0.41364145", "text": "def __parse_section__(self, section):\n #print \"Finding \"+section\n FILE = open(self.filepath, 'r')\n while True:\n #Don't want empty lines being split. So we continue if one is found. Will have empty lines in the section once @section identifier is found\n line = FILE.readline()\n if line==\"\\n\":\n continue\n if not line:break\n lineSP = line.split()\n if len(lineSP)<=1:\n continue\n if lineSP[0]=='@section' and re.search(section, line):\n #Grab info on next lines until next section is found. Then break.\n section_string = \"\"\n new_section = False\n\n while not new_section:\n line_string = FILE.readline()\n if not line_string:break\n line_stringSP = line_string.split()\n if len(line_stringSP)<=1:\n if not re.search('@verbatim', line_string) and not re.search('@endverbatim', line_string) and section=='Options':\n continue\n \n #if line_stringSP[0]==\"@section\": new_section=True\n if re.search('@section', line_string): new_section=True\n else:\n #We only want verbatim options or LI. We then parse it in protocol builder. Which is a bitch because everyone follows something different!\n if section=='Options':\n verbatim=False\n #print line_stringSP[0]\n if line_stringSP[0]=='@li':\n section_string = section_string+line_string\n continue\n elif re.search('@verbatim', line_string):\n verbatim=True\n while verbatim:\n line_string = FILE.readline()\n if re.search('@endverbatim', line_string):\n verbatim=False\n continue\n if re.search('@section', line_string):\n new_section=True\n verbatim=False\n break\n section_string = section_string+line_string\n else:\n #So that verbatims are not in the result string\n if not re.search(\"verbatim\", line_string):\n line_string = line_string.replace('@li', \"\")\n section_string = section_string+line_string\n\n #else:new_section=False\n #Next section found, returning.\n FILE.close()\n #print \"Section: \"+section\n #print section_string\n self.sections[section].set(section_string)\n return\n \n elif lineSP[0]=='@page' and section==\"AppName\":\n self.sections[\"AppName\"].set(lineSP[1])\n return", "title": "" }, { "docid": "d4a207a5cabe727d7cc5d5959025cce0", "score": "0.41344148", "text": "def setup_layout_constants(self):\n\n if self.num_contacts==None:\n self.num_contacts=self.calculate_num_contacts()\n\n # Determine layer types needed\n if self.tx_type == \"nmos\":\n self.implant_type = \"n\"\n self.well_type = \"p\"\n elif self.tx_type == \"pmos\":\n self.implant_type = \"p\"\n self.well_type = \"n\"\n else:\n self.error(\"Invalid transitor type.\",-1)\n \n \n # This is not actually instantiated but used for calculations\n self.active_contact = contact(layer_stack=(\"active\", \"contact\", \"metal1\"),\n dimensions=(1, self.num_contacts))\n\n \n # The contacted poly pitch (or uncontacted in an odd technology)\n self.poly_pitch = max(2*self.contact_to_gate + self.contact_width + self.poly_width,\n self.poly_space)\n\n # The contacted poly pitch (or uncontacted in an odd technology)\n self.contact_pitch = 2*self.contact_to_gate + self.contact_width + self.poly_width\n \n # The enclosure of an active contact. 
Not sure about second term.\n active_enclose_contact = max(drc[\"active_enclosure_contact\"],\n (self.active_width - self.contact_width)/2)\n # This is the distance from the edge of poly to the contacted end of active\n self.end_to_poly = active_enclose_contact + self.contact_width + self.contact_to_gate\n \n\n # Active width is determined by enclosure on both ends and contacted pitch,\n # at least one poly and n-1 poly pitches\n self.active_width = 2*self.end_to_poly + self.poly_width + (self.mults - 1)*self.poly_pitch\n\n # Active height is just the transistor width\n self.active_height = self.tx_width\n\n # Poly height must include poly extension over active\n self.poly_height = self.tx_width + 2*self.poly_extend_active\n\n # The active offset is due to the well extension\n self.active_offset = vector([self.well_enclose_active]*2)\n\n # Well enclosure of active, ensure minwidth as well\n if info[\"has_{}well\".format(self.well_type)]:\n self.cell_well_width = max(self.active_width + 2*self.well_enclose_active,\n self.well_width)\n self.cell_well_height = max(self.tx_width + 2*self.well_enclose_active,\n self.well_width)\n # We are going to shift the 0,0, so include that in the width and height\n self.height = self.cell_well_height - self.active_offset.y\n self.width = self.cell_well_width - self.active_offset.x\n else:\n # If no well, use the boundary of the active and poly\n self.height = self.poly_height\n self.width = self.active_width\n \n # The active offset is due to the well extension\n self.active_offset = vector([self.well_enclose_active]*2)\n\n # This is the center of the first active contact offset (centered vertically)\n self.contact_offset = self.active_offset + vector(active_enclose_contact + 0.5*self.contact_width,\n 0.5*self.active_height)\n \n \n # Min area results are just flagged for now.\n debug.check(self.active_width*self.active_height>=drc[\"minarea_active\"],\"Minimum active area violated.\")\n # We do not want to increase the poly dimensions to fix an area problem as it would cause an LVS issue.\n debug.check(self.poly_width*self.poly_height>=drc[\"minarea_poly\"],\"Minimum poly area violated.\")", "title": "" }, { "docid": "bb5a1c460eda0aeddb37cd83931ec247", "score": "0.41335955", "text": "def get_section_by_offset(self, offset):\r\n\r\n sections = [s for s in self.sections if s.contains_offset(offset)]\r\n\r\n if sections:\r\n return sections[0]\r\n\r\n return None", "title": "" }, { "docid": "41018c871187bc9427e0b0e3175322e1", "score": "0.41269243", "text": "def is_manifold(self):\r\n return _base._rsf.is_mesh_manifold(self._rhino_id)", "title": "" }, { "docid": "92e26854738705fb6b89dddd5a8cf0ca", "score": "0.4122497", "text": "def _contact_geometry_LCS(self, q):\n self._contact_point_obj.contact_geometry_LCS()\n self._contact_point_obj.contact_points_LCS(q)", "title": "" }, { "docid": "4dfc5a129cb5473c0c41e3dbccaa190a", "score": "0.41207856", "text": "def _set_section(self, section):\n\n if self.section != section:\n if self.section > section:\n raise dns.exception.FormError\n self.section = section", "title": "" }, { "docid": "fba1f25919598f51d83e1248884ffd16", "score": "0.41192183", "text": "def _find_normal(self):\n\n if len(self.sites) == 3:\n normal = np.cross(self._sites[0].coords - self._sites[1].coords,\n self._sites[0].coords - self._sites[2].coords)\n else:\n # TODO Make an average of possible normals\n normal = np.cross(self._sites[0].coords - self._sites[1].coords,\n self._sites[0].coords - self._sites[2].coords)\n\n # Make length of the normal 
equal to 1\n normal = normal / np.linalg.norm(normal)\n\n # Flip normal in case it is pointing towards the origin.\n if utils.angle_between(-self.center, normal) < math.pi / 2:\n normal = - normal\n\n return normal", "title": "" }, { "docid": "42021d020d11a4c5292961c8bf6b23d7", "score": "0.41058886", "text": "def TangentialContact(self, contact_type=AbaqusTangentialContactType.UserDefined,\n nStateDependentVars: int = 0, cohesion: float = 0.0, G0: float = 250.0, R: float = 5.19,\n e_cref: float = 0.89, Lambda: float = 0.147, xi: float = 0.424, phi: float = 31.2,\n dd: float = 7.57, n_p: float = 2.06, n_d: float = 0.46, e0: float = 0.5771,\n thickness: float = 5 * 0.23 * 1e-3):\n pass", "title": "" }, { "docid": "41e27b56da39963353b5fae785968cac", "score": "0.4103783", "text": "def phys2arcsec_lens(self, phys):\n return phys / self.D_d/const.arcsec", "title": "" }, { "docid": "53ff4552f8cd272b761f90168facd06a", "score": "0.41002315", "text": "def halfspace(self):\n v = self.asvec3\n alldone = np.all(v.angle(v.R) <= 90)\n while not alldone:\n ang = v.angle(v.R)\n for ix, do in enumerate(ang > 90):\n if do:\n v[ix] = -v[ix]\n alldone = np.all(v.angle(v.R) <= 90)\n if self.type == Lin:\n v = v.aslin\n if self.type == Fol:\n v = v.asfol\n return v", "title": "" }, { "docid": "e87a057eaeffd8628ad1b85204826599", "score": "0.40996963", "text": "def _getChainLiloSection(self, device, label):\n self.__debug(\"Section 'chain' for \" + device + \" with label: \" + label)\n return \"\"\"# {label} chain section\n other = {device}\n label = {label}\n\"\"\".format(device=device, label=label)", "title": "" }, { "docid": "1634f32a6108a6da045a24e29119e5d9", "score": "0.4088214", "text": "def world_center(self):\r\n return copy(self._sweep.c)", "title": "" }, { "docid": "fc8a6886d64be31a53097f9c03b19217", "score": "0.40725702", "text": "def extract_sections(self, document):\n split_document = section_split.split_document(document)\n passages = []\n for i, passage in enumerate(split_document.passages):\n if 'title' in passage.infons:\n if (passage.infons['title'] in self.sections_to_extract and\n len(split_document.passages) > i+1):\n next_passage = split_document.passages[i+1]\n if 'title' not in next_passage.infons:\n passages.append(next_passage)\n \n if passages or self.extract_strict:\n extracted_passages = bioc.BioCPassage()\n if passages:\n extracted_passages.offset = passages[0].offset\n extracted_passages.text = ' '.join(map(lambda x: x.text, passages))\n else:\n extracted_passages.offset = 0\n extracted_passages.text = ''\n split_document.passages = [extracted_passages]\n return split_document\n else:\n warnings.warn('Loader found document containing none of the ' + \n 'provided sections to extract. 
Returning original ' + \n 'document.')\n return document", "title": "" }, { "docid": "37f0200e891e50d674092ef925a814da", "score": "0.40715367", "text": "def Mults(self, *args):\n return _GeomFill.GeomFill_SectionLaw_Mults(self, *args)", "title": "" }, { "docid": "ebe24f20671acee2d1ea06c52b9900b5", "score": "0.4069525", "text": "def real_section():\n input_directory = get_data(\"section\")\n\n jetzt = datetime.now()\n timestamp = jetzt.strftime(\"%b%d_%H%M_\")\n testname = str(timestamp + \"DemoTest_Section\")\n\n output_dir = fibermorph.make_subdirectory(create_results_cache(), append_name=testname)\n\n fibermorph.section(input_directory, output_dir, jobs=4, resolution=1.06)\n\n return True", "title": "" }, { "docid": "875b183c3403890a4d877e6b54e348d8", "score": "0.40628403", "text": "def sub_sector(self):\n return self.occupation.sub_sector", "title": "" }, { "docid": "9b3c958ecbd35ed85e63dac7da61fa9d", "score": "0.40622595", "text": "def get_termination(self, physics):", "title": "" }, { "docid": "be6ece51b9fe943d68350e318e38ce88", "score": "0.4052284", "text": "def cylindrical_sector(axis, ctr1, ctr2, t1, t2, radius,\n startingangle, sweptangle, material, averaging=''):\n\n command('cylindrical_sector', axis, ctr1, ctr2, t1, t2, radius, startingangle, sweptangle, material, averaging)", "title": "" }, { "docid": "d81d0a3e554049cf0094ca45154eeac5", "score": "0.40509227", "text": "def TangentialContact(self, contact_type=AbaqusTangentialContactType.LagrangeMultiplier,\n directionality=ISOTROPIC, friction_coefficient: typing.Union[float, typing.Iterable] = None,\n slip_rate: float = None, contact_pressure: float = None, temp: float = None,\n dependencies: int = 0, field: typing.Iterable = None, shearStressLimit: float = None):\n pass", "title": "" }, { "docid": "6d41e56b82792876bb68212ae30e840c", "score": "0.40508494", "text": "def coherence(self):\n if np.any(self.m < 50):\n simon(\n \"Number of segments used in averaging is \"\n \"significantly low. 
The result might not follow the \"\n \"expected statistical distributions.\"\n )\n c = self.unnorm_power\n p1 = self.pds1.unnorm_power\n p2 = self.pds2.unnorm_power\n\n meanrate1 = self.nphots1 / self.n / self.dt\n meanrate2 = self.nphots2 / self.n / self.dt\n\n P1noise = poisson_level(norm=\"none\", meanrate=meanrate1, n_ph=self.nphots1)\n P2noise = poisson_level(norm=\"none\", meanrate=meanrate2, n_ph=self.nphots2)\n\n coh = raw_coherence(c, p1, p2, P1noise, P2noise, self.n)\n\n # Calculate uncertainty\n uncertainty = (2**0.5 * coh * (1 - coh)) / (np.sqrt(coh) * self.m**0.5)\n\n uncertainty[coh == 0] = 0.0\n\n return (coh, uncertainty)", "title": "" }, { "docid": "e1a059ff4eebb5cd114d5fc2826b04f3", "score": "0.40457305", "text": "def extractSection(self, section, xform = True, keep = False, check = True):\n raise NotImplementedError()", "title": "" }, { "docid": "3183344ec7adba2a0e630707ac606bd2", "score": "0.40392458", "text": "def starycen(self): \n return self.pars[2::3]", "title": "" }, { "docid": "25534dc6f0e1d60e5569337027e8d5a8", "score": "0.40376878", "text": "def calculate_contact_regions(self, target, cutoff_bo=0.001,\n cutoff_el=1.e10, name=''):\n\n from ase.units import Hartree, kcal, mol\n factor = Hartree / (kcal/mol)\n contact, strengths = self.identify_contact_region(target, cutoff_bo,\n cutoff_el/factor)\n target_zone = [f for f in contact if f in target]\n environment_zone = [f for f in contact if f not in target]\n nm = name + '_' if len(name) > 0 else ''\n self.set_fragment_quantities(\n nm+'contact_regions', {f: 1.0 if f in target_zone else (\n -1.0 if f in environment_zone else 0.0)\n for f in self.fragment_names})\n self.set_fragment_quantities(nm+'contact_bond_orders', strengths)", "title": "" }, { "docid": "e8b45e43657c06c32216fa12b9d75373", "score": "0.40350166", "text": "def separate_by_section_identifier(\n\tcoord_type, coord, section_identifier, prev_section, current_section,\n\tprev_section_dynamic, current_section_dynamic):\n\tif section_identifier == prev_section.top_left_point:\n\t\tprev_section_dynamic.append(coord)\n\telif section_identifier == current_section.top_left_point:\n\t\tcurrent_section_dynamic.append(coord)\n\telse:\n\t\traise Exception(\n\t\t\t\"\"\"separate_by_section_identifier error: {}_coordinate in new \\\n\t\t\tsection. 
It is likely that the particle has moved through 3 sections in a \\\n\t\t\tsingle iteration.\"\"\".format(coord_type))\n\treturn prev_section_dynamic, current_section_dynamic", "title": "" }, { "docid": "9e59393446d3deaa85196bc6898e2495", "score": "0.40321377", "text": "def contact(self):\n \n if self.state is MSState.INFECTED:\n cellmates = self.random.sample(self.model.schedule.agents,k=1)\n t = self.model.schedule.time-self.infection_time\n if t>0:\n var_ptrans = self.get_ptrans(self.infection_course[0][t], self.model.F)\n for other in cellmates:\n # if other.state is MSState.SUSCEPTIBLE and self.infection_course[0][t] > self.model.v_threshold and self.random.random() < var_ptrans: \n if other.state is MSState.SUSCEPTIBLE and self.random.random() < var_ptrans:\n other.state = MSState.INFECTED\n other.infection_time = self.model.schedule.time\n other.infection_course = other.infect_stein(self.model.G,self.unique_id,self.infection_course[0][t],self.init_v)\n if self.unique_id == 1:\n self.model.r0 += 1\n # print(self.init_v,var_ptrans,self.infection_course[0][t],other.init_v)", "title": "" }, { "docid": "1047630c0250a258f2306aafc78c4bc9", "score": "0.40317523", "text": "def get_stiffness_matrix(self):\n\n # compute geometric parameters\n (_, _, l0, c) = self.get_geometric_properties()\n\n # extract relevant properties\n E = self.material.elastic_modulus\n A = self.section.area\n ixx = self.section.ixx\n cx = c[0]\n cy = c[1]\n\n # compute bar stiffness matrix\n k_el_bar = E * A / l0 * np.array([\n [1, 0, 0, -1, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [-1, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]\n ])\n\n # compute beam stiffness matrix\n k_el_beam = E * ixx / (l0 * l0 * l0) * np.array([\n [0, 0, 0, 0, 0, 0],\n [0, 12, 6*l0, 0, -12, 6*l0],\n [0, 6*l0, 4*l0*l0, 0, -6*l0, 2*l0*l0],\n [0, 0, 0, 0, 0, 0],\n [0, -12, -6*l0, 0, 12, -6*l0],\n [0, 6*l0, 2*l0*l0, 0, -6*l0, 4*l0*l0]\n ])\n\n k_el = k_el_bar + k_el_beam\n\n # construct rotation matrix\n T = np.array([\n [cx, cy, 0, 0, 0, 0],\n [-cy, cx, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, cx, cy, 0],\n [0, 0, 0, -cy, cx, 0],\n [0, 0, 0, 0, 0, 1]\n ])\n\n return np.matmul(np.matmul(np.transpose(T), k_el), T)", "title": "" }, { "docid": "7ffe49dc87e560b7c46b3670a75e068b", "score": "0.40284318", "text": "def north(self) -> float:\n return self._north", "title": "" }, { "docid": "d74e22526c22383acb3423a46e1a3a04", "score": "0.40260527", "text": "def GetAverageLaw(self, *args):\n return _GeomFill.GeomFill_LocationLaw_GetAverageLaw(self, *args)", "title": "" }, { "docid": "3757c449a61d703e088610fcf3c3f58e", "score": "0.40220815", "text": "def center_space(self) -> aecPoint:\n try:\n flrCenter = self.center_floor\n flrCenter.z = self.level + (self.height * 0.5)\n return flrCenter\n except Exception:\n traceback.print_exc() \n return None", "title": "" }, { "docid": "033880f7fdb9ccf70319a550c879241c", "score": "0.4018748", "text": "def required_section(self) -> int:\n return self._data['required_section']", "title": "" }, { "docid": "2301a58c6a3ee36012ef2c50266c26b7", "score": "0.40132964", "text": "def calc_concentration(self):\n conc = self.grid.at_node[\"tracer~conservative__mass_concentration\"]\n conc.fill(0.0)\n\n u_albertson = self._albertson_velocity\n\n y = self.distance_to_centerline\n x = self.distance_along_centerline\n\n conc[self.plug_flow] = 1.0\n u_albertson[self.plug_flow] = 1.0\n\n a = (\n y[self.establishing_flow]\n + 0.5 * SQRT_PI * self.CONST_ALBERTSON * x[self.establishing_flow]\n - 0.5 * 
self.river.width\n )\n b = np.clip(\n SQRT_TWO * self.CONST_ALBERTSON * x[self.establishing_flow], 0.01, None\n )\n conc[self.establishing_flow] = np.exp(-np.sqrt(a / b))\n u_albertson[self.establishing_flow] = np.exp(-((a / b) ** 2))\n\n v1 = self.river.width / (\n SQRT_PI * self.CONST_ALBERTSON * x[self.established_flow]\n )\n v2 = y[self.established_flow] / (\n SQRT_TWO * self.CONST_ALBERTSON * x[self.established_flow]\n )\n conc[self.established_flow] = np.sqrt(v1) * np.exp(-np.sqrt(v2))\n u_albertson[self.established_flow] = np.sqrt(v1) * np.exp(-(v2**2))\n\n return conc", "title": "" }, { "docid": "02d7e53e899320d20b3309c1bc8327e6", "score": "0.4012937", "text": "def salinity_srl(cond, T_90, P):\n\n A1 = 0.0000207\n A2 = -0.000000000637\n A3 = 3.989E-15\n B1 = 0.03426\n B2 = 0.0004464\n B3 = 0.4215\n B4 = -0.003107\n C0 = 0.6766097\n C1 = 0.0200564\n C2 = 0.0001104259\n C3 = -0.00000069698\n C4 = 0.0000000010031\n\n a = [ 0.008,\n -0.1692,\n 25.3851,\n 14.0941,\n -7.0261,\n 2.7081]\n\n b = [ 0.0005,\n -0.0056,\n -0.0066,\n -0.0375,\n 0.0636,\n -0.0144]\n\n\n if (cond <= 0):\n return 0\n else:\n # convert Siemens/meter to mmhos/cm\n C = cond * 10\n # Convert its-90 to its-68\n # ITS68 = ITS90 * 1.00024\n t = T_90 * 1.00024\n R = C / 42.914\n val = 1 + (B1 * t) + (B2 * t * t) + (B3 * R) + (B4 * R * t)\n if val:\n RP = 1 + ((P * (A1 + P * (A2 + P * A3))) / val)\n val = RP * (C0 + (t * (C1 + t * (C2 + t * (C3 + t * C4)))))\n\n RT = R / val\n\n if RT <= 0:\n RT = 0.000001\n\n sum1, sum2 = 0, 0\n for i in range(6):\n temp = RT ** (i / 2.)\n # sum1 += a[i] * temp\n sum1 += a[i] * temp\n # sum2 += b[i] * temp\n sum2 += b[i] * temp\n\n val = 1 + (0.0162 * (t - 15))\n if val:\n salinity = sum1 + (sum2 * (t - 15) / val)\n else:\n salinity = -99\n\n return salinity", "title": "" }, { "docid": "97acdafca9f05dba61befcc8d4c68623", "score": "0.40058246", "text": "def Centering(self,Center,GOB):\n if self.Wallnum==11:\n RecCenter=GOB.Wall[0].getCenter()\n Center=Point(RecCenter.getX(),(RecCenter.getY()+10))\n \n return Center\n elif self.Wallnum==12:\n RecCenter=GOB.Wall[1].getCenter()\n Center=Point(RecCenter.getX(),(RecCenter.getY()+10))\n \n return Center\n elif self.Wallnum==13:\n RecCenter=GOB.Wall[2].getCenter()\n Center=Point(RecCenter.getX(),(RecCenter.getY()+10))\n \n return Center\n elif self.Wallnum==21:\n RecCenter=GOB.Wall2[0].getCenter()\n Center=Point(RecCenter.getX(),(RecCenter.getY()+10))\n \n return Center\n elif self.Wallnum==22:\n RecCenter=GOB.Wall2[1].getCenter()\n Center=Point(RecCenter.getX(),(RecCenter.getY()+10))\n \n return Center\n elif self.Wallnum==23:\n RecCenter=GOB.Wall2[2].getCenter()\n Center=Point(RecCenter.getX(),(RecCenter.getY()+10))\n \n return Center\n else:\n \n return Center", "title": "" }, { "docid": "859b12002decd43e28e9c8f48cf7e102", "score": "0.3998899", "text": "def check_start_section(self, line: str):\n pattern = r\"\\\\section\\{(.+)\\}\"\n match = re.search(pattern, line)\n if (match):\n return match.groups()[0]\n else:\n return None", "title": "" }, { "docid": "7cedfb11d3534c97ffe65de943bd6360", "score": "0.3994325", "text": "def get_section(r, num_sections, tube_radius):\n\n\tsection = int(r / (tube_radius / num_sections))\n\treturn section", "title": "" }, { "docid": "3f27b14b0c006d79caa05a2877cc6761", "score": "0.3989951", "text": "def calculate_soil_hydraulic_properties(self,Sand,Clay,OrgMat,DF=1):\n\n\n # do calculations\n\n\n\n #Water content at permanent wilting point\n Pred_thWP = ( -(0.024*Sand) + (0.487*Clay) + (0.006*OrgMat)\n 
+(0.005*Sand*OrgMat) - (0.013*Clay*OrgMat)\n +(0.068*Sand*Clay) + 0.031 )\n\n th_wp = Pred_thWP+(0.14*Pred_thWP)-0.02\n\n # Water content at field capacity and saturation\n Pred_thFC = ( -(0.251*Sand) + (0.195*Clay) + (0.011*OrgMat)\n +(0.006*Sand*OrgMat) - (0.027*Clay*OrgMat)\n +(0.452*Sand*Clay) + 0.299 )\n\n PredAdj_thFC = Pred_thFC+((1.283*(np.power(Pred_thFC,2)))-(0.374*Pred_thFC)-0.015)\n\n Pred_thS33 = ( (0.278*Sand) + (0.034*Clay) + (0.022*OrgMat)\n -(0.018*Sand*OrgMat) - (0.027*Clay*OrgMat)\n -(0.584*Sand*Clay) + 0.078 )\n\n PredAdj_thS33 = Pred_thS33+((0.636*Pred_thS33)-0.107)\n Pred_thS = (PredAdj_thFC+PredAdj_thS33)+((-0.097*Sand)+0.043)\n\n pN = (1-Pred_thS)*2.65\n pDF = pN*DF\n PorosComp = (1-(pDF/2.65))-(1-(pN/2.65))\n PorosCompOM = 1-(pDF/2.65)\n\n DensAdj_thFC = PredAdj_thFC+(0.2*PorosComp)\n DensAdj_thS = PorosCompOM\n\n th_fc = DensAdj_thFC\n th_s = DensAdj_thS\n\n # Saturated hydraulic conductivity (mm/day)\n lmbda = 1/((np.log(1500)-np.log(33))/(np.log(th_fc)-np.log(th_wp)))\n Ksat = (1930*(th_s-th_fc)**(3-lmbda))*24\n\n # Water content at air dry\n th_dry = th_wp / 2\n\n #round values\n th_dry = round(10_000*th_dry)/10_000\n th_wp = round(1000*th_wp)/1000\n th_fc = round(1000*th_fc)/1000\n th_s = round(1000*th_s)/1000\n Ksat = round(10*Ksat)/10\n\n\n\n return th_wp,th_fc,th_s,Ksat", "title": "" }, { "docid": "9f69c7fe11b2ac68fd22138aadd3b389", "score": "0.39855662", "text": "def doSimpleSolver(self):\n\t\thandle_group = self.handleGroup()\n\t\thandle = handle_group.handle(0)\n\t\thandlePath = OpenMaya.MDagPath.getAPathTo(handle)\n\t\tfnHandle = OpenMayaAnim.MFnIkHandle(handlePath)\n\n\t\t# Get the position of the end_effector\n\t\tend_effector = OpenMaya.MDagPath()\n\t\tfnHandle.getEffector(end_effector)\n\t\ttran = OpenMaya.MFnTransform(end_effector)\n\t\teffector_position = tran.rotatePivot(OpenMaya.MSpace.kWorld)\n\n\t\t# Get the position of the handle\n\t\thandle_position = fnHandle.rotatePivot(OpenMaya.MSpace.kWorld)\n\n\t\t# Get the start joint position\n\t\tstart_joint = OpenMaya.MDagPath()\n\t\tfnHandle.getStartJoint(start_joint)\n\t\tstart_transform = OpenMaya.MFnTransform(start_joint)\n\t\tstart_position = start_transform.rotatePivot(OpenMaya.MSpace.kWorld)\n\n\t\t# Calculate the rotation angle\n\t\tv1 = start_position - effector_position\n\t\tv2 = start_position - handle_position\n\t\tangle = v1.angle(v2)\n\n\t\t# -------- Figure out which way to rotate --------\n\t\t#\n\t\t# define two vectors U and V as follows\n\t\t# U = EndEffector(E) - StartJoint(S)\n\t\t# N = Normal to U passing through EndEffector\n\t\t#\n\t\t# Clip handle_position to half-plane U to determine the region it\n\t\t# lies in. 
Use the region to determine the rotation direction.\n\t\t#\n\t\t# U\n\t\t# ^ Region Rotation\n\t\t# | B \n\t\t# (E)---N A C-C-W\n\t\t# A | B C-W\n\t\t# | B\n\t\t# |\n\t\t# (S)\n\t\t#\n\n\t\trot = 0.0 # Rotation about Z-axis\n\n\t\t# U and N define a half-plane to clip the handle against\n\t\tU = effector_position - start_position\n\t\tU.normalize()\n\n\t\t# Get a normal to U\n\t\tzAxis = OpenMaya.MVector(0.0, 0.0, 1.0)\n\t\tN = U ^ zAxis # Cross product\n\t\tN.normalize()\n\n\t\t# P is the handle position vector\n\t\tP = handle_position - effector_position\n\n\t\t# Determine the rotation direction\n\t\tPdotN = P[0] * N[0] + P[1] * N[1]\n\t\tif PdotN < 0:\n\t\t\trot = angle # counter-clockwise\n\t\telse:\n\t\t\trot = -1.0 * angle # clockwise\n\n\t\t# get and set the Joint Angles \n\t\tjointAngles = OpenMaya.MDoubleArray()\n\t\ttry:\n\t\t\tself._getJointAngles(jointAngles)\n\t\texcept:\n\t\t\t# getting angles failed, do nothing\n\t\t\tpass\n\t\telse:\n\t\t\tjointAngles.set(jointAngles[0] + rot, 0)\n\t\t\tself._setJointAngles(jointAngles)", "title": "" }, { "docid": "76e634a5d61bcbb39c45d034a423d507", "score": "0.39842612", "text": "def section_start(section, staff, **kwargs):\n kwargs[\"section\"] = section\n kwargs[\"staff\"] = staff\n return kwargs", "title": "" }, { "docid": "f2daddc4343ea25f8b18cf1bc9120c75", "score": "0.39837497", "text": "def get_section_repr(self):\n\n weights = []\n for bone_indx in sorted(self.__bone_weights.keys()):\n weights.append((bone_indx, self.__bone_weights[bone_indx]))\n\n clones = []\n for clone_hash in sorted(self.__clones.keys()):\n clones.append(self.__clones[clone_hash])\n\n return self.__position, weights, clones", "title": "" }, { "docid": "37f33210a42c731d918696dc189fcec9", "score": "0.39816588", "text": "def sectionCoordinates(i, j):\n return 3 * (i//3), 3 * (j//3)", "title": "" }, { "docid": "3133cf907c8141bca5143f5f823a2320", "score": "0.3981315", "text": "def centroid_space(self) -> aecPoint:\n try:\n centroid = self.centroid_floor \n return aecPoint(centroid.x, centroid.y, (self.level + (self.height * 0.5)))\n except:\n traceback.print_exc() \n return None", "title": "" }, { "docid": "58fb78039c7ab4b16af53bffbf313bea", "score": "0.39707473", "text": "def boundary (self):\n\t\tobj = copy.deepcopy(self)\n\t\tcells = obj.cells\n\t\tif obj.dim == -1: return SimplicialComplex([], [])\n\t\tvertices = obj.vertices.points\n\t\td = obj.dim\n\t\tdictos = obj.dictos\n\t\th = obj.homology\n\t\ta = array(h[d])\n\n\t\tV = array( len(a)*[1] )\n\t\tJ = a[:,0]\n\t\tI = a[:,1]\n\n\t\tA = sparse.coo_matrix((V,(I,J)), shape=(max(I)+1, max(J)+1)).tocsr()\n\n\t\t# make boundary orientation coherent --------------------------\n\n\t\tdef simplex(cell):\n\t\t\tpoint = obj.vertices.ind\n\t\t\treturn [eval(point[k])+[1.0] for k in cell]\n\t\t\n\t\tdef volume(cell):\n\t\t\treturn linalg.det(mat(simplex(cell)))\n\n\t\tdef orientation():\n\t\t\tif d == obj.rn:\t # solid complex\n\t\t\t\tout = [volume(cell) for cell in cells[-1]]\n\t\t\telse:\t\t\t\t# embedded complex\n\t\t\t\tout = [linalg.det(linalg.qr(mat(simplex(cell)))[1][:,:-1])\n\t\t\t\t\t for cell in cells[-1]] # DEBUG (choose minor with det(minor != 0\t))\n\t\t\treturn out\t\t\n\n\t\t\n\t\tboundary_indices = [i for i in range(A.shape[0]) if A[i].sum() == 1 ] \n\t\t\t\n\t\tboundary_signs = orientation()\t \n\n\t\tboundary_pairs = [(i,j) for (i,j) in a \n\t\t\tif (j in boundary_indices)] \n\n\n\t\tfacetsdict = dict([[v,k] for k,v in dictos[d-1].items()])\n\t\tcellsdict = dict([[v,k] for k,v in 
dictos[d].items()])\n\t\t\t\t\t\t\n\t\tdef invertOrientation(facet):\n\t\t\tfacet[0],facet[-1] = facet[-1],facet[0]\n\t\t\treturn facet\n\n\n\t\tfacets = [eval(facetsdict[k]) for k in boundary_indices]\n\t\tfacets = [invertOrientation(eval(facetsdict[facet])) \n\t\t\t\t\tif boundary_signs[face]<0 else eval(facetsdict[facet])\n\t\t\t\t\tfor face,facet in boundary_pairs]\n\t\t\t\t\n\t\t# remapping section -----------------------------------------------\n\t\t\n\t\tif facets != []:\n\t\t\toldinds = list(set(CAT(facets)))\n\t\t\tnewverts = PointSet([eval(obj.vertices.ind[k]) for k in oldinds])\n\t\t\tnewfacets = [[ newverts.dict[obj.vertices.ind[k]] \n\t\t\t\t\t\t\tfor k in facet] for facet in facets]\n\t\t\treturn SimplicialComplex(newverts.points, newfacets)\n\t\telse: return SimplicialComplex([], [])", "title": "" }, { "docid": "417b29feadfe2b0afca619ecafd3ba3e", "score": "0.39671686", "text": "def incenter(self):\n s = self.sides\n v = self.vertices\n A,B,C = v[0],v[1],v[2]\n a,b,c = s[1].length,s[2].length,s[0].length\n x = simplify( (a*A[0] + b*B[0] + c*C[0]) / (a+b+c) )\n y = simplify( (a*A[1] + b*B[1] + c*C[1]) / (a+b+c) )\n return Point(x, y)", "title": "" }, { "docid": "b5e4fb0d7f9d1c242a8e4f207ce2842f", "score": "0.39605057", "text": "def compute_o_c_from_s_c(self):\n if self.s_c is not None:\n if self.o_c is None:\n self.o_c = np.zeros(self.s_c.shape)\n self.o_c[1::,:] = -1 * np.diff(self.s_c,n=1,axis=0)\n self.o_c[np.diag_indices(len(self.t))] = self.i - np.diag(self.s_c) # allow for outflow in year 0 already\n return self.o_c\n else:\n # o_c already exists. Doing nothing.\n return self.o_c\n else:\n # s_c does not exist. Doing nothing\n return None", "title": "" }, { "docid": "5d311ec928b1d0dc781038fcee4d6708", "score": "0.3957711", "text": "def get_stiffness_matrix(self):\n\n # compute geometric parameters\n (_, _, l0, c) = self.get_geometric_properties()\n\n # extract relevant properties\n E = self.material.elastic_modulus\n A = self.section.area\n cx = c[0]\n cy = c[1]\n\n # construct rotation matrix\n T = np.array([\n [cx, cy, 0, 0],\n [0, 0, cx, cy]\n ])\n\n # compute bar stiffness matrix\n k = E * A / l0 * np.array([\n [1, -1],\n [-1, 1]\n ])\n\n return np.matmul(np.matmul(np.transpose(T), k), T)", "title": "" } ]
f81c994135f7b7437388af46633d6c0d
Promote im1, im2 to nearest appropriate floating point precision.
[ { "docid": "2ce479e3318c24ed1654c0ae94b655c7", "score": "0.6312333", "text": "def _as_floats(im1, im2):\n float_type = np.result_type(im1.dtype, im2.dtype, np.float32)\n im1 = np.asarray(im1, dtype=float_type)\n im2 = np.asarray(im2, dtype=float_type)\n return im1, im2", "title": "" } ]
[ { "docid": "a31b8f00695681215b37a0107c052974", "score": "0.58750874", "text": "def _normalize(op1, op2, shouldround = 0, prec = 0):\r\n # Yes, the exponent is a long, but the difference between exponents\r\n # must be an int-- otherwise you'd get a big memory problem.\r\n numdigits = int(op1.exp - op2.exp)\r\n if numdigits < 0:\r\n numdigits = -numdigits\r\n tmp = op2\r\n other = op1\r\n else:\r\n tmp = op1\r\n other = op2\r\n\r\n\r\n if shouldround and numdigits > prec + 1:\r\n # Big difference in exponents - check the adjusted exponents\r\n tmp_len = len(str(tmp.int))\r\n other_len = len(str(other.int))\r\n if numdigits > (other_len + prec + 1 - tmp_len):\r\n # If the difference in adjusted exps is > prec+1, we know\r\n # other is insignificant, so might as well put a 1 after the precision.\r\n # (since this is only for addition.) Also stops use of massive longs.\r\n\r\n extend = prec + 2 - tmp_len\r\n if extend <= 0:\r\n extend = 1\r\n tmp.int *= 10 ** extend\r\n tmp.exp -= extend\r\n other.int = 1\r\n other.exp = tmp.exp\r\n return op1, op2\r\n\r\n tmp.int *= 10 ** numdigits\r\n tmp.exp -= numdigits\r\n return op1, op2", "title": "" }, { "docid": "62a81ec966441f71446039384ccd8d2c", "score": "0.5739299", "text": "def __compare_images(self, im1, im2):\n width = min(im1.width, im2.width)\n height = min(im1.height, im2.height)\n mwidth = max(im1.width, im2.width)\n mheight = max(im1.height, im2.height)\n\n threshold2 = self.threshold**2 * mwidth * mheight # speed up\n\n rms = 0.0\n for i in xrange(width):\n for j in xrange(height):\n\tif im1.pixel(i,j)!=im2.pixel(i,j):\n\t rms += (im1.pixel(i,j)-im2.pixel(i,j) )**2\n if rms >= threshold2:\n\treturn self.threshold\n\t\n if im1.width>im2.width:\n bigger = im1\n else:\n bigger = im2\n \n for i in xrange(width, mwidth):\n for j in xrange(height):\n\trms += (bigger.pixel(i,j)-255)**2 # 255 - white\n\n return math.sqrt(rms/mwidth/mheight)", "title": "" }, { "docid": "db398d301e2532b1d969067170deacc4", "score": "0.57300484", "text": "def _normalize(op1, op2, prec = 0):\n if op1.exp < op2.exp:\n tmp = op2\n other = op1\n else:\n tmp = op1\n other = op2\n\n # Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).\n # Then adding 10**exp to tmp has the same effect (after rounding)\n # as adding any positive quantity smaller than 10**exp; similarly\n # for subtraction. So if other is smaller than 10**exp we replace\n # it with 10**exp. 
This avoids tmp.exp - other.exp getting too large.\n tmp_len = len(str(tmp.int))\n other_len = len(str(other.int))\n exp = tmp.exp + min(-1, tmp_len - prec - 2)\n if other_len + other.exp - 1 < exp:\n other.int = 1\n other.exp = exp\n\n tmp.int *= 10 ** (tmp.exp - other.exp)\n tmp.exp = other.exp\n return op1, op2", "title": "" }, { "docid": "abf0e36088c24b4233f3cdbaa0364fc1", "score": "0.5674722", "text": "def testPrecision2(self):\n self.launchSimplePrecisionTest(1.0)", "title": "" }, { "docid": "4a4304b1bb471368433f47a297c33142", "score": "0.56192464", "text": "def _normalizeUp(self, val1, val2):\n tmp = ceil(val1 / val2)\n return tmp * val2", "title": "" }, { "docid": "47b5272dd8f5547319022a6cb43057a4", "score": "0.55256575", "text": "def alignImages(im1, im2, gray=False):\n try:\n if not gray:\n # Convert images to grayscale\n im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)\n else:\n im1Gray = im1\n im2Gray = im2\n\n # Detect ORB features and compute descriptors.\n orb = cv2.ORB_create(MAX_FEATURES)\n keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)\n keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)\n\n # Match features.\n matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n matches = matcher.match(descriptors1, descriptors2, None)\n\n # Sort matches by score\n matches.sort(key=lambda x: x.distance, reverse=False)\n\n # Remove not so good matches\n numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n matches = matches[:numGoodMatches]\n\n # Draw top matches\n imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)\n cv2.imwrite(\"matches.jpg\", imMatches)\n\n # Extract location of good matches\n points1 = np.zeros((len(matches), 2), dtype=np.float32)\n points2 = np.zeros((len(matches), 2), dtype=np.float32)\n\n for i, match in enumerate(matches):\n points1[i, :] = keypoints1[match.queryIdx].pt\n points2[i, :] = keypoints2[match.trainIdx].pt\n\n # Find homography\n h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\n\n # Use homography\n if gray:\n height, width = im2.shape\n else:\n height, width, channel = im2.shape\n im1Reg = cv2.warpPerspective(im1, h, (width, height))\n\n return im1Reg, h\n except Exception:\n e = sys.exc_info()[1]\n print(e.args[0])\n return None, None", "title": "" }, { "docid": "f5e63aa17c942dedd776c2ef0184f950", "score": "0.5454601", "text": "def mult_precision(el1, el2, cdict, ldict):\r\n return min(len(cdict[el1] & cdict[el2]), len(ldict[el1] & ldict[el2])) \\\r\n / float(len(cdict[el1] & cdict[el2]))", "title": "" }, { "docid": "89a6db2738b437cc1cec9fa57ee1b734", "score": "0.5421006", "text": "def ratio_comparison(val1: float, val2: float, exp: float=1.0) -> float:\n if exp <= 0.0:\n raise ValueError(\"Parameter exp must be > 0.0, specified value is: %f\" % exp)\n\n return 1.0 - (min((abs(val2 - val1) ** exp), val1) / val1 * 1.0)", "title": "" }, { "docid": "de8fc3986137944cc7a2964e245219d9", "score": "0.53993475", "text": "def testPrecision1(self):\n self.launchSimplePrecisionTest(0.1)", "title": "" }, { "docid": "b61308e145c9099d8aa7a82cedea792f", "score": "0.5391751", "text": "def fixed_prec_div(self, other):\n\n if hasattr(self, 'fixed_precision') and hasattr(self, 'fixed_precision'):\n\n\n if self.precision < other.precision:\n out = torch.LongTensor.old_div(self.old_mul(10 ** (other.precision - self.precision)), other)\n out.precision = 0\n out.fixed_precision = True\n\n elif self.precision == 
other.precision:\n out = torch.LongTensor.old_div(self, other)\n out.precision = 0\n out.fixed_precision = True\n\n else:\n out = torch.LongTensor.old_div(self, other.old_mul(10 ** (self.precision - other.precision)))\n out.precision = 0\n out.fixed_precision = True\n\n return out\n\n elif (hasattr(self, 'fixed_precision') and not hasattr(other, 'fixed_precision')) or \\\n (not hasattr(self, 'fixed_precision') and hasattr(other, 'fixed_precision')):\n raise AttributeError(\"Tried to call fixed precision operation on non-fixed precision tensor and \"\n \"a regular tensor.\")\n else:\n raise AttributeError(\"Tried to call fixed precision operation on non-fixed precision tensors\")", "title": "" }, { "docid": "35793f20ad394abcac072fbaf7c51c82", "score": "0.5383921", "text": "def pixbypix_similarity(img1, img2):\n \n assert img1.shape == img2.shape, 'Images should be of the same size'\n \n return 1.0 - np.sum(np.absolute(np.subtract(img1, img2)))/(img1.shape[0]*img1.shape[1])", "title": "" }, { "docid": "57b69104ff2352c57aa1c8b787cbd9a2", "score": "0.5376542", "text": "def align(self, img1: np.ndarray, img2: np.ndarray) -> None:\n pass", "title": "" }, { "docid": "0cc43f1137993cbe4d7fba0dbfc4fd24", "score": "0.52840865", "text": "def _normalizeDown(self, val1, val2):\n tmp = floor(val1 / val2)\n if tmp < 1.00:\n return val1\n return tmp * val2", "title": "" }, { "docid": "efdde709e3c1bdca9b644bc4b487ed0a", "score": "0.5275946", "text": "def normalize_image(stim_in, new_min = 1, new_max = 256):\n stim = stim_in.copy()\n stim = stim - stim.min()\n stim = stim/float(stim.max())\n stim = stim * (new_max - new_min)\n stim = stim + new_min\n return stim.round()", "title": "" }, { "docid": "fd03bb1a9027785c8c1c2b3ad4bb273e", "score": "0.5230716", "text": "def fixed_prec_add(self, other):\n\n if hasattr(self, 'fixed_precision') and hasattr(self, 'fixed_precision'):\n\n if self.precision > other.precision:\n out = torch.LongTensor.old_add(self, other.old_mul(10 ** (self.precision - other.precision)))\n out.precision = self.precision\n out.fixed_precision = True\n\n elif self.precision < other.precision:\n out = torch.LongTensor.old_add(self.old_mul(10 ** (other.precision - self.precision)), other)\n out.precision = other.precision\n out.fixed_precision = True\n\n else:\n out = torch.LongTensor.old_add(self, other)\n out.precision = self.precision\n out.fixed_precision = True\n\n return out\n elif (hasattr(self, 'fixed_precision') and not hasattr(other, 'fixed_precision')) or \\\n (not hasattr(self, 'fixed_precision') and hasattr(other, 'fixed_precision')):\n raise AttributeError(\"Tried to call fixed precision operation on non-fixed precision tensor and \"\n \"a regular tensor.\")\n else:\n raise AttributeError(\"Tried to call fixed precision operation on non-fixed precision tensors\")", "title": "" }, { "docid": "4a6ae1a03b8c4e10659da44a3ea14e5b", "score": "0.5230062", "text": "def ssim_1d(img1, img2):\n mu1 = img1.mean(dim=-1).mean(dim=-1).mean(dim=-1)\n mu2 = img2.mean(dim=-1).mean(dim=-1).mean(dim=-1)\n \n mu1_sq = mu1.pow(2)\n mu2_sq = mu2.pow(2)\n\n sigma1_sq = img1.pow(2).mean(dim=-1).mean(dim=-1).mean(dim=-1) - mu1_sq\n sigma2_sq = img2.pow(2).mean(dim=-1).mean(dim=-1).mean(dim=-1) - mu2_sq\n sigma12 = (img1*img2).mean(dim=-1).mean(dim=-1).mean(dim=-1) - mu1*mu2\n sigma1 = sigma1_sq.sqrt()\n sigma2 = sigma2_sq.sqrt()\n\n C1 = 1e-2**2 \n C2 = 3e-2**2\n\n ssim_val = ((2 * mu1 * mu2 + C1)*(2 * sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))\n\n return ssim_val.mean()", 
"title": "" }, { "docid": "a31ce95f56ec80abbe6134967629bdb7", "score": "0.52253747", "text": "def precisionUnconstrained():", "title": "" }, { "docid": "a31ce95f56ec80abbe6134967629bdb7", "score": "0.52253747", "text": "def precisionUnconstrained():", "title": "" }, { "docid": "e41ce051b6d60133a8ba2b718f27a97f", "score": "0.5225036", "text": "def mediapon(a,p1,b,p2):\n p1=float(p1)\n return (a*p1+b*p2)/(p1+p2)", "title": "" }, { "docid": "5f7aec6e9883c915d125a267d6329ccc", "score": "0.5214701", "text": "def mul():\n return round(num1*num2)", "title": "" }, { "docid": "e048bde3b9365880b29ac4b848feec95", "score": "0.51766056", "text": "def fixed_prec_mul(self, other, norm_left_prec=True):\n\n if hasattr(self, 'fixed_precision') and hasattr(other, 'fixed_precision'):\n\n if self.precision + other.precision > 17:\n raise OverflowError\n\n # modify tensor to be the precision of self\n if (norm_left_prec):\n out = torch.LongTensor.old_mul(self, other).old_div(10 ** other.precision)\n out.precision = self.precision\n out.fixed_precision = True\n # modify tensor to be the precision of the other tensor\n else:\n out = torch.LongTensor.old_mul(self, other).old_div(10 ** self.precision)\n out.precision = other.precision\n out.fixed_precision = True\n\n return out\n elif (hasattr(self, 'fixed_precision') and not hasattr(other, 'fixed_precision')) or \\\n (not hasattr(self,'fixed_precision') and hasattr(other, 'fixed_precision')):\n raise AttributeError(\"Tried to call fixed precision operation on non-fixed precision tensor and \"\n \"a regular tensor.\")\n else:\n raise AttributeError(\"Tried to call fixed precision operation on non-fixed precision tensors\")", "title": "" }, { "docid": "648bec1cd20d358a875e21b2eebf1526", "score": "0.51757574", "text": "def image_compare(image_a, image_b):", "title": "" }, { "docid": "288afe8f9ad806958be0f14bbbc000a7", "score": "0.516284", "text": "def _make_comparison(self, x1, x2):\n b, s = self._hps['bandwidth'], self._hps['scale']\n scale = s ** 2\n return scale * np.exp(-1 * np.linalg.norm(x1 - x2) ** 2 / (2 * b ** 2))", "title": "" }, { "docid": "c4abcdc38631d090940359d64eea174d", "score": "0.5074651", "text": "def calculate_64float(im):\r\n im /= 256\r\n return im", "title": "" }, { "docid": "4ff4dce33e79fe6eb0d0019b857f15cf", "score": "0.5072839", "text": "def compare_images(image1: str, image2: str):\n img1 = Image.open(image1)\n img2 = Image.open(image2)\n\n # Don't compare if images are of different modes or different sizes.\n if (img1.mode != img2.mode) or (img1.size != img2.size):\n return None\n\n pairs = zip(img1.getdata(), img2.getdata())\n if len(img1.getbands()) == 1:\n # for gray-scale jpegs\n dif = sum(abs(p1 - p2) for p1, p2 in pairs)\n else:\n dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))\n\n ncomponents = img1.size[0] * img1.size[1] * 3\n return (dif / 255.0 * 100) / ncomponents # Difference (percentage)", "title": "" }, { "docid": "48793425da4cbe518d7b7fea0010b417", "score": "0.5069418", "text": "def precision(true_positives, false_positives):\n try:\n return true_positives / (true_positives + false_positives)\n except ZeroDivisionError:\n return np.nan", "title": "" }, { "docid": "8d01c90c7c6287dd24dce3424baaaea2", "score": "0.5059548", "text": "def auto_dtype(A, B):\n\n # Datatype that would be used after appplying NumPy type promotion rules\n precision = np.result_type(A.dtype, B.dtype)\n\n # Cast to float32 dtype for dtypes that are not float\n # if np.issubdtype(precision, float)==0:\n # precision = np.float32\n\n 
return precision", "title": "" }, { "docid": "39ac3d483679dee0216870d3e45ce5eb", "score": "0.5058591", "text": "def img_as_float(image, force_copy=False):\n return convert(image, np.floating, force_copy)", "title": "" }, { "docid": "cbc1d1c59b040f363a197b83aa92f0fe", "score": "0.5056507", "text": "def _unite_two_images(image_1: Image, image_2: Image) -> Image:\n w1, h1 = image_1.size\n w2, h2 = image_2.size\n resulting_image: Image = Image.new(\"RGB\", (w1 + w2, max((h1, h2))))\n resulting_image.paste(image_1, (0, 0))\n resulting_image.paste(image_2, (w1, 0))\n return resulting_image", "title": "" }, { "docid": "bf8df10e1b195ecb7844ccef243c02f4", "score": "0.5052598", "text": "def assureRatio(img):\n b, c, h, w = img.size()\n if h > w:\n main = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)\n img = main(img)\n return img", "title": "" }, { "docid": "bf8df10e1b195ecb7844ccef243c02f4", "score": "0.5052598", "text": "def assureRatio(img):\n b, c, h, w = img.size()\n if h > w:\n main = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)\n img = main(img)\n return img", "title": "" }, { "docid": "bf8df10e1b195ecb7844ccef243c02f4", "score": "0.5052598", "text": "def assureRatio(img):\n b, c, h, w = img.size()\n if h > w:\n main = nn.UpsamplingBilinear2d(size=(h, h), scale_factor=None)\n img = main(img)\n return img", "title": "" }, { "docid": "c7fedcea30ad6dedb4fb234e1644f596", "score": "0.5042733", "text": "def equal(im1, im2):\n return ImageChops.difference(im1, im2).getbbox() is None", "title": "" }, { "docid": "ff6145b14bdc9de544eac6770360de3b", "score": "0.50385743", "text": "def warp_image_from_match(im1, im2, points1, points2):\n \n val, trans = cv2.findHomography(points2, points1, cv2.RANSAC)\n return cv2.warpPerspective(im2, val, (im1.shape[1], im1.shape[0]))", "title": "" }, { "docid": "b40fac3290389f79f919d9a1bab6c15d", "score": "0.5029899", "text": "def im2double(im):\n min_val = np.min(im.ravel())\n max_val = np.max(im.ravel())\n out = (im.astype('float') - min_val) / (max_val - min_val)\n return out", "title": "" }, { "docid": "520a541823a07adb5cd5e020eb566231", "score": "0.5011206", "text": "def img_similarity(imgA: GrayImg, imgB: GrayImg, th=2) -> float:\n sim = np.subtract(imgA, imgB)\n sim = np.abs(sim)\n sim = np.trunc(sim)\n sim = np.sum(sim <= th)\n sim /= np.prod(imgA.shape)\n return sim", "title": "" }, { "docid": "6d651be2ad181a6add17a6ba35b47ac7", "score": "0.5001238", "text": "def mediapond(a,p1,b,p2):\n p1=float(p1)\n return (a*p1+b*p2)/(p1+p2)", "title": "" }, { "docid": "cd6715c1c5a807193637cc0dce49a2a7", "score": "0.49935883", "text": "def test_Evaluator(self):\n toy1_eval = Evaluator(EvalTest.toy_pickle1, EvalTest.pt_analogy_path)\n toy2_eval = Evaluator(EvalTest.toy_pickle2, EvalTest.pt_analogy_path)\n precision1, raw_score1, score1 = toy1_eval.get_metrics()\n precision2, raw_score2, score2 = toy2_eval.get_metrics()\n rounded_precision1 = round(precision1, 6)\n rounded_raw_score1 = round(raw_score1, 6)\n rounded_score1 = round(score1, 6)\n rounded_precision2 = round(precision2, 6)\n rounded_raw_score2 = round(raw_score2, 6)\n rounded_score2 = round(score2, 6)\n self.assertAlmostEqual(rounded_precision1,\n 0.000114,\n places=5,\n msg=\"precision = {}\".format(rounded_precision1))\n self.assertAlmostEqual(rounded_raw_score1,\n 1.0,\n places=3,\n msg=\"precision = {}\".format(rounded_raw_score1))\n self.assertAlmostEqual(rounded_score1,\n 1.0,\n places=3,\n msg=\"precision = {}\".format(rounded_raw_score1))\n self.assertAlmostEqual(rounded_precision2,\n 
0.023048,\n places=3,\n msg=\"precision = {}\".format(rounded_precision2))\n self.assertAlmostEqual(rounded_raw_score2,\n 0.071604,\n places=3,\n msg=\"precision = {}\".format(rounded_raw_score2))\n self.assertAlmostEqual(rounded_score2,\n 0.297778,\n places=3,\n msg=\"precision = {}\".format(rounded_score2))", "title": "" }, { "docid": "f1ce1a3fea32e19e2c8bf7e4bd15d3ee", "score": "0.49925113", "text": "def test_identity2by2_float(self):\n # Make a preference matrix and recommendation system\n pref = np.array([[1, 0],\n [0, 1]])\n qrs = QuantumRecommendation(pref, nprecision_bits=3)\n\n # Test the quantum recommendations for a user\n user = np.array([0.6, 0.8])\n\n # Test with a threshold of 0 (full rank matrix)\n prods, probs = qrs.recommend(user, threshold=0.0)\n self.assertEqual(set(prods), {0, 1})\n self.assertTrue(np.allclose(list(sorted(probs)), [0.36, 0.64], atol=1e-2))", "title": "" }, { "docid": "33ad13958cb710a6a63d2a18c57f70ed", "score": "0.498249", "text": "def parallel_dist(img_path_1, img_path_2, dist):\n # If the first parameter is None, don't do anything\n if not img_path_1:\n return 0\n img1_sitk = sitk.ReadImage(img_path_1)\n img2_sitk = sitk.ReadImage(img_path_2)\n\n resample_size = (1.5, 1.5, 1.5)\n sz_out = None\n sp_out = None\n\n if not sz_out:\n img_in_mm_space1, sz_out, sp_out = resample_image(img1_sitk, resample_size)\n else:\n img_in_mm_space1, _, _ = resample_image(img1_sitk, resample_size, sz_out, sp_out)\n\n if not sz_out:\n img_in_mm_space2, sz_out, sp_out = resample_image(img2_sitk, resample_size)\n else:\n img_in_mm_space2, _, _ = resample_image(img2_sitk, resample_size, sz_out, sp_out)\n\n img1 = sitk.GetArrayFromImage(img_in_mm_space1)\n img2 = sitk.GetArrayFromImage(img_in_mm_space2)\n try:\n assert img2.shape == img1.shape, \"Target2 and target2 should be of same shape\"\n except AssertionError:\n return 0\n\n a = img1.ravel()\n b = img2.ravel()\n\n if dist[0] == 'Correlation':\n scores = 1. - correlation(a, b)\n\n elif dist[0] == 'Dice':\n try:\n scores = 1. - avg_dice_distance(a, b, dist[1:])\n except:\n scores = 1. 
- avg_dice_distance(a, b)\n\n return scores", "title": "" }, { "docid": "7b983c4aa617f5968a33f1107be62857", "score": "0.49766368", "text": "def cmpRatio(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n work1 = subInfo1[WORK]\n work2 = subInfo2[WORK]\n return float(val1) / work1 > float(val2) / work2", "title": "" }, { "docid": "c344a89f21c6aa32c31c3b3f39221370", "score": "0.4970191", "text": "def compare_frames(im1, im2):\n\n return ((im1 - im2) ** 2).sum() / im1.size", "title": "" }, { "docid": "22906cb1fd74535f81a8d3ce9f052465", "score": "0.49518326", "text": "def promote(type1, type2):\n from flypy.compiler.typing import inference\n\n if type1 == type2:\n return type1\n elif (type(type1), type(type2)) == (inference.Method, inference.Method):\n # promote Method types\n # TODO: Bit of a hack, do this better\n func1, obj1 = type1.parameters\n func2, obj2 = type2.parameters\n result = promote(obj1, obj2)\n if result == obj1:\n return type1\n elif result == obj2:\n return type2\n else:\n raise TypeError(\"Cannot promote methods %s and %s\" % (type1, type2))\n else:\n t1, t2 = to_blaze(type1), to_blaze(type2)\n result = ds.promote(t1, t2)\n return resolve_type(result)", "title": "" }, { "docid": "3b42802f3a4a4337809cce3c954409d7", "score": "0.49509656", "text": "def symRatio(m1, m2):\n return m1*m2/(m1+m2)/(m1+m2)", "title": "" }, { "docid": "07be2b5fa2f8ff990d1c5f38ebc6b8f4", "score": "0.49431974", "text": "def find_scalar_correction(t1: TensorConvertible, t2:TensorConvertible) -> complex:\n if not isinstance(t1, np.ndarray):\n t1 = t1.to_tensor(preserve_scalar=True)\n if not isinstance(t2, np.ndarray):\n t2 = t2.to_tensor(preserve_scalar=True)\n\n epsilon = 10**-14\n for i,a in enumerate(t1.flat):\n if abs(a)>epsilon:\n if abs(t2.flat[i])<epsilon: return 0\n return a/t2.flat[i]\n\n return 0", "title": "" }, { "docid": "da545dedfce3e4510a5919cffce4b13c", "score": "0.49419412", "text": "def _compare_img(img1, img2):\n h1 = img1.histogram()\n h2 = img2.histogram()\n diff_squares = [(h1[i] - h2[i]) ** 2 for i in xrange(len(h1))]\n rms = math.sqrt(sum(diff_squares) / len(h1))\n return rms", "title": "" }, { "docid": "858e69cb8d135c104aab26dd4200d1ee", "score": "0.49379155", "text": "def adapt_x(self, img1s, img2s):\n # Normalization of images into 0,1 range\n img1s = img1s + 0.5\n img1s = tf.expand_dims(img1s, axis=1)\n img2s = img2s + 0.5\n img2s = tf.expand_dims(img2s, axis=1)\n\n x_adapt = tf.concat((img1s, img2s), axis=1)\n\n return x_adapt", "title": "" }, { "docid": "55ea8de066c15ad079cf75f58e1a6b58", "score": "0.49358866", "text": "def _ratio(self, a, b):\n if b == 0:\n return 1\n else:\n return a / b", "title": "" }, { "docid": "83f0610d2ece3ff97040dfa806f9bb8b", "score": "0.49303484", "text": "def _onetailed_to_twotailed(p1, p2):\n return np.minimum(np.minimum(p1, p2) * 2, 1)", "title": "" }, { "docid": "bd54f4b864f4b4ad978cb5a771d45da3", "score": "0.4909013", "text": "def diff_ims_error(im1, im2, p=2, per_pixel=True):\n\n r = np.abs(im1 - im2)**p\n res = r.sum()\n res = res**(1/p)\n\n if per_pixel is True:\n res = res / r.size\n\n return(res)", "title": "" }, { "docid": "4ce21d49e10df8111de8351ede801b33", "score": "0.4896638", "text": "def normalize_complete_images(images: np.ndarray) -> np.ndarray:\n min_pv = np.min(images)\n # shift pixel values to a minimum of 0\n images = images - min_pv\n # new maximum of the images\n max_pv = np.max(images)\n # the images values are set to 0-1\n return images / max_pv", "title": "" }, { "docid": 
"5ce02d4452a14efba556c96bf898bc34", "score": "0.48938563", "text": "def align_image(image1, image2):\n # convert images to grayscale\n gray_image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n gray_image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)\n \n # detect ORB features and compute descriptors\n orb = cv2.ORB_create(MAX_FEATURES)\n keypoints1, descriptors1 = orb.detectAndCompute(gray_image1, None)\n keypoints2, descriptors2 = orb.detectAndCompute(gray_image2, None)\n \n # match features\n matcher = cv2.DescriptorMatcher_create('BruteForce-Hamming')\n matches = matcher.match(descriptors1, descriptors2, None)\n\n # sort matches by score\n matches.sort(key=lambda x: x.distance, reverse=False)\n \n # remove poor matches\n num_good_matches = int(len(matches) * GOOD_MATCH_PERCENT)\n matches = matches[:num_good_matches]\n \n # draw top matches\n image_matches = cv2.drawMatches(image1, keypoints1, image2,\n keypoints2, matches, None)\n cv2.imwrite('matches.jpg', image_matches)\n \n # extract location of good matches\n points1 = np.zeros((len(matches), 2), dtype=np.float32)\n points2 = np.zeros((len(matches), 2), dtype=np.float32)\n \n for i, match in enumerate(matches):\n points1[i, :] = keypoints1[match.queryIdx].pt\n points2[i, :] = keypoints2[match.trainIdx].pt\n \n # find homography\n h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\n \n # use homography\n height, width, channels = image2.shape\n registered_image = cv2.warpPerspective(image1, h, (width, height))\n \n return registered_image, h", "title": "" }, { "docid": "5124d8b5fb441c7acf8d61b248593646", "score": "0.48867106", "text": "def similarness(i1,i2):\n ## Open and resize images to 200x200\n #i1 = Image.open(image1).resize((200,200))\n #i2 = Image.open(image2).resize((200,200))\n\n # Get histogram and seperate into RGB channels\n i1hist = numpy.array(i1.histogram()).astype('float32')\n i1r, i1b, i1g = i1hist[0:256], i1hist[256:256*2], i1hist[256*2:]\n # Re bin the histogram from 256 bins to 48 for each channel\n i1rh = numpy.array([sum(i1r[i*16:16*(i+1)]) for i in range(16)]).astype('float32')\n i1bh = numpy.array([sum(i1b[i*16:16*(i+1)]) for i in range(16)]).astype('float32')\n i1gh = numpy.array([sum(i1g[i*16:16*(i+1)]) for i in range(16)]).astype('float32')\n # Combine all the channels back into one array\n i1histbin = numpy.ravel([i1rh, i1bh, i1gh]).astype('float32')\n\n # Same steps for the second image\n i2hist = numpy.array(i2.histogram()).astype('float32')\n i2r, i2b, i2g = i2hist[0:256], i2hist[256:256*2], i2hist[256*2:]\n i2rh = numpy.array([sum(i2r[i*16:16*(i+1)]) for i in range(16)]).astype('float32')\n i2bh = numpy.array([sum(i2b[i*16:16*(i+1)]) for i in range(16)]).astype('float32')\n i2gh = numpy.array([sum(i2g[i*16:16*(i+1)]) for i in range(16)]).astype('float32')\n i2histbin = numpy.ravel([i2rh, i2bh, i2gh]).astype('float32')\n\n return cv2.compareHist(i1histbin, i2histbin, 0)", "title": "" }, { "docid": "aa1787cf061c6c9fcc9a49f7643eab7d", "score": "0.48754698", "text": "def perturbation(x, m1, m2):\n M1M2 = m2 - m1\n scale = np.inner(x, M1M2) / np.inner(M1M2, M1M2)\n return scale * M1M2 # TODO: negative?", "title": "" }, { "docid": "5774b2711ff2b6e43ceb59368ff73689", "score": "0.48678064", "text": "def image_psnr(im1, im2):\n print(f'image 1 shape {im1.shape}, image 2 shape {im2.shape}')\n #img_arr1 = np.array(im1).astype('float32')\n #img_arr2 = np.array(im2).astype('float32')\n #mse = tf.reduce_mean(tf.squared_difference(img_arr1, img_arr2))\n #psnr = tf.constant(255**2, dtype=tf.float32)/mse\n # 
result = tf.constant(10, dtype=tf.float32)*log10(psnr)\n # with tf.Session():\n # result = result.eval()\n im1 = tf.image.convert_image_dtype(im1, tf.float32)\n im2 = tf.image.convert_image_dtype(im2, tf.float32)\n psnr = tf.image.psnr(im1, im2, max_val=1.0)\n return psnr", "title": "" }, { "docid": "f6f5c3e617bf8859a03909c851596ab4", "score": "0.4865858", "text": "def compute_multiscale_ssim(image1: jnp.ndarray, image2: jnp.ndarray):\n image1 = tf.convert_to_tensor(image1)\n image2 = tf.convert_to_tensor(image2)\n return tf.image.ssim_multiscale(image1, image2, max_val=1.0)", "title": "" }, { "docid": "884e38b39fbbfdaeebed9ca05d1668b8", "score": "0.48656425", "text": "def align(im1, im2, method='correlation', ratio=0.1):\n if method == 'correlation':\n shift = find_shift(im1, im2, ratio=ratio)\n print(shift)\n shift = np.rint(shift[0])\n elif method == 'fft':\n try:\n shift = shift_fft(im1, im2)\n except RuntimeError as detail:\n print(detail)\n shift = np.array((0,0))\n else:\n raise TypeError('Unknown method: {:s}.'.format(method))\n \n shape = np.shape(im2)\n result = np.zeros(shape)\n if (shift >= 0).all():\n result[shift[0]:, shift[1]:] = im2[0:shape[0]-shift[0], 0:shape[1]-shift[1]]\n if (shift < 0).all():\n result[0:shape[0]+shift[0], 0:shape[1]+shift[1]] = im2[-shift[0]:, -shift[1]:]\n elif shift[0] < 0 and shift[1] >= 0:\n result[0:shape[0]+shift[0], shift[1]:] = im2[-shift[0]:, 0:shape[1]-shift[1]]\n elif shift[0] >= 0 and shift[1] < 0:\n result[shift[0]:, 0:shape[1]+shift[1]] = im2[0:shape[0]-shift[0], -shift[1]:]\n return result", "title": "" }, { "docid": "fadc41a903220437c7760cd7b63df5c7", "score": "0.4856345", "text": "def approx_equal(a, b, threshold=1e-5):\n return np.all(np.abs(a - b) < threshold)", "title": "" }, { "docid": "478e2e181d4c48b32970ba7331fb6ef9", "score": "0.48490298", "text": "def diff(img1, img2):\n\n imageData1 = re.sub('^data:image/.+;base64,', '', img1)\n imageData2 = re.sub('^data:image/.+;base64,', '', img2)\n\n encodedImg1 = BytesIO(base64.b64decode(imageData1))\n encodedImg2 = BytesIO(base64.b64decode(imageData2))\n\n im1 = Image.open(encodedImg1)\n im2 = Image.open(encodedImg2)\n\n # Generate diff image in memory.\n diff_img = ImageChops.difference(im1, im2)\n\n # Calculate difference as a ratio.\n stat = ImageStat.Stat(diff_img)\n # stat.mean can be [r,g,b] or [r,g,b,a].\n removed_channels = 0\n num_channels = len(stat.mean) - removed_channels\n sum_channel_values = sum(stat.mean[:num_channels])\n max_all_channels = num_channels * 255\n diff_ratio = sum_channel_values / max_all_channels\n\n return diff_ratio", "title": "" }, { "docid": "37c1c953226ecf8ded2be72177dec2a9", "score": "0.4846069", "text": "def test_case_p(self):\n self.assertEqual(0.8, self.my_d1 * self.my_i1 / self.my_i2)", "title": "" }, { "docid": "f8f980fe2c087d1e87393f89b01e6d25", "score": "0.4839289", "text": "def _ssim(img1, img2):\n\n C1 = (0.01 * 255)**2\n C2 = (0.03 * 255)**2\n\n img1 = img1.astype(np.float64)\n img2 = img2.astype(np.float64)\n kernel = cv2.getGaussianKernel(11, 1.5)\n window = np.outer(kernel, kernel.transpose())\n\n mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]\n mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]\n mu1_sq = mu1**2\n mu2_sq = mu2**2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq\n sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq\n sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) *\n (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + 
C1) *\n (sigma1_sq + sigma2_sq + C2))\n return ssim_map.mean()", "title": "" }, { "docid": "c6e25106e746440c3434ca6a32d2bdf6", "score": "0.4832083", "text": "def plomp(f1, f2):\n b1 = 3.51\n b2 = 5.75\n xstar = 0.24\n s1 = 0.0207\n s2 = 18.96\n s = np.tril(xstar / ((s1 * np.minimum(f1, f2)) + s2))\n pd = np.exp(-b1 * s * np.abs(f2 - f1)) - np.exp(-b2 * s * np.abs(f2 - f1))\n return pd", "title": "" }, { "docid": "d6e514a32b66cb464e4c690e55190707", "score": "0.48216885", "text": "def CheckSimilarity(img1: np.ndarray, img2: np.ndarray, thres=0.90):\n sim = ssim(img1, img2, data_range=img2.max() - img2.min(), multichannel=True)\n if sim >= thres:\n return True\n return False", "title": "" }, { "docid": "9ddeb19a8a940a784a41873522e9add8", "score": "0.48209465", "text": "def rmse_compare(img_a, img_b):\n \n # getting images resolution \n N,M = img_a.shape # both images have the same size\n \n # converting the matrices to int32, allowing negative difference\n img_a = img_a.astype(np.int32)\n img_b = img_b.astype(np.int32)\n \n # computing rsme\n rmse = np.sum(np.square(img_a-img_b))\n rmse = math.sqrt(rmse / (N*M))\n\n return rmse", "title": "" }, { "docid": "4c37bb86035555f24a81e5a9c85733d5", "score": "0.4816197", "text": "def matchDistortImg(im1, im2, scale=4, maxDist=40, mapBlur=30, showProgress=False):\n\n ## Determine scale and offset factors needed to match histograms\n for i in range(3):\n im1 -= im1.mean()\n im2 -= im2.mean()\n im1 /= im1.std()\n im2 /= im2.std()\n \n imws = []\n if showProgress:\n imws.append(showImg(im1, title=\"Original image 1\"))\n imws.append(showImg(im2, title=\"Original image 2\"))\n \n \n ## Scale down image to quickly find a rough displacement map\n print(\"Scaling images down for fast displacement search\")\n #im1s = downsamplend(im1, (scale,scale))\n #im2s = downsamplend(im2, (scale,scale))\n im1s = downsample(downsample(im1, scale), scale)\n imss = downsample(downsample(im2, scale), scale)\n (dispMap, goodMap) = makeDispMap(im1s, im2s, maxDist=maxDist/scale, normBlur=5.0, matchSize=10., showProgress=showProgress)\n #showImg(make3Color(r=dispMap[..., 0], g=dispMap[..., 1], b=goodMap), title=\"Rough displacement map\")\n \n \n border = 20\n ## clean up border of displacement map\n #for i in range(border-1,-1,-1):\n #dispMap[i] = dispMap[i+1]\n #dispMap[-i] = dispMap[-i-1]\n #dispMap[:,i] = dispMap[:,i+1]\n #dispMap[:,-i] = dispMap[:,-i-1]\n #showImg(make3Color(r=dispMap[..., 0], g=dispMap[..., 1], b=goodMap), title=\"Rough displacement map\")\n \n \n ## Determine range of displacements to search, exclude border pixels\n ## TODO: this should exclude regions of the image which obviously do not match, rather than just chopping out the borders.\n dmCrop = dispMap[border:-border, border:-border]\n search = [\n [scale*(dmCrop[...,0].min()-1), scale*(dmCrop[...,0].max()+1)], \n [scale*(dmCrop[...,1].min()-1), scale*(dmCrop[...,1].max()+1)]\n ]\n print(\"Finished initial search; displacement range is\", search)\n \n \n ## Generate full-size displacement map\n (dispMap2, goodMap2) = makeDispMap(im1, im2, searchRange=search, normBlur=2*scale, matchSize=5.*scale, showProgress=showProgress)\n if showProgress:\n imws.append(showImg(make3Color(r=dispMap2[..., 0], g=dispMap2[..., 1], b=goodMap2), title=\"Full displacement map\"))\n \n \n ## blur the map to make continuous\n dm2Blur = blur(dispMap2.astype(np.float32), (mapBlur, mapBlur, 0))\n if showProgress:\n imws.append(showImg(dm2Blur, title=\"blurred full disp map\"))\n \n \n ## Generate matched images\n 
print(\"Distorting image to match..\")\n im2d = scipy.geometric_transform(im2, lambda x: (x[0] + (dm2Blur[x[0], x[1], 0]), x[1] + (dm2Blur[x[0], x[1], 1])))\n \n if showProgress:\n for w in imws:\n w.hide()\n \n return im2d", "title": "" }, { "docid": "7ce6972135a70fb8bd84aed5cfa2b863", "score": "0.48096797", "text": "def compare_dresses_clear(d1, d2):\n image1 = imread(d1)\n v1 = model.predict(image1.reshape(1, *image1.shape))\n v1 = np.ravel(v1)\n\n image2 = imread(d2)\n v2 = model.predict(image2.reshape(1, *image2.shape))\n v2 = np.ravel(v2)\n\n similarityscore = get_similarity(v1, v2)\n\n return similarityscore", "title": "" }, { "docid": "79f7a507b85590d3db49e026565c5dac", "score": "0.48063517", "text": "def equal_float(a, b, decimals=None):\n if decimals is not None:\n a = round(a, decimals)\n b = round(b, decimals)\n return abs(a - b) <= (sys.float_info.epsilon * min(abs(a), abs(b)))", "title": "" }, { "docid": "9a36a1194641fd4e3830cfedf0edfe85", "score": "0.47944134", "text": "def calculate_precision_recall_all_images(\n all_prediction_boxes, all_gt_boxes, iou_threshold):\n\n tp = 0\n fp = 0\n fn = 0\n for i in range(len(all_prediction_boxes)):\n result = calculate_individual_image_result(all_prediction_boxes[i], all_gt_boxes[i], iou_threshold)\n tp += result[\"true_pos\"]\n fp += result[\"false_pos\"]\n fn += result[\"false_neg\"]\n\n precision = calculate_precision(tp,fp,fn)\n recall = calculate_recall(tp,fp,fn)\n #print(precision,recall)\n return (precision, recall)", "title": "" }, { "docid": "3f3129c27652bcd6aa90f84bbb4c188d", "score": "0.47938433", "text": "def absolute_pixel_error(img1, img2):\n return np.absolute(np.subtract(img1.astype(np.int16), img2.astype(np.int16))).max()", "title": "" }, { "docid": "e3e5c54f73807f90c028b8fd0b5aba8d", "score": "0.4786554", "text": "def compare_images(img1_ptr, img2_ptr):\r\n if img1_ptr.shape == img2_ptr.shape:\r\n return (img1_ptr == img2_ptr).all()\r\n return False", "title": "" }, { "docid": "9ad5e5b208a5c4fd7892674b297555b1", "score": "0.478353", "text": "def test_precip_small(self):\n self.precip_cube.data[:, 1, 1] = 0.0\n self.precip_cube.data[0, 1, 1] = 0.075\n expected = self.fg_cube.copy()\n # expected.data contains all ones except:\n expected.data[1, 1] = 0.625\n result = self.plugin.apply_precip(self.fg_cube, self.precip_cube)\n self.assertArrayAlmostEqual(result.data, expected.data)", "title": "" }, { "docid": "0fe3c964ca1cfcafb4c04db8d53d52dd", "score": "0.4775707", "text": "def comparing_two_pictures(image_1, image_2):\n np_im1 = np.array(image_1)[:, :, :3] # sometimes PNG files can have 4 channels, which are not needed here\n np_im2 = np.array(image_2)[:, :, :3] # sometimes PNG files can have 4 channels, which are not needed here\n\n res_im = np_im1 - np_im2\n number_of_the_same = res_im.size - np.count_nonzero(res_im)\n return number_of_the_same / res_im.size", "title": "" }, { "docid": "b49930868d5ea3c65536c10524152aed", "score": "0.47729737", "text": "def match(image1,image2):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n threshold = 0.65\r\n\r\n # array to store the matched pairs\r\n matches = []\r\n\r\n # iterate over the descriptors1\r\n for index, descriptor in enumerate(descriptors1):\r\n\r\n # calculate the angle\r\n def getAngle(des2):\r\n dotProduct = np.dot(descriptor, des2)\r\n angle = math.acos(dotProduct)\r\n return angle\r\n\r\n # create an array of angles\r\n descriptor_angles = map(getAngle, descriptors2)\r\n # Get the smallest values\r\n 
two_smallest = sorted(descriptor_angles)[:2]\r\n # get the ratio of the smallest values\r\n angles_ratio = two_smallest[0]/two_smallest[1]\r\n\r\n # check if the ratio is smaller than the threshold, if its not continue\r\n if angles_ratio >= threshold:\r\n continue\r\n\r\n # get the index of the smallest angles\r\n smallest_index = descriptor_angles.index(two_smallest[0])\r\n\r\n # get the match\r\n match = [keypoints1[index], keypoints2[smallest_index]]\r\n # add the match to the list of matches\r\n matches.append(match)\r\n\r\n #use ransac to improve the keypoints\r\n matches = RANSAC(matches)\r\n im3 = DisplayMatches(im1, im2, matches)\r\n return im3", "title": "" }, { "docid": "7cfc051a393267c9a46425009ddd9696", "score": "0.4766808", "text": "def metropolis_ratio(lnpost0, lnpost1):\n if (hasattr(lnpost0, '__iter__') and hasattr(lnpost1, '__iter__') and\n len(lnpost0) != len(lnpost1)):\n raise ValueError('lnpost0 and lnpost1 have different lenghts.')\n\n return np.minimum(lnpost1 - lnpost0, 0.0)", "title": "" }, { "docid": "b7171903ce77769cb498fa95713c1ead", "score": "0.4763534", "text": "def machine_precision():\n x = 1.0\n eps = 1.0\n while (not x + eps == x):\n eps = eps/2.0\n return 2.0*eps", "title": "" }, { "docid": "50ab00d080796b42cd69e4f3861ed30e", "score": "0.4760872", "text": "def classify_ahash(cls, image1, image2, size=(8, 8), exact=25):\n image1 = image1.resize(size).convert('L').filter(ImageFilter.BLUR)\n image1 = ImageOps.equalize(image1)\n code1 = cls.get_code(image1, size)\n image2 = image2.resize(size).convert('L').filter(ImageFilter.BLUR)\n image2 = ImageOps.equalize(image2)\n code2 = cls.get_code(image2, size)\n\n assert len(code1) == len(code2), \"error\"\n\n return cls.compare_code(code1, code2)", "title": "" }, { "docid": "15855178ea8e20d5c7b32fd33f0fbf16", "score": "0.4753725", "text": "def sanitize_coordinates(_x1, _x2, img_size: int, padding: int=0, cast: bool=True):\n _x1 = _x1 * img_size\n _x2 = _x2 * img_size\n if cast:\n _x1 = _x1.long()\n _x2 = _x2.long()\n x1 = torch.min(_x1, _x2)\n x2 = torch.max(_x1, _x2)\n x1 = torch.clamp(x1 - padding, min=0)\n x2 = torch.clamp(x2 + padding, max=img_size)\n return x1, x2", "title": "" }, { "docid": "8cd95948b8f443596db47a2e962bcbf4", "score": "0.47524875", "text": "def SetPrecision2(self, Primary=defaultNamedNotOptArg, Dual=defaultNamedNotOptArg, PrimaryTol=defaultNamedNotOptArg, DualTol=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(81, LCID, 1, (3, 0), ((3, 1), (3, 1), (3, 1), (3, 1)),Primary\n\t\t\t, Dual, PrimaryTol, DualTol)", "title": "" }, { "docid": "4680eda67be22a8d05cc13543a86ab21", "score": "0.47521424", "text": "def quintile_normalize(array_1d_1, array_1d_2):\n array_1d_1 -= np.nanmean(array_1d_1)\n array_1d_2 -= np.nanmean(array_1d_2)\n combined_distro = np.vstack((array_1d_1, array_1d_2))\n arg_sort_combined = np.argsort(combined_distro, axis=1)\n sort_combined = np.sort(combined_distro, axis=1)\n substitution_values = np.mean(sort_combined, axis=0)\n\n array_1d_1, array_1d_2 = np.vsplit(substitution_values[arg_sort_combined], 2)\n array_1d_1, array_1d_2 = (array_1d_1[0, :], array_1d_2[0, :])\n\n return array_1d_1, array_1d_2", "title": "" }, { "docid": "22b53e9e7e38fec87e0fa11107aeca8d", "score": "0.47452807", "text": "def _ssim(img1, img2):\n C1 = (0.01 * 255)**2\n C2 = (0.03 * 255)**2\n\n img1_0 = img1.astype(np.float64) # 单通道 float64\n img2_0 = img2.astype(np.float64)\n\n kernel = cv2.getGaussianKernel(11, 1.5)\n window = np.outer(kernel, kernel.transpose())\n\n mu1 = 
cv2.filter2D(img1_0, -1, window)[5:-5, 5:-5] # whats mean\n mu2 = cv2.filter2D(img2_0, -1, window)[5:-5, 5:-5]\n mu1_sq = mu1**2\n mu2_sq = mu2**2\n mu1_mu2 = mu1 * mu2\n sigma1_sq = cv2.filter2D(img1_0**2, -1, window)[5:-5, 5:-5] - mu1_sq\n sigma2_sq = cv2.filter2D(img2_0**2, -1, window)[5:-5, 5:-5] - mu2_sq\n sigma12 = cv2.filter2D(img1_0 * img2_0, -1, window)[5:-5, 5:-5] - mu1_mu2\n\n ssim_map = ((2 * mu1_mu2 + C1) *\n (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *\n (sigma1_sq + sigma2_sq + C2))\n return ssim_map.mean()", "title": "" }, { "docid": "14437c811bddb755a71afc98d193ad2e", "score": "0.47452763", "text": "def test_different_images_pass(self):\n similarity = self.processor.is_similar(self.pathForSimilarImage)\n self.assertEqual(similarity,True)", "title": "" }, { "docid": "b3e6cb74d076101ce91446e6cbc539fd", "score": "0.4737539", "text": "def drawMatches2(I1, kp1, I2, kp2, matches):\n print(\"drawing matches\")\n img1 = cv2.imread(I1, 0)\n img2 = cv2.imread(I2, 0)\n\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = len(img1)\n cols1 = len(img1[0])\n rows2 = len(img2)\n cols2 = len(img2[0])\n\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in range(0, len(kp1)):\n\n # Get the matching keypoints for each of the images\n # x - columns\n # y - rows\n if (len(kp1[mat]) == 4):\n (y1,x1,scale1,_) = (kp1[mat])\n (y2,x2,scale2,_) = (kp2[mat])\n else:\n (y1,x1,scale1) = (kp1[mat])\n (y2,x2,scale2) = (kp2[mat])\n\n\n color1 = (0,0,0)#color_scale(scale1)\n color2 = (0,0,0)#color_scale(scale2)\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 6 * int(10 / 2.0 ), color1, 2)\n cv2.circle(out, (int(x2)+cols1,int(y2)), 6 * int(10 / 2.0), color2, 2)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n\n if (scale1 != scale2):\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (125,0,125), 3)\n else:\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), color1, 3)\n\n\n # Show the image\n #cv2.imshow('Matched Features', out)\n cv2.imwrite(str(I1) + \"-advanced-matching.jpg\", out)\n #cv2.waitKey(0)\n #cv2.destroyWindow('Matched Features')\n\n # Also return the image if you'd like a copy\n return out", "title": "" }, { "docid": "88f466e37c152e5836b86b3c37ea30b1", "score": "0.4737055", "text": "def _ssim_for_multi_scale(img1,\n img2,\n max_val=255,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03):\n if img1.shape != img2.shape:\n raise RuntimeError(\n 'Input images must have the same shape (%s vs. %s).' 
%\n (img1.shape, img2.shape))\n if img1.ndim != 4:\n raise RuntimeError('Input images must have four dimensions, not %d' %\n img1.ndim)\n\n img1 = img1.astype(np.float32)\n img2 = img2.astype(np.float32)\n _, height, width, _ = img1.shape\n\n # Filter size can't be larger than height or width of images.\n size = min(filter_size, height, width)\n\n # Scale down sigma if a smaller filter size is used.\n sigma = size * filter_sigma / filter_size if filter_size else 0\n\n if filter_size:\n window = np.reshape(_f_special_gauss(size, sigma), (1, size, size, 1))\n mu1 = signal.fftconvolve(img1, window, mode='valid')\n mu2 = signal.fftconvolve(img2, window, mode='valid')\n sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')\n sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')\n sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')\n else:\n # Empty blur kernel so no need to convolve.\n mu1, mu2 = img1, img2\n sigma11 = img1 * img1\n sigma22 = img2 * img2\n sigma12 = img1 * img2\n\n mu11 = mu1 * mu1\n mu22 = mu2 * mu2\n mu12 = mu1 * mu2\n sigma11 -= mu11\n sigma22 -= mu22\n sigma12 -= mu12\n\n # Calculate intermediate values used by both ssim and cs_map.\n c1 = (k1 * max_val)**2\n c2 = (k2 * max_val)**2\n v1 = 2.0 * sigma12 + c2\n v2 = sigma11 + sigma22 + c2\n ssim = np.mean((((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2)),\n axis=(1, 2, 3)) # Return for each image individually.\n cs = np.mean(v1 / v2, axis=(1, 2, 3))\n return ssim, cs", "title": "" }, { "docid": "3bdc714f0fdc91e029633b62ba156152", "score": "0.47364783", "text": "def is_aligned(self, other, atol=2e-2):\n pix = self.coord_to_pix(other.center)\n pix_other = other.coord_to_pix(self.center)\n pix_all = np.append(pix, pix_other)\n aligned = np.allclose(np.round(pix_all) - pix_all, 0, atol=atol)\n return aligned and self.interp == other.interp", "title": "" }, { "docid": "3bdc714f0fdc91e029633b62ba156152", "score": "0.47364783", "text": "def is_aligned(self, other, atol=2e-2):\n pix = self.coord_to_pix(other.center)\n pix_other = other.coord_to_pix(self.center)\n pix_all = np.append(pix, pix_other)\n aligned = np.allclose(np.round(pix_all) - pix_all, 0, atol=atol)\n return aligned and self.interp == other.interp", "title": "" }, { "docid": "3108bbc345371f58b629b5d52b007770", "score": "0.47294623", "text": "def image_mse(img1, img2):\n assert img1.shape[0] == img2.shape[0] and img1.shape[1] == img2.shape[1]\n num_pixels = img1.shape[0] * img1.shape[1]\n summed_difference = np.sum((img1-img2)**2)\n return summed_difference / num_pixels", "title": "" }, { "docid": "3c5294200770a3e0724347298dafb521", "score": "0.4720636", "text": "def calc_precision_recall(img_results):\n true_pos = 0; false_pos = 0; false_neg = 0\n for _, res in img_results.items():\n true_pos += res['true_pos']\n false_pos += res['false_pos']\n false_neg += res['false_neg']\n\n try:\n precision = true_pos/(true_pos + false_pos)\n except ZeroDivisionError:\n precision = 0.0\n try:\n recall = true_pos/(true_pos + false_neg)\n except ZeroDivisionError:\n recall = 0.0\n\n return (precision, recall)", "title": "" }, { "docid": "0058091ecf83089e4e8d7a76a73cab19", "score": "0.47198", "text": "def test_Image_inplace_multiply():\n import time\n t1 = time.time()\n for i in xrange(ntypes):\n # First try using the dictionary-type Image init\n image1 = galsim.Image(ref_array.astype(types[i]))\n image2 = galsim.Image((2 * ref_array).astype(types[i]))\n image1 *= image2\n np.testing.assert_array_equal((2 * ref_array**2).astype(types[i]), 
image1.array,\n err_msg=\"Inplace multiply in Image class (dictionary call) does\"\n +\" not match reference for dtype = \"+str(types[i]))\n # Then try using the eval command to mimic use via ImageD, ImageF etc.\n image_init_func = eval(\"galsim.Image\"+tchar[i])\n image1 = image_init_func(ref_array.astype(types[i]))\n image2 = image_init_func((2 * ref_array).astype(types[i]))\n image1 *= image2\n np.testing.assert_array_equal((2 * ref_array**2).astype(types[i]), image1.array,\n err_msg=\"Inplace multiply in Image class does not match reference for dtype = \"\n +str(types[i]))\n for j in xrange(i): # Only multiply simpler types to this one.\n image2_init_func = eval(\"galsim.Image\"+tchar[j])\n image1 = image_init_func(ref_array.astype(types[i]))\n image2 = image2_init_func((2 * ref_array).astype(types[j]))\n image1 *= image2\n np.testing.assert_array_equal((2 * ref_array**2).astype(types[i]), image1.array,\n err_msg=\"Inplace multiply in Image class does not match reference for dtypes = \"\n +str(types[i])+\" and \"+str(types[j]))\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "title": "" }, { "docid": "0168ae24adeb10f928c62cce856e3213", "score": "0.47186404", "text": "def GetFloatingPointCorrectionResolution(self) -> \"double\":\n return _itkMattesMutualInformationImageToImageMetricv4Python.itkMattesMutualInformationImageToImageMetricv4IF2IF2_Superclass_GetFloatingPointCorrectionResolution(self)", "title": "" }, { "docid": "0202b0414484707458c831903a090e1f", "score": "0.47158992", "text": "def _normalize_pmfs(dist1, dist2):\n event_space = list(set().union(dist1.outcomes, dist2.outcomes))\n p = np.array([dist1[e] if e in dist1.outcomes else 0 for e in event_space])\n q = np.array([dist2[e] if e in dist2.outcomes else 0 for e in event_space])\n return p, q", "title": "" }, { "docid": "6bf840e82fa8d9167d2758c5cae7f071", "score": "0.47142127", "text": "def comp_precision(self, s2):\n self.s2 = s2\n self.q = self.n_samples_per_obs_node_permuted / self.s2\n self.q_diag = sp.diags(self.q).tocsc()\n self.q_inv_diag = sp.diags(1.0 / self.q).tocsc()\n self.q_inv_grad = -1.0 / self.n_samples_per_obs_node_permuted", "title": "" }, { "docid": "daf10e5752c3eb031da50265526f228e", "score": "0.4709413", "text": "def assert_same_resolution(images):\n if isinstance(images, str):\n images = [images]\n\n if len(images) == 0:\n raise Exception(\"Can't check if images are of the same \"\n \"resolution/affine. 
No image has been given\")\n\n ref = get_reference_info(images[0])\n for i in images[1:]:\n shape, aff = get_reference_info(i)\n if not (ref[0] == shape) and (ref[1] == aff).any():\n raise Exception(\"Images are not of the same resolution/affine\")", "title": "" }, { "docid": "8f6fec57f5adc4f2f68fd086730ec776", "score": "0.47093713", "text": "def test_Image_inplace_scalar_pow():\n import time\n t1 = time.time()\n for i in xrange(ntypes):\n # First try using the dictionary-type Image init\n image1 = galsim.Image((ref_array**2).astype(types[i]))\n image2 = galsim.Image(ref_array.astype(types[i]))\n image2 **= 2\n np.testing.assert_array_almost_equal(image1.array, image2.array, decimal=4,\n err_msg=\"Inplace scalar pow in Image class (dictionary \"\n +\"call) does not match reference for dtype = \"+str(types[i]))\n\n # Then try using the eval command to mimic use via ImageD, ImageF etc.\n image_init_func = eval(\"galsim.Image\"+tchar[i])\n image1 = image_init_func((ref_array.astype(types[i]))**2)\n image2 = image_init_func(ref_array.astype(types[i]))\n image2 **= 2\n np.testing.assert_array_equal(image2.array, image1.array, \n err_msg=\"Inplace scalar pow in Image class does\"\n +\" not match reference for dtype = \"+str(types[i]))\n\n # float types can also be taken to a float power\n if types[i] in [np.float32, np.float64]:\n # First try using the dictionary-type Image init\n image1 = galsim.Image(ref_array.astype(types[i]))\n image2 = galsim.Image((ref_array**(1./1.3)).astype(types[i]))\n image2 **= 1.3\n np.testing.assert_array_almost_equal(image1.array, image2.array, decimal=4,\n err_msg=\"Inplace scalar pow in Image class (dictionary \"\n +\"call) does not match reference for dtype = \"+str(types[i]))\n t2 = time.time()\n print 'time for %s = %.2f'%(funcname(),t2-t1)", "title": "" }, { "docid": "9b1de065709486373d6d1489c9f11241", "score": "0.4709276", "text": "def previous_precision_update(\n previous_precision_param, exog_precision, bounded_reg_link\n):\n if exog_precision is None:\n previous_precision = previous_precision_param\n else:\n previous_precision = bounded_reg_link.inverse_precision(\n np.dot(exog_precision, previous_precision_param)\n )\n return previous_precision", "title": "" }, { "docid": "701fb20e3dca20a706d8d675663bfd74", "score": "0.47077474", "text": "def convert_image_to_floats(self,image):\n\n if np.max(image) <= 1.0:\n return image\n else:\n return image / 255.0", "title": "" }, { "docid": "0b46bae7b838092f080442e455bf7769", "score": "0.47062522", "text": "def _sim_size(r1, r2, imsize):\n return 1.0 - (r1[\"size\"] + r2[\"size\"]) / imsize", "title": "" }, { "docid": "0e96432ebdf90eb63bfee8aa284d095e", "score": "0.4698607", "text": "def normalize_image(image, original_minval, original_maxval, target_minval,\n target_maxval):\n with tf.name_scope('NormalizeImage', values=[image]):\n original_minval = float(original_minval)\n original_maxval = float(original_maxval)\n target_minval = float(target_minval)\n target_maxval = float(target_maxval)\n image = tf.to_float(image)\n image = tf.subtract(image, original_minval)\n image = tf.multiply(image, (target_maxval - target_minval) /\n (original_maxval - original_minval))\n image = tf.add(image, target_minval)\n return image", "title": "" }, { "docid": "6f5524dc2e2fc2a0a449ea9473978063", "score": "0.46967104", "text": "def match_keypoints(kps1, kps2, desc1, desc2):\n\n # Using squared euclidian distance method to calculate the distance\n distance = cdist(desc1, desc2, 'sqeuclidean')\n\n coordinates_in_image_1 = 
np.array([kps1[point].pt for point in np.where(distance < 7300)[0]])\n coordinates_in_image_2 = np.array([kps2[point].pt for point in np.where(distance < 7300)[1]])\n\n return np.concatenate((coordinates_in_image_1, coordinates_in_image_2), axis=1)", "title": "" }, { "docid": "6bfcedc224caaed0e4eb4f51d23afa69", "score": "0.46952808", "text": "def __precisionPorcentage(self, resultingTestFeaturesMatrix, resultingTargetTestVector, classifier):\n hitCounter = 0\n totalEntries = len(resultingTargetTestVector)\n for j in range(totalEntries):\n result = classifier.predict(resultingTestFeaturesMatrix[j])\n if resultingTargetTestVector[j] == result:\n hitCounter+=1\n #Calculating porcentage with 4 decimal points\n return float('{0:.4f}'.format((hitCounter/totalEntries)*100))", "title": "" }, { "docid": "3b6fe71aea9d09deaf5c55fd5edb9673", "score": "0.46949807", "text": "def normalize_image(x):\n ma = float(x.max().cpu().data)\n mi = float(x.min().cpu().data)\n d = ma - mi if ma != mi else 1e5\n return (x - mi) / d", "title": "" } ]
05af6a2b3aebd38c9780fc80281b3661
Creates one alignment by going through a traceback path.
[ { "docid": "77995b1a764fbe875ed1c5f68ef3c08c", "score": "0.6657382", "text": "def _create_alignment(self, path):\r\n path.reverse()\r\n start = path[0]\r\n\r\n alignment_a = strings.EMPTY\r\n alignment_b = strings.EMPTY\r\n\r\n cur_char_align_1 = 0\r\n cur_char_align_2 = 0\r\n\r\n for i in range(0, len(path) - 1):\r\n if path[i + 1].x - path[i].x > 0 and path[i + 1].y - path[i].y > 0:\r\n alignment_a += self._data.sequence_a[start.y + cur_char_align_1]\r\n alignment_b += self._data.sequence_b[start.x + cur_char_align_2]\r\n cur_char_align_1 += 1\r\n cur_char_align_2 += 1\r\n elif path[i + 1].x - path[i].x > 0:\r\n alignment_a += strings.GAP\r\n alignment_b += self._data.sequence_b[start.x + cur_char_align_2]\r\n cur_char_align_2 += 1\r\n elif path[i + 1].y - path[i].y > 0:\r\n alignment_a += self._data.sequence_a[start.y + cur_char_align_1]\r\n alignment_b += strings.GAP\r\n cur_char_align_1 += 1\r\n\r\n return [alignment_a, alignment_b]", "title": "" } ]
[ { "docid": "b68da0baa939d1d9b8ef083853ff5e18", "score": "0.6136285", "text": "def doAlignment(self):\n\n seq1len = len(self.seq1)\n seq2len = len(self.seq2)\n\n # 1st subscript = sequence 1,\n # 2nd subscript = sequence 2\n scores = [ [0 for i in range(seq2len+1)] for j in range(seq1len+1) ]\n tracebk = [ [0 for i in range(seq2len+1)] for j in range(seq1len+1) ]\n \n # initialize the traceback matrix\n for i in range(1, seq1len+1):\n tracebk[i][0] = 'l'\n for j in range(1, seq2len+1):\n tracebk[0][j] = 'u'\n \n # calculate the scores for the alignment matrix and directional\n # pointers for the traceback matrix\n for i in range(1, seq1len+1):\n for j in range(1, seq2len+1):\n # calculate the maximum subscores for this position\n sdiag = scores[i-1][j-1] + self.svals[self.seq1[i-1]][self.seq2[j-1]]\n sup = scores[i][j-1] + self.gapp\n sleft = scores[i-1][j] + self.gapp\n # do not assess a penalty for end gaps\n if j == seq2len:\n sleft -= self.gapp\n if i == seq1len:\n sup -= self.gapp\n # record maximum subscore and direction\n if (sdiag >= sup) and (sdiag >= sleft):\n tracebk[i][j] = 'd'\n scores[i][j] = sdiag\n elif (sup >= sdiag) and (sup >= sleft):\n tracebk[i][j] = 'u'\n scores[i][j] = sup\n else:\n tracebk[i][j] = 'l'\n scores[i][j] = sleft\n\n self.score = scores[seq1len][seq2len]\n \n # follow the directional pointers in the traceback matrix\n # to generate an optimal alignment\n seq1a = list()\n seq2a = list()\n seq1aindex = list()\n seq2aindex = list()\n i = seq1len\n j = seq2len\n while (i > 0) or (j > 0):\n if tracebk[i][j] == 'd':\n seq1a.append(self.seq1[i-1])\n seq2a.append(self.seq2[j-1])\n seq1aindex.append(i-1)\n seq2aindex.append(j-1)\n i -= 1\n j -= 1\n elif tracebk[i][j] == 'u':\n seq1a.append('-')\n seq2a.append(self.seq2[j-1])\n seq1aindex.append(-1)\n seq2aindex.append(j-1)\n j -= 1\n else:\n seq1a.append(self.seq1[i-1])\n seq2a.append('-')\n seq1aindex.append(i-1)\n seq2aindex.append(-1)\n i -= 1\n \n seq1a.reverse()\n seq2a.reverse()\n seq1aindex.reverse()\n seq2aindex.reverse()\n self.seq1aligned = ''.join(seq1a)\n self.seq2aligned = ''.join(seq2a)\n self.seq1indexed = seq1aindex\n self.seq2indexed = seq2aindex\n\n # go through the sequence indexes and mark the gaps with (-nextbaseindex - 1)\n # so that the index lookups return a more informative value\n seq1gv = seq2gv = -1\n for cnt in range(len(self.seq1indexed)):\n if self.seq1indexed[cnt] == -1:\n self.seq1indexed[cnt] = seq1gv\n else:\n seq1gv -= 1\n if self.seq2indexed[cnt] == -1:\n self.seq2indexed[cnt] = seq2gv\n else:\n seq2gv -= 1\n\n #print self.seq1indexed", "title": "" }, { "docid": "b68da0baa939d1d9b8ef083853ff5e18", "score": "0.6136285", "text": "def doAlignment(self):\n\n seq1len = len(self.seq1)\n seq2len = len(self.seq2)\n\n # 1st subscript = sequence 1,\n # 2nd subscript = sequence 2\n scores = [ [0 for i in range(seq2len+1)] for j in range(seq1len+1) ]\n tracebk = [ [0 for i in range(seq2len+1)] for j in range(seq1len+1) ]\n \n # initialize the traceback matrix\n for i in range(1, seq1len+1):\n tracebk[i][0] = 'l'\n for j in range(1, seq2len+1):\n tracebk[0][j] = 'u'\n \n # calculate the scores for the alignment matrix and directional\n # pointers for the traceback matrix\n for i in range(1, seq1len+1):\n for j in range(1, seq2len+1):\n # calculate the maximum subscores for this position\n sdiag = scores[i-1][j-1] + self.svals[self.seq1[i-1]][self.seq2[j-1]]\n sup = scores[i][j-1] + self.gapp\n sleft = scores[i-1][j] + self.gapp\n # do not assess a penalty for end gaps\n if j == seq2len:\n 
sleft -= self.gapp\n if i == seq1len:\n sup -= self.gapp\n # record maximum subscore and direction\n if (sdiag >= sup) and (sdiag >= sleft):\n tracebk[i][j] = 'd'\n scores[i][j] = sdiag\n elif (sup >= sdiag) and (sup >= sleft):\n tracebk[i][j] = 'u'\n scores[i][j] = sup\n else:\n tracebk[i][j] = 'l'\n scores[i][j] = sleft\n\n self.score = scores[seq1len][seq2len]\n \n # follow the directional pointers in the traceback matrix\n # to generate an optimal alignment\n seq1a = list()\n seq2a = list()\n seq1aindex = list()\n seq2aindex = list()\n i = seq1len\n j = seq2len\n while (i > 0) or (j > 0):\n if tracebk[i][j] == 'd':\n seq1a.append(self.seq1[i-1])\n seq2a.append(self.seq2[j-1])\n seq1aindex.append(i-1)\n seq2aindex.append(j-1)\n i -= 1\n j -= 1\n elif tracebk[i][j] == 'u':\n seq1a.append('-')\n seq2a.append(self.seq2[j-1])\n seq1aindex.append(-1)\n seq2aindex.append(j-1)\n j -= 1\n else:\n seq1a.append(self.seq1[i-1])\n seq2a.append('-')\n seq1aindex.append(i-1)\n seq2aindex.append(-1)\n i -= 1\n \n seq1a.reverse()\n seq2a.reverse()\n seq1aindex.reverse()\n seq2aindex.reverse()\n self.seq1aligned = ''.join(seq1a)\n self.seq2aligned = ''.join(seq2a)\n self.seq1indexed = seq1aindex\n self.seq2indexed = seq2aindex\n\n # go through the sequence indexes and mark the gaps with (-nextbaseindex - 1)\n # so that the index lookups return a more informative value\n seq1gv = seq2gv = -1\n for cnt in range(len(self.seq1indexed)):\n if self.seq1indexed[cnt] == -1:\n self.seq1indexed[cnt] = seq1gv\n else:\n seq1gv -= 1\n if self.seq2indexed[cnt] == -1:\n self.seq2indexed[cnt] = seq2gv\n else:\n seq2gv -= 1\n\n #print self.seq1indexed", "title": "" }, { "docid": "e082605145a0d93d9def7730a661143e", "score": "0.60112727", "text": "def get_traceback(self):\n aligned_target = list()\n aligned_subject = list()\n\n end, diagonal, up, left = range(4)\n\n i, j = len(self.target), len(self.sequence)\n\n move = self.next_move(i, j)\n\n while move != end:\n if move == diagonal:\n aligned_target.append(self.target[i - 1])\n aligned_subject.append(self.sequence[j - 1])\n\n i -= 1\n j -= 1\n\n elif move == up:\n aligned_target.append(self.target[i - 1])\n aligned_subject.append('-')\n\n i -= 1\n\n elif move == left:\n aligned_target.append('-')\n aligned_subject.append(self.sequence[j - 1])\n\n j -= 1\n\n move = self.next_move(i, j)\n\n aligned_target = ''.join(reversed(aligned_target))\n aligned_subject = ''.join(reversed(aligned_subject))\n\n return aligned_target, aligned_subject, i, j", "title": "" }, { "docid": "afb7ed54e19bee6c29db48f12d536643", "score": "0.58765894", "text": "def do_traceback(alignments, sequenceA, sequenceB, rowColumn=None):\n \n # Pad both sequences with a leading space\n # Doing so aligns the letters of the sequence with the indices of the matrix\n sequenceA = \" \" + sequenceA\n sequenceB = \" \" + sequenceB\n assert len(sequenceA) == size(alignments, 0)\n assert len(sequenceB) == size(alignments, 1)\n \n # Get the starting indices\n if rowColumn is None or len(rowColumn) != 2:\n traceRow, traceCol = unravel_index(argmax(alignments), \n (size(alignments, 0), size(alignments, 1)))\n else:\n traceRow, traceCol = rowColumn\n \n tracedA = ''\n middle = ''\n tracedB = ''\n while alignments[traceRow, traceCol] != 0:\n paths = _calculate_costs(alignments, (traceRow, traceCol), (sequenceA[traceRow], sequenceB[traceCol]))\n path = argmax(paths)\n \n if path == 0:\n tracedA = sequenceA[traceRow] + tracedA\n tracedB = sequenceB[traceCol] + tracedB\n if sequenceA[traceRow] == sequenceB[traceCol]:\n 
middle = sequenceA[traceRow] + middle\n elif alignments[traceRow, traceCol] - alignments[traceRow - 1, traceCol - 1] > 0:\n middle = '+' + middle\n else:\n middle = ' ' + middle\n \n traceRow -= 1\n traceCol -= 1\n \n elif path == 1:\n tracedA = sequenceA[traceRow] + tracedA\n tracedB = '-' + tracedB\n middle = ' ' + middle\n \n traceRow -= 1\n \n elif path == 2:\n tracedA = '-' + tracedA\n tracedB = sequenceB[traceCol] + tracedB\n middle = ' ' + middle\n \n traceCol -= 1\n \n elif path == 3:\n print \"Impossible case\"\n sys.exit(0)\n \n return (tracedA, middle, tracedB)", "title": "" }, { "docid": "e0f6f9a37393fafe7677b16fbd658446", "score": "0.5846906", "text": "def _align(unaligned):\n aligned, group, last_title = [], [], 'TOP-LEVEL SEGMENT'\n\n for title, line in unaligned:\n\n if title != last_title:\n aligned.append((last_title, group))\n group = [line]\n else:\n group.append(line + '.')\n\n last_title = title\n \n # Edge case\n aligned.append((last_title, group))\n return aligned", "title": "" }, { "docid": "a0436fc52f1d89006600cb4140355194", "score": "0.53535104", "text": "def test_fasta_from_alignment(self):\n self.assertEqual(fasta_from_alignment({}),'')\n self.assertEqual(fasta_from_alignment(self.alignment_dict),\\\n self.fasta_with_label)\n self.assertEqual(fasta_from_alignment(self.alignment_dict,\n line_wrap=2),self.fasta_with_label_lw2)\n self.assertEqual(fasta_from_alignment(self.alignment_object),\\\n self.fasta_with_label)", "title": "" }, { "docid": "14d995ee429cbcca67c172dd247a138a", "score": "0.5319187", "text": "def setUp(self):\n self.strings = ['AAAA','CCCC','gggg','uuuu']\n self.labels = ['1st','2nd','3rd','4th']\n self.infos = [\"Dog\", \"Cat\", \"Mouse\", \"Rat\"]\n self.sequences_with_labels = map(Sequence, self.strings)\n self.sequences_with_names = map(Sequence, self.strings)\n for l,sl,sn in zip(self.labels,self.sequences_with_labels,\\\n self.sequences_with_names):\n sl.Label = l\n sn.Name = l\n self.fasta_no_label='>0\\nAAAA\\n>1\\nCCCC\\n>2\\ngggg\\n>3\\nuuuu'\n self.fasta_with_label=\\\n '>1st\\nAAAA\\n>2nd\\nCCCC\\n>3rd\\nGGGG\\n>4th\\nUUUU'\n self.fasta_with_label_lw2=\\\n '>1st\\nAA\\nAA\\n>2nd\\nCC\\nCC\\n>3rd\\nGG\\nGG\\n>4th\\nUU\\nUU'\n self.alignment_dict = {'1st':'AAAA','2nd':'CCCC','3rd':'GGGG',\n '4th':'UUUU'}\n self.alignment_object = Alignment(self.alignment_dict)\n for label, info in zip(self.labels, self.infos):\n self.alignment_object.NamedSeqs[label].Info = Info(species=info)\n self.fasta_with_label_species=\\\n '>1st:Dog\\nAAAA\\n>2nd:Cat\\nCCCC\\n>3rd:Mouse\\nGGGG\\n>4th:Rat\\nUUUU'\n self.alignment_object.RowOrder = ['1st','2nd','3rd','4th']", "title": "" }, { "docid": "7b52e2db25efab773595e51f2421787f", "score": "0.5312586", "text": "def _create_alignments(self):\r\n alignments = []\r\n\r\n for i in range(0, len(AlignmentOutputData.paths)):\r\n alignments.append(self._create_alignment(AlignmentOutputData.paths[i]))\r\n\r\n AlignmentOutputData.alignments = alignments", "title": "" }, { "docid": "271e898dfa4fa199b785a830f2f2cb3f", "score": "0.52894443", "text": "def test_build_tree_from_alignment(self):\n \n tree = build_tree_from_alignment(self.align1, RNA, False)\n \n self.assertTrue(isinstance(tree, PhyloNode))\n self.assertEqual(len(tree.tips()), 7)\n self.assertRaises(NotImplementedError, build_tree_from_alignment, \\\n self.align1, RNA, True)", "title": "" }, { "docid": "47bc631c878ca2137fc355fa9e6024a9", "score": "0.52575177", "text": "def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n\n # Find the 
higest score.\n highest_score = (float('-inf'), 0, 0)\n for x_key in range(len(alignment_matrix)):\n #print x_key\n for y_key in range(len(alignment_matrix[0])):\n if alignment_matrix[x_key][y_key] > highest_score[0]:\n highest_score = (alignment_matrix[x_key][y_key], x_key, y_key)\n \n # Start the traceback at the entry with the highest score\n score, x_key, y_key = highest_score\n\n # Initialize alignments to empty strings\n alignment_x = ''\n alignment_y = ''\n\n while x_key > 0 or y_key > 0:\n copy_matrix = alignment_matrix[x_key][y_key]\n\n if copy_matrix <= 0 or x_key == 0 or y_key == 0:\n break\n\n # Sequences are the same length\n if copy_matrix == alignment_matrix[x_key - 1][y_key - 1] + scoring_matrix[seq_x[x_key - 1]][seq_y[y_key - 1]]:\n alignment_x = seq_x[x_key - 1] + alignment_x\n alignment_y = seq_y[y_key - 1] + alignment_y\n x_key -= 1\n y_key -= 1\n elif copy_matrix == alignment_matrix[x_key - 1][y_key] + scoring_matrix[seq_x[x_key - 1]]['-']:\n alignment_x = seq_x[x_key - 1] + alignment_x\n alignment_y = '-' + alignment_y\n x_key -= 1\n else:\n alignment_x = '-' + alignment_x\n alignment_y = seq_y[y_key - 1] + alignment_y\n y_key -= 1\n\n #while x_key > 0:\n # alignment_x = seq_x[x_key - 1] + alignment_x\n # alignment_y = '-' + alignment_y\n # x_key -= 1\n #\n #while y_key > 0:\n # alignment_x = '-' + alignment_x\n # alignment_y = seq_y[y_key - 1] + alignment_y\n # y_key -= 1\n\n return (score, alignment_x, alignment_y)", "title": "" }, { "docid": "12cee8c544ee54ea6962f5682587534a", "score": "0.52291536", "text": "def make_alignment_line(strand, kmer, prob, event):\n assert strand in ['c', 't'], \"Strand must be either 'c' or 't'. strand: {}\".format(strand)\n assert isinstance(prob, float), \"probability must be a float: prob {}\".format(prob)\n assert isinstance(kmer, str), \"kmer must be a string: kmer {}\".format(kmer)\n assert isinstance(event, float), \"event must be a float: event {}\".format(event)\n entry_line = \"blank\\t0\\tblank\\tblank\\t{strand}\\t0\\t0.0\\t0.0\\t0.0\\t{kmer}\\t0.0\\t0.0\\t{prob}\\t{event}\\t0.0\\n\"\n return entry_line.format(strand=strand, kmer=kmer, prob=prob, event=event)", "title": "" }, { "docid": "f55113cab14077898d7726b0a78d3df4", "score": "0.5199806", "text": "def get_alignment(cls, a0, a1):\n pass", "title": "" }, { "docid": "1b4eab00fa89d3753a86560bafb56fe8", "score": "0.51966286", "text": "def alignSeq(parentMatrix, scoringMatrix, seq1, seq2, endGap, penalty):\n\n #Adjust matrix, delete the numbers and replace with directions\n matrix = adjustMatrix(parentMatrix)\n\n #Creation of variables\n s1, s2 = len(seq1), len(seq2)\n a1, a2 = \"\", \"\"\n x, y = 0, 0\n count= 0\n\n #Loop over the matrix for the traceback\n #The if statements are telling where to go in the matrix\n while matrix[s2-x][s1-y] != matrix[0][0]:\n if matrix[s2-x][s1-y] == \"right\":\n a1 += seq1[len(seq1)-1-y]\n a2 += \"-\"\n y = y + 1\n count += 1\n if matrix[s2-x][s1-y] == \"diagonal\":\n #print scoringMatrix[s2-x][s1-y]\n a1 += seq1[len(seq1)-1-y]\n a2 += seq2[len(seq2)-1-x]\n y = y + 1\n x = x + 1\n count += 1\n if matrix[s2-x][s1-y] == \"down\":\n #print scoringMatrix[s2-x][s1-y]\n a2 += seq2[len(seq2)-1-x]\n a1 += \"-\"\n x = x + 1\n count += 1\n scoring = scoringMatrix[s2][s1]\n\n #Get the alignment\n alignment = [a1[::-1], a2[::-1]]\n\n #Calculation on the end gaps\n #The end gaps are calculated in this function (endGapCalc)\n #The penalty is added to the alignment again for correcting for the end gaps\n #The penalty of the endgaps is added to the total 
scoring\n ends = endGapCalc(alignment)\n scoring += ends * penalty\n scoring -= ends*endGap\n\n #Printing of the output\n print \"Alignment score: \", scoring\n return alignment", "title": "" }, { "docid": "2f1fcf7c212b8349cceefdb87967affd", "score": "0.51093143", "text": "def test019_alignment(self):\n w = self.doc['WR-P-E-J-0000000001.p.1.s.3.w.10']\n a = w.annotation(folia.Alignment)\n target = next(a.resolve())\n self.assertEqual( target, self.doc['WR-P-E-J-0000000001.p.1.s.3.w.5'] )", "title": "" }, { "docid": "9b9089a2cdf56ad4e98fef8f490d0e46", "score": "0.51092285", "text": "def align_recurse(\n fastq_name, # type: str\n reads_by_length, # type: Dict[int, List[str]]\n reference, # type: str\n gap_open, # type: int\n gap_extension, # type: int\n):\n # type: (...) -> Dict[int, List[Alignment]]\n # Keep track of matrix re-used lines\n logging.info(\"FASTQ %s: Aligning reads\", fastq_name)\n alignment_start = time.time() # type: float\n alignments = defaultdict(list) # type: defaultdict[List]\n reuse = 0 # type: int\n for length, reads_list in reads_by_length.items(): # type: int, List[str]\n count, temp = 0, '' # type: int, str\n total = len(reads_list) # type: int\n for read in reads_list: # type: read\n count += 1\n # seq = summary.sequence # type: str\n # names = tuple(read.get_readid() for read in summary.reads) # type: Tuple[str]\n # fastqs = {read.get_source() for read in summary.reads} # type: Set[str]\n if not temp: # basically, first sequence to be aligned\n al_ref, al_read, score = recnw.nw_aff(\n reference,\n read,\n gap_op=gap_open,\n gap_ext=gap_extension,\n sim=-1,\n terminate=0\n ) # type: str, str, int\n aligned = Alignment(ref_align=al_ref, read_align=al_read, score=score) # type: Alignment\n aligned.unaligned = read # type: str\n aligned.source = fastq_name # type: str\n alignments[length].append(aligned)\n temp = read # type: str\n continue\n index = toolkit.sim_seq(seq1=temp, seq2=read) # type: int\n reuse += index\n if count == total: # de-allocate memory\n al_ref, al_read, score = recnw.nw_aff(\n reference,\n read,\n gap_op=gap_open,\n gap_ext=gap_extension,\n sim=index,\n terminate=1\n ) # type: str, str, int\n else:\n al_ref, al_read, score = recnw.nw_aff(\n reference,\n read,\n gap_op=gap_open,\n gap_ext=gap_extension,\n sim=index,\n terminate=0\n ) # type: str, str, int\n aligned = Alignment(ref_align=al_ref, read_align=al_read, score=score) # type: Alignment\n aligned.unaligned = read # type: str\n aligned.source = fastq_name # type: str\n alignments[length].append(aligned)\n temp = read # type: str\n logging.debug(\"FASTQ %s: Alignment took %s seconds\", fastq_name, round(time.time() - alignment_start, 3))\n return dict(alignments)", "title": "" }, { "docid": "b95ed03272f4636e22b154dea050e8d2", "score": "0.5103053", "text": "def align_template_library(MSA: str) -> None:\n import glob, os\n f = open(my_loc() + \"/data/all_trna_structure_seqs.dat\", \"w\")\n\n for pdb in glob.glob(my_loc() + \"/trnas/*.pdb\"):\n print(\"Evaluating PDB:\", pdb)\n seq = match_pdb_to_best_sequence(pdb, MSA)\n modomics_pdb_seq_str = modomics_from_pdb(pdb).sequence\n if seq is None:\n f.write(\"{}\\t{} UNALIGNED_PDB_SEQ\\n\".format(pdb, modomics_pdb_seq_str))\n continue\n f.write(\"{}\\t{} ALIGNED_TO\\n\".format(pdb, seq[0][1].sequence))\n f.write(\"{}\\t{} PDB_SEQ\\n\".format(pdb, import_dash_pattern(seq[0][1], modomics_pdb_seq_str)))\n #exit()\n #print(pdb, seq)\n #print(pdb, import_dash_pattern(seq, modomics_from_pdb(pdb)))\n f.close()", "title": "" }, { "docid": 
"9f59775bc2f206223e2cc71f2f693039", "score": "0.5089458", "text": "def alignment_state(n_possibility = 0):\n\n if self.mt_paths[k][l][n_possibility] == 0:\n return seq_align_a + self.seq_a[k], seq_align_b + self.seq_b[l], k-1, l-1\n elif self.mt_paths[k][l][n_possibility] == 1:\n return seq_align_a + \"-\", seq_align_b + self.seq_b[l], k, l-1\n elif self.mt_paths[k][l][n_possibility] == 2:\n return seq_align_a + self.seq_a[k], seq_align_b + \"-\", k-1, l", "title": "" }, { "docid": "7b2fddde370bf90edda52a48a67351f3", "score": "0.5081967", "text": "def local_alignment(self):\n first = self.first_seq.get()\n second = self.second_seq.get()\n alignments = pairwise2.align.localms(first, second, \\\n self.match_score.get(), self.mismatch_score.get(), self.gap_opening.get(), \\\n self.gap_extension.get(), one_alignment_only = True)\n text = pairwise2.format_alignment(*alignments[0])\n self.result.text.delete(1.0, END)\n self.result.text.insert(1.0, text)", "title": "" }, { "docid": "8c005872f4d2ecf2337b918cae05acdd", "score": "0.50535446", "text": "def prepare_align_txt(self):\n f_align_txt = os.path.join(self.output_dir, 'align.txt')\n with open(f_align_txt, 'w') as output:\n for i, f in enumerate(self.flist):\n command = '{}\\t{}\\t{}\\t{}\\n'.format(f, '0', '0', str(i))\n output.write(command)", "title": "" }, { "docid": "09fdcc6eb5d825651442d6c9e1624d9a", "score": "0.50228524", "text": "def test_raxml_alignment(self):\n phy_node, parsimony_phy_node, log_likelihood, total_exec \\\n = raxml_alignment(self.align1)", "title": "" }, { "docid": "2fc900cc3d9048350d0b1fc421bf30b0", "score": "0.49816957", "text": "def do_align(self):\n\t\tpass", "title": "" }, { "docid": "79a6200cd4e38b5fbabcd43cba110f40", "score": "0.4978067", "text": "def make_align_matrix(query_seq,target_seq,score_matrix):\n align_matrix = initialize(query_seq,target_seq)\n global_max_score = 0 #record start for traceback to save search cost later\n global_max_score_coordinates = (0,0)\n\n\n # iterate through query and target starting at 1 not 0, since the 0th row\n # and col are for the starting gap/start of the sequence. 
Starting gaps are\n # not penalized and the 0 score is used in traceback to know when to end,\n # so leave the 0th row and column values at their initialized values of 0.\n for r,q in enumerate(query_seq,start=1):\n for c,t in enumerate(target_seq,start=1):\n\n # Directions Key:\n # the second element of each tuple in each cell of align_matrix\n # is the traceback direction, used in the final step.`\n # 0 = match, go back diagonally \\\n # 1 = gap in query, go back left --\n # 2 = gap in target, go back up |\n # 4 = this is the default zero, do not go anywhere (will win ties)\n\n match_score = (compute_match_score(r,c,q,t,align_matrix,score_matrix), 0)\n query_gap_score = (compute_query_gap_score(r,c,q,t,align_matrix,score_matrix), 1)\n target_gap_score = (compute_target_gap_score(r,c,q,t,align_matrix,score_matrix), 2)\n\n max_score = max((0,4),match_score,query_gap_score,target_gap_score,\n key=lambda n:n[0])\n\n # update goal state record\n if max_score[0] > global_max_score:\n global_max_score = max_score[0]\n global_max_score_coordinates = (r,c)\n\n align_matrix[r][c] = max_score\n\n return align_matrix,global_max_score,global_max_score_coordinates", "title": "" }, { "docid": "1098202c9106b98e0e171d30d7b9edf3", "score": "0.49510124", "text": "def _compute_alignments(self):\r\n for y in range(1, self._tbl_len_y):\r\n cur_char_seq1 = self._data.sequence_a[y - 1]\r\n\r\n for x in range(1, self._tbl_len_x):\r\n cur_char_seq2 = self._data.sequence_b[x - 1]\r\n AlignmentOutputData.table_values[y][x] = self._value(cur_char_seq1, cur_char_seq2, x, y)", "title": "" }, { "docid": "a5a0e007383a45c8988bf84f256239c8", "score": "0.4942328", "text": "def generate_align_file(ref_coords, log_file, align_file):\n list_refcoords = text_list_to_python_list(ref_coords)\n no_of_stars = len(list_refcoords) / 2\n list_xref = list_refcoords[::2]\n list_yref = list_refcoords[1::2]\n\n data = pd.read_csv(log_file, sep='\\s+', comment='#', header=None)\n list_xsub = data[0].tolist()\n list_ysub = data[1].tolist()\n\n remove_file(align_file)\n with open(align_file, 'w') as f:\n for index in range(0, no_of_stars):\n f.write(str(list_xref[index]) + ' ' + str(list_yref[index]) + ' ' +\n str(list_xsub[index]) + ' ' + str(list_ysub[index]) + '\\n')", "title": "" }, { "docid": "cf15d187dbfe58b236e47b9d34856dba", "score": "0.49313843", "text": "def align(self):\n raise NotImplementedError", "title": "" }, { "docid": "36ab98ce261420e7bfe22649dad49b92", "score": "0.49312246", "text": "def align(self):\n source_A_hat, target_A_hat, source_feats, target_feats = self.get_elements()\n print(\"Running Multi-level embedding\")\n GAlign = self.multi_level_embed(source_A_hat, target_A_hat, source_feats, target_feats)\n print(\"Running Refinement Alignment\")\n S_GAlign = self.refinement_alignment(GAlign, source_A_hat, target_A_hat)\n return S_GAlign", "title": "" }, { "docid": "b1e1080708eb0b5f0ea9a920e3eece9d", "score": "0.49011767", "text": "def reformat_trace(self, ichr='|', mchr='.', gchr='-', schr=':'):\n # Format for the trace\n text = format(self.aln)\n\n # Extract the alignment elements.\n self.target, self.trace, self.query = text.splitlines()\n\n # Show only aligned regions for local and semiglobal alignments\n if self.param.mode in (const.LOCAL_ALIGN, const.SEMIGLOBAL_ALIGN):\n\n char = \" \" if self.param.mode == const.LOCAL_ALIGN else \"-\"\n lcount = len(self.trace) - len(self.trace.lstrip(char))\n rcount = len(self.trace) - len(self.trace.rstrip(char))\n\n if lcount:\n if self.query.startswith(char):\n self.target = 
self.target[lcount:]\n else:\n self.query = self.query[lcount:]\n\n if rcount:\n if self.query.endswith(char):\n self.target = self.target[:-rcount]\n else:\n self.query = self.query[:-rcount]\n\n # Strip off leading/trailing gaps or padding.\n self.target = self.target.strip(char)\n self.trace = self.trace.strip(char)\n self.query = self.query.strip(char)\n\n # Alignment length\n self.len = len(self.trace)\n\n # Identity.\n self.icount = self.trace.count(ichr)\n self.iperc = 100 * self.icount / self.len if self.len else 0\n\n # Similarities.\n self.scount = self.icount + self.trace.count(schr)\n self.sperc = 100 * self.scount / self.len if self.len else 0\n\n # Mismatches.\n self.mcount = self.trace.count(mchr)\n self.mperc = 100 * self.mcount / self.len if self.len else 0\n\n # Gaps.\n self.gcount = self.trace.count(gchr)\n self.gperc = 100 * self.gcount / self.len if self.len else 0\n\n # Unpack the paths.\n t_path, q_path = self.aln.aligned\n\n # Target start/end.\n self.t_start = t_path[0][0] + 1\n self.t_end = t_path[-1][-1]\n\n # Query start end.\n self.q_start = q_path[0][0] + 1\n self.q_end = q_path[-1][-1]", "title": "" }, { "docid": "ce436395f49884481f4f8b7df9eddc2d", "score": "0.4850275", "text": "def find_traceback_start(align_alg, align_params):\n\tmax_val = 0\n\tmax_loc = []\n\n\tif not align_params.global_alignment:\n\t\tfor i in range(0, len(align_params.seq_a)+1):\n\t\t\tfor j in range(0, len(align_params.seq_b)+1):\n\t\t\t\tscore = align_alg.m_matrix.get_score(i,j)\n\t\t\t\tif score > max_val:\n\t\t\t\t\tmax_val = score\n\t\t\t\t\tmax_loc = []\n\t\t\t\t\tmax_loc.append(('M',i,j))\n\t\t\t\telif fuzzy_equals(score, max_val):\n\t\t\t\t\tmax_loc.append(('M',i,j))\n\telse:\n\t\tfor i in range(0, len(align_params.seq_a)+1):\n\t\t\tscore = align_alg.m_matrix.get_score(i,len(align_params.seq_b))\n\t\t\tif score > max_val:\n\t\t\t\tmax_val = score\n\t\t\t\tmax_loc = []\n\t\t\t\tmax_loc.append(('M',i,len(align_params.seq_b)))\n\t\tfor j in range(0, len(align_params.seq_b)+1):\n\t\t\tscore = align_alg.m_matrix.get_score(len(align_params.seq_a),j)\n\t\t\tif score > max_val:\n\t\t\t\tmax_val = score\n\t\t\t\tmax_loc = []\n\t\t\t\tmax_loc.append(('M',len(align_params.seq_a),j))\n\n\treturn max_val, max_loc", "title": "" }, { "docid": "6ab0391b7287b1c3c505a5b427db763c", "score": "0.4849843", "text": "def generate_alignment() -> str:\n alignment_num = randint(3, 18)\n\n if alignment_num == 3:\n tie_breaker = randint(1, 2)\n if tie_breaker == 1:\n return 'chaotic evil'\n else:\n return 'chaotic neutral'\n elif alignment_num == 4 or alignment_num == 5:\n return 'lawful evil'\n elif 6 <= alignment_num <= 8:\n return 'neutral evil'\n elif 9 <= alignment_num <= 12:\n return 'neutral'\n elif 13 <= alignment_num <= 15:\n return 'neutral good'\n elif 16 <= alignment_num <= 17:\n tie_breaker = randint(1, 2)\n if tie_breaker == 1:\n return 'lawful good'\n else:\n return 'neutral good'\n else:\n tie_breaker = randint(1, 2)\n if tie_breaker == 1:\n return 'chaotic good'\n else:\n return 'chaotic neutral'", "title": "" }, { "docid": "16691817ceb1b16c26edea7e5f47ca3c", "score": "0.48463997", "text": "def create(bio_type):\n # Capture either all DNA or all Protein file names\n # Determine which files to load in (case-insensitive)\n if bio_type.lower() != \"dna\" and bio_type.lower() != \"protein\":\n print(\"Warning in Alignment.py: biotype \\\"\" + bio_type + \"\\\" not recongnised. 
Enter \\\"DNA\\\" or \\\"Protein\\\"\")\n return # Exception Handling\n\n directory = sf.directory(bio_type)\n sequence_filenames = [s for s in listdir(directory) if isfile(join(directory, s))]\n print(\"Alignment of \" + bio_type + \" Sequences: \" + str(sequence_filenames))\n\n # All sequences must be of the same length\n seq_lengths = sequence_lengths(sequence_filenames, bio_type, directory)\n dna_alignment(sequence_filenames, seq_lengths, directory) if bio_type.lower() == \"dna\" else protein_alignment(\n sequence_filenames, seq_lengths, directory)", "title": "" }, { "docid": "c269453261661e8371fe6fd685cd4af2", "score": "0.48275983", "text": "def traceback_rec(scoring_matrix, i,j, match,mismatch,gap, seq1,seq2, align1,align2):\n if scoring_matrix[i,j] == 0:\n return align1,align2\n else:\n all_alignments1, all_alignments2 = [],[]\n tmp_list1, tmp_list2 = [],[]\n\n if ((scoring_matrix[i-1,j-1]+match == scoring_matrix[i,j] and seq1[i-1] == seq2[j-1]) # match\n or (scoring_matrix[i-1,j-1]+mismatch == scoring_matrix[i,j] and seq1[i-1] != seq2[j-1])): # mismatch\n a1,a2 = align1.copy(),align2.copy()\n a1[-1] = seq1[i-1]+a1[-1]\n a2[-1] = seq2[j-1]+a2[-1]\n tmp_list1, tmp_list2 = traceback_rec(scoring_matrix, i-1,j-1, match,mismatch,gap, seq1,seq2, a1,a2)\n all_alignments1, all_alignments2 = all_alignments1+tmp_list1, all_alignments2+tmp_list2\n\n if scoring_matrix[i, j-1]+gap == scoring_matrix[i, j]: # gap left \n a1,a2 = align1.copy(),align2.copy()\n a1[-1] = \"-\"+a1[-1]\n a2[-1] = seq2[j-1]+a2[-1]\n tmp_list1, tmp_list2 = traceback_rec(scoring_matrix, i,j-1, match,mismatch,gap, seq1,seq2, a1,a2)\n all_alignments1, all_alignments2 = all_alignments1+tmp_list1, all_alignments2+tmp_list2\n\n if scoring_matrix[i-1, j]+gap == scoring_matrix[i, j]: # gap top\n a1,a2 = align1.copy(),align2.copy()\n a1[-1] = seq1[i-1] + a1[-1]\n a2[-1] = \"-\" + a2[-1]\n tmp_list1, tmp_list2 = traceback_rec(scoring_matrix, i-1,j, match,mismatch,gap, seq1,seq2, a1,a2)\n all_alignments1, all_alignments2 = all_alignments1+tmp_list1, all_alignments2+tmp_list2\n \n return all_alignments1, all_alignments2", "title": "" }, { "docid": "96fabf4a39a7410b28f7862cd77862db", "score": "0.4813056", "text": "def global_alignment(seq1, seq2, indel, to_print):\n scoring_matrix = AlignmentGraph(seq1, seq2, indel)\n alignment = scoring_matrix.back_trace()\n print(\"Score: \" + str(alignment[0]))\n print(\"Length: \" + str(alignment[1]))\n\n if to_print:\n print(alignment[2])\n print(alignment[3])", "title": "" }, { "docid": "154ceaabb40e89639cc3025f65e7e9cb", "score": "0.47955626", "text": "def test_alignment_str():\n seq1 = seq.NucleotideSequence(\"ACCTGA\")\n seq2 = seq.NucleotideSequence(\"TATGCT\")\n ali_str = [\"A-CCTGA----\",\n \"----T-ATGCT\"]\n trace = align.Alignment.trace_from_strings(ali_str)\n alignment = align.Alignment([seq1, seq2], trace, None)\n assert str(alignment).split(\"\\n\") == ali_str", "title": "" }, { "docid": "d54271adbd4d4d895ab86216fa8292b4", "score": "0.47919238", "text": "def align(self, sequences):\n seqs = [copy.deepcopy(s) for s in sequences]\n c = seqs[0]\n aligned = [c]\n klass = c.__class__\n with tqdm(total=len(seqs)-1) as pbar:\n for s in seqs[1:]:\n score, traceback = c.global_align_multiple_solutions(s, self.sm, self.g)\n c, s = next(c.recover_global_align_multiple_solutions(s, traceback))\n aligned = self.update_aligned_with_gaps(aligned, c)\n aligned.append(klass(s)) # add temp alignments to the list of processed\n c = self.consensus(aligned + [s], klass)\n pbar.update()\n return c, 
aligned", "title": "" }, { "docid": "20ee8b114045d92cea0a72150c2bcdb4", "score": "0.47711417", "text": "def _align(ifar_path: str, ofar_path: str, afst_path: str) -> str:\n afar_path = _mktemp(\"a.far\")\n _log_check_call(\n [\"baumwelchdecode\", ifar_path, ofar_path, afst_path, afar_path])\n _rmtemp(ifar_path)\n _rmtemp(ofar_path)\n return afar_path", "title": "" }, { "docid": "ea6c05b208a7a01a46509703d16b5521", "score": "0.47549355", "text": "def generate_streamed_alignment(self):\n debug(\"generate streamed aln\")\n if self.blacklist:\n self.remove_blacklistitem()\n debug(len(self.new_seqs))\n debug(len(self.new_seqs_otu_id))\n if len(self.new_seqs) > 0:\n self.data.write_files() # should happen before aligning in case of pruning\n if len(self.new_seqs_otu_id) > 0:\n self.write_query_seqs()\n self.align_query_seqs()\n self.place_query_seqs()\n self.data.prune_short()\n self.est_full_tree()\n self.data.tre = Tree.get(path=\"{}/RAxML_bestTree.{}\".format(self.workdir, self.date),\n schema=\"newick\",\n preserve_underscores=True,\n taxon_namespace=self.data.aln.taxon_namespace)\n self.data.write_files()\n if os.path.exists(\"{}/previous_run\".format(self.workdir)):\n prev_dir = \"{}/previous_run{}\".format(self.workdir, self.date)\n i = 0\n while os.path.exists(prev_dir):\n i += 1\n prev_dir = \"{}/previous_run{}\".format(self.workdir, self.date) + str(i)\n os.rename(\"{}/previous_run\".format(self.workdir), prev_dir)\n if self.config.gb_id_filename is not True:\n os.rename(self.blast_subdir, \"{}/previous_run\".format(self.workdir))\n if os.path.exists(\"{}/last_completed_update\".format(self.workdir)): # TODO: this and the following line are not used.\n os.rename(self.tmpfi, \"{}/last_completed_update\".format(self.workdir))\n for filename in glob.glob('{}/RAxML*'.format(self.workdir)):\n if not os.path.exists(\"{}/previous_run\".format(self.workdir)):\n os.makedirs('{}/previous_run/'.format(self.workdir))\n if self.config.gb_id_filename is not True:\n os.rename(filename, \"{}/previous_run/{}\".format(self.workdir, filename.split(\"/\")[-1]))\n else:\n os.rename(filename, \"{}/previous_run/{}\".format(self.workdir, filename.split(\"/\")[-1]))\n for filename in glob.glob('{}/papara*'.format(self.workdir)):\n os.rename(filename, \"{}/previous_run/{}\".format(self.workdir, filename.split(\"/\")[-1]))\n os.rename(\"{}/{}\".format(self.workdir, self.newseqs_file),\n \"{}/previous_run/newseqs.fasta\".format(self.workdir))\n self.data.write_labelled(label='^ot:ottTaxonName', add_gb_id=True)\n self.data.write_otus(\"otu_info\", schema='table')\n self.new_seqs = {} # Wipe for next run\n self.new_seqs_otu_id = {}\n self.repeat = 1\n else:\n if _VERBOSE:\n sys.stdout.write(\"No new sequences after filtering.\\n\")\n self.repeat = 0\n self.calculate_bootstrap()\n else:\n if _VERBOSE:\n sys.stdout.write(\"No new sequences found.\\n\")\n self.repeat = 0\n self.calculate_bootstrap()\n self.reset_markers()\n local_blast.del_blastfiles(self.workdir) # delete local blast db\n self.data.dump()\n json.dump(self.data.otu_dict, open('{}/otu_dict.json'.format(self.workdir), 'wb'))", "title": "" }, { "docid": "f278fd321a41f762667b916c5fe8399e", "score": "0.47326958", "text": "def align(offset, alignment):\n if offset % alignment == 0:\n return offset\n return offset + (alignment - (offset % alignment))", "title": "" }, { "docid": "80b6b4f284057b3e69f4e14fccb5f3bc", "score": "0.47321048", "text": "def test_alignment_mapper(test_seqrepo_access):\n return AlignmentMapper(test_seqrepo_access, TranscriptMappings(), 
UTADatabase())", "title": "" }, { "docid": "f08612bb4e64727ee5900361c8111cf6", "score": "0.4704902", "text": "def traceback(query,target,complete_align_matrix,max_score=0,max_coordinates=()):\n if max_coordinates: #if starting location not passed in, find it.\n for r in range(len(complete_align_matrix)):\n for c in range(len(complete_align_matrix[0])):\n if complete_align_matrix[r][c][0] > max_score:\n max_score = complete_align_matrix[r][c][0]\n max_coordinates = (r,c)\n\n row,col = max_coordinates\n current_cell = complete_align_matrix[row][col]\n score,direction = current_cell\n\n # Set variables for iteration\n query_align = ''\n target_align = ''\n queryChar = query[row-1]\n targetChar = target[col-1]\n\n # Walk path back through alignment matrix to a cell with score value 0\n timeout_counter = len(query)*len(target)+5 # to avoid endless loops\n while score > 0 and timeout_counter > 0:\n timeout_counter += -1\n # Build up the optimal alignment sequence, backwards\n #matrix has queryLen+1 rows and targetLen+1 cols, since both start at *\n target_align = targetChar + target_align\n query_align = queryChar + query_align\n # Determine which direction to go and then move\n if direction == 0: #match\n row,col = row-1,col-1\n queryChar = query[max(0,row-1)]\n targetChar = target[max(0,col-1)]\n if direction == 1: #gap in query\n row,col = row,col-1\n queryChar = '*'\n targetChar = target[max(0,col-1)]\n if direction == 2: #gap in target\n row,col = row-1,col\n queryChar = query[max(0,row-1)]\n targetChar = '*'\n score,direction = complete_align_matrix[row][col]\n\n # quick error handling\n if timeout_counter == 0:\n print(\"Row, Col, Location, Direction:\",row,col,current_cell,direction)\n print(\"Target:\",target)\n print(\"Query:\",query)\n print(\"Query Alignment, Target Alignment:\",query_align,target_align)\n raise RuntimeError(\"Traceback timed out, with state values shown above\")\n\n # otherwise, return the completed alignments\n return query_align,target_align,max_score", "title": "" }, { "docid": "65d40c40a884b1c8c4e2dbeeb84194dc", "score": "0.46805134", "text": "def alignCtx(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "eb1ce3cc470f5ab4a9a6a042c70d9a9d", "score": "0.46526983", "text": "def parsl_first_align(directory, spider_type):\n files = [f.strip() for f in open(f\"{directory}/filenames.txt\").readlines()]\n\n # Start the alignment processes\n print(\"starting first alignment\")\n align_futures = []\n for index, file in enumerate(files):\n align_futures.append(\n star_align(\n file, spider_type))\n # Wait for the alignment to finish\n align_futures = [a.result() for a in align_futures]\n print(\"First alignment finished\")", "title": "" }, { "docid": "0eec03f07dbf30a2b1cf10c6c90d9ed2", "score": "0.46336746", "text": "def align(self, alignment = None):\n\t\tif alignment is None:\n\t\t\tif self.processor_architecture == KatzSystemArchitecture.X64:\n\t\t\t\talignment = 8\n\t\t\telse:\n\t\t\t\talignment = 4\n\t\toffset = self.cur_pos % alignment\n\t\tif offset == 0:\n\t\t\treturn\n\t\toffset_to_aligned = (alignment - offset) % alignment\n\t\tself.read(offset_to_aligned)\n\t\treturn", "title": "" }, { "docid": "9d66430eb5625c04961c49babd1e3dd6", "score": "0.46256766", "text": "def AlignGenerator(fbam):\n samdb = dict()\n smap={'+' : 1, '-' : -1}\n samfile = pysam.Samfile(fbam, 'rb') \n for rec in samfile.fetch():\n for cont in rec.tags:\n if cont[0]=='NM':NM=cont[1]\n if cont[0]=='XS':\n orient=cont[1]\n break\n if rec.qname in samdb:\n 
samdb[rec.qname].append((samfile.getrname(rec.rname), rec.pos, smap[orient], NM))\n else:\n samdb[rec.qname]=[(samfile.getrname(rec.rname), rec.pos, smap[orient], NM)]\n samfile.close()\n bamdb = [(fid, finfo) for fid, finfo in samdb.items()]\n return bamdb", "title": "" }, { "docid": "6dc875405bb71c15108216dc72315ea1", "score": "0.4620654", "text": "def get_aligned_args(args):\n if args.aligned_dir is not None:\n if args.aligned_name is not None:\n raise ValueError(\n \"args -aligned_dir and -aligned_name should not be both set\")\n return get_aligned_dir(args.aligned_dir)\n elif args.aligned_name is not None:\n return get_aligned_name(args.aligned_name)\n else:\n raise ValueError(\n \"No aligned argument. Set -aligned_dir xor -aligned_name\")", "title": "" }, { "docid": "3e2f5a3f67f97d09df09a218337ea3a4", "score": "0.4619358", "text": "def transalignStepCmds_1(thisDir, thisParentDir, options):\n from libSimControl import which, getBranchDir, verifyDirExists, verifyFileExists\n import os\n for d in [thisDir, thisParentDir]:\n verifyDirExists(d)\n for f in [os.path.join(thisDir, 'seq.rev'), os.path.join(options.rootDir, 'seq.rev')]:\n verifyFileExists(f)\n \n DRAW_REV_BLOCK_SIZE = 10**4\n DRAW_REV_NT_PER_PIX = 10**5\n \n pipes = []\n cmds = []\n \n outname = os.path.join(thisDir, 'inter-intra.aln.rev')\n if not os.path.exists(outname):\n cmd = [which('evolver_transalign')]\n cmd.append('-in1')\n cmd.append(os.path.join(thisDir, 'inter', 'inter.aln.rev'))\n cmd.append('-in2')\n cmd.append(os.path.join(thisDir, 'intra', 'intra.aln.rev'))\n cmd.append('-out')\n cmd.append(outname + '.tmp')\n cmd.append('-log')\n cmd.append(os.path.join(thisDir, 'logs', 'transalign1.log'))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n outname = os.path.join(thisDir, 'aln.rev')\n if not os.path.exists(outname):\n # we have this check because if this is a restarted job\n # then we needn't recalculate these\n if isBranchOrRoot(thisParentDir):\n # In these cases the alignment above the branch point should not be carried\n # into the the descendant genomes. 
Alignments should only go back to the most\n # recent branch point.\n cmd = [which('ln')]\n cmd.append('-s')\n cmd.append(os.path.join(thisDir, 'inter-intra.aln.rev'))\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n else:\n cmd = [which('evolver_transalign')]\n cmd.append('-in1')\n cmd.append(os.path.join(thisParentDir, 'aln.rev'))\n cmd.append('-in2')\n cmd.append(os.path.join(thisDir, 'inter-intra.aln.rev'))\n cmd.append('-out')\n cmd.append(outname + '.tmp')\n cmd.append('-log')\n cmd.append(os.path.join(thisDir, 'logs', 'transalign2.log'))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n\n outname = os.path.join(thisDir, 'stats', 'cds_aln.cycle.rev')\n if not os.path.exists(outname):\n cmd = [which('evolver_evo')]\n cmd.append('-cdsalns')\n cmd.append(os.path.join(thisDir, 'intra', 'intra.aln.rev'))\n cmd.append('-alns')\n cmd.append(outname + '.tmp')\n cmd.append('-annots1')\n cmd.append(os.path.join(thisDir, 'inter', 'inter.outannots.gff'))\n cmd.append('-annots2')\n cmd.append(os.path.join(thisDir, 'annots.gff'))\n cmd.append('-log')\n cmd.append(os.path.join(thisDir, 'logs', 'cds_aln.cycle.log'))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n\n # draw the cycle chromosome map\n outname = os.path.join(thisDir, 'stats', 'img.cycle.cmap.pdf')\n if not os.path.exists(outname):\n cmd = [which('evolver_drawrev')]\n cmd.append('-fromrev')\n cmd.append(os.path.join(thisDir, 'inter', 'inter.aln.rev'))\n cmd.append('-tocmap')\n cmd.append(outname + '.tmp')\n cmd.append('-blocksize')\n cmd.append(str(DRAW_REV_BLOCK_SIZE))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n\n # draw the cycle dot plot\n outname = os.path.join(thisDir, 'stats', 'img.cycle.lmap.png')\n if not os.path.exists(outname):\n cmd = [which('evolver_drawrev')]\n cmd.append('-fromrev')\n cmd.append(os.path.join(thisDir, 'inter', 'inter.aln.rev'))\n cmd.append('-tolmap')\n cmd.append(outname + '.tmp')\n cmd.append('-npp')\n cmd.append(str(DRAW_REV_NT_PER_PIX))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n\n outname = os.path.join(thisDir, 'stats', 'tmpstats.branch.difflength.txt')\n if not os.path.exists(outname):\n cmd = [which('evolver_evo')]\n cmd.append('-nologcmdlineandtime')\n cmd.append('-ancstats')\n cmd.append(os.path.join(thisDir, 'aln.rev'))\n cmd.append('-log')\n cmd.append(outname + '.tmp')\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n outname = os.path.join(thisDir, 'stats', 'tmpstats.cycle.difflength.txt')\n if not os.path.exists(outname):\n cmd = [which('evolver_evo')]\n cmd.append('-nologcmdlineandtime')\n cmd.append('-ancstats')\n cmd.append(os.path.join(thisDir, 'intra', 'intra.aln.rev'))\n cmd.append('-log')\n cmd.append(outname + '.tmp')\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n # draw the branch chromosome map\n outname = os.path.join(thisDir, 'stats', 'img.branch.cmap.pdf')\n if not os.path.exists(outname):\n cmd = 
[which('evolver_drawrev')]\n cmd.append('-fromrev')\n cmd.append(os.path.join(thisDir, 'aln.rev'))\n cmd.append('-tocmap')\n cmd.append(outname + '.tmp')\n cmd.append('-blocksize')\n cmd.append(str(DRAW_REV_BLOCK_SIZE))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n # draw the branch dot plot\n outname = os.path.join(thisDir, 'stats', 'img.branch.lmap.png')\n if not os.path.exists(outname):\n cmd = [which('evolver_drawrev')]\n cmd.append('-fromrev')\n cmd.append(os.path.join(thisDir, 'aln.rev'))\n cmd.append('-tolmap')\n cmd.append(outname + '.tmp')\n cmd.append('-npp')\n cmd.append(str(DRAW_REV_NT_PER_PIX))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n outname = os.path.join(thisDir, 'stats', 'cds_aln.branch.rev')\n if not os.path.exists(outname):\n cmd = [which('evolver_evo')]\n cmd.append('-cdsalns')\n cmd.append(os.path.join(thisDir, 'aln.rev'))\n cmd.append('-alns')\n cmd.append(outname + '.tmp')\n cmd.append('-annots1')\n cmd.append(os.path.join(getBranchDir(thisDir), 'stats', 'cds_annots.gff'))\n cmd.append('-annots2')\n cmd.append(os.path.join(thisDir, 'annots.gff'))\n cmd.append('-log')\n cmd.append(os.path.join(thisDir, 'logs', 'cds_aln.branch.log'))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n outname = os.path.join(thisDir, 'stats', 'codonSubs.cycle.txt')\n if not os.path.exists(outname):\n cmd = [which('evolver_evo')]\n cmd.append('-getcodonsubs')\n cmd.append(os.path.join(thisDir, 'stats', 'cds_aln.cycle.rev'))\n cmd.append('-out')\n cmd.append(outname + '.tmp')\n cmd.append('-log')\n cmd.append(os.path.join(thisDir, 'logs', 'getCodonSubs.cycle.log'))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n outname = os.path.join(thisDir, 'stats', 'codonSubs.branch.txt')\n if not os.path.exists(outname):\n cmd = [which('evolver_evo')]\n cmd.append('-getcodonsubs')\n cmd.append(os.path.join(thisDir, 'stats', 'cds_aln.branch.rev'))\n cmd.append('-out')\n cmd.append(outname + '.tmp')\n cmd.append('-log')\n cmd.append(os.path.join(thisDir, 'logs', 'getCodonSubs.branch.log'))\n pipes.append(None)\n cmds.append(cmd)\n cmd = [which('mv')]\n cmd.append(outname + '.tmp')\n cmd.append(outname)\n pipes.append(None)\n cmds.append(cmd)\n \n return cmds, pipes", "title": "" }, { "docid": "e0b3e90f34de2fc294f6a2d25b94966e", "score": "0.46144277", "text": "def align(self, alignment = None):\n\t\tif alignment is None:\n\t\t\tif self.reader.processor_architecture == PROCESSOR_ARCHITECTURE.AMD64:\n\t\t\t\talignment = 8\n\t\t\telse:\n\t\t\t\talignment = 4\n\t\toffset = self.current_position % alignment\n\t\tif offset == 0:\n\t\t\treturn\n\t\toffset_to_aligned = (alignment - offset) % alignment\n\t\tself.seek(offset_to_aligned, 1)\n\t\treturn", "title": "" }, { "docid": "1220364a6f00303d2d6f54ca74b18be2", "score": "0.46107894", "text": "def dna_alignment(sequence_filenames, seq_lengths, directory):\n file = open('data/Alignments/dna.aln', 'w')\n\n for idx, val in enumerate(zip(seq_lengths, sequence_filenames)):\n with open(directory + val[1], 'r') as sequence:\n name = sf.output_filename(val[1]) # Genome name\n # Content\n content = 
sf.dna_sequence(sf.output_filename(val[1])) # ignore top/ description line\n content = content.strip() # sometimes there is a space at the end of a DNA sequence\n # print(\"MAX VALUE IN SEQUENCE: \" + str(max(seq_lengths)))\n # print(\"THIS VALUE IN SEQUENCE: \" + str(seq_lengths[i]))\n # print(\"difference: \" + str((max(seq_lengths) - seq_lengths[i])))\n blanks_list = ['-'] * int((max(seq_lengths) - val[0]))\n blanks = ''.join(blanks_list)\n content = content + blanks\n content = str.join('', content.splitlines())\n\n file.write(\">\" + name + \"\\n\" + content + \"\\n\") # save\n file.close()\n\n aln = open('data/Alignments/dna.aln', 'r')\n print(aln.read())", "title": "" }, { "docid": "316e83534da2b800aea279a15ff44a0a", "score": "0.46093792", "text": "def recoveringAlignments(self, t, q):\n\n print \"\\n\\nRECOVERING ALIGNMENTS\\n\"\n\n esents = \"the blue house\"\n fsents = \"a mansão azul\"\n\n tp = collections.defaultdict(Decimal)\n\n for (fs, es) in self.trainingCorpus: # only 1 example\n a = collections.defaultdict(Decimal)\n l = len(es)\n m = len(fs)\n for i in range(m):\n temp_qt = 0\n pair = \"\"\n save_i = 0\n save_j = 0\n for j in range(-1, l): # include NULL at position -1\n qt = q[(j, i, l, m)] * t[(fs[i], 'NULL' if j == -1 else es[j])]\n if temp_qt < qt:\n temp_qt = qt\n pair = 'NULL' if j == -1 else es[j] # include NULL at position -1\n save_j = j\n save_i = i\n a[(save_i, fs[i], save_j, pair, m)] = temp_qt\n tp[(fs[i], save_j, pair, m)] = temp_qt\n\n alignments = \"\"\n for (i, f, j, e, m), val in sorted(a.iteritems(), key=lambda (k, v): k[0], reverse=False):\n print(\"{}|{}, {}|{}, {}={} \".format(i+1, f, j+1, e, m, val)),\n alignments += f + '/' + e + ' '\n print\"\\n\", alignments, \"\\n\"\n\n print \"TRANSLATION PROBABILITIES\"\n for (f, j, e, m), val in tp.items():\n print(\"({}, {}|{}, m={}) = {} \".format(f, j+1, e, m, val))\n\n return tp", "title": "" }, { "docid": "ab62d977ba2089724f3d7a739526c3c6", "score": "0.46087387", "text": "def _init(self, trcback):\r\n\r\n import mako.template\r\n mods = {}\r\n rawrecords = traceback.extract_tb(trcback)\r\n new_trcback = []\r\n for filename, lineno, function, line in rawrecords:\r\n if not line:\r\n line = ''\r\n try:\r\n (line_map, template_lines) = mods[filename]\r\n except KeyError:\r\n try:\r\n info = mako.template._get_module_info(filename)\r\n module_source = info.code\r\n template_source = info.source\r\n template_filename = info.template_filename or filename\r\n except KeyError:\r\n # A normal .py file (not a Template)\r\n if not util.py3k:\r\n try:\r\n fp = open(filename, 'rb')\r\n encoding = util.parse_encoding(fp)\r\n fp.close()\r\n except IOError:\r\n encoding = None\r\n if encoding:\r\n line = line.decode(encoding)\r\n else:\r\n line = line.decode('ascii', 'replace')\r\n new_trcback.append((filename, lineno, function, line, \r\n None, None, None, None))\r\n continue\r\n\r\n template_ln = module_ln = 1\r\n line_map = {}\r\n for line in module_source.split(\"\\n\"):\r\n match = re.match(r'\\s*# SOURCE LINE (\\d+)', line)\r\n if match:\r\n template_ln = int(match.group(1))\r\n module_ln += 1\r\n line_map[module_ln] = template_ln\r\n template_lines = [line for line in\r\n template_source.split(\"\\n\")]\r\n mods[filename] = (line_map, template_lines)\r\n\r\n template_ln = line_map[lineno]\r\n if template_ln <= len(template_lines):\r\n template_line = template_lines[template_ln - 1]\r\n else:\r\n template_line = None\r\n new_trcback.append((filename, lineno, function, \r\n line, template_filename, template_ln, 
\r\n template_line, template_source))\r\n if not self.source:\r\n for l in range(len(new_trcback)-1, 0, -1):\r\n if new_trcback[l][5]:\r\n self.source = new_trcback[l][7]\r\n self.lineno = new_trcback[l][5]\r\n break\r\n else:\r\n if new_trcback:\r\n try:\r\n # A normal .py file (not a Template)\r\n fp = open(new_trcback[-1][0], 'rb')\r\n encoding = util.parse_encoding(fp)\r\n fp.seek(0)\r\n self.source = fp.read()\r\n fp.close()\r\n if encoding:\r\n self.source = self.source.decode(encoding)\r\n except IOError:\r\n self.source = ''\r\n self.lineno = new_trcback[-1][1]\r\n return new_trcback", "title": "" }, { "docid": "690f0181a9f18b0a52ef192b70d0aba7", "score": "0.45933938", "text": "def do_align(sequenceA, sequenceB):\n \n # Pad both sequences with a leading space\n # Doing so aligns the letters of the sequence with the indices of the matrix\n sequenceA = \" \" + sequenceA\n sequenceB = \" \" + sequenceB\n \n # Initialize the local alignment matrix\n alignments = zeros((len(sequenceA), len(sequenceB)))\n\n # Perform the alignment, column by column\n for a in range(1, len(sequenceA)):\n for b in range(1, len(sequenceB)):\n # Find the associated score in the BLOSUM matrix\n paths = _calculate_costs(alignments, (a, b), (sequenceA[a], sequenceB[b]))\n alignments[a, b] = max(paths)\n return alignments", "title": "" }, { "docid": "c5be4078e4923634be01c942db771f79", "score": "0.45857918", "text": "def generate(self, seed = None):\n alignment = Alignment(self.alphabet)\n \n # initialize random generator\n if seed is not None:\n random.seed(seed)\n\n records = alignment._records # hack to add SeqRecord objects to Alignment\n \n for n in xrange(self.nseq):\n seqrecord = SeqRecord(Seq(''), id='seq%d' % (n+1), description='')\n records.append(seqrecord)\n \n for pos in xrange(self.seqlen):\n # logging.debug((pos, alleles_per_site[pos])) \n \n if random.random() <= self.freqs_per_site[pos]:\n nt = self.alleles_per_site[pos][0]\n else:\n nt = self.alleles_per_site[pos][1]\n \n seqrecord.seq += nt\n logging.debug(seqrecord.seq.tostring())\n \n logging.debug([seqrecord.seq.tostring() for seqrecord in records])\n \n \n # write output. 
It could use the alignment._format function when it will\n # support ldhat files.\n output = '%s %s 1\\n' % (self.nseq, self.seqlen)\n for seq in records:\n output += seq.format('fasta')\n \n return output, alignment", "title": "" }, { "docid": "fb84f2776345efe6c2132cc1d33dff16", "score": "0.45857704", "text": "def traceback(S, D, I, A, B, gap_init_penalty, max_score_row, max_score_column):\n\n\tm = len(S)\n\tn = len(S[0])\n\n\topt_align = []\n\ti = max_score_row\n\tj = max_score_column\n\tcurrent_mat = 'S'\n\n\twhile i <= m and j <= n:\n\t\tif current_mat == 'S':\n\t\t\tif i == m - 1 or j == n - 1 or S[i][j] == 0:\n\t\t\t\tbreak\n\t\t\tif S[i][j] == D[i][j]:\n\t\t\t\tcurrent_mat = 'D'\n\t\t\t\tcontinue\n\t\t\tif S[i][j] == I[i][j]:\n\t\t\t\tcurrent_mat = 'I'\n\t\t\t\tcontinue\n\t\t\topt_align.append((A[i], B[j]))\n\t\t\ti += 1\n\t\t\tj += 1\n\t\t\t\n\t\tif current_mat == 'D':\n\t\t\topt_align.append((A[i], ' '))\n\t\t\tif i == m - 1 or D[i][j] == S[i + 1][j] - gap_init_penalty:\n\t\t\t\tcurrent_mat = 'S'\n\t\t\ti += 1\n\t\t\tcontinue\n\t\t\t\n\t\tif current_mat == 'I':\n\t\t\topt_align.append((' ', B[j]))\n\t\t\tif j == n - 1 or I[i][j] == S[i][j + 1] - gap_init_penalty:\n\t\t\t\tcurrent_mat = 'S'\n\t\t\tj += 1\n\t\t\tcontinue\n\t\t\t\n\n\trow_last = i\n\tcol_last = j\n\t\n\taligned_string_A = ''\n\taligned_string_mid = ''\n\taligned_string_B = ''\n\t\n\tfor pair in opt_align:\n\t\taligned_string_A += pair[0]\n\t\taligned_string_B += pair[1]\n\t\tif pair[0] == ' ' or pair[1] == ' ':\n\t\t\taligned_string_mid += '-'\n\t\telse:\n\t\t\taligned_string_mid += \"|\"\n\t\t\t\n\treturn opt_align", "title": "" }, { "docid": "c15a888aa6981d76d5b58aef7cd1221d", "score": "0.45851487", "text": "def _init(self, trcback):\r\n\r\n import mako.template\r\n mods = {}\r\n rawrecords = traceback.extract_tb(trcback)\r\n new_trcback = []\r\n for filename, lineno, function, line in rawrecords:\r\n if not line:\r\n line = ''\r\n try:\r\n (line_map, template_lines) = mods[filename]\r\n except KeyError:\r\n try:\r\n info = mako.template._get_module_info(filename)\r\n module_source = info.code\r\n template_source = info.source\r\n template_filename = info.template_filename or filename\r\n except KeyError:\r\n # A normal .py file (not a Template)\r\n if not util.py3k:\r\n try:\r\n fp = open(filename, 'rb')\r\n encoding = util.parse_encoding(fp)\r\n fp.close()\r\n except IOError:\r\n encoding = None\r\n if encoding:\r\n line = line.decode(encoding)\r\n else:\r\n line = line.decode('ascii', 'replace')\r\n new_trcback.append((filename, lineno, function, line, \r\n None, None, None, None))\r\n continue\r\n\r\n template_ln = module_ln = 1\r\n line_map = {}\r\n for line in module_source.split(\"\\n\"):\r\n match = re.match(r'\\s*# SOURCE LINE (\\d+)', line)\r\n if match:\r\n template_ln = int(match.group(1))\r\n else:\r\n template_ln += 1\r\n module_ln += 1\r\n line_map[module_ln] = template_ln\r\n template_lines = [line for line in\r\n template_source.split(\"\\n\")]\r\n mods[filename] = (line_map, template_lines)\r\n\r\n template_ln = line_map[lineno]\r\n if template_ln <= len(template_lines):\r\n template_line = template_lines[template_ln - 1]\r\n else:\r\n template_line = None\r\n new_trcback.append((filename, lineno, function, \r\n line, template_filename, template_ln, \r\n template_line, template_source))\r\n if not self.source:\r\n for l in range(len(new_trcback)-1, 0, -1):\r\n if new_trcback[l][5]:\r\n self.source = new_trcback[l][7]\r\n self.lineno = new_trcback[l][5]\r\n break\r\n else:\r\n if new_trcback:\r\n 
try:\r\n # A normal .py file (not a Template)\r\n fp = open(new_trcback[-1][0], 'rb')\r\n encoding = util.parse_encoding(fp)\r\n fp.seek(0)\r\n self.source = fp.read()\r\n fp.close()\r\n if encoding:\r\n self.source = self.source.decode(encoding)\r\n except IOError:\r\n self.source = ''\r\n self.lineno = new_trcback[-1][1]\r\n return new_trcback", "title": "" }, { "docid": "58b1f914025ead8a8f93a6d890e4c0d0", "score": "0.45762372", "text": "def align_protein():\n file = [file for file in os.listdir(\"./data/download/\") if \".hmm\" in file][0]\n cmd = (\"hmmalign \" \n \"./data/download/%s \"\n \"./data/download/all_protein \"\n \"> ./data/download/aligned_prot \")%file\n print \"Aligning sequences\"\n process = os.system(cmd)\n if process:\n print cmd\n raise\n print \"Reading Alignment\"\n alignment = AlignIO.read(open(\"./data/download/aligned_prot\"), \"stockholm\")\n print \"Writing Alignment\"\n write_fasta(\"./data/download/aligned_prot\", alignment)\n sys.stdout.flush()", "title": "" }, { "docid": "d013d958ad552cc67d0d76f8c6b4b821", "score": "0.45664984", "text": "def global_alignment(self):\n first = self.first_seq.get()\n second = self.second_seq.get()\n alignments = pairwise2.align.globalms(first, second, \\\n self.match_score.get(), self.mismatch_score.get(), self.gap_opening.get(), \\\n self.gap_extension.get(), one_alignment_only = True)\n text = pairwise2.format_alignment(*alignments[0])\n self.result.text.delete(1.0, END)\n self.result.text.insert(1.0, text)", "title": "" }, { "docid": "789f47234a3194970d77bfa71adf8f66", "score": "0.4562442", "text": "def generate_alignment(pair, gap, extension, matrix, normalize):\n\n align1, align2, score = align(pair[0], pair[1], gap, extension, matrix)\n\n normalization_constant = min(len(pair[0]), len(pair[1])) if normalize else 1\n\n return align1, align2, score/normalization_constant", "title": "" }, { "docid": "ce2c92e92281935c893661c1fe0d92a6", "score": "0.45574936", "text": "def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n dummy_i = len(seq_x)\n dummy_j = len(seq_y)\n align_x = ''\n align_y = ''\n \n #find the maximum value over the entire matrix\n max_score = 0\n for dummy_idx1 in range(len(seq_x)+1):\n for dummy_idx2 in range(len(seq_y)+1):\n if alignment_matrix[dummy_idx1][dummy_idx2] > max_score:\n dummy_i = dummy_idx1\n dummy_j = dummy_idx2\n max_score = alignment_matrix[dummy_idx1][dummy_idx2]\n\n # trace back\n while (dummy_i != 0 and dummy_j != 0 and alignment_matrix [dummy_i][dummy_j] != 0):\n if alignment_matrix [dummy_i][dummy_j] == alignment_matrix[dummy_i-1][dummy_j-1] + scoring_matrix[seq_x[dummy_i-1]][seq_y[dummy_j-1]]:\n align_x = seq_x[dummy_i-1] + align_x\n align_y = seq_y[dummy_j-1] + align_y\n dummy_i -= 1\n dummy_j-=1\n else:\n if alignment_matrix[dummy_i][dummy_j] == alignment_matrix[dummy_i-1][dummy_j] + scoring_matrix[seq_x[dummy_i-1]]['-']:\n align_x = seq_x[dummy_i-1] + align_x\n align_y = '-' + align_y\n dummy_i -= 1\n else:\n align_y = seq_y[dummy_j-1] + align_y\n align_x = '-' + align_x\n dummy_j -= 1\n return max_score, align_x, align_y", "title": "" }, { "docid": "abc40a718c70f59aa8ce94b114a29c9c", "score": "0.45495784", "text": "def make_traceback(exc_info, source_hint=None):\n exc_type, exc_value, tb = exc_info\n if isinstance(exc_value, TemplateSyntaxError):\n exc_info = translate_syntax_error(exc_value, source_hint)\n initial_skip = 0\n else:\n initial_skip = 1\n return translate_exception(exc_info, initial_skip)", "title": "" }, { "docid": 
"0f0ea3babc891169994fa54f12b74e22", "score": "0.45460954", "text": "def protein_alignment(sequence_filenames, seq_lengths, directory):\n file = open('data/Alignments/protein.aln', 'w')\n\n for i, s in zip(seq_lengths, sequence_filenames):\n with open(directory + s, 'r') as sequence:\n name = sf.output_filename(s) # Genome name\n # Content\n content = sequence.read()\n blanks_list = ['-'] * (max(seq_lengths) - i)\n blanks = ''.join(blanks_list)\n content += blanks\n\n file.write(\">\" + name + \"\\n\" + content + \"\\n\") # save\n file.close()\n\n aln = open('data/Alignments/protein.aln', 'r')\n print(aln.read())", "title": "" }, { "docid": "9d00ffede53f7c5d6589e4758bb0aa40", "score": "0.45446834", "text": "def align_left(packer):\r\n return Alignment(packer, xalign=0.0, xscale=0.0)", "title": "" }, { "docid": "1204399c55180280300692bc1392ea5a", "score": "0.45389292", "text": "def testQuestion1_8(self): \r\n aln_global_8 = alignGlobal(self.seqA, self.seqB, self.blosum62_matrix, -8)\r\n expected1 = aln_global_8.seqs[0].getSeqString()\r\n expected2 = aln_global_8.seqs[1].getSeqString()\r\n self.assertEquals(\"THISLINE-\", expected1)\r\n self.assertEquals(\"ISALIGNED\", expected2)", "title": "" }, { "docid": "ef10ed6fbf4bac7a703282674bac4093", "score": "0.45305872", "text": "def make_traceback(exc_info, source_hint=None):\r\n exc_type, exc_value, tb = exc_info\r\n if isinstance(exc_value, TemplateSyntaxError):\r\n exc_info = translate_syntax_error(exc_value, source_hint)\r\n initial_skip = 0\r\n else:\r\n initial_skip = 1\r\n return translate_exception(exc_info, initial_skip)", "title": "" }, { "docid": "d206f36bf7d4c5295ce5f2a71323243a", "score": "0.4509268", "text": "def align_sequences(self): \n idx, db_params = self.get_current_db()\n logger.info(\"-----Step {}.2 Aligning sequences againsts {}-----\".format(str(idx), db_params['dbname']))\n print(\"{}.2 Aligning sequences againsts {}\".format(str(idx), db_params['dbname']))\n if db_params['dbtype'].upper() == 'BLAST':\n self.run_blast(db_params)\n \n elif db_params['dbtype'].upper() == 'HMM':\n self.run_hmmscan(db_params)\n \n else:\n logger.error('invalid dbtype value for {}'.format(str(db_params['dbtype'])))", "title": "" }, { "docid": "0fbbee433dcdcb40fd0355fbc6cdedf8", "score": "0.45064473", "text": "def Align(self, method, anchor=0, loglevel=1):\n\n map_a2b = alignlib_lite.py_makeAlignmentVector()\n s1 = \"A\" * anchor + self.mSequence1 + \"A\" * anchor\n s2 = \"A\" * anchor + self.mSequence2 + \"A\" * anchor\n\n self.strand = \"+\"\n\n if method == \"dialign\":\n dialign = WrapperDialign.Dialign(self.mOptionsDialign)\n dialign.Align(s1, s2, map_a2b)\n elif method == \"blastz\":\n blastz = WrapperBlastZ.BlastZ(self.mOptionsBlastZ)\n blastz.Align(s1, s2, map_a2b)\n if blastz.isReverseComplement():\n self.strand = \"-\"\n self.mSequence2 = Genomics.complement(self.mSequence2)\n\n elif method == \"dialignlgs\":\n dialignlgs = WrapperDialign.Dialign(self.mOptionsDialignLGS)\n dialignlgs.Align(s1, s2, map_a2b)\n elif method == \"dba\":\n dba = WrapperDBA.DBA()\n dba.Align(s1, s2, map_a2b)\n elif method == \"clustal\":\n raise NotImplementedError(\"clustal wrapper needs to be updated\")\n clustal = WrapperClustal.Clustal()\n clustal.Align(s1, s2, map_a2b)\n elif method == \"nw\":\n seq1 = alignlib_lite.py_makeSequence(s1)\n seq2 = alignlib_lite.py_makeSequence(s2)\n alignator = alignlib_lite.py_makeAlignatorDPFull(alignlib_lite.py_ALIGNMENT_GLOBAL,\n gop=-12.0,\n gep=-2.0)\n alignator.align(map_a2b, seq1, seq2)\n elif method == \"sw\":\n 
seq1 = alignlib_lite.py_makeSequence(s1)\n seq2 = alignlib_lite.py_makeSequence(s2)\n alignlib_lite.py_performIterativeAlignment(\n map_a2b, seq1, seq2, alignator_sw, min_score_sw)\n else:\n # use callback function\n method(s1, s2, map_a2b)\n\n if map_a2b.getLength() == 0:\n raise AlignmentError(\"empty alignment\")\n\n if anchor:\n map_a2b.removeRowRegion(\n anchor + len(self.mSequence1) + 1, map_a2b.getRowTo())\n map_a2b.removeRowRegion(1, anchor)\n map_a2b.removeColRegion(\n anchor + len(self.mSequence2) + 1, map_a2b.getColTo())\n map_a2b.removeColRegion(1, anchor)\n map_a2b.moveAlignment(-anchor, -anchor)\n\n f = alignlib_lite.py_AlignmentFormatExplicit(map_a2b,\n alignlib_lite.py_makeSequence(\n self.mSequence1),\n alignlib_lite.py_makeSequence(self.mSequence2))\n\n self.mMethod = method\n self.mAlignment = map_a2b\n self.mAlignedSequence1, self.mAlignedSequence2 = f.mRowAlignment, f.mColAlignment\n f = alignlib_lite.py_AlignmentFormatEmissions(map_a2b)\n self.mAlignment1, self.mAlignment2 = f.mRowAlignment, f.mColAlignment\n self.mAlignmentFrom1 = map_a2b.getRowFrom()\n self.mAlignmentTo1 = map_a2b.getRowTo()\n self.mAlignmentFrom2 = map_a2b.getColFrom()\n self.mAlignmentTo2 = map_a2b.getColTo()\n self.mNumGaps, self.mLength = map_a2b.getNumGaps(), map_a2b.getLength()\n self.mAligned = self.mLength - self.mNumGaps\n\n self.SetPercentIdentity()\n self.SetBlockSizes()", "title": "" }, { "docid": "faf4e072969c1b833dcb4f59291d3b9f", "score": "0.45054802", "text": "def align_and_build_tree(seqs, moltype, best_tree=False, params=None):\n aln = align_unaligned_seqs(seqs, moltype=moltype, params=params)\n tree = build_tree_from_alignment(aln, moltype, best_tree, params)\n return {'Align':aln, 'Tree':tree}", "title": "" }, { "docid": "a7203c1c16723aef367cab8ab06428d6", "score": "0.44903916", "text": "def align_consensus(self):\n self.alignpars.needleman_Wunsch(self.seqs[0], self.seqs[1])\n res = self.alignpars.recover_align()\n\n for i in range(2, len(self.seqs)):\n res = self.add_seq_alignment(res, self.seqs[i])\n return res", "title": "" }, { "docid": "6099a393bb6418807788fbfa84ffb032", "score": "0.44642758", "text": "def write_alignment(self, alignment):\n truncate=10\n handle = self.handle \n \n if len(alignment)==0:\n raise ValueError(\"Must have at least one sequence\")\n length_of_seqs = alignment.get_alignment_length()\n for record in alignment:\n if length_of_seqs != len(record.seq):\n raise ValueError(\"Sequences must all be the same length\")\n if length_of_seqs <= 0:\n raise ValueError(\"Non-empty sequences are required\")\n \n if len(alignment) > len(set([r.id[:truncate] for r in alignment])):\n raise ValueError(\"Repeated identifier, possibly due to truncation\")\n\n\n # From experimentation, the use of tabs is not understood by the\n # EMBOSS suite. The nature of the expected white space is not\n # defined in the PHYLIP documentation, simply \"These are in free\n # format, separated by blanks\". We'll use spaces to keep EMBOSS\n # happy.\n handle.write(\" %i %s\\n\" % (len(alignment), length_of_seqs))\n block=0\n while True:\n for record in alignment:\n if block==0:\n #Write name (truncated/padded to 10 characters)\n \"\"\"\n Quoting the PHYLIP version 3.6 documentation:\n \n The name should be ten characters in length, filled out to\n the full ten characters by blanks if shorter. Any printable\n ASCII/ISO character is allowed in the name, except for\n parentheses (\"(\" and \")\"), square brackets (\"[\" and \"]\"),\n colon (\":\"), semicolon (\";\") and comma (\",\"). 
If you forget\n to extend the names to ten characters in length by blanks,\n the program [i.e. PHYLIP] will get out of synchronization\n with the contents of the data file, and an error message will\n result.\n\n Note that Tab characters count as only one character in the\n species names. Their inclusion can cause trouble.\n \"\"\"\n name = record.id.strip()\n #Either remove the banned characters, or map them to something\n #else like an underscore \"_\" or pipe \"|\" character...\n for char in \"[](),\":\n name = name.replace(char,\"\")\n for char in \":;\":\n name = name.replace(char,\"|\")\n\n #Now truncate and right pad to expected length.\n handle.write(name[:truncate].ljust(truncate))\n else:\n #write 10 space indent\n handle.write(\" \"*truncate)\n #Write five chunks of ten letters per line...\n for chunk in range(0,5):\n i = block*50 + chunk*10\n seq_segment = record.seq.tostring()[i:i+10]\n #TODO - Force any gaps to be '-' character? Look at the alphabet...\n #TODO - How to cope with '?' or '.' in the sequence?\n handle.write(\" %s\" % seq_segment)\n if i+10 > length_of_seqs : break\n handle.write(\"\\n\")\n block=block+1\n if block*50 > length_of_seqs : break\n handle.write(\"\\n\")", "title": "" }, { "docid": "c9a8ae9817d826f24d984b0d9bf6010e", "score": "0.4461919", "text": "def nextframe(self):\n # extract the next frame, slice it by atoms if necessary.\n frame = self.cord.nextframe()\n self.ca_origframe = frame # not .copy()'ing it, when I wrote it we didn't need to.\n if self.atomlist is None:\n self.curframe = frame.copy()\n else:\n self.curframe = frame[self.atomlist].copy()\n\n if self._align_to_next_frame:\n # We wanted to align to the first frame of the DCD. See\n # above for an explanation.\n if self.atomlist is None:\n self.aligntoframe = frame.copy()\n else:\n self.aligntoframe = frame[self.atomlist].copy()\n self._align_to_next_frame = False\n\n X = self.aligntoframe.copy()\n Y = self.curframe.copy()\n\n natoms,ndimensions = numpy.shape(X)\n \n center1 = sum(X,0) / float(natoms)\n center2 = sum(Y,0) / float(natoms)\n X -= center1\n Y -= center2\n\n E0 = sum(sum(X * X)) + sum(sum(Y * Y))\n\n correlation_matrix = numpy.dot(numpy.transpose(Y), X)\n\n V, S, W_trans = numpy.linalg.svd(correlation_matrix)\n\n is_reflection = (numpy.linalg.det(V) * numpy.linalg.det(W_trans)) < 0.0\n if is_reflection:\n # reflect along smallest principal axis\n S[-1] = -S[-1]\n V[-1,:] = V[-1,:] * (-1.0)\n\n optimal_rotation = numpy.dot(V, W_trans)\n self._frame = numpy.dot(frame, optimal_rotation) - center2 + center1\n \n self.nextframe_end_hook(self)\n return self._frame\n \n \n # UPDATE (JRD, Sept. 2007): This section of code contained between the #**...*# markers\n # is not the correct way to align frames for a single protein MD trajectory. 
The proper \n # method for aligning frames is due to Kabsch: Kabsch, Wolfgang, (1976) \"A solution of \n # the best rotation to relate two sets of vectors\", Acta Crystallographica 32:922\n #*******************************************************************************************#\n # Create a wrapper function to take the msd between the two frames.\n # This is what is passed to the simplex optimizer.\n #rmsd = lambda vect: mp3.functions.rmsd(self.aligntoframe, \\\n # mp3.functions.cordtransform(self.curframe, move=vect[0:3], rotate=vect[3:6] ))\n #if self.verbose:\n # dispval = 1\n #else:\n # dispval = 0\n #if self.callback:\n # callback = lambda xk: self.callback(xk, self)\n # # your callback can increment this to figure out what step it is on\n # self.iterstep = 0 \n #else:\n # callback = None\n #if self.minimizer == \"scipy:powell\":\n # result = scipy.optimize.fmin_powell(rmsd,self.guess,disp=dispval,full_output=1,\n # ftol=1e-6,callback=callback)\n # self.iterations = result[3]\n # self.funcalls = result[4]\n #elif self.minimizer == \"scipy:simplex\":\n # result = scipy.optimize.fmin(rmsd,self.guess,disp=dispval,full_output=1,\n # callback=callback)\n # self.iterations = result[2]\n # self.funcalls = result[3]\n #else:\n # sys.stderr.write(\"ERROR: minimizer must be either scipy:powell or scipy:simplex\")\n # sys.exit()\n #self.guess = result[0]\n #self._frame = mp3.functions.cordtransform(frame, move=self.guess[0:3],\n # rotate=self.guess[3:6])\n #if self._saveaverage:\n # self._sum_of_frames += self._frame\n # self._sum_of_frames_count += 1\n #\n #self.nextframe_end_hook(self)\n #return self._frame\n #*******************************************************************************************# ", "title": "" }, { "docid": "f45e3a21a16f14cf101d49341a3f2dd4", "score": "0.44582355", "text": "def align(cls, sequences, bin_path=None, matrix=None,\n gap_penalty=None):\n if bin_path is None:\n app = cls(sequences, matrix=matrix)\n else:\n app = cls(sequences, bin_path, matrix=matrix)\n if gap_penalty is not None:\n app.set_gap_penalty(gap_penalty)\n app.start()\n app.join()\n return app.get_alignment()", "title": "" }, { "docid": "2b85981a1f56820aaf9da8766de91614", "score": "0.44408533", "text": "def doAlignment(self):\n \n records = []\n for index, sequence in enumerate(self.seqs):\n seq = Seq(sequence)\n record = SeqRecord(seq, id = str(index))\n records.append(record)\n \n # Maybe test if MUSCLE is available? Why though.\n\n muscle_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\", \"bin\"))\n #print(muscle_path)\n if sys.platform.startswith('win32'): # platform-specifity goes here\n muscle_name = \"muscle3.8.31_i86win32.exe\"\n elif sys.platform.startswith('linux'):\n muscle_name = \"muscle3.8.31_i86linux32\"\n elif sys.platform.startswith('darwin'):\n muscle_name = \"muscle3.8.31_i86darwin32\"\n else:\n raise\n muscle_exe = os.path.join(muscle_path, muscle_name)\n \n #with open(muscle_path + \"test.out\", \"w\") as f:\n # SeqIO.write(records, f, \"fasta\")\n muscle_cline = MuscleCommandline(muscle_exe)\n #print(muscle_cline)\n child = subprocess.Popen(str(muscle_cline),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n #stderr=subprocess.PIPE, # Apparently muscle hangs on this. Somehow. 
Something about race conditions of stdputs on win?\n universal_newlines=True,\n shell=(sys.platform!=\"win32\"))\n \n SeqIO.write(records, child.stdin, \"fasta\")\n \n child.stdin.close()\n #print(child.stdout)\n align = AlignIO.read(child.stdout, \"fasta\")\n #print(\"alive\")\n #print(align)\n \n seqObjs = [x for x in align]\n seqObjs.sort(key = lambda x: x.id) # MUSCLE groups seqs by distance. There is a -stable key, but it is not entirely debugged, so.\n self.seqsaligned = [str(x.seq) for x in seqObjs]\n \n for seq in self.seqsaligned:\n seqindexed = []\n lettercount = 0\n for l in seq:\n if l == '-':\n seqindexed.append(-1)\n else:\n seqindexed.append(lettercount)\n lettercount += 1\n self.seqsindexed.append(seqindexed)\n\n # go through the sequence indexes and mark the gaps with (-nextbaseindex - 1)\n # so that the index lookups return a more informative value\n for seq in self.seqsindexed:\n seqgv = -1\n for cnt in range(len(self.seqsindexed[0])):\n if seq[cnt] == -1:\n seq[cnt] = seqgv\n else:\n seqgv -= 1\n\n #print self.seq1indexed", "title": "" }, { "docid": "b5ba2bf0219b4ab14539a062f0435ef1", "score": "0.4438545", "text": "def align():\n sh.clustalo('-i', amplified, '-o', aligned)", "title": "" }, { "docid": "02a4bb30bc538faa7afcb19da0c6449e", "score": "0.44383174", "text": "def align_query_seqs(self, papara_runname=\"extended\"):\n cwd = os.getcwd()\n if not self._query_seqs_written:\n self.write_query_seqs()\n for filename in glob.glob('{}/papara*'.format(self.workdir)):\n os.rename(filename, \"{}/{}_tmp\".format(self.workdir, filename.split(\"/\")[-1]))\n if _VERBOSE:\n sys.stdout.write(\"aligning query sequences \\n\")\n # note: sometimes there are still sp in any of the aln/tre\n # hack for the alien taxa thing\n self.remove_alien_aln_tre()\n self.data.write_papara_files()\n os.chdir(self.workdir) # Clean up dir moving\n try:\n assert self.data.aln.taxon_namespace == self.data.tre.taxon_namespace\n subprocess.call([\"papara\",\n \"-t\", \"random_resolve.tre\",\n \"-s\", \"aln_ott.phy\",\n # \"-j\", \"{}\".format(self.config.num_threads),\n \"-q\", self.newseqs_file,\n \"-n\", papara_runname]) # FIXME directory ugliness\n if _VERBOSE:\n sys.stdout.write(\"Papara done\")\n except OSError as e:\n if e.errno == os.errno.ENOENT:\n sys.stderr.write(\"failed running papara. 
Is it installed?\\n\")\n sys.exit(-5)\n # handle file not found error.\n else:\n # Something else went wrong while trying to run `wget`\n raise\n os.chdir(cwd)\n assert os.path.exists(path=\"{}/papara_alignment.{}\".format(self.workdir, papara_runname))\n self.data.aln = DnaCharacterMatrix.get(path=\"{}/papara_alignment.\"\n \"{}\".format(self.workdir, papara_runname), schema=\"phylip\")\n self.data.aln.taxon_namespace.is_mutable = True # Was too strict...\n if _VERBOSE:\n sys.stdout.write(\"Papara done\")\n lfd = \"{}/logfile\".format(self.workdir)\n with open(lfd, \"a\") as log:\n log.write(\"Following papara alignment, aln has {} seqs \\n\".format(len(self.data.aln)))\n self._query_seqs_aligned = 1", "title": "" }, { "docid": "adae19f367dbdd3b50b1906c5a280d59", "score": "0.4424975", "text": "def create_link_stack_likelihood(**kwargs):\n castro_stacker = DMCastroStacker(**kwargs)\n return castro_stacker", "title": "" }, { "docid": "6ffad0ab6a89c1f1f1c477562d71d09f", "score": "0.44144654", "text": "def test_reading_length_coords_mismatch(self):\n path = \"MAF/length_coords_mismatch.maf\"\n alignments = Align.parse(path, \"maf\")\n self.assertEqual(alignments.metadata[\"MAF Version\"], \"1\")\n self.assertEqual(alignments.metadata[\"Scoring\"], \"autoMZ.v1\")\n alignment = next(alignments)\n self.assertEqual(alignment.score, 6441)\n self.assertEqual(len(alignment.sequences), 2)\n self.assertEqual(alignment.sequences[0].id, \"mm8.chr10\")\n self.assertEqual(len(alignment.sequences[0]), 129993255)\n self.assertEqual(\n alignment.sequences[0].seq[3009319 : 3009319 + 162],\n \"TCATAGGTATTTATTTTTAAATATGGTTTGCTTTATGGCTAGAACACACCGATTACTTAAAATAGGATTAACCCCCATACACTTTAAAAATGATTAAACAACATTTCTGCTGCTCGCTCACATTCTTCATAGAAGATGACATAATGTATTTTCCTTTTGGTT\",\n )\n self.assertEqual(\n alignment[0],\n \"TCATAGGTATTTATTTTTAAATATGGTTTGCTTTATGGCTAGAACACACCGATTACTTAAAATAGGATTAACC--CCCATACACTTTAAAAATGATTAAACAACATTTCTGCTGCTCGCTCACATTCTTCATAGAAGATGACATAATGTATTTTCCTTTTGGTT\",\n )\n self.assertEqual(alignment.sequences[1].id, \"oryCun1.scaffold_133159\")\n self.assertEqual(len(alignment.sequences[1]), 13221)\n self.assertEqual(\n alignment.sequences[1].seq[11087 : 11087 + 164],\n \"TCACAGATATTTACTATTAAATATGGTTTGTTATATGGTTACGGTTCATAGGTTACTTGGAATTGGATTAACCTTCTTATTCATTGCAGAATTGGTTACACTGTGTTCTTGACCTTTGCTTGTTTTCTCCATGGAAACTGATGTCAAATACTTTCCCTTTGGTT\",\n )\n self.assertEqual(\n alignment[1],\n \"TCACAGATATTTACTATTAAATATGGTTTGTTATATGGTTACGGTTCATAGGTTACTTGGAATTGGATTAACCTTCTTATTCATTGCAGAATTGGTTACACTGTGTTCTTGACCTTTGCTTGTTTTCTCCATGGAAACTGATGTCAAATACTTTCCCTTTGGTT\",\n )\n self.assertEqual(\n alignment.sequences[1].annotations[\"quality\"],\n \"99569899999998999999999999999999999999999999999999999999999999999999999757878999975999999999999999979999999999997899999999999997997999999869999996999988997997999999\",\n )\n self.assertEqual(alignment.sequences[1].annotations[\"leftStatus\"], \"N\")\n self.assertEqual(alignment.sequences[1].annotations[\"leftCount\"], 0)\n self.assertEqual(alignment.sequences[1].annotations[\"rightStatus\"], \"N\")\n self.assertEqual(alignment.sequences[1].annotations[\"rightCount\"], 0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nmm8.chr10 3009319 TCATAGGTATTTATTTTTAAATATGGTTTGCTTTATGGCTAGAACACACCGATTACTTAA\n 0 |||.||.||||||.|.||||||||||||||.|.|||||.||.....||..|.||||||..\noryCun1.s 11087 TCACAGATATTTACTATTAAATATGGTTTGTTATATGGTTACGGTTCATAGGTTACTTGG\n\nmm8.chr10 3009379 AATAGGATTAACC--CCCATACACTTTAAAAATGATTAAACAACATTTCTGCTGCTCGCT\n 60 |||.|||||||||--|..||.||.|..|.||.||.|||.||....||..||....|.|||\noryCun1.s 
11147 AATTGGATTAACCTTCTTATTCATTGCAGAATTGGTTACACTGTGTTCTTGACCTTTGCT\n\nmm8.chr10 3009437 CACATTCTTCATAGAAGATGACATAATGTATTTTCCTTTTGGTT 3009481\n 120 ....||||.|||.|||..|||..|.|..||.|||||.||||||| 164\noryCun1.s 11207 TGTTTTCTCCATGGAAACTGATGTCAAATACTTTCCCTTTGGTT 11251\n\"\"\",\n )\n self.assertEqual(\n format(alignment, \"maf\"),\n \"\"\"\\\na score=6441.000000\ns mm8.chr10 3009319 162 + 129993255 TCATAGGTATTTATTTTTAAATATGGTTTGCTTTATGGCTAGAACACACCGATTACTTAAAATAGGATTAACC--CCCATACACTTTAAAAATGATTAAACAACATTTCTGCTGCTCGCTCACATTCTTCATAGAAGATGACATAATGTATTTTCCTTTTGGTT\ns oryCun1.scaffold_133159 11087 164 + 13221 TCACAGATATTTACTATTAAATATGGTTTGTTATATGGTTACGGTTCATAGGTTACTTGGAATTGGATTAACCTTCTTATTCATTGCAGAATTGGTTACACTGTGTTCTTGACCTTTGCTTGTTTTCTCCATGGAAACTGATGTCAAATACTTTCCCTTTGGTT\nq oryCun1.scaffold_133159 99569899999998999999999999999999999999999999999999999999999999999999999757878999975999999999999999979999999999997899999999999997997999999869999996999988997997999999\ni oryCun1.scaffold_133159 N 0 N 0\n\n\"\"\",\n )\n with self.assertRaises(ValueError) as cm:\n next(alignments)\n self.assertEqual(\n str(cm.exception), \"sequence size is incorrect (found 219, expected 319)\"\n )", "title": "" }, { "docid": "c7317b18e9fd7ce76831a13de7313978", "score": "0.44033468", "text": "def _seq_to_aligned(self, seq, key):\n return seq", "title": "" }, { "docid": "427273e9c7ec5dcfb996660cdad9f652", "score": "0.43997595", "text": "def maker_one(self) -> None:\n cell1 = Cell('A')\n cell2 = Cell('B')\n cell3 = Cell('C')\n cell4 = Cell('A')\n cell5 = Cell('B')\n cell6 = Cell('C')\n cell7 = Cell('A')\n cell8 = Cell('B')\n cell9 = Cell('C')\n h_leyline1 = Leyline([cell1, cell2])\n h_leyline2 = Leyline([cell3])\n self.horizontal_leylines = [h_leyline1, h_leyline2]\n l_diag_leyline = Leyline([cell4, cell6])\n l_diag_leyline2 = Leyline([cell5])\n self.left_diagonal_leylines = [l_diag_leyline, l_diag_leyline2]\n r_diag_leyline = Leyline([cell8, cell9])\n r_diag_leyline2 = Leyline([cell7])\n self.right_diagonal_leylines = [r_diag_leyline, r_diag_leyline2]\n self.leyline_tracker = [h_leyline1, h_leyline2,\n l_diag_leyline, l_diag_leyline2,\n r_diag_leyline2, r_diag_leyline]", "title": "" }, { "docid": "6c85255e34e59995a5550a0a7a0fbeeb", "score": "0.43904948", "text": "def align(self, src_path, tgt_path, frame=33, start=-16, max_window=None):\n if not max_window:\n max_window = self._compute_maxwindow()\n return self.__list_align(\n src_path, tgt_path,\n frame, start, max_window\n )", "title": "" }, { "docid": "cd439e130b51033d76de4981c771769b", "score": "0.43874982", "text": "def format_alignment(self, alignment):\n if not isinstance(alignment, Alignment):\n raise TypeError(\"Expected an Alignment object\")\n lines = []\n for sequence, line in zip(alignment.sequences, alignment):\n try:\n name = sequence.id\n except AttributeError: # Seq or plain string\n lines.append(\">\")\n else: # SeqRecord\n if sequence.description:\n lines.append(f\">{sequence.id} {sequence.description}\")\n else:\n lines.append(f\">{sequence.id}\")\n lines.append(line)\n return \"\\n\".join(lines) + \"\\n\"", "title": "" }, { "docid": "bf6d00073ee470fc88fc8da56bd0662c", "score": "0.43856972", "text": "def align_frame(frame):\n return landscape(frame)", "title": "" }, { "docid": "5f76ee57e1b0d587a9e21055fb382957", "score": "0.43843296", "text": "def write_concatenated_alignment(id_pairing, alignment_1, alignment_2,\n target_sequence_1, target_sequence_2):\n\n def _unfilter(string):\n \"\"\"\n Uppercases all of the letters in string,\n converts all \".\" to 
\"-\"\n \"\"\"\n string = np.char.upper(string)\n string[string==\".\"] = \"-\"\n return string\n\n def _prepare_header(id1, id2):\n # id1_id2\n header_format = \"{}_{}\"\n concatenated_header = header_format.format(id1, id2)\n\n return concatenated_header\n\n sequences_to_write = [] # list of (header,seq1,seq2) tuples\n\n # load the monomer alignments\n with open(alignment_1) as f1, open(alignment_2) as f2:\n ali_1 = Alignment.from_file(f1)\n ali_2 = Alignment.from_file(f2)\n\n ali_1 = ali_1.apply(func=_unfilter,columns=np.array(range(ali_1.matrix.shape[1])))\n ali_2 = ali_2.apply(func=_unfilter,columns=np.array(range(ali_2.matrix.shape[1])))\n\n target_index_1 = ali_1.id_to_index[target_sequence_1]\n target_index_2 = ali_2.id_to_index[target_sequence_2]\n\n # prepare the target sequence\n target_sequences = (\n ali_1.matrix[target_index_1, :],\n ali_2.matrix[target_index_2, :]\n )\n\n # Target header must end with /1-range for correct focus mode\n length = len(target_sequences[0]) + len(target_sequences[1])\n\n target_header = \"{}_{}/1-{}\".format(\n parse_header(target_sequence_1)[0],\n parse_header(target_sequence_2)[0],\n length\n )\n\n # store target sequence for writing\n sequences_to_write.append(\n (target_header, target_sequences[0], target_sequences[1])\n )\n\n # the target sequence is the first in the output file\n target_seq_idx = 0\n\n # create other headers and sequences\n for id1, id2 in zip(id_pairing.id_1, id_pairing.id_2):\n\n # prepare the concatenated header\n concatenated_header = _prepare_header(id1, id2)\n\n # get indices of the sequences\n index_1 = ali_1.id_to_index[id1]\n index_2 = ali_2.id_to_index[id2]\n\n # save the information\n sequences_to_write.append(\n (\n concatenated_header,\n ali_1.matrix[index_1, :],\n ali_2.matrix[index_2, :]\n )\n )\n\n # concatenate strings\n sequences_full = OrderedDict([\n (header, np.concatenate([seq1, seq2])) for header, seq1, seq2 in sequences_to_write\n ])\n\n sequences_monomer_1 = OrderedDict([\n (header, seq1) for header, seq1, seq2 in sequences_to_write\n ])\n\n sequences_monomer_2 = OrderedDict([\n (header, seq2) for header, seq1, seq2 in sequences_to_write\n ])\n\n full_ali = Alignment.from_dict(sequences_full)\n monomer_ali_1 = Alignment.from_dict(sequences_monomer_1)\n monomer_ali_2 = Alignment.from_dict(sequences_monomer_2)\n\n return target_header, target_seq_idx, full_ali, monomer_ali_1, monomer_ali_2", "title": "" }, { "docid": "875071eb1e533dec2fd4de80049f02e9", "score": "0.43829623", "text": "def align_seqs_to_breaking_labels(align_seqs, lengths):\n if type(align_seqs[0][0]) not in [list, tuple, np.ndarray]: # Single alignment sequence\n label_seq = np.full((lengths,), 0.0, dtype=np.float32)\n for i, (start, duration, tag) in enumerate(align_seqs):\n if (DataLoader.TAGS_ALL[tag] == 'E'\n or DataLoader.TAGS_ALL[tag] == 'S'\n or (DataLoader.TAGS_ALL[tag] == None\n # Assumes that all non-phone tags can be treated as the same token\n and align_seqs.shape[0] > i + 1\n and DataLoader.TAGS_ALL[align_seqs[i + 1][2]] != None)):\n label_seq[start + duration - 1] = 1.0\n return label_seq\n else: # List of alignment sequences\n label_seqs = [align_seqs_to_breaking_labels(align_seq, length) for align_seq, length in\n zip(align_seqs, lengths)]\n return label_seqs", "title": "" }, { "docid": "2197297ff11a5f31defc1465beb1da07", "score": "0.438095", "text": "def compute_local_alignment(seq_x,seq_y,scoring_matrix,align_matrix):\n #get starting position (max entry of local alignment)\n score = -1\n start_pos = [0, 0]\n for pos_x 
in range(0, len(seq_x) + 1):\n for pos_y in range(0, len(seq_y) + 1):\n if align_matrix[pos_x][pos_y] > score:\n score = align_matrix[pos_x][pos_y]\n start_pos = [pos_x, pos_y]\n \n index_i = start_pos[0]\n index_j = start_pos[1]\n align_x = ''\n align_y = ''\n \n while index_i != 0 and index_j != 0 and align_matrix[index_i][index_j] != 0:\n if align_matrix[index_i][index_j] == align_matrix[index_i - 1][index_j - 1] + scoring_matrix[seq_x[index_i - 1]][seq_y[index_j - 1]]:\n # if current position came from diagonal \n align_x = seq_x[index_i -1] + align_x\n align_y = seq_y[index_j - 1] + align_y\n index_i -= 1\n index_j -= 1\n elif align_matrix[index_i][index_j] == align_matrix[index_i - 1][index_j] + scoring_matrix[seq_x[index_i - 1]]['-']:\n # if current pos came from top\n align_x = seq_x[index_i -1] + align_x\n align_y = '-' + align_y\n index_i -= 1\n else:\n # if current pos came from left\n align_x = '-' + align_x\n align_y = seq_y[index_j - 1] + align_y\n index_j -= 1\n \n # consider row 0 and col 0 \n while index_i != 0 and align_matrix[index_i][index_j] != 0:\n align_x = seq_x[index_i -1] + align_x\n align_y = '-' + align_y\n index_i -= 1\n \n while index_j != 0 and align_matrix[index_i][index_j] != 0:\n align_x = '-' + align_x\n align_y = seq_y[index_j - 1] + align_y\n index_j -= 1\n \n return (score, align_x, align_y)", "title": "" }, { "docid": "722a10e0b4d6a6829b1db0b7ffb6720e", "score": "0.43761492", "text": "def _ZipAligned(dex_files, output_path):\n with zipfile.ZipFile(output_path, 'w') as z:\n for i, dex_file in enumerate(dex_files):\n name = 'classes{}.dex'.format(i + 1 if i > 0 else '')\n zip_helpers.add_to_zip_hermetic(z, name, src_path=dex_file, alignment=4)", "title": "" }, { "docid": "19c7196f960b685b2c719cf3bdc1e89c", "score": "0.43669242", "text": "def predict(self):\t\n\t\tif len(self.stack) == 0: \n\t\t\treturn self.SHIFT, None\n\t\t\n\t\telse:\n\t\t\ts0 = self.sequence[self.stack[-1]]\n\t\t\tb0 = self.sequence[self.queue[0]]\n\t\t\tif s0.pos in self.template[\"LEFTARC\"] and b0.pos in self.template[\"LEFTARC\"][s0.pos].get(\"exception\", {}):\n\t\t\t\tif len(self.queue) is 1: return self.LEFTARC, self.template[\"LEFTARC\"][s0.pos][\"exception\"][b0.pos]\n\t\t\t\telse: \treturn self.SHIFT, None\n\t\t\telif s0.pos in self.template[\"LEFTARC\"] and (b0.pos in self.template[\"LEFTARC\"][s0.pos].get(\"norm\",{}) or \\\n\t\t\t\t\t\t\t\t\t\tb0.pos in self.template[\"LEFTARC\"][s0.pos]):\n\t\t\t\tlabel = self.template[\"LEFTARC\"][s0.pos].get(\"norm\",{}).get(b0.pos) or self.template[\"LEFTARC\"][s0.pos][b0.pos]\n\t\t\t\treturn self.LEFTARC, label\n\t\t\telif b0.pos in self.template[\"RIGHTARC\"] and s0.pos in self.template[\"RIGHTARC\"][b0.pos]:\n\t\t\t\thas_dependents = self.hasDependents(b0)\n\t\t\t\tif has_dependents: return self.SHIFT, None\n\t\t\t\telse: return self.RIGHTARC , self.template[\"RIGHTARC\"][b0.pos][s0.pos]\n\t\t\telse: return self.SHIFT, None", "title": "" }, { "docid": "14b75fda1b8b8ea2508069d7a9210e9f", "score": "0.4365685", "text": "def make_aligned_record(cls, alignment, input_record, target=True):\n if target:\n aligned_seq = alignment.align_str[0]\n input_seq = str(alignment.target)\n else:\n aligned_seq = alignment.align_str[2]\n input_seq = str(alignment.query)\n \n # Check that align_str contains the same sequence as input_record, with possible gaps\n if aligned_seq.replace('-', '') != input_seq.replace('-', ''):\n raise ValueError(\"After ungapping, aligned_seq must be the same sequence as input_seq.\")\n \n new_record = 
SeqRecord(seq=Seq(aligned_seq),\n id=input_record.id,\n name=input_record.name,\n description=input_record.description,\n dbxrefs=input_record.dbxrefs,\n features=input_record.features,\n annotations=input_record.annotations,\n letter_annotations=None,\n )\n \n new_record = PlottableRecord(new_record)\n \n # Loop over all associated lists/arrays that have an entry for \n # each base in the sequence\n for k, v in input_record.letter_annotations.items():\n new_record.letter_annotations[k] = align_data(v, input_seq, aligned_seq)\n \n #new_record.coverage = align_data(new_record.coverage, input_seq, aligned_seq, align_str)\n \n if input_record._chrom_data is not None:\n new_data = []\n \n data = input_record._chrom_data[0]\n data = np.array( align_data(data, input_seq, aligned_seq, fill_item=np.array([0, 1])), dtype=object )\n x_shift = [x[0]-i-0.5 for i, x in enumerate(data)]\n new_data.append(data - x_shift)\n \n for data in input_record._chrom_data[1:]:\n data = np.array( align_data(data, input_seq, aligned_seq, fill_item=np.array([np.nan, np.nan])), dtype=object )\n new_data.append(data)\n \n new_record._chrom_data = new_data\n else:\n new_record._chrom_data = None\n \n new_record.seq = Seq(aligned_seq)\n \n # Shift positions of sequence features to match alignment\n for feat in new_record.features:\n ind0, ind1 = feat.location.start.position, feat.location.end.position\n ind0 = alignment.map_coordinate(ind0, target=target)\n ind1 = alignment.map_coordinate(ind1, target=target)\n feat.location = FeatureLocation(ind0, ind1)\n \n return new_record", "title": "" }, { "docid": "d471b56e605d17c39f9801cf529c4cb6", "score": "0.43619895", "text": "def traceback(self) -> str:\n node = self\n lines = []\n\n while node.parent is not None:\n lines.append(repr(node.dependency))\n node = node.parent\n\n lines.reverse()\n\n for index in range(1, len(lines)):\n lines[index] = _tree_spacer * (index - 1) + _tree_leader + lines[index]\n\n return '\\n'.join(lines)", "title": "" }, { "docid": "4f781d7e155d6ec003b7964b57447a0c", "score": "0.43615854", "text": "def _create_stacktrace(self, core_dir, core_name, exe_name):\n host = os.path.split(core_dir)[-1].split(\".\")[-1]\n core_full = os.path.join(core_dir, core_name)\n stack_trace_file = os.path.join(core_dir, f\"{core_name}.stacktrace\")\n\n self.log.debug(\"Generating a stacktrace from the %s core file from %s\", core_full, host)\n run_local(self.log, \" \".join(['ls', '-l', core_full]))\n\n try:\n command = [\n \"gdb\", f\"-cd={core_dir}\",\n \"-ex\", \"'set pagination off'\",\n \"-ex\", \"'thread apply all bt full'\",\n \"-ex\", \"detach\",\n \"-ex\", \"quit\",\n exe_name, core_name\n ]\n\n except RunException as error:\n raise RunException(f\"Error obtaining the exe name from {core_name}\") from error\n\n try:\n output = run_local(self.log, \" \".join(command), check=False, verbose=False)\n with open(stack_trace_file, \"w\", encoding=\"utf-8\") as stack_trace:\n stack_trace.writelines(output.stdout)\n\n except IOError as error:\n raise RunException(f\"Error writing {stack_trace_file}\") from error\n\n except RunException as error:\n raise RunException(f\"Error creating {stack_trace_file}\") from error", "title": "" }, { "docid": "9d39f82fb62b43f5b7f8d86f0d87b83c", "score": "0.43592653", "text": "def align(cls, sequences, bin_path=None, matrix=None):\n if bin_path is None:\n app = cls(sequences, matrix=matrix)\n else:\n app = cls(sequences, bin_path, matrix=matrix)\n app.start()\n app.join()\n return app.get_alignment()", "title": "" }, { "docid": 
"17ebbdd1e2cf0722aa61d959c7ca57fc", "score": "0.43586102", "text": "def compute_local_alignment(seq_x='AA',seq_y='TAAT',scoring_matrix=build_scoring_matrix(), alignment_matrix=compute_alignment_matrix(global_flag=False)):\r\n #numx=len(seq_x)\r\n #numy=len(seq_y)\r\n #pdb.set_trace()\n #####find max score in alignment_matrix\n maxscore=[-float('Inf'),0,0] #traverse alignment matrixand identify maxium score(s)\n for idx in range(len(alignment_matrix)):\n for idy in range(len(alignment_matrix[0])):\n if alignment_matrix[idx][idy]>maxscore[0]:\n maxscore=[alignment_matrix[idx][idy],idx,idy]\n \n score,idx,idy=maxscore\n outx=''\r\n outy=''\n while idx>0 and idy>0:\r\n curscore=alignment_matrix[idx][idy]\n if curscore<=0:\n break\n if curscore==alignment_matrix[idx-1][idy-1]+scoring_matrix[seq_x[idx-1]][seq_y[idy-1]]:\n outx=seq_x[idx-1]+outx\n outy=seq_y[idy-1]+outy\n idx-=1\n idy-=1\n elif curscore==alignment_matrix[idx-1][idy]+scoring_matrix[seq_x[idx-1]]['-']:\n outx=seq_x[idx-1]+outx\n outy='-'+outy\n idx-=1\n else:\n outx='-'+outx\n outy=seq_y[idy-1]+outy\n idy-=1\n #while idx>0:\n # outx=seq_x[idx-1]+outx\n # outy='-'+outy\n # idx-=1\n #while idy>0:\n # outx='-'+outx\n # outy=seq_y[idy-1]+outy\n # idy-=1\n return (score,outx,outy)", "title": "" }, { "docid": "8165cef5c626ad2f36cf052fb1aa5972", "score": "0.43582466", "text": "def align_to_template(sequence, structures, template, gap=\"-\"):\n if (len(sequence) != len(structures)) or (len(template) < len(sequence)):\n raise ValueError(\n \"sequence {0} and structure {1} have different length\".format(\n repr(sequence), repr(structures)\n )\n )\n if len([x for x in structures if x not in template]) != 0:\n raise ValueError(\n \"{0} items in the structure {1} is not in the template\".format(\n len([x for x in structures if x not in template]), repr(structures)\n )\n )\n\n out = []\n idxA, idxB = 0, 0\n while idxB < len(template):\n if idxA < len(sequence):\n segment, structure = sequence[idxA], structures[idxA]\n else:\n segment, structure = gap, \"\"\n current_structure = template[idxB]\n if current_structure == structure:\n out.append(segment)\n idxA += 1\n else:\n out.append(gap)\n idxB += 1\n\n return out", "title": "" }, { "docid": "0e1b2e12ba1361144af6d5aeec136ae5", "score": "0.4340873", "text": "def set_align(alignment):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.view.align\", \r\n alignment)\r\n p2e._app.Exec(arg_str)", "title": "" }, { "docid": "406dd04a3cb82aacd70c74a710514dac", "score": "0.43397593", "text": "def test012_alignment(self):\n w = self.doc['WR-P-E-J-0000000001.p.1.s.6.w.8']\n\n a = w.append( folia.Alignment, cls=\"coreference\")\n a.append( folia.AlignReference, id='WR-P-E-J-0000000001.p.1.s.6.w.1', type=folia.Word)\n a.append( folia.AlignReference, id='WR-P-E-J-0000000001.p.1.s.6.w.2', type=folia.Word)\n\n self.assertEqual( next(a.resolve()), self.doc['WR-P-E-J-0000000001.p.1.s.6.w.1'] )\n self.assertEqual( list(a.resolve())[1], self.doc['WR-P-E-J-0000000001.p.1.s.6.w.2'] )\n\n self.assertTrue( xmlcheck(w.xmlstring(),'<w xmlns=\"http://ilk.uvt.nl/folia\" xml:id=\"WR-P-E-J-0000000001.p.1.s.6.w.8\"><t>ze</t><pos class=\"VNW(pers,pron,stan,red,3,mv)\"/><lemma class=\"ze\"/><alignment class=\"coreference\"><aref type=\"w\" id=\"WR-P-E-J-0000000001.p.1.s.6.w.1\"/><aref type=\"w\" id=\"WR-P-E-J-0000000001.p.1.s.6.w.2\"/></alignment></w>'))", "title": "" }, { "docid": "28dab0363ce86c33b8942cea74682940", "score": "0.4334542", "text": "def pack_technique(lines, pdb_dict):\n\n if pdb_dict[\"technique\"] is 
not None:\n lines.append(\"EXPDTA {}\".format(pdb_dict[\"technique\"]).ljust(80))", "title": "" }, { "docid": "63924f479a5c7e4efd55b1df7e0f4b7a", "score": "0.43317086", "text": "def generate_seq(input_alignments, input_start):\n\tsequence = \"\"\n\tfor row in range(input_start,len(input_alignments),4):\n\t\tnew_row = input_alignments[row].replace(\"\\n\",\"\") \n\t\tsequence += new_row.split()[1]\n\treturn new_row.split()[0], sequence", "title": "" }, { "docid": "1d27d985130c8b0216f3ab4b85df9e7e", "score": "0.43314445", "text": "def align_skeleton(skeleton, code):\n\n ###############\n # My Solution #\n ###############\n\n skeleton, code = skeleton.replace(\" \", \"\"), code.replace(\" \", \"\")\n\n def helper_align(skeleton_idx, code_idx):\n \"\"\"\n Aligns the given skeletal segment with the code.\n Returns (match, cost)\n match: the sequence of corrections as a string\n cost: the cost of the corrections, in edits\n \"\"\"\n if skeleton_idx == len(skeleton) and code_idx == len(code):\n return '', 0\n\n if skeleton_idx < len(skeleton) and code_idx == len(code):\n edits = \"\".join([\"-[\" + c + \"]\" for c in skeleton[skeleton_idx:]])\n return edits, len(skeleton) - skeleton_idx\n\n if skeleton_idx == len(skeleton) and code_idx < len(code):\n edits = \"\".join([\"+[\" + c + \"]\" for c in code[code_idx:]])\n return edits, len(code) - code_idx\n \n possibilities = []\n skel_char, code_char = skeleton[skeleton_idx], code[code_idx]\n # Match\n if skel_char == code_char:\n match_result, match_cost = helper_align(skeleton_idx+1, code_idx+1)\n match_total = skel_char + match_result\n possibilities.append((match_total, match_cost))\n # Insert\n insert_result, insert_cost = helper_align(skeleton_idx, code_idx+1)\n insert_total = \"+[\" + code_char + \"]\" + insert_result\n possibilities.append((insert_total, insert_cost+1))\n # Delete\n delete_result, delete_cost = helper_align(skeleton_idx+1, code_idx)\n delete_total = \"-[\" + skel_char + \"]\" + delete_result\n possibilities.append((delete_total, delete_cost+1))\n return min(possibilities, key=lambda x: x[1])\n result, cost = helper_align(0, 0)\n return result", "title": "" }, { "docid": "ab57b01475e224bcd1346123f873b984", "score": "0.4323716", "text": "def ConvertSequence2StructuralAlignment( src1, src2, source=None, format=\"plain\", check_residues = 1):\n\n ca1 = GetPdbCoordinates( src1, renumber = 1)\n\n if len(ca1) == 0:\n raise \"no coordinates found for %s\" % src1\n\n ca2 = GetPdbCoordinates( src2, renumber = 1 )\n\n if len(ca2) == 0:\n raise \"no coordinates found for %s\" % src2\n\n if string.lower(format) not in (\"plain\",):\n raise \"unknown alignment format %s\" % format\n\n if source:\n lines = open(source, \"r\").readlines()\n else:\n lines = sys.stdin.readlines()\n\n ## replace gap characters\n lines = map(lambda x: re.sub( \"\\s\", \"\", string.replace(x, \".\", \"-\")), lines)\n if not lines:\n raise ValueError, \"alignment is empty\"\n\n lali = len(lines[0])\n\n current1 = 0\n current2 = 0\n\n index1 = 0\n index2 = 0\n\n output = []\n\n alignment = []\n\n for x in range(0, lali):\n\n res1 = lines[0][x]\n res2 = lines[1][x]\n\n if res1 != \"-\": current1+=1\n if res2 != \"-\": current2+=1\n\n try:\n while (ca1[index1][0] < current1): index1 += 1\n while (ca2[index2][0] < current2): index2 += 1 \n except IndexError:\n break\n\n if res1 == \"-\" or res2 == \"-\":\n continue\n\n (i1, aa1, x1, y1, z1) = ca1[index1]\n (i2, aa2, x2, y2, z2) = ca2[index2] \n\n if check_residues:\n if aa1 != res1:\n sys.stderr.write(\"# mismatch in 
1:%s at residue alignment %i(%s) -> structure %i(%s)\\n\" %\\\n (source, current1, res1, index1, aa1))\n if aa2 != res2:\n sys.stderr.write(\"# mismatch in 2:%s at residue %i(%s) -> %i(%s)\\n\" %\\\n (source, current2, res2, index2, aa2))\n\n alignment.append( (x1, y1, z1, x2, y2, z2, 1) )\n\n return alignment", "title": "" }, { "docid": "432d548b439e4474e868c9afae131a52", "score": "0.4317112", "text": "def alignments_format(alignment_before, alignment_after):\n\t\n\talignment_before_file = open(alignment_before,\"r\")\n\tcontent = alignment_before_file.readlines()\n\t\n\talignment_after_file = open(alignment_after,\"a\")\t\t\t\n\talignment_after_file.write(content[0])\n\n\tspecie_row = 1\n\t\n\twhile specie_row < len(content):\n\t\talignment_after_file.write(content[specie_row].split()[0] + \" \" + content[specie_row].split()[1] + \"\\n\")\n\t\tspecie_row+=1\n\t\n\talignment_before_file.close()\n\talignment_after_file.close()", "title": "" }, { "docid": "e471a978fa18460bfc6fc5903873894e", "score": "0.43147826", "text": "def alignment_marks(locations = ((-3500, -3500), (3500, 3500), (-3500, 3500), (3500, -3500)), size = (200,5), layer = 1):\n marks = Device('Marks')\n alignMARK=pg.cross(size[0], size[1],layer=layer)\n\n for i in np.arange(0,len(locations),1):\n alignMark = marks.add_ref(alignMARK)\n alignMark.move(origin=alignMark.center,destination=locations[i])\n \n marks.flatten()\n return marks", "title": "" }, { "docid": "d6d69bec0a41b03f9571d52f6530b69c", "score": "0.4306208", "text": "def align_iraf(textlist_images, log_imexam, ref_coords, align_coords, align_db, itern=2, prefix_str='a'):\n list_images = text_list_to_python_list(textlist_images)\n template_image = list_images[0]\n list_images = list_images[1:]\n shutil.copy(template_image, prefix_str + template_image)\n\n if len(list_images) > 0 and len(text_list_to_python_list(ref_coords)) != 0:\n for image in list_images:\n print (\"File Name: {0}: \".format(image))\n output_filename = prefix_str + image\n list_temp = []\n for value in range(0, int(itern)):\n temp_prefix = str(value)\n remove_file(log_imexam)\n imexam_coords(image, ref_coords=ref_coords, log_file=log_imexam)\n generate_align_file(ref_coords=ref_coords, log_file=log_imexam, align_file=align_coords)\n\n geomap(align_coords=align_coords, align_db=align_db)\n geotran(image, align_db=align_db, align_coords=align_coords, prefix_str=temp_prefix)\n image = temp_prefix + image\n\n if value != int(itern) - 1:\n list_temp.append(image)\n else:\n shutil.move(image, output_filename)\n\n remove_file(align_db)\n remove_file(log_imexam)\n remove_file(align_coords)\n\n for temp_image in list_temp:\n remove_file(temp_image)\n\n elif len(list_images) > 0 and len(text_list_to_python_list(ref_coords)) == 0:\n print (\"ERROR: Reference Coordinates Not Specified In The File {0}\".format(ref_coords))\n sys.exit(1)\n\n else:\n print (\"ERROR: Too Few Images To Align\")\n sys.exit(1)\n\n display_text(\"Alignment Using GEOMAP & GEOTRAN Completed\")", "title": "" }, { "docid": "910002f3c5169d0964c7d237abcc759d", "score": "0.43011254", "text": "def check_aligned(textlist_images, log_align, ref_coords='stars.coo', log_imexam='log_imexam'):\n list_images = text_list_to_python_list(textlist_images)\n list_refcoords = text_list_to_python_list(ref_coords)\n\n no_of_stars = len(list_refcoords) / 2\n list_xref = list_refcoords[::2]\n list_yref = list_refcoords[1::2]\n\n remove_file(log_imexam)\n for image in list_images:\n print (\"File Name: {0}: \".format(image))\n imexam_coords(image, ref_coords, 
log_imexam)\n\n data_sub = pd.read_csv(log_imexam, sep='\\s+', comment='#', header=None)\n\n remove_file(log_align)\n with open(log_align, 'w') as fout:\n fout.write(\"{0:>7s}{1:>9s}{2:>11s}{3:>11s}{4:>11s}{5:>11s}{6:>9s}\\n\\n\".format\n (\"Star_ID\", \"X-Ref\", \"Y-Ref\", \"X-Img\", \"Y-Img\", \"X-Err\", \"Y-Err\"))\n for i in range(0, len(list_images)):\n fout.write(\"# IMAGE #{0} - {1}\\n\".format(i + 1, list_images[i]))\n for j in range(0, no_of_stars):\n fout.write(\"{0:>4}{1:>13.2f}{2:>11.2f}{3:>11.2f}{4:>11.2f}{5:>9.2f}{6:>9.2f}\\n\".format\n (j + 1, float(list_xref[j]), float(list_yref[j]),\n float(data_sub.loc[i * no_of_stars + j, 0]), float(data_sub.loc[i * no_of_stars + j, 1]),\n (float(data_sub.loc[i * no_of_stars + j, 0]) - float(list_xref[j])),\n (float(data_sub.loc[i * no_of_stars + j, 1]) - float(list_yref[j]))))\n fout.write('\\n')\n\n display_text(\"Log Of Alignment Has Been Generated\")", "title": "" } ]
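The row that closes above ends with passages that verify image alignment by logging, for each reference star, the offset between its reference coordinates and the centroid measured in each frame. A minimal sketch of that residual check — an editorial illustration only, assuming plain (x, y) coordinate lists rather than the IRAF/imexam log files used in the passages:

# Minimal sketch: per-star alignment residuals against reference coordinates.
# The example coordinates below are made up for illustration.

def alignment_residuals(ref_coords, img_coords):
    """Return (dx, dy) residuals for each star: measured minus reference."""
    return [(xi - xr, yi - yr)
            for (xr, yr), (xi, yi) in zip(ref_coords, img_coords)]

if __name__ == "__main__":
    ref = [(512.3, 498.7), (130.0, 220.5)]
    measured = [(512.9, 498.1), (130.4, 221.0)]
    print("{:>7s}{:>9s}{:>9s}".format("Star", "X-Err", "Y-Err"))
    for i, (dx, dy) in enumerate(alignment_residuals(ref, measured), start=1):
        print("{:>7d}{:>9.2f}{:>9.2f}".format(i, dx, dy))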
eb61ee2d5e6ae2243355dfd18f6e52c1
Construct a multibox layer, return a class and localization predictions.
[ { "docid": "3a35ef534a00913dc524ad506142ceac", "score": "0.5202887", "text": "def ssd_multibox_layer(inputs,\n num_classes,\n sizes,\n ratios=[1],\n normalization=-1,\n bn_normalization=False):\n net = inputs\n if normalization > 0:\n net = custom_layers.l2_normalization(net, scaling=True)\n # Number of anchors.\n num_anchors = len(sizes) + len(ratios)\n\n # Location.\n num_loc_pred = num_anchors * 4\n\n loc_pred = slim.separable_conv2d(net, None, [3, 3], depth_multiplier=1, stride=1, rate=1, normalizer_fn=slim.batch_norm, scope='conv_loc_dipthwise')\n loc_pred = slim.conv2d(loc_pred, num_loc_pred, [1, 1], stride=1, normalizer_fn=slim.batch_norm, activation_fn=None, scope='conv_loc_pointwise')\n loc_pred = tf.reshape(loc_pred,\n tensor_shape(loc_pred, 4)[:-1]+[num_anchors, 4])\n # Class prediction.\n num_cls_pred = num_anchors * num_classes\n\n cls_pred = slim.separable_conv2d(net, None, [3, 3], depth_multiplier=1, stride=1, rate=1, normalizer_fn=slim.batch_norm, scope='conv_cls_dipthwise')\n cls_pred = slim.conv2d(cls_pred, num_cls_pred, [1, 1], stride=1, normalizer_fn=slim.batch_norm, activation_fn=None, scope='conv_cls_pointwise')\n cls_pred = tf.reshape(cls_pred,\n tensor_shape(cls_pred, 4)[:-1]+[num_anchors, num_classes])\n return cls_pred, loc_pred", "title": "" } ]
[ { "docid": "a909d4e71632524361ad39fd32aef038", "score": "0.6050399", "text": "def build_classifier(self, **kwargs):\n # loc and conf layers\n in_channels = tuple(self.feature_layers[name].out_channels for name in self.classifier_source_names)\n\n _dbox_num_per_fpixel = [len(aspect_ratio) * 2 for aspect_ratio in self.aspect_ratios]\n # loc\n # dbox_num * 2=(original and \"with vertical offset\") * 12(=cx,cy,w,h,x1,y1,x2,y2,...)\n # note that the reason of multiplying 2 of dbox_num *2 is for default boxes with vertical offset\n out_channels = tuple(dbox_num * 2 * 12 for dbox_num in _dbox_num_per_fpixel)\n localization_layers = [\n *Conv2d.block('_loc', len(_dbox_num_per_fpixel), in_channels, out_channels, kernel_size=(3, 5),\n padding=(1, 2), batch_norm=False)\n ]\n self.localization_layers = nn.ModuleDict(OrderedDict(localization_layers))\n\n # conf\n # dbox_num * 2=(original and \"with vertical offset\") * 2(=text or background)\n # note that the reason of multiplying 2 of dbox_num *2 is for default boxes with vertical offset\n out_channels = tuple(dbox_num * 2 * 2 for dbox_num in _dbox_num_per_fpixel)\n confidence_layers = [\n *Conv2d.block('_conf', len(_dbox_num_per_fpixel), in_channels, out_channels, kernel_size=(3, 5),\n padding=(1, 2), batch_norm=False)\n ]\n self.confidence_layers = nn.ModuleDict(OrderedDict(confidence_layers))", "title": "" }, { "docid": "33cab3e1b17dc2cdb42b5b0064b0a0a6", "score": "0.54174715", "text": "def __init__(self, num_classes=20):\n input_size = 300.0\n # we will divide by input_size,\n # so that bounding box coordinates are in [0,1]\n\n steps = [s/input_size for s in (8, 16, 32, 64, 100, 300)]\n # 8 ~ 300/38, 16 ~ 300/19, 32 ~ 300/10, ...\n # for example: one step in the first feature map corresponds\n # approximately to eight steps on the original input image,\n # but i don't completely understand why.\n\n scales = [s/input_size for s in (30, 60, 111, 162, 213, 264, 315)]\n # 0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05\n # (the last scale is not used directly)\n\n aspect_ratios = ((2,), (2, 3), (2, 3), (2, 3), (2,), (2,))\n # one tuple for each scale\n\n # i believe we must treat steps, scales, and aspect_ratios\n # like hyperparameters, they are not exact values\n\n feature_map_sizes = (38, 19, 10, 5, 3, 1)\n # we know these form the network architecture\n\n boxes = []\n for i, fm_size in enumerate(feature_map_sizes):\n for h, w in itertools.product(range(fm_size), repeat=2):\n\n # center of a default box\n cx = (w + 0.5)*steps[i]\n cy = (h + 0.5)*steps[i]\n\n s = scales[i]\n boxes.append((cx, cy, s, s))\n\n s = math.sqrt(scales[i] * scales[i + 1])\n boxes.append((cx, cy, s, s))\n\n s = scales[i]\n for ar in aspect_ratios[i]:\n boxes.append((cx, cy, s*math.sqrt(ar), s/math.sqrt(ar)))\n boxes.append((cx, cy, s/math.sqrt(ar), s*math.sqrt(ar)))\n\n # there are 8732 default boxes overall,\n # 4*(38**2) + 6*(19**2) + 6*(10**2) + 6*(5**2) + 4*(3**2) + 4*(1**2) = 8732\n self.default_boxes = torch.FloatTensor(boxes)\n\n self.variances = [0.1, 0.2]\n # you can read about variances here:\n # github.com/rykov8/ssd_keras/issues/53\n # github.com/weiliu89/caffe/issues/155\n\n self.num_classes = num_classes", "title": "" }, { "docid": "1b4c406d34ce6ef0d48203bf3e93b0df", "score": "0.5258347", "text": "def __init__(self, emb_szs, n_cont, n_dest_clusters):\n super(TaxiModel, self).__init__()\n # define construction/layers\n self._embeds = [Embedding(vs, ed) for vs, ed in emb_szs]\n self._n_emb = sum(ed for _, ed in emb_szs)\n self._n_cont = n_cont\n\n self._hidden_lyr = 
Dense(500, activation='relu')\n self._logit_lyr = Dense(n_dest_clusters, activation='softmax')\n self._n_dest_clusters = n_dest_clusters", "title": "" }, { "docid": "c6b119a820e7e98864d1270913b477a7", "score": "0.5206907", "text": "def __init__(self, **kwargs):\n super().__init__(TSMultiLabelClassification, **kwargs)", "title": "" }, { "docid": "08702fa16f7cbccca6a65a4b15f83db9", "score": "0.5203831", "text": "def Classifier(shapes, drop_rate=0, activation='relu' , name='Classifier'):\n if type(shapes) is int: shapes = [shapes]\n def func(x):\n x = MLP(shapes[:-1], drop_rate, activation=activation, name='%s_MLP'%name)(x)\n act = 'sigmoid' if shapes[-1]==1 else 'softmax'\n x = kl.Dense(shapes[-1], activation=act, name='{}_output'.format(name))(x)\n return x\n return func", "title": "" }, { "docid": "c332be301cb388e6786361b0895788f0", "score": "0.51861954", "text": "def multiclass_nms(multi_bboxes,\n multi_scores,\n score_thr,\n nms_cfg,\n max_num=-1,\n score_factors=None,\n multi_tags=None):\n num_classes = multi_scores.shape[1]\n bboxes, labels = [], []\n nms_cfg_ = nms_cfg.copy()\n nms_type = nms_cfg_.pop('type', 'nms')\n nms_op = getattr(nms_wrapper, nms_type)\n for i in range(1, num_classes):\n cls_inds = multi_scores[:, i] > score_thr\n if not cls_inds.any():\n continue\n # get bboxes and scores of this class\n if multi_bboxes.shape[1] == 4:\n _bboxes = multi_bboxes[cls_inds, :]\n else:\n _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]\n _scores = multi_scores[cls_inds, i]\n if multi_tags is not None:\n tags = multi_tags[0]\n if score_factors is not None:\n _scores *= score_factors[cls_inds]\n ###########\n # only test (no use)\n # trans width the same with height\n # _bboxes_cx = (_bboxes[:,0] + _bboxes[:,2]) / 2\n # _bboxes_h = _bboxes[:,3] - _bboxes[:,1]\n # _bboxes_w = _bboxes[:,2] - _bboxes[:,0]\n # _bboxes[:,0] = _bboxes_cx - _bboxes_h * 0.5 + 0.5\n # _bboxes[:,2] = _bboxes_cx + _bboxes_h * 0.5 - 0.5\n ###########\n cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)\n if nms_type not in ['tag_nms', 'soft_tag_nms']:\n cls_dets, inds = nms_op(cls_dets, **nms_cfg_)\n else:\n cls_dets, inds= nms_op(cls_dets, tags, **nms_cfg_)\n cls_labels = multi_bboxes.new_full(\n (cls_dets.shape[0], ), i - 1, dtype=torch.long)\n ###########\n # only test (no use) \n # trans back\n # _bboxes_w = _bboxes_w[inds]\n # _bboxes_cx = (cls_dets[:,0] + cls_dets[:,2]) / 2\n # cls_dets[:,0] = _bboxes_cx - _bboxes_w * 0.5 + 0.5\n # cls_dets[:,2] = _bboxes_cx + _bboxes_w * 0.5 - 0.5\n ###########\n bboxes.append(cls_dets)\n labels.append(cls_labels)\n if bboxes:\n bboxes = torch.cat(bboxes)\n labels = torch.cat(labels)\n if bboxes.shape[0] > max_num:\n _, inds = bboxes[:, -1].sort(descending=True)\n inds = inds[:max_num]\n bboxes = bboxes[inds]\n labels = labels[inds]\n else:\n bboxes = multi_bboxes.new_zeros((0, 5))\n labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n\n return bboxes, labels", "title": "" }, { "docid": "a80f8001fb63d98a67e84df9e34fa318", "score": "0.5178727", "text": "def _demo_mm_inputs(\n input_shape=(1, 3, 300, 300), num_items=None, num_classes=10):\n (N, C, H, W) = input_shape\n\n rng = np.random.RandomState(0)\n\n imgs = rng.rand(*input_shape)\n\n img_metas = [{\n 'img_shape': (H, W, C),\n 'ori_shape': (H, W, C),\n 'pad_shape': (H, W, C),\n 'filename': '<demo>.png',\n 'scale_factor': 1.0,\n 'flip': False,\n } for _ in range(N)]\n\n gt_bboxes = []\n gt_labels = []\n\n for batch_idx in range(N):\n if num_items is None:\n num_boxes = rng.randint(1, 10)\n else:\n num_boxes = 
num_items[batch_idx]\n\n cx, cy, bw, bh = rng.rand(num_boxes, 4).T\n\n tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)\n tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)\n br_x = ((cx * W) + (W * bw / 2)).clip(0, W)\n br_y = ((cy * H) + (H * bh / 2)).clip(0, H)\n\n boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T\n class_idxs = rng.randint(1, num_classes, size=num_boxes)\n\n gt_bboxes.append(torch.FloatTensor(boxes))\n gt_labels.append(torch.LongTensor(class_idxs))\n\n mm_inputs = {\n 'imgs': torch.FloatTensor(imgs),\n 'img_metas': img_metas,\n 'gt_bboxes': gt_bboxes,\n 'gt_labels': gt_labels,\n 'gt_bboxes_ignore': None,\n }\n return mm_inputs", "title": "" }, { "docid": "b334865b49a6edbcb112ed10c5937f54", "score": "0.5131908", "text": "def construct_large(self, input_shape, output_shape, NUM_TRAIN_EXAMPLES, pooling_len=3):\n\n poolpadding = 'valid'\n ks = 3\n\n #pool = tf.keras.layers.MaxPooling1D\n pool = tf.keras.layers.AvgPool1D\n \n kl_divergence_function = (lambda q, p, _: tfd.kl_divergence(q, p) / # pylint: disable=g-long-lambda\n tf.cast(NUM_TRAIN_EXAMPLES, dtype=tf.float32))\n\n model_in = tf.keras.layers.Input(shape=input_shape)\n conv_1 = tfp.layers.Convolution1DFlipout(100, kernel_size=ks, padding=\"same\", strides=1,\n kernel_divergence_fn=kl_divergence_function,\n activation=tf.nn.relu)\n x = conv_1(model_in)\n x = pool(pooling_len, padding=poolpadding)(x)\n\n conv_2_1 = tfp.layers.Convolution1DFlipout(50, kernel_size=ks, padding=\"same\", strides=1,\n kernel_divergence_fn=kl_divergence_function,\n activation=tf.nn.relu)#, data_format='channels_first')\n x = conv_2_1(x)\n x = pool(pooling_len, padding=poolpadding)(x)\n\n conv_2_2 = tfp.layers.Convolution1DFlipout(50, kernel_size=ks, padding=\"same\", strides=1,\n kernel_divergence_fn=kl_divergence_function,\n activation=tf.nn.relu)\n x = conv_2_2(x)\n x = pool(pooling_len, padding=poolpadding)(x)\n\n\n conv_3 = tfp.layers.Convolution1DFlipout(25, kernel_size=ks, padding=\"same\", strides=1,\n kernel_divergence_fn=kl_divergence_function,\n activation=tf.nn.relu)\n\n x = conv_3(x)\n x = tf.keras.layers.Flatten()(x)\n\n dense_1_1 = tfp.layers.DenseFlipout(50, kernel_divergence_fn=kl_divergence_function)\n x = dense_1_1(x)\n #x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n\n dense_1_2 = tfp.layers.DenseFlipout(50, kernel_divergence_fn=kl_divergence_function)\n x = dense_1_2(x)\n #x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n\n dense_1_3 = tfp.layers.DenseFlipout(50, kernel_divergence_fn=kl_divergence_function)\n x = dense_1_3(x)\n #x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n\n dense_1_4 = tfp.layers.DenseFlipout(50, kernel_divergence_fn=kl_divergence_function)\n x = dense_1_4(x)\n #x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n\n dense_2 = tfp.layers.DenseFlipout(output_shape, kernel_divergence_fn=kl_divergence_function,\n activation=tf.nn.softmax)\n #dense_3 = tfp.layers.DenseVariational(output_shape, activation=None)\n model_out = dense_2(x)\n model = tf.keras.Model(model_in, model_out)\n return model", "title": "" }, { "docid": "24404f9f560f46db1ebe46e810f00279", "score": "0.5113409", "text": "def create_classifier(self):\n\t\tlayers = OrderedDict([\n\t\t\t('fcstart', nn.Linear(self.input_size, self.hidden_units[0])),\n\t\t\t('relustart', nn.ReLU()),\n\t\t\t('dropoutstart', nn.Dropout(self.dropout_rate)),\n\t\t])\n\t\tfor i in range(len(self.hidden_units) - 
1):\n\t\t\tlayers['fc{}'.format(i)] = nn.Linear(self.hidden_units[i], self.hidden_units[i + 1])\n\t\t\tlayers['relu{}'.format(i)] = nn.ReLU()\n\t\t\tlayers['dropout{}'.format(i)] = nn.Dropout(self.dropout_rate)\n\t\tlayers['output'] = nn.Linear(self.hidden_units[-1], self.output_size)\n\t\tlayers['logsoftmax'] = nn.LogSoftmax(dim=1)\n\t\tclassifier = nn.Sequential(layers)\n\t\treturn classifier", "title": "" }, { "docid": "bd8671acb63f99c6e7daf63dbd1e35dd", "score": "0.51073676", "text": "def multiclass_nms(multi_bboxes,\n multi_scores,\n score_thr,\n nms_cfg,\n max_num=-1,\n score_factors=None):\n num_classes = multi_scores.shape[1]\n bboxes, labels = [], []\n nms_cfg_ = nms_cfg.copy()\n nms_type = nms_cfg_.pop('type', 'nms')\n nms_op = getattr(nms_wrapper, nms_type)\n for i in range(1, num_classes):\n cls_inds = multi_scores[:, i] > score_thr\n if not cls_inds.any():\n continue\n # get bboxes and scores of this class\n if multi_bboxes.shape[1] == 4:\n _bboxes = multi_bboxes[cls_inds, :]\n else:\n _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]\n _scores = multi_scores[cls_inds, i]\n if score_factors is not None:\n _scores *= score_factors[cls_inds]\n cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)\n cls_dets, _ = nms_op(cls_dets, **nms_cfg_)\n cls_labels = multi_bboxes.new_full(\n (cls_dets.shape[0], ), i - 1, dtype=torch.long)\n bboxes.append(cls_dets)\n labels.append(cls_labels)\n if bboxes:\n bboxes = torch.cat(bboxes)\n labels = torch.cat(labels)\n if bboxes.shape[0] > max_num:\n _, inds = bboxes[:, -1].sort(descending=True)\n inds = inds[:max_num]\n bboxes = bboxes[inds]\n labels = labels[inds]\n else:\n bboxes = multi_bboxes.new_zeros((0, 5))\n labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n\n return bboxes, labels", "title": "" }, { "docid": "45d9816c861e159722bb5473794a692e", "score": "0.51040095", "text": "def create_layers(self):\n # First of all, retrieve datas from MNIST dataset\n (x_train, y_train), (x_val, y_val) = mnist.load_data()\n # Reshaping the array to 4-dims so that it can work with the Keras API\n x_train = x_train.reshape((x_train.shape[0],) + self.input_shape)\n x_val = x_val.reshape((x_val.shape[0],) + self.input_shape)\n # Making sure that the values are float so that we can get decimal point\n # after division\n x_train = x_train.astype('float32')\n x_val = x_val.astype('float32')\n # Normalizing the RGB codes by dividing it to the max RGB value\n x_train /= 255\n x_val /= 255\n\n # Add layers to the model\n # 3x3 Convolution\n self.add_layer(Conv2D(36, kernel_size=(3, 3), input_shape=self.input_shape))\n self.add_layer(BatchNormalization())\n self.add_layer(Activation(tf.nn.relu))\n # Dropout of 0.2\n self.add_layer(Dropout(0.2))\n # 2x2 Max Pooling\n self.add_layer(MaxPooling2D(pool_size=(2, 2)))\n # 3x3 Convolution\n self.add_layer(Conv2D(28, kernel_size=(2, 2)))\n self.add_layer(BatchNormalization())\n self.add_layer(Activation(tf.nn.relu))\n # 2x1 Max Pooling\n self.add_layer(MaxPooling2D(pool_size=(2, 1)))\n self.add_layer(Flatten())\n self.add_layer(Dense(128, activation=tf.nn.relu))\n # Dropout of 0.2\n self.add_layer(Dropout(0.2))\n self.add_layer(Dense(10, activation=tf.nn.softmax))\n\n self.datas = { \"x_train\" : x_train, \"y_train\" : y_train, \"x_val\" : x_val, \"y_val\" : y_val }\n\n # Print the model summary\n self.get_model().summary()", "title": "" }, { "docid": "c4a577f27eb567b19240e79287dd1563", "score": "0.5086511", "text": "def create_core_layers(n_factors,\n n_hidden_layers,\n user_layers,\n item_layers,\n 
hidden_layers_kwdargs=[]):\n\n mlp_layers = keras.layers.Concatenate()([user_layers, item_layers])\n\n for x, i in enumerate(range(n_hidden_layers)[::-1]):\n current_kwdargs = {}\n\n if x < len(hidden_layers_kwdargs):\n current_kwdargs = hidden_layers_kwdargs[x]\n\n mlp_layers = keras.layers.Dense(n_factors * (2 ** i),\n activation=\"relu\",\n **current_kwdargs)(mlp_layers)\n\n return mlp_layers", "title": "" }, { "docid": "1347f7cf4b8f8508ec42854ad0a612c6", "score": "0.5067773", "text": "def create_model(self, pEpochStep, pMaxEpoch):\n\n print(\"\")\n print(\"\")\n print(\"\")\n print(\"##################### MCProtMultiClass #####################\")\n print(\"\")\n\n ######### CREATE THE CONVOLUTION BUILDER OBJECT\n self.convBuilder_ = MolConvBuilder(\"weight_regularization_collection\")\n\n ######### CREATE THE MCBRNAFDO OBJECTS\n BNAFDOConv = BNAFDO_from_config_file('convbnafdo', self.config_, \n self.isTrainingPH_, pEpochStep, pMaxEpoch)\n BNAFDOFull = BNAFDO_from_config_file('fullbnafdo', self.config_, \n self.isTrainingPH_, pEpochStep, pMaxEpoch)\n\n ######### PREPARE THE INPUT FEATURES\n auxPT = PyPeriodicTable()\n if self.aminoInput_:\n self.embeddingAtomTypes_ = tf.get_variable(\"EmbeddingAminoTypes\", \n [auxPT.get_num_aminoacids(), max(self.numInputFeatures_, 3)], \n initializer=tf.random_uniform_initializer())\n else:\n self.embeddingAtomTypes_ = tf.get_variable(\"EmbeddingAtomTypes\", \n [auxPT.get_num_atoms(), max(self.numInputFeatures_, 3)], \n initializer=tf.random_uniform_initializer())\n inFeatures = tf.nn.embedding_lookup(self.embeddingAtomTypes_, self.proteinPH_.atomTypesPH_)\n if self.numInputFeatures_ > 0:\n inFeatures = tf.concat([self.featuresPH_, inFeatures], axis=-1)\n \n ######### CREATE THE NETWORK\n self.encoder_ = ProtEncoder(self.config_, self.convBuilder_, \n self.proteinPH_, inFeatures, BNAFDOConv)\n\n #Last fully connected layers.\n if self.numFeaturesLastLayer_ > 0:\n fc1 = BNAFDOFull(self.encoder_.latentCode_, \"Full_1_BAD\")\n fc1 = self.convBuilder_.create_1x1_convolution(fc1, self.numFeaturesLastLayer_, \"Full_1\")\n fc2 = BNAFDOFull(fc1, \"Full_2_BAD\")\n else:\n fc1 = self.encoder_.latentCode_\n fc2 = BNAFDOFull(fc1, \"Full_2_BAD\", \n pApplyBN = True, pApplyNoise = False, pApplyAF = True, pApplyDO = False)\n self.logits_ = self.convBuilder_.create_1x1_convolution(fc2, \n self.numOutClasses_, \"Full_2\")\n\n self.predictions_ = tf.sigmoid(self.logits_)\n\n #Get the number of trainable parameters\n totalParams = get_num_parameters()\n\n #Print final statistics\n print(\"############ Number of convolutions:\", self.encoder_.numConvs_+3)\n print(\"############ Number of 1x1 convolutions:\", self.encoder_.num1x1Convs_+3)\n print(\"############ Number of mol convolutions:\", self.encoder_.numMolConvs_)\n print(\"############ Number of parameters:\", totalParams)\n print(\"\")\n print(\"\")\n print(\"\")", "title": "" }, { "docid": "30e9dea035a05c4a3592283f5d7be179", "score": "0.5052257", "text": "def __call__(self, longest_x, longest_y, name): # here you construct the layers that implement the component\n pass", "title": "" }, { "docid": "15113a42319365f0236ddbf61949cc8d", "score": "0.5039112", "text": "def multi_train(self):\n # Initialize multiclass labels\n self.initialize_multiclass_labels()\n\n # Initialize variables\n num_features = self.data.shape[0]\n num_examples = self.data.shape[1]\n cls_weighted_sum = {cls: None for cls in self.classes}\n cls_output = {cls: None for cls in self.classes}\n\n # Initialize weights\n for cls in self.classes:\n 
weights = np.array([0 for _ in range(num_features)]).reshape(num_features, 1)\n bias = 0\n self.cls_weights[cls] = (weights, bias)\n\n # Main loop over all classes\n last_error = 1.0\n same_error = 0\n while same_error < self.stopping_condition:\n for cls in self.classes:\n # Initialize labels\n labels = self.multicls_labels[cls]\n\n # Compute weighted sum\n weighted_sum = np.dot(self.cls_weights[cls][0].T, self.data) + self.cls_weights[cls][1]\n cls_weighted_sum[cls] = weighted_sum\n\n # Compute weight changes\n weight_changes = (1 / num_examples) * np.dot(self.data, (labels - weighted_sum).T)\n bias_change = (1 / num_examples) * np.sum(labels - weighted_sum)\n\n # Update weights\n weights = self.cls_weights[cls][0] + self.learning_rate * weight_changes\n bias = self.cls_weights[cls][1] + self.learning_rate * bias_change\n self.cls_weights[cls] = (weights, bias)\n\n # Pass through activation function\n activation = np.vectorize(self.signum)\n output = activation(weighted_sum)\n cls_output[cls] = output\n\n # Classify\n classifications = self.multi_classify(self.data, cls_output)\n self.classifications = classifications\n\n # Compute training error\n training_error = self.compute_training_error(classifications)\n if training_error == last_error:\n same_error += 1\n else:\n same_error = 0\n last_error = training_error\n self.errors.append(training_error)\n print(training_error)", "title": "" }, { "docid": "a7f89622b38ae5578de7d293025f2281", "score": "0.5037743", "text": "def __init__(self, layers, mini_batch_size):\n self.layers = layers\n self.mini_batch_size = mini_batch_size\n self.params = [param for layer in self.layers for param in layer.params]\n self.x = T.matrix(\"x\")\n self.y = T.matrix(\"y\")\n self.x_single = T.vector(\"x_single\")\n init_layer = self.layers[0]\n init_layer.set_inpt(self.x, self.x, self.mini_batch_size)\n init_layer.set_single_inpt(self.x_single)\n\n for j in xrange(1, len(self.layers)):\n prev_layer, layer = self.layers[j - 1], self.layers[j]\n layer.set_inpt(\n prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)\n layer.set_single_inpt(\n prev_layer.single_output)\n\n self.output = self.layers[-1].output\n self.output_dropout = self.layers[-1].output_dropout\n self.single_output = self.layers[-1].single_output", "title": "" }, { "docid": "cd61556d6b69855d60636018e53d8974", "score": "0.5037078", "text": "def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,\n num_points_per_lvl):\n num_points = points.size(0)\n num_gts = gt_labels.size(0)\n if num_gts == 0:\n return gt_labels.new_full((num_points,), self.num_classes), \\\n gt_bboxes.new_zeros((num_points, 4)), \\\n gt_bboxes.new_zeros((num_points, 1))\n\n areas = gt_bboxes[:, 2] * gt_bboxes[:, 3]\n # TODO: figure out why these two are different\n # areas = areas[None].expand(num_points, num_gts)\n areas = areas[None].repeat(num_points, 1)\n regress_ranges = regress_ranges[:, None, :].expand(\n num_points, num_gts, 2)\n points = points[:, None, :].expand(num_points, num_gts, 2)\n gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 5)\n gt_ctr, gt_wh, gt_angle = torch.split(gt_bboxes, [2, 2, 1], dim=2)\n\n cos_angle, sin_angle = torch.cos(gt_angle), torch.sin(gt_angle)\n rot_matrix = torch.cat([cos_angle, sin_angle, -sin_angle, cos_angle],\n dim=-1).reshape(num_points, num_gts, 2, 2)\n offset = points - gt_ctr\n offset = torch.matmul(rot_matrix, offset[..., None])\n offset = offset.squeeze(-1)\n\n w, h = gt_wh[..., 0], gt_wh[..., 1]\n offset_x, offset_y = offset[..., 0], 
offset[..., 1]\n left = w / 2 + offset_x\n right = w / 2 - offset_x\n top = h / 2 + offset_y\n bottom = h / 2 - offset_y\n bbox_targets = torch.stack((left, top, right, bottom), -1)\n\n # condition1: inside a gt bbox\n inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n if self.center_sampling:\n # condition1: inside a `center bbox`\n radius = self.center_sample_radius\n stride = offset.new_zeros(offset.shape)\n\n # project the points on current lvl back to the `original` sizes\n lvl_begin = 0\n for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):\n lvl_end = lvl_begin + num_points_lvl\n stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius\n lvl_begin = lvl_end\n\n inside_center_bbox_mask = (abs(offset) < stride).all(dim=-1)\n inside_gt_bbox_mask = torch.logical_and(inside_center_bbox_mask,\n inside_gt_bbox_mask)\n\n # condition2: limit the regression range for each location\n max_regress_distance = bbox_targets.max(-1)[0]\n inside_regress_range = (\n (max_regress_distance >= regress_ranges[..., 0])\n & (max_regress_distance <= regress_ranges[..., 1]))\n\n # if there are still more than one objects for a location,\n # we choose the one with minimal area\n areas[inside_gt_bbox_mask == 0] = INF\n areas[inside_regress_range == 0] = INF\n min_area, min_area_inds = areas.min(dim=1)\n\n labels = gt_labels[min_area_inds]\n labels[min_area == INF] = self.num_classes # set as BG\n bbox_targets = bbox_targets[range(num_points), min_area_inds]\n angle_targets = gt_angle[range(num_points), min_area_inds]\n\n return labels, bbox_targets, angle_targets", "title": "" }, { "docid": "f87acbd7a499b000870f830a500e0aa7", "score": "0.50217277", "text": "def _init_layers(self) -> None:\n # cls branch\n self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)\n # reg branch\n self.fc_reg = MLP(self.embed_dims, self.embed_dims, 4, 3)", "title": "" }, { "docid": "72c0e8e94904a00ad67e869a5c05f030", "score": "0.49995658", "text": "def get_multi_symbol_train(network, num_classes, from_layers, num_filters, strides, pads,\n sizes, ratios, normalizations=-1, steps=[], min_filter=128,\n nms_thresh=0.5, force_suppress=False, nms_topk=400, **kwargs):\n label = mx.sym.Variable('label_det')\n body = import_module(network).get_symbol(num_classes, **kwargs)\n internals = body.get_internals()\n data = internals['data']\n res3 = internals[from_layers[0]+\"_output\"]\n res4 = internals[from_layers[1]+\"_output\"]\n conv_feat = internals[from_layers[2]+\"_output\"]\n\n ### remove res3 from input layer of SSD\n from_layers=from_layers[1:]\n num_filters=num_filters[1:]\n strides=strides[1:]\n pads=pads[1:]\n sizes=sizes[1:]\n ratios=ratios[1:]\n \n layers = multi_layer_feature(body, from_layers, num_filters, strides, pads,\n min_filter=min_filter)\n\n loc_preds, cls_preds, anchor_boxes = multitask_layer(layers, \\\n num_classes, sizes=sizes, ratios=ratios, normalization=normalizations, \\\n num_channels=num_filters, clip=False, interm_layer=0, steps=steps)\n\n tmp = mx.contrib.symbol.MultiBoxTarget(\n *[anchor_boxes, label, cls_preds], overlap_threshold=.5, \\\n ignore_label=-1, negative_mining_ratio=3, minimum_negative_samples=0, \\\n negative_mining_thresh=.5, variances=(0.1, 0.1, 0.2, 0.2),\n name=\"multibox_target\")\n loc_target = tmp[0]\n loc_target_mask = tmp[1]\n cls_target = tmp[2]\n\n cls_prob = mx.symbol.SoftmaxOutput(data=cls_preds, label=cls_target, \\\n ignore_label=-1, use_ignore=True, grad_scale=1., multi_output=True, \\\n normalization='valid', name=\"cls_prob\")\n loc_loss_ = 
mx.symbol.smooth_l1(name=\"loc_loss_\", \\\n data=loc_target_mask * (loc_preds - loc_target), scalar=1.0)\n loc_loss = mx.symbol.MakeLoss(loc_loss_, grad_scale=1., \\\n normalization='valid', name=\"loc_loss\")\n\n # monitoring training status\n cls_label = mx.symbol.MakeLoss(data=cls_target, grad_scale=0, name=\"cls_label\")\n det = mx.contrib.symbol.MultiBoxDetection(*[cls_prob, loc_preds, anchor_boxes], \\\n name=\"detection\", nms_threshold=nms_thresh, force_suppress=force_suppress,\n variances=(0.1, 0.1, 0.2, 0.2), nms_topk=nms_topk)\n det = mx.symbol.MakeLoss(data=det, grad_scale=0, name=\"det_out\")\n\n # segmentation task (pyramid pooling module)\n res3_block = mx.sym.BlockGrad(data=res3, name=\"res3_block\")\n res3_reduced = mx.sym.Convolution(data=res3_block, kernel=(1,1), stride=(1,1), pad=(0,0), \\\n num_filter=128, no_bias=True, workspace=1024, name=\"res3_reduced\")\n res3_reduced_bn = mx.sym.BatchNorm(data=res3_reduced, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='res3_reduced_bn')\n res3_reduced2 = mx.sym.Convolution(data=res3_reduced_bn, kernel=(3,3), stride=(1,1), pad=(1,1), \\\n num_filter=128, no_bias=True, workspace=1024, name=\"res3_reduced2\")\n res3_reduced2_bn = mx.sym.BatchNorm(data=res3_reduced2, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='res3_reduced2_bn')\n res4_block = mx.sym.BlockGrad(data=res4, name=\"res4_block\")\n res4_reduced = mx.sym.Convolution(data=res4_block, kernel=(1,1), stride=(1,1), pad=(0,0), \\\n num_filter=256, no_bias=True, workspace=1024, name=\"res4_reduced\")\n res4_reduced_bn = mx.sym.BatchNorm(data=res4_reduced, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='res4_reduced_bn')\n res4_reduced2 = mx.sym.Convolution(data=res4_reduced_bn, kernel=(3,3), stride=(1,1), pad=(1,1), \\\n num_filter=256, no_bias=True, workspace=1024, name=\"res4_reduced2\")\n res4_reduced2_bn = mx.sym.BatchNorm(data=res4_reduced2, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='res4_reduced2_bn')\n res5_reduced = mx.symbol.Convolution(data=conv_feat, kernel=(1,1), stride=(1,1), pad=(0,0), \\\n num_filter=512, no_bias=True, workspace=1024, name=\"res5_reduced\")\n res5_reduced_bn = mx.sym.BatchNorm(data=conv_feat, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='res5_reduced_bn')\n\n score_pool1 = mx.sym.Pooling(res5_reduced_bn, global_pool=False, kernel=(1,1), stride=(1,1), pad=(0,0), pool_type='avg', name='score_pool1')\n score_pool2 = mx.sym.Pooling(res5_reduced_bn, global_pool=False, kernel=(2,2), stride=(2,2), pad=(0,0), pool_type='avg', name='score_pool2')\n score_pool4 = mx.sym.Pooling(res5_reduced_bn, global_pool=False, kernel=(4,4), stride=(4,4), pad=(0,0), pool_type='avg', name='score_pool4')\n \n score2_pool4 = mx.symbol.Convolution(data=score_pool4, kernel=(1,1), stride=(1,1), pad=(0,0), \\\n num_filter=128, no_bias=True, workspace=1024, name=\"score2_pool4\")\n score2_pool4_bn = mx.sym.BatchNorm(data=score2_pool4, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='score2_pool4_bn')\n score2_pool2 = mx.symbol.Convolution(data=score_pool2, kernel=(1,1), stride=(1,1), pad=(0,0), \\\n num_filter=256, no_bias=True, workspace=1024, name=\"score2_pool2\")\n score2_pool2_bn = mx.sym.BatchNorm(data=score2_pool2, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='score2_pool2_bn')\n score2_pool1 = mx.symbol.Convolution(data=score_pool1, kernel=(1,1), stride=(1,1), pad=(0,0), \\\n num_filter=512, no_bias=True, workspace=1024, 
name=\"score2_pool1\")\n score2_pool1_bn = mx.sym.BatchNorm(data=score2_pool1, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='score2_pool1_bn')\n\n affine_matrix = mx.sym.var(\"affine_matrix\", shape=(1,6))\n grid = mx.sym.GridGenerator(data=affine_matrix, transform_type='affine', target_shape=(64, 128))\n score3_samp4 = mx.sym.BilinearSampler(data=score2_pool4_bn, grid=grid, name='score3_samp4')\n score3_samp2 = mx.sym.BilinearSampler(data=score2_pool2_bn, grid=grid, name='score3_samp2')\n score3_samp1 = mx.sym.BilinearSampler(data=score2_pool1_bn, grid=grid, name='score3_samp1')\n score3_sampy = mx.sym.BilinearSampler(data=res5_reduced_bn, grid=grid, name='score3_sampy')\n score3_samp0 = mx.sym.BilinearSampler(data=res4_reduced2_bn, grid=grid, name='score3_samp0')\n score3_sampx = mx.sym.BilinearSampler(data=res3_reduced2_bn, grid=grid, name='score3_sampx')\n score3_concat = mx.sym.concat(score3_samp4, score3_samp2, score3_samp1, score3_sampy, score3_samp0, score3_sampx, dim=1, name='score3_concat')\n score3_conv = mx.symbol.Convolution(data=score3_concat, kernel=(3,3), stride=(1,1), pad=(1,1), \\\n num_filter=seg_classes, no_bias=True, workspace=1024, name=\"score3_conv\")\n score3_conv_bn = mx.sym.BatchNorm(data=score3_conv, fix_gamma=True, eps=eps, use_global_stats=use_global_stats, name='score3_conv_bn')\n score4_conv = mx.symbol.Deconvolution(data=score3_conv_bn, kernel=(4,4), stride=(2,2), pad=(1,1), \\\n num_filter=seg_classes, workspace=1024, name=\"score4_conv\")\n fcnxs = mx.symbol.SoftmaxOutput(data=score4_conv, multi_output=True, grad_scale=4., \\\n use_ignore=True, ignore_label=255, name=\"seg_out\")\n \n # group output\n out = mx.symbol.Group([cls_prob, loc_loss, cls_label, det, fcnxs])\n return out", "title": "" }, { "docid": "3bf65297abd620aaeef5c3bc7afcaf42", "score": "0.49944657", "text": "def prep_ctn_image_train(self):\n container = QtGui.QGridLayout()\n\n # prepare tabs for selection\n self.tb_train_rand_patches = QtGui.QWidget()\n self.tb_train_rand_nuclei = QtGui.QWidget()\n\n # prepare containers\n self.ctn_cropped_train_preview = self.prep_ctn_cropped_train_preview()\n self.ctn_train_rand_patches = self.prep_ctn_train_rand_patches()\n self.ctn_train_rand_nuclei = self.prep_ctn_train_rand_nuclei()\n\n # add containers\n self.tb_train_rand_patches.setLayout(self.ctn_train_rand_patches)\n self.tb_train_rand_nuclei.setLayout(self.ctn_train_rand_nuclei)\n\n # add tabs\n self.tbs_train_regions = QtGui.QTabWidget()\n\n self.tbs_train_regions.addTab(self.tb_train_rand_patches, gui_labels.tb_train_rand_patches)\n self.tbs_train_regions.addTab(self.tb_train_rand_nuclei, gui_labels.tb_train_rand_nuclei)\n\n self.cur_train_tab = 0\n\n self.tbs_train_regions.currentChanged.connect(self.change_cur_train_tab)\n\n container.addWidget(self.tbs_train_regions, 0, 0)\n\n return container", "title": "" }, { "docid": "cd2589924b82bd03d41e799c1ee6387d", "score": "0.49836934", "text": "def multiclass_nms(multi_bboxes,\n multi_scores,\n score_thr,\n nms_cfg,\n max_num=-1,\n score_factors=None,\n Ensemble_Test=False, # 识别图片时的是否使用多模型集成识别?\n save_mode=False,\n roi_feats=None, # 新加入的参数 为了得到预测框所对应的map\n rois=None,\n bbox_pred=None,\n cls_score=None,\n img_metas=None,\n mode_name=None,\n ):\n # 排除背景类之后的剩余类数量\n # size(int) 沿着某个轴计算size\n num_classes = multi_scores.size(1) - 1\n # exclude background category\n #\n if multi_bboxes.shape[1] > 4:\n # 前4个列算作背景类去掉,后边的是物体\n # shape: [multi_bboxes数量,物体类数*4]\n bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)[:, 1:]\n else:\n 
bboxes = multi_bboxes[:, None].expand(-1, num_classes, 4)\n\n #去除第一列的背景分数, 保留的列是各类物体的分数\n # shape: [multi_bboxes数量,物体类数]\n scores = multi_scores[:, 1:]\n\n # filter out boxes with low scores\n ''' \n # 低分数过滤\n # 过滤掉分数 < score_thr的行\n valid_mask打印:\n tensor([[ True],\n [ True],\n [ True],\n [False],\n shape: [multi_bboxes数量,1]\n '''\n\n '''\n 1、排除score过小的框\n \n '''\n valid_mask = scores > score_thr\n # bboxes对应scores保留行\n # shape: [过滤后保留行数,物体类数*4]\n bboxes = bboxes[valid_mask]\n\n if score_factors is not None:\n # 默认为空 不会执行 fcos_head.py的时候会用上\n scores = scores * score_factors[:, None]\n # 过滤掉小于score_thr的行\n scores = scores[valid_mask]\n # 保留非零行\n labels = valid_mask.nonzero()[:, 1]\n\n if bboxes.numel() == 0:\n # bboxes数为0时,直接返回\n bboxes = multi_bboxes.new_zeros((0, 5))\n labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n return bboxes, labels\n\n # Modified from https://github.com/pytorch/vision/blob\n # /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.\n # strategy: in order to perform NMS independently per class.\n # we add an offset to all the boxes. The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n\n # 同一类物体的框重叠过多就会被NMS抑制\n # print()\n # print(\"------------------------------------bbox_nms.py 1111---------------------------------\")\n # print(\"===multi_bboxes:\", multi_bboxes.shape)\n # print(\"===multi_scores:\", multi_scores.shape)\n # print(\"====roi_feats[0]:\",roi_feats[0].shape)\n # print(\"===roi_feats:\",roi_feats.shape)\n # print(\"===filter_low_score_roi_feats\",filter_low_score_roi_feats.shape)\n # print()\n # print(\"===valid_mask:\", valid_mask.shape)\n # print(\"===bboxes:\", bboxes.shape)\n # print(\"===labels:\", labels.shape)\n # print(\"--------------------------------------------------------------------------------------\")\n # print()\n\n '''\n 2、nms抑制\n\n '''\n # 模型集成性能测试\n if Ensemble_Test == True and False:\n bboxes,scores,labels = Ensemble_bboxes_union(\n img_metas=img_metas,\n mode_name=mode_name,\n cur_bboxes=bboxes,\n cur_scores=scores,\n cur_labels=labels\n )\n\n # bboxes, scores, labels = Ensembel_bboxes_intersection(\n # img_metas=img_metas,\n # mode_name=mode_name,\n # cur_bboxes=bboxes,\n # cur_scores=scores,\n # cur_labels=labels\n # )\n\n\n max_coordinate = bboxes.max()\n offsets = labels.to(bboxes) * (max_coordinate + 1)\n bboxes_for_nms = bboxes + offsets[:, None]\n nms_cfg_ = nms_cfg.copy()\n nms_type = nms_cfg_.pop('type', 'nms')\n # nms_wrapper就是nms抑制的处理逻辑\n nms_op = getattr(nms_wrapper, nms_type)\n # nms_op:NMS操作(具体注释,输入,输出格式 进入上边一行的nms_wrapper里看)\n # dets是NMS抑制后留下的bbox, keep是保留的行索引\n # dets使用科学计数法\n dets, keep = nms_op(\n torch.cat([bboxes_for_nms, scores[:, None]], 1), # 这个cat操作将bbox和score按列拼接到一起 (?,4) + (?,1) ---> (?,5)\n **nms_cfg_)\n bboxes = bboxes[keep]\n scores = dets[:, -1] # soft_nms will modify scores 经过nms抑制过后的scores\n labels = labels[keep]\n\n\n\n\n # 3、框数量超过设定值,则要按照置信度取Top-max_num\n # bboxes、scores、labels已经过nms抑制\n final_bboxes=bboxes\n final_scores=scores\n final_labels=labels\n top_max_inds = None\n if keep.size(0) > max_num:\n # 保存前 max_num个框\n _, inds = scores.sort(descending=True)\n inds = inds[:max_num]\n bboxes = bboxes[inds]\n scores = scores[inds]\n labels = labels[inds]\n top_max_inds = inds\n\n if save_mode == True:\n # 开启保存模式 --- 自定义函数\n # 保存最后识别的框 和 特征\n save_tensor( valid_mask=valid_mask,\n roi_feats=roi_feats, # 保存\n rois=rois, # 保存\n bbox_pred=bbox_pred, # 保存\n cls_score=cls_score, # 保存\n keep=keep, # 
NMS抑制后所保留的下标\n bboxes=final_bboxes, # 保存,为了方便使用自己编写的Ensemble_union 和 Ensemble_intersection函数\n scores=final_scores, # 保存,为了方便使用自己编写的Ensemble_union 和 Ensemble_intersection函数\n labels=final_labels, # 保存,为了方便使用自己编写的Ensemble_union 和 Ensemble_intersection函数\n\n top_max_inds=top_max_inds,\n img_metas=img_metas,\n mode_name=mode_name\n )\n\n #\n # print()\n # print(\"------------------------------------bbox_nms.py 2222---------------------------------\")\n # print(\"===max_coordinate:\", max_coordinate)\n # print(\"===offsets:\", offsets)\n # print(\"===bboxes_for_nms:\", bboxes_for_nms)\n # print(\"===nms_cfg_:\", nms_cfg_ )\n # print(\"===nms_type:\", nms_type)\n # print(\"===nms_op:\", nms_op)\n # print(\"--------\")\n # print(\"===dets:\", dets.shape, dets)\n # print(\"===keep(NMS的 inds):\", keep.shape, keep)\n # print(\"--------\")\n # print(\"===scores:\", scores.shape, scores)\n # print(\"===labels:\",labels.shape,labels)\n # print(\"===bboxes:\", bboxes.shape,bboxes)\n # print(\"===final_roi_feats\",final_roi_feats.shape)\n # print(\"===final_rois:\",final_rois.shape)\n # print(\"--------------------------------------------------------------------------------------\")\n # print()\n\n return torch.cat([bboxes, scores[:, None]], 1), labels", "title": "" }, { "docid": "8e53492491d7d6d02e2b09411d298fe9", "score": "0.49690792", "text": "def __init__(self, n_in_channels: int = 1, n_hidden_layers: int = 3, n_kernels: int = 32, kernel_sizes: Union[tuple, int] = (7,), batch_norm: bool = False, kernel_size_out: int = 7, activation=\"relu\"):\n super(CNNBaseMulti, self).__init__()\n \n if type(kernel_sizes)==int:\n kernel_sizes = tuple([kernel_sizes])\n elif type(kernel_sizes)==list:\n kernel_sizes = tuple(kernel_sizes)\n \n cnn = []\n for i in range(n_hidden_layers):\n cnn.append(MultiConv(in_channels=n_in_channels, out_channels=n_kernels, kernel_sizes=kernel_sizes, bias=not batch_norm))\n if batch_norm: cnn.append(nn.BatchNorm2d(n_kernels))\n cnn.append(activation_func(activation))\n n_in_channels = n_kernels\n self.hidden_layers = nn.Sequential(*cnn)\n\n self.output_layer = ConvAuto(in_channels=n_in_channels, out_channels=1, kernel_size=kernel_size_out, bias=True)", "title": "" }, { "docid": "0136982776ab1e0fbc956c7c1f484fbb", "score": "0.49655685", "text": "def __init__(self, nlayers, nz, nmix=1, joint=False, name=None):\n super().__init__(name=name)\n self.nmix = max(1, nmix)\n self.nz = nz\n self.nlayers = nlayers\n self.joint = joint\n\n self.base = [\n tf.keras.layers.Dense(nz * nmix, activation=tf.nn.relu)\n for _ in range(nlayers)\n ]\n self.mus_layer = tf.keras.layers.Dense(nz * nmix)\n\n if self.joint:\n self.base_cond_mul = [\n tf.keras.layers.Dense(nz * nmix) for _ in range(nlayers)\n ]\n self.base_cond_add = [\n tf.keras.layers.Dense(nz * nmix) for _ in range(nlayers)\n ]\n\n if nmix > 1:\n self.mix = [\n tf.keras.layers.Dense(nmix, activation=tf.nn.relu)\n for _ in range(nlayers)\n ]\n self.logit_layer = tf.keras.layers.Dense(nmix)", "title": "" }, { "docid": "327bb5b5a39bd2789bef0b0697d8b637", "score": "0.4965377", "text": "def create_model(self):\n\n user_input = (self.user_input\n if self.user_input is not None else\n keras.Input(shape=(1), name=\"user\", dtype=\"int64\"))\n item_input = (self.item_input\n if self.item_input is not None else\n keras.Input(shape=(1), name=\"item\", dtype=\"int64\"))\n\n user_preprocessing_layers = (\n self.user_preprocessing_layers\n if self.user_preprocessing_layers is not None\n else user_input\n )\n item_preprocessing_layers = (\n 
self.item_preprocessing_layers\n if self.item_preprocessing_layers is not None\n else item_input\n )\n\n mlp_layers = MultiLayerPerceptron.create_core_layers(\n self.n_factors,\n self.n_hidden_layers,\n user_preprocessing_layers,\n item_preprocessing_layers\n )\n\n mlp_output = keras.layers.Dense(1,\n activation=\"sigmoid\",\n use_bias=False)(mlp_layers)\n\n return keras.Model(inputs=[user_input, item_input],\n outputs=[mlp_output],\n name=\"multi-layer_perceptron\")", "title": "" }, { "docid": "4236666a709df0e71681b695b6f76058", "score": "0.49642342", "text": "def __init__(\r\n self,\r\n image_size: tuple,\r\n out_channels: int,\r\n num_channel_initial: int,\r\n extract_levels: List[int],\r\n out_kernel_initializer: str,\r\n out_activation: str,\r\n name: str = \"GlobalNet\",\r\n **kwargs,\r\n ):\r\n super().__init__(\r\n image_size=image_size,\r\n out_channels=out_channels,\r\n num_channel_initial=num_channel_initial,\r\n out_kernel_initializer=out_kernel_initializer,\r\n out_activation=out_activation,\r\n name=name,\r\n **kwargs,\r\n )\r\n\r\n # save parameters\r\n assert out_channels == 3\r\n self._extract_levels = extract_levels\r\n self._extract_max_level = max(self._extract_levels) # E\r\n self.reference_grid = layer_util.get_reference_grid(image_size)\r\n self.transform_initial = tf.constant_initializer(\r\n value=list(np.eye(4, 3).reshape((-1)))\r\n )\r\n # init layer variables\r\n num_channels = [\r\n num_channel_initial * (2 ** level)\r\n for level in range(self._extract_max_level + 1)\r\n ] # level 0 to E\r\n self._downsample_blocks = [\r\n layer.DownSampleResnetBlock(\r\n filters=num_channels[i], kernel_size=7 if i == 0 else 3\r\n )\r\n for i in range(self._extract_max_level)\r\n ] # level 0 to E-1\r\n self._conv3d_block = layer.Conv3dBlock(filters=num_channels[-1]) # level E\r\n self._dense_layer = layer.Dense(\r\n units=12, bias_initializer=self.transform_initial\r\n )", "title": "" }, { "docid": "cbaf42453514aae74aacd822ed90afc2", "score": "0.4934941", "text": "def __init__(self, num_classes=1000, width_mult=1.0, version='v1', round_nearest=8):\n super(MobileNet, self).__init__()\n input_channel = 32\n if version == 'v2':\n settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]\n last_channel = 1280\n layer = mobilenet.InvertedResidual\n elif version == 'v1':\n settings = [[1, 64, 1, 1], [1, 128, 2, 2], [1, 256, 2, 2], [1, 512, 6, 2], [1, 1024, 2, 2]]\n last_channel = 1024\n layer = SepConvBNReLU\n self.settings = settings\n self.version = version\n input_channel = mobilenet._make_divisible(input_channel * width_mult, round_nearest)\n self.last_channel = mobilenet._make_divisible(last_channel * max(1.0, width_mult), round_nearest)\n self.conv1 = mobilenet.ConvBNReLU(3, input_channel, stride=2)\n for j, (t, c, n, s) in enumerate(settings):\n output_channel = mobilenet._make_divisible(c * width_mult, round_nearest)\n layers = []\n for i in range(n):\n stride = s if i == 0 else 1\n layers.append(layer(input_channel, output_channel, stride=stride, expand_ratio=t))\n input_channel = output_channel\n self.add_module('layer{}'.format(j + 1), nn.Sequential(*layers))\n if self.version == 'v2':\n self.head_conv = mobilenet.ConvBNReLU(input_channel, self.last_channel, kernel_size=1)\n self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n 
nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.zeros_(m.bias)", "title": "" }, { "docid": "78a3300f75d8af57326f69f1e159ac1b", "score": "0.493083", "text": "def create_model(self):\n\n user_input = (self.user_input\n if self.user_input is not None else\n keras.Input(shape=(1), name=\"user\", dtype=\"int64\"))\n item_input = (self.item_input\n if self.item_input is not None else\n keras.Input(shape=(1), name=\"item\", dtype=\"int64\"))\n\n user_preprocessing_layers = (\n self.user_preprocessing_layers\n if self.user_preprocessing_layers is not None\n else user_input\n )\n item_preprocessing_layers = (\n self.item_preprocessing_layers\n if self.item_preprocessing_layers is not None\n else item_input\n )\n\n user_dense_kwdargs = {}\n item_dense_kwdargs = {}\n hidden_layers_kwdargs = []\n neumf_output_kernel = \"glorot_uniform\"\n\n if self.gmf_trained and self.mlp_trained:\n if self.gmf_trained.n_factors != self.gmf_n_factors:\n raise RuntimeError(\"GMF factors are not consistent.\")\n\n if self.mlp_trained.n_factors != self.mlp_n_factors:\n raise RuntimeError(\"MLP factors are not consistent.\")\n if self.mlp_trained.n_hidden_layers != self.mlp_n_hidden_layers:\n raise RuntimeError(\"MLP factors are not consistent.\")\n\n user_dense_kwdargs, item_dense_kwdargs = (\n self.gmf_trained.get_core_layers_kwdargs()\n )\n\n hidden_layers_kwdargs = self.mlp_trained.get_core_layers_kwdargs()\n\n gmf_output_kernel, _ = self.gmf_trained.get_output_weights()\n mlp_output_kernel, _ = self.mlp_trained.get_output_weights()\n neumf_output_kernel = keras.initializers.Constant(\n np.concatenate((gmf_output_kernel * self.alpha,\n mlp_output_kernel * (1 - self.alpha)))\n )\n\n gmf_layers = GeneralizedMatrixFactorization.create_core_layers(\n self.gmf_n_factors,\n user_preprocessing_layers,\n item_preprocessing_layers,\n user_dense_kwdargs,\n item_dense_kwdargs\n )\n\n mlp_layers = MultiLayerPerceptron.create_core_layers(\n self.mlp_n_factors,\n self.mlp_n_hidden_layers,\n user_preprocessing_layers,\n item_preprocessing_layers,\n hidden_layers_kwdargs\n )\n\n neumf_layers = [gmf_layers, mlp_layers]\n neumf_layers = keras.layers.Concatenate()(neumf_layers)\n neumf_layers = (\n keras.layers.Dense(1,\n activation=\"sigmoid\",\n kernel_initializer=neumf_output_kernel,\n kernel_constraint=keras.constraints.unit_norm(),\n use_bias=False)(neumf_layers)\n )\n\n return keras.Model(inputs=[user_input, item_input],\n outputs=[neumf_layers],\n name=\"neural_matrix_factorization\")", "title": "" }, { "docid": "c61ebdc1110291d06e240c595cb11d83", "score": "0.4927373", "text": "def __init__(self, *,\n input_shape,\n num_classes,\n layers=4,\n features_root=16,\n split_block=SplitBlock,\n merge_block=MergeBlock,\n center_block=CenterBlock,\n final_block=FinalBlock,\n double_center_features=True):\n super().__init__()\n input_feats, input_size = input_shape\n\n max_layer_scale = 2**layers\n if input_size % max_layer_scale != 0:\n msg = (f\"input size: {input_size} not divisible by \"\n f\"2**layers: {max_layer_scale}\")\n raise ValueError(msg)\n\n layers_l = list(range(layers))\n\n def layer_to_in_shape(layer):\n if not layer:\n return input_shape\n return (features_root * 2**(layer-1), input_size // 2**layer)\n\n def layer_to_out_shape(layer):\n return (features_root * 2**layer, input_size // 2**(layer+1))\n\n def layer_to_hor_shape(layer):\n return (features_root * 2**layer, input_size 
// 2**layer)\n\n in_shapes = map(layer_to_in_shape, layers_l)\n out_shapes = map(layer_to_out_shape, layers_l)\n hor_shapes = map(layer_to_hor_shape, layers_l)\n\n all_shapes = zip(in_shapes, out_shapes, hor_shapes)\n\n for layer, (in_shape, out_shape, hor_shape) in enumerate(all_shapes):\n down_name = f\"d{layer}\"\n self.add_module(\n down_name,\n split_block(in_shape, out_shape, hor_shape, layer)\n )\n\n # Depends on if features were doubled in the center layer\n in_feat, in_size = out_shape\n if double_center_features:\n in_feat *= 2\n out_feat, out_size = in_feat//2, in_size*2\n\n merge_in_shape = (in_feat, in_size)\n merge_out_shape = (out_feat, out_size)\n\n merge_name = f\"m{layer}\"\n self.add_module(\n merge_name,\n merge_block(merge_in_shape, merge_out_shape, hor_shape, layer)\n )\n\n in_feats = 2**(layers-1) * features_root\n out_feats = 2*in_feats if double_center_features else in_feats\n self.center = center_block(in_feats, out_feats)\n\n self.final_feats = features_root if double_center_features else features_root // 2\n self.final = final_block(self.final_feats, num_classes)\n self.layers = layers", "title": "" }, { "docid": "c73f4ca713069d1eac4be5610d407932", "score": "0.49226174", "text": "def create_core_layers(n_factors,\n user_layers,\n item_layers,\n user_dense_kwdargs={},\n item_dense_kwdargs={}):\n\n gmf_layers = [\n keras.layers.Dense(n_factors, **user_dense_kwdargs)(user_layers),\n keras.layers.Dense(n_factors, **item_dense_kwdargs)(item_layers)\n ]\n gmf_layers = keras.layers.Multiply()(gmf_layers)\n\n return gmf_layers", "title": "" }, { "docid": "cdd332d142f4ef4d7565b04527c4460e", "score": "0.4921449", "text": "def Lenet():\n # Determine proper input shape\n input_img = Input(shape=(256, 256, 3))\n\n tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)\n tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)\n\n tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)\n tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)\n\n tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)\n tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)\n\n output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)\n model = Model(inputs=input_img, outputs=output)\n return model", "title": "" }, { "docid": "2173d75abf4e34a65b83f25ca85cf3a6", "score": "0.49165982", "text": "def construct_model():\n\n model = Sequential()\n\n model.add(Conv2D(8, 9, input_shape=(300, 300, 3), activation='relu'))\n model.add(MaxPooling2D())\n model.add(Conv2D(20, 9, activation='relu'))\n model.add(MaxPooling2D())\n model.add(Conv2D(32, 9, activation='relu'))\n model.add(MaxPooling2D())\n model.add(Flatten())\n model.add(Dropout(0.1))\n model.add(Dense(200, activation='relu'))\n model.add(Dense(100, activation='relu'))\n model.add(Dense(50, activation='relu'))\n model.add(Dense(1, activation='sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model", "title": "" }, { "docid": "0007a910faaa7789fa4c2836cb7dd940", "score": "0.4909797", "text": "def _build_mlp(layer,\n training=False,\n hidden_unit_number=50,\n hidden_layer_number=3,\n output_unit_number=1,\n activation=tf.nn.sigmoid,\n final_activation=None):\n prev_layer_unit_number = layer.get_shape().as_list()[1]\n Ws, bs = [], []\n\n unit_numbers = [hidden_unit_number] * (\n hidden_layer_number - 1) + [output_unit_number]\n for i, unit_number in 
enumerate(unit_numbers):\n # MLP weights picked uniformly from +/- 4*sqrt(6)/sqrt(n_in + n_out)\n range = _get_weight_init_range(prev_layer_unit_number, unit_number)\n W = tf.Variable(\n tf.random_uniform(\n [prev_layer_unit_number, unit_number],\n minval=-range,\n maxval=range))\n b = tf.Variable(tf.zeros([unit_number]))\n Ws.append(W)\n bs.append(b)\n\n layer = tf.matmul(layer, W) + b\n if i < len(unit_numbers) - 1:\n # layer = tf.layers.batch_normalization(layer, training=training)\n layer = activation(layer)\n # if dropout_rate > 0:\n # layer = tf.layers.dropout(layer, rate=dropout_rate, training=training)\n else:\n if final_activation:\n layer = final_activation(layer)\n prev_layer_unit_number = unit_number\n\n return layer, Ws + bs", "title": "" }, { "docid": "ca94bdeba7d44a4ccbae7d911374f38c", "score": "0.49091384", "text": "def multiclass_nms(*args, **kwargs):\n return mmdeploy.codebase.mmdet.core.post_processing._multiclass_nms(\n *args, **kwargs)", "title": "" }, { "docid": "4b9dab5977535d5c5e3930b2a24425bc", "score": "0.4906074", "text": "def EX_1(self):\n class _EX1:\n def __init__(self):\n # hyperbox classifications\n self.B_cls = np.array([1, 2]) # default\n # some default gfmm values\n self.gfmm = GFMM()\n self.gfmm.n = 2\n self.gfmm.m = 2\n self.gfmm.p = 2\n self.gfmm.ϴ = .4\n self.gfmm.B_cls = self.B_cls\n # Fig 4.a\n self.Va = np.array([[.1], [.1]])\n self.Wa = np.array([[.1], [.1]])\n # Fig 4.b\n self.Vb = np.array([[.1, .7], [.1, .7]])\n self.Wb = np.array([[.1, .7], [.1, .7]])\n # Fig 4.c\n self.Vc = np.array([[.1, .7], [.1, .7]])\n self.Wc = np.array([[.5, .7], [.5, .7]])\n # Fig 4.d\n self.Vd = np.array([[.1, .4], [.1, .3]])\n self.Wd = np.array([[.5, .7], [.5, .7]])\n # Fig 4.e\n self.Ve = np.array([[.1, .45], [.1, .3]])\n self.We = np.array([[.45, .7], [.5, .7]])\n # input values\n self.a1 = np.array([.1, .1])\n self.a2 = np.array([.7, .7])\n self.a3 = np.array([.5, .5])\n self.a4 = np.array([.4, .3])\n self.X = np.array([self.a1, self.a2, self.a3, self.a4])\n self.d = np.array([1, 2, 1, 2])\n # expected U matrix\n self.U = np.array([[0, 1, 0],\n [0, 0, 1]])\n # sample prediction values\n self.p1 = np.array([.3, .3])\n self.p2 = np.array([.6, .6])\n self.p3 = np.array([.5, .4])\n self.p4 = np.array([.1, .6])\n self.p5 = np.array([0, 0])\n self.p6 = np.array([.9, .9])\n self.P = np.array([self.p1, self.p2, self.p3, self.p4, self.p5, self.p6])\n self.Z = np.array([1, 2, 2, 1, 1, 2])\n return _EX1()", "title": "" }, { "docid": "15b1a7b90ce0fff622e85420821566c5", "score": "0.49055824", "text": "def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape):\n img_d, img_h, img_w = img_shape\n bs, _, feat_d, feat_h, feat_w = feat_shape\n depth_ratio = float(feat_d / img_d)\n width_ratio = float(feat_w / img_w)\n height_ratio = float(feat_h / img_h)\n center_heatmap_target = gt_bboxes[-1].new_zeros([bs, self.num_classes, feat_d, feat_h, feat_w])\n wh_target = gt_bboxes[-1].new_zeros([bs, self.dim, feat_d, feat_h, feat_w])\n offset_target = gt_bboxes[-1].new_zeros([bs, self.dim, feat_d, feat_h, feat_w])\n wh_offset_target_weight = gt_bboxes[-1].new_zeros([bs, self.dim, feat_d, feat_h, feat_w])\n\n for batch_id in range(bs):\n gt_bbox = gt_bboxes[batch_id]\n gt_label = gt_labels[batch_id]\n center_x = (gt_bbox[:, [0]] + gt_bbox[:, [3]]) * depth_ratio / 2\n center_y = (gt_bbox[:, [1]] + gt_bbox[:, [4]]) * width_ratio / 2\n center_z = (gt_bbox[:, [2]] + gt_bbox[:, [5]]) * height_ratio / 2\n\n # N,3\n gt_centers = torch.cat((center_x, center_y, center_z), dim=1)\n\n for 
j, ct in enumerate(gt_centers):\n ctx_int, cty_int, ctz_int = ct.int()\n ctx, cty, ctz = ct\n\n scale_box_d = (gt_bbox[j][3] - gt_bbox[j][0]) * depth_ratio\n scale_box_h = (gt_bbox[j][4] - gt_bbox[j][1]) * height_ratio\n scale_box_w = (gt_bbox[j][5] - gt_bbox[j][2]) * width_ratio\n\n # radius = gaussian_radius([scale_box_d, scale_box_h, scale_box_w],\n # min_overlap=0.3)\n # radius = max(0, int(radius))\n radius = int(pow(scale_box_d * scale_box_h * scale_box_w, 1 / 3)) * 0.2\n radius = max(0, int(radius))\n ind = gt_label[j].int()\n if ind==-1:\n continue\n ind-=1\n gen_gaussian_target(center_heatmap_target[batch_id, ind],\n [ctx_int, cty_int, ctz_int], radius)\n\n wh_target[batch_id, 0, ctz_int, cty_int, ctx_int] = scale_box_d\n wh_target[batch_id, 1, ctz_int, cty_int, ctx_int] = scale_box_w\n wh_target[batch_id, 2, ctz_int, cty_int, ctx_int] = scale_box_h\n\n offset_target[batch_id, 0, ctz_int, cty_int, ctx_int] = ctx - ctx_int\n offset_target[batch_id, 1, ctz_int, cty_int, ctx_int] = cty - cty_int\n offset_target[batch_id, 2, ctz_int, cty_int, ctx_int] = ctz - ctz_int\n\n wh_offset_target_weight[batch_id, :, ctz_int, cty_int, ctx_int] = 1\n\n avg_factor = max(1, center_heatmap_target.eq(1).sum())\n target_result = dict(\n center_heatmap_target=center_heatmap_target,\n wh_target=wh_target,\n offset_target=offset_target,\n wh_offset_target_weight=wh_offset_target_weight)\n return target_result, avg_factor", "title": "" }, { "docid": "d21d517385cbb7aac567aa7d7e8edb57", "score": "0.49038076", "text": "def make_basic_picklable_cnn(nb_filters=64, nb_classes=10,\n input_shape=(None, 28, 28, 1)):\n layers = [Conv2D(nb_filters, (8, 8), (2, 2), \"SAME\"),\n ReLU(),\n Conv2D(nb_filters * 2, (6, 6), (2, 2), \"VALID\"),\n ReLU(),\n Conv2D(nb_filters * 2, (5, 5), (1, 1), \"VALID\"),\n ReLU(),\n Flatten(),\n Linear(nb_classes),\n Softmax()]\n model = MLP(layers, input_shape)\n return model", "title": "" }, { "docid": "8e37cd6afbbcdac598cd110bb40520a0", "score": "0.49025187", "text": "def create_mlp(input_dim, output_dim, num_hidden, activation='relu', name='mlp'):\r\n x = K.Input(input_dim)\r\n y = K.layers.Dense(units=num_hidden, activation=activation)(x)\r\n y = K.layers.Dense(units=output_dim, activation=activation)(y)\r\n return K.Model(x, y, name=get_unique_name(name))", "title": "" }, { "docid": "f1828e6aa45d3637725d93fd7936ffa2", "score": "0.49001083", "text": "def build(self, input_shapes):\n if len(input_shapes) != len(self._prediction_heads[BOX_ENCODINGS]):\n raise ValueError('This box predictor was constructed with %d heads,'\n 'but there are %d inputs.' 
%\n (len(self._prediction_heads[BOX_ENCODINGS]),\n len(input_shapes)))\n for stack_index, input_shape in enumerate(input_shapes):\n net = []\n\n # Add additional conv layers before the class predictor.\n features_depth = static_shape.get_depth(input_shape)\n depth = max(min(features_depth, self._max_depth), self._min_depth)\n tf.logging.info(\n 'depth of additional conv before box predictor: {}'.format(depth))\n\n if depth > 0 and self._num_layers_before_predictor > 0:\n for i in range(self._num_layers_before_predictor):\n net.append(keras.Conv2D(depth, [1, 1],\n name='SharedConvolutions_%d/Conv2d_%d_1x1_%d'\n % (stack_index, i, depth),\n padding='SAME',\n **self._conv_hyperparams.params()))\n net.append(self._conv_hyperparams.build_batch_norm(\n training=(self._is_training and not self._freeze_batchnorm),\n name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_norm'\n % (stack_index, i, depth)))\n net.append(self._conv_hyperparams.build_activation_layer(\n name='SharedConvolutions_%d/Conv2d_%d_1x1_%d_activation'\n % (stack_index, i, depth),\n ))\n # Until certain bugs are fixed in checkpointable lists,\n # this net must be appended only once it's been filled with layers\n self._shared_nets.append(net)\n self.built = True", "title": "" }, { "docid": "be9192d70e8280d325bb6b49e47f168a", "score": "0.48967448", "text": "def __init__(self,\n image_size, out_channels,\n num_channel_initial, extract_levels,\n out_kernel_initializer, out_activation,\n **kwargs):\n super(LocalNet, self).__init__(**kwargs)\n\n # save parameters\n self._extract_levels = extract_levels\n self._extract_max_level = max(self._extract_levels) # E\n self._extract_min_level = min(self._extract_levels) # D\n\n # init layer variables\n\n nc = [num_channel_initial * (2 ** level) for level in range(self._extract_max_level + 1)] # level 0 to E\n self._downsample_blocks = [layer.DownSampleResnetBlock(filters=nc[i], kernel_size=7 if i == 0 else 3)\n for i in range(self._extract_max_level)] # level 0 to E-1\n self._conv3d_block = layer.Conv3dBlock(filters=nc[-1]) # level E\n\n self._upsample_blocks = [layer.LocalNetUpSampleResnetBlock(nc[level]) for level in\n range(self._extract_max_level - 1, self._extract_min_level - 1, -1)] # level D to E-1\n\n self._extract_layers = [\n # if kernels are not initialized by zeros, with init NN, extract may be too large\n layer.Conv3dWithResize(output_shape=image_size, filters=out_channels,\n kernel_initializer=out_kernel_initializer,\n activation=out_activation)\n for _ in self._extract_levels]", "title": "" }, { "docid": "830cca92e74787142f270e1662de2439", "score": "0.48949665", "text": "def add_extras(feature_layer, mbox, num_classes):\n nets_outputs, transform_layers, extra_layers, loc_layers, conf_layers = [list() for _ in range(5)]\n last_int_layer = [layer for layer in feature_layer[0] if isinstance(layer, int)][-1]\n fpn_channels = []\n for layer, depth, box in zip(feature_layer[0], feature_layer[1], mbox):\n if isinstance(layer, int):\n nets_outputs.append(layer)\n fpn_channels.append(depth // 2)\n if layer == last_int_layer:\n transform_layers += [nn.Sequential(ConvBNReLU(depth, depth // 2, 3), SPPModule(3), ConvBNReLU(depth * 2, depth // 2, 3))]\n else:\n transform_layers += [ConvBNReLU(depth, depth // 2, 3)]\n elif layer == 'Conv:S':\n extra_layers += [ConvBNReLU(in_channels, depth, 3, stride=2)]\n else:\n raise ValueError(layer + ' does not support by YOLO')\n in_channels = depth // 2 if isinstance(layer, int) else depth\n loc_layers += [nn.Sequential(ConvBNReLU(in_channels, in_channels, 3), 
nn.Conv2d(in_channels, box * 4, kernel_size=3, padding=1))]\n conf_layers += [nn.Sequential(ConvBNReLU(in_channels, in_channels, 3), nn.Conv2d(in_channels, box * num_classes, kernel_size=3, padding=1))]\n num_stack = 1 if len(feature_layer) == 2 else feature_layer[2]\n fpn = nn.Sequential(*[PANModule(fpn_channels) for _ in range(num_stack)])\n return nets_outputs, (transform_layers, extra_layers, fpn), (loc_layers, conf_layers)", "title": "" }, { "docid": "86bcfd8521959141fece53d20be9aaee", "score": "0.4892189", "text": "def __init__(self , layers , mini_batch_size):\n self.layers = layers\n self.mini_batch_size = mini_batch_size\n self.params = [param for layer in self.layers for param in layer.params]\n self.x = T.matrix('x')\n self.y = T.ivector('y')\n init_layer = self.layers[0]\n init_layer.set_input(self.x , self.x ,self.mini_batch_size)\n for j in xrange(1 , len(self.layers)):\n prev_layer, layer = self.layers[j-1], self.layers[j]\n layer.set_input(prev_layer.output , prev_layer.output_dropout, self.mini_batch_size)\n self.output = self.layers[-1].output\n self.output_dropout = self.layers[-1].output_dropout", "title": "" }, { "docid": "4c87366a3fcdd58809221a707c007f5a", "score": "0.4890406", "text": "def test_mlp_multiple_layers(self):\n batch_size = 3\n input_dim = 2\n hidden_sizes = (5, 4, 3)\n x = np.zeros(shape=[batch_size, input_dim], dtype=np.float32)\n with self.cached_session() as sess:\n x_ph = tf.placeholder(dtype=tf.float32, shape=[None, input_dim])\n ret = tf_utils.mlp(x_ph, hidden_sizes=hidden_sizes)\n sess.run(tf.global_variables_initializer())\n n_trainable_variables = 6 # 3 kernels and 3 bias\n ret_eval = sess.run(ret, feed_dict={x_ph: x})\n self.assertEqual(ret_eval.shape, (batch_size, 3))\n trainable_variables = sess.run(tf.trainable_variables())\n variable_shapes = [var.shape for var in trainable_variables]\n self.assertEqual(len(trainable_variables), n_trainable_variables)\n # kernels\n self.assertIn((2, 5), variable_shapes)\n self.assertIn((5, 4), variable_shapes)\n self.assertIn((4, 3), variable_shapes)\n # biases\n self.assertIn((5,), variable_shapes)\n self.assertIn((4,), variable_shapes)\n self.assertIn((3,), variable_shapes)", "title": "" }, { "docid": "a1b6e937e3cd59736da9814199ed5eb4", "score": "0.48809582", "text": "def get_context_data(self, **kwargs):\n context = super(ClassifierModelLabellingMixin, self).get_context_data(**kwargs)\n context['multilabel'] = self.learning_model.multilabel\n\n return context", "title": "" }, { "docid": "eec6825fcad403331d475142c36c68eb", "score": "0.48803502", "text": "def substitute_model(img_rows=28, img_cols=28, num_channels = 1, nb_classes=10):\r\n input_shape = (None, img_rows, img_cols, num_channels)\r\n\r\n # Define a fully connected model (it's different than the black-box)\r\n layers = [Flatten(),\r\n Linear(200),\r\n ReLU(),\r\n Linear(200),\r\n ReLU(),\r\n Linear(nb_classes),\r\n Softmax()]\r\n\r\n return MLP(layers, input_shape)", "title": "" }, { "docid": "f57b3c278e7d722d1ec3de285e3d3292", "score": "0.4867362", "text": "def _prepare_labels_for_eval(data,\n target_num_instances=MAX_NUM_INSTANCES,\n target_polygon_list_len=MAX_NUM_POLYGON_LIST_LEN,\n use_instance_mask=False):\n image = data['image']\n height = tf.shape(image)[0]\n width = tf.shape(image)[1]\n boxes = data['groundtruth_boxes']\n classes = data['groundtruth_classes']\n classes = tf.cast(classes, dtype=tf.float32)\n num_labels = tf.shape(classes)[0]\n boxes = preprocess_ops.pad_to_fixed_size(boxes, -1, [target_num_instances, 4])\n classes = 
preprocess_ops.pad_to_fixed_size(classes, -1,\n [target_num_instances, 1])\n is_crowd = data['groundtruth_is_crowd']\n is_crowd = tf.cast(is_crowd, dtype=tf.float32)\n is_crowd = preprocess_ops.pad_to_fixed_size(is_crowd, 0,\n [target_num_instances, 1])\n labels = {}\n labels['width'] = width\n labels['height'] = height\n labels['groundtruth_boxes'] = boxes\n labels['groundtruth_classes'] = classes\n labels['num_groundtruth_labels'] = num_labels\n labels['groundtruth_is_crowd'] = is_crowd\n\n if use_instance_mask:\n polygons = data['groundtruth_polygons']\n polygons = preprocess_ops.pad_to_fixed_size(polygons, POLYGON_PAD_VALUE,\n [target_polygon_list_len, 1])\n labels['groundtruth_polygons'] = polygons\n if 'groundtruth_area' in data:\n groundtruth_area = data['groundtruth_area']\n groundtruth_area = preprocess_ops.pad_to_fixed_size(\n groundtruth_area, 0, [target_num_instances, 1])\n labels['groundtruth_area'] = groundtruth_area\n\n return labels", "title": "" }, { "docid": "62db486230b7b6a81c2ab582e9c0df79", "score": "0.48628312", "text": "def train_and_predict_multi_classification(xtrain, ytrain, xtest, model_type):\n try:\n model = Model.multi_classification_models[model_type]\n\n model.fit(xtrain, ytrain)\n ypred = model.predict(xtest)\n\n yscore = model.predict_proba(xtest)\n return ypred, yscore, model\n except ValueError as e:\n st.error(e)\n except LightGBMError as er:\n xtrain = xtrain.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))\n xtest = xtest.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))\n return train_and_predict_multi_classification(xtrain, ytrain, xtest, model_type)", "title": "" }, { "docid": "84b95a856204b81374b7df44eac95502", "score": "0.48583898", "text": "def __init__(self,\n layer,\n layer_num=1,\n hidden_dim=None,\n use_bias=True,\n reg_index=None,\n reg_slice=None,\n reg_factor=0.0,\n **kwargs):\n if type(layer) is list:\n self.layer = layer[0]\n self.layers = layer\n self.layer_num = len(self.layers)\n else:\n self.layer = layer\n self.layers = []\n self.layer_num = layer_num\n self.hidden_dim = hidden_dim\n self.use_bias = use_bias\n if reg_index is None or type(reg_index) is list:\n self.reg_index = reg_index\n else:\n self.reg_index = [reg_index]\n if type(reg_slice) is list or reg_index is None:\n self.reg_slice = reg_slice\n else:\n self.reg_slice = [reg_slice] * len(self.reg_index)\n if reg_factor is None or type(reg_factor) is list or reg_index is None:\n self.reg_weight = reg_factor\n else:\n self.reg_weight = [reg_factor] * len(self.reg_index)\n\n self.W, self.b = None, None\n self.supports_masking = self.layer.supports_masking\n super(MultiHead, self).__init__(self.layer, **kwargs)", "title": "" }, { "docid": "5e6b5c9406d24ffdca42f5a46a555c3f", "score": "0.48573786", "text": "def add_extras(feature_layer, mbox, num_classes):\n nets_outputs, transform_layers, extra_layers, loc_layers, conf_layers = [list() for _ in range(5)]\n last_int_layer = [layer for layer in feature_layer[0] if isinstance(layer, int)][-1]\n for layer, depth, box in zip(feature_layer[0], feature_layer[1], mbox):\n if isinstance(layer, int):\n nets_outputs.append(layer)\n if layer == last_int_layer:\n if isinstance(depth, list):\n extra_layers += [ConvBNReLUx2(depth[0], depth[1], 3)]\n else:\n extra_layers += [ConvBNReLUx2(depth, depth // 2, 3)]\n else:\n prev_depth = feature_layer[1][feature_layer[0].index(layer) + 1]\n if isinstance(depth, list):\n transform_layers += [ConvBNReLU(prev_depth[1], depth[0] // 2, 3)]\n extra_layers += [ConvBNReLUx2(int(depth[0] 
* 1.5), depth[1], 3)]\n else:\n transform_layers += [ConvBNReLU(prev_depth // 2, depth // 2, 3)]\n extra_layers += [ConvBNReLUx2(int(depth * 1.5), depth // 2, 3)]\n elif layer == 'Conv:S':\n extra_layers += [ConvBNReLU(in_channels, depth, 3, stride=2)]\n else:\n raise ValueError(layer + ' does not support by YOLO')\n in_channels = depth[1] if isinstance(depth, list) else depth // 2 if isinstance(layer, int) else depth\n loc_layers += [nn.Sequential(ConvBNReLU(in_channels, in_channels, 3), nn.Conv2d(in_channels, box * 4, kernel_size=3, padding=1))]\n conf_layers += [nn.Sequential(ConvBNReLU(in_channels, in_channels, 3), nn.Conv2d(in_channels, box * num_classes, kernel_size=3, padding=1))]\n in_channels = depth[0] if isinstance(depth, list) else depth\n return nets_outputs, (transform_layers, extra_layers), (loc_layers, conf_layers)", "title": "" }, { "docid": "7d1556b61381d166f9742c5ecf177abb", "score": "0.48473367", "text": "def y_(self):\n return tf.placeholder(dtype=tf.float32,\n shape=(None, self._n_class),\n name='multilabel')", "title": "" }, { "docid": "68d9835428d6d5ea79c63d8898496a6c", "score": "0.48460925", "text": "def _create_embedding_layer():", "title": "" }, { "docid": "a87187c2f66cccbd214312352cf9e543", "score": "0.48425937", "text": "def create_model(p_image_width, p_image_height, p_num_classes) :\n input_shape = (p_image_height, p_image_width, 1)\n\n #we will use a sequential model for training \n model = Sequential()\n\t\n #CONV 3x3x32 => RELU => NORMALIZATION => MAX POOL 3x3 block\n model.add(Conv2D(32, (3, 3), padding=\"same\", input_shape=input_shape))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(3, 3)))\n\n #CONV 3x3x64 => RELU => NORMALIZATION => MAX POOL 2x2 block\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #CONV 3x3x128 => RELU => NORMALIZATION => MAX POOL 2x2 block\n model.add(Conv2D(128, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=-1))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #FLATTEN => DENSE 1024 => RELU => NORMALIZATION block\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization())\n\n #final DENSE => SOFTMAX block for multi-label classification\n model.add(Dense(p_num_classes))\n model.add(Activation(\"softmax\"))\n\n #using categorical_crossentropy loss function with adam optimizer\n model.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n return model", "title": "" }, { "docid": "f1a0a4dd651a641b34d4927c53cc85fa", "score": "0.4839935", "text": "def __init__(self, layer_names=list(), thicknesses=list(), refractive_index=interp1d,\n **kwargs):\n # TODO: parameter check, and allow to receive n and k seperately\n\n # declare Multilayer parameters\n # kwargs\n if 'active_layer' in kwargs:\n self.active_layer = kwargs['active_layer']\n if 'vac_wavelength' in kwargs:\n vac_wavelength = kwargs['vac_wavelength']\n\n # method parameters\n self.layer_names = np.array(layer_names,ndmin = 1)\n self.thicknesses = np.array(thicknesses,ndmin = 1)\n if isinstance(refractive_index, interp1d):\n self.n = refractive_index\n elif isinstance(refractive_index, (list, np.ndarray)) and not vac_wavelength is None:\n self.n = interp1d(vac_wavelength, np.array(refractive_index, ndmin=2))\n else:\n raise TypeError(\"\")\n self.layers = 
len(self.layer_names)\n\n ## parameters check\n if self.layer_names.ndim != 1:\n raise ValueError(\"the dimension of layer_names must be 1\")\n if self.thicknesses.ndim != 1:\n raise ValueError(\"the dimension of thicknesses must be 1\")\n if self.n.y.ndim != 2:\n raise ValueError(\"the dimension of refractive indices must be less than 3\")\n # length: layers = len(layer_names == thicknesses == n.y(# of row))\n # length: n.x == n.y(# of column) (maybe, included in interp1d)\n if self.thicknesses.shape[0] != self.layers:\n raise ValueError(\"the dimension of thicknesses (%s) must be equal to %s\" %\n (self.thicknesses.shape[0], self.layers))\n if self.n.y.shape[0] != self.layers:\n raise ValueError(\"the rows of refractive index (%s) must be equal to %s\" %\n (self.n.y.shape[0], self.layers))\n # data types\n \"\"\"if self.layer_names.dtype.kind != \"U\":\n raise ValueError(\"layer name must be string, not %s\" % (np.array(layer_names).dtype))\"\"\"", "title": "" }, { "docid": "2ff497a5c3a404e0890c5aa43b118cf8", "score": "0.48398775", "text": "def build_model():\n # Create pipeline\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize, min_df = 5)),\n ('tfidf', TfidfTransformer(use_idf = True)),\n ('clf', MultiOutputClassifier(RandomForestClassifier(n_estimators = 10,\n min_samples_split = 10)))\n ])\n \n # Create parameters dictionary\n parameters = {'vect__min_df': [1, 5],\n 'tfidf__use_idf':[True, False],\n 'clf__estimator__n_estimators':[10, 25], \n 'clf__estimator__min_samples_split':[2, 5, 10]}\n \n # Create scorer\n scorer = make_scorer(multiclass_f1_score)\n \n # Create grid search object\n cv = GridSearchCV(pipeline, param_grid = parameters, scoring = scorer, verbose = 10)\n return cv", "title": "" }, { "docid": "06143c19e24ec4fdf06b13d6fc7d3cdd", "score": "0.48389766", "text": "def construct_model():\n\n logging.debug(\"Running with num classes: %i\", FLAGS.classes)\n\n # Here TinyImageNetHead is explicitly constructed for clarity and passed\n # as the use_head arg to TinyImageNet. Can also use `use_head=True`.\n head = TinyImageNetHead(num_classes=FLAGS.classes,\n l1=FLAGS.l1,\n l2=FLAGS.l2,\n dropout=FLAGS.dropout,\n name='head')\n\n model = TinyImageNet(\n levels=FLAGS.levels,\n width=64,\n use_head=head,\n use_tail=True\n )\n return model", "title": "" }, { "docid": "deac4e7adfc26cbdc8a9b28ba02b1d1e", "score": "0.48384234", "text": "def make_mlp_model(layer_size, num_layers, activate_final=True, activation=tf.nn.relu, name=\"mlp\"):\n return snt.Sequential([\n snt.nets.MLP([layer_size] * num_layers, activate_final=activate_final, activation=activation),\n snt.LayerNorm(axis=1, create_offset=True, create_scale=True)\n ], name=name)", "title": "" }, { "docid": "0ad5822d774f19d8413deeb73b1c8178", "score": "0.48334405", "text": "def __init__(self, intermediate_classifiers={}):\n self.classifiers = intermediate_classifiers\n self.classlabels = []\n self.trained = False", "title": "" }, { "docid": "274a043a9de8ecda0a258e73b384bd7e", "score": "0.48297486", "text": "def build_model(\r\n num_classes, n_scales=5, backbone_model=\"resnet50\"):\r\n # Define the focal loss bias. #\r\n b_focal = tf.constant_initializer(\r\n np.log(0.01 / 0.99))\r\n \r\n # Classification and Regression Feature Layers. 
#\r\n cls_cnn = []\r\n reg_cnn = []\r\n for n_layer in range(4):\r\n cls_cnn.append(layers.Conv2D(\r\n 256, 3, padding=\"same\", \r\n activation=None, use_bias=False, \r\n name=\"cls_layer_\" + str(n_layer+1)))\r\n \r\n reg_cnn.append(layers.Conv2D(\r\n 256, 3, padding=\"same\", \r\n activation=None, use_bias=False, \r\n name=\"reg_layer_\" + str(n_layer+1)))\r\n \r\n # Backbone Network. #\r\n if backbone_model.lower() == \"resnet50\":\r\n backbone = tf.keras.applications.ResNet50(\r\n include_top=False, input_shape=[None, None, 3])\r\n \r\n c3_c5_layer_names = [\r\n \"conv3_block4_out\", \r\n \"conv4_block6_out\", \"conv5_block3_out\"]\r\n if backbone_model.lower() == \"resnet101\":\r\n backbone = tf.keras.applications.ResNet101(\r\n include_top=False, input_shape=[None, None, 3])\r\n \r\n c3_c5_layer_names = [\r\n \"conv3_block4_out\", \r\n \"conv4_block23_out\", \"conv5_block3_out\"]\r\n else:\r\n backbone = tf.keras.applications.MobileNetV2(\r\n include_top=False, input_shape=[None, None, 3])\r\n \r\n c3_c5_layer_names = [\r\n \"block_6_expand\", \"block_13_expand\", \"Conv_1\"]\r\n \r\n # Extract the feature maps. #\r\n feature_maps = [\r\n backbone.get_layer(layer_name).output\r\n for layer_name in c3_c5_layer_names]\r\n \r\n c3_output = feature_maps[0]\r\n c4_output = feature_maps[1]\r\n c5_output = feature_maps[2]\r\n \r\n # Feature Pyramid Network Feature Maps. #\r\n p3_1x1 = layers.Conv2D(\r\n 256, 1, 1, \"same\", name=\"c3_1x1\")(c3_output)\r\n p4_1x1 = layers.Conv2D(\r\n 256, 1, 1, \"same\", name=\"c4_1x1\")(c4_output)\r\n p5_1x1 = layers.Conv2D(\r\n 256, 1, 1, \"same\", name=\"c5_1x1\")(c5_output)\r\n \r\n # P6 to P7. #\r\n p6_output = layers.Conv2D(\r\n 256, 3, 2, \"same\", name=\"c6_3x3\")(p5_1x1)\r\n p6_relu = tf.nn.relu(p6_output)\r\n p7_output = layers.Conv2D(\r\n 256, 3, 2, \"same\", name=\"c7_3x3\")(p6_relu)\r\n \r\n # Upsampling and Residual Connections. #\r\n p6_residual = p6_relu + layers.UpSampling2D(\r\n size=(2, 2), name=\"ups_P7\")(p7_output)\r\n p5_residual = p5_1x1 + layers.UpSampling2D(\r\n size=(2, 2), name=\"ups_P6\")(p6_residual)\r\n p4_residual = p4_1x1 + layers.UpSampling2D(\r\n size=(2, 2), name=\"ups_P5\")(p5_residual)\r\n p3_residual = p3_1x1 + layers.UpSampling2D(\r\n size=(2, 2), name=\"ups_P4\")(p4_residual)\r\n \r\n # CNN Feature Map layer. #\r\n x_cnn_features = layers.Conv2D(\r\n 256, 3, 1, \"same\", \r\n name=\"cnn_feature_map\")(p3_residual)\r\n \r\n # Output Layers. 
#\r\n cls_outputs = []\r\n for n_scale in range(n_scales):\r\n layer_cls_output = x_cnn_features\r\n for n_layer in range(4):\r\n layer_cls_output = \\\r\n cls_cnn[n_layer](layer_cls_output)\r\n \r\n cnn_cls_name = \"cnn_cls_output_\" + str(n_scale+1)\r\n tmp_output = tf.nn.relu(layer_cls_output)\r\n cls_output = layers.Conv2D(\r\n num_classes, 3, 1, \r\n bias_initializer=b_focal, \r\n padding=\"same\", name=cnn_cls_name)(tmp_output)\r\n cls_outputs.append(tf.expand_dims(cls_output, axis=3))\r\n \r\n reg_outputs = []\r\n for n_scale in range(n_scales):\r\n layer_reg_output = x_cnn_features\r\n for n_layer in range(4):\r\n layer_reg_output = \\\r\n reg_cnn[n_layer](layer_reg_output)\r\n \r\n cnn_reg_name = \"cnn_reg_output_\" + str(n_scale+1)\r\n tmp_output = tf.nn.relu(layer_reg_output)\r\n reg_output = layers.Conv2D(\r\n 4, 3, 1, use_bias=True, \r\n padding=\"same\", name=cnn_reg_name)(tmp_output)\r\n reg_outputs.append(\r\n tf.expand_dims(tf.nn.sigmoid(reg_output), axis=3))\r\n \r\n cls_outputs = tf.concat(cls_outputs, axis=3)\r\n reg_outputs = tf.concat(reg_outputs, axis=3)\r\n \r\n x_output = tf.concat([\r\n reg_outputs, cls_outputs], axis=4)\r\n return tf.keras.Model(\r\n inputs=backbone.input, outputs=x_output)", "title": "" }, { "docid": "88e87a345e0b2d821c71b30b9c559071", "score": "0.48287064", "text": "def multiclass_nms(multi_bboxes,\n multi_scores,\n score_thr,\n nms_cfg,\n max_num=-1,\n score_factors=None):\n num_classes = multi_scores.size(1) - 1\n # exclude background category\n if multi_bboxes.shape[1] > 4:\n bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)[:, 1:]\n else:\n bboxes = multi_bboxes[:, None].expand(-1, num_classes, 4)\n scores = multi_scores[:, 1:]\n\n # filter out boxes with low scores\n valid_mask = scores > score_thr\n bboxes = bboxes[valid_mask]\n if score_factors is not None:\n scores = scores * score_factors[:, None]\n scores = scores[valid_mask]\n labels = valid_mask.nonzero()[:, 1]\n\n if bboxes.numel() == 0:\n bboxes = multi_bboxes.new_zeros((0, 5))\n labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n return bboxes, labels\n\n # Modified from https://github.com/pytorch/vision/blob\n # /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.\n # strategy: in order to perform NMS independently per class.\n # we add an offset to all the boxes. 
The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n max_coordinate = bboxes.max()\n offsets = labels.to(bboxes) * (max_coordinate + 1)\n bboxes_for_nms = bboxes + offsets[:, None]\n nms_cfg_ = nms_cfg.copy()\n nms_type = nms_cfg_.pop('type', 'nms')\n nms_op = getattr(nms_wrapper, nms_type)\n dets, keep = nms_op(\n torch.cat([bboxes_for_nms, scores[:, None]], 1), **nms_cfg_)\n bboxes = bboxes[keep]\n scores = dets[:, -1] # soft_nms will modify scores\n labels = labels[keep]\n\n if keep.size(0) > max_num:\n _, inds = scores.sort(descending=True)\n inds = inds[:max_num]\n bboxes = bboxes[inds]\n scores = scores[inds]\n labels = labels[inds]\n \n return torch.cat([bboxes, scores[:, None]], 1), labels", "title": "" }, { "docid": "28157bb7a4830b16d34d8062eb229b4c", "score": "0.48284262", "text": "def build_model(self, output_dim, regreession_type = 'classification' ,**layerargs):\n if regreession_type.lower() == 'classification':\n pass\n elif regreession_type.lower() == 'regression':\n pass\n else:\n raise TypeError('regression_type should be one of [\"classification, regression\"] not {}'.format(regreession_type))\n\n entry_layer = keras.layers.Dense(**layerargs)\n out_layer = keras.layers.Dense(output_dim)", "title": "" }, { "docid": "2098fb140c00b720ee5c79ee67338faf", "score": "0.4815617", "text": "def get_model(nlevels, nfm = 16, input_shape = (64, 64, 64, 1), nconv=2, dropout_in=False, dropout_out=False, lrelu_alpha=None, batch_norm=False):\n\n #Define the PReLU activation\n #prelu = kl.advanced_activations.PReLU(init='uniform', weights=None)\n\n # Initialize the model.\n #This is not a Sequential model so create input state\n input_state = kl.Input(shape = input_shape)\n\n #These are hard coded following [1]\n conv_kernel_size = (5, 5, 5)\n updown_kernel_size = (2, 2, 2)\n updown_stride = updown_kernel_size\n\n def NonLinearity(t, lrelu=lrelu_alpha, dropout=False, batch_norm=batch_norm):\n if batch_norm:\n to = kl.BatchNormalization(scale=False)(t)\n else:\n to = t\n \n if lrelu is not None:\n to = kl.LeakyReLU(alpha=lrelu_alpha)(to)\n else:\n to = kl.PReLU()(to)\n\n if dropout:\n to = kl.Dropout(dropout)(to)\n \n return to\n\n # Fine grained features to forward\n forward_list = []\n\n for fi in range(nlevels):\n # Add the initial convolution layer, with nfm feature maps.\n # For simplicity we are doing two convolutions for each non-zero level (see [1]).\n if fi == 0:\n shortcut = input_state\n residual = kl.Conv3D(nfm, kernel_size = conv_kernel_size, padding='same')(input_state)\n residual = NonLinearity(residual, batch_norm=False)\n\n else:\n #shortcut = kl.Conv3D(nfm*2**fi, kernel_size = (1, 1, 1), padding='same')(x)\n shortcut = x\n residual = kl.Conv3D(nfm*2**fi, kernel_size = conv_kernel_size, padding='same')(x)\n residual = NonLinearity(residual, dropout=dropout_in, batch_norm=False)\n for ci in range(nconv-1):\n residual = kl.Conv3D(nfm*2**fi, kernel_size = conv_kernel_size, padding='same')(residual)\n residual = NonLinearity(residual, dropout=dropout_in,\n batch_norm=(ci==nconv-2) and batch_norm)\n\n # Perform elementwise sum with input to train on residuals.\n x = add([residual, shortcut])\n\n #Save a copy for fine-grained features forwarding\n forward_list.append(x)\n\n # Peform a down convolution with PReLU activation, double the number of feature maps.\n x = kl.Conv3D(nfm*2**(fi+1), kernel_size = updown_kernel_size, strides=updown_stride)(x)\n if abs(fi - nlevels) < nlevels:\n x = NonLinearity(x, 
dropout=dropout_out)\n else:\n x = NonLinearity(x)\n\n # Check average pooling vs. residual network?\n\n # Step back up to achieve initial resolution\n for fi in range(nlevels)[::-1]:\n\n #Grab the shortcut\n #shortcut = kl.Conv3D(nfm*2**(fi+1), kernel_size = (1, 1, 1), padding='same')(x)\n shortcut = x\n \n if fi != (nlevels-1):\n #Concatenate with fine grained forwarded features along filters axis\n x = kl.Concatenate(axis=-1)([forward_list[fi+1], x])\n \n # Do some convolutions, then forward the residuals\n residual = kl.Conv3D(nfm*2**(fi+1), kernel_size = conv_kernel_size, padding='same')(x)\n residual = NonLinearity(residual, dropout=dropout_in, batch_norm=False)\n for ci in range(nconv-1):\n residual = kl.Conv3D(nfm*2**(fi+1), kernel_size = conv_kernel_size, padding='same')(residual)\n residual = NonLinearity(residual, dropout=dropout_in,\n batch_norm=(ci==nconv-2) and batch_norm)\n x = add([residual, shortcut])\n\n # Peform a deconvolution with PReLU activation, halve the number of channels\n x = kl.Conv3DTranspose(nfm*2**fi, kernel_size = updown_kernel_size, strides=updown_stride)(x)\n if abs(fi - nlevels) < nlevels:\n x = NonLinearity(x, dropout=dropout_out)\n else:\n x = NonLinearity(x)\n\n # Data show should now have size (batch_size, input_x, input_y, input_z, nfm)\n # Final forwarding and convolution\n x = kl.Concatenate(axis=-1)([forward_list[0], x])\n residual = kl.Conv3D(nfm, kernel_size = conv_kernel_size, padding='same')(x)\n shortcut = kl.Conv3D(nfm, kernel_size = (1, 1, 1), padding='same')(x)\n x = add([residual, shortcut])\n x = NonLinearity(x, batch_norm=False)\n\n # Final layer is a (1, 1, 1) filter with 2 features corresponding to the\n # foreground and background (see [1]).\n x = kl.Conv3D(2, kernel_size = (1, 1, 1))(x)\n x = NonLinearity(x, batch_norm=False)\n\n # Apply softmax\n x = kl.Activation('softmax')(x)\n\n model = km.Model(inputs = input_state, outputs = x)\n\n return model", "title": "" }, { "docid": "c4a472f8735869434eb025ebdaf50ade", "score": "0.4814554", "text": "def build_multi_input_model(shape_vec, shape_mat):\n\n # first branch for the\n inp1 = Input(shape=(1,), name='Country_ID')\n model1 = Embedding(23, 2, name='Country_Embedding')(inp1)\n model1 = Flatten()(model1)\n\n # second branch for the vector input\n inp2 = Input(shape=shape_vec, name=\"Date_and_Regimes\")\n\n # third branch for the matrix input\n inp3 = Input(shape=shape_mat, name=\"Ensemble\")\n model3 = Flatten()(inp3)\n \n # concatenate the two inputs\n x = Concatenate(axis=1)([model1, inp2, model3])\n\n # add the hiddden layers\n x = Dense( 100 , activation='linear' , name=\"Combined_Hidden_Layer_1\" )( x )\n x = Dense( 100 , activation='relu' , name=\"Combined_Hidden_Layer_2\" )( x )\n x = Dense( 100 , activation='relu' , name=\"Combined_Hidden_Layer_3\" )( x )\n\n x = Dense( 2 , activation='linear' , name=\"Output_Layer\" )(x)\n\n # returns the Model\n return Model([inp1, inp2, inp3], outputs=x)", "title": "" }, { "docid": "775bf78eaa9f41a4241f3c1e90020aec", "score": "0.4809", "text": "def build_model(n_class):\n\n model = Sequential()\n\n model.add(Conv2D(64, (3, 3), padding='same', \n input_shape=SHAPE,\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.3))\n\n model.add(Conv2D(64, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n 
model.add(Conv2D(128, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(128, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(256, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(256, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n model.add(Dropout(0.4))\n\n model.add(Conv2D(512, (3, 3), padding='same',\n kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization())\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.5))\n\n model.add(Flatten())\n model.add(Dense(512,kernel_regularizer=regularizers.l2(WEIGHT_DECAY)))\n model.add(Activation('relu'))\n model.add(BatchNormalization(name='features_layer'))\n\n model.add(Dropout(0.5))\n model.add(Dense(n_class, name='ll_dense'))\n model.add(Activation('softmax'))\n return model", "title": "" }, { "docid": "f6434349ad65d44de00a8f0023dcd11f", "score": "0.480721", "text": "def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):\n output_placeholder = input_placeholder\n with tf.variable_scope(scope):\n for i in range(n_layers):\n output_placeholder = tf.layers.dense(inputs=output_placeholder, units=size, activation=activation)\n output_placeholder = tf.layers.dense(inputs=output_placeholder, units=output_size, activation=output_activation)\n return output_placeholder", "title": "" }, { "docid": "c5204d6888df7a9360474995f7f21573", "score": "0.48049867", "text": "def create_model(opt):\n model = find_model_using_name(opt.model)\n instance = model(opt)\n instance.initialize()\n multigpu_instance = MultiGPUModelWrapper(opt, instance)\n print(\"model [%s] was created\" % type(instance).__name__)\n return multigpu_instance", "title": "" }, { "docid": "9a1ca06ccac294102c9dbaead2625a51", "score": "0.48012176", 
"text": "def classify_multi(classifiers, X_train, y_train, X_test, y_test=None, groups=None):\n \n num_samples = X_test.shape[0]\n num_classes = 9\n num_classifiers = len(classifiers)\n print(num_samples, num_classes, num_classifiers)\n probabilities = np.zeros((num_samples, num_classes, num_classifiers))\n predictions = np.zeros((num_samples, num_classifiers))\n scores = np.zeros(num_classifiers)\n \n for i in range(num_classifiers):\n pred, score, clf, proba = classify(classifiers[i], X_train, y_train, X_test, y_test, groups)\n predictions[:, i] = pred\n scores[i] = score\n probabilities[:, :, i] = proba\n \n return predictions, scores, classifiers, probabilities", "title": "" }, { "docid": "3aa90b4bb8da38e91408b6418cdf26ac", "score": "0.47961676", "text": "def get_box_classifier_feature_extractor_model(self, name=None):\n if not self.classification_backbone:\n self.classification_backbone = inception_resnet_v2.inception_resnet_v2(\n self._train_batch_norm,\n output_stride=self._first_stage_features_stride,\n align_feature_maps=True,\n weight_decay=self._weight_decay,\n weights=None,\n include_top=False)\n with tf.name_scope(name):\n with tf.name_scope('InceptionResnetV2'):\n proposal_feature_maps = self.classification_backbone.get_layer(\n name='block17_20_ac').output\n proposal_classifier_features = self.classification_backbone.get_layer(\n name='conv_7b_ac').output\n\n keras_model = model_util.extract_submodel(\n model=self.classification_backbone,\n inputs=proposal_feature_maps,\n outputs=proposal_classifier_features)\n for variable in keras_model.variables:\n self._variable_dict[variable.name[:-2]] = variable\n return keras_model", "title": "" }, { "docid": "b7baf1aafc6c249f881889b5b51b7790", "score": "0.47794324", "text": "def generateBase(self, test_loader):\n Features = []\n ItermClass = []\n for imgs, targets in tqdm(test_loader):\n # features = self.batched_inference(imgs)\n features = self.forward(imgs)\n Features.append(features)\n ItermClass.append(targets)\n\n # Features = torch.cat(Features, dim=1).cpu() # [num_models, num_imgs, features_dim]\n # List[num_batch x List[num_models x (num_images, features_dim)]]\n # List[num_models x (num_images x num_batch, features_dim)]\n Features = [torch.cat(x, 0) for x in zip(*Features)] # to numpy\n ItermClass = torch.cat(ItermClass, dim=0)\n result = {'feature': Features, 'class': ItermClass}\n return result", "title": "" }, { "docid": "547b4f09d48169f14dd57f16b47e6b98", "score": "0.47780475", "text": "def multi_label_classify(self, input):\n raise MethodNotImplemented", "title": "" }, { "docid": "71fc1ee926097fa6d185b83cd49c74f1", "score": "0.47764125", "text": "def construct(self, width, height, num_channels, num_categories, pretrained_weights=False):\n raise NotImplementedError()", "title": "" }, { "docid": "32e9eab9a4f75bf578fb9574c4881e9b", "score": "0.47745636", "text": "def multiclasskeypoint_nms(multi_bboxes,\n multi_keypoints,\n multi_scores,\n score_thr,\n nms_cfg,\n max_num=-1,\n score_factors=None):\n num_classes = multi_scores.shape[1]\n bboxes, keypoints, labels = [], [], []\n nms_cfg_ = nms_cfg.copy()\n nms_type = nms_cfg_.pop('type', 'nms')\n nms_op = getattr(nms_wrapper, nms_type)\n for i in range(1, num_classes):\n cls_inds = multi_scores[:, i] > score_thr\n if not cls_inds.any():\n continue\n # get bboxes and scores of this class\n if multi_bboxes.shape[1] == 4:\n _bboxes = multi_bboxes[cls_inds, :]\n _keypoints = multi_keypoints[cls_inds, :]\n else:\n _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]\n #待扩展\n 
_keypoints = None\n _scores = multi_scores[cls_inds, i]\n if score_factors is not None:\n _scores *= score_factors[cls_inds]\n cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)\n cls_dets, inds = nms_op(cls_dets, **nms_cfg_)\n cls_keypoints = torch.cat([_keypoints, _scores[:, None]], dim=1)\n cls_keypoints = cls_keypoints[inds, : ]\n cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ),\n i - 1,\n dtype=torch.long)\n bboxes.append(cls_dets)\n keypoints.append(cls_keypoints)\n labels.append(cls_labels)\n if bboxes:\n bboxes = torch.cat(bboxes)\n keypoints = torch.cat(keypoints)\n labels = torch.cat(labels)\n if bboxes.shape[0] > max_num:\n _, inds = bboxes[:, -1].sort(descending=True)\n inds = inds[:max_num]\n bboxes = bboxes[inds]\n keypoints = keypoints[inds]\n labels = labels[inds]\n else:\n bboxes = multi_bboxes.new_zeros((0, 5))\n keypoints = multi_bboxes.new_zeros((0, 9))\n labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)\n\n return bboxes, keypoints, labels", "title": "" }, { "docid": "5f25788f42065ee5186eb640a09ad4ac", "score": "0.47700873", "text": "def _create_model(self, num_cats, num_features, output_dim = 1, emb_dim = 128, lstm_dim = 128, \n batch_size = 16, dropout = 0.1, count_data = False, inference_mask = -1):\n\n cont_inputs = Input(shape = (self.train_window, num_features), batch_size = self.batch_size)\n cat_inputs = Input(shape = (self.train_window,), batch_size = self.batch_size)\n embedding = Embedding(num_cats, emb_dim)(cat_inputs)\n\n masked_input = Masking(mask_value = inference_mask)(cont_inputs)\n concatenate = Concatenate()([masked_input, embedding])\n\n lstm_out = LSTMResetStateful(lstm_dim, \n return_sequences = True,\n stateful = True,\n dropout = dropout, \n recurrent_dropout = dropout, \n unit_forget_bias = True,\n name = 'lstm')(concatenate)\n\n mu = Dense(output_dim, \n kernel_initializer = 'glorot_normal',\n bias_initializer = 'glorot_normal',\n name = 'mu')(lstm_out)\n\n sigma = Dense(output_dim, \n kernel_initializer = 'glorot_normal',\n bias_initializer = 'glorot_normal',\n name = 'sigma')(lstm_out)\n \n model = Model(inputs = [cont_inputs, cat_inputs], outputs = [mu, sigma])\n\n return model", "title": "" }, { "docid": "e07a17d3f89dbea1e517fbdc3485c971", "score": "0.47671142", "text": "def __init__(self, in_features, out_features, input_layer=False):\n\n #######################\n # PUT YOUR CODE HERE #\n #######################\n\n #######################\n # END OF YOUR CODE #\n #######################", "title": "" }, { "docid": "c6f3546a25271ecb80fd9e06fd07ec0f", "score": "0.4767031", "text": "def __init__(self, layer_sizes):\n super().__init__()\n\n self.embedding_model = ProductModel()\n\n # Then construct the layers.\n self.dense_layers = tf.keras.Sequential()\n\n # Use the ReLU activation for all but the last layer.\n for layer_size in layer_sizes[:-1]:\n self.dense_layers.add(tf.keras.layers.Dense(layer_size, activation=\"relu\"))\n\n # No activation for the last layer.\n for layer_size in layer_sizes[-1:]:\n self.dense_layers.add(tf.keras.layers.Dense(layer_size))", "title": "" }, { "docid": "ff729f3f31effd59f52ded99077bf9f3", "score": "0.4766644", "text": "def build(self, input_shapes):\n feature_channels = [\n shape_utils.get_dim_as_int(input_shape[3])\n for input_shape in input_shapes\n ]\n has_different_feature_channels = len(set(feature_channels)) > 1\n if has_different_feature_channels:\n inserted_layer_counter = 0\n target_channel = max(set(feature_channels), key=feature_channels.count)\n tf.logging.info('Not all 
feature maps have the same number of '\n 'channels, found: {}, appending additional projection '\n 'layers to bring all feature maps to uniformly have {} '\n 'channels.'.format(feature_channels, target_channel))\n else:\n # Place holder variables if has_different_feature_channels is False.\n target_channel = -1\n inserted_layer_counter = -1\n\n def _build_layers(tower_name_scope, feature_index):\n conv_layers, base_tower_layers = self._compute_base_tower(\n tower_name_scope=tower_name_scope, feature_index=feature_index)\n if tower_name_scope not in self._head_scope_conv_layers:\n self._head_scope_conv_layers[tower_name_scope] = conv_layers\n return base_tower_layers\n\n for feature_index in range(len(input_shapes)):\n # Additional projection layers should not be shared as input channels\n # (and thus weight shapes) are different\n inserted_layer_counter, projection_layers = (\n self._insert_additional_projection_layer(\n inserted_layer_counter, target_channel))\n self._additional_projection_layers.append(projection_layers)\n\n if self._share_prediction_tower:\n box_tower_scope = 'PredictionTower'\n else:\n box_tower_scope = 'BoxPredictionTower'\n # For box tower base\n box_tower_layers = _build_layers(box_tower_scope, feature_index)\n self._base_tower_layers_for_heads[BOX_ENCODINGS].append(box_tower_layers)\n\n for head_name in self._sorted_head_names:\n if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:\n tower_name_scope = 'ClassPredictionTower'\n else:\n tower_name_scope = '{}PredictionTower'.format(head_name)\n box_tower_layers = _build_layers(tower_name_scope, feature_index)\n self._base_tower_layers_for_heads[head_name].append(box_tower_layers)\n\n self.built = True", "title": "" }, { "docid": "2df29a11580a0bfccd6b7a333f207c15", "score": "0.47648188", "text": "def __init__(self,\r\n hidden_size,\r\n output_size,\r\n num_layers,\r\n **kwargs):\r\n\r\n a = tf.keras.layers.Conv2D(\r\n output_size, 3, padding='same', **kwargs)\r\n\r\n layers = [a]\r\n\r\n for layer in range(num_layers):\r\n\r\n a = ResBlockDown(hidden_size,\r\n output_size,\r\n **kwargs)\r\n\r\n b = ResBlock(hidden_size,\r\n output_size,\r\n **kwargs)\r\n\r\n layers.extend([a, b])\r\n\r\n a = tf.keras.layers.LayerNormalization(axis=[1, 2, 3], **kwargs)\r\n\r\n b = tf.keras.layers.ReLU(negative_slope=0.2)\r\n\r\n c = tf.keras.layers.GlobalAveragePooling2D()\r\n\r\n d = tf.keras.layers.Dense(1, **kwargs)\r\n\r\n layers.extend([a, b, c, d])\r\n\r\n super(Discriminator, self).__init__(layers)\r\n\r\n # these parameters need to be stored so that\r\n # tf.keras.model.save_model works\r\n self.hidden_size = hidden_size\r\n self.output_size = output_size\r\n self.num_layers = num_layers\r\n self.kwargs = kwargs", "title": "" }, { "docid": "9103088b3de54ed82d378ba0a6ddfbbe", "score": "0.47620067", "text": "def construct_small(self, input_shape, output_shape, NUM_TRAIN_EXAMPLES, pooling_len=10):\n \n poolpadding = 'valid'\n pool = tf.keras.layers.MaxPooling1D\n \n kl_divergence_function = (lambda q, p, _: tfd.kl_divergence(q, p) / # pylint: disable=g-long-lambda\n tf.cast(NUM_TRAIN_EXAMPLES, dtype=tf.float32))\n \n model_in = tf.keras.layers.Input(shape=input_shape)\n conv_1 = tfp.layers.Convolution1DFlipout(6, kernel_size=5, padding=\"same\", strides=1,\n kernel_divergence_fn=kl_divergence_function,\n activation=tf.nn.relu)\n x = conv_1(model_in)\n \n x = pool(pooling_len, padding=poolpadding)(x)\n x = tf.keras.layers.Flatten()(x)\n \n \n dense = tfp.layers.DenseFlipout(output_shape, kernel_divergence_fn=kl_divergence_function,\n 
activation=tf.nn.softmax)\n \n model_out = dense(x)\n model = tf.keras.Model(model_in, model_out)\n \n return model", "title": "" }, { "docid": "c6c372dbc93c3833d0800f25ee636eab", "score": "0.47575995", "text": "def __init__(self, scale=True, kfolds=5, alpha_stepsize=1/3.0):\n \n self.kfolds = kfolds\n self.act_description = (\"activity values: coefficients from \"\n \"fitted model\")\n\n # initialize attributes\n self.act_ = None \n self.sig_ = None \n \n mtk = MultiTaskLasso()\n parameters = {\n \"alpha\": [np.exp(-x) for x in np.arange(0, 10, alpha_stepsize)],\n }\n self.clf = GridSearchCV(mtk, parameters, cv=kfolds, n_jobs=4)", "title": "" }, { "docid": "cdd3d08a6912a1fb6bb74b17d62e618e", "score": "0.47564104", "text": "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y, z):\n assert isinstance(x, dict)\n assert isinstance(y, Tensor)\n assert isinstance(z, Tensor)\n return x\n\n net = Net()\n model_ = Model(net)\n\n return model_", "title": "" }, { "docid": "2ca640a26100dce09f13586554756a12", "score": "0.4756237", "text": "def _add_model(self):\n l1 = self._add_layer(\"layer1\", self.x, 784, 150, activation_function=tf.sigmoid)\n\n l1_drop_out = tf.nn.dropout(l1, self.keep_prob)\n\n prediction = self._add_layer(\"layer2\", l1_drop_out, 150, 10, activation_function=tf.nn.softmax)\n\n # add regular\n regularizer = tf.contrib.layers.l2_regularizer(scale=0.001)\n reg_term = tf.contrib.layers.apply_regularization(regularizer)\n\n self._loss = -tf.reduce_sum(self.label * tf.log(prediction)) + reg_term\n # 优化器选取 学习率设置 此处学习率置为0.1\n train_step = tf.train.AdamOptimizer(beta2=0.9999).minimize(self.loss)\n\n # for predict\n self.prediction = tf.argmax(prediction, 1)\n\n # for test\n correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(self.label, 1))\n self._accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n return train_step", "title": "" }, { "docid": "ef34bebc9146253e79de45f1a83ab28e", "score": "0.4753683", "text": "def build_model(self):### \r\n # Define input and label images\r\n self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, self.color_dim], name='images')\r\n self.labels = tf.placeholder(tf.float32, [None, self.label_size, self.label_size, self.color_dim], name='labels')\r\n \r\n # Define CNN weights and biases\r\n self.weights = {\r\n 'w1': tf.Variable(tf.random_normal([9, 9, 1, 64], mean=0, stddev=1e-3), name='w1'),\r\n 'w2': tf.Variable(tf.random_normal([1, 1, 64, 32], mean=0, stddev=1e-3), name='w2'),\r\n 'w3': tf.Variable(tf.random_normal([5, 5, 32, 1], mean=0, stddev=1e-3), name='w3')\r\n } \r\n self.biases = {\r\n 'b1': tf.Variable(tf.zeros([64]), name='b1'),\r\n 'b2': tf.Variable(tf.zeros([32]), name='b2'),\r\n 'b3': tf.Variable(tf.zeros([1]), name='b3')\r\n }\r\n \r\n # Model output\r\n self.pred = self.model()\r\n \r\n # Define loss function (MSE) \r\n self.loss = tf.reduce_mean(tf.square(self.labels - self.pred))\r\n \r\n self.saver = tf.train.Saver()", "title": "" }, { "docid": "5b61fe604285c64d5983f111ab8d96fa", "score": "0.47535312", "text": "def __init__(\n self,\n task_type: constants.ModelTaskType,\n n_units_in: int,\n n_units_out: int,\n n_layers_hidden: int = 1,\n n_units_hidden: int = 100,\n nonlin: Nonlin = \"relu\",\n nonlin_out: Optional[List[Tuple[Nonlin, int]]] = None,\n lr: float = 1e-3,\n weight_decay: float = 1e-3,\n opt_betas: Tuple[float, float] = (0.9, 0.999),\n n_iter: int = 1000,\n batch_size: int = 500,\n n_iter_print: int = 100,\n random_state: int = 0,\n patience: int = 
10,\n n_iter_min: int = 100,\n dropout: float = 0.1,\n clipping_value: int = 1,\n batch_norm: bool = False,\n early_stopping: bool = True,\n residual: bool = False,\n loss: Optional[Callable] = None,\n device: Any = constants.DEVICE,\n ) -> None:\n super(MLP, self).__init__()\n\n if n_units_in < 0:\n raise ValueError(\"n_units_in must be >= 0\")\n if n_units_out < 0:\n raise ValueError(\"n_units_out must be >= 0\")\n\n utils.enable_reproducibility(random_state)\n self.device = device\n self.task_type = task_type\n self.random_state = random_state\n\n block: Type[LinearLayer]\n if residual:\n block = ResidualLayer\n else:\n block = LinearLayer\n\n # network\n layers: List[nn.Module] = []\n\n if n_layers_hidden > 0:\n layers.append(\n block(\n n_units_in,\n n_units_hidden,\n batch_norm=batch_norm,\n nonlin=nonlin,\n device=device,\n )\n )\n n_units_hidden += int(residual) * n_units_in\n\n # add required number of layers\n for i in range(n_layers_hidden - 1): # pylint: disable=unused-variable\n layers.append(\n block(\n n_units_hidden,\n n_units_hidden,\n batch_norm=batch_norm,\n nonlin=nonlin,\n dropout=dropout,\n device=device,\n )\n )\n n_units_hidden += int(residual) * n_units_hidden\n\n # add final layers\n layers.append(nn.Linear(n_units_hidden, n_units_out, device=device))\n else:\n layers = [nn.Linear(n_units_in, n_units_out, device=device)]\n\n if nonlin_out is not None:\n total_nonlin_len = 0\n activations = []\n for nonlin, nonlin_len in nonlin_out:\n total_nonlin_len += nonlin_len\n activations.append((get_nonlin(nonlin), nonlin_len))\n\n if total_nonlin_len != n_units_out:\n raise RuntimeError(\n f\"Shape mismatch for the output layer. Expected length {n_units_out}, but got {nonlin_out} \"\n f\"with length {total_nonlin_len}\"\n )\n layers.append(MultiActivationHead(activations, device=device))\n elif self.task_type == \"classification\":\n layers.append(MultiActivationHead([(GumbelSoftmax(), n_units_out)], device=device))\n\n self.model = nn.Sequential(*layers).to(self.device)\n\n # optimizer\n self.lr = lr\n self.weight_decay = weight_decay\n self.opt_betas = opt_betas\n self.optimizer = torch.optim.Adam(\n self.parameters(),\n lr=self.lr,\n weight_decay=self.weight_decay,\n betas=self.opt_betas,\n )\n\n # training\n self.n_iter = n_iter\n self.n_iter_print = n_iter_print\n self.n_iter_min = n_iter_min\n self.batch_size = batch_size\n self.patience = patience\n self.clipping_value = clipping_value\n self.early_stopping = early_stopping\n if loss is not None:\n self.loss = loss\n else:\n if task_type == \"classification\":\n self.loss = nn.CrossEntropyLoss()\n else:\n self.loss = nn.MSELoss()", "title": "" }, { "docid": "70207f44e522e7732cadf0b9d4761789", "score": "0.47526684", "text": "def testMultiClass(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n classifier = tf.contrib.learn.DNNClassifier(n_classes=3,\n feature_columns=cont_features,\n hidden_units=[3, 3])\n\n classifier.fit(input_fn=_iris_input_fn, steps=1000)\n classifier.evaluate(input_fn=_iris_input_fn, steps=100)\n self.assertTrue('centered_bias_weight' in classifier.get_variable_names())\n # TODO(ispir): Enable accuracy check after resolving the randomness issue.\n # self.assertGreater(scores['accuracy/mean'], 0.6)", "title": "" }, { "docid": "7dec432c4648186d550d01574b04df80", "score": "0.47501418", "text": "def test_model_multiclass(self):\n data_loader = DataLoader(self._static_dataset,\n batch_size=64,\n shuffle=True,\n num_workers=0)\n\n for batch in data_loader:\n 
# context is a list of list of word embeddings\n batch[\"device\"] = \"cpu\"\n out = self.model(batch).squeeze()\n loss = multi_class_cross_entropy(out, batch[\"l\"])\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n break\n loss = loss.data.numpy()\n np.testing.assert_equal(math.isnan(loss), False)\n np.testing.assert_equal(loss >= 0, True)", "title": "" }, { "docid": "e089844769f493ce91b8f5780885facd", "score": "0.47487363", "text": "def create_classifier(train_features, train_targets, modelname):\n if modelname == 'SVM':\n model = svm.LinearSVC(max_iter=10000)\n\n vec = DictVectorizer()\n features_vectorized = vec.fit_transform(train_features)\n model.fit(features_vectorized, train_targets)\n\n return model, vec", "title": "" }, { "docid": "f98c7b73d0edcdaee88fe3b1b2bc4bf0", "score": "0.4745512", "text": "def build(self, inputs, training_pl):\n pass", "title": "" }, { "docid": "31458f38dc7e3890189b0baac405512c", "score": "0.4739339", "text": "def __init__(self, \r\n batch_size=1,\r\n iou_thrs=0.5,\r\n nms_thrs=0.5,\r\n conf_thrs=0.5,\r\n min_face=20,\r\n scale_factor=0.79,\r\n min_size=12,\r\n max_size=224,\r\n no_mask=False,\r\n alpha=(1., 0.5, 0.5),\r\n rd_size=False,\r\n src_size=False,\r\n nms_topk=None,\r\n model_root='data/model',\r\n demo_root='data/demo'):\r\n self.batch_size = batch_size\r\n self.iou_thrs = iou_thrs\r\n self.nms_thrs = nms_thrs\r\n self.conf_thrs = conf_thrs\r\n self.min_face = min_face\r\n self.scale_factor = scale_factor\r\n self.min_size = min_size\r\n self.max_size = max_size\r\n self.no_mask = no_mask\r\n self.rd_size = rd_size\r\n self.src_size = src_size\r\n self.alpha = alpha\r\n self.nms_topk = nms_topk\r\n self.model_root = model_root\r\n self.demo_root = demo_root\r\n\r\n self.neg_thrs = 0.3\r\n self.pos_thrs = 0.65\r\n self.part_thrs = 0.4\r\n\r\n self.prior_boxes = {}\r\n self.cell_size = 13\r\n self.sizelist = self._get_size_list(self.min_size, self.max_size)\r\n #print(self.sizelist)\r\n\r\n self._setup()\r\n self.sess = sm.Session()\r\n print(self.sess._variables)", "title": "" }, { "docid": "760af9cd8167c9379053e4b80affe142", "score": "0.4738971", "text": "def _predict_by_feat_single(self,\n tl_heat: Tensor,\n br_heat: Tensor,\n tl_off: Tensor,\n br_off: Tensor,\n img_meta: dict,\n tl_emb: Optional[Tensor] = None,\n br_emb: Optional[Tensor] = None,\n tl_centripetal_shift: Optional[Tensor] = None,\n br_centripetal_shift: Optional[Tensor] = None,\n rescale: bool = False,\n with_nms: bool = True) -> InstanceData:\n if isinstance(img_meta, (list, tuple)):\n img_meta = img_meta[0]\n\n batch_bboxes, batch_scores, batch_clses = self._decode_heatmap(\n tl_heat=tl_heat.sigmoid(),\n br_heat=br_heat.sigmoid(),\n tl_off=tl_off,\n br_off=br_off,\n tl_emb=tl_emb,\n br_emb=br_emb,\n tl_centripetal_shift=tl_centripetal_shift,\n br_centripetal_shift=br_centripetal_shift,\n img_meta=img_meta,\n k=self.test_cfg.corner_topk,\n kernel=self.test_cfg.local_maximum_kernel,\n distance_threshold=self.test_cfg.distance_threshold)\n\n if rescale and 'scale_factor' in img_meta:\n batch_bboxes /= batch_bboxes.new_tensor(\n img_meta['scale_factor']).repeat((1, 2))\n\n bboxes = batch_bboxes.view([-1, 4])\n scores = batch_scores.view(-1)\n clses = batch_clses.view(-1)\n\n det_bboxes = torch.cat([bboxes, scores.unsqueeze(-1)], -1)\n keepinds = (det_bboxes[:, -1] > -0.1)\n det_bboxes = det_bboxes[keepinds]\n det_labels = clses[keepinds]\n\n if with_nms:\n det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels,\n self.test_cfg)\n\n results = 
InstanceData()\n results.bboxes = det_bboxes[..., :4]\n results.scores = det_bboxes[..., 4]\n results.labels = det_labels\n return results", "title": "" }, { "docid": "dc29baae1fdaad2531b09c7ef1e4424f", "score": "0.47385395", "text": "def __init__(self, num_concepts, num_classes, hidden_sizes=(10, 5, 5, 10), dropout=0.5, **kwargs):\n super().__init__()\n self.num_concepts = num_concepts\n self.num_classes = num_classes\n self.hidden_sizes = hidden_sizes\n self.dropout = dropout\n self.model = keras.Sequential()\n index = 1\n for h, h_next in zip(hidden_sizes, hidden_sizes[1:]):\n #self.model.add(keras.layers.Dense(h, h_next, activation='linear'))\n #self.model.add(keras.layers.Dropout(self.dropout))\n #self.model.add(keras.layers.Dense(h, h_next, activation='relu'))\n\n self[\"para_lin\"+index] = keras.layers.Dense(h, h_next, activation='linear')\n self[\"para_drop\"+index] = keras.layers.Dropout(self.dropout)\n self[\"para_relu\"+index] = keras.layers.Dense(h, h_next, activation='relu')\n index += 1\n\n self.model.pop()", "title": "" }, { "docid": "eff690265bb00acc6cca7129e7d43413", "score": "0.4737579", "text": "def mlp(**kwargs):\n return MLP(**kwargs)", "title": "" }, { "docid": "e9ad5fc237fcc2b081740c8bde24b4b6", "score": "0.47351134", "text": "def c3d_model(batch_size):\n main_input = Input(shape=(batch_size, 112, 112, 3), name=\"main_input\")\n # 1st layer group\n x = Conv3D(\n 64,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv1\",\n strides=(1, 1, 1),\n )(main_input)\n x = MaxPooling3D(\n pool_size=(1, 2, 2), strides=(1, 2, 2), padding=\"valid\", name=\"pool1\"\n )(x)\n # 2nd layer group\n x = Conv3D(\n 128,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv2\",\n strides=(1, 1, 1),\n )(x)\n x = MaxPooling3D(\n pool_size=(2, 2, 2), strides=(2, 2, 2), padding=\"valid\", name=\"pool2\"\n )(x)\n # 3rd layer group\n x = Conv3D(\n 256,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv3a\",\n strides=(1, 1, 1),\n )(x)\n x = Conv3D(\n 256,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv3b\",\n strides=(1, 1, 1),\n )(x)\n x = MaxPooling3D(\n pool_size=(2, 2, 2), strides=(2, 2, 2), padding=\"valid\", name=\"pool3\"\n )(x)\n # 4th layer group\n x = Conv3D(\n 512,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv4a\",\n strides=(1, 1, 1),\n )(x)\n x = Conv3D(\n 512,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv4b\",\n strides=(1, 1, 1),\n )(x)\n x = MaxPooling3D(\n pool_size=(2, 2, 2), strides=(2, 2, 2), padding=\"valid\", name=\"pool4\"\n )(x)\n # 5th layer group\n x = Conv3D(\n 512,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv5a\",\n strides=(1, 1, 1),\n )(x)\n x = Conv3D(\n 512,\n kernel_size=(3, 3, 3),\n activation=\"relu\",\n padding=\"same\",\n name=\"conv5b\",\n strides=(1, 1, 1),\n )(x)\n x = ZeroPadding3D(padding=(0, 1, 1))(x)\n x = MaxPooling3D(\n pool_size=(2, 2, 2), strides=(2, 2, 2), padding=\"valid\", name=\"pool5\"\n )(x)\n x = Flatten()(x)\n # FC layers group\n x = Dense(2048, activation=\"relu\", name=\"fc6\")(x)\n x = Dropout(0.5)(x)\n x = Dense(2048, activation=\"relu\", name=\"fc7\")(x)\n x = Dropout(0.5)(x)\n predictions = Dense(2, activation=\"softmax\", name=\"fc8\")(x)\n\n model = Model(inputs=main_input, outputs=predictions)\n return model", "title": "" }, { "docid": "175cf9a0f39b7271e535429ee808b9f6", "score": "0.47335392", "text": 
"def create_folds(self):\n \n \"\"\"\n namedlist is a factory function for creating mutable collections of list items;\n it is similar to python's list but enables us to name each component and access using\n dot notation.\n \"\"\"\n Fold = namedlist('Fold', 'input output rmse')\n \n \"\"\"\n class attribute folds is a dictionary with 2 keys;\n key=1, refers to namedlist that holds data related to fold 1\n key=2, refers to namedlist that holds data related to fold 2\n \"\"\"\n self.folds = dict() \n for i in [1,2]:\n self.folds[i] = Fold(input=None, output=None,rmse=None)\n \n # add inputs and outputs to the folds by intelligently splitting data; see class method add_data_in_folds()\n self.add_data_in_folds()\n # Standardize inputs in the folds for better ML performance; see class method standardize_folds_inputs() \n self.standardize_folds_inputs()\n \n \"\"\"\n Now after having inputs and outputs in both folds, we update RMSE.\n As of now, we have not extracted any feature.\n Hence, we consider a base model i.e., one that spits out mean of its training target. \n \"\"\"\n # predictions of base model over fold 1 is a constant; mean of target variable in fold 2\n # predictions of base model over fold 2 is a constant; mean of target variable in fold 1\n # updating RMSE based on this logic. \n \n self.folds[1]._update(rmse=rmse(np.abs(self.folds[1].output - self.folds[2].output.mean()), 0))\n self.folds[2]._update(rmse=rmse(np.abs(self.folds[2].output - self.folds[1].output.mean()), 0))", "title": "" }, { "docid": "3780acb6578b700ca62085e8bcdb8a91", "score": "0.47323805", "text": "def __init__(self, input_root: str, target_resolution: Tuple[int] = (1.5, 1.5, 8),\n target_shape: Tuple[int]= None, class_indexes: Tuple[int] = [1, 2, 3, 4],\n patch_size: Tuple[int] = (128, 128, 26), train_batch_size: int=64,\n val_batch_size: int=64, num_workers: int=4) -> None:\n super().__init__()\n self.input_root = input_root\n self.target_resolution = target_resolution\n self.target_shape = target_shape\n self.class_indexes = class_indexes\n self.train_batch_size = train_batch_size\n self.val_batch_size = val_batch_size\n self.num_workers = num_workers\n self.train_transform, self.test_transform = None, None\n #self.train_transform, self.test_transform = self.init_transforms(patch_size)", "title": "" }, { "docid": "6da75919e450f3dea37fcc3f618a2ae1", "score": "0.47290415", "text": "def construct_mlp(num_frames, input_size, num_classes,\n hidden_layer_size=128, num_hidden_layers=1, l2_reg=1e-5):\n # Input layer\n inp = Input(shape=(num_frames, input_size), dtype='float32', name='input')\n y = inp\n\n # Add hidden layers\n repr_size = input_size\n for idx in range(num_hidden_layers):\n y = TimeDistributed(Dense(hidden_layer_size, activation='relu',\n kernel_regularizer=regularizers.l2(l2_reg)),\n name='dense_{}'.format(idx+1),\n input_shape=(num_frames, repr_size))(y)\n repr_size = hidden_layer_size\n\n\n # Output layer\n y = TimeDistributed(Dense(num_classes, activation='sigmoid',\n kernel_regularizer=regularizers.l2(l2_reg)),\n name='output_t',\n input_shape=(num_frames, repr_size))(y)\n\n # Apply autopool over time dimension\n # y = AutoPool1D(kernel_constraint=keras.constraints.non_neg(),\n # axis=1, name='output')(y)\n y = AutoPool1D(axis=1, name='output')(y)\n\n m = Model(inputs=inp, outputs=y)\n m.name = 'urban_sound_classifier'\n print(m.summary())\n\n return m", "title": "" }, { "docid": "dc901677df2fdf0c682f5cb528b06fca", "score": "0.47278613", "text": "def model(self): \r\n \r\n # Layer 1: Patch 
extraction and representation\r\n conv1 = tf.nn.relu(tf.nn.conv2d(self.images, self.weights['w1'], strides=[1,1,1,1], padding='VALID') + self.biases['b1'])\r\n \r\n # Layer 2: Non-linear mapping\r\n conv2 = tf.nn.relu(tf.nn.conv2d(conv1, self.weights['w2'], strides=[1,1,1,1], padding='VALID') + self.biases['b2'])\r\n \r\n # Layer 3: Reconstruction\r\n conv3 = tf.nn.conv2d(conv2, self.weights['w3'], strides=[1,1,1,1], padding='VALID') + self.biases['b3']\r\n \r\n return conv3", "title": "" }, { "docid": "06e82e31806ee145053fc83481a0a361", "score": "0.47266337", "text": "def __init__(self, **kwargs):\n self.group = {}\n #Rennome train_list a cause conflit avec fonction train\n self.train_list = []\n self.train_labels = []\n self.means = {}\n self.variances = {}", "title": "" }, { "docid": "78bdc3f6b979db9c7ccef741df50d64b", "score": "0.4721535", "text": "def __init__(__self__, *,\n task_type: pulumi.Input[str],\n training_data: pulumi.Input['MLTableJobInputArgs'],\n featurization_settings: Optional[pulumi.Input['NlpVerticalFeaturizationSettingsArgs']] = None,\n limit_settings: Optional[pulumi.Input['NlpVerticalLimitSettingsArgs']] = None,\n log_verbosity: Optional[pulumi.Input[Union[str, 'LogVerbosity']]] = None,\n target_column_name: Optional[pulumi.Input[str]] = None,\n validation_data: Optional[pulumi.Input['MLTableJobInputArgs']] = None):\n pulumi.set(__self__, \"task_type\", 'TextClassificationMultilabel')\n pulumi.set(__self__, \"training_data\", training_data)\n if featurization_settings is not None:\n pulumi.set(__self__, \"featurization_settings\", featurization_settings)\n if limit_settings is not None:\n pulumi.set(__self__, \"limit_settings\", limit_settings)\n if log_verbosity is None:\n log_verbosity = 'Info'\n if log_verbosity is not None:\n pulumi.set(__self__, \"log_verbosity\", log_verbosity)\n if target_column_name is not None:\n pulumi.set(__self__, \"target_column_name\", target_column_name)\n if validation_data is not None:\n pulumi.set(__self__, \"validation_data\", validation_data)", "title": "" }, { "docid": "87b5b76663d175daec41d1a0504566c2", "score": "0.47211018", "text": "def create_model(url, num_classes = 10, input_shape = (224, 224, 3)):\n feature_extractor_layer = hub.KerasLayer(url,\n trainable=False, # freeze the underlying patterns\n name='feature_extraction_layer',\n input_shape = input_shape)\n #Note if i add layer to model directly, it doent accept input_shape, so first need to create layer, then add it to model\n\n model = tf.keras.Sequential([feature_extractor_layer,\n Dense(num_classes,\n activation='softmax',\n name='output_layer')\n ])\n\n return model", "title": "" }, { "docid": "3fdf4aabfef7292416872007edaaf7b3", "score": "0.4718426", "text": "def get_bboxes_single(self,\n cls_score_list,\n bbox_pred_list,\n mlvl_anchors,\n img_shape,\n scale_factor,\n cfg,\n rescale=False):\n assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)\n mlvl_bboxes = []\n mlvl_scores = []\n for cls_score, bbox_pred, anchors in zip(cls_score_list,\n bbox_pred_list, mlvl_anchors):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n cls_score = cls_score.permute(\n 1, 2, 0).reshape(-1, self.cls_out_channels)\n\n if self.use_sigmoid_cls:\n scores = cls_score.sigmoid()\n else:\n scores = cls_score.softmax(-1)\n\n bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 5)\n nms_pre = cfg.get('nms_pre', -1)\n if nms_pre > 0 and scores.shape[0] > nms_pre:\n # Get maximum scores for foreground classes.\n if self.use_sigmoid_cls:\n max_scores, _ = 
scores.max(dim=1)\n else:\n max_scores, _ = scores[:, 1:].max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n anchors = anchors[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n bboxes = delta2bbox_rotated(anchors, bbox_pred, self.target_means,\n self.target_stds, img_shape)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n if rescale:\n mlvl_bboxes[..., :4] /= mlvl_bboxes.new_tensor(scale_factor)\n mlvl_scores = torch.cat(mlvl_scores)\n if self.use_sigmoid_cls:\n # Add a dummy background class to the front when using sigmoid\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)\n det_bboxes, det_labels = multiclass_nms_rotated(mlvl_bboxes,\n mlvl_scores,\n cfg.score_thr, cfg.nms,\n cfg.max_per_img)\n return det_bboxes, det_labels", "title": "" }, { "docid": "739aee41d0353689225c3f0a04027f9f", "score": "0.47070077", "text": "def build(self, unused_input_shapes):\n\n # 2 means is A, or is not A, here A is yes,no, unknown\n\n self.ynu_dense = tf.keras.layers.Dense(\n units=3, kernel_initializer=self.initializer, name='ynu_final_dense')\n\n super(BertYNULogitsLayer, self).build(unused_input_shapes)", "title": "" }, { "docid": "8148fe85963d23f1a6a82f765b80290a", "score": "0.47054854", "text": "def __call__(self, feature_map, level_id):\n batch_size, height, width, _ = K.shape(feature_map)\n n_poly = 2 * self.degree * self.proposals_per_anchor\n n_cl = self.n_classes * self.proposals_per_anchor\n n_h = 8 * self.proposals_per_anchor\n nf = self.n_features\n\n p = Conv2D(nf, (3, 3), padding='same', activation='relu')(feature_map)\n p = Conv2D(nf, (3, 3), padding='same', activation='relu')(p)\n p = Conv2D(nf, (3, 3), padding='same', activation='relu')(p)\n p = Conv2D(nf, (3, 3), padding='same', activation='relu')(p)\n p = Conv2D(n_poly, (3, 3), padding='same', activation='sigmoid', name='poly_output')(p)\n K.reshape(p, (batch_size, height, width, self.proposals_per_anchor, 2 * self.degree))\n\n cl = Conv2D(nf, (3, 3), padding='same', activation='relu')(feature_map)\n cl = Conv2D(nf, (3, 3), padding='same', activation='relu')(cl)\n cl = Conv2D(nf, (3, 3), padding='same', activation='relu')(cl)\n cl = Conv2D(nf, (3, 3), padding='same', activation='relu')(cl)\n cl = Conv2D(n_cl, (3, 3), padding='same', activation='sigmoid', name='class_output')(cl)\n K.reshape(cl, (batch_size, height, width, self.proposals_per_anchor, self.n_classes))\n\n h = None\n if self.propose_homographies:\n h = Conv2D(nf, (3, 3), padding='same', activation='relu', name='hom_conv_1')(feature_map)\n h = Conv2D(nf, (3, 3), padding='same', activation='relu', name='hom_conv_2')(h)\n h = Conv2D(nf, (3, 3), padding='same', activation='relu', name='hom_conv_3')(h)\n h = Conv2D(nf, (3, 3), padding='same', activation='relu', name='hom_conv_4')(h)\n h = Conv2D(n_h, (3, 3), padding='same', activation='sigmoid', name='homography_output')(h)\n K.reshape(h, (batch_size, height, width, self.proposals_per_anchor, 8))\n return p, cl, h", "title": "" } ]
6a9bd6c9182d895d73b587ffbc60567d
Setup mock response 404
[ { "docid": "e7ddeeff820f007b87fef5066cd97f48", "score": "0.0", "text": "def __init__(self, data, status_code=404):\n self.data = data\n self.status_code = status_code", "title": "" } ]
[ { "docid": "f54208934f6fd1b96047516946acd0a7", "score": "0.76103675", "text": "def test_return_404(self):\n buffer = {}\n\n def start_response(code, headers):\n buffer['code'] = code\n buffer['headers'] = headers\n\n result = self.instance.return_404(\n start_response, msg=\"404 File Not Found\")\n assert buffer['code'] == '404 File Not Found'\n assert buffer['headers'] == [('Content-Type', 'text/html')]\n assert result == '404 File Not Found'", "title": "" }, { "docid": "4da7f5f35d61e8c723875dbe72768863", "score": "0.7597303", "text": "def assert404(self, response):\r\n\r\n self.assertStatus(response, 404)", "title": "" }, { "docid": "ba016e3b35cda04ebb2ad798129a9dc8", "score": "0.75368464", "text": "def _monkeyed_requests_get_404(url):\n if url in [\n \"http://config-binding-service:10000/service_component_all/testhostname\",\n \"http://config-binding-service:10000/service_component/testhostname\",\n ]:\n return FakeResponse(status_code=404, thejson={})\n raise Exception(\"Unexpected URL {0}!\".format(url))", "title": "" }, { "docid": "6ab0542671fcf0939cf4a70e14409b06", "score": "0.7514979", "text": "def test_404(self):\n response = self.client.get(\n '/wrong/url',\n headers=self.get_api_headers('email', 'password'))\n self.assertTrue(response.status_code == 404)\n json_response = json.loads(response.data.decode('utf-8'))\n self.assertTrue(json_response['error'] == 'not found')", "title": "" }, { "docid": "027d3f6439b0a479edcc45410f1c846e", "score": "0.7490862", "text": "def test_404(self):\n self.default_regr_test('404', '404/')", "title": "" }, { "docid": "65152ace6166ff12982d31d3ea32f03f", "score": "0.7480433", "text": "def test_get_404(self):\n res = self.get(\n '/{}/{}'.format(\n self.url_prefix,\n 12345,\n ),\n )\n assert res.status_code == 404", "title": "" }, { "docid": "b5ade7e28f9f810e6a94a47ce0267522", "score": "0.7429136", "text": "def test_response_404(self):\n client = Client(HTTP_HOST=settings.HTTP_HOST)\n response = client.get('/hudson-valley/foo/')\n self.failUnlessEqual(response.status_code, 404)", "title": "" }, { "docid": "91fe03e59a304696b2c233ba9b8f1d7d", "score": "0.74245477", "text": "def test_404(self):\n try:\n self.urlopen('/')\n except urllib2.HTTPError, err:\n self.assertEquals(err.code, 404, 'Expected 404 response')\n else:\n self.fail('Expected 404 response')", "title": "" }, { "docid": "5c41763da5c6c783868e13d45515d20c", "score": "0.74037343", "text": "def test_404(client):\n response = client.get('/blabla')\n assert response.status_code == 404", "title": "" }, { "docid": "ceb27c46ce4c1f69a040be9d01e66ea1", "score": "0.73859936", "text": "def test_404(self):\n response = self.client.get('/404')\n self.assertEqual(response.status_code, status.NOT_FOUND)\n self.assertEqual(response.content_type, 'text/html; charset=utf-8')", "title": "" }, { "docid": "98d3b50cce6ec407442fac0a2ee7e1fa", "score": "0.73101526", "text": "def test_get_invalid_path_raise_404(self):\n\n result = self.app.simulate_get('/somerandompath')\n self.assertEqual(result.status_code, 404)", "title": "" }, { "docid": "d2f0866e6b76b4aeb3dfc2853e2de706", "score": "0.73096615", "text": "def _mock_response(self):\r\n\r\n return flexmock(raise_for_status=lambda: None)", "title": "" }, { "docid": "d2f0866e6b76b4aeb3dfc2853e2de706", "score": "0.73096615", "text": "def _mock_response(self):\r\n\r\n return flexmock(raise_for_status=lambda: None)", "title": "" }, { "docid": "20dd3a7856b8fd29582f80a8782e8c33", "score": "0.7308766", "text": "def test_not_found_error(self):\n with client.test_mode as 
t:\n t.register('/ping/', 'ERRORED!!', status_code=404)\n with self.assertRaises(exc.NotFound):\n r = client.get('/ping/')", "title": "" }, { "docid": "811c550ecc91b7a497585b2a73680a36", "score": "0.73023844", "text": "def test_handle_api_error_404():\n # When I make an API request and receive a 404\n c = Client(account_id=\"foo\", api_key=\"bar\")\n response = Mock(status_code=404)\n error = Mock(message=\"foo\", response=response)\n\n # Then I should raise a NewRelicApiException\n c._handle_api_error.when.called_with(error)\\\n .should.throw(NewRelicUnknownApplicationException)", "title": "" }, { "docid": "ee4f6b5ee6716565e456eeb964542423", "score": "0.7275061", "text": "def test_get_unknown_url(self):\r\n self.get_response('/foo/bar', 404)", "title": "" }, { "docid": "1d9664aa4ae963d8bcb31d2014ab0b1a", "score": "0.7245447", "text": "def test_404(self):\n response = self.client.get(r('records:house', 234))\n self.assertEqual(404, response.status_code)", "title": "" }, { "docid": "1b924b822741378ea8d8b839fb2f6007", "score": "0.7231784", "text": "def mocked_requests_get_500_response(*args, **kwargs):\n response = Response()\n response.status_code = 500\n return response", "title": "" }, { "docid": "8b374873199db6c2bfa94331117c8218", "score": "0.7219081", "text": "def test_pretender_expired_add_preset_404():\n\n http_mock = HTTPMock(\"localhost\", 8000, timeout=0.1)\n time.sleep(0.3)\n preset = http_mock.when(\"POST /fred/test/one\")\n assert_raises(ConfigurationError, preset.reply, b\"You tested fred well\", 200)", "title": "" }, { "docid": "2624935f0219104918c8b9c26bfa0781", "score": "0.7181408", "text": "def test_404_error_page_working(self):\n\t\tresponse = self.client.get('/234234fasds')\n\t\tself.assertEquals(response.status_code, 404)", "title": "" }, { "docid": "fd5b874a1a8c65b9806a0de3548970ec", "score": "0.71808684", "text": "def handle_404(self):\n self.http_response = HTTPResponseBuilder(404, None, None)", "title": "" }, { "docid": "0206f2994c16834c6f90893174f6dc09", "score": "0.7154922", "text": "def test_get_erratum_failure(self):\n with mock.patch('errata.requests.get') as get:\n # Engage the not-found branch\n response = mock.MagicMock(status_code=404)\n response.json.return_value = test_structures.example_erratum\n get.return_value = response\n e = errata.get_erratum(123456)\n self.assertFalse(e)", "title": "" }, { "docid": "6dc0b1fe02c0f13f341b8d135cd7f845", "score": "0.7124236", "text": "def test_get404(self):\n with self.assertRaises(Exception) as context:\n self.api.get(\"bank_fake\")\n self.assertTrue(\"bank account not found\" in context.exception.__str__())", "title": "" }, { "docid": "68e9ef1526893129cc586b91273d26bc", "score": "0.7121717", "text": "def _can_not_send_response(url, *args, **kwargs):\n # Force a failure\n request = mock.Mock()\n request.status_code = 403\n request.content = '{}'\n return request", "title": "" }, { "docid": "b098cd84a873b0db631a201e9093f761", "score": "0.71021515", "text": "def test_404_handler():\n request = RequestFactory().get('/not/a/real/path/')\n request.user = AnonymousUser()\n request.development = development\n response = handle404(request)\n assert response.status_code == 404", "title": "" }, { "docid": "b4f64b0bf45d8af1ae4ef3dad7de122a", "score": "0.7101007", "text": "def test_404_errors(self):\n result = self.app.get(\"/api/v1/red-flags/////\")\n self.assertEqual(result.status_code, 404) \n self.assertIn('page not found', str(result.data))", "title": "" }, { "docid": "5a2fcd07d5aa80a48d46c702b7ce1cce", "score": "0.7061931", 
"text": "def test_core_response_failure(dirbs_core_mock, flask_app):\n response = flask_app.get(basic_status_api+'imei=12345678909999&token=12345token&source=web')\n assert response.status_code == 503", "title": "" }, { "docid": "08cf634951304320f5dcd679eb1e063f", "score": "0.70524615", "text": "def test_not_weird_status_code(self):\n self.resp.status_code = 404\n self.meth_str_prefix(self.req)\n self.assert_header_not_set()", "title": "" }, { "docid": "d7afa60da2d3c9e4fdf46bb3116a4318", "score": "0.705134", "text": "def test_assert_404(self):\n self.assert404(self.client.get(\"/oops/\"))", "title": "" }, { "docid": "371fb1f3f4101b219570a5158c49860d", "score": "0.7048711", "text": "def test_get_notFound(self):\n resp = self.client.get('/users/1000')\n\n assert resp.status_code == 404\n assert json.loads(resp.data) == {\n 'info': {},\n 'status': {\n 'statusCode': 404,\n 'statusMsg': 'Not found',\n 'statusDetails': {}\n },\n 'result': {}\n }", "title": "" }, { "docid": "10938474e9e615341943db83ac2b22de", "score": "0.70455563", "text": "def testGet404(self, _):\n name = 'badname'\n\n self.testapp.get('/catalogs/' + name, status=httplib.NOT_FOUND)", "title": "" }, { "docid": "006c9843b06decae220eef8ffe259c2f", "score": "0.70256203", "text": "def test_bad_request(self):\n with self.assertRaises(NotFound):\n x = self.client.get('/')\n y = None", "title": "" }, { "docid": "b501626e30a4504283710f8cb104003d", "score": "0.7023371", "text": "def test_index_404(client):\n response = client.get('/banana') # Doesn't exist\n assert response.status_code == 404", "title": "" }, { "docid": "8051e918304b892e418d687df5ad1b9c", "score": "0.6998629", "text": "def test_404(self, get_model):\n resource = IModelResource()\n request = DummyRequest()\n request.method = 'GET'\n resource.request = request\n get_model.return_value = None\n with self.assertRaises(HTTPNotFound):\n resource['foobar']", "title": "" }, { "docid": "48c57f0940545c482c7a16e3a9520251", "score": "0.69884664", "text": "async def test_http_error404(aresponses: ResponsesMockServer) -> None:\n aresponses.add(\n MATCH_HOST,\n \"/http/404\",\n \"GET\",\n aresponses.Response(text=\"Not Found!\", status=404),\n )\n\n async with ClientSession() as session:\n client = Roku(HOST, session=session)\n with pytest.raises(RokuError):\n assert await client._request(\"http/404\")", "title": "" }, { "docid": "4d1174c1162760f97a5ceff9a3af203d", "score": "0.69790065", "text": "def mock_response_500(monkeypatch):\n\n def mock_get(*args, **kwargs):\n response = requests.models.Response()\n response.status_code = 500\n return response\n\n monkeypatch.setattr(\n requests, \"get\", mock_get\n ) # apply the monkeypatch for requests.get to mock_get", "title": "" }, { "docid": "adefcb5bc629a158ef9d19609cd82f39", "score": "0.6965118", "text": "def test_bad_inbound_returns_404(dummy_request):\n with pytest.raises(HTTPNotFound):\n inbound_api(dummy_request)", "title": "" }, { "docid": "0e9fe2f8ce0520dd17391b897cfc2c72", "score": "0.69566447", "text": "def test_server_answer_error(self, mock_make_request):\n mock_make_request.return_value.status_code = 404\n with self.assertRaises(SystemExit):\n rss_reader.server_answer('4556547112')", "title": "" }, { "docid": "169c12aa19121c5777d75f96e36e2149", "score": "0.69484127", "text": "def test_workspace_from_url_404(mock_request):\n\n # arrange\n url_404 = 'https://raw.githubusercontent.com/OCR-D/assets/master/data/kant_aufklaerung_1784/data/mets.xmlX'\n mock_request.side_effect = Exception('HTTP request failed')\n\n with 
pytest.raises(Exception) as exc:\n Resolver().workspace_from_url(mets_url=url_404)\n\n # assert\n assert \"HTTP request failed\" in str(exc)\n assert mock_request.call_count == 1", "title": "" }, { "docid": "3d89be7e2494403ef9fd04bc6f4ee25e", "score": "0.6934553", "text": "def mock_response_500(monkeypatch):\n\n def mock_get(*args, **kwargs):\n return httpx.Response(status_code=500)\n\n monkeypatch.setattr(\n httpx, \"get\", mock_get\n ) # apply the monkeypatch for requests.get to mock_get", "title": "" }, { "docid": "e02e85de4b5ce5abce6ae7d30a178545", "score": "0.69239825", "text": "def _assert_response_not_found(self, url):\n response = self.client.get(url)\n\n self.assertResponseNotFound(response)", "title": "" }, { "docid": "96afcad20919760822a6d059298e6a2b", "score": "0.6911752", "text": "def test_return_404(self):\n project_metadata_detail_url = reverse('polaaar:project_metadata_detail', args=[5, ])\n response = self.client.get(project_metadata_detail_url)\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "c937fe8219137e162f64823f4139669c", "score": "0.68957675", "text": "def test_should_raise_exception_when_recipe_not_found(self):\n fake_resp = flexmock(status=404)\n flexmock(requests).should_receive(\"get\").with_args(\"some url\").and_return(fake_resp)\n\n loader = RemoteLoader(\"some url\")\n self.assertRaises(Exception, loader.load)\n\n \"\"\"\"Should raise an exception when the server can't be found\"\"\"", "title": "" }, { "docid": "45ce09342fb3558b391af9a40e22e87d", "score": "0.6891508", "text": "def mock_bad_response(self):\n response = Response()\n content_bad = u'Internal Server Error'.encode('utf-8')\n response.status_code = 400\n response._content = content_bad\n return response", "title": "" }, { "docid": "7eb75f3afb4ba5003dd2792523c860ac", "score": "0.6841213", "text": "def test_404_returns_notfound_template(testapp):\n response = testapp.get('/journal/500', status=404)\n title = response.html.find_all(class_='not_found')[0].getText()\n assert \"404 Page not found\" in title", "title": "" }, { "docid": "88c4145e7d5ef42c6f3e84c734d033d5", "score": "0.6837926", "text": "def _404(request):\n request.response.status = 404\n return {}", "title": "" }, { "docid": "4fc15807609c68b99bcffa22e8557b0a", "score": "0.683589", "text": "def test_not_found(self):\n assert self.router.not_found().route == NotFoundRouter(api=api).route", "title": "" }, { "docid": "574e2b912eb7e25fd39386efd90f8d71", "score": "0.6832258", "text": "def test_throwing_http_connector_404_error_noretry(self):\n self.Error404ServerResource.render_GET = mock.Mock(wraps=self.Error404ServerResource.render_GET)\n\n routedConnector = HttpConnector('dst', 'http://127.0.0.1:%s/send' % self.Error404Server.getHost().port)\n \n self.publishRoutedDeliverSmContent(self.routingKey, self.testDeliverSMPdu, '1', 'src', routedConnector)\n\n # Wait 3 seconds\n exitDeferred = defer.Deferred()\n reactor.callLater(3, exitDeferred.callback, None)\n yield exitDeferred\n \n self.assertEqual(self.Error404ServerResource.render_GET.call_count, 1)", "title": "" }, { "docid": "3bff397dbe75ba522e77bfa3b4e2bf7d", "score": "0.6816768", "text": "def test_nok_response(self):\n a = AuthLogin()\n mock = MagicMock()\n mock.return_value = ERROR_RESPONSE_401, 401\n a.fetch_from_github = mock\n resp, status_code = a.post()\n self.assertEqual(status_code, 401)", "title": "" }, { "docid": "95f771dc60f523bd39ce5db194817952", "score": "0.67972654", "text": "def assert_not_found(self, *args, **kwargs):\n status, _ = 
self.call_app(*args, **kwargs)\n self.assertEqual(status, 404)", "title": "" }, { "docid": "ab71efaeb0a7cfe656c92d5da86e0cfd", "score": "0.6793786", "text": "def test_details_404(logged_client):\n response = logged_client.get('/transactions/3', follow_redirects=True)\n assert response.status_code == 404", "title": "" }, { "docid": "9f81f110d34898cca6ad14a354ad0777", "score": "0.6785313", "text": "def test_throwing_http_connector_404_error_noretry(self):\n self.Error404ServerResource.render_POST = mock.Mock(wraps=self.Error404ServerResource.render_POST)\n\n dlr_url = 'http://127.0.0.1:%s/dlr' % self.Error404Server.getHost().port\n dlr_level = 1\n msgid = 'anything'\n message_status = 'DELIVRD'\n self.publishDLRContent(message_status, msgid, dlr_url, dlr_level)\n\n # Wait 3 seconds\n exitDeferred = defer.Deferred()\n reactor.callLater(3, exitDeferred.callback, None)\n yield exitDeferred\n \n self.assertEqual(self.Error404ServerResource.render_POST.call_count, 1)", "title": "" }, { "docid": "0134c51d0acfb8ffc6737fc0f5ced30d", "score": "0.67744696", "text": "def mock_requests_response_http_500(url, payload, headers):\n Response = namedtuple(\"Response\", [\"status_code\"])\n return Response(500)", "title": "" }, { "docid": "c856bc9b37329b49f3fd48e1e1b9fb3a", "score": "0.6756816", "text": "def test_sa_get_request_error(self, _get_response):\n _get_response.side_effect = RDBInvalidRequestException('Mock database error')\n response = self.client.get(api_route_for('/stack-analyses/request_id'))\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "5810c76211b5bb981d64bce5331c2e7c", "score": "0.6748045", "text": "def test_failed_server_response(self, mock_post):\n self.assertRaises(requests.exceptions.HTTPError,\n simple_addition_with_misconfigured_decorator, self.first_number, self.second_number)", "title": "" }, { "docid": "8a24962fb6ba180a8709b9c9e0f4988c", "score": "0.6728379", "text": "def dummy_assert():\n raise bottle.HTTPError(body='Not what we expected',\n status=400)", "title": "" }, { "docid": "c10dc3d1fed116945b12b18edc14484b", "score": "0.6701152", "text": "def mock_response(request):\n status = request.node.get_closest_marker(\"status\", 200)\n content = request.node.get_closest_marker(\"content\", 'CONTENT')\n json_data = request.node.get_closest_marker(\"json\")\n raise_for_status = request.node.get_closest_marker(\"raise_for_status\")\n\n mock_resp = Mock()\n # mock raise_for_status call w/optional error\n mock_resp.raise_for_status = Mock()\n if raise_for_status:\n mock_resp.raise_for_status.side_effect = raise_for_status\n # set status code and content\n mock_resp.status_code = status\n mock_resp.content = content.args[0] if content is not None else content\n # add json data if provided\n if json_data:\n mock_resp.json = Mock(\n return_value=json_data\n )\n return mock_resp", "title": "" }, { "docid": "33c3634905bda583629246f215654359", "score": "0.6699321", "text": "def test_page_not_found(self):\n rv = self.app.get('/a-page-which-doesnt-exist')\n self.assertEqual(rv.status_code, 404)", "title": "" }, { "docid": "37e853003723ec28a2aa8a505052a1db", "score": "0.6695575", "text": "def error_404_response(url):\n responses.add(\n responses.POST,\n url=url,\n json={'any_key': 'any_value'},\n status=404\n )\n responses.add(\n responses.DELETE,\n url=url,\n json={'any_key': 'any_value'},\n status=404\n )", "title": "" }, { "docid": "97906468b8b14239a106cbd3f5ee1ed1", "score": "0.6694394", "text": "def test_error_exception(self, accept_json):\n response = 
self.client.get(api_route_for('/_error'), headers=accept_json)\n assert response.status_code == 404", "title": "" }, { "docid": "3f77ed3ed656ffa09a4daf8ff2a5ef74", "score": "0.6692655", "text": "def test_indexed_file_index_document_request_not_found(app):\n\n class MockResponse:\n \"\"\"\n Mock response for requests lib\n \"\"\"\n\n def __init__(self, data, status_code=404):\n \"\"\"\n Setup mock response 404\n \"\"\"\n self.data = data\n self.status_code = status_code\n\n def json(self):\n \"\"\"\n Mock json() call\n \"\"\"\n return self.data\n\n def text(self):\n \"\"\"\n Mock text() call\n \"\"\"\n return \"Not Found\"\n\n with patch(\"fence.blueprints.data.indexd.flask.current_app\", return_value=app):\n with patch(\n \"fence.blueprints.data.indexd.requests.get\",\n return_value=MockResponse(data=None),\n ):\n indexed_file = IndexedFile(file_id=\"some id\")\n with pytest.raises(NotFound):\n indexed_file.index_document", "title": "" }, { "docid": "30ea1237a936e79d6b46481384443c49", "score": "0.66852516", "text": "def test_nonexistent_get(self, testapp):\n res = testapp.get(self.base_url.format(id=uuid4().hex), status=404)\n assert res.status_code == 404", "title": "" }, { "docid": "7220ac29d22292705890e08ebf574e3b", "score": "0.6667463", "text": "def handle_404(request, response, exception):\n response.write('Not found.')\n response.set_status(404)", "title": "" }, { "docid": "8eb8c341373a3f213351b588a763fe1a", "score": "0.665665", "text": "def test_client_get_not_found(self):\n riot_answer = {\"status\":\n {\"message\": \"Data not found - summoner not found\", \"status_code\": 404}}\n httpretty.register_uri(httpretty.GET, re.compile(SummonerAPITestCase.RIOT_URL),\n body=json.dumps(riot_answer), status=404)\n\n test_client = Client()\n response = test_client.get(\n '/karma/get_summoner', {'name': '123false', 'server': 'euw1'})\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(json.loads(response.content), riot_answer)", "title": "" }, { "docid": "b1e5bb7cc94cbf732d11da7bb176421f", "score": "0.6651185", "text": "def mock_response_token_401(monkeypatch):\n response_200 = requests.models.Response()\n response_200.status_code = 200\n\n response_401 = requests.models.Response()\n response_401.status_code = 401\n\n def mock_get(*args, **kwargs):\n if kwargs[\"url\"] == \"fake_url/api/me\":\n return response_401\n elif kwargs[\"url\"] == \"fake_url/api/docs/spec.json\":\n return response_200\n\n monkeypatch.setattr(\n requests, \"get\", mock_get\n ) # apply the monkeypatch for requests.get to mock_get", "title": "" }, { "docid": "b5a618c5bde596b6718dc2951f402f55", "score": "0.66494375", "text": "def not_found(environ, start_response):\n# print >> sys.stderr, 'Not Found error'\n start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])\n return ['Not Found']", "title": "" }, { "docid": "72b7f07016159eed8aadedd6c0ba45a2", "score": "0.664854", "text": "def test_get_fail(self, m_mock):\n m_mock.side_effect = Exception('Boom')\n\n response = self.client.get('/database/availability/')\n\n self.assertEqual(200, response.status_code)", "title": "" }, { "docid": "56c8b5f813c366a9d47f5f51c7d46d18", "score": "0.6643576", "text": "def test_404(app, client):\n with app.app_context():\n response = client.get('/unknown_page')\n assert response.status_code == 404\n assert 'Page Not Found' in str(response.data)", "title": "" }, { "docid": "2b7951ec9d1ca52e3d9092e70e86ba14", "score": "0.66373044", "text": "def test_get_erratum_unauthorized(self):\n with mock.patch('errata.requests.get') 
as get:\n # Create the requests.response object. The status code\n # here will change the path of execution to the\n # unauthorized branch of code\n response = mock.MagicMock(status_code=401)\n get.return_value = response\n with self.assertRaises(exceptions.ErrataToolUnauthenticatedException):\n errata.get_erratum(123456)", "title": "" }, { "docid": "ff6c9989163187f2d05d0968fb848ba8", "score": "0.6632007", "text": "def test_not_found(self):\n invalid_user = \"jim_the_duck_guy\"\n actual_reply = requests.get(\"{}/{}\".format(self.url, invalid_user))\n self.assertEqual(actual_reply.status_code, 404,\n \"Got {} but expected 404\".format(\n actual_reply.status_code))", "title": "" }, { "docid": "8a7d93faea3f20df04301fcb94c2339d", "score": "0.6624998", "text": "def test_get_non_existing(self):\n response = self.app.get('/api/v3/meals/10', headers=self.admin_header)\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "b9cf4bc23d9c0c2966ca2e73e723f300", "score": "0.6619398", "text": "def test_nonexist(self):\n res = self.app.get(\"/event/3\")\n self.assertEqual(res.status_code, 404)", "title": "" }, { "docid": "40ce695d1ef56d45421f23906db00985", "score": "0.6605671", "text": "def test_indexed_file_index_document_request_has_json_no_urls(app):\n\n class MockResponse:\n \"\"\"\n Mock response for requests lib\n \"\"\"\n\n def __init__(self, data, status_code=200):\n \"\"\"\n Setup mock response\n \"\"\"\n self.data = data\n self.status_code = status_code\n\n def json(self):\n \"\"\"\n Mock json() call\n \"\"\"\n return self.data\n\n with patch(\"fence.blueprints.data.indexd.flask.current_app\", return_value=app):\n with patch(\n \"fence.blueprints.data.indexd.requests.get\",\n return_value=MockResponse({\"not_urls\": [\"some url\"]}),\n ):\n indexed_file = IndexedFile(file_id=\"some id\")\n with pytest.raises(InternalError):\n indexed_file.index_document", "title": "" }, { "docid": "78a7d74aef41922208230c75ffd905f9", "score": "0.6602796", "text": "def test_not_found(self, client, fake_auth):\n fake_auth.login(admin_uid)\n self._api_users_report(client, 'foo', expected_status_code=404)", "title": "" }, { "docid": "15d90c23f3eebc9a6c207a9ef3b5aea5", "score": "0.6595912", "text": "def test_404_sent_requesting_beyond_vaid_page(self):\n\n res = self.client().get('/questions?page=9000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], \"Not found\")", "title": "" }, { "docid": "d8cc5e554a3d61252d7d7a4ba0237b06", "score": "0.6594278", "text": "def not_found(environ, start_response):\n start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])\n return ['Not Found']", "title": "" }, { "docid": "299ceaaa313c289ba2480c77c086240f", "score": "0.658541", "text": "def test_get_request_invalid_url(self):\n response = self.client.get(api_route_for('/stack-analyses/request_id/sdf/dsfds'))\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "3983b41b8fde56414b0b7503a188c0fb", "score": "0.65843815", "text": "def test_exceptions_to_giveup(self):\n err = Mock(\n response={'Error': {'Code': 'ResourceNotFoundException'}}\n )\n\n result = ThreatIntel._exceptions_to_giveup(err)\n assert_equal(result, True)", "title": "" }, { "docid": "11d59355ac4dae0c1acfd564f9e050f4", "score": "0.6581873", "text": "def test_indexed_file_index_document_request_service_not_available(app):\n\n class MockResponse:\n \"\"\"\n Mock response for requests lib\n \"\"\"\n\n def __init__(self, 
data, status_code=503):\n \"\"\"\n Setup mock response 503\n \"\"\"\n self.data = data\n self.status_code = status_code\n\n def json(self):\n \"\"\"\n Mock json() call\n \"\"\"\n return self.data\n\n def text(self):\n \"\"\"\n Mock text() call\n \"\"\"\n return \"Not Found\"\n\n with patch(\"fence.blueprints.data.indexd.flask.current_app\", return_value=app):\n with patch(\n \"fence.blueprints.data.indexd.requests.get\",\n return_value=MockResponse(data=None),\n ):\n indexed_file = IndexedFile(file_id=\"some id\")\n with pytest.raises(UnavailableError):\n indexed_file.index_document", "title": "" }, { "docid": "ae9904151f02bec712e94886ca7c3870", "score": "0.65773314", "text": "def error_404(self, exc):\n raise exc", "title": "" }, { "docid": "febe8bfbf7f6fdf07d3c58025c57e6bf", "score": "0.65750533", "text": "def mock_response_token_401(monkeypatch):\n response_200 = httpx.Response(status_code=200)\n response_401 = httpx.Response(status_code=401)\n\n def mock_get(*args, **kwargs):\n if kwargs[\"url\"] == \"fake_url/api/me\":\n return response_401\n elif kwargs[\"url\"] == \"fake_url/api/docs/spec.json\":\n return response_200\n\n monkeypatch.setattr(\n httpx, \"get\", mock_get\n ) # apply the monkeypatch for requests.get to mock_get", "title": "" }, { "docid": "02a70b36dbedf6ab63fff1294020928e", "score": "0.6572969", "text": "def mock_response_200(monkeypatch):\n\n def mock_get(*args, **kwargs):\n response = requests.models.Response()\n response.status_code = 200\n return response\n\n monkeypatch.setattr(\n requests, \"get\", mock_get\n ) # apply the monkeypatch for requests.get to mock_get", "title": "" }, { "docid": "f7a63c2674bd5f80d4cc74b547e6fd4f", "score": "0.65702283", "text": "def test_get_filtered_list_fail(self):\n with mock.patch('errata.requests.get') as get:\n response = mock.MagicMock(status_code=404)\n response.json.return_value = test_structures.example_erratum_filtered_list\n get.return_value = response\n with self.assertRaises(exceptions.ErrataToolError):\n errata.get_filtered_list()", "title": "" }, { "docid": "cbeee789b036bd853da9341d332206eb", "score": "0.6566745", "text": "def test_get_not_found(self, app):\n obj_id = uuid.uuid4()\n request = app.get('{base}/{id}'.format(base=self.base_path, id=obj_id),\n headers=self.headers, status=404)\n result = request.json\n\n assert result['status'] == 'error'\n assert self.NOT_FOUND_MESSAGE.format(obj_id) in result['message']", "title": "" }, { "docid": "cd31abffcb2a56c52b617d7cf2ca62fd", "score": "0.6565082", "text": "def not_found(environ, start_response):\r\n req = Request(environ)\r\n if req.method == 'POST':\r\n resp = Response(\r\n status=500,\r\n content_type='application/json',\r\n body=dumps(dict(\r\n result=None,\r\n error='Path Not Found',\r\n id=1)))\r\n else:\r\n resp = Response(\r\n status=500,\r\n content_type='text/plain',\r\n body=\"Path Not Found\")\r\n return resp(environ, start_response) #\r", "title": "" }, { "docid": "d39a5f32ed8bcfb8c180d4a16642206d", "score": "0.65645397", "text": "def not_found(data=None):\n return json_response(404, data)", "title": "" }, { "docid": "d49a42ef27823674f847af2db7808633", "score": "0.6552911", "text": "def page_not_found(error):\n return Response('404 Error: Not an API call.', mimetype='text/plain')", "title": "" }, { "docid": "af2bee1448418abfc8a4246fbc517239", "score": "0.654979", "text": "def test_not_found(self, client, fake_auth):\n fake_auth.login(admin_uid)\n self._api_notes_report(client, 'foo', expected_status_code=404)", "title": "" }, { "docid": 
"398e2fefb7638dfda3322a25a38e7445", "score": "0.65493184", "text": "def test_get_html_non_existant_resource(self):\r\n response = self.get_response('/artists/99999',\r\n 404,\r\n headers={'Accept': 'text/html'})\r\n assert self.is_html_response(response)", "title": "" }, { "docid": "24c8266383ab6d4eab5a8ca28b18e305", "score": "0.654782", "text": "def test_indexed_file_index_document_request_has_json_exception(\n app, supported_protocol\n):\n\n class MockResponse:\n \"\"\"\n Mock response for requests lib\n \"\"\"\n\n def __init__(self, data, status_code=200):\n \"\"\"\n Setup mock response\n \"\"\"\n self.data = data\n self.status_code = status_code\n\n def json(self):\n \"\"\"\n Mock json() call with ValueError\n \"\"\"\n raise ValueError(\"unable to get json\")\n\n with patch(\"fence.blueprints.data.indexd.flask.current_app\", return_value=app):\n with patch(\n \"fence.blueprints.data.indexd.requests.get\",\n return_value=MockResponse({\"urls\": [f\"{supported_protocol}://some/url\"]}),\n ):\n indexed_file = IndexedFile(file_id=\"some id\")\n with pytest.raises(InternalError):\n indexed_file.index_document", "title": "" }, { "docid": "83fb1ca6dc11e18499b8f814f56ecaba", "score": "0.6543966", "text": "def test_bad_about_returns_404(dummy_request):\n dummy_request.method = 'POST'\n with pytest.raises(HTTPNotFound):\n about_view(dummy_request)", "title": "" }, { "docid": "084ab9aeb35e5b79cd496de6d60a1ac0", "score": "0.65402395", "text": "def respond_not_found(self):\n self.respond('404 Not Found')\n return 'Not Found'", "title": "" }, { "docid": "beb19974b25d81ca3f36dc2ce8689288", "score": "0.6539248", "text": "def test_404_on_non_existant_event(self):\n response = self.client.get('/events/0')\n resp = json.loads(response.data.decode('utf-8'))\n # Check that we get the correct response\n testutil.assertEqual(self, {\n response.status_code: 404,\n resp['status']: 404,\n resp['message']: 'Event Not Found'\n })", "title": "" }, { "docid": "4aa68843f637e1f0ddad1f5ef3adeaed", "score": "0.6536449", "text": "def test_v1_404(self, fake_logger):\n resp = self.app.get('/api/1/inf/vlan',\n headers={'X-Auth': self.token})\n\n status = resp.status_code\n expected = 404\n\n self.assertEqual(status, expected)", "title": "" }, { "docid": "f70c680ded50c6f9b6c2fbc2b077b3a6", "score": "0.6535862", "text": "def not_found(self,environ, start_response):\n start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])\n return ['Not Found']", "title": "" }, { "docid": "15f824318cad0a8a3d76f9bed85392d8", "score": "0.65313005", "text": "def test_verify404(self):\n bank_verify = BankAccountVerify(\n amounts = [11, 35]\n )\n with self.assertRaises(Exception) as context:\n self.api.verify(\"bank_fakeId\", bank_verify)\n self.assertTrue(\"bank account not found\" in context.exception.__str__())", "title": "" }, { "docid": "c3d2662d99a5c6af2407842c85d2df43", "score": "0.65289015", "text": "def test_404_if_page_invalid(self):\n res = self.client().get('/questions\\?page\\=1000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertFalse(data['success'])\n self.assertTrue(data['message'])", "title": "" }, { "docid": "c1667c2462abb9d81d328a8a63f3135b", "score": "0.6521607", "text": "def send404Response(self):\n self.sendTemplateResponse(\"404.html\", {})", "title": "" }, { "docid": "8c4b8a606e1452c9a9c74f3026350b77", "score": "0.65211797", "text": "def test_vmuem_device_get_command_when_no_records_found_html_response(requests_mock):\n from VMwareWorkspaceONEUEM import 
vmwuem_device_get_command\n with open(\"test_data/get_device_404_no_records_found_message.html\") as file:\n response = file.read()\n\n requests_mock.get(BASE_URL + 'devices/{uuid}'.format(uuid=\"1234\"),\n text=response, status_code=404)\n with pytest.raises(DemistoException) as e:\n vmwuem_device_get_command(client, {\"uuid\": \"1234\"})\n\n assert e.value.args[0] == HTTP_ERROR[404]", "title": "" }, { "docid": "e7c381057c40c3bee3bde3054ef81d7c", "score": "0.6520004", "text": "def test_get_weather_of_invalid_city_raise_404(self):\n\n result = self.app.simulate_get('/weathers/random')\n self.assertEqual(result.status_code, 404)", "title": "" }, { "docid": "f157f0477e6264994aa59d4eb31f9525", "score": "0.65194434", "text": "def mock_response_200(monkeypatch):\n\n def mock_get(url, *args, **kwargs):\n if \"/api/me\" in url:\n return httpx.Response(status_code=200, json={\"username\": \"booohh\"})\n return httpx.Response(status_code=200)\n\n monkeypatch.setattr(\n httpx, \"get\", mock_get\n ) # apply the monkeypatch for requests.get to mock_get", "title": "" } ]
93ce6917d53ddbc7e14b937630567268
shut down continuous recording.
[ { "docid": "ba300671e375e518c21b746a7550fa37", "score": "0.0", "text": "def continuousEnd(self):\n print \"Die!\"\n self.threadsDieNow = True", "title": "" } ]
[ { "docid": "1781197fec3dc9f16a56bb3ff133e825", "score": "0.7465613", "text": "def recording_stop(self):\n self.recording = False\n self.threads['recording'] = None", "title": "" }, { "docid": "f5575fc4a17a3afdc8b91b3c6c9272fb", "score": "0.7301088", "text": "def stop_capture(self):\n\n self.kill_event.set()", "title": "" }, { "docid": "1f4eaa573a8996786ee3e9726e4ff35b", "score": "0.7283388", "text": "def stop_recording(self):\n if self._rec:\n self._rec = False\n if self._stop_timer_thread is not None:\n self._stop_timer_thread.cancel()\n self._stop_timer_thread = None\n self._input_stream.stop()\n if self.max_memory >= 0:\n self._memory_thread.join()\n self._manage_record(True)\n self._prev_ab = None\n self._rectime = 0.0", "title": "" }, { "docid": "d7fb25bdefe0c1976e7d89a119d4d85a", "score": "0.72220755", "text": "def stop_recording(self):\n self.__recording = False\n if self.__record_data:\n del self.__record_data[-1]", "title": "" }, { "docid": "d02b44a24547210aead5fd74b901768d", "score": "0.7211104", "text": "def stopRecording(self):\r\n if self.mode == 'Real':\r\n # pl.pylink.endRealTimeMode()\r\n self.pylink.sendCommand(\"record_status_message=NoTitle\")\r\n self.pylink.clearScreen(0)\r\n self.pylink.stopRecording()", "title": "" }, { "docid": "e80933f262981efb23377be953fbb9d8", "score": "0.71914256", "text": "def stop(self):\n self.alive = False\n self.can_track = False\n self.join(4)", "title": "" }, { "docid": "c6e4e9d87241a1085788dddf36a549ba", "score": "0.71139574", "text": "def stop_recording():\n def stop():\n server.shutdown()\n server.server_close()\n stop_event.set()\n threading.Thread(target=stop, daemon=True).start()", "title": "" }, { "docid": "3ec77a8ad43e2688bce4fd2c29762910", "score": "0.7083102", "text": "def stop_capture(self):\n self.video_playing = False", "title": "" }, { "docid": "90c613acb7f800c8ee42ccc04bf80cce", "score": "0.7063252", "text": "def _stop_recording(self):\n self.set_option(\"_log\", False)", "title": "" }, { "docid": "5da76cedad9eb3682afad0d1c0966d34", "score": "0.70572186", "text": "def stop_capture(self):\n self.send_command('p_capture', 'on')", "title": "" }, { "docid": "1d04b4c97f3662a80f6150e6a3e2684e", "score": "0.690807", "text": "def ShutDown(self):\n self.stop = True", "title": "" }, { "docid": "41130e5c96cdce4a509a41442ff4d2c0", "score": "0.6874045", "text": "def stop(self):\n self.cap.stop()\n cv2.destroyAllWindows()", "title": "" }, { "docid": "e4e65b50bd8c100cbef888dc2c711995", "score": "0.6872908", "text": "def stop_record(self):\n if self.state != 'stop':\n self.status_bar.showMessage(\"Stop Recording\")\n self.worker.pause_thread()\n self.pause_but.setChecked(False)\n self.start_but.setChecked(False)\n self.stop_but.setChecked(True)\n self.redo_but.setChecked(False)\n self.timer.stop()\n self.state = 'stop'\n self.stop = time.time() - drv_ftdi.T_START\n else:\n self.stop_but.setChecked(True)", "title": "" }, { "docid": "4df4a41b383f927ebc088f834d8b9d4b", "score": "0.68382823", "text": "def stopAudioRecording(self):\n return _agorartc.RtcEngineBridge_stopAudioRecording(self)", "title": "" }, { "docid": "d28c02af10b3919cd35bd5502c2785e3", "score": "0.67881495", "text": "def stop(self):\n if self.is_continuous is None:\n # we don't have any waveforms, so skip\n return\n debug('bbb.Device(%s).stop()', self)\n self.guard_proxy.stop()\n self.is_continuous = None", "title": "" }, { "docid": "38765b5ba043a9f11fccd7211af67c8b", "score": "0.678777", "text": "def stopRecording(self):\n if self._recording:\n self._recording = False\n for 
outlet in self.outlets:\n if IKNRecorder.providedBy(outlet):\n outlet.stopRecording()\n self.episodes[-1].stop()", "title": "" }, { "docid": "df8711e2e2ae685e01ac9357a929c42a", "score": "0.67861503", "text": "def stop(self):\n self.running=False", "title": "" }, { "docid": "95d5e3f030306b38033904ba433b6976", "score": "0.67841864", "text": "def stop(self):\n self._device.close()", "title": "" }, { "docid": "0b508507e2f79fd241724001e2ef13ab", "score": "0.67837995", "text": "def stop(self):\n self.running = False", "title": "" }, { "docid": "0b508507e2f79fd241724001e2ef13ab", "score": "0.67837995", "text": "def stop(self):\n self.running = False", "title": "" }, { "docid": "0b508507e2f79fd241724001e2ef13ab", "score": "0.67837995", "text": "def stop(self):\n self.running = False", "title": "" }, { "docid": "0b508507e2f79fd241724001e2ef13ab", "score": "0.67837995", "text": "def stop(self):\n self.running = False", "title": "" }, { "docid": "0b508507e2f79fd241724001e2ef13ab", "score": "0.67837995", "text": "def stop(self):\n self.running = False", "title": "" }, { "docid": "215632333172609eb2f936e00d5189fa", "score": "0.67811215", "text": "def stop(self):\n self.capturing_images = False\n self.acquisition_started = False\n if self.handle.value is not None:\n self.command(\"AcquisitionStop\")\n self.PvAPI.PvCaptureEnd(self.handle)\n self.PvAPI.PvCaptureQueueClear(self.handle)\n self.PvAPI.PvCameraClose(self.handle)\n self.handle.value = None", "title": "" }, { "docid": "8d68b678e5ed9c8b5b8459fe62abe22b", "score": "0.67623204", "text": "def stop(self):\n self.running = False\n self.resume()", "title": "" }, { "docid": "01a857fdcc2792a3212c6f32e68588b5", "score": "0.67500085", "text": "def stopRecorder(self):\n print('###----------------stopping recorder------------------------###')\n newItem = {\"initialSlice\":self.initialSlice,\"finalSlice\":True,\"recordingId\":self.recordingId,\n \"data\":self.sliceQueue,\"sensorMetaData\":self.sensorMetaData,\"sliceAddress\":self.relativeSliceAddress,\n \"sliceTimestampAddress\":self.timestampOfSlice,\"nextSliceAddress\":-1}\n self._internalDB.pushRecordingData(newItem)\n self.timestampOfSlice = self.timestampOfSlice + 1\n self.sliceEndTimestamp = self.timestampOfSlice + TEN_MINUTES\n del self.sliceQueue\n self.sliceQueue = []\n #set recording to inactive\n self._internalDB.setRecordingToInActive(self.recordingId)", "title": "" }, { "docid": "ef4a8b628bd35c5ae0ddfc54e1056f30", "score": "0.6728963", "text": "def stop(self):\n self.active = False", "title": "" }, { "docid": "e22c46df6773649efee23e49419d8d55", "score": "0.67166525", "text": "def shut_down(self):\n self.select.off()\n utime.sleep_ms(1)\n self.select.on()\n utime.sleep_ms(1)\n self.reset.on()\n utime.sleep_ms(60)\n self.read_pos()\n utime.sleep_ms(1)\n self.select.off()\n utime.sleep_ms(1)\n\n self.SPI.deinit()", "title": "" }, { "docid": "66f44af591e57a781aef375c6a15a729", "score": "0.67128444", "text": "def stop(self):\n self._running = False", "title": "" }, { "docid": "f4b296e2464bae2492f3ecca9f43a0bd", "score": "0.669485", "text": "def stop(self):\n self.open = False\n # GPIO.output(12, False)\n # GPIO.output(12, False)\n self.ser.close()", "title": "" }, { "docid": "07b86d629d05002f41fede4f03932159", "score": "0.6688895", "text": "def stop(self):\n self.input_drv.stop()\n self.stopped = True", "title": "" }, { "docid": "ba8c4b76929dd5f7bfd1b187efc0a698", "score": "0.66860694", "text": "def stop(self):\n self.is_running = False", "title": "" }, { "docid": "d3450c635c705ec994871ca1ccb5aa23", 
"score": "0.66853684", "text": "def stop(self):\n self._running = False\n self._controller.holdCurrentPosition()\n self._conn.disconnect()", "title": "" }, { "docid": "bd114ab2a96bc036a9e0c48ee7f50813", "score": "0.6684002", "text": "def stop(self):\n self.device.media_controller.stop()\n self.status.clear()", "title": "" }, { "docid": "993175a82ba38563bf404a7e8fd27a20", "score": "0.66766953", "text": "def stop(self):\n self._running = False\n self._controller.setSpeed(0)\n self._conn.disconnect()", "title": "" }, { "docid": "412fbf1ed55e12334d3c166206bce01d", "score": "0.66756123", "text": "def stop(self):\n self._stopped = True\n self.is_processing_frames = False", "title": "" }, { "docid": "af329376608e3f46b94d002d86e8eb88", "score": "0.66752017", "text": "def stop(self):\n self.ctrl_obj.finish = True", "title": "" }, { "docid": "fae4c7256dee1c8e35b272eaefa428d5", "score": "0.6656928", "text": "def quit(self):\n self.device.media_controller.stop()\n self.device.quit_app()\n self.status.clear()", "title": "" }, { "docid": "efacf861f4b8ba6119ae4e0e67b39306", "score": "0.6654802", "text": "def stop(self):\n self.alive = False\n self.events.put(None)\n #self.responses.put('<exit>')", "title": "" }, { "docid": "e467e96718949524e5701bde106d4f52", "score": "0.66534317", "text": "def stopTrial(self):\r\n self.sendMsg('stop_trial')\r\n time.sleep(10 / 1000.0)\r\n self.stopRecording()", "title": "" }, { "docid": "0e8f2a77b956254c674b50864a6aae09", "score": "0.6648398", "text": "def stop(self):\n # When everything done, release the capture\n if self.cap:\n self.cap.release()\n cv.destroyWindow(self.WINDOWNAME)", "title": "" }, { "docid": "acc1e539d90462c5340c39874d5ff87e", "score": "0.6645024", "text": "def post_loop(self):\n self._camera.stop_recording()\n self._connection.close()\n self._connection = None\n self._server.close()\n self._server = None", "title": "" }, { "docid": "385bcbe2b83d78976bad075b3f5c3c90", "score": "0.66450053", "text": "def stop(self):\n self.servo.stop()", "title": "" }, { "docid": "5705f4b110994d48cdc80e11672035ac", "score": "0.66444844", "text": "def shut_down(self):\n pass", "title": "" }, { "docid": "81ee52dad7ddabe59f920e96cd9a833b", "score": "0.6643764", "text": "def stop():", "title": "" }, { "docid": "81ee52dad7ddabe59f920e96cd9a833b", "score": "0.6643764", "text": "def stop():", "title": "" }, { "docid": "3af149f0019d5a97e9b44543e4847c14", "score": "0.6638287", "text": "def stop(self):\n self._pwm_handle.stop()", "title": "" }, { "docid": "cd9aff4120ede01db83cb931dce9e13c", "score": "0.6610988", "text": "def stop(self):\n self.control_value = 0\n self.control_event.set()\n self.join()", "title": "" }, { "docid": "82d4b648ea11fd6d71ebbcbf30346078", "score": "0.6610176", "text": "def shutdown():\n # command executed after Ctrl+C is pressed\n rospy.loginfo(\"Stop ASRControl\")\n rospy.sleep(1)", "title": "" }, { "docid": "1a9fbdd97d2759e9fa709723d3929f50", "score": "0.6608558", "text": "def stop(self):\n self._motor_pwm.changeDutyCycle(0)\n # self.motor_pwm.stop() # stops the PWM output, would need to start back up again", "title": "" }, { "docid": "25c3279c59c5d9fe1665c8f8add3d76e", "score": "0.6602557", "text": "def stop(self):\n self.comm.close_all_serial() # Close all serial connections opened with SNAPconnect\n log_file.write(\"\\nTest Case quick done\\n\\n\\n\")\n log_file.close()\n sys.exit(0) # Exit the program", "title": "" }, { "docid": "6cb9dc06e392950a0e1a902d18f47bdd", "score": "0.6602516", "text": "def stop(self):\n self.isRunning = False", "title": 
"" }, { "docid": "420165e6f2d31d90c9d7140e57de06c4", "score": "0.6600614", "text": "def capture_stop(self):\n if not self.capturing and not self.error:\n return\n\n yield self._fbf_monitor.unsubscribe_from_beams()\n self._state_sensor.set_value(self.STOPPING)\n self.target_stop()\n\n # talk to servers and do something clever\n\n # deallocate servers\n\n self._state_sensor.set_value(self.READY)", "title": "" }, { "docid": "d21b9fffc6e06294323c60164d7fb142", "score": "0.6586137", "text": "def kill(self):\r\n cv2.VideoCapture(self.id).release()", "title": "" }, { "docid": "d0d5f4b326805052627b54c1af8e717e", "score": "0.6583031", "text": "def shutdown(self):\n self.stop = True", "title": "" }, { "docid": "d7c2535849643fbf973529c7101d3ef5", "score": "0.6581554", "text": "def shutdown(self):\n self._observer.stop()", "title": "" }, { "docid": "91c6b28d64be9400bf5f193137f5f0af", "score": "0.65798545", "text": "def clean_stop(self):\n self.running=False", "title": "" }, { "docid": "489f4071dc7718308029f4f1d608216b", "score": "0.65793", "text": "def on_exit(self):\n self.pyaudio.terminate()", "title": "" }, { "docid": "98a42bf631283809a7660ad035544c97", "score": "0.65761703", "text": "def stop(self):\n self.running = False\n self.hasStopped = True", "title": "" }, { "docid": "5ba5c873202094c1fc23d941ee744140", "score": "0.65666133", "text": "def stop_recording(self):\n\n if self.recording:\n self._send_socket('stop_recording')\n else:\n raise ValueError(\n \"You must call swift.start_recording(file_name) before trying\"\n \" to stop the recording\")", "title": "" }, { "docid": "fb1dd6afd942e1286e7189ea859cd9bd", "score": "0.65648264", "text": "def stop(self):\n _BaseVoiceKey.stop(self)\n self._event_queue.stop_flag = True", "title": "" }, { "docid": "cdb44d57de6903f5c206db16dc97e20e", "score": "0.6547091", "text": "def stop(self):\n self.running = False\n self.join(2)", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6545784", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6545784", "text": "def stop(self):", "title": "" }, { "docid": "e81d39152b4c45d5971311988bc11b03", "score": "0.6545784", "text": "def stop(self):", "title": "" }, { "docid": "d76dd5079619b64f4362d186594af97c", "score": "0.6538213", "text": "def stop(self):\n self.write_register(15, 0)", "title": "" }, { "docid": "57fe607b0fd064bae2bc707c62e91fac", "score": "0.65288705", "text": "def shut_down(self):\n print \"Shutting Down...\"\n self.stop()\n self.__disconnect()\n print \"Shutdown Complete\"", "title": "" }, { "docid": "c7e0e90fe95837b8f7d743617111da64", "score": "0.65286595", "text": "def stopReplay(self):\n LOG_WARNING(\"stopReplay\", \"FRAME\")\n self.running = False\n if self.nctrsFramesFile != None:\n self.nctrsFramesFile.close()\n self.nctrsFramesFile = None\n self.frameRateMs = None\n UTIL.TASK.s_processingTask.notifyGUItask(\"UPDATE_REPLAY\")", "title": "" }, { "docid": "ffcb9cfa7c11855779cc9e4585c2765f", "score": "0.6517656", "text": "def shutdown():\n # command executed after Ctrl+C is pressed\n rospy.loginfo(\"Stop ASRControl\")\n rospy.sleep(1)", "title": "" }, { "docid": "eaefc62f60a58a3497322d15c19269a4", "score": "0.6513693", "text": "def stop(self):\n self.current.stop()\n self.__log_current__()\n self.current = None", "title": "" }, { "docid": "b67368e2b3d8ca541bf77ccf0c976537", "score": "0.6509204", "text": "def _stop(self):\n print('Shutting down Igor...')\n duration = self._stop_time - self._start_time\n tics = 
float(self._num_spins)\n avg_frequency = tics / duration\n #print('Ran for: {0} seconds.'.format(duration))\n #print('Average processing frequency: {0} Hz'.format(avg_frequency))\n\n # Set the LED color strategy back to the default\n self._group_command.clear()\n self._group_command.led.color = 'transparent'\n self._group.send_command_with_acknowledgement(self._group_command)\n\n self._on_stop()", "title": "" }, { "docid": "67c0ee2ae22b719ffe252425270539bd", "score": "0.6506412", "text": "def stop(self):\n self.killed = True", "title": "" }, { "docid": "f2e3ed18dc0397e6a06f176b1eabd55d", "score": "0.6506238", "text": "def stop_logcat(self):\n self.logcatctl.stop_logcat()\n self.logcatctl.save_capture(self.capture_file, self.timed_file, self.traces_file)\n self.logcatctl = None", "title": "" }, { "docid": "b5759b0fd66923b6a2d66f465ba050bf", "score": "0.64901555", "text": "def stop(self):\n ...", "title": "" }, { "docid": "b5759b0fd66923b6a2d66f465ba050bf", "score": "0.64901555", "text": "def stop(self):\n ...", "title": "" }, { "docid": "19f2dfd0187fb0ede0c904bec346c922", "score": "0.64839786", "text": "def stop(self):\n self._stop_reader()\n self.serial.close()", "title": "" }, { "docid": "707b16f1edb4ad1252e9df292bd0c971", "score": "0.6472282", "text": "def camera_stop(self):\n self._bus.camera_stop()", "title": "" }, { "docid": "1b13699ae5a65a25b46368f4e4bad6b3", "score": "0.6465616", "text": "def adapter_stop(self):\n try:\n self.stop_target_poller()\n self.stop_gdb()\n if self.state.openocd_need_run:\n self.stop_oocd()\n self.stop_writer_thread()\n self.stop_reader_thread()\n self.__socket_stuff['srv'].close()\n except Exception as e:\n log.debug_exception(e)\n self.state.error = True\n self.state.running = False\n self.log_cmd(A2VSC_STOPPED_STRING)", "title": "" }, { "docid": "357eb30068309741fda0dccdc14458cd", "score": "0.64577174", "text": "def stop(self):\n self.streams.stop()", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "ad08463af6262f7551e852d7278a8379", "score": "0.6456787", "text": "def stop(self):\n pass", "title": "" }, { "docid": "3d37f8a405dd03b12733342b1718fa58", "score": "0.6451524", "text": "def stop(self):\r\n\r\n print(\"Stopping\")\r\n self._observer.stop()\r\n self._observer.join()\r\n print(\"Stopped\")", "title": "" }, { "docid": "90ba2a1c96183dddff10e796e0cbe633", "score": "0.6445778", "text": "def stop(self):\n pass\n # TODO add log", "title": "" }, { "docid": "82f26e753964ede5105dc15c4c582126", "score": "0.6439423", "text": "def stop(self):\r\n pass", "title": "" }, { "docid": "82f26e753964ede5105dc15c4c582126", "score": "0.6439423", "text": "def 
stop(self):\r\n pass", "title": "" }, { "docid": "82f26e753964ede5105dc15c4c582126", "score": "0.6439423", "text": "def stop(self):\r\n pass", "title": "" }, { "docid": "2e944d771a5196f5d984730288e3b653", "score": "0.64359045", "text": "def quit(self):\n self.running = False", "title": "" }, { "docid": "de753c3a05ba048b696b9c4be8abdd91", "score": "0.64249325", "text": "def stop(self) -> None:\n status = self.is_on()\n self.__active = False\n self.__emit_if_necessary(status)", "title": "" }, { "docid": "bd44ba5bd5453f7e0276f19e82cdde2a", "score": "0.6424452", "text": "def stop(self):\n self.step = -1\n self.status = 'idle'\n self.message = ''\n self.options = []\n self.stepCompletionTime = None\n hardware.turnHeaterOff()\n hardware.turnCoolerOff()\n self.stopTasks()", "title": "" }, { "docid": "92733f90f9f401c93b984dcbf2312d86", "score": "0.6414934", "text": "def stop(self):\r\n\t\tself.__time = 0", "title": "" }, { "docid": "934db13ddd50260c2997731aa56e9f4f", "score": "0.6411159", "text": "def stop_cb(evt):\n print('CLOSING on {}'.format(evt))\n speech_recognizer.stop_continuous_recognition()\n nonlocal done\n done = True", "title": "" }, { "docid": "934db13ddd50260c2997731aa56e9f4f", "score": "0.6411159", "text": "def stop_cb(evt):\n print('CLOSING on {}'.format(evt))\n speech_recognizer.stop_continuous_recognition()\n nonlocal done\n done = True", "title": "" }, { "docid": "e29144dbda7c3d98208fbb46d09e4c69", "score": "0.6409661", "text": "def close(self):\n print(\" -- sending stream termination command...\")\n self.keepRecording=False #the threads should self-close\n while(self.t.isAlive()): #wait for all threads to close\n time.sleep(.1)\n self.stream.stop_stream()\n self.p.terminate()", "title": "" }, { "docid": "b9798761672d5f4de40d422401c7b3b9", "score": "0.64068305", "text": "def stop(self, error=None, **kwargs):\n if error:\n self.logger.exception(error)\n self.autosave()\n self.running = False", "title": "" } ]
ffd55afce1716d645c191e4472691883
Add validation result objects to a list of results.
[ { "docid": "490a20fdad84e9b78d1db58b47fd8327", "score": "0.0", "text": "async def get_valid_invalid_results(\n self, classification_tokens: List, transcripts: List,\n classification: Classification, results: List, gene_tokens: List,\n mane_data_found: Dict, is_identifier: bool,\n hgvs_dup_del_mode: HGVSDupDelModeEnum,\n endpoint_name: Optional[Endpoint] = None,\n baseline_copies: Optional[int] = None,\n copy_change: Optional[CopyChange] = None,\n do_liftover: bool = False\n ) -> None:\n raise NotImplementedError", "title": "" } ]
[ { "docid": "cb8ce75ee4fcce71f7ba9fb8bc9628ce", "score": "0.693104", "text": "def merge_results(self, final, result):\n final.errors += result.errors\n final.failures += result.failures\n final.skipped += result.skipped\n final.expectedFailures += result.expectedFailures\n final.unexpectedSuccesses += result.unexpectedSuccesses\n return final", "title": "" }, { "docid": "7ea4134d5fd868cfb001e29dbe6750ed", "score": "0.6813103", "text": "def add_results(self, activities, objectives, results, evaluations):\n self._results.append([activities, objectives, results, evaluations])", "title": "" }, { "docid": "af6de7dfc54079d552d132255a5ca956", "score": "0.66896266", "text": "def _format_validation_results(self, results):\n internal_validator = {\n 'name': 'deckhand',\n 'version': '1.0'\n }\n\n formatted_results = []\n\n for result in results:\n formatted_result = {\n 'name': types.DECKHAND_SCHEMA_VALIDATION,\n 'status': result['status'],\n 'validator': internal_validator,\n 'errors': result['errors']\n }\n formatted_results.append(formatted_result)\n\n return formatted_results", "title": "" }, { "docid": "c20a40034fb4ceac6b5288dcb0b2ad09", "score": "0.6343326", "text": "def append_result(self, result, check, audited_object=\"\", *kwargs):\n if self.__site_parser.get_current_url() not in self.__results.keys():\n self.__results[self.__site_parser.get_current_url()] = []\n\n self.__results[self.__site_parser.get_current_url()].append(\n {\"result\": result, \"check\": check, \"check_arguments\": kwargs,\n \"audited_object\": audited_object})", "title": "" }, { "docid": "766b99c485275d8498b0234a862b11b2", "score": "0.63011426", "text": "def _add_results(self, results, trial_id):\n for result in results:\n self.logger.debug(\"Appending result: %s\" % result)\n result[\"trial_id\"] = trial_id\n result_record = ResultRecord.from_json(result)\n result_record.save()", "title": "" }, { "docid": "a136af5ccb3476c30de16ec30e1aaa6f", "score": "0.6224016", "text": "def add(self, result):\n\n self._results.append(result)", "title": "" }, { "docid": "d20962a8b22abf6571bfadf2d19b7a61", "score": "0.6219308", "text": "def validate_all(self):\n\n validation_results = []\n\n for document in self._documents:\n result = self._validate_one(document)\n validation_results.append(result)\n\n return self._format_validation_results(validation_results)", "title": "" }, { "docid": "7cb331db82fb66eaaf17f32266afdb7e", "score": "0.61187434", "text": "def AddResults(self, application_result):\n\n analysis_result = application_result.analysis_results.add()\n analysis_result.analysis_name = self._descriptor.name\n\n for trigger in self._descriptor.descriptive_triggers:\n self._module.AddDescriptiveResults(trigger, analysis_result)\n for diff_pair in self._descriptor.diff_pairs:\n self._module.AddDiffResults(diff_pair, analysis_result)\n\n if not analysis_result.results:\n del application_result.analysis_results[-1]", "title": "" }, { "docid": "9cf03cac27c5e75e6cc05a9e8908b544", "score": "0.60919553", "text": "def fetch_results(self):\n for text in helpers.get_prepared_string(self.text).split(' '):\n self.add_to_results(self.get_validated_words(text))", "title": "" }, { "docid": "b41a8ed336aca688c1520c4d5b1bf350", "score": "0.5991383", "text": "def _add_results(self, results=None):\n\n self.ensure_current_task()\n\n if results is None:\n raise ValueError(\"results cannot be None\")\n\n self.current_task.results.extend(results)", "title": "" }, { "docid": "b2c9261a4107e71e16ceeeb44d445bd5", "score": "0.5968941", "text": "def 
address_results(self, val: list):\n self._address_results = []\n if val is not None:\n for item in val:\n if isinstance(item, AddressResult):\n self._address_results.append(item)", "title": "" }, { "docid": "776a5b333a6fae74b62757f21ad1b4fd", "score": "0.593856", "text": "def merge_results(self, results):\n for index, results_per_index in results:\n try:\n self.results[index].extend(results_per_index)\n except AttributeError:\n self.results[index] = results_per_index\n except IndexError:\n log_info('Merger', '{} {}'.format(\n index, len(self.results)\n ))\n raise", "title": "" }, { "docid": "fa65818b5a260198dc7e849ce46df329", "score": "0.59117955", "text": "def validate_result(self, result):\n raise NotImplementedError", "title": "" }, { "docid": "a230a950cf2a46ff4e258630ebf66bbf", "score": "0.5877756", "text": "def add_result(self, result, check):\n self.lock.acquire()\n self.results[check].append(result)\n self.lock.release()", "title": "" }, { "docid": "d7ec2947731ace72c22b3c0cdb5488eb", "score": "0.58283293", "text": "def add_result(self, result, as_flat):\n if as_flat:\n for k, v in result.items():\n self.add_item(k, v)\n else:\n self.add_item(result.name(), result)", "title": "" }, { "docid": "456028cd31d862ecdac197da58063b43", "score": "0.58201575", "text": "def parse_results(self, cfg):\n for m in list(self.models_not_done):\n self.parse_model_result(cfg, m)", "title": "" }, { "docid": "eeba2ec8d40a71a432a6cd255ae2be30", "score": "0.58087206", "text": "def validation_results(self) -> Dict[int, Dict[str, ValidatorOutput]]:\n return self.scene_validation_results", "title": "" }, { "docid": "b34e83cd9e1d1ba3b9058402a0e8c3c7", "score": "0.5780802", "text": "def append_result(self, result):\n if not isinstance(result, ResultSet):\n raise _errors.CommandResultError(\n \"Result have to be an instance of ResultSet\"\n )\n if self.error:\n raise _errors.CommandResultError(\n \"Result sets cannot be added for error results\"\n )\n self.__results.append(result)", "title": "" }, { "docid": "709a7dd3b03ad3222e38d42d62580181", "score": "0.57668424", "text": "def add_results(self, new_results):\n from pandas import (DataFrame, MultiIndex)\n if self.results is None:\n if len(new_results) == 0:\n return\n r = new_results[0]\n midx_x = [('x', _) for _ in range(len(r.x))]\n len_cv_vec = 0 if r.cv_vec is None else len(r.cv_vec)\n midx_cv = [('cv', _) for _ in range(len_cv_vec)]\n midx = MultiIndex.from_tuples(\n midx_x + [('fx', 0)] +\n midx_cv + [('cv', 0), ('who', 0), ('error', 0)])\n self.results = DataFrame(columns=midx)\n\n assert all([isinstance(_, Result) for _ in new_results])\n # notification for all received results at once\n self.eventbus.publish(\"new_results\", results=new_results)\n\n new_rows = []\n for r in new_results:\n new_rows.append(\n np.r_[r.x, r.fx,\n [] if r.cv_vec is None else r.cv_vec,\n [r.cv, r.who, r.error]])\n results_new = DataFrame(new_rows, columns=self.results.columns)\n self.results = self.results.append(results_new, ignore_index=True)\n\n if len(self.results) / 100 > self._last_nb / 100:\n self.info()\n self._last_nb = len(self.results)", "title": "" }, { "docid": "cd8274b63d570f18e23e15b99a547982", "score": "0.5761514", "text": "def extra_processing(self, results):\n return results", "title": "" }, { "docid": "5992f31e44ec0f959621425bde33b6ff", "score": "0.5760019", "text": "def add_result_filter(self, result_filter):\n check(result_filter, ResultFilter)\n self.results_filters.append(result_filter)", "title": "" }, { "docid": "9825d8d15467d4686f749d691e103970", 
"score": "0.5732227", "text": "def _validate(self):\n errors = [ ] \n # Add some errors if input is not valid\n return errors", "title": "" }, { "docid": "eb3c66f971de8ad8ec9935b8cdd0d2c4", "score": "0.5721829", "text": "def append(self, result):\n self._result.append(result)", "title": "" }, { "docid": "107f9dcb19f19422bfb22c0629df497d", "score": "0.5704033", "text": "def results(self, results):\n\n self._results = results", "title": "" }, { "docid": "107f9dcb19f19422bfb22c0629df497d", "score": "0.5704033", "text": "def results(self, results):\n\n self._results = results", "title": "" }, { "docid": "75600982cdc7118a631ab3c875aca94e", "score": "0.5698786", "text": "def _process_result(compute_node, out_plugin, test, result, results_list, node):\n if result:\n logger.info(\n 'Test case for {0} with {1} PASSED on {2}.'.format(\n node, out_plugin, test))\n else:\n logger.error(\n 'Test case for {0} with {1} FAILED on {2}.'.format(\n node, out_plugin, test))\n results_list.append((compute_node, out_plugin, test, result))", "title": "" }, { "docid": "301531a953643474d54ea500eb253278", "score": "0.56813383", "text": "def process_result_list(results, colors=None, legends=None):\n # check how many results were passed\n single_result = False\n legend_error = False\n if isinstance(results, list):\n if len(results) == 1:\n single_result = True\n else:\n single_result = True\n results = [results]\n\n # handle results according to their number\n if single_result:\n # assign colors and create list for later handling\n if colors is not None:\n colors = assign_colors(results, colors)\n colors = [colors]\n\n # create list of legends for later handling\n if not isinstance(legends, list):\n legends = [legends]\n else:\n # if more than one result is passed, we use one color per result\n colors = assign_colors_for_list(len(results), colors)\n\n # check whether list of legends has the correct length\n if legends is None:\n # No legends were passed: create some custom legends\n legends = []\n for i_leg in range(len(results)):\n legends.append('Result ' + str(i_leg))\n else:\n # legends were passed by user: check length\n if isinstance(legends, list):\n if len(legends) != len(results):\n legend_error = True\n else:\n legend_error = True\n\n # size of legend list and size of results does not match\n if legend_error:\n raise ValueError('List of results passed and list of labels do '\n 'not have the same length but should. 
Stopping.')\n\n return results, colors, legends", "title": "" }, { "docid": "215635eba1b9dc20f15dde7029a53cb8", "score": "0.56778073", "text": "def extend(self, result):\n self._result.extend(result)", "title": "" }, { "docid": "38e7a5e4262c86ec4eeb85c18e812693", "score": "0.56765807", "text": "def _process_results(self, results: Optional[Dict]) -> List[Dict]:\n if not results:\n return []\n mapped_resources = []\n for result in results[\"results\"][\"bindings\"]:\n _, name = get_prefix(result['article']['value'])\n source_resource = Resource.create_resource(result[self.source]['value'])\n target_resource = Resource.create_resource(result[self.target]['value'])\n mapped_resources.append({\n \"article_name\": name,\n self.source: source_resource,\n self.target: target_resource\n })\n return mapped_resources", "title": "" }, { "docid": "542f65c64ede994a1a606c6489f4b709", "score": "0.56661177", "text": "def init_result(self) -> None:\n if self.aggregate:\n self.result = None\n self.input_val = None\n else:\n self.result = []\n self.fail_id = []", "title": "" }, { "docid": "43518c6d7257feed1b1b8055edfe3388", "score": "0.5661733", "text": "def process_everything_results(everything_results_list):\n everything_results = []\n \n for everything_item in everything_results_list:\n author = everything_item.get('author')\n title = everything_item.get('title')\n description = everything_item.get('description')\n url = everything_item.get('url')\n urlToImage = everything_item.get('urlToImage')\n publishedAt = everything_item.get('publishedAt')\n \n everything_object = Everything(author, title, description, url, urlToImage, publishedAt)\n everything_results.append(everything_object)\n \n return everything_results", "title": "" }, { "docid": "8c95e388c57ea81087bc5e1e1d407dc8", "score": "0.5647437", "text": "def add_result(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "8c95e388c57ea81087bc5e1e1d407dc8", "score": "0.5647437", "text": "def add_result(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "8c95e388c57ea81087bc5e1e1d407dc8", "score": "0.5647437", "text": "def add_result(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "8c95e388c57ea81087bc5e1e1d407dc8", "score": "0.5647437", "text": "def add_result(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f8cd7bd29f546aca65eff1eeb08c8db2", "score": "0.5644896", "text": "def add_result(self, result):\n self.last_res_received = time.time()\n\n if self.done():\n return\n\n self.results.append(result)\n expected_num = self.num_results\n\n if expected_num > -1 and len(self.results) >= expected_num:\n self.set_result(self.results)\n return\n\n if expected_num == -1:\n self.schedule_finisher(\"last_res_received\")", "title": "" }, { "docid": "5e89c4afddc683d25636fbf0b69665d5", "score": "0.5618919", "text": "def validate_result(self, result: Dict[str, Any]) -> Dict[str, Any]:\n if self.validator:\n return self.validator.validate_result(result)\n return result", "title": "" }, { "docid": "9aa3715da26da21f46104ffccbaf0a2f", "score": "0.5597463", "text": "def results(self) -> typing.List[\"Result\"]:\n return self._results", "title": "" }, { "docid": "9865e58f7d9fb561fd6f66768aabb18e", "score": "0.5597421", "text": "def to_objects(self, results):\n return results", "title": "" }, { "docid": "64d9db64a22d6f9823bd3b1f2b98dc7d", "score": "0.5590992", "text": "def f_add_result(self, *args, **kwargs):\n return 
self._nn_interface._add_generic(self, type_name=RESULT,\n group_type_name=RESULT_GROUP,\n args=args, kwargs=kwargs)", "title": "" }, { "docid": "9fc4680315939e4a107bf9ebe77f189b", "score": "0.5575114", "text": "def process_results(self, doc, results):\n pass", "title": "" }, { "docid": "e56643c2af199abd1544e52b7898cb3c", "score": "0.5574975", "text": "def run_validation(parsed_data: List[Row]) -> List[bool]:\n return [validate_entry(row) for row in parsed_data]", "title": "" }, { "docid": "5ce4a431ca4d72b609663a215e4538c9", "score": "0.55742073", "text": "def processResults (self):\n\t\tif 0 and self.verbose:\n\t\t\tprint \"ProcessRecs\"\n\t\t\tprint \"there are %d records to process\" % len (self)", "title": "" }, { "docid": "07b76aeb2808dadbbe1cb04db93250dc", "score": "0.5571103", "text": "def results(self):\n for res in self.data.get('results'):\n yield self.clazz(self.client, res)", "title": "" }, { "docid": "b80c255163666cabb02fa7c997101301", "score": "0.55411005", "text": "def addResults(self, ScoreData):\r\n\t\tassert isinstance(ScoreData, list)\r\n\t\tfor scores in ScoreData:\r\n\t\t\tself.scoreHistory+= scores", "title": "" }, { "docid": "f1ca73bd2ff97a76839aeb811bd32c31", "score": "0.55408156", "text": "def _result_resolution(self, result_list):\n\n # Object resolution occurs in-place\n for a_result_item in enumerate(result_list):\n for a_result_attribute in enumerate(a_result_item[1]):\n try:\n # Primitive types should remain primitive types,\n # Nodes to be resolved to native objects\n resolved_object = a_result_attribute[1]\n\n resolved_object = self._object_resolution(resolved_object)\n\n result_list[a_result_item[0]][\n a_result_attribute[0]\n ] = resolved_object\n\n except KeyError as exc:\n # Not being able to match the label set of a node with a known object results\n # in a KeyError in the internal dictionary used for resolution. 
If it is impossible\n # to match, then raise an exception with more details about the error.\n if isinstance(a_result_attribute[1], Node):\n raise NodeClassNotDefined(\n a_result_attribute[1], self._NODE_CLASS_REGISTRY\n ) from exc\n\n if isinstance(a_result_attribute[1], Relationship):\n raise RelationshipClassNotDefined(\n a_result_attribute[1], self._NODE_CLASS_REGISTRY\n ) from exc\n\n return result_list", "title": "" }, { "docid": "b39331b89e5a56406be76d59ae222893", "score": "0.5536229", "text": "def add_result(self, name, results):\n self.data[name] = results\n\n return self", "title": "" }, { "docid": "666830bf53dca989d0f280885f6c7960", "score": "0.5508297", "text": "def GroupResult(self):\n ...", "title": "" }, { "docid": "3a54edca4c50f28a2f7d95aff738b0f9", "score": "0.5480869", "text": "def parse_results(self):\n all_options = self.__option_manager.get_options()\n sequence_path = all_options['General/SEQUENCE_PATH']\n sequence_name = sequence_path.strip(\"/ \\n\").split(\"/\")[-1]\n for group in self.__tool_objects.keys():\n # Setting tool objects to each group's result manager and\n # instanciate them.\n self.__result_managers[group] = \\\n eval(\"{}ResultManager\".format(group))(sequence_name)\n self.__result_managers[group].set_tools(self.__tool_objects[group])\n # Checking the integrity of output file\n Logger().info(\"Checking {}'s output files...\".format(group))\n # Actually calling CDSFindingToolResultManager or tRNA etc..\n self.__result_managers[group].check_output()\n Logger().info(\"{} done\".format(group))\n for group in self.__result_managers.keys():\n # Parsing the output file\n Logger().info(\"Parsing {}'s output files...\".format(group))\n # Actually calling CDSFindingToolResultManager or tRNA etc..\n self.__result_managers[group].parse_output()\n Logger().info(\"{} done\".format(group))", "title": "" }, { "docid": "ce2b1d62d2ae9b8791bdf0982db8d038", "score": "0.5476021", "text": "def handle_results(self, results):\n def send_feedback():\n if self.feedbackq is not None:\n for result in results.results:\n if result.error != 0:\n result.index = results.index\n result.type = results.type\n self.feedbackq.put(result)\n\n if self.aggregate_limit == -1:\n if results.failed:\n send_feedback()\n else:\n if results.failed:\n self.bad_indexes[results.index] = results.results\n send_feedback()\n else:\n self.good_indexes[results.index] = results.results\n for result in results.results:\n self.add_result(result.res, result.quality_id)", "title": "" }, { "docid": "833ffcdb7eb4a259f0a1756a8af11af7", "score": "0.5475123", "text": "def unifications(self, desired_result):\n # TODO: is this really necessary\n assert desired_result.groundedp()\n back = []\n for p in self.rules:\n prod = p.unify(desired_result)\n if prod is not None:\n back.append(rule_app.from_prod(p, prod))\n return back", "title": "" }, { "docid": "9fe4cef61fa6fab1980ff093d558e228", "score": "0.54681915", "text": "def _eval_results(self):\n raise NotImplementedError('Must be implemented in child class.')", "title": "" }, { "docid": "2d3e06b2228dfaa0a5199ed6147eff2f", "score": "0.5461157", "text": "def process_results(self, doc, results):\n # TODO: implement evaluation.\n raise NotImplementedError('Evaluation not implemented')", "title": "" }, { "docid": "6d5279b57920b48f5d439dd32520d1aa", "score": "0.54608333", "text": "def _process_records(self):\n self._records = list()\n for record in self.results:\n wrapped = GeneticAttributeRecord.process(record)\n self._records.append(wrapped)", "title": "" }, { "docid": 
"f9b20d9ccbdb6efe0bb3368550f7647e", "score": "0.5459889", "text": "def __flatten_validation_results(validation_results):\n output = {}\n for img_name in validation_results:\n r = validation_results[img_name]\n for label in r:\n if label == 'loss':\n output['{0}/loss'] = r[label]\n continue\n for k, v in r.items():\n output['{0}/{1}/{2}'.format(img_name, label, k)] = v\n\n return output", "title": "" }, { "docid": "190e03d0e134f9b894988253e1020e8f", "score": "0.5454409", "text": "def result_list(request, format=None):\n\tif request.method == 'GET':\n\t\tresult = Result.objects.all()\n\t\tserializer = ResultSerializer(result, many=True)\n\t\treturn Response(serializer.data)\n\telif request.method == 'POST':\n\t\tserializer = ResultSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=status.HTTP_201_CREATED)\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "1a2c445ca9ff663a4148fb4b2a885eb0", "score": "0.54477745", "text": "def from_list(cls, errors):\n result = cls(errors=errors)\n return result", "title": "" }, { "docid": "fcfaf7d96d43b6ccd9db0b3d1ec3c525", "score": "0.5438895", "text": "def fin_verify_results():\n global_helper.raise_if_false_in_list(results=result)", "title": "" }, { "docid": "fcfaf7d96d43b6ccd9db0b3d1ec3c525", "score": "0.5438895", "text": "def fin_verify_results():\n global_helper.raise_if_false_in_list(results=result)", "title": "" }, { "docid": "1a0929508f57159fa53a96472925f053", "score": "0.5421311", "text": "def run_results_checks():\n try:\n beckwith = BeckwithResultsCheck() # `David Beckwith papers`\n beckwith.run_check()\n yoken = YokenResultsCheck() # `Mel B. Yoken collection`\n yoken.run_check()\n john_hay = JohnHayResultsCheck() # `John Hay papers`\n john_hay.run_check()\n gregorian = GregorianResultsCheck() # `Vartan Gregorian papers`\n gregorian.run_check()\n brown = BrownResultsCheck() # `John Nicholas Brown II papers`\n brown.run_check()\n except Exception:\n log.exception( 'exception; traceback...' 
)\n # raise", "title": "" }, { "docid": "5e236a4881db9be18b2b78188ae83e76", "score": "0.5418421", "text": "def load_data(self) -> list[TestResult]:\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n return []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n name = '{}.{}'.format(testcase.get('classname'),\n testcase.get('name'))\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err') and child.text:\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, message, time, extra_text))\n\n return testresults", "title": "" }, { "docid": "63c7dad318c9d28c8e08a7cd3631618c", "score": "0.541409", "text": "def log_results(self, results):\n for result in results:\n self.log_result(result)\n self.log_summary()", "title": "" }, { "docid": "13da97d39da3109813bc5d4fa4801a51", "score": "0.5408926", "text": "def add_graph_check_results(self, report: Report, runner_filter: RunnerFilter) -> None:\n\n checks_results = self.run_graph_checks_results(runner_filter, self.check_type)\n\n for check, check_results in checks_results.items():\n for check_result in check_results:\n entity = check_result[\"entity\"]\n entity_file_path = Path(entity[CustomAttributes.FILE_PATH])\n\n clean_check_result: _CheckResult = {\n \"result\": check_result[\"result\"],\n \"evaluated_keys\": check_result[\"evaluated_keys\"],\n }\n\n file_code_lines = self.definitions_raw[entity_file_path]\n start_line = entity[\"__start_line__\"]\n end_line = cast(\"int\", entity[\"__end_line__\"])\n\n record = Record(\n check_id=check.id,\n bc_check_id=check.bc_id,\n check_name=check.name,\n check_result=clean_check_result,\n code_block=file_code_lines[start_line - 1 : end_line],\n file_path=self.extract_file_path_from_abs_path(clean_file_path(entity_file_path)),\n file_line_range=[start_line, end_line],\n resource=entity[CustomAttributes.ID],\n check_class=check.__class__.__module__,\n file_abs_path=str(entity_file_path.absolute()),\n evaluations=None,\n severity=check.severity,\n )\n if self.breadcrumbs:\n breadcrumb = self.breadcrumbs.get(record.file_path, {}).get(record.resource)\n if breadcrumb:\n record = GraphRecord(record, breadcrumb)\n record.set_guideline(check.guideline)\n report.add_record(record=record)", "title": "" }, { "docid": "e4ee897cd5f1f203ede55543771d2e17", "score": "0.54015386", "text": "def expand(self, examples):\n coders = (beam.coders.coders.StrUtf8Coder(),\n beam.coders.coders.ProtoCoder(lint_result_pb2.LintResult))\n return (\n [examples | linter for linter in self._linters if linter.should_run()]\n | 'MergeResults' >> beam.Flatten()\n | 'DropEmpty' >> beam.Filter(lambda (_, r): r and len(r.warnings))\n | 'ToDict' >> beam.combiners.ToDict()\n | 'WriteResults' >> beam.io.textio.WriteToText(\n self._results_path,\n 
coder=beam.coders.coders.PickleCoder(),\n shard_name_template=''))", "title": "" }, { "docid": "8f7a26608186494ba256bb1480d5515c", "score": "0.5399355", "text": "def _validated_batch_result(payload, body):\n assert isinstance(payload, list), \"batch result must be list\"\n assert len(body) == len(payload), \"batch result len mismatch\"\n for req, res in zip(body, payload):\n assert req['id'] == res['id'], \"id mismatch: %s -> %s\" % (req, res)\n for idx, item in enumerate(payload):\n if 'error' in item:\n raise RPCError.build(item['error'], body, idx)\n assert 'result' in item, \"batch[%d] resp empty\" % idx\n return [item['result'] for item in payload]", "title": "" }, { "docid": "d580f2984d8031fd01b15536739fbb55", "score": "0.53988755", "text": "def perform_analysis(self):\n # For each tool group.\n for group in self.__result_managers.keys():\n # Fill the merged list of the element searched\n self.__result_managers[group].merge_results()\n self.__result_managers[group].sort()\n self.__result_managers[group].check_conflict()", "title": "" }, { "docid": "012456c34c42f3c01c54a0227dd3f0f5", "score": "0.5394815", "text": "def append_the_results(self,jobs,integration_step):\n error_found=False\n for job in jobs:\n try:\n if integration_step >= 0 :\n with open(pjoin(job['dirname'],'res_%s.dat' % integration_step)) as res_file:\n results=res_file.readline().split()\n else:\n # should only be here when doing fixed order with the 'only_generation'\n # option equal to True. Take the results from the final run done.\n with open(pjoin(job['dirname'],'res.dat')) as res_file:\n results=res_file.readline().split()\n except IOError:\n if not error_found:\n error_found=True\n error_log=[]\n error_log.append(pjoin(job['dirname'],'log.txt'))\n continue\n job['resultABS']=float(results[0])\n job['errorABS']=float(results[1])\n job['result']=float(results[2])\n job['error']=float(results[3])\n job['niters_done']=int(results[4])\n job['npoints_done']=int(results[5])\n job['time_spend']=float(results[6])\n job['err_percABS'] = job['errorABS']/job['resultABS']*100.\n job['err_perc'] = job['error']/job['result']*100.\n if error_found:\n raise aMCatNLOError('An error occurred during the collection of results.\\n' + \n 'Please check the .log files inside the directories which failed:\\n' +\n '\\n'.join(error_log)+'\\n')", "title": "" }, { "docid": "fa115775a1d59024a35b0b1b3d5dd65f", "score": "0.53944707", "text": "def processResults(result_list, isPlotting, isWriteToFile, output_file_name):\n displayResults(result_list)\n if(isPlotting):\n plotResults(result_list)\n if(isWriteToFile):\n saveResultsToCsv(result_list, output_file_name)", "title": "" }, { "docid": "0cf1b76ff49bc724238579bca290900a", "score": "0.5390379", "text": "def get_result_batch (self, start, num):\n\t\ttry:\n\t\t\tresponseDoc = self.get_response_doc (start, num)\n\t\texcept NoMatchingRecordsException, msg:\n\t\t\treturn []\n\t\t\n\t\tresponseDoc.xpath_delimiter = \":\"\n\t\t\n\t\t# print \"searchResult_constructor:\", self.searchResult_constructor.__name__\n\t\treturn self.instantiate_results(responseDoc)", "title": "" }, { "docid": "3b10d38a6d4fd23334ce811422273f39", "score": "0.5390015", "text": "def append(self, result):\n if not isinstance(result, CompletedProcess):\n raise TypeError(\"result must be a CompleteProcess\")\n self.list.append(result)\n self.returncode += result.returncode", "title": "" }, { "docid": "a29425df66c22cb79bd0363ed25bae98", "score": "0.53675246", "text": "def results(self):\n return ( result for result in 
TestResult.all() if result.model == self )", "title": "" }, { "docid": "19fe45cee79f3750c3897c63b75274a5", "score": "0.53668755", "text": "def test_resultlist_result():\n\n\tdef callabl(result):\n\t\treturn result\n\n\tclass ResultListChild(ResultList):\n\t\t\"\"\"A child class of ResultList to test result(slice).\"\"\"\n\n\tclass ResultChild(Result):\n\t\t\"\"\"A child class of Result.\"\"\"\n\n\tresults = ({0: 1}, ResultChild(results=({0: 1}, )))\n\tlst = ResultListChild(results, result_class=ResultChild)\n\tassert isinstance(lst.result(slice(0, -1)), ResultListChild)\n\tassert isinstance(lst.result(0), ResultChild)\n\t# Assert that the ResultChild (index 1) is not wrapped in another ResultChild\n\tassert lst.result(0) == lst.result(1)\n\n\t# Test with result_class callable, and also test unwrapping the ResultChild\n\tlst = ResultList(results, result_class=callabl)\n\tassert lst.result(0) == lst.result(0)", "title": "" }, { "docid": "f6cb94cbac6a2c7faf4f626272a137f6", "score": "0.5355807", "text": "def get_results_so_far(self):", "title": "" }, { "docid": "b0b9d7856bb3c6e3678606e552fd4231", "score": "0.5353472", "text": "def manipulate_result(self, result, completions_sentences, scores):\n indexes = list(result.items())\n result = []\n for i in range(min(len(indexes), RESULT_LEN-len(completions_sentences))):\n sentence, sentence_data = self.data_manager.get_sentence_data(indexes[i][0])\n if not any([sentence == com_sentence for com_sentence in completions_sentences]):\n result += [f\"{sentence} \" + \"(\" + sentence_data + f\", {indexes[i][1]}) score: {scores}\"]\n completions_sentences += [sentence]\n return result", "title": "" }, { "docid": "defdab64f7269fb30cd28fb94541140b", "score": "0.5342131", "text": "def accumulate_result(self, test_name, result):\n self.number_of_tests_run += 1\n if result == FAILURE:\n self.failed_tests.append(test_name)", "title": "" }, { "docid": "8115ecc162f053d343494f95dfeabc68", "score": "0.53331447", "text": "def _processResults(self, data):\n pass", "title": "" }, { "docid": "9d1b0dbdd8ca968d64ccc852963b54ee", "score": "0.53300047", "text": "def add_result(self, **kwargs):\n\n item = self.defaults.copy()\n self._update(item, kwargs)\n\n missing = [k for k in self.required if k not in item]\n if missing:\n raise KeyError(\"Missing keys: %r\" % missing)\n\n self.data.append(item)", "title": "" }, { "docid": "a249f6bdb45bf6b957edc9424546e221", "score": "0.53243446", "text": "def _check_results(self):\n\n ev = event.Event(\n 'yoda_on_check_results', self._test_engine.test_run.id)\n self._mh.fire_event(ev)\n if ev.will_run_default():\n if self._test_results_output_create == True:\n for output_handler in self._test_results_output_handler:\n trof = TestResultsOutputFactory(self._mh.ext_cfg['Yoda']['db_results_dsn'].format(var_dir=syscfg.HTK_VAR_DIR), output_handler)\n trof.create(self._test_engine.test_run)", "title": "" }, { "docid": "b1239df639c56e2f69aa23b2e551d57b", "score": "0.53217053", "text": "def results(self):\n return ( result for result in TestResult.all() if result.test == self )", "title": "" }, { "docid": "2618c6c8e40b1454d74a9621a147788f", "score": "0.5320803", "text": "def add_batch_results(self, batch, batch_id, prev_turns, references, preds):\n for i in xrange(batch['size']):\n target = batch['decoder_tokens'][i]\n # Padded turn\n if not target:\n continue\n uuid = batch['uuids'][i]\n role = batch['kbs'][i].facts['personal']['Role']\n example_id = '{uuid}-{role}-{turn}'.format(uuid=uuid, role=role, turn=batch_id)\n context = [turns[i] for 
turns in prev_turns]\n self.add_results(example_id=example_id, prev_turns=context, target=references[i], pred=preds[i])", "title": "" }, { "docid": "70e1c525f8f28b6c6070c141ea813c7c", "score": "0.5305249", "text": "def __call__(self, results):\n\n # Deal with one sample when data loader call 'get_item' func which only return one sample\n if isinstance(results, dict):\n results = super().__call__(results)\n return results\n\n # Deal with multi samples when data loader call 'get_item' func which return multi samples\n results_ = []\n for instance in results:\n instance = super().__call__(instance)\n results_.append(instance)\n return results_", "title": "" }, { "docid": "d25923bab014f9ca547d5b1f321fb8c4", "score": "0.5287147", "text": "def addResult(self,parameterslist, score):\r\n\t\t# result optimal for either maximization or minimization\r\n\t\tif self.maximization == (score > self.optimalscore):\r\n\t\t\tself.bestParameters = parameterslist\r\n\t\t\tself.optimalscore = score\r\n\t\tself.scoreHistory.append(score)\r\n\t\t\r\n\t\tif self.keepIntermediateParameters:\r\n\t\t\tself.parameterHistory.append(parameterslist)\r\n\r\n\t\tif self.showIntermediateResults:\r\n\t\t\tprint(\"iteration \" + num2str(len(self.scoreHistory)) + \"\\t has score \\t\" + num2str(score) )", "title": "" }, { "docid": "c6ce569af23937ab494d3ea50689c175", "score": "0.52812344", "text": "def test_load_summarized_results_list(self):\n obs = list(load_summarized_results_list([self.summary_fp_1]))\n self.assertEqual(len(obs), 1)\n self.assertEqual(type(obs[0]), BenchSummary)\n\n obs = list(load_summarized_results_list([self.summary_fp_1,\n self.summary_fp_2]))\n self.assertEqual(len(obs), 2)\n for o in obs:\n self.assertEqual(type(o), BenchSummary)", "title": "" }, { "docid": "41d450960a9563ad1013b808b1f62018", "score": "0.52743906", "text": "def get_results(self):\n return WorkflowResults(task_results={task.name: task.results for task in self})", "title": "" }, { "docid": "e392573ec2df02fb88dfff3e4ddd7cde", "score": "0.5263223", "text": "def results(self):", "title": "" }, { "docid": "600695729c0a6fe290dad621740897f0", "score": "0.52496594", "text": "def validate(self):\n errors = []\n for validator in self.validators:\n try:\n validator.validate(self._value)\n except ValidationException as e:\n errors.append(e.message)\n return errors", "title": "" }, { "docid": "e1a1ef640856927d2879dda28c017437", "score": "0.52489626", "text": "def add_rows(self):\n for related_student_result in list(self.collector.results.values()):\n self.append(StudentRowList(assignments=self.assignments, related_student_result=related_student_result))", "title": "" }, { "docid": "3eede20309978c04f54fce209735a981", "score": "0.52423936", "text": "def displayResults(result_list):\n for r in result_list:\n r.printResult()", "title": "" }, { "docid": "48c279e9084d3ef05e57fdcffa544d74", "score": "0.52407503", "text": "def _results_combine(self, results, new_results):\n id_list = [result[\"UserBooksId\"] for result in results]\n for new_result in new_results:\n if new_result.get(\"UserBooksId\", 0) not in id_list:\n results.append(new_result)\n return results", "title": "" }, { "docid": "eee18318c453f0699740d785f7149851", "score": "0.52355295", "text": "def validations(self) -> typing.List[\"Validation\"]:\n return typing.cast(\n typing.List[\"Validation\"],\n self._properties.get(\"validations\"),\n )", "title": "" }, { "docid": "ebd33c8865cb0bb19c9618a672e0ede5", "score": "0.5230539", "text": "def create(self, validated_data):\n return 
Result.objects.create(**validated_data)", "title": "" }, { "docid": "b279ba8ec0c24f37ef58e9672a596b25", "score": "0.5227938", "text": "def add_test_results(self, test_xml, actual_path):\n self.path_to_actual = actual_path\n # Go through all the test nodes under 'results'.\n if not test_xml:\n return\n\n temp_test_cases = []\n for i in range(0, len(list(test_xml))):\n test_child = test_xml[i]\n\n node = test_child.find('error')\n error_msg = node.text if node is not None else ''\n\n node = test_child.find('error-type')\n error_type = node.text.strip() if node is not None else ''\n\n node = test_child.find('query-time')\n query_time = 0\n try:\n query_time = float(node.text if node is not None else '0')\n except ValueError:\n pass\n\n node = test_child.find('sql')\n sq = node.text if node is not None else ''\n\n test_child_name = test_child.get('name')\n if not test_child_name:\n continue\n test_result = TestCaseResult(test_child_name, str(\n i), sq, query_time, error_msg, error_type, test_child.find('table'), self.test_config, self.test_metadata)\n temp_test_cases.append(test_result)\n\n if temp_test_cases:\n # Clear any dummy place holders.\n self.test_case_map = temp_test_cases", "title": "" }, { "docid": "6a615aa214828a3e10308457652c082c", "score": "0.52224493", "text": "def run(self, instances_list):\n rule_results = []\n\n def get_instance(list_of_instances, content_type):\n for _instance in list_of_instances:\n if isinstance(_instance, content_type.model_class()):\n return _instance\n\n instance_identifier = None\n rules = Rule.objects.filter(ruleset=self, active=True).order_by(\n 'sequence')\n instance = None\n\n for rule in rules:\n instance = get_instance(instances_list, rule.process_model)\n result = rule.run_evaluate(instance)\n if not instance_identifier:\n instance_identifier = rule.instance_identifier\n if result:\n rule_results.append({\n \"identity\": getattr(instance, instance_identifier),\n \"result\": result\n })\n if rule.exit_on_match:\n return rule_results\n else:\n if rule.exit_on_fail:\n return rule_results\n\n if not rule_results:\n rule_results = [{'identity': getattr(\n instance, instance_identifier), 'result': None}]\n return rule_results", "title": "" }, { "docid": "74680980e7a5fb9b8557d36fe0f59313", "score": "0.5207572", "text": "def results(self):\n raise NotImplementedError(\"\")", "title": "" }, { "docid": "74680980e7a5fb9b8557d36fe0f59313", "score": "0.5207572", "text": "def results(self):\n raise NotImplementedError(\"\")", "title": "" }, { "docid": "e4ce5b53dee11c3f8d2543cd0ed77140", "score": "0.5199762", "text": "def f_add_result_group(self, *args, **kwargs):\n\n return self._nn_interface._add_generic(self, type_name=RESULT_GROUP,\n group_type_name=RESULT_GROUP,\n args=args, kwargs=kwargs)", "title": "" }, { "docid": "687b9fbf65020492df54c3684221ba31", "score": "0.51994735", "text": "def results():", "title": "" }, { "docid": "47cebc9120890ef177cada3526ef432f", "score": "0.51927984", "text": "def test_add_result(self):\n MockTestSuite.components = (SuccessCase,)\n\n run_data = RunData(run_name=None)\n main_test = MockTestSuite(run_data=run_data)\n test_case = next(iter(main_test))\n\n # Simulate starting the test.\n self.client.start_test_run(main_test)\n self.client.start_composite(main_test)\n self.client.start_test(test_case)\n\n # Check that the results are still None.\n self._validate_test_result(main_test, success=None)\n self._validate_test_result(test_case, success=None,\n error_tuple=(None, ''))\n\n # Simulate ending the test.\n 
self.client.stop_test(test_case)\n ERROR_STRING = 'test error'\n EXPECTED_STRING = 'ERROR: ' + ERROR_STRING\n self.client.add_result(test_case, TestOutcome.ERROR,\n ERROR_STRING)\n self.client.stop_composite(main_test)\n\n # Check that the results are updated.\n self._validate_test_result(test_case, success=False,\n error_tuple=(TestOutcome.ERROR, EXPECTED_STRING))\n self._validate_test_result(main_test, success=False)", "title": "" }, { "docid": "4f8f4dea5f561678ee5d165cafb01aff", "score": "0.51917744", "text": "def _convert_results(top1_error, top1_loss, top5_error):\r\n\r\n assert isinstance(top1_error, list), \"input should be a list\"\r\n length = len(top1_error)\r\n top1_error_list = []\r\n top5_error_list = []\r\n top1_loss_list = []\r\n for i in range(length):\r\n top1_error_list.append(top1_error[i].avg)\r\n top5_error_list.append(top5_error[i].avg)\r\n top1_loss_list.append(top1_loss[i].avg)\r\n return top1_error_list, top1_loss_list, top5_error_list", "title": "" }, { "docid": "aba68c7013a92422dd8e5878e583aba6", "score": "0.51893574", "text": "def validate(self, val):\n errors = []\n\n if (not self.required) and val is None:\n return errors\n\n if self.data_type:\n err_msg = self.validate_type(val)\n\n if err_msg:\t # There was an error\n errors.append(err_msg)\n return errors\n\n for validator in self.validators:\n passed, err = validator(val)\n\n if not passed:\n errors.append(err)\n\n return errors", "title": "" }, { "docid": "dddf20f9d0f120a6d6666da7faec5322", "score": "0.518749", "text": "def __init__(self, results):\n self.results = results\n self.ifilter = None # https://docs.python.org/2/library/itertools.html#itertools.ifilter\n self.reset()", "title": "" } ]
87d1242011a4ea0509275c443be6bd65
Fixture function which creates the model metadata json file without the sensor or neural_network information
[ { "docid": "eec09f5a125600f51fb444f12b957841", "score": "0.5558178", "text": "def create_model_metadata_action_space():\n model_metadata_path = \"test_model_metadata_action_space.json\"\n model_metadata = {\n \"action_space\": [\n {\n \"steering_angle\": 45,\n \"speed\": 0.8\n },\n {\n \"steering_angle\": -45,\n \"speed\": 0.8\n },\n {\n \"steering_angle\": 0,\n \"speed\": 0.8\n },\n {\n \"steering_angle\": 22.5,\n \"speed\": 0.8\n },\n {\n \"steering_angle\": -22.5,\n \"speed\": 0.8\n },\n {\n \"steering_angle\": 0,\n \"speed\": 0.4\n }\n ]\n }\n with open(model_metadata_path, 'w') as file:\n json.dump(model_metadata, file, indent=4)\n return model_metadata_path", "title": "" } ]
[ { "docid": "b9b61a5120be8456706b5b11b043eaa7", "score": "0.7641225", "text": "def create_model_metadata():\n model_metadata_path = \"test_model_metadata.json\"\n model_metadata = {\n \"action_space\": [\n {\n \"steering_angle\": -30,\n \"speed\": 0.6\n },\n {\n \"steering_angle\": -15,\n \"speed\": 0.6\n },\n {\n \"steering_angle\": 0,\n \"speed\": 0.6\n },\n {\n \"steering_angle\": 15,\n \"speed\": 0.6\n },\n {\n \"steering_angle\": 30,\n \"speed\": 0.6\n }\n ],\n \"sensor\": [\"STEREO_CAMERAS\"],\n \"neural_network\": \"DEEP_CONVOLUTIONAL_NETWORK_SHALLOW\"\n }\n with open(model_metadata_path, 'w') as file:\n json.dump(model_metadata, file, indent=4)\n return model_metadata_path", "title": "" }, { "docid": "6981662265c68ddf309c402d0bd8b0fe", "score": "0.6228512", "text": "def test_parse_model_metadata(create_model_metadata):\n sensor, network, simapp_version = ModelMetadata.parse_model_metadata(create_model_metadata)\n os.remove(create_model_metadata)\n assert sensor == [\"STEREO_CAMERAS\"]\n assert network == \"DEEP_CONVOLUTIONAL_NETWORK_SHALLOW\"\n assert simapp_version == SIMAPP_VERSION_2", "title": "" }, { "docid": "31471fef5b0b8f4bf161b265566aef0f", "score": "0.6157858", "text": "def model_metadata(version, id_preprocessing_metadata, epochs, validation_split, learning_rate, mae_test,\n mae_train, saved_model_name):\n create_from_dict({'version': version, 'preprocessing_metadata_id': id_preprocessing_metadata, 'epochs': epochs,\n 'validation_split': validation_split, 'learning_rate': learning_rate, 'mae_test': mae_test,\n 'mae_train': mae_train, 'model_name': saved_model_name, 'created_at': datetime.now()})", "title": "" }, { "docid": "38df4a298d0c176e5e22a5e48bfd00df", "score": "0.6151132", "text": "def make_metadata_from_model(model):\n infos = model[\"infos\"]\n # Description section\n description = \"<p><strong>Description: </strong></p>\"\n tmp = \"This model was trained by {} using the {} recipe in {}. 
\"\n description += tmp.format(infos[\"uploader\"], infos[\"recipe_name\"], ASTEROID_REF)\n tmp = \"</a>It was trained on the <code>{}</code> task of the {} dataset.</p>\"\n description += tmp.format(model[\"task\"], model[\"dataset\"])\n\n # Training config section\n description += \"<p>&nbsp;</p>\"\n description += \"<p><strong>Training config:</strong></p>\"\n description += two_level_dict_html(infos[\"training_config\"])\n\n # Results section\n description += \"<p>&nbsp;</p>\"\n description += \"<p><strong>Results:</strong></p>\"\n display_result = {k: v for k, v in infos[\"final_metrics\"].items() if \"pesq\" not in k.lower()}\n description += display_one_level_dict(display_result)\n\n # Software section\n description += \"<p>&nbsp;</p>\"\n description += \"<p><strong>Versions:</strong></p>\"\n description += display_one_level_dict(infos[\"software_versions\"])\n\n # License section\n description += \"<p>&nbsp;</p>\"\n description += \"<p><strong>License notice:</strong></p>\"\n description += infos[\"license_note\"]\n\n # Putting it together.\n metadata = {\n \"title\": infos[\"upload_name\"],\n \"upload_type\": \"software\",\n \"description\": description,\n \"creators\": [{\"name\": infos[\"uploader\"], \"affiliation\": infos[\"affiliation\"]}],\n \"communities\": [{\"identifier\": \"zenodo\"}, {\"identifier\": \"asteroid-models\"}],\n \"keywords\": [\n \"Asteroid\",\n \"audio source separation\",\n model[\"dataset\"],\n model[\"task\"],\n model[\"model_name\"],\n \"pretrained model\",\n ],\n \"license\": \"CC-BY-SA-3.0\",\n }\n return metadata", "title": "" }, { "docid": "50ae4a7bc4780c78eec7416a69f4e6dd", "score": "0.6109582", "text": "def createModel(self):\n # initializing the corpus\n trainDummy = SentenceDataset([Sentence('')])\n corpus: Corpus = Corpus(train=trainDummy, test=trainDummy, dev=trainDummy)\n tagger = self.getTagger(corpus)\n file = self.path+'/' + BLANK_MODEL \n tagger.save(file)", "title": "" }, { "docid": "41e82f2a504c19733bf509f47b290107", "score": "0.6045753", "text": "def initialise_attributes(self, n_params, n_summaries, n_covariance_sims,\n n_derivative_sims, fast_train, dtype, save, \n filename):\n if dtype == tf.float64:\n self.dtype = tf.float32\n self.itype = tf.int64\n else:\n self.dtype = tf.float32\n self.itype = tf.int32\n \n self.save = save\n if self.save:\n if filename is None:\n self.filename = \"model\"\n else:\n self.filename = str(filename)\n else:\n self.filename = None\n\n if fast_train:\n self.trainer = self.fast_train\n else:\n self.trainer = self.loop_train\n self.n_params = self.u.positive_integer(n_params, \"n_params\")\n self.n_summaries = self.u.positive_integer(n_summaries, \"n_summaries\")\n self.n_s = self.u.positive_integer(\n n_covariance_sims, \"n_covariance_sims\")\n self.n_d = self.u.positive_integer(\n n_derivative_sims, \"n_derivative_sims\")\n self.single_dataset = self.u.check_num_datasets(self.n_s, self.n_d)\n\n self.numerical = None\n self.test_numerical = None\n self.use_external = None\n self.test_use_external = None\n # self.sims_at_once = None\n # self.loop_sims = None\n\n self.dΔμ_dx = None\n self.n_st = None\n self.n_sm1 = None\n self.identity = None\n\n self.model = None\n self.optimiser = None\n\n self.θ_fid = None\n self.test_θ_fid = None\n self.δθ = None\n self.d2μ_dθdx = None\n self.test_δθ = None\n self.dataset = None\n self.test_dataset = None\n self.derivative_dataset = None\n self.test_derivative_dataset = None\n # self.indices = None\n # self.derivative_indices = None\n\n self.MLE_F = None\n 
self.MLE_Finv = None\n self.MLE_Cinv = None\n self.MLE_dμ_dθ = None\n self.MLE_μ = None\n self.MLE_θ_fid = None\n\n self.initialise_history()\n self.load_useful_constants()", "title": "" }, { "docid": "502fa65112361d8a43481f65140ee473", "score": "0.60204464", "text": "def test_train_model_create():\n path_create = os.path.join('created', 'train_options.json')\n subprocess.run(\n ['fnet', 'train', path_create],\n check=True,\n )\n assert os.path.exists(path_create)", "title": "" }, { "docid": "cbe35e4234b6db5cd9d7c5993b7e3906", "score": "0.601315", "text": "def test_metadata_fixture_without_report_flag(testdir):\n testdir.makepyfile('''\n def test_metadata(json_metadata):\n json_metadata['x'] = 'foo'\n ''')\n res = testdir.runpytest()\n assert res.ret == 0\n assert not (testdir.tmpdir / '.report.json').exists()", "title": "" }, { "docid": "93d56d743d91efc542ff9c7a2ab2e4b7", "score": "0.59655833", "text": "def serialize_metadata():\n print(\"Serializing Metadata ...\")\n import pickle\n machine_datafile = 'machines_meta.pickle'\n model_datafile = 'machine_models_meta.pickle'\n print(f\"Writing machine data to {machine_datafile}\")\n with open(machine_datafile, 'wb') as fp:\n pickle.dump(global_vars.machines, fp)\n print(f\"Writing models data to {model_datafile}\")\n with open(model_datafile, 'wb') as fp:\n pickle.dump(global_vars.machine_models, fp)\n print(\"Finished\")", "title": "" }, { "docid": "4fc9e5ea66d346a0aca2b98c704acd98", "score": "0.5945161", "text": "def _write_metadata_one_model(argument_dict):\n\n from gewittergefahr.deep_learning import cnn\n from gewittergefahr.deep_learning import upconvnet\n from gewittergefahr.deep_learning import input_examples\n from gewittergefahr.scripts import train_upconvnet\n\n # Read input args.\n cnn_file_name = argument_dict[train_upconvnet.CNN_FILE_ARG_NAME]\n cnn_feature_layer_name = argument_dict[\n train_upconvnet.FEATURE_LAYER_ARG_NAME]\n\n top_training_dir_name = argument_dict[train_upconvnet.TRAINING_DIR_ARG_NAME]\n first_training_time_string = argument_dict[\n train_upconvnet.FIRST_TRAINING_TIME_ARG_NAME\n ]\n last_training_time_string = argument_dict[\n train_upconvnet.LAST_TRAINING_TIME_ARG_NAME]\n\n top_validation_dir_name = argument_dict[\n train_upconvnet.VALIDATION_DIR_ARG_NAME\n ]\n first_validation_time_string = argument_dict[\n train_upconvnet.FIRST_VALIDATION_TIME_ARG_NAME\n ]\n last_validation_time_string = argument_dict[\n train_upconvnet.LAST_VALIDATION_TIME_ARG_NAME]\n\n num_examples_per_batch = argument_dict[\n train_upconvnet.NUM_EX_PER_BATCH_ARG_NAME\n ]\n num_epochs = argument_dict[train_upconvnet.NUM_EPOCHS_ARG_NAME]\n num_training_batches_per_epoch = argument_dict[\n train_upconvnet.NUM_TRAINING_BATCHES_ARG_NAME\n ]\n num_validation_batches_per_epoch = argument_dict[\n train_upconvnet.NUM_VALIDATION_BATCHES_ARG_NAME\n ]\n output_dir_name = argument_dict[train_upconvnet.OUTPUT_DIR_ARG_NAME]\n\n # Process input args.\n first_training_time_unix_sec = time_conversion.string_to_unix_sec(\n first_training_time_string, TIME_FORMAT)\n last_training_time_unix_sec = time_conversion.string_to_unix_sec(\n last_training_time_string, TIME_FORMAT)\n\n first_validation_time_unix_sec = time_conversion.string_to_unix_sec(\n first_validation_time_string, TIME_FORMAT)\n last_validation_time_unix_sec = time_conversion.string_to_unix_sec(\n last_validation_time_string, TIME_FORMAT)\n\n # Find training and validation files.\n training_file_names = input_examples.find_many_example_files(\n top_directory_name=top_training_dir_name, 
shuffled=True,\n first_batch_number=FIRST_BATCH_NUMBER,\n last_batch_number=LAST_BATCH_NUMBER, raise_error_if_any_missing=False)\n\n validation_file_names = input_examples.find_many_example_files(\n top_directory_name=top_validation_dir_name, shuffled=True,\n first_batch_number=FIRST_BATCH_NUMBER,\n last_batch_number=LAST_BATCH_NUMBER, raise_error_if_any_missing=False)\n\n # Write metadata.\n upconvnet_metafile_name = cnn.find_metafile(\n model_file_name='{0:s}/foo.h5'.format(output_dir_name),\n raise_error_if_missing=False\n )\n print('Writing upconvnet metadata to: \"{0:s}\"...'.format(\n upconvnet_metafile_name\n ))\n\n return upconvnet.write_model_metadata(\n cnn_file_name=cnn_file_name,\n cnn_feature_layer_name=cnn_feature_layer_name, num_epochs=num_epochs,\n num_examples_per_batch=num_examples_per_batch,\n num_training_batches_per_epoch=num_training_batches_per_epoch,\n training_example_file_names=training_file_names,\n first_training_time_unix_sec=first_training_time_unix_sec,\n last_training_time_unix_sec=last_training_time_unix_sec,\n num_validation_batches_per_epoch=num_validation_batches_per_epoch,\n validation_example_file_names=validation_file_names,\n first_validation_time_unix_sec=first_validation_time_unix_sec,\n last_validation_time_unix_sec=last_validation_time_unix_sec,\n pickle_file_name=upconvnet_metafile_name)", "title": "" }, { "docid": "50decf22c6e2c431cc20c3003fd2b60e", "score": "0.59433866", "text": "def create_metadata(kwargs=None):\n assert(kwargs is not None)\n log = logging.getLogger('decals_sim')\n # Pack the input parameters into a meta-data table and write out.\n #metacols = [\n # ('BRICKNAME', 'S10'),\n # ('OBJTYPE', 'S10'),\n # ('NOBJ', 'i4'),\n # ('CHUNKSIZE', 'i2'),\n # ('NCHUNK', 'i2'),\n # ('ZOOM', 'i4', (4,)),\n # ('SEED', 'S20'),\n # ('RMAG_RANGE', 'f4', (2,))]\n #metacat = Table(np.zeros(1, dtype=metacols))\n metacat = fits_table()\n for key in ['brickname','objtype']: #,'nchunk']:\n\tmetacat.set(key, np.array( [kwargs[key]] ))\n\tmetacat.set('nobj', np.array( [kwargs['args'].nobj] ))\n\tmetacat.set('zoom', np.array( [kwargs['args'].zoom] ))\n\tmetacat.set('cutouts', np.array( [kwargs['args'].cutouts] ))\n\tmetacat.set('stamp_size', np.array( [kwargs['args'].stamp_size] ))\n\tmetacat.set('bright_galaxies', np.array( [kwargs['args'].bright_galaxies] ))\n\t#metacat['RMAG_RANGE'] = kwargs['args'].rmag_range\n\t#if not kwargs['args'].seed:\n\t# log.info('Random seed = {}'.format(kwargs['args'].seed))\n\t# metacat['SEED'] = kwargs['args'].seed\n #metacat_dir = os.path.join(kwargs['decals_sim_dir'], kwargs['objtype'],kwargs['brickname'][:3],kwargs['brickname']) \n metacat_dir = get_savedir(**kwargs)\n if not os.path.exists(metacat_dir): \n\tos.makedirs(metacat_dir)\n \n metafile = os.path.join(metacat_dir, 'metacat'+get_fnsuffix(**kwargs))\n log.info('Writing {}'.format(metafile))\n if os.path.isfile(metafile):\n\tos.remove(metafile)\n metacat.writeto(metafile)\n \n # Store new stuff\n kwargs['metacat']=metacat\n kwargs['metacat_dir']=metacat_dir", "title": "" }, { "docid": "373c4b2d079b77e2596b7836e86de311", "score": "0.5890948", "text": "def test_model_download():\n BartForConditionalGeneration.from_pretrained(MODEL_NAME)\n MarianMTModel.from_pretrained(MARIAN_MODEL)", "title": "" }, { "docid": "a820a4aacd25acd4124e204383f54039", "score": "0.5856737", "text": "def generate_metadata(year):\n \n point_1_path = nei_filepath + nei_file_path[0]\n NEI_meta = compile_metadata(point_1_path, _config, year)\n\n #Write metadata to json\n write_metadata('NEI', 
year, NEI_meta)", "title": "" }, { "docid": "5472ef79428551a729434d79aaaab854", "score": "0.5835976", "text": "def create_meta_config(exercise_name, prob_spec_exercise, author):\n config_data = None\n with open(f\"{prob_spec_exercise}/metadata.toml\") as file:\n config_data = toml.load(file) # Get the blurb, source, and source_url\n\n # Add the files, authors, and contributors to the config_data\n config_data[\"files\"] = {}\n config_data[\"files\"][\"test\"] = [f\"{exercise_name}-test.lisp\"]\n config_data[\"files\"][\"solution\"] = [f\"{exercise_name}.lisp\"]\n config_data[\"files\"][\"example\"] = [\".meta/example.lisp\"]\n config_data[\"authors\"] = [author]\n config_data[\"contributors\"] = []\n\n with open(f\"{TARGET}/{exercise_name}/.meta/config.json\", 'w') as file:\n # Encode into a string in json format and write to file\n file.write(json.dumps(config_data, cls = CustomJSONEncoder, indent = 3))\n file.write(\"\\n\")", "title": "" }, { "docid": "42e1647eb4ff3450ff72d373ed29233b", "score": "0.58160853", "text": "def load_meta(meta_filename: str):\n \n if os.path.exists(meta_filename):\n with open(meta_filename) as f:\n model_meta = json.load(f)\n else:\n print('model meta file doe not exist')\n model_meta = {}\n\n return model_meta", "title": "" }, { "docid": "ee10a176a0ad203c9138e306dc44b347", "score": "0.57996464", "text": "def main():\n # set up\n out_dir = \"models\"\n os.system(\"mkdir -p {}\".format(out_dir))\n \n # read in models\n models_file = \"models.tsv\"\n models = pd.read_csv(models_file, sep=\"\\t\")\n \n for model_idx in range(models.shape[0]):\n model_info = models.iloc[model_idx]\n\n # set up model dir\n model_dir = \"{}/{}\".format(out_dir, model_info[\"model\"])\n os.system(\"mkdir -p {}\".format(model_dir))\n os.system(\"cp model.yaml {}/model.TMP.yaml\".format(model_dir))\n\n # adjust model yaml\n old_yaml = \"{}/model.TMP.yaml\".format(model_dir)\n new_yaml = \"{}/model.yaml\".format(model_dir)\n with open(new_yaml, \"w\") as out:\n with open(old_yaml, \"r\") as fp:\n for line in fp:\n if \"{{ args_meta_url }}\" in line:\n line = \" url: {}\\n\".format(model_info[\"args_meta_url\"])\n if \"{{ args_meta_md5 }}\" in line:\n line = \" md5: {}\\n\".format(model_info[\"args_meta_md5\"]) \n if \"{{ args_index_url }}\" in line:\n line = \" url: {}\\n\".format(model_info[\"args_index_url\"])\n if \"{{ args_index_md5 }}\" in line:\n line = \" md5: {}\\n\".format(model_info[\"args_index_md5\"])\n if \"{{ args_data_url }}\" in line:\n line = \" url: {}\\n\".format(model_info[\"args_data_url\"])\n if \"{{ args_data_md5 }}\" in line:\n line = \" md5: {}\\n\".format(model_info[\"args_data_md5\"])\n\n out.write(line)\n\n # clean up\n os.system(\"rm {}\".format(old_yaml))\n\n return", "title": "" }, { "docid": "46fafcd703b29cb2e30869486c79ac33", "score": "0.57523125", "text": "def test_parse_model_metadata_exception():\n with pytest.raises(Exception, match=r\".*Model metadata does not exist:.*\"):\n sensor, network, simapp_version = ModelMetadata.parse_model_metadata(\"dummy_file.json\")", "title": "" }, { "docid": "ec2f7f100ef7df2cb96d52bcad2ccbc3", "score": "0.5735432", "text": "def deserialize_model_fixture():\n class Model:\n\n def predict(self, values):\n return [1]\n\n return Model()", "title": "" }, { "docid": "9a3c44febc6bcddf051d9eb04b6c52d5", "score": "0.5720061", "text": "def _create_model_config_data_file(self, class_num):\n if not os.path.exists(self.model_config_data_file):\n with open(self.model_config_data_file, 'w+') as f:\n f.write('classes = 
{}\\n'.format(class_num))\n f.write('train = cfg/train.txt\\n')\n f.write('valid = cfg/valid.txt\\n')\n f.write('names = cfg/voc.names\\n')\n f.write('backup = backup\\n')", "title": "" }, { "docid": "d76fd9d720bea54f411bd06f33c4d96c", "score": "0.57000697", "text": "def _init_model(self, model=None):\n if model is not None:\n if vega.is_torch_backend() and self.use_cuda:\n model = model.cuda()\n return model\n model_cfg = Config(ClassFactory.__configs__.get('model'))\n if \"model_desc_file\" in model_cfg and model_cfg.model_desc_file is not None:\n desc_file = model_cfg.model_desc_file\n desc_file = desc_file.replace(\"{local_base_path}\", self.local_base_path)\n if \":\" not in desc_file:\n desc_file = os.path.abspath(desc_file)\n if \":\" in desc_file:\n local_desc_file = FileOps.join_path(\n self.local_output_path, os.path.basename(desc_file))\n FileOps.copy_file(desc_file, local_desc_file)\n desc_file = local_desc_file\n model_desc = Config(desc_file)\n logging.info(\"net_desc:{}\".format(model_desc))\n elif \"model_desc\" in model_cfg and model_cfg.model_desc is not None:\n model_desc = model_cfg.model_desc\n elif \"models_folder\" in model_cfg and model_cfg.models_folder is not None:\n folder = model_cfg.models_folder.replace(\"{local_base_path}\", self.local_base_path)\n pattern = FileOps.join_path(folder, \"desc_*.json\")\n desc_file = glob.glob(pattern)[0]\n model_desc = Config(desc_file)\n else:\n return None\n if model_desc is not None:\n self.model_desc = model_desc\n net_desc = NetworkDesc(model_desc)\n model = net_desc.to_model()\n if vega.is_torch_backend() and self.use_cuda:\n model = model.cuda()\n return model\n else:\n return None", "title": "" }, { "docid": "f11906d1b121f507135c22cdd7522c10", "score": "0.56869036", "text": "def prepare_data(self):\n\n if self.do_train:\n self.train_examples = ExamplesBuilder(\n self.data_dir, Split.train, self.delimiter, self.is_bio\n ).examples\n self.val_examples = ExamplesBuilder(\n self.data_dir, Split.dev, self.delimiter, self.is_bio\n ).examples\n self.test_examples = ExamplesBuilder(\n self.data_dir, Split.test, self.delimiter, self.is_bio\n ).examples\n if self.num_samples > 0:\n self.train_examples = self.train_examples[: self.num_samples]\n self.val_examples = self.val_examples[: self.num_samples]\n self.test_examples = self.test_examples[: self.num_samples]\n # build vocab from examples\n self.word_alphabet = Alphabet(\"word\")\n self.char_alphabet = Alphabet(\"character\")\n self.label_alphabet = Alphabet(\"label\")\n all_examples = self.train_examples + self.val_examples + self.test_examples\n self.build_alphabet(all_examples)\n # save vocab\n os.makedirs(self.vocab_path, exist_ok=True)\n self.char_alphabet.save(self.vocab_path)\n self.word_alphabet.save(self.vocab_path)\n self.label_alphabet.save(self.vocab_path)\n\n self.train_dataset = self.create_dataset(self.train_examples)\n self.val_dataset = self.create_dataset(self.val_examples)\n self.test_dataset = self.create_dataset(self.test_examples)\n\n else:\n if self.vocab_path.endswith(\".pkl\"):\n with open(self.vocab_path, \"rb\") as fp:\n model_dict = pickle.load(fp)\n self.word_alphabet = model_dict[\"word_alphabet\"]\n self.char_alphabet = model_dict[\"char_alphabet\"]\n self.label_alphabet = model_dict[\"label_alphabet\"]\n else:\n word_alphabet = Alphabet(\"word\")\n word_alphabet.load(self.vocab_path)\n char_alphabet = Alphabet(\"character\")\n char_alphabet.load(self.vocab_path)\n label_alphabet = Alphabet(\"label\")\n label_alphabet.load(self.vocab_path)\n 
self.word_alphabet = word_alphabet\n self.char_alphabet = char_alphabet\n self.label_alphabet = label_alphabet\n\n self.tokenizer = Tokenizer()\n # constructed on demand\n self.examples = None\n self.dataset = None\n\n if any(\n label.startswith(\"L-\") or label.startswith(\"U-\")\n for label, _ in self.label_alphabet.items()\n ):\n self.tag_scheme = \"BILOU\"\n else:\n self.tag_scheme = \"BIO\"\n self.show_data_summary()", "title": "" }, { "docid": "4102d925597c77922085b46bed126480", "score": "0.56367385", "text": "def setUp(self):\n self.data_model = dict(\n metrics=dict(metric_type=dict(name=\"type\")),\n sources=dict(\n quality_time=dict(\n parameters=dict(\n status=dict(\n api_values={\n \"target met (green)\": \"target_met\",\n \"target not met (red)\": \"target_not_met\",\n }\n )\n )\n )\n ),\n )\n self.metric = dict(\n type=\"metric_type\",\n name=\"default metric 1\",\n unit=\"units\",\n scale=\"count\",\n recent_measurements=[\n dict(count=dict(value=10, status=\"target_met\")),\n dict(count=dict(value=20, status=\"target_not_met\")),\n ],\n )", "title": "" }, { "docid": "6f2e7de9679bc8e5a183e2e60b395697", "score": "0.56319577", "text": "def test_load_model_metadata(s3_bucket, aws_region, model_metadata_s3_key):\n model_metadata_local_path = 'test_model_metadata.json'\n model_metadata = ModelMetadata(bucket=s3_bucket,\n s3_key=model_metadata_s3_key,\n region_name=aws_region,\n local_path=model_metadata_local_path)\n model_metadata.get_model_metadata_info()\n assert os.path.isfile(model_metadata_local_path)\n # Remove file downloaded\n if os.path.isfile(model_metadata_local_path):\n os.remove(model_metadata_local_path)", "title": "" }, { "docid": "37acf6b3c2595ed6c6771abbf9215b0e", "score": "0.5619721", "text": "def create_dataset(args, device):\n metadata_path = join(\n args.data_dir, 'metadata.json')\n\n # data is only downloaded if it is not found in\n # `args.data_dir` directory\n source_file, target_file = download(args)\n\n source_tokenizer = create_tokenizer(\n args=args, prefix='source',\n data_path=source_file)\n\n target_tokenizer = create_tokenizer(\n args=args, prefix='target',\n data_path=target_file)\n\n tokenizers = source_tokenizer, target_tokenizer\n\n if not exists(metadata_path):\n # if dataset does not exist then create it\n # downloading and tokenizing the raw files\n data_files = source_file, target_file\n train, valid, test = transform_dataset(\n args=args, data_files=data_files)\n\n train_files, train_size = train\n valid_files, valid_size = valid\n test_files, test_size = test\n\n print('Saving metadata to {}'.format(metadata_path))\n # save the location of the files in a metadata\n # json object and delete the file in case of\n # interrupt so it wont be left in corrupted state\n with open(metadata_path, 'w') as fh:\n try:\n json.dump({\n 'train': [train_files, train_size],\n 'valid': [valid_files, valid_size],\n 'test': [test_files, test_size]\n }, fh)\n except KeyboardInterrupt:\n shutil.rmtree(metadata_path)\n\n else:\n print('Loading metadata from {}'.format(\n metadata_path))\n with open(metadata_path, 'r') as fh:\n filenames = json.load(fh)\n\n train_files, train_size = filenames['train']\n valid_files, valid_size = filenames['valid']\n test_files, test_size = filenames['test']\n\n # shuffle dataset and use subword sampling\n # reguralization only during training\n train_dataset = create_loader(\n args=args,\n filenames=train_files,\n tokenizers=tokenizers,\n sampled=True,\n shuffle=True)\n\n valid_dataset = create_loader(\n args=args,\n 
filenames=test_files,\n tokenizers=tokenizers)\n\n test_dataset = create_loader(\n args=args,\n filenames=test_files,\n tokenizers=tokenizers)\n\n train = train_dataset, train_size\n valid = valid_dataset, valid_size\n test = test_dataset, test_size\n\n return (train, valid, test), tokenizers", "title": "" }, { "docid": "894108dca42a50dde43bb55d030db98c", "score": "0.5617493", "text": "def create_model(dataset_dir, sample_rate, min_track_duration, min_array_length, from_json, json_save, output_json, json_path, model_name, test_size, epochs, activation, dropout, num_segments, num_mfcc=13, n_fft=2048, hop_length=512):\n\n if not from_json:\n mapping = [] \n labels = []\n mfcc_data = []\n\n samples_per_segment = int(sample_rate * min_track_duration / num_segments)\n num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)\n\n # Loop through all genre sub-folder\n for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_dir)):\n\n # Ensure we're processing a genre sub-folder level\n if dirpath is not dataset_dir:\n\n # Save genre label (i.e., sub-folder name) in the mapping\n semantic_label = dirpath.split(\"/\")[-1]\n mapping.append(semantic_label)\n print(\"\\nProcessing: {}\".format(semantic_label))\n\n # Process all audio files in genre sub-dir\n for f in filenames:\n\n # Load audio file\n file_path = os.path.join(dirpath, f)\n signal, sample_rate = librosa.load(file_path, sr=sample_rate)\n signal = signal[min_array_length:]\n\n # Process all segments of audio file\n for d in range(num_segments):\n\n # Calculate start and finish sample for current segment\n start = samples_per_segment * d\n finish = start + samples_per_segment\n\n # Extract mfcc\n mfcc = librosa.feature.mfcc(signal[start:finish], sample_rate, n_mfcc=num_mfcc, n_fft=n_fft, hop_length=hop_length)\n mfcc = mfcc.T\n\n # Store only mfcc feature with expected number of vectors\n if len(mfcc) == num_mfcc_vectors_per_segment:\n mfcc_data.append(mfcc.tolist())\n labels.append(i-1)\n print(\"{}, segment:{}\".format(file_path, d+1))\n if json_save:\n import json\n data = {\n \"mapping\": mapping,\n \"labels\": labels,\n \"mfcc\": mfcc_data\n }\n # Save MFCCs to json file\n with open(json_path, \"w\") as fp:\n json.dump(data, fp, indent=4)\n \n else:\n import json\n \"\"\"Loads training dataset from json file.\n :param json_path (str): Path to json file containing data\n :return X (ndarray): Inputs\n :return y (ndarray): Targets\n \"\"\"\n with open(json_path, \"r\") as fp:\n data = json.load(fp)\n mfcc_data = data[\"mfcc\"]\n labels = data[\"labels\"]\n \n # Make testing data\n mfcc_test = []\n labels_test = []\n for i in range(int(len(mfcc_data)/10)):\n mfcc_test.append(mfcc_data[i])\n mfcc_data.remove(mfcc_data[i])\n\n labels_test.append(labels[i])\n labels.remove(labels[i])\n\n # Convert lists to numpy arrays\n x = np.array(mfcc_data)\n y = np.array(labels)\n x_test = np.array(mfcc_test)\n y_test = np.array(labels_test)\n\n print(\"Data succesfully loaded!\")\n\n # Create train/val split\n x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=test_size)\n \n\n if not dropout:\n # Build network topology\n model = keras.Sequential([\n\n # Input layer\n keras.layers.Flatten(input_shape=(x.shape[1], x.shape[2])),\n\n # 1st dense layer\n keras.layers.Dense(512, activation=activation),\n\n # 2nd dense layer\n keras.layers.Dense(256, activation=activation),\n\n # 3rd dense layer\n keras.layers.Dense(64, activation=activation),\n\n # Output layer\n keras.layers.Dense(10, activation='softmax')\n ])\n 
else:\n\n model = keras.Sequential([\n\n # Input layer\n keras.layers.Flatten(input_shape=(x.shape[1], x.shape[2])),\n\n # 1st dense layer\n keras.layers.Dense(512, activation=activation),\n\n # 1st dropout layer \n keras.layers.Dropout(0.2),\n\n # 2nd dense layer\n keras.layers.Dense(128, activation=activation),\n \n # 2nd dropout layer\n keras.layers.Dropout(0.2),\n\n # 3rd dense layer\n keras.layers.Dense(64, activation=activation),\n \n # 3rd dropout layers\n keras.layers.Dropout(0.2),\n\n # Output layer\n keras.layers.Dense(10, activation='softmax')\n ])\n\n # Compile model\n optimiser = keras.optimizers.Adam(learning_rate=0.0001)\n model.compile(optimizer=optimiser,\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.summary()\n\n # Train model\n history = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=32, epochs=epochs)\n\n # Evaluate the model\n test_loss, test_acc = model.evaluate(x_test, y_test)\n print('\\nTest Loss: {}'.format(test_loss))\n print('\\nTest Accuracy: {}'.format(test_acc))\n\n # History object from .fit()\n history_dict = history.history\n history_dict.keys()\n\n # Save the model\n model.save(model_name)", "title": "" }, { "docid": "e4ad334fb68a1543d211056d63aa2038", "score": "0.5594866", "text": "def get_metadata(self):\n return {\n 'package_name': 'nrl',\n 'package_version': get_version(),\n # TODO get losses or training info from word2vec\n 'parameters': {\n 'random_walk': WalkerParameters.schema().dump(self.random_walk_parameters),\n 'word2vec': Word2VecParameters.schema().dump(self.word2vec_parameters),\n },\n }", "title": "" }, { "docid": "037a8d42038cdb63a5ceefca228ce1b2", "score": "0.55665475", "text": "def gen_metadatajson(template, ziproot, basename, year, resolution):\n md = json.load(open(template, 'r'))\n\n # Update the title, and year of temporal_coverage (20 years period)\n start_year = int(year) - 10\n md['title'] = md['title'].format(year=year, resolution=resolution)\n md['resolution'] = md['resolution'].format(resolution=resolution)\n md['temporal_coverage']['start'] = str(start_year)\n md['temporal_coverage']['end'] = str(start_year + 19)\n\n # Special case for Aus extent\n if basename.startswith('NaRCLIM_baseline_Aus_Extent'):\n md['bounding_box'] = {\n \"top\": \"-10.0000000\",\n \"right\": \"154.0000000\",\n \"bottom\": \"-43.7400000\",\n \"left\": \"112.9000000\"\n }\n\n # update layer info\n md['files'] = {}\n for filename in glob.glob(os.path.join(ziproot, basename, 'data', '*.tif')):\n # get zip root relative path\n zippath = os.path.relpath(filename, ziproot)\n layer_num = re.match(r'.*(\\d\\d).tif', os.path.basename(filename)).group(1)\n\tmd['files'][zippath] = {\n 'layer': 'B{0}'.format(layer_num)\n }\n mdfile = open(os.path.join(ziproot, basename, 'bccvl', 'metadata.json'), 'w')\n\n json.dump(md, mdfile, indent=4)\n mdfile.close()", "title": "" }, { "docid": "cafc3a58a0a233446e791a6d561f3f84", "score": "0.5557305", "text": "def __make_metadata( self ):\r\n\r\n xmlFilePath = os.path.join( self.outFolder, self.outFile + '.xml')\r\n\r\n make_shapefile_metadata( xmlFilePath, self.metadataDict )", "title": "" }, { "docid": "9456aab8d0b8ec8d62514814724fee2b", "score": "0.5547646", "text": "def available_models() -> Dict:\n\n data_file = pathlib.Path(__file__).parent.resolve() / \"model_data.json\"\n\n with open(data_file, \"r\", encoding=\"utf-8\") as json_file:\n model_data = json.load(json_file)\n\n print(\"Available model grids:\", end=\"\")\n\n for model_name, model_dict in 
model_data.items():\n print(f\"\\n - {model_dict['name']}:\")\n print(f\" - Label = {model_name}\")\n\n if \"parameters\" in model_dict:\n print(f\" - Model parameters: {model_dict['parameters']}\")\n\n if \"teff range\" in model_dict:\n print(f\" - Teff range (K): {model_dict['teff range']}\")\n\n if \"wavelength range\" in model_dict:\n print(\n f\" - Wavelength range (um): {model_dict['wavelength range']}\"\n )\n\n if \"resolution\" in model_dict:\n print(f\" - Resolution lambda/Dlambda: {model_dict['resolution']}\")\n\n if \"information\" in model_dict:\n print(f\" - Extra details: {model_dict['information']}\")\n\n if \"file size\" in model_dict:\n print(f\" - File size: {model_dict['file size']}\")\n\n if \"reference\" in model_dict:\n print(f\" - Reference: {model_dict['reference']}\")\n\n if \"url\" in model_dict:\n print(f\" - URL: {model_dict['url']}\")\n\n return model_data", "title": "" }, { "docid": "3d0502558c39e33c099e4e9edc0a95ad", "score": "0.5519478", "text": "def create_metadata(self) -> _metadata_fb.ModelMetadataT:\n model_metadata = _metadata_fb.ModelMetadataT()\n model_metadata.name = self.name\n model_metadata.version = self.version\n model_metadata.description = self.description\n model_metadata.author = self.author\n model_metadata.license = self.licenses\n return model_metadata", "title": "" }, { "docid": "be6250472d3735f0d699ee7b913ec76a", "score": "0.5499235", "text": "def create_data():\n transforms = torchvision.transforms.ToTensor()\n batch_size = 1000\n num_workers = 4\n \n # first, check if we have some environment variables configured\n root = os.environ.get('TRW_DATA_ROOT')\n if root is None:\n root = './data'\n \n # standard pytorch pipeline using a data loader\n mnist_train = torchvision.datasets.MNIST(root=root, train=True, transform=transforms, download=True)\n train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, num_workers=num_workers, pin_memory=True)\n\n mnist_test = torchvision.datasets.MNIST(root=root, train=False, transform=transforms, download=True)\n test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, num_workers=num_workers, pin_memory=True)\n \n # adapt to trw interface\n mnist = collections.OrderedDict()\n mnist['train'] = trw.train.SequenceAdaptorTorch(train_loader, features=['images', 'targets'])\n mnist['test'] = trw.train.SequenceAdaptorTorch(test_loader, features=['images', 'targets'])\n return {'mnist': mnist}", "title": "" }, { "docid": "c5607a5fe90dae6c75b6c18dde03c1b8", "score": "0.5496728", "text": "def fixture_ethereum_mock_data():\n return {}", "title": "" }, { "docid": "2fc95f9ea626554bea7a3b46a2f35a07", "score": "0.54963446", "text": "def fixture_ship_data():\n return load_sample_data(name=\"bathymetry\")", "title": "" }, { "docid": "774992b133c74011714a5299eb431e04", "score": "0.5492297", "text": "def test_register_metadata(self):\n register_metadata(\"foo.csv\", config = {})", "title": "" }, { "docid": "1ceef0be981d3ad95b94565b1ab391a6", "score": "0.5480682", "text": "def fixture_optimism_mock_data():\n return {}", "title": "" }, { "docid": "7e916a5771b89304b5b842d766e8ea2b", "score": "0.54790634", "text": "def generate_output_filename(\n root,\n data,\n suffix=None,\n extension='.json',\n create_model_subdirectory=True\n):\n # Default to using *all* species if the key is not present in the\n # data dictionary.\n species = _encode(data.get('species', 'all'))\n\n antibiotic = _encode(data['antibiotic'])\n seed = data['seed']\n model = data['model']\n\n # Single site exists 
in the input dictionary. Create the path\n # accordingly.\n if 'site' in data.keys():\n site = data['site']\n\n # We are training for different time periods. Adjust everything\n # accordingly.\n if 'train_years' in data.keys():\n train = '_'.join(data['train_years'])\n test = '_'.join(data['test_years'])\n\n filename = f'Site_{site}_' \\\n f'Train_years_{train}_' \\\n f'Test_years_{test}_' \\\n f'Model_{model}_' \\\n f'Species_{species}_Antibiotic_{antibiotic}_Seed_{seed}'\n\n # Regular training\n else:\n filename = f'Site_{site}_' \\\n f'Model_{model}_' \\\n f'Species_{species}_Antibiotic_{antibiotic}_Seed_{seed}'\n\n # Except `train_site` and `test_site` to highlight different\n # scenarios.\n else:\n train_site = data['train_site']\n test_site = data['test_site']\n\n if type(train_site) != str:\n train_site = '_'.join(train_site)\n\n if type(test_site) != str:\n test_site = '_'.join(test_site)\n\n filename = f'Train_site_{train_site}_' \\\n f'Test_site_{test_site}_'\n\n # We are training for different time periods. Adjust everything\n # accordingly.\n if 'train_years' in data.keys():\n train = '_'.join(data['train_years'])\n test = '_'.join(data['test_years'])\n\n filename += f'Train_years_{train}_' \\\n f'Test_years_{test}_' \\\n\n filename += f'Model_{model}_' \\\n f'Species_{species}_Antibiotic_{antibiotic}_Seed_{seed}'\n\n # Ensures that the suffix is only added if it exists. Else, we will\n # add spurious underscores.\n if suffix:\n filename += f'_{suffix}'\n\n filename += extension\n\n if create_model_subdirectory:\n os.makedirs(os.path.join(root, model), exist_ok=True)\n filename = os.path.join(root, model, filename)\n else:\n filename = os.path.join(root, filename)\n\n return filename", "title": "" }, { "docid": "a75c407593c9c81c150c2aba9548b7c4", "score": "0.5468378", "text": "def setup():\n path_dset = CONFIG['path_src_ds']\n fpath_lbl = path_dset + 'trainLabels.csv'\n labels_df = read_csv(fpath_lbl)\n labels_df['ext'] = 'jpeg'\n labels_df.columns = ['fname', 'label', 'ext']\n labels_df = labels_df.reindex(columns=['fname', 'ext', 'label']) # swap column 'label' with 'ext'\n # Add features Patientnr. and Left/Right Eyr\n feat_ds = labels_df.loc[:, 'fname'].str.split('_', expand=True)\n labels_df['f_patient'] = feat_ds.loc[:, 0]\n labels_df['f_eye'] = feat_ds.loc[:, 1]\n # Save labels.csv in train directory\n labels_df.to_csv(path_dset + 'labels.csv')\n\n # Reorder training dataset from /train/... 
to /train/label1/...\n # Create /train/label directories\n my_ot.create_directory(path_dset + 'train') # parent\n labels_lst = labels_df['label'].unique()\n labels_lst.sort()\n\n for l in labels_lst:\n my_ot.create_directory(path_dset + 'train/' + str(l))\n\n # Move the files to the correct new label-directories\n for l in labels_lst:\n path_src = path_dset + 'train/'\n path_dst = path_dset + 'train/' + str(l)\n fnames = ['{}.{}'.format(l[0], l[1]) for l in labels_df.loc[labels_df['label'] == l, ['fname', 'ext']].values]\n my_ot.move_files(fnames=fnames, path_src=path_src, path_dst=path_dst)", "title": "" }, { "docid": "8f3cd3e520baf9bb935005febb728e19", "score": "0.5457985", "text": "def test_fc(self):\n # These entries exist for both Nodal and VARIANT, but have different values\n # for the same model\n print(self.nhf.metadata.items())\n self.assertEqual(self.nhf.metadata[\"nMom\"], 35)\n self.assertEqual(self.nhf.metadata[\"nscoef\"], 3)\n\n # These entries are only for VARIANT\n self.assertEqual(self.nhf.metadata[\"npcbdy\"], 30)\n self.assertEqual(self.nhf.metadata[\"npcsym\"], 0)\n self.assertEqual(self.nhf.metadata[\"npcsec\"], 0)\n self.assertEqual(self.nhf.metadata[\"iwnhfl\"], 0)\n self.assertEqual(self.nhf.metadata[\"nMoms\"], 0)", "title": "" }, { "docid": "612ad2ec8c8d40a372eea2116d23097d", "score": "0.54553926", "text": "def get_metadata(metadata, model_params):\n if model_params[\"name\"] == \"HeMISUnet\":\n return np.array([m[0][\"missing_mod\"] for m in metadata])\n else:\n return [model_params[\"film_onehotencoder\"].transform([metadata[k][0]['film_input']]).tolist()[0]\n for k in range(len(metadata))]", "title": "" }, { "docid": "dc826d5c1d43648d22b9f30e96e7e133", "score": "0.5451584", "text": "def build_default_metadata():\n td = TestData()\n \n td.rtr_a, _ = Device.objects.get_or_create(\n name=\"rtr_a\",\n community=\"public\",\n begin_time=now())\n\n DeviceOIDSetMap(device=td.rtr_a,\n oid_set=OIDSet.objects.get(name=\"FastPollHC\")).save()\n DeviceOIDSetMap(device=td.rtr_a,\n oid_set=OIDSet.objects.get(name=\"Errors\")).save()\n DeviceOIDSetMap(device=td.rtr_a,\n oid_set=OIDSet.objects.get(name=\"IfRefPoll\")).save()\n\n rtr_b_begin = make_aware(datetime.datetime(2013,6,1), utc)\n rtr_b_end = make_aware(datetime.datetime(2013,6,15), utc)\n td.rtr_b, _ = Device.objects.get_or_create(\n name=\"rtr_b\",\n community=\"public\",\n begin_time = rtr_b_begin,\n end_time = rtr_b_end)\n\n DeviceOIDSetMap(device=td.rtr_b,\n oid_set=OIDSet.objects.get(name=\"FastPollHC\")).save()\n\n td.rtr_inf, _ = Device.objects.get_or_create(\n name=\"rtr_inf\",\n community=\"public\",\n begin_time=now())\n\n DeviceOIDSetMap(device=td.rtr_inf,\n oid_set=OIDSet.objects.get(name=\"InfFastPollHC\")).save()\n\n td.rtr_alu, _ = Device.objects.get_or_create(\n name=\"rtr_alu\",\n community=\"public\",\n begin_time=now())\n\n DeviceOIDSetMap(device=td.rtr_alu,\n oid_set=OIDSet.objects.get(name=\"ALUFastPollHC\")).save()\n DeviceOIDSetMap(device=td.rtr_alu,\n oid_set=OIDSet.objects.get(name=\"ALUErrors\")).save()\n\n td.rtr_z_post_data = {\n \"name\": \"rtr_z\",\n \"community\": \"private\",\n }\n\n IfRef.objects.get_or_create(\n device=td.rtr_a,\n begin_time=td.rtr_a.begin_time,\n ifIndex=1,\n ifName=\"xe-0/0/0\",\n ifAlias=\"test interface\",\n ipAddr=\"10.0.0.1\",\n ifSpeed=0,\n ifHighSpeed=10000,\n ifMtu=9000,\n ifOperStatus=1,\n ifAdminStatus=1,\n ifPhysAddress=\"00:00:00:00:00:00\")\n\n IfRef.objects.get_or_create(\n device=td.rtr_a,\n begin_time=td.rtr_a.begin_time,\n ifIndex=1,\n 
ifName=\"xe-1/0/0\",\n ifAlias=\"test interface:hide:\",\n ipAddr=\"10.0.0.1\",\n ifSpeed=0,\n ifHighSpeed=10000,\n ifMtu=9000,\n ifOperStatus=1,\n ifAdminStatus=1,\n ifPhysAddress=\"00:00:00:00:00:00\")\n\n\n IfRef.objects.get_or_create(\n device=td.rtr_b,\n ifIndex=1,\n ifName=\"xe-1/0/0\",\n ifAlias=\"test interface\",\n ipAddr=\"10.0.0.2\",\n ifSpeed=0,\n ifHighSpeed=10000,\n ifMtu=9000,\n ifOperStatus=1,\n ifAdminStatus=1,\n ifPhysAddress=\"00:00:00:00:00:00\",\n begin_time=rtr_b_begin,\n end_time=rtr_b_end)\n\n IfRef.objects.get_or_create(\n device=td.rtr_b,\n ifIndex=1,\n ifName=\"xe-2/0/0\",\n ifAlias=\"test interface\",\n ipAddr=\"10.0.0.2\",\n ifSpeed=0,\n ifHighSpeed=10000,\n ifMtu=9000,\n ifOperStatus=1,\n ifAdminStatus=1,\n ifPhysAddress=\"00:00:00:00:00:00\",\n begin_time=rtr_b_begin,\n end_time=rtr_b_begin + datetime.timedelta(days=4))\n\n IfRef.objects.get_or_create(\n device=td.rtr_b,\n ifIndex=1,\n ifName=\"xe-2/0/0\",\n ifAlias=\"test interface with new ifAlias\",\n ipAddr=\"10.0.1.2\",\n ifSpeed=0,\n ifHighSpeed=10000,\n ifMtu=9000,\n ifOperStatus=1,\n ifAdminStatus=1,\n ifPhysAddress=\"00:00:00:00:00:00\",\n begin_time=rtr_b_begin + datetime.timedelta(days=4),\n end_time=rtr_b_begin + datetime.timedelta(days=7))\n\n IfRef.objects.get_or_create(\n device=td.rtr_inf,\n begin_time=td.rtr_inf.begin_time,\n ifIndex=1,\n ifName=\"xe-3/0/0\",\n ifAlias=\"test interface\",\n ipAddr=\"10.0.0.3\",\n ifSpeed=0,\n ifHighSpeed=10000,\n ifMtu=9000,\n ifOperStatus=1,\n ifAdminStatus=1,\n ifPhysAddress=\"00:00:00:00:00:00\")\n\n IfRef.objects.get_or_create(\n device=td.rtr_alu,\n begin_time=td.rtr_inf.begin_time,\n ifIndex=1,\n ifName=\"3/1/1\",\n ifAlias=\"test interface\",\n ipAddr=\"10.0.0.4\",\n ifSpeed=0,\n ifHighSpeed=10000,\n ifMtu=9000,\n ifOperStatus=1,\n ifAdminStatus=1,\n ifPhysAddress=\"00:00:00:00:00:00\")\n\n users_testdata(td)\n\n return td", "title": "" }, { "docid": "75b26254854866d79cd3ea007b90fe10", "score": "0.5438519", "text": "def create_experiment_data(save_file_name, noise=0, kinetics=2, number_samples=1, noise_std=0.05):\n # generate no-noise experimental data for testing identifiability\n user_ode_opts = {'iter': 'Newton', 'discr': 'Adams', 'atol': 1e-10, 'rtol': 1e-10,\n 'time_points': 200, 'display_progress': True, 'verbosity': 30}\n # initial ss to begin all simulations from\n y0 = np.array([5, 1, 1])\n # get and set true parameter values, if available separately\n default_parameters = true_parameter_values()\n\n # create simulation object to simulate model with above parameters and initial conditions\n model_1 = ModelSim(kotte_model.kotte_ck_ode, kotte_model.kotte_ck_flux, noise=noise, **{'kinetics': kinetics,\n 'ode_opts': user_ode_opts,\n 't_final': 200,\n 'wt_y0': y0,\n 'i_parameter':\n default_parameters,\n 'sample_size':\n number_samples,\n 'noise_std': noise_std})\n # initial value determination for wt before perturbation\n wt_ss, wt_dynamic = model_1.run_initial_sim([default_parameters], ['default_parameters'])\n\n # all parameter perturbations\n parameter_perturbation = [{\"wt\": 0}, {\"ac\": 1}, {\"ac\": 4}, {\"ac\": 9}, {\"ac\": -.1}, {\"ac\": -.5},\n {\"k1cat\": .1}, {\"k1cat\": .5}, {\"k1cat\": 1}, {\"k1cat\": -.1}, {\"k1cat\": -.5},\n {\"V3max\": .1}, {\"V3max\": .5}, {\"V3max\": 1}, {\"V3max\": -.1}, {\"V3max\": -.5},\n {\"V2max\": .1}, {\"V2max\": .5}, {\"V2max\": 1}, {\"V2max\": -.1}, {\"V2max\": -.5}]\n\n experiment_id = ['experiment_{}'.format(parameter_id) for parameter_id, _ in enumerate(parameter_perturbation)]\n experiment_details = 
model_1.change_parameter_values(parameter_perturbation)\n\n # call model.simulate to get initial (WT) steady state for all parameter sets strating from same y0\n model_1.sim_model(parameter=experiment_details, experiment_ids=experiment_id, initial_value=[wt_ss[0]['y']])\n\n # create dictionary suitable for writing to df\n experiment_df, multi_index_labels = model_1.create_df(parameter_perturbation, experiment_details)\n\n # get experimental system steady state data without noise using Convenience Kinetics for v3 (kinetics = 2)\n # experiment_df, multi_index_labels, dyn_df, dyn_labels = generate_expdata(y0, cvode_options, ode_parameter_values,\n # noise=noise, kinetics=kinetics,\n # dynamic_plot=0, perturbation_plot=0,\n # number_of_samples=number_samples, noise_std=noise_std)\n\n # save data frame to csv file\n experiment_df.to_csv(save_file_name, index_label=multi_index_labels)\n # dyn_df.to_csv(save_dyn_file_name, index_label=dyn_labels)\n print(' Experiment Data written to file \\n')\n\n return experiment_df", "title": "" }, { "docid": "478763ce883bf600e9302e485503d614", "score": "0.54301673", "text": "def save_model(parameters,metainfo,name):\n\tif not(os.path.isdir(name)):\n\t\tos.mkdir(name)\n\n\tFILE = open(os.path.join(name,'metainfo.txt'),'w')\n\tmetainfo = yaml.dump(metainfo,FILE)\n\tFILE.close()\n\n\tfor dim_index in range(len(dimensions)):\n\t\ttorch.save(parameters[dim_index][0],os.path.join(name,'b'+str(dim_index))+'.pt')\n\t\ttorch.save(parameters[dim_index][1],os.path.join(name,'w'+str(dim_index))+'.pt')", "title": "" }, { "docid": "a47920331424b07508e5e3713a31e76f", "score": "0.5401527", "text": "def get_data_info(self):\n Data_info = {}\n data_er_samples = {}\n data_er_sites = {}\n data_er_locations = {}\n data_er_ages = {}\n\n if self.data_model == 3.0:\n print((\"data model: %1.1f\" % (self.data_model)))\n Data_info[\"er_samples\"] = []\n Data_info[\"er_sites\"] = []\n Data_info[\"er_locations\"] = []\n Data_info[\"er_ages\"] = []\n\n # self.magic_file may have a full path, but this breaks cb.Contribution\n # determine if magic_file exists in WD, and if it doesn't, copy it in\n\n magic_file_real = os.path.realpath(self.magic_file)\n magic_file_short = os.path.split(self.magic_file)[1]\n WD_file_real = os.path.realpath(\n os.path.join(self.WD, magic_file_short))\n if magic_file_real == WD_file_real:\n fnames = {'measurements': magic_file_short}\n else:\n # copy measurements file to WD, keeping original name\n shutil.copy(magic_file_real, WD_file_real)\n fnames = {'measurements': magic_file_short}\n self.con = cb.Contribution(self.WD, custom_filenames=fnames, read_tables=[\n 'measurements', 'specimens', 'samples', 'sites', 'locations', 'criteria', 'ages'])\n if 'specimens' in self.con.tables:\n spec_container = self.con.tables['specimens']\n self.spec_data = spec_container.df\n else:\n self.con.add_empty_magic_table('specimens')\n self.spec_data = self.con.tables['specimens'].df\n if 'samples' in self.con.tables:\n samp_container = self.con.tables['samples']\n samp_container.front_and_backfill(['azimuth', 'dip'])\n self.samp_data = samp_container.df\n samp_data2 = self.samp_data.rename(\n columns=map_magic.samp_magic3_2_magic2_map)\n data_er_samples = samp_data2.T.to_dict()\n else:\n self.con.add_empty_magic_table('samples')\n self.samp_data = self.con.tables['samples'].df\n if 'sites' in self.con.tables:\n site_container = self.con.tables['sites']\n self.site_data = site_container.df\n if 'age' in self.site_data.columns:\n self.site_data = 
self.site_data[self.site_data['age'].notnull(\n )]\n age_ids = [col for col in self.site_data.columns if col.startswith(\n \"age\") or col == \"site\"]\n age_data = self.site_data[age_ids].rename(\n columns=map_magic.site_magic3_2_magic2_map)\n # save this in 2.5 format\n er_ages = age_data.to_dict('records')\n data_er_ages = {}\n for s in er_ages:\n s = self.convert_ages_to_calendar_year(s)\n data_er_ages[s['er_site_name']] = s\n sites = self.site_data.rename(\n columns=map_magic.site_magic3_2_magic2_map)\n # pick out what is needed by thellier_gui and put in 2.5 format\n er_sites = sites.to_dict('records')\n data_er_sites = {}\n for s in er_sites:\n data_er_sites[s['er_site_name']] = s\n else:\n self.con.add_empty_magic_table('sites')\n self.site_data = self.con.tables['sites'].df\n if 'locations' in self.con.tables:\n location_container = self.con.tables[\"locations\"]\n self.loc_data = location_container.df # only need this for saving tables\n if self.loc_data['location'].isnull().any():\n self.loc_data.replace(\n {'location': {None: 'unknown'}}, inplace=True)\n self.loc_data.set_index('location', inplace=True)\n self.loc_data['location'] = self.loc_data.index\n loc2_data = self.loc_data.rename(\n columns=map_magic.loc_magic3_2_magic2_map)\n # there were problems with this command in some contributions:\n #data_er_locations = loc2_data.to_dict('index') #TRY\n else:\n self.con.add_empty_magic_table('locations')\n self.loc_data = self.con.tables['locations'].df\n\n else: # try 2.5 data model\n\n print((\"data model: %1.1f\" % (self.data_model)))\n self.read_magic_file(os.path.join(\n self.WD, \"er_samples.txt\"), 'er_sample_name')\n\n try:\n data_er_samples = self.read_magic_file(\n os.path.join(self.WD, \"er_samples.txt\"), 'er_sample_name')\n except:\n print(\"-W- Can't find er_sample.txt in project directory\")\n\n try:\n data_er_sites = self.read_magic_file(\n os.path.join(self.WD, \"er_sites.txt\"), 'er_site_name')\n except:\n print(\"-W- Can't find er_sites.txt in project directory\")\n\n try:\n data_er_locations = self.read_magic_file(os.path.join(\n self.WD, \"er_locations.txt\"), 'er_location_name')\n except:\n print(\"-W- Can't find er_locations.txt in project directory\")\n\n try:\n data_er_ages = self.read_magic_file(\n os.path.join(self.WD, \"er_ages.txt\"), 'er_sample_name')\n except:\n try:\n data_er_ages = self.read_magic_file(\n os.path.join(self.WD, \"er_ages.txt\"), 'er_site_name')\n except:\n print(\"-W- Can't find er_ages in project directory\")\n\n Data_info[\"er_samples\"] = data_er_samples\n Data_info[\"er_sites\"] = data_er_sites\n Data_info[\"er_locations\"] = data_er_locations\n Data_info[\"er_ages\"] = data_er_ages\n\n return(Data_info)", "title": "" }, { "docid": "6bd838d35063434079fb4006cf037db6", "score": "0.54003215", "text": "def savemodel(self):\n\n epochs = 2\n\n # Only way to test data generator is to flow_from_directory and train\n # simplenet.\n\n X, y = self.load_data()\n\n model = self.SimpleNet()\n\n fit_dict, compile_dict = self.model_dicts(epochs)\n\n model.compile(**compile_dict)\n\n model.fit(X, y, **fit_dict)\n\n filepath = str(os.path.join(self.data_dir, 'Whole_Model_test.h5'))\n\n model.save(filepath)\n os.remove(filepath)", "title": "" }, { "docid": "4aaba645fdb0cec6dae95b2e4a0f1fb2", "score": "0.53935707", "text": "def create_model(self):", "title": "" }, { "docid": "0addac3fd4f58a871a43e9db3903683f", "score": "0.53929067", "text": "def test_build_sample1(self):\n data = [\n {\n \"file_name\": 
\"JW_MEL_007_NORM_IGO_10075_D_2_3_S15_R1_001.fastq.gz\",\n \"id\": UUID(\"a46c5e6b-0793-4cd2-b5dd-92b3d71cf1ac\"),\n \"metadata\": {\n \"R\": \"R1\",\n \"baitSet\": \"IMPACT468_BAITS\",\n \"barcodeId\": None,\n \"barcodeIndex\": None,\n \"captureConcentrationNm\": \"10.377358490566039\",\n \"captureInputNg\": \"110.0\",\n \"captureName\": \"Pool-05257_CD-06287_AY-10075_D_2-Tube2_1\",\n settings.CMO_SAMPLE_CLASS_METADATA_KEY: \"Normal\",\n \"collectionYear\": \"\",\n \"dataAnalystEmail\": \"\",\n \"dataAnalystName\": \"\",\n \"externalSampleId\": \"JW_MEL_007_NORM\",\n \"flowCellId\": \"HCYYWBBXY\",\n \"flowCellLanes\": [1, 2, 3],\n settings.IGO_COMPLETE_METADATA_KEY: True,\n \"investigatorEmail\": \"email2@internet.com\",\n \"investigatorName\": \"Jane Doe\",\n \"investigatorSampleId\": \"JW_MEL_007_NORM\",\n \"labHeadEmail\": \"email@internet.com\",\n \"labHeadName\": \"John Smith\",\n \"libraryConcentrationNgul\": 10.6,\n settings.LIBRARY_ID_METADATA_KEY: \"10075_D_2_3\",\n \"libraryVolume\": None,\n settings.ONCOTREE_METADATA_KEY: None,\n settings.PATIENT_ID_METADATA_KEY: \"C-8VK0V7\",\n \"piEmail\": \"\",\n \"preservation\": \"EDTA-Streck\",\n \"projectManagerName\": \"\",\n \"readLength\": \"101/8/8/101\",\n settings.RECIPE_METADATA_KEY: \"IMPACT468\",\n settings.REQUEST_ID_METADATA_KEY: \"10075_D_2\",\n \"sequencingCenter\": \"MSKCC\",\n \"platform\": \"Illumina\",\n \"runDate\": \"2019-12-12\",\n \"runId\": \"JAX_0397\",\n \"runMode\": \"HiSeq High Output\",\n settings.SAMPLE_ID_METADATA_KEY: \"10075_D_2_3\",\n settings.CMO_SAMPLE_NAME_METADATA_KEY: \"C-8VK0V7-N001-d\",\n \"sampleOrigin\": \"Plasma\",\n \"sex\": \"F\",\n \"species\": \"Human\",\n settings.SAMPLE_CLASS_METADATA_KEY: \"Blood\",\n \"tissueLocation\": \"\",\n \"tumorOrNormal\": \"Normal\",\n },\n \"path\": \"/ifs/archive/GCL/hiseq/FASTQ/JAX_0397_BHCYYWBBXY/Project_10075_D_2/Sample_JW_MEL_007_NORM_IGO_10075_D_2_3/JW_MEL_007_NORM_IGO_10075_D_2_3_S15_R1_001.fastq.gz\",\n },\n {\n \"file_name\": \"JW_MEL_007_NORM_IGO_10075_D_2_3_S15_R2_001.fastq.gz\",\n \"id\": UUID(\"c71c259a-ebc0-4490-9af1-bc99387a70d7\"),\n \"metadata\": {\n \"R\": \"R2\",\n \"baitSet\": \"IMPACT468_BAITS\",\n \"barcodeId\": None,\n \"barcodeIndex\": None,\n \"captureConcentrationNm\": \"10.377358490566039\",\n \"captureInputNg\": \"110.0\",\n \"captureName\": \"Pool-05257_CD-06287_AY-10075_D_2-Tube2_1\",\n settings.CMO_SAMPLE_CLASS_METADATA_KEY: \"Normal\",\n \"collectionYear\": \"\",\n \"dataAnalystEmail\": \"\",\n \"dataAnalystName\": \"\",\n \"externalSampleId\": \"JW_MEL_007_NORM\",\n \"flowCellId\": \"HCYYWBBXY\",\n \"flowCellLanes\": [1, 2, 3],\n settings.IGO_COMPLETE_METADATA_KEY: True,\n \"investigatorEmail\": \"email2@internet.com\",\n \"investigatorName\": \"Jane Doe\",\n \"investigatorSampleId\": \"JW_MEL_007_NORM\",\n \"labHeadEmail\": \"email@internet.com\",\n \"labHeadName\": \"John Smith\",\n \"libraryConcentrationNgul\": 10.6,\n \"libraryIgoId\": None,\n \"libraryVolume\": None,\n settings.ONCOTREE_METADATA_KEY: None,\n settings.PATIENT_ID_METADATA_KEY: \"C-8VK0V7\",\n \"piEmail\": \"\",\n \"preservation\": \"EDTA-Streck\",\n \"projectManagerName\": \"\",\n \"readLength\": \"101/8/8/101\",\n settings.RECIPE_METADATA_KEY: \"IMPACT468\",\n settings.REQUEST_ID_METADATA_KEY: \"10075_D_2\",\n \"sequencingCenter\": \"MSKCC\",\n \"platform\": \"Illumina\",\n \"runDate\": \"2019-12-12\",\n \"runId\": \"JAX_0397\",\n \"runMode\": \"HiSeq High Output\",\n settings.SAMPLE_ID_METADATA_KEY: \"10075_D_2_3\",\n settings.CMO_SAMPLE_NAME_METADATA_KEY: 
\"C-8VK0V7-N001-d\",\n \"sampleOrigin\": \"Plasma\",\n \"sex\": \"F\",\n \"species\": \"Human\",\n settings.SAMPLE_CLASS_METADATA_KEY: \"Blood\",\n \"tissueLocation\": \"\",\n \"tumorOrNormal\": \"Normal\",\n },\n \"path\": \"/ifs/archive/GCL/hiseq/FASTQ/JAX_0397_BHCYYWBBXY/Project_10075_D_2/Sample_JW_MEL_007_NORM_IGO_10075_D_2_3/JW_MEL_007_NORM_IGO_10075_D_2_3_S15_R2_001.fastq.gz\",\n },\n ]\n\n sample = build_sample(data)\n\n expected_sample = {\n \"CN\": \"MSKCC\",\n \"ID\": [\"s_C_8VK0V7_N001_d_HCYYWBBXY\"],\n \"LB\": \"10075_D_2_3\",\n \"PL\": \"Illumina\",\n \"PU\": [\"HCYYWBBXY\"],\n \"R1\": [\n \"/ifs/archive/GCL/hiseq/FASTQ/JAX_0397_BHCYYWBBXY/Project_10075_D_2/Sample_JW_MEL_007_NORM_IGO_10075_D_2_3/JW_MEL_007_NORM_IGO_10075_D_2_3_S15_R1_001.fastq.gz\"\n ],\n \"R1_bid\": [UUID(\"a46c5e6b-0793-4cd2-b5dd-92b3d71cf1ac\")],\n \"R2\": [\n \"/ifs/archive/GCL/hiseq/FASTQ/JAX_0397_BHCYYWBBXY/Project_10075_D_2/Sample_JW_MEL_007_NORM_IGO_10075_D_2_3/JW_MEL_007_NORM_IGO_10075_D_2_3_S15_R2_001.fastq.gz\"\n ],\n \"R2_bid\": [UUID(\"c71c259a-ebc0-4490-9af1-bc99387a70d7\")],\n \"bam\": [],\n \"bam_bid\": [],\n \"SM\": \"s_C_8VK0V7_N001_d\",\n \"bait_set\": \"IMPACT468_BAITS\",\n \"sample_id\": \"10075_D_2_3\",\n \"patient_id\": \"C-8VK0V7\",\n \"request_id\": \"10075_D_2\",\n \"run_date\": [\"2019-12-12\"],\n \"run_id\": [\"JAX_0397\"],\n \"preservation_type\": [\"EDTA-Streck\"],\n \"species\": \"Human\",\n \"specimen_type\": \"Blood\",\n \"tumor_type\": \"Normal\",\n \"pi\": \"John Smith\",\n \"pi_email\": \"email@internet.com\",\n }\n\n print(\"Testing build_sample ---\")\n print(json.dumps(sample, cls=UUIDEncoder))\n print(json.dumps(expected_sample, cls=UUIDEncoder))\n\n self.assertTrue(sample == expected_sample)", "title": "" }, { "docid": "489508c1a6a05febe3830bdab73c7b8e", "score": "0.5379084", "text": "def _create_model(self):", "title": "" }, { "docid": "2e10893ebae0e8c9367772ca54c366e4", "score": "0.5377167", "text": "def create_model(train: NDict, paths: NDict) -> torch.nn.Module:\n if train[\"target\"] == \"classification\":\n num_classes = 2\n gt_label = \"data.gt.classification\"\n skip_keys = [\"data.gt.subtype\"]\n class_names = [\"Benign\", \"Malignant\"]\n elif train[\"target\"] == \"subtype\":\n num_classes = 4\n gt_label = \"data.gt.subtype\"\n skip_keys = [\"data.gt.classification\"]\n class_names = [\"Luminal A\", \"Luminal B\", \"HER2-enriched\", \"triple negative\"]\n else:\n raise (\"unsuported target!!\")\n model = ModelMultiHead(\n conv_inputs=((\"data.input.img\", 1),),\n backbone=BackboneInceptionResnetV2(input_channels_num=1),\n heads=[\n HeadGlobalPoolingClassifier(\n head_name=\"head_0\",\n dropout_rate=0.5,\n conv_inputs=[(\"model.backbone_features\", 384)],\n layers_description=(256,),\n num_classes=num_classes,\n pooling=\"avg\",\n ),\n ],\n )\n # create lightining trainer.\n pl_trainer = Trainer(\n default_root_dir=paths[\"model_dir\"],\n max_epochs=train[\"trainer\"][\"num_epochs\"],\n accelerator=train[\"trainer\"][\"accelerator\"],\n devices=train[\"trainer\"][\"devices\"],\n num_sanity_val_steps=-1,\n )\n return model, pl_trainer, num_classes, gt_label, skip_keys, class_names", "title": "" }, { "docid": "627618e839050d3a20b2ab194184f13f", "score": "0.53744453", "text": "def meta(self) -> MetaDict:\n return self.training_configs[self.final_identifier].meta", "title": "" }, { "docid": "19decea0225a76b3df78558838f76a9c", "score": "0.53706425", "text": "def create_model(self, compartmentId, projectId, dataassetId, displayName, description, fap=0.01, 
trainingFraction=0.7):\n # fap - Model parameter: False Acceptance Percentage, have to be in range [0.01, 0.05]\n # trainingFraction - Model parameter: Training Fraction, a percentage to split data into training and test, value range [0.7, 0.9]\n\n TRAIN_URL = f\"{CLOUD_ENV_URL}/20210101/models\"\n train_payload = {\n \"compartmentId\": compartmentId,\n \"displayName\": displayName,\n \"description\": description,\n \"projectId\": projectId,\n \"modelCreationDetails\": {\n \"modelType\": \"ANOMALY_MULTIVARIATE\",\n \"fap\": fap,\n \"trainingFraction\": trainingFraction,\n \"dataAssets\": [ dataassetId ]\n }\n }\n\n session = requests.Session()\n create_train_res = session.request(\"POST\", TRAIN_URL, headers=HEADER, data=json.dumps(train_payload),\n allow_redirects=True, auth=auth)\n create_train_json = json.loads(create_train_res.text)\n print(create_train_res.text)\n model_id = create_train_json[\"id\"]\n\n # This loop waits for train to be created (upto 15 mins) depending on your training data size u might want to wait longer\n timeout = time.time() + 60 * 15\n while True:\n view_train_res = session.request(\"GET\", f\"{TRAIN_URL}/{model_id}\", headers=HEADER,\n allow_redirects=True, auth=auth)\n view_train_json = json.loads(view_train_res.text)\n if time.time() > timeout:\n print(\"TIMEOUT\")\n break\n elif view_train_json[\"lifecycleState\"] == \"ACTIVE\":\n print(\"SUCCESS\")\n break\n elif view_train_json[\"lifecycleState\"] == \"CREATING\":\n print(\"Still creating...\")\n elif view_train_json[\"lifecycleState\"] == \"FAILED\":\n print(\"FAILED\")\n break\n time.sleep(10)\n\n return model_id", "title": "" }, { "docid": "9c36e383e7c9fefdab761a010711d9b1", "score": "0.53618383", "text": "def prepare_test_file(odir, trname):\n\n # read json file\n test = pd.read_json(trname)\n\n # clean the text\n test['feature'] = test.apply(feature_process_test, args=[nlp, spacy_stopwords], axis=1)\n\n # write to json into odir\n test['feature'].to_json(os.path.join(odir, 'actual_test_feature.json'), orient='records', lines=True)", "title": "" }, { "docid": "a86fb4bbcb6a51328644fe56b3434fa7", "score": "0.535647", "text": "def build_fake_data(num_examples=10):\n\n class Dummy(object):\n pass\n\n num_examples = 10\n mnist_data = Dummy()\n mnist_data.train = Dummy()\n mnist_data.train.images = np.float32(np.random.randn(\n num_examples, np.prod(IMAGE_SHAPE)))\n mnist_data.train.labels = np.int32(np.random.permutation(\n np.arange(num_examples)))\n mnist_data.train.num_examples = num_examples\n mnist_data.validation = Dummy()\n mnist_data.validation.images = np.float32(np.random.randn(\n num_examples, np.prod(IMAGE_SHAPE)))\n mnist_data.validation.labels = np.int32(np.random.permutation(\n np.arange(num_examples)))\n mnist_data.validation.num_examples = num_examples\n return mnist_data", "title": "" }, { "docid": "e21c8220ec8f0bca76ed9fbe816de735", "score": "0.5349457", "text": "def meta_data():\n meta_data = {\n \"data_id\": 123,\n \"microscope\": \"3iW1-0\",\n \"stage_location\": [100, 100, 100],\n \"xy_pixel_size\": 0.1,\n \"z_spacing\": 1.0,\n \"x_stage_direction\": 1,\n \"y_stage_direction\": 1,\n \"z_stage_direction\": 1,\n \"is_montage\": False,\n \"time_stamp\": \"2019-10-19:12:24;12\",\n }\n return meta_data", "title": "" }, { "docid": "a7982fb44859889a52cbe73120de64f8", "score": "0.53466135", "text": "def load_model_info(file):\n \n print ('Reading in: ' + file)\n model_file = open(file + '.json', 'r')\n loaded_model = model_file.read()\n model_file.close()\n model = 
model_from_json(loaded_model) \n model.load_weights(file + '.h5')\n return model", "title": "" }, { "docid": "50bb851aee132ec7aca54e58ba14a67e", "score": "0.5345808", "text": "def get_model_train_setup(model_name, args):\n if model_name == 'FastPitch':\n return dict()\n elif model_name == 'HiFi-GAN':\n return dict(\n # audio\n segment_size=args.segment_size,\n filter_length=args.filter_length,\n num_mels=args.num_mels,\n hop_length=args.hop_length,\n win_length=args.win_length,\n sampling_rate=args.sampling_rate,\n mel_fmin=args.mel_fmin,\n mel_fmax=args.mel_fmax,\n mel_fmax_loss=args.mel_fmax_loss,\n max_wav_value=args.max_wav_value,\n # other\n seed=args.seed,\n # optimization\n base_lr=args.learning_rate,\n lr_decay=args.lr_decay,\n epochs_all=args.epochs,\n )\n elif model_name == 'WaveGlow':\n return dict()\n else:\n raise NotImplementedError(model_name)", "title": "" }, { "docid": "7244c2f7e464edbf907c5463e84f77f5", "score": "0.5333062", "text": "def interact_model(\r\n #file1,file2,file3,\r\n model_name='1558M',\r\n seed=None,\r\n nsamples=1,\r\n batch_size=1,\r\n length=None,\r\n temperature=1,\r\n top_k=40,\r\n top_p=1,\r\n models_dir='models',\r\n):\r\n sameKeyword = True # True: for same keyword in all headings, False: for random keyword for each heading \r\n models_dir = os.path.expanduser(os.path.expandvars(models_dir))\r\n if batch_size is None:\r\n batch_size = 3\r\n assert nsamples % batch_size == 0\r\n\r\n enc = encoder.get_encoder(model_name, models_dir)\r\n hparams = model.default_hparams()\r\n with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\r\n hparams.override_from_dict(json.load(f))\r\n\r\n st_head = [\"<h1>\", \"<h2>\", \"<h3>\"]\r\n en_head = [\"</h1>\", \"</h2>\", \"</h3>\"]\r\n if length is None:\r\n length = 300#hparams.n_ctx - 2\r\n elif length > hparams.n_ctx:\r\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\r\n\r\n with tf.Session(graph=tf.Graph()) as sess:\r\n context = tf.placeholder(tf.int32, [batch_size, None])\r\n np.random.seed(seed)\r\n tf.set_random_seed(seed)\r\n output = sample.sample_sequence(\r\n hparams=hparams, length=length,\r\n context=context,\r\n batch_size=batch_size,\r\n temperature=temperature, top_k=top_k, top_p=top_p\r\n )\r\n\r\n saver = tf.train.Saver()\r\n ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))\r\n saver.restore(sess, ckpt)\r\n try:\r\n os.remove(\"output.csv\")\r\n except:\r\n pass\r\n outpt = csv.writer(open('output.csv', 'w', encoding='utf-8'))\r\n outpt.writerow([\"keyword\", \"GUID\", \"Description\", \"Tags\", \"Article\",\"Article-english\", \"Category\"])\r\n \r\n # open text file\r\n with open('tx654.txt') as f0:#open('u\\\\text.txt') as f0:#open('tx654.txt') as f0:\r\n txt = f0.readlines()\r\n \r\n # open title file\r\n with open('ttt165.txt') as f1:#open('u\\\\titles.txt') as f1: #open('ttt165.txt') as f1:\r\n titles = f1.readlines()\r\n\r\n # open keywords file\r\n with open('kk654.txt') as f2:#open('u\\\\keywords.txt') as f2: #open('kk654.txt') as f2:\r\n keywords = f2.readlines()\r\n\r\n # open images file\r\n with open('im95.txt') as f3:#open('u\\\\images.txt') as f3: #open('im95.txt') as f3:\r\n images = f3.readlines()\r\n\r\n\r\n \r\n\r\n\r\n for xm, (title,tt) in enumerate (zip(titles,txt)): \r\n keyword = translate(keywords[xm % len(keywords)]) \r\n print(\"=\" * 20) \r\n tt = tt[0:tt.rindex(\".\")]\r\n usd_titles = []\r\n #tt= tt.replace(\"\\n\",\"\")\r\n title = title.replace(\"\\n\",\"\") \r\n usd_titles.append(title)\r\n 
title = translate(title)\r\n highlight = title.split(\" \")\r\n highlight.extend(keyword.split(\" \"))\r\n\r\n \r\n\r\n\r\n print(\"Generating text for: \", title)\r\n print(\"Input Sentence: \", tt) \r\n print(\"=\" * 20)\r\n inps = tt.split(\".\")\r\n \r\n imgs = random.sample(images, min(len(inps)-1,len(images)))\r\n tits = random.sample(titles, min(len(inps)-1,len(titles)))\r\n kkw = [translate(k) for k in random.sample(keyword, min(len(inps)-1,len(keywords)))]\r\n\r\n temp = [translate(t.replace(\"\\n\",\"\")).split(\" \") for t in tits]\r\n [highlight.extend(tt) for tt in temp]\r\n\r\n\r\n\r\n article = \"\"\r\n art_eng = \"\"\r\n for enm,inp in enumerate(inps):\r\n \r\n while True:\r\n context_tokens = enc.encode(inp) \r\n out = sess.run(output, feed_dict={context: [context_tokens for _ in range(batch_size)]})[:, len(context_tokens):]\r\n if not \"<|endoftext|>\" in enc.decode(out[0]):\r\n break\r\n \r\n\r\n amb = inp + enc.decode(out[0])\r\n amb = amb[0:amb.rindex(\".\")] + \".\"\r\n \r\n art_eng += inp + amb\r\n article += highlight_Article(translate(inp + amb),highlight)\r\n if enm < len(inps)-1: \r\n img = imgs[enm].replace(\"\\n\",\"\")\r\n article += \"\\n <img src=\" + img + \" alt = \" + keyword + \"> \\n\"\r\n art_eng += \"\\n <img src=\" + img + \" alt = \" + keyword + \"> \\n\" \r\n \r\n t2 = tits[enm].replace(\"\\n\",\"\")\r\n \r\n hd = random.randint(0,2)\r\n if sameKeyword: \r\n kk = keyword\r\n else:\r\n kk = kkw[enm]\r\n article += st_head[hd] + kk + \" - \" + translate(t2) + en_head[hd] + \"\\n\" \r\n \r\n \r\n title = keyword +\" - \"+ title\r\n print(art_eng) \r\n #article = article.replace(\" <| Endoftext |>\", \"\") #\r\n #article = article.replace(\"<|endoftext|>\", \"\")\r\n #article = translate(article)\r\n #article = highlight_Article(article,highlight)\r\n tags = translate(\",\".join(selectRandom(keywords,3,4)))\r\n categories = translate(\",\".join(selectRandom(keywords,1,2)))\r\n #article = addImages(article,images)\r\n outpt.writerow([keyword, xm+1, title, tags, article,art_eng, categories])", "title": "" }, { "docid": "01208fbdcc8fde8d760fa147b5a189b6", "score": "0.532804", "text": "def setUp(self):\n # Assign an existing FMU to the model, depending on the platform identified\n dir_path = os.path.dirname(__file__)\n \n # Define the path of the FMU file\n if platform.architecture()[0]==\"32bit\":\n print \"32-bit architecture\"\n self.filePath = os.path.join(dir_path, \"..\", \"modelica\", \"FmuExamples\", \"Resources\", \"FMUs\", \"FirstOrder.fmu\")\n else:\n print \"64-bit architecture\"\n self.filePath = os.path.join(dir_path, \"..\", \"modelica\", \"FmuExamples\", \"Resources\", \"FMUs\", \"FirstOrder_64bit.fmu\")\n \n # Path of the CSV data\n self.csv_inputPath = os.path.join(dir_path, \"..\", \"modelica\", \"FmuExamples\", \"Resources\", \"data\", \"SimulationData_FirstOrder.csv\")", "title": "" }, { "docid": "d43099bda5b622012d866de6e76eeb9a", "score": "0.5327353", "text": "def __loadMeta(self):\n\n def load(meta_dir: str, nb_file: int = None):\n if meta_dir != \"\":\n with open(meta_dir) as f:\n data = f.readlines()\n\n if nb_file is None:\n return [d.split(\"\\t\") for d in data[1:]]\n else:\n return [d.split(\"\\t\") for d in data[1:]][:nb_file]\n\n # load meta data only on the first features (to keep the order)\n self.metadata[\"weak\"] = load(self.meta_train_weak)\n self.metadata[\"uid\"] = load(self.meta_train_uid)\n self.metadata[\"test\"] = load(self.meta_test)\n\n # Use to extend training dataset, gather only 0.2 * len(training_dataset) 
of the uod\n if self.expand_with_uod:\n nb_file_for_uod = len(self.metadata[\"weak\"]) * self.expand_percent\n self.metadata[\"uod\"] = load(self.meta_train_uod, int(nb_file_for_uod))", "title": "" }, { "docid": "d8b0a5600840c1547409b852532c8bd8", "score": "0.53234285", "text": "def make_model(args):\n if args.recipe in [\"train\", \"train_simclr\", \"infer\", \"build\"]:\n model = make_multimodal_multitask_model(**args.__dict__)\n if args.model_file is None:\n plot_condensed_architecture_diagram(\n activation_layer=args.activation_layer,\n bottleneck_type=args.bottleneck_type,\n conv_block_layer_order=args.conv_block_layer_order,\n conv_block_size=args.conv_block_size,\n conv_blocks=args.conv_blocks,\n conv_type=args.conv_type,\n dense_block_layer_order=args.dense_block_layer_order,\n dense_block_size=args.dense_block_size,\n dense_blocks=args.dense_blocks,\n dense_dropout=args.dense_dropout,\n dense_layer_order=args.dense_layer_order,\n dense_layers=args.dense_layers,\n image_ext=args.image_ext,\n normalization_layer=args.normalization_layer,\n output_folder=args.output_folder,\n pool_type=args.pool_type,\n residual_block_layer_order=args.residual_block_layer_order,\n residual_blocks=args.residual_blocks,\n spatial_dropout=args.spatial_dropout,\n tensor_maps_in=args.tensor_maps_in,\n tensor_maps_out=args.tensor_maps_out,\n )\n plot_architecture_diagram(\n model=model,\n output_folder=args.output_folder,\n image_ext=args.image_ext,\n )\n elif args.recipe == \"train_keras_logreg\":\n model = make_shallow_model(\n tensor_maps_in=args.tensor_maps_in,\n tensor_maps_out=args.tensor_maps_out,\n optimizer=args.optimizer,\n learning_rate=args.learning_rate,\n learning_rate_schedule=args.learning_rate_schedule,\n model_file=args.model_file,\n donor_layers=args.donor_layers,\n l1=args.l1,\n l2=args.l2,\n )\n image_path = os.path.join(args.output_folder, f\"architecture{args.image_ext}\")\n plot_architecture_diagram(\n model=model,\n output_folder=args.output_folder,\n image_ext=args.image_ext,\n )\n else:\n hyperparameters = {}\n if args.recipe == \"train_sklearn_logreg\":\n if args.l1 == 0 and args.l2 == 0:\n c = 1e7\n else:\n c = 1 / (args.l1 + args.l2)\n hyperparameters[\"c\"] = c\n hyperparameters[\"l1_ratio\"] = c * args.l1\n elif args.recipe == \"train_sklearn_svm\":\n hyperparameters[\"c\"] = args.c\n elif args.recipe == \"train_sklearn_randomforest\":\n hyperparameters[\"n_estimators\"] = args.n_estimators\n hyperparameters[\"max_depth\"] = args.max_depth\n hyperparameters[\"min_samples_split\"] = args.min_samples_split\n hyperparameters[\"min_samples_leaf\"] = args.min_samples_leaf\n elif args.recipe == \"train_sklearn_xgboost\":\n hyperparameters[\"n_estimators\"] = args.n_estimators\n hyperparameters[\"max_depth\"] = args.max_depth\n hyperparameters[\"gamma\"] = args.gamma\n hyperparameters[\"l1_ratio\"] = args.l1\n hyperparameters[\"l2_ratio\"] = args.l2\n else:\n raise ValueError(\"Unknown train mode: \", args.recipe)\n assert len(args.tensor_maps_out) == 1\n model_type = args.recipe.split(\"_\")[-1]\n model = make_sklearn_model(\n model_type=model_type,\n hyperparameters=hyperparameters,\n num_workers=args.num_workers,\n )\n return model", "title": "" }, { "docid": "79d2d99a37f2e6d9ba9cb336b7c44652", "score": "0.5323147", "text": "def get_model_spec():\n return {\"model_name\": model_params[\"model_name\"], \n \"pretrained\": model_params[\"pretrained\"], \n \"arg_scope_dict\": {\"weight_decay\": model_params[\"weight_decay\"]}}", "title": "" }, { "docid": 
"efc0ac80a1ea1630876fa58530ddc134", "score": "0.532015", "text": "def test_task(\n model_file: InputBinaryFile(str),\n examples_file: InputBinaryFile(str),\n confusion_matrix: OutputTextFile(str),\n results: OutputTextFile(str),\n):\n\n import time\n import json\n\n import numpy as np\n import requests\n from tensorflow.python.keras.backend import get_session\n from tensorflow.python.keras.saving import load_model\n from tensorflow.python.saved_model.simple_save import simple_save\n\n with get_session() as sess:\n model = load_model(model_file)\n simple_save(\n sess,\n '/output/mnist/1/',\n inputs={'input_image': model.input},\n outputs={t.name: t for t in model.outputs},\n )\n\n model_url = 'http://localhost:9001/v1/models/mnist'\n\n for _ in range(60):\n try:\n requests.get(f'{model_url}/versions/1').raise_for_status()\n break\n except requests.RequestException:\n time.sleep(5)\n else:\n raise Exception(\"Waited too long for sidecar to come up!\")\n\n response = requests.get(f'{model_url}/metadata')\n response.raise_for_status()\n assert response.json() == {\n 'model_spec': {'name': 'mnist', 'signature_name': '', 'version': '1'},\n 'metadata': {\n 'signature_def': {\n 'signature_def': {\n 'serving_default': {\n 'inputs': {\n 'input_image': {\n 'dtype': 'DT_FLOAT',\n 'tensor_shape': {\n 'dim': [\n {'size': '-1', 'name': ''},\n {'size': '28', 'name': ''},\n {'size': '28', 'name': ''},\n {'size': '1', 'name': ''},\n ],\n 'unknown_rank': False,\n },\n 'name': 'conv2d_input:0',\n }\n },\n 'outputs': {\n 'dense_1/Softmax:0': {\n 'dtype': 'DT_FLOAT',\n 'tensor_shape': {\n 'dim': [{'size': '-1', 'name': ''}, {'size': '10', 'name': ''}],\n 'unknown_rank': False,\n },\n 'name': 'dense_1/Softmax:0',\n }\n },\n 'method_name': 'tensorflow/serving/predict',\n }\n }\n }\n },\n }\n\n examples = np.load(examples_file)\n assert examples['val_x'].shape == (100, 28, 28, 1)\n assert examples['val_y'].shape == (100, 10)\n\n response = requests.post(f'{model_url}:predict', json={'instances': examples['val_x'].tolist()})\n response.raise_for_status()\n\n predicted = np.argmax(response.json()['predictions'], axis=1).tolist()\n actual = np.argmax(examples['val_y'], axis=1).tolist()\n zipped = list(zip(predicted, actual))\n accuracy = sum(1 for (p, a) in zipped if p == a) / len(predicted)\n\n print(f\"Accuracy: {accuracy:0.2f}\")\n # TODO: Figure out how to access artifacts via pipelines UI\n # print(\"Generating confusion matrix\")\n # labels = list(range(10))\n # cm = [[0] * 10 for _ in range(10)]\n # for pred, target in zipped:\n # cm[target][pred] += 1\n # for target in range(10):\n # for predicted in range(10):\n # count = cm[target][predicted]\n # confusion_matrix.write(f'{target},{predicted},{count}\\n')\n #\n # with open('/output/mlpipeline-ui-metadata.json', 'w') as f:\n # json.dump(\n # {\n # \"version\": 1,\n # \"outputs\": [\n # {\n # \"type\": \"confusion_matrix\",\n # \"format\": \"csv\",\n # \"source\": \"minio://mlpipeline/cm.tgz\",\n # \"schema\": [\n # {\"name\": \"target\", \"type\": \"CATEGORY\"},\n # {\"name\": \"predicted\", \"type\": \"CATEGORY\"},\n # {\"name\": \"count\", \"type\": \"NUMBER\"},\n # ],\n # \"labels\": list(map(str, labels)),\n # }\n # ],\n # },\n # f,\n # )", "title": "" }, { "docid": "bdefacd631de56b0d0d5dc4280d406af", "score": "0.5319854", "text": "def model_preprocess(model, config):\n model.compile(loss=keras.losses.categorical_crossentropy, \n optimizer=keras.optimizers.Adam(), metrics=[F1_Class])\n model.summary() # print to a file\n\n #if not 
os.path.isdir(config.TRAIN_RESULT_PATH):\n # os.makedirs(config.TRAIN_RESULT_PATH)\n\n # Check if the folder of traiining result exists\n if not os.path.exists(config.TRAIN_RESULT_PATH):\n os.makedirs(config.TRAIN_RESULT_PATH, exist_ok=True)\n else:\n print(\"\\nERROR: The folder to hold trained models is not empty. Please delete it.\\n\")\n sys.exit() \n \n with open(os.path.join(config.TRAIN_RESULT_PATH, 'net_architecture.txt'), 'w') as f:\n with redirect_stdout(f):\n model.summary()\n return model", "title": "" }, { "docid": "2b1518bc9c9d8684e492c55d34b4c930", "score": "0.5317915", "text": "def _create_metadata(self, metadata, description, identifier: str) -> ExternalData:\n # initialize the summary\n summary = {\"type\": \"rnaseq_counts\", \"id\": identifier, \"title\": \"Public data from GREIN\",\n \"description\": \"Public dataset from Grein\",\n \"sample_ids\": list()\n }\n \n # change to a nice title if available\n if \"Title\" in description and description[\"Title\"]:\n summary[\"title\"] = \"GREIN dataset \" + identifier + \": \" + description[\"Title\"]\n if \"Summary\" in description and description[\"Summary\"]:\n summary[\"description\"] = description[\"Summary\"]\n\n samples = self._get_sample_ids(metadata) # gets sample ids for dictionary\n summary['sample_ids'] = samples\n metadata_obj = ExternalData.from_dict(summary) # converts th\n list_metadata = self._get_metadata(metadata)\n metadata_obj.sample_metadata = list_metadata # adds metadata via setter in the object\n return metadata_obj", "title": "" }, { "docid": "109ed554bc960330338b9ffe85eb2af7", "score": "0.53132576", "text": "def load_model(\n city:str,\n pollutant: str = 'PM2.5',\n build=False,\n update=True, with_interact=False):\n\n dataset = Dataset(city)\n dataset.monitor = dataset.pollutant = pollutant\n # remove . 
from pollutant name for saving file\n poll_name = pollutant.replace('.', '')\n if with_interact:\n dataset.with_interact = True\n \n else:\n dataset.with_interact = False\n \n # load model_meta\n poll_meta = load_meta(dataset.model_folder + f'{poll_name}_model_meta.json')\n \n split_lists = poll_meta['split_lists']\n\n # load model\n model = pickle.load(\n open(\n dataset.model_folder +\n f'{poll_name}_rf_model.pkl',\n 'rb'))\n\n\n if 'use_impute' in poll_meta.keys(): \n dataset.use_impute = poll_meta['use_impute']\n else:\n dataset.use_impute = 0\n\n if build:\n # build data from scratch\n dataset.build_all_data(build_fire=True, build_holiday=False)\n # load raw data\n dataset.load_()\n\n if 'zone_list' in poll_meta.keys():\n dataset.zone_list = poll_meta['zone_list']\n\n if 'with_interact' in poll_meta.keys():\n dataset.with_interact = poll_meta['with_interact']\n\n if 'log_poll' in poll_meta.keys():\n dataset.log_poll = poll_meta['log_poll']\n\n \n # build the first dataset\n #print('rolling_win', poll_meta['rolling_win'])\n dataset.feature_no_fire(\n pollutant=pollutant,\n rolling_win=poll_meta['rolling_win'],\n fill_missing=poll_meta['fill_missing'],\n cat_hour=poll_meta['cat_hour'],\n group_hour=poll_meta['group_hour'], \n cat_month=poll_meta['cat_month'])\n dataset.fire_dict = poll_meta['fire_dict']\n if 'zone_list' in poll_meta.keys():\n dataset.zone_list = poll_meta['zone_list']\n fire_cols, zone_list = dataset.merge_fire(dataset.fire_dict, damp_surface=dataset.fire_dict['damp_surface'], wind_damp=dataset.fire_dict['wind_damp'], wind_lag=dataset.fire_dict['wind_lag'])\n\n #print('\\n fire_columns', fire_cols)\n # build lag_data\n dataset.lag_dict = poll_meta['lag_dict']\n dataset.x_cols_org = poll_meta['x_cols_org']\n #print('\\n x_cols_org', dataset.x_cols_org)\n dataset.data_org = dataset.data[[dataset.monitor] + dataset.x_cols_org]\n dataset.build_lag(\n lag_range=np.arange(\n 1,\n dataset.lag_dict['n_max'],\n dataset.lag_dict['step']),\n roll=dataset.lag_dict['roll'])\n dataset.x_cols = poll_meta['x_cols']\n #print('\\n x_cols', dataset.x_cols)\n\n # split data\n dataset.split_data(split_ratio=split_lists[2])\n trn_index = dataset.split_list[0]\n test_index = dataset.split_list[1]\n\n xtrn, ytrn, dataset.x_cols, weights = dataset.get_data_matrix(\n use_index=trn_index, x_cols=dataset.x_cols)\n xtest, ytest, _, sample_weight = dataset.get_data_matrix(\n use_index=test_index, x_cols=dataset.x_cols)\n sample_weight = np.ones(len(ytest))\n\n if update:\n model.fit(xtrn, ytrn, weights)\n\n print(\n 'raw model performance',\n cal_scores(\n ytest,\n model.predict(xtest),\n header_str='test_', sample_weight=sample_weight))\n\n # calculate the average error\n trn_error = cal_error(dataset, model, data_index=trn_index)\n # resample\n ytrn_pred_df_avg = trn_error.resample('d').mean().dropna()\n print(\n 'daily avg training error',\n cal_scores(\n ytrn_pred_df_avg['actual'].values,\n ytrn_pred_df_avg['pred'].values,\n header_str='avg_trn_'))\n\n # calculate the average error\n ytest_pred_df = cal_error(dataset, model, data_index=test_index)\n # resample\n ytest_pred_df_avg = ytest_pred_df.resample('d').mean().dropna()\n print(\n 'daily avg test error',\n cal_scores(\n ytest_pred_df_avg['actual'].values,\n ytest_pred_df_avg['pred'].values,\n header_str='avg_test_'))\n\n # obtain feature of importance without lag\n importances = model.feature_importances_\n feat_imp = pd.DataFrame(\n importances,\n index=dataset.x_cols,\n columns=['importance'])\n feat_imp['imp_std'] = 
np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)\n feat_imp = feat_imp.sort_values(\n 'importance', ascending=False).reset_index()\n feat_imp['index'] = feat_imp['index'].str.split('_lag_', expand=True)[0]\n feat_imp = feat_imp.groupby('index').sum()\n feat_imp = feat_imp.sort_values(\n 'importance', ascending=False).reset_index()\n\n return dataset, model, fire_cols, zone_list, feat_imp, poll_meta", "title": "" }, { "docid": "d5f98acf8ac36721decc01e2a7345390", "score": "0.5312485", "text": "def _get_data(self):\r\n # train\r\n train_example_path = os.path.join(self.data_dir, 'train.txt')\r\n train_example_path_tfrecord = None if not self.save_tfrecord_dir \\\r\n else os.path.join(self.save_tfrecord_dir, 'train.tfrecord')\r\n self.train_examples = self._create_examples(\r\n self.read_file(train_example_path), 'train')\r\n self.num_train_examples = len(self.train_examples)\r\n if self.recreate_tfrecord and train_example_path_tfrecord and self.num_train_examples > 0:\r\n self._save_tfrecords(\r\n examples=self.train_examples, output_file=train_example_path_tfrecord)\r\n\r\n # dev\r\n dev_example_path = os.path.join(self.data_dir, 'dev.txt')\r\n dev_example_path_tfrecord = None if not self.save_tfrecord_dir \\\r\n else os.path.join(self.save_tfrecord_dir, 'dev.tfrecord')\r\n self.dev_examples = self._create_examples(\r\n self.read_file(dev_example_path), 'dev')\r\n self.num_dev_examples = len(self.dev_examples)\r\n if self.recreate_tfrecord and dev_example_path_tfrecord and self.num_dev_examples > 0:\r\n self._save_tfrecords(\r\n examples=self.dev_examples, output_file=dev_example_path_tfrecord)\r\n\r\n # test\r\n test_example_path = os.path.join(self.data_dir, 'test.txt')\r\n test_example_path_tfrecord = None if not self.save_tfrecord_dir \\\r\n else os.path.join(self.save_tfrecord_dir, 'test.tfrecord')\r\n if tf.gfile.Exists(test_example_path):\r\n self.test_examples = self._create_examples(\r\n self.read_file(test_example_path), 'test')\r\n self.num_test_examples = len(self.test_examples)\r\n if self.recreate_tfrecord and test_example_path_tfrecord and self.num_test_examples > 0:\r\n self._save_tfrecords(\r\n examples=self.test_examples, output_file=test_example_path_tfrecord)\r\n else:\r\n self.test_examples = None\r\n self.num_test_examples = 0", "title": "" }, { "docid": "c3e54bba36ee0a48f3ac3697991a594c", "score": "0.53102654", "text": "def makedtopo():\n from clawpack.geoclaw import okada\n dtopo_fname = 'usgs100227.tt1'\n dtopo_cfg = 'usgs100227.cfg'\n if os.path.exists(dtopo_fname):\n print \"*** Not regenerating dtopo file (already exists): %s\" % dtopo_fname\n else:\n print \"Using Okada model to create %s \" % dtopo_fname\n okada.builddynamicdeffile(dtopo_cfg, dtopo_cfg, dtopo_fname)", "title": "" }, { "docid": "19aca5c6c74704ef1f84b400c35107d6", "score": "0.53079414", "text": "def _gen_dataset_config(self) -> str:", "title": "" }, { "docid": "3f80d8bf0d13561785a58a1dda334206", "score": "0.5306163", "text": "def buildModel(self):\n\t\tself.verbose = self.config.getBooleanConfig(\"common.verbose\")[0]\n\t\tgrowth = self.config.getStringConfig(\"train.growth\")[0]\n\t\tchangepoints = self.changepoints\n\t\tnumChangepoints = self.config.getIntConfig(\"train.num.changepoints\")[0]\n\t\tchangepointRange = self.config.getFloatConfig(\"train.changepoint.range\")[0]\n\t\tyearlySeasonality = typedValue(self.config.getStringConfig(\"train.yearly.seasonality\")[0])\n\t\tweeklySeasonality = 
typedValue(self.config.getStringConfig(\"train.weekly.seasonality\")[0])\n\t\tdailySeasonality = typedValue(self.config.getStringConfig(\"train.daily.seasonality\")[0])\n\t\tholidays = self.holidays\n\t\tseasonalityMode = self.config.getStringConfig(\"train.seasonality.mode\")[0]\n\t\tseasonalityPriorScale = self.config.getFloatConfig(\"train.seasonality.prior.scale\")[0]\n\t\tholidaysPriorScale = self.config.getFloatConfig(\"train.holidays.prior.scale\")[0]\n\t\tchangepointPriorScale = self.config.getFloatConfig(\"train.changepoint.prior.scale\")[0]\n\t\tmcmcSamples = self.config.getIntConfig(\"train.mcmc.samples\")[0]\n\t\tintervalWidth = self.config.getFloatConfig(\"train.interval.width\")[0]\n\t\tuncertaintySamples = self.config.getIntConfig(\"train.uncertainty.samples\")[0]\n\n\t\t# parameter details at https://github.com/facebook/prophet/blob/main/python/prophet/forecaster.py\n\t\tself.model = Prophet(growth=growth, changepoints=changepoints, n_changepoints=numChangepoints,\\\n\t\t\tchangepoint_range=changepointRange, yearly_seasonality=yearlySeasonality, weekly_seasonality=weeklySeasonality,\\\n\t\t\tdaily_seasonality=dailySeasonality, holidays=holidays, seasonality_mode=seasonalityMode,\\\n \t\t\tseasonality_prior_scale=seasonalityPriorScale, holidays_prior_scale=holidaysPriorScale,\\\n\t\t\tchangepoint_prior_scale=changepointPriorScale,mcmc_samples=mcmcSamples,interval_width=intervalWidth,\\\n\t\t\tuncertainty_samples=uncertaintySamples)", "title": "" }, { "docid": "3d73507719ccddddb11ce50c5b3b64cc", "score": "0.5302936", "text": "def test_train_model_pred_custom():\n path_test_json = (\n pathlib.Path(__file__).parent / 'data' / 'train_options_custom.json'\n )\n subprocess.run(\n ['fnet', 'train', str(path_test_json), '--gpu_ids', '-1'],\n check=True,\n )\n assert os.path.exists('test_model_custom')\n subprocess.run(\n [\n 'fnet', 'predict', 'test_model_custom',\n '--dataset', 'dummymodule.dummy_custom_dataset',\n '--idx_sel', '2',\n '--gpu_ids', '-1',\n ],\n check=True,\n )\n for fname in ['tifs', 'predictions.csv', 'predict_options.json']:\n assert os.path.exists(os.path.join('predictions', fname))", "title": "" }, { "docid": "2075bd4ca73227a78bce91118f141d97", "score": "0.529975", "text": "def test_create_empty(self):\n config = export_unity_package.AssetConfiguration(self.package, {})\n self.assertEqual(self.default_metadata, config.importer_metadata)\n self.assertEqual(set(self.labels), config.labels)\n self.assertEqual(set(), config.paths)\n self.assertEqual({}, config.override_metadata)", "title": "" }, { "docid": "0aed4d8d9af29b9868adea9fee1b7cb1", "score": "0.5278864", "text": "def test_json(self, tmpdir):\n model = self.model\n assert model.json() == self.jsoncompact\n assert model.json(indent=4) == self.jsonindent\n\n model = DM(self.jsoncompact)\n assert model.json() == self.jsoncompact\n assert model.json(indent=4) == self.jsonindent\n\n model = DM(self.jsonindent)\n assert model.json() == self.jsoncompact\n assert model.json(indent=4) == self.jsonindent\n\n jsonfile = Path(tmpdir, 'model.json')\n \n with open(jsonfile, 'w') as f:\n model.json(fp=f)\n with open(jsonfile) as f:\n assert f.read() == self.jsoncompact\n\n with open(jsonfile, 'w') as f:\n model.json(fp=f, indent=4)\n with open(jsonfile) as f:\n assert f.read() == self.jsonindent", "title": "" }, { "docid": "8f17f07ed159d504a5376ae0cf1f7cf3", "score": "0.5276896", "text": "def create_files_for_hugobot(self):\n self.create_pre_processing_file()\n self.create_temporal_abstraction_file()", "title": "" }, { 
"docid": "fca74aedd0aab1fd332e0ce9019fd64f", "score": "0.527668", "text": "def __init__(self):\n self.name = 'contextual_model_multi_stimuli'\n self.figures = [\n 'f3a.npz',\n # 'f4.npz',\n 'f5.npz',\n # 'tbp.npz',\n # 'tbtcso.npz',\n # 'bw.npz'\n ]\n self.target_data = 'label_dict'\n self.config = Config()\n self.output_size = [1, 10]\n self.im_size = (10, 51, 51, 75)\n self.repeats = 20\n self.model_input_image_size = [10, 51, 51, 75]\n self.default_loss_function = 'pearson'\n self.score_metric = 'pearson'\n self.preprocess = [None]\n self.folds = {\n 'train': 'train',\n 'test': 'test'}\n self.targets = {\n 'image': tf_fun.bytes_feature,\n 'label': tf_fun.float_feature\n }\n self.tf_dict = {\n 'image': tf_fun.fixed_len_feature(dtype='string'),\n 'label': tf_fun.fixed_len_feature(\n dtype='float',\n length=self.output_size[0])\n }\n self.tf_reader = {\n 'image': {\n 'dtype': tf.float32,\n 'reshape': self.im_size\n },\n 'label': {\n 'dtype': tf.float32,\n 'reshape': self.output_size\n }\n }", "title": "" }, { "docid": "1e26e389786ef0975b5e21414626baa1", "score": "0.52724", "text": "def setUp(self):\n _, self.spec_file = tempfile.mkstemp()\n self.out_dir = tempfile.mkdtemp()\n self.out_file = os.path.join(self.out_dir, 'map.json')\n with open(self.spec_file, 'w') as f:\n json.dump(self.SPEC, f)", "title": "" }, { "docid": "3b2d254aee3198f15d5812c9bf4bd9d3", "score": "0.5272093", "text": "def save():\n print(\"Saving model\")\n model.save('training.h5')\n encoder_model.save('encoder.h5')\n decoder_model.save('decoder.h5')\n\n model_metadata = { 'input_token_index': input_token_index,\n 'target_token_index': target_token_index,\n 'max_encoder_seq_length': max_encoder_seq_length }\n\n with open('model_metadata.pickle', 'wb') as f:\n pickle.dump(model_metadata, f)", "title": "" }, { "docid": "08015497570878b9213f7a450157277e", "score": "0.5266621", "text": "def get_default_dataset_model(\n framework: str) -> Tuple[Type[Dataset], Type[ModelWrapper]]:\n if framework == 'keras':\n dataset = get_dataset_random_mock(PetDataset)\n model_path = copy_model_to_tmp(\n ResourceURI(TensorFlowPetDatasetMobileNetV2.pretrained_model_uri)\n )\n model = TensorFlowPetDatasetMobileNetV2(\n model_path,\n dataset,\n from_file=True\n )\n\n elif framework == 'tensorflow':\n dataset = get_dataset_random_mock(MagicWandDataset)\n model_path = get_tmp_path()\n keras_model = load_model(\n ResourceURI(MagicWandModelWrapper.pretrained_model_uri),\n compile=False\n )\n keras_model.save(model_path)\n model = MagicWandModelWrapper(model_path, dataset, from_file=True)\n\n elif framework == 'tflite':\n dataset = get_dataset_random_mock(MagicWandDataset)\n model_path = copy_model_to_tmp(\n ResourceURI(MagicWandModelWrapper.pretrained_model_uri)\n .with_suffix('.tflite')\n )\n model = MagicWandModelWrapper(model_path, dataset, from_file=True)\n\n elif framework == 'onnx':\n dataset = get_dataset_random_mock(COCODataset2017)\n model_path = copy_model_to_tmp(\n ResourceURI(ONNXYOLOV4.pretrained_model_uri)\n )\n shutil.copy(\n ResourceURI(ONNXYOLOV4.pretrained_model_uri).with_suffix('.cfg'),\n model_path.with_suffix('.cfg')\n )\n model = ONNXYOLOV4(model_path, dataset)\n\n elif framework == 'torch':\n dataset = get_dataset_random_mock(PetDataset)\n model_path = copy_model_to_tmp(\n ResourceURI(PyTorchPetDatasetMobileNetV2.pretrained_model_uri)\n )\n model = PyTorchPetDatasetMobileNetV2(\n model_path,\n dataset=dataset,\n from_file=True\n )\n # save whole model instead of state dict\n model.save_model(model_path, 
export_dict=False)\n\n elif framework == 'darknet':\n dataset = get_dataset_random_mock(COCODataset2017)\n model_path = copy_model_to_tmp(\n ResourceURI(TVMDarknetCOCOYOLOV3.pretrained_model_uri)\n )\n model = TVMDarknetCOCOYOLOV3(model_path, dataset)\n\n elif framework == 'iree':\n dataset = get_dataset_random_mock(MagicWandDataset)\n model_path = get_tmp_path(suffix='.vmfb')\n iree_compiler = IREECompiler(dataset, model_path)\n iree_compiler.compile(\n ResourceURI(MagicWandModelWrapper.pretrained_model_uri)\n )\n model = MagicWandModelWrapper(model_path, dataset, from_file=True)\n\n elif framework == 'tvm':\n dataset = get_dataset_random_mock(MagicWandDataset)\n model_path = get_tmp_path(suffix='.tar')\n tvm_compiler = TVMCompiler(dataset, model_path, modelframework='keras')\n tvm_compiler.compile(\n ResourceURI(MagicWandModelWrapper.pretrained_model_uri)\n )\n model = MagicWandModelWrapper(model_path, dataset, from_file=True)\n\n else:\n raise UnknownFramework(f'Unknown framework: {framework}')\n\n model.save_io_specification(model.model_path)\n return dataset, model", "title": "" }, { "docid": "992640259f4efbd2871b52674c1d8966", "score": "0.5266402", "text": "def test_base_model_instantiation(self):\n new = BaseModel()\n self.assertFalse(os.path.exists('file.json'))", "title": "" }, { "docid": "e5fe3979b76111ef57af45587bffb537", "score": "0.5264739", "text": "def test_create():\n\n with tempfile.TemporaryDirectory() as td:\n fp = os.path.join(td, 'outputs.h5')\n\n with Outputs(fp, 'w') as f:\n f.meta = meta\n f.time_index = time_index\n\n with h5py.File(fp, 'r') as f:\n test_meta = pd.DataFrame(f['meta'][...])\n test_ti = f['time_index'][...]\n assert test_meta.shape == (100, 2)\n assert len(test_ti) == 8760\n\n assert f.attrs['package'] == 'rex'\n assert f.attrs['version'] == __version__", "title": "" }, { "docid": "1be6706781b10bb2d0bb81302619195f", "score": "0.5263993", "text": "def _set_metadata(self, output_model: ModelEntity) -> None:\n metadata = {\"image_size\": int(self.config.dataset.image_size)}\n\n # Set the task type for inferencer\n metadata[\"task\"] = str(self.task_type).lower().split(\"_\")[-1] # type: ignore\n output_model.set_data(\"metadata\", json.dumps(metadata).encode())", "title": "" }, { "docid": "791d2463b7cca3ee16db1e9064a55b7f", "score": "0.525611", "text": "def main(args):\n train_data_path = os.path.join(args.train_data, \"train.txt\")\n with open(train_data_path) as f:\n train_data = f.read()\n preprocessed_train_data = get_preprocessed_data(train_data)\n\n # write preprocessed train txt file\n preprocessed_train_data_path = os.path.join(args.preprocessed_train_data, \"train.txt\")\n with open(preprocessed_train_data_path, \"w\") as f:\n f.write(preprocessed_train_data)\n\n validation_data_path = os.path.join(args.validation_data, \"valid.txt\")\n with open(validation_data_path) as f:\n validation_data = f.read()\n preprocessed_validation_data = get_preprocessed_data(validation_data)\n\n # write preprocessed validation txt file\n preprocessed_validation_data_path = os.path.join(args.preprocessed_validation_data, \"valid.txt\")\n with open(preprocessed_validation_data_path, \"w\") as f:\n f.write(preprocessed_validation_data)\n\n # Write MLTable yaml file as well in output folder\n # Since in this example we are not doing any preprocessing, we are just copying same yaml file from input,change it if needed\n\n # read and write MLModel yaml file for train data\n train_data_mltable_path = os.path.join(args.train_data, \"MLTable\")\n 
preprocessed_train_data_mltable_path = os.path.join(args.preprocessed_train_data, \"MLTable\")\n with open(train_data_mltable_path, \"r\") as file:\n yaml_file = yaml.safe_load(file)\n with open(preprocessed_train_data_mltable_path, \"w\") as file:\n yaml.dump(yaml_file, file)\n\n # read and write MLModel yaml file for validation data\n validation_data_mltable_path = os.path.join(args.validation_data, \"MLTable\")\n preprocessed_validation_data_mltable_path = os.path.join(args.preprocessed_validation_data, \"MLTable\")\n with open(validation_data_mltable_path, \"r\") as file:\n yaml_file = yaml.safe_load(file)\n with open(preprocessed_validation_data_mltable_path, \"w\") as file:\n yaml.dump(yaml_file, file)", "title": "" }, { "docid": "116df7d5971013028315e85ec69bc8ef", "score": "0.52551806", "text": "def build_model():", "title": "" }, { "docid": "51a8bb173b2d4fcc44d8cb05e09b101a", "score": "0.5252852", "text": "def create_data_model():\n data = {}\n # Special location don't consume token, while regular one consume one\n data[\"tokens\"] = [\n 0, # 0 depot\n 0, # 1 special node\n 0, # 2 special node\n 0, # 3 special node\n 0, # 4 special node\n 0, # 5 special node\n -1, # 6\n -1, # 7\n -1, # 8\n -1, # 9\n -1, # 10\n -1, # 11\n -1, # 12\n -1, # 13\n -1, # 14\n -1, # 15\n -1, # 16\n -1, # 17\n -1, # 18\n ]\n # just need to be big enough, not a limiting factor\n data[\"vehicle_tokens\"] = [20, 20, 20, 20]\n data[\"num_vehicles\"] = 4\n data[\"depot\"] = 0\n return data", "title": "" }, { "docid": "89539ba01a98acf69c2e7689820a381c", "score": "0.5251825", "text": "def setup_model(args: argparse.ArgumentParser):\n # set up model of detectron2\n cfg: CfgNode = get_cfg()\n model_file_name = args.model\n print(model_file_name)\n fi_cfg = model_zoo.get_config_file(model_file_name)\n cfg.merge_from_file(fi_cfg)\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.thresh\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file_name)\n predictor = DefaultPredictor(cfg)\n\n return predictor, cfg", "title": "" }, { "docid": "0a34b5660d8f80f26a4899b075d8922c", "score": "0.5249236", "text": "def setup_factory_data(jp_environ, jp_env_jupyter_path):\n source = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\", \"etc/config/metadata\")\n destination = os.path.join(jp_env_jupyter_path, \"metadata\")\n shutil.copytree(source, destination)\n yield destination # this return value probably won't be used, but here nonetheless", "title": "" }, { "docid": "fb5c01ccf2919474075f64bb13d3bbb9", "score": "0.5248487", "text": "def supplementMetadataFromJSON(self):\n\n if len(sys.argv) > 4:\n #If there are multiple entries for each element, you can specify 'unique'\n # by typing \"False\" as the last entry.\n field_descriptions = sys.argv.pop()\n else:\n field_descriptions = None\n print \"guessing at field descriptions for the import\"\n \"\"\"\n The anchor should be intuited, not named.\n \"\"\"\n anchor = sys.argv.pop()\n\n if len(sys.argv)==3:\n filename = sys.argv.pop()\n else:\n print \"you must supply exactly one argument to 'addCategoricalFromFile'\"\n raise\n Bookworm=BookwormSQLDatabase()\n Bookworm.importNewFile(filename,anchorField=anchor,jsonDefinition=field_descriptions)", "title": "" }, { "docid": "e6e6e3fb8045ab451806f9ba6055b241", "score": "0.5247023", "text": "def dataset_description_file(outdir):\n # dictionary that will be written for the basic dataset description version\n data_dict = {\n \"Name\": os.path.splitext(os.path.basename(outdir))[0],\n \"BIDSVersion\": \"1.4.0\",\n 
\"DatasetType\": \"raw\",\n }\n file_path = os.path.join(outdir, \"dataset_description.json\")\n # check if dataset_description.json exists, if it doesn't create it\n if not os.path.exists(file_path):\n LGR.warning(\n \"phys2bids could not find dataset_description.json,\" \"generating it with provided info\"\n )\n utils.write_json(file_path, data_dict)", "title": "" }, { "docid": "1754bf3a77758ef7b655e44146930b97", "score": "0.52451664", "text": "def test__extract_metadata_json(metadata):\n protocol = JsonProtocol(\n text=\"abc\",\n api_level=APIVersion(major=2, minor=2),\n filename=\"\",\n contents={},\n schema_version=2,\n metadata=metadata\n )\n\n assert analyze._extract_metadata(protocol) == models.Meta(\n name=\"my protocol\",\n author=\"some author\",\n apiLevel=\"2.2\"\n )", "title": "" }, { "docid": "a42921cfb35c2c12e12aee838efd83ef", "score": "0.5236573", "text": "def setUp(self):\n number_of_model_data = 4000\n model_data = random.uniform(-1,1, size=(number_of_model_data,6))\n model_data[:,0:3] = model_data[:,0:3]*(1.0/(sqrt(sum(model_data[:,0:3]*model_data[:,0:3],1)))).reshape(number_of_model_data,1)\n model_data[:,3] = model_data[:,0]*2\n model_data[:,4] = model_data[:,1]*2\n model_data[:,5] = model_data[:,2]*2\n self.model_data = model_data\n\n number_of_validate_data = 100\n validate_data = random.uniform(-1,1, size=(number_of_validate_data,6))\n validate_data[:,0:3] = validate_data[:,0:3]*(1.0/(sqrt(sum(validate_data[:,0:3]*validate_data[:,0:3],1)))).reshape(number_of_validate_data,1)\n validate_data[:,3] = validate_data[:,0]*2\n validate_data[:,4] = validate_data[:,1]*2\n validate_data[:,5] = validate_data[:,2]*2\n self.validation_data = validate_data\n self.normal_validation_data = validate_data[:,3:6]", "title": "" }, { "docid": "ad0c1e70c9eeeed44feae0667583d97a", "score": "0.5235715", "text": "def bodyDataset(\n testSplit: float = 0.15,\n shuffle: bool = True,\n bodyModel: BodyModel = BODY25,\n local_import: bool = False,\n):\n\n x_train, x_test, y_train, y_test, labels = importBodyCSVDataset(\n testSplit, local_import\n )\n\n # Shuffle in unison\n if shuffle:\n shuffler_train = np.random.permutation(x_train.shape[0])\n shuffler_test = np.random.permutation(x_test.shape[0])\n x_train = x_train[shuffler_train]\n x_test = x_test[shuffler_test]\n y_train = y_train[shuffler_train]\n y_test = y_test[shuffler_test]\n\n # Format to requested body model\n assert bodyModel in [BODY18, BODY25]\n if bodyModel == BODY18:\n x_train = x_train[:, BODY25_to_BODY18_indices]\n x_test = x_test[:, BODY25_to_BODY18_indices]\n\n # One-hot encoding\n y_train_onehot = get_one_hot(\n np.array([labels.index(sample) for sample in y_train]), len(labels)\n )\n y_test_onehot = get_one_hot(\n np.array([labels.index(sample) for sample in y_test]), len(labels)\n )\n\n return {\n \"x_train\": x_train,\n \"y_train\": y_train,\n \"y_train_onehot\": y_train_onehot,\n \"x_test\": x_test,\n \"y_test\": y_test,\n \"y_test_onehot\": y_test_onehot,\n \"labels\": np.array(labels),\n }", "title": "" }, { "docid": "f9f3ec1d30971acc63ba412c80c846c0", "score": "0.5235714", "text": "def prepare_nmt_data(FLAGS):\n # setting relevant info:\n data_dir = FLAGS.data_dir\n train_data = data_dir + FLAGS.train_data\n valid_data = data_dir + FLAGS.valid_data\n test_data = data_dir + FLAGS.test_data\n\n source_lang = FLAGS.source_lang\n target_lang = FLAGS.target_lang\n\n src_vocabulary_size = FLAGS.src_vocab_size\n tgt_vocabulary_size = FLAGS.tgt_vocab_size\n\n # Create vocabularies of the appropriate sizes.\n src_vocab_path = 
(train_data % str(src_vocabulary_size)) + ('.vocab.%s' % source_lang)\n tgt_vocab_path = (train_data % str(tgt_vocabulary_size)) + ('.vocab.%s' % target_lang)\n\n create_vocabulary(src_vocab_path, train_data % source_lang, src_vocabulary_size)\n create_vocabulary(tgt_vocab_path, train_data % target_lang, tgt_vocabulary_size)\n\n # Create token ids for the training data.\n src_train_ids_path = (train_data % str(src_vocabulary_size)) + ('.ids.%s' % source_lang)\n tgt_train_ids_path = (train_data % str(tgt_vocabulary_size)) + ('.ids.%s' % target_lang)\n\n data_to_token_ids(train_data % source_lang, src_train_ids_path, src_vocab_path)\n data_to_token_ids(train_data % target_lang, tgt_train_ids_path, tgt_vocab_path)\n\n # Create token ids for the development data.\n src_dev_ids_path = (valid_data % str(src_vocabulary_size)) + ('.ids.%s' % source_lang)\n tgt_dev_ids_path = (valid_data % str(tgt_vocabulary_size)) + ('.ids.%s' % target_lang)\n\n data_to_token_ids(valid_data % source_lang, src_dev_ids_path, src_vocab_path)\n data_to_token_ids(valid_data % target_lang, tgt_dev_ids_path, tgt_vocab_path)\n\n # Create token ids for the test data.\n src_test_ids_path = (test_data % str(src_vocabulary_size)) + ('.ids.%s' % source_lang)\n tgt_test_ids_path = (test_data % str(tgt_vocabulary_size)) + ('.ids.%s' % target_lang)\n\n data_to_token_ids(test_data % source_lang, src_test_ids_path, src_vocab_path)\n data_to_token_ids(test_data % target_lang, tgt_test_ids_path, tgt_vocab_path)\n\n return (src_train_ids_path, tgt_train_ids_path,\n src_dev_ids_path, tgt_dev_ids_path,\n src_test_ids_path, tgt_test_ids_path)", "title": "" }, { "docid": "ca9a2d3b48f97f10398821ac975bb6bd", "score": "0.52346563", "text": "def prepare_data(dataset_cfg) -> dict:\n pass", "title": "" }, { "docid": "65d62d093210c68629adb9c6f0052d99", "score": "0.5228662", "text": "def valid_simple_ml_predictor_data():\n from citrine.informatics.data_sources import GemTableDataSource\n from citrine.informatics.descriptors import RealDescriptor\n x = RealDescriptor(\"x\", 0, 100, \"\")\n y = RealDescriptor(\"y\", 0, 100, \"\")\n z = RealDescriptor(\"z\", 0, 100, \"\")\n data_source = GemTableDataSource(\n table_id=uuid.UUID('e5c51369-8e71-4ec6-b027-1f92bdc14762'),\n table_version=2\n )\n return dict(\n module_type='PREDICTOR',\n status='VALID',\n status_info=[],\n archived=False,\n display_name='ML predictor',\n schema_id='08d20e5f-e329-4de0-a90a-4b5e36b91703',\n id=str(uuid.uuid4()),\n config=dict(\n type='Simple',\n name='ML predictor',\n description='Predicts z from input x and latent variable y',\n inputs=[x.dump()],\n outputs=[z.dump()],\n latent_variables=[y.dump()],\n training_data=[data_source.dump()]\n )\n )", "title": "" }, { "docid": "c6a6e071503e17a2794cad90caafaa14", "score": "0.5227246", "text": "def setUp(self):\n\n self.parameters = 'bcipy/parameters/parameters.json'\n self.temp_dir = tempfile.mkdtemp()", "title": "" }, { "docid": "6c66882d8c2baeec9df0840a77dbfcbf", "score": "0.522638", "text": "def read_data(params: dict) -> Tuple[dict, dict]:\n # reading CSV files to Pandas dataframes\n train_df = pd.read_csv(\n Path(params[\"data\"][\"path_to_data\"]) / params[\"data\"][\"train_filename\"]\n )\n valid_df = pd.read_csv(\n Path(params[\"data\"][\"path_to_data\"])\n / params[\"data\"][\"validation_filename\"]\n )\n test_df = pd.read_csv(\n Path(params[\"data\"][\"path_to_data\"]) / params[\"data\"][\"test_filename\"]\n )\n\n # creating PyTorch Datasets\n train_dataset = TextClassificationDataset(\n 
texts=train_df[params[\"data\"][\"text_field_name\"]].values.tolist(),\n labels=train_df[params[\"data\"][\"label_field_name\"]].values,\n max_seq_length=params[\"model\"][\"max_seq_length\"],\n model_name=params[\"model\"][\"model_name\"],\n )\n\n valid_dataset = TextClassificationDataset(\n texts=valid_df[params[\"data\"][\"text_field_name\"]].values.tolist(),\n labels=valid_df[params[\"data\"][\"label_field_name\"]].values,\n max_seq_length=params[\"model\"][\"max_seq_length\"],\n model_name=params[\"model\"][\"model_name\"],\n )\n\n test_dataset = TextClassificationDataset(\n texts=test_df[params[\"data\"][\"text_field_name\"]].values.tolist(),\n labels=test_df[params[\"data\"][\"label_field_name\"]].values,\n max_seq_length=params[\"model\"][\"max_seq_length\"],\n model_name=params[\"model\"][\"model_name\"],\n )\n\n set_global_seed(params[\"general\"][\"seed\"])\n\n # creating PyTorch data loaders and placing them in dictionaries (for Catalyst)\n train_val_loaders = {\n \"train\": DataLoader(\n dataset=train_dataset,\n batch_size=params[\"training\"][\"batch_size\"],\n shuffle=True,\n ),\n \"valid\": DataLoader(\n dataset=valid_dataset,\n batch_size=params[\"training\"][\"batch_size\"],\n shuffle=False,\n ),\n }\n\n test_loaders = {\n \"test\": DataLoader(\n dataset=test_dataset,\n batch_size=params[\"training\"][\"batch_size\"],\n shuffle=False,\n )\n }\n\n return train_val_loaders, test_loaders", "title": "" }, { "docid": "93e6507b5994ab0d35fe16d06b9bd2ca", "score": "0.52199", "text": "def get_model_info():\n return json.dumps({'name': str(model.approach),\n 'features': model.features,\n 'datatypes': format_datatypes(model.training_problem.datatypes, model.features),\n 'outcome': model.outcome,\n 'training_set': {'path': model.training_problem.data.info.get('path', None),\n 'auc': model.training_auc,\n 'n_samples': model.training_problem.n_samples,\n 'n_features': model.training_problem.n_features,\n 'prevalence': model.training_problem.prevalence},\n 'positive_outcome': model.positive_outcome})", "title": "" }, { "docid": "3c80d97b46e4596deef732757d3f2eeb", "score": "0.52183616", "text": "def generate_nn_files():\r\n mics = []\r\n data = []\r\n num_train_lines = 0\r\n num_val_lines = 0\r\n if Path(f\"{DATA_PATH}train.libsvm\").is_file():\r\n print(\"Starting to collect training data...\")\r\n with open(f\"{DATA_PATH}train.libsvm\") as input_file:\r\n for line in input_file:\r\n line_split = line.split(\" \")\r\n mic = int(line_split[0]) - 13\r\n x = []\r\n for feat in FEATURES_TO_USE:\r\n m = REMatcher(f\".+{feat}:(\\\\d+).*\") # Check if feature is in libsvm formatted string\r\n if m.match(line):\r\n if feat > 524799: # Features after this id are antibiotics\r\n x.append(\"1\")\r\n else:\r\n x.append(str(m.group(1)))\r\n else:\r\n if feat > 524799: # Features after this id are antibiotics\r\n x.append(\"0\")\r\n else:\r\n x.append(\"0\")\r\n\r\n mics.append(mic)\r\n data.append(x)\r\n\r\n print(\"Training data collected\")\r\n with open(f\"{DATA_PATH}nn_train\", \"w\") as train_file, open(f\"{DATA_PATH}nn_validation\", \"w\") as val_file:\r\n for mic, row in zip(mics, data):\r\n if random.random() < 0.8:\r\n train_file.write(f\"{mic}:{','.join(row)}\\n\")\r\n num_train_lines += 1\r\n else:\r\n val_file.write(f\"{mic}:{','.join(row)}\\n\")\r\n num_val_lines += 1\r\n\r\n print(\"Wrote training and validation files\")\r\n\r\n num_test_lines = 0\r\n with open(f\"{DATA_PATH}test.libsvm\") as input_file, \\\r\n open(f\"{DATA_PATH}nn_test\", \"w\") as test_file:\r\n for line in 
input_file:\r\n line_split = line.split(\" \")\r\n mic = int(line_split[0]) - 13\r\n x = []\r\n for feat in FEATURES_TO_USE:\r\n m = REMatcher(f\".+{feat}:(\\\\d+).*\") # Check if feature is in libsvm formatted string\r\n if m.match(line):\r\n if feat > 524799: # Features after this id are antibiotics\r\n x.append(\"1\")\r\n else:\r\n x.append(str(m.group(1)))\r\n else:\r\n if feat > 524799: # Features after this id are antibiotics\r\n x.append(\"0\")\r\n else:\r\n x.append(\"0\")\r\n\r\n test_file.write(f\"{mic}:{','.join(x)}\\n\")\r\n num_test_lines += 1\r\n\r\n print(\"Wrote test file\")\r\n print(f\"Number of rows in training file: {num_train_lines}\")\r\n print(f\"Number of rows in validation file: {num_val_lines}\")\r\n print(f\"Number of rows in test file: {num_test_lines}\")\r\n print(\"If you have restarted project with new data, then you will need to update top of nn.py file with above values.\")", "title": "" }, { "docid": "1d0dfbfd666d21995df567ddbfd58e42", "score": "0.52147394", "text": "def test__extract_metadata_none():\n assert analyze._extract_metadata(None) == models.Meta(\n name=None,\n author=None,\n apiLevel=None)", "title": "" }, { "docid": "52f5558e2a7ea57a10382db0c6ef92f4", "score": "0.5208306", "text": "def _generate_examples(self, json_file_path, image_dir_path):\n with epath.Path(json_file_path).open() as f:\n data = json.loads(f.read())\n for label, images in data.items():\n for image_name in images:\n image = os.path.join(image_dir_path, image_name + \".jpg\")\n features = {\"image\": image, \"label\": label}\n if self.version > \"2.0.0\":\n features[\"id\"] = image_name\n yield image_name, features", "title": "" }, { "docid": "ca41f72417332be6d98c9d8a324fe2a2", "score": "0.5206537", "text": "def test_api_save_torchscript(tmpdir):\n input_features = [category_feature(encoder={\"vocab_size\": 5})]\n output_features = [\n category_feature(name=\"class\", decoder={\"vocab_size\": 5}, reduce_input=\"sum\", output_feature=True)\n ]\n\n data_csv = generate_data(input_features, output_features, os.path.join(tmpdir, \"dataset.csv\"))\n val_csv = shutil.copyfile(data_csv, os.path.join(tmpdir, \"validation.csv\"))\n test_csv = shutil.copyfile(data_csv, os.path.join(tmpdir, \"test.csv\"))\n\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n }\n model = LudwigModel(config)\n model.train(training_set=data_csv, validation_set=val_csv, test_set=test_csv, output_directory=tmpdir)\n\n test_df = pd.read_csv(test_csv)\n output_df_expected, _ = model.predict(test_df, return_type=pd.DataFrame)\n\n save_path = os.path.join(tmpdir, \"torchscript\")\n os.makedirs(save_path, exist_ok=True)\n model.save_torchscript(save_path)\n inference_module = InferenceModule.from_directory(save_path)\n output_df, _ = inference_module.predict(test_df, return_type=pd.DataFrame)\n\n for col in output_df.columns:\n assert output_df[col].equals(output_df_expected[col])", "title": "" }, { "docid": "77035b88f17a7e6dca6c4ba259fa0c46", "score": "0.5195542", "text": "def test_trt_serialize_resnet50_bzs():\n if not os.environ.get(\"project_path\"):\n logger.error(\"==== env project_path is not set ====\")\n raise Exception(\"please export project_path=path/of/root_tests\")\n model_root = os.path.join(\n os.environ.get(\"project_path\"), \"Data/python-model-infer\")\n\n model_name = \"ResNet50_pretrained\"\n tmp_path = os.path.join(model_root, \"classification\")\n model_path = os.path.join(tmp_path, 
model_name, \"model\")\n test_img_url = \"https://paddle-inference-dist.bj.bcebos.com/inference_demo/python/resnet50/ILSVRC2012_val_00000247.jpeg\"\n if not os.path.exists(\"./ILSVRC2012_val_00000247.jpeg\"):\n wget.download(test_img_url, out=\"./\")\n\n opt_cache_path = os.path.join(model_path, \"_opt_cache\")\n\n if os.path.exists(opt_cache_path):\n logger.warning(\"==== _opt_cache should be empty ====\")\n logger.warning(\"==== _opt_cache will be remove ====\")\n shutil.rmtree(opt_cache_path)\n assert os.path.exists(os.path.join(model_path, \"_opt_cache\")\n ) == False, \"_opt_cache is not empty before this test\"\n\n files_before_serialize = os.listdir(model_path)\n logger.info(\"==== files_before_serialize: {} ====\".format(\n files_before_serialize))\n\n # create 3 different predictor, should create 3 serialize hash files\n predictor = prepare_predictor(model_path=model_path, trt_max_batch_size=1)\n predictor = prepare_predictor(model_path=model_path, trt_max_batch_size=2)\n predictor = prepare_predictor(model_path=model_path, trt_max_batch_size=4)\n\n files_after_serialize = os.listdir(os.path.join(model_path, \"_opt_cache\"))\n logger.info(\"==== files_after_serialize: {} ====\".format(\n files_after_serialize))\n\n assert len(files_after_serialize) == 3, \"serialize file should be only one\"", "title": "" } ]
7eb379bc8c9ad909a2f3fdef4962d706
Test the time step increment of a ToySquares object
[ { "docid": "ba63fcb9b4c122394b641ca64959ac70", "score": "0.8912831", "text": "def test_increment_time_step(self):\n toy_squares = self.test_constructor()\n toy_squares.increment_time_step()", "title": "" } ]
[ { "docid": "92261ed520f4713cf8c807793616e38a", "score": "0.74221474", "text": "def timeStep(self):", "title": "" }, { "docid": "5bd38aef68c994295bfa238f3756f482", "score": "0.6563983", "text": "def increment_time_step(self):\n self.time_step += 1", "title": "" }, { "docid": "5d7964b2c1f640401c478e8c218168c8", "score": "0.64641386", "text": "def test_time_step(self):\n self.plugin.vel_x.data = self.plugin.vel_x.data / 6.0\n self.plugin.vel_y.data = self.plugin.vel_y.data / 6.0\n expected_data = np.array(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [np.nan, 2.0, 3.0],\n [np.nan, 1.0, 2.0],\n ]\n )\n result = self.plugin.process(self.cube, datetime.timedelta(hours=1))\n self.assertArrayAlmostEqual(\n result.data[~result.data.mask], expected_data[~result.data.mask]\n )", "title": "" }, { "docid": "e69189a99858d655c1b2063a00ba010b", "score": "0.63712984", "text": "def test_StarShipSystem_tick_03( self ) :\n\t\timport datetime\n\t\tstart = datetime.datetime.today()\n\t\tupdate = datetime.timedelta(seconds=1)\n\t\tc = 2\n\t\trt = 4\n\t\twhile ( self.sss.tickAccum.seconds < 2 ) :\n\t\t\tif (self.sss.tickAccum > update) and c>0 :\n\t\t\t\tself.sss.tickAccum -= update\n\t\t\t\tc -= 1\n\t\t\tself.sss.tick()\n\t\tend = datetime.datetime.today()\n\t\truntime = end - start\n\t\n\t\tself.assertEqual ( self.sss.tickAccum.seconds, 2, \"Should show 2 seconds\" )\n\t\tself.assertEqual ( runtime.seconds, rt, \"The runtime should be about 4 seconds\")", "title": "" }, { "docid": "53fbbb7538a2e4e9c8fe34814d46e16e", "score": "0.6363598", "text": "def testIncrementTheta (self):\n #TBD ???\n pass", "title": "" }, { "docid": "9fbf73c549b13d8a410b264451a73532", "score": "0.63243574", "text": "def test_get_next_round_nsdi():", "title": "" }, { "docid": "d9c9e14a68ca9f9bde41ed8c2c68f71a", "score": "0.631228", "text": "def step_then_11(context, t_prop):\n t = float(t_prop)\n R1_t = context.R1.at(t)\n R2 = context.R2\n \n numpy.testing.assert_allclose(R1_t.data, R2.data, rtol=1.0e-5, atol=1.0e-5)", "title": "" }, { "docid": "9ade81476145af0ea3cb849b5518c4b7", "score": "0.6262304", "text": "def test_increment_round(game):\n\n game.increment_round()\n assert game.round == 1", "title": "" }, { "docid": "c179c33ed48e0877bf9512ced9db3261", "score": "0.6159567", "text": "def update_on_timestep(self):", "title": "" }, { "docid": "177d3bbbea749b84adcda3dc085d62c3", "score": "0.60840863", "text": "def times(self):", "title": "" }, { "docid": "0169f9d2086dc974eb749859a5a64048", "score": "0.6079619", "text": "def step(self, timestep: float) -> None:\n pass", "title": "" }, { "docid": "ff1c55492a2d923d6d26c2f90fb63be6", "score": "0.6071873", "text": "def test_StarShipSystem_tick_02( self ) :\n\t\tc = 0\n\t\twhile ( self.sss.tickAccum.seconds < 1 ) :\n\t\t\tc += 1\n\t\t\tself.sss.tick()\n\t\tprint ( \"1 second took %i ticks.\" % (c,))", "title": "" }, { "docid": "812b9587f36a0b36e7cb15dc1103a1dd", "score": "0.6066257", "text": "def step(self, sim, tstep):\n pass", "title": "" }, { "docid": "c71fdb024a3974c199dba3809bf162ec", "score": "0.6045584", "text": "def test_time_event(self, mock_time):\n # Expected value.\n exp = 50\n\n # Set up test data and state.\n timer = sw.timer()\n\n # Run test.\n act = next(timer)\n\n # Determine if test passed.\n self.assertEqual(exp, act)", "title": "" }, { "docid": "9f64d96983ecab15384345e73751337a", "score": "0.60414743", "text": "def test_update_measurement(self):\n self.s.measurement_fct=MeasurementGenTesting()\n self.s.step()\n ms=self.s.current_state[\"perception\"]\n 
self.assertTrue(all([m[\"value\"]==0 for m in ms]))\n self.s.step()\n self.s.step()\n ms=self.s.current_state[\"perception\"]\n self.assertTrue(all([m[\"value\"]==2 for m in ms]))", "title": "" }, { "docid": "1bdc0c9e11e2504295b81c169d87dade", "score": "0.60058105", "text": "def test_lead_time(self):\n result = self.plugin.process(self.cube, self.timestep)\n result.coord(\"forecast_period\").convert_units(\"s\")\n lead_time = result.coord(\"forecast_period\").points\n self.assertEqual(len(lead_time), 1)\n self.assertEqual(lead_time[0], self.timestep.total_seconds())", "title": "" }, { "docid": "687a40bb6ff47335de19ba5f4c2b559e", "score": "0.59964746", "text": "def update_time(self, steps):\r\n self.steps += steps\r\n self.time = self.steps * self.dt", "title": "" }, { "docid": "01a6983e26f681316abaef360141b862", "score": "0.5985662", "text": "def step(self, X, Y):", "title": "" }, { "docid": "6af4e45168283695556b92ae180533d0", "score": "0.5960896", "text": "def test_elapsed(self):\n pi = progress.GoalTrackerItem(\"testitem\")\n # special case before goal is set\n self.assert_(pi.elapsed() == 0.0)\n pi.goalitems = 100\n self.assert_(pi.elapsed() == 0.0)\n pi.items = 100\n time.sleep(0.20)\n # should work before done()\n self.assert_(pi.elapsed() >= 0.10)\n pi.done()\n # should work after done()\n self.assert_(pi.elapsed() >= 0.10)", "title": "" }, { "docid": "decd7c8e5889da912f57bf6e89d176b4", "score": "0.59508085", "text": "def test_increment_completed_steps_explicit_steps(self):\n status = self._status(total_steps=5)\n status.increment_completed_steps(3)\n assert status.completed_steps == 3\n status.refresh_from_db()\n assert status.completed_steps == 3", "title": "" }, { "docid": "e6c49a1dc2779b7c25b86a43446a4e92", "score": "0.5921265", "text": "def timestep(self):\n return self.timestep", "title": "" }, { "docid": "0d9ae0c1c6c07b95816fe625e4635b4e", "score": "0.5916275", "text": "def advance_time(delta):\n MockTime.now += delta", "title": "" }, { "docid": "dd31eeafce85dca89080236ec56adfe5", "score": "0.5865651", "text": "def test_processing_time_credit_credit_lane(self):\n lane = self.create_random_lane()\n range_low = datetime.timedelta(seconds=13 - 4.0 * 2.5)\n range_high = datetime.timedelta(seconds=13 + 4.0 * 2.5)\n for i in range(100):\n process_time = lane.processing_time_credit_credit_lane()\n assert process_time >= range_low\n assert process_time <= range_high", "title": "" }, { "docid": "97e237abfc56287a2239790d50cbc619", "score": "0.5858015", "text": "def testSFTime(self):\n t = SoSFTime()\n s = SoSFTime()\n t.setValue(150.5)\n s.setValue(t)\n self.failUnless(150.5 == t.getValue() == s.getValue(), \n 'setValue on SoSFTime failed')", "title": "" }, { "docid": "4028e259896e939e3badd510d76f721f", "score": "0.58454764", "text": "def test_descent(self):\n schedule = parts.LinearSchedule(\n begin_t=5, decay_steps=7, begin_value=1.0, end_value=0.3)\n for step in range(20):\n val = schedule(step)\n if step <= 5:\n self.assertEqual(1.0, val)\n elif step >= 12:\n self.assertEqual(0.3, val)\n else:\n self.assertAlmostEqual(1.0 - ((step - 5) / 7) * 0.7, val)", "title": "" }, { "docid": "64a45a75cc75b2ac62563056fcdc3065", "score": "0.58266306", "text": "def test_processing_time_etc_etc_lane(self):\n lane = self.create_random_lane()\n range_low = datetime.timedelta(seconds=5 - 4 * 1)\n range_high = datetime.timedelta(seconds=5 + 4 * 1)\n for i in range(100):\n process_time = lane.processing_time_etc_etc_lane()\n assert process_time >= range_low\n assert process_time <= range_high", "title": 
"" }, { "docid": "b91f6852d4255899d8dd198754e17919", "score": "0.58263683", "text": "def step(self, dt):\n\t\tpass", "title": "" }, { "docid": "9679062f31e97c84ad969f76a4bebc0e", "score": "0.58236074", "text": "def test_processing_time_credit_gen_lane(self):\n lane = self.create_random_lane()\n range_low = datetime.timedelta(seconds=13.5 - 4.0 * 2.5)\n range_high = datetime.timedelta(seconds=13.5 + 4.0 * 2.5)\n for i in range(100):\n process_time = lane.processing_time_credit_gen_lane()\n assert process_time >= range_low\n assert process_time <= range_high", "title": "" }, { "docid": "7718e53eae857f5f31163f2829511f23", "score": "0.58108217", "text": "def next_time(self, event):\n\n self.time_update(self.stime.val + self.time_step)", "title": "" }, { "docid": "2647fdce8b24f2cd64141d9e43214658", "score": "0.5809018", "text": "def _update_step(self):\n self.step_t += 1\n self.remaining_budget_t_minus_1 = self.remaining_budget_t\n self.remaining_budget_t -= self.cost_t\n self.bct_t = (self.remaining_budget_t_minus_1 - self.remaining_budget_t) * 1.0 / self.remaining_budget_t_minus_1\n self.rol_t -= 1\n self.cpm_t = self.cost_t * 1.0 / self.bids_t\n self.wr_t = self.wins_t * 1.0 / self.bids_t", "title": "" }, { "docid": "02374894fff8785408ec1314e9c9f29d", "score": "0.5808046", "text": "def test_processing_time_etc_gen_lane(self):\n lane = self.create_random_lane()\n range_low = datetime.timedelta(seconds=6 - 4 * 1)\n range_high = datetime.timedelta(seconds=6 + 4 * 1)\n for i in range(100):\n process_time = lane.processing_time_etc_gen_lane()\n assert process_time >= range_low\n assert process_time <= range_high", "title": "" }, { "docid": "9560c73a553ae2309212b02630b60d09", "score": "0.57870334", "text": "def test_now(self, step, cur_time):\n return (\n (self._test_freq_steps and step % self._test_freq_steps == 0) or \\\n (cur_time > self._test_freq)\n )", "title": "" }, { "docid": "2aac277c4fd1f0a1ded4d499781386ab", "score": "0.57689273", "text": "def test_t1(self):\n self.assertAlmostEqual(self.troe.T1.value_si, self.T1, 6)", "title": "" }, { "docid": "3468cd1d2b7c67afb4d08e6e85a09287", "score": "0.57666284", "text": "def test_elapsed(self):\n pi = progress.TrackerItem(\"testitem\")\n # special case before items is set\n self.assert_(pi.elapsed() == 0.0)\n pi.items = 100\n time.sleep(0.20)\n # should work before done()\n self.assert_(pi.elapsed() >= 0.10)\n pi.done()\n # should work after done()\n self.assert_(pi.elapsed() >= 0.10)", "title": "" }, { "docid": "f4ecc49329afee6c413f637e71af9036", "score": "0.57515264", "text": "def test_t2(self):\n self.assertAlmostEqual(self.troe.T2.value_si, self.T2, 6)", "title": "" }, { "docid": "b7df1c935977db6e4a6aff9957f7f150", "score": "0.57505155", "text": "def get_timestep(self):\n return int(1000 * self._timestep)", "title": "" }, { "docid": "c813cbefbe3b7da590b96a55e771ca3e", "score": "0.57503116", "text": "def addTiming(self, seconds):", "title": "" }, { "docid": "0d2588818501efea29b66d82de26ab35", "score": "0.5747856", "text": "def test_processing_time_cash_gen_lane(self):\n lane = self.create_random_lane()\n range_low = datetime.timedelta(seconds=13.5 - 4 * 2.5)\n range_high = datetime.timedelta(seconds=13.5 + 4 * 2.5)\n for i in range(100):\n process_time = lane.processing_time_credit_gen_lane()\n assert process_time >= range_low\n assert process_time <= range_high", "title": "" }, { "docid": "c3def3f0e983f55a2dc39e67f17e14d4", "score": "0.5743975", "text": "def step_when_10(context, t_prop):\n t = float(t_prop)\n\n R = context.R\n U = 
context.U\n \n R2 = U.apply(t, R)\n\n context.R2 = R2", "title": "" }, { "docid": "734f6af0a8f682712ec4418d604020f6", "score": "0.5731872", "text": "def testdimtime(self):\n assert self.data.time.shape == (self.nsteps, )", "title": "" }, { "docid": "cb873b3d1b341ae03e9c1ced724022a2", "score": "0.5726068", "text": "def GetTimeStep(self):\n \n return self.time_step", "title": "" }, { "docid": "b2625963c7a0e058aae7c5520b6ceba6", "score": "0.5719384", "text": "def test_StarShipSystem_tick_01( self ) :\n\t\timport time\n\t\tself.sss.tick()\n\t\tself.assertEqual( self.sss.tickAccum.seconds, 0, \"Should be 0\" )\n\t\ttime.sleep(1)\n\t\tself.sss.tick()\n\t\tself.assertEqual( self.sss.tickAccum.seconds, 1, \"Should be about 1\" )\n\t\tself.assertEqual( self.sss.failAccum.seconds, 1, \"Should be about 1\" )", "title": "" }, { "docid": "b0af5145c044f41ccb8b1bf4ff465402", "score": "0.5714873", "text": "def test_timer() -> None:\n # Create a timer to increment periodically\n period = 0.01\n counter = 0\n\n def increment() -> None:\n nonlocal counter\n counter += 1\n\n timer = PeriodicTimer(period, increment)\n # Start\n timer.start()\n # Wait a bit\n factor = 10\n sleep(period * factor)\n # Expect roughly 'factor' number of ticks\n assert pytest.approx(factor, counter)\n timer.stop()", "title": "" }, { "docid": "a76335f62efbcda8ae6322fd91f2e04c", "score": "0.5698876", "text": "def test_advance_time_facility_second(self):\n datetime_now = datetime.datetime.now()\n add_timedelta_second = datetime.timedelta(seconds=1)\n test_facility = toll_queue.Facility(datetime_now)\n test_facility.advance_time_facility()\n assert test_facility.get_current_time() == datetime_now + add_timedelta_second", "title": "" }, { "docid": "7ed0917b16407b1262ee2137003308a8", "score": "0.56942266", "text": "def game_step(self, time_since_last_frame):", "title": "" }, { "docid": "9ca78f35cb366c59c749667cc2b8c8aa", "score": "0.5664922", "text": "def test_clock(self):\n pass", "title": "" }, { "docid": "a99ba666c98b76b1eafc420b77a69b2f", "score": "0.5663857", "text": "def compute_timestep(self, time, data):\n raise NotImplementedError()", "title": "" }, { "docid": "6b2f515ca8e6e6021f7cf2106496f3d6", "score": "0.564358", "text": "def test_linspace_steps():\n t1 = Time([\"2021-01-01 00:00:00\", \"2021-01-01 12:00:00\"])\n t2 = Time(\"2021-01-02 00:00:00\")\n atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()\n\n ts, st = np.linspace(t1, t2, 7, retstep=True)\n assert ts.shape == (7, 2)\n assert st.shape == (2,)\n assert all(ts[1].isclose(ts[0] + st, atol=atol))\n assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))\n assert all(st.isclose(TimeDelta([14400, 7200], format=\"sec\"), atol=atol))", "title": "" }, { "docid": "cdd8dcaabde2123edb49d3c58ed44a2d", "score": "0.56411654", "text": "def next_test_time2(self, next_test_time2):\n \n self._next_test_time2 = next_test_time2", "title": "" }, { "docid": "4af877549b0e855f9756068dbbd6cdc5", "score": "0.5636389", "text": "def timestep(self):\n return self._timestep", "title": "" }, { "docid": "bb491dd3e82146b6d4301bff149e9924", "score": "0.56305397", "text": "def test_time(self):\n main.TA.set_ta_status(\"agupta60\", \"online\")\n ta_time_old = main.TA.get_ta_timings()\n old_time = \"\"\n new_time = \"\"\n for ta in ta_time_old:\n if ta[\"name\"] == \"agupta60\":\n old_time = ta[\"time in minutes\"]\n\n time.sleep(60)\n\n main.TA.set_ta_status(\"agupta60\", \"offline\")\n ta_time_new = main.TA.get_ta_timings()\n for ta in ta_time_new:\n if ta[\"name\"] == \"agupta60\":\n new_time = ta[\"time 
in minutes\"]\n\n self.assertTrue(new_time -old_time >= 1)\n # self.assertEquals(new_time - old_time , 1)", "title": "" }, { "docid": "e290b497646fc6623dff7087342e81e8", "score": "0.5618815", "text": "def next_test_time2(self):\n return self._next_test_time2", "title": "" }, { "docid": "7bd42ef76b4b8dad799d24ee81becaf5", "score": "0.560193", "text": "def step_time(self):\n return self.query('SB')", "title": "" }, { "docid": "9017743e75cc8edf97829008e529a2bb", "score": "0.56009877", "text": "def test_ascent(self):\n schedule = parts.LinearSchedule(\n begin_t=5, end_t=12, begin_value=-0.4, end_value=0.4)\n for step in range(20):\n val = schedule(step)\n if step <= 5:\n self.assertEqual(-0.4, val)\n elif step >= 12:\n self.assertEqual(0.4, val)\n else:\n self.assertAlmostEqual(-0.4 + ((step - 5) / 7) * 0.8, val)", "title": "" }, { "docid": "a8b66b4fe691212bdf020a1c05ec8deb", "score": "0.5588988", "text": "def _after_apply(self, step: int, t: float):\n self._previous_step = step\n self._previous_time = t", "title": "" }, { "docid": "4ba46c086edd9641cd57af4f11083e61", "score": "0.55759805", "text": "def test_dt(self, signals):\n assert signals.dt == pytest.approx(signals.times[-1]-signals.times[-2])", "title": "" }, { "docid": "15cfdf74e1fc1f212313294349995804", "score": "0.557085", "text": "def times(self, number):\n self.expected_calls = number\n return self", "title": "" }, { "docid": "b3b06f65ad870bed2d528d8bf3bc52b7", "score": "0.55681413", "text": "def test_correct_time(self):\n def new_func(s, x):\n s.steps = x is self.conf.conf_parser['TIME']\n\n with patch.object(read_config.TimeParser, 'parse', new_func):\n self.assertTrue(self.conf.get_time()[0])", "title": "" }, { "docid": "61f358ee48496020e55203992a712daa", "score": "0.5561349", "text": "def current_time_step(self) -> int:\n return self._current_time_step", "title": "" }, { "docid": "e98f5fbb08a37fc090fd1042582ecb75", "score": "0.55432296", "text": "def step(self):", "title": "" }, { "docid": "2741662d6cac42030e39aa178363f8ff", "score": "0.55347985", "text": "def testStepSpeedProfile (self):\n self.m_Rotor.ResetStepSpeedProfile()\n (B, E, S) = self.m_Rotor.StepSpeedProfile()\n profile = '%d, %d, %d' % (B, E, S)\n self.assertEquals(configData[Axis.MotorStepVelocityLabel], profile)\n\n (newB, newE, newS) = (B+1, E+1, S+1)\n self.m_Rotor.SetStepSpeedProfile (newB, newE, newS)\n \n (newerB, newerE, newerS) = self.m_Rotor.StepSpeedProfile()\n self.assertEquals(newB, newerB)\n self.assertEquals(newE, newerE)\n self.assertEquals(newS, newerS)\n \n self.m_Rotor.ResetStepSpeedProfile()\n (B, E, S) = self.m_Rotor.StepSpeedProfile()\n profile = '%d, %d, %d' % (B, E, S)\n self.assertEquals(configData[Axis.MotorStepVelocityLabel], profile)", "title": "" }, { "docid": "2ff3b3873bd8449928581d836b48139f", "score": "0.55305475", "text": "def SetTimeStep(self, time_step):\n \n\n self.time_step = time_step", "title": "" }, { "docid": "b6be684b45d0c9d2f1df3f1d8d9484a8", "score": "0.55272067", "text": "def is_update(self, step: int):\r\n pass", "title": "" }, { "docid": "78db4c0cbaa97a14585b501a6aa2fe29", "score": "0.5524013", "text": "def nsteps(self) -> int:", "title": "" }, { "docid": "693dc281f03eec10b3c97d050c5d5604", "score": "0.55155164", "text": "def test_valid_quantity_operations1(self):\n t0 = TimeDelta(106400.0, format=\"sec\")\n q1 = 10.0 * u.second\n t1 = t0 + q1\n assert isinstance(t1, TimeDelta)\n assert t1.value == t0.value + q1.to_value(u.second)\n q2 = 1.0 * u.day\n t2 = t0 - q2\n assert isinstance(t2, TimeDelta)\n assert 
allclose_sec(t2.value, t0.value - q2.to_value(u.second))\n # now comparisons\n assert t0 > q1\n assert t0 < 1.0 * u.yr\n # and broadcasting\n q3 = np.arange(12.0).reshape(4, 3) * u.hour\n t3 = t0 + q3\n assert isinstance(t3, TimeDelta)\n assert t3.shape == q3.shape\n assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))", "title": "" }, { "docid": "18c33bf3810ba1de2741a7d5ebfb6eb8", "score": "0.5515391", "text": "def step_given_7(context):\n # create test aggregatedimer\n agg = qr.TestAggregate(\"trimer-2\")\n agg.build()\n \n # get the associated time axis and the relaxation tensor and Hamiltonian\n time = qr.TimeAxis(0, 320, 1.0)\n time2 = qr.TimeAxis(0, 32, 10.0)\n context.time = time\n context.time2 = time2\n \n HH = agg.get_Hamiltonian()\n context.H = HH\n \n SBI = qr.qm.TestSystemBathInteraction(name=\"trimer-2-lind\")\n LL = qr.qm.LindbladForm(HH, SBI)\n \n context.L= LL\n \n # initial density matrix\n R = qr.ReducedDensityMatrix(dim=HH.dim)\n R.data[2,2] = 1.0 \n \n context.R = R", "title": "" }, { "docid": "353b1b353fe3b446e9f51a4930ed04e2", "score": "0.5507582", "text": "def time_step(cls, grid, dt=0.0):\n g = grid\n dx2, dy2 = g.dx ** 2, g.dy ** 2\n dnr_inv = 0.5 / (dx2 + dy2)\n u = g.u\n \n error = g.np.empty([1], ctype='f')\n \n cly_time_step_task(u.queue, u, dy2, dx2, dnr_inv, error)\n \n return error.item().value", "title": "" }, { "docid": "34da7c5f7931ebddda6427e4f549b04f", "score": "0.5507303", "text": "def _increase_step(self):\r\n self.global_step += 1", "title": "" }, { "docid": "0b53a4b80f3e13019b68f7bb8ab6bf8f", "score": "0.5494972", "text": "def test_splittime(self):\n # Expected value.\n exp = (4, 10, 35)\n\n # Set up test data and state.\n factors = (3600, 60, 1)\n duration = sum(e * f for e, f in zip(exp, factors))\n\n # Run test.\n act = sw.split_time(duration)\n\n # Determine if test passed.\n self.assertEqual(exp, act)", "title": "" }, { "docid": "aaa290ce4e6bf6e3ad5607fa38aef868", "score": "0.54923105", "text": "def test_step():\n\n game = \"MountainCar-v0\"\n memory_size = 5\n dataset = Dataset(game, memory_size)\n\n prev_state = dataset.get_state()\n result = dataset.step(2)\n assert result[1] == -1\n assert prev_state[0] != dataset.get_state()[0]\n assert prev_state[1] != dataset.get_state()[1]\n assert len(dataset.memory) == 1\n assert dataset.position == 1", "title": "" }, { "docid": "9ad647a008dcc27ca32130fc422f85dc", "score": "0.5490248", "text": "def onTimeStep(integrator, model, time):\n print(\"onTimeStep, time: {}\".format(time))", "title": "" }, { "docid": "e8cd540c8f2b0c47cc966bc36bf9f6c1", "score": "0.5487745", "text": "def test_basic(self):\n expected_time_interval = 60\n expected_cubes = self.cubes.copy()\n for cube in expected_cubes:\n cube.convert_units(\"m/s\")\n cubes, time_interval = Accumulation()._check_inputs(self.cubes)\n self.assertEqual(cubes, expected_cubes)\n self.assertEqual(time_interval, expected_time_interval)", "title": "" }, { "docid": "56ca9e3d009d560c7f131d5fcbeefb57", "score": "0.54866344", "text": "def test_edit_results_with_time(self):\n reqs_to_edit = {\n 'TIME': list(np.round(np.arange(5, 10.1, .1), 10)),\n 'JOINT_1': {\n 'FY': list(np.ones(51))\n }\n }\n\n edit_results(self.res_to_edit, reqs_to_edit)\n self.assertEqual(0,1)", "title": "" }, { "docid": "e9e0427fe46c62e8b7fcf90d807ca506", "score": "0.548562", "text": "def tstep(self):\n return self._tstep", "title": "" }, { "docid": "8c1a3d4e5a233c169d1a943efa5bf70e", "score": "0.548541", "text": "def test_explicit_step(self):\n with 
tempfile.TemporaryDirectory() as temp_dir:\n os.chdir(temp_dir)\n\n for i in range(5):\n easy_tf_log.tflog('foo', i, step=(10 * i))\n # These ones should continue from where the previous ones left off\n for i in range(5):\n easy_tf_log.tflog('foo', i)\n\n event_filename = osp.join('logs', os.listdir('logs')[0])\n event_n = 0\n for event in tf_train.summary_iterator(event_filename):\n if event_n == 0: # metadata\n event_n += 1\n continue\n if event_n <= 5:\n self.assertEqual(event.step, 10 * (event_n - 1))\n if event_n > 5 and event_n <= 10:\n self.assertEqual(event.step, 40 + (event_n - 5))\n event_n += 1", "title": "" }, { "docid": "6ba2d57a53d98532ca4b6e51183ada7c", "score": "0.5483919", "text": "def step(self, t):\n if self.beginTime > t: # it is a delay anim\n return True\n\n o = self.obj\n if self.isContinue:\n if t > self.beginTime + self.len:\n self.beginTime = self.beginTime + ((t - self.beginTime) // self.len) * self.len\n else:\n if t > self.beginTime + self.len:\n self.__setLastValue()\n return False\n\n prec = float(t - self.beginTime) / float(self.len)\n\n try:\n v = self.mapping.map(prec)\n setattr(self.obj, self.attr, v)\n except:\n pass\n return True", "title": "" }, { "docid": "57dbe50dff29c9677b72959aefcbc248", "score": "0.54822505", "text": "def testPeriodic(self):", "title": "" }, { "docid": "1ab560b82596b906a583d8cd318f8f82", "score": "0.54667205", "text": "def test_single_orbit_call_orbit_starts_0_UT_using_next(self):\n\n self.testInst.load(date=self.stime)\n self.testInst.orbits.next()\n self.etime = self.stime + dt.timedelta(seconds=(self.inc_min * 60 - 1))\n assert (self.testInst.index[0] == self.stime)\n assert (self.testInst.index[-1] == self.etime)\n return", "title": "" }, { "docid": "f45bd339ac4272bd75517a7d3dcc1fed", "score": "0.54601943", "text": "def next_test_time1(self):\n return self._next_test_time1", "title": "" }, { "docid": "7fdf992c7f00f0cc5076b70923cd4877", "score": "0.5459306", "text": "def time(self):\n self._counter += 0.0001\n return self._counter", "title": "" }, { "docid": "5cb890774c2fbd290c89a7cedff86767", "score": "0.5448303", "text": "def step_when_15(context):\n time2 = context.time2\n LL = context.L\n HH = context.H\n \n with qr.eigenbasis_of(HH):\n U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=LL, mode=\"jit\")\n U2 = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=LL, mode=\"all\")\n U2.set_dense_dt(10)\n \n for tt in range(1,time2.length):\n U.calculate_next()\n U2.data[tt,:,:,:,:] = U.data\n \n context.U = U2", "title": "" }, { "docid": "e5e83ed8812e07ae6c70ceae33ec7ddd", "score": "0.54471076", "text": "def add_time_step (self, zeros = False):\n # self..append(copy.deepcopy(self.ald_grid[-1]))\n # self.pl_grid.append(copy.deepcopy(self.pl_grid[-1]))\n \n self.config['timestep'] += 1\n self.grids[self.config['timestep'], : ] = self.grids[self.config['timestep'] - 1, : ] \n if zeros:\n self.grids[self.config['timestep'], : ] = 0", "title": "" }, { "docid": "ba6fa5d67da7c8052eee4fd2eb4beadb", "score": "0.5443192", "text": "def test_s(self):\n q = quantity.Time(1.0,\"s\")\n self.assertAlmostEqual(q.value, 1.0, 6)\n self.assertAlmostEqual(q.value_si, 1.0, delta=1e-6)\n self.assertEqual(q.units, \"s\")", "title": "" }, { "docid": "bf404c45f571a83bcd95362db5b7dad4", "score": "0.5440967", "text": "def advance(self, step: float):\n pass", "title": "" }, { "docid": "392f4682c231387765e1e04e70290170", "score": "0.5438822", "text": "def test__auto_stage(self, mock_time, mock_conn):\n capcom = Launcher(target_altitude=10)\n vessel = 
mock_conn().space_center.active_vessel\n control = vessel.control\n\n VALUES = [[95, False],\n [89, True],\n [50, True],\n [25, True], ]\n\n for new_thrust, calls_made in VALUES:\n with self.subTest(f'thrust_ratio: {new_thrust}%'):\n mock_conn().reset_mock()\n mock_time.reset_mock()\n vessel.available_thrust = new_thrust\n self.assertEqual(capcom._auto_stage(100), new_thrust)\n if calls_made:\n control.activate_next_stage.assert_called_once_with()\n mock_time.sleep.assert_has_calls([call(0.1), call(0.1)])\n else:\n mock_time.sleep.assert_not_called()", "title": "" }, { "docid": "ac4d7ea541711dda2eef1164a272ff0b", "score": "0.5436809", "text": "def test_06_DuskPlus(self):\n l_time = 'dusk + 00:21'\n l_time, l_seconds = TimeField()._rise_set(l_time, self.m_riseset)\n # print('D1-06-A - Seconds {} - \"{}\"\\n'.format(l_seconds, l_time))\n self.assertEqual(l_seconds, (20 * 60 + 58) * 60 + 30)", "title": "" }, { "docid": "ecc33d472e5680452f8ecf4672f139e3", "score": "0.54353464", "text": "def timestep(self, value):\n if value <= self._max_timestep:\n self.timestep = value\n else:\n message = 'The timestep for suite {0} cannot be set greater than the max_timestep of {1}'.format(self._name, self._max_timestep)\n raise Exception(message)", "title": "" }, { "docid": "849485d1a7443225c1a6057570e47787", "score": "0.5434748", "text": "def next_test_time1(self, next_test_time1):\n \n self._next_test_time1 = next_test_time1", "title": "" }, { "docid": "0a2edec5e02e23f9f6251e61ec4dce96", "score": "0.54231876", "text": "def secondstep(x: int) -> int:\n global points\n if x < 6:\n points = points + 1\n else:\n if x == CONSTANT:\n points = points + 3\n else:\n points = points + 2\n return(points)", "title": "" }, { "docid": "fe4289ec60e63601c8df626b339a7860", "score": "0.5422475", "text": "def step_when_14(context):\n # define and calculate evolution superoperator\n time2 = context.time2\n LL = context.L\n HH = context.H\n \n with qr.eigenbasis_of(HH):\n U = qr.qm.EvolutionSuperOperator(time2, ham=HH, relt=LL, mode=\"jit\")\n U.set_dense_dt(10)\n \n for tt in range(1,time2.length):\n U.calculate_next(save=True)\n \n context.U = U", "title": "" }, { "docid": "ca5a03fe1bd7bb318ebbe3ef856cfd0d", "score": "0.5415081", "text": "def current_step_test(simtime, simdt, plotdt):\n model = moose.Neutral('/model')\n comp = create_1comp_neuron('/model/neuron')\n stim = moose.PulseGen('/model/stimulus')\n stim.delay[0] = 20e-3\n stim.level[0] = 1e-9\n stim.width[0] = 40e-3\n stim.delay[1] = 1e9\n moose.connect(stim, 'output', comp, 'injectMsg')\n data = moose.Neutral('/data')\n current_tab = moose.Table('/data/current')\n moose.connect(current_tab, 'requestOut', stim, 'getOutputValue')\n vm_tab = moose.Table('/data/Vm')\n moose.connect(vm_tab, 'requestOut', comp, 'getVm')\n for i in range(10):\n moose.setClock(i, simdt)\n moose.setClock(8, plotdt)\n moose.reinit()\n moose.start(simtime)\n ts = np.linspace(0, simtime, len(vm_tab.vector))\n return ts, current_tab.vector, vm_tab.vector,", "title": "" }, { "docid": "55dc6727088edca89a12c4bc5e38c6b5", "score": "0.5410559", "text": "def step(self):\n if self.model.system == 'Fixed time':\n self.fixed_timer()\n if self.model.system == 'Flow based':\n times = self.model.calculate_timer()\n self.flow_based_timer(times)\n if self.model.system == 'Demand based':\n self.calculate_demand()\n self.demand_based_timer()", "title": "" }, { "docid": "dbecfe6f2abe74028ad4f545c794145d", "score": "0.5410346", "text": "def test_hr(self):\n q = quantity.Time(1.0,\"hr\")\n 
self.assertAlmostEqual(q.value, 1.0, 6)\n self.assertAlmostEqual(q.value_si, 3600.0, delta=1e-6)\n self.assertEqual(q.units, \"hr\")", "title": "" }, { "docid": "847a95559de3c0e98ba7fbb87641b26c", "score": "0.540858", "text": "def advance(self, delta):\n self.current_time += delta", "title": "" }, { "docid": "19d0b38c0d584563db429523a6f759a3", "score": "0.5402186", "text": "def next_time_step(self):\r\n\r\n super().next_time_step()\r\n self.__electricity_consumption.append(0.0)", "title": "" }, { "docid": "3a4ee9fc8a8654fb80820edbaa63484b", "score": "0.5392162", "text": "def test_update_measurements_agents(self):\n self.s.measurement_fct=MeasurementGenTesting()\n self.s.step()\n ms=[a.current_state[\"perception\"] for a in self.s.schedule.agents]\n self.assertTrue(all([m[\"value\"]==0 for m in ms]))\n self.s.step()\n self.s.step()\n ms=[a.current_state[\"perception\"] for a in self.s.schedule.agents]\n self.assertTrue(all([m[\"value\"]==2 for m in ms]))", "title": "" }, { "docid": "a2aa7013942c4d3acb768b33656f106c", "score": "0.538775", "text": "def test_time_array(self):\n\n for index, result in enumerate(trajectory_optimization_results):\n time_array = result[2]\n previous = time_array[0]\n for j in time_array[1:]:\n self.assertTrue(j > previous, \n \"The time_array must be monotonicly increasing in time\")\n previous = j\n\n self.assertTrue(time_array[-1] >= self.minimum_time, \"Time was too short\")\n self.assertTrue(time_array[-1] <= self.maximum_time, \"Time was too long\")", "title": "" }, { "docid": "a8fdee4e9f87816a3e54c4292b8bb05e", "score": "0.53861445", "text": "def _test_self(self,go_started):\n\n if self.temporal_variables & TIME_SEC_ABSOLUTE:\n self.time_sec_absolute = -1.0\n if self.temporal_variables & FRAMES_ABSOLUTE:\n self.frames_absolute = -1\n\n if go_started:\n if not (self.eval_frequency & NOT_DURING_GO):\n if self.temporal_variables & TIME_SEC_SINCE_GO:\n self.time_sec_since_go = -1.0\n if self.temporal_variables & FRAMES_SINCE_GO:\n self.frames_since_go = -1\n return self.during_go_eval()\n else:\n if not (self.eval_frequency & NOT_BETWEEN_GO):\n if self.temporal_variables & TIME_SEC_SINCE_GO:\n self.time_sec_since_go = None\n if self.temporal_variables & FRAMES_SINCE_GO:\n self.frames_since_go = None\n return self.between_go_eval()", "title": "" }, { "docid": "1540b88ab29d2321c269c45e2692cad7", "score": "0.53852063", "text": "def _update_times(self):\n self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))\n self._times.flags.writeable = False", "title": "" }, { "docid": "eb3d5e5ba0f95e8e5aa7a948543d0ee4", "score": "0.53736806", "text": "def observation_in_time(self):", "title": "" } ]
f7246852e12b56d8f4f1b9b13c43776f
Add a client to the block list.
[ { "docid": "54876cdd5ba23390cdbba72baed894dd", "score": "0.0", "text": "def block_client(self, mac):\n\n self._mac_cmd(mac, 'block-sta')", "title": "" } ]
[ { "docid": "59f946378fcf9476230d213bfc604000", "score": "0.77596116", "text": "def add_client(self, client: socket.socket) -> None:\n self._clients.append(client)", "title": "" }, { "docid": "e1384726375c262471690e2be48147fa", "score": "0.7421137", "text": "def register(self, client):\n self.clients.append(client)", "title": "" }, { "docid": "f12882b570184bad301cce6c262b3641", "score": "0.7261749", "text": "def AddClient(self, client):\n self.index_manager.AddValue(client)", "title": "" }, { "docid": "3763eb97dee5d2c82788c02f21f8434f", "score": "0.7251518", "text": "def add(self, client):\n #memoreaza client folosind repository\n self.__repo.add(client)", "title": "" }, { "docid": "32d807e7a371edfd713551592962282c", "score": "0.72344816", "text": "def add(self, client: IoTClient):\n if not isinstance(client, IoTClient):\n raise TypeError(\"client must be an instance of IoTClient\")\n\n # add the client to the client list\n self.__clients[client.id] = client\n\n # call the on_connect handler\n self.__on_connect_handlers(client)", "title": "" }, { "docid": "745232297e8de4a64a438fe641f89527", "score": "0.6698022", "text": "def registerAccountClient(self, client):\n if not client in self.clients:\n self.clients.append(client)", "title": "" }, { "docid": "12103618b222f2a9dd403baa7e011c35", "score": "0.6520009", "text": "def add_new_client(self, conn: Any, addr: Tuple[str, int]):\n self._session_actor.inbox.put({\"addr\": addr, \"conn\": conn})", "title": "" }, { "docid": "d971575eb6bd43e423d0bd7d33406595", "score": "0.63778263", "text": "def add_block(self, block):\n\t\tself.blocks.append(block)", "title": "" }, { "docid": "d77dffa4daa8b17d6e21828371f95779", "score": "0.63413894", "text": "def addBlock( self, block ):\n \n self.blocks.append( block )", "title": "" }, { "docid": "5f08249688b2077679db752e585ca193", "score": "0.63249004", "text": "def add_client(self, wan_optimizer, client_address):\n self.__wan_optimizer_to_clients[wan_optimizer].append(client_address)", "title": "" }, { "docid": "6b446bccf659e8cb1d875b07d733583a", "score": "0.6231892", "text": "def registerAccountClient(self, client):\n print \"signing onto\", client.accountName\n self.onlineClients.append(client)\n self.contactsList.registerAccountClient(client)\n return client", "title": "" }, { "docid": "01517995434df4d1582ac218818f78f4", "score": "0.61347955", "text": "def add_paired_client(self, client_uuid, client_public):\n self.paired_clients[client_uuid] = client_public", "title": "" }, { "docid": "8980382086952cb48d03bb461d07adc9", "score": "0.612988", "text": "def register(self, client):\n self.clients[client.peer] = {\"object\": client, \"partner\": None}", "title": "" }, { "docid": "e4fbc5dc98871a49386472d445d0e1db", "score": "0.6125557", "text": "def add_block_entities(controller, async_add_entities, clients):\n switches = []\n\n for mac in controller.option_block_clients:\n if mac in controller.entities[DOMAIN][BLOCK_SWITCH] or mac not in clients:\n continue\n\n client = controller.api.clients[mac]\n switches.append(UniFiBlockClientSwitch(client, controller))\n\n if switches:\n async_add_entities(switches)", "title": "" }, { "docid": "255d0924095b280b5fedd8f2938718d6", "score": "0.6115384", "text": "async def add_client(self):\n \n auth = httpx.DigestAuth(self.userid, self.password)\n self.clients[self.host] = httpx.AsyncClient(\n auth=auth,\n base_url=self.base_url,\n headers={},\n )", "title": "" }, { "docid": "67881b7b5cbf4caaa1611279b062e547", "score": "0.6073293", "text": "def Added(client, event):\n\n # 
Prevent anyone from adding a Client inside of a Client\n client.manage_permission(AddClient, [], 0)\n\n # Create client/* folders and set permissions\n # -------------------------------------------\n for x in [\n ['samples', 'Samples'],\n ['analysisrequests', 'Analysis Requests'],\n ['configuration', 'Configuration'],\n ]:\n instance = api.content.create(client, 'Folder', x[0], x[1])\n disallow_default_contenttypes(instance)\n\n mp = client.samples.manage_permission\n mp(AddSample, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)\n mp(ModifyPortalContent, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)\n\n mp = client.analysisrequests.manage_permission\n mp(AddAnalysisRequest, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)\n mp(ModifyPortalContent, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)\n\n # Create client/configuration/* and set permissions\n # -------------------------------------------------\n folder = client.configuration\n for x in [\n ['contacts', 'Contacts'],\n ['samplepoints', 'Sample Points'],\n ]:\n instance = api.content.create(folder, 'Folder', x[0], x[1])\n\n mp = folder.contacts.manage_permission\n mp(AddContact, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)\n mp(ModifyPortalContent, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)\n\n mp = folder.samplepoints.manage_permission\n mp(AddSamplePoint, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)\n mp(ModifyPortalContent, ['Manager', 'LabManager', 'LabClerk', 'Owner'], 0)", "title": "" }, { "docid": "107eea8ec869a09108178acfca86fb97", "score": "0.6061662", "text": "def addCl(self,cl=None):\n if cl:\n self.clists.append(cl)", "title": "" }, { "docid": "c8d673a8b33e7dcc499d3e396e56b84d", "score": "0.5999908", "text": "def addClient(self, callable, filter=None):\n self.clients[callable] = filter\n self.updateClients()", "title": "" }, { "docid": "cdc041c27d26e47245f5ffdaa4130bdc", "score": "0.5958317", "text": "def __init__(self):\n \n self.client_list = []", "title": "" }, { "docid": "426f5670327218fb0ac15b66156a8ba5", "score": "0.58794713", "text": "def add_block(self, block: Block) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "31d87372bc37714909e4d3aa0472fb51", "score": "0.584763", "text": "def add_client(user_name, user_surname, user_age, bank_name, banks_dict):\n number_id = get_id_number(bank_name, banks)\n new_client = {\"name\": user_name, \"surname\": user_surname, \"age\": user_age, \"ID\": number_id, \"funds\": 0, \"loan\": 0}\n banks_dict[bank_name].append(new_client)\n return number_id", "title": "" }, { "docid": "9c6e3f6528df0a3263a617d44022f36b", "score": "0.5821729", "text": "def registerClient():", "title": "" }, { "docid": "b8909ba9c05edd87dd099ea399ef2435", "score": "0.58012664", "text": "def add(self,\n new_block: Block\n ) -> None:\n\n self.chain.append(new_block)", "title": "" }, { "docid": "aceff90f248e32f87b2937e7f8fdd621", "score": "0.5783249", "text": "def updateClients(self):\n self.clientItems = list(self.clients.items())", "title": "" }, { "docid": "4e37400d9a54bceb3677a3839e4ff719", "score": "0.57489556", "text": "def add_block(self, block):\n block.block_header.set_hash_prev_block(self.blocks[-1].get_hash())\n self.blocks.append(block)", "title": "" }, { "docid": "a4a9c83e2ff5dea811b3b03501c0bef5", "score": "0.5707269", "text": "def add_client(self, client_id, client_secret, redirect_uris,\n authorized_grants=None, authorized_response_types=None):\n self.write(client_id,\n {\"identifier\": client_id,\n \"secret\": client_secret,\n \"redirect_uris\": 
redirect_uris,\n \"authorized_grants\": authorized_grants,\n \"authorized_response_types\": authorized_response_types})\n\n return True", "title": "" }, { "docid": "e517d393eacae1a32c5276730b3b7103", "score": "0.5706276", "text": "def add(self, item):\n\n\t\tself.items.append(item)", "title": "" }, { "docid": "626b3cdcbd24a74917b8df4927a6724a", "score": "0.56934667", "text": "def register_client (self, client):\n\n self._clients[client._fileno] = client\n\n if not client._is_blocking:\n self._event_manager.register(client._fileno, client._events)\n\n if self._is_long_running and not client._is_channel and not client._is_host:\n # we're serving long-running requests so we must unregister the host filenos so no more clients can use\n # this process until the current client is finished\n for host in self._hosts:\n self._event_manager.unregister(host._fileno)", "title": "" }, { "docid": "c4d9ecde6f6f4e98dc4ca009bf64cae5", "score": "0.5689533", "text": "def __init__(self):\n self.clients = []", "title": "" }, { "docid": "02308efc66713f03e56b1cd42fe8207d", "score": "0.56512237", "text": "def addItem(self, item):\n self.items.append(item)", "title": "" }, { "docid": "66626fa13dffdea7751648416c5ae685", "score": "0.5641339", "text": "def add_client_to_ss_by_hand(self, client, check_send_errors=False, ssh_host=None,\n ssh_user=None, ssh_pass=None, sec_1_host=None, sec_1_user=None, sec_1_pass=None,\n management_wsdl_url=None, management_client_id=None):\n # UC MEMBER_47 1. Start adding the client\n self.log('MEMBER_47 1. Start adding the client')\n self.wait_until_visible(type=By.ID, element=clients_table_vm.ADD_CLIENT_BTN_ID).click()\n\n # UC MEMBER_47 2. Set the class, code, subsystem\n client_class = client['class']\n self.log('MEMBER_47 2. Set the class to {0}'.format(client_class))\n select = Select(self.wait_until_visible(type=By.ID, element=popups.ADD_CLIENT_POPUP_MEMBER_CLASS_DROPDOWN_ID))\n select.select_by_visible_text(client_class)\n\n client_code = client['code']\n self.log('MEMBER_47 2. Set the member code to {0}'.format(client_code))\n input_code = self.wait_until_visible(type=By.ID, element=popups.ADD_CLIENT_POPUP_MEMBER_CODE_AREA_ID)\n self.input(input_code, client_code)\n\n client_subsystem_code = client['subsystem_code']\n self.log('MEMBER_47 2. Set the subsystem code to {0}'.format(client_subsystem_code))\n subsystem_input = self.wait_until_visible(type=By.XPATH,\n element=popups.ADD_CLIENT_POPUP_SUBSYSTEM_CODE_AREA_XPATH)\n self.input(subsystem_input, client_subsystem_code)\n\n self.wait_jquery()\n if ssh_host is not None:\n log_checker = auditchecker.AuditChecker(ssh_host, ssh_user, ssh_pass)\n current_log_lines = log_checker.get_line_count()\n\n # Try to save the client\n self.log('MEMBER_47 4, 5. Click \"OK\". System verifies and saves the client.')\n self.wait_until_visible(type=By.XPATH, element=popups.ADD_CLIENT_POPUP_OK_BTN_XPATH).click()\n self.wait_jquery()\n if check_send_errors:\n new_driver = None\n old_driver = self.driver\n\n self.log('MEMBER_48 4a. 
sending of the registration request failed')\n try:\n self.reset_webdriver(sec_1_host, sec_1_user, sec_1_pass, close_previous=False)\n disable_management_wsdl(self, management_client_id, management_wsdl_url)()\n new_driver = self.driver\n self.driver = old_driver\n # Continue warning popup when visible\n self.wait_until_visible(type=By.XPATH, element=popups.WARNING_POPUP_CONTINUE_XPATH).click()\n self.wait_jquery()\n popups.confirm_dialog_click(self)\n\n self.log('MEMBER_48 4a.1 System displays the error message: '\n '\"Failed to send registration request: X\", where X is description of the error')\n # Wait until error message is visible\n error_msg = self.wait_until_visible(type=By.CSS_SELECTOR, element=messages.ERROR_MESSAGE_CSS,\n timeout=60).text\n # Check if error message is as expected\n self.is_true(re.match(REGISTRATION_REQUEST_SENDING_FAILED, error_msg))\n\n self.log('MEMBER_48 4a.2 System logs the event \"Register client failed\" to the audit log')\n # Check if \"Register client failed\" is in audit log\n logs_found = log_checker.check_log(log_constants.REGISTER_CLIENT_FAILED,\n from_line=current_log_lines + 1)\n self.is_true(logs_found, msg='\"Register client failed\" event not found in log\"')\n finally:\n self.driver = new_driver\n self.reload_webdriver(sec_1_host, sec_1_user, sec_1_pass)\n enable_management_wsdl(self, management_client_id, management_wsdl_url)()\n self.tearDown()\n self.driver = old_driver\n # Removing added client\n remove_client(self=self, client=client)\n return\n\n warning = self.wait_until_visible(type=By.ID, element=popups.CONFIRM_POPUP_TEXT_AREA_ID).text\n self.is_true(warning in get_expected_warning_messages(client))\n\n # UC MEMBER_48 1. Register the client by confirming the registration popup\n self.log('MEMBER_48 1. Register the client by confirming the registration popup')\n popups.confirm_dialog_click(self)\n self.wait_jquery()\n\n # UC MEMBER_48 2-5 System verifies existing subsystem, creates and sends SOAP request, receives success response\n self.log(\n 'MEMBER_48 2-5 System verifies existing subsystem, creates and sends SOAP request, receives success response')\n\n # Try to find the client in client list and check the status\n self.log(\n 'MEMBER_47 6 / MEMBER_48 6. Verify that the client has been added and check that the status is {0}.'.format(\n 'registration in progress'))\n status_title = added_client_row(self, client).find_element_by_class_name('status').get_attribute('title')\n self.log('Status title: {0}'.format(status_title))\n if status_title.lower() == clients_table_vm.CLIENT_STATUS_SAVED:\n # Something is wrong, status should be \"registration in progress\". Set the exception to be raised\n # later but go on with the current test.\n self.log('MEMBER_47 6. WARNING: status should be \"registration in progress\" but is \"saved\"')\n self.log('MEMBER_47 6. WARNING: CONTINUING TEST WITH STATUS \"saved\"')\n status_title = 'registration in progress'\n self.exception = True\n\n self.is_equal(status_title, 'registration in progress', test_name,\n 'MEMBER_48 6. TITLE NOT CORRECT: {0}'.format(status_title),\n 'MEMBER_48 6. EXPECTED MESSAGE: {0}'.format('registration in progress')\n )\n\n if log_checker is not None:\n # UC MEMBER_48 7. System logs the event \"Register client\" to the audit log\n self.log('MEMBER_48 7. 
System logs the event \"Register client\" to the audit log')\n time.sleep(1.5)\n logs_found = log_checker.check_log(log_constants.REGISTER_CLIENT,\n from_line=current_log_lines + 1)\n self.is_true(logs_found, msg='\"Register client\" event not found in log\"')", "title": "" }, { "docid": "4e9fc0a227ff82d55f208099b1b32d1a", "score": "0.56267583", "text": "def open(self):\n logging.info('Client Connected')\n if self.id not in _clients:\n _clients[self.id] = self", "title": "" }, { "docid": "e3899ce03fefe57d1ff7d37a44b33f5d", "score": "0.5626066", "text": "def add_item(self, new_item):\n self._private_list_.append(new_item)", "title": "" }, { "docid": "5e4161146c54b4100e01e6613dae11f9", "score": "0.56037384", "text": "def add(self, item):\n self._items.append(item)", "title": "" }, { "docid": "8b632b7ff2f400495182a3363fd4b773", "score": "0.5575148", "text": "def add_item(self, item):\n self.data['items'].append(item)", "title": "" }, { "docid": "9da1cdcb4955e2fa1dcec1ad42c37c88", "score": "0.55528104", "text": "def add_ip_block(self, block):\n raise NotImplementedError()", "title": "" }, { "docid": "6536a83e45c449534c2448c7894d2c26", "score": "0.55241716", "text": "def add_client(self, url, max_queue_size=50000, batch_size=10000,\n flush_interval_seconds=5, enable_internal_metrics=True,\n queue_impl=queue.Queue):\n server, token = self.get_server_info_from_endpoint(url)\n\n if self.existing_client(server):\n raise RuntimeError(\"client with id \" + url + \" already exists.\")\n\n client = WavefrontClient(\n server=server,\n token=token,\n max_queue_size=max_queue_size,\n batch_size=batch_size,\n flush_interval_seconds=flush_interval_seconds,\n enable_internal_metrics=enable_internal_metrics,\n queue_impl=queue_impl,\n )\n self.clients.append(client)", "title": "" }, { "docid": "a87713838ca6fd29ae43ff9f67bce343", "score": "0.54989386", "text": "def add_block(self):\n block = Block(len(self))\n self.append(block)\n return block", "title": "" }, { "docid": "dd49c973629644b749c4cb9e73a4e3c5", "score": "0.5480726", "text": "def create_clients(self):\n self.wrk = control.Wrk()\n self.wrk.set_script(\"foo\", content=\"\")\n self.clients = [self.wrk]", "title": "" }, { "docid": "f1220a61a656253ca3a54e1ca99240e1", "score": "0.5476566", "text": "def attach(self, block_in):\n self.blocks.append(block_in)", "title": "" }, { "docid": "171319b6b6cbeddef680a74b4e9422f3", "score": "0.5463883", "text": "def add_block(self, block):\n # validate block is valid\n if not self.validate_block(block):\n raise errors.BlockIsInvalid\n\n self.chain[block.user] = block", "title": "" }, { "docid": "58bbb41702b0c44fac3929b8365fb2a0", "score": "0.5458044", "text": "def cb_client_connected(self, client):\n pass", "title": "" }, { "docid": "14f71cb33a2d93d57bc9ecff84082840", "score": "0.5454138", "text": "def register_message_receiver(self, client):\n\n\t\tif client is None:\n\t\t\treturn\n\t\tself.registered_clients.append(client)", "title": "" }, { "docid": "faac6f8ffb2bdfaa63ca658b549cef13", "score": "0.5452201", "text": "def add_block(self, id_vip, id_block, override):\n\n url = 'vip/add_block/' + \\\n str(id_vip) + '/' + str(id_block) + '/' + str(override)\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)", "title": "" }, { "docid": "9cc7c5b54c41192eecb8e9703187fc45", "score": "0.54349095", "text": "def add(self, items):\n pass", "title": "" }, { "docid": "5396eb4f05129af40d7b06a156cdad43", "score": "0.5426118", "text": "def added_client_row(self, client):\n self.added_client_id = ' : 
'.join(\n [SYSTEM_TYPE, ssh_server_actions.get_server_name(self), client['class'], client['code'],\n client['subsystem_code']])\n self.log('Finding added client: '.format(self.added_client_id))\n table_rows = self.by_css(clients_table_vm.CLIENT_ROW_CSS, multiple=True)\n client_row_index = clients_table_vm.find_row_by_client(table_rows, client_id=self.added_client_id)\n if client_row_index is not None:\n return table_rows[client_row_index]\n return None", "title": "" }, { "docid": "fad63210fa8d6bd341bf76875a71c96f", "score": "0.542435", "text": "def _add_user(self, user: str, conn: socket.socket) -> None:\n self.members.add(user)\n self.clients[user] = conn", "title": "" }, { "docid": "9ca6e18ba56d35172db45790e7d52184", "score": "0.5410622", "text": "def llenar_lista_cliente(self):\n clientes = self.basedatos.obtenerCliente()\n\n if clientes:\n for cliente in clientes:\n self.lista_clientes.addItem(\n \"{0} -- {1} -- {2} -- {3} -- {4} -- {5} -- {6} \"\n .format(cliente[0], cliente[1], cliente[2], cliente[3],\n cliente[4], cliente[5], cliente[6]))", "title": "" }, { "docid": "fe7e2deda6f23e668a0e118cd6b19b96", "score": "0.540829", "text": "def clients(self, clients):\n\n self._clients = clients", "title": "" }, { "docid": "343f53861eb2f8935e1132237909145a", "score": "0.5402406", "text": "def add_item(self, item: Item):\n self.items.append(item)", "title": "" }, { "docid": "4ee872a0aebbe919e3ae764ced6e4b2e", "score": "0.5400771", "text": "def new_client() -> Client:\n client = Client(name=\"\", address=\"\", postal_code=\"\", city=\"\", country=\"\")\n edit_client(client, True)\n return client", "title": "" }, { "docid": "dc2a1237d16b78581f27a0f12ccdeea0", "score": "0.53716797", "text": "def connect_clients(self, new_client):\n\n # Store the client\n\n self.master.clients[new_client.id] = new_client\n\n # Connect each client\n\n msg1 = MSG_CONNECT(new_client.id, new_client.name, new_client.hostname, new_client.port, new_client.is_dummy)\n\n for client in list(self.master.clients.values()):\n\n # Tell other clients about the new connection\n\n if client.connected:\n\n client.send(msg1)\n\n # Tell the new client about other clients\n\n if client != self.client_info:\n\n msg2 = MSG_CONNECT(client.id, client.name, client.hostname, client.port, client.is_dummy)\n\n new_client.send(msg2)\n\n return", "title": "" }, { "docid": "628f5e3828ce723470e2fe3d4a6d1b1a", "score": "0.53673327", "text": "def add(self, h, handle, new_list=False, unique=False):\n record_id, data = self._find(h)\n if record_id is None:\n handles = [handle] if not new_list else handle\n self.block.add(self._marshal(h, handles))\n else:\n if unique:\n raise ValueError('duplicate entry')\n handles = self._unmarshal(data, just_handles=True)\n handles.append(handle)\n self.block.put(record_id, self._marshal(h, handles))", "title": "" }, { "docid": "61370674345c7336d8212396a7782806", "score": "0.53640395", "text": "def add_peer(self, ip):\n self.log.info(\"adding peer %i\", ip)\n\n ip_string = ip_int_to_string(ip)\n\n self.log.debug(\"trying to connect to peer at IP: %s and port %i\",\n ip_string, self.client_port)\n\n # create a socket for my new peer\n peer_socket = self.z_context.socket(zmq.PUSH)\n peer_socket.connect(\"tcp://%s:%i\" % (ip_string, self.peer_port))\n\n # add socket to poller\n self.poller.register(peer_socket, zmq.POLLOUT)\n\n # store off new peer\n self.peers[ip] = {\"ip_address\": ip,\n \"ip_string\": ip_string,\n \"socket\": peer_socket}\n\n peer_addresses = self.list_peers()\n\n self.log.debug(\"list of peers: 
%s\", peer_addresses)\n\n # send a Hello message to the new client\n self.send_hello(self.peers[ip])\n\n return", "title": "" }, { "docid": "d649355ee9b62afdc52913207b035e74", "score": "0.53601736", "text": "def append_item(self,item):\n self.items.append(item)", "title": "" }, { "docid": "a9447fcaa28b9da9b3a8347c8b05cde7", "score": "0.53584146", "text": "def attached(self, client, identity):\n self.clients[client] = identity\n\n host = ':'.join(map(str, client.broker.transport.getHost()[1:]))\n\n msg = self.service.welcomeMessage % {\n 'you': getattr(identity, 'name', str(identity)),\n 'host': host,\n 'longversion': copyright.longversion,\n }\n\n client.callRemote('console', [(\"stdout\", msg)])\n\n client.capabilities = _defaultCapabilities\n client.callRemote('listCapabilities').addCallbacks(\n self._cbClientCapable, self._ebClientCapable,\n callbackArgs=(client,),errbackArgs=(client,))", "title": "" }, { "docid": "684d07f25827ed463a31f858c10603b4", "score": "0.53481865", "text": "def announce_new_block(block):\n\n for peer in peers:\n url = f'http://{peer}/add_block'\n requests.post(url, data=json.dumps(block.__dict__, sort_keys=True)))", "title": "" }, { "docid": "942e7422b6b25215ecbab221cb83a1d2", "score": "0.53474665", "text": "def add_broker(client, logger, name, address, port=None, networks=None):\n check_broker_exists(client.manager.get_brokers().items, name,\n must_exist=False)\n\n client.manager.add_broker(name, address, port, networks)\n\n logger.info('Broker {0} was added successfully!'\n .format(name))", "title": "" }, { "docid": "784d312b922200c8c7b7bd969543a014", "score": "0.5347447", "text": "def append(self, item):\n _check_layer(item, error=True)\n\n self._list.append(item)\n self.events.add_item(item=item, index=len(self)-1)\n self.total = self.total+1", "title": "" }, { "docid": "3b6266f70f6a9d467a953a0f14754b12", "score": "0.5345409", "text": "def addInstance(self):\n self.numOrderedRequests.append((0, 0))\n self.clientAvgReqLatencies.append({})\n self.instanceStarted.append(time.perf_counter())", "title": "" }, { "docid": "5cfc46954b76784c2d156acb4a2b5a6e", "score": "0.5325812", "text": "def add_regional_clients_from_list(self, region_names):\n for region_name in region_names:\n self.add_regional_client(region_name)", "title": "" }, { "docid": "3b11bf228ae806603eb09fb8b385d973", "score": "0.5319278", "text": "def add_item(self, item):\n self.__items.append(item)\n item.connect(\"destroyed\", self.remove_item)\n self.emit(\"item-added\", item)", "title": "" }, { "docid": "23cf8e56cb5cb7bc6b93d0298b61d234", "score": "0.5304122", "text": "def register_client(self):\n addr = request.form['address']\n self.manager.register_client(addr)\n return \"success\"", "title": "" }, { "docid": "acda76d0c92c22c1b269dd717cc32867", "score": "0.5298383", "text": "def add_peer(req_ip):\n global peer_list\n\n peer_list.append(req_ip)\n\n data = {\n \"response\" : \"ADDED\"\n }\n return json.dumps(data).encode()", "title": "" }, { "docid": "1670b6ef43b3f9b350414f1206d013ad", "score": "0.5297369", "text": "def client_updated(self, client):\n assert client in self.clients\n self._clients_dirty = True", "title": "" }, { "docid": "090ea00dbe2343351f96057a4d413cdf", "score": "0.52964896", "text": "def populate_queue(self, client_length: int) -> None:\n if self._queue is None:\n self._queue = []\n # inserimos os novos clientes a fila\n for _ in range(client_length):\n new_client = Client()\n self._queue.append(new_client)\n # registro do tamanho atual da fila\n self.queue_length_register()", 
"title": "" }, { "docid": "b5b413d585b60294e1b93f7abb798e5b", "score": "0.5283135", "text": "def enqueue(self, item):\n self.lst.append(item)", "title": "" }, { "docid": "2f5520f9d82045edd1f878c0972b9727", "score": "0.527464", "text": "def clients():\n pass", "title": "" }, { "docid": "2f5520f9d82045edd1f878c0972b9727", "score": "0.527464", "text": "def clients():\n pass", "title": "" }, { "docid": "2f5520f9d82045edd1f878c0972b9727", "score": "0.527464", "text": "def clients():\n pass", "title": "" }, { "docid": "900e4fc30e596bbc6fcc26d7bd8aeed0", "score": "0.527228", "text": "def addFront(self, item):\n self.items.append(item)", "title": "" }, { "docid": "4eafeb768def02b9bfdd7bf14f285474", "score": "0.52688843", "text": "def add_block(self, data: list):\n block = Block.mine_block(self.last_block, data)\n self.chain.append(block)\n return block", "title": "" }, { "docid": "164830c980e489f624124bcc43dd1b6c", "score": "0.526642", "text": "def announce_new_block(block):\n for peer in peers:\n url = \"{}/add_block\".format(peer)\n requests.post(url, data=json.dumps(block.__dict__, sort_keys=True))", "title": "" }, { "docid": "4bb685be60fcdcc174d6b0380b49e0db", "score": "0.5261435", "text": "def add_client_data(self, module_name, module_data_pickle):\n\n # Check if this module has already been inserted and modify its name accordingly\n mn = module_name\n module_index = 2\n while module_name in self.client_data:\n module_name = mn + str(module_index)\n module_index += 1\n\n # Add client data to attribute of service\n self.client_data[module_name] = pickle.loads(module_data_pickle)\n return 0", "title": "" }, { "docid": "c753fb6b4e289a982946a9e286fa94c6", "score": "0.5253155", "text": "def handle_connect(self, request):\n logging.info(\"Client %s connected\", request.sid)\n self.client_sids.append(request.sid)", "title": "" }, { "docid": "052128ba9fe82d0fdbbe2d88721a7250", "score": "0.5248263", "text": "async def register_new_client(self, client_socket: socket.socket, client_addr: tuple):\n await self.event_loop.sock_sendall(client_socket, 'Your name: '.encode('utf-8'))\n while True:\n name = await self.event_loop.sock_recv(client_socket, 1024)\n name = name.decode('utf-8')\n clean_name = re.sub('\\W+', '', name)\n names = fetchall('clients', ['name'])\n if clean_name in names:\n await self.event_loop.sock_sendall(client_socket, 'Enter another name'.encode('utf-8'))\n continue\n ip, port = client_addr\n addr = str(ip) + ':' + str(port)\n insert('clients', {'name': clean_name, 'addr': addr})\n await self.event_loop.sock_sendall(client_socket, 'Successfully registered'.encode('utf-8'))\n break", "title": "" }, { "docid": "6c6b7cf52ce54539c9f947f363571999", "score": "0.5246998", "text": "def add_block(self, block):\n try:\n self.validate_next_block(self.blocks[-1], block)\n self.blocks.append(block)\n except HashMismatchError as hash_error:\n raise InvalidChainError(str(hash_error))\n except IndexMismatchError as index_error:\n raise InvalidChainError(str(index_error))", "title": "" }, { "docid": "e3474e51938a189fd0b998048e7f36ce", "score": "0.5243932", "text": "def add(self, *items):\n for item in items:\n if not isinstance(item, Ingredient):\n logger.error(\n \"Object {} is not added to the {}\".format(item.__class__.__name__, self.__class__.__name__))\n assert isinstance(item, Ingredient)\n self.list_ingredients.append(item)\n logger.info(\"Ingredient {} is added to the {}\".format(item.__class__.__name__, self.__class__.__name__))", "title": "" }, { "docid": "0ff4396fd494a29d72035ac62c268139", 
"score": "0.5239952", "text": "def set_client(self, client):\n self.client = client", "title": "" }, { "docid": "5a3da30ca53a16b9f5e04926162f8017", "score": "0.52394164", "text": "def _register_client(self, client, region_name):\n for item in client.meta.method_to_api_mapping:\n method = getattr(client, item)\n wrapped_method = functools.partial(self._wrap_client, region_name, method)\n setattr(client, item, wrapped_method)", "title": "" }, { "docid": "9426c0b6149ba66820b29da4810c5d7f", "score": "0.5235405", "text": "def __init__(self, client, controller):\n super().__init__(client, controller)\n\n self._is_blocked = client.blocked", "title": "" }, { "docid": "0946c6afb479a77a30b5c7987f25388d", "score": "0.5228123", "text": "def _add_to_electrolyte_component_list(self):\n parent = self.parent_block()\n parent.cation_set.add(self.local_name)", "title": "" }, { "docid": "d85273ae62c42c4b06eac52a1f52e7e5", "score": "0.52254707", "text": "def add_block_to_chain(self, block: Block) -> None:\n self.__chain.append(block)", "title": "" }, { "docid": "b5f11c6c0f52e8554a02c8a23f5441f4", "score": "0.52252537", "text": "def add(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\taddresource = vlan()\n\t\t\t\taddresource.id = resource.id\n\t\t\t\taddresource.aliasname = resource.aliasname\n\t\t\t\taddresource.dynamicrouting = resource.dynamicrouting\n\t\t\t\taddresource.ipv6dynamicrouting = resource.ipv6dynamicrouting\n\t\t\t\taddresource.mtu = resource.mtu\n\t\t\t\taddresource.sharing = resource.sharing\n\t\t\t\treturn addresource.add_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\taddresources = [ vlan() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\taddresources[i].id = resource[i].id\n\t\t\t\t\t\taddresources[i].aliasname = resource[i].aliasname\n\t\t\t\t\t\taddresources[i].dynamicrouting = resource[i].dynamicrouting\n\t\t\t\t\t\taddresources[i].ipv6dynamicrouting = resource[i].ipv6dynamicrouting\n\t\t\t\t\t\taddresources[i].mtu = resource[i].mtu\n\t\t\t\t\t\taddresources[i].sharing = resource[i].sharing\n\t\t\t\tresult = cls.add_bulk_request(client, addresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "71e5f92c6352cb688cc1359f809c3797", "score": "0.5213567", "text": "def client(self, **kwargs):\n\n self.utils.console(\"Adding client...\")\n return ClientFactory(**kwargs)", "title": "" }, { "docid": "bd41db1bd5a70db987ee6f8e23e8cb37", "score": "0.5205551", "text": "def add_server(self, *, server_name: str, server_addr: str, **kwargs) -> ServerBlock:\n block = ServerBlock(\n indent_level=self.indent_level + 1, server_name=server_name, server_addr=server_addr, **kwargs\n )\n self.sub_blocks.append(block)\n return block", "title": "" }, { "docid": "9143ff0ebe62ddb1420cab6296c9b260", "score": "0.5191479", "text": "def add(controller: Controller) -> None:\n _controllers.append(controller)", "title": "" }, { "docid": "35471c676deb6b96018bed69d48a6895", "score": "0.51883", "text": "def connect(self, client, client_address):\n port = self.__next_port\n self.__next_port = self.__next_port + 1\n self.address_to_port[client_address] = port\n self.port_to_network_element[port] = client\n\n # Tell the WAN that this client is connected. 
You can think of this as\n # being like a BGP announcement in the real Internet.\n self.port_to_network_element[self.wan_port].add_client(\n self, client_address)", "title": "" }, { "docid": "e27c04e6fff849348600e115cf6cf73c", "score": "0.51842386", "text": "def add_frontend(self, frontend):\n self.frontends.append(frontend)", "title": "" }, { "docid": "cbbbded34a6eeb4c7f3ff9cd512c85b9", "score": "0.5173663", "text": "def add(self):\n pass", "title": "" }, { "docid": "46e077b9470375dc16083ec36a1dd135", "score": "0.51630753", "text": "def client(self):\n ...", "title": "" }, { "docid": "b9058a49c7f980e5d153a621e0be834a", "score": "0.5162922", "text": "def create_client(self, *args, **kwargs):", "title": "" }, { "docid": "72586ae4ffe1e2afde69cfb17451cf47", "score": "0.5156382", "text": "def add(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\taddresource = clusternodegroup()\n\t\t\t\taddresource.name = resource.name\n\t\t\t\taddresource.strict = resource.strict\n\t\t\t\taddresource.sticky = resource.sticky\n\t\t\t\taddresource.state = resource.state\n\t\t\t\taddresource.priority = resource.priority\n\t\t\t\treturn addresource.add_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\taddresources = [ clusternodegroup() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\taddresources[i].name = resource[i].name\n\t\t\t\t\t\taddresources[i].strict = resource[i].strict\n\t\t\t\t\t\taddresources[i].sticky = resource[i].sticky\n\t\t\t\t\t\taddresources[i].state = resource[i].state\n\t\t\t\t\t\taddresources[i].priority = resource[i].priority\n\t\t\t\tresult = cls.add_bulk_request(client, addresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "title": "" }, { "docid": "1000845e2d8a8fc4d4a2262134310ea1", "score": "0.5153636", "text": "def __init__(self, client):\n self._client = client\n self.beds = {}\n self.update()", "title": "" }, { "docid": "66eeca81507dba637b4a650e2b3cee4d", "score": "0.5147056", "text": "def insertar(self, cliente):\n self.fila.append(cliente)\n self.enfila+=1", "title": "" }, { "docid": "66eeca81507dba637b4a650e2b3cee4d", "score": "0.5147056", "text": "def insertar(self, cliente):\n self.fila.append(cliente)\n self.enfila+=1", "title": "" }, { "docid": "d815b87624faa9e72db3448a420b362d", "score": "0.51454425", "text": "def setup_new_client(self):\n # pylint: disable=E1101\n add_client = True\n\n try:\n connection, dummy_address = self.serversocket.accept()\n logging.debug(\n \"Accepted client with fd: %s\", connection.fileno())\n\n connection.setblocking(0)\n except IOError as error:\n logging.error(\"Error accepting client connection. (error: %s)\",\n errno.errorcode[error.errno])\n add_client = False\n\n try:\n self.epoll.register(connection.fileno(), select.EPOLLIN)\n except select.error as error:\n if (error.errno == errno.ENOSPC or\n error.errno == errno.ENOMEM):\n logging.warning(\n \"Cannot add more clients to epoll instance, \"\n \"dropping client. (error: %s)\",\n errno.errorcode[error.errno])\n connection.shutdown()\n connection.close()\n add_client = False\n elif error.errno == errno.EEXIST:\n logging.warning(\n \"Client already registered to epoll. (error: %s)\",\n errno.errorcode[error.errno])\n else:\n raise WesEntropyException(\"Error in epoll. 
(error: %s)\" %\n errno.errorcode[error.errno])\n\n # If there wasn't an error, add client to connections\n if add_client:\n self.connections[connection.fileno()] = connection\n logging.debug(\"Set up new client: %i\", connection.fileno())\n self.requests[connection.fileno()] = {'bytes_req' : 0,\n 'partial_req' : \"\",\n 'send_stats' : False}\n # pylint: enable=E1101", "title": "" }, { "docid": "a519cd2499d44dd2e095bd9f7e5703a9", "score": "0.514375", "text": "def initClient(self, client, addr):\r\n\r\n client.client_id = addr.port\r\n client.host = addr.host\r\n\r\n client.server_status = self.server_status\r\n client.chunks_amount = None\r\n client.chunks = None\r\n client.data = ''\r\n\r\n\r\n client.mode = None\r\n client.game_type = None\r\n client.max_score = 0\r\n client.best_item = None\r\n client.top = []\r\n client.best_item_place = None\r\n\r\n\r\n log_msg = 'class Leadreboards, client %s initialized' % (client.client_id,)\r\n log.msg(log_msg)\r\n\r\n # Add new client to global dictionary\r\n self.clients[client.client_id] = client\r\n\r\n return client", "title": "" }, { "docid": "f9af26a02aaa0865aaa8171a575e9b4c", "score": "0.5135487", "text": "def agregar_cliente(self):\n self.llamar = AgregarCliente()\n self.close()", "title": "" }, { "docid": "57eb5e70352fa302d3d1f718cfbfcc4a", "score": "0.51305807", "text": "def list_added_blocks(self):\n raise NotImplementedError()", "title": "" }, { "docid": "97ad4381d4f18bc7e909e61d175d7b4a", "score": "0.5128211", "text": "def handle_client(self, client_id, conf):\n client = monoclient.Client(client_id, conf)\n # some debug from the server\n print(f\"[NEW CONNECTION] {client.conf.addr} connected.\")\n # append the connection to the clients\n self.clients.append(client)\n connected = True # 1 if the client is still connected\n while connected:\n client.send(input(\">>> \"), self.dcf)\n client_data = client.recv(self.bites, self.dcf)\n print(f\"{client.conf.__repr__()}: {client_data}\")", "title": "" } ]
2e9296b5bd5a7a3a9fb4203aac2f90f1
Compute precision and recall at different score thresholds
[ { "docid": "69bd9b243c1d4b4825931a395827e329", "score": "0.0", "text": "def rp_various_th(gt_pds, ths, etol, metric=azimuth_distance):\n pr = []\n for t in ths:\n pdf = pds_to_pdf(gt_pds, t)\n _, r, p, _, _ = eval_recall_precision(pdf, [etol], metric=metric)\n pr.append((r[0], p[0]))\n return pr", "title": "" } ]
[ { "docid": "7f9a8dc1f92914411102b0d0628a5bb2", "score": "0.78416824", "text": "def precision_at_recall(scores, labels, target_recall):\n positive_scores = scores[labels == 1.0]\n threshold = np.percentile(positive_scores, 100 - target_recall*100)\n predicted = scores >= threshold\n return precision_score(labels, predicted)", "title": "" }, { "docid": "0f55aba8f5aa41e9d5ba16ed3bbd0008", "score": "0.7492272", "text": "def precision_recall_scores(precis, recall, thresholds, pos_thresh=0.5):\n assert np.all(np.diff(thresholds) >= 0)\n inds = np.where(thresholds >= pos_thresh)[0]\n if not inds.size:\n # If there are no predicted positives, then precision is 0\n return 0, 0\n # Index of the closest threshold at least pos_thresh:\n thresh_ind = min(inds)\n return precis[thresh_ind], recall[thresh_ind]", "title": "" }, { "docid": "99e3bb2583be8abe61f7dfff45906d20", "score": "0.7428885", "text": "def score(y_true, y_pred):\n precision = precision_score(y_true, y_pred)\n recall = recall_score(y_true, y_pred)\n f1 = f1_score(y_true, y_pred)\n return precision, recall, f1", "title": "" }, { "docid": "b22f27a3d7991c0458df69c0b3e9d081", "score": "0.7411244", "text": "def compute_precision_recall(correct_chunk_cnt, found_pred_cnt, found_correct_cnt):\n\n if found_pred_cnt > 0:\n precision = 100 * correct_chunk_cnt / found_pred_cnt\n else:\n precision = 0\n\n if found_correct_cnt > 0:\n recall = 100 * correct_chunk_cnt / found_correct_cnt\n else:\n recall = 0\n\n return precision, recall", "title": "" }, { "docid": "819b5be7103df56ee8ea2ba088142855", "score": "0.733715", "text": "def precision_recall(y_true, y_pred):\n TP = FP = FN = TN = 0\n for i in range(len(y_pred)):\n if y_true[i] == 1 and y_pred[i] == 1:\n TP += 1\n elif y_true[i] == 0 and y_pred[i] == 1:\n FP += 1\n elif y_true[i] == 1 and y_pred[i] == 0:\n FN += 1\n elif y_true[i] == 0 and y_pred[i] == 0:\n TN += 1\n precision = (TP * 100.0) / (TP + FP + 0.001)\n recall = (TP * 100.0) / (TP + FN)\n return precision, recall", "title": "" }, { "docid": "6de9e87d0805aea137ffd67399f26ea8", "score": "0.7331613", "text": "def score_precision(df, target):\n total_positive = np.sum(df.predicted == target)\n true_positive = np.sum(np.logical_and(\n df.predicted == df.truth, df.predicted == target))\n if total_positive == 0:\n return 0\n return true_positive / total_positive", "title": "" }, { "docid": "fbdc1a442d08138a78c540d47fa53bb9", "score": "0.73304975", "text": "def compute_precision_recall(correct_chunk_cnt, found_pred_cnt,\n found_correct_cnt):\n\n if found_pred_cnt > 0:\n precision = 100 * correct_chunk_cnt / found_pred_cnt\n else:\n precision = 0\n\n if found_correct_cnt > 0:\n recall = 100 * correct_chunk_cnt / found_correct_cnt\n else:\n recall = 0\n\n return precision, recall", "title": "" }, { "docid": "b6653dbb3e6b42dea70f67fc987af1aa", "score": "0.72807497", "text": "def precision_recall(\n y_true: np.ndarray, probas_pred: np.ndarray, num_positives: int\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n probas_pred = probas_pred.flatten()\n y_true = y_true.flatten()\n # to handle duplicates scores, we sort (score, NOT(jugement)) for predictions\n # eg,the final order will be (0.5, False), (0.5, False), (0.5, True), (0.4, False), ...\n # This allows to have the worst possible AP.\n # It prevents participants from putting the same score for all predictions to get a good AP.\n order = argsort(list(zip(probas_pred, ~y_true)))\n order = order[::-1] # sort by decreasing score\n probas_pred = probas_pred[order]\n y_true = y_true[order]\n\n ntp = 
np.cumsum(y_true) # number of true positives <= threshold\n nres = np.arange(len(y_true)) + 1 # number of results\n\n precisions = ntp / nres\n recalls = ntp / num_positives\n return precisions, recalls, probas_pred", "title": "" }, { "docid": "f5d359f366a8980a69003e7c9f0aaa8f", "score": "0.7261193", "text": "def precision_recall(true_pos, false_pos, false_neg):\n try:\n precision = true_pos / (true_pos + false_pos)\n except ZeroDivisionError:\n precision = 0\n print(\"Precision could not be calculated.\")\n try:\n recall = true_pos / (true_pos + false_neg)\n except ZeroDivisionError:\n recall = 0\n print(\"Recall could not be calculated.\")\n \n return precision, recall", "title": "" }, { "docid": "6a5b4c7ee0791cf2058708b550c57f28", "score": "0.7260751", "text": "def calculate_precision_recall(self,queryid,relevances,predictions):\n ret = len(predictions)\n rel = len(relevances)\n relret = len(set(relevances.values()).intersection(set(predictions.values())))\n\n self.precision[queryid] = relret * 1.0 / ret\n self.recall[queryid] = relret * 1.0 / rel", "title": "" }, { "docid": "a430652ac0d2178f9d54b28f1ca4e546", "score": "0.72429925", "text": "def metrics(y_true, y_pred, show_report=False, threshold=0.5):\n cls_report = None\n y_scores = np.where(y_pred > threshold, 1, 0)\n micro_avg_f1 = f1_score(y_true, y_scores, average='samples')\n if show_report: cls_report = classification_report(y_true,y_scores) \n mAP = average_precision_score(y_true, y_scores, average=\"samples\")\n auc_score = roc_auc_score(y_true, y_scores, average=\"samples\")\n return micro_avg_f1, cls_report, mAP, auc_score", "title": "" }, { "docid": "4a1573954374cb86c38d9000f4443881", "score": "0.72296077", "text": "def get_precision_and_recall(self, gt, pred, iou):\n tps, fps, fns = [], [], []\n precisions, recalls, score_thresholds = [], [], []\n\n if pred is None:\n return {\n \"tp\": np.array([0]), \"fp\": [sum(len(gt_boxes) for gt_boxes in gt.values())], \"fn\": np.array([0]),\n \"precision\": np.array([0]), \"recall\": np.array([0]), \"scores\": np.array([0]),\n \"ap11\": 0, \"ap\": 0, \"monotonic_recalls\": np.array([0]), \"monotonic_precisions\": np.array([0]),\n \"ap11_recalls\": np.array([0]), \"ap11_precisions\": np.array([0])\n }\n\n scores = list(pred.keys())\n pred_boxes = list(pred.values())\n # loop over scores to calculate statistics for the score\n for score_index, score in enumerate(scores):\n score_tp, score_fp, score_fn = 0, 0, 0\n # create dict with active predicitons (prediction with the same or higher score)\n active_preds = {}\n for pred_entry in pred_boxes[score_index:]:\n for filename, bbox in zip(pred_entry[\"filename\"], pred_entry[\"bboxes\"]):\n if filename not in active_preds.keys():\n active_preds[filename] = [bbox]\n else:\n active_preds[filename].append(bbox)\n # loop over gt images\n for filename, image_gt_boxes in gt.items():\n img_tp, img_fp, img_fn = self.get_image_stats(image_gt_boxes, active_preds.get(filename, None), iou)\n score_tp += img_tp\n score_fp += img_fp\n score_fn += img_fn\n # calculate precision and recall for the threshold\n score_precision = score_tp/(score_tp + score_fp) if score_tp + score_fp > 0 else 0\n score_recall = score_tp/(score_tp + score_fn) if score_tp + score_fn > 0 else 0\n\n tps.append(score_tp)\n fps.append(score_fp)\n fns.append(score_fn)\n precisions.append(score_precision)\n recalls.append(score_recall)\n score_thresholds.append(score)\n\n # convert data to np.arrays for further processing\n tps = np.array(tps)\n fps = np.array(fps)\n fns = 
np.array(fns)\n precisions = np.array(precisions)\n recalls = np.array(recalls)\n score_thresholds = np.array(score_thresholds)\n\n # calculate additional stats\n # AP11\n precisions_at_recall_value = []\n for recall_value in np.linspace(0.0, 1.0, 11):\n indices = np.argwhere(np.array(recalls) >= recall_value).flatten()\n precision_max = max(precisions[indices]) if indices.size > 0 else 0\n precisions_at_recall_value.append(precision_max)\n ap11 = np.mean(precisions_at_recall_value)\n\n #AP\n sorted_indices = np.argsort(recalls)\n sorted_recalls = recalls[sorted_indices]\n sorted_precision = precisions[sorted_indices]\n # make the precision values monotonically\n calc_recalls = [0] + sorted_recalls.tolist() + [1]\n calc_precisions = [0] + sorted_precision.tolist() + [0]\n for i in range(len(calc_recalls)-2, -1, -1):\n calc_precisions[i] = max(calc_precisions[i], calc_precisions[i+1])\n # get indices where the recall value changes\n changing_index_list = []\n for i in range(1, len(calc_recalls)):\n if calc_recalls[i] != calc_recalls[i-1]:\n changing_index_list.append(i)\n ap = 0.0\n for i in changing_index_list:\n ap += ((calc_recalls[i]-calc_recalls[i-1])*calc_precisions[i])\n\n return {\n \"tp\": tps, \"fp\": fps, \"fn\": fns, \"precision\": precisions, \"recall\": recalls, \"scores\": score_thresholds,\n \"ap11\": ap11, \"ap\": ap, \"monotonic_recalls\": np.array(calc_recalls), \"monotonic_precisions\": np.array(calc_precisions),\n \"ap11_recalls\": np.linspace(0.0, 1.0, 11), \"ap11_precisions\": np.array(precisions_at_recall_value)\n }", "title": "" }, { "docid": "672e076ec43249bff3c9049dbd72019a", "score": "0.72296077", "text": "def get_precision_and_recall(self, gt, pred, iou):\n tps, fps, fns = [], [], []\n precisions, recalls, score_thresholds = [], [], []\n\n if pred is None:\n return {\n \"tp\": np.array([0]), \"fp\": [sum(len(gt_boxes) for gt_boxes in gt.values())], \"fn\": np.array([0]),\n \"precision\": np.array([0]), \"recall\": np.array([0]), \"scores\": np.array([0]),\n \"ap11\": 0, \"ap\": 0, \"monotonic_recalls\": np.array([0]), \"monotonic_precisions\": np.array([0]),\n \"ap11_recalls\": np.array([0]), \"ap11_precisions\": np.array([0])\n }\n\n scores = list(pred.keys())\n pred_boxes = list(pred.values())\n # loop over scores to calculate statistics for the score\n for score_index, score in enumerate(scores):\n score_tp, score_fp, score_fn = 0, 0, 0\n # create dict with active predicitons (prediction with the same or higher score)\n active_preds = {}\n for pred_entry in pred_boxes[score_index:]:\n for filename, bbox in zip(pred_entry[\"filename\"], pred_entry[\"bboxes\"]):\n if filename not in active_preds.keys():\n active_preds[filename] = [bbox]\n else:\n active_preds[filename].append(bbox)\n # loop over gt images\n for filename, image_gt_boxes in gt.items():\n img_tp, img_fp, img_fn = self.get_image_stats(image_gt_boxes, active_preds.get(filename, None), iou)\n score_tp += img_tp\n score_fp += img_fp\n score_fn += img_fn\n # calculate precision and recall for the threshold\n score_precision = score_tp/(score_tp + score_fp) if score_tp + score_fp > 0 else 0\n score_recall = score_tp/(score_tp + score_fn) if score_tp + score_fn > 0 else 0\n\n tps.append(score_tp)\n fps.append(score_fp)\n fns.append(score_fn)\n precisions.append(score_precision)\n recalls.append(score_recall)\n score_thresholds.append(score)\n\n # convert data to np.arrays for further processing\n tps = np.array(tps)\n fps = np.array(fps)\n fns = np.array(fns)\n precisions = np.array(precisions)\n recalls 
= np.array(recalls)\n score_thresholds = np.array(score_thresholds)\n\n # calculate additional stats\n\n # AP11\n precisions_at_recall_value = []\n for recall_value in np.linspace(0.0, 1.0, 11):\n indices = np.argwhere(np.array(recalls) >= recall_value).flatten()\n precision_max = max(precisions[indices]) if indices.size > 0 else 0\n precisions_at_recall_value.append(precision_max)\n ap11 = np.mean(precisions_at_recall_value)\n\n #AP\n sorted_indices = np.argsort(recalls)\n sorted_recalls = recalls[sorted_indices]\n sorted_precision = precisions[sorted_indices]\n # make the precision values monotonically\n calc_recalls = [0] + sorted_recalls.tolist() + [1]\n calc_precisions = [0] + sorted_precision.tolist() + [0]\n for i in range(len(calc_recalls)-2, -1, -1):\n calc_precisions[i] = max(calc_precisions[i], calc_precisions[i+1])\n # get indices where the recall value changes\n changing_index_list = []\n for i in range(1, len(calc_recalls)):\n if calc_recalls[i] != calc_recalls[i-1]:\n changing_index_list.append(i)\n ap = 0.0\n for i in changing_index_list:\n ap += ((calc_recalls[i]-calc_recalls[i-1])*calc_precisions[i])\n\n return {\n \"tp\": tps, \"fp\": fps, \"fn\": fns, \"precision\": precisions, \"recall\": recalls, \"scores\": score_thresholds,\n \"ap11\": ap11, \"ap\": ap, \"monotonic_recalls\": np.array(calc_recalls), \"monotonic_precisions\": np.array(calc_precisions),\n \"ap11_recalls\": np.linspace(0.0, 1.0, 11), \"ap11_precisions\": np.array(precisions_at_recall_value)\n }", "title": "" }, { "docid": "23a6b1841cc13a78be8f909496674083", "score": "0.72176933", "text": "def mAP_SCORE(self):\n\n def count_ap(rec, prec, use_07_metric=False):\n if use_07_metric:\n # 11 point metric\n ap = 0.\n for t in np.arange(0., 1.1, 0.1):\n if np.sum(rec >= t) == 0:\n p = 0\n else:\n p = np.max(prec[rec >= t])\n ap = ap + p / 11.\n else:\n # correct AP calculation\n # first append sentinel values at the end\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n # compute the precision envelope\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n # to calculate area under PR curve, look for points\n # where X axis (recall) changes value\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n # and sum (\\Delta recall) * prec\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n return ap\n\n ap = -1 * np.ones(self.cls_num)\n precision = -1 * np.ones(self.cls_num, dtype=np.float32)\n recall = -1 * np.ones(self.cls_num, dtype=np.float32)\n for cls_i in range(self.cls_num):\n if self.pre_scores[cls_i] == []: continue\n pre_scores = np.asarray(self.pre_scores[cls_i])\n fp = np.asarray(self.false_positive[cls_i])\n tp = np.asarray(self.true_positive[cls_i])\n\n sorted_ind = np.argsort(-pre_scores)\n pre_scores = pre_scores[sorted_ind]\n fp = fp[sorted_ind]\n tp = tp[sorted_ind]\n\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(self.gt_obj_num[cls_i])\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap[cls_i] = count_ap(rec, prec)\n precision[cls_i] = prec[-1]\n recall[cls_i] = rec[-1]\n return ap, precision, recall", "title": "" }, { "docid": "ff0abcba17e65115c70b69bd18a08bde", "score": "0.7154851", "text": "def precision_recall_scores(prec_rec_counts_in):\n\n\tprediction_scores = [] \n\t\n\tgold = [i[1] for i in prec_rec_counts_in]\n\t#print(gold)\n\n\tpredictions = [i[2:] for i in 
prec_rec_counts_in]\n\t#print(predictions)\n\n\tfor i in range(len(predictions[0])):\n\t\n\t\tpredicted = [ed[i] for ed in predictions] # collect all perditions or i edit distance \n\t\t\n\t\t# manually calculate precision, recall and fscore\n\t\ttn, fp, fn, tp = confusion_matrix(gold, predicted).ravel()\n\t\tprecision = (tp/(tp+fp))\n\t\trecall = (tp/(tp+fn))\n\t\tfscore = (2*precision*recall)/(precision+recall)\n\t\tprediction_scores.append([i, precision, recall, fscore, tn, fp, fn, tp])\n\t\t\n\t\t# ## use sklearn to calculate precisions, recall, fscore\n\t\t# bPrecis, bRecall, bFscore, bSupport = pr(gold, predicted, average='binary')\t\n\t\t# prediction_scores.append([i, bPrecis, bRecall, bFscore])\t\n\n\tmydf = pd.DataFrame(prediction_scores, columns=['edit_dist','precision','recall','fscore','TrueNeg', 'FalsePos', 'FalseNeg', 'TruePos'])\n\n\treturn mydf", "title": "" }, { "docid": "68f3c60818466e97893c00bed2979652", "score": "0.7143471", "text": "def evaluate(predictions, labels, threshold):\n predictions, labels = np.array(predictions), np.array(labels)\n shape = predictions.shape\n\n #if dimensions equals 3, reshape it to 2 dimensions\n if len(shape) > 2:\n predictions = np.reshape(predictions,(shape[0]*shape[1], shape[2]))\n labels = np.reshape(labels, (shape[0]*shape[1], shape[2]))\n\n assert predictions.shape == labels.shape\n\n if threshold:\n for i in range(predictions.shape[0]):\n predictions[i,:][predictions[i,:]>= threshold]=1\n predictions[i,:][predictions[i,:]< threshold]=0\n\n else:\n #TOP K\n for i in range(predictions.shape[0]):\n k = np.sum(labels[i])\n pos = predictions[i].argsort()\n predictions[i].fill(0)\n predictions[i][pos[-int(k):]] = 1\n \n #labels = labels.astype(int)\n coverage= coverage_error(labels, predictions)\n #print(labels[:10], predictions[:10])\n average_precision = label_ranking_average_precision_score(labels, predictions)\n ranking_loss = label_ranking_loss(labels, predictions)\n pak = patk(predictions, labels)\n ham_loss = hamming_loss(labels, predictions)\n \n micro_precision, micro_recall, micro_f1,macro_precision, macro_recall, macro_f1 = bipartition_scores(labels, predictions)\n \n performance = np.asarray([coverage,average_precision,ranking_loss,micro_f1,macro_f1,micro_precision,micro_recall,macro_precision,macro_recall, pak[0], pak[1], pak[2], ham_loss])\n #print (\"Performance: \" , performance)\n return performance", "title": "" }, { "docid": "dc1d0e2482c2a87e0b536547545d3689", "score": "0.71021", "text": "def precision(pred, gt):\n\n N = gt.size(0)\n pred_flat = pred.view(N, -1)\n gt_flat = gt.view(N, -1)\n tp = torch.sum((pred_flat != 0) * (gt_flat != 0))\n fp = torch.sum((pred_flat != 0) * (gt_flat == 0))\n\n score = tp.float() / (tp + fp).float()\n\n return score.sum() / N", "title": "" }, { "docid": "0405946c7c263c739eb0cb91726f907a", "score": "0.70805335", "text": "def evaluate_preds(y_test, y_pred):\r\n accuracy = accuracy_score(y_test, y_pred)\r\n precision = precision_score(y_test, y_pred)\r\n recall = recall_score(y_test, y_pred)\r\n f1 = f1_score(y_test, y_pred)\r\n mrtric_dict ={\"accuracy\": round(accuracy,2),\r\n \"precision\": round(precision,2),\r\n \"recall\": round(recall,2),\r\n \"f1\": round(f1,2)}\r\n print(f\"Acc: {accuracy * 100:.2f}%\")\r\n print(f\"Precision: {precision:.2f}\")\r\n print(f\"Recall: {recall:2f}\")\r\n print(f\"F1: {f1:2f}\")", "title": "" }, { "docid": "0405946c7c263c739eb0cb91726f907a", "score": "0.70805335", "text": "def evaluate_preds(y_test, y_pred):\r\n accuracy = accuracy_score(y_test, 
y_pred)\r\n precision = precision_score(y_test, y_pred)\r\n recall = recall_score(y_test, y_pred)\r\n f1 = f1_score(y_test, y_pred)\r\n mrtric_dict ={\"accuracy\": round(accuracy,2),\r\n \"precision\": round(precision,2),\r\n \"recall\": round(recall,2),\r\n \"f1\": round(f1,2)}\r\n print(f\"Acc: {accuracy * 100:.2f}%\")\r\n print(f\"Precision: {precision:.2f}\")\r\n print(f\"Recall: {recall:2f}\")\r\n print(f\"F1: {f1:2f}\")", "title": "" }, { "docid": "215ad0de23cd3ca8bfeedecaad1f1f03", "score": "0.7078433", "text": "def precision(y_true,y_pred):\n\ttrue_positives = K.sum(K.round(K.clip(y_true * y_pred,0,1)))\n\tpredicted_positives = K.sum(K.round(K.clip(y_pred,0,1)))\n\tprecision = true_positives / (predicted_positives + K.epsilon())\n\treturn precision", "title": "" }, { "docid": "0c63dd81b0b28f581356ac5f1bc1d4ef", "score": "0.7044306", "text": "def __get_prf_scores(correct_num, pred_num, gold_num, eval_type):\n if correct_num == pred_num == gold_num == 0:\n return 1.2333, 1.2333, 1.2333 # highlight this info by illegal outputs instead of outputting 0.\n\n minimum = 1e-20\n precision = correct_num / (pred_num + minimum)\n recall = correct_num / (gold_num + minimum)\n f1 = 2 * precision * recall / (precision + recall + minimum)\n\n results = {f\"{eval_type}_precision\": round(precision, 5),\n f\"{eval_type}_recall\": round(recall, 5),\n f\"{eval_type}_f1\": round(f1, 5)}\n return results", "title": "" }, { "docid": "27083c79ad0975a956c08b276fbc3e5b", "score": "0.7035228", "text": "def get_metrics_auto(y_true, y_score):\r\n precision, recall, thresholds = precision_recall_curve(y_true, y_score)\r\n average_precision = average_precision_score(y_true, y_score)\r\n return precision, recall, thresholds, average_precision", "title": "" }, { "docid": "55701a9960dfeabd0ad4293233ad5b00", "score": "0.70274293", "text": "def scores(Y_pred, Y_test, activation_threshold = 0.1 ,plot_results= True, print_results = False):\n\n # post process the data\n\n np.putmask(Y_pred[:,0], Y_pred[:,0] <=0, 0)\n np.putmask(Y_pred[:,1], Y_pred[:,1] >=1, 1)\n np.putmask(Y_pred[:,0],Y_pred[:,1] < Y_pred[:,0],0)\n np.putmask(Y_pred[:,1],Y_pred[:,1] < Y_pred[:,0],0)\n np.putmask(Y_pred[:,1],Y_pred[:,2] < activation_threshold,0)\n np.putmask(Y_pred[:,0],Y_pred[:,2] < activation_threshold,0) \n\n # find negative in prediction\n pred_negatives = (Y_pred[:,0] ==0) &(Y_pred[:,1] ==0)\n pred_positives = ~pred_negatives\n obs_negatives = (Y_test[:,0] ==0) &(Y_test[:,1] ==0)\n obs_positives = ~obs_negatives\n TP = obs_positives[pred_positives].sum()\n FN = obs_positives[pred_negatives].sum()\n TN = obs_negatives[pred_negatives].sum()\n FP = obs_negatives[pred_positives].sum()\n\n recall = TP / float(TP + FN)\n precision = TP / float(TP+ FP)\n f1 = 2* precision*recall / (precision + recall)\n accuracy = (TP + TN)/ float(obs_negatives.sum() +obs_positives.sum() )\n if print_results:\n print('number of Predicted negatives:',pred_negatives.sum() )\n print('number of Predicted positives:',pred_positives.sum() )\n print('number of Observed negatives:', obs_negatives.sum() )\n print('number of Observed positives:', obs_positives.sum() )\n print('f1:', f1)\n print('precision :' ,precision)\n print('recall : ', recall)\n print('accuracy:', accuracy)\n\n results = {\n 'accuracy': accuracy,\n 'f1_score': f1,\n 'precision': precision,\n 'recall_score': recall}\n if plot_results:\n pd_results = pd.DataFrame.from_dict(results, orient = 'index')\n pd_results = pd_results.transpose() \n sns.barplot(data = pd_results)\n\n return results", 
"title": "" }, { "docid": "4ed124e2b818582c1710e1b520c41d93", "score": "0.7020553", "text": "def eval_tag_precisionrecall(self,truth_targets, pred_targets):\n #merge all sequences\n t_tags = []\n p_tags = []\n for truth_target in truth_targets:\n for t_t in truth_target:\n t_tags.append(t_t)\n for pred_target in pred_targets:\n for p_t in pred_target:\n p_tags.append(p_t)\n tags = list(set(t_tags+p_tags))\n if 'None' in tags:\n tags.remove('None')\n #print ''\n #print tags\n \n rp = []\n rr = []\n s='Precision/Recall:\\n'\n for tag in tags:\n TP = 0.0\n FP = 0.0\n FN = 0.0\n TN = 0.0\n for (t,p) in zip(t_tags,p_tags):\n if t==tag and p==tag:\n TP+=1\n elif t==tag and p!=tag:\n FN+=1\n elif t!=tag and p==tag:\n FP+=1\n elif t!=tag and p!=tag:\n TN+=1\n #s='Accuracy='+str((TP+TN)/(TP+FP+FN+TN))+'\\n'\n if TP!=0:\n #s=s+str(tag)+':'+str(round(TP/(TP+FP),4))+'/'+str(round(TP/(TP+FN),4))+'\\n'\n rp.append(TP/(TP+FP))\n rr.append(TP/(TP+FN))\n else:\n #s=s+str(tag)+':'+'0.00/0.00'+'\\n'\n rp.append(0.0)\n rr.append(0.0)\n\n return rp,rr", "title": "" }, { "docid": "17689503caa1a743ce303db717ba2b0c", "score": "0.6998094", "text": "def precision(y_true,y_pred):\n\t\ttrue_positives = K.sum(K.round(K.clip(y_true * y_pred,0,1)))\n\t\tpredicted_positives = K.sum(K.round(K.clip(y_pred,0,1)))\n\t\tprecision = true_positives / (predicted_positives + K.epsilon())\n\t\treturn precision", "title": "" }, { "docid": "c9e0bd198bbcb558ffe298ef23e025a5", "score": "0.6987729", "text": "def calculate_f1_score(y_test, y_pred):\n print('# Running precision, recall and F1-score')\n print('# F1-Score:\\t\\t%.2f' % f1_score(y_test, y_pred, average=\"macro\"))\n print('# Precision:\\t%.2f' % precision_score(y_test, y_pred, average=\"macro\"))\n print('# Recall:\\t\\t%.2f' % recall_score(y_test, y_pred, average=\"macro\"))", "title": "" }, { "docid": "1f2135fd39ae089d4f1376d0a94dea49", "score": "0.6975017", "text": "def macro_precision(y_true, y_pred):\n # find the number of classes by taking\n # length of unique values in true list\n num_classes = len(np.unique(y_true))\n\n # initialize precision to 0\n precision = 0\n\n # loop over all classes\n for class_ in range(num_classes):\n # all classes except current are considered negative\n temp_true = [1 if p == class_ else 0 for p in y_true]\n temp_pred = [1 if p == class_ else 0 for p in y_pred]\n\n # calculate true positive for current class\n tp = acc.true_positive(temp_true, temp_pred)\n\n # calculate false positive for current class\n fp = acc.false_positive(temp_true, temp_pred)\n\n temp_precision = tp / (tp + fp)\n\n # keep adding precision\n precision += temp_precision\n # calculate and return average precision over all classes\n precision /= num_classes\n return precision", "title": "" }, { "docid": "3b73f234242e91da942d532f961af5b0", "score": "0.6963007", "text": "def f1_score(probabilities, threshold, y):\n classified_samples = probabilities < threshold\n\n true_positive = np.intersect1d(np.argwhere(classified_samples == True), np.argwhere(y == 1)).size\n false_positive = np.intersect1d(np.argwhere(classified_samples == True), np.argwhere(y == 0)).size\n false_negative = np.intersect1d(np.argwhere(classified_samples == False), np.argwhere(y == 1)).size\n\n zero_division = math.pow(10, -7)\n precision = true_positive / (true_positive + false_positive + zero_division)\n recall = true_positive / (true_positive + false_negative + zero_division)\n\n return 2 * precision * recall / (precision + recall + zero_division)", "title": "" }, { "docid": 
"4ebd1cd962e7fa70f433ff16816e72f9", "score": "0.6960321", "text": "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "title": "" }, { "docid": "4ebd1cd962e7fa70f433ff16816e72f9", "score": "0.6960321", "text": "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "title": "" }, { "docid": "832b543b8daa26e83fb5dfb6ad722289", "score": "0.69591415", "text": "def cm_precision_recall(prediction,truth):\n confusion_matrix = Counter()\n\n positives = [1]\n\n binary_truth = [x in positives for x in truth]\n binary_prediction = [x in positives for x in prediction]\n\n for t, p in zip(binary_truth, binary_prediction):\n confusion_matrix[t,p] += 1\n\n cm = np.array([confusion_matrix[True,True], confusion_matrix[False,False], confusion_matrix[False,True], confusion_matrix[True,False]])\n #print cm\n precision = (cm[0]/(cm[0]+cm[2]+0.000001))\n recall = (cm[0]/(cm[0]+cm[3]+0.000001))\n return cm, precision, recall", "title": "" }, { "docid": "a130996d6747275c7c9a76144f9be0fa", "score": "0.6957958", "text": "def get_precision(labels_true, labels_pred, average_type):\n return precision_score(labels_true, labels_pred, average=average_type)", "title": "" }, { "docid": "1263493b490dd9f8fb6aea872579576a", "score": "0.6951625", "text": "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "title": "" }, { "docid": "1263493b490dd9f8fb6aea872579576a", "score": "0.6951625", "text": "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "title": "" }, { "docid": "1263493b490dd9f8fb6aea872579576a", "score": "0.6951625", "text": "def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision", "title": "" }, { "docid": "bd89b08211378c6e486757b6bf38ce81", "score": "0.69417936", "text": "def _prec_recall_f1_score(pred_items, gold_items):\n common = Counter(gold_items) & Counter(pred_items)\n num_same = sum(common.values())\n if num_same == 0:\n return 0, 0, 0\n precision = 1.0 * num_same / len(pred_items)\n recall = 1.0 * num_same / len(gold_items)\n f1 = (2 * precision * recall) / (precision + recall)\n return precision, recall, f1", "title": "" }, { "docid": "a89a86c7e628635d9daf7acfddfa007e", "score": "0.6938572", "text": "def micro_precision(y_true, y_pred):\n # find the number of classes by taking\n # length of unique values in true list\n num_classes = len(np.unique(y_true))\n\n # initialize tp and fp to 0\n tp = 0\n fp = 0\n\n # loop over all classes\n for class_ in range(num_classes):\n # all classes except current are considered negative\n temp_true = [1 if p == class_ else 0 for p in y_true]\n temp_pred = [1 if p == class_ else 0 for p 
in y_pred]\n\n # calculate true positive for current class\n # and update overall tp\n tp += acc.true_positive(temp_true, temp_pred)\n\n # calculate false positive for current class\n # and update overall fp\n fp += acc.false_positive(temp_true, temp_pred)\n\n # calculate and return overall precision\n precision = tp / (tp + fp)\n return precision", "title": "" }, { "docid": "d8f60248d4c549aee58e4f8e11cfb550", "score": "0.69385195", "text": "def precision_series(pred_scores, true_scores, tols):\n N_tols = len(tols)\n N_scores = len(pred_scores)\n pred = np.outer(pred_scores, np.ones(N_tols))>np.outer(np.ones(N_scores), tols)\n true = np.outer(true_scores, np.ones(N_tols))>np.outer(np.ones(N_scores), tols)\n PP = pred.sum(axis=0).astype(np.int32)\n TP = true.sum(axis=0).astype(np.int32)\n\n #where PP is 0 we should output a precision of 1\n TP = (PP==0) + TP*(PP!=0)\n PP = (PP==0) + PP*(PP!=0)\n return TP / PP.astype(np.float64)", "title": "" }, { "docid": "8d99f10bd4402b53bc58612a59792ffa", "score": "0.69314224", "text": "def precision(y_true, y_pred):\n true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))\n predicted_positives = backend.sum(backend.round(backend.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + backend.epsilon())\n return precision", "title": "" }, { "docid": "34070f7be9c9f272e9b7cf11a5c6e2aa", "score": "0.6926707", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true[:, 0] * y_pred[:, 0], 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred[:, 0], 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.6923345", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.6923345", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.6923345", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.6923345", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6430ca0825dde056dcd299d096893bc9", "score": "0.6923345", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "6c2e210a7922003722d91adbca1d2add", "score": "0.69208294", "text": "def f1score(precision_value, recall_value, epsilon=1e-5):\n return 2 * (precision_value * 
recall_value) / (precision_value + recall_value + epsilon)", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def 
precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "fbdaa0812e4fe823061a7f859e0889f4", "score": "0.69172484", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "e1065602acce320fe53efd23f79ca74a", "score": "0.6885583", "text": "def get_accuracy(actual_scores: List[float], predicted_scores: List[float], threshold: float):\n assert len(actual_scores) == len(predicted_scores)\n hits = 0\n for actual_score, predicted_score in zip(actual_scores, predicted_scores):\n actual_label = 1 if actual_score > 0.5 else 0\n predicted_label = 1 if predicted_score > threshold else 0\n if actual_label == predicted_label:\n hits += 1\n return hits / len(actual_scores)", "title": "" }, { "docid": "5e1aace6610447d2616112250a3225e4", "score": "0.6883423", "text": "def evaluate_performance(y_pred, y_label):\n precision = precision_score(y_label, y_pred)\n recall = recall_score(y_label, y_pred)\n f1 = f1_score(y_label, y_pred)\n f05 = fbeta_score(y_label, y_pred, beta=0.5)\n conf = confusion_matrix(y_label, y_pred) / len(y_pred)\n report = classification_report_imbalanced(y_true=y_label, y_pred=y_pred)\n print(report)\n print(f'f1: {f1} // f0.5: {f05}')\n return precision, recall, f1, f05, conf, report", "title": "" }, { "docid": "fd2a5461e3b29cae6a33af5189ec3dab", "score": "0.68777174", "text": "def scores(y_ground_arr,y_pred_arr,class_thresholds):\n\tscores_arr=[0,0,0,0]\n\t#Careful here, this method isn't the easiest to read. Let me show a practical example\n\t#y_ground_pred=[[0,0,1,1,0],[0.45,0.1,0.99,0.3,0.6]] with thresholds [0.4,0.4,0.6,0.25,0.9]\n\t#then the predicted classes are [1,0,1,0,0] (so we have 1 false pos, 1 false neg, 2 true neg, \n\t#1 true pos). 
So temp=2*[0,0,1,1,0]-[1,0,1,0,0]=[-1,0,1,2,0] which shows how false pos will be \n\t#marked by a value -1, false neg by a 2, true pos by a 1 and true neg by a 0, which is why I do:\n\t#false_pos=scores_arr[2]\n\t#false_neg=scores_arr[-1]\n\t#true_pos=scores_arr[1]\n\t#true_neg=scores_arr[0]\n\tfor y_ground_pred in zip(y_pred_arr,y_ground_arr):\n\t\ttemp=2*np.ceil(y_ground_pred[0]-class_thresholds)-y_ground_pred[1]\n\t\tfor jj in temp:\n\t\t\tscores_arr[int(jj)]+=1\n\tfalse_pos=scores_arr[2]\n\tfalse_neg=scores_arr[-1]\n\ttrue_pos=scores_arr[1]\n\ttrue_neg=scores_arr[0]\n\t#these all just follow from the textbook definitions\n\tif false_neg==0:\n\t\trecall=1.\n\telse:\n\t\trecall=true_pos/(true_pos+false_neg)\n\tif false_pos==0:\n\t\tprecision=1.\n\telse:\n\t\tprecision=true_pos/(true_pos+false_pos)\n\tif precision==0. and recall==0.:\n\t\tf1score=0.\n\telse:\n\t\tf1score=2*recall*precision/(recall+precision)\n\tif (false_pos+false_neg)==0:\n\t\tjaccard_index=1.\n\telse:\n\t\tjaccard_index=true_pos/(false_pos+false_neg+true_pos)\n\treturn recall,precision,f1score,jaccard_index", "title": "" }, { "docid": "8833133bb9cfe815ca4926511aa75941", "score": "0.68741435", "text": "def precision(self, y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "c6164ea9f5290acff3cb9d46a1805165", "score": "0.6873356", "text": "def score(self, data):\r\n X, y = data, [j[-1] for j in data]\r\n pred = self.predict(X)\r\n precision = [pred[i]==y[i] for i in range(len(y))]\r\n return float(sum(precision)) / len(precision)", "title": "" }, { "docid": "261f3462f6be21a345c0f40c08082dfd", "score": "0.687112", "text": "def compute_recall(true_positives, other_positives):\n\treturn true_positives / (true_positives + other_positives)", "title": "" }, { "docid": "2e6c2e1349b7f93edc401d95e2d45f0c", "score": "0.6867064", "text": "def calculate_accuracy(precision, recall, num_bots, num_humans):\n total = num_bots + num_humans\n true_positive = num_bots * recall\n true_negative = total - true_positive * (1/precision + 1/recall - 2)\n false_negative = num_bots - true_positive\n false_positive = num_humans - true_negative\n return (true_positive + true_negative) / total", "title": "" }, { "docid": "ed882938cd988ee5b020dda28e7f7f1e", "score": "0.68628496", "text": "def evaluate_preds(y_true, y_preds):\n accuracy = accuracy_score(y_true, y_preds)\n precision = precision_score(y_true, y_preds, average='micro')\n recall = recall_score(y_true, y_preds, average='micro')\n f1 = f1_score(y_true, y_preds, average='micro')\n metric_dict = {\"accuracy\": round(accuracy, 2),\n \"precision\": round(precision, 2),\n \"recall\": round(recall, 2),\n \"f1\": round(f1, 2)}\n print(f\"Acc: {accuracy * 100:.2f}%\")\n print(f\"Precision: {precision:.2f}\")\n print(f\"Recall: {recall:.2f}\")\n print(f\"F1 score: {f1:.2f}\")\n \n return metric_dict", "title": "" }, { "docid": "5a300273bb1bb81dcd5fe65ab3c8d649", "score": "0.685248", "text": "def precision(y_true, y_pred):\n # count true positives\n truth = K.round(K.clip(y_true, K.epsilon(), 1))\n pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1))\n true_pos = K.sum(K.cast(K.all(K.stack([truth, pred_pos], axis=2), axis=2),\n dtype='float32'))\n pred_pos_ct = K.sum(pred_pos) + K.epsilon()\n precision = true_pos/pred_pos_ct\n\n return precision", "title": "" }, { "docid": 
"206798a8b3228442310bf94b343c92ec", "score": "0.68450826", "text": "def my_precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "a0fcacfd534a13969b5495eaa14d8d05", "score": "0.684228", "text": "def recall(prediction, ground_truth):\n return precision_recall_f1(prediction, ground_truth)[1]", "title": "" }, { "docid": "f35b5123e1b3b26beb79e97fa27a0aab", "score": "0.68391824", "text": "def set_scores(self, preds, actual):\n \n precisions = []\n recalls = []\n\n total_prec = precision_score(actual, preds, average='weighted')\n total_rec = recall_score(actual, preds, average='weighted')\n total_f1 = f1_score(actual, preds, average='weighted')\n \n return {'precision': total_prec,\n 'recall': total_rec,\n 'f1': total_f1}", "title": "" }, { "docid": "1026eff837d25c18df65851afb0a104e", "score": "0.6834064", "text": "def evaluate(truth, detected, tol):\n \n detected = list(detected)\n \n fn = 0\n tp = 0\n \n for tx,ty,_ in truth:\n index = contains(detected, tx, ty, tol)\n if index >= 0:\n del detected[index]\n tp += 1\n else:\n fn += 1\n \n fp = len(detected)\n \n f1_score = 0.0\n precision = 0.0\n recall = 0.0\n \n if (tp + fp) != 0:\n precision = float(tp) / float(tp + fp)\n if (tp + fn) != 0: \n recall = float(tp) / float(tp + fn)\n if (precision + recall) != 0:\n f1_score = 2 * (precision * recall) / (precision + recall)\n \n return tp, fn, fp, precision, recall, f1_score", "title": "" }, { "docid": "8d3260df16ba21486954f8c675cafab7", "score": "0.6812829", "text": "def calc_precision(score_table, actual_priors):\n actual_indicator = (score_table > 0).any()\n precision = sum(actual_indicator.mul(actual_priors))\n logging.info( \"precision = {}\".format(precision))\n return precision\n\n # logging.info( score_table.transpose()[actual_indicator == False].index)", "title": "" }, { "docid": "cbb56d36fe4b7d3e06fd5fe189adce91", "score": "0.6807726", "text": "def precision_recall_f1(prediction, ground_truth):\n if not isinstance(prediction, list):\n prediction_tokens = prediction.split()\n else:\n prediction_tokens = prediction\n if not isinstance(ground_truth, list):\n ground_truth_tokens = ground_truth.split()\n else:\n ground_truth_tokens = ground_truth\n common = Counter(prediction_tokens) & Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0, 0, 0\n p = 1.0 * num_same / len(prediction_tokens)\n r = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * p * r) / (p + r)#调和平均\n return p, r, f1", "title": "" }, { "docid": "2eb614cc5432b53e18930ff88e5a55b0", "score": "0.68041104", "text": "def mrr_precision_at_k(golden, preds, k_list=[1,]):\n my_score = 0\n precision_at = np.zeros(len(k_list))\n for key, elem in enumerate(golden):\n if elem in preds[key]:\n location = np.where(preds[key]==elem)[0][0]\n my_score += 1/(1+ location)\n for k_index, k_value in enumerate(k_list):\n if location < k_value:\n precision_at[k_index] += 1\n return my_score/len(golden), precision_at/len(golden)", "title": "" }, { "docid": "d97641112853ab196f235609bdc0e2be", "score": "0.6802612", "text": "def score_metrics(y_true, y_pred):\n\n m = matrix_evolution.confusion_matrix(y_true, y_pred)\n tp = m[0][0]\n fp = m[0][1]\n fn = m[1][0]\n tn = m[1][1]\n accuracy = (tp)/(tp + fp + tn + fn)\n precision = (tp)/(tp + fp)\n recall = (tp)/(tp + fn)\n p = precision\n r = recall\n f1 
= (2 * p * r)/(p + r)\n specificity = (tn)/(tn + fp)\n b1 = 0.5\n fb1 = ((1 + b1**2)*(p * r))/(p * b1**2 + r)\n b2 = 2\n fb2 = ((1 + b2**2)*(p * r))/(p * b2**2 + r)\n print(\"Accuracy = \", accuracy)\n print(\"Precision = \", precision)\n print(\"Recall = \", recall)\n print(\"Specificity = \", specificity)\n print(\"F1 =\", f1)\n print(\"FbTheta for 0.5 = \", fb1)\n print(\"FbTheta for 2.0 = \", fb2)", "title": "" }, { "docid": "443f5e1277c56585846ab4345ce554ba", "score": "0.6794229", "text": "def compute_metrics(preds, test_dataset):\n y_preds = [prob_to_label(list(probabilities)) for probabilities in preds] # predicted labels\n prob_positive = [list(prob)[0] for prob in preds] # probabilities of the positive class 'fic' (for AUPRC); order is ['fic', 'non']\n prob_greater = [list(prob)[1] for prob in preds] # probabilities of the greater class (for AUROC); order is ['fic', 'non']\n\n y_true = []\n for instance in test_dataset:\n y_true.append(instance.fields['label'].label)\n\n # Compute classification metrics:\n f1 = f1_score(y_true, y_preds, pos_label='fic')\n auroc = roc_auc_score(y_true, y_score=prob_greater)\n w_f1 = f1_score(y_true, y_preds, average='weighted')\n precision = precision_score(y_true, y_preds, pos_label='fic')\n recall = recall_score(y_true, y_preds, pos_label='fic')\n acc = accuracy_score(y_true, y_preds)\n auprc = average_precision_score(y_true, y_score=prob_positive, pos_label='fic')\n\n return round(f1,4), round(auroc,4), round(w_f1,4), round(precision,4), round(recall,4), round(acc,4), round(auprc,4)", "title": "" }, { "docid": "22cbf00af7155f62364432f9464120ff", "score": "0.6794096", "text": "def multi_label_precision(true, pred):\n if len(pred):\n precision = float(len(set(true).intersection(set(pred)))/len(pred))\n else:\n precision = 0\n return precision", "title": "" }, { "docid": "c53d6a41538fc511b2ce4d00bdfe1f6c", "score": "0.6792758", "text": "def precision_recall_f1(true: np.ndarray, pred: np.ndarray):\n num_predicted = np.unique(pred).size\n num_intersect = np.intersect1d(pred, true).size\n num_observed = np.unique(true).size\n p = num_intersect / num_predicted\n r = num_intersect / num_observed\n f1 = 2 * p * r / (p + r) if p != 0 or r != 0 else 0\n return p, r, f1", "title": "" }, { "docid": "141ebe712938b9a4c862492b3d4c05f5", "score": "0.67528003", "text": "def evaluate_metrics(submission_df: pd.DataFrame, gt_df: pd.DataFrame):\n\n # Subset submission_df to query_ids that we have labels for in gt_df\n submission_df = submission_df[submission_df[\"query_id\"].isin(gt_df[\"query_id\"])]\n\n gt_pairs = {\n tuple(row)\n for row in gt_df[[\"query_id\", \"reference_id\"]].itertuples(index=False)\n if not pd.isna(row.reference_id)\n }\n\n # Binary indicator for whether prediction is a true positive or false positive\n y_true = np.array(\n [\n tuple(row) in gt_pairs\n for row in submission_df[[\"query_id\", \"reference_id\"]].itertuples(index=False)\n ]\n )\n # Confidence score, as if probability. 
Only property required is greater score == more confident.\n probas_pred = submission_df[\"score\"].values\n\n p, r, t = precision_recall(y_true, probas_pred, len(gt_pairs))\n\n # Micro-average precision\n ap = average_precision(r, p)\n\n # Metrics @ Precision>=90%\n pp90, rp90, tp90 = find_operating_point(p, r, t, required_x=0.9)\n\n if rp90 is None:\n # Precision was never above 90%\n rp90 = 0.0\n\n return ap, rp90", "title": "" }, { "docid": "a24c0959a740c646c2b3b01b9e459749", "score": "0.67333466", "text": "def score_fmeasure(df, target):\n precision = score_precision(df, target)\n recall = score_recall(df, target)\n if precision + recall == 0:\n return 0\n return 2 * (precision * recall) / (precision + recall)", "title": "" }, { "docid": "230e59e6f7249d3213818b7f49571131", "score": "0.672807", "text": "def precision_recall(true, estimated):\n precision, recall = [], []\n p_old, r_old = None, None\n for estimated_ in estimated:\n if not estimated_.any():\n continue\n p, r, _, _ = precision_recall_fscore_support(\n true, np.abs(estimated_) > 0, average='micro')\n if p_old is not None:\n if p == p_old and r == r_old:\n continue\n precision.append(p)\n recall.append(r)\n\n p_old = p\n r_old = r\n return precision, recall", "title": "" }, { "docid": "c097adf69ec48d741cb49c0637184925", "score": "0.67263806", "text": "def precision_score(self):\n precision = self._tp / (self._tp + self._fp)\n return precision", "title": "" }, { "docid": "7d6ab0365023b3f58d20387300b14daf", "score": "0.6723987", "text": "def precision(self):\n return self.prediction_comparison.evaluate_float()", "title": "" }, { "docid": "6ca904a6ebd68a7dc0930244725400c6", "score": "0.671832", "text": "def _average_precision(y_true, y_pred):\n true_positives, false_positives, _, _ = _calc_classification_statistics(y_true, y_pred)\n predicted_positives = true_positives + false_positives\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "title": "" }, { "docid": "e326e794858ee9f0d65cbf981e4f01c1", "score": "0.6716893", "text": "def average_precision(gt_list, pred_list, confidence_score=True):\n pred_list = [(i, det) for i in range(len(pred_list)) for det in pred_list[i]]\n if len(pred_list) == 0:\n return 0\n\n if confidence_score :\n sorted_ind = np.argsort([-det[1].confidence for det in pred_list])\n pred_list_sorted = [pred_list[i] for i in sorted_ind]\n ap, prec, rec = voc_ap(gt_list, pred_list_sorted)\n else:\n n = 10\n precs = []\n recs = []\n aps = []\n for _ in range(n):\n shuffled_ind = np.random.permutation(len(pred_list))\n pred_list_shuffled = [pred_list[i] for i in shuffled_ind]\n ap, prec, rec = voc_ap(gt_list, pred_list_shuffled)\n precs.append(prec)\n recs.append(rec)\n aps.append(ap)\n prec = np.mean(precs)\n rec = np.mean(recs)\n ap = np.mean(aps)\n return ap, prec, rec", "title": "" }, { "docid": "c91c1349db704c705b150a547049a4cb", "score": "0.67154664", "text": "def recall_metric(y_true, y_pred):\t\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\t\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\t\n recall = true_positives / (possible_positives + K.epsilon())\t\n return recall", "title": "" }, { "docid": "1eb52a24ccf7559b402d19f553fc15bb", "score": "0.6715394", "text": "def score_recall(df, target):\n true_positive = np.sum(np.logical_and(\n df.predicted == df.truth, df.predicted == target))\n false_negative = np.sum(np.logical_and(\n df.predicted != df.truth, df.truth == target))\n if true_positive + false_negative == 0:\n return 0\n return 
true_positive / (true_positive + false_negative)", "title": "" }, { "docid": "0f89d0eb2fbd1d2af16c3c40f7d63beb", "score": "0.6709475", "text": "def precision_at_k(df, k, threshold = 7.0): \n\n # removing items where true rating is unknown\n df = df.query(\"rating > 0\").copy()\n \n df.sort_values(by=[\"u_id\", \"prediction\"], ascending=False, inplace=True)\n\n for column in [\"prediction\", \"rating\"]:\n df[column + \"_rank\"] = df.groupby([\"u_id\"])[column].rank(ascending=False).astype(int)\n\n df[\"recommended\"] = df[\"prediction\"].apply(lambda x: 1 if x >= threshold else 0)\n df[\"consumed\"] = df[\"rating\"].apply(lambda x: 1 if x >= threshold else 0)\n df[\"hit\"] = df[\"recommended\"]*df[\"consumed\"]\n \n return df.query(\"prediction_rank <= @k\").groupby(\"u_id\")[\"hit\"].mean().mean()", "title": "" }, { "docid": "afa7a3e02eac0d812dc2bac8daf995d5", "score": "0.6706214", "text": "def __call__(self, params):\n thresholds = params\n output = np.zeros_like(self.prediction)\n\n # Threshold to output\n for n in range(self.N):\n for k in range(self.classes_num):\n if self.prediction[n, k] > thresholds[k]:\n output[n, k] = 1\n\n # Calculate score\n score = metrics.f1_score(self.target, output, average='macro')\n\n return score", "title": "" }, { "docid": "72b53a91c0443803cc0c0db2df32f8ce", "score": "0.6705617", "text": "def recommender_precision(predicted: List[list], actual: List[list]) -> int:\n \n precision = np.mean(list(map(lambda x, y: np.round(_precision(x,y), 4), predicted, actual)))\n return precision", "title": "" }, { "docid": "0bd369126a02bc6718cea6121b00a1a0", "score": "0.6678248", "text": "def precision_recall_f1(prediction, ground_truth):\n if not isinstance(prediction, list):\n prediction_tokens = prediction.split()\n else:\n prediction_tokens = prediction\n if not isinstance(ground_truth, list):\n ground_truth_tokens = ground_truth.split()\n else:\n ground_truth_tokens = ground_truth\n common = Counter(prediction_tokens) & Counter(ground_truth_tokens)\n num_same = sum(common.values())\n if num_same == 0:\n return 0, 0, 0\n p = 1.0 * num_same / len(prediction_tokens)\n r = 1.0 * num_same / len(ground_truth_tokens)\n f1 = (2 * p * r) / (p + r)\n return p, r, f1", "title": "" }, { "docid": "c442238ea300f58973df0aad8a8f1c9e", "score": "0.66774607", "text": "def precision(self, truth_path, pred_path):\n truth_targets = []\n truth_seqs = self.read_seqs(truth_path)\n for truth_seq in truth_seqs:\n #token.strip().split()[-1]: the tag of this token (label)\n truth_targets.append([token.strip().split()[-1] for token in truth_seq])\n pred_targets = []\n pred_seqs = self.read_seqs(pred_path)\n for pred_seq in pred_seqs:\n pred_targets.append([token.strip().split()[-1] for token in pred_seq])\n \n p,r = self.eval_tag_precisionrecall(truth_targets, pred_targets)\n return p", "title": "" }, { "docid": "f9d49284699c14955f5501aabd924fcc", "score": "0.66523105", "text": "def eval_performance(Y, Y_hat, thresh, verbose=False):\n nTruePos = np.sum(np.logical_and(Y_hat >= thresh, Y==1))\n nTrueNeg = np.sum(np.logical_and(Y_hat < thresh, Y==0))\n nFalsePos = np.sum(np.logical_and(Y_hat >= thresh, Y==0))\n nFalseNeg = np.sum(np.logical_and(Y_hat < thresh, Y==1))\n fallout = nFalsePos / float(nTrueNeg+nFalsePos)\n recall = nTruePos / float(nTruePos+nFalseNeg)\n precision = nTruePos / float(nTruePos+nFalsePos)\n specificity = nTrueNeg / float(nTrueNeg + nFalsePos)\n f1 = 2*(precision*recall) / (precision+recall)\n f1Alt = 2*nTruePos / float(2*nTruePos + nFalsePos + nFalseNeg)\n\n # for 
predicted probabilities, see empirically how many are actually membrane.\n # See calibrate() for more details\n bins = np.logical_and(Y_hat >= (thresh-.05), Y_hat < (thresh+.05))\n if np.sum(bins) > 0:\n probT = np.sum(np.logical_and(Y==1, bins)) / float(np.sum(bins))\n else:\n probT = np.nan\n \n if verbose:\n print '[info]: for threshold: %0.2f' % thresh\n print '[info]: p(%d%%): %0.3f' % (100*thresh,probT)\n print '[info]: FP/N (fall-out): %0.3f' % fallout\n print '[info]: TN/N (specificity): %0.3f' % specificity\n print '[info]: TP/P (recall): %0.3f' % recall\n print '[info]: TP/(TP+FP) (precision): %0.3f' % precision\n print '[info]: F1: %0.3f' % f1\n\n return {'nTruePos' : nTruePos,\n 'nTrueNeg' : nTrueNeg,\n 'nFalsePos' : nFalsePos,\n 'nFalseNeg' : nFalseNeg,\n 'recall' : recall,\n 'precision' : precision,\n 'fallout' : fallout,\n 'f1' : f1}", "title": "" }, { "docid": "91163aaa3656e4881a505ecedffb11b3", "score": "0.6643194", "text": "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n return true_positives / (predicted_positives + K.epsilon())", "title": "" }, { "docid": "04d2323ae94e3c0605c755c21e89277a", "score": "0.66273767", "text": "def _compute_precisions(gold_labels, ranked_lines, threshold):\n precisions = [0.0] * threshold\n threshold = min(threshold, len(ranked_lines))\n\n for i, line_number in enumerate(ranked_lines[:threshold]):\n if gold_labels[line_number] == 1:\n precisions[i] += 1.0\n\n for i in range(1, threshold): # accumulate\n precisions[i] += precisions[i - 1]\n for i in range(1, threshold): # normalize\n precisions[i] /= i+1\n return precisions", "title": "" }, { "docid": "bffd09b093552448027491a0fd898a1e", "score": "0.662516", "text": "def precision(actuals: SubsetPosetVec, predicted: SubsetPosetVec) -> float:\n\n tp = true_positives(actuals, predicted)\n fp = false_positives(actuals, predicted)\n\n return tp /(tp + fp)", "title": "" }, { "docid": "20b0babd8a33bd249c0d093fb42b8c44", "score": "0.6620956", "text": "def f1_score(prediction, ground_truth):\n return precision_recall_f1(prediction, ground_truth)[2]", "title": "" }, { "docid": "caf0931d4d0a45adcdb8e8d63f3b8971", "score": "0.661679", "text": "def get_recall_prec(pred_vec: Tensor, target_vec: Tensor) -> tuple:\n correct = pred_vec & target_vec\n recall = correct.sum(1) / target_vec.sum(1).float() # Enforce Float\n prec = correct.sum(1) / (pred_vec.sum(1) + 1e-6)\n return recall.mean(), prec.mean()", "title": "" } ]
0f2f04ded56e31af9f78b9577d14f7e1
Get a set of legal combinations of target (exposure time, sensitivity). Gets the target exposure value, which is a product of sensitivity (ISO) and exposure time, and returns equivalent tuples of (exposure time, sensitivity) that are all legal and that correspond to the four extrema in this 2D param space, as well as to two "middle" points. Will open a device session if its_session is None.
[ { "docid": "912be3127dd161a655a69a2043356cf1", "score": "0.7153181", "text": "def get_target_exposure_combos(its_session=None):\n if its_session is None:\n with its.device.ItsSession() as cam:\n exposure = get_target_exposure(cam)\n props = cam.get_camera_properties()\n else:\n exposure = get_target_exposure(its_session)\n props = its_session.get_camera_properties()\n\n sens_range = props['android.sensor.info.sensitivityRange']\n exp_time_range = props['android.sensor.info.exposureTimeRange']\n\n # Combo 1: smallest legal exposure time.\n e1_expt = exp_time_range[0]\n e1_sens = exposure / e1_expt\n if e1_sens > sens_range[1]:\n e1_sens = sens_range[1]\n e1_expt = exposure / e1_sens\n\n # Combo 2: largest legal exposure time.\n e2_expt = exp_time_range[1]\n e2_sens = exposure / e2_expt\n if e2_sens < sens_range[0]:\n e2_sens = sens_range[0]\n e2_expt = exposure / e2_sens\n\n # Combo 3: smallest legal sensitivity.\n e3_sens = sens_range[0]\n e3_expt = exposure / e3_sens\n if e3_expt > exp_time_range[1]:\n e3_expt = exp_time_range[1]\n e3_sens = exposure / e3_expt\n\n # Combo 4: largest legal sensitivity.\n e4_sens = sens_range[1]\n e4_expt = exposure / e4_sens\n if e4_expt < exp_time_range[0]:\n e4_expt = exp_time_range[0]\n e4_sens = exposure / e4_expt\n\n # Combo 5: middle exposure time.\n e5_expt = (exp_time_range[0] + exp_time_range[1]) / 2.0\n e5_sens = exposure / e5_expt\n if e5_sens > sens_range[1]:\n e5_sens = sens_range[1]\n e5_expt = exposure / e5_sens\n if e5_sens < sens_range[0]:\n e5_sens = sens_range[0]\n e5_expt = exposure / e5_sens\n\n # Combo 6: middle sensitivity.\n e6_sens = (sens_range[0] + sens_range[1]) / 2.0\n e6_expt = exposure / e6_sens\n if e6_expt > exp_time_range[1]:\n e6_expt = exp_time_range[1]\n e6_sens = exposure / e6_expt\n if e6_expt < exp_time_range[0]:\n e6_expt = exp_time_range[0]\n e6_sens = exposure / e6_expt\n\n return {\n \"minExposureTime\" : (int(e1_expt), int(e1_sens)),\n \"maxExposureTime\" : (int(e2_expt), int(e2_sens)),\n \"minSensitivity\" : (int(e3_expt), int(e3_sens)),\n \"maxSensitivity\" : (int(e4_expt), int(e4_sens)),\n \"midExposureTime\" : (int(e5_expt), int(e5_sens)),\n \"midSensitivity\" : (int(e6_expt), int(e6_sens))\n }", "title": "" } ]
[ { "docid": "8994c58b2d36c61ffa0f80920bee15bd", "score": "0.60776895", "text": "def get_target_exposure(its_session=None):\n cached_exposure = None\n for s in sys.argv[1:]:\n if s == \"target\":\n cached_exposure = __get_cached_target_exposure()\n if cached_exposure is not None:\n print \"Using cached target exposure\"\n return cached_exposure\n if its_session is None:\n with its.device.ItsSession() as cam:\n measured_exposure = __do_target_exposure_measurement(cam)\n else:\n measured_exposure = __do_target_exposure_measurement(its_session)\n __set_cached_target_exposure(measured_exposure)\n return measured_exposure", "title": "" }, { "docid": "549d8d24a57df7ff314cc2b7e95a65d7", "score": "0.59759665", "text": "def __do_target_exposure_measurement(its_session):\n print \"Measuring target exposure\"\n\n # Get AE+AWB lock first, so the auto values in the capture result are\n # populated properly.\n r = [[0.45, 0.45, 0.1, 0.1, 1]]\n sens, exp_time, gains, xform, _ \\\n = its_session.do_3a(r,r,r,do_af=False,get_results=True)\n\n # Convert the transform to rational.\n xform_rat = [{\"numerator\":int(100*x),\"denominator\":100} for x in xform]\n\n # Linear tonemap\n tmap = sum([[i/63.0,i/63.0] for i in range(64)], [])\n\n # Capture a manual shot with this exposure, using a linear tonemap.\n # Use the gains+transform returned by the AWB pass.\n req = its.objects.manual_capture_request(sens, exp_time)\n req[\"android.tonemap.mode\"] = 0\n req[\"android.tonemap.curveRed\"] = tmap\n req[\"android.tonemap.curveGreen\"] = tmap\n req[\"android.tonemap.curveBlue\"] = tmap\n req[\"android.colorCorrection.transform\"] = xform_rat\n req[\"android.colorCorrection.gains\"] = gains\n cap = its_session.do_capture(req)\n\n # Compute the mean luma of a center patch.\n yimg,uimg,vimg = its.image.convert_capture_to_planes(cap)\n tile = its.image.get_image_patch(yimg, 0.45, 0.45, 0.1, 0.1)\n luma_mean = its.image.compute_image_means(tile)\n\n # Compute the exposure value that would result in a luma of 0.5.\n return sens * exp_time * 0.5 / luma_mean[0]", "title": "" }, { "docid": "1c0bb153f5c72ff028eaa0ddd3e5d59c", "score": "0.54415935", "text": "def next_target(self, old_sInd, modes):\n\n OS = self.OpticalSystem\n TL = self.TargetList\n Obs = self.Observatory\n TK = self.TimeKeeping\n\n # create DRM\n DRM = {}\n\n # selecting appropriate koMap\n koMap = self.koMaps[modes[0][\"syst\"][\"name\"]]\n\n # allocate settling time + overhead time\n tmpCurrentTimeAbs = (\n TK.currentTimeAbs.copy() + Obs.settlingTime + modes[0][\"syst\"][\"ohTime\"]\n )\n tmpCurrentTimeNorm = (\n TK.currentTimeNorm.copy() + Obs.settlingTime + modes[0][\"syst\"][\"ohTime\"]\n )\n\n # look for available targets\n # 1. initialize arrays\n slewTimes = np.zeros(TL.nStars) * u.d\n # fZs = np.zeros(TL.nStars) / u.arcsec**2\n dV = np.zeros(TL.nStars) * u.m / u.s\n intTimes = np.zeros(TL.nStars) * u.d\n obsTimes = np.zeros([2, TL.nStars]) * u.d\n sInds = np.arange(TL.nStars)\n\n # 2. 
find spacecraft orbital START positions (if occulter, positions\n # differ for each star) and filter out unavailable targets\n sd = None\n if OS.haveOcculter:\n sd = Obs.star_angularSep(TL, old_sInd, sInds, tmpCurrentTimeAbs)\n obsTimes = Obs.calculate_observableTimes(\n TL, sInds, tmpCurrentTimeAbs, self.koMaps, self.koTimes, modes[0]\n )\n slewTimes = Obs.calculate_slewTimes(\n TL, old_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs\n )\n\n # 2.1 filter out totTimes > integration cutoff\n if len(sInds.tolist()) > 0:\n sInds = np.intersect1d(self.intTimeFilterInds, sInds)\n\n # start times, including slew times\n startTimes = tmpCurrentTimeAbs.copy() + slewTimes\n startTimesNorm = tmpCurrentTimeNorm.copy() + slewTimes\n\n # 2.5 Filter stars not observable at startTimes\n try:\n koTimeInd = np.where(\n np.round(startTimes[0].value) - self.koTimes.value == 0\n )[0][\n 0\n ] # find indice where koTime is startTime[0]\n sInds = sInds[\n np.where(np.transpose(koMap)[koTimeInd].astype(bool)[sInds])[0]\n ] # filters inds by koMap #verified against v1.35\n except: # noqa: E722 If there are no target stars to observe\n sInds = np.asarray([], dtype=int)\n\n # 3. filter out all previously (more-)visited targets, unless in\n if len(sInds.tolist()) > 0:\n sInds = self.revisitFilter(sInds, tmpCurrentTimeNorm)\n\n # 4.1 calculate integration times for ALL preselected targets\n (\n maxIntTimeOBendTime,\n maxIntTimeExoplanetObsTime,\n maxIntTimeMissionLife,\n ) = TK.get_ObsDetectionMaxIntTime(Obs, modes[0])\n maxIntTime = min(\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife\n ) # Maximum intTime allowed\n\n if len(sInds.tolist()) > 0:\n if OS.haveOcculter and old_sInd is not None:\n (\n sInds,\n slewTimes[sInds],\n intTimes[sInds],\n dV[sInds],\n ) = self.refineOcculterSlews(\n old_sInd, sInds, slewTimes, obsTimes, sd, modes[0]\n )\n endTimes = tmpCurrentTimeAbs.copy() + intTimes + slewTimes\n else:\n intTimes[sInds] = self.calc_targ_intTime(\n sInds, startTimes[sInds], modes[0]\n )\n sInds = sInds[\n np.where(intTimes[sInds] <= maxIntTime)\n ] # Filters targets exceeding end of OB\n endTimes = startTimes + intTimes\n\n if maxIntTime.value <= 0:\n sInds = np.asarray([], dtype=int)\n\n # 5.1 TODO Add filter to filter out stars entering and exiting keepout between\n # startTimes and endTimes\n\n # 5.2 find spacecraft orbital END positions (for each candidate target),\n # and filter out unavailable targets\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try:\n # endTimes may exist past koTimes so we have an exception\n # to hand this case\n koTimeInd = np.where(\n np.round(endTimes[0].value) - self.koTimes.value == 0\n )[0][\n 0\n ] # koTimeInd[0][0] # find indice where koTime is endTime[0]\n sInds = sInds[\n np.where(np.transpose(koMap)[koTimeInd].astype(bool)[sInds])[0]\n ] # filters inds by koMap #verified against v1.35\n except: # noqa: E722\n sInds = np.asarray([], dtype=int)\n\n # 6. choose best target from remaining\n if len(sInds.tolist()) > 0:\n # choose sInd of next target\n sInd, waitTime = self.choose_next_target(\n old_sInd, sInds, slewTimes, intTimes[sInds]\n )\n # Should Choose Next Target decide there are no stars it wishes to\n # observe at this time.\n if (sInd is None) and (waitTime is not None):\n self.vprint(\n (\n \"There are no stars Choose Next Target would like to Observe. 
\"\n \"Waiting {}\"\n ).format(waitTime)\n )\n return DRM, None, None, waitTime, None\n elif (sInd is None) and (waitTime is None):\n self.vprint(\n (\n \"There are no stars Choose Next Target would like to Observe \"\n \"and waitTime is None\"\n )\n )\n return DRM, None, None, waitTime, None\n # store selected star integration time\n det_mode = copy.deepcopy(modes[0])\n if (\n self.int_WA[sInd] > modes[1][\"IWA\"]\n and self.int_WA[sInd] < modes[1][\"OWA\"]\n ):\n det_mode[\"BW\"] = det_mode[\"BW\"] + modes[1][\"BW\"]\n det_mode[\"OWA\"] = modes[1][\"OWA\"]\n det_mode[\"inst\"][\"sread\"] = (\n det_mode[\"inst\"][\"sread\"] + modes[1][\"inst\"][\"sread\"]\n )\n det_mode[\"inst\"][\"idark\"] = (\n det_mode[\"inst\"][\"idark\"] + modes[1][\"inst\"][\"idark\"]\n )\n det_mode[\"inst\"][\"CIC\"] = (\n det_mode[\"inst\"][\"CIC\"] + modes[1][\"inst\"][\"CIC\"]\n )\n det_mode[\"syst\"][\"optics\"] = np.mean(\n (det_mode[\"syst\"][\"optics\"], modes[1][\"syst\"][\"optics\"])\n )\n det_mode[\"instName\"] = \"combined\"\n intTime = self.calc_targ_intTime(sInd, startTimes[sInd], det_mode)[0]\n else:\n intTime = intTimes[sInd]\n\n # if no observable target, advanceTime to next Observable Target\n else:\n self.vprint(\n \"No Observable Targets at currentTimeNorm= \"\n + str(TK.currentTimeNorm.copy())\n )\n return DRM, None, None, None, None\n\n # update visited list for selected star\n self.starVisits[sInd] += 1\n # store normalized start time for future completeness update\n self.lastObsTimes[sInd] = startTimesNorm[sInd]\n\n # populate DRM with occulter related values\n if OS.haveOcculter:\n DRM = Obs.log_occulterResults(\n DRM, slewTimes[sInd], sInd, sd[sInd], dV[sInd]\n )\n return DRM, sInd, intTime, waitTime, det_mode\n\n return DRM, sInd, intTime, waitTime, det_mode", "title": "" }, { "docid": "74bbc6d1315805fe07c41fb6d254d887", "score": "0.53218526", "text": "def next_target(self, old_sInd, mode, char_mode):\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n TL = self.TargetList\n Obs = self.Observatory\n TK = self.TimeKeeping\n SU = self.SimulatedUniverse\n\n # create DRM\n DRM = {}\n\n # create appropriate koMap\n koMap = self.koMaps[mode[\"syst\"][\"name\"]]\n char_koMap = self.koMaps[char_mode[\"syst\"][\"name\"]]\n\n # allocate settling time + overhead time\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy()\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy()\n\n # look for available targets\n # 1. initialize arrays\n slewTimes = np.zeros(TL.nStars) * u.d\n # fZs = np.zeros(TL.nStars) / u.arcsec**2\n dV = np.zeros(TL.nStars) * u.m / u.s\n intTimes = np.zeros(TL.nStars) * u.d\n char_intTimes = np.zeros(TL.nStars) * u.d\n obsTimes = np.zeros([2, TL.nStars]) * u.d\n sInds = np.arange(TL.nStars)\n detectable_sInds = np.arange(TL.nStars)\n\n # 2. 
find spacecraft orbital START positions (if occulter, positions\n # differ for each star) and filter out unavailable targets\n sd = None\n if OS.haveOcculter:\n sd = Obs.star_angularSep(TL, old_sInd, sInds, tmpCurrentTimeAbs)\n obsTimes = Obs.calculate_observableTimes(\n TL, sInds, tmpCurrentTimeAbs, self.koMaps, self.koTimes, mode\n )\n slewTimes = Obs.calculate_slewTimes(\n TL, old_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs\n )\n\n # 2.1 filter out totTimes > integration cutoff\n if len(sInds.tolist()) > 0:\n sInds = np.intersect1d(self.intTimeFilterInds, sInds)\n\n # start times, including slew times\n startTimes = tmpCurrentTimeAbs.copy() + slewTimes\n startTimesNorm = tmpCurrentTimeNorm.copy() + slewTimes\n\n # 2.5 Filter stars not observable at startTimes\n try:\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(\n np.round(startTimes[sInds[i]].value) - self.koTimes.value == 0\n )[0][\n 0\n ] # find indice where koTime is startTime[0]\n tmpIndsbool.append(\n koMap[sInds[i]][koTimeInd].astype(bool)\n ) # Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except: # noqa: E722 If there are no target stars to observe\n sInds = np.asarray([], dtype=int)\n\n # 2.7 Filter off all non-earthlike-planet-having stars\n if TL.earths_only or self.char_only:\n sInds = np.intersect1d(sInds, self.promotable_stars)\n\n # 3. filter out all previously (more-)visited targets, unless in\n if len(sInds.tolist()) > 0:\n sInds = self.revisitFilter(sInds, tmpCurrentTimeNorm)\n\n # 4.1 calculate integration times for ALL preselected targets\n (\n maxIntTimeOBendTime,\n maxIntTimeExoplanetObsTime,\n maxIntTimeMissionLife,\n ) = TK.get_ObsDetectionMaxIntTime(Obs, mode)\n maxIntTime = min(\n maxIntTimeOBendTime,\n maxIntTimeExoplanetObsTime,\n maxIntTimeMissionLife,\n OS.intCutoff,\n ) # Maximum intTime allowed\n\n (\n maxIntTimeOBendTime,\n maxIntTimeExoplanetObsTime,\n maxIntTimeMissionLife,\n ) = TK.get_ObsDetectionMaxIntTime(Obs, char_mode)\n char_maxIntTime = min(\n maxIntTimeOBendTime,\n maxIntTimeExoplanetObsTime,\n maxIntTimeMissionLife,\n OS.intCutoff,\n ) # Maximum intTime allowed\n\n if len(sInds.tolist()) > 0:\n intTimes[sInds] = self.calc_targ_intTime(sInds, startTimes[sInds], mode)\n\n # Adjust integration time for stars with known earths around them\n for star in sInds:\n if star in self.promotable_stars:\n earths = np.intersect1d(\n np.where(SU.plan2star == star)[0], self.known_earths\n ).astype(int)\n if np.any(earths):\n fZ = ZL.fZ(Obs, TL, star, startTimes[star], mode)\n fEZ = SU.fEZ[earths].to(\"1/arcsec2\").value / u.arcsec**2\n if SU.lucky_planets:\n phi = (1 / np.pi) * np.ones(len(SU.d))\n dMag = deltaMag(SU.p, SU.Rp, SU.d, phi)[\n earths\n ] # delta magnitude\n WA = np.arctan(SU.a / TL.dist[SU.plan2star]).to(\"arcsec\")[\n earths\n ] # working angle\n else:\n dMag = SU.dMag[earths]\n WA = SU.WA[earths]\n\n if np.all((WA < mode[\"IWA\"]) | (WA > mode[\"OWA\"])):\n intTimes[star] = 0.0 * u.d\n else:\n earthlike_inttimes = OS.calc_intTime(\n TL, star, fZ, fEZ, dMag, WA, mode\n )\n earthlike_inttimes[~np.isfinite(earthlike_inttimes)] = (\n 0 * u.d\n )\n earthlike_inttime = earthlike_inttimes[\n (earthlike_inttimes < maxIntTime)\n ]\n if len(earthlike_inttime) > 0:\n intTimes[star] = np.max(earthlike_inttime)\n else:\n intTimes[star] = np.max(earthlike_inttimes)\n endTimes = (\n startTimes\n + (intTimes * mode[\"timeMultiplier\"])\n + Obs.settlingTime\n + mode[\"syst\"][\"ohTime\"]\n )\n\n sInds = sInds[\n (intTimes[sInds] <= 
maxIntTime)\n ] # Filters targets exceeding maximum intTime\n sInds = sInds[(intTimes[sInds] > 0.0 * u.d)] # Filters with an inttime of 0\n detectable_sInds = sInds # Filters targets exceeding maximum intTime\n\n if maxIntTime.value <= 0:\n sInds = np.asarray([], dtype=int)\n\n if len(sInds.tolist()) > 0:\n # calculate characterization starttimes\n temp_intTimes = intTimes.copy()\n for sInd in sInds:\n if sInd in self.promotable_stars:\n temp_intTimes[sInd] = 0 * u.d\n else:\n temp_intTimes[sInd] = (\n intTimes[sInd].copy()\n + (intTimes[sInd] * (mode[\"timeMultiplier\"] - 1.0))\n + Obs.settlingTime\n + mode[\"syst\"][\"ohTime\"]\n )\n char_startTimes = startTimes + temp_intTimes\n\n # characterization_start = char_startTimes\n char_intTimes[sInds] = self.calc_targ_intTime(\n sInds, char_startTimes[sInds], char_mode\n ) * (1 + self.charMargin)\n\n # Adjust integration time for stars with known earths around them\n for star in sInds:\n if star in self.promotable_stars:\n char_earths = np.intersect1d(\n np.where(SU.plan2star == star)[0], self.known_earths\n ).astype(int)\n if np.any(char_earths):\n fZ = ZL.fZ(Obs, TL, star, char_startTimes[star], char_mode)\n fEZ = SU.fEZ[char_earths].to(\"1/arcsec2\").value / u.arcsec**2\n if SU.lucky_planets:\n phi = (1 / np.pi) * np.ones(len(SU.d))\n dMag = deltaMag(SU.p, SU.Rp, SU.d, phi)[\n char_earths\n ] # delta magnitude\n WA = np.arctan(SU.a / TL.dist[SU.plan2star]).to(\"arcsec\")[\n char_earths\n ] # working angle\n else:\n dMag = SU.dMag[char_earths]\n WA = SU.WA[char_earths]\n\n if np.all((WA < char_mode[\"IWA\"]) | (WA > char_mode[\"OWA\"])):\n char_intTimes[star] = 0.0 * u.d\n else:\n earthlike_inttimes = OS.calc_intTime(\n TL, star, fZ, fEZ, dMag, WA, char_mode\n ) * (1 + self.charMargin)\n earthlike_inttimes[~np.isfinite(earthlike_inttimes)] = (\n 0 * u.d\n )\n earthlike_inttime = earthlike_inttimes[\n (earthlike_inttimes < char_maxIntTime)\n ]\n if len(earthlike_inttime) > 0:\n char_intTimes[star] = np.max(earthlike_inttime)\n else:\n char_intTimes[star] = np.max(earthlike_inttimes)\n char_endTimes = (\n char_startTimes\n + (char_intTimes * char_mode[\"timeMultiplier\"])\n + Obs.settlingTime\n + char_mode[\"syst\"][\"ohTime\"]\n )\n\n sInds = sInds[\n (char_intTimes[sInds] <= char_maxIntTime)\n ] # Filters targets exceeding maximum intTime\n sInds = sInds[\n (char_intTimes[sInds] > 0.0 * u.d)\n ] # Filters with an inttime of 0\n\n if char_maxIntTime.value <= 0:\n sInds = np.asarray([], dtype=int)\n\n # 5.1 TODO Add filter to filter out stars entering and exiting keepout\n # between startTimes and endTimes\n try:\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(\n np.round(char_startTimes[sInds[i]].value) - self.koTimes.value == 0\n )[0][\n 0\n ] # find indice where koTime is startTime[0]\n tmpIndsbool.append(\n char_koMap[sInds[i]][koTimeInd].astype(bool)\n ) # Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except: # noqa: E722 If there are no target stars to observe\n sInds = np.asarray([], dtype=int)\n\n # 5.2 find spacecraft orbital END positions (for each candidate target),\n # and filter out unavailable targets\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try:\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n # find indices where koTime is endTime[0]\n koTimeInd = np.where(\n np.round(endTimes[sInds[i]].value) - self.koTimes.value == 0\n )[0][0]\n # Is star observable at time ind\n tmpIndsbool.append(koMap[sInds[i]][koTimeInd].astype(bool))\n sInds = 
sInds[tmpIndsbool]\n del tmpIndsbool\n except: # noqa: E722\n sInds = np.asarray([], dtype=int)\n\n if len(detectable_sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try:\n tmpIndsbool = list()\n for i in np.arange(len(detectable_sInds)):\n koTimeInd = np.where(\n np.round(endTimes[detectable_sInds[i]].value)\n - self.koTimes.value\n == 0\n )[0][\n 0\n ] # find indice where koTime is endTime[0]\n tmpIndsbool.append(\n koMap[detectable_sInds[i]][koTimeInd].astype(bool)\n ) # Is star observable at time ind\n detectable_sInds = detectable_sInds[tmpIndsbool]\n del tmpIndsbool\n except: # noqa: E722\n detectable_sInds = np.asarray([], dtype=int)\n\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\n try:\n tmpIndsbool = list()\n for i in np.arange(len(sInds)):\n koTimeInd = np.where(\n np.round(char_endTimes[sInds[i]].value) - self.koTimes.value\n == 0\n )[0][\n 0\n ] # find indice where koTime is endTime[0]\n tmpIndsbool.append(\n char_koMap[sInds[i]][koTimeInd].astype(bool)\n ) # Is star observable at time ind\n sInds = sInds[tmpIndsbool]\n del tmpIndsbool\n except: # noqa: E722\n sInds = np.asarray([], dtype=int)\n\n # 6.2 Filter off coronograph stars with too many visits and no detections\n no_dets = np.logical_and(\n (self.sInd_charcounts[sInds] >= self.max_successful_chars),\n (self.sInd_charcounts[sInds] == 0),\n )\n sInds = sInds[np.where(np.invert(no_dets))[0]]\n\n # using starVisits here allows multiple charcounts\n # to count towards orbit determination detections\n no_dets = np.logical_and(\n (self.starVisits[detectable_sInds] >= self.n_det_remove),\n (self.sInd_detcounts[detectable_sInds] == 0),\n )\n detectable_sInds = detectable_sInds[np.where(np.invert(no_dets))[0]]\n\n # find stars that are available for detection revisits\n detectable_sInds_tmp = []\n for dsInd in detectable_sInds:\n # if dsInd not awaiting characterization or\n # (is char'able and already char'd)\n if dsInd not in self.promotable_stars or (\n dsInd in self.promotable_stars and dsInd in self.promoted_stars\n ):\n detectable_sInds_tmp.append(dsInd)\n detectable_sInds = np.array(detectable_sInds_tmp)\n\n if not np.any(sInds) and np.any(detectable_sInds):\n if not self.char_only:\n sInds = detectable_sInds\n # implied else is sInds = []\n\n # 6. choose best target from remaining\n if len(sInds.tolist()) > 0:\n # choose sInd of next target\n sInd, waitTime = self.choose_next_target(\n old_sInd, sInds, slewTimes, intTimes[sInds]\n )\n\n # Should Choose Next Target decide there are no stars it wishes\n # to observe at this time\n if (sInd is None) and (waitTime is not None):\n self.vprint(\n (\n \"There are no stars Choose Next Target would like to Observe. 
\"\n \"Waiting {}\"\n ).format(waitTime)\n )\n return DRM, None, None, waitTime\n elif (sInd is None) and (waitTime is None):\n self.vprint(\n (\n \"There are no stars Choose Next Target would like to Observe \"\n \"and waitTime is None\"\n )\n )\n return DRM, None, None, waitTime\n # store selected star integration time\n intTime = intTimes[sInd]\n\n # if no observable target, advanceTime to next Observable Target\n else:\n self.vprint(\n \"No Observable Targets at currentTimeNorm= \"\n + str(TK.currentTimeNorm.copy())\n )\n return DRM, None, None, None\n\n # update visited list for selected star\n self.starVisits[sInd] += 1\n # store normalized start time for future completeness update\n self.lastObsTimes[sInd] = startTimesNorm[sInd]\n\n # populate DRM with occulter related values\n if OS.haveOcculter:\n DRM = Obs.log_occulterResults(\n DRM, slewTimes[sInd], sInd, sd[sInd], dV[sInd]\n )\n return DRM, sInd, intTime, slewTimes[sInd]\n\n return DRM, sInd, intTime, waitTime", "title": "" }, { "docid": "0251b20684f4e15d87f6ae0b48fa8616", "score": "0.48103523", "text": "def get_exposure_info():\n import mastquery.query\n\n master = 'grizli-v1-19.12.04'\n master = 'grizli-v1-19.12.05'\n master = 'grizli-v1-20.10.12'\n\n tab = utils.read_catalog('{0}_visits.fits'.format(master))\n all_visits = np.load('{0}_visits.npy'.format(master), allow_pickle=True)[0]\n\n all_files = []\n for v in all_visits:\n all_files.extend(v['files'])\n\n prog = [f[1:4] for f in all_files]\n _res = np.unique(np.array(prog), return_counts=True)\n t = utils.GTable()\n t['prog'] = _res[0]\n t['count'] = _res[1]\n so = np.argsort(t['count'])\n t = t[so[::-1]]\n for pr in t['prog']:\n if os.path.exists('{0}_query.fits'.format(pr)):\n #print('Skip ', pr)\n continue\n\n print(pr)\n\n try:\n _q = mastquery.query.run_query(obs_id='[ij]{0}*'.format(pr))\n _p = mastquery.query.get_products_table(_q)\n except:\n continue\n\n _q.write('{0}_query.fits'.format(pr))\n _p.write('{0}_prod.fits'.format(pr))\n\n # Send to AWS\n from grizli.aws import db\n import pandas as pd\n from astropy.table import Table\n\n engine = db.get_db_engine()\n\n files = glob.glob('*query.fits')\n files.sort()\n\n cols = ['obs_id', 'target', 'target_ra', 'target_dec', 't_min', 't_max', 'exptime', 'wavelength_region', 'filter', 'em_min', 'em_max', 'target_classification', 'obs_title', 't_obs_release', 'instrument_name', 'proposal_pi', 'proposal_id', 'proposal_type', 'sregion', 'dataRights', 'mtFlag', 'obsid', 'objID', 'visit']\n\n for i, file in enumerate(files):\n print(file)\n _q = Table.read(file, character_as_bytes=False)\n\n _q['proposal_id'] = np.cast[np.int16](_q['proposal_id'])\n _q['obsid'] = np.cast[np.int64](_q['obsid'])\n _q['objID'] = np.cast[np.int64](_q['objID'])\n _q.rename_column('ra','target_ra')\n _q.rename_column('dec','target_dec')\n _q.rename_column('footprint', 'sregion')\n \n df = _q[cols].to_pandas()\n df.to_sql('mast_query', engine, index=False, if_exists='append', method='multi')\n\n files = glob.glob('*_prod.fits')\n files.sort()\n\n cols = ['obsid', 'dataset']\n\n for i, file in enumerate(files):\n print(i, file)\n _p = Table.read(file, character_as_bytes=False)\n\n _p['obsid'] = np.cast[np.int64](_p['obsid'])\n _p['dataset'] = [d[:-1] for d in _p['observation_id']]\n\n df = _p[cols].to_pandas()\n df.to_sql('mast_products', engine, index=False, if_exists='append', method='multi')\n\n ##########\n # Exposure log\n\n # Initialize, adding an array column manually for the footprints\n v = all_visits[0]\n N = len(v['files'])\n fps = 
[np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]\n df = pd.DataFrame()\n df['file'] = [f.split('_')[0] for f in v['files']]\n df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]\n df['extension'] = [f.split('_')[1][:3] for f in v['files']]\n df['filter'] = v['filter']\n df['parent'] = v['parent']\n df['awspath'] = v['awspath']\n df['product'] = v['product']\n df['filter'] = v['product'].split('-')[-1]\n\n df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]\n df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]\n df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]\n\n # Make table\n engine.execute('drop table exposure_log;')\n df.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')\n engine.execute('alter table exposure_log add column footprint float [];')\n engine.execute('delete from exposure_log where True;')\n \n engine.execute('ALTER TABLE exposure_log ADD COLUMN mdrizsky float;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN exptime float;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN expstart float;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN ndq int;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN expflag VARCHAR;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN sunangle float;')\n\n engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky101 real;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky102 real;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN gsky103 real;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN persnpix integer;')\n engine.execute('ALTER TABLE exposure_log ADD COLUMN perslevl real;')\n \n _exp = db.from_sql(\"select distinct(file) from exposure_log\", engine)\n db_files = np.unique(_exp['file'])\n charge = db.from_sql(\"select * from charge_fields\", engine)\n \n SKIP = 1000\n df0 = None\n \n for i, v in enumerate(all_visits):\n _count = np.sum([f.split('_')[0] in db_files for f in v['files']])\n \n if _count == len(v['files']):\n continue\n \n if v['parent'] not in charge['field_root']:\n print('Warning: {0} not in charge[\"field_root\"]'.format(v['parent']))\n continue\n \n print(i, v['parent'], v['product'], _count, len(v['files']))\n\n N = len(v['files'])\n\n fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]\n\n df = pd.DataFrame()\n df['file'] = [f.split('_')[0] for f in v['files']]\n df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]\n df['extension'] = [f.split('_')[1][:3] for f in v['files']]\n df['filter'] = v['filter']\n df['parent'] = v['parent']\n df['awspath'] = v['awspath']\n df['product'] = v['product']\n df['filter'] = v['product'].split('-')[-1]\n\n df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]\n df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]\n df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]\n df['footprint'] = fps\n \n if df0 is None:\n df0 = df \n else:\n df0 = df0.append(df)\n \n if len(df0) > SKIP:\n # Send to DB and reset append table\n print('>>> to DB >>> ({0}, {1})'.format(i, len(df0)))\n df0.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')\n df0 = df[:0]", "title": "" }, { "docid": "05baad0254b50ed3555051b1501e9d7e", "score": "0.47046822", "text": "def Environement(x,y,C0,sig,target):\r\n E = C0*np.exp(-((x-target[0])**2+(y-target[1])**2)/sig)\r\n return E", "title": "" }, { "docid": 
"bbf6a9e447049c4ce403a24c53125e39", "score": "0.4605702", "text": "def ComputeExposure2():\n exposure = SpotCamCStructure.EXPOSURE_STRUCT2()\n _ComputeExposure(byref(exposure))\n return exposure", "title": "" }, { "docid": "6e93b64e29b7941132a054a4460b0c99", "score": "0.45974216", "text": "def get_target_data(self, variable_id, target_lat, target_lon, train_date_start, target_end_date, resolution, path):\n if type(target_lat) is list and type(target_lon) is list:\n # this part need to be improved\n if self.target_us_all is True:\n # subsampling to get the corresponding resolution\n spatial_map = self.get_target_map(resolution, path)\n else:\n spatial_map = self.remove_masked_data('us', target_lat, target_lon, path)\n target = self.get_covariates_data_parallel_updated(train_date_start, target_end_date, [variable_id], spatial_map, path)\n return target\n else:\n target_lat = self.find_the_cloest_value(target_lat)\n target_lon = self.find_the_cloest_value(target_lon)\n if train_date_start.year == target_end_date.year:\n file_name = path + variable_id + '.' + str(train_date_start.year) + '.h5'\n target_file = pd.read_hdf(file_name)\n date_index = pd.date_range(start=train_date_start, end=target_end_date)\n target = target_file.loc[target_lat, target_lon, date_index]\n target = target.to_frame()\n target = target.rename(columns={target.columns[-1]: variable_id})\n else:\n # start year\n file_name = path + variable_id + '.' + str(train_date_start.year) + '.h5'\n target_file = pd.read_hdf(file_name)\n date_index = pd.date_range(start=train_date_start, end=pd.Timestamp(train_date_start.year, 12, 31))\n target_start = target_file.loc[target_lat, target_lon, date_index]\n target_start = target_start.to_frame()\n # end year\n file_name = path + variable_id + '.' + str(target_end_date.year) + '.h5'\n target_file = pd.read_hdf(file_name)\n date_index = pd.date_range(start=pd.Timestamp(target_end_date.year, 1, 1), end=target_end_date)\n target_end = target_file.loc[target_lat, target_lon, date_index]\n target_end = target_end.to_frame()\n if target_end_date.year - train_date_start.year > 1:\n for year in range(train_date_start.year + 1, target_end_date.year):\n file_name = path + variable_id + '.' 
+ str(year) + '.h5'\n target_file = pd.read_hdf(file_name)\n date_index = pd.date_range(start=pd.Timestamp(year, 1, 1), end=pd.Timestamp(year, 12, 31))\n target_file = target_file.loc[target_lat, target_lon, date_index]\n target_temp = target_file.to_frame()\n target_start = target_start.append(target_temp)\n target = target_start.append(target_end)\n target = target.rename(columns={target.columns[-1]: variable_id})\n return target", "title": "" }, { "docid": "5a9a66e94c612dee770708591c49460d", "score": "0.4500449", "text": "def getOptimalSet(plate, exposure):\n\n from Totoro.dbclasses import Set as TotoroSet\n\n dither = exposure.ditherPosition\n\n incompleteSets = [\n set for set in plate.sets if set.getStatus()[0] in ['Incomplete', 'Unplugged']\n ]\n\n validSets = []\n signalNoise = []\n for ss in incompleteSets:\n\n setDithers = ss.getDitherPositions()\n\n if dither in setDithers:\n # If the dither exists, skips this set\n continue\n elif dither is None:\n # If the exposure has not a dither position (usually for mock\n # mock exposures), selects one of the unused dithers in the set.\n tmpDither = getValidDither(ss)\n assert tmpDither is not None, 'failed getting valid dither'\n exposure.ditherPosition = tmpDither\n\n exposures = ss.totoroExposures + [exposure]\n mockSet = TotoroSet.fromExposures(exposures)\n status = mockSet.getStatus(silent=True)[0]\n\n if status in ['Good', 'Excellent']:\n validSets.append(ss)\n # Adds 100 to SN2 array to make sure complete set are always chosen\n signalNoise.append(mockSet.getSN2Array() + 100)\n elif status in ['Incomplete', 'Unplugged']:\n validSets.append(ss)\n signalNoise.append(mockSet.getSN2Array())\n\n # Restore original dither position, in case we have changed it\n exposure.ditherPosition = dither\n\n if len(validSets) == 0:\n return None\n\n signalNoise = np.array(signalNoise)\n\n # Calculates the contribution of each mock set to the total completion.\n completion = np.zeros((signalNoise.shape[0], 2), np.float)\n completion[:, 0] = np.nanmean(signalNoise[:, 0:2], axis=1)\n completion[:, 0] /= config['SN2thresholds']['plateBlue']\n completion[:, 1] = np.nanmean(signalNoise[:, 2:], axis=1)\n completion[:, 1] /= config['SN2thresholds']['plateRed']\n completion = np.nanmin(completion, axis=1)\n\n # Selects the set that contributes more to the total completion.\n return validSets[np.argmax(completion)]", "title": "" }, { "docid": "6000beaf69da2e2fcbd430595d507d34", "score": "0.4496659", "text": "def getObsParams(self, target, mjd, epoch='J2000', station=None, output_Vmag=True, _inp=None):\n\n AU_DE430 = 1.49597870700000000e+11 # m\n GSUN = 0.295912208285591100e-03 * AU_DE430**3 / 86400.0**2\n # convert AU to m:\n a = target['a'] * AU_DE430\n e = target['e']\n # convert deg to rad:\n i = target['i'] * np.pi / 180.0\n w = target['w'] * np.pi / 180.0\n Node = target['Node'] * np.pi / 180.0\n M0 = target['M0'] * np.pi / 180.0\n t0 = target['epoch']\n H = target['H']\n G = target['G']\n\n asteroid = Asteroid(a, e, i, w, Node, M0, GSUN, t0, H, G)\n\n # jpl_eph - path to eph used by pypride\n radec, radec_dot, Vmag = asteroid.raDecVmag(mjd, self.inp['jpl_eph'], epoch=epoch, station=station,\n output_Vmag=output_Vmag, _inp=_inp)\n # print(radec.ra.hms, radec.dec.dms, radec_dot, Vmag)\n\n return radec, radec_dot, Vmag", "title": "" }, { "docid": "d6207d94f1f4fadbf9ff47242ee8f77f", "score": "0.44942063", "text": "def calc_acquisition(blur_pixel, exposure_time, readout_time, camera_size_x, angular_range, number_of_proj):\n\n mid_detector = camera_size_x / 
2.0\n delta_blur = np.arccos(1 - blur_pixel / mid_detector) * 180.0 / np.pi\n rot_speed = delta_blur / exposure_time\n\n scan_time = angular_range / rot_speed\n frame_rate = number_of_proj / scan_time\n print(\"*************************************\")\n print(\"Total # of proj: \", number_of_proj)\n print(\"Exposure Time: \", exposure_time, \"s\")\n print(\"Readout Time: \", readout_time, \"s\")\n print(\"Angular Range: \", angular_range, \"degrees\")\n print(\"Camera X size: \", camera_size_x)\n print(\"Blur Error: \", blur_pixel, \"pixels\")\n print(\"*************************************\")\n print(\"Rot Speed: : \", rot_speed, \"degrees/s\")\n print(\"Scan Time:: \", scan_time, \"s\")\n print(\"Frame Rate: \", frame_rate, \"fps\")\n print(\"*************************************\")\n \n return frame_rate, rot_speed", "title": "" }, { "docid": "1146e72f7678e2f4dcd5572c66d9fef6", "score": "0.44863427", "text": "def get_env_featutures(obs, units_dimension, actions_spectrum):\n #Spatial features :\n feature_minimap = np.array([obs.observation['feature_minimap']])\n feature_screen = np.array([obs.observation['feature_screen']])\n #Non spatial features raw :\n non_spatial_state=[]\n last_actions = obs.observation['last_actions']\n action_result = obs.observation['action_result']\n game_loop = obs.observation['game_loop']\n score_cumulative = obs.observation['score_cumulative']\n player = obs.observation['player']\n control_groups = obs.observation['control_groups']\n available_actions = obs.observation['available_actions']\n raw_available_actions = available_actions.copy()\n single_select = obs.observation['single_select']\n multi_select = obs.observation['multi_select']\n alerts = obs.observation['alerts']\n #Non spatial features preprocessed:\n last_actions = one_hot_and_reduce(preprocess_last_actions(last_actions), 541) #one_hot dim 541 | '541' is based on pysc2/lib/actions.py\n action_result = one_hot_and_reduce(preprocess_non_spatial(action_result, 'action'), 215) #one_hot dim 215 |# '215' is based on https://github.com/Blizzard/s2client-proto/blob/master/s2clientprotocol/error.proto\n game_loop = preprocess_game_loop(game_loop)\n score_cumulative = preprocess_quantitative_arrays(score_cumulative, 'score_cumulative')\n player = preprocess_quantitative_arrays(player[1:], 'player') #We don't take the \"player id which is at index 0\"\n control_groups = preprocess_control_groups(control_groups, units_dimension)\n available_actions = one_hot_and_reduce(available_actions, 541)\n single_select = one_hot_and_reduce(single_select[:,0], units_dimension)\n multi_select = one_hot_and_reduce(preprocess_non_spatial(multi_select, 'multi_select')[:,0], units_dimension)\n alerts = preprocess_alerts(alerts)\n L_non_spatial_inputs = [last_actions, action_result, game_loop, score_cumulative, player, control_groups, available_actions, single_select, multi_select, alerts]\n for input in L_non_spatial_inputs:\n non_spatial_state+=list(input)\n non_spatial_state = np.array([non_spatial_state])\n\n actions_filter = np.array([[1 if action in raw_available_actions else 0 for action in actions_spectrum]])\n available_actions = np.array([available_actions])\n\n dict_env_inputs = {'minimap_features':feature_minimap, 'screen_features':feature_screen,\n 'available_actions':available_actions, 'non_spatial_state':non_spatial_state, 'actions_filter':actions_filter}\n\n return dict_env_inputs, raw_available_actions", "title": "" }, { "docid": "46e563b95aa0695f55267bfa6f709c45", "score": "0.4478354", "text": "def 
GetExposureRange(self):\n expcaps = c_uint32()\n self._dll.is_Exposure(self._hcam, EXPOSURE_CMD_GET_CAPS, byref(expcaps), sizeof(expcaps))\n if not expcaps.value & EXPOSURE_CAP_EXPOSURE:\n return None\n\n # Note: min seems always the same, but max depends on the frame-rate\n # => just return the max frame time?\n stdrng = (c_double * 3)() # min/max/inc in ms\n self._dll.is_Exposure(self._hcam, EXPOSURE_CMD_GET_EXPOSURE_RANGE, byref(stdrng), sizeof(stdrng))\n rng = (stdrng[0] * 1e-3, stdrng[1] * 1e-3)\n\n # TODO: if expcaps.value & EXPOSURE_CAP_LONG_EXPOSURE\n\n return rng", "title": "" }, { "docid": "d800dc75670b8f3c44c5e062185b06e7", "score": "0.44675544", "text": "def getObsParams(self, target, mjd, epoch='J2000', station=None, output_Vmag=True, _inp=None):\n\n AU_DE421 = 1.49597870699626200e+11 # m\n GSUN = 0.295912208285591100e-03 * AU_DE421**3 / 86400.0**2\n # convert AU to m:\n a = target['a'] * AU_DE421\n e = target['e']\n # convert deg to rad:\n i = target['i'] * np.pi / 180.0\n w = target['w'] * np.pi / 180.0\n Node = target['Node'] * np.pi / 180.0\n M0 = target['M0'] * np.pi / 180.0\n t0 = target['epoch']\n H = target['H']\n G = target['G']\n\n asteroid = Asteroid(a, e, i, w, Node, M0, GSUN, t0, H, G)\n\n # jpl_eph - path to eph used by pypride\n radec, radec_dot, Vmag = asteroid.raDecVmag(mjd, self.inp['jpl_eph'], epoch=epoch, station=station,\n output_Vmag=output_Vmag, _inp=_inp)\n # print(radec.ra.hms, radec.dec.dms, radec_dot, Vmag)\n\n return radec, radec_dot, Vmag", "title": "" }, { "docid": "2ddb58f66c4892128bbfe402c6add4eb", "score": "0.446051", "text": "def _get_target_performance_parameters(self):\n return self.__target_performance_parameters", "title": "" }, { "docid": "1bad6fb620c4f6bcfc0999af256702a4", "score": "0.4418532", "text": "def get_sky(night, expid, specprod, specs=range(10)):\n # AR specprod : cascades, daily\n incident = CoAdd(\"full\")\n detected = {}\n # Loop over cameras.\n for camera in [\"b\", \"r\", \"z\"]:\n detected[camera] = CoAdd(camera)\n # Loop over spectrographs.\n for spec in specs:\n # Read the flat-fielded (constant) sky model in this spectrograph.\n skypath = os.path.join(\n os.getenv(\"DESI_ROOT\"),\n \"spectro\",\n \"redux\",\n specprod,\n \"exposures\",\n \"{}\".format(night),\n \"{:08}\".format(expid),\n \"sky-{}{}-{:08}.fits\".format(camera, spec, expid),\n )\n if not os.path.isfile(skypath):\n print(\"\\t\\tSkipping non-existent {}.\".format(skypath))\n continue\n with fitsio.FITS(str(skypath)) as hdus:\n exptime = hdus[0].read_header()[\"EXPTIME\"]\n flux = hdus[\"SKY\"].read()\n ivar = hdus[\"IVAR\"].read()\n mask = hdus[\"MASK\"].read()\n # Verify that we have the expected wavelengths.\n assert np.allclose(detected[camera].wave, hdus[\"WAVELENGTH\"].read())\n # There are actually small variations in flux!\n # TODO: figure out where these variations come from.\n # For now, take the median over fibers.\n detected[camera] += Spectrum(\n camera, np.median(flux, axis=0), np.median(ivar, axis=0)\n )\n # Scale to the exposure time.\n detected[camera] /= exptime\n # Correct for throughput and accumulate over cameras.\n incident += detected[camera] / spec_thru[camera]\n return incident, detected", "title": "" }, { "docid": "345900c752e7cd7923abc6bb99fb61ea", "score": "0.4395104", "text": "def _get_targets(self):\n \n targets = list()\n\n for id, tdict in self._target_dictionary.items():\n spectra = list()\n coadd = list()\n\n for spectra_label, spectra_ref in zip([\"spectra\", \"coadd\"], [spectra, coadd]):\n spectra_dict_list = 
tdict[spectra_label]\n\n for spectrum_dict in spectra_dict_list :\n # we are not doing a copy here (except maybe for R ...)\n wave = self._shared[spectrum_dict[\"wave\"][0]:spectrum_dict[\"wave\"][1]]\n flux = self._shared[spectrum_dict[\"flux\"][0]:spectrum_dict[\"flux\"][1]]\n ivar = self._shared[spectrum_dict[\"ivar\"][0]:spectrum_dict[\"ivar\"][1]]\n rdata = self._shared[spectrum_dict[\"rdata\"][0]:spectrum_dict[\"rdata\"][1]]\n roffsets = self._shared[spectrum_dict[\"roffsets\"][0]:spectrum_dict[\"roffsets\"][1]]\n rshape = tuple(self._shared[spectrum_dict[\"rshape\"][0]:spectrum_dict[\"rshape\"][1]].astype(int))\n rdata = rdata.reshape((roffsets.size,rdata.shape[0]//roffsets.size))\n spectra_ref.append( SimpleSpectrum(wave, flux, ivar, \n scipy.sparse.dia_matrix((rdata, roffsets), \n shape=rshape)) ) \n targets.append(Target(id, spectra, coadd=coadd))\n\n return targets", "title": "" }, { "docid": "7507fe3b009a9deeb66fc3c9333ae369", "score": "0.43876693", "text": "def get_soft_target_model_updates(target, source, tau):\n\tpass", "title": "" }, { "docid": "b5f4df315f9ebe8e0a6d652fa5be88b3", "score": "0.43266526", "text": "def _get_obs(self):\r\n obs = self.env._get_obs()\r\n obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.default_epsilon])\r\n obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.default_epsilon])\r\n return obs", "title": "" }, { "docid": "dfe1c6fb65a60e1f3daba75b9cf26301", "score": "0.4323028", "text": "def Calculate_exposure_for_SN(self,SN,verbose=False):\n methods = ['nrsirs2rapid','nrsirs2']\n traditional = ['nrsrapid', 'nrsrapidd6','nrs']\n exp_times = []\n self.exp_time = None\n g = []\n #bds = [(100,3000)]\n for m in methods:\n self.read_mode = m\n #res = minimize(self.SN_minimise,g0,args=(SN),options={'eps':1},\n #bounds=[(0,40)])#,method='Nelder-Mead')\n groups, t = self.groups_for_SN(target=SN)\n g += [groups]\n exp_times += [t]\n exp_times = np.array(exp_times)\n m = np.argmin(exp_times)\n self.ngroups = g[m]\n self.read_mode = methods[m]\n \n if verbose:\n print('Optimal mode: ' + methods[m])\n print('exposure time: ' + str(exp_times[m]))\n return exp_times[m]", "title": "" }, { "docid": "37c23e69ead3cfda8f24e5011bf5b050", "score": "0.42916855", "text": "def _determine_contextparams(self, optimizer):\n # Determine optimal parameters for fixed context\n cx = optimizer.select_query_point(self.cx_boundaries)\n return cx[:self.context_dims], cx[self.context_dims:]", "title": "" }, { "docid": "122859367b1572591475a05c4d34e369", "score": "0.4266038", "text": "def _get_obs(self):\n #self.obstaclemoveto()\n rospy.logdebug(\"Start Get Observation ==>\")\n # We get the laser scan data\n laser_scan = self.get_laser_scan()\n\n discretized_laser_scan = self.discretize_observation( laser_scan,\n self.new_ranges\n )\n # We get the odometry so that SumitXL knows where it is.\n odometry = self.get_odom()\n x_position = odometry.pose.pose.position.x\n #print('x_position',x_position)\n y_position = odometry.pose.pose.position.y\n base_orientation_quat = odometry.pose.pose.orientation\n base_roll, base_pitch, base_yaw = self.get_orientation_euler(base_orientation_quat)\n v = odometry.twist.twist.linear.x\n self.theta_dot = odometry.twist.twist.angular.z\n\n ###################human input ##############################\n joy = self.get_joy()\n if joy.linear.x > 0 or joy.angular.z > 0:\n self.joy_linear = joy.linear.x\n self.joy_angular = joy.angular.z\n ###################human agent ##################################\n else:\n print('I am here')\n 
xdiff = self.desired_point.x - x_position\n ydiff = self.desired_point.y - y_position\n observations = [round(base_yaw, 2),round(v, 2),round(self.theta_dot, 2),round(xdiff, 2),round(ydiff, 2)]#+ discretized_laser_scan\n observations = numpy.append(observations,numpy.array(discretized_laser_scan))\n observations = numpy.append(observations,numpy.array(discretized_laser_scan[:18]))\n s = numpy.array([observations])\n a = self.choose_action(s)\n print('a[0]: ',a[0])\n self.joy_linear = a[0]\n self.joy_angular = a[1]\n ###################obstacle avoidance###########################################\n tran = numpy.array([[numpy.cos(base_yaw),-numpy.sin(base_yaw)],[numpy.sin(base_yaw),numpy.cos(base_yaw)]]).dot(numpy.array([[1,0],[0,0.1]]))\n self.u_gtg = tran.dot(numpy.array([[self.joy_linear],[self.joy_angular]]))\n self.obstacle_avoidance()\n # We round to only two decimals to avoid very big Observation space\n\n ################################################ Data Collection for Human agent Training ########################################################################\n #is_data_needed = numpy.array([base_yaw,v,self.theta_dot,self.joy_linear,self.joy_angular,self.desired_point.x-(x_position),self.desired_point.y-(y_position)])\n\n if joy.linear.x > 0 or joy.angular.z > 0:\n del_x = (self.desired_point.x-(x_position))\n del_y = (self.desired_point.y-(y_position))\n laser_scan_recorded = numpy.append(discretized_laser_scan,discretized_laser_scan[0])\n #data_needed = numpy.array([\"%10.3e\"%(base_yaw),\"%10.3e\"%(v),\"%10.3e\"%(self.theta_dot),\"%10.3e\"%(del_x),\"%10.3e\"%(del_y)])#.encode() # save the needed data in form of string\n data_needed = numpy.array([(base_yaw),(v),(self.theta_dot),(del_x),(del_y)])\n data_needed = numpy.concatenate((data_needed,laser_scan_recorded),axis=0)\n #data_needed = numpy.append(data_needed,\"%10.3e\"%(self.joy_linear),\"%10.3e\"%(self.joy_angular))\n data_needed = numpy.append(data_needed,(joy.linear.x))\n data_needed = numpy.append(data_needed,(joy.angular.z))\n #print( data_needed)\n\n with open(\"scaled_house_data_test.dat\", \"a\", newline='') as f:\n #f.write(data_needed+b\"\\n\") # write the data to the file\n f.write(str(data_needed).replace('\\n','').replace('[','').replace(']','')+'\\n')\n ################################################ Change Observations ########################################################################################\n observations = [round(x_position, 2),round(y_position, 2),round(base_yaw, 2),round(v, 2),round(self.theta_dot, 2),round(self.joy_linear, 2),round(self.joy_angular, 2)]+discretized_laser_scan+[self.e_norm, round(self.theta_bt_uh_ob,2)]\n\n #print(\"Observations==>\"+observations)\n rospy.logdebug(\"Observations==>\"+str(observations))\n rospy.logdebug(\"END Get Observation ==>\")\n return observations", "title": "" }, { "docid": "a49a3b358e69627b3f944d3946203911", "score": "0.42557463", "text": "def get_exposure_value():\n validation = validate_request (request, 'exposureRequestSchema', 'swagger/getExposureValue.yml')\n logging.info (\"get_exposure_value({0})\".format (request.json))\n return database.get_exposure_value (loc=request.json['loc'],\n stime=ExposureUtil.to_timestamp (request.json['stime']),\n etime=ExposureUtil.to_timestamp (request.json['etime']),\n tres=request.json['tres'],\n tstat=request.json['tstat'])", "title": "" }, { "docid": "73f4bc4b79d23c5b855e41a6202682ee", "score": "0.42357865", "text": "def criteria_for_target(self, target):\n return 
self._criteria.loc[self._criteria['TargetID'] == target]", "title": "" }, { "docid": "fd122974e863cd2e90008af97e25afb3", "score": "0.42329487", "text": "def _get_random_experimental_setting(self):\n m = np.random.uniform(*self.m_range, 2)\n q0_t_q1 = np.random.uniform(*self.q0_t_q1_range, 1)\n c = np.random.uniform(.5, 1.5, 1)\n q0 = np.sqrt(q0_t_q1 / c)\n q1 = -q0 * c\n v_ref_a, v_ref_b = np.random.uniform(*self.v_ref_range, 2)\n return m, np.array([q0, q1]).ravel(), v_ref_a, v_ref_b", "title": "" }, { "docid": "bbf5aa8b775a7acda4b7e65ef41ce291", "score": "0.42311847", "text": "def get_dark(exptime,caldir = None):\n \n #Search for all possible dark frames\n available_darks = glob(caldir+'master_dark*')\n available_times = []\n #Check the exposure time. If any match, use that dark.\n for darkname in available_darks:\n dark_hdu = fits.open(darkname)[0]\n dark_time = dark_hdu.header['EXPTIME']\n available_times.append(dark_time)\n if exptime == dark_time:\n dark = dark_hdu.data\n return dark,darkname\n \n #If we're here, then no darks with matching exposure times were found. Scale the longest \n #dark down to the given exposure time!\n #Find the index with the longest time, grab that time and the corresponding dark frame\n max_dark_idx = np.argmax(available_times)\n max_dark_time = available_times[max_dark_idx]\n darkname = available_darks[max_dark_idx]\n long_dark = fits.getdata(darkname)\n #Scale to the exposure time!\n dark = long_dark * exptime / max_dark_time\n return dark,darkname", "title": "" }, { "docid": "96e74c872e64fba049e57056f29031be", "score": "0.42263195", "text": "def choose_next_target(self,old_sInd,sInds,slewTime,intTimes):\n SU = self.SimulatedUniverse\n OS = SU.OpticalSystem\n ZL = SU.ZodiacalLight\n self.Completeness = SU.Completeness\n TL = SU.TargetList\n Obs = self.Observatory\n TK = self.TimeKeeping\n mode = self.mode\n # now, start to look for available targets\n cnt = 0\n while not TK.mission_is_over():\n TK.obsStart = TK.currentTimeNorm.to('day')\n\n dmag = self.dmag_startSaved\n WA = OS.WA0\n startTime = np.zeros(sInds.shape[0])*u.d + self.TimeKeeping.currentTimeAbs\n\n tovisit = np.zeros(self.schedule_startSaved.shape[0], dtype=bool)\n fZtovisit = np.zeros(self.schedule_startSaved.shape[0], dtype=bool)\n\n DRM = {}#Create DRM\n\n startTime = np.zeros(sInds.shape[0])*u.d + self.TimeKeeping.currentTimeAbs\n\n #Estimate Yearly fZmin###########################################\n tmpfZ = np.asarray(self.fZ_startSaved)\n fZ_matrix = tmpfZ[self.schedule,:]#Apply previous filters to fZ_startSaved[sInds, 1000]\n #Find minimum fZ of each star\n fZmintmp = np.zeros(self.schedule.shape[0])\n for i in xrange(self.schedule.shape[0]):\n fZmintmp[i] = min(fZ_matrix[i,:])\n\n #Find current fZ\n indexFrac = np.interp((self.TimeKeeping.currentTimeAbs-self.TimeKeeping.missionStart).value%365.25,[0,365.25],[0,1000])#This is only good for 1 year missions right now\n fZinterp = np.zeros(self.schedule.shape[0])\n fZinterp[:] = (indexFrac%1)*fZ_matrix[:,int(indexFrac)] + (1-indexFrac%1)*fZ_matrix[:,int(indexFrac%1+1)]#this is the current fZ\n\n commonsInds = [x for x in self.schedule if x in sInds]#finds indicies in common between sInds and self.schedule\n imat = [self.schedule.tolist().index(x) for x in commonsInds]\n CbyT = self.CbyT[imat]\n t_dets = self.t_dets[imat]\n Comp00 = self.Comp00[imat]\n fZ = fZinterp[imat]\n fZmin = fZmintmp[imat]\n\n commonsInds2 = [x for x in self.schedule_startSaved if((x in sInds) and (x in self.schedule))]#finds indicies in common between sInds and 
self.schedule\n imat2 = [self.schedule_startSaved.tolist().index(x) for x in commonsInds2]\n dec = self.TargetList.coords.dec[imat2].value\n\n currentTime = TK.currentTimeAbs\n r_targ = TL.starprop(imat2,currentTime,False)\n #dec = np.zeros(len(imat2))\n #for i in np.arange(len(imat2)):\n c = SkyCoord(r_targ[:,0],r_targ[:,1],r_targ[:,2],representation='cartesian')\n c.representation = 'spherical'\n dec = c.dec\n\n \n if len(sInds) > 0:\n # store selected star integration time\n selectInd = np.argmin(abs(fZ-fZmin))\n sInd = sInds[selectInd]#finds index of star to sacrifice\n t_det = t_dets[selectInd]*u.d\n\n #Create a check to determine if the mission length would be exceeded.\n timeLeft = TK.missionFinishNorm - TK.currentTimeNorm#This is how much time we have left in the mission in u.d\n if(timeLeft > (Obs.settlingTime + mode['syst']['ohTime'])):#There is enough time left for overhead time but not for the full t_det\n if(timeLeft > (t_det+Obs.settlingTime + mode['syst']['ohTime'])):#If the nominal plan for observation time is greater than what we can do\n t_det = t_det\n else:\n t_det = timeLeft - (Obs.settlingTime + mode['syst']['ohTime'])#We reassign t_det to fill the remaining time\n break \n else:#There is insufficient time to cover overhead time\n TK.allocate_time(timeLeft*u.d)\n sInd = None\n t_det = None\n break\n\n # if no observable target, call the TimeKeeping.wait() method\n else:\n TK.allocate_time(TK.waitTime*TK.waitMultiple**cnt)\n cnt += 1\n else:\n return None#DRM, None, None\n return sInd", "title": "" }, { "docid": "8a5c99112d1ba57358a63ffcea2f3d9b", "score": "0.4209246", "text": "def getSlewParameters(antenna, source1, source2=None):\n # Get RA/DEC and az/el of source1\n ra1 = helper.convertHms(utils.getSourceRa(source1))\n dec1 = helper.convertHms(utils.getSourceDec(source1))\n az1, el1 = commands.azel(source1)\n\n # Get RA/DEC and az/el of source2, or if source2 = None, then\n # use the current antenna position\n if source2 == None:\n mp = utils.getAntennaMp(antenna)\n root = \"Drive\"\n if antenna > 6: root = 'AntennaCommon.Drive'\n az2 = commands.queryDouble('%s.%s.Track.actualAzimuth' % (mp, root))\n el2 = commands.queryDouble('%s.%s.Track.actualElevation' % (mp, root))\n ra2 = commands.queryDouble('%s.%s.rightAscension' % (mp, root))\n dec2 = commands.queryDouble('%s.%s.declination' % (mp, root))\n else:\n ra2 = helper.convertHms(utils.getSourceRa(source2))\n dec2 = helper.convertHms(utils.getSourceDec(source2))\n az2, el2 = commands.azel(source2)\n\n # Adjust az/el for wraps\n if antenna <= 6 : \n ha1 = utils.getHa(source1)\n if source2 <> None : \n ha2 = utils.getHa(source2)\n else :\n ha2 = commands.lst() - ra2\n if ha2 < -12.0 : ha2 += 24.0\n if ha2 > 12.0 : ha2 += -24.0\n if dec1 > utils.OBSERVATORY_LATITUDE and ha1 > 0.0: \n az1 += -360.0\n if dec2 > utils.OBSERVATORY_LATITUDE and ha2 > 0.0 and source2 <> None:\n az2 += -360.0\n elif antenna < 16 :\n if az2 < 45.0 and az1 > 225.0: \n az2 += 360.0\n elif az2 > 315.0 and az1 < 135.0:\n az2 += -360.0\n\n # Compute distance\n azDist = abs(az1 - az2)\n elDist = abs(el1 - el2)\n\n # Done\n return azDist, elDist, az1, el1", "title": "" }, { "docid": "413d53ab33497fbbaade9944cbf97491", "score": "0.41902357", "text": "def get_acquisition_timings(self):\n exposure = c_float()\n accumulate = c_float()\n kinetic = c_float()\n error = self._dll.GetAcquisitionTimings(byref(exposure), byref(accumulate), byref(kinetic))\n self._exposure = exposure.value\n self._accumulate = accumulate.value\n self._kinetic = kinetic.value", 
"title": "" }, { "docid": "2ca491c97d2a2665d074aed926778a53", "score": "0.41880116", "text": "def session():\n try:\n with open(os.devnull, 'w') as dev_null:\n _cmd = ['/usr/sbin/iscsiadm', '-m', 'session', '-P', '3']\n _iscsi_logger.debug('Executing %s', _cmd)\n output = subprocess.check_output(_cmd, stderr=dev_null).decode('utf-8')\n # _iscsi_logger.debug('%s output: %s', _cmd, output)\n devices = {}\n\n device_info = {}\n target = None\n for line in output.splitlines():\n # new section describing a different Target is starting\n # save any data collected about the previous Target\n if 'Target:' in line:\n if target is not None and device_info != {}:\n devices[target] = device_info\n device_info = {}\n match = _TARGET_PATTERN.search(line.strip())\n if match:\n target = match.group(1)\n else:\n target = None\n continue\n if 'Current Portal:' in line:\n match = _PORTAL_PATTERN.search(line.strip())\n if match:\n device_info['current_portal_ip'] = match.group(2)\n device_info['current_portal_port'] = match.group(3)\n if 'Persistent Portal:' in line:\n match = _PORTAL_PATTERN.search(line.strip())\n if match:\n device_info['persistent_portal_ip'] = match.group(2)\n device_info['persistent_portal_port'] = match.group(3)\n if 'iSCSI Session State:' in line:\n match = _SESS_STATE_PATTERN.search(line.strip())\n if match:\n device_info['session_state'] = match.group(1)\n if 'Attached scsi disk' in line:\n match = _DISK_PATTERN.search(line.strip())\n if match:\n device_info['device'] = match.group(1)\n device_info['state'] = match.group(2)\n if target is not None and device_info != {}:\n devices[target] = device_info\n\n return devices\n except OSError:\n _iscsi_logger.error('Failed to execute /usr/sbin/iscsiadm')\n return {}\n except subprocess.CalledProcessError as e:\n if e.returncode in (15, 21):\n # non-fatal error that we should not warn the user about\n # see ISCSIADM(8)\n _iscsi_logger.debug('Error running /usr/sbin/iscsiadm [%s]', str(e))\n else:\n _iscsi_logger.warning('Error running /usr/sbin/iscsiadm [%s]', str(e))\n return {}", "title": "" }, { "docid": "0c0158d8a577deee6703b70468b6ee3a", "score": "0.41845503", "text": "def observation_characterization(self, sInd, mode):\n\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n\n # selecting appropriate koMap\n koMap = self.koMaps[mode[\"syst\"][\"name\"]]\n\n # find indices of planets around the target\n pInds = np.where(SU.plan2star == sInd)[0]\n pinds_earthlike = np.array([])\n fEZs = SU.fEZ[pInds].to(\"1/arcsec2\").value\n # dMags = SU.dMag[pInds]\n WAs = SU.WA[pInds].to(\"arcsec\").value\n\n # get the detected status, and check if there was a FA\n det = self.lastDetected[sInd, 0]\n if det is None:\n det = np.ones(pInds.size, dtype=bool)\n FA = len(det) == len(pInds) + 1\n if FA:\n pIndsDet = np.append(pInds, -1)[det]\n else:\n pIndsDet = pInds[det]\n\n # initialize outputs, and check if there's anything (planet or FA)\n # to characterize\n characterized = np.zeros(len(det), dtype=int)\n fZ = 0.0 / u.arcsec**2.0\n systemParams = SU.dump_system_params(\n sInd\n ) # write current system params by default\n SNR = np.zeros(len(det))\n intTime = None\n if len(det) == 0: # nothing to characterize\n return characterized, fZ, systemParams, SNR, intTime\n\n # look for last detected planets that have not been fully characterized\n if not (FA): # only true planets, no FA\n tochar = self.fullSpectra[pIndsDet] < self.max_successful_chars\n else: # 
mix of planets and a FA\n truePlans = pIndsDet[:-1]\n tochar = np.append(\n (self.fullSpectra[truePlans] < self.max_successful_chars), True\n )\n\n # 1/ find spacecraft orbital START position including overhead time,\n # and check keepout angle\n if np.any(tochar):\n # start times\n startTime = TK.currentTimeAbs.copy()\n startTimeNorm = TK.currentTimeNorm.copy()\n # planets to characterize\n koTimeInd = np.where(np.round(startTime.value) - self.koTimes.value == 0)[\n 0\n ][\n 0\n ] # find indice where koTime is startTime[0]\n # wherever koMap is 1, the target is observable\n tochar[tochar] = koMap[sInd][koTimeInd]\n\n # 2/ if any planet to characterize, find the characterization times at the\n # detected fEZ, dMag, and WA\n if np.any(tochar):\n pinds_earthlike = np.logical_and(\n np.array([(p in self.known_earths) for p in pIndsDet]), tochar\n )\n\n if self.lastDetected[sInd, 0] is None:\n fZ = ZL.fZ(Obs, TL, sInd, startTime, mode)\n fEZ = fEZs[tochar] / u.arcsec**2\n dMag = self.int_dMag[sInd] * np.ones(len(tochar))\n WA = self.int_WA[sInd] * np.ones(len(tochar))\n else:\n fZ = ZL.fZ(Obs, TL, sInd, startTime, mode)\n fEZ = self.lastDetected[sInd, 1][det][tochar] / u.arcsec**2\n dMag = self.lastDetected[sInd, 2][det][tochar]\n WA = self.lastDetected[sInd, 3][det][tochar] * u.arcsec\n # dMag = self.int_dMag[sInd]*np.ones(len(tochar))\n # WA = self.int_WA[sInd]*np.ones(len(tochar))\n\n intTimes = np.zeros(len(tochar)) * u.day\n\n # if lucky_planets, use lucky planet params for dMag and WA\n if SU.lucky_planets or sInd in self.known_rocky:\n phi = (1 / np.pi) * np.ones(len(SU.d))\n e_dMag = deltaMag(SU.p, SU.Rp, SU.d, phi) # delta magnitude\n e_WA = np.arctan(SU.a / TL.dist[SU.plan2star]).to(\n \"arcsec\"\n ) # working angle\n WA[pinds_earthlike[tochar]] = e_WA[pIndsDet[pinds_earthlike]]\n dMag[pinds_earthlike[tochar]] = e_dMag[pIndsDet[pinds_earthlike]]\n # else:\n # e_dMag = SU.dMag\n # e_WA = SU.WA\n # WA[pinds_earthlike[tochar]] = e_WA[pIndsDet[pinds_earthlike]]\n # dMag[pinds_earthlike[tochar]] = e_dMag[pIndsDet[pinds_earthlike]]\n # pdb.set_trace() ###\n intTimes[tochar] = OS.calc_intTime(TL, sInd, fZ, fEZ, dMag, WA, mode)\n intTimes[~np.isfinite(intTimes)] = 0 * u.d\n # add a predetermined margin to the integration times\n intTimes = intTimes * (1.0 + self.charMargin)\n # apply time multiplier\n totTimes = intTimes * (mode[\"timeMultiplier\"])\n # end times\n endTimes = startTime + totTimes\n endTimesNorm = startTimeNorm + totTimes\n # planets to characterize\n tochar = (\n (totTimes > 0)\n & (totTimes <= OS.intCutoff)\n & (endTimesNorm <= TK.OBendTimes[TK.OBnumber])\n )\n\n # 3/ is target still observable at the end of any char time?\n if np.any(tochar) and Obs.checkKeepoutEnd:\n koTimeInds = np.zeros(len(endTimes.value[tochar]), dtype=int)\n\n # find index in koMap where each endTime is closest to koTimes\n for t, endTime in enumerate(endTimes.value[tochar]):\n if endTime > self.koTimes.value[-1]:\n # case where endTime exceeds largest koTimes element\n endTimeInBounds = np.where(\n np.floor(endTime) - self.koTimes.value == 0\n )[0]\n koTimeInds[t] = (\n endTimeInBounds[0] if endTimeInBounds.size != 0 else -1\n )\n else:\n koTimeInds[t] = np.where(\n np.round(endTime) - self.koTimes.value == 0\n )[0][\n 0\n ] # find indice where koTime is endTimes[0]\n tochar[tochar] = [koMap[sInd][koT] if koT >= 0 else 0 for koT in koTimeInds]\n\n # 4/ if yes, allocate the overhead time, and perform the characterization\n if np.any(tochar):\n # Save Current Time before attempting time allocation\n 
currentTimeNorm = TK.currentTimeNorm.copy()\n currentTimeAbs = TK.currentTimeAbs.copy()\n\n if np.any(np.logical_and(pinds_earthlike, tochar)):\n intTime = np.max(intTimes[np.logical_and(pinds_earthlike, tochar)])\n else:\n intTime = np.max(intTimes[tochar])\n extraTime = intTime * (mode[\"timeMultiplier\"] - 1.0) # calculates extraTime\n success = TK.allocate_time(\n intTime + extraTime + mode[\"syst\"][\"ohTime\"] + Obs.settlingTime, True\n ) # allocates time\n if not (success): # Time was not successfully allocated\n char_intTime = None\n lenChar = len(pInds) + 1 if FA else len(pInds)\n characterized = np.zeros(lenChar, dtype=float)\n char_SNR = np.zeros(lenChar, dtype=float)\n char_fZ = 0.0 / u.arcsec**2\n char_systemParams = SU.dump_system_params(sInd)\n return characterized, char_fZ, char_systemParams, char_SNR, char_intTime\n\n pIndsChar = pIndsDet[tochar]\n log_char = \" - Charact. planet inds %s (%s/%s detected)\" % (\n pIndsChar,\n len(pIndsChar),\n len(pIndsDet),\n )\n self.logger.info(log_char)\n self.vprint(log_char)\n\n # SNR CALCULATION:\n # first, calculate SNR for observable planets (without false alarm)\n planinds = pIndsChar[:-1] if pIndsChar[-1] == -1 else pIndsChar\n SNRplans = np.zeros(len(planinds))\n if len(planinds) > 0:\n # initialize arrays for SNR integration\n fZs = np.zeros(self.ntFlux) / u.arcsec**2.0\n systemParamss = np.empty(self.ntFlux, dtype=\"object\")\n Ss = np.zeros((self.ntFlux, len(planinds)))\n Ns = np.zeros((self.ntFlux, len(planinds)))\n # integrate the signal (planet flux) and noise\n dt = intTime / float(self.ntFlux)\n timePlus = (\n Obs.settlingTime.copy() + mode[\"syst\"][\"ohTime\"].copy()\n ) # accounts for the time since the current time\n for i in range(self.ntFlux):\n # calculate signal and noise (electron count rates)\n if SU.lucky_planets:\n fZs[i] = ZL.fZ(Obs, TL, sInd, currentTimeAbs, mode)[0]\n Ss[i, :], Ns[i, :] = self.calc_signal_noise(\n sInd, planinds, dt, mode, fZ=fZs[i]\n )\n # allocate first half of dt\n timePlus += dt / 2.0\n # calculate current zodiacal light brightness\n fZs[i] = ZL.fZ(Obs, TL, sInd, currentTimeAbs + timePlus, mode)[0]\n # propagate the system to match up with current time\n SU.propag_system(\n sInd, currentTimeNorm + timePlus - self.propagTimes[sInd]\n )\n self.propagTimes[sInd] = currentTimeNorm + timePlus\n # save planet parameters\n systemParamss[i] = SU.dump_system_params(sInd)\n # calculate signal and noise (electron count rates)\n if not SU.lucky_planets:\n Ss[i, :], Ns[i, :] = self.calc_signal_noise(\n sInd, planinds, dt, mode, fZ=fZs[i]\n )\n # allocate second half of dt\n timePlus += dt / 2.0\n\n # average output parameters\n fZ = np.mean(fZs)\n systemParams = {\n key: sum([systemParamss[x][key] for x in range(self.ntFlux)])\n / float(self.ntFlux)\n for key in sorted(systemParamss[0])\n }\n # calculate planets SNR\n S = Ss.sum(0)\n N = Ns.sum(0)\n SNRplans[N > 0] = S[N > 0] / N[N > 0]\n # allocate extra time for timeMultiplier\n\n # if only a FA, just save zodiacal brightness\n # in the middle of the integration\n else:\n totTime = intTime * (mode[\"timeMultiplier\"])\n fZ = ZL.fZ(Obs, TL, sInd, currentTimeAbs.copy() + totTime / 2.0, mode)[\n 0\n ]\n\n # calculate the false alarm SNR (if any)\n SNRfa = []\n if pIndsChar[-1] == -1:\n fEZ = self.lastDetected[sInd, 1][-1] / u.arcsec**2.0\n dMag = self.lastDetected[sInd, 2][-1]\n WA = self.lastDetected[sInd, 3][-1] * u.arcsec\n C_p, C_b, C_sp = OS.Cp_Cb_Csp(TL, sInd, fZ, fEZ, dMag, WA, mode)\n S = (C_p * intTime).decompose().value\n N = np.sqrt((C_b 
* intTime + (C_sp * intTime) ** 2.0).decompose().value)\n SNRfa = S / N if N > 0.0 else 0.0\n\n # save all SNRs (planets and FA) to one array\n SNRinds = np.where(det)[0][tochar]\n SNR[SNRinds] = np.append(SNRplans, SNRfa)\n\n # now, store characterization status: 1 for full spectrum,\n # -1 for partial spectrum, 0 for not characterized\n char = SNR >= mode[\"SNR\"]\n # initialize with full spectra\n characterized = char.astype(int)\n WAchar = WAs[char] * u.arcsec\n # find the current WAs of characterized planets\n WAs = systemParams[\"WA\"]\n if FA:\n WAs = np.append(WAs, self.lastDetected[sInd, 3][-1] * u.arcsec)\n # check for partial spectra (for coronagraphs only)\n if not (mode[\"syst\"][\"occulter\"]):\n IWA_max = mode[\"IWA\"] * (1.0 + mode[\"BW\"] / 2.0)\n OWA_min = mode[\"OWA\"] * (1.0 - mode[\"BW\"] / 2.0)\n char[char] = (WAchar < IWA_max) | (WAchar > OWA_min)\n characterized[char] = -1\n # encode results in spectra lists (only for planets, not FA)\n charplans = characterized[:-1] if FA else characterized\n self.fullSpectra[pInds[charplans == 1]] += 1\n self.partialSpectra[pInds[charplans == -1]] += 1\n\n # schedule target revisit\n self.scheduleRevisit(sInd, None, None, None)\n\n return characterized.astype(int), fZ, systemParams, SNR, intTime", "title": "" }, { "docid": "feaa490c0386a210c752e005128380f3", "score": "0.4181969", "text": "def get_opts(fp_hdf_out, train_test_times):\n opts = dict()\n # -------- DATA ------------------------\n opts['fp_hdf_out'] = fp_hdf_out\n opts['sampling_rate'] = 30 # Sampling rate of the wavelets\n opts['training_indices'] = train_test_times[0].tolist()\n opts['testing_indices'] = train_test_times[1].tolist()\n\n # -------- MODEL PARAMETERS --------------\n opts['model_function'] = 'the_decoder'\n opts['model_timesteps'] = 64 # How many timesteps in the input layer\n\n opts['optimizer'] = 'adam'\n opts['learning_rate'] = 0.0007\n opts['kernel_size'] = 3\n opts['conv_padding'] = 'same'\n opts['act_conv'] = 'elu'\n opts['act_fc'] = 'elu'\n opts['dropout_ratio'] = 0\n opts['filter_size'] = 64\n opts['num_units_dense'] = 1024\n opts['num_dense'] = 2\n opts['gaussian_noise'] = 1\n opts['num_convs_tsr'] = 4\n opts['average_output'] = 2**opts['num_convs_tsr']\n\n # -------- TRAINING----------------------\n opts['batch_size'] = 8\n opts['steps_per_epoch'] = 250\n opts['validation_steps'] = 250\n opts['epochs'] = 20\n opts['shuffle'] = True\n opts['random_batches'] = True\n\n # -------- MISC--------------- ------------\n opts['tensorboard_logfolder'] = './'\n opts['model_folder'] = './'\n opts['log_output'] = False\n opts['save_model'] = False\n\n return opts", "title": "" }, { "docid": "cb77370fea770582cc283773895df9fa", "score": "0.41816464", "text": "def exposure(self) -> Q_:\n if self.config['exposure'] is not None:\n return self.config['exposure']\n try:\n exposure = float(self._driver.ExposureTime.ToString()) * Q_('us')\n return exposure\n except _genicam.TimeoutException:\n self.logger.error('Timeout getting the exposure')\n return self.config['exposure']", "title": "" }, { "docid": "333f9f2066f78622ed1f9988a148c298", "score": "0.417969", "text": "def _mode_choice_simulate(tours,\n skims,\n stack,\n orig_key,\n dest_key,\n spec,\n additional_constants,\n omx=None):\n\n # FIXME - log\n # print \"Skims3D %s skim_key2 values = %s\" % ('in_period', tours['in_period'].unique())\n # print \"Skims3D %s skim_key2 values = %s\" % ('out_period', tours['out_period'].unique())\n\n # FIXME - check that periods are in time_periods?\n\n in_skims = 
askim.Skims3D(stack=stack,\n left_key=orig_key, right_key=dest_key,\n skim_key=\"in_period\",\n offset=-1)\n out_skims = askim.Skims3D(stack=stack,\n left_key=dest_key, right_key=orig_key,\n skim_key=\"out_period\",\n offset=-1)\n\n if omx is not None:\n in_skims.set_omx(omx)\n out_skims.set_omx(omx)\n\n skims.set_keys(orig_key, dest_key)\n\n locals_d = {\n \"in_skims\": in_skims,\n \"out_skims\": out_skims,\n \"skims\": skims\n }\n locals_d.update(additional_constants)\n\n choices, _ = asim.simple_simulate(tours,\n spec,\n skims=[in_skims, out_skims, skims],\n locals_d=locals_d)\n\n alts = spec.columns\n choices = choices.map(dict(zip(range(len(alts)), alts)))\n\n return choices", "title": "" }, { "docid": "97c857c4fc01ce6043ec482982a0f8ac", "score": "0.4178437", "text": "def _get_obs(self):\n robot_qpos, robot_qvel = shadow_get_obs(self.sim, name='robot0')\n achieved_goal = self._get_achieved_goal()\n cube_vel = self.sim.data.get_joint_qvel('rubik:free_joint_0_0_0').copy()\n observation = np.concatenate([robot_qpos, robot_qvel, cube_vel, achieved_goal])\n assert observation.shape == (62,)\n angle = achieved_goal[-1:]\n return {\n 'observation': observation.copy(),\n 'achieved_goal': angle.copy(),\n 'desired_goal': self.goal.copy(),\n }", "title": "" }, { "docid": "427fafcc8b25aec1b2c1eddc818e5899", "score": "0.41778055", "text": "def guessParamsFromCursors(self,x1,y1,x2,y2):\n x0_hz = x1\n X = self.Xdata\n Y = self.Ydata\n for k,x in enumerate(self.Xdata):\n if x0_hz < x:\n iMax = k\n break\n\n # iMax = Y.argmax()\n\n x_0 = X[iMax]\n a = Y[iMax]\n\n HWHM_left = -1\n HWHM_right= -1\n offset = Y.mean()\n a = a-offset\n for index in range(iMax,len(X)):\n if Y[index]<=offset+a/2:\n FWHM = X[index] - x_0\n break\n if HWHM_right==-1:\n HWHM_right = X[index] - x_0\n for index in range(iMax,0,-1):\n if Y[index]<=offset+a/2 :\n HWHM_left = x_0 - X[index]\n break\n if HWHM_left ==-1:\n HWHM_left = x_0 - X[index]\n\n FWHM = HWHM_left + HWHM_right\n if FWHM == 0:\n FWHM = (X[len(X)-1]-X[0])/10\n\n # convert a to area\n a = a*math.pi*FWHM/2\n self.param_guess = {__GAMMA_HZ__:FWHM,__X0_HZ__:x_0,__AREA__:a,__OFFSET__:offset}", "title": "" }, { "docid": "b2a8fedd9a236d9ce98cfd2de28f0040", "score": "0.41717553", "text": "def run_sim(self):\n\n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n\n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n if OS.haveOcculter:\n self.currentSep = Obs.occulterSep\n\n # choose observing modes selected for detection (default marked with a flag)\n allModes = OS.observingModes\n det_modes = list(filter(lambda mode: \"imag\" in mode[\"inst\"][\"name\"], allModes))\n base_det_mode = list(\n filter(lambda mode: mode[\"detectionMode\"], OS.observingModes)\n )[0]\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = list(\n filter(lambda mode: \"spec\" in mode[\"inst\"][\"name\"], allModes)\n )\n if np.any(spectroModes):\n char_modes = spectroModes\n # if no spectro mode, default char mode is first observing mode\n else:\n char_modes = [allModes[0]]\n\n # begin Survey, and loop until mission is finished\n log_begin = \"OB%s: survey beginning.\" % (TK.OBnumber + 1)\n self.logger.info(log_begin)\n self.vprint(log_begin)\n t0 = time.time()\n sInd = None\n ObsNum = 0\n while not TK.mission_is_over(OS, Obs, det_modes[0]):\n\n # acquire the NEXT TARGET star index and create DRM\n old_sInd = sInd # used to save sInd if returned sInd is None\n 
DRM, sInd, det_intTime, waitTime, det_mode = self.next_target(\n sInd, det_modes\n )\n\n if sInd is not None:\n ObsNum += 1\n\n if OS.haveOcculter:\n # advance to start of observation\n # (add slew time for selected target)\n _ = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n\n # beginning of observation, start to populate DRM\n DRM[\"star_ind\"] = sInd\n DRM[\"star_name\"] = TL.Name[sInd]\n DRM[\"arrival_time\"] = TK.currentTimeNorm.copy().to(\"day\")\n DRM[\"OB_nb\"] = TK.OBnumber\n DRM[\"ObsNum\"] = ObsNum\n pInds = np.where(SU.plan2star == sInd)[0]\n DRM[\"plan_inds\"] = pInds.astype(int)\n log_obs = (\n \" Observation #%s, star ind %s (of %s) with %s planet(s), \"\n + \"mission time at Obs start: %s\"\n ) % (\n ObsNum,\n sInd,\n TL.nStars,\n len(pInds),\n TK.currentTimeNorm.to(\"day\").copy().round(2),\n )\n self.logger.info(log_obs)\n self.vprint(log_obs)\n\n # PERFORM DETECTION and populate revisit list attribute\n DRM[\"det_info\"] = []\n (\n detected,\n det_fZ,\n det_systemParams,\n det_SNR,\n FA,\n ) = self.observation_detection(sInd, det_intTime, det_mode)\n # update the occulter wet mass\n if OS.haveOcculter:\n DRM = self.update_occulter_mass(DRM, sInd, det_intTime, \"det\")\n det_data = {}\n det_data[\"det_status\"] = detected\n det_data[\"det_SNR\"] = det_SNR\n det_data[\"det_fZ\"] = det_fZ.to(\"1/arcsec2\")\n det_data[\"det_params\"] = det_systemParams\n det_data[\"det_mode\"] = dict(det_mode)\n det_data[\"det_time\"] = det_intTime.to(\"day\")\n del det_data[\"det_mode\"][\"inst\"], det_data[\"det_mode\"][\"syst\"]\n DRM[\"det_info\"].append(det_data)\n\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n DRM[\"char_info\"] = []\n if char_modes[0][\"SNR\"] not in [0, np.inf]:\n (\n characterized,\n char_fZ,\n char_systemParams,\n char_SNR,\n char_intTime,\n ) = self.observation_characterization(sInd, char_modes)\n else:\n char_intTime = None\n lenChar = len(pInds) + 1 if True in FA else len(pInds)\n characterized = np.zeros((lenChar, len(char_modes)), dtype=float)\n char_SNR = np.zeros((lenChar, len(char_modes)), dtype=float)\n char_fZ = np.array([0.0 / u.arcsec**2, 0.0 / u.arcsec**2])\n char_systemParams = SU.dump_system_params(sInd)\n\n for mode_index, char_mode in enumerate(char_modes):\n char_data = {}\n assert char_intTime != 0, \"Integration time can't be 0.\"\n # update the occulter wet mass\n if OS.haveOcculter and char_intTime is not None:\n char_data = self.update_occulter_mass(\n char_data, sInd, char_intTime, \"char\"\n )\n if np.any(characterized):\n self.vprint(\n \" Char. 
results are: {}\".format(\n characterized[:-1, mode_index]\n )\n )\n # populate the DRM with characterization results\n char_data[\"char_time\"] = (\n char_intTime.to(\"day\")\n if char_intTime is not None\n else 0.0 * u.day\n )\n char_data[\"char_status\"] = (\n characterized[:-1, mode_index]\n if FA\n else characterized[:, mode_index]\n )\n char_data[\"char_SNR\"] = (\n char_SNR[:-1, mode_index] if FA else char_SNR[:, mode_index]\n )\n char_data[\"char_fZ\"] = char_fZ[mode_index].to(\"1/arcsec2\")\n char_data[\"char_params\"] = char_systemParams\n # populate the DRM with FA results\n char_data[\"FA_det_status\"] = int(FA)\n char_data[\"FA_char_status\"] = (\n characterized[-1, mode_index] if FA else 0\n )\n char_data[\"FA_char_SNR\"] = char_SNR[-1] if FA else 0.0\n char_data[\"FA_char_fEZ\"] = (\n self.lastDetected[sInd, 1][-1] / u.arcsec**2\n if FA\n else 0.0 / u.arcsec**2\n )\n char_data[\"FA_char_dMag\"] = (\n self.lastDetected[sInd, 2][-1] if FA else 0.0\n )\n char_data[\"FA_char_WA\"] = (\n self.lastDetected[sInd, 3][-1] * u.arcsec\n if FA\n else 0.0 * u.arcsec\n )\n\n # populate the DRM with observation modes\n char_data[\"char_mode\"] = dict(char_mode)\n del char_data[\"char_mode\"][\"inst\"], char_data[\"char_mode\"][\"syst\"]\n DRM[\"char_info\"].append(char_data)\n\n DRM[\"exoplanetObsTime\"] = TK.exoplanetObsTime.copy()\n\n # append result values to self.DRM\n self.DRM.append(DRM)\n\n else: # sInd == None\n sInd = old_sInd # Retain the last observed star\n if (\n TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]\n ): # currentTime is at end of OB\n # Conditional Advance To Start of Next OB\n if not TK.mission_is_over(\n OS, Obs, det_mode\n ): # as long as the mission is not over\n TK.advancetToStartOfNextOB() # Advance To Start of Next OB\n elif waitTime is not None:\n # CASE 1: Advance specific wait time\n _ = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint(\"waitTime is not None\")\n else:\n startTimes = (\n TK.currentTimeAbs.copy() + np.zeros(TL.nStars) * u.d\n ) # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(\n TL,\n np.arange(TL.nStars),\n startTimes,\n self.koMaps,\n self.koTimes,\n base_det_mode,\n )[0]\n # CASE 2 If There are no observable targets for the\n # rest of the mission\n # Are there any stars coming out of keepout before end of mission\n if (\n observableTimes[\n (\n TK.missionFinishAbs.copy().value * u.d\n > observableTimes.value * u.d\n )\n * (\n observableTimes.value * u.d\n >= TK.currentTimeAbs.copy().value * u.d\n )\n ].shape[0]\n ) == 0:\n self.vprint(\n (\n \"No Observable Targets for Remainder of mission at \"\n \"currentTimeNorm = {}\"\n ).format(TK.currentTimeNorm)\n )\n # Manually advancing time to mission end\n TK.currentTimeNorm = TK.missionLife\n TK.currentTimeAbs = TK.missionFinishAbs\n else:\n # CASE 3 nominal wait time if at least 1 target is still in\n # list and observable\n # TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[\n observableTimes.value * u.d\n > TK.currentTimeAbs.copy().value * u.d\n ]\n inds2 = np.intersect1d(\n self.intTimeFilterInds, inds1\n ) # apply intTime filter\n # apply revisit Filter #NOTE this means stars you added to\n # the revisit list\n inds3 = self.revisitFilter(\n inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)\n )\n self.vprint(\n \"Filtering %d stars from advanceToAbsTime\"\n % (TL.nStars - len(inds3))\n )\n oTnowToEnd = observableTimes[inds3]\n # there is at least one observableTime between now and the\n # end of the 
mission\n if not oTnowToEnd.value.shape[0] == 0:\n # advance to that observable time\n tAbs = np.min(oTnowToEnd)\n else:\n tAbs = (\n TK.missionStart + TK.missionLife\n ) # advance to end of mission\n tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n # Advance Time to this time OR start of next OB following\n # this time\n _ = TK.advanceToAbsTime(tAbs)\n self.vprint(\n (\n \"No Observable Targets a currentTimeNorm = {:.2f} \"\n \"Advanced To currentTimeNorm = {:.2f}\"\n ).format(\n tmpcurrentTimeNorm.to(\"day\"),\n TK.currentTimeNorm.to(\"day\"),\n )\n )\n else: # TK.mission_is_over()\n dtsim = (time.time() - t0) * u.s\n log_end = (\n \"Mission complete: no more time available.\\n\"\n + \"Simulation duration: %s.\\n\" % dtsim.astype(\"int\")\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n )\n self.logger.info(log_end)\n print(log_end)", "title": "" }, { "docid": "fa4acb388d936f3e1fe9e91b6e6990fe", "score": "0.41694167", "text": "def dpmodes(self):\n return self.second_stage.yresult[:,self.dps_ix]", "title": "" }, { "docid": "f68ba2056d0c0781fe00d61b486d0a5b", "score": "0.4166098", "text": "def parameter_estimation(self):\n\n # Iterates through each type of plant cost to predict parameters.\n initial_stub_cost_parameters = ['Connect_system_cost-Medium _', 'Constr_cost-Medium _', 'Fixed_cost-Medium _',\n 'Infra_cost-Medium _', 'Insurance_cost-Medium _', 'Pre_dev_cost-Medium _',\n 'Var_cost-Medium _']\n\n condition = True\n while condition:\n full_cost_parameters = self._create_parameter_names(initial_stub_cost_parameters)\n\n parameters_of_plant = {\n self._change_columns(cost_variable_required): ExtrapolateInterpolate(self.cost_data['Plant_Size'],\n self.cost_data[\n cost_variable_required])(self.capacity)\n for cost_variable_required in full_cost_parameters}\n self.check_plant_exists(parameters_of_plant)\n\n if all(math.isnan(value) for value in parameters_of_plant.values()):\n self.start_year += 1\n else:\n condition = False\n\n durations = ['Pre_Dur', 'Operating_Period', 'Constr_Dur', 'Efficiency', 'Average_Load_Factor']\n durations_parameters = {self._change_columns(dur): self._estimate_non_interpolatable_parameters(dur) for dur in durations}\n # logger.info(self._estimate_non_interpolatable_parameters.cache_info())\n yearly_cost_spread = ['Constr', 'Pre']\n\n yearly_cost_perc = {self._change_columns(spread): self._payment_spread_estimator(spread) for spread in\n yearly_cost_spread}\n\n parameters = {**parameters_of_plant, **durations_parameters, **yearly_cost_perc}\n parameters = self.check_pre_dev_spend(parameters)\n parameters['variable_o_and_m_per_mwh'] *= uniform(elecsim.scenario.scenario_data.o_and_m_multiplier[0], elecsim.scenario.scenario_data.o_and_m_multiplier[1])\n self._use_historical_efficiency_data(parameters)\n\n return parameters", "title": "" }, { "docid": "711837d1a8a94d4e6fdc5e80c5894703", "score": "0.4147142", "text": "def _determine_contextparams(self, optimizer):\n # Choose the first samples uniform randomly\n if len(optimizer.X_) < optimizer.initial_random_samples:\n cx = np.random.uniform(self.cx_boundaries[:, 0],\n self.cx_boundaries[:, 1])\n return cx[:self.context_dims], cx[self.context_dims:]\n\n # Prepare entropy search objective\n self._init_es_ensemble()\n # Generate data for function mapping\n # query_context x query_parameters x eval_context -> entropy reduction\n n_query_points = 500\n n_data_dims = 2 * self.context_dims + self.dimension\n X = np.empty((n_query_points, n_data_dims))\n y = np.empty(n_query_points)\n for i in 
range(n_query_points):\n # Select query point and evaluation context randomly\n query = np.random.uniform(self.cx_boundaries[:, 0],\n self.cx_boundaries[:, 1])\n ind = np.random.choice(self.n_context_samples)\n # Store query point in X and value of entropy-search in y\n X[i, :self.context_dims + self.dimension] = query\n X[i, self.context_dims + self.dimension:] = \\\n self.context_samples[ind] - query[:self.context_dims]\n y[i] = self.entropy_search_ensemble[ind](query)[0]\n\n # Fit GP model to this data\n kernel = C(1.0, (1e-10, 100.0)) \\\n * RBF(length_scale=(1.0,)*n_data_dims,\n length_scale_bounds=[(0.01, 10.0),]*n_data_dims) \\\n + WhiteKernel(1.0, (1e-10, 100.0))\n self.es_surrogate = GaussianProcessRegressor(kernel=kernel)\n self.es_surrogate.fit(X, y)\n\n # Select query based on mean entropy reduction in surrogate model\n # predictions\n contexts = np.random.uniform(self.context_boundaries[:, 0],\n self.context_boundaries[:, 1],\n (250, self.context_dims))\n def objective_function(cx):\n X_query = np.empty((250, n_data_dims))\n X_query[:, :self.context_dims + self.dimension] = cx\n X_query[:, self.context_dims + self.dimension:] = \\\n contexts - cx[:self.context_dims]\n es_pred, es_cov = \\\n self.es_surrogate.predict(X_query, return_cov=True)\n return es_pred.mean() + self.kappa * np.sqrt(es_cov.mean())\n\n cx = global_optimization(\n objective_function, boundaries=self.cx_boundaries,\n optimizer=self.optimizer, maxf=optimizer.maxf)\n return cx[:self.context_dims], cx[self.context_dims:]", "title": "" }, { "docid": "5884dfcdac3610bd089bae38f7306433", "score": "0.41468692", "text": "def execute(self) -> bool:\n\n # check if the dome is already open\n if self.telescope.dome_status() is False: # dome is closed\n self.telescope.open_dome()\n \n # image each target\n for target in self.targets:\n\n # enable tracking again as a precaution\n self.telescope.enable_tracking()\n\n # open dome again as a precaution\n self.telescope.open_dome()\n\n # check whether object is visible, and try slewing the\n # telescope to point at object\n if self.telescope.goto_target(target) is False:\n self.__log(\"Unable to point telescope at \"+target+\". Object\"\n \" is most likely not visible or there has been a\"\n \" telescope error. Skipping \"+target+\"...\", color=\"red\")\n continue # try imaging next target\n\n # variables to produce seo file format name\n year = time.strftime(\"%Y\", time.gmtime()) # 2016\n month = time.strftime(\"%B\", time.gmtime())[0:3].lower() # oct\n day = time.strftime(\"%d\", time.gmtime()) # 07\n base_name = \"-band_\"+str(self.exposure_time)+\"sec\"\n base_name += \"_bin\"+str(self.binning)+\"_\"+year+month+day+\"_\"\n base_name += self.user+\"_num\"\n\n # how many darks we have taken\n dark_count = 0\n \n # take exposures for each filter\n for f in filters:\n\n #enable tracking as a precaution\n self.telescope.enable_tracking()\n \n self.telescope.change_filter(f)\n # take exposures! 
\n for n in range(self.exposure_count):\n filename = str(target)+\"_\"+str(f)+base_name+str(n)+\"_seo\"\n self.__log(\"Taking exposure {} for {}\".format(n, target))\n self.telescope.take_exposure(filename)\n\n filename = str(target)+\"_dark\"+base_name+str(dark_count)+\"_seo\"\n dark_count += 1\n self.telescope.take_dark(filename)\n\n # reset filter to clear\n self.telescope.change_filter('clear')\n\n # take any leftover darks\n for n in range(dark_count, self.exposure_count):\n filename = str(target)+\"_dark\"+base_name+str(n)+\"_seo\"\n self.telescope.take_dark(filename)\n\n # take 5*exposure_count biases\n for n in range(5*self.exposure_count):\n filename = str(target)+\"_bias\"+base_name+str(n)+\"_seo\"\n self.telescope.take_bias(filename)\n\n self.close()\n\n return True", "title": "" }, { "docid": "1f5fd51391e5bb17c32856ff924c52c0", "score": "0.41323462", "text": "def captureSetup(self, x_start, x_end, x_bin, y_start, y_end, y_bin, exposure_time): \r\n self.frame_x = int((x_end - x_start + 1)/x_bin)\r\n self.frame_y = int((y_end - y_start + 1)/y_bin)\r\n \r\n # Setup acquisition & determine how large a frame is (in pixels).\r\n frame_size = pvc.uns32(0)\r\n region = pvc.rgn_type(x_start, x_end, x_bin, y_start, y_end, y_bin)\r\n check(pvcam.pl_exp_setup_cont(self.hcam,\r\n pvc.uns16(1),\r\n ctypes.byref(region),\r\n pvc.int16(pvc.TIMED_MODE),\r\n pvc.uns32(exposure_time),\r\n ctypes.byref(frame_size),\r\n pvc.int16(pvc.CIRC_OVERWRITE)),\r\n \"pl_exp_setup_cont\")\r\n\r\n # Store frame size in bytes.\r\n #\r\n self.frame_bytes = frame_size.value\r\n\r\n # Allocate storage for the frames. Use PVCAM's recommendation for the size.\r\n #\r\n size = self.getParameterDefault(\"param_frame_buffer_size\")\r\n self.data_buffer = numpy.ascontiguousarray(numpy.zeros(size, dtype = numpy.uint8))\r\n self.buffer_len = int(size/self.frame_bytes)", "title": "" }, { "docid": "4afa1c482096b9a3a76223e1046a480f", "score": "0.41262776", "text": "def update_exposure_time(self, t, units='ms'):\n IS_EXPOSURE_CMD_SET_EXPOSURE = 12\n nCommand = IS_EXPOSURE_CMD_SET_EXPOSURE\n Param = ctypes.c_double(t)\n SizeOfParam = 8\n self.dll.is_Exposure(self.ModuleHandle, nCommand, ctypes.pointer(Param), SizeOfParam)", "title": "" }, { "docid": "937c4a446a47a77a5658967f9b721ab1", "score": "0.41237077", "text": "def trx_mode_params(equipment, trx_type_variety='', trx_mode='', error_message=False):\n trx_params = {}\n default_si_data = equipment['SI']['default']\n try:\n trxs = equipment['Transceiver']\n mode_params = next(mode for trx in trxs \\\n if trx == trx_type_variety \\\n for mode in trxs[trx].mode \\\n if mode['format'] == trx_mode)\n trx_params = {**mode_params}\n trx_params['frequency'] = equipment['Transceiver'][trx_type_variety].frequency\n # TODO: novel automatic feature maybe unwanted if spacing is specified\n trx_params['spacing'] = automatic_spacing(trx_params['baud_rate'])\n except StopIteration :\n if error_message:\n print(f'could not find tsp : {trx_type_variety} with mode: {trx_mode} in eqpt library')\n print('Computation stopped.')\n exit()\n else:\n # default transponder charcteristics\n trx_params['frequency'] = {'min': default_si_data.f_min, 'max': default_si_data.f_max}\n trx_params['baud_rate'] = default_si_data.baud_rate\n trx_params['spacing'] = default_si_data.spacing\n trx_params['OSNR'] = default_si_data.OSNR\n trx_params['bit_rate'] = default_si_data.bit_rate\n trx_params['roll_off'] = default_si_data.roll_off\n trx_params['power'] = db2lin(default_si_data.power_dbm)*1e-3\n 
trx_params['nb_channel'] = automatic_nch(trx_params['frequency']['min'],\n trx_params['frequency']['max'],\n trx_params['spacing'])\n print('N channels = ', trx_params['nb_channel'])\n return trx_params", "title": "" }, { "docid": "c8337d44195ca90af03d0de312b0043a", "score": "0.41210523", "text": "def estimation(self):\n # TODO: estimation from the last known lane\n if not self._estimation_:\n if self.station:\n consumed = self.trt_station(self.station.id, hours=True)\n target = self.station.target(self)\n ect = timeutils.due_date(target - consumed, today())\n self._estimation_[self.station.id] = {'station': self.station, 'target': target, 'ect': ect}\n for position in range(self.station.id+1, max(self.board.stations)+1):\n station = self.board.stations[position]\n target = station.target(self)\n ect = timeutils.due_date(target, ect)\n self._estimation_[position] = {'station': station, 'target': target, 'ect': ect}\n return self._estimation_", "title": "" }, { "docid": "4a33f8c5316b765da65bccc991962176", "score": "0.4116239", "text": "def ss_to_exposure_time(pSpecDevice, master_clock, shutter_speed):\r\n\r\n exposure_time_value = ctypes.c_double()\r\n\r\n ret = pSpecDevice.duShutterSpeedToExposureTime(master_clock,shutter_speed, ctypes.byref(exposure_time_value))\r\n\r\n if ret <=0:\r\n print \"[PythonPrismError] Converting shutter speed value to exposure time failed\"\r\n return (-1,-1,-1)\r\n else:\r\n print \"[PythonPrism-FromDevice] ShutterSpeed: \", shutter_speed , \" with master clock: \", master_clock, \" equals to Exposure Time: \",exposure_time_value.value\r\n return (exposure_time_value.value)", "title": "" }, { "docid": "075c6d6ef1527561fb02fc6ea7b8f8de", "score": "0.4113632", "text": "def ComputeExposure():\n exposure = SpotCamCStructure.EXPOSURE_STRUCT()\n _ComputeExposure(byref(exposure))\n return exposure", "title": "" }, { "docid": "50fbceec0849bc3086722a1ec3b9db42", "score": "0.41111958", "text": "def get_expansions_specs(features):\n # Find indexes of required features in the original feature space.\n idxs_credit = find_substring_occurences(features, \"Credit amount\")\n idxs_duration = find_substring_occurences(features, \"Duration\")\n idxs_purpose = find_substring_occurences(features, \"Purpose\")\n\n # Concatenate indexes of transformable features.\n transformable_feature_idxs = sorted(idxs_credit + idxs_duration + idxs_purpose)\n reduced_features = features[transformable_feature_idxs]\n\n # Find indexes of required features in the reduced feature space.\n idxs_credit = find_substring_occurences(reduced_features, \"Credit amount\")\n idxs_duration = find_substring_occurences(reduced_features, \"Duration\")\n idxs_purpose = find_substring_occurences(reduced_features, \"Purpose\")\n\n # Set required expansions for features in the reduced feature space.\n expansions = [\n FeatureExpansionSpec(idxs_credit, expand_quantized),\n FeatureExpansionSpec(idxs_duration, expand_quantized),\n FeatureExpansionSpec(idxs_purpose, expand_categorical),\n ]\n\n return expansions, transformable_feature_idxs", "title": "" }, { "docid": "40c7799139564ad15058b4cd8cb66b45", "score": "0.41048968", "text": "def attr_choose(data, attributes, target):\r\n best = attributes[0]\r\n max_gain = 0\r\n\r\n for attr in attributes:\r\n new_gain = info_gain(attributes, data, attr, target)\r\n if new_gain > max_gain:\r\n max_gain = new_gain\r\n best = attr\r\n\r\n return best", "title": "" }, { "docid": "eafd9d767f642f8bf890f56b6f434877", "score": "0.41029027", "text": "def 
exploreTemporalStability(Range,spatialRes,MaxOrbits,stepSize=10,loc = OrbSys.L4, Evaluator = MaxAngularLibration):\n \n Alpha = np.arctan2(loc[1],loc[0]) #Initial Angle from Origin\n r = sqrt(loc[0]**2+loc[1]**2) #Radius from origin\n \n radii = np.linspace(Range[0], Range[1],spatialRes)\n \n Orbits = np.arange(MaxOrbits,step = stepSize)\n Wanders = np.full((len(radii),len(Orbits)),None)\n \n i=0\n \n for i in range(len(radii)):\n x = radii[i]\n O = orbit(OrbSys,[x*cos(Alpha),x*sin(Alpha),0,0])\n \n for j in range(len(Orbits)): #Evolve orbits in given steps\n try:\n O.evolve(stepSize,20,\"odeint\")\n except AssertionError:\n print(\"Integrator Failed\")\n break\n W = Evaluator(O,O.initialPosition) #Not efficient as re-evaluates entire array each time but also not limiting step\n Wanders[i][j] = W\n j+=1\n \n i+=1\n return (radii,Wanders,Orbits)", "title": "" }, { "docid": "4f85f470a76f62ca57c49d740cd5a82e", "score": "0.40967703", "text": "def observation_characterization(self, sInd, modes):\n\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n\n nmodes = len(modes)\n\n # selecting appropriate koMap\n koMap = self.koMaps[modes[0][\"syst\"][\"name\"]]\n\n # find indices of planets around the target\n pInds = np.where(SU.plan2star == sInd)[0]\n\n # get the detected status, and check if there was a FA\n det = self.lastDetected[sInd, 0]\n\n pIndsDet = []\n tochars = []\n intTimes_all = []\n FA = len(det) == len(pInds) + 1\n\n # initialize outputs, and check if there's anything (planet or FA)\n # to characterize\n characterizeds = np.zeros((det.size, len(modes)), dtype=int)\n fZ = 0.0 / u.arcsec**2 * np.ones(nmodes)\n systemParams = SU.dump_system_params(\n sInd\n ) # write current system params by default\n SNR = np.zeros((len(det), len(modes)))\n intTime = None\n if det.size == 0: # nothing to characterize\n return characterizeds, fZ, systemParams, SNR, intTime\n\n # look for last detected planets that have not been fully characterized\n for m_i, mode in enumerate(modes):\n\n if FA is True:\n pIndsDet.append(np.append(pInds, -1)[det])\n else:\n pIndsDet.append(pInds[det])\n\n # look for last detected planets that have not been fully characterized\n if not (FA): # only true planets, no FA\n tochar = self.fullSpectra[m_i][pIndsDet[m_i]] == 0\n else: # mix of planets and a FA\n truePlans = pIndsDet[m_i][:-1]\n tochar = np.append((self.fullSpectra[m_i][truePlans] == 0), True)\n\n # 1/ find spacecraft orbital START position including overhead time,\n # and check keepout angle\n if np.any(tochar):\n # start times\n startTime = (\n TK.currentTimeAbs.copy() + mode[\"syst\"][\"ohTime\"] + Obs.settlingTime\n )\n startTimeNorm = (\n TK.currentTimeNorm.copy()\n + mode[\"syst\"][\"ohTime\"]\n + Obs.settlingTime\n )\n # planets to characterize\n koTimeInd = np.where(\n np.round(startTime.value) - self.koTimes.value == 0\n )[0][\n 0\n ] # find indice where koTime is startTime[0]\n # wherever koMap is 1, the target is observable\n tochar[tochar] = koMap[sInd][koTimeInd]\n\n # 2/ if any planet to characterize, find the characterization times\n # at the detected fEZ, dMag, and WA\n if np.any(tochar):\n fZ[m_i] = ZL.fZ(Obs, TL, sInd, startTime, mode)\n fEZ = self.lastDetected[sInd, 1][det][tochar] / u.arcsec**2\n dMag = self.lastDetected[sInd, 2][det][tochar]\n WA = self.lastDetected[sInd, 3][det][tochar] * u.arcsec\n intTimes = np.zeros(len(tochar)) * u.day\n intTimes[tochar] = OS.calc_intTime(\n TL, sInd, 
fZ[m_i], fEZ, dMag, WA, mode\n )\n intTimes[~np.isfinite(intTimes)] = 0 * u.d\n # add a predetermined margin to the integration times\n intTimes = intTimes * (1 + self.charMargin)\n # apply time multiplier\n totTimes = intTimes * (mode[\"timeMultiplier\"])\n # end times\n endTimes = startTime + totTimes\n endTimesNorm = startTimeNorm + totTimes\n # planets to characterize\n tochar = (\n (totTimes > 0)\n & (totTimes <= OS.intCutoff)\n & (endTimesNorm <= TK.OBendTimes[TK.OBnumber])\n )\n\n # 3/ is target still observable at the end of any char time?\n if np.any(tochar) and Obs.checkKeepoutEnd:\n koTimeInds = np.zeros(len(endTimes.value[tochar]), dtype=int)\n # find index in koMap where each endTime is closest to koTimes\n for t, endTime in enumerate(endTimes.value[tochar]):\n if endTime > self.koTimes.value[-1]:\n # case where endTime exceeds largest koTimes element\n endTimeInBounds = np.where(\n np.floor(endTime) - self.koTimes.value == 0\n )[0]\n koTimeInds[t] = (\n endTimeInBounds[0] if endTimeInBounds.size != 0 else -1\n )\n else:\n koTimeInds[t] = np.where(\n np.round(endTime) - self.koTimes.value == 0\n )[0][\n 0\n ] # find indice where koTime is endTimes[0]\n tochar[tochar] = [\n koMap[sInd][koT] if koT >= 0 else 0 for koT in koTimeInds\n ]\n\n tochars.append(tochar)\n intTimes_all.append(intTimes)\n else:\n tochar[tochar] = False\n tochars.append(tochar)\n intTimes_all.append(np.zeros(len(tochar)) * u.day)\n\n # 4/ if yes, allocate the overhead time, and perform the characterization\n # for the maximum char time\n if np.any(tochars):\n pIndsChar = []\n for m_i, mode in enumerate(modes):\n if len(pIndsDet[m_i]) > 0 and np.any(tochars[m_i]):\n if (\n intTime is None\n or np.max(intTimes_all[m_i][tochars[m_i]]) > intTime\n ):\n intTime = np.max(intTimes_all[m_i][tochars[m_i]])\n pIndsChar.append(pIndsDet[m_i][tochars[m_i]])\n log_char = \" - Charact. 
planet inds %s (%s/%s detected)\" % (\n pIndsChar[m_i],\n len(pIndsChar[m_i]),\n len(pIndsDet[m_i]),\n )\n self.logger.info(log_char)\n self.vprint(log_char)\n else:\n pIndsChar.append([])\n\n if intTime is not None:\n extraTime = intTime * (\n modes[0][\"timeMultiplier\"] - 1.0\n ) # calculates extraTime\n success = TK.allocate_time(\n intTime + extraTime + modes[0][\"syst\"][\"ohTime\"] + Obs.settlingTime,\n True,\n ) # allocates time\n if not (success): # Time was not successfully allocated\n return (characterizeds, fZ, systemParams, SNR, None)\n\n # SNR CALCULATION:\n # first, calculate SNR for observable planets (without false alarm)\n if len(pIndsChar[0]) > 0:\n planinds = pIndsChar[0][:-1] if pIndsChar[0][-1] == -1 else pIndsChar[0]\n else:\n planinds = []\n if len(pIndsChar[1]) > 0:\n planinds2 = (\n pIndsChar[1][:-1] if pIndsChar[1][-1] == -1 else pIndsChar[1]\n )\n else:\n planinds2 = []\n SNRplans = np.zeros((len(planinds)))\n SNRplans2 = np.zeros((len(planinds2)))\n if len(planinds) > 0 and len(planinds2) > 0:\n # initialize arrays for SNR integration\n fZs = np.zeros((self.ntFlux, nmodes)) / u.arcsec**2\n systemParamss = np.empty(self.ntFlux, dtype=\"object\")\n Ss = np.zeros((self.ntFlux, len(planinds)))\n Ns = np.zeros((self.ntFlux, len(planinds)))\n Ss2 = np.zeros((self.ntFlux, len(planinds2)))\n Ns2 = np.zeros((self.ntFlux, len(planinds2)))\n # integrate the signal (planet flux) and noise\n dt = intTime / self.ntFlux\n timePlus = (\n Obs.settlingTime.copy() + modes[0][\"syst\"][\"ohTime\"].copy()\n ) # accounts for the time since the current time\n for i in range(self.ntFlux):\n # allocate first half of dt\n timePlus += dt\n fZs[i, 0] = ZL.fZ(\n Obs, TL, sInd, TK.currentTimeAbs.copy() + timePlus, modes[0]\n )[0]\n fZs[i, 1] = ZL.fZ(\n Obs, TL, sInd, TK.currentTimeAbs.copy() + timePlus, modes[1]\n )[0]\n SU.propag_system(\n sInd,\n TK.currentTimeNorm.copy() + timePlus - self.propagTimes[sInd],\n )\n self.propagTimes[sInd] = TK.currentTimeNorm.copy() + timePlus\n systemParamss[i] = SU.dump_system_params(sInd)\n Ss[i, :], Ns[i, :] = self.calc_signal_noise(\n sInd, planinds, dt, modes[0], fZ=fZs[i, 0]\n )\n Ss2[i, :], Ns2[i, :] = self.calc_signal_noise(\n sInd, planinds2, dt, modes[1], fZ=fZs[i, 1]\n )\n\n # allocate second half of dt\n timePlus += dt\n\n # average output parameters\n systemParams = {\n key: sum([systemParamss[x][key] for x in range(self.ntFlux)])\n / float(self.ntFlux)\n for key in sorted(systemParamss[0])\n }\n for m_i, mode in enumerate(modes):\n fZ[m_i] = np.mean(fZs[:, m_i])\n # calculate planets SNR\n S = Ss.sum(0)\n N = Ns.sum(0)\n S2 = Ss2.sum(0)\n N2 = Ns2.sum(0)\n SNRplans[N > 0] = S[N > 0] / N[N > 0]\n SNRplans2[N2 > 0] = S2[N2 > 0] / N2[N2 > 0]\n # allocate extra time for timeMultiplier\n extraTime = intTime * (mode[\"timeMultiplier\"] - 1)\n TK.allocate_time(extraTime)\n\n # if only a FA, just save zodiacal brightness in the middle of the\n # integration\n else:\n totTime = intTime * (mode[\"timeMultiplier\"])\n TK.allocate_time(totTime / 2.0)\n for m_i, mode in enumerate(modes):\n fZ[m_i] = ZL.fZ(Obs, TL, sInd, TK.currentTimeAbs.copy(), mode)[0]\n TK.allocate_time(totTime / 2.0)\n\n # calculate the false alarm SNR (if any)\n for m_i, mode in enumerate(modes):\n if len(pIndsChar[m_i]) > 0:\n SNRfa = []\n if pIndsChar[m_i][-1] == -1:\n fEZ = self.lastDetected[sInd, 1][-1] / u.arcsec**2\n dMag = self.lastDetected[sInd, 2][-1]\n WA = self.lastDetected[sInd, 3][-1] * u.arcsec\n C_p, C_b, C_sp = OS.Cp_Cb_Csp(\n TL, sInd, fZ[m_i], fEZ, dMag, WA, mode\n 
)\n S = (C_p * intTime).decompose().value\n N = np.sqrt(\n (C_b * intTime + (C_sp * intTime) ** 2).decompose().value\n )\n SNRfa.append([S / N if N > 0 else 0.0])\n\n # save all SNRs (planets and FA) to one array\n SNRinds = np.where(det)[0][tochars[m_i]]\n if m_i == 0:\n SNR[SNRinds, 0] = np.append(SNRplans[:], SNRfa)\n else:\n SNR[SNRinds, 1] = np.append(SNRplans2[:], SNRfa)\n\n # now, store characterization status: 1 for full spectrum,\n # -1 for partial spectrum, 0 for not characterized\n char = SNR[:, m_i] >= mode[\"SNR\"]\n # initialize with full spectra\n characterized = char.astype(int)\n WAchar = self.lastDetected[sInd, 3][char] * u.arcsec\n # find the current WAs of characterized planets\n WAs = systemParams[\"WA\"]\n if FA:\n WAs = np.append(WAs, self.lastDetected[sInd, 3][-1] * u.arcsec)\n # check for partial spectra\n IWA_max = mode[\"IWA\"] * (1 + mode[\"BW\"] / 2.0)\n OWA_min = mode[\"OWA\"] * (1 - mode[\"BW\"] / 2.0)\n char[char] = (WAchar < IWA_max) | (WAchar > OWA_min)\n characterized[char] = -1\n # encode results in spectra lists (only for planets, not FA)\n charplans = characterized[:-1] if FA else characterized\n self.fullSpectra[m_i][pInds[charplans == 1]] += 1\n self.partialSpectra[m_i][pInds[charplans == -1]] += 1\n characterizeds[:, m_i] = characterized.astype(int)\n\n return characterizeds, fZ, systemParams, SNR, intTime", "title": "" }, { "docid": "de623890f3d22e433a9aaa5f93a8c1a2", "score": "0.40945598", "text": "def exposure_full(self):\n if self.local_wht_filename is not None:\n file = pyfits.open(self.local_wht_filename)\n exp_full = file[0].data\n print(\"separate exposure map loaded\")\n else:\n file = pyfits.open(self.local_filename)\n exp_full = file[self._extension_wht].data\n #else:\n # exp_full = file['WHT'].data\n exp_full[np.isnan(exp_full)] = 0\n file.close()\n return exp_full", "title": "" }, { "docid": "e616a8393c06e54a237807d6598c105c", "score": "0.40913182", "text": "def get_T(E='auto'):\n # attenuator setup\n att_conf=att_setup()\n Si_th=np.array(att_conf[2]) \n Cu_th=np.array(att_conf[1])\n abs_mat=att_conf[0]\n if E is 'auto':\n #E=8000 # temporary: don't have channel access -> set E to 8000eV \n E=caget('XF:11IDA-OP{Mono:DCM-Ax:Energy}Mtr.RBV') ### get energy automatically with channel access\n print ('getting energy from global PV: E=',E,'eV (currently not implemented in test version (no channel access) -> 8000eV default)') # future: add PV name for house keeping\n if E> 30000 or E< 2000:\n raise attfuncs_Exception(\"error: Input argument E has to be 2000<E<30000 [eV]\")\n else:\n if E<= 30000 and E>=2000:\n E=np.array(E)\n print ('manual input: E= ',E,'eV')\n else:\n raise attfuncs_Exception(\"error: could not convert energy argument. Input argument E has to be 2000<E<30000 [eV] or 'auto'\")\n abs_th=np.append(Cu_th,Si_th)\n read_conf=np.zeros(len(abs_th))\n for m in range(0,len(abs_th)): ### uncomment when chennel access and PVs available\n read_conf[m] = caget ('XF:11IDB-BI{Attn:%02d}Pos-Sts'%(m+1)) \n \n #read_conf[m]=caget(\"'XF:11IDB-ATT{slot:\",str(int(m+1)),\"}RBV-VALUE'\")\n# read_conf=np.array([0,1,0,1,0,1,0,0,1]) ### just for testing!! 
comment/delete!\n \n \n \n sT=np.zeros(len(abs_th))\n for m in range(0, len(abs_th)):\n sT[m]=xf.get_T(abs_mat[m],E/1000.,abs_th[m])\n x=sT*read_conf;x[x==0]=1\n T_tot=np.product(x)\n Si_ind=read_conf[len(Cu_th):len(read_conf)]\n Cu_ind=read_conf[0:len(Cu_th)]\n print ('Found:')\n print ('Si wafer configuration: ',Si_th*Si_ind,' Cu foil configuration: ',Cu_th*Cu_ind)\n print ('current Transmission at ',E, 'eV: T= ','{:.2e}'.format(T_tot))\n return T_tot", "title": "" }, { "docid": "f255465da5c5d09d193feca9ae0a05b9", "score": "0.40860504", "text": "def _filter_by_expansions(target, query=None):\n\tif query is None:\n\t\treturn target\n\tif \"minExpansions\" in query:\n\t\ttarget = target[target.numExpansions>=query[\"minExpansions\"]]\n\tif \"maxExpansions\" in query:\n\t\ttarget = target[target.numExpansions<=query[\"maxExpansions\"]]\n\treturn target", "title": "" }, { "docid": "116aa025debbb8a6d82d41e8b3ec1d1f", "score": "0.4083428", "text": "def run_sim(self):\n\n OS = self.OpticalSystem\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n ZL = self.ZodiacalLight\n Comp = self.Completeness\n\n # TODO: start using this self.currentSep\n # set occulter separation if haveOcculter\n if OS.haveOcculter:\n self.currentSep = Obs.occulterSep\n\n # choose observing modes selected for detection (default marked with a flag)\n allModes = OS.observingModes\n det_mode = list(filter(lambda mode: mode[\"detectionMode\"], allModes))[0]\n # and for characterization (default is first spectro/IFS mode)\n spectroModes = list(\n filter(lambda mode: \"spec\" in mode[\"inst\"][\"name\"], allModes)\n )\n if np.any(spectroModes):\n char_mode = spectroModes[0]\n # if no spectro mode, default char mode is first observing mode\n else:\n char_mode = allModes[0]\n\n # begin Survey, and loop until mission is finished\n log_begin = \"OB%s: survey beginning.\" % (TK.OBnumber)\n self.logger.info(log_begin)\n self.vprint(log_begin)\n t0 = time.time()\n sInd = None\n ObsNum = 0\n while not TK.mission_is_over(OS, Obs, det_mode):\n\n # acquire the NEXT TARGET star index and create DRM\n old_sInd = sInd # used to save sInd if returned sInd is None\n DRM, sInd, det_intTime, waitTime = self.next_target(\n sInd, det_mode, char_mode\n )\n # pdb.set_trace() ###Rhonda debug\n if sInd is not None:\n ObsNum += (\n 1 # we're making an observation so increment observation number\n )\n\n if OS.haveOcculter:\n # advance to start of observation\n # (add slew time for selected target)\n _ = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n\n # beginning of observation, start to populate DRM\n DRM[\"star_ind\"] = sInd\n DRM[\"star_name\"] = TL.Name[sInd]\n DRM[\"arrival_time\"] = TK.currentTimeNorm.to(\"day\").copy()\n DRM[\"OB_nb\"] = TK.OBnumber\n DRM[\"ObsNum\"] = ObsNum\n pInds = np.where(SU.plan2star == sInd)[0].astype(int)\n DRM[\"plan_inds\"] = pInds\n log_obs = (\n \" Observation #%s, star ind %s (of %s) with %s planet(s), \"\n + \"mission time at Obs start: %s, exoplanetObsTime: %s\"\n ) % (\n ObsNum,\n sInd,\n TL.nStars,\n len(pInds),\n TK.currentTimeNorm.to(\"day\").copy().round(2),\n TK.exoplanetObsTime.to(\"day\").copy().round(2),\n )\n self.logger.info(log_obs)\n self.vprint(log_obs)\n\n detected = np.array([])\n detection = False\n FA = False\n\n if not self.char_only:\n # if sInd not promoted of (char'able and char'd)\n if sInd not in self.promotable_stars or (\n sInd in self.promotable_stars and sInd in self.promoted_stars\n ):\n # PERFORM DETECTION and populate 
revisit list attribute\n (\n detected,\n det_fZ,\n det_systemParams,\n det_SNR,\n FA,\n ) = self.observation_detection(\n sInd, det_intTime.copy(), det_mode\n )\n\n if 1 in detected:\n detection = True\n self.sInd_detcounts[sInd] += 1\n self.sInd_dettimes[sInd] = (\n self.sInd_dettimes.get(sInd) or []\n ) + [TK.currentTimeNorm.copy().to(\"day\")]\n self.vprint(\" Det. results are: %s\" % (detected))\n\n # update the occulter wet mass\n if OS.haveOcculter:\n DRM = self.update_occulter_mass(\n DRM, sInd, det_intTime.copy(), \"det\"\n )\n # populate the DRM with detection results\n DRM[\"det_time\"] = det_intTime.to(\"day\")\n DRM[\"det_status\"] = detected\n DRM[\"det_SNR\"] = det_SNR\n DRM[\"det_fZ\"] = det_fZ.to(\"1/arcsec2\")\n if det_intTime is not None:\n det_comp = Comp.comp_per_intTime(\n det_intTime,\n TL,\n sInd,\n det_fZ,\n self.ZodiacalLight.fEZ0,\n self.int_WA[sInd],\n det_mode,\n )[0]\n DRM[\"det_comp\"] = det_comp\n else:\n DRM[\"det_comp\"] = 0.0\n if np.any(pInds):\n DRM[\"det_fEZ\"] = (\n SU.fEZ[pInds].to(\"1/arcsec2\").value.tolist()\n )\n DRM[\"det_dMag\"] = SU.dMag[pInds].tolist()\n DRM[\"det_WA\"] = SU.WA[pInds].to(\"mas\").value.tolist()\n DRM[\"det_params\"] = det_systemParams\n # populate the DRM with observation modes\n DRM[\"det_mode\"] = dict(det_mode) # moved to det_observation section\n del DRM[\"det_mode\"][\"inst\"], DRM[\"det_mode\"][\"syst\"]\n\n if not self.det_only:\n if (detection and sInd not in self.ignore_stars) or (\n sInd in self.promotable_stars and sInd not in self.ignore_stars\n ):\n # PERFORM CHARACTERIZATION and populate spectra list attribute\n TL.int_comp[sInd] = 1.0\n do_char = True\n\n if sInd not in self.promotable_stars:\n (\n maxIntTimeOBendTime,\n maxIntTimeExoplanetObsTime,\n maxIntTimeMissionLife,\n ) = TK.get_ObsDetectionMaxIntTime(Obs, char_mode)\n char_maxIntTime = min(\n maxIntTimeOBendTime,\n maxIntTimeExoplanetObsTime,\n maxIntTimeMissionLife,\n OS.intCutoff,\n ) # Maximum intTime allowed\n startTime = TK.currentTimeAbs.copy()\n pred_char_intTime = self.calc_targ_intTime(\n np.array([sInd]), startTime, char_mode\n )\n\n # Adjust integration time for stars\n # with known earths around them\n fZ = ZL.fZ(Obs, TL, sInd, startTime, char_mode)\n fEZ = SU.fEZ[pInds].to(\"1/arcsec2\").value / u.arcsec**2\n\n if SU.lucky_planets:\n phi = (1 / np.pi) * np.ones(len(SU.d))\n dMag = deltaMag(SU.p, SU.Rp, SU.d, phi)[\n pInds\n ] # delta magnitude\n WA = np.arctan(SU.a / TL.dist[SU.plan2star]).to(\n \"arcsec\"\n )[\n pInds\n ] # working angle\n else:\n dMag = SU.dMag[pInds]\n WA = SU.WA[pInds]\n # dMag = SU.dMag[pInds]\n # WA = SU.WA[pInds]\n earthlike_inttimes = OS.calc_intTime(\n TL, sInd, fZ, fEZ, dMag, WA, char_mode\n ) * (1 + self.charMargin)\n earthlike_inttimes[~np.isfinite(earthlike_inttimes)] = (\n 0 * u.d\n )\n earthlike_inttime = earthlike_inttimes[\n (earthlike_inttimes < char_maxIntTime)\n ]\n if len(earthlike_inttime) > 0:\n pred_char_intTime = np.max(earthlike_inttime)\n else:\n pred_char_intTime = np.max(earthlike_inttimes)\n if not pred_char_intTime <= char_maxIntTime:\n do_char = False\n\n if do_char:\n if char_mode[\"SNR\"] not in [0, np.inf]:\n (\n characterized,\n char_fZ,\n char_systemParams,\n char_SNR,\n char_intTime,\n ) = self.observation_characterization(sInd, char_mode)\n if np.any(characterized):\n self.promoted_stars.append(sInd)\n self.vprint(\n \" Char. 
results are: %s\" % (characterized)\n )\n if np.any(\n np.logical_and(\n self.is_earthlike(pInds, sInd),\n (characterized == 1),\n )\n ):\n self.known_earths = np.union1d(\n self.known_earths,\n pInds[self.is_earthlike(pInds, sInd)],\n ).astype(int)\n if sInd not in self.det_prefer:\n self.det_prefer.append(sInd)\n if sInd not in self.ignore_stars:\n self.ignore_stars.append(sInd)\n if 1 in characterized:\n self.sInd_charcounts[sInd] += 1\n\n else:\n char_intTime = None\n lenChar = len(pInds) + 1 if FA else len(pInds)\n characterized = np.zeros(lenChar, dtype=float)\n char_SNR = np.zeros(lenChar, dtype=float)\n char_fZ = 0.0 / u.arcsec**2\n char_systemParams = SU.dump_system_params(sInd)\n assert char_intTime != 0, \"Integration time can't be 0.\"\n # update the occulter wet mass\n if OS.haveOcculter and char_intTime is not None:\n DRM = self.update_occulter_mass(\n DRM, sInd, char_intTime, \"char\"\n )\n # populate the DRM with characterization results\n DRM[\"char_time\"] = (\n char_intTime.to(\"day\")\n if char_intTime is not None\n else 0.0 * u.day\n )\n DRM[\"char_status\"] = (\n characterized[:-1] if FA else characterized\n )\n DRM[\"char_SNR\"] = char_SNR[:-1] if FA else char_SNR\n DRM[\"char_fZ\"] = char_fZ.to(\"1/arcsec2\")\n if char_intTime is not None:\n char_comp = Comp.comp_per_intTime(\n char_intTime,\n TL,\n sInd,\n char_fZ,\n self.ZodiacalLight.fEZ0,\n self.int_WA[sInd],\n char_mode,\n )[0]\n DRM[\"char_comp\"] = char_comp\n else:\n DRM[\"char_comp\"] = 0.0\n DRM[\"char_params\"] = char_systemParams\n # populate the DRM with FA results\n DRM[\"FA_det_status\"] = int(FA)\n DRM[\"FA_char_status\"] = characterized[-1] if FA else 0\n DRM[\"FA_char_SNR\"] = char_SNR[-1] if FA else 0.0\n DRM[\"FA_char_fEZ\"] = (\n self.lastDetected[sInd, 1][-1] / u.arcsec**2\n if FA\n else 0.0 / u.arcsec**2\n )\n DRM[\"FA_char_dMag\"] = (\n self.lastDetected[sInd, 2][-1] if FA else 0.0\n )\n DRM[\"FA_char_WA\"] = (\n self.lastDetected[sInd, 3][-1] * u.arcsec\n if FA\n else 0.0 * u.arcsec\n )\n\n DRM[\"char_mode\"] = dict(char_mode)\n del DRM[\"char_mode\"][\"inst\"], DRM[\"char_mode\"][\"syst\"]\n\n # populate the DRM with observation modes\n # DRM['det_mode'] = dict(det_mode) #moved to det_observation section\n # del DRM['det_mode']['inst'], DRM['det_mode']['syst']\n\n DRM[\"exoplanetObsTime\"] = TK.exoplanetObsTime.copy()\n\n # append result values to self.DRM\n self.DRM.append(DRM)\n\n # handle case of inf OBs and missionPortion < 1\n if np.isinf(TK.OBduration) and (TK.missionPortion < 1.0):\n self.arbitrary_time_advancement(\n TK.currentTimeNorm.to(\"day\").copy() - DRM[\"arrival_time\"]\n )\n\n else: # sInd == None\n sInd = old_sInd # Retain the last observed star\n if (\n TK.currentTimeNorm.copy() >= TK.OBendTimes[TK.OBnumber]\n ): # currentTime is at end of OB\n # Conditional Advance To Start of Next OB\n if not TK.mission_is_over(\n OS, Obs, det_mode\n ): # as long as the mission is not over\n TK.advancetToStartOfNextOB() # Advance To Start of Next OB\n elif waitTime is not None:\n # CASE 1: Advance specific wait time\n _ = TK.advanceToAbsTime(TK.currentTimeAbs.copy() + waitTime)\n self.vprint(\"waitTime is not None\")\n else:\n startTimes = (\n TK.currentTimeAbs.copy() + np.zeros(TL.nStars) * u.d\n ) # Start Times of Observations\n observableTimes = Obs.calculate_observableTimes(\n TL,\n np.arange(TL.nStars),\n startTimes,\n self.koMaps,\n self.koTimes,\n det_mode,\n )[0]\n # CASE 2 If There are no observable targets for the\n # rest of the mission\n if (\n observableTimes[\n (\n 
TK.missionFinishAbs.copy().value * u.d\n > observableTimes.value * u.d\n )\n * (\n observableTimes.value * u.d\n >= TK.currentTimeAbs.copy().value * u.d\n )\n ].shape[0]\n ) == 0:\n self.vprint(\n (\n \"No Observable Targets for Remainder of mission at \"\n \"currentTimeNorm = {}\"\n ).format(TK.currentTimeNorm)\n )\n # Manually advancing time to mission end\n TK.currentTimeNorm = TK.missionLife\n TK.currentTimeAbs = TK.missionFinishAbs\n else:\n # CASE 3 nominal wait time if at least 1 target is still\n # in list and observable\n # TODO: ADD ADVANCE TO WHEN FZMIN OCURS\n inds1 = np.arange(TL.nStars)[\n observableTimes.value * u.d\n > TK.currentTimeAbs.copy().value * u.d\n ]\n # apply intTime filter\n inds2 = np.intersect1d(self.intTimeFilterInds, inds1)\n # apply revisit Filter\n # NOTE this means stars you added to the revisit list\n inds3 = self.revisitFilter(\n inds2, TK.currentTimeNorm.copy() + self.dt_max.to(u.d)\n )\n self.vprint(\n \"Filtering %d stars from advanceToAbsTime\"\n % (TL.nStars - len(inds3))\n )\n oTnowToEnd = observableTimes[inds3]\n # there is at least one observableTime between now and\n # the end of the mission\n if not oTnowToEnd.value.shape[0] == 0:\n # advance to that observable time\n tAbs = np.min(oTnowToEnd)\n else:\n # advance to end of mission\n tAbs = TK.missionStart + TK.missionLife\n tmpcurrentTimeNorm = TK.currentTimeNorm.copy()\n # Advance Time to this time\n # OR start of next OB following this time\n _ = TK.advanceToAbsTime(tAbs)\n self.vprint(\n (\n \"No Observable Targets a currentTimeNorm= {:.2f} \"\n \"Advanced To currentTimeNorm = {:.2f}\"\n ).format(\n tmpcurrentTimeNorm.to(\"day\"),\n TK.currentTimeNorm.to(\"day\"),\n )\n )\n else: # TK.mission_is_over()\n dtsim = (time.time() - t0) * u.s\n log_end = (\n \"Mission complete: no more time available.\\n\"\n + \"Simulation duration: %s.\\n\" % dtsim.astype(\"int\")\n + \"Results stored in SurveySimulation.DRM (Design Reference Mission).\"\n )\n self.logger.info(log_end)\n self.vprint(log_end)", "title": "" }, { "docid": "bb6fd15a0093a67be7f177d14b7a2336", "score": "0.40811446", "text": "def get_exposure_time(self, darktime=False):\n\n # look up address of exposure subroutine\n # then get current instruction\n if darktime:\n darkadd = self.seq.program.subroutines[self.darksub]\n instruction = self.seq.program.instructions[darkadd]\n else:\n exposureadd = self.seq.program.subroutines[self.exposuresub]\n instruction = self.seq.program.instructions[exposureadd]\n iterexp = instruction.repeat\n self.exptime = float(iterexp) * self.exposure_unit # in seconds\n\n return self.exptime", "title": "" }, { "docid": "e6b7a2f39349c204e37ebe5e9c34b3dc", "score": "0.40715143", "text": "def getDESSky(num, base_dir='Training/DESSky'):\n # print('%s/%s_g_sky.fits' % (base_dir, num))\n g_des_sky = fits.open(glob.glob('%s/%s_g_sky.fits' % (base_dir, num))[0])\n r_des_sky = fits.open(glob.glob('%s/%s_r_sky.fits' % (base_dir, num))[0])\n i_des_sky = fits.open(glob.glob('%s/%s_i_sky.fits' % (base_dir, num))[0])\n return g_des_sky, r_des_sky, i_des_sky", "title": "" }, { "docid": "b9ee356283f961ac3cb5ca7f8a744337", "score": "0.40685672", "text": "def get_mtf(image, params, sources):\n # Some image testing to catch unexpected inputs\n if len(image.shape) != 2:\n print('Input image must be 2-dimensional')\n return\n if image.shape[0] != image.shape[1]:\n print('Input image must be square')\n return\n\n if (sources != None):\n if (len(sources.shape) != 2) or (sources.shape[0] != sources.shape[1]):\n print('Input sources must 
be of the same size as image')\n return\n\n # Pull out the necessary paramters\n D = params['D'] # telescope primary mirror diameter in meters\n wave = params['wave'] # observing wavelength in meters\n F = params['F'] # effective focal length at detector in meters\n Apix = params['Apix'] # pixel size of detector in meters\n platescale = Apix / F # plate scale of detector in radians/pixel\n\n # Calculate the sky in the image.\n skyInfo = mmm.mmm(image)\n skyMode = skyInfo['mode']\n skySigma = skyInfo['sigma']\n\n # Apodize the image with a Hanning kernal to enforce periodicity\n szx = image.shape[0]\n szy = image.shape[1]\n han = hanning(szx, szy)\n img_skysub = image - skyMode\n\n fftim = fftpack.fft2(img_skysub * han) / (szx * szy)\n absim = np.real( fftim * fftim.conjugate() )\n absim[0,0] = np.nan # don't count the DC component\n wrapim = fftpack.fftshift( absim ) # this is the 2D power spectrum\n ind = np.where( np.isfinite(wrapim) == False )\n xcen = ind[0][0]\n ycen = ind[1][0]\n\n tmp = radialProfile.azimuthalAverage(wrapim, center=[xcen,ycen], \n ignoreNAN=True)\n pix = tmp[0]\n value = tmp[1]\n rms = tmp[2]\n npts = tmp[3]\n\n cut_d = 2.0 * platescale # detector minimum angle in radians\n cut_t = wave / D # telescope minimum angle in radians\n rat = cut_d / cut_t\n freq = pix / (0.5 * szx * rat)\n error = rms / np.sqrt(npts)\n\n # Ignore frequencies higher than the critical frequency\n keepind = np.where(freq <= 1)\n freq = freq[keepind]\n power = value[keepind]\n error = error[keepind]\n\n pspec_sources_2d = fftpack.fft2(sources * han) / (szx * szy)\n pspec_sources_2d = np.real(pspec_sources_2d * pspec_sources_2d.conjugate())\n pspec_sources_2d[0,0] = np.nan\n pspec_sources_2d = fftpack.fftshift( pspec_sources_2d )\n \n tmp = radialProfile.azimuthalAverage(pspec_sources_2d, center=[xcen, ycen],\n ignoreNAN=True)\n\n pspec_freq = tmp[0]\n pspec_sources = tmp[1]\n\n pspec_sources /= np.median(pspec_sources)\n pspec_sources = pspec_sources[keepind]\n\n return (freq, power, error, pspec_sources)", "title": "" }, { "docid": "b0dfebd353823223ea5ac09b24e5dfb8", "score": "0.40676352", "text": "def get_target_examples(self):\n return self.model.decode(self._target_encoding), self._target_source", "title": "" }, { "docid": "79c255ba67f5d3a8acb41574afe5ce4f", "score": "0.40636122", "text": "def __get_travel_model_sub_dictionaries(self):\n # get travel model parameter from the opus dictionary\n travel_model_configuration = self.config_dictionary['travel_model_configuration'] # contains matsim4urbansim and matsim_config parameter\n \n # matsim4urbansim\n matsim4urbansim_config = travel_model_configuration['matsim4urbansim'] # contains parameter for matsim/urbansim integration\n # matsim_config\n matsim_config = travel_model_configuration['matsim_config'] # contains various matsim_config parameter \n matsim_common = matsim_config['common']\n \n return travel_model_configuration, matsim4urbansim_config, matsim_common", "title": "" }, { "docid": "a523bcf32cb096853b28801eea418c1e", "score": "0.40632382", "text": "def _get_target(self, model, x):\n dataset = self.model.dataset.get_data().train\n labels = dataset.labels\n\n if self.options.attack_mode in ('mean', 'random', 'index'):\n source = dataset.images[labels == self.options.attack_target]\n latent = model.encode(source)\n if self.options.attack_mode == 'mean':\n latent = np.mean(latent, axis=0)\n elif self.options.attack_mode == 'random':\n index = random.randint(0, source.shape[0] - 1)\n latent = latent[index].reshape([1, -1])\n source = 
source[index].reshape([1, -1])\n elif self.options.attack_mode == 'index':\n latent = latent[self.options.attack_index].reshape([1, -1])\n source = source[self.options.attack_index].reshape([1, -1])\n\n latent = np.broadcast_to(latent, [x.shape[0], latent.shape[1]])\n elif self.options.attack_mode == 'untargeted':\n source = None\n latent = model.encode(x)\n\n return latent, source", "title": "" }, { "docid": "eb18753f3a75050ba2faac1b7c32fa55", "score": "0.40623856", "text": "def get_target_info(self, target_index):\n res_details = self._get_res_details(\n '/cgi-bin/disk/iscsi_portal_setting.cgi?',\n func='extra_get',\n targetInfo=1,\n targetIndex=target_index,\n ha_sync='1',\n sid=self.sid)\n\n root = ET.fromstring(res_details['data'])\n LOG.debug('TS get_target_info.authPassed: (%s)',\n root.find('authPassed').text)\n if root.find('authPassed').text == '0':\n raise exception.VolumeBackendAPIException(\n data=_('Session id expired'))\n if root.find('result').text < '0':\n raise exception.VolumeBackendAPIException(\n data=_('Get target info failed'))\n\n target_list = root.find('targetInfo')\n target_tree = target_list.findall('row')\n for target in target_tree:\n if target_index == target.find('targetIndex').text:\n return target", "title": "" }, { "docid": "6e7424c4bc12449821047786cfcadafe", "score": "0.40548176", "text": "def select_expression_data(input_data_file,\nmetadata_file,\nlst_experiments,\noutput_file):\n # Read data\n data = pd.read_csv(\n input_data_file,\n header=0,\n sep='\\t',\n index_col=0).T\n\n # Read metadata\n metadata = pd.read_csv(\n metadata_file,\n header=0,\n sep='\\t',\n index_col=0)\n \n map_experiment_sample = metadata[['sample_name', 'ml_data_source']]\n \n # Get expression data associated with experiment_id\n selected_mapping = map_experiment_sample.loc[lst_experiments]\n selected_sample_ids = list(selected_mapping['ml_data_source'].values)\n\n selected_data = data.loc[selected_sample_ids]\n\n print('The selected dataset contains {} samples and {} genes'.\n format(selected_data.shape[0],selected_data.shape[1]))\n\n # Save selected gene expression data\n selected_data.to_csv(output_file, sep='\\t', index=True)", "title": "" }, { "docid": "7821448803354e098012d1808d3351cd", "score": "0.4053796", "text": "def select_action(self):\n if self.algorithm == \"UCB-eff\":\n actual_q = self.q_table[self.layer][ tuple(self.outcomes_observed) ][ tuple(self.actions_index_did) ]\n action_index = np.where(actual_q == np.max(actual_q))[0]\n action, action_index = self.give_disp_value(action_index)\n self.actions_index_did.append(action_index)\n self.actions_value_did.append(action)\n return action_index, action\n\n # if (self.method==\"ep-greedy\")|(self.experiments_did<self.min_actions):\n if (self.method==\"ep-greedy\"):\n\n # if (self.method != \"ep-greedy\")&(self.experiments_did==0):\n # if (self.method != \"ep-greedy\")&(self.experiments_did==0):\n #\n # print(\"Trying stage!\")\n # self.ep_saved = self.ep\n # self.ep_method_saved = self.ep_method\n # self.ep_method = \"classics\"\n # self.ep = 1\n\n if self.ep_method==\"exp-decay\": #set to whatever otherwise\n self.ep = np.exp(-self.experiments_did/self.time_tau)\n if self.ep<self.min_ep:\n self.ep=self.min_ep\n r = random.random()\n if (r< self.ep) | (self.ep==1):\n action_index = random.choice(self.action_indexes)\n action, action_index = self.give_disp_value(action_index)\n self.actions_index_did.append(action_index)\n self.actions_value_did.append(action)\n return action_index, action\n else:\n\n actual_q = 
self.q_table[self.layer][ tuple(self.outcomes_observed) ][ tuple(self.actions_index_did) ]\n action_index = np.where(actual_q == np.max(actual_q))[0]\n action, action_index = self.give_disp_value(action_index)\n self.actions_index_did.append(action_index)\n self.actions_value_did.append(action)\n return action_index, action\n\n # elif (self.method == \"ucb\")&(self.experiments_did>=self.min_actions):\n elif (self.method == \"ucb\"):\n # if self.experiments_did == self.min_actions:\n # self.ep = self.ep_saved\n # self.ep_method = self.ep_method_saved\n n_visits = np.array(self.n_table[self.layer][tuple(self.outcomes_observed[:self.layer])][tuple(self.actions_index_did[:(self.layer+1)])])+1\n if self.ucb_method==\"ucb1\":\n ucb =np.sqrt(2* np.log(np.sum(n_visits))/ n_visits)\n # np.save(\"ucb1/ucb1\"+str(self.experiments_did),ucb,allow_pickle=True)\n\n elif self.ucb_method==\"ucb2\":\n time = np.sum(n_visits)\n ucb = np.sqrt(2*np.log(1 + time*np.log(time)**2)/n_visits)\n # np.save(\"ucb2/ucb2\"+str(self.experiments_did),ucb,allow_pickle=True)\n elif self.ucb_method == \"ucb3\":\n ucb = np.sqrt(2* np.log(np.sum(n_visits)))/ n_visits\n elif self.ucb_method == \"ucb4\":\n #https://arxiv.org/abs/1102.2490\n #I use c=0 for optimality according to the authors...\n #Notice i put [actions]\n qs = np.arange(.01,1,.01)\n ucb = np.zeros(len(n_visits))\n for actions in range(len(n_visits)):\n to_max = []\n for q in qs:\n value_inside = n_visits[actions]*self.kl(self.q_table[self.layer][tuple(self.outcomes_observed)][tuple(self.actions_index_did)][actions], q)\n if value_inside <= np.log(self.experiments_did+1):\n to_max.append(value_inside)\n else:\n to_max.append(-1)\n ucb[actions] = -self.q_table[self.layer][tuple(self.outcomes_observed)][tuple(self.actions_index_did)][actions] + max(to_max)\n else:\n print(\"Error in the ucb method! 
is either ucb1, ucb2 or ucb3\")\n\n ucb_q_table = self.q_table[self.layer][tuple(self.outcomes_observed)][tuple(self.actions_index_did)] + ucb\n action_index = np.where(ucb_q_table == max( ucb_q_table ))[0]\n action, action_index = self.give_disp_value(action_index)\n self.actions_index_did.append(action_index)\n self.actions_value_did.append(action)\n\n return action_index, action\n\n # elif (self.method == \"thompson-sampling\")&(self.experiments_did>=self.min_actions):\n elif (self.method == \"thompson-sampling\"):\n if self.experiments_did == self.min_actions:\n self.ep = self.ep_saved\n self.ep_method = self.ep_method_saved\n # np.random.seed(datetime.now().microsecond()*int(np.random.random()))\n th = np.random.beta(self.alphas_search[self.layer][tuple(self.outcomes_observed[:self.layer])][tuple(self.actions_index_did[:(self.layer+1)])], self.betas_search[self.layer][tuple(self.outcomes_observed[:self.layer])][tuple(self.actions_index_did[:(self.layer+1)])] )\n action_index = np.argmax(th)\n action, action_index = self.give_disp_value(action_index)\n self.actions_index_did.append(action_index)\n self.actions_value_did.append(action)\n return action_index, action", "title": "" }, { "docid": "a557109c2fb5aceffaa5cc33df7bfe07", "score": "0.40506417", "text": "def get_monitoring_data_specs(self):\n space = CompositeSpace((self.get_input_space(),\n self.get_target_space(),self.get_latent_space()))\n source = (self.get_input_source(), self.get_target_source(),self.get_latent_source())\n return (space, source)", "title": "" }, { "docid": "e50907cf44e0458305b02e5d17289111", "score": "0.40480548", "text": "def get_gym_env_info(env, if_print) -> (str, int, int, int, int, bool, float):\n gym.logger.set_level(40) # Block warning: 'WARN: Box bound precision lowered by casting to float64'\n assert isinstance(env, gym.Env)\n\n env_name = env.unwrapped.spec.id\n\n state_shape = env.observation_space.shape\n state_dim = state_shape[0] if len(state_shape) == 1 else state_shape # sometimes state_dim is a list\n\n target_return = getattr(env, 'target_return', None)\n target_return_default = getattr(env.spec, 'reward_threshold', None)\n if target_return is None:\n target_return = target_return_default\n if target_return is None:\n target_return = 2 ** 16\n\n max_step = getattr(env, 'max_step', None)\n max_step_default = getattr(env, '_max_episode_steps', None)\n if max_step is None:\n max_step = max_step_default\n if max_step is None:\n max_step = 2 ** 10\n\n if_discrete = isinstance(env.action_space, gym.spaces.Discrete)\n if if_discrete: # make sure it is discrete action space\n action_dim = env.action_space.n\n action_max = int(1)\n elif isinstance(env.action_space, gym.spaces.Box): # make sure it is continuous action space\n action_dim = env.action_space.shape[0]\n action_max = float(env.action_space.high[0])\n assert not any(env.action_space.high + env.action_space.low)\n else:\n raise RuntimeError('| Please set these value manually: if_discrete=bool, action_dim=int, action_max=1.0')\n\n print(f\"\\n| env_name: {env_name}, action space if_discrete: {if_discrete}\"\n f\"\\n| state_dim: {state_dim:4}, action_dim: {action_dim}, action_max: {action_max}\"\n f\"\\n| max_step: {max_step:4}, target_return: {target_return}\") if if_print else None\n return env_name, state_dim, action_dim, action_max, max_step, if_discrete, target_return", "title": "" }, { "docid": "f1a210c97e102ea63ddbe91ef8eda28d", "score": "0.40392885", "text": "def get_episode(self, env, get_action_ops, gym_or_pyco, obs_dim):\n obs_buf = 
[]\n act_buf = []\n rew_buf = []\n val_buf = []\n logp_buf = []\n last_rew_buf = []\n last_val_buf = []\n last_obs_buf = []\n\n obs, rew, done, ep_ret, ep_len = env.reset(), 0, False, 0, 0\n\n if gym_or_pyco == 'gym':\n obs = obs.reshape(1, obs_dim[0], obs_dim[1], obs_dim[2])\n else:\n obs = rgb_input_pyco(obs, obs_dim)\n obs = obs.reshape(1, obs_dim[0], obs_dim[1], 1)\n\n obs_buf.append(obs)\n rew_buf.append(rew)\n\n\n with self.sess as sess:\n\n seed = np.random.randint(low=1, high=100, size=1)[0]\n\n for i in range(seed):\n sess.run(get_action_ops, feed_dict={self.x_ph: obs})\n\n a, v_t, logp_t = sess.run(get_action_ops, feed_dict={self.x_ph: obs})\n\n act_buf.append(a[0])\n val_buf.append(v_t[0])\n logp_buf.append(logp_t)\n\n while not done:\n obs, rew, done, _ = env.step(act_buf[-1])\n if done:\n if rew == None:\n rew = 0\n last_obs_buf = rgb_input_pyco(obs, obs_dim)\n last_obs_buf = last_obs_buf.reshape(1, obs_dim[0], obs_dim[1], 1)\n last_rew_buf = rew\n a, v_t, logp_t = self.sess.run(get_action_ops, feed_dict={self.x_ph: last_obs_buf})\n last_val_buf = v_t[0]\n\n elif gym_or_pyco == 'gym':\n obs = obs.reshape(1, obs_dim[0], obs_dim[1], obs_dim[2])\n obs_buf.append(obs)\n if rew == None:\n rew = 0\n rew_buf.append(rew)\n else:\n rew_buf.append(rew)\n\n a, v_t, logp_t = self.sess.run(get_action_ops, feed_dict={self.x_ph: obs})\n\n act_buf.append(a[0])\n val_buf.append(v_t[0])\n logp_buf.append(logp_t)\n\n else:\n obs = rgb_input_pyco(obs, obs_dim)\n obs = obs.reshape(1, obs_dim[0], obs_dim[1], 1)\n obs_buf.append(obs)\n if rew == None:\n rew = 0\n rew_buf.append(rew)\n else:\n rew_buf.append(rew)\n\n a, v_t, logp_t = self.sess.run(get_action_ops, feed_dict={self.x_ph: obs})\n\n act_buf.append(a[0])\n val_buf.append(v_t[0])\n logp_buf.append(logp_t)\n\n\n return obs_buf, act_buf, rew_buf, val_buf, logp_buf, last_rew_buf, last_val_buf, last_obs_buf", "title": "" }, { "docid": "57fc3f8a3a39f0cb4c50f75978212a08", "score": "0.40374622", "text": "def extract_target(self):\n self.target_name=self.targetcol_name()\n self.target_data=self.targetcol_data()\n if self.coded:\n self.target_coded()\n return self.target_data,self.classes\n\n elif self.targetencode:\n self.target_encode()\n return self.target_code,self.classes\n\n elif self.qcut:\n self.target_qcut()\n return self.target_code,self.classes\n\n elif self.scaletarget:\n return self.target_scaled,self.target_name\n\n else:\n return self.target_data,self.target_name", "title": "" }, { "docid": "d7c0ab6d0499619d3badcb655882d1f0", "score": "0.40320644", "text": "def quality(self, state, actions, params=None):\n qualities = []\n\n # Deproject points.\n point_cloud_image = state.camera_intr.deproject_to_image(\n state.rgbd_im.depth)\n\n # Compute negative SSE from the best fit plane for each grasp.\n for i, action in enumerate(actions):\n if not isinstance(action, SuctionPoint2D):\n not_suction_msg = (\"This function can only be used to evaluate\"\n \" suction quality.\")\n raise ValueError(not_suction_msg)\n\n # x,y in matrix A and z is vector z.\n points = self._points_in_window(point_cloud_image,\n action,\n segmask=state.segmask)\n A, b = self._points_to_matrices(points)\n # vector w w/ a bias term represents a best-fit plane.\n w = self._best_fit_plane(A, b)\n\n # Compute curvature.\n fx = w[0]\n fy = w[1]\n fxx = 2 * w[2]\n fxy = w[3]\n fyy = 2 * w[4]\n curvature = (fxx * fyy - fxy**2) / ((1 + fx**2 + fy**2)**2)\n\n # Store quality.\n quality = np.exp(-np.abs(curvature))\n qualities.append(quality)\n\n return np.array(qualities)", 
"title": "" }, { "docid": "dd3974d95b54f8a81f03e65b40ebcb06", "score": "0.4030685", "text": "def __Get_Lib_Xpixels_Ypixels__ (cls) :\n\t\t# Loading the spectrometer driver\n\t\tlib = ctypes.WinDLL (cls.GetAndorDriverFolder() + \"atmcd32d.dll\")\n\t\t\n\t\t# Verifying that there is a single Andor camera\n\t\ttotalCameras = ctypes.c_long()\n\t\tif lib.GetAvailableCameras( ctypes.byref(totalCameras) ) != DRV_SUCCESS : \n\t\t\traise RuntimeError (\"Error in GetAvailableCameras\")\n\n\t\tif totalCameras.value > 1 : \n\t\t\traise RuntimeError (\"More than one Andor camera is present\")\n\n\t\t# Initialize the camera\t\n\t\tresult = lib.Initialize( cls.GetAndorDriverFolder() ) \n\t\tif result != DRV_SUCCESS : raise RuntimeError (\"Error in Initialize: %s \" % SpectrometerErrorMsg[result])\n\t\t\n\t\t# Find out the number of pixels for figures\n\t\txpixels = ctypes.c_int(); ypixels = ctypes.c_int()\n\t\tif lib.GetDetector( ctypes.byref(xpixels), ctypes.byref(ypixels) ) != DRV_SUCCESS : \n\t\t\traise RuntimeError (\"Error in GetDetector\")\n\t\treturn lib, xpixels.value, ypixels.value", "title": "" }, { "docid": "59e4c47e46df41669a6f1f73205bb31c", "score": "0.40224922", "text": "def get_frame(self, target_yaw, target_pitch, target_loc = None, distance = 0):\r\n # Read the next frame\r\n retval,frame = self.cap.read()\r\n assert(retval) \r\n \r\n undistorted_frame = self.undistort_frame(frame)\r\n \r\n now = self.cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC)\r\n current_yaw = self.log.get_ekf_yaw(now)\r\n current_loc = self.log.get_ekf_loc(now)\r\n if target_loc == None:\r\n target_loc = self.log.get_desired_loc(now)\r\n \r\n # For debugging\r\n #current_loc = target_loc\r\n #target_loc = np.array([[0.,0.,0.]])\r\n #current_loc = target_loc\r\n #target_loc = current_loc\r\n #current_yaw = 0\r\n #target_yaw = current_yaw\r\n #target_pitch = self.pitch\r\n #target_yaw = (target_yaw - current_yaw)*.75 + current_yaw\r\n \r\n # Calculate R1: World frame to camera frame\r\n R_y1 = self.yaw_matrix(ned2image_yaw(current_yaw))\r\n R_p1 = self.pitch_matrix(self.pitch) \r\n R1 = R_p1.dot(R_y1)\r\n \r\n # Calculate R2: World frame to desired frame\r\n R_y2 = self.yaw_matrix(ned2image_yaw(target_yaw))\r\n R_p2 = self.pitch_matrix(target_pitch)\r\n R2 = R_p2.dot(R_y2)\r\n \r\n # Translation Vectors\r\n T1 = ned2wun_loc(current_loc)\r\n T2 = ned2wun_loc(target_loc)\r\n \r\n # Get plane equation for translational corrections\r\n if distance == 0:\r\n K = self.horizontal_plane_vector()\r\n else:\r\n K = self.plane_vector(distance, ned2image_yaw(target_yaw))\r\n \r\n # Calculate full homography and transform frame\r\n H = self.get_homography(R1,R2,T1,T2,K)\r\n rotated_frame = cv2.warpPerspective(undistorted_frame,H,(self.w,self.h))\r\n return rotated_frame, frame", "title": "" }, { "docid": "405b42ef175be48da54b7b066d3c0bf9", "score": "0.401567", "text": "def get_data_specs(self, model):\n if self.supervised:\n # b=model.get_input_space()\n # a = model.get_latent_space()\n # space = CompositeSpace([model.get_input_space(),\n # CompositeSpace([model.get_target_space(),model.get_latent_space()])])\n # sources = (model.get_input_source(), (model.get_target_source(),model.get_latent_source()))\n # mapping = DataSpecsMapping((space, sources))\n # flat_source = mapping.flatten(sources)\n # # flat_source == ('features', 'features', 'targets')\n # flat_space = mapping.flatten(space)\n # return (flat_space, flat_source)\n space = CompositeSpace([model.get_input_space(),\n model.get_target_space(),model.get_latent_space()])\n sources = 
(model.get_input_source(), model.get_target_source(),model.get_latent_source())\n return space, sources\n else:\n return (model.get_input_space(), model.get_input_source())", "title": "" }, { "docid": "275877232df5ea59e625c46ca653a545", "score": "0.40150547", "text": "def observations(self, tmin=None, tmax=None, freq=None,\n update_observations=False):\n if tmin is None and self.settings['tmin']:\n tmin = self.settings['tmin']\n else:\n tmin = self.get_tmin(tmin, freq, use_oseries=False,\n use_stresses=True)\n if tmax is None and self.settings['tmax']:\n tmax = self.settings['tmax']\n else:\n tmax = self.get_tmax(tmax, freq, use_oseries=False,\n use_stresses=True)\n if freq is None:\n freq = self.settings[\"freq\"]\n\n for key, setting in zip([tmin, tmax, freq], [\"tmin\", \"tmax\", \"freq\"]):\n if key != self.settings[setting]:\n update_observations = True\n\n if self.oseries_calib is None or update_observations:\n oseries_calib = self.oseries.series.loc[tmin:tmax]\n\n # sample measurements, so that frequency is not higher than model\n # keep the original timestamps, as they will be used during\n # interpolation of the simulation\n sim_index = self.get_sim_index(tmin, tmax, freq,\n self.settings[\"warmup\"])\n if not oseries_calib.empty:\n index = get_sample(oseries_calib.index, sim_index)\n oseries_calib = oseries_calib.loc[index]\n else:\n oseries_calib = self.oseries_calib\n return oseries_calib", "title": "" }, { "docid": "ab1f9b70b73c235566b2a9796bd7cc6a", "score": "0.40142804", "text": "def get_exposures_detailed(self) -> Dict[str, Dict[str, Tuple[str, float]]]:\n exposures_table = {}\n\n for idx, security in enumerate(self.securities):\n exposures = security.get_exposures()\n for exposure in exposures:\n exposures_table.setdefault(exposure, {})\n exposures_table[exposure].setdefault(exposures[exposure], [])\n exposures_table[exposure][exposures[exposure]].append(\n (security.get_isin(), self.weights[idx])\n )\n\n return exposures_table", "title": "" }, { "docid": "49302213a3664e375df2d3fd21db6d03", "score": "0.40135843", "text": "def _calculate_target_positions(self):\n\n if self._base_currency is not None and self._exposure is not None and self._net_liquidation is not None:\n self._get_currencies(self._base_currency)\n self._target_positions = {\n k: round(self._exposure * v * self._net_liquidation\n / (self._contracts[k].tickers.close\n * int(self._contracts[k].contract.multiplier)\n * self._fx[self._contracts[k].contract.currency])) if v else 0\n for k, v in self._scaled_signals.items()\n }\n else:\n # TODO: review\n self._target_positions = {k: 0 for k in self._scaled_signals.keys()}", "title": "" }, { "docid": "287a5aa756d4ba85e7a2323a4d58f1ba", "score": "0.40102503", "text": "def get_measurements(self, action1, outcome1, action2, outcome_2, V): \n return np.array([action1, outcome1, action2, outcome_2, V])", "title": "" }, { "docid": "3d42602837bdbc5f218df7da3d9077d0", "score": "0.40033", "text": "def acquisition(self):\r\n fs, _ = self.gp.predict(self.gp.X)\r\n next_fs, vars = self.gp.predict(self.X_s)\r\n opt = np.min(fs)\r\n improves = opt - next_fs - self.xsi\r\n if not self.minimize:\r\n improve = -improves\r\n Z = improves / vars\r\n eis = improves * norm.cdf(Z) + vars * norm.pdf(Z)\r\n return self.X_s[np.argmax(eis)], eis", "title": "" }, { "docid": "bf584d27115468808d757b55d3600303", "score": "0.4003269", "text": "def get_obs(self, time):\r\n return self.obs, self.obs_error", "title": "" }, { "docid": "b7dd2427590ae25c1e93858bc8a52562", "score": "0.40024698", "text": 
"def getCombination(self, set_A, Exp=3):\n K_A = len(set_A)\n if Exp==1:\n self.C = np.ones((1,self.nD2DPairs))*np.nan\n nC = 0;\n for nKs in range (1, (math.floor((self.nD2DPairs-1)/2)+1)):\n set_S = list(combinations(range(0,self.nD2DPairs),nKs))\n for ni in range(len(set_S)):\n self.C[nC,set_S[ni]]= set_S[ni]\n nC = nC + 1\n self.C = np.vstack((self.C, np.nan*np.ones((1,self.nD2DPairs))))\n return self.C \n if Exp == 2:\n if K_A <= 2:\n return np.ones((1,self.nD2DPairs))*1j\n elif K_A == self.nD2DPairs:\n self.C = np.ones((1,self.nD2DPairs))*np.nan\n nC = 0\n for nKs in range (1, (math.floor((self.nD2DPairs-1)/2)+1)):\n set_S = list(combinations(range(0,self.nD2DPairs),nKs))\n for ni in range(len(set_S)):\n self.C[nC,set_S[ni]]= set_S[ni]\n nC = nC + 1\n self.C = np.vstack((self.C, np.nan*np.ones((1,self.nD2DPairs))))\n self.C = self.C[:-1]\n return self.C \n else:\n self.C = np.ones((1,self.nD2DPairs))*1j\n nC = 0\n for nKj in range(2, K_A+1):\n combJDevice = list(combinations(set_A,nKj))\n for nKs in range(1, min(nKj,self.nD2DPairs-nKj+1)):\n for nj in range(len(combJDevice)):\n set_S = list(set(range(0,self.nD2DPairs))-set(combJDevice[nj]))\n combSDevice = list(combinations(set_S,nKs))\n for ni in range (len(combSDevice)):\n self.C[nC,combJDevice[nj]] = np.nan\n self.C[nC,combSDevice[ni]]= combSDevice[ni]\n nC = nC + 1\n self.C = np.vstack((self.C, 1j*np.ones((1,self.nD2DPairs))))\n self.C = self.C[:-1]\n return self.C \n\n if Exp == 3:\n if K_A <= 2:\n return np.ones((1,self.nD2DPairs))*1j\n else:\n self.C = []\n for seq in itertools.product([0,1,2], repeat=K_A):\n c_s = list(seq)\n if c_s.count(2) >= 2 and c_s.count(2) <= K_A-1 and c_s.count(1) >= 1 and c_s.count(1) <= c_s.count(2)-1:\n np_cs = np.array(c_s, dtype=complex) \n np_cs[np_cs == 2] = np.nan\n np_cs[np_cs == 0] = 1j\n ks_idx = np.where(~np.isnan(np_cs) & np.isreal(np_cs))\n np_cs[ks_idx] = ks_idx\n self.C.append(np_cs)\n return np.array([element for (i,element) in enumerate(self.C)])", "title": "" }, { "docid": "1717a3f8c612d5121b915a4163a69863", "score": "0.39981684", "text": "def GetObservation(self):\n observation = []\n observation.extend(self.GetMotorAngles().tolist())\n observation.extend(self.GetMotorVelocities().tolist())\n observation.extend(self.GetMotorTorques().tolist())\n observation.extend(list(self.GetBaseOrientation()))\n observation.extend(list(np.array(self.GetBasePosition()[0:2])-np.array(self.getTarget())))\n return observation", "title": "" }, { "docid": "5008ac13577ff6d76f216c199d4bb178", "score": "0.3995169", "text": "def goal_run(self, current_params):\n # display.plot_C3([self.logdir], only_iterations=False)\n exp_values = []\n sim_values = []\n exp_stds = []\n exp_shots = []\n goals = []\n grads = []\n seq_weigths = []\n count = 0\n seqs_pp = self.seqs_per_point\n #TODO: seq per point is not constant. 
Remove.\n\n for target, data in self.learn_data.items():\n\n self.learn_from = data['seqs_grouped_by_param_set']\n self.gateset_opt_map = data['opt_map']\n indeces = self.select_from_data(self.batch_sizes[target])\n\n for ipar in indeces:\n\n count += 1\n m = self.learn_from[ipar]\n gateset_params = m['params']\n gateset_opt_map = self.gateset_opt_map\n m_vals = m['results'][:seqs_pp]\n m_stds = m['results_std'][:seqs_pp]\n m_shots = m['shots'][:seqs_pp]\n sequences = m['seqs'][:seqs_pp]\n num_seqs = len(sequences)\n if target == 'all':\n num_seqs = len(sequences) * 3\n\n self.exp.set_parameters(current_params, self.opt_map, scaled=True)\n if \"init_gateset_params\" in self.__dict__.keys():\n self.exp.gateset.set_parameters(\n self.init_gateset_params,\n self.init_gateset_opt_map,\n scaled=False\n )\n self.exp.gateset.set_parameters(\n gateset_params, gateset_opt_map, scaled=False\n )\n # We find the unique gates used in the sequence and compute\n # only them.\n self.exp.opt_gates = list(\n set(itertools.chain.from_iterable(sequences))\n )\n self.exp.get_gates()\n self.exp.evaluate(sequences)\n sim_vals = self.exp.process(labels=self.state_labels[target])\n\n exp_stds.extend(m_stds)\n exp_shots.extend(m_shots)\n\n if target == 'all':\n goal = neg_loglkh_multinom_norm(\n m_vals,\n tf.stack(sim_vals),\n tf.constant(m_stds, dtype=tf.float64),\n tf.constant(m_shots, dtype=tf.float64)\n )\n else:\n goal = g_LL_prime(\n m_vals,\n tf.stack(sim_vals),\n tf.constant(m_stds, dtype=tf.float64),\n tf.constant(m_shots, dtype=tf.float64)\n )\n goals.append(goal.numpy())\n seq_weigths.append(num_seqs)\n sim_values.extend(sim_vals)\n exp_values.extend(m_vals)\n\n with open(self.logdir + self.logname, 'a') as logfile:\n logfile.write(\n \"\\n Parameterset {}, #{} of {}:\\n {}\\n {}\\n\".format(\n ipar + 1,\n count,\n len(indeces),\n json.dumps(self.gateset_opt_map),\n self.exp.gateset.get_parameters(\n self.gateset_opt_map, to_str=True\n ),\n )\n )\n logfile.write(\n \"Sequence Simulation Experiment Std Shots\"\n \" Diff\\n\"\n )\n\n for iseq in range(len(sequences)):\n m_val = np.array(m_vals[iseq])\n m_std = np.array(m_stds[iseq])\n shots = np.array(m_shots[iseq])\n sim_val = sim_vals[iseq].numpy()\n int_len = len(str(num_seqs))\n with open(self.logdir + self.logname, 'a') as logfile:\n for ii in range(len(sim_val)):\n logfile.write(\n f\"{iseq + 1:8} \"\n f\"{float(sim_val[ii]):8.6f} \"\n f\"{float(m_val[ii]):8.6f} \"\n f\"{float(m_std[ii]):8.6f} \"\n f\"{float(shots[0]):8} \"\n f\"{float(m_val[ii]-sim_val[ii]): 8.6f}\\n\"\n )\n logfile.flush()\n\n goal = g_LL_prime_combined(goals, seq_weigths)\n # TODO make gradient free function use any fom\n\n with open(self.logdir + self.logname, 'a') as logfile:\n logfile.write(\"\\nFinished batch with \")\n logfile.write(\"{}: {}\\n\".format(self.fom.__name__, goal))\n # print(\"{}: {}\".format(self.fom.__name__, goal))\n for cb_fom in self.callback_foms:\n val = float(\n cb_fom(exp_values, sim_values, exp_stds, exp_shots).numpy()\n )\n logfile.write(\"{}: {}\\n\".format(cb_fom.__name__, val))\n # print(\"{}: {}\".format(cb_fom.__name__, val))\n # print(\"\")\n logfile.flush()\n\n for cb_fig in self.callback_figs:\n fig = cb_fig(exp_values, sim_values.numpy(), exp_stds)\n fig.savefig(\n self.logdir\n + cb_fig.__name__ + '/'\n + 'eval:' + str(self.evaluation) + \"__\"\n + self.fom.__name__ + str(round(goal, 3))\n + '.png'\n )\n plt.close(fig)\n\n self.optim_status['params'] = [\n par.numpy().tolist()\n for par in self.exp.get_parameters(self.opt_map)\n ]\n 
self.optim_status['goal'] = goal\n self.optim_status['time'] = time.asctime()\n self.evaluation += 1\n return goal", "title": "" }, { "docid": "3acbb054e502f7af2d2bfe9ddbbd7c50", "score": "0.39951217", "text": "def get_mvttimes(data, target_data):\n\n # Shift the data and calculate target locations if you haven't\n if 'X' not in data.keys():\n print('shifting')\n data = shift_x(data, target_data)\n if 't_dist' not in data.keys():\n print('t)disting')\n data = t_dist(data, target_data)\n\n vigor = {}\n\n # Pull out variables for ease of use later\n x, y, vx, vy, v = data['X'], data['Y'], data['Right_HandXVel'], data['Right_HandYVel'], data['Right_HandVel'] \n\n tx = data['ty']\n ty = data['tx']\n t_diff = data['t_diff']\n P = data['P']\n\n # Determine index's for spcific points\n vigor.update({'idx': {}})\n\n # Find target show.\n data['EVENTS']['TIMES'] = np.squeeze(data['EVENTS']['TIMES'])\n for k, item in enumerate(data['EVENTS']['LABELS']):\n if item[0:9] == 'TARGET_ON':\n break\n elif k == len(data['EVENTS']['LABELS']):\n k = -1\n if k != -1:\n idx_targetshow = int(1000*float('%0.2f' % data['EVENTS']['TIMES'][k]))\n else:\n idx_targetshow = 0\n \n # Find movement onset.\n if max(P[idx_targetshow:]) > 0.05:\n b = next(i for i, p in enumerate(P[idx_targetshow:]) if p > .05)\n elif max(P[idx_targetshow:]) > 0.025:\n b = next(i for i, p in enumerate(P[idx_targetshow:]) if p > .025)\n else:\n print('going 3')\n maxp = max(P[idx_targetshow:])\n b = next(i for i, p in enumerate(P[idx_targetshow:]) if p > 0.75*maxp)\n\n b += idx_targetshow\n a = b\n \n for idx_onset in np.arange(b,50,-1):\n if np.std(t_diff[idx_onset:idx_onset+50])<2e-3 and v[idx_onset]<.03:\n break\n try:\n idx_onset += 0\n except:\n print('Didn\\'t find onset')\n idx_onset = b\n\n # This is a plot checker. 
Generally uneeded.\n # if np.mean(P[idx_onset-50:idx_onset])>.01:\n # import matplotlib.pyplot as plt\n # plt.plot(P)\n # plt.plot(idx_onset,P[idx_onset],'x',color='red')\n # plt.show()\n\n # plt.plot(x,y,'o')\n # plt.plot(0,0,'x',markersize=10, color = 'green')\n # plt.plot(x[idx_onset],y[idx_onset],'x',markersize=10,color = 'red')\n # plt.show()\n\n # Find peak velocity.\n idx_peakv = np.argmax(v)\n \n # Find at target.\n if max(P) > 0.1:\n idx_attarget = next(i for i, p in enumerate(P[a:-1]) if p > 0.10)+a\n elif min(data['t_dist']) < 0.04:\n idx_attarget = next(i for i, d in enumerate(data['t_dist']) if d < 0.04)\n elif np.argmin(data['t_dist']) - idx_onset>0:\n idx_attarget = np.argmin(data['t_dist'])\n else:\n idx_attarget = np.argmax(P)\n\n # Find moveback.\n try:\n idx_moveback = next(i for i, d in enumerate(P[idx_attarget:]) if P[i+idx_attarget]<P[i-1+idx_attarget])+idx_attarget\n except:\n idx_moveback = np.argmax(P)\n\n if idx_moveback < idx_onset:\n idx_moveback = idx_onset + 100\n\n for idx_offset in range(idx_moveback,len(P)):\n if np.std(t_diff[idx_offset:idx_offset+40])<2e-3 and v[idx_offset]<.03:\n break\n\n # Get movement duration\n move_dur = 0.001*(idx_attarget - idx_onset)\n \n # Get reaction time\n react_time = 0.001*(idx_onset-idx_targetshow)\n if react_time < -0.1:\n print('REACT < -0.1 U BORKED IT.')\n\n # Get peak velocity\n peak_vel = np.max(data['Right_HandVel'][idx_onset:idx_moveback])\n\n # Return peak velocity\n if idx_offset - idx_moveback > 10:\n peak_vel_moveback = np.max(data['Right_HandVel'][idx_moveback:idx_offset])\n else:\n peak_vel_moveback = np.max(data['Right_HandVel'][idx_moveback-10:idx_offset])\n\n # Maximum Excursion\n maxex = np.max(P)\n\n vigor['idx'].update({'onset': idx_onset,\n 'peakv': idx_peakv,\n 'at_target': idx_attarget,\n 'offset': idx_offset,\n 'target_show': idx_targetshow,\n 'move_back': idx_moveback})\n\n # Another plot checker.\n # if react_time<.1:\n # import matplotlib.pyplot as plt\n # fig, (ax1,ax2) = plt.subplots(2,1)\n # ax1.plot(data['Right_HandVel'])\n # ax1.plot(vigor['idx']['onset'],\n # data['Right_HandVel'][vigor['idx']['onset']],\n # marker = 'x', color = 'red')\n # ax2.plot(t_diff)\n # ax2.plot(vigor['idx']['onset'],\n # t_diff[vigor['idx']['onset']],\n # marker = 'x', color = 'red')\n # plt.show()\n # blank = 1\n # input(\"Press Enter to continue...\")\n\n vigor.update({'move_dur': move_dur,\n 'peak_vel': peak_vel,\n 'peak_vel_moveback': peak_vel_moveback,\n 'react_time': react_time,\n 'maxex': maxex})\n return vigor", "title": "" }, { "docid": "2b5c1b9016c88fec795e9da5e64fcc46", "score": "0.39946437", "text": "def find_sequence_on_scale(layers, scale, symbols, start, target, recorder):\n\n # initialize\n active_symbols = [start]\n targets = [target]\n\n tick = 0\n any_target_found = False\n recorder.record_expansion(scale, tick, start, targets, active_symbols, utils.intersect(targets, active_symbols))\n\n any_target_found = utils.intersect(targets, active_symbols) != []\n while not any_target_found:\n # update 'retrieval tick' -> this emulates refractory periods of place\n # cells. 
This comes from marking symbols as \"expanded\"\n for s in active_symbols:\n symbols[s].retrieval_tick = tick\n\n # predict next batch of symbols, and remove currently active ones\n next_symbols = layers[scale].expand(active_symbols, symbols, tick)\n active_symbols = [s for s in next_symbols if s not in active_symbols and symbols[s].retrieval_tick < 0]\n\n # update time\n tick += 1\n\n # record everything!!1\n recorder.record_expansion(scale, tick, start, targets, active_symbols, utils.intersect(targets, active_symbols))\n\n # check if we reached the destination\n any_target_found = utils.intersect(targets, active_symbols) != []\n if any_target_found:\n break\n\n return utils.intersect(targets, active_symbols)", "title": "" }, { "docid": "bdef2b430bc17ab83fbb84cd71cb42f0", "score": "0.39909536", "text": "def return_relax_times():\n\n # No data.\n if not hasattr(cdp, 'relax_times'):\n return None\n\n # Initialise.\n relax_times = zeros((count_exp(), count_frq()), float64)\n\n # Loop over the experiment types.\n for exp_type, frq, point, time, ei, mi, di, ti in loop_exp_frq_point_time(return_indices=True):\n # Fetch all of the matching intensity keys.\n keys = find_intensity_keys(exp_type=exp_type, frq=frq, point=point, time=time, raise_error=False)\n\n # No data.\n if not len(keys):\n continue\n\n # Add the data.\n relax_times[ei][mi] = cdp.relax_times[keys[0]]\n\n # Return the data.\n return relax_times", "title": "" }, { "docid": "8cb39328a2dba750599be19273418da2", "score": "0.39903674", "text": "def get_selected_pixels(charge_map, min_charge_for_certain_selection,\n number_of_rings, geom,\n min_npixels_for_full_event=500):\n\n # Proceed with the identification of interesting pixels to be saved.\n # Keep pixels that have a charge above min_charge_for_certain_selection:\n selected_pixels = (charge_map > min_charge_for_certain_selection)\n\n # Add \"number_of_rings\" rings of pixels around the already selected ones:\n for ring in range(number_of_rings):\n # we add-up (sum) the selected-pixel-wise map of neighbors, to find\n # those who appear at least once (>0). 
Those should be added:\n additional_pixels = (np.sum(geom.neighbor_matrix[selected_pixels],\n axis=0)>0)\n selected_pixels |= additional_pixels\n\n # if more than min_npixels_for_full_event were selected, keep whole camera:\n if selected_pixels.sum() > min_npixels_for_full_event:\n selected_pixels = np.array(geom.n_pixels * [True])\n\n return selected_pixels", "title": "" }, { "docid": "5681393c3743618275e7c50c59e462ba", "score": "0.39840835", "text": "def get_target_Qs(self, sess, mini_batch):\n q_next_state = sess.run(self.output,\n feed_dict={\n self.inputs: mini_batch.get('next_states')\n })\n q_target_ns = sess.run(\n target_net.output,\n feed_dict={\n target_net.inputs: mini_batch.get('next_states')})\n\n target_Qs = []\n\n for i in range(mini_batch.get('batch_len')):\n terminal = mini_batch.get('dones')[i]\n action = np.argmax(q_next_state[i])\n rewards = mini_batch.get('rewards')[i]\n\n if terminal:\n target_Qs.append(rewards)\n else:\n target_Qs.append(rewards + self.gamma * q_target_ns[i][action])\n\n targets_mb = [m_b for m_b in target_Qs]\n\n return targets_mb", "title": "" }, { "docid": "7e7acd3e2f04d82657759704d735214a", "score": "0.39826855", "text": "def bestOrbitInnerParams(r,s,oppositions):\r\n\r\n # We want to minimise the max error over all possible oppositions here \r\n minimum = 100\r\n \r\n \r\n for c in np.linspace(1,360,36000): # precision 0.01 degree henceforth \r\n for e1 in np.linspace(1,5,400): # precision 0.01 units \r\n for e2 in np.linspace(1,360,36000):\r\n for z in np.linspace(1,360,36000):\r\n errors,maxError = MarsEquantModel(c,r,e1,e2,z,s,oppositions)\r\n if(minimum>maxError):\r\n minimum = maxError\r\n errors_opt = errors\r\n maxError_opt = maxError\r\n c_opt = c\r\n e1_opt = e1\r\n e2_opt = e2\r\n z_opt = z\r\n \r\n \r\n return c_opt,e1_opt,e2_opt,z_opt,errors_opt,maxError_opt", "title": "" }, { "docid": "a2d04b1771ea20023f065ab4d48d9ba2", "score": "0.39818808", "text": "def get_features_for_target(self, target):\n return self.target_features[target]", "title": "" }, { "docid": "003898c0097d0187f8ed4f094b4b988b", "score": "0.39748853", "text": "def get_target_allocations(self):\n equity_curve = self.get_equity_curve()\n alloc_df = pd.DataFrame(self.target_allocations).set_index('Date')\n alloc_df.index = alloc_df.index.date\n alloc_df = alloc_df.reindex(index=equity_curve.index, method='ffill')\n if self.burn_in_dt is not None:\n alloc_df = alloc_df[self.burn_in_dt:]\n return alloc_df", "title": "" }, { "docid": "3ed4eecd00a00e7ef9220dad2ba21c85", "score": "0.39744982", "text": "def create_optical_elements(self):\n # Create telescope pupil\n r_pupil = (self.piaa_r_in_mm * 2)/self.dx\n r_obstruction = (self.piaa_r_in_mm * 2 * self.r0)/self.dx\n self.telescope_pupil = utils.annulus(self.npix, r_pupil, r_obstruction)\n \n # Create PIAA\n if self.use_piaa:\n self.piaa_optics = optics.PIAAOptics(self.alpha, self.r0, \n self.frac_to_focus, self.n_med, \n self.thickness, \n self.piaa_r_in_mm, \n self.real_heights, self.dx, \n self.npix, \n self.wavelength_in_mm)\n \n # Create lens #1\n self.lens_1 = optics.CircularLens(self.focal_length_1, \n self.piaa_r_in_mm * 2)\n \n # Create lens #2\n self.lens_2 = optics.CircularLens(self.focal_length_2, \n self.lens_2_diameter)\n \n # Create microlens array\n if self.use_microlens_array:\n self.m_array = optics.MicrolensArray_3x3(self.focal_length_3, \n self.lenslet_width)", "title": "" }, { "docid": "64ef89ad2ad88610cb3ad155efa59b1f", "score": "0.39710587", "text": "def get_spec():\n playing_world = 
get_playing_world()\n\n # build the action_space descriptor\n action_space_desc = {}\n input_ = ue.get_mutable_default(InputSettings)\n # go through all action mappings\n # TODO: FOR NOW: ignore all non-keyboard mappings for simplicity.\n # TODO: Later, we will have to create a tick box to specify which actions should be sent to ML\n for action in input_.ActionMappings:\n if re.search(r'Gamepad|Mouse|Thumbstick', action.Key.KeyName):\n continue\n if action.ActionName not in action_space_desc:\n action_space_desc[action.ActionName] = {\"type\": \"action\", \"keys\": [action.Key.KeyName]}\n else:\n action_space_desc[action.ActionName][\"keys\"].append(action.Key.KeyName)\n for axis in input_.AxisMappings:\n if re.search(r'Gamepad|Mouse|Thumbstick', axis.Key.KeyName):\n continue\n if axis.AxisName not in action_space_desc:\n action_space_desc[axis.AxisName] = {\"type\": \"axis\", \"keys\": [(axis.Key.KeyName, axis.Scale)]}\n else:\n action_space_desc[axis.AxisName][\"keys\"].append((axis.Key.KeyName, axis.Scale))\n ue.log(\"action_space_desc: {}\".format(action_space_desc))\n\n # DEBUG\n #pydevd.settrace(\"localhost\", port=20023, stdoutToServer=True, stderrToServer=True) # DEBUG\n # END: DEBUG\n\n # build the observation_space descriptor\n observation_space_desc = {}\n for observer in MLObserver.GetRegisteredObservers():\n owner, obs_name = sanity_check_observer(observer, playing_world)\n #ue.log(\"obs={} name={} owner={} enabled={} gray={} type={}\".\n # format(observer, obs_name, owner, observer.bEnabled, observer.bGrayscale, observer.ObserverType))\n # ignore reward observer (ObserverType=1) and is-terminal observer (ObserverType=2)\n if not owner or observer.ObserverType > 0:\n continue\n\n # ue.log(\"DEBUG: get_spec observer {}\".format(obs_name))\n\n # this observer returns a camera image\n if observer.bScreenCapture:\n try:\n _, texture = get_scene_capture_and_texture(owner, observer)\n except RuntimeError as e:\n return {\"status\": \"error\", \"message\": \"{}\".format(e)}\n observation_space_desc[obs_name+\"/camera\"] = {\n \"type\": \"IntBox\",\n \"shape\": (texture.SizeX, texture.SizeY) if observer.bGrayscale else (texture.SizeX, texture.SizeY, 3),\n \"min\": 0, \"max\": 255}\n\n # go through non-camera/capture properties that need to be observed by this Observer\n for observed_prop in observer.ObservedProperties:\n if not observed_prop.bEnabled:\n continue\n prop_name = observed_prop.PropName\n if not owner.has_property(prop_name):\n continue\n\n type_ = type(owner.get_property(prop_name))\n if type_ == ue.FVector or type_ == ue.FRotator:\n desc = {\"type\": \"Continuous\", \"shape\": (3,)} # no min/max -> will be derived from samples\n elif type_ == ue.UObject:\n desc = {\"type\": \"str\"}\n elif type_ == bool:\n desc = {\"type\": \"Bool\"}\n elif type_ == float:\n desc = {\"type\": \"Continuous\", \"shape\": (1,)}\n elif type_ == int:\n desc = {\"type\": \"IntBox\", \"shape\": (1,)}\n else:\n return {\"status\": \"error\", \"message\": \"Observed property {} has an unsupported type ({})\".\n format(prop_name, type_)}\n\n observation_space_desc[obs_name+\"/\"+prop_name] = desc\n\n ue.log(\"observation_space_desc: {}\".format(observation_space_desc))\n\n return {\"status\": \"ok\", \"game_name\": get_project_name(), \"action_space_desc\": action_space_desc,\n \"observation_space_desc\": observation_space_desc}", "title": "" }, { "docid": "333a3458cdf2be676340fd9d129d2c39", "score": "0.39659464", "text": "def returnParam(self):\n\n initial_theta = 
self.fit_result.x[:self.num_patterns]\n K_values = self.fit_result.x[self.num_patterns:]\n\n out = np.zeros((len(K_values),self.num_rounds),dtype=float)\n for i in range(self.num_rounds):\n out[:,i] = np.exp(self.log_degeneracy + initial_theta + K_values*(i+1))\n out[:,i] = out[:,i]/sum(out[:,i])\n\n return initial_theta, K_values, out", "title": "" }, { "docid": "adc686a166c7ba19b96508e3618db0f1", "score": "0.3964855", "text": "def get_current_parameters(self):\n\n # First assignment\n X_Offset = self.pictures[self.sorted_idx_list[self.idx]].X_Offset\n Y_Offset = self.pictures[self.sorted_idx_list[self.idx]].Y_Offset\n rotation = self.pictures[self.sorted_idx_list[self.idx]].rotation\n __Zoom__ = self.pictures[self.sorted_idx_list[self.idx]].zoom\n \n # Features assignation\n Features = {\"X_Offset\":(X_Offset , None), \n \"Y_Offset\":(Y_Offset , None),\n \"Rotation\":(rotation , None),\n \"__Zoom__\":(__Zoom__ , None)}\n\n return Features", "title": "" }, { "docid": "4b9107c01c12d895e45e34fa86264df8", "score": "0.3964035", "text": "def get_desired_context(self):\n if self.active:\n # Active task selection: determine next context via Bayesian\n # Optimization\n self.context, self.parameters = \\\n self._determine_contextparams(self.bayes_opt)\n else:\n raise NotImplementedError(\"Passive ACES not implemented\")\n ## Choose context randomly and only choose next parameters\n #self.context = self.rng.uniform(size=self.context_dims) \\\n # * (self.context_boundaries[:, 1]\n # - self.context_boundaries[:, 0]) \\\n # + self.context_boundaries[:, 0]\n ## Repeat context self.n_query_points times, s.t. ACES can only\n ## select parameters for this context\n #contexts = np.repeat(self.context, self.n_query_points)\n #contexts = contexts.reshape(-1, self.n_query_points).T\n #_, self.parameters = \\\n # self._determine_contextparams(self.bayes_opt, contexts)\n # Return only context, the parameters are later returned in\n # get_next_parameters\n return self.context", "title": "" }, { "docid": "700cde16b8460bdaa3d8dc4c734ddc2f", "score": "0.39596957", "text": "def getExactImageFeatures(self, cx, cy, thd, f=1.0):\n # run through all of the point features\n # cast a ray FROM the point feature to the center (cx,cy)\n # see if it hits anything in the world before reaching the center\n # if so, it's blocked; if not blocked and on the same side as thd, it's visible\n # then project and add to image feature list\n\n imagefeatures = {} # dictionary for all of the image features with\n # imx as the key and global world coords as the value\n # this value, in turn, can be used from self.pointFeatures\n # to obtain the attached segments, if desired\n\n # the direction the camera is pointing\n costh = math.cos( thd * math.pi/180.0 )\n sinth = math.sin( thd * math.pi/180.0 )\n\n # for each world feature in the map\n for (wx,wy) in self.pointFeatures:\n # print \"handling wx,wy=\", wx, wy\n # is it on the correct side of the camera?\n # that is, does the vector have a positive dot product with\n # the camera direction?\n dx = wx-cx\n dy = wy-cy\n distanceAlongTheta = dx*costh + dy*sinth \n # print \" distanceAlongTheta is\", distanceAlongTheta\n if distanceAlongTheta <= 0.0: continue\n\n # so it's on the correct side of the camera\n # now, we'll cast the ray (wx,wy) + s(dx,dy) through the world\n # to see what it hits... 
(note we have to point back to the optical center)\n xi, yi, valid, d, item = self.rayTrace(wx,wy,-dx,-dy)\n # print \"rayTrace results are\", self.rayTrace(wx,wy,-dx,-dy)\n if valid > 0 and d < math.sqrt(dx*dx + dy*dy): continue # hit something too close!\n\n # ok, it did not hit anything between it and the camera\n # so, this feature is visible - now we project it\n # we already know the feature's distance along the optical axis = distanceAlongTheta\n # what is the signed distance perpendicular to the optical axis?\n #\n # I think cos(thd+90) = -sin(thd) and sin(thd+90) = cos(thd)\n # note that this yields an image coordinate system with origin\n # along the optical axis (OK) and positive to the left (so-so)\n distancePerpTheta = -sinth*dx + costh*dy\n # print \" distancePerpTheta is\", distancePerpTheta\n\n # projecting to the single imx coordinate is then easy\n imx = f * distancePerpTheta / float(distanceAlongTheta)\n # print \" imx is\", imx\n\n # load it into our dictionary\n imagefeatures[imx] = (wx,wy)\n\n # should this just set self.imagefeatures?\n return imagefeatures", "title": "" }, { "docid": "b5c130456bc300ea17fc72e65bda9cea", "score": "0.3953749", "text": "def _exposure_time_(img_hk: np.ndarray) -> np.ndarray:\n # need first bit of address 121\n reg_pgagainfactor = img_hk['DET_BLACKCOL'] & 0x1\n reg_pgagain = img_hk['DET_PGAGAIN']\n exp_time = (1 + 0.2 * reg_pgagain) * 2 ** reg_pgagainfactor\n\n return MCP_TO_SEC * exp_time", "title": "" }, { "docid": "25c2de72dc88ac64497e5210a95efc8c", "score": "0.3950059", "text": "def test_target_value_set_range_away(device_heat_cool_away):\n device = device_heat_cool_away\n assert device.values.setpoint_heating.data == 2\n assert device.values.setpoint_cooling.data == 9\n assert device.values.setpoint_away_heating.data == 1\n assert device.values.setpoint_away_cooling.data == 10\n device.set_preset_mode(PRESET_AWAY)\n device.set_temperature(**{ATTR_TARGET_TEMP_LOW: 0, ATTR_TARGET_TEMP_HIGH: 11})\n assert device.values.setpoint_heating.data == 2\n assert device.values.setpoint_cooling.data == 9\n assert device.values.setpoint_away_heating.data == 0\n assert device.values.setpoint_away_cooling.data == 11", "title": "" }, { "docid": "1c25982fb5c185cef0b6f9b43b397f42", "score": "0.3941342", "text": "def _get_obs(self):\r\n obs = self.env._get_obs()\r\n obs['achieved_goal'] = np.concatenate([obs['achieved_goal'], self.defaults])\r\n obs['desired_goal'] = np.concatenate([obs['desired_goal'], self.defaults])\r\n return obs", "title": "" }, { "docid": "00592f268ec1459584a5682e7e089c66", "score": "0.39374137", "text": "def get_sound_devices():\n TARGET_VIRTUAL_DEVICE: str = 'CABLE Input'\n INPUT_EXCEPT_DEVICE_LIST: List[str] = ['CABLE Output', 'Microsoft Sound Mapper - Input',\n 'Microsoft Sound Mapper - Output', 'Microsoft 사운드 매퍼',\n '주 사운드 캡처 드라이버', '주 사운드 드라이버', '라인 입력', 'Output ()', '스테레오 믹스']\n OUTPUT_EXCEPT_DEVICE_LIST: List[str] = ['CABLE Output', 'Microsoft Sound Mapper - Input', 'Microsoft 사운드 매퍼',\n 'Microsoft Sound Mapper - Output', '스테레오 믹스', 'SPDIF Out',\n '주 사운드 캡처 드라이버', '주 사운드 드라이버', '라인 입력', 'Output ()', '마이크']\n\n input_device_dict: Dict[str, int] = dict()\n output_device_dict: Dict[str, int] = dict()\n\n device_list: object = sd.query_devices()\n\n for index, device in enumerate(device_list):\n if device['default_samplerate'] != 44100.0: # if not sample rate 44100:\n continue\n\n if device['hostapi'] != 0: # if not host api is 0\n continue\n\n if '(' in device['name']: # make a device name more beautiful\n device['name'] = 
device['name'][0:device['name'].index('(')]\n\n # get input devices\n if device['max_input_channels'] != int(0) and device[\"max_output_channels\"] == int(0):\n if device['name'] in INPUT_EXCEPT_DEVICE_LIST: # exception based on device name\n continue\n\n input_device_dict[device['name']] = index\n\n # get output devices\n if device['max_input_channels'] == int(0) and device[\"max_output_channels\"] != int(0):\n if device['name'] in OUTPUT_EXCEPT_DEVICE_LIST: # exception based on device name\n continue\n\n if TARGET_VIRTUAL_DEVICE in device['name']:\n device['name'] = '딥러닝 기반 음향 품질향상 모드'\n\n output_device_dict[device['name']] = index\n\n return input_device_dict, output_device_dict", "title": "" }, { "docid": "584af38c97f808c88583a92fe8a8fdbd", "score": "0.39304507", "text": "def generate_all_ventilation_matrices_for_all_door_open_close_combination(self, save=True):\n # get all flow paths which are internal doors\n all_doors = self.get_all_flow_paths_of_type(search_type_term='oor')\n external_doors = all_doors[(all_doors['n#'] == '-1') | (all_doors['m#'] == '-1')]\n self.external_door_matrix_idx = np.where(np.isin(all_doors['P#'].values, external_doors['P#'].values))[0]\n print(f'Number of matrices to be generated: {2**len(all_doors)}')\n self.all_door_matrices = []\n for i in range(2**len(all_doors)):\n # if i % 4 == 0:\n print(f'Generating all matrices: {i/2**len(all_doors):0.1%}', end='\\r')\n binary_ref = split(int_to_binary_ref(i,len(all_doors)))\n flow_path_types = [self.door_open if x == '1' else self.door_closed for x in binary_ref]\n for path, value in zip(all_doors['P#'].values, flow_path_types):\n self.set_flow_path(int(path), 'type', value)\n self.run_simulation(verbose=False)\n self.all_door_matrices.append(self.vent_mat)\n if save:\n self.save_all_ventilation_matrices()", "title": "" } ]
13a1f4a01c8bdb5bf9ed48373336cc8e
Clone(itkIsolatedConnectedImageFilterIF2IF2 self) -> itkIsolatedConnectedImageFilterIF2IF2_Pointer
[ { "docid": "8e4deb70da60c0e10db653b12fc735da", "score": "0.9369704", "text": "def Clone(self) -> \"itkIsolatedConnectedImageFilterIF2IF2_Pointer\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIF2IF2_Clone(self)", "title": "" } ]
[ { "docid": "8381db5192bb2f2dea69a19d8ddd520c", "score": "0.88956755", "text": "def Clone(self) -> \"itkIsolatedConnectedImageFilterIUC2IUC2_Pointer\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIUC2IUC2_Clone(self)", "title": "" }, { "docid": "99f90611bd305797a95531be7d4caf42", "score": "0.88624185", "text": "def Clone(self) -> \"itkIsolatedConnectedImageFilterISS2ISS2_Pointer\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterISS2ISS2_Clone(self)", "title": "" }, { "docid": "7825b2e8312b6754481cf484e2363d99", "score": "0.8542051", "text": "def Clone(self) -> \"itkComparisonImageFilterIF2IF2_Pointer\":\n return _itkComparisonImageFilterPython.itkComparisonImageFilterIF2IF2_Clone(self)", "title": "" }, { "docid": "1a5b73410458969a6b962268d91e3622", "score": "0.8530124", "text": "def Clone(self) -> \"itkAsinImageFilterIF2IF2_Pointer\":\n return _itkAsinImageFilterPython.itkAsinImageFilterIF2IF2_Clone(self)", "title": "" }, { "docid": "c27e906e4ea99449b8aa75d125086711", "score": "0.8471579", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF22ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF22ISS2_Clone(self)", "title": "" }, { "docid": "73495f9a1c09da78bfa6afef86e93057", "score": "0.84662867", "text": "def Clone(self) -> \"itkSLICImageFilterIF2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IULL2_Clone(self)", "title": "" }, { "docid": "2ab224fdead545c661054ab6e2b98ad5", "score": "0.84619683", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF22ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF22ISS2_Clone(self)", "title": "" }, { "docid": "4a10555fbcd78a025bd26bd9fa954c0c", "score": "0.8456684", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUC2ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2ISS2_Clone(self)", "title": "" }, { "docid": "2c305caf4a4e21ee175114400db39526", "score": "0.84557253", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF42ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF42ISS2_Clone(self)", "title": "" }, { "docid": "8fa35d75e76cbb662bc5e79590d9d4d9", "score": "0.84467244", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF32ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF32ISS2_Clone(self)", "title": "" }, { "docid": "c111865138767d5cde9a603c26d64bd9", "score": "0.8440387", "text": "def Clone(self) -> \"itkExpImageFilterIF2IF2_Pointer\":\n return _itkExpImageFilterPython.itkExpImageFilterIF2IF2_Clone(self)", "title": "" }, { "docid": "736cb2c5f1c0cf12f9b0c7e35a63e44c", "score": "0.8424018", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF32ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF32ISS2_Clone(self)", "title": "" }, { "docid": "f6810866e0f586b292c487af2207c59c", "score": "0.840776", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF22IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF22IUC2_Clone(self)", "title": "" }, { "docid": "3e5cf7f94960aca73dc35c35e6b9f81f", "score": "0.840732", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF22IUC2_Pointer\":\n return 
_itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF22IUC2_Clone(self)", "title": "" }, { "docid": "c28a10b50080d411325ddd2b0d7b4120", "score": "0.839802", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterISS2IUL2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS2IUL2_Clone(self)", "title": "" }, { "docid": "7b73033d0a9ac626354838cd663137bd", "score": "0.839143", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF42ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF42ISS2_Clone(self)", "title": "" }, { "docid": "9b3ffa1faed2900c000c09f9a27f3b17", "score": "0.8370632", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF42IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF42IUC2_Clone(self)", "title": "" }, { "docid": "30e315c1fd1bc0ea0be2f78529882920", "score": "0.83484226", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterISS2IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS2IUC2_Clone(self)", "title": "" }, { "docid": "acffcf8940e5def4186e37a6ea6a71a2", "score": "0.83463746", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterISS2ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS2ISS2_Clone(self)", "title": "" }, { "docid": "e746dc490a8ae4484a616430c760caee", "score": "0.83210397", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF42IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF42IUC2_Clone(self)", "title": "" }, { "docid": "af5f876d803e8c13bb11701604b25ad8", "score": "0.8294856", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF42IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF42IUS2_Clone(self)", "title": "" }, { "docid": "40221497e143c7f40d3379acd6833058", "score": "0.8283212", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF32IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF32IUC2_Clone(self)", "title": "" }, { "docid": "1878b407a488445952e1fe7d04b7fbdd", "score": "0.82831985", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF32IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF32IUC2_Clone(self)", "title": "" }, { "docid": "5344de61a50e8f720d1d65cb4cafe864", "score": "0.8277707", "text": "def Clone(self) -> \"itkSLICImageFilterVIF2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIF2IULL2_Clone(self)", "title": "" }, { "docid": "be55c993340c2b189fd64ec5873a7c58", "score": "0.8277258", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterISS2IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterISS2IUS2_Clone(self)", "title": "" }, { "docid": "af037aa876fda3b289bae820880bec77", "score": "0.82680434", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUC2IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2IUC2_Clone(self)", "title": "" }, { "docid": "07a3b7d20407da45915a47c72965d66d", "score": "0.8244986", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUC2IUL2_Pointer\":\n return 
_itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2IUL2_Clone(self)", "title": "" }, { "docid": "fdba23bd4bc12d0f659fadbbc3c6dfcc", "score": "0.82404643", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF22IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF22IUS2_Clone(self)", "title": "" }, { "docid": "a387aadf0e9715ea0a320556962d03a6", "score": "0.822867", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUS2ISS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2ISS2_Clone(self)", "title": "" }, { "docid": "452043fdbd662e0a8a1164e0bb61883d", "score": "0.82144004", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF22IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF22IUS2_Clone(self)", "title": "" }, { "docid": "39de04f95a319eaad58594f88c253844", "score": "0.8199321", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF42IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF42IUS2_Clone(self)", "title": "" }, { "docid": "fafc4ac4c7a088aeca8ff1908f816fbf", "score": "0.81708354", "text": "def Clone(self) -> \"itkIsolatedConnectedImageFilterIUS2IUS2_Pointer\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIUS2IUS2_Clone(self)", "title": "" }, { "docid": "e8e34eb178389bab94a13df24f369cc8", "score": "0.81443334", "text": "def Clone(self) -> \"itkComparisonImageFilterISS2ISS2_Pointer\":\n return _itkComparisonImageFilterPython.itkComparisonImageFilterISS2ISS2_Clone(self)", "title": "" }, { "docid": "44b6cd5d345107d9595693e3e4cc5e6c", "score": "0.8128361", "text": "def Clone(self) -> \"itkZeroCrossingBasedEdgeDetectionImageFilterIF2IF2_Pointer\":\n return _itkZeroCrossingBasedEdgeDetectionImageFilterPython.itkZeroCrossingBasedEdgeDetectionImageFilterIF2IF2_Clone(self)", "title": "" }, { "docid": "13713fb06b1f77be2645496cc57ae62b", "score": "0.81168294", "text": "def Clone(self) -> \"itkComparisonImageFilterIUC2IUC2_Pointer\":\n return _itkComparisonImageFilterPython.itkComparisonImageFilterIUC2IUC2_Clone(self)", "title": "" }, { "docid": "3998bbfe9c32bfc3cacd09abbc55302b", "score": "0.8098259", "text": "def Clone(self) -> \"itkSLICImageFilterVIUS2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IULL2_Clone(self)", "title": "" }, { "docid": "a6cb70e7e8ebbbe93d3c554d6f1a6fb1", "score": "0.80822843", "text": "def Clone(self) -> \"itkSLICImageFilterVISS2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVISS2IULL2_Clone(self)", "title": "" }, { "docid": "ec1368cb54f9fed9e49a92901181d00c", "score": "0.80705595", "text": "def Clone(self) -> \"itkSLICImageFilterVIUC2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IULL2_Clone(self)", "title": "" }, { "docid": "1baf297d67c77786bfa7d0771c4b0de6", "score": "0.805997", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF32IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF32IUS2_Clone(self)", "title": "" }, { "docid": "e75b3ec65e77294092dd320978b77a0d", "score": "0.8059943", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF32IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF32IUS2_Clone(self)", "title": "" }, { "docid": "d204a5338c97950ffb7d520bd651b890", 
"score": "0.80556697", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUC2IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUC2IUS2_Clone(self)", "title": "" }, { "docid": "39895bd50490744262be62fad48a16b0", "score": "0.8032456", "text": "def Clone(self) -> \"itkSLICImageFilterIUC2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC2IULL2_Clone(self)", "title": "" }, { "docid": "dbd60a7b084c933b33b63e43b193eacb", "score": "0.79846865", "text": "def Clone(self) -> \"itkVTKImageImportIF2_Pointer\":\n return _itkVTKImageImportPython.itkVTKImageImportIF2_Clone(self)", "title": "" }, { "docid": "b42f679527c321425a7f7e13e88a9fc0", "score": "0.796729", "text": "def Clone(self) -> \"itkSLICImageFilterVIF2IUS2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIF2IUS2_Clone(self)", "title": "" }, { "docid": "b48320fa1a9534bb78a5add1bf6c53bd", "score": "0.79567796", "text": "def Clone(self) -> \"itkExpImageFilterIUC2IUC2_Pointer\":\n return _itkExpImageFilterPython.itkExpImageFilterIUC2IUC2_Clone(self)", "title": "" }, { "docid": "1b29ad9cc6fe10e6e1fa3e0c9921238d", "score": "0.79363954", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUS2IUL2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2IUL2_Clone(self)", "title": "" }, { "docid": "ac642e1cca7f6b5f0543da896bbe0345", "score": "0.79299074", "text": "def Clone(self) -> \"itkSLICImageFilterISS2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterISS2IULL2_Clone(self)", "title": "" }, { "docid": "6b29dded97453f7647cc62bf5a081fb4", "score": "0.79099643", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUS2IUC2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2IUC2_Clone(self)", "title": "" }, { "docid": "1f9e8f231b49de65e40baa080e16fc85", "score": "0.78992134", "text": "def Clone(self) -> \"itkAsinImageFilterIUC2IUC2_Pointer\":\n return _itkAsinImageFilterPython.itkAsinImageFilterIUC2IUC2_Clone(self)", "title": "" }, { "docid": "f6ff92834f3db7a833a83d84b6f86d10", "score": "0.78795", "text": "def Clone(self) -> \"itkNearestNeighborInterpolateImageFunctionIF2D_Pointer\":\n return _itkNearestNeighborInterpolateImageFunctionPython.itkNearestNeighborInterpolateImageFunctionIF2D_Clone(self)", "title": "" }, { "docid": "d8e757cf4edeb0e43571cbf71998c0c9", "score": "0.78585565", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF43ISS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF43ISS3_Clone(self)", "title": "" }, { "docid": "046e1517e8d96f4a995dae99b46e2e28", "score": "0.7788282", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF43ISS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF43ISS3_Clone(self)", "title": "" }, { "docid": "41a9d09a61090db3e339897abd74aeed", "score": "0.7781968", "text": "def Clone(self) -> \"itkSLICImageFilterIF2IUS2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIF2IUS2_Clone(self)", "title": "" }, { "docid": "2e7917dfc0b25b7d069744b5becec8e9", "score": "0.7756652", "text": "def Clone(self) -> \"itkMattesMutualInformationImageToImageMetricv4IF2IF2_Pointer\":\n return _itkMattesMutualInformationImageToImageMetricv4Python.itkMattesMutualInformationImageToImageMetricv4IF2IF2_Clone(self)", "title": "" }, { "docid": "6f8e3436a178a4dd8764dc1e7949abef", "score": 
"0.7746149", "text": "def Clone(self) -> \"itkAsinImageFilterISS2ISS2_Pointer\":\n return _itkAsinImageFilterPython.itkAsinImageFilterISS2ISS2_Clone(self)", "title": "" }, { "docid": "bc9a96518fd353a80c51927acb6bbe98", "score": "0.77450985", "text": "def Clone(self) -> \"itkKernelImageFilterIF2IF2Neighborhood_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkKernelImageFilterIF2IF2Neighborhood_Clone(self)", "title": "" }, { "docid": "44b5105e8b6c7b453c6a7e51afb08f96", "score": "0.77395153", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIUS2IUS2_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIUS2IUS2_Clone(self)", "title": "" }, { "docid": "512cdede49f20b79e380bcddeb47cae0", "score": "0.77032524", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF43IUC3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF43IUC3_Clone(self)", "title": "" }, { "docid": "828e87f25d6c20e6e8178fc08682108d", "score": "0.7681944", "text": "def Clone(self) -> \"itkNearestNeighborInterpolateImageFunctionIUC2D_Pointer\":\n return _itkNearestNeighborInterpolateImageFunctionPython.itkNearestNeighborInterpolateImageFunctionIUC2D_Clone(self)", "title": "" }, { "docid": "9c6ae1d17943573cf4970108a943bff9", "score": "0.767437", "text": "def Clone(self) -> \"itkMovingHistogramImageFilterBaseIF2IF2Neighborhood_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkMovingHistogramImageFilterBaseIF2IF2Neighborhood_Clone(self)", "title": "" }, { "docid": "0013f8910012450202b9f2558a90d92f", "score": "0.76702917", "text": "def Clone(self) -> \"itkIsolatedConnectedImageFilterISS3ISS3_Pointer\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterISS3ISS3_Clone(self)", "title": "" }, { "docid": "9975b159cfa596ed64bc485d2e61d4a7", "score": "0.76667786", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF23ISS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF23ISS3_Clone(self)", "title": "" }, { "docid": "c8911367301105c560ea9ab8f45787de", "score": "0.76523393", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF23ISS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF23ISS3_Clone(self)", "title": "" }, { "docid": "f67e24ac86397ba17e85bd253686422b", "score": "0.7608224", "text": "def Clone(self) -> \"itkIsolatedConnectedImageFilterIF3IF3_Pointer\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIF3IF3_Clone(self)", "title": "" }, { "docid": "675b165965f025a6a81d7281ca1cba49", "score": "0.76017934", "text": "def Clone(self) -> \"itkVTKImageImportIUL2_Pointer\":\n return _itkVTKImageImportPython.itkVTKImageImportIUL2_Clone(self)", "title": "" }, { "docid": "67881e3871a04b42c2e6252bf4a0d619", "score": "0.76012653", "text": "def Clone(self) -> \"itkKernelImageFilterISS2ISS2Neighborhood_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkKernelImageFilterISS2ISS2Neighborhood_Clone(self)", "title": "" }, { "docid": "403f6c2b335380da3f4a6008e32cd06e", "score": "0.7596524", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF43IUC3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF43IUC3_Clone(self)", "title": "" }, { "docid": "6c7339d4333ab7900213d2c86ed14c34", "score": "0.7595517", "text": "def Clone(self) -> 
\"itkNearestNeighborInterpolateImageFunctionISS2D_Pointer\":\n return _itkNearestNeighborInterpolateImageFunctionPython.itkNearestNeighborInterpolateImageFunctionISS2D_Clone(self)", "title": "" }, { "docid": "3664a92463f1484f434ffc56f142428c", "score": "0.7580084", "text": "def Clone(self) -> \"itkVTKImageImportISS2_Pointer\":\n return _itkVTKImageImportPython.itkVTKImageImportISS2_Clone(self)", "title": "" }, { "docid": "4c9abd437b16a6cda44ccbbd020b5f62", "score": "0.75643545", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF33ISS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF33ISS3_Clone(self)", "title": "" }, { "docid": "fafe0ad70b80aeeaaa5655964af7ddc0", "score": "0.7564185", "text": "def Clone(self) -> \"itkIsolatedConnectedImageFilterIUC3IUC3_Pointer\":\n return _itkIsolatedConnectedImageFilterPython.itkIsolatedConnectedImageFilterIUC3IUC3_Clone(self)", "title": "" }, { "docid": "01e6bc78c08b7dc229c9c0cfa9a2e623", "score": "0.75596017", "text": "def Clone(self) -> \"itkSLICImageFilterVISS2IUS2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVISS2IUS2_Clone(self)", "title": "" }, { "docid": "18539fa21ebb1a60bbc193a133727503", "score": "0.7555129", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF43IUS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF43IUS3_Clone(self)", "title": "" }, { "docid": "55572eea089449d23753d2366bc2fab9", "score": "0.7541083", "text": "def Clone(self) -> \"itkMovingHistogramImageFilterBaseISS2ISS2Neighborhood_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkMovingHistogramImageFilterBaseISS2ISS2Neighborhood_Clone(self)", "title": "" }, { "docid": "3bbe7dc40822ce7aedbaf009d69b395f", "score": "0.75369", "text": "def Clone(self) -> \"itkVTKImageImportIUC2_Pointer\":\n return _itkVTKImageImportPython.itkVTKImageImportIUC2_Clone(self)", "title": "" }, { "docid": "6e627c47d23b2251d59f7c8805981943", "score": "0.75205564", "text": "def Clone(self) -> \"itkExpImageFilterISS2ISS2_Pointer\":\n return _itkExpImageFilterPython.itkExpImageFilterISS2ISS2_Clone(self)", "title": "" }, { "docid": "17ae16e68248c836f97fde832b47fa5c", "score": "0.751507", "text": "def Clone(self) -> \"itkMovingHistogramImageFilterBaseIUC2IUC2Neighborhood_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkMovingHistogramImageFilterBaseIUC2IUC2Neighborhood_Clone(self)", "title": "" }, { "docid": "614fca6f4848528abfd8fe8a6c673de8", "score": "0.750964", "text": "def Clone(self) -> \"itkSLICImageFilterISS2IUS2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterISS2IUS2_Clone(self)", "title": "" }, { "docid": "e4338bb2f767585e88e4f47309cb4046", "score": "0.7509603", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF23IUC3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF23IUC3_Clone(self)", "title": "" }, { "docid": "fb5b03e463344cbfba292afcd1ba2a7c", "score": "0.748805", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF33ISS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF33ISS3_Clone(self)", "title": "" }, { "docid": "a201c0a0de19eb837aaecc67236fce1c", "score": "0.7475529", "text": "def Clone(self) -> \"itkComparisonImageFilterIUS2IUS2_Pointer\":\n return _itkComparisonImageFilterPython.itkComparisonImageFilterIUS2IUS2_Clone(self)", "title": "" }, { "docid": 
"46a0a9f9a98b820ad887f0e828fe502b", "score": "0.7466953", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF43IUS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF43IUS3_Clone(self)", "title": "" }, { "docid": "10fb788ec5daaa3a059f317f2fb4d9d6", "score": "0.7463923", "text": "def Clone(self) -> \"itkSLICImageFilterIUS2IULL2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUS2IULL2_Clone(self)", "title": "" }, { "docid": "4e67442910a7bc4e4f9d2d2f6015aad8", "score": "0.7463431", "text": "def Clone(self) -> \"itkSLICImageFilterIUC2IUS2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterIUC2IUS2_Clone(self)", "title": "" }, { "docid": "c1ddf00291affa67a5b95f8f7cf1a1e4", "score": "0.7460297", "text": "def Clone(self) -> \"itkSLICImageFilterVIUC2IUS2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUC2IUS2_Clone(self)", "title": "" }, { "docid": "4a8fd82eee9eeba26c3374602f7c7a1e", "score": "0.74370897", "text": "def Clone(self) -> \"itkSLICImageFilterVIUS2IUS2_Pointer\":\n return _itkSLICImageFilterPython.itkSLICImageFilterVIUS2IUS2_Clone(self)", "title": "" }, { "docid": "deb2eeb5f6ddbfdc42d624ea174c2be8", "score": "0.7434198", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF23IUC3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF23IUC3_Clone(self)", "title": "" }, { "docid": "697a59428caf01aa818e2a9e78d12aa5", "score": "0.7399529", "text": "def Clone(self) -> \"itkKernelImageFilterIUC2IUC2Neighborhood_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkKernelImageFilterIUC2IUC2Neighborhood_Clone(self)", "title": "" }, { "docid": "57d82d317dd2ff892319610997ad3743", "score": "0.73910517", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF33IUC3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF33IUC3_Clone(self)", "title": "" }, { "docid": "01fced6039b25fb31bedfcd8c13c833e", "score": "0.73886013", "text": "def Clone(self) -> \"itkAdaptiveHistogramEqualizationImageFilterIF2_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkAdaptiveHistogramEqualizationImageFilterIF2_Clone(self)", "title": "" }, { "docid": "61f49ffac7fc9bc869b63fbb4b48e7e5", "score": "0.7381144", "text": "def Clone(self) -> \"itkVTKImageImportIRGBUC2_Pointer\":\n return _itkVTKImageImportPython.itkVTKImageImportIRGBUC2_Clone(self)", "title": "" }, { "docid": "fde39d0bf448b6b5d6735b377395d57f", "score": "0.73734957", "text": "def Clone(self) -> \"itkAdaptiveHistogramEqualizationImageFilterISS2_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkAdaptiveHistogramEqualizationImageFilterISS2_Clone(self)", "title": "" }, { "docid": "04942694680c2dd020c53468342d2010", "score": "0.73497283", "text": "def Clone(self) -> \"itkImageSpatialObject2F_Pointer\":\n return _itkImageSpatialObjectPython.itkImageSpatialObject2F_Clone(self)", "title": "" }, { "docid": "b265777e35ece6f7a2edd5e857866dcd", "score": "0.7340306", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterIVF23IUS3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterIVF23IUS3_Clone(self)", "title": "" }, { "docid": "a0aabd1f89cd471fb270c2b27287fafd", "score": "0.7329984", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF23IUS3_Pointer\":\n return 
_itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF23IUS3_Clone(self)", "title": "" }, { "docid": "e89f172f54a110ca9e14392b800a3033", "score": "0.7314972", "text": "def Clone(self) -> \"itkImageSpatialObject2SS_Pointer\":\n return _itkImageSpatialObjectPython.itkImageSpatialObject2SS_Clone(self)", "title": "" }, { "docid": "f41fbdb44bf82ad81b96bf58d1a06ca1", "score": "0.7285321", "text": "def Clone(self) -> \"itkAdaptiveHistogramEqualizationImageFilterIUC2_Pointer\":\n return _itkAdaptiveHistogramEqualizationImageFilterPython.itkAdaptiveHistogramEqualizationImageFilterIUC2_Clone(self)", "title": "" }, { "docid": "b2c1d0c7ad415142b80b09ee2046bb31", "score": "0.7283304", "text": "def Clone(self) -> \"itkFastMarchingThresholdStoppingCriterionIF2IF2_Pointer\":\n return _itkFastMarchingThresholdStoppingCriterionPython.itkFastMarchingThresholdStoppingCriterionIF2IF2_Clone(self)", "title": "" }, { "docid": "a11055d2697721d878418cd49e5773d9", "score": "0.7274749", "text": "def Clone(self) -> \"itkConnectedComponentImageFilterICVF33IUC3_Pointer\":\n return _itkConnectedComponentImageFilterPython.itkConnectedComponentImageFilterICVF33IUC3_Clone(self)", "title": "" }, { "docid": "078fd0bb6194e7432d608eef199eb9e5", "score": "0.7264446", "text": "def Clone(self) -> \"itkVTKImageImportIUS2_Pointer\":\n return _itkVTKImageImportPython.itkVTKImageImportIUS2_Clone(self)", "title": "" } ]
e5e042c8fc342e1994f4fc2cb4e561e3
Get item with the specified Id.
[ { "docid": "5e24fdda50f697c9459795fa135b522c", "score": "0.71838367", "text": "def get(self, itemId):\n\n tableRow = self.__queryTableRow(itemId)\n return self.__getItemFromTableRow(tableRow)", "title": "" } ]
[ { "docid": "b88dfccb495f954fb8fd0f791e6b5d7f", "score": "0.86855495", "text": "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "title": "" }, { "docid": "15d5ee78bc3f44b0c0619e5b4e135c81", "score": "0.7920818", "text": "def get_item_by_id(self, id):\n results = self.table_connector.query(\n KeyConditionExpression=Key(self.primary_key).eq(id)\n )\n return results[\"Items\"][0] if \"Items\" in results else []", "title": "" }, { "docid": "dfd1f3e90b7104f3f0224e64975cb70a", "score": "0.79094565", "text": "def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj", "title": "" }, { "docid": "23bfeee45770fff851d11d076f7fd152", "score": "0.77619416", "text": "def get_by_id(cls, id):\n return cls.query().get(id)", "title": "" }, { "docid": "30366e96aabf443e45887e93571e765e", "score": "0.7723601", "text": "def _get(self, table, _id):\n data = {\"Key\": _id}\n return self._response_handler(table, \"get_item\", data)", "title": "" }, { "docid": "3a3f2db3c5856b7898deb482d330267f", "score": "0.770753", "text": "def get(cls, id):\n\n return cls.query.get(id)", "title": "" }, { "docid": "3a3f2db3c5856b7898deb482d330267f", "score": "0.770753", "text": "def get(cls, id):\n\n return cls.query.get(id)", "title": "" }, { "docid": "11700e0e851ed84049d28af27410ab5a", "score": "0.76779234", "text": "def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()", "title": "" }, { "docid": "284e40d1276f2625981fb81efb788ee5", "score": "0.7507353", "text": "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "title": "" }, { "docid": "09ae69b772407e7e43cf0983c998d728", "score": "0.7482027", "text": "def get(self, id):\n return self.__model__.query.get(id)", "title": "" }, { "docid": "d9656815b33059ce90265f3558047043", "score": "0.74654627", "text": "def get_item_by_id(self, item_id):\n\n return self.api.items.get(item_id)['item']", "title": "" }, { "docid": "b4166aff0908131ec4327bf556b6b3ba", "score": "0.7464646", "text": "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "title": "" }, { "docid": "3cece960c416ffe757157f364fa36ee8", "score": "0.74564147", "text": "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "title": "" }, { "docid": "7ca30f8ec559853f304e6f4e57bfd936", "score": "0.74345386", "text": "def get_object(self, id_):\n return self._objects.get(id_, None)", "title": "" }, { "docid": "d6f9ce3aade665fc27986438b3e5ad38", "score": "0.7329277", "text": "def get_by_id(cls, item_id):\n return db_session.query(cls).filter(cls.id == item_id).first()", "title": "" }, { "docid": "e8b0686e646f6b17c8f03eb0a868e661", "score": "0.7260149", "text": "def get(self, _id):", "title": "" }, { "docid": "6ab335bcb2e80ed6ecdb169bf3dc65a1", "score": "0.72581697", "text": "def get_item(self, item_id):\n for item in self.order_items:\n if item.get_itemId() == item_id:\n return item", "title": "" }, { "docid": "8230ee91e6d0b7e94624f00235ccc0bf", "score": "0.7238151", "text": "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "title": "" }, { "docid": "757c66f8fc8bc9f3cff752cfaab6d140", "score": "0.7202799", "text": "def get (self, *k, **kw):\n if kw.has_key ('id'):\n return self.get_by_id (kw['id'])", "title": "" }, { "docid": "fc07a2ff80f8f916b60a89bae2397d61", "score": "0.7186531", "text": "def get_item(self, item_id):\n if self._database:\n try:\n return self._database.retrieve(item_id)\n 
except PyragargaError:\n pass\n # TODO: Retry if it times out \n details_page = self._build_tree(\n self._session.get(KG_URL + DETAILS_SCRIPT,\n params={'id': item_id, 'filelist':1}\n ).content)\n item = self._parse_details_page(details_page, item_id)\n if self._database:\n self._database.store(item)\n self.logger.info('Received details for item %d' % item.kg_id)\n return item", "title": "" }, { "docid": "99a6e4fa7f9b62b43afff852a982439f", "score": "0.71824855", "text": "def item_retrieve(id):\n item = getItem(id)\n if item is None:\n return jsonify({}), 204\n else:\n return jsonify(item=item.serialize)", "title": "" }, { "docid": "6834733692de784792792f44e094e2e0", "score": "0.7168817", "text": "def get_by_id(cls, id):\n return db.session.query(cls).get(id)", "title": "" }, { "docid": "630419ebf60aa333010c394bb6c170a7", "score": "0.7165354", "text": "def find_by_id(self, id_):\n return self.by_id.get(id_)", "title": "" }, { "docid": "13504df93ea21ce2e3c1433ba395103c", "score": "0.7162626", "text": "def get_item_with_id(self, uid):\n for item in self.get_items():\n if item.id == uid:\n return item\n\n return None", "title": "" }, { "docid": "0f026c2eb77af5a31514951bcac30d59", "score": "0.71237695", "text": "def itemById(self, itemId):\n itemType = \"\".join([i for i in itemId if not i.isdigit()])\n if itemType not in self.__inventory__:\n return None\n for item in self.__inventory__[itemType]:\n if item.id == itemId:\n return item\n return None", "title": "" }, { "docid": "8392c9426fa2c0ccb3d4c44b66682815", "score": "0.71179855", "text": "def get_item(self, id: str, user: User) -> Optional[T]:", "title": "" }, { "docid": "884c6a6039933c8cad493df3ddf167d1", "score": "0.71143734", "text": "def find_by_id(object_id, items):\n for item in items:\n if object_id == item[\"id\"]:\n return item\n\n raise Exception(f\"Item with {object_id} not found\")", "title": "" }, { "docid": "2c15e24cbe6b691d7dcf7e9faccd135f", "score": "0.7093455", "text": "def find(self, id):\n response = self._connection.session.get(self.url + \"/%s\" % id)\n return self._raise_or_return_json(response)", "title": "" }, { "docid": "50c1054fbd10ea8d744c510ab65a0f1b", "score": "0.7090611", "text": "def get(self, cls, idvalue):\n result = self.imap.get(cls, idvalue)\n if result is None:\n result = self.find(cls, dict(_id=idvalue)).first()\n return result", "title": "" }, { "docid": "98461a6d51a604b9ecab7be0aece5b2c", "score": "0.7077143", "text": "def get_from_id(self,id=None):\n if id is None:\n return(self.items)\n if type(id) is int:\n for item in self.items:\n if item.id == id: return(item)\n items=[]\n if type(id) is list:\n return ([item.id for item in self.items if (item.id in id)])", "title": "" }, { "docid": "920bdb390c8344aa694ca92174d2858c", "score": "0.70641273", "text": "def getbyid(self, id):\n\n return esd.retrieve(id)", "title": "" }, { "docid": "a8a251533f6e3cb3858ffeb1d7a029a9", "score": "0.7049357", "text": "def get(self, id):\n return {'id': id}", "title": "" }, { "docid": "6d0aea792b3a6300e727e2adc356b5e2", "score": "0.7018917", "text": "def get(self, cls, id):\n pass", "title": "" }, { "docid": "db329eea76b347b22f44cde543e4e451", "score": "0.7007331", "text": "def find_item_by_id(self, item_id: str) -> ClientWorklistItem:\n # print(f'Finding item with id {item_id}')\n self.update_worklist()\n # print(self.__items)\n for item in self.__items:\n if item.id == item_id:\n # print('Found')\n return item\n return None", "title": "" }, { "docid": "2487c700320d79bc8fb8b324548ace1d", "score": "0.69988006", "text": 
"def find_by_id(self, _id: int) -> tuple:\n item = self.model.find_by_id(_id)\n if item:\n return {'item': check_json(item)}, 200\n else:\n return {'error': {'message': 'Item not found'}}, 400", "title": "" }, { "docid": "d43f1f481f29737d04bb67cce63202c5", "score": "0.6985872", "text": "def read_item(id):\n\n username = login_session.get('username', None)\n item = session.query(Item).filter_by(id=id).one()\n item_display = {'id': item.id, 'title': item.title, 'desc': item.desc}\n return render_template(\n 'read_item.html',\n item_display=item_display,\n username=username)", "title": "" }, { "docid": "80f607077ae46fd73564e760a02750bd", "score": "0.69625455", "text": "def __getitem__(self, id):\r\n \r\n if isinstance(id, basestring):\r\n return self._by_name[id]\r\n return self._by_number[id]", "title": "" }, { "docid": "829a3255186b6759c794fbddc4b66a4c", "score": "0.69533855", "text": "def get_object(self, id, **args):\n return self.request(\"{0}/{1}\".format(self.version, id), args)", "title": "" }, { "docid": "53a927813c822de278a963ea61e16b9b", "score": "0.6946598", "text": "def get_by_id(cls, id):\n try:\n return cls.objects.get(id=id)\n except(IntegrityError, OperationalError):\n return None", "title": "" }, { "docid": "bb08741be3e8c45d47dc65cbbc20d6d1", "score": "0.6940705", "text": "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "title": "" }, { "docid": "8f29bdf266aeef5590f402ea5f5bb12c", "score": "0.69235885", "text": "async def get_item(\n request: Request,\n response: Response,\n item_id: int,\n db: SAConnection = Depends(get_postgresql_connection)\n):\n cached_item = await request.app.extra['cache'].get_cache_item(item_id=item_id)\n if cached_item:\n return cached_item\n if db is None:\n response.status_code = 503\n return ResponseModel(result='Service unavailable')\n q = items.select().where(items.c.id == item_id)\n item = await db.fetchrow(query=q)\n if item is not None:\n item = Item(**item)\n await request.app.extra['cache'].set_cache_item(item=item)\n return item\n else:\n response.status_code = 404", "title": "" }, { "docid": "ca978bdbeafc0d59c586daa3d4753d8a", "score": "0.691854", "text": "def get_item(\n self, id_: Union[UUID, str], full_dataset: bool = True\n ) -> Optional[DatasetItem]:\n items = list(\n self.search_items(\n dataset_ids=[id_], full_dataset=full_dataset, order=ItemSort.UNSORTED\n )\n )\n if not items:\n return None\n if len(items) > 1:\n raise RuntimeError(\n \"Something is wrong: Multiple dataset results for a single UUID\"\n )\n\n [item] = items\n return item", "title": "" }, { "docid": "cbbf3b902a306e91827059b8bc7c460d", "score": "0.69126326", "text": "def get(id=None):\n return requests.get(\"/{}\".format(id))", "title": "" }, { "docid": "3ce3f52dcd6d3786c2f9c36651237412", "score": "0.68936217", "text": "def get_item_by_id(request, pk):\n item = get_object_or_404(StockItem, pk=pk)\n res_dict = {\n 'id': item.id,\n 'name': item.name,\n 'count': item.count,\n 'date_added': item.date_added,\n 'exp': item.date_of_expiration,\n 'added_by': item.added_by,\n 'cat': str(item.fk_category),\n 'subcat': str(item.fk_subcategory),\n 'notes': item.notes\n }\n return JsonResponse(res_dict)", "title": "" }, { "docid": "567be0413558148f601edfa1052dccc3", "score": "0.68906343", "text": "def read_item(\n *,\n db: Session = Depends(deps.get_db),\n id: int,\n current_user: models.User = Depends(deps.get_current_active_user),\n) -> Any:\n item = crud.item.get(db=db, id=id)\n if not item:\n raise HTTPException(status_code=404, detail='Item not found')\n if 
not crud.user.is_superuser(current_user) and (item.owner_id != current_user.id):\n raise HTTPException(status_code=400, detail='Not enough permissions')\n return item", "title": "" }, { "docid": "b0ff19738c47b5bb9968c080dcdf9fe9", "score": "0.6865024", "text": "def find(cls, item_id):\n cls.logger.info(\"Processing lookup for shopcart item id %s ...\", item_id)\n return cls.query.get(item_id)", "title": "" }, { "docid": "b4024adb4a7bd8e808c4c53ba1607d75", "score": "0.68090284", "text": "def get_item_by_index(self, index_name, id):\n results = self.table_connector.query(\n IndexName=index_name,\n KeyConditionExpression=Key(index_name).eq(id),\n )\n return results[\"Items\"] if \"Items\" in results else []", "title": "" }, { "docid": "481a74702e377e4b62d64019c2031256", "score": "0.6790732", "text": "def get(self, id):\n return Entry.query.filter(Entry.id == id).one()", "title": "" }, { "docid": "98bdf723b5b416a7e00fbc933001a437", "score": "0.6785707", "text": "def get_object(self, id, **args):\n return self.request(id, args)", "title": "" }, { "docid": "ae4f1645061b95a60d8e6f1a420bbd7f", "score": "0.6782711", "text": "def read_by_id(self, id, fields=None):\n assert id is not None, \"id can not be None\"\n return self.read_many_by_id([id], fields)[0]", "title": "" }, { "docid": "e75dea86b266cf5d91360eff1943f38c", "score": "0.6780855", "text": "def find_by_id(cls, iid: int):\n return cls.query.filter_by(id=iid).first()", "title": "" }, { "docid": "11b00cceea2575704ac06a68620895aa", "score": "0.67532885", "text": "def get_item(self, item_id):\n\n response = self._get_page_param('item', item_id).json()\n\n if not response:\n raise InvalidItemID\n\n return Item(response)", "title": "" }, { "docid": "b29af8b8c8522bf4221d5e85272a1dff", "score": "0.67386603", "text": "def get_object(id):", "title": "" }, { "docid": "d278b21b29c89b6ab175a6438cdbdd49", "score": "0.6727492", "text": "def get_element_by_id(self, id):\n for element in self._elements:\n if element.get_id() == id:\n return element", "title": "" }, { "docid": "72b2e21ce7658a9cb955264a87325f39", "score": "0.66887516", "text": "def get_by_id(self, pkId: int):\n if not self.model:\n raise NameError('database model has not been set.')\n if not pkId:\n raise ValueError('invalid primary key value.')\n\n with self.session() as session:\n query = self.get_query(session)\n rec = query.get(pkId)\n return rec", "title": "" }, { "docid": "21618ee5fa2cbc37d7000714dc85e9f9", "score": "0.66850483", "text": "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "title": "" }, { "docid": "26c7dd4a33c20da45fcecae92e5d8c6a", "score": "0.66613346", "text": "def get_item(self, mediaId):\n headers = { 'Authorization' : self.client.authorization_header }\n\n response = requests.get(\n self.client.url + '/media/' + mediaId,\n headers = headers\n )\n return json.loads(response.text)", "title": "" }, { "docid": "c3954a713aab82dfdc6a4ae3c87ee852", "score": "0.66567796", "text": "def get_one_by_id(self, object, id):\n self.lock.acquire()\n result = self.__Session.query(object).get(id)\n self.lock.release()\n return result", "title": "" }, { "docid": "e62aa8e823f21e52552c824dd84bb60f", "score": "0.664641", "text": "def get_proof_item(self, id):\n return self.prf.find_item(id)", "title": "" }, { "docid": "c90e8f8226182a1dcad3cc665944e826", "score": "0.66166353", "text": "def _get(self, id_: str) -> Union[DBModelInstance, NoReturn]:\n record = self.model.query.get(id_)\n if record:\n return record\n else:\n # 
raise error to correct handling wrong inputted params\n raise ServiceBadRequest()", "title": "" }, { "docid": "12c9bc12991579269e2311b187cbfcb8", "score": "0.6612357", "text": "def get_item(id):\n return jsonify(id=id, name='name', number=123)", "title": "" }, { "docid": "426496c1ca1da7416b839910e0e7b20f", "score": "0.6603247", "text": "def get(self, id):\n tmp = userDao.get_one_entry(id)\n return tmp", "title": "" }, { "docid": "05e992d28c40705941dd1ed798ca6561", "score": "0.65994793", "text": "def getById(self, Id):\n # we need to create a new object to insure we don't corrupt the generator count\n csvsource = CSVSource(self.source, self.factory, self.key())\n try:\n for item in csvsource.items():\n if Id == item.getId():\n return item\n except StopIteration:\n return None", "title": "" }, { "docid": "e3a8fec4644081cf7c8b90734c73ebd2", "score": "0.65687656", "text": "def get_by_id(self, _id):\n return Field(self.context, ResourcePathServiceOperation(\"getById\", [_id], self.resource_path))", "title": "" }, { "docid": "698784c410c9b4a4afbd73f12dc590b5", "score": "0.6563778", "text": "def find_by_id(db, model, id, *, worker_task=False):\n item = db.query(model).get(id)\n if not item:\n if worker_task:\n raise RuntimeError(\"could not find\") # TODO pick better exception\n raise HTTPError(falcon.HTTP_404, errors={\"id\": \"does not exist\"})\n return item", "title": "" }, { "docid": "797e40d3e724ab82b8c1d0054efc0296", "score": "0.6556701", "text": "def get(self, item_id: int):\n\n try:\n\n controller = self.controller()\n schema = self.schema()\n raw_data = controller.read(id=item_id)\n data = {'item': schema.dump(raw_data)}\n\n return ResponseHandler.render_response(data=data)\n\n except Exception as ex:\n\n return ResponseHandler.render_response(status=ERR, message=traceback.format_exc())", "title": "" }, { "docid": "9c4f5e4e999ea3ecba6c5bd785c79e1f", "score": "0.65457463", "text": "def get(self, _id):\n try:\n doc = self._db[_id]\n # For speed testing\n del self._db[_id]\n except KeyError:\n return None\n else:\n return self._parse_doc(doc)", "title": "" }, { "docid": "a56c138e971efdac002e67888ccffad0", "score": "0.6519662", "text": "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "title": "" }, { "docid": "66d2dc34f3caa86a725cabe50ad1fb34", "score": "0.6511209", "text": "def get(self, request, pk):\n return self.retrieve(request, pk)", "title": "" }, { "docid": "b3922e6e6a24bd882d9129aaa4be0e42", "score": "0.6497967", "text": "def getItem(self, itemID, no_html=False):\n data = self._client.Item.find(int(itemID))\n item = self.makeDict(data, no_html=no_html)\n return item", "title": "" }, { "docid": "cd3779b6ebecbc667e61f9e093aa316a", "score": "0.6497742", "text": "def find(cls, id=None):\n return cls.query.filter_by(id=id).one_or_none()", "title": "" }, { "docid": "a8daa84be5e6d95f66ae183c2926ac8e", "score": "0.64873743", "text": "def findItem(self, id):\n itemFound = None\n for curItem in self.scene.items():\n if not isinstance(curItem, DiagramItem):\n continue \n if curItem.itemId == int(id):\n itemFound = curItem\n break\n return itemFound", "title": "" }, { "docid": "116e14a31d4ca1a7b69770b584eb425a", "score": "0.64747566", "text": "def get(self, id):\n return self.__get_object(super(PullRequests, self).get(id))", "title": "" }, { "docid": "226ea52f9b4f96720d1cbd328d7faac2", "score": "0.64704424", "text": "def item(self, item_id):\n response = self._request(V2_ENDPOINTS['ITEMS'] + item_id)\n return response", "title": "" }, { "docid": 
"cf55e9df461bdf8d17311a91903c3737", "score": "0.6464578", "text": "def find(self, id):\n return self._select_one('''\n select\n *\n from\n {table}\n where\n {primary_key} = %s\n '''.format(table=self.__class__._table,\n primary_key=self.__class__._primary_key), [id])", "title": "" }, { "docid": "567838ef5067b015ee7f5344ee5b8f01", "score": "0.6443456", "text": "def get_food_with_id(cls, food_id):\n obj = cls.objects(food_id=food_id).first()\n return obj", "title": "" }, { "docid": "8fd51baaff40e7e23aceee985aa6d221", "score": "0.6441796", "text": "def get(cls, db, id):\n doc = cls.collection(db).find_one(filter={ '_id': ObjectId(id) })\n return Todo(**doc)", "title": "" }, { "docid": "d730e84712e12f707f0d2b1023e7f9b0", "score": "0.64399976", "text": "def get_item(self, item_id: int, category: str = None):\n if category:\n # ensuring that it will be in lowercase\n category = category.lower()\n\n if not category or not category in self.item_categories:\n # Assuming that if category isnt set, we are searching for anime\n category = \"anime\"\n\n search_url = f\"{SITE_URL}/{self.item_categories[category]}/{item_id}\"\n\n return self.fetch_url(search_url)", "title": "" }, { "docid": "ddfc3f6e0592ac1eb9a0f3e582f577ed", "score": "0.64376116", "text": "def get(self, mapitem_id: int) -> MapItem:\n pass", "title": "" }, { "docid": "bb4b0518193246ffc01f8c1a8d60f5fa", "score": "0.6433279", "text": "def get(self, pk):\n return self.model.query.options(self.from_cache(pk=pk)).get(pk)", "title": "" }, { "docid": "5b66d97eb99b746e53a722082e62e1fb", "score": "0.64272", "text": "async def get_one(self, pk):\n\n return await self._expand(await self.db.get_one(pk=pk))", "title": "" }, { "docid": "1773a66106ce44c0414d99d63db37d41", "score": "0.6426264", "text": "def get_object(self, ObjectClass, id):\n try:\n object = ObjectClass.objects.get(id=id)\n except (ObjectClass.DoesNotExist, ObjectClass.MultipleObjectsReturned):\n object = None\n return object", "title": "" }, { "docid": "ad9a717fc580ceb158b0399550418a6e", "score": "0.64217377", "text": "def get_by_id(id, lista):\n for inventar in lista:\n if get_id(inventar) == id:\n return inventar\n return None", "title": "" }, { "docid": "c9be3eb54d5c786a6c0d3967a7e7f48e", "score": "0.64213073", "text": "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "title": "" }, { "docid": "d0e31e43408da42d15b81fc41cc78945", "score": "0.6420426", "text": "def get_volume_by_id(self, id):\n for vol in self.conn.volumes:\n if vol.id == id:\n return vol\n raise KeyError(\"Volume with ID \" + id + \" not found\")", "title": "" }, { "docid": "ef25c6ed61d4540447faee6baf46a4d7", "score": "0.6418612", "text": "def get(self, _id):\n if not self.root:\n raise RootNotSet\n node = self.id_map.get(_id)\n if not node:\n raise IDNotFound(_id)\n\n link = node.get('link')\n if link:\n link_node = self.id_map.get(_id)\n if not link_node:\n logger.error('link node not found!')\n raise IDNotFound(link_node)\n data = self.get(node['link'])\n data['link'] = data['id']\n data['id'] = link_node['id']\n return data\n\n if node.get('type') == 'group' or node.get('type') == None:\n return self._adapter._get_group(_id)\n elif node.get('type') == 'data':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'json':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'config':\n data = self._adapter._load_data(_id)\n data.pop('name', None)\n return data\n else:\n raise UnsupportedType", "title": "" }, { 
"docid": "98682beb5dfad2ff331f971a365e70e6", "score": "0.6415115", "text": "def get_item(self, itemID, no_html=False, external_id=False, depth=1):\n data = self._client.Item.find(int(itemID))\n item = self.make_dict(data, no_html=no_html, external_id=external_id, depth=depth)\n return item", "title": "" }, { "docid": "29f3428c07763b76ac309716592a7b55", "score": "0.6412472", "text": "async def read_maintenance_record_by_id(id: int, conn: Database = Depends(get_db)):\n\n item = await get(conn=conn, id=id)\n if not item:\n raise HTTPException(status_code=400, detail=\"Item not found\")\n return item", "title": "" }, { "docid": "1741b1a95b7b03d20ad2cca0feb268bc", "score": "0.6400419", "text": "def get(cls, id):\n\t\tvalue = cls.query.filter_by(id=id, deleted=False).first()\n\t\tif value is None:\n\t\t\traise ValidationError({'message': f'{cls.__name__} not found'})\n\t\treturn value", "title": "" }, { "docid": "0eede1bc51396fc5c2da9a0926fd05ad", "score": "0.639704", "text": "def get(cls, _id):\n return DataStore.get_instance(cls, _id)", "title": "" }, { "docid": "4310db6df8f7c65b18812a96907e3564", "score": "0.6393155", "text": "def get_by_id(self, model, key_name):\n return model.get_by_id(key_name)", "title": "" }, { "docid": "9d051d2beb6246526d6abed3f32b3a1c", "score": "0.63901585", "text": "def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)", "title": "" }, { "docid": "6ea5029811069dda2f1472ddb4375eb6", "score": "0.63842773", "text": "def getNodeById(self, nodes, id):\n for item in nodes:\n if item.getProperty('id') == id:\n return item", "title": "" }, { "docid": "05727681a2a748b4b7cb698e4ccf1e76", "score": "0.63783866", "text": "def get(cls, pk):\n return DBSession().query(cls).get(pk)", "title": "" }, { "docid": "c97950cf3f27da02490d60c5fd883cc0", "score": "0.63771385", "text": "def by_id(cls, id):\n try:\n return DBSession.query(cls).filter(cls.id == id).one()\n except (NoResultFound, MultipleResultsFound):\n return None", "title": "" }, { "docid": "3a2be9c6e7be3e9db49ede758ce87590", "score": "0.6368296", "text": "def get_itemByImageId(self, record_id):\n for item in self.order_items:\n img = item.get_image()\n if img.get_itemId() == record_id:\n return item", "title": "" }, { "docid": "b293d30d42363978034102f9b72ded5d", "score": "0.63682586", "text": "def get(self, id):\n file = (\n self.drive.files()\n .get(\n fileId=id,\n fields=\"id, name\",\n supportsAllDrives=self.shared_drive[0],\n )\n .execute()\n )\n return file", "title": "" }, { "docid": "235b6daa655d7fb740108e939b994327", "score": "0.6364752", "text": "def get_item_detail(item_id):\n pass", "title": "" }, { "docid": "4f96bdd77f6d7cb3d6513f3074b36525", "score": "0.6358872", "text": "def get(self, id):\n\n matches = self._d.get(id, set())\n for match in matches: return match", "title": "" }, { "docid": "55dfa40b5eb6e27e0489b06198a1eb44", "score": "0.6358486", "text": "def get_item(self, name: str) -> Optional[Item]:\n item = self.filter_items(name, limit=1)\n return item[0] if item else None", "title": "" } ]
8110df8e5976ea4b2be197eebacd69f5
Ensures correct output from is_v_wind_field. In this case the answer is yes.
[ { "docid": "55dd6928d4b8a4b81d8bb41fc8db8adc", "score": "0.8026145", "text": "def test_is_v_wind_field_true(self):\n\n self.assertTrue(grib_io.is_v_wind_field(V_WIND_NAME))", "title": "" } ]
[ { "docid": "c22039e2105441ccda68e2fe307fa85b", "score": "0.7764373", "text": "def test_is_u_wind_field_v_wind(self):\n\n self.assertFalse(grib_io.is_u_wind_field(V_WIND_NAME))", "title": "" }, { "docid": "cab1e36c9939960c2ac1794dca943e7f", "score": "0.7331233", "text": "def test_is_v_wind_field_u_wind(self):\n\n self.assertFalse(grib_io.is_v_wind_field(U_WIND_NAME))", "title": "" }, { "docid": "8e48ac2e4efa63231b8f51f44c0dcd49", "score": "0.71818435", "text": "def test_is_v_wind_field_non_wind(self):\n\n self.assertFalse(grib_io.is_v_wind_field(NON_WIND_NAME))", "title": "" }, { "docid": "22ee8671c8c195e6360178723c90cf21", "score": "0.6858301", "text": "def test_is_u_wind_field_true(self):\n\n self.assertTrue(grib_io.is_u_wind_field(U_WIND_NAME))", "title": "" }, { "docid": "31352c6d8fff56d7682bf126556fb014", "score": "0.61270475", "text": "def test_is_u_wind_field_non_wind(self):\n\n self.assertFalse(grib_io.is_u_wind_field(NON_WIND_NAME))", "title": "" }, { "docid": "514b4511d4fba95ecf31160bad847536", "score": "0.61025345", "text": "def test_field_name_switch_u_and_v_input_v(self):\n\n this_u_wind_name = grib_io.field_name_switch_u_and_v(V_WIND_NAME)\n self.assertTrue(this_u_wind_name == U_WIND_NAME)", "title": "" }, { "docid": "4a551fb1b725412f4fc21fbe1d0ec26c", "score": "0.5796117", "text": "def test_field_name_switch_u_and_v_input_neither(self):\n\n this_field_name = grib_io.field_name_switch_u_and_v(NON_WIND_NAME)\n self.assertTrue(this_field_name == NON_WIND_NAME)", "title": "" }, { "docid": "cc2d41114ec929058ec028fbcc6ce767", "score": "0.5780031", "text": "def _IsTvMode(self):\n return bool(self.Get(self._REG_SYNC_DETECT) & self._BIT_TV_MODE)", "title": "" }, { "docid": "06d0adce5b55969004285c44e14a201f", "score": "0.5759916", "text": "def test_field_name_switch_u_and_v_input_u(self):\n\n this_v_wind_name = grib_io.field_name_switch_u_and_v(U_WIND_NAME)\n self.assertTrue(this_v_wind_name == V_WIND_NAME)", "title": "" }, { "docid": "2d0269ceef23ac5001361f649c6dfe12", "score": "0.57270974", "text": "def test_wind_from_direction(self):\n expected_u = -1.0 * self.expected_u\n expected_v = -1.0 * self.expected_v\n self.wind_direction_cube.rename(\"wind_from_direction\")\n ucube, vcube = self.plugin.process(\n self.wind_speed_cube, self.wind_direction_cube\n )\n self.assertArrayAllClose(ucube.data, expected_u, atol=1e-5)\n self.assertArrayAllClose(vcube.data, expected_v, atol=1e-5)", "title": "" }, { "docid": "36bbca2542f6626895a00b0016d987d0", "score": "0.5691997", "text": "def v_wind(self):\n # V = -cos(direction) * speed\n v_wind = -np.cos(np.deg2rad(self.direction)) * self.speed\n return v_wind", "title": "" }, { "docid": "cc32c69db4206398a2b26a09686cf477", "score": "0.56700325", "text": "def isLenientDrive(self) -> bool:\n ...", "title": "" }, { "docid": "041e9a9caad58eb59466833e5779e337", "score": "0.5581114", "text": "def isprismatic(self) -> bool:\n return self.v.istranslation if self.v else False", "title": "" }, { "docid": "194521edbfb4d84f7beba1b29ad334c5", "score": "0.55744886", "text": "def get_under_voltage_status() -> bool:\n hwmon = get_rpi_volt_hwmon()\n if not hwmon:\n return False\n\n with open(os.path.join(hwmon, SYSFILE_HWMON_FILE), 'r', encoding='utf8') as file:\n bit = file.read()[:-1]\n return bit == \"1\"", "title": "" }, { "docid": "e6ef8973bd01d1a30f976b089fdbf879", "score": "0.5550861", "text": "def within_voltage_compliance(self):\n response = self._instrument.query('SENS:VOLT:PROT:TRIP?').strip()\n return not bool(int(response))", "title": "" }, { "docid": 
"6db741a24988d610d6ba8f8a72dc19e4", "score": "0.5529494", "text": "def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n # Check that the analytical method returns the same result as the numerical one\n b = comp_surface_wind(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=1e-5, msg=msg)", "title": "" }, { "docid": "e7cb569321dea5630e33c29a694c32e1", "score": "0.54944885", "text": "def volumepreserved(self):\n det = np.linalg.det(self.transformation)\n if det == 1:\n return True\n else:\n return False", "title": "" }, { "docid": "c60d7c8a4458ff140e015f386526203d", "score": "0.54922056", "text": "def _DecreamentVoltDummy(self):\n volt_raw_now = self.volt_now/1000\n deltaV_raw = self.dV/1000\n next_raw = float('{0:.2f}'.format(volt_raw_now - deltaV_raw))\n if self.volt_now <= self.volt_target:\n # self.Ve_obj.Instruct('volt ' + str(self.volt_target/1000))\n self.volt_now = self.volt_target\n self.is_changevolt = False\n return False\n else:\n # self.Ve_obj.Instruct('volt ' + str(next_raw))\n self.volt_now = next_raw*1000\n # if self.is_output == False:\n # self.Ve_obj.OutOn()\n return True", "title": "" }, { "docid": "3a8cb4bae01af8aa70af992a4d70211d", "score": "0.5439755", "text": "def isOverFloor(self) -> bool:\n return self.floorSensor.getVoltage() > 1.5", "title": "" }, { "docid": "c943c815bf8e4cc1212eed24561563e9", "score": "0.54137653", "text": "def vx_corrected(field, data):\n halo_velocity_kms = data.ds.halo_velocity_kms\n return data['gas','velocity_x'].in_units('km/s') - halo_velocity_kms[0]", "title": "" }, { "docid": "c15eca9f6859f7a96bb6deecff47fef7", "score": "0.540942", "text": "def _guess_implicit_VR(self):\n potential_vr = self._raw_content[4:6].decode()\n if potential_vr in pdcm.values.converters.keys():\n implicit_VR = False\n else:\n implicit_VR = True\n return implicit_VR", "title": "" }, { "docid": "886173a2811f30f5e4d374f90b85e56e", "score": "0.53976506", "text": "def hasRawVariable(self) :\n clogger.debug(\"TBD: actually test variables in files, rather than assuming that variable %s is virtual for dataset %s\"\\\n %(self.variable,self.crs))\n return(False)", "title": "" }, { "docid": "592fc728dcaa048a19ae9df3ca107337", "score": "0.53949535", "text": "def _DecreamentVolt(self):\n volt_raw_now = self.volt_now/1000\n deltaV_raw = self.dV/1000\n next_raw = '{0:.2f}'.format(volt_raw_now - deltaV_raw)\n if self.volt_now <= self.volt_target:\n self.Ve_obj.Instruct('volt ' + str(self.volt_target/1000))\n self.is_changevolt = False\n return False\n else:\n self.Ve_obj.Instruct('volt ' + str(next_raw))\n if self.is_output == False:\n self.Ve_obj.OutOn()\n return True", "title": "" }, { "docid": "fffed81f88ca8fe95dadab792fd243e0", "score": "0.5370652", "text": "def is_power(F,v,w):\n v1=copy.copy(v)\n w1=copy.copy(w)\n w1bar=w1**(-1)\n pos=F.word([])\n neg=F.word([])\n ispower=False\n while len(v1)>=len(pos):\n if v1.letters==pos.letters or v1.letters==neg.letters:\n ispower=True\n break\n pos=pos*w1\n neg=neg*w1bar\n\n return ispower", "title": "" }, { "docid": "7bdd088b98918f86d17f6f3789432b44", "score": "0.5340905", "text": "def is_field(self, proof=True):\n return False", "title": "" }, { "docid": "c3f2e377f337487fac19a24b3994efd6", "score": "0.5329164", "text": "def 
test_values(self):\n ucube, vcube = self.plugin.process(\n self.wind_speed_cube, self.wind_direction_cube\n )\n self.assertArrayAlmostEqual(ucube.data, self.expected_u, decimal=5)\n self.assertArrayAlmostEqual(vcube.data, self.expected_v, decimal=5)", "title": "" }, { "docid": "c93d414e36e7bfb4e14f5e9373d1ade9", "score": "0.532809", "text": "def dv(self) -> float:\n if self.veh_lead:\n return self.vl - self.v_t\n return 0", "title": "" }, { "docid": "a2a558b1be918a36d8266bf61fb60b31", "score": "0.53222656", "text": "def is_transmach(self):\n if self._Flight.mach > 0.8 and self._Flight.mach < 1.2:\n return True\n return False", "title": "" }, { "docid": "a92f0e4b923a3faf75d645d0e71a6620", "score": "0.53173333", "text": "def wind_gust(self):", "title": "" }, { "docid": "d3e58917ad5719daf54657c2b811917e", "score": "0.5302966", "text": "def _pnp_winding_test(self, point):\n px, py = point\n winding_no = 0\n v0_x, v0_y = self[-1]\n v0_above = (v0_y >= py)\n for v1_x, v1_y in self:\n v1_above = (v1_y >= py)\n if v0_above != v1_above:\n if v1_above: # upward crossing\n if ((v1_x - v0_x) * (py - v0_y)\n - (px - v0_x) * (v1_y - v0_y) <= 0):\n # point is right of edge, valid up intersect\n winding_no += 1\n else:\n if ((v1_x - v0_x) * (py - v0_y)\n - (px - v0_x) * (v1_y - v0_y) >= 0):\n # point is left of edge, valid down intersect\n winding_no -= 1\n v0_above = v1_above\n v0_x = v1_x\n v0_y = v1_y\n return winding_no != 0", "title": "" }, { "docid": "d19216d61e79e7e63a6a1d90facc1099", "score": "0.52892596", "text": "def landuse_vacant(self):\n return self.landuse == '11'", "title": "" }, { "docid": "6014b48096ec6301c14f0ea923d1d9fa", "score": "0.52669275", "text": "def is_stationary(self):\n return False", "title": "" }, { "docid": "7cf8a4713cf81f981b66cb9c2f976609", "score": "0.52537155", "text": "def has_variable_days(self):\n return # boolean", "title": "" }, { "docid": "497d1222f0f367d0ce614050d31906e4", "score": "0.525303", "text": "def check_vflag(self, goodflags):\n vflags = self.get_vflag()\n gv = np.in1d(vflags, goodflags)\n print \"%d out of %d objects pass visual inspection.\" % (gv.sum(), len(self.c.d))\n return gv", "title": "" }, { "docid": "54c5570308dff7c1a492b569d4103f76", "score": "0.5232098", "text": "def _handle_wind_variability(identif, d):\n if not d:\n return \"\", \"\"\n lower = d['lower']\n upper = d['upper']\n translation = f\"direction varying between: {lower} and {upper} degree\"\n return translation, d.end()", "title": "" }, { "docid": "9de50906052aeccf92bcc8fda5044b38", "score": "0.5230297", "text": "def isInFieldOfView(self,cameraFov,carAngle):\n lowCameraDetectionRange = -cameraFov/2\n highCameraDetectionRange = cameraFov/2\n if (carAngle>=lowCameraDetectionRange and carAngle<=highCameraDetectionRange):\n return True\n else: return False", "title": "" }, { "docid": "abc70784ec5007bd59af5c06c04ac721", "score": "0.5229411", "text": "def v(self):\n v = self.api.v\n v = self._metadata(v,\n id='v',\n standard_name='northward_wind',\n units='m s**-1',\n long_name='northward component of wind')\n return v", "title": "" }, { "docid": "4a766e6124d6622342620e0de272866d", "score": "0.5210039", "text": "def __bool__(self):\n return _RMF.Vector4s___bool__(self)", "title": "" }, { "docid": "af22e4d62e5e7c37e0c2df9463439363", "score": "0.5206028", "text": "def vz_corrected(field, data):\n halo_velocity_kms = data.ds.halo_velocity_kms\n return data['gas','velocity_z'].in_units('km/s') - halo_velocity_kms[2]", "title": "" }, { "docid": "366504cae6034b76aa01d546d2cc0ada", "score": 
"0.5198178", "text": "def west_valley(parcels):\n in_wv = parcels['mpa'].isin([\n 'AV', 'BU', 'EL', 'GB', 'GL', 'GO', 'LP', 'PE', 'SU', 'TO', 'WI', 'YO'\n ])\n return (parcels['is_MC'] & in_wv).astype(int)", "title": "" }, { "docid": "03543c506b808b2e3ba9a417f9a1549a", "score": "0.51857775", "text": "def isVibrationalStateLabel(label):\n if label[0]!='v':\n return False\n try:\n int(label[1])\n return True\n except IndexError:\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "6568a5ad60a90430dd000a0102c98a79", "score": "0.51792353", "text": "def check_hole_values(self,hole)->bool:\n stones = self.boardgame[hole]\n value = False\n if (stones > 0 and stones<4):\n value= True \n else:\n value = False\n return value", "title": "" }, { "docid": "eeca686ebc0a0595885670f7217b972c", "score": "0.51773477", "text": "def Vorticity(u_wind, v_wind):\n r = 6371.22e6 #Average radius of earth in metres, same as ERA Interim\n\n dvdlon = iris.analysis.calculus.differentiate(v_wind, 'longitude')\n dvdlon.units=None\n\n cos_lats=iris.analysis.cartography.cosine_latitude_weights(u_wind)\n\n u_cos_lats_interp = iris.analysis.interpolate.regrid(iris.analysis.maths.multiply(u_wind, cos_lats), dudlon, mode='bilinear')\n\n dudlat = iris.analysis.calculus.differentiate(u_cos_lats_interp, 'latitude')\n dudlat.units=None\n\n if dudlon.shape != dvdlat.shape:\n dudlat.transpose([1,0,2,3])\n\n\n second_term=iris.analysis.maths.subtract(dvdlon, dudlat)\n\n first_term=1/(r*cos_lats) \n\n return iris.analysis.maths.multiply(second_term, first_term)", "title": "" }, { "docid": "1f74681524cbc150eb7db2ae7106b4a8", "score": "0.5176574", "text": "def is_variable(self):\n return (self.data[\"variability_type\"] == \"P\") or (\n self.data[\"tyc_variable_flag\"] == \"V\"\n )", "title": "" }, { "docid": "30b0b7d483a508d289acb45dd3cfc736", "score": "0.51725864", "text": "def is_electric(self):\n return self.vnEx[0] < self.vnEy[0]", "title": "" }, { "docid": "0e790512c0038564f8d205359391b6e9", "score": "0.51613724", "text": "def met_wvwd_v(wv, wd, undef=-9999.):\n return np.where((wv==undef) | (wd==undef), undef, -wv*np.cos(np.deg2rad(wd)))", "title": "" }, { "docid": "bfb4fa601955ab2e2d637fb1ccb269dd", "score": "0.515436", "text": "def _process_present_wx(self, field):\n # check to see if we got a sky condition field\n for key in self._sky_condition_dict.keys():\n if key in field:\n return 'sky_condition'\n\n self._codes.append(field)\n return 'present_wx'", "title": "" }, { "docid": "a7c5656b73cc63f95ca593159aa90373", "score": "0.51461065", "text": "def IvMeasureDummy(self):\n # print('IvMeasure: volt_target is ', self.volt_target)\n # self.volt_now = self.Ve_obj.AskVolt()*1000\n\n self.AquireParamDummy()\n if self.volt_now == self.volt_target:\n self.is_iv = False\n self.count_iv += 1\n print('End I-V')\n return False\n elif self.volt_now < self.volt_target:\n self.is_iv = True\n self._IncrementVoltDummy()#self.volt_target)#, *largs)\n return True\n else:\n self.is_iv = False\n self.count_iv += 1\n print('End I-V')\n return False", "title": "" }, { "docid": "5b4712dedd2d29677a002039429f3971", "score": "0.51424605", "text": "def is_perfect(self):\r\n return super().is_perfect()", "title": "" }, { "docid": "70948d32fc732dbe483bcfc81f8ebc5d", "score": "0.51376384", "text": "def isInField(px, py, pz):\n ptype, pface = getProximity(px, py, pz)\n if ptype == Proximity.IN_FIELD:\n return True\n return False\n #UNKNOWN = 0\n #INSIDE = 1\n #TOUCHING_INSIDE = 2\n #OUTSIDE = 3\n #TOUCHING_OUTSIDE = 4\n #IN_FIELD = 
5", "title": "" }, { "docid": "29ed4a1991b9aac5bf7136222bb1971c", "score": "0.51347166", "text": "def check_safe_orbit(self):\n pass\n body = self.vehicle.orbit.body\n body_atmo = body.atmosphere_depth\n if self.vehicle.orbit.periapsis_altitude > body_atmo and self.vehicle.orbit.apoapsis_altitude > body_atmo:\n return True\n return False", "title": "" }, { "docid": "78ea8a6a34d61f26a9f41413dab3c471", "score": "0.51192117", "text": "def is_Coord_Inside_Vacuum(self, x, y, z):\n if self.inputAngleTilt == self.outputAngleTilt == 0.0: # drift is a simple cylinder\n return 0 <= x <= self.L and np.sqrt(y ** 2 + z ** 2) < self.ap\n else:\n # min max of purely cylinderical portion of drift region\n xMinCylinder = abs(np.tan(self.inputAngleTilt) * self.ap)\n xMaxCylinder = self.L - abs(np.tan(self.outputAngleTilt) * self.ap)\n if xMinCylinder <= x <= xMaxCylinder: # if in simple straight section\n return np.sqrt(y ** 2 + z ** 2) < self.ap\n else: # if in the tilted ends, our outside, along x\n xMinDrift, xMaxDrift = -xMinCylinder, self.L + abs(np.tan(self.outputAngleTilt) * self.ap)\n if not xMinDrift <= x <= xMaxDrift: # if entirely outside\n return False\n else: # maybe it's in the tilted slivers now\n slopeInput, slopeOutput = np.tan(np.pi / 2 + self.inputAngleTilt), np.tan(\n np.pi / 2 + self.outputAngleTilt)\n yInput = slopeInput * x\n yOutput = slopeOutput * x - slopeOutput * self.L\n if ((slopeInput > 0 and y < yInput) or (slopeInput < 0 and y > yInput)) and x < xMinCylinder:\n return np.sqrt(y ** 2 + z ** 2) < self.ap\n elif ((slopeOutput > 0 and y > yOutput) or (slopeOutput < 0 and y < yOutput)) and x > xMaxCylinder:\n return np.sqrt(y ** 2 + z ** 2) < self.ap\n else:\n return False", "title": "" }, { "docid": "6d581c58ef1227b0c53d66009a4160d6", "score": "0.51104534", "text": "def isTrivial(self):\n return self.totalV == 1", "title": "" }, { "docid": "c61ee525ce1353a8ef4913343a7990f7", "score": "0.5104581", "text": "def is_sunglint(self):\n\n for s in self.sunglint_distance:\n if s < self.sunglint_threshold:\n return True\n return False", "title": "" }, { "docid": "385f1ff5e56342b57a177abe48d4fc90", "score": "0.5099794", "text": "def is_day():\n lux = sensor.Lux\n print ('Lux: %d'%lux)\n if lux>10:\n return True\n else:\n return False", "title": "" }, { "docid": "c32be377ad7bca08e2fbb46160246bf3", "score": "0.50939894", "text": "def is_unit_vector(self):\n return self.magnitude() == 1", "title": "" }, { "docid": "f18fb36dfd849c92c3842aab5ea654e3", "score": "0.5086916", "text": "def verify_safe_orbit(self, value):\n self.check_safe_orbit_bool = str2bool(value)", "title": "" }, { "docid": "181baf2ca334cb9d1c96b8b6c515b736", "score": "0.5084921", "text": "def solved(self):\n return self.filled == 81", "title": "" }, { "docid": "718bb24b3d5963f782e120022d003d36", "score": "0.5082402", "text": "def is_fixed(self):\n return True", "title": "" }, { "docid": "718bb24b3d5963f782e120022d003d36", "score": "0.5082402", "text": "def is_fixed(self):\n return True", "title": "" }, { "docid": "b6b46fd89606e9c08e0ff7a24972fa7b", "score": "0.508212", "text": "def _get_pol_flag(fhc_vars):\n if fhc_vars['DR'] or fhc_vars['KD'] or fhc_vars['LD'] or fhc_vars['RH']:\n pol_flag = True\n else:\n pol_flag = False\n return pol_flag", "title": "" }, { "docid": "76c02c01b397b2a394cd131975e9774a", "score": "0.50788397", "text": "def bldgclass_vacant(self):\n return self.bldgclass.lower().startswith('v')", "title": "" }, { "docid": "14cca0a1f0607ab283978e1f8d191492", "score": "0.50780386", "text": "def in_field(self, x, 
y):\n if -self.x_half < x < self.x_half and -self.y_half < y < self.y_half:\n return True\n else:\n return False", "title": "" }, { "docid": "3400d05787b745fe9ade42432eda480e", "score": "0.50718796", "text": "def valid(v): # this is a var, return true if it's valid.\n if v.cat == \"Continuous\":\n return False\n if var_type(v) == \"coed\":\n return False\n return True", "title": "" }, { "docid": "3c6b1bf4677a58b91e61f55edb6a61b8", "score": "0.5068221", "text": "def compare_clearance(self) -> bool:\n post_c = self.convert_clearances(self.post.clearance)\n fs_c = self.convert_clearances(self.fast_streamer.clearance)\n if self.post.clearance == 'DV' and self.fast_streamer.preferences.will_undertake_dv is True:\n r = True\n else:\n r = post_c <= fs_c\n return r", "title": "" }, { "docid": "c0d58cd023b626604444c47b48962d87", "score": "0.5065542", "text": "def is_solved(fields):\n for field in fields:\n if field.number == 0:\n return False\n return True", "title": "" }, { "docid": "875fa12052a5035882f3ac59cc4577f8", "score": "0.5062963", "text": "def test_build_voltage_divider(design_goals):\n vd = vdr.build_voltage_divider(design_goals, 2000, 470)\n\n vin_Ok = vd.vin == design_goals.vin \n v1_OK = vd.v1 == 20.4 \n v2_Ok = vd.v2 == 4.8 \n p1_Ok = abs(vd.pow1_mw - 208) <= 1 \n p2_Ok = abs(vd.pow2_mw - 49) <= 1 \n a2d_Ok = abs(vd.a2d - 981) <= 1\n assert vin_Ok and v1_OK and v2_Ok and p1_Ok and p2_Ok and a2d_Ok", "title": "" }, { "docid": "df5772c69af6aebf49bd64be4e474b27", "score": "0.5062817", "text": "def is_in_a_vehicle(self):\n return self.playerinfo.is_in_a_vehicle()", "title": "" }, { "docid": "9711c6c798d69b31ab2bb4d8c6a0f3ba", "score": "0.50594175", "text": "def is_conditioned(self) -> bool:\n return self.vis_x is not None", "title": "" }, { "docid": "7dfc3de6597fd9b34b222cd4b2ceace7", "score": "0.5058578", "text": "def _winding_result(self, e_total, lattice, direct):\n e_result_0 = np.zeros(len(e_total), dtype=bool)\n e_result_1 = np.zeros(len(e_total), dtype=bool)\n for i, e in enumerate(e_total):\n if len(e) == 0:\n continue\n winding = self._winding_number(e, lattice, direct)\n if winding[0] % 2 == 1:\n e_result_0[i] = True\n if winding[1] % 2 == 1:\n e_result_1[i] = True\n return e_result_0, e_result_1", "title": "" }, { "docid": "e441704228a9945ae1b418c7592db8b9", "score": "0.5058546", "text": "def get_israw(self):\n return (not self.filtmeth and\n not self.car and\n self.sampfreq == self.rawsampfreq and\n not self.shcorrect)", "title": "" }, { "docid": "b45582437a8c9108ae441f1d46020c88", "score": "0.5053547", "text": "def is_fixed(self):\n return False", "title": "" }, { "docid": "45b078685534442fa9f32152113486a6", "score": "0.5053439", "text": "def fireInMyFOV(self):\r\n\t\tif not self.model.FireControl.fireMovements:\r\n\t\t\treturn False\r\n\t\tif not self.movements:\r\n\t\t\treturn False\r\n\t\tfor firePos in self.model.FireControl.fireMovements:\r\n\t\t\tif firePos in self.movements[self.N:] and self.posInMyFOV(firePos):\r\n\t\t\t\treturn True\r\n\t\treturn False", "title": "" }, { "docid": "1c0360c5bb1aba973dfb4236fed46596", "score": "0.505291", "text": "def test_is_haz_district():\n expected_output_is_haz_district = 1\n output_is_haz_district = is_haz_district(50)\n assert output_is_haz_district == expected_output_is_haz_district, \\\n \"\"\"Should show that field has value of 1 if district is value 50.\"\"\"", "title": "" }, { "docid": "ff38e52c99a30a07afcafd4404596bce", "score": "0.5048003", "text": "def isConscious(self):\n\t\tif self.stamina > 0:\n\t\t\treturn 
True\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "e0a6c7f14653f52ccb9df07b3359ef02", "score": "0.50317824", "text": "def test(self, w: WME) -> bool:\n for f in ['identifier', 'attribute', 'value']:\n v = getattr(self, f)\n if is_var(v):\n continue\n if v != getattr(w, f):\n return False\n return True", "title": "" }, { "docid": "3ad0f9d858639410cb18c56c1de69862", "score": "0.50296104", "text": "def checkInvariants(form_fields, form_data):", "title": "" }, { "docid": "91fa8fbef04731a69de7d62e5ff4ee28", "score": "0.5027952", "text": "def _calc_vpd_or_vp(self, df):\n df = df.rename(columns=self.inv_map)\n\n # make sure day intervals are hourly or less if not skip\n second_day = df.index.date[2]\n third_day = second_day + pd.Timedelta(1, unit='D')\n # both days start at 00:00:00, don't duplicate\n times_in_day = len(df.loc[str(third_day)].index) \n if times_in_day < 24:\n print('Temporal frequency of data > hourly cannot calculate VP/VPD')\n return\n\n for v in ['vp', 'vpd', 't_avg']:\n u = self.units.get(v)\n if u:\n self.units[v] = u = u.lower()\n\n if u and not u in Data.allowable_units[v]:\n print('ERROR: {} units are not recognizable for var: {}\\n'\n 'allowable input units are: {}\\nNot converting.'.format(\n u, v, ','.join(Data.allowable_units[v])\n )\n )\n elif u and not u == Data.required_units[v]:\n # do conversion, update units\n # pass variable, initial unit, unit to be converted to, df\n df = Convert.convert(v, u, Data.required_units[v], df)\n self.units[v] = Data.required_units[v]\n\n # calculate vpd from actual vapor pressure and temp\n # check if needed variables exist and units are correct\n has_vpd_vars = set(['vp','t_avg']).issubset(df.columns)\n units_correct = (\n self.units.get('vp') == 'kpa' and self.units.get('t_avg') == 'c'\n )\n if has_vpd_vars and units_correct:\n print(\n 'Calculating vapor pressure deficit from vapor pressure and '\n 'air temperature'\n )\n # saturation vapor pressure (es)\n es = 0.6108 * np.exp(17.27 * df.t_avg / (df.t_avg + 237.3))\n df['vpd'] = es - df.vp\n df['es'] = es\n self.variables['vpd'] = 'vpd'\n self.units['vpd'] = 'kpa'\n self.variables['es'] = 'es'\n self.units['es'] = 'kpa'\n\n # same calc actual vapor pressure from vapor pressure deficit and temp\n has_vp_vars = set(['vpd','t_avg']).issubset(df.columns)\n units_correct = (\n self.units.get('vpd') == 'kpa' and self.units.get('t_avg') == 'c'\n )\n\n if has_vp_vars and units_correct:\n print(\n 'Calculating vapor pressure from vapor pressure deficit and '\n 'air temperature'\n )\n # saturation vapor pressure (es)\n es = 0.6108 * np.exp(17.27 * df.t_avg / (df.t_avg + 237.3))\n df['vp'] = es - df.vpd\n df['es'] = es\n self.variables['vp'] = 'vp'\n self.units['vp'] = 'kpa'\n self.variables['es'] = 'es'\n self.units['es'] = 'kpa'\n\n if not 'rh' in self.variables and {'vp','es'}.issubset(self.variables):\n if not self.units.get('vp') == 'kpa': pass\n else:\n print(\n 'Calculating relative humidity from actual and saturation '\n 'vapor pressure and air temperature'\n )\n df['rh'] = 100 * (df.vp / df.es)\n self.variables['rh'] = 'rh'\n self.units['rh'] = '%'\n \n if 'vp' in self.variables and self.units.get('vp') == 'kpa':\n print(\n 'Calculating dew point temperature from vapor pressure'\n )\n df['t_dew'] = (-1 / ((np.log(df.vp/.611) / 5423) - (1/273)))-273.15\n self.variables['t_dew'] = 't_dew'\n self.units['t_dew'] = 'c'\n\n self._df = df", "title": "" }, { "docid": "81b5b3a3bca8588dd0b98d19ddddb6d7", "score": "0.50275457", "text": "def vy_corrected(field, data):\n 
halo_velocity_kms = data.ds.halo_velocity_kms\n return data['gas','velocity_y'].in_units('km/s') - halo_velocity_kms[1]", "title": "" }, { "docid": "9b13d20504cf38cfbb98abd6f5ca73cd", "score": "0.50233555", "text": "def wayPointFaced(tolerance):\n if ((abs(targetInDrone.orientation.z)) < tolerance):\n return True\n return False", "title": "" }, { "docid": "bf86b0230a457c9839f40c0041c13211", "score": "0.50135773", "text": "def isWon(nova, window):\n\n if nova.dist >= 200:\n global msg\n \n msg = \"Nova won! She made it home safely thanks to your help!\"\n return True", "title": "" }, { "docid": "08d633bc7463ddd22fb28f39e09deb30", "score": "0.50082034", "text": "def test_vrad_disp_from_lookup():\n nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))\n\n c15 = np.zeros(10)\n scaled_radius = np.logspace(-2, 0, len(c15))\n vr_disp = nfw._vrad_disp_from_lookup(scaled_radius, c15, seed=43)\n\n assert np.all(vr_disp < 1)\n assert np.all(vr_disp > 0)", "title": "" }, { "docid": "960e87a7fae744513cf7dbbcf2cf8214", "score": "0.49994993", "text": "def has_vrf(self):\r\n return self.__has_vrf", "title": "" }, { "docid": "c33fa43c4475c69e2645ff1e2c598270", "score": "0.49991673", "text": "def checkWin(self):\n return self.nhidden==self.nbomb", "title": "" }, { "docid": "bc688cd903b0cb47de9bc3f419806ce3", "score": "0.4996402", "text": "def check_winner(self):\n\t\treturn self.word == self.progress", "title": "" }, { "docid": "bc359c979630fcf494ddba50de16d34d", "score": "0.49934444", "text": "def getValueBoolean(self):\r\n return self.variable.get() == 1", "title": "" }, { "docid": "708768933ee179a5ea3a9ba103fc778b", "score": "0.49827856", "text": "def is_on(self):\n return self._get_car_value(self._feature_name, self._object_name, self._attrib_name, False)", "title": "" }, { "docid": "fe8eeeca74b51f8bfca024e64f01703e", "score": "0.49770153", "text": "def is_non_inverting(self):\n return False", "title": "" }, { "docid": "ce3134343308039764693b1849a73d99", "score": "0.4976267", "text": "def met_uv_wv(u, v, undef=-9999.):\n return np.where((u==undef) | (v==undef), undef, np.sqrt(u*u + v*v))", "title": "" }, { "docid": "77d4afbbf83caebbd68a5a09f47baa45", "score": "0.4967592", "text": "def is_vector_voxel(self):\n ret_val = self._is_vector_voxel()\n return ret_val", "title": "" }, { "docid": "8fdf6bdd1b581138e07103cae5cf78b4", "score": "0.4964411", "text": "def is_filled(self):\n len_vec = len(self.vertices[0])\n # Insert first vertices at end to get last angle in loop\n verts = np.concatenate((self.vertices,np.reshape(self.vertices[0:,0],(2,1))),axis = 1)\n # Calculate winding number\n winding_num = 0\n for i in range(len_vec):\n winding_num += (verts[0][i] - verts[0][i+1])*(verts[1][i] + verts[1][i+1])\n if winding_num >= 0:\n flag = True #counterclockwisse\n else:\n flag = False #clockwise\n return flag", "title": "" }, { "docid": "d34855ecb40ba0893cd35cdd51878fb6", "score": "0.49643552", "text": "def check_solvability(self, state):\n inversion = 0\n for i in range(dimention[0]):\n if (state[i].index('V') or state[i].index('J')):\n return 1\n return 0", "title": "" }, { "docid": "b0c3d9252f03aa502949f1d8532ffef2", "score": "0.49633452", "text": "def isOn(self):\n if self.Value:\n return True\n return False", "title": "" }, { "docid": "7ae9ee7b209e3acfacb5d03953f41e7f", "score": "0.4961524", "text": "def test_comp_angle_wind_eq(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_angle_wind_eq()\n\n a = result\n b = test_dict[\"Aw\"]\n msg = \"Return \" + str(a) + \" 
expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "title": "" }, { "docid": "f00b21f0f4ad059a750eab1f1ecfb465", "score": "0.49582747", "text": "def vadFakeFalse_event(self, key, value):\n\t\tif self.conn_man.acceptVAD:\n\t\t\tprint \"vadFakeFalse\"\n\t\t\tself._vad_detected = True\n\t\t\tself._vad_correct = False", "title": "" }, { "docid": "65929ae4417aff4f58a0beb2b1e2b37f", "score": "0.4951464", "text": "def admissible(self, v, omg, dist_obs_min):\n if np.abs(v) > self.vel_max[0] or np.abs(omg) > self.vel_max[1]:\n return False\n\n if dist_obs_min is None:\n return True\n\n if np.abs(v) > np.sqrt(2.0 * dist_obs_min * self.acc_max[0]) or \\\n np.abs(omg) > np.sqrt(2.0 * dist_obs_min * self.acc_max[1]):\n return False\n return True", "title": "" }, { "docid": "b62727b6463d197d1a22aa61bcd4c700", "score": "0.49365568", "text": "def is_place(result):\n return (result <= 3)*1", "title": "" }, { "docid": "bfa283897f28665fd17978788f22de49", "score": "0.49301007", "text": "def is_legal(v):\n legal = not torch.isnan(v).any() and not torch.isinf(v)\n\n return legal", "title": "" }, { "docid": "10ed659e88d76c16a8c08cf0686aa763", "score": "0.49266908", "text": "def bool_value(self):\n if self.value:\n if self.value.lower() in self.MEANS_YES:\n return True\n return False\n return None", "title": "" }, { "docid": "daeb9af78b18efcfacd15cf252be59bf", "score": "0.49207944", "text": "def is_on(self) -> bool:\n return bool(self.appliance.get_erd_value(self.erd_code))", "title": "" } ]
c9ba8f4bfdaae302393c82e504eb69bf
Test that the string is correctly parsed.
[ { "docid": "b967bf926cc3d869cb6e8c5e13feb784", "score": "0.0", "text": "def test_parse_string_key_val(key: str, value: str, enclosing: str):\n enclosed_value = enclosing.format(value)\n bibtex_str = f\"\"\"@string{{{key} = {enclosed_value}}}\"\"\"\n library: Library = Splitter(bibtex_str).split()\n\n assert len(library.failed_blocks) == 0\n assert len(library.strings) == 1\n assert library.strings[0].key == key\n assert library.strings[0].value == enclosed_value\n assert library.strings[0].start_line == 0", "title": "" } ]
[ { "docid": "68bcb075ee9023bc6504ab3b98c0912c", "score": "0.8762374", "text": "def __test_parse_string():", "title": "" }, { "docid": "f21244c00c5bec4a58b7f54687975541", "score": "0.74926984", "text": "def test_parse_str(self):\n\n obj = self.parser.parse_str(STR)\n self.assertTrue(isinstance(obj, parser.String))\n self.assertEqual(obj.value, STR_VALUE)", "title": "" }, { "docid": "49c802de9c61ec594b983a46e213abe1", "score": "0.74551547", "text": "def test89(self):\n input = \"\"\"\n Var: b = \"string ;\n \n \"\"\"\n expect = \"string ;\"\n self.assertTrue(TestParser.checkParser(input,expect,189))", "title": "" }, { "docid": "231a5d5a01d6223943bb7678300d6969", "score": "0.74400645", "text": "def test96(self):\n input = \"\"\"\n Function: main\n Body:\n Var: str = \"This string have \\\\t\";\n Var: str = \"This string have \\\\f\";\n Var: str = \"This string have \\\\r\";\n Var: str = \"This string have \\\\b\";\n Var: str = \"This string have \\\\\\\\\";\n Var: str = \"This string have \\\\'\";\n Var: str = \"This string have '\"\";\n EndBody.\n \"\"\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,196))", "title": "" }, { "docid": "6d594a06e49b15e25d567f87182a6664", "score": "0.7415696", "text": "def test_parser(self):\n data = \"la vie est belle à Montpellier\"\n result = __parser__(data)\n self.assertEqual(result, \"lavieestbelleàMontpellier\")\n self.assertIs(result, str)", "title": "" }, { "docid": "bd1ad805d5d018dd7709e4410310e0ef", "score": "0.7397948", "text": "def test2(self):\n input = \"Var:a=0,9=6;\"\n expect = \"Error on line 1 col 8: 9\"\n self.assertTrue(TestParser.checkParser(input,expect,102))", "title": "" }, { "docid": "8f02e9c00fc5fa9556e8405807f14dc5", "score": "0.7336645", "text": "def assertParsed(self, s, termstr):\n self.assertEqual(str(parse_term(s)), termstr)", "title": "" }, { "docid": "22fa06162ed89790ed047883065ba6d1", "score": "0.7317265", "text": "def test_can_parse(self):\n self.assertEqual(self.parser.can_parse('123,456'), 'KUNDT')\n self.assertEqual(self.parser.can_parse('123,45f'), False)\n self.assertEqual(self.parser.can_parse('KUNDT,123,456'), False)\n self.assertEqual(self.parser.can_parse('123,456,789'), False)\n self.assertEqual(self.parser.can_parse('foobar'), False)", "title": "" }, { "docid": "ee47d83e662f97b9dfa71392cb6b4bec", "score": "0.7241336", "text": "def test_parse_string():\n\n input = \"\"\"..##\n..##\n....\n....\"\"\"\n\n expected = [\n [0, 0, 1, 1],\n [0, 0, 1, 1],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n ]\n\n actual = parse_string(input)\n assert expected == actual", "title": "" }, { "docid": "00ba178815a5af5de98b56730e80b6f8", "score": "0.7230969", "text": "def __test_messy_should_be_string():", "title": "" }, { "docid": "b99128434ccf074b4ae3ed118d3bae2a", "score": "0.7180927", "text": "def test_parser(infix, expected):\n assert str(parse(infix)) == expected", "title": "" }, { "docid": "f02ba5a25b1c25c87eb39ea7bf77c147", "score": "0.71661484", "text": "def test_parse_contact_string(input, expected):\n result = utils.parse_contact_string(input)\n assert result == expected", "title": "" }, { "docid": "54bd602e1332d6b1a5e4cae8e31c6a97", "score": "0.71656555", "text": "def test27(self):\n input = \"\"\"\n Var: a = {\"String\",True};\n \"\"\"\n expect = \"Error on line 2 col 17: {\"\n self.assertTrue(TestParser.checkParser(input,expect,127))", "title": "" }, { "docid": "4707121bbd0885734512474a0fe83d20", "score": "0.71257603", "text": "def test95(self):\n input = \"\"\"\n ** hello *\n \"\"\"\n expect = \"\"\n 
self.assertTrue(TestParser.checkParser(input,expect,195))", "title": "" }, { "docid": "da76c6918995304543f55e8a2a3622b8", "score": "0.7082847", "text": "def test_parse():", "title": "" }, { "docid": "b85f00a61992b3fe2c00c25d50063e9b", "score": "0.6998487", "text": "def test_parse(self):\r\n parser = FormParser()\r\n\r\n stream = StringIO(self.string)\r\n data = parser.parse(stream)\r\n\r\n self.assertEqual(Form(data).is_valid(), True)", "title": "" }, { "docid": "40698cc8fe9105f2029b4df8828194ca", "score": "0.69221747", "text": "def test_normalString(self):\n self._parseTestCase(\n input=' SoMe NaMe <SoMeNaMe@example.com>',\n display=u'SoMe NaMe',\n email=u'somename@example.com',\n anyDisplayName=u'SoMe NaMe',\n pseudoFormat=u'SoMe NaMe <somename@example.com>',\n localpart=u'somename',\n domain=u'example.com')", "title": "" }, { "docid": "c4ac1b841eb5c13eaa11e7e337e9bde3", "score": "0.69147366", "text": "def test3(self):\n input = \"Var:a=1,b=2,c=3;\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,103))", "title": "" }, { "docid": "1668d4be825d2ac6d229bd6e6ee4f41e", "score": "0.69017714", "text": "def test_parsing_case():\n t2 = Tokens(r'string_value:\"\\x07\\x08\\x0c\\n\\r\\t\\x0b\\\"' \"'\" r'\\\\\"')\n # print(f\"Parsing Go &{{string_value:{t2.text}}}\")\n actual = list(t2)\n parsed_value = (r'\"\\x07\\x08\\x0c\\n\\r\\t\\x0b\\\"' \"'\" r'\\\\\"')\n expected = [Token(type='NAME', value='string_value'), Token(type='PUNCTUATION', value=':'), Token(type='STRING', value=parsed_value)]\n assert actual == expected, f\"{actual!r} != \\n{expected!r}\"", "title": "" }, { "docid": "27a470b27d58cd25ce72438dea790066", "score": "0.6883777", "text": "def test61(self):\n input = \"\"\"\n Function: main\n Body:\n foo(1);\n fib(2+42+324\\\\234*24332);\n remove(3);\n get(4);\n EndBody.\n \"\"\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,161))", "title": "" }, { "docid": "f211c93ad69192158942a4c5a3c71158", "score": "0.6867676", "text": "def test_string(value):\n\n # Checking if the string is an empty string\n assert len(value) > 0\n # Checking if there is any other character in input string other than integers\n assert not re.findall(r'[-+]?[0-9]+', value)", "title": "" }, { "docid": "b62c96a00d95e667df76459d60ad634f", "score": "0.6860701", "text": "def test_valid(self):\n result = self.lookup.parseString('CD2')\n self.assertEqual('CD2', result[0])", "title": "" }, { "docid": "f468c5d071979fbbfb1a68721691d9ee", "score": "0.6855407", "text": "def test_try_parse_with_string(self):\n root = _try_parse(self.xml)\n self.assertEqual(\"a\", root.tag)", "title": "" }, { "docid": "03770a88d8db5edd0515ecb37b0307b7", "score": "0.6830347", "text": "def test(cls):\n parse(\"\"\"Let's try plain text first.\n \n \"\"\", cls)", "title": "" }, { "docid": "5617f13873cad2c5d6eaeb530966e1be", "score": "0.68014884", "text": "def test(cls):\n # What should work\n parse(\".\", cls)\n parse(\"/\", cls)\n parse(\"?\", cls)\n parse(\"|\", cls)\n\n # What should not work\n testFail(\"||\", cls)\n testFail(\" OK[\", cls)\n testFail(\" \", cls)\n testFail(\"<<\", cls)\n testFail(\"Uh-huh, this text does'nt mean anything. 
[[\", cls)\n testFail(\"}}\", cls)", "title": "" }, { "docid": "adb0147d37449ba024384fda1abeb9f7", "score": "0.67198104", "text": "def test50(self):\n input = \"\"\"\n Var: x[12.e3]=1;\n \"\"\"\n expect = \"Error on line 2 col 15: 12.e3\"\n self.assertTrue(TestParser.checkParser(input, expect, 250))", "title": "" }, { "docid": "60fb9c56fb89bbe97566620e7ad27cbd", "score": "0.6701917", "text": "def test_from_string(self) -> bool:\n self.test_counter += 1\n failed: bool = False\n # Parsing a string to create a number\n list_valid = [\n \"0\", \"0.\", \"0.1\", \"0.01\", \"1\", \"12\", \"-0\", \"-0.\", \"-0.1\", \"-0.01\", \"-1\", \"-12\",\n \"12.34\", \"-12.34\", \"3.141592653589793238462643383279\", \"123456789012345\", \"98765.43210\"\n ]\n list_invalid = [\n \"1..4\", \"-\", \"1.-4\", \"--5\", \"0..\", \"12a345\", \"7O\"\n ]\n for n in list_invalid:\n if not self.assertRaises(DecimalNumberExceptionParseError, lambda: DecimalNumber(n)):\n failed = True\n for n in list_valid:\n number = DecimalNumber(n)\n return failed", "title": "" }, { "docid": "204d1bbc8f84999a4f4b87576531de2f", "score": "0.66967094", "text": "def test_empty(self):\n self.assertRaises(ParseException, self.lookup.parseString, '')", "title": "" }, { "docid": "fb84062997e4c041b1c69193daed0051", "score": "0.66822815", "text": "def test_parselocation_normal(location: str, expected: Location) -> None:\n assert parselocation(location) == expected", "title": "" }, { "docid": "74359dc50d34736ed77483f8a43e44d3", "score": "0.66761255", "text": "def test26(self):\n input = \"\"\"\n Var: a = {True,False};\n Var: b = {\"String1\",\"String2\"};\n Var: c = {9.23,8.5};\n Var: d = {2,1,4};\n \"\"\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,126))", "title": "" }, { "docid": "a59f591e62822ee8475286fbb66c59b6", "score": "0.6667585", "text": "def test_parser():", "title": "" }, { "docid": "f82d02b36cb17be9fa765d837265ce2b", "score": "0.66661423", "text": "def test49(self):\n input = \"\"\"\n Var: x[]=1;\n \"\"\"\n expect = \"Error on line 2 col 15: ]\"\n self.assertTrue(TestParser.checkParser(input, expect, 249))", "title": "" }, { "docid": "5f36ecc2184c518801d19cd12d684079", "score": "0.6658921", "text": "def test_raises_on_parser_error(self):\r\n\r\n self.assertRaises(ubjspy.Exception, ubjspy.loads, b']')", "title": "" }, { "docid": "dc22eabb74faa8301cf4af04c0b966f0", "score": "0.66564053", "text": "def test_parse_string(self):\n bb = parse(antlr4.InputStream(test_file))\n\n assert bb._var == {\"alpha\": 0.3423}\n\n expected = {\"name\": \"fock\", \"options\": {\"num_subsystems\": 1, \"cutoff_dim\": 7, \"shots\": 10}}\n assert bb.target == expected\n\n expected = [\n {\"op\": \"Coherent\", \"args\": [0.3423, np.sqrt(np.pi)], \"kwargs\": {}, \"modes\": [0]},\n {\"op\": \"MeasureFock\", \"args\": [], \"kwargs\": {}, \"modes\": [0]},\n ]\n\n assert bb.operations == expected", "title": "" }, { "docid": "96c63cd52627aa3c22e839c6a967a114", "score": "0.66557306", "text": "def checkParsed(self, input, expected, beExtremelyLenient=1):\n output = microdom.parseString(input,\n beExtremelyLenient=beExtremelyLenient)\n self.assertEqual(output.documentElement.toxml(), expected)", "title": "" }, { "docid": "7d863ebbbb371feaa68ff19a25a78f4a", "score": "0.66507256", "text": "def test(cls):\n # What should work\n parse(\" Testing with no special terminator\", cls)\n parse(\" OK DOKE \", cls)\n\n # What should not work\n testFail(\" OK DOKE[[\", cls)\n testFail(\" OK DOKE]]\", cls)\n testFail(\" OK DOKE. 
<<\", cls)\n testFail(\"Uh-huh, this text does'nt mean anything. [[\", cls)\n testFail(\" OK DOKE}}\", cls)\n testFail(\" OK DOKE\\n NOT! \", cls)", "title": "" }, { "docid": "06bbba7e8a48ff0abdde38d32cd28cd6", "score": "0.6632215", "text": "def test_parse_correct(input):\n rule = github_issues_bot.Rule.parse(input)\n assert rule", "title": "" }, { "docid": "23c32981c94dc5d3db30dbb3e3f80e18", "score": "0.6611889", "text": "def test_invalid_wrong_type(self):\n record = 'AAAAA000001234NAME OF THE COMPANY 01.102012011512300020121102U+0123 '\n\n self.assertRaises(ParseException, self.grammar.parseString, record)", "title": "" }, { "docid": "6da708f36ffb3f4b91c76724220f45d0", "score": "0.6609234", "text": "def test(cls):\n # What should work\n parse(\"##\\n\", cls)\n parse(\"## \\n\", cls)\n parse(\"## I sing the body ...\\n\", cls)\n parse(\"## page was renamed from Admin/Disk Quotas\\n\", cls)\n\n # What should not work\n testFail(\"# Not a comment\", cls)\n testFail(\"#format\\n\", cls)", "title": "" }, { "docid": "e90eae07e6d3f47ffde2243068dd5c9f", "score": "0.66081274", "text": "def test_error_string_with_quotes(self):\n self.assertEqual(len(validate(\"test_basic_errors_with_colon.html\")), 3)", "title": "" }, { "docid": "9fae4740bda37f8bfca282f36ca0fdf0", "score": "0.6562068", "text": "def test_str_well_formed(self):\n doc = ET.fromstring(str(self.tested))\n NS_ODM = '{http://www.cdisc.org/ns/odm/v1.3}'\n self.assertEqual(doc.tag,NS_ODM + \"ODM\")\n self.assertEqual(doc.attrib[\"Originator\"], \"MY TEST SYSTEM\")\n self.assertEqual(doc.attrib[\"Description\"], self.tested.description)", "title": "" }, { "docid": "d4abfaac9f6da73283d582d3ee2095c3", "score": "0.65471566", "text": "def test_invalid(self):\n self.assertRaises(ParseException, self.lookup.parseString, 'AEI')", "title": "" }, { "docid": "b3659e3929f001f7e626c904764a1f8c", "score": "0.652516", "text": "def test_parse_str_space(self):\n\n obj = self.parser.parse_str(STR_SPACE)\n self.assertTrue(isinstance(obj, parser.String))\n self.assertEqual(obj.value, STR_SPACE_VALUE)", "title": "" }, { "docid": "b7afb5d2f37ae549ef9a510d5b2440a1", "score": "0.6519682", "text": "def test_parse_unit_valid():\n meter = parse_unit('m')\n assert str(meter) == 'meter'", "title": "" }, { "docid": "a8946d67d3b1c61ecabef5c64b671380", "score": "0.65182674", "text": "def __test_parse_int():", "title": "" }, { "docid": "66047689867066d079e6f287118f5b82", "score": "0.6511736", "text": "def test_check_string(self):\n\n name_pairs = {\n 'foo': 'bar'\n }\n\n self.assertEqual(check_string(\"'foo'\", name_pairs), \"'bar'\")\n self.assertEqual(check_string('\"foo\"', name_pairs), '\"bar\"')\n self.assertEqual(check_string('r\"foo\"', name_pairs), 'r\"foo\"')", "title": "" }, { "docid": "7f840de8a18b20cf85073236b9fa4896", "score": "0.65036124", "text": "def test_invalid_expression_1(self):\n input = \"\"\"void func(){\n int a;\n i+i;\n a-a;\n z*z+3;\n }\n \"\"\"\n expect = \"successful\"\n \n self.assertTrue(TestParser.checkParser(input,expect,304))", "title": "" }, { "docid": "787c4e680ab7b883a661fcff1f6e409c", "score": "0.65023094", "text": "def parse(self, string):\n raise NotImplementedError()", "title": "" }, { "docid": "a5ba80a72f44ba775c5abc40377e4698", "score": "0.64980453", "text": "def test_spaces_letters(self):\n self.assertRaises(ParseException, self.date.parseString, '201211XV')", "title": "" }, { "docid": "ac44e40a91d6a517ca89a8f07f9e37f9", "score": "0.6495341", "text": "def test_string( self ):\n\t\tfor source,expected in 
[\n\t\t\t(\n\t\t\t\t'.1.2.3.4.5',\n\t\t\t\toid.OID.fromNumeric( (1,2,3,4,5)),\n\t\t\t),\n\t\t\t(\n\t\t\t\t'.1.somewhere(2).3.this(4).5',\n\t\t\t\toid.OID.fromNumeric( (1,2,3,4,5)),\n\t\t\t),\n\t\t\t(\n\t\t\t\t'...1. somewhere(2). \\n3.this(4).5',\n\t\t\t\toid.OID.fromNumeric( (1,2,3,4,5)),\n\t\t\t),\n\t\t\t(\n\t\t\t\t'1.... somewhere(2). \\n3.this(4).5..',\n\t\t\t\toid.OID.fromNumeric( (1,2,3,4,5)),\n\t\t\t),\n\t\t]:\n\t\t\tassert oid.OID( source ) == expected, (source,expected)", "title": "" }, { "docid": "2448dfdb1d2f716b76509247ea2cda85", "score": "0.64891833", "text": "def test_empty(self):\n record = ''\n\n self.assertRaises(ParseException, self.grammar.parseString, record)", "title": "" }, { "docid": "0e6169cacd8128852f237697abce5d73", "score": "0.647868", "text": "def test13_parser_test(self):\n\t\toutput, expected = parser_helper(\"test13\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "4c3d770d1837d973669976891dfd29e6", "score": "0.6477336", "text": "def is_correct_string(line):\n if type(line) != str:\n return False\n try:\n line.encode()\n except ValueError:\n return False\n return MakeRE.prog.match(line)", "title": "" }, { "docid": "88bf7a660efe8a25de0642982859b07c", "score": "0.6476138", "text": "def test_preprocess_input(self):\n analyser = Analyser(None)\n test = analyser.preprocess_input('lol. max/keksasdasd>?<\".<WOW><12')\n self.assertIsNot(None, test)\n self.assertEqual(test, \"lol max'keksasdasd'WOW'12\")", "title": "" }, { "docid": "2a40c1f56cfa48aee0f5813c1c992479", "score": "0.6475952", "text": "def test_string(self):\n self.assertEqual(lenient_ascii_text(\"test\"), u\"test\")", "title": "" }, { "docid": "f50c431beb3a7bf6b4adb21167c157e2", "score": "0.64741", "text": "def test_valid_full(self):\n record = 'NCT0000123400000023THE TITLE ES'\n\n result = self.grammar.parseString(record)[0]\n\n self.assertEqual('NCT', result.record_type)\n self.assertEqual(1234, result.transaction_sequence_n)\n self.assertEqual(23, result.record_sequence_n)\n self.assertEqual('THE TITLE', result.title)\n self.assertEqual('ES', result.language_code)", "title": "" }, { "docid": "cfb0b3822791c7e68d9e51e4aa1f62b9", "score": "0.6457932", "text": "def test9_parser_test(self):\n\t\toutput, expected = parser_helper(\"test9\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "16492c44554cd9abd0cf0e16c9e6399c", "score": "0.6450215", "text": "def test_valid(self):\n prefix = 'HDR0000123400000023'\n\n result = self.grammar.parseString(prefix)\n\n self.assertEqual('HDR', result.record_type)\n self.assertEqual(1234, result.transaction_sequence_n)\n self.assertEqual(23, result.record_sequence_n)", "title": "" }, { "docid": "34aa944fefa237808ccdc45c0a138385", "score": "0.6445867", "text": "def parse(self, string):\n raise NotImplemented", "title": "" }, { "docid": "4276c99408258756c6c5225a66a6a070", "score": "0.6433644", "text": "def test_str_node(test_node):\n assert str(test_node) == '3'", "title": "" }, { "docid": "776518179cd6d24b4718c446eb6d7a8a", "score": "0.6431392", "text": "def test_from_string(self):\n self.assertEqual(\n binary_type(TryteString.from_string('你好,世界!')),\n b'LH9GYEMHCF9GWHZFEELHVFOEOHNEEEWHZFUD',\n )", "title": "" }, { "docid": "b3da43eb8dcc6187aa85ff6ec3781ea2", "score": "0.64301246", "text": "def test52(self):\n input = \"\"\"Var **some COMMENT**: ****someid = 3\n **more more**;\"\"\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input, expect, 252))", "title": "" }, { "docid": "e63aad93c1c5165362e95e895e7ebb9c", 
"score": "0.6425627", "text": "def test4_parser_test(self):\n\t\toutput, expected = parser_helper(\"test4\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "ab2d2e4ef5fbebbdca94f2574049b116", "score": "0.6412798", "text": "def test_parse_time_string(self):\n self.assertTupleEqual((1, 24, 3), sh.parse_time_string(\"1h24m03s\"))\n self.assertTupleEqual((0, 54, 25), sh.parse_time_string(\"54m25s\"))\n self.assertTupleEqual((0, 0, 45), sh.parse_time_string(\"45s\"))\n\n with self.assertRaisesRegex(BadArgumentError, \"23m\"):\n sh.parse_time_string(\"23m\")", "title": "" }, { "docid": "7e72ef3acaa57d14f52e5f9c8e075624", "score": "0.6410967", "text": "def test98(self):\n input = \"\"\"\n Function: main\n Body:\n If i == 12 Then\n i += 1\n EndIf.\n EndBody.\n\n \"\"\"\n expect = \"Error on line 5 col 18: +\"\n self.assertTrue(TestParser.checkParser(input,expect,198))", "title": "" }, { "docid": "c4e5f37b5f8d94586ac78d7bc31d2f43", "score": "0.63928396", "text": "def test46(self):\n input = \"\"\"Function: foo \n Parameter: n\n Body: \n a =(foo(3) != foo(4))* 0.e2;\n EndBody.\"\"\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input, expect, 246))", "title": "" }, { "docid": "49cb436b28cdde86e79e534397d71979", "score": "0.6377498", "text": "def test1_parser_test(self):\n\t\toutput, expected = parser_helper(\"test1\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "17e7ea67229264e7b060f006b1cfdf99", "score": "0.63698614", "text": "def test36(self):\n input = \"\"\"\n Function: main\n Body:\n Var: a = 5.0;\n If a == 5.0 a >= 12.0 && a <= 6.0 Then\n a = 12.0;\n EndIf.\n EndBody.\n \"\"\"\n expect = \"Error on line 5 col 24: a\"\n self.assertTrue(TestParser.checkParser(input,expect,136))", "title": "" }, { "docid": "034da890937703734e6eda6d71d44f4d", "score": "0.6367734", "text": "def test_parse_learners_string_valid(self):\n\n # Test some combinations of valid learner-types (not all\n # combinations, though)\n for i in range(1, len(self.learners) + 1):\n assert_equal(\n sorted(parse_learners_string(','.join(self.learners[:i]))),\n sorted(self.learners[:i]))", "title": "" }, { "docid": "119acd83cafe7d0f88a1d6affc57b458", "score": "0.63675785", "text": "def test7_parser_test(self):\n\t\toutput, expected = parser_helper(\"test7\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "37a418cf7dbbe04d73c0ff88deddcc99", "score": "0.6367305", "text": "def test11_parser_test(self):\n\t\toutput, expected = parser_helper(\"test11\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "b9e84f317790eea40747707a15f7e4d9", "score": "0.63671803", "text": "def test_version_parser_value_error(version_string):\n with pytest.raises(ValueError, match=f'\"{version_string}\" is not valid to Semantic Versioning Specification'):\n Version.parse(version_string)", "title": "" }, { "docid": "75368a57a1765d00651193178c930a07", "score": "0.6365896", "text": "def test_whitespace(self):\n self.assertRaises(ParseException, self.lookup.parseString, ' ')", "title": "" }, { "docid": "4090e730343677bbadb35032b675ffac", "score": "0.6356833", "text": "def test_strproc():\r\n\r\n teststr = k\r\n assert(strproc(teststr) == \"2.2160175\")\r\n print(\"test_strproc successfully conducted\")", "title": "" }, { "docid": "3592ae44eb3c1cd02f75af61a8da26f1", "score": "0.63546866", "text": "def test60(self):\n input = \"\"\"\n Function: main\n Body:\n foo();\n fib();\n remove();\n get(;\n EndBody.\n \"\"\"\n expect = \"Error on line 7 col 16: ;\"\n 
self.assertTrue(TestParser.checkParser(input,expect,160))", "title": "" }, { "docid": "b2a63246c7ba4b98675bd222a7b27e5c", "score": "0.63529056", "text": "def test_str(self):\n\n self.assertEqual(self.date, _convert_date(self.date_str))", "title": "" }, { "docid": "0c84c3458b9c00ef4ae91b7520328650", "score": "0.6352493", "text": "def test_exception(self):\n parser = self._PARSER.PARSER\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"a\", parseAll=True)\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"()\", parseAll=True)\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"{}\", parseAll=True)\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"{b}\", parseAll=True)\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"a{b}\", parseAll=True)\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"a{}\", parseAll=True)\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"a{byy}\", parseAll=True)\n with self.assertRaises(pyparsing.ParseException):\n parser.parseString(\"a{ayy}\", parseAll=True)", "title": "" }, { "docid": "fe2d0cf936602605596577b859c27e96", "score": "0.63523984", "text": "def checkString(self, expected, result, msg=None):\n self.assertEqual(expected.splitlines(), result.splitlines(), msg)\n if expected != result:\n # Strip the beginning\n while expected and result and expected[0] == result[0]:\n expected = expected[1:]\n result = result[1:]\n # The exception trace makes it hard to read so dump it too.\n if '\\n' in result:\n print(result)\n self.assertEqual(expected, result, msg)", "title": "" }, { "docid": "b6e3d424af99245f224484a2443d28e1", "score": "0.63523906", "text": "def test14(self):\n input = \"\"\"\n Var: pi = 3..14;\n \"\"\"\n expect = \"Error on line 2 col 20: .\"\n self.assertTrue(TestParser.checkParser(input,expect,114))", "title": "" }, { "docid": "612099449142ba2e35b65fa7e8bf5ebe", "score": "0.6352206", "text": "def test_parse_string(image_data: TypingGetTestData):\n result = ImageName._parse_string(image_data[\"string\"])\n assert result[\"digest\"] == image_data[\"digest\"]\n if image_data[\"digest\"]:\n assert isinstance(result[\"digest\"], FormattedSHA256)\n assert result[\"endpoint\"] == image_data[\"endpoint\"]\n if image_data[\"endpoint\"]:\n assert ImageName.DEFAULT_ENDPOINT not in str(result[\"endpoint\"])\n assert not result[\"endpoint\"].endswith(\"/\")\n assert result[\"image\"] == image_data[\"image\"]\n assert not result[\"image\"].startswith(\"/\")\n assert ImageName.DEFAULT_NAMESPACE not in str(result[\"image\"])\n assert result[\"tag\"] == image_data[\"tag\"]\n if image_data[\"tag\"]:\n assert ImageName.DEFAULT_TAG not in str(result[\"tag\"])", "title": "" }, { "docid": "cd72aaa7dbd29ed8b339260b314b701e", "score": "0.63452816", "text": "def test_parse_correct(input):\n rule = github_issues_bot.Rule.parse(input)\n assert not rule", "title": "" }, { "docid": "4fd24a2d02e9eff93f81be8d6390dc0b", "score": "0.63440377", "text": "def test_wrong_type(self):\n prefix = 'AAA0000123400000023'\n\n self.assertRaises(ParseException, self.grammar.parseString, prefix)", "title": "" }, { "docid": "043dcf7a6b2df1c8ac5886d6ec3bc97b", "score": "0.63427424", "text": "def test12_parser_test(self):\n\t\toutput, expected = parser_helper(\"test12\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "e5f597c4f8f8077e872fd08de2193e63", "score": "0.63420504", "text": "def test37(self):\n input = 
\"\"\"\n Function: main\n Body:\n Var: a = 5.0;\n If a == 5.0 a >= 12.0 && a <= 6.0 Then\n a = 12.0;\n EndIf.\n EndBody.\n \"\"\"\n expect = \"Error on line 5 col 24: a\"\n self.assertTrue(TestParser.checkParser(input,expect,137))", "title": "" }, { "docid": "85202494771e35a31bc3fd54c41243b3", "score": "0.6338986", "text": "def test_dataset_valid_iati_string(self):\n pass", "title": "" }, { "docid": "bcf4c63b0ca27af9e5b86293711e714e", "score": "0.6335752", "text": "def test14_parser_test(self):\n\t\toutput, expected = parser_helper(\"test14\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "9b6bab843e8a0b071ad1f9bd4917f5a9", "score": "0.63353974", "text": "def _check_string(text):\n quote_pos = text.find('\"')\n if quote_pos < text.find(')') and quote_pos != -1:\n # Data is a string\n r = text.split('\"')\n else:\n # Data is not a string\n r = re.split('[,)]', text)\n \n if len(r) < 2:\n raise ValueError(\"Cannot parse \" + text + \", length < 2.\")\n return r[1]", "title": "" }, { "docid": "434cb868e4eec9d2d7b1cb38180078fe", "score": "0.6329728", "text": "def validity_of___str___test(self):\n ts = TimeSeries()\n ts.add_entry(0.0, 0.0)\n ts.add_entry(0.1, 0.1)\n ts.add_entry(0.2, 0.2)\n ts.add_entry(0.3, 0.3)\n ts.add_entry(0.4, 0.4)\n\n matchres = re.match(\"TimeSeries\\(\\[(.*)\\]\\)\", ts.__str__())\n\n assert (None != matchres)", "title": "" }, { "docid": "6d2a575d335d341db22234816c651ff7", "score": "0.63283575", "text": "def test_str(self):\n test_str = \"noetuaoe\"\n try:\n raise Exception(test_str)\n except:\n err = sys.exc_info()\n pe = proto_error(err)\n self.assertIn(test_str, str(pe))", "title": "" }, { "docid": "194569513e34b86569baee2417d4e489", "score": "0.63279915", "text": "def test_parse_number(self) -> bool:\n self.test_counter += 1\n failed: bool = False\n # Parsing a string to create a number\n list_invalid = [\n \"1..4\", \"-\", \"1.-4\", \"--5\", \"0..\", \"12a345\", \"123v\", \"7O\"\n ]\n list_valid = [\n \"0\", \"0.\", \"0.1\", \"0.01\", \"1\", \"12\", \"-0\", \"-0.\", \"-0.1\", \"-0.01\", \"-1\", \"-12\",\n \"12.34\", \"-12.34\", \"3.141592653589793238462643383279\", \"123456789012345\", \"98765.43210\"\n ]\n for n in list_invalid:\n if not self.assertFalse(DecimalNumber._parse_number(n)[0], \"Incorrect parsing of {0} as a number\".format(n)):\n failed = True\n for n in list_valid:\n if not self.assertTrue(DecimalNumber._parse_number(n)[0], \"Incorrect parsing of {0} as a number\".format(n)):\n failed = True\n\n return failed", "title": "" }, { "docid": "1180fda22af6c6011197cda43db82745", "score": "0.6321648", "text": "def testParseValidNum(self):\n self.assertEqual(ParseValidNum(self.num1),2)\n self.assertEqual(ParseValidNum(self.num2),4)", "title": "" }, { "docid": "4035c555820bd19cfa3cf4ecfb5c31b1", "score": "0.63142467", "text": "def test3_parser_test(self):\n\t\toutput, expected = parser_helper(\"test3\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "b4af0a5bf9b3ff3a25cea54bb1f5331e", "score": "0.631283", "text": "def test6_parser_test(self):\n\t\toutput, expected = parser_helper(\"test6\")\n\t\tself.assertEqual(output, expected)", "title": "" }, { "docid": "14ef4ec5256d425e64778b04596c0c99", "score": "0.63105744", "text": "def test_from_string(votable_string):\n res = downloader.SpectraDownloader.from_string(votable_string)\n assert res is not None\n assert res.parsed_ssap is not None", "title": "" }, { "docid": "9d07be61f21c5aa2efef9025153fe4e6", "score": "0.6309112", "text": "def 
test_parse_signature_valid(self):\n self.assertEqual(util.parse_signature('sha1=hello world'), 'hello world')", "title": "" }, { "docid": "c792584eb380046f35e462c4e2cd3bdd", "score": "0.62946236", "text": "def test33(self):\n input = \"\"\"\n Function: main\n Body:\n Var: a = 5.0;\n If a == 5.0 || a >= 12.0 || a <= 6.0 Then\n a = 12.0;\n EndIf.\n EndBody.\n \"\"\"\n expect = \"successful\"\n self.assertTrue(TestParser.checkParser(input,expect,133))", "title": "" }, { "docid": "3c95627f4b137f6b77aed838fdb8e058", "score": "0.62924236", "text": "def test_string_expression():\n reset_counter()\n for ws, ws_sep in iter_whitespace():\n if '\\n' in ws: continue # causes too many problems\n\n # Test short-form strings\n for quote, opposite_quote in [('\"' \"'\"), (\"'\" '\"')]:\n parse_lua(f'a = {quote} hello {opposite_quote} world {quote} ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # Test simple escapes\n for esc in ('abfnrtv' '\\\\' '\"' \"'\" '\\n'):\n parse_lua(f'a = \"hello\\\\{esc}world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # \\z: skips following whitespace, including newlines\n if '--' not in ws:\n parse_lua(f'a = \"hello \\\\z \\t \\r \\n world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # \\xXX hex escapes\n parse_lua(r'a = \"hello \\x00 \\xfF \\x78 world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n with pytest.raises(Exception):\n parse_lua(r'a = \"hello \\x2 world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # \\d, \\dd, \\ddd decimal escapes\n parse_lua(r'a = \"hello \\1 \\12 \\123 \\1234 world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n parse_lua(r'a = \"hello \\240 \\250 \\255 \\25599 \\255F world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n parse_lua(r'a = \"hello \\2a \\25a \\255a world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n for v in ['256', '260', '300', '999']:\n with pytest.raises(Exception):\n parse_lua(f'a = \"hello \\\\{v} world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # \\u{XXX} unicode literals\n parse_lua(r'a = \"hello \\u{0} \\u{1234CDEF} \\u{0001234CDEF} world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n parse_lua(r'a = \"hello \\u{7FFFFFFF} \\u{0007FFFFFFF} world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n for v in ['700000000', '7FFFFFFF0', '80000000', '8FFFFFFF', 'FFFFFFFF']:\n with pytest.raises(Exception):\n parse_lua(f'a = \"hello \\\\u{v} world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n with pytest.raises(Exception):\n parse_lua(r'a = \"hello \\u{} world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # Test illegal linebreaks\n with pytest.raises(Exception):\n parse_lua(f'a = \"hello\\nworld\" ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # Test some random invalid escapes\n for esc in 'cdyq!#*':\n with pytest.raises(Exception):\n parse_lua(f'a = \"hello\\\\{esc}world\" ;_'.replace(' ', ws).replace('_', ws_sep))\n\n # Test non-ASCII characters\n parse_lua(f'a = \" ツ \" ; '.encode('utf-8'))\n\n # Test long-form strings\n # These ignore escape sequences so we don't have to test that\n helper_test_multiline_comment_or_long_string('a =')\n end()", "title": "" }, { "docid": "eeeaaeebb4b927b2b71094b27de43129", "score": "0.629115", "text": "def fromstring( string ):", "title": "" }, { "docid": "1882a52bc455689adf49b997f17c1044", "score": "0.62889796", "text": "def test15(self):\n input = \"\"\"\n Varr: pi = 3..14;\n \"\"\"\n expect = \"Error on line 2 col 11: r\"\n self.assertTrue(TestParser.checkParser(input,expect,115))", "title": "" }, { "docid": "75d5b441c6e7d84abb989cee909e1e33", "score": "0.6284227", "text": 
"def test_spaces_head(self):\n self.assertRaises(ParseException, self.date.parseString, ' 20121121')", "title": "" }, { "docid": "f9265f8da212b651a9e9b058417315d7", "score": "0.62761736", "text": "def test_reserved_ko(self):\n with self.assertRaises(ParseError):\n parser.parse('foo:NOT')\n with self.assertRaises(ParseError):\n parser.parse('foo:AND')\n with self.assertRaises(ParseError):\n parser.parse('foo:OR')\n with self.assertRaises(ParseError):\n parser.parse('OR')\n with self.assertRaises(ParseError):\n parser.parse('AND')", "title": "" } ]
2ae863de650db4ee3a7ff292d859e71b
Ensure ``path`` outputs proper settings file path.
[ { "docid": "7f2eb865576fa67708552492ef14276c", "score": "0.6299038", "text": "def test_settings_path(self):\n with self.cli_runner.isolated_filesystem():\n with open(\"settings.json\", \"w\") as handler:\n handler.write(PULP_SMASH_CONFIG)\n with mock.patch.object(pulp_smash_cli, \"PulpSmashConfig\") as psc:\n psc.get_load_path.return_value = utils.uuid4()\n result = self.cli_runner.invoke(\n pulp_smash_cli.settings, [self.settings_subcommand]\n )\n self.assertEqual(result.exit_code, 0, result.output)\n self.assertEqual(\n psc.get_load_path.return_value, result.output.strip()\n )", "title": "" } ]
[ { "docid": "d49d3192faad7077206e313d0d2e8fa0", "score": "0.62478626", "text": "def test_get_settings_file_path_returns_the_settings_path_correctly(self):\n self.assertEqual(\n os.path.expanduser('~/.atrc/last_version'),\n ExternalDCC.get_settings_file_path()\n )", "title": "" }, { "docid": "9aa805525576cdf8f300d08f7a6e427b", "score": "0.61765647", "text": "def _check_path(path):\n if not path:\n path = os.path.join(tempfile.gettempdir(), \"resume-workflow.yml\")\n try:\n if os.path.exists(path):\n # make sure we can write to this file\n with open(path, \"r\") as fp:\n contents = fp.read()\n else:\n contents = json.dumps(dict())\n\n with open(path, \"w\") as fp:\n fp.write(contents)\n\n except IOError:\n # let's just assume we can always write to the temp folder...\n path = os.path.join(tempfile.gettempdir(), \"resume-workflow.yml\")\n return path", "title": "" }, { "docid": "1a529a60c40dcd2f6121417860183a4a", "score": "0.6154319", "text": "def settings_load_path(ctx):\n path = ctx.obj[\"load_path\"]\n if not path:\n _raise_settings_not_found()\n click.echo(path)", "title": "" }, { "docid": "c10fbc471819db771c43f894b90433ea", "score": "0.6102077", "text": "def settings_save_path(ctx):\n click.echo(ctx.obj[\"save_path\"])", "title": "" }, { "docid": "a74c7ca93a5726c441d77d6655885e7a", "score": "0.6072662", "text": "def validate_path(path):\r\n return open(os.path.abspath(path), 'w', encoding='utf-8', newline='')", "title": "" }, { "docid": "9cb3ac2c084bf67be480be0150ceaf91", "score": "0.59763783", "text": "def test_settings_load_path(self):\n with self.cli_runner.isolated_filesystem():\n with open(\"settings.json\", \"w\") as handler:\n handler.write(PULP_SMASH_CONFIG)\n with mock.patch.object(pulp_smash_cli, \"PulpSmashConfig\") as psc:\n psc.get_load_path.return_value = utils.uuid4()\n result = self.cli_runner.invoke(\n pulp_smash_cli.settings, [self.settings_subcommand]\n )\n self.assertEqual(result.exit_code, 0, result.output)\n self.assertEqual(\n psc.get_load_path.return_value, result.output.strip()\n )", "title": "" }, { "docid": "6077426c65fe66b1187a7ba9835a5189", "score": "0.5973759", "text": "def normalize_path(path):\n normalized_path = join(ASIAQ_CONFIG, path)\n if exists(normalized_path):\n return normalized_path\n else:\n raise RuntimeError(\"Config path not found: %s\" % normalized_path)", "title": "" }, { "docid": "1fa9d2f42289232424d1ade1b220fe60", "score": "0.59306884", "text": "def test_settings_save_path(self):\n with mock.patch.object(pulp_smash_cli, \"PulpSmashConfig\") as psc:\n psc.get_save_path.return_value = utils.uuid4()\n result = self.cli_runner.invoke(\n pulp_smash_cli.settings, [\"save-path\"]\n )\n self.assertEqual(result.exit_code, 0, result.output)\n self.assertEqual(psc.get_save_path.return_value, result.output.strip())", "title": "" }, { "docid": "548e3931e6513827e43b35cde8d78ec4", "score": "0.59296757", "text": "def _setpath():\n path = os.path.join(os.path.dirname(__file__),\n '..', 'conf', 'config.cfg')\n return path", "title": "" }, { "docid": "aed9a6e5cdd4836af0310f31bfb822f9", "score": "0.5888329", "text": "def test_fix_settings_fspath(self):\n from on.video.video import fixupConfig\n oldsettings = self.settings\n self.settings.fspath = u'/nosuchfileordirectory'\n fixupConfig()\n self.failUnless(self.settings.fspath == u'/tmp')", "title": "" }, { "docid": "b71f0df831f90105e1ecb7076c71bdfc", "score": "0.5830939", "text": "def get_config_path(path=None):\n if path is None:\n path = Path.home() / '.nbreport.yaml'\n else:\n path = Path(path)\n return 
path", "title": "" }, { "docid": "0a960e79c7b4286fc25c6ad6ca3241a5", "score": "0.5828295", "text": "def settings_path(ctx): # noqa:D401\n ctx.forward(settings_load_path)", "title": "" }, { "docid": "6e7f9e936b34a78dbb6fec7f93c23aae", "score": "0.5810461", "text": "def get_settings_location():\n\n # set up the path\n path_name = os.path.dirname(sys.argv[0])\n settings_location = os.path.abspath(path_name)\n\n # normalize and return the entire path\n return normalize_path(os.path.join(settings_location, SETTINGS_NAME + \".txt\"))", "title": "" }, { "docid": "23fa35189d509bea8a84c8ecf3708467", "score": "0.57915986", "text": "def assure_path (self, path):\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\t\treturn path", "title": "" }, { "docid": "0f2c99bd9de79df2b7cdfe849d5a6be3", "score": "0.5784017", "text": "def path(self):\n return path(self.config[\"path\"])", "title": "" }, { "docid": "7640daaba4eee0568f3c932838135b0f", "score": "0.5755317", "text": "def __validate_settings(self):\n assert len(self.settings.paths) > 0, 'Startpath not defined'\n for p in self.settings.paths:\n assert os.path.exists(p), 'Startpath not found'\n assert os.access(p, os.R_OK), 'Startpath not readable'", "title": "" }, { "docid": "51a1c65651c1a8202f6c980917cee0d2", "score": "0.56454915", "text": "def _check_path(hass: HomeAssistant, path: str) -> None:\n get_path = pathlib.Path(path)\n if not get_path.exists() or not get_path.is_file():\n raise ConfigEntryNotReady(f\"Can not access file {path}\")\n\n if not hass.config.is_allowed_path(path):\n raise ConfigEntryNotReady(f\"Filepath {path} is not valid or allowed\")", "title": "" }, { "docid": "617107870869b9cb45a0ca3c9fc7c47d", "score": "0.56145227", "text": "def _sanitize_paths(cfg):\n cfg['TEMPLATE_DIR'] = os.path.expanduser(cfg['TEMPLATE_DIR'])", "title": "" }, { "docid": "818f5bc3964f51d2eaa8958598508dc0", "score": "0.5594388", "text": "def setOutPath(self, path: str) -> None:\n self.outPath = path", "title": "" }, { "docid": "1137545fb7872ea33dbc96fbccfc6e00", "score": "0.5589535", "text": "def set_output_path(self, path):\n self.output_path = path\n\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)", "title": "" }, { "docid": "9f5fc252d65d88539b11e7659e2e9ce9", "score": "0.5587821", "text": "def __get_settings(self, path):\n if not path:\n raise Exception('Setting key must be provided')\n\n res = self.settings\n\n for key in path.split('.'):\n res = res.get(key, {})\n\n return res", "title": "" }, { "docid": "2625a00c6fc88187ba97f9903f982698", "score": "0.5559026", "text": "def set_config_path(self, file_path):\n if os.path.exists(file_path):\n self.file_path = file_path", "title": "" }, { "docid": "30397fbc8b603904d218de9c844cbdce", "score": "0.55566996", "text": "def _fix_path(path: Union[Path, str]) -> Path:\n path = Path(path) # type: ignore\n if not path.exists():\n raise exc.ContentInitializeError(AgentTool, path)\n elif path.is_file():\n path = path.parent\n\n return path", "title": "" }, { "docid": "1672abe97feb5abd68cf26275be91b77", "score": "0.55107003", "text": "def resolve_given_outfile_path(path):\n if path is None:\n return\n outfile = os.path.expanduser(os.path.expandvars(path))\n if not os.access(os.path.dirname(os.path.abspath(outfile)), os.W_OK):\n raise ValueError('Unable to write to file: %s' % outfile)\n return outfile", "title": "" }, { "docid": "2c4c3862774406ad88277adce0871f4a", "score": "0.5504211", "text": "def test_simple_path():\n assert str(ConfigPath(['tmp'])) == '/tmp'", "title": "" }, { 
"docid": "af4a209409b07f3decad7f332d8e8ed0", "score": "0.5484446", "text": "def print_global_config_path(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(project_context.global_config_path)\n ctx.exit()", "title": "" }, { "docid": "0ba33c25caa5b6c821acb03c1ad71e6a", "score": "0.54769254", "text": "def set_config_path(self, path):\n config = self.load_config()\n config['path'] = path\n pickle.dump(config, open(self.config_path, 'wb'))", "title": "" }, { "docid": "11b82f35ac6d5d0217706238acdb392e", "score": "0.547405", "text": "def _get_path(cls, path):\n if not isinstance(path, (str, six.string_types)):\n # Es un DirEntry\n path = path.path\n return os.path.abspath(os.path.expanduser(path))", "title": "" }, { "docid": "b86f1b0c5ae869197cf08223f4debe06", "score": "0.54586005", "text": "def val_path(cls, path_string):\n try:\n pv.validate_filepath(path_string, platform='auto')\n str_path = pv.sanitize_filepath(path_string, platform='auto')\n return str_path\n except ValueError as err:\n logger.error('Invalid filepath provided')\n return False", "title": "" }, { "docid": "502fd8b4d3a686d49d76889ea64243ed", "score": "0.5422264", "text": "def file_path(self):\n return os.path.join(self.path, self.__class__.CONFIG_FILE_NAME)", "title": "" }, { "docid": "b2dd12f25e4a3b892c42ca6f90f925e1", "score": "0.5421735", "text": "def verify_path(path):\n if path is None:\n sys.exit('Program terminated. You must specify a correct path.')\n path = Path(path)\n assert path.exists(), f'The specified path was not found: {path}.'\n return path", "title": "" }, { "docid": "a75195c6d4a40c2e69debac60e2f411a", "score": "0.541987", "text": "def goodpath(self, path):\r\n if os.sep != '/':\r\n path = '/'.join(path.split(os.sep))\r\n abs_path = os.path.abspath(path)\r\n src_path = os.path.relpath(path, self.gdict['src_root'])\r\n return", "title": "" }, { "docid": "c6f72e2a724c7d14757b73c9e04d562b", "score": "0.54172456", "text": "def test_path_validation(capsys, rm_test_path):\n p = ConfigPath(TEST_PATH)\n p.validate()\n captured = capsys.readouterr()\n assert captured.out == \\\n \"The path '/tmp/test/foo' is required by your configuration.\\n\"", "title": "" }, { "docid": "373211f80ad81dfeb6f050eb6e9d1aae", "score": "0.5389501", "text": "def output_path(path):\n\n path = os.path.abspath(path)\n dirname = os.path.dirname(path)\n\n if not os.access(dirname, os.W_OK):\n raise IOError('File %s cannot be created (check your permissions).'\n % path)\n return path", "title": "" }, { "docid": "92eb4ce258ef4be07f16fde90a9641e6", "score": "0.5388543", "text": "def project_path(path: str) -> Path:\n return Path(path).absolute()", "title": "" }, { "docid": "99b53f507427d6199d43adbba92c2982", "score": "0.5377475", "text": "def _filter_manifest_path(self, path: str) -> str:\n\n # Our makefile paths contain vars to be subbed by the makefile.\n # We need to do those same subs now.\n for pair in [\n ('$(PROJ_DIR)', PROJ_DIR),\n ('$(TOOLS_DIR)', TOOLS_DIR),\n ('$(PROJ_SRC_DIR)', PROJ_SRC_DIR),\n ]:\n path = path.replace(pair[0], pair[1])\n\n projpath = f'{self._projroot}/'\n assert '\\\\' not in projpath # Don't expect to work on windows.\n abspath = os.path.abspath(\n os.path.join(self._projroot, 'src', 'meta', path)\n )\n if not abspath.startswith(projpath):\n raise RuntimeError(\n f'Path \"{abspath}\" is not under project root \"{projpath}\"'\n )\n return abspath[len(projpath) :]", "title": "" }, { "docid": "d4e59c22712c5a89967b1e0f32820d07", "score": "0.5345697", "text": "def 
test_pathValueRoundTrip(self):\n fp = filepath.FilePath(self.mktemp())\n p = amp.Path()\n s = p.toString(fp)\n v = p.fromString(s)\n self.assertIsNot(fp, v) # sanity check\n self.assertEqual(fp, v)", "title": "" }, { "docid": "d33e9a6af12795d500576779f4df3f9d", "score": "0.534505", "text": "def path(self, value: str):\n self._properties[\"path\"] = value", "title": "" }, { "docid": "d33e9a6af12795d500576779f4df3f9d", "score": "0.534505", "text": "def path(self, value: str):\n self._properties[\"path\"] = value", "title": "" }, { "docid": "d33e9a6af12795d500576779f4df3f9d", "score": "0.534505", "text": "def path(self, value: str):\n self._properties[\"path\"] = value", "title": "" }, { "docid": "d33e9a6af12795d500576779f4df3f9d", "score": "0.534505", "text": "def path(self, value: str):\n self._properties[\"path\"] = value", "title": "" }, { "docid": "d33e9a6af12795d500576779f4df3f9d", "score": "0.534505", "text": "def path(self, value: str):\n self._properties[\"path\"] = value", "title": "" }, { "docid": "d37a4e2f3c43ef9ebb08ed3f8f652c3c", "score": "0.5341578", "text": "def set_save_path(self, path):\n raise NotImplementedError", "title": "" }, { "docid": "c1dccaaff4b643ed0feac77bd7367052", "score": "0.533678", "text": "def using(path):\n env.cfg_path = path", "title": "" }, { "docid": "0d26fb803e8b199db972b77938c3ac75", "score": "0.53331167", "text": "def get_plugin_settings_path(self, autocreate=True):\n\n return get_dir(os.path.join(self.get_local_settings_path(autocreate),\n \"plugin-settings\"), autocreate)", "title": "" }, { "docid": "bd4d4c2d706fde8ad871f40381240937", "score": "0.5322574", "text": "def on_outputDirPicker_pathSelected(self, path):\n # make it relative, if it is in a subdirectory of the project path\n dn = self.project.getRelativePath(path)\n while dn.endswith(os.sep):\n dn = dn[:-1]\n self.outputDirPicker.setText(dn)", "title": "" }, { "docid": "6a6c445539e9a1c3026ed4881bff9804", "score": "0.53203624", "text": "def test_path(self, cd_tmp_path: Path) -> None:\n assert EnvManager(\"\", \"\", path=cd_tmp_path).path == cd_tmp_path\n assert EnvManager(\"\", \"\").path == cd_tmp_path", "title": "" }, { "docid": "10ea9d4a333ce991e8027f20e7afcfff", "score": "0.5312953", "text": "def set_path(self):\n pass", "title": "" }, { "docid": "b42a806c67a9ab400d914488d4fbbb5b", "score": "0.53112274", "text": "def load_setting_export_path(logger, config_settings):\n try:\n export_path = config_settings['export_options']['export_path']\n if export_path is not None:\n return export_path\n else:\n return None\n except Exception as ex:\n log_critical_error(logger, ex, 'Exception getting export path from the configuration file')\n return None", "title": "" }, { "docid": "d0f72c27503d6dd9496abfbaf5250f2a", "score": "0.530353", "text": "def fixpath(config):\n\t\tpass", "title": "" }, { "docid": "7db78e2a6c275a88f66ead2e10381bfc", "score": "0.5289156", "text": "def settings_file():\n filename = settingsdir()+\"/lauecollect_settings.py\"\n return filename", "title": "" }, { "docid": "fc63afcd9b0abe6058b3414ad11ff861", "score": "0.5285615", "text": "def path(self, path):\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n path = os.path.abspath(path)\n\n if not os.path.exists(path) or not os.path.isfile(path):\n raise ReaderException(\"No CSV file found at '%s'\" % path)\n\n self._path = path", "title": "" }, { "docid": "b9c3269a45234ca171930c0839e91183", "score": "0.5283701", "text": "def obtenerPath(path):\n return os.path.abspath(path)", "title": "" }, { "docid": 
"be597e37fa9f902188f1d6a72ff39298", "score": "0.5280259", "text": "def UpdateOutDir(self, path):\n self.saveParams['outDir'] = path\n self.outDirTxt.SetLabel(self.saveParams['outDir'])\n self.SendSaveMenu()", "title": "" }, { "docid": "056eafd0a35bda9afbdf32b96579a302", "score": "0.5271145", "text": "def get_output_path(path):\n return osp.join(cfg.app_output_dir, path)", "title": "" }, { "docid": "7882f452565491e3eb5881f4a484baa4", "score": "0.52707547", "text": "def set_file_path(file_path_value):\n global file_path\n file_path=file_path_value", "title": "" }, { "docid": "7e0a09597da29e9ef53a34aac45c95b5", "score": "0.5269545", "text": "def SetPath(self, path):\n if not path:\n path = ''\n self.dialog.app_path_text_ctrl.SetValue(path)", "title": "" }, { "docid": "716aa2e00f490667940732d30adde1f8", "score": "0.5249686", "text": "def dump(self, path: Optional[Path] = None) -> None:\n\n if path:\n log.explain(\"Using custom path\")\n else:\n log.explain(\"Using default path\")\n path = self._default_path()\n\n log.explain(f\"Dumping to {fmt_real_path(path)}\")\n log.print(f\"[bold bright_cyan]Dumping[/] to {escape(fmt_real_path(path))}\")\n\n try:\n path.parent.mkdir(parents=True, exist_ok=True)\n except PermissionError:\n raise ConfigDumpError(path, \"Could not create parent directory\")\n\n try:\n # Ensuring we don't accidentally overwrite any existing files by\n # always asking before overwriting a file.\n try:\n # x = open for exclusive creation, failing if the file already\n # exists\n with open(path, \"x\", encoding=\"utf-8\") as f:\n self._parser.write(f)\n except FileExistsError:\n print(\"That file already exists.\")\n if asyncio.run(prompt_yes_no(\"Overwrite it?\", default=False)):\n with open(path, \"w\", encoding=\"utf-8\") as f:\n self._parser.write(f)\n else:\n raise ConfigDumpError(path, \"File already exists\")\n except IsADirectoryError:\n raise ConfigDumpError(path, \"That's a directory, not a file\")\n except PermissionError:\n raise ConfigDumpError(path, \"Insufficient permissions\")", "title": "" }, { "docid": "ae00132fbf3f4291bfdadbf1386acbc5", "score": "0.5248239", "text": "def FormatFilePathMenu(self, file_path):\n file_path = pathlib.Path(file_path)\n try:\n # Attempt to find path relative to home directory\n file_path = file_path.relative_to(pathlib.Path.home())\n except ValueError:\n # keep absolute path if path not under home directory\n pass\n return str(file_path)", "title": "" }, { "docid": "1e41b5c841b4f3340efc942b2a094c13", "score": "0.5246279", "text": "def test_check_and_make_path_error(self, path):\n\n testing.eval_bad_input(pysat.utils.files.check_and_make_path,\n ValueError, 'Invalid path specification.',\n input_args=[path])\n\n return", "title": "" }, { "docid": "0cbd2ef0dd67a8e10708833bd19bb50e", "score": "0.52368397", "text": "def prepare_output_path(self, path):\n # Check if exists\n if os.path.exists(path) and \\\n input(\"File exists at '{}', overwrite contents (n)?\".format(path)) not in ['y', 'yes']:\n return False\n elif not os.path.exists(os.path.dirname(path)):\n try:\n if not self.dry:\n os.mkdir(os.path.dirname(path))\n except:\n raise ValueError(\"Could not create owner directory '{}'\".format(os.path.dirname(path)))\n\n return True", "title": "" }, { "docid": "1bbfd5e92a5cd7c47bc9e1e62cb25bdc", "score": "0.5229969", "text": "def existing_path(path):\n if path == STDOUT:\n return path\n return resolve_path(path)", "title": "" }, { "docid": "fd240325f75b85a005fd09b6e2f4622e", "score": "0.5216684", "text": "def _verbose_path(path: str) -> 
str:\n realpath = os.path.realpath(path)\n if path != realpath:\n return path + \" -> \" + realpath\n return path", "title": "" }, { "docid": "1c2347a9f7bf6958526921e9e46d1410", "score": "0.520714", "text": "def _format_paths(self, path):\r\n if path:\r\n if (path[0] == '/' or path[0] == '\\\\'):\r\n path = path[1:]\r\n if (path[-1] == '/' or path[-1] == '\\\\'):\r\n path = path[:-1]\r\n return path", "title": "" }, { "docid": "88fea339499d6069fbceed93e8794556", "score": "0.5199069", "text": "def check_one_config_path(self, config_path):\n if not config_path.exists():\n return\n\n rel_path = config_path.relative_to(self.manager.output_dir)\n config = self.get_pyodide_settings(config_path)\n\n yield self.task(\n name=f\"validate:settings:{rel_path}\",\n doc=f\"validate {config_path} with the pyodide kernel settings schema\",\n actions=[\n (self.validate_one_json_file, [self.settings_schema, None, config]),\n ],\n file_dep=[self.settings_schema, config_path],\n )\n\n urls = config.get(PIPLITE_URLS, [])\n\n for wheel_index_url in urls:\n yield from self.check_one_wheel_index(wheel_index_url)", "title": "" }, { "docid": "b13ba3cababd453a20cb666620ec80f0", "score": "0.5195709", "text": "def set_path(self,path=None):\n self.path = path", "title": "" }, { "docid": "0966539eb47a670811dcc9564ad5880a", "score": "0.5188524", "text": "def ensure_settings_dir(self):\n\n if not os.path.exists(self.pl_settings_dir):\n os.makedirs(self.pl_settings_dir)", "title": "" }, { "docid": "fedec47e58b05395e08b555c4c316b15", "score": "0.5176884", "text": "def set_luxrender_path(self, path):\n if not path:\n return\n efutil.write_config_value('luxrender', 'defaults', 'install_path',\n efutil.filesystem_path(path))", "title": "" }, { "docid": "22a11327ca29373c5b7ad2146802516f", "score": "0.5163475", "text": "def conf_file_path(conf_path):\n return conf_path / 'astrality.yml'", "title": "" }, { "docid": "ac846b4329807a5bba2d92aefe1eaed0", "score": "0.5156966", "text": "def _set_path_absolute(self):\n\t\tif type(self.path) == str:\n\t\t\tabspath = (os.path.abspath(self.path)).replace('\\\\', '/')\n\t\t\tif not abspath[-1] == '/':\n\t\t\t\tabspath += '/' #put final slash in place\n\t\t\tout = abspath\n\t\telif type(self.path) == dict:\n\t\t\tpdict = self.path\n\t\t\tnew_pdict = {}\n\t\t\tfor key in pdict:\n\t\t\t\tindices = pdict[key]\n\n\t\t\t\tabspath = (os.path.abspath(key)).replace('\\\\', '/')\n\t\t\t\tif not abspath[-1] == '/':\n\t\t\t\t\tabspath += '/' #put final slash in place\n\n\t\t\t\tnew_pdict.update({abspath:indices})\n\t\t\tdel pdict\n\t\t\tout = new_pdict\n\t\telse:\n\t\t\traise TypeError('Only str or dict supported as self.path. Please report this issue.')\n\n\t\tself.attrs['path'] = out\n\n\t\treturn", "title": "" }, { "docid": "65f11f4e819e195290f9135cff012095", "score": "0.51539135", "text": "def __str__(self):\n return(\"Path {0} Invalid, check path exists and \"\n \"script has permissions to access it! 
\\n\"\n \"See Exception for more details: \\n{1}\"\n .format(self.values[0], self.values[1]))", "title": "" }, { "docid": "bcb490060e28731eff3c92f318ed5a00", "score": "0.5140294", "text": "def set_output_dir(self, path):\n self.output_dir = path", "title": "" }, { "docid": "06238f339f7e4c85ee877a42d71bd24b", "score": "0.51392204", "text": "def style_path(self, path: str, **info: Any) -> str:\n isdir: bool = info.get(\"isdir\", False)\n\n if self.dir_trailing_slash and isdir and not path.endswith(sep):\n path += sep\n\n if not self.colored:\n return path\n\n file = File.from_path(path)\n val = None\n if isdir:\n val = self.codes.get(\"di\")\n elif file.ext:\n val = self.extensions.get(file.ext, None)\n\n if val:\n reset = self.codes.get(\"rs\", 0)\n return f\"\\033[{val}m{file.path}\\033[{reset}m\"\n return color_file(file, isdir=isdir)", "title": "" }, { "docid": "7f016171daba80a0840887b0440d6893", "score": "0.51368594", "text": "def data_path(path):\n return path if isabs(path) else join(project_data_dir(), path)", "title": "" }, { "docid": "672391cda486bf7af5826fe4da0bf298", "score": "0.5134646", "text": "def test_001_success(settings, temp_builds_dir):\n basedir = temp_builds_dir.join(\"postprocessor_validate_path_001\")\n os.makedirs(basedir.strpath)\n\n processor = SettingsPostProcessor()\n\n foo = basedir.join(\"foo.txt\")\n foo.write(\"Hello world!\")\n\n result = processor._validate_path({}, \"DUMMY_NAME\", foo.strpath)\n\n assert result == foo.strpath", "title": "" }, { "docid": "d3632534ee87fb54f63e58772740afa4", "score": "0.5134148", "text": "def full_path(self, in_path):\n if in_path.lower() == 'none':\n return in_path\n else:\n return os.path.abspath(os.path.expandvars(in_path))", "title": "" }, { "docid": "d3632534ee87fb54f63e58772740afa4", "score": "0.5134148", "text": "def full_path(self, in_path):\n if in_path.lower() == 'none':\n return in_path\n else:\n return os.path.abspath(os.path.expandvars(in_path))", "title": "" }, { "docid": "d931f4f3017634070938eb7b7fee357a", "score": "0.51290196", "text": "def get_path():\r\n return template_path", "title": "" }, { "docid": "b85f5207a7d7e486a01a0334eef989ea", "score": "0.5126321", "text": "def settingsdir():\n return module_dir()+\"/settings\"", "title": "" }, { "docid": "e331fcf3cc83de9355421186a548e571", "score": "0.5118932", "text": "def get_local_settings_path(self, autocreate=True):\n\n return get_dir(os.path.join(xdg_config_home(autocreate), \"pitivi\"),\n autocreate)", "title": "" }, { "docid": "cb4fa74ffc2bb564e3fcd2a6ce4ec155", "score": "0.5110402", "text": "def SetPath(self, path):\n pass", "title": "" }, { "docid": "b816aa38e1d46dc982345058ad973b45", "score": "0.51000065", "text": "def test_missing_settings_file(self):\n with self.cli_runner.isolated_filesystem():\n with mock.patch.object(pulp_smash_cli, \"PulpSmashConfig\") as psc:\n psc.get_load_path.side_effect = exceptions.ConfigFileNotFoundError( # noqa: E501\n \"No config file found.\"\n )\n result = self.cli_runner.invoke(\n pulp_smash_cli.settings, [self.settings_subcommand]\n )\n self.assertNotEqual(result.exit_code, 0, result.output)", "title": "" }, { "docid": "a54f18e2438ffe844c699a3c661acad2", "score": "0.5097566", "text": "def _build_repo_path(self, path):\n return '%s/%s' % (self.path, path.lstrip('/'))", "title": "" }, { "docid": "700003cf8ebba2988810573cee49360b", "score": "0.5093068", "text": "def setPath(self, path):\n self.path = path", "title": "" }, { "docid": "27ee8c1e32286189ad2ed5616d520acd", "score": "0.5092654", "text": "def __str__(self):\n 
return 'Config file_path: {}'.format(self.file_path)", "title": "" }, { "docid": "58e4471d845b93c42933dd32e9aecaa5", "score": "0.5088746", "text": "def _fix_path(self, path):\n if path.endswith('/'):\n path = path.rstrip('/')\n\n return safe_str(path)", "title": "" }, { "docid": "0bc4602df13c2bfd3dbb8e9846cce9ad", "score": "0.5073261", "text": "def fullpath(path):\n if os.path.splitext(path)[1] != \".localized\":\n path = \"%s.localized\" % path\n return os.path.expanduser(path)", "title": "" }, { "docid": "bdad7fc1febe793f2ed90abdd1522b08", "score": "0.5072967", "text": "def _validate_path(self, key, path):\n if path is None:\n raise TypeError(\"VersionType.path can not be None, please \"\n \"specify a valid path template string by using \"\n \"Jinja2 template syntax\")\n\n if not isinstance(path, (str, unicode)):\n raise TypeError(\"VersionType.path should be an instance of string \"\n \"or unicode\")\n\n if path == \"\":\n raise ValueError(\"VersionType.path can not be an empty \"\n \"string, it should be a string containing a \"\n \"Jinja2 template code showing the file naming \"\n \"convention of Versions of this type.\")\n return path", "title": "" }, { "docid": "3eb7d1ae7f93000cede672236943c1be", "score": "0.50728565", "text": "def set_save_path(self, path):\n\t\tself.save_dir, self.save_name = os.path.split(path)", "title": "" }, { "docid": "0ea5f1f55b35b283aacc469d057eb60d", "score": "0.5071459", "text": "def _writeFileOpenPath(self, path):\n self._fileOpenPath = path\n self.settings.beginGroup(\"FileSystem\")\n self.settings.setValue(\"openpath\", path)\n self.settings.endGroup()", "title": "" }, { "docid": "62ac40b478ec83a4c9917dcf9a2c45df", "score": "0.50662297", "text": "def check_path(self) -> str:\n return pulumi.get(self, \"check_path\")", "title": "" }, { "docid": "410812cab2ea5f97547c689da73e9890", "score": "0.50640434", "text": "def set_basepath(self, path):\n self.basepath = path\n self.metadata_path = path + \".info\"", "title": "" }, { "docid": "41f8bcaff0ac09c622539415006c8df5", "score": "0.5062251", "text": "def refine_path(self, caller: AiohttpAction):\n _ = caller\n if self.file_path is None and self.file_path_template is None:\n raise ValueError(\"Must have a file_path or a file_path_template\")\n if self.file_path_template is not None:\n template = Template(str(self.file_path_template))\n resolved_string = template.safe_substitute(self.path_values)\n self.file_path = Path(resolved_string)\n if self.file_ending is not None:\n assert self.file_path is not None\n self.file_path = self.file_path.with_suffix(self.file_ending)", "title": "" }, { "docid": "1463c9f5358d7064b533f307e3206cb2", "score": "0.5061787", "text": "def render_config(path, conf):\n with open(os.path.join(path, \".subway\", \"config.json\"), \"w\") as f:\n json.dump(conf, f, indent=2)", "title": "" }, { "docid": "5915207b2b237822408726f06213d3cc", "score": "0.5056235", "text": "def update_path(self, config_path=None):\r\n if config_path is not None:\r\n # Ignore empty values and duplicates\r\n unique_paths = set(cbmod.__path__ + [v for v in config_path if v])\r\n cbmod.__path__ = list(unique_paths)\r\n \r\n self.path = cbmod.__path__\r\n return self.path", "title": "" }, { "docid": "75d28cdd92b3f54f7eb96a2487dcb902", "score": "0.50558287", "text": "def store_config(path):\n\tfh = open(path, 'w')\n\tfor key,val in _CONFIG.items():\n\t\tif key == \"data_dir\": continue\n\t\tfh.write(\"%s: %s\\n\" % (key,val))\n\tfh.close()", "title": "" }, { "docid": "cb513d22a5d8187022eb814c6fc7ac5d", "score": 
"0.5053484", "text": "def outpath(self, *path, basepath=None):\n if basepath is None:\n basepath = self.outbasepath\n if basepath is None:\n return os.path.join(self.__outpath, *path)\n\n return os.path.join(self.__outpath, basepath, *path)", "title": "" }, { "docid": "8a20529ac50c9bd49b339a302d909eb9", "score": "0.50509703", "text": "def path(filename):", "title": "" }, { "docid": "ede175cad95046351de29ae029ef7cd2", "score": "0.5036759", "text": "def post_build(path):\n main_settings_file = os.path.join(path, 'config', 'settings', 'base.py')\n settings_contents = open(main_settings_file, 'r').read()\n fp = open(main_settings_file, 'w')\n secret_key = ''.join([choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])\n settings_contents = re.sub(r\"(?<=SECRET_KEY = ')'\", secret_key + \"'\", settings_contents)\n fp.write(settings_contents)\n fp.close()", "title": "" }, { "docid": "688df123711e534db77064290f419607", "score": "0.5030308", "text": "def _writePath(self):\n pass\n # do nothing, we don't want to rename the script", "title": "" } ]
db56b62c837eeb813a767122f02d3d5c
generate all gradient targets for CC Graph
[ { "docid": "8def199e03ddb3e57dd452bf2c938c97", "score": "0.53105754", "text": "def GraphDef_Grad(graph_def, targets):\n all_pairs = set()\n for target in targets:\n for wrt in target.grad_wrts:\n all_pairs.add((target.name, wrt))\n\n for pair in all_pairs:\n g_target = pb.GradientTarget()\n g_target.cost = str(pair[0])\n g_target.wrt = str(pair[1])\n graph_def.g_target.extend([g_target])", "title": "" } ]
[ { "docid": "60d287fb34ae7e3076cd7efcf7539469", "score": "0.6744482", "text": "def __generate_targets(self):\n targets = list()\n for i in range(len(self.images)):\n mat = sio.loadmat(self.masks[i], mat_dtype=True, squeeze_me=True, struct_as_record=False)\n categories = mat['GTcls'].CategoriesPresent\n if isinstance(categories, np.ndarray):\n categories = np.asarray(list(categories))\n else:\n categories = np.asarray([categories]).astype(np.uint8)\n targets.append(categories)\n self.targets = np.asarray(targets)", "title": "" }, { "docid": "628254e3f974b23c56535e527ee6c753", "score": "0.6556425", "text": "def targets(self):", "title": "" }, { "docid": "fe42002ed4bfaa9cd237298a741d561c", "score": "0.59816444", "text": "def targets(self):\n return self.gate.targets", "title": "" }, { "docid": "4ad1a967f99ab016a4840c9039fdd4de", "score": "0.59797657", "text": "def distillation_targets(self):\n raise NotImplementedError('TODO implement function')", "title": "" }, { "docid": "dd7b89b8fd117314e7b46ef584e46607", "score": "0.5978775", "text": "def create_targets(self):\n targets = get_data_from_csv(data1_10)\n for target in targets:\n target.compute_list_primers_pairs()\n return targets", "title": "" }, { "docid": "2cd31008cd299b6ce6578c0455fa40e2", "score": "0.5935107", "text": "def gen_actors(self):\n sdfgraph = self.sdfgraph\n code = writer()\n for actor in sdfgraph.nodes():\n code.write('inline static void actor_'+actor+'(')\n code.indent()\n first = True\n in_queue=[]\n out_queue=[]\n for pred in sdfgraph.in_nodes(actor): # incoming nodes\n edge = (pred,actor)\n for i in range(0,len(sdfgraph.target_tokens[edge])):\n in_queue.append(sdfgraph.target_tokens[edge][i])\n for succ in sdfgraph.out_nodes(actor): # outgoing nodes\n edge = (actor,succ)\n for i in range(0,len(sdfgraph.source_tokens[edge])):\n out_queue.append(sdfgraph.source_tokens[edge][i])\n code.unindent()\n code.writeln('){')\n code.indent()\n # TBD: add #line directive to each line so that C-Compiler can track source code of sdf program!\n if sdfgraph.is_generic(actor): \n if len(sdfgraph.in_nodes(actor)) > 1:\n print \"ERROR: More than 1 incoming node is found: \"+actor+\".\"\n sys.exit(1)\n if len(sdfgraph.out_nodes(actor)) > 1:\n print \"ERROR: More than 1 outgoing node is found: \"+actor+\".\"\n sys.exit(1)\n\n pop_fn = 'pop'\n push_fn = 'push'\n peek_fn = 'peek'\n if len(sdfgraph.in_nodes(actor)) == 0 and len(sdfgraph.out_nodes(actor)) == 1: # Source\n push_fn = 'push_'+actor+'_'+sdfgraph.out_nodes(actor)[0]\n \n elif len(sdfgraph.in_nodes(actor)) == 1 and len(sdfgraph.out_nodes(actor)) == 0: # Sink\n pop_fn = 'pop_'+sdfgraph.in_nodes(actor)[0]+'_'+actor\n peek_fn = 'peek_'+sdfgraph.in_nodes(actor)[0]+'_'+actor\n else:\n pop_fn = 'pop_'+sdfgraph.in_nodes(actor)[0]+'_'+actor\n push_fn = 'push_'+actor+'_'+sdfgraph.out_nodes(actor)[0]\n peek_fn = 'peek_'+sdfgraph.in_nodes(actor)[0]+'_'+actor\n \n actor_code = sdfgraph.actor_code[actor]\n param_list = sdfgraph.node_param[actor]\n for i in range(0, len(param_list)):\n (p_type, p_name, p_value) = param_list[i]\n actor_code = actor_code.replace(p_name, str(p_value))\n actor_code = re.sub(r\"(?=[^0-9A-Za-z_])\"+p_name+\"(?=[^0-9A-Za-z_])\",str(p_value),actor_code)\n actor_code = re.sub(r\"(?<=[^0-9A-Za-z_])pop(?=[^0-9A-Za-z_])\",pop_fn,actor_code)\n actor_code = re.sub(r\"(?<=[^0-9A-Za-z_])push(?=[^0-9A-Za-z_])\",push_fn,actor_code)\n actor_code = re.sub(r\"(?<=[^0-9A-Za-z_])peek(?=[^0-9A-Za-z_])\",peek_fn,actor_code)\n code.writeln(actor_code)\n\n else:\n # round robin\n if 
len(in_queue) == len(out_queue):\n for i in range(0,len(in_queue)):\n tokens=out_queue[i].split('_')\n out_postfix=tokens[1]\n for j in range(2, len(tokens)-1):\n out_postfix=out_postfix+'_'+tokens[j]\n tokens=in_queue[i].split('_')\n in_postfix=tokens[1]\n for j in range(2, len(tokens)-1):\n in_postfix=in_postfix+'_'+tokens[j]\n code.writeln('push_'+out_postfix+'(pop_'+in_postfix+'());')\n # duplicate\n elif len(in_queue) < len(out_queue) and \\\n len(out_queue)%len(in_queue) == 0:\n i = 0\n for o in range(0,len(out_queue)):\n tokens=out_queue[o].split('_')\n out_postfix=tokens[1]\n for j in range(2, len(tokens)-1):\n out_postfix=out_postfix+'_'+tokens[j]\n tokens=in_queue[i].split('_')\n in_postfix=tokens[1]\n for j in range(2, len(tokens)-1):\n in_postfix=in_postfix+'_'+tokens[j]\n code.writeln('push_'+out_postfix+'(peek_'+in_postfix+'('+str(i)+'));')\n i = i + 1\n i = i % len(in_queue)\n for i in range(0, len(in_queue)):\n code.writeln('pop_'+in_postfix+'();')\n # invalid (inconsistent) sdf graph\n else:\n print \"ERROR: Graph is inconsistent.\"\n print \" \"+actor+\" with \"+str(len(in_queue))+\" in-tokens and \"+str(len(out_queue))+\" out-tokens.\"\n sys.exit(1)\n code.unindent()\n code.writeln('}')\n code.writeln('')\n return code.get()", "title": "" }, { "docid": "2494e0c8c5b03669208570134a9e9ee5", "score": "0.5925598", "text": "def generate_all():\n import time\n import sidechainnet as scn\n t = time.localtime()\n timestamp = time.strftime('%b-%d-%Y-%H%M', t)\n pr.startLogfile(f\"sidechainnet_generateall_{timestamp}\")\n casps = list(range(7, 13))[::-1]\n for c in casps:\n print(\"CASP\", c)\n scn.create(c, \"all\", regenerate_scdata=False)", "title": "" }, { "docid": "5d15171301eabe4ce74948cff5567318", "score": "0.57528", "text": "def assemble_targets():\n\n target_endings = [\"target-0.png\", \"target-1.png\", \"target-2.png\"]\n target_paths = []\n rootdir_glob = GT_DIR + \"/**/*\"\n target_paths = [\n f\n for f in glob.iglob(rootdir_glob, recursive=True)\n if f[-5:] == \"0.png\" or f[-5:] == \"1.png\" or f[-5:] == \"2.png\"\n ]\n\n return target_paths", "title": "" }, { "docid": "9b079648cbcb46778524c0951027fdef", "score": "0.5720993", "text": "def add_objects_from_tgen(self, tg):\r\n try:\r\n link_task = self.link_task\r\n except AttributeError:\r\n pass\r\n else:\r\n for tsk in getattr(tg, 'compiled_tasks', []):\r\n for x in tsk.outputs:\r\n if self.accept_node_to_link(x):\r\n link_task.inputs.append(x)\r\n\r\n self.add_tsk_to_dumpenv_task(tsk)", "title": "" }, { "docid": "48a755583164a4bb2412a49760c1e08d", "score": "0.56193185", "text": "def makeCCNodes(self):\n for node in self.G.nodes_iter():\n cap = random.randint(self.capN['min'],self.capN['max'])\n self.G.node[node]['cap'] = cap\n self.G.node[node]['res'] = dict()\n self.G.node[node]['pop'] = {}\n pop = 0\n for party, partyInfo in self.popN.items():\n partyMax = min(cap-pop, partyInfo['max'])\n partyMin = min(partyMax, partyInfo['min'])\n partyPop = random.randint(partyMin,partyMax)\n self.G.node[node]['pop'][party] = {0:partyPop}\n pop += partyPop", "title": "" }, { "docid": "7b790649c3e7b917ef75a27b4d098e7a", "score": "0.5608881", "text": "def make_targets(self):\n for it, type_ in ((self.itertokens, Token),\n (self.itersentences, Sentence),\n (self.iterdocuments, Document)):\n target_map = make_target_map(d.target_str for d in it())\n DataItemSequence(it()).set_target_maps(target_map)", "title": "" }, { "docid": "604b9dc2f60583a7a94d3f26ac9dc63a", "score": "0.5606412", "text": "def build_graph(self):\n return", 
"title": "" }, { "docid": "7237f6b84e295c74afbe9d4b21c2c0cf", "score": "0.5606301", "text": "def _build_forward_pass_graph(self,\n source_sequence, src_length=None,\n target_sequence=None, tgt_length=None,\n gpu_id=0):\n pass", "title": "" }, { "docid": "9951cc583d1045c47fce7cae196727dd", "score": "0.5589292", "text": "def build_graph(self):\n ###auto encoder\n tfconfig = tf.ConfigProto()\n tfconfig.gpu_options.allow_growth = True\n ###cox net work\n self._create_network()\n self._create_loss()\n self._create_optimizer()\n self.sess = tf.Session(config=tfconfig)", "title": "" }, { "docid": "e92adc8be8c4a62dffdbc06c12423141", "score": "0.55584884", "text": "def targets(self):\n printtime('Performing analysis with {} targets folder'.format(self.analysistype), self.start)\n for sample in self.metadata:\n # Initialise dictionaries\n sample[self.analysistype].vtxhashes = dict()\n sample[self.analysistype].hashcalls = dict()\n try:\n # Iterate through all the vtx genes found for each strain\n for vtx, allelefile in sample[self.analysistype].targetfiles.items():\n # Find the base name/path of the allele file\n targetbase = allelefile.split('.')[0]\n hashfile = '{}.mhs.gz'.format(targetbase)\n # Define the hash call\n hashcall = 'cd {} && mirabait -b {} -k 19 -K {}'.format(self.allelepath, allelefile, hashfile)\n # Add the hash call to the dictionary\n sample[self.analysistype].hashcalls[vtx] = hashcall\n # Run the system call as required\n if not os.path.isfile(hashfile):\n call(hashcall, shell=True, stdout=self.devnull, stderr=self.devnull)\n # Ensure that the hash file was successfully created\n assert os.path.isfile(\n hashfile), u'Hashfile could not be created for the combined target file {0!r:s}' \\\n .format(allelefile)\n # Add the hash filename/path to the dictionary\n sample[self.analysistype].vtxhashes[vtx] = hashfile\n except KeyError:\n pass\n # Bait the fastq files\n self.mirabaiting()", "title": "" }, { "docid": "220638a7113cce4ad449052a2bd03bd0", "score": "0.55567056", "text": "def update_targets(self):\n self.actor.update_target_network()\n self.critic1.update_target_network()\n self.critic2.update_target_network()", "title": "" }, { "docid": "fb61fc5eec7c1f183a1e37bb201a093b", "score": "0.55299944", "text": "def targets(self):\n distances = [2 * self.cn.max_id / 2**i / 3\n for i in range(1, self.num_channels + 1)]\n return [(self.uid + d) % self.cn.max_id for d in distances]", "title": "" }, { "docid": "084c773a1596d8fddc9bb3ea21249293", "score": "0.5518038", "text": "def _build_network_graph(self):\n with tf.variable_scope(\"Generator\", reuse=tf.AUTO_REUSE):\n # Look up tables\n self.src_emb = tf.nn.embedding_lookup(self.src_ten, self.src_ph, name=\"src_lut\")\n self.tgt_emb = tf.nn.embedding_lookup(self.tgt_ten, self.tgt_ph, name=\"tgt_lut\")\n # Map them\n self.mapWX = self._mapper(self.src_emb)\n # Concatenate them\n self.X = tf.concat([self.mapWX, self.tgt_emb], 0, name=\"X\")\n # Set target for discriminator\n Y = np.zeros(shape=(2 * self.batch_size, 1), dtype=np.float32)\n # Label smoothing\n Y[: self.batch_size] = 1 - self.smooth_val\n Y[self.batch_size :] = self.smooth_val\n # Convert to tensor\n self.Y = tf.convert_to_tensor(Y, name=\"Y\")", "title": "" }, { "docid": "4fa2800b43d128ce9df11e6b6014e1c1", "score": "0.5477927", "text": "def generate(self):\n if self.args.gen == 'ALL':\n gen_targets = GENERATOR_TARGETS[:-1]\n else:\n gen_targets = [self.args.gen]\n gen_path = os.path.abspath(self.args.gen_dir)\n for target in gen_targets:\n target_file_name = None\n if 
target == GENERATOR_TARGETS[0]: # Python\n target_file_name = os.path.join(gen_path, '%s_data.py' % self.args.json_base_name)\n with PythonGenerator(target_file_name) as generator:\n self._generate_target(target, generator)\n elif target == GENERATOR_TARGETS[1]: # C++\n target_file_name = os.path.join(gen_path, '%s_data.cpp' % self.args.json_base_name)\n with CppGenerator(target_file_name) as generator:\n self._generate_target(target, generator)\n elif target == GENERATOR_TARGETS[2]: # JavaScript\n target_file_name = os.path.join(gen_path, '%s_data.js' % self.args.json_base_name)\n with JavaScriptGenerator(target_file_name) as generator:\n self._generate_target(target, generator)\n elif target == GENERATOR_TARGETS[3]: # DotNet\n target_file_name = os.path.join(gen_path, '%s_data.cs' % self.args.json_base_name)\n with DotNetGenerator(target_file_name) as generator:\n self._generate_target(target, generator)\n else:\n raise RuntimeError('Unknown target %s' % target)", "title": "" }, { "docid": "2c2b1ecc288736ba6f27f9eb1db3590a", "score": "0.54492104", "text": "def graph(code, target_paths, out):\n count = 0\n for j in out:\n lbl = target_paths[count]\n map_types = [\"Normal Map\", \"Diffuse Albedo\", \"Roughness Map\"]\n if lbl[-5] == \"0\":\n map_type = 0\n print(lbl[-5])\n if lbl[-5] == \"1\":\n map_type = 1\n elif lbl[-5] == \"2\":\n map_type = 2\n print(map_type)\n print(lbl)\n plt.figure()\n name = code[count][0][0][:-20]\n for i, y in enumerate(j):\n plt.scatter(\n [i + 1 for i in range(len(y))],\n [im[\"SSIM\"] for im in y],\n label=f\"k={i+1}\",\n )\n plt.title(name + \" \" + map_types[map_type])\n plt.xlabel(\"Combination\")\n plt.ylabel(\"SSIM\")\n count += 1\n plt.legend()", "title": "" }, { "docid": "062b88f1535bb954b54fb86156e0dd12", "score": "0.54449797", "text": "def build_graph(self):\n\n \"\"\"\n 1. create anxilliary parameters\n 2. create CapsNet core layers\n 3. 
create trainer\n \"\"\"\n\n self._build_init()\n self.cost = self._build_model()\n\n grads_vars = self.optimizer.compute_gradients(self.cost)\n\n self._build_train_op(grads_vars)\n self.summaries = tf.summary.merge_all()", "title": "" }, { "docid": "e17a536d5bacc0278c0203252ad11d24", "score": "0.54207855", "text": "def __generate_target_labels(self, mode):\n if mode == 'uniform':\n return self.__uniform_target_labels()", "title": "" }, { "docid": "997b4b830daa0c18091c373a78773e72", "score": "0.54171526", "text": "def targets(self):\n return (self.label[1],)", "title": "" }, { "docid": "b852bfa6505c7c2711e3c4832bebea6b", "score": "0.540714", "text": "def get_targets(self):\n raise NotImplementedError(TaskCombiner.not_implemented_err_string)", "title": "" }, { "docid": "0578304fdc4eae7332c21d0118df7efe", "score": "0.5395645", "text": "def collect_targets(self):\n\n # we will iterate over every platform and configuration permutation\n # and add any that match the current \"live\" set to the project.\n\n spec_string_list = getattr(self.options,'specs_to_include_in_project_generation','').strip()\n allowed_specs = [] if len(spec_string_list)==0 else spec_string_list.replace(' ', '').split(',')\n qualified_taskgens = []\n unqualified_taskgens = []\n\n\n # Collect all the relevant task gens into a processing list\n taskgen_list = []\n target_to_taskgen = {}\n for group in self.groups:\n for task_gen in group:\n if isinstance(task_gen, TaskGen.task_gen) and hasattr(task_gen,'project_filter'):\n taskgen_list.append(task_gen)\n target_to_taskgen[task_gen.target] = task_gen\n\n # Prepare a whitelisted list of targets that are eligible due to use lib dependencies\n whitelisted_targets = set()\n\n # First pass is to identify the eligible modules based on the spec/platform/configuration rule\n for taskgen in taskgen_list:\n\n target = taskgen.target\n\n # Get the platform restrictions from the module definitions\n module_platforms = self.get_module_platforms(target)\n\n skip_module = True\n\n # Check against the spec/platform/configuration standard rules\n for spec_name in allowed_specs:\n skip_module = not self.check_against_platform_config_rules(module_platforms, spec_name, target)\n if not skip_module:\n break\n # Check if implied through the enabled game project\n if skip_module:\n skip_module = not self.check_against_enabled_game_projects(target)\n\n if skip_module:\n Logs.debug('msvs: Unqualifying %s' % taskgen.target)\n unqualified_taskgens.append(taskgen)\n continue\n\n qualified_taskgens.append(taskgen)\n\n module_uses = self.get_module_uses_for_taskgen(taskgen, target_to_taskgen)\n for module_use in module_uses:\n whitelisted_targets.add(module_use)\n\n # Second pass, go through the initial unqualified task gens and see if they qualify based on the whitelisted targets\n for taskgen in unqualified_taskgens:\n if taskgen.target in whitelisted_targets:\n qualified_taskgens.append(taskgen)\n\n # Iterate through the qualified taskgens and create project nodes for them\n for taskgen in qualified_taskgens:\n\n Logs.debug('lumberyard: Creating Visual Studio project for module %s ' % target)\n\n if not hasattr(taskgen, 'msvs_includes'):\n taskgen.msvs_includes = taskgen.to_list(getattr(taskgen, 'includes', [])) + taskgen.to_list(getattr(taskgen, 'export_includes', []))\n taskgen.post()\n\n p = self.vsnode_target(self, taskgen)\n p.collect_source() # delegate this processing\n p.collect_properties()\n self.all_projects.append(p)", "title": "" }, { "docid": "fba27991a180d9ccdc3f832a6f4c1ee9", "score": 
"0.5393235", "text": "def build_graph():\n ids = range(13)\n coords = [(0, 0), (1, 1), (1, 0), (1, 1), (5, 2), (3, 1), (3, 0),\n (3, -1), (5, 1), (4, 1), (4, 0), (4, -2), (7, 0)]\n\n # https://en.wikipedia.org/wiki/Euclidean_distance\n euclidean_distance = lambda x1y1, x2y2: ((x1y1[0] - x2y2[0]) ** 2 + (x1y1[1] - x2y2[1]) ** 2) ** (0.5)\n\n def build_connected_node_list(from_id, to_ids):\n starting_coords = coords[from_id]\n\n connected_nodes = []\n for to_id in to_ids:\n connected_nodes.append((euclidean_distance(starting_coords, coords[to_id]), all_nodes[to_id]))\n\n return connected_nodes\n\n goal_coords = (7, 0)\n all_nodes = [Node(_id, euclidean_distance(coord, goal_coords)) for _id, coord in zip(ids, coords)]\n\n all_nodes[8].set_connected_nodes(build_connected_node_list(8, [12]))\n all_nodes[10].set_connected_nodes(build_connected_node_list(10, [12]))\n all_nodes[5].set_connected_nodes(build_connected_node_list(5, [8]))\n all_nodes[6].set_connected_nodes(build_connected_node_list(6, [9, 10]))\n all_nodes[7].set_connected_nodes(build_connected_node_list(7, [11]))\n all_nodes[1].set_connected_nodes(build_connected_node_list(1, [4, 5]))\n all_nodes[2].set_connected_nodes(build_connected_node_list(2, [5, 6]))\n all_nodes[3].set_connected_nodes(build_connected_node_list(3, [7]))\n all_nodes[0].set_connected_nodes(build_connected_node_list(0, [1, 2, 3]))\n\n return all_nodes[0]", "title": "" }, { "docid": "dd317f5ba15a426caeb2c64f2d03eb2b", "score": "0.53840595", "text": "def targets(self, node):\r\n node = self.node(node)\r\n nodes =[conn.target for conn in self.connections if conn.target == node]\r\n return nodes", "title": "" }, { "docid": "c07a0da9600d0f395831edcedc9eefe4", "score": "0.5360226", "text": "def GA(self):\n for i in range(0, self.generations_amount - 1):\n print('Generation_{}'.format(i))\n self.mutation_permutation_crossover()\n self.selection()\n print('---------------------------')\n self.generations[self.generations_amount - 1].create_characters()\n self.write_to_csv()\n # self.show_diagram()\n # self.save_top_3()", "title": "" }, { "docid": "1a024e8f2b55398024b46021fa344e99", "score": "0.53549314", "text": "def GetTargets(arc, source, layers='core'):\n log.debug(\"GetTargets checking in layer: %s for unit: %s arc: %s\" % (layers, source.id, arc.id))\n targets = {}\n for triple in source.arcsOut:\n if (triple.arc == arc):\n if (triple.target != None and triple.layer in layers):\n targets[triple.target] = 1\n elif (triple.text != None and triple.layer in layers):\n targets[triple.text] = 1\n return targets.keys()", "title": "" }, { "docid": "566cfbac1f73a7a7f361f953a4b1dcc1", "score": "0.5343739", "text": "def _build_network_graph(self):\n self.generator = Generator(\n self.src_ten,\n self.tgt_ten,\n self.emb_dim,\n self.batch_size,\n self.smooth_val,\n self.lr_ph,\n self.beta,\n self.vocab_size,\n )\n self.discriminator = Discriminator(self.generator.X, self.generator.Y, self.lr_ph)", "title": "" }, { "docid": "432b2e4f84a893681fa021915cc490aa", "score": "0.5342593", "text": "def graph(self, target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 俱乐部\n\n target1_n = '\"{}\"'.format(target[1][0])\n target1_v = target[1][1]\n # 日本国家队\n\n target2_n = '\"{}\"'.format(target[2][0])\n target2_v = target[2][1]\n # 韩国国家队\n\n # target3_n = '\"{}\"'.format(target[3][0])\n target3_n = aiballclass(target[3][0])\n target3_v = target[3][1]\n # 青训\n\n target4_v = 'aiball:cnName'\n # 哪些\n\n zhuyu3 = HasProperty(target1_n, target1_v)\n zhuyu1 = 
ArelationB(zhuyu3, 'aiball:team', reverse=False)\n binyu1 = IsRelatedTo2(zhuyu1, reverse=False)\n\n thing2 = HasProperty(target3_n, target3_v)\n # 青训节点\n thing2.merge(binyu1)\n\n zhuyu5 = HasProperty(target2_n, target2_v)\n zhuyu2 = ArelationB(zhuyu5, 'aiball:team', reverse=False)\n binyu4 = IsRelatedTo2(zhuyu2, reverse=False)\n thing2.merge(binyu4)\n\n binyu3 = IsRelatedTo(thing2, reverse=True)\n\n thing1 = HasProperty(target0_n, target0_v)\n binyu3.merge(thing1)\n # 俱乐部节点\n\n goal = ArelationB(binyu3, target4_v)\n return goal", "title": "" }, { "docid": "3e6a5d691ab5ba0a0bad0d2c57ffda8f", "score": "0.5339286", "text": "def _get_targets(self, cfg):\n\n targets = {}\n\n for section in cfg.sections():\n if not section.startswith(\"target.\"):\n continue\n\n key = section[7:]\n title = cfg.get(section, \"title\")\n link = cfg.get(section, \"link\")\n driver = key.split(\"-\")[0]\n target = MatrixTarget(key, title, driver, link=link)\n\n targets[key] = target\n\n return targets", "title": "" }, { "docid": "f18ae458623ea879ccca92ce70a930c8", "score": "0.5336056", "text": "def graph(self, target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 大卫·路易斯\n\n target1_n = aiballclass(target[1][0])\n target1_v = target[1][1]\n # 有矛盾\n\n target2_n = '\"{}\"'.format(target[2][0])\n target2_v = target[2][1]\n # 迭戈·科斯塔\n\n target3_v = target[3][1]\n # 哪些\n\n thing1 = HasProperty(target0_n, target0_v)\n zhuyu1 = IsRelatedTo(thing1, reverse=False)\n thing11 = HasProperty(target1_n, target1_v)\n zhuyu1.merge(thing11)\n\n thing2 = HasProperty(target2_n, target2_v)\n zhuyu2 = IsRelatedTo(thing2, reverse=False)\n thing22 = HasProperty(target1_n, target1_v)\n zhuyu2.merge(thing22)\n\n zhuyu3 = IsRelatedTo2(zhuyu1, reverse=False)\n zhuyu4 = IsRelatedTo2(zhuyu2, reverse=False)\n zhuyu3.merge(zhuyu4)\n\n goal = ArelationB(zhuyu3, target3_v)\n return goal", "title": "" }, { "docid": "38af6c4c3742b025a65347ec5f61300a", "score": "0.53185654", "text": "def make_graph(self, instr_file):\n return", "title": "" }, { "docid": "12035b4d6108c76aa5afc2fcf05efe8a", "score": "0.531395", "text": "def generate_networkx_graphs(rand, num_examples, num_nodes_min_max, theta):\n input_graphs = []\n target_graphs = []\n graphs = []\n for _ in range(num_examples):\n graph = generate_graph(rand, num_nodes_min_max, theta=theta)[0]\n graph = add_shortest_path(rand, graph)\n input_graph, target_graph = graph_to_input_target(graph)\n input_graphs.append(input_graph)\n target_graphs.append(target_graph)\n graphs.append(graph)\n return input_graphs, target_graphs, graphs", "title": "" }, { "docid": "63b7eb003737a84e22a7d12c5f8b95af", "score": "0.5305161", "text": "def build_model(self):\n self.G_src2trg = Generator()\n self.D_trg = Discriminator()\n \n self.G_trg2src = Generator()\n self.D_src = Discriminator()\n\n self.g_optimizer = torch.optim.Adam(list(self.G_src2trg.parameters()) + list(self.G_trg2src.parameters() ) , self.g_lr, [self.beta1, self.beta2])\n self.d_optimizer = torch.optim.Adam(list(self.D_src.parameters()) + list(self.D_trg.parameters()), self.d_lr, [self.beta1, self.beta2])\n self.print_network(self.G_src2trg, 'G_src2trg')\n self.print_network(self.G_trg2src, 'G_trg2src')\n self.print_network(self.D_src, 'D_src')\n self.print_network(self.D_trg, 'D_trg')\n \n self.G_src2trg.to(self.device)\n self.G_trg2src.to(self.device)\n self.D_src.to(self.device)\n self.D_trg.to(self.device)", "title": "" }, { "docid": "a9bbcea1a4ef3704abcd0a7162b2bf1e", "score": "0.52945495", "text": "def graph(self, 
target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 马拉多纳\n\n target1_v = target[1][1]\n # 转会\n\n target1_v_prime = target[1][1] + target[2][1]\n # 后缀1,从\n target1_v_prime2 = target[1][1] + target[3][1]\n # 后缀2,到\n\n target3_n = '\"{}\"'.format(target[4][0])\n target3_v = target[4][1]\n # 巴塞罗那\n\n target4_v = target[5][1]\n # 名字\n\n thing1 = HasProperty(target0_n, target0_v)\n binyu1 = ArelationB(thing1, target1_v, reverse=True)\n\n thing2 = HasProperty(target3_n, target3_v)\n binyu2 = ArelationB(thing2, target1_v_prime2, reverse=False)\n\n binyu1.merge(binyu2)\n\n binyu3 = ArelationB(binyu1, target1_v_prime, reverse=True)\n goal = ArelationB(binyu3, target4_v)\n return goal", "title": "" }, { "docid": "5f5616e5e21cec0d7056d412a011677c", "score": "0.52842224", "text": "def test_canon_multi_target_and_host_6():\n cuda_device_type = tvm.device(\"cuda\").device_type\n target = {cuda_device_type: Target(target=\"cuda\", host=\"llvm\")}\n host = None\n raw_targets_1 = Target.canon_multi_target_and_host(target, host)\n assert len(raw_targets_1) == 1\n assert raw_targets_1[0].kind.name == \"cuda\"\n assert raw_targets_1[0].host.kind.name == \"llvm\"\n\n target = {cuda_device_type: Target(tvm.runtime.container.String(\"cuda\"))}\n host = Target(tvm.runtime.container.String(\"llvm\"))\n target = tvm.runtime.convert(target)\n assert isinstance(target, tvm.ir.container.Map)\n raw_targets_2 = Target.canon_multi_target_and_host(target, host)\n assert len(raw_targets_2) == 1\n assert raw_targets_2[0].kind.name == \"cuda\"\n assert raw_targets_2[0].host.kind.name == \"llvm\"", "title": "" }, { "docid": "026f69b837e7ed1e366cb64078469b14", "score": "0.52680296", "text": "def main(target):\n if target == 'scheme':\n gen_scheme()\n elif target == 'tlds':\n gen_tlds()\n else:\n raise ValueError('invalid target')", "title": "" }, { "docid": "46fa20dd65eb380e5d4b6efc2f990fec", "score": "0.5264964", "text": "def encode_targets(gold, tags):\n encoded_targets = []\n for g in gold:\n # Create the binary vector with all zeros\n y = np.zeros(len(tags), dtype=float)\n # Get the labels of the instance\n target_tags = g.split(\";\")\n\n for i in range(0, len(tags)):\n # If the label is in the list assign 1 to its position\n if tags[i] in target_tags:\n y[i] = 1\n encoded_targets.append(y)\n return encoded_targets", "title": "" }, { "docid": "86f3bbc1aeed7218372ea03c2cc057c6", "score": "0.5253634", "text": "def generate_networkx_graphs(rand, num_examples, num_nodes_min_max, theta):\n input_graphs = []\n target_graphs = []\n graphs = []\n for _ in range(num_examples):\n graph = generate_graph(rand, num_nodes_min_max, theta=theta)[0]\n graph = add_shortest_path(rand, graph)\n input_graph, target_graph = graph_to_input_target(graph)\n input_graphs.append(input_graph)\n target_graphs.append(target_graph)\n graphs.append(graph)\n return input_graphs, target_graphs, graphs", "title": "" }, { "docid": "f81e785106ab244ce7b725fc41126368", "score": "0.52509904", "text": "def build_graphs(self) -> int:\n softmax = torch.nn.Softmax(dim=1)\n\n # keep track of a few things...\n n_generated_so_far = 0\n t_bar = tqdm(total=self.batch_size)\n generation_round = 0\n\n # generate graphs in a batch, saving graphs when either the terminate action or an\n # invalid action is sampled, until `self.batch_size` number of graphs have been generated\n while n_generated_so_far < self.batch_size:\n\n # predict the APDs for this batch of graphs\n apd = softmax(self.model(self.nodes, self.edges))\n\n # sample the actions 
from the predicted APDs\n add, conn, term, invalid, nlls_just_sampled = self.get_actions(apd)\n\n # indicate (with a 1) the structures which have been properly terminated\n self.properly_terminated[n_generated_so_far:(n_generated_so_far + len(term))] = 1\n\n # collect the indices for all structures to write (and reset) this round\n termination_idc = torch.cat((term, invalid))\n\n # never write out the dummy graph at index 0\n termination_idc = termination_idc[termination_idc != 0]\n\n # copy the graphs indicated by `terminated_idc` to the tensors for\n # finished graphs (i.e. `generated_{nodes/edges}`)\n n_generated_so_far = self.copy_terminated_graphs(termination_idc,\n n_generated_so_far,\n generation_round,\n nlls_just_sampled)\n\n # apply actions to all graphs (note: applies dummy actions to terminated\n # graphs, since output will be reset anyways)\n self.apply_actions(add, conn, generation_round, nlls_just_sampled)\n\n # after actions are applied, reset graphs which were set to terminate this round\n self.reset_graphs(termination_idc)\n\n # update variables for tracking the progress\n t_bar.update(len(termination_idc))\n generation_round += 1\n\n # done generating\n t_bar.close()\n\n return n_generated_so_far", "title": "" }, { "docid": "fd7bc217363ef4f09c5b8ec5e059dce6", "score": "0.5249665", "text": "def __uniform_target_labels(self):\n gold_label = []\n for i in xrange(0, self.num_egs):\n r = numpy.random.uniform(0,1)\n gold_label.append(1 if r >= 0.5 else 0)\n\n return gold_label", "title": "" }, { "docid": "f8e52b069770394326d553765aded911", "score": "0.52430797", "text": "def generate(env):\r\n\r\n # create a builder that makes PTX files from .cu files\r\n ptx_builder = SCons.Builder.Builder(action = '$NVCC -ptx $NVCCFLAGS $_NVCCWRAPCFLAGS $NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES -o $TARGET',\r\n emitter = {},\r\n suffix = '.ptx',\r\n src_suffix = CUDASuffixes)\r\n env['BUILDERS']['PTXFile'] = ptx_builder\r\n\r\n # create builders that make static & shared objects from .cu files\r\n static_obj, shared_obj = SCons.Tool.createObjBuilders(env)\r\n\r\n for suffix in CUDASuffixes:\r\n # Add this suffix to the list of things buildable by Object\r\n static_obj.add_action('$CUDAFILESUFFIX', '$NVCCCOM')\r\n shared_obj.add_action('$CUDAFILESUFFIX', '$SHNVCCCOM')\r\n static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)\r\n shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)\r\n\r\n # Add this suffix to the list of things scannable\r\n SCons.Tool.SourceFileScanner.add_scanner(suffix, CUDAScanner)\r\n\r\n add_common_nvcc_variables(env)\r\n\r\n # set the \"CUDA Compiler Command\" environment variable\r\n # windows is picky about getting the full filename of the executable\r\n if os.name == 'nt':\r\n env['NVCC'] = 'nvcc.exe'\r\n env['SHNVCC'] = 'nvcc.exe'\r\n else:\r\n env['NVCC'] = 'nvcc'\r\n env['SHNVCC'] = 'nvcc'\r\n \r\n # set the include path, and pass both c compiler flags and c++ compiler flags\r\n env['NVCCFLAGS'] = SCons.Util.CLVar('')\r\n env['SHNVCCFLAGS'] = SCons.Util.CLVar('') + ' -shared'\r\n \r\n # 'NVCC Command'\r\n env['NVCCCOM'] = '$NVCC -o $TARGET -c $NVCCFLAGS $_NVCCWRAPCFLAGS $NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES'\r\n env['SHNVCCCOM'] = '$SHNVCC -o $TARGET -c $SHNVCCFLAGS $_NVCCWRAPSHCFLAGS $_NVCCWRAPSHCCFLAGS $_NVCCCOMCOM $SOURCES'\r\n \r\n # the suffix of CUDA source files is '.cu'\r\n env['CUDAFILESUFFIX'] = '.cu'\r\n\r\n # XXX add code to generate builders for other miscellaneous\r\n # CUDA files here, such as .gpu, etc.\r\n\r\n # XXX 
intelligently detect location of nvcc and cuda libraries here\r\n (bin_path,lib_path,inc_path) = get_cuda_paths()\r\n \r\n env.PrependENVPath('PATH', bin_path)", "title": "" }, { "docid": "91c4dff4c4702ce53df8c96d8d78fd9a", "score": "0.5242214", "text": "def targets(self):\n return tuple(self.label[0])", "title": "" }, { "docid": "ff79fd78b4d1eb67963716fc07171b01", "score": "0.52288973", "text": "def build_graph(ruleset, args):\n\n if args.dependencies == \"direct\":\n dep_lib = ruleset_deps_direct\n else:\n dep_lib = ruleset_deps_indirect\n\n ruleset = sort_ruleset(ruleset)\n if args.input_format == \"fib\":\n deps = dep_lib.build_prefix_table_deps(ruleset)\n else:\n deps = dep_lib.build_ruleset_deps(ruleset)\n\n if args.no_table_miss:\n ruleset = [rule for rule in ruleset if rule.priority != 0]\n deps = [e for e in deps if e[0].priority != 0 and e[1].priority != 0]\n\n if args.compress:\n if args.only_group:\n min_groups, _ = create_similar_groups(ruleset, deps=deps)\n min_ruleset = []\n else:\n min_ruleset, min_groups = compress_ruleset(ruleset, deps=deps)\n\n # Build our graph object\n G = nx.DiGraph()\n G.add_edges_from(((src, dst) for src, dst in deps if src.table == dst.table),\n color=\"black\")\n G.add_edges_from(((src, dst) for src, dst in deps if src.table != dst.table),\n color=\"red\", style=\"dashed\")\n\n # Add a label to all rules\n label_dict = {}\n reachables = []\n for i, rule in enumerate(ruleset):\n try:\n G.node[rule]['table'] = rule.table\n rule.label = args.node_label.format(AlphaInt(i))\n label_dict[rule] = rule.label\n G.node[rule]['label'] = rule.label\n if args.type == 'd3':\n G.node[rule]['d3name'] = rule.label + \" \" + str(rule)\n if args.type in ('dotpdf', 'dotraw'):\n G.node[rule]['tooltip'] = str(rule).replace(\"\\n\", \"<BR/>\")\n G.node[rule]['priority'] = rule.priority\n G.node[rule]['table'] = rule.table\n # Label with number and print mapping\n print(rule.label, '\\t', str(rule), file=sys.stderr)\n reachables.append(rule)\n except KeyError:\n print('Unreachable:', '\\t', str(rule), file=sys.stderr)\n if args.compress:\n min_ruleset.remove(rule)\n del min_groups[rule]\n ruleset[:] = reachables\n # Remove unreachable rules\n\n # Create clusters and groups\n if args.cluster == \"priority\":\n number_groups(groupby(ruleset, key=sort_key_ruleset_priority), \"cluster\", G)\n elif args.cluster == \"table\":\n number_groups(groupby(ruleset, key=lambda r: r.table), \"cluster\", G)\n elif args.cluster == \"compressed\":\n groups = [(None, min_groups[group]) for group in sort_ruleset(min_groups)]\n number_groups(groups, \"cluster\", G)\n\n if args.group == \"priority\":\n number_groups(groupby(ruleset, key=sort_key_ruleset_priority), \"group\", G)\n elif args.group == \"table\":\n number_groups(groupby(ruleset, key=lambda r: r.table), \"group\", G)\n elif args.group == \"compressed\":\n groups = [(None, min_groups[group]) for group in sort_ruleset(min_groups)]\n number_groups(groups, \"group\", G)\n\n if args.type in ('pyplot', 'd3'):\n pos = directed_layout(G)\n for node in G:\n G.node[node]['pos'] = pos[node]\n\n\n if args.compress:\n for selected in min_ruleset:\n G.node[selected]['fillcolor'] = 'skyblue'\n G.node[selected]['style'] = 'filled'\n\n return G", "title": "" }, { "docid": "b0f33debd999a48a8d07f157a702da14", "score": "0.52284366", "text": "def build_target_network(self):\n self.target_input_x_op, _, self.target_net_out, \\\n self.target_weight, self.target_bias = self.__build_net_struct('target')", "title": "" }, { "docid": 
"d853dd1602dbbc9fe779c0834ee8e48d", "score": "0.52281356", "text": "def tmb(self, targets):\n\n\n pass", "title": "" }, { "docid": "ca5549711418ec8a28db30a501808399", "score": "0.5223095", "text": "def build_gdynet(self):\n stacked_coords_inp = Input(shape=(self.num_atom, 3, 2))\n stacked_lattices_inp = Input(shape=(3, 2))\n stacked_nbr_lists_inp = Input(shape=(self.num_atom, self.num_nbr, 2),\n dtype='int32')\n atom_types_inp = Input(shape=(self.num_atom, ), dtype='int32')\n target_index_inp = Input(shape=(self.num_target, ), dtype='int32')\n nbr_lists_1, bond_fea_1, nbr_lists_2, bond_fea_2 =\\\n Lambda(self.prep.pre_process)(\n [stacked_coords_inp, stacked_lattices_inp,\n stacked_nbr_lists_inp])\n cgcnn_model = self.build_cgcnn_model()\n branch_1 = cgcnn_model([atom_types_inp,\n bond_fea_1,\n nbr_lists_1,\n target_index_inp])\n branch_2 = cgcnn_model([atom_types_inp,\n bond_fea_2,\n nbr_lists_2,\n target_index_inp])\n merged = concatenate([branch_1, branch_2])\n self.model = keras.Model(inputs=[stacked_coords_inp,\n stacked_lattices_inp,\n stacked_nbr_lists_inp,\n atom_types_inp,\n target_index_inp],\n outputs=merged)\n self.optimizer = keras.optimizers.Adam(lr=self.learning_rate)\n self.losses = [self.vamp.loss_VAMP2_autograd,\n self.vamp._loss_VAMP_sym,\n self.vamp.loss_VAMP2_autograd]", "title": "" }, { "docid": "f8ff149eb0b88c26b2f105c7d9897621", "score": "0.522274", "text": "def graph(self, target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 马拉多纳\n\n target1_v = target[1][1]\n # 转会\n target1_v_in = target[1][1] + 'In'\n\n target2_n = '\"{}\"'.format(target[2][0])\n target2_v = target[2][1]\n # 巴塞罗那\n\n target4_v = target[3][1]\n # 时间\n\n thing1 = HasProperty(target0_n, target0_v)\n binyu1 = ArelationB(thing1, target1_v, reverse=True)\n\n thing2 = HasProperty(target2_n, target2_v)\n zhuyu1 = ArelationB(thing2, target1_v_in, reverse=False)\n\n binyu1.merge(zhuyu1)\n goal = ArelationB(binyu1, target4_v)\n return goal", "title": "" }, { "docid": "f7c9aae5054b6907b625531590658512", "score": "0.52136", "text": "def targets(self):\n return self._targets", "title": "" }, { "docid": "efc251220e3ec2e317943f0c508e78e2", "score": "0.51953965", "text": "def build_graph(self):\n self._create_dynamics(self.trajectory_length,\n self.eps,\n use_temperature=True)\n self._create_loss()\n self._create_optimizer()\n self._create_summaries()\n self._save_variables()\n self._create_params_file()", "title": "" }, { "docid": "dac81a7a51fc9621dc4a08b3153e2ff6", "score": "0.5194197", "text": "def injectables(self, build_graph):", "title": "" }, { "docid": "831c6b8195515a4a498b04ed6efb4d47", "score": "0.51882654", "text": "def build_cascade_graph(cascades, num_nodes):\n cid = num_nodes\n edges = []\n for c in cascades:\n edges += [(node, cid) for node in c]\n cid += 1\n g = nx.from_edgelist(edges)\n return g", "title": "" }, { "docid": "3e109b713c92fb3326afef6146c296a5", "score": "0.5187795", "text": "def updateTargets(self):\n\n for name in self.getOutputNames():\n outputTargets = self.getOutput(name)\n if not isinstance(outputTargets, list):\n outputTargets = [outputTargets]\n\n for outputTarget in outputTargets:\n if not isinstance(outputTarget, SceneItem):\n continue\n\n outputTarget.addSource(self)", "title": "" }, { "docid": "fe2a1a4436be3b98c20a9fd9cca90aba", "score": "0.5185761", "text": "def extract_from_graph(graph, shape, dtype, target, symbols, target_host=None):\n import nnvm.compiler\n\n env = TaskExtractEnv.get()\n\n topi_funcs = []\n for sym_name in symbols:\n if 
sym_name in env.symbol2topi:\n topi_funcs.extend(env.symbol2topi[sym_name])\n else:\n warnings.warn(\"Symbol %s is not tunable, ignored\" % sym_name)\n\n # run compiler to collect all TOPI calls during compilation\n env.reset(topi_funcs)\n\n # disable logger temporarily\n old_state = logger.disabled\n logger.disabled = True\n\n # use a \"tracing\" target to do a fake compile for collecting topi calls\n tracing_target = _target.create(\"llvm -device=tracing\")\n nnvm.compiler.engine.clear_cache()\n nnvm.compiler.build(graph, target=tracing_target, shape=shape, dtype=dtype)\n\n logger.disabled = old_state\n\n tasks = []\n for task_name, args in env.get_tasks():\n tasks.append(create(task_name, args,\n target=target, target_host=target_host,\n template_key='direct'))\n\n return tasks", "title": "" }, { "docid": "3a98381ad76f736e12773f7a071ce4b7", "score": "0.51793665", "text": "def generate_triples(\n gp,\n stps,\n vars_joint='edges',\n):\n assert isinstance(gp, GraphPattern)\n assert vars_joint in ('none', 'edges', 'all')\n vars_ = gp.vars_in_graph\n assert {SOURCE_VAR, TARGET_VAR} <= vars_\n\n if vars_joint == 'none':\n vars_ = set()\n elif vars_joint == 'edges':\n vars_ = gp.edge_vars()\n else:\n # 'all' joint, use vars_ as is from above\n pass\n vars_ = vars_ - {SOURCE_VAR, TARGET_VAR}\n\n # for given vars generate static URIRefs\n gp_static = gp.replace({v: EVAL_DATA_NS[v] for v in vars_})\n # for each of the remaining vars create a UUID URIRef per stp:\n dyn_vars = gp_static.vars_in_graph - {SOURCE_VAR, TARGET_VAR}\n\n # for each stp generate pattern instantiation\n n = 0\n for source, target in stps:\n mapping = {v: EVAL_DATA_NS[str(uuid4())] for v in dyn_vars}\n mapping.update({SOURCE_VAR: source, TARGET_VAR: target})\n for t in gp_static.replace(mapping):\n yield t\n n += 1\n logger.debug(\"Generated %d triples\", n)", "title": "" }, { "docid": "86830d71ccd676e3e67fa3a5a17e5815", "score": "0.51753753", "text": "def make_graphs(data):\n base = file_to_module(data[0])\n sources = data[1]\n lines = [' \"{}\" -> \"{}\";'.format(base, source) for source in sources]\n return \"\\n\".join(lines)", "title": "" }, { "docid": "4e8953a9d8fca5543a22105fc52c8b26", "score": "0.5174002", "text": "def graph(self, target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 鲁尼\n\n target1_n = '\"{}\"'.format(target[1][0])\n target1_v = target[1][1]\n # 俱乐部\n\n target2_n = '\"{}\"'.format(target[2][0])\n target2_v = target[2][1]\n # 英格兰队\n\n target3_n = '\"{}\"'.format(target[3][0])\n target3_v = target[3][1]\n # 国家队\n\n target4_v = target[4][1]\n # 哪些\n\n thing1 = HasProperty(target0_n, target0_v)\n binyu1 = IsRelatedTo(thing1, reverse=True)\n\n thing2 = HasProperty(target1_n, target1_v)\n binyu1.merge(thing2)\n zhuyu1 = IsRelatedTo(binyu1, reverse=False)\n\n thing3 = HasProperty(target2_n, target2_v)\n thing4 = HasProperty(target3_n, target3_v)\n thing3.merge(thing4)\n zhuyu2 = IsRelatedTo(thing3, reverse=False)\n\n zhuyu1.merge(zhuyu2)\n\n goal = ArelationB(zhuyu1, target4_v)\n return goal", "title": "" }, { "docid": "3828ef47a2456bc438f4370436f5f96c", "score": "0.5170531", "text": "def split_targets(targets):\n nogain, gain, pol, target = [], [], [], []\n for cal in targets:\n kat_target = katpoint.Target(cal)\n tags = kat_target.tags\n # tags which have gains applied by pipeline\n gaintaglist = ('gaincal', 'bfcal')\n nogaintaglist = ('bpcal', 'delaycal')\n if any(x in nogaintaglist for x in tags):\n nogain.append(cal)\n if any(x in gaintaglist for x in tags):\n 
gain.append(cal)\n if 'polcal' in tags:\n pol.append(cal)\n # if a target is a calibrator, don't include it here as it will already be included\n # in the calibrator plots\n if ('target' in tags and\n not any(x in nogaintaglist + gaintaglist for x in tags)):\n target.append(cal)\n return nogain, gain, pol, target", "title": "" }, { "docid": "e6039ace5cc9c1ecf6e78ec039c62c4c", "score": "0.51635015", "text": "def targets(self):\n\n exp = _get_all_children(self._node, \"target\")\n ret = []\n for node in exp:\n ret.append(Target(node, self._rdmlFilename))\n return ret", "title": "" }, { "docid": "3cf713adc950ec260afd98b59072ce8e", "score": "0.5161229", "text": "def gen_target(self, arm):\n gain = np.sum(arm.L) * .75\n bias = -np.sum(arm.L) * 0\n\n self.target = np.random.random(size=(2,)) * gain + bias\n\n return self.target.tolist()", "title": "" }, { "docid": "0dc5200e24f1f02c6573e1a89211efbb", "score": "0.51590145", "text": "def build_targets(self, pred_boxes, pred_confs, ground_truth, nH, nW, seen=0):\n if torch.is_tensor(ground_truth):\n return self.__build_targets_tensor(pred_boxes, pred_confs, ground_truth, nH, nW, seen=seen)\n else:\n return self.__build_targets_brambox(pred_boxes, pred_confs, ground_truth, nH, nW, seen=seen)", "title": "" }, { "docid": "ea3a150c716f4c67c859f81d299b7495", "score": "0.5153567", "text": "def graph(self, target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 马拉多纳\n\n target1_v = target[1][1]\n # 转会\n\n target1_v_prime = target[1][1] + target[2][1]\n # 后缀1, 从\n target1_v_prime2 = target[1][1] + target[3][1]\n # 后缀2,到\n\n target3_n = '\"{}\"'.format(target[4][0])\n target3_v = target[4][1]\n # 博卡青年\n\n target4_v = target[5][1]\n # 名字\n\n thing1 = HasProperty(target0_n, target0_v)\n binyu1 = ArelationB(thing1, target1_v, reverse=True)\n\n thing2 = HasProperty(target3_n, target3_v)\n binyu2 = ArelationB(thing2, target1_v_prime, reverse=False)\n\n binyu1.merge(binyu2)\n\n binyu3 = ArelationB(binyu1, target1_v_prime2, reverse=True)\n goal = ArelationB(binyu3, target4_v)\n return goal", "title": "" }, { "docid": "fe9eec89e228a7a5e9d995e41d6e7812", "score": "0.51534534", "text": "def generate_networkx_graph_reference(\n generators='all',\n print=print,\n output=sys.stdout):\n tests_path = resource_filename('networkx', 'generators/tests')\n\n sys.path.append(tests_path)\n\n # generators=('classic', 'small', ...)\n if generators == 'all':\n generatorsrc = resource_stream(\n 'networkx',\n 'generators/__init__.py')\n generatorlist = tuple(\n n.split('.')[2].split()[0]\n for n in generatorsrc\n if n.startswith('from networkx.generators.'))\n else:\n generatorlist = tuple(generators)\n\n\n c = RSTContext(output=output)\n c.title = \"networkx reference graphs\"\n c.print_rest_title(c.title)\n c.print_rest_meta(\"Version\", nx.__version__)\n c.print_rest_meta(\"Copyright\", \"Copyright NetworkX: %s. `<%s>`_\\n\" % (\n nx.__license__, \"http://networkx.lanl.gov\"))\n c.print_rest_meta(\"SeeAlso\", \"`<https://github.com/networkx/networkx>`_\")\n\n c.print_rest_directive(\"contents\", depth=2)\n c.print_rest_directive(\"sectnum\")\n\n generators = OrderedDict()\n for g in generatorlist:\n c.print_rest_header(\n str(g),\n \"=\")\n\n Generators = getattr(nx.generators, g)\n generators[g] = []\n\n if Generators.__doc__:\n c.print_rest_docstring(Generators)\n\n fn_filter = lambda x: x.endswith('_graph')\n Functions = filter(fn_filter, dir(Generators))\n if Functions:\n c.print(\".. Functions:\")\n c.print_rest_list(Functions, indentstr=('.. 
'))\n\n generators[g] = dict(izip(Functions, repeat([])))\n\n c.print_rest_directive(\"contents\", local='', depth=1)\n\n TestsFile, Tests, TestClass, TestFuncs = None, None, None, None\n # nx.generators.tests.test_${g}\n TestsFile = 'test_%s' % g\n TestsFilePath = resource_filename(\n 'networkx',\n 'generators/tests/%s.py' % TestsFile)\n TestClassName = 'TestGenerator%s' % g.capitalize()\n TestMethodFilter = lambda x: x.startswith(\"test\")\n\n try:\n Tests = __import__(TestsFile)\n except ImportError:\n pass\n try:\n TestClass = getattr(Tests, TestClassName)\n TestFuncs = filter(TestMethodFilter, dir(TestClass))\n except AttributeError:\n pass\n\n for graph_fn in Functions:\n graph_fn_path = \"%s.%s\" % (g, graph_fn)\n GraphFunction = getattr(Generators, graph_fn)\n\n c.print_rest_header(\n graph_fn_path,\n '-')\n c.print_rest_directive(\"contents\", local='', depth=1)\n\n if GraphFunction.__doc__:\n c.print_rest_docstring(GraphFunction)\n\n argspec = inspect.getargspec(GraphFunction)\n # asr.add_argspec(argspec)\n c.print_rest_preformatted(\n inspect.formatargspec(argspec),\n header = \"``%s`` argspec\" % graph_fn)\n\n #c.print_rest_argspec(\n # sage_getargspec(GraphFunction),\n # header = \"``%s`` argspec ast\" % graph_fn)\n\n c.print_rest_header(\n 'src: ``%s``' % graph_fn_path,\n '~')\n c.print_rest_source_lines(\n GraphFunction,\n header=\"source\")\n\n\n if TestFuncs:\n for fn in ifilter(lambda x: graph_fn in x, TestFuncs): # ...\n c.print_rest_header(\"test_function: ``%s``\" % fn, \"~\")\n c.print_rest_source_lines(\n getattr(TestClass, fn),\n header=\"``%s``\" % fn)\n #else:\n # c.print_rest_preformatted(\"# No tests found\"),\n\n if Tests:\n c.print_rest_header(\n 'tests grep for ``%s``' % graph_fn,\n \"~\")\n test_examples=[]\n\n c.print_rest_preformatted(\n format_numbered_line_iter(\n parse_for_examples(\n grep_file(\n filename=TestsFilePath,\n searchfn=lambda x: graph_fn in x),\n func_name=graph_fn,\n examples=test_examples)\n ),\n header=\"``networkx.generators.tests.%s``\" % TestsFile)\n\n c.print_rest_header(\n 'ast examples',\n '~')\n\n #c.print_rest_preformatted(\n # parse_for_examples_ast(\n # source=inspect.getsourcelines(GraphFunction)[0]),\n # header='ast examples')\n\n c.print_rest_header(\n 'test_examples for ``%s``' % graph_fn,\n \"~\")\n c.print_rest_preformatted(\n test_examples,\n header='examples')\n\n generators[g][graph_fn] = test_examples\n\n c.print_rest_args_summary(c.argspecs)\n\n c.print_rest_header(\n \"generators\",\n \"=\")\n c.print_rest_preformatted(\n pformat(dict(generators)))\n\n return c.file", "title": "" }, { "docid": "0df26819cb5d16c0b727ead06dadb376", "score": "0.51488036", "text": "def generate_visualized_network(genome, nodes):\n for i in genome.get_nodes():\n if genome.is_input(i):\n color = (0, 0, 255)\n x = 50\n y = 140 + i*60\n elif genome.is_output(i):\n color = (255, 0, 0)\n x = NETWORK_WIDTH-50\n y = HEIGHT/2\n else:\n color = (0, 0, 0)\n x = random.randint(NETWORK_WIDTH/3, int(NETWORK_WIDTH * (2.0/3)))\n y = random.randint(20, HEIGHT-20)\n nodes[i] = [(int(x), int(y)), color]", "title": "" }, { "docid": "c38a3a35aa33e716f257f26f2c078838", "score": "0.5148618", "text": "def create_targets(self, sars):\n rewards = [_sars[2] for _sars in sars]\n s_prime = [_sars[3] for _sars in sars]\n\n # collect features for every s' with every possible action\n feats = []\n for meta in s_prime:\n # get features for every possible action in every possible state\n feats.extend([\n self.get_features(meta, action)\n for action in self.actions()\n 
])\n\n # get Q predictions for all s' and actions\n # qs = self.net.call(np.vstack(sa_prime))\n self.scaler = sklearn.preprocessing.StandardScaler()\n X = self.scaler.fit_transform(np.vstack(feats))\n qs = self.ridge.predict(X)\n\n # reshape so that Qs for each s' is on a row\n # with Q for each (s', action) in the columns\n # then take the min of each row and ravel\n qs = qs.reshape(-1, self.n_actions).min(axis=1).ravel()\n\n # targets = reward + discount * Qopt\n targets = (qs * self.discount) + rewards\n\n return targets", "title": "" }, { "docid": "0641192b9bcd1b7763c483e42011aadb", "score": "0.51362354", "text": "def graph(self, target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 河床\n\n target1_n = aiballclass(target[1][0])\n target1_v = target[1][1]\n # 青训\n\n target2_v = target[2][1]\n # 哪些\n\n # target3_v = target[3][1]\n # 青训动词\n\n thing1 = HasProperty(target0_n, target0_v)\n zhuyu1 = IsRelatedTo(thing1, reverse=False)\n\n thing2 = HasProperty(target1_n, target1_v)\n zhuyu1.merge(thing2)\n zhuyu2 = IsRelatedTo2(zhuyu1, reverse=False)\n\n goal = ArelationB(zhuyu2, target2_v)\n return goal", "title": "" }, { "docid": "486b465287e87f2361e7fa1a98347627", "score": "0.5135752", "text": "def sample_graph_exp():\n from .simple_graph import vis_graph, find_connected_limited\n from .nx_graph import nx_create_graph, nx_vis_force, nx_find_connected_limited\n from .monte_carlo import (\n monte_carlo,\n list_to_df,\n summarise_monte_carlo,\n get_distribution,\n )\n\n graph = [[] for _ in range(14)]\n\n graph[0] = [7]\n graph[1] = [3]\n graph[2] = []\n graph[3] = [9]\n graph[4] = [12, 5]\n graph[5] = [4]\n graph[6] = [13]\n graph[7] = []\n graph[8] = []\n graph[9] = []\n graph[10] = [8, 9]\n graph[11] = []\n graph[12] = []\n graph[13] = [4]\n\n connected = [0, 3, 4, 6]\n fixed_samples = 2\n num_sampled = 3\n\n start = [0, 1, 2, 3, 4, 5, 6]\n source_vert_list = np.array(start)\n end = [7, 8, 9, 10, 11, 12, 13]\n targets = np.array(end)\n\n here = os.path.dirname(os.path.abspath(__file__))\n os.makedirs(os.path.join(here, \"..\", \"figures\"), exist_ok=True)\n fig = vis_graph(graph, [7, 7], start, end)\n fig.savefig(os.path.join(here, \"..\", \"figures\", \"hand_simple_2.png\"))\n\n # def random_var_gen():\n # sources = np.append(\n # np.random.choice(connected, fixed_samples, replace=False),\n # np.random.choice(\n # np.delete(source_vert_list, connected),\n # num_sampled - fixed_samples,\n # replace=False,\n # ),\n # )\n\n # return (sources,)\n\n # def fn_to_eval(sources):\n # reachable = find_connected_limited(graph, sources, targets, max_depth=1)\n # # reachable = find_connected(graph, sources, targets)\n # return (len(reachable),)\n\n def random_var_gen(iter_val):\n sources = np.random.choice(source_vert_list, num_sampled, replace=False)\n targets_t = np.random.choice(targets, num_sampled, replace=False)\n\n return (sources, targets_t)\n\n def fn_to_eval(sources, targets_t):\n reachable = find_connected_limited(graph, sources, targets_t, max_depth=3)\n return (len(reachable),)\n\n result = monte_carlo(fn_to_eval, random_var_gen, 10000, num_cpus=1)\n df = list_to_df(\n result,\n [\n \"Connections\",\n ],\n )\n result = summarise_monte_carlo(\n df,\n to_plot=[\n \"Connections\",\n ],\n plt_outfile=\"graph_dist.png\",\n plot=False,\n )\n distrib = get_distribution(df, \"Connections\", 10000)\n\n fig, ax = plt.subplots(figsize=(6, 8))\n x = np.array(list(distrib.keys()), dtype=float)\n y = np.array(list(distrib.values()), dtype=float)\n\n ax.plot(x, y, \"ko\", 
ms=2.5)\n y_vals_min = [0 for _ in x]\n y_vals_max = y\n colors = [\"k\" for _ in x]\n ax.vlines(x, y_vals_min, y_vals_max, colors=colors)\n sns.despine()\n plt.xlabel(\"Number of recorded connections\")\n plt.ylabel(\"Probability of occurrence\")\n fig.savefig(os.path.join(here, \"..\", \"figures\", \"pdf_small_3.pdf\"), dpi=400)\n\n return {\"distrib\": distrib, \"summary\": result, \"full\": df}", "title": "" }, { "docid": "02f3a58a8d50dd7c3cbd208c693d9ba9", "score": "0.5135485", "text": "def targets(self) -> Iterator[BranchSet]:\n for target_select in self.values():\n if target_select.non_terminal_part:\n yield target_select.target", "title": "" }, { "docid": "c7b5984bcf7987538b5a08f490cf2942", "score": "0.513384", "text": "def targets_to_report(self):\n return self.all_targets", "title": "" }, { "docid": "1f7f23734ecec7664a753426b669613a", "score": "0.51313114", "text": "def combine_carpathlink_nodes(self):", "title": "" }, { "docid": "39fa4037439a3fbded1736320505ddb4", "score": "0.5130095", "text": "def generate_targets(self, results):\n\n assert isinstance(results, dict)\n\n polygon_masks = results['gt_masks'].masks\n polygon_masks_ignore = results['gt_masks_ignore'].masks\n\n h, w, _ = results['img_shape']\n\n gt_text_mask = self.generate_text_region_mask((h, w), polygon_masks)\n gt_mask = self.generate_effective_mask((h, w), polygon_masks_ignore)\n\n (gt_center_region_mask, gt_radius_map, gt_sin_map,\n gt_cos_map) = self.generate_center_mask_attrib_maps((h, w),\n polygon_masks)\n\n results['mask_fields'].clear() # rm gt_masks encoded by polygons\n mapping = {\n 'gt_text_mask': gt_text_mask,\n 'gt_center_region_mask': gt_center_region_mask,\n 'gt_mask': gt_mask,\n 'gt_radius_map': gt_radius_map,\n 'gt_sin_map': gt_sin_map,\n 'gt_cos_map': gt_cos_map\n }\n for key, value in mapping.items():\n value = value if isinstance(value, list) else [value]\n results[key] = BitmapMasks(value, h, w)\n results['mask_fields'].append(key)\n\n return results", "title": "" }, { "docid": "05099e1c8e0d51ccfde722a7d2112d97", "score": "0.51292646", "text": "def _build_computational_graph(self, model, optimizer):\n pass", "title": "" }, { "docid": "4fdf3f5d5bd68d37cbb68a8daefa6f63", "score": "0.5123553", "text": "def new_targets_cost(self, measurements):\n \n raise NotImplementedError()", "title": "" }, { "docid": "fa9135185b4ea29f0d108103e588893c", "score": "0.5120161", "text": "def cycle_gan_internal(inputs, targets, _, hparams):\n with tf.variable_scope(\"cycle_gan\"):\n # Embed inputs and targets.\n inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets)#[? ? 1 1]\n inputs = common_layers.embedding(\n inputs_orig, hparams.vocab_size, hparams.hidden_size, \"embed\")#[? ? 1 384]\n targets = common_layers.embedding(\n targets_orig, hparams.vocab_size, hparams.hidden_size,\n \"embed\", reuse=True)\n\n\n ###?????\n x, _ = split_on_batch(inputs)\n _, y = split_on_batch(targets)\n\n\n whether_compress=True#real\n #whether_compress=False\n\n\n\n\n\n # Y --> X\n y_fake = generator(y, hparams, \"Fy\", reuse=False)# [? ? 1 384]\n #y_to_x_loss = lossfn(y, y_fake, True, hparams, True, \"YtoX\")###??? 
wrong?\n\n y_to_x_loss = lossfn(x, y_fake, whether_compress, hparams, True, \"YtoX\")##yr add\n\n # X --> Y\n x_fake = generator(x, hparams, \"Gx\", reuse=False)\n x_to_y_loss = lossfn(y, x_fake, whether_compress, hparams, True, \"XtoY\")\n\n # Cycle-Consistency\n y_fake_ = generator(y_fake, hparams, \"Gx\", reuse=True)\n x_fake_ = generator(x_fake, hparams, \"Fy\", reuse=True)\n x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean(\n tf.abs(x_fake_ - x))\n y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean(\n tf.abs(y_fake_ - y))\n cycloss = x_to_x_loss + y_to_y_loss\n\n sample_generated = generator(inputs, hparams, \"Gx\", reuse=True)#[? ? 1 384]\n sample_generated = tf.layers.dense(\n sample_generated, hparams.vocab_size, name=\"softmax\", reuse=None)#[? ? 1 6381]\n sample_generated = tf.stop_gradient(\n tf.expand_dims(sample_generated, axis=2))\n\n # losses = {\"cycloss\": cycloss,\n # \"y_to_x_loss\": y_to_x_loss,\n # \"x_to_y_loss\": x_to_y_loss,\n # 'yr1':x_to_x_loss,'yr2':y_to_y_loss}\n #'x':[x,inputs],'yf':y_fake}#fail\n\n #cycloss | y_to_x_loss sometimes nan ,sometimes otherwise\n # losses = {\"cycloss\": 1.0,\n # \"y_to_x_loss\": 1.0,\n # \"x_to_y_loss\": x_to_y_loss,\n # \"training\":1.0}# no need to calc loss(generated_sample,target)\n\n # losses = {\"cycloss\": 1.0,\n # \"y_to_x_loss\": 1.0,\n # \"x_to_y_loss\": 1.0}\n\n losses = {\"cycloss\": cycloss,\n \"y_to_x_loss\": y_to_x_loss,\n \"x_to_y_loss\": x_to_y_loss}#real\n\n\n\n return sample_generated, losses# [? ? 1 1 1471] loss", "title": "" }, { "docid": "ab4895d9a90e735e07bce868853f07f8", "score": "0.51180214", "text": "def load_targets(positive_data_file):\n # Load data from files\n with open(positive_data_file, 'r', encoding=\"utf8\") as csvfile:\n aspectreader = csv.reader(csvfile, delimiter=',')\n j = 0\n count = 0\n input = []\n target = []\n lable = []\n examples=[]\n ccc=[]\n for row in aspectreader:\n if (j == 0):\n j = 1\n else:\n sent = row[0].lower()\n sent = remove_punct(sent)\n sent.replace('\\d+', '')\n # sent.replace(r'\\b\\w\\b', '').replace(r'\\s+', ' ')\n # sent.replace('\\s+', ' ', regex=True)\n # sent=re.sub(r\"^\\s+|\\s+$\", \"\", sent), sep='')\n sent = re.sub(r\"^\\s+|\\s+$\", \"\", sent)\n examples.append(sent)\n input.append(sent)\n # nb_aspects = int(row[1])\n aspect = row[1].lower()\n target.append(aspect)\n examples.append(aspect)\n polarity = row[2]\n examples.append(polarity)\n ccc.append(sent+\",\"+aspect+\",\"+polarity+\",\"+row[3]+\",\"+row[4])\n x_text = input\n # find the same targets\n all_sentence = [s for s in x_text]\n targets_nums = [all_sentence.count(s) for s in all_sentence]\n # ccc_num= [ccc.count(s) for s in ccc]\n # for i in range(len(ccc_num)):\n # if ccc_num[i]>1:\n # print(str(i+2) +ccc[i])\n\n # i=0\n # while i<len(targets_nums):\n # act=int(targets_nums[i])\n # for j in range(act):\n # if not (targets_nums[i+j]==act):\n # print(x_text[i+j-1])\n # print(targets_nums[i+j])\n # print(i+j-1)\n # i=i+j+1\n\n\n targets = []\n i = 0\n while i < len(all_sentence):\n num = targets_nums[i]\n target = []\n for j in range(num):\n target.append(examples[(i + j) * 3 + 1])\n for j in range(num):\n targets.append(target)\n i = i + num\n targets_nums = np.array(targets_nums)\n return [targets, targets_nums]", "title": "" }, { "docid": "24453dfed819907fb3b42f1107e5c591", "score": "0.51176643", "text": "def generate(train_set, test_set, experiment_group=0, time_train_1=50, time_train_2=150,\r\n time_train_sim=0, begin_hold=5, end_hold=25, present_time_train=10,\r\n 
present_time_test=5, fm_output_neurons=3000, sim_switch_time=5, probes=True,\r\n load_weights=False, seed=0, tau=0.05, delta=0.0002, c=0):\r\n\r\n targ_func = generate_target_function(train_set, test_set, present_time_train, present_time_test, time_train_1,\r\n time_train_2, begin_hold, end_hold)\r\n\r\n start_pos = np.array([0, 1])\r\n arm_sim = arm.Arm2Link(dt=1e-3) # Make use of arm from REACH (src: TO DO)\r\n\r\n # set the initial position of the arm\r\n arm_sim.init_q = arm_sim.inv_kinematics(start_pos)\r\n arm_sim.reset()\r\n\r\n # Scaling functions\r\n q_scale = 2.9 / 2\r\n # q_means = [np.pi / 2, np.pi / 2]\r\n q_means = 3.1/2\r\n x_means = start_pos\r\n x_scale = 2\r\n\r\n dq_means = np.array([0, 0])\r\n dq_scale = [np.pi * 2, np.pi * 2] # TODO: change scale\r\n\r\n u_means = np.array([0, 0])\r\n u_scale = np.array([18, 14]) # need to tweak\r\n\r\n q_to_ens, ens_to_q = generate_scaling_functions(q_means, q_scale)\r\n x_to_ens, ens_to_x = generate_scaling_functions(x_means, x_scale)\r\n dq_to_ens, ens_to_dq = generate_scaling_functions(dq_means, dq_scale)\r\n u_to_ens, ens_to_u = generate_scaling_functions(u_means, u_scale)\r\n\r\n net = nengo.Network(seed=seed)\r\n with net:\r\n def global_sim_flag_function(t):\r\n if experiment_group > 0 and not (experiment_group == 6):\r\n start = time_train_1 + time_train_2\r\n weight = np.maximum(0, t - start)\r\n weight = np.minimum(weight, sim_switch_time)\r\n weight = 1 - ((weight) / sim_switch_time)\r\n return weight\r\n else:\r\n return 1\r\n\r\n # global internal simulation flag switch\r\n net.global_sim_flag = nengo.Node(output=global_sim_flag_function)\r\n\r\n # ======= Inverse Model ======= #\r\n # arm output: [q1, q2, dq1, dq2, x, y, u1, u2]\r\n net.dim = arm_sim.DOF\r\n net.arm_node = arm_sim.create_nengo_node()\r\n net.target_x = nengo.Node(targ_func, size_out=2)\r\n net.i1_q_dq_qt = nengo.Ensemble(300, 6)\r\n net.i2_u = nengo.Ensemble(300, 2)\r\n net.error_qu = nengo.Ensemble(200, 2)\r\n\r\n # Target transformation xt to qt\r\n net.xt_2_qt = x_2_q.generate(arm_sim.l)\r\n nengo.Connection(net.target_x, net.xt_2_qt.input)\r\n\r\n if load_weights:\r\n weights = np.load('Networks/weights/inverse.npy').T\r\n else:\r\n weights = np.random.uniform(size=(2, 6))\r\n\r\n if load_weights:\r\n net.inverse_q_u = nengo.Connection(net.i1_q_dq_qt.neurons, net.i2_u, transform=weights,\r\n learning_rule_type=nengo.PES(),\r\n synapse=tau)\r\n else:\r\n net.inverse_q_u = nengo.Connection(net.i1_q_dq_qt, net.i2_u, transform=weights,\r\n learning_rule_type=nengo.PES(),\r\n synapse=tau)\r\n net.ws_IM = weightsaver.WeightSaver(net.inverse_q_u, 'Networks/weights/inverse.npy')\r\n nengo.Connection(net.xt_2_qt.output, net.i1_q_dq_qt[4:])\r\n nengo.Connection(net.i2_u, net.arm_node[:2], function=ens_to_u)\r\n nengo.Connection(net.xt_2_qt.output, net.error_qu, transform=-1)\r\n nengo.Connection(net.arm_node[:2], net.error_qu, function=q_to_ens)\r\n nengo.Connection(net.i2_u, net.inverse_q_u.learning_rule, transform=1)\r\n\r\n # Switch \r\n net.arm_to_IM_switch = nengo.Node(size_in=5, size_out=4, output=generic_switch_node_func)\r\n nengo.Connection(net.global_sim_flag, net.arm_to_IM_switch[0])\r\n nengo.Connection(net.arm_node[:2], net.arm_to_IM_switch[1:3], function=q_to_ens, synapse=0)\r\n nengo.Connection(net.arm_node[2:4], net.arm_to_IM_switch[3:5], function=dq_to_ens, synapse=0)\r\n nengo.Connection(net.arm_to_IM_switch[:2], net.i1_q_dq_qt[:2])\r\n nengo.Connection(net.arm_to_IM_switch[2:4], net.i1_q_dq_qt[2:4])\r\n\r\n # ======= Forward Model 
connections ======= #\r\n def FM_learn_func(t):\r\n if t < time_train_1 + time_train_2:\r\n return 1\r\n else:\r\n return 0\r\n\r\n net.FM = FM_q_dq.generate(fm_output_neurons, tau, probes, load_weights,\r\n seed=seed) # Input: q1, q2, dq1, dq2, x, y, u1, u2, s\r\n net.FM_learn = nengo.Node(output=FM_learn_func)\r\n net.error_qu_next = nengo.Ensemble(100, 2)\r\n\r\n if probes:\r\n net.probe_error = nengo.Probe(net.error_qu)\r\n net.probe_arm = nengo.Probe(net.arm_node)\r\n net.probe_next_error = nengo.Probe(net.error_qu_next)\r\n net.probe_fm = nengo.Probe(net.FM.output)\r\n\r\n net.arm_to_FM_switch = nengo.Node(size_in=5, size_out=4, output=generic_switch_node_func)\r\n nengo.Connection(net.global_sim_flag, net.arm_to_FM_switch[0])\r\n nengo.Connection(net.arm_node[:2], net.arm_to_FM_switch[1:3], function=q_to_ens, synapse=c)\r\n nengo.Connection(net.arm_node[2:4], net.arm_to_FM_switch[3:5], function=dq_to_ens, synapse=c)\r\n nengo.Connection(net.arm_to_FM_switch, net.FM.input[:4], synapse=0)\r\n nengo.Connection(net.i2_u, net.FM.input[6:8])\r\n nengo.Connection(net.FM_learn, net.FM.input[8])\r\n nengo.Connection(net.FM.output[:2], net.error_qu_next)\r\n nengo.Connection(net.xt_2_qt.output, net.error_qu_next, transform=-1)\r\n\r\n # ====== FM Loop ====== #\r\n net.FM_loop_state = nengo.Node(size_in=5, size_out=4, output=generic_switch_node_func)\r\n\r\n nengo.Connection(net.global_sim_flag, net.FM_loop_state[0], function=lambda x: 1 - x)\r\n nengo.Connection(net.FM.output, net.FM_loop_state[1:5])\r\n nengo.Connection(net.FM_loop_state, net.FM.input[:4], synapse=tau - delta)\r\n nengo.Connection(net.FM_loop_state, net.i1_q_dq_qt[:4], synapse=0)\r\n\r\n def next_func(t, x):\r\n \"\"\"\r\n Selects between two incoming error signals (one based on FM and one based on current values from the arm).\r\n Depends on time (training/test)\r\n :param t:\r\n :param x:\r\n :return:\r\n \"\"\"\r\n e1_c, e2_c, e1_n, e2_n = x\r\n if t < time_train_1:\r\n return e1_c, e2_c\r\n else:\r\n return e1_n, e2_n\r\n\r\n net.next_switch = nengo.Node(next_func, size_in=4, size_out=2)\r\n nengo.Connection(net.error_qu, net.next_switch[:2])\r\n nengo.Connection(net.error_qu_next, net.next_switch[2:4])\r\n nengo.Connection(net.next_switch, net.inverse_q_u.learning_rule)\r\n\r\n # ====== Lesion module ====== #\r\n def lesion_func(t):\r\n if t < time_train_1 + time_train_2 + time_train_sim + sim_switch_time:\r\n return 0\r\n else:\r\n if experiment_group > 1 and not (experiment_group == 6):\r\n return -1\r\n else:\r\n return 0\r\n\r\n net.lesion_current = nengo.Node(output=lesion_func)\r\n lesioned_neurons = np.zeros((fm_output_neurons, 1))\r\n # Choose which neurons are going to be lesioned\r\n # Lesion percentage depends on condition/group\r\n # condition 2: 20%\r\n # condition 3: 40%\r\n # condition 4: 60%\r\n # condition 5: 80%\r\n lesion_percentage = max(0, 2 * (experiment_group - 1) / 10)\r\n nr_of_lesions = int(lesion_percentage * fm_output_neurons)\r\n lesioned_ind = np.random.choice(range(fm_output_neurons), nr_of_lesions)\r\n\r\n for i in lesioned_ind:\r\n lesioned_neurons[i, 0] = 1\r\n\r\n nengo.Connection(net.lesion_current, net.FM.f2_q_next.neurons, transform=lesioned_neurons)\r\n nengo.Connection(net.lesion_current, net.FM.f2_dq_next.neurons, transform=lesioned_neurons)\r\n\r\n model = net\r\n\r\n return model", "title": "" }, { "docid": "e6e04146ce2be0857c9b31b4dc91e227", "score": "0.5113152", "text": "def get_targets_from_genome(self):\n return {key: self.domains[key].targets for key in 
self.domains.keys()}", "title": "" }, { "docid": "fca337b574aa0076313b3a7fa54aa6c8", "score": "0.51125646", "text": "def initialize_gan(generator, discriminator, loss, optimizer, metrics):\n generator.compile(optimizer=optimizer, loss=loss)\n discriminator.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n gen_disc = stack_gen_disc(generator, discriminator)\n gen_disc.compile(optimizer=optimizer, loss=loss, metrics=metrics)\n print(generator.summary())\n print(discriminator.summary())\n print(gen_disc.summary())\n return gen_disc", "title": "" }, { "docid": "3e61119e22e3dd85f1a9579bfaa3297d", "score": "0.51122135", "text": "def test_build_edges(self):\n self.graph.build()\n for source, edges in self.graph.dfgp.items():\n for target, edge in edges.items():\n self.assertEqual(source, edge.source.code)\n self.assertEqual(target, edge.target.code)", "title": "" }, { "docid": "f2803fff19b7d21a488b30e3e4800c46", "score": "0.5106924", "text": "def targets(self):\n return np.array(list(d.target for d in self._items))", "title": "" }, { "docid": "728e40d85c3bb8c50cb7d08589df9ba5", "score": "0.5105176", "text": "def print_targets(people_protocol):\n for hack_pair in people_protocol:\n print(hack_pair)\n return", "title": "" }, { "docid": "00765173c919cc9184f8ebb832add759", "score": "0.5102299", "text": "def GetListOfPossibleTargets(self):\n self.__ResetCancel()\n \n # load module - technically _forceReload is not necessary here, it is just set to maintain the current user experience (the\n # user expect a reload when he clicks on the \"update\" button - we should give him an extra button to do so in the future)\n projectModule = self.__GetProjectModule(_forceReload = True)\n # find csnake targets in the loaded module\n members = inspect.getmembers(projectModule)\n nMembers = len(members)\n count = 0\n result = []\n for member in members:\n self.__NotifyListeners(ProgressEvent(self, count*100/nMembers))\n if self.IsCanceled(): return result\n count += 1\n (targetName, target) = (member[0], member[1])\n if isinstance(target, csnProject.VeryGenericProject):\n result.append(targetName)\n elif isinstance(target, csnAPIImplementation._APIVeryGenericProject_Base):\n result.append(targetName)\n \n return result", "title": "" }, { "docid": "cecb9b3dddb4a8045bed27074812ea78", "score": "0.509151", "text": "def __generate_target_list(self, target_config):\n target_list = []\n for target_config_item in target_config:\n position = Vector_2D(0.0, 0.0)\n velocity = Vector_2D(1.0,1.0)\n wobble = 1\n size = 1\n color = [0, 0, 0]\n if 'position' in target_config_item:\n pos_array = target_config_item['position']\n position = Vector_2D(pos_array[0], pos_array[1])\n if 'velocity' in target_config_item:\n velocity_array = target_config_item['velocity']\n velocity = Vector_2D(velocity_array[0], velocity_array[1])\n if 'wobble' in target_config_item:\n wobble = target_config_item['wobble']\n if 'size' in target_config_item:\n size = target_config_item['size']\n if 'color' in target_config_item:\n color = target_config_item['color']\n\n target_list.append(Target(position, velocity * (self.ppm / self.fps),\n wobble, size, color, len(target_list)))\n\n return target_list", "title": "" }, { "docid": "f00085944f00e5cff38c9e34118bf216", "score": "0.50851303", "text": "def targets(self):\n return self.label", "title": "" }, { "docid": "3a8411b6eba5765868f874b754157893", "score": "0.5082325", "text": "def _build_graph(self):\r\n self.graph = TopologyGraph()\r\n for src, dst in self._topology:\r\n 
self.graph.add_route(NodeSet(src), NodeSet(dst))", "title": "" }, { "docid": "9f09cd1ccb638e25b1f23122ae047384", "score": "0.5081032", "text": "def build_gdynet_direct(self):\n atom_types_inp = Input(shape=(self.num_atom, ), dtype='int32')\n target_index_inp = Input(shape=(self.num_target, ), dtype='int32')\n bond_dist_1_inp = Input(shape=(self.num_atom, self.num_nbr))\n bond_dist_2_inp = Input(shape=(self.num_atom, self.num_nbr))\n nbr_list_1_inp = Input(shape=(self.num_atom, self.num_nbr),\n dtype='int32')\n nbr_list_2_inp = Input(shape=(self.num_atom, self.num_nbr),\n dtype='int32')\n bond_fea_1 = Lambda(self.prep.gaussian_expand)(bond_dist_1_inp)\n bond_fea_2 = Lambda(self.prep.gaussian_expand)(bond_dist_2_inp)\n cgcnn_model = self.build_cgcnn_model()\n branch_1 = cgcnn_model([atom_types_inp,\n bond_fea_1,\n nbr_list_1_inp,\n target_index_inp])\n branch_2 = cgcnn_model([atom_types_inp,\n bond_fea_2,\n nbr_list_2_inp,\n target_index_inp])\n merged = concatenate([branch_1, branch_2])\n self.model = keras.Model(inputs=[atom_types_inp,\n target_index_inp,\n bond_dist_1_inp,\n nbr_list_1_inp,\n bond_dist_2_inp,\n nbr_list_2_inp],\n outputs=merged)\n self.optimizer = keras.optimizers.Adam(lr=self.learning_rate)\n self.losses = [self.vamp.loss_VAMP2_autograd,\n self.vamp._loss_VAMP_sym,\n self.vamp.loss_VAMP2_autograd]", "title": "" }, { "docid": "d1fc0fcf34192bf015d939e6e69a142b", "score": "0.5079063", "text": "def get_targets_impl(self, frame):\n\t\tpass", "title": "" }, { "docid": "3d92e0b8cd1cc70fa10bac04ccbd0338", "score": "0.5076602", "text": "def makeCCGraph(self):\n graph_types = {\n 'grid_2d': self.makeCCGraph_grid2d\n }\n if self.graph_type in graph_types:\n graph_types[self.graph_type]()\n else:\n graph_types['grid_2d']()", "title": "" }, { "docid": "a5e40c2ddd617b0090dd25e5c1b5ce97", "score": "0.50724655", "text": "def build_graphs(model, n_graphs_to_generate, batch_size):\r\n # start the timer\r\n t = time.time()\r\n\r\n # define the softmax for use later\r\n softmax = torch.nn.Softmax(dim=1)\r\n\r\n # initialize node and edge features tensors for batch of graphs, as well\r\n # as a tensor to keep track of the number of nodes per graph\r\n nodes, edges, n_nodes = initialize_graph_batch(batch_size=batch_size)\r\n\r\n # allocate tensors for finished graphs; these will get filled in gradually\r\n # as graphs terminate\r\n (\r\n generated_nodes, \r\n generated_edges,\r\n generated_n_nodes, \r\n nlls, \r\n generated_nlls, \r\n properly_terminated_graphs\r\n ) = allocate_graph_tensors(n_graphs_to_generate,\r\n batch_size)\r\n\r\n # keep track of a few things...\r\n n_generated_so_far = 0\r\n t_bar = tqdm(total=n_graphs_to_generate)\r\n generation_round = 0\r\n\r\n # generate graphs in a batch until the total number of graphs is reached\r\n while n_generated_so_far < n_graphs_to_generate:\r\n\r\n # skip dummy node after calling model (only need it for predicting APDs)\r\n apd = softmax(model(nodes, edges))[1:]\r\n nodes = nodes[1:]\r\n edges = edges[1:]\r\n n_nodes = n_nodes[1:]\r\n nlls = nlls[1:]\r\n\r\n # get the actions from the predicted APDs\r\n add, conn, term, invalid, nlls_just_sampled = get_actions(apd,\r\n edges,\r\n n_nodes,\r\n batch_size)\r\n\r\n # indicate (with a 1) the structures which have been properly terminated\r\n properly_terminated_graphs[n_generated_so_far : n_generated_so_far + len(term)] = 1\r\n termination_idc = torch.cat((term, invalid))\r\n\r\n # copy the graphs to be terminated (indicated by `terminated_idc`) to\r\n # the tensors for finished graphs (e.g. 
`generated_nodes`, etc)\r\n (\r\n n_generated_so_far, \r\n generated_nodes,\r\n generated_edges, \r\n generated_n_nodes,\r\n generated_nlls\r\n ) = copy_terminated_graphs(termination_idc,\r\n n_generated_so_far,\r\n nodes,\r\n edges,\r\n n_nodes,\r\n generated_nodes,\r\n generated_edges,\r\n generated_n_nodes,\r\n generation_round,\r\n nlls,\r\n nlls_just_sampled,\r\n generated_nlls)\r\n\r\n # apply actions to all graphs (note: applies actions to terminated\r\n # graphs too to keep on GPU, as this makes generation faster and graphs\r\n # will be removed anyway)\r\n nodes, edges, n_nodes, nlls = apply_actions(add,\r\n conn,\r\n nodes,\r\n edges,\r\n n_nodes,\r\n generation_round,\r\n nlls,\r\n nlls_just_sampled)\r\n\r\n # after actions are applied, reset graphs which were set to terminate\r\n # this round\r\n nodes, edges, n_nodes, nlls = reset_graphs(n_graphs_to_generate,\r\n termination_idc,\r\n nodes,\r\n edges,\r\n n_nodes,\r\n nlls,\r\n batch_size)\r\n\r\n # update variables that are being kept track of\r\n t_bar.update(len(termination_idc))\r\n generation_round += 1\r\n\r\n # done generating\r\n t_bar.close()\r\n\r\n # get the time it took to generate graphs\r\n t = time.time() - t\r\n print(f\"Generated {n_generated_so_far} molecules in {t:.4} s\")\r\n print(f\"--{n_generated_so_far/t:4.5} molecules/s\")\r\n\r\n # convert the molecular graphs (currently separate node and edge features\r\n # tensors) into `GenerationGraph` objects\r\n graphs = []\r\n for graph_idx in range(n_graphs_to_generate):\r\n graphs.append(\r\n graph_to_graph(graph_idx, generated_nodes, generated_edges, generated_n_nodes)\r\n )\r\n\r\n # sum NLLs over all the actions to get the total NLLs for each structurei\r\n # and remove extra zero padding\r\n final_nlls = torch.sum(generated_nlls, dim=1)[:len(graphs)]\r\n\r\n # remove extra zero padding from `generated_nlls` and `properly_terminated_graphs`\r\n generated_nlls = generated_nlls[generated_nlls != 0]\r\n properly_terminated_graphs = properly_terminated_graphs[:len(graphs)]\r\n\r\n\r\n return graphs, generated_nlls, final_nlls, properly_terminated_graphs", "title": "" }, { "docid": "e42e44e32f2b1d94dd86f80a5d119334", "score": "0.50712335", "text": "def generate_cls_targets(self, local_angle):\n # cls_targets = (local_angle >= self.left) & (local_angle < self.right)\n deltas = torch.abs(local_angle - self.bin_centers)\n cls_targets = (deltas <= self.max_deltas) | (\n deltas > 2 * math.pi - self.max_deltas)\n return cls_targets.long()", "title": "" }, { "docid": "8d86f3d8c3325a681cb1de2e7fdf91a2", "score": "0.5069117", "text": "def graph(self, target):\n target0_n = '\"{}\"'.format(target[0][0])\n target0_v = target[0][1]\n # 哪些\n\n target1_n = '\"{}\"'.format(target[1][0])\n target1_v = target[1][1]\n # 意大利\n\n target2_n = aiballclass(target[2][0])\n target2_v = target[2][1]\n # 处子球\n\n target3_v = target[3][1]\n # 对手\n\n thing1 = HasProperty(target1_n, target1_v)\n zhuyu1 = ArelationB(thing1, target3_v, reverse=False)\n\n thing2 = HasProperty(target2_n, target2_v)\n zhuyu1.merge(thing2)\n\n zhuyu2 = IsRelatedTo2(zhuyu1, reverse=False)\n\n goal = ArelationB(zhuyu2, target0_v)\n return goal", "title": "" }, { "docid": "59f00904682f6612d16125d1b8d37b28", "score": "0.50687826", "text": "def getTargets(self) -> Awaitable[Dict]:\n return self.client.send(\"Target.getTargets\", {})", "title": "" }, { "docid": "47c8ef00277db3eedd4d22f7883cffd5", "score": "0.5068312", "text": "def make_targets(self,minibatch):\n\n actions = minibatch[1]\n\n\n Q = 
self.model.predict(minibatch[0])\n avg_q = np.mean(Q)\n self.avg_q = avg_q\n\n future = self.target_model.predict(minibatch[3])\n\n fv = np.amax(future,1)\n\n terminal = minibatch[4].astype(int)\n\n delta_o = minibatch[2] + (1-terminal)*self.discount*fv\n if self.priority_replay:\n q = np.zeros(self.batch_size)\n for i in range(0,self.batch_size):\n q[i] = Q[i][actions[i]]\n\n delta = q-delta_o\n td_error = abs(delta) +self.priority_replay_eps\n\n batch_idxes = minibatch[6]\n self.memory.update_priorities(batch_idxes,td_error)\n\n\n targets = Q \n for i in range(0,self.batch_size):\n targets[i][actions[i]] = delta_o[i]\n\n\n return targets", "title": "" }, { "docid": "50001d2ac9be610ccf0447080b2b55ac", "score": "0.5068127", "text": "def get_causal_sink_nodes(graph: BELGraph, func) -> Set[BaseEntity]:\n return {\n node\n for node in graph\n if node.function == func and is_causal_sink(graph, node)\n }", "title": "" }, { "docid": "3ba02f31776df667a464f04930843a41", "score": "0.50648296", "text": "def _define_sinks_and_sources(self):\n #make a set of sink and source arrays which match\n #the size of each gradient\n sources = []\n sinks = []\n for i in range(0, len(self.gradients)):\n shp = self.gradients[i].shape()\n source = np.zeros(shp)\n sink = np.zeros(shp)\n sources.append(source)\n sinks.append(sink)\n #get the center of the network\n cent = self.get_center()\n #now loop over all of the agents in the simulation\n for j in range(0, len(self.objects)):\n agent = self.objects[j]\n #get the object position\n pos = agent.location\n #get the difference from the center\n dist_vec = SubtractVec(pos, cent)\n #convert this to a index value\n #for all of the gradients get the coeffients\n for i in range(0, len(self.gradients)):\n x, y, z = self.gradients[i].get_object_position_on_grid(dist_vec)\n agent.set_gradient_location(self.gradients[i].name, (x,y,z))\n #see if the object conatins a value for this gradient\n src, snk = agent.get_gradient_source_sink_coeff(self.gradients[i].name)\n sources[i][int(x),int(y),int(z)] += src\n sinks[i][int(x),int(y),int(z)] += snk\n \n #and return the list\n return sources, sinks", "title": "" }, { "docid": "fd5db6d0e54cf0e202ae9d9c081e6c42", "score": "0.5064662", "text": "def experiment1(graph):\n if not graph.is_directed(): \n raise ValueError(\"Graph must be DiGraph\")\n nonleaf_nodes = get_nonleaf_nodes(graph)\n #data_dir = 'pre_data/example/fixedsource/'\n data_dir = 'pre_data/p2p-Gnutella08/fixedsource/'\n file_nos = []\n for myfile in os.listdir(data_dir):\n if myfile.endswith(\".txt\"):\n if myfile[:len('short')] == 'short':\n file_no = myfile[len('shortWalks'):-4]\n file_no = [int(i) for i in file_no.replace('_', ' ').split()]\n iratio, scenario = file_no\n file_nos.append((iratio, scenario))\n #print(os.path.join(data_dir, file))\n birth_rate = 1\n full_soc = 3\n birth_type = 'random_source'\n #birth_type = 'walk_at_random'\n for pair in file_nos:\n ratio, scene = pair\n walkfile = (data_dir + 'shortWalks' +\n str(ratio) + '_' + str(scene) + '.txt')\n installfile = (data_dir + 'install' +\n str(ratio) + '_' + str(scene) + '.txt')\n sourcefile = (data_dir + 'source' +\n str(ratio) + '_' + str(scene) + '.txt')\n feasible_walks, feasible_walks_source = read_feasible_walks(walkfile)\n install_dict, install_nodes = get_install_dict(graph, installfile)\n source_nodes = get_source_nodes(graph, sourcefile)\n soc_btn = soc.fixed_source_betweenness_centrality(graph,\n full_soc,\n install_nodes,\n source_nodes)\n standard_btn = 
standard_fixed_source.betweenness_centrality(\n graph, source_nodes)\n #btn_ranking = node_values_to_ranking(soc_btn)\n num_new_routes = int(math.ceil(birth_rate* len(source_nodes)))\n max_counter, average_counter = particle_simulation_fixed_source(\n graph,\n num_new_routes,\n feasible_walks,\n feasible_walks_source,\n birth_type)\n #sim_ranking = node_values_to_ranking(average_counter)\n #sim_ranking2 = node_values_to_ranking(max_counter)\n #ken_soc, spr_soc = compute_stats(sim_ranking, btn_ranking)\n #ken_soc2, spr_soc2 = compute_stats(sim_ranking2, btn_ranking)\n #print ratio, scene, ken_soc.correlation,\\\n # spr_soc.correlation\n sim = rank_nonleafnodes(nonleaf_nodes, average_counter)\n btnrank = rank_nonleafnodes(nonleaf_nodes, standard_btn)\n myrank = rank_nonleafnodes(nonleaf_nodes, soc_btn)\n ken_soc, spr_soc = compute_stats(sim, myrank)\n ken_std, spr_std = compute_stats(sim, btnrank)\n print 'myrank', ratio, scene, ken_soc.correlation, spr_soc.correlation\n print 'std', ratio, scene, ken_std.correlation, spr_std.correlation\n\n #ken_file = open('kendall.txt', 'w')\n #spr_file = open('spearman.txt', 'w')", "title": "" }, { "docid": "7cde3b75acaaab46a2504b4d7126c5bb", "score": "0.506413", "text": "def graph_generator():\n inputs = select_user_inputs()\n\n train_dir = os.path.join(out_folder, \"train\")\n os.mkdir(train_dir)\n test_dir = os.path.join(out_folder, \"test\")\n os.mkdir(test_dir)\n\n for i in range(0, inputs[0]):\n graph = generate_connected_graph(inputs[2], inputs[3], inputs[4])\n train_data = {}\n train_data[\"input\"] = nx.to_numpy_matrix(graph, dtype=np.int64).tolist()\n train_data[\"output\"] = nx.is_connected(graph)\n with open(os.path.join(train_dir, str(i) + \".json\"), \"w\") as outfile:\n outfile.write(json.dumps(train_data, outfile))\n\n\n for i in range(0, inputs[1]):\n graph = generate_connected_graph(inputs[2], inputs[3], inputs[4])\n test_data = {}\n test_data[\"input\"] = nx.to_numpy_matrix(graph, dtype=np.int64).tolist()\n test_data[\"output\"] = nx.is_connected(graph)\n with open(os.path.join(test_dir, str(i) + \".json\"), \"w\") as outfile:\n outfile.write(json.dumps(test_data, outfile))\n\n\n print('Success, file written %s ' % out_folder)\n return", "title": "" }, { "docid": "e02550ba55e2dbc7d52a58f23eeef626", "score": "0.5057841", "text": "def fill_graph(request):\n graph = nx.Graph()\n tracts = request.dbsession.query(Tract).all()\n edges = request.dbsession.query(Edge).all()\n for tract in tracts:\n graph.add_node(tract)\n for edge in edges:\n source = request.dbsession.query(Tract).get(edge.tract_source)\n target = request.dbsession.query(Tract).get(edge.tract_target)\n graph.add_edge(source, target)\n return graph", "title": "" } ]
da5f3f850684cd950c772a6c423981fc
Execute all commands issued after MULTI
[ { "docid": "eeb79144b48cc547bd8eda52b1d4704d", "score": "0.6023346", "text": "def _exec(self):\r\n if not self._in_transaction:\r\n raise Error('Not in transaction')\r\n\r\n futures_and_postprocessors = self._transaction_response_queue\r\n self._transaction_response_queue = None\r\n\r\n # Get transaction answers.\r\n multi_bulk_reply = yield from self._query(b'exec', _bypass=True)\r\n\r\n if multi_bulk_reply is None:\r\n # We get None when a transaction failed.\r\n raise TransactionError('Transaction failed.')\r\n else:\r\n assert isinstance(multi_bulk_reply, MultiBulkReply)\r\n\r\n for f in multi_bulk_reply.iter_raw():\r\n answer = yield from f\r\n f2, call = futures_and_postprocessors.popleft()\r\n\r\n if isinstance(answer, Exception):\r\n f2.set_exception(answer)\r\n else:\r\n if call:\r\n self._pipelined_calls.remove(call)\r\n\r\n f2.set_result(answer)\r\n\r\n self._transaction_response_queue = deque()\r\n self._in_transaction = False\r\n self._transaction = None", "title": "" } ]
[ { "docid": "dda936b07c092ad782de1aba63e81567", "score": "0.6528384", "text": "def execute_commands(self):\n for command in self.commands:\n command.execute(self)\n\n self.commands.clear()\n self.controller.loading = False", "title": "" }, { "docid": "cce11de1108af38f8cf2c414d8dfb358", "score": "0.6464062", "text": "def __call__(self):\n self.lock_commands.acquire()\n if self.commands:\n print(\"commands:\")\n print(self.commands)\n while self.commands:\n cmd = self.commands.pop()\n self.procs_state.clear()\n if cmd[0] == 'stop' or cmd[0] == 'restart':\n self._stop_handler(cmd)\n if cmd[0] == 'start' or cmd[0] == 'restart':\n self._start_handler(cmd)\n elif cmd[0] == 'status':\n self._status_handler()\n elif cmd[0] == 'reload':\n self._reload_handler()\n elif cmd[0] == 'shutdown':\n self._shutdown_handler()\n\n self.lock_datas.acquire()\n if cmd[0] != 'status':\n self.processesHandler.datas = self.procs_state\n if cmd[0] != 'reload':\n self.processesHandler.datas_right = True\n self.lock_datas.release()\n\n self.lock_commands.release()", "title": "" }, { "docid": "390c98398b3710d99eb64d775a4216e6", "score": "0.63885146", "text": "def executeAll(self):\n while self.execute() is not None:\n pass", "title": "" }, { "docid": "89d5b1d2251414a5a4ceb20f51e62dc0", "score": "0.62608725", "text": "async def execute(self):\n\n # - pass -", "title": "" }, { "docid": "296ce95ab5ba746ef17ec9a5e7052295", "score": "0.61232364", "text": "def execute( self ):\n # Connect to DB\n \n # Create savepoint\n\n # Loop through commands\n # Rolling back if problem\n\n # Close DB connection", "title": "" }, { "docid": "e6592a5aa9b09e18070c6122045a060d", "score": "0.61180663", "text": "def execute():", "title": "" }, { "docid": "ef30e7c3c31bb47ea345b2f25fc7dbe9", "score": "0.60914916", "text": "def execute_commands(self, commands):\n self.conn = self.__connect()\n for cmd in commands:\n stdin, stdout, stderr = self.client.exec_command(cmd)\n stdout.channel.recv_exit_status()\n response = stdout.readlines()\n for line in response:\n logger.info(f'INPUT: {cmd} | OUTPUT: {line}')", "title": "" }, { "docid": "ced670d7dc15047775585cadd2fa8dba", "score": "0.60422224", "text": "def execute_commands(self, commands):\n self.conn = self._connect()\n for cmd in commands:\n stdin, stdout, stderr = self.client.exec_command(cmd)\n stdout.channel.recv_exit_status()\n response = stdout.readlines()\n for line in response:\n logger.info(f'INPUT: {cmd} | OUTPUT: {line}')", "title": "" }, { "docid": "75bd4f01e01523785bb2c41beeeb1a95", "score": "0.6027384", "text": "def execute(self):\n for system in self._systems:\n system.execute()", "title": "" }, { "docid": "041519bf68afc8ecf02d11dfe2aea628", "score": "0.60121346", "text": "def execute(self, cmd):\n\n self.logger.debug('execute cmd: [%s].' 
% cmd)\n for i in range(3):\n try:\n self.cursor.execute(cmd)\n except pymysql.MySQLError:\n self.reconnect()\n else:\n break", "title": "" }, { "docid": "8e532913952c21798b1f1018feb3f63b", "score": "0.59858286", "text": "def execute(self):\r\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.5981959", "text": "def execute(self):\n pass", "title": "" }, { "docid": "8e0b9987ea2f8305825d25dabe6a2afc", "score": "0.59611714", "text": "def executeJob(self):\n return", "title": "" }, { "docid": "d9e907f17e295186248a841ea675cb7c", "score": "0.5939286", "text": "def execute(self):\n logging.debug(\"=== Executing tasks ===\")\n if not self.no_more_operations():\n reactor.callLater(1, self.execute)\n for func, args in self.get_next_operations():\n logging.debug(\"Execute task: %s, %s\", func, args)\n reactor.callLater(0, func, *args)\n else:\n logging.debug(\"no more operation,but counter = %d\", self.counter)\n if self.counter > 0:\n reactor.callLater(1, self.execute)\n else:\n reactor.callLater(1, self.send)", "title": "" }, { "docid": "62882b92624afd5047741a18eeb3f5ea", "score": "0.5909719", "text": "def execute_pool(self):\n for job in self._jobs_pool:\n print_msg(f\"job from the pool started: {job}\")\n job.do()\n self._jobs_pool = []", "title": "" }, { "docid": "509bcc9e4219864dd30915a7555000d1", "score": "0.5881448", "text": "def execute_command(self):\n pass", "title": "" }, { "docid": "e14420256fe7d0593e3d04950e0c1c50", "score": "0.5872389", "text": "def execute(self):\n if self.local:\n return self.execute_local()\n urls = set()\n from ..data.routing import inspect, route\n for func, args, kwargs in self.calls:\n urls.update(inspect(args, kwargs))\n if urls:\n active_hosts, data_info = self.data_info(urls)\n else:\n active_hosts, data_info = None, None\n calls = self.calls\n self.calls = []\n self.jids = self._bulk_execute(calls, active_hosts, data_info)", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5838344", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5838344", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5838344", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5838344", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5838344", "text": "def execute(self):", "title": "" }, { "docid": "c94b65a703f05ace99ac3925c548850d", "score": "0.5806605", "text": "def receive(self, command_list):\n for cmd in command_list:\n self._handle(cmd)\n\n if not self.is_last_engine:\n 
self.send(command_list)", "title": "" }, { "docid": "ff9590f7dcd215c521a7795b550cf02a", "score": "0.57582474", "text": "def execute_deferred_sql(self):\n for sql in self.deferred_sql:\n self.execute(sql)\n\n self.deferred_sql = []", "title": "" }, { "docid": "b5102b47dac1f64207d025fa8ceb2a65", "score": "0.5723028", "text": "def _commit(self, results):\n for result in results:\n if not result.should_clean:\n continue\n course = modulestore().get_course(result.course_key)\n del course.cert_name_short\n del course.cert_name_long\n modulestore().update_item(course, ModuleStoreEnum.UserID.mgmt_command)", "title": "" }, { "docid": "41b80cb6d89d7e1d2e1a52334a262a81", "score": "0.5721438", "text": "def execute(self):\n return []", "title": "" }, { "docid": "700985a4af400b367145a5eb13073575", "score": "0.57192826", "text": "def _commit_handler(self, commands):\n response = Reply()\n\n for command in commands:\n ## Strip any leading whitespace as this will result\n ## in desync of send/recv\n command = command.strip()\n\n if command == 'end' or command == 'end no-confirm': \n prompt = self.oper_prompt\n else:\n prompt = self.conf_prompt\n\n ## Skip processing any lines that end w/ !\\n\n if '!\\n' in command:\n continue\n if 'commit' in command:\n if self.session.local is True:\n self.child.sendline(command)\n (success, check_response) = self._check_expect_response(\n command, prompt)\n buf = self.child.before\n if success:\n response = self._validate_buffer(command, buf)\n else:\n response = check_response\n else:\n self._send_command(command)\n ## Arbitrary sleep before commit\n time.sleep(2)\n buf = self._wait(prompt)\n if isinstance(buf, Reply):\n # command timeout\n response = buf\n break\n response = self._validate_buffer(command, buf)\n ## Need to check if response is good and if not break loop\n if response and response.error is not None:\n break\n\n else:\n if self.session.local is True:\n self.child.sendline(command)\n (success, check_response) = self._check_expect_response(\n command, prompt)\n if not success:\n ## Breaking from loop to send error back to caller\n response = check_response\n break\n\n buf = self.child.before\n if re.search(\"end$|end no-confirm$\", command) is None:\n response = self._validate_buffer(command, buf)\n if response.error is not None:\n break\n else:\n self._validate_buffer(command, buf)\n else:\n self._send_command(command)\n buf = self._wait(prompt)\n if isinstance(buf, Reply):\n response = buf\n else:\n if re.search(\"end$|end no-confirm$\", command) is None:\n response = self._validate_buffer(command, buf)\n else:\n self._validate_buffer(command, buf)\n if response.error is not None:\n break\n\n return response", "title": "" }, { "docid": "9de0752025702f564adb193093ee2a73", "score": "0.5709145", "text": "def _execute_sync(self, client, userdata, msg):\r\n asyncio.ensure_future(self._execute(msg.topic, msg.payload, msg.retain))", "title": "" }, { "docid": "b8fb62f4f4980832c01f604cd4ee2ee6", "score": "0.56784296", "text": "def execute(self,cmd):\n self.database.execute( cmd )", "title": "" }, { "docid": "55d73ee73473f6b9a84171355381b640", "score": "0.5660235", "text": "def execute_all(self):\n 'Alertas'\n debug.create_alerts()\n 'Clientes'\n debug.create_customers()\n 'Vinculados'\n debug.create_stakeholders()\n 'Crear documentos'\n debug.create_documents()", "title": "" }, { "docid": "c1cd2df8715ec5e88e5a9beb3b49f6c0", "score": "0.5647915", "text": "def execute(self):\n self.start()\n\n #TODO: join all processes; the current system only works cleanly with 
one output pipe\n if not is_backend(Backend.DUMMY):\n for p in self._result_pipe()._processes:\n p.join()", "title": "" }, { "docid": "9fe439f52bfa5dd5982ed2f9963e911e", "score": "0.5624954", "text": "def onecmd(self, commands, separator=\";\"):\n for command in commands.split(separator):\n Cmd.onecmd(self, command)", "title": "" }, { "docid": "640628ba1f6c31d2a2486101e96bd745", "score": "0.56112033", "text": "def run_commands(self, unit_name, commands, ctxt):\n for _cmd in commands:\n cmd = _cmd.format(**ctxt)\n generic_utils.assertRemoteRunOK(zaza.model.run_on_unit(\n unit_name,\n cmd))", "title": "" }, { "docid": "1b1a2aa4e00122029eabb4385e55661b", "score": "0.5607333", "text": "def _process_queue(self):\n logging.debug(\"Processing RCON command queue\")\n try:\n command, callback = self._queue.popleft()\n logging.debug(\"QUEUE - command: %s, callback: %s\", command, \n callback)\n\n self.send_cmd(command, callback)\n except IndexError:\n pass\n\n except:\n logging.exception(\"Exception processing queue\")", "title": "" }, { "docid": "7683465264be95615db75f92de767397", "score": "0.5564978", "text": "def execute(self):\n return [command() for command in self.commands]", "title": "" }, { "docid": "3e70dfaa32a9e369bc6cb375c2fd793d", "score": "0.5539985", "text": "def commands_run(self, commands: list):\n next_command = []\n for command in commands:\n if isinstance(command, CommandSet):\n if len(command.commands)<=0:\n continue\n self.commands_run(command.commands)\n next_command.append(command)\n continue\n try:\n with command as c:\n if c.valid(self.controller) and c.live:\n out = c.run(self.controller)\n if out is not None: self.changed.update(out)\n c.update()\n if command.leave(): next_command.append(command)\n else: command.destroy()\n\n except Exception as e:\n if App.instance().config.App.DEBUG:\n print(traceback.format_exc())\n else:\n print('[ERROR/{}]'.format(c.command))\n pass\n commands.clear()\n commands += next_command", "title": "" }, { "docid": "41bcbc0bb85570a9ce48ee258d566e68", "score": "0.55348045", "text": "def execute(self):\n raise NotImplementedError()", "title": "" }, { "docid": "38c6a336cce68da8387dd241d1128f4a", "score": "0.55331427", "text": "def receive(self, command_list):\n for cmd in command_list:\n if not cmd.gate == FlushGate():\n self._store(cmd)\n else:\n self._run()\n\n if not self.is_last_engine:\n self.send(command_list)", "title": "" }, { "docid": "b23b1a5e21662dfaf38d96b9a2aca50f", "score": "0.5528391", "text": "def execute(self, parameters=None, messages=None):\n self._is_executing = True\n try:\n self._commit_temporaries()\n except:\n pass \n self.tool.main(self.par, parameters, messages)\n try:\n self.clear_temporary_dbs()\n except:\n pass\n self._is_executing = False\n self.show_outputs()", "title": "" }, { "docid": "f7d4b0944f01dc87855d09ed9426b355", "score": "0.5522034", "text": "def runCommands(self, commands):\n return self.rpc.system.runCommands(commands)", "title": "" }, { "docid": "c4382b4de7e0a065b30ae820212c41f8", "score": "0.55218375", "text": "def run (self): \n if self.setup_tool.update_enabled():\n if self.setup_tool.get_software() == 'pdns':\n self.connect_to_mysql()\n self.start_update()\n \n self.write_to_log('>> Waiting for remote command from master...\\n')\n try: \n while 1:\n buf, addr = self.s.recvfrom (2048)\n if buf == 'send-result':\n self.send_result() \n elif buf == 'tear-down':\n self.tear_down()\n break \n elif buf == 'abort':\n self.abort()\n break \n else:\n self.trigger_top()\n if not self.test_is_validated():\n 
break\n except KeyboardInterrupt:\n print '\\t>> Exiting...'\n exit(0)", "title": "" }, { "docid": "8eeb89f7cc98f547ab3065de5884228c", "score": "0.5486547", "text": "def async(commands):\n if isinstance(commands, dict):\n for name, cmd in commands.iteritems():\n ccmd = _get_callable(cmd)\n multiple = isinstance(ccmd, (list, tuple))\n if multiple and len(ccmd) > 1:\n cmdmap = {}\n for i, subcmd in enumerate(ccmd):\n subname = None\n if isinstance(subcmd, (str, unicode)):\n subname = subcmd\n else:\n subname = \"sub-%s\" % i\n cmdmap[\"%s.%s.%s\" % (name, cmd, subname)] = subcmd\n async(cmdmap)\n continue\n elif multiple:\n ccmd = _get_callable(ccmd[0])\n cmdproc = MachopAsyncCommand(ccmd, CURRENT_DIRECTORY, _api_q, name)\n cmdproc.start()\n __join_list__.append(cmdproc)\n return\n commands = _get_callables(ensure_list(commands))\n for cmd in commands:\n cmdproc = MachopAsyncCommand(cmd, CURRENT_DIRECTORY, _api_q)\n cmdproc.start()\n __join_list__.append(cmdproc)", "title": "" }, { "docid": "4856bec7c0cd461411df13a9068355e6", "score": "0.5476877", "text": "def execute_command(self, command, fetch):\n\t\n\t\tpass", "title": "" }, { "docid": "c10fa5e36b6b2a1d3029c6416246d955", "score": "0.54662323", "text": "def _execute(self):\n raise NotImplementedError", "title": "" }, { "docid": "7b26b95ad44ea34039ac49d748f029d6", "score": "0.54415846", "text": "def handleOperation(self, message):\n if 's/ds' in message.topic and self.command_message_id == message.messageId:\n try:\n self._supported_commands = self._get_supported_commands()\n self.logger.info(f'Supported Commands {self._supported_commands}')\n messages = message.values\n self._set_executing()\n self.logger.info(f'Shell Command Message received: {messages}')\n\n # Parse command\n raw_cmd = re.sub(r';?\\s*\\n', '; ', ';'.join(message.values[1:]))\n raw_cmd = re.sub('^\"(.*)\"$', '\\\\1', raw_cmd)\n # replace escaped double quote, with literal quote\n raw_cmd = raw_cmd.replace(r'\\\"', '\"')\n\n # Check for help\n if raw_cmd == 'show help':\n logging.info(f'Showing shell help')\n \n self._set_success_with_result('\\n'.join(self._show_help()))\n return\n\n resolved_cmd = self._resolve_command(raw_cmd)\n\n if resolved_cmd:\n logging.info(f'Using pre-defined command')\n elif self.check_command(raw_cmd):\n logging.info(f'Using extended command')\n resolved_cmd = CommandAlias(raw_cmd, raw_cmd)\n\n if not resolved_cmd:\n raise InvalidCommandError().add_context(f'command: {raw_cmd}')\n\n _, output_text = resolved_cmd.execute_command(\n raw_cmd, timeout=60)\n self._set_success_with_result(output_text)\n\n except (InvalidCommandError, CommandFailedError, CommandTimeoutError) as ex:\n logging.error(f'Command error. Exception={ex}')\n self._set_failed(f'{ex}')\n except TimeoutExpired as ex:\n self._set_failed(f'{ex}')\n except Exception as ex:\n logging.error(f'Handling operation error. exception={ex}')\n self._set_failed(f'Unhandled exception. 
exception={ex}')", "title": "" }, { "docid": "69c69ae695a55cb1b6634a70446a811a", "score": "0.54381526", "text": "def __call_and_notify(self, requests_by_command):\n\n for command, work_items in requests_by_command.items():\n request = [item.data for item in work_items]\n request = work_items[0].aggregator(request) if work_items[0].aggregator else request\n result_status = self.OK_RESULT\n result_data = None\n try:\n result_data = command(request)\n except (self.Error, Exception) as e:\n result_status = e\n\n for work_item in work_items:\n work_item.completion_queue.put((result_status, result_data))", "title": "" }, { "docid": "d0c8986fcc1c5c33cb536fc8133e9017", "score": "0.5428644", "text": "async def process_commands(self, message, prefix):\n await self.invoke(self.get_context(message, prefix))", "title": "" }, { "docid": "fa5390634d5e6a03d9c3db1d4b54757f", "score": "0.5413452", "text": "def _schedule_cloud_commands(self):\n pass", "title": "" }, { "docid": "81bece5910c35aceb8558fe879434316", "score": "0.5407022", "text": "def receive(self, command_list):\r\n for cmd in command_list:\r\n if not cmd.gate == FlushGate():\r\n self._store(cmd)\r\n else:\r\n self._run()\r\n self._reset()", "title": "" }, { "docid": "23766acd6ce01e47c5a89c2df948c8fd", "score": "0.5402643", "text": "def execute_many(self, wire, pargs): # pragma: no cover\n raise NotImplementedError", "title": "" }, { "docid": "ad85350f7cb81a8f112dbc8c1c3acbda", "score": "0.5400535", "text": "def execute_order(self):\n raise NotImplementedError(\"Should implement execute_order()\")", "title": "" }, { "docid": "fc3a3e6e6bb3e198d92bdbc959201330", "score": "0.53909975", "text": "def run_management_commands():\n call_command('clearsessions', verbosity=0)\n call_command('django_cas_ng_clean_sessions', verbosity=0)", "title": "" }, { "docid": "f2d47ee5386528f1495e4eed61d06e29", "score": "0.5385449", "text": "def execute_batch(self, commands=(), connection=None):\n if commands is None:\n raise InvalidCommandValue(\"Command can't be None\")\n if not hasattr(commands, \"__iter__\"):\n raise InvalidCommandValue(\"Command needs to be an iterable\")\n result_list = []\n for cmd in commands:\n res = self.execute(command=cmd, connection=connection)\n result_list.append(res)\n if res.cmd.exclusive:\n res.wait_for_data()\n return result_list", "title": "" }, { "docid": "af39a5a7c989605986a60be565af3c4b", "score": "0.5383679", "text": "def receive(self, command_list):\n for cmd in command_list:\n self._store(cmd)\n if isinstance(cmd.gate, FlushGate):\n self._run()\n self._reset()", "title": "" }, { "docid": "4904d05b8625aed2e445b23112afea91", "score": "0.53789467", "text": "def RunAllTasks():", "title": "" }, { "docid": "f5380c8fecc6b55bdb68bb90e8f24daf", "score": "0.5373474", "text": "async def run_async(self) -> None:\n self._fail_on_multi_run()\n self._has_run = True\n # Runs the script itself + all of the \"tasks\" (table processors) asynchronously.\n await asyncio.gather(self._run_conn(self._conn), *[t() for t in self._tasks])", "title": "" }, { "docid": "076b203bfe1a9a44177b63c6092e7dba", "score": "0.53687465", "text": "def run_step(self):\n \n if not self.commandsQueue.empty():\n self.process_command(self.commandsQueue.get())\n time.sleep(0.01)", "title": "" }, { "docid": "898ef0fb13a9828472da057c0fac06af", "score": "0.5368596", "text": "def executeAllCmd(self, argv):\n cmd = [self.CMD]\n cmd.extend(argv)\n vcs_st, ret_code = RunShellWithReturnCode(cmd, print_output=True)\n sys.exit(ret_code)", "title": "" }, { "docid": 
"1e9e5271a8e9a04f7137adf910c425d0", "score": "0.536769", "text": "def finalize_batch(self):\n self._db.commit()", "title": "" }, { "docid": "14cd2f692c65dc40372ca00156010a7f", "score": "0.5364441", "text": "def run_commands(callback_commands):\n for command in callback_commands:\n print(command)\n call(command, shell=True)\n print(\"CONTINUE?\")", "title": "" }, { "docid": "3b6522b93e591f85446ccdbfdae02054", "score": "0.53641605", "text": "def bulk():\n pass", "title": "" }, { "docid": "b12be1ddb7766d764d817db528228c78", "score": "0.5363422", "text": "def execute(self, userdata):", "title": "" }, { "docid": "8b61f3d4f64261f62fa5c408ada54728", "score": "0.5355978", "text": "def process_commands(lines) -> None:\n while True:\n try:\n execute_command(next(lines))\n\n except StopIteration:\n break", "title": "" }, { "docid": "c12bca501b31cd6394c990d5b01b54d1", "score": "0.53399956", "text": "def collect(self):\n self.initialize_logging() # need to initialize logging again in child process cause multiprocessing\n self.logger.info(\"Starting data collection process\\n\")\n self.initialize_database_connections() \n while True:\n if not self._queue.empty():\n message = self._queue.get() # Get the task off our MP queue\n else:\n break\n self.logger.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n break\n\n # If task is not a valid job type\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n try:\n self.commits_model(message)\n except Exception as e:\n self.logger.error(e)\n raise(e)\n break", "title": "" }, { "docid": "39472ded99e883500080a5ac8d0687db", "score": "0.5331401", "text": "def execute(self):\n\n # start\n self.log(\"Starting...\")\n\n self.emit(START, StartMessage, forward=True)\n\n # process\n self.process(None)\n\n self.emit(STOP, StopMessage, forward=True)\n\n # done\n self.log(\"Exiting...\")\n\n # stopping current node and sending stop message to child nodes\n # this is now handled by the engine\n # calling stop here prematurely cancels consumers. this could be bad if more than one producer it feeding this consumer.\n # self.stop()", "title": "" }, { "docid": "ea9857d078c85ea3a1e09b94743625a5", "score": "0.53299844", "text": "def execute(self):\n if self.use_cublas:\n raise NotImplementedError(\"Not yet supported\")\n else:\n self.kernel.prepared_async_call(*self.params)", "title": "" }, { "docid": "1e37332b018d46eb85873759a0b8393b", "score": "0.5326248", "text": "def dispatch_callback(self, items):\n if not self._manager.is_active:\n return\n\n batched_commands = collections.defaultdict(list)\n\n for item in items:\n batched_commands[item.__class__].append(item)\n\n _LOGGER.debug(\"Handling %d batched requests\", len(items))\n\n if batched_commands[requests.LeaseRequest]:\n self.lease(batched_commands.pop(requests.LeaseRequest))\n if batched_commands[requests.ModAckRequest]:\n self.modify_ack_deadline(batched_commands.pop(requests.ModAckRequest))\n # Note: Drop and ack *must* be after lease. 
It's possible to get both\n # the lease the and ack/drop request in the same batch.\n if batched_commands[requests.AckRequest]:\n self.ack(batched_commands.pop(requests.AckRequest))\n if batched_commands[requests.NackRequest]:\n self.nack(batched_commands.pop(requests.NackRequest))\n if batched_commands[requests.DropRequest]:\n self.drop(batched_commands.pop(requests.DropRequest))", "title": "" }, { "docid": "ac8c97cf0021c3b950ea3a6f57bc4cc9", "score": "0.53231066", "text": "def _execute_perform(bot):\n if not bot.connection_registered:\n # How did you even get this command, bot?\n raise Exception('Bot must be connected to server to perform commands.')\n\n commands = bot.config.core.commands_on_connect\n count = len(commands)\n\n if not count:\n LOGGER.info(\"No custom command to execute.\")\n return\n\n LOGGER.info(\"Executing %d custom commands.\", count)\n for i, command in enumerate(commands, 1):\n command = command.replace('$nickname', bot.config.core.nick)\n LOGGER.debug(\"Executing custom command [%d/%d]: %s\", i, count, command)\n bot.write((command,))", "title": "" }, { "docid": "f0025dca741279f760944489c23e1811", "score": "0.5320665", "text": "def _execute(self):\n\n\t\twhile int.from_bytes(self.registers['RIP'].data[4 : 8], byteorder='little') < int.from_bytes(self.registers['RDS'].data[4 : 8], byteorder='little') and self.running and not self.error:\n\t\t\t# Get opcode\n\t\t\topcode = int.from_bytes(self.handle_output(self.get_current_code_bytes(1)), byteorder='little')\n\t\t\tself.inc_rip(1)\n\t\t\tif not opcode in self.opcode_dict:\n\t\t\t\tself.handle_output((29, \"Invalid opcode.\"))\n\t\t\tfunc, n_args, d_args = self.opcode_dict[opcode]\n\t\t\t# Get args\n\t\t\targs = []\n\t\t\tfor arg in range(n_args):\n\t\t\t\targs.append(self.handle_output(self.parse_argument()))\n\t\t\t# Run the opcode\n\t\t\ttry:\n\t\t\t\tself.handle_output(func(*([self] + args), **d_args))\n\t\t\texcept Interrupt as e:\n\t\t\t\tself.running = False\n\t\t\t\treturn\n\n\t\tif not self.cpu.computer.operatingsystem.processes[self.pname[1]].threads[self.tid].waiting and self.running:\n\t\t\t# Exitcode 0\n\t\t\tself.cpu.update_from_computer()\n\t\t\tself.processmemory = self.cpu.memory.memorypartitions[self.pname]\n\t\t\tself.set(bytes([0, 0]), (\"MEM\", (int.to_bytes(self.processmemory.es, 4, byteorder='little'), bytes([2]))))\n\t\t\tself.output_exit = (0, None)\n\t\t\tself.running = False", "title": "" }, { "docid": "582fc2dd64711302dfb8b1e6f6dcd43d", "score": "0.5318237", "text": "def _execute(self):\n while True:\n buffer_entry = yield self.execute_buffer.get()\n if isinstance(buffer_entry, RegMoveReq):\n yield self.reg_move_engine\\\n .reg_req_queue.put(buffer_entry)\n elif isinstance(buffer_entry, InstrEntry):\n instr_entry = buffer_entry\n # dispatch instruction to operand collector units\n opcode = instr_entry.instr.opcode.split(\".\")[0]\n if opcode in self.config[\"alu_instr\"]:\n yield self.opc_nb_alu.instr_entry_queue.put(instr_entry)\n elif opcode in self.config[\"lsu_instr\"]:\n yield self.opc_lsu_extension.instr_entry_queue\\\n .put(instr_entry)\n else:\n raise NotImplementedError(\n \"Unsupported opcode: {}\".format(opcode)\n )\n else:\n raise NotImplementedError(\n \"Unsupported class: {}\".format(type(buffer_entry))\n )", "title": "" }, { "docid": "f96909f6fb7169abaa1e947f8700618d", "score": "0.53044575", "text": "def process(self):\n if len(self.execution_queue) > 0:\n now = time.time()\n next_command = self.execution_queue[0]\n if now >= (self.last_executed + next_command.delay):\n 
next_command.execute()\n self.execution_queue.pop(0)\n self.last_executed = now", "title": "" }, { "docid": "1ccae5c93865b4a8e4e65996d9e40221", "score": "0.5299173", "text": "def execute(\n self,\n cmd:str\n ):\n try:\n with self.get_connection() as conn:\n with self.conn.cursor() as cur:\n cur.execute(cmd)\n cur.close()\n conn.commit()\n logger.info(\"executed and commited: `{}`\".format(cmd[:30]))\n except (Exception, psycopg2.DatabaseError) as error:\n logger.error(error)\n self.get_connection().commit()", "title": "" }, { "docid": "34c6d2611e68825b339125a59f976d3c", "score": "0.52813107", "text": "def _stage_apply(self):\n self._logger.info('bulk operation start')\n self._logger.info('detected resource type: %s',\n HANDLERS[self._params['resource_type']]['type'])\n for entry in self._params['content']:\n self._handler.render_item(entry)\n\n if self._params.get('commit'):\n self._logger.info('committing database changes')\n MANAGER.session.commit()\n else:\n self._logger.info('rolling back database changes (dry-run)')\n MANAGER.session.rollback()", "title": "" }, { "docid": "b1fbb22eacd5260b29ed6ee941156b6c", "score": "0.5276733", "text": "async def execute_many(query_list, args_list):\n pool = await Db.get_db_pool()\n async with pool.acquire() as conn:\n i = 0\n try:\n async with conn.cursor() as cursor:\n for query, args in zip(query_list, args_list):\n await cursor.execute(query, args)\n i += 1\n affected_rows = cursor.rowcount\n await conn.commit()\n except Exception as e:\n await conn.rollback()\n error_log(\"db execute many errror [%s]: %s\", query_list[i] % list(args_list[i]), e)\n raise\n return affected_rows", "title": "" }, { "docid": "982439f834612bed498cb1552c13a73c", "score": "0.527403", "text": "def handle_commands(self):\n timer = CountdownTimer(1.0)\n while not timer.expired():\n topic, msg_obj = self._sub.receive_message(blocking=True, timeout_ms=0.05)\n if not topic:\n continue\n self._logger.debug('Received a message for topic {}', topic)\n if topic.lower() == self._cmd_topic:\n try:\n self.handle_command(msg_obj)\n except Exception as e:\n self._logger.error('Exception while handling command: {}', e)\n self._logger.error('msg_obj: {}', msg_obj)", "title": "" }, { "docid": "a086270d1db51d2d5311f1c0f43763d7", "score": "0.5273162", "text": "def execute_query(self):", "title": "" }, { "docid": "7178947c00dc0280fde89793b0205248", "score": "0.52627003", "text": "def run(self):\n\n try:\n num_of_locks = 0\n # first we acquire read locks check if inventory is enough, if so we acquire write lock\n while num_of_locks < len(self.order):\n for s_id, p_id, quantity in self.order:\n # check if theres any write lock\n if self.try_acquire_lock(s_id, p_id, 'read'):\n # set num_of_locks\n num_of_locks = num_of_locks + 1\n # read inventory\n inventory = self.read_inventory(s_id, p_id)\n # if out of order\n if inventory < quantity:\n # notify and go back\n self.my_error = f\"out of order ! 
siteID:{s_id}, pID:{p_id}, wanted amount:{quantity},\" \\\n f\" inventory:{inventory}\"\n return\n else:\n # wait until we update the lock\n while not (self.try_update_lock(s_id, p_id)):\n pass\n # execute updateInventory\n self.update_inventory1(s_id, int(p_id), int(inventory - quantity))\n self.insert_order(s_id, int(p_id), int(quantity))\n # commit all the changes\n self.commit()\n self.release_all_lock()\n except Exception as e:\n # if any Exception occur we want to print why.\n self.my_error = str(e)\n finally:\n # always release lock and rollback all the un-save changes\n if self.my_error is not None or self.timeout:\n self.rollback()\n self.undo()\n self.rollback()\n self.release_all_lock()", "title": "" }, { "docid": "4b8faa749f1da2229361d4456b4dd488", "score": "0.52614516", "text": "def handle_commands(self):\n while self.running:\n print(\"Command handler thread waiting for messages\")\n msg = self.c.poll()\n\n if not msg.error():\n print(\"Command handler thread unblocked, checking for message\")\n command = json.loads(msg.value().decode('utf-8'))\n\n if 'target_host' in command:\n # check to see if this message is addressed to us, or to\n # everyone in the fleet\n if command['target_host'] == host_id or command['target_host'] == \"ALL\":\n # it is, verify this command is in the valid commands map and run\n # it if so\n if command['command'] in valid_commands:\n run_command = command['command']\n output = run(valid_commands[run_command], check=True, stdout=PIPE)\n\n # add some logic to make sure this worked\n # send the output back\n results = {}\n results['stdout'] = str(output.stdout)\n results['hostname'] = host_id\n\n p.produce(response_topic, json.dumps(results))\n print(\"Sent results successfully\")\n else:\n # disregard, received a message not for us\n print(\"Ignoring message, not targeted to us\")\n pass\n elif msg.error().code() != KafkaError._PARTITION_EOF:\n print(\"Kafka error: {}\".format(msg.error()))\n\n return self.c.close()", "title": "" }, { "docid": "3a943c9d2d5ce07d15433d1a780dd9c9", "score": "0.5260305", "text": "def execute_commands(self, id, globals = {}, locals = {}):\n self._read_commands(id)\n exec(self.commands,globals,locals)", "title": "" }, { "docid": "910a1bae6c6b78057ef937d024e9df11", "score": "0.5260225", "text": "async def process_commands(self, message: Message, /) -> None:\n if message.author.bot:\n return\n\n ctx = await self.get_context(message)\n # the type of the invocation context's bot attribute will be correct\n await self.invoke(ctx) # type: ignore", "title": "" }, { "docid": "014da81f83e4fbcf74991107cbc7619f", "score": "0.52596986", "text": "def BlockingCommands(self) -> int:", "title": "" }, { "docid": "f5fee7b297bdeb2afb5b6d04d10160c4", "score": "0.52566296", "text": "def execute(cmd):\n run(cmd)", "title": "" }, { "docid": "dd4f0b50982b113d0f7f774ecbdc9eb3", "score": "0.52563536", "text": "def batch(self):\n pass", "title": "" }, { "docid": "b9cd42b06e520200249b2e95d7f48b78", "score": "0.52523476", "text": "def handle(self):\n avail = range(self.server.maxProcs)\n \n # Ignore any further data from now on.\n self.set_terminator(None)\n \n # Extract the command from the data we have read so far.\n cmd = self.raw_requestlines[0].strip()\n \n # Extract the name of the master input file.\n masterInputFile = extractInputFileName(cmd)\n \n # Prepare input files given the name of the master input file and the\n # maximun mumber of processes to start.\n procFileInfo = prepareDataFiles(masterInputFile, \n ['proc%04d' %(i) for i in 
avail])\n \n \n # Start as many worker processes as self.server.maxProcs\n workers = [Worker(convertCMD(cmd,\n masterInputFile,\n procFileInfo['proc%04d' %(i)][0],\n procFileInfo['proc%04d' %(i)][1]),\n procFileInfo['proc%04d' %(i)][1]) \\\n for i in avail]\n statusCodes = []\n outputData = ''\n \n # Activate the workers.\n n = self.server.maxProcs\n procs = [w.run() for w in workers]\n while(workers):\n i = 0\n while(i < n):\n try:\n procs[i].next()\n time.sleep(POLLING_TIME)\n i += 1\n except StopIteration:\n # cmd is done executing. Save the output and the status \n # code. Remove the corresponding worker from the pool.\n statusCodes.append(workers[i].status)\n outputData += workers[i].output\n del(workers[i])\n del(procs[i])\n n -= 1\n # <-- end while\n # <-- end while\n \n # Return the exit codes and STDOUT to the client. The format is\n # [(exit code, STDOUT), ...]\n data = (statusCodes, outputData)\n self.push(pickle.dumps(data, protocol=-1))\n return", "title": "" }, { "docid": "d014a51bb0091da5e4b3e0b7fc464053", "score": "0.52512276", "text": "def execute(self, args):\n # all methods should have a method that executes its tasks\n # based on the given commands\n raise NotImplementedError(\"every method needs an 'execute' method\")\n self.log(args) # all methods should record logs\n return self.status # should be set by self.check()", "title": "" }, { "docid": "6f1b969de17b42cf5e7254658abdb7d2", "score": "0.524105", "text": "def main():\n qs = OqJob.objects.filter(status='running') \\\n .values_list('id', 'job_pid', 'supervisor_pid')\n for job_id, job_pid, supervisor_pid in qs:\n if not supervising.is_pid_running(supervisor_pid):\n proc = multiprocessing.Process(target=supervise,\n args=(job_id, job_pid))\n proc.start()", "title": "" }, { "docid": "c8d2856be4bd6f26640dcad499c19f45", "score": "0.5239826", "text": "def __command_handler_daemon(self):\n while True:\n try:\n if not self.command_queue.empty():\n command_item = self.command_queue.get()\n method = command_item[0]\n arguments = command_item[1]\n keywordarguments = command_item[2]\n reply = method(*arguments, **keywordarguments)\n self.reply_queue.put(reply)\n else:\n self.keepalive()\n except ValueError as e:\n # workaround if something goes wrong with the serial connection\n # future me will certainly not hate past me for this...\n self.logger.critical(e)\n self.__connection.flush()\n # thread-safe purging of both queues\n while not self.command_queue.empty():\n self.command_queue.get()\n while not self.reply_queue.empty():\n self.reply_queue.get()", "title": "" }, { "docid": "12ff0d46571a56d9df73c9277e201769", "score": "0.5232365", "text": "def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())", "title": "" }, { "docid": "258e27a44590ef52e0eca51f865a2612", "score": "0.5230809", "text": "def ShellAsync(self, cmds, show_cmd=False):\n self.Cmds = cmds\n if show_cmd:\n for cmd in cmds:\n print(cmd)\n\n self.InsertMoreProcs()", "title": "" }, { "docid": "c615c421cff6036218dbfcebfb5fcba1", "score": "0.52228075", "text": "def perform(app, commands):\n for name, args in commands:\n action = ACTIONS.get(name, default_action(name))\n action(app, *args)", "title": "" }, { "docid": "e2c5048390278f3b4d545e15e781a335", "score": "0.5218612", "text": "async def process(self, user: User, command: str):\n pass", "title": "" }, { "docid": "853429175541e1138bf4b7e2860baaf0", "score": "0.5214108", "text": "def _execute(self, cmd, data=None, 
errors='strict'):\n if errors not in ['strict', 'ignore']:\n raise ValueError(\"\"\"errors argument must be \\'strict\\' (raise exception on bad command)\n or \\'ignore\\' (return None on bad command). '\"\"\")\n\n self.cur = self.con.cursor()\n\n if errors == 'ignore':\n try:\n self.cur.execute(cmd, data)\n except:\n self.con.rollback()\n\n elif errors == 'strict':\n self.cur.execute(cmd, data)\n\n self.con.commit()", "title": "" }, { "docid": "05941bf88159d2f90471b2c73a8976e9", "score": "0.5207732", "text": "def execute(self, *args, **kwargs):\n pass", "title": "" } ]
3303d6927f44a64c4d0fdd3feca1faf9
decode control choice strings >>> a = Parameter([]) >>> a._onOffParser(1) 'on'
[ { "docid": "37aea494cada7c57b9ba3f650af682da", "score": "0.6830201", "text": "def _onOffParser(self, usrStr):\n ref = {\n 'on' : ['1'],\n 'off' : ['0'],\n }\n usrStr = drawer.selectionParse(usrStr, ref)\n if usrStr == None:\n selStr = drawer.selectionParseKeyLabel(ref)\n raise error.ParameterObjectSyntaxError, 'bad control value: enter %s.' % selStr\n return usrStr", "title": "" } ]
[ { "docid": "621779a6535a480d22d4f8db6d1da406", "score": "0.55207944", "text": "def _decode_boolean(data: str) -> Tuple[int, base.Asn1Item]:\r\n if data[0] == '1':\r\n return 1, univ.Boolean(value=True)\r\n else:\r\n return 1, univ.Boolean(value=False)", "title": "" }, { "docid": "72f37b33b006f590ee71292d6fec6dea", "score": "0.53643435", "text": "def parse_switch(s):\n s = s.removeprefix(\"-\")\n s, v = s[:1], s[1:]\n if not v:\n v = True\n return s, v", "title": "" }, { "docid": "d7ac1d47d90ae9af2872bb2ee92ad3e5", "score": "0.52008945", "text": "def _decode(cls, value: int) -> tuple[bool, int]:\n control = bool(value & cls.APCI_CONTROLMASK)\n step_code = value & cls.APCI_STEPCODEMASK\n return control, step_code", "title": "" }, { "docid": "54e2c9563bf719cc0f900c28bf57367c", "score": "0.5199727", "text": "def decode(self, rawdata):\n option_number = 0\n\n while len(rawdata) > 0:\n if ord(rawdata[0]) == 0xFF:\n return rawdata[1:]\n dllen = ord(rawdata[0])\n delta = (dllen & 0xF0) >> 4\n length = (dllen & 0x0F)\n rawdata = rawdata[1:]\n (delta, rawdata) = readExtendedFieldValue(delta, rawdata)\n (length, rawdata) = readExtendedFieldValue(length, rawdata)\n option_number += delta\n option = option_formats.get(option_number, StringOption)(option_number)\n option.decode(rawdata[:length])\n self.addOption(option)\n rawdata = rawdata[length:]\n return ''", "title": "" }, { "docid": "d5cb1b7706bce18418d7ff9651ab0aed", "score": "0.51863825", "text": "def booltooffon(value):\n if value == \"1\" or value == \"true\" or value == \"on\":\n return \"on\"\n else:\n return \"off\"", "title": "" }, { "docid": "106348e16daed9ed09784099dfff31cc", "score": "0.5074576", "text": "def decode_option(self, data):\n opts = self.validate_data(data)\n if opts:\n opt = opts[0]\n value = self.get_format_match(opt).groups()[0]\n try:\n return self.get_value_encoder().decode(value)\n except ValueError as e:\n raise SettingRuntimeException('Invalid value to decode for setting {}. '\n 'Error: {}. 
Arg: {}'.format(q(self.name), str(e), opt))\n return self.default", "title": "" }, { "docid": "82c8ed169df7e77aa4f438e0c334c675", "score": "0.5039302", "text": "def olympus_decode_tag(self, value, mn_tags):\r\n pass", "title": "" }, { "docid": "fcaa4dddacf489354996546efb2fce57", "score": "0.50247914", "text": "def decode_flags(flag):\n if flag == \"\":\n flag = \"0x00\"\n str_rep = \"{:08b}\".format(eval(flag))\n return np.array([i for i in str_rep], dtype='int32')", "title": "" }, { "docid": "560d65cce5db31c818ae739db9330759", "score": "0.50155497", "text": "def _bool_to_opt(boolean):\n if boolean:\n return 'on'\n else:\n return 'off'", "title": "" }, { "docid": "ed779148031b02d5025e85a23c4303dc", "score": "0.49676272", "text": "def decode(self, datastring):\n\t\tself.ver, = unpack( 'B', datastring[0:1] )\n\t\tself.ack, = unpack( 'B', datastring[1:2] )\n\t\tself.typ, = unpack( 'B', datastring[2:3] )\n\t\tself.data = datastring[3:]\n\n\t\tif self.typ == 0x01:\tself.sens.process(self.data)\n\t\telif self.typ == 0x02:\tself.stat.decode(self.data)\n\t\telif self.typ == 0x03:\tself.comd.decode(self.data)\n\t\telif self.typ == 0x04:\tself.para.decode(self.data)", "title": "" }, { "docid": "9b5ba11559a7e8f8d54f353b09cb04ca", "score": "0.496606", "text": "def decode_operator(data: str) -> Operator:\n return Operator[data]", "title": "" }, { "docid": "cf849d145efc6e0e1a66dfab17f04391", "score": "0.49591225", "text": "def decode_port(data: List[str]) -> Port:\n return Port(Identifier(data[0]), decode_operator(data[1]))", "title": "" }, { "docid": "00cd9f26e35f633617987904fe9d3f38", "score": "0.49499172", "text": "def valueOf(string: str) -> 'SimpleValueControlStatus':\n ...", "title": "" }, { "docid": "12449366a7b8fedf4436ef3edf3c0517", "score": "0.49232545", "text": "def native_value(value):\n if value.lower() in ['on', 'true', 'yes', '1']:\n value = True\n elif value.lower() in ['off', 'false', 'no', '0']:\n value = False\n try:\n return ast.literal_eval(value)\n except ValueError:\n return value", "title": "" }, { "docid": "eae51607cb981d605abe7e7cf199d348", "score": "0.48615846", "text": "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 15\n (_x.deliverySwitch, _x.hopperSwitch, _x.doserSwitch, _x.doserSpeed, _x.wettingSwitch, _x.mixerSwitch, _x.mixerSpeed, _x.pumpSwitch, _x.pumpRotarySpeed, _x.pumpUpDownSpeed, _x.eStop, _x.tower2Cart, _x.cart2Tower, _x.RoboClaw5PWM, _x.RoboClaw6PWM,) = _get_struct_3Bb2BbB2b3B2b().unpack(str[start:end])\n self.deliverySwitch = bool(self.deliverySwitch)\n self.hopperSwitch = bool(self.hopperSwitch)\n self.doserSwitch = bool(self.doserSwitch)\n self.wettingSwitch = bool(self.wettingSwitch)\n self.mixerSwitch = bool(self.mixerSwitch)\n self.pumpSwitch = bool(self.pumpSwitch)\n self.eStop = bool(self.eStop)\n self.tower2Cart = bool(self.tower2Cart)\n self.cart2Tower = bool(self.cart2Tower)\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "title": "" }, { "docid": "1b44438c4e7ca6943a2003e558196fb0", "score": "0.48042917", "text": "def _scaleSwitchParser(self, usrStr):\n ref = {\n 'absolute' : ['a', '1'],\n 'proportional' : ['p', '0'],\n }\n usrStr = drawer.selectionParse(usrStr, ref)\n if usrStr == None:\n selStr = drawer.selectionParseKeyLabel(ref)\n raise error.ParameterObjectSyntaxError, \"bad step control. 
enter %s.\" % selStr\n return usrStr # may be None", "title": "" }, { "docid": "aba0fdb48ba0cbbeaa6dbff36e7469f5", "score": "0.4802388", "text": "def __init_states(self):\n self.__preset_inp.wait_ready()\n self.__preset, _, _ = self.__preset_inp.value.partition(\" \")\n self.__inpos = Pv.Pv('%s:IN_SET'%self.__preset)\n self.__outpos = Pv.Pv('%s:OUT_SET'%self.__preset)\n self.__indelta = Pv.Pv('%s:IN_DELTA'%self.__preset)\n self.__outdelta = Pv.Pv('%s:OUT_DELTA'%self.__preset)", "title": "" }, { "docid": "6920e23a6852d70a5c9ae24111e0d2ed", "score": "0.47875527", "text": "def parse_instr(instr):\n op = None\n idx = 0\n if instr[:7] == 'turn on':\n op = on\n idx = 7\n elif instr[:8] == 'turn off':\n op = off\n idx = 8\n else:\n op = toggle\n idx = 6\n\n rng_parts = instr[idx:].split(\" through \")\n rng = tuple([tuple([int(i) for i in rng_parts[0].split(',')]),tuple([int(i) for i in rng_parts[1].split(',')])])\n return op,rng", "title": "" }, { "docid": "e21d1e48716aa71c456a128607fe33c4", "score": "0.4774831", "text": "def decode_mode(mode):\n ret = \"\"\n if mode & BITMASK_MANUAL:\n ret = \"manual\"\n else:\n ret = \"auto\"\n\n if mode & BITMASK_AWAY:\n ret = ret + \" holiday\"\n if mode & BITMASK_BOOST:\n ret = ret + \" boost\"\n if mode & BITMASK_DST:\n ret = ret + \" dst\"\n if mode & BITMASK_WINDOW:\n ret = ret + \" window\"\n if mode & BITMASK_LOCKED:\n ret = ret + \" locked\"\n if mode & BITMASK_BATTERY:\n ret = ret + \" low battery\"\n\n return ret", "title": "" }, { "docid": "cd238418dfe10e37a67922cc1034314b", "score": "0.47610274", "text": "def test_decode(self, token, value, quote):\n # TODO: Need to test default value for 'default'.\n field = StringField(__name__, 0, quote=quote, default=\"XXX\")\n assert field.decode(token) == value", "title": "" }, { "docid": "9ac4bb1638ea850b1ec00b3c14dfefd3", "score": "0.47531903", "text": "def decode(self, values, tool, app):\n values = safe_loads(values) or {}\n self.page = values.pop(\"__page__\") if \"__page__\" in values else None\n self.rerun_remap_job_id = values.pop(\"__rerun_remap_job_id__\") if \"__rerun_remap_job_id__\" in values else None\n self.inputs = params_from_strings(tool.inputs, values, app, ignore_errors=True)", "title": "" }, { "docid": "1693ec20b29b93caf375b50e355575b1", "score": "0.47490948", "text": "def decode_commands_up(self, buf):\n i = 0\n while i < len(buf) :\n cmd = buf[i]\n i += 1\n if cmd == 0x01 :\n self._output.write('\\nResetInd LoRaWANversion:{}'.format(buf[i] & 0xf))\n i += 1\n elif cmd == 0x02 :\n self._output.write('\\nLinkCheckReq')\n elif cmd == 0x03 :\n self._output.write('\\nLinkADRAns Status:{:02x}'.format(buf[i]))\n i += 1\n elif cmd == 0x04 :\n self._output.write('\\nDutyCycleAns')\n elif cmd == 0x05 :\n self._output.write('\\nRXParamSetupAns Status:{:02x}'.format(buf[i]))\n i += 1\n elif cmd == 0x06 :\n self._output.write('\\nDevStatusAns Battery:{} Margin:{}'.format(buf[i], buf[i+1]))\n i += 2\n elif cmd == 0x07 :\n self._output.write('\\nNewChannelAns Status:{}'.format(buf[i]))\n i += 1\n elif cmd == 0x08 :\n self._output.write('\\nRxtimingSetupAns')\n elif cmd == 0x09 :\n self._output.write('\\nTxParamSetupAns')\n elif cmd == 0x0a :\n self._output.write('\\nDlChannelAns Status:{}'.format(buf[i]))\n i += 1\n elif cmd == 0x0b :\n version = buf[i] & 0xf\n self._output.write('\\nRekeyInd Dev LoRaWANversion:{}'.format(version))\n i += 1\n elif cmd == 0x0c :\n self._output.write('\\nADRParamSetupAns')\n elif cmd == 0x0d :\n self._output.write('\\nDeviceTimeReq')\n elif cmd == 0x0e :\n 
self._output.write('\\n0xe ???')\n elif cmd == 0x0f :\n self._output.write('\\nRejoinParamSetupAns')\n i += 1\n elif cmd == 0x10 :\n self._output.write('\\nPingSlotInfoReq Periodicity:{}'.format(buf[i]&0x7))\n i += 1\n elif cmd == 0x11 :\n self._output.write('\\nPingSlotChannelAns Status:{}'.format(buf[i]))\n i += 1\n elif cmd == 0x12 :\n self._output.write('\\nBeaconTimingReq deprecated')\n elif cmd == 0x13 :\n self._output.write('\\nBeaconFreqAns Status:{}'.format(buf[i]))\n i += 1", "title": "" }, { "docid": "b485fd75931f671869790bed77634bb3", "score": "0.47360328", "text": "def test_parser(self):\n self.assertThat(\n Delimited(b'1,burp,2', parser=Integer),\n Equals([1, None, 2]))", "title": "" }, { "docid": "d855073139ec53312eff7560be2f88f9", "score": "0.4687436", "text": "def get_parser(parser):\n # Special case bool so that we can explicitly give bool values otherwise\n # all values would be True since they're non-empty strings.\n if parser is bool:\n return parse_bool\n return parser", "title": "" }, { "docid": "90dbe23280b63707d5f0e80b47fae3ed", "score": "0.46802413", "text": "def _articulationParser(self, usrStr):\n ref = {\n 'attack' : ['a'],\n 'sustain' : ['s'],\n }\n usrStr = drawer.selectionParse(usrStr, ref)\n if usrStr == None:\n selStr = drawer.selectionParseKeyLabel(ref)\n raise error.ParameterObjectSyntaxError, 'bad control value: enter %s.' % selStr\n return usrStr", "title": "" }, { "docid": "9efa1df99e03ccd8e1a26ce9fe4edf4e", "score": "0.46743447", "text": "def convert_one_to_state_descriptions(char_class, num):\n descriptions = []\n if num == 1:\n descriptions.append([(get_is_in_char_set(char_class), 'next')])\n elif num == '?':\n descriptions.append([(get_is_in_char_set(char_class), 'next'), (get_return_True(), 'next')])\n elif num == float('inf'):\n descriptions.append([(get_is_in_char_set(char_class), 'here'), (get_return_True(), 'next')])\n else:\n raise Exception('Logic Error!')\n return descriptions", "title": "" }, { "docid": "7e52fb7de4856c30f8cc7d0e6c02e409", "score": "0.46637687", "text": "def test_deconstruct_manual_choices_without_enum(self):\n choices_list = [('VAL1', 'Value 1'), ('VAL2', 'Value 2')]\n field = EnumField(choices=choices_list)\n name, path, args, kwargs = field.deconstruct()\n expected_kwargs = {'choices': choices_list}\n self.assertEqual(kwargs, expected_kwargs)", "title": "" }, { "docid": "59a32a25353defae33bcee3680551a12", "score": "0.46635965", "text": "def __str2bool(v):\n if isinstance(v, bool):\n return v\n\n if v.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n if v.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")", "title": "" }, { "docid": "ed44a1c9d09dcb2dcbc82feb1bb4150b", "score": "0.46526983", "text": "def parse_modes(self, read_str, instruction):\n names = instruction.__code__.co_varnames\n modes = reversed(read_str.zfill(len(names)))\n return [mode + 'o'[name != 'out':] for mode, name in zip(modes, names)]", "title": "" }, { "docid": "9c9ac22423ca2e482198a57ba1c87b9b", "score": "0.46468607", "text": "def str2bool(v):\n if v.lower() in (\"yes\", \"true\", \"True\", \"t\", \"y\", \"1\"):\n return True\n elif v.lower() in (\"no\", \"false\", \"False\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise argparse.ArgumentTypeError(\"Boolean value expected.\")", "title": "" }, { "docid": "e98e7d0605cc2b2c36f61d35d14d5b71", "score": "0.46450526", "text": "def optionxform(self, optionstr):\r\n return optionstr", "title": "" }, 
{ "docid": "9c3ed3fbbed8e13e66f3baee95a3dabc", "score": "0.46421444", "text": "def unmarshal(self, data):\n offset = 0\n processing_bitset = False\n for argument in self.__slots__:\n data_type = self.type(argument)\n\n if offset == 7 and processing_bitset:\n data = data[1:]\n offset = 0\n\n if processing_bitset and data_type != 'bit':\n offset = 0\n processing_bitset = False\n data = data[1:]\n\n consumed, value = decode.by_type(data, data_type, offset)\n\n if data_type == 'bit':\n offset += 1\n processing_bitset = True\n consumed = 0\n\n setattr(self, argument, value)\n if consumed:\n data = data[consumed:]", "title": "" }, { "docid": "1e5fdea79961e5f3bf7cfcc04e0c2758", "score": "0.46219406", "text": "def _decode_control(self, p_topic, p_message):\n l_logmsg = '\\tControl: '\n l_control = extract_tools.get_mqtt_field(p_message, 'Control')\n if l_control == 'On':\n l_logmsg += ' Turn On '\n self.m_api.Start()\n elif l_control == 'Off':\n l_logmsg += ' Turn Off '\n self.m_api.Stop()\n\n elif l_control == 'VolUp1':\n l_logmsg += ' Volume Up 1 '\n else:\n l_logmsg += ' Unknown samsung Control Message {} {}'.format(p_topic, p_message)\n return l_logmsg", "title": "" }, { "docid": "4db9db7911ea992ab66b37983040b3cc", "score": "0.4621466", "text": "def do_unicode(self, parsed):", "title": "" }, { "docid": "6dd276591c52f17e658e4b1b540cb5e9", "score": "0.46118903", "text": "def _handle_special_yaml_cases(v):\r\n if \";\" in v:\r\n v = v.split(\";\")\r\n else:\r\n try:\r\n v = int(v)\r\n except ValueError:\r\n if v.lower() == \"true\":\r\n v = True\r\n elif v.lower() == \"false\":\r\n v = False\r\n return v", "title": "" }, { "docid": "0e840815319ac047a9d60616e292757f", "score": "0.45999005", "text": "def parse_mode(self, op):\n cm, bm, am, d, e = str(op).rjust(5, '0')\n self.cm, self.bm, self.am = int(cm), int(bm), int(am)\n self.de = int(d + e)", "title": "" }, { "docid": "5102a54d95f5186ff600935ef609c255", "score": "0.45916373", "text": "def parse_token(getflag_output):\n\n return getflag_output.decode().split(\":\")[-1].strip()", "title": "" }, { "docid": "f65b4f43d89a1240860ee4bbe6b8434d", "score": "0.45842412", "text": "def parsenum(s):\n\n if not isinstance(s, str):\n return s\n\n s = NUMS_RE.sub(\"\", s or \"\")\n if not s or s == \"--\":\n return 0\n\n if \".\" in s:\n return float(s)\n return int(s)", "title": "" }, { "docid": "d3e23fc158404667183f8b65b5417fdc", "score": "0.4583341", "text": "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 8\n (_x.cameraNum, _x.cameraStatus, _x.PIPNum, _x.PIPStatus, _x.toggleHUD, _x.expandMap, _x.mapStatus, _x.toggleHelp,) = _struct_8b.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "title": "" }, { "docid": "b919e354ede4390e89af63e9bda6b9d4", "score": "0.4580749", "text": "def _toFieldValue(self, input):\n return input == 'on'", "title": "" }, { "docid": "174b82c87afb35818f30aa3a3426f6cc", "score": "0.45769927", "text": "def _decoders(self) -> Any:\n return [\n ((6, 2), \"protocol_version\", self._wrap_unpack, [UNPACK_UINT2]),\n ((10, 16), \"reserved_aet\", self._wrap_bytes, []), # Called AET\n ((26, 16), \"reserved_aec\", self._wrap_bytes, []), # Calling AET\n ((74, None), \"variable_items\", self._wrap_generate_items, []),\n ]", "title": "" }, { "docid": "e89397e81552f41dba507e32b1f0d10d", "score": "0.45759213", "text": "def _parse_status_flag(status_flag):\n cooling = bit_is_set(status_flag, 0)\n heating = bit_is_set(status_flag, 1)\n 
energy = bit_is_set(status_flag, 2)\n celcius = bit_is_set(status_flag, 3)\n hold = bit_is_set(status_flag, 4)\n return (cooling, heating, energy, celcius, hold)", "title": "" }, { "docid": "8ba35a62e39f73a761c6758f124a5408", "score": "0.45756567", "text": "def decode_commands_down(self, buf):\n i = 0\n while i < len(buf) :\n cmd = buf[i]\n i += 1\n if cmd == 0x01 :\n self._output.write('\\nResetConf LoRaWANversion:{}'.format(buf[i] & 0xf))\n i += 1\n elif cmd == 0x02 :\n self._output.write('\\nLinkCheckAns Margin:{} GwCnt:{}'.format(buf[i], buf[i+1]))\n i += 2\n elif cmd == 0x03 :\n DataRate = buf[i] >> 4\n TxPower = buf[i] & 0xf\n ChMask = buf[i+1:i+3]\n ChMaskCntl = (buf[i+3] >> 4) & 0x7\n NbTrans = buf[i+3] & 0xf\n self._output.write('\\nLinkADRReq DataRate:{} TxPower:{} ChMask:{} ChMaskCntl:{} NbTrans:{}'.format(DataRate, TxPower, ChMask, ChMaskCntl, NbTrans))\n i += 4\n elif cmd == 0x04 :\n self._output.write('\\nDutyCycleReq MaxDCycle:{}'.format(buf[i]&0xf))\n i += 1\n elif cmd == 0x05 :\n RX1DRoffset = (buf[i]>>4) & 0x7\n RX2DataRate = buf[i] & 0xf\n freq = getHundredHz(buf[i+1:i+4])\n self._output.write('\\nRXParamSetupReq RX1DRoffset:{} RX2DataRate:{} Freq:{}'.format(RX1DRoffset, RX2DataRate, freq))\n i += 4\n elif cmd == 0x06 :\n self._output.write('\\nDevStatusReq')\n elif cmd == 0x07 :\n freq = getHundredHz(buf[i+1:i+4])\n self._output.write('\\nNewChannelReq ChIndex:{} Freq:{} DrRange:{:02x}'.format(buf[i], freq, buf[i+4]))\n i += 5\n elif cmd == 0x08 :\n self._output.write('\\nRxtimingSetupReq Delay:{}'.format(buf[i]&0xf))\n i += 1\n elif cmd == 0x09 :\n DownlinkDwellTime = (buf[i] >> 5) & 0x1\n UplinkDwellTime = (buf[i] >> 4) & 0x1\n MaxEIRP = buf[i] & 0xf\n self._output.write('\\nTxParamSetupReq DownlinkDwellTime:{} UplinkDwellTime:{} MaxEIRP:{}'.format(DownlinkDwellTime, UplinkDwellTime, MaxEIRP))\n i += 1\n elif cmd == 0x0a :\n freq = getHundredHz(buf[i+1:i+4])\n self._output.write('\\nDlChannelReq ChIndex:{} Freq:{} DrRange:{:02x}'.format(buf[i], freq, buf[i+4]))\n i += 5\n elif cmd == 0x0b :\n version = buf[i] & 0xf\n self._output.write('\\nRekeyConf Serv LoRaWANversion:{}'.format(version))\n i += 1\n elif cmd == 0x0c :\n self._output.write('\\nADRParamSetupReq')\n i += 1\n elif cmd == 0x0d :\n self._output.write('\\nDeviceTimeAns')\n i += 5\n elif cmd == 0x0e :\n self._output.write('\\nForceRejoinReq')\n i += 2\n elif cmd == 0x0f :\n self._output.write('\\nRejoinParamSetupReq')\n i += 1\n elif cmd == 0x10 :\n self._output.write('\\nPingSlotInfoAns')\n elif cmd == 0x11 :\n freq = getHundredHz(buf[i:i+3])\n DataRate = buf[3] & 0xf\n self._output.write('\\nPingSlotChannelReq Frequency:{} DataRate:{}'.format(freq, DataRate))\n i += 4\n elif cmd == 0x12 :\n self._output.write('\\nBeaconTimingAns deprecated')\n elif cmd == 0x13 :\n freq = getHundredHz(buf[i:i+3])\n self._output.write('\\nBeaconFreqReq Frequency:{}'.format(freq))\n i += 3", "title": "" }, { "docid": "4cc81384de416df8b64c2628db6f3502", "score": "0.45734334", "text": "def _decode_insteon_message_flag(p_byte):\n\n def decode_message_type_flag(p_type):\n MESSAGE_TYPE_X = ['SD', 'SDA', 'SC', 'SCA', 'SB', 'SDN', 'SA', 'SCN']\n return MESSAGE_TYPE_X[p_type] + ', '\n\n def decode_extended_flag(p_extended):\n MESSAGE_LENGTH_X = ['Std-Len', 'Ext-Len']\n return MESSAGE_LENGTH_X[p_extended] + ', '\n\n l_type = (p_byte & 0xE0) >> 5\n l_extended = (p_byte & 0x10) >> 4\n l_hops_left = (p_byte & 0x0C) >> 2\n l_hops_max = (p_byte & 0x03)\n l_ret = decode_message_type_flag(l_type)\n l_ret += decode_extended_flag(l_extended)\n 
l_ret += \"Hops:{:d}/{:d}({:#x})\".format(l_hops_left, l_hops_max, p_byte)\n return l_ret", "title": "" }, { "docid": "f88566a0eb145d3179dab6f1017b6dd9", "score": "0.45732498", "text": "def str2bool(v):\n\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "6be6c9df199ce0f75b877ee88f4d4bf5", "score": "0.45632344", "text": "def decode(self, string):\n raise NotImplementedError", "title": "" }, { "docid": "407d13d8a2b36395d4ae46f05fb382eb", "score": "0.45620036", "text": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "b32c983aa75a50ed492ab2b9f378b377", "score": "0.45605752", "text": "def _parse_bool_flag(cls, func_str, prop_name):\n prop_re = re.compile(prop_name + r\"=([0,1])\")\n match = prop_re.search(func_str)\n if match:\n value = ast.literal_eval(match.group(1))\n if not isinstance(value, int):\n raise ValueError(\"Unexpected format for {0} value. Expected e.g. {0}=1\".format(prop_name))\n else:\n raise ValueError(\"Cannot find {0}= in function_str\".format(prop_name))\n\n return value", "title": "" }, { "docid": "fe63d192de3de63cf23b7528cb6b89fb", "score": "0.4552372", "text": "def getStates(self, *args):\n states = []\n for idx, bit in enumerate([x.get() for x in self.stringVarList]):\n if bit != '-':\n states.append((self.options[idx], bit))\n return states", "title": "" }, { "docid": "b1262b754c046be70c70c25d6678ea9a", "score": "0.4540561", "text": "def _parse_boolean(value):\r\n if re.match(\"^(on|true|yes|1)$\", str(value), re.IGNORECASE):\r\n return True\r\n\r\n if re.match(\"^(off|false|no|0)$\", str(value), re.IGNORECASE):\r\n return False\r\n\r\n raise Exception(\"Unable to coerce value '{}' to boolean\".format(value))", "title": "" }, { "docid": "09dbd0434b0e60d274febc5f9180f6ef", "score": "0.45376247", "text": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "09dbd0434b0e60d274febc5f9180f6ef", "score": "0.45376247", "text": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "09dbd0434b0e60d274febc5f9180f6ef", "score": "0.45376247", "text": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "8fece459556e4fb8877e4c7e622fa52f", "score": "0.453429", "text": "def gen_bool_parse(val):\n val = val.strip()\n if not val:\n return False\n try:\n return bool(int(val))\n except:\n pass\n ch = val[0]\n if ch in {'t', 'T', 'y', 'Y'}:\n return True\n if ch in {'f', 'F', 'n', 'N'}:\n return False\n raise ValueError('\"%s\" does not look like a boolean' % 
(val,))", "title": "" }, { "docid": "e32dd590b0088ad4073f928ff1abff2a", "score": "0.4529224", "text": "def decode(data, true_char):\n encode = lambda x: \"1\" if x == true_char else \"0\"\n binary = \"\".join(encode(x) for x in data)\n return int(binary, 2)", "title": "" }, { "docid": "b3bc368dc2515e5743961fee3b5726db", "score": "0.4517948", "text": "def strtobool(val):\n val = val.lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n elif val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))", "title": "" }, { "docid": "055c3533156e0ece84f7731ace3644af", "score": "0.4496978", "text": "def parseFlags(self):\n flagStr = self.pageData.get('LABEL', '')\n return set(flagStr.split(', '))", "title": "" }, { "docid": "9ed9a6a982e48f60be784e005406d04b", "score": "0.4495775", "text": "def _decode(instruction):\n # Very naive parser.\n gen_ins = {'add': lambda args: ins.Add(args[0], args[1], args[2]),\n 'addi': lambda args: ins.AddI(args[0], args[1], int(args[2])),\n 'sub': lambda args: ins.Sub(args[0], args[1], args[2]),\n 'subi': lambda args: ins.SubI(args[0], args[1], int(args[2])),\n 'mul': lambda args: ins.Mul(args[0], args[1], args[2]),\n 'muli': lambda args: ins.MulI(args[0], args[1], int(args[2])),\n 'ldr': lambda args: ins.Load(args[0], args[1]),\n 'str': lambda args: ins.Store(args[0], args[1]),\n 'j': lambda args: ins.Jump(int(args[0])),\n 'blth': lambda args: ins.Blth(args[0], args[1], int(args[2])),\n 'halt': lambda args: ins.Halt()}\n fields = instruction['instruction_str'].split(' ')\n try:\n front_end_ins = gen_ins[fields[0]](fields[1:])\n if fields[0] == 'blth':\n front_end_ins.branch_info = instruction['branch_info']\n return front_end_ins\n except:\n raise ValueError('unknown instruction %r' % instruction)", "title": "" }, { "docid": "cc9719594362f59c57786e7555919c50", "score": "0.44919345", "text": "def strtobool(val):\n val = val.lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n if val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n raise ValueError(f\"invalid truth value {val}\")", "title": "" }, { "docid": "3016cd4b3fe01c02afa31e02f6388ed5", "score": "0.44914186", "text": "def parse(self):\n\n parts = Token.split(self.text)\n\n if len(parts) == 0:\n return\n\n if parts[0][0] == '!':\n self.command = parts[0][1:]\n\n split = 1\n for p in range(1, len(parts)):\n if parts[p][0] == '-':\n if '=' in parts[p]:\n flag = parts[p][1:].split('=')\n self.flags[flag[0]] = flag[1]\n else:\n self.flags[parts[p][1:]] = None\n split = p + 1\n else:\n break\n\n self.args = parts[split:]", "title": "" }, { "docid": "bbb8398b362476e520e4233b4d13e42c", "score": "0.44895816", "text": "def str_to_bool(string):\r\n\r\n if string is not None:\r\n if string.lower() in [\"true\", \"yes\", \"1\", \"on\"]:\r\n return True\r\n elif string.lower() in[\"false\", \"no\", \"0\", \"off\"]:\r\n return False\r\n\r\n return None", "title": "" }, { "docid": "8999c5da8486d4733a34fafc49d1f74a", "score": "0.44820485", "text": "def pynini_decode(inputBytes):\n asString = inputBytes.decode(\"utf8\")\n asTokens = (from_att_symbol(symbol) for symbol in asString.split(' '))\n return \"\".join(asTokens)", "title": "" }, { "docid": "fa8b1b5d463b550098a71bfe369faf30", "score": "0.44760013", "text": "def _parse_parameter_options(self, options):\n return self._select_options(options, self.ALL_OPTIONS, invert=True)", "title": "" }, { "docid": 
"a3abe676ead87bc31df7d47c9f8fda6b", "score": "0.44724858", "text": "def parse(str):", "title": "" }, { "docid": "d1b7f736c1cf4750e844f38c22ac23e5", "score": "0.44721937", "text": "def on_eAutoParsingBtn_toggled(self, checked):\n egOnlyBtns.toggleParse(self)", "title": "" }, { "docid": "b3a2956df735cd2258da221430ac2c3d", "score": "0.44695023", "text": "def bencode_decoder(bencode_data):\n if bencode_data[0] == \"i\":\n decoded_value, bencode_data = decode_int(value=bencode_data)\n elif bencode_data[0].isdigit():\n decoded_value, bencode_data = decode_string(value=bencode_data)\n elif bencode_data[0] == \"l\":\n decoded_value, bencode_data = decode_list(value=bencode_data)\n elif bencode_data[0] == \"d\":\n decoded_value, bencode_data = decode_dictionary(value=bencode_data)\n else:\n raise TypeError(\"Not a proper Bencode String\")\n return decoded_value, bencode_data", "title": "" }, { "docid": "643aa1caa0803898f447ad163cdd5199", "score": "0.44650644", "text": "def _decoders(self) -> Any:\n return [\n ((6, 2), \"protocol_version\", self._wrap_unpack, [UNPACK_UINT2]),\n ((10, 16), \"called_ae_title\", self._wrap_bytes, []),\n ((26, 16), \"calling_ae_title\", self._wrap_bytes, []),\n ((74, None), \"variable_items\", self._wrap_generate_items, []),\n ]", "title": "" }, { "docid": "462b21ddaa58227b7f2f573e3578c0bd", "score": "0.44535503", "text": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('true'):\n return True\n elif v.lower() in ('false'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "921b64490b0427f7b7fcf38ab3fb7598", "score": "0.4451403", "text": "def decode(self, s):\n if isinstance(s, basestring) and '&' in s:\n for x in self.decodings:\n s = s.replace(x[0], x[1])\n return s", "title": "" }, { "docid": "80cf5d6ceeb981a449a3dd4d1d294864", "score": "0.44450536", "text": "def COM_SET_OPTION(self):\n return 'COM_SET_OPTION',['OK_Packet','ERR_Packet']", "title": "" }, { "docid": "db70de0a31bdd6fd87816a375c75a568", "score": "0.44439882", "text": "def decode(self, code, syndrome, **kwargs):", "title": "" }, { "docid": "ea7eda09068ecb6c5aef019798f13dab", "score": "0.44387847", "text": "def convert_param_string(s):\n config = dict([p.split(\"=\") for p in s.split(\",\")])\n\n # force typecasting in this order\n types = [int, float]\n for param in config:\n v = config[param]\n for t in types:\n try:\n v = t(v)\n except:\n continue\n config[param] = v\n break\n if config[param] in ['true','True']:\n config[param] = True\n elif config[param] in ['false','False']:\n config[param] = False\n\n return config", "title": "" }, { "docid": "efc7251fa6f1ac101ca4a47a754e60cf", "score": "0.4437709", "text": "def decode(self, data):", "title": "" }, { "docid": "a929c389cb7e873f11de9a9f0bbb0493", "score": "0.4433311", "text": "def _parse(self):\n\n parseOptions = True;\n self.__parsed = self.__tokens[:];\n try:\n token = self.__parsed.pop(0);\n while (None is not token) :\n if (parseOptions and '' == token) :\n self.__parseArgument(token);\n elif (parseOptions and '--' == token) :\n parseOptions = False;\n elif parseOptions and token.startswith('--') :\n self.__parseLongOption(token);\n elif parseOptions and token.startswith('-') :\n self.__parseShortOption(token);\n else :\n self.__parseArgument(token);\n token = self.__parsed.pop(0);\n except IndexError:\n pass;", "title": "" }, { "docid": "3d17e13c129a5472d2161248db031e16", "score": "0.44326496", "text": "def decode(self, s):\n parse_float = self.parse_float\n if 
parse_float is float:\n parse_float = None\n parse_int = self.parse_int\n if parse_int is int:\n parse_int = None\n parse_constant = self.parse_constant\n if parse_constant == _CONSTANTS.__getitem__:\n parse_constant = None\n return _loads(s, object_hook=self.object_hook, parse_float=parse_float,\n parse_int=parse_int, parse_constant=parse_constant,\n object_pairs_hook=self.object_pairs_hook, strict=self.strict)", "title": "" }, { "docid": "a521f5079295c8ed95cd97840fa70ae2", "score": "0.44280192", "text": "def __init__(__self__, *,\n name: str,\n on: 'outputs.ExpressionResponse',\n type: str,\n cases: Optional[Sequence['outputs.SwitchCaseResponse']] = None,\n default_activities: Optional[Sequence[Any]] = None,\n depends_on: Optional[Sequence['outputs.ActivityDependencyResponse']] = None,\n description: Optional[str] = None,\n on_inactive_mark_as: Optional[str] = None,\n state: Optional[str] = None,\n user_properties: Optional[Sequence['outputs.UserPropertyResponse']] = None):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"on\", on)\n pulumi.set(__self__, \"type\", 'Switch')\n if cases is not None:\n pulumi.set(__self__, \"cases\", cases)\n if default_activities is not None:\n pulumi.set(__self__, \"default_activities\", default_activities)\n if depends_on is not None:\n pulumi.set(__self__, \"depends_on\", depends_on)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if on_inactive_mark_as is not None:\n pulumi.set(__self__, \"on_inactive_mark_as\", on_inactive_mark_as)\n if state is not None:\n pulumi.set(__self__, \"state\", state)\n if user_properties is not None:\n pulumi.set(__self__, \"user_properties\", user_properties)", "title": "" }, { "docid": "1970e7cf87f157898985c70685a062df", "score": "0.44139403", "text": "def assess_type(self):\n self.value = None\n self.min = None\n self.max = None\n self.step = None\n self.default = None\n self.menu = None #some day in the future we will extract the control menu entries here.\n\n self.info = self.get_info()\n \"\"\"\n D0 1 = Supports GET value requests Capability\n D1 1 = Supports SET value requests Capability\n D2 1 = Disabled due to automatic mode (under device control) State\n D3 1 = Autoupdate Control Capability\n D4 1 = Asynchronous Control Capability\n D5 1 = Disabled due to incompatibility with Commit state. 
State\n \"\"\"\n if self.info > 0 : # Control supported\n self.value = self.get_val_from_device()\n self.min = self.get_(UVC_GET_MIN)\n self.max = self.get_(UVC_GET_MAX)\n self.step = self.get_(UVC_GET_RES)\n self.default = self.get_(UVC_GET_DEF)\n\n if ((self.max,self.min) == (None,None)) or ((self.max,self.min) == (1,0)) :\n self.type = \"bool\"\n # elif (self.max,self.min) == (None,None):\n # ###I guess this should be a menu\n # self.type = \"int\"\n # self.flags = \"active\"\n # self.min = 0\n # self.max = 20\n # self.step = 1\n else:\n self.type = \"int\"\n\n if self.info >> 3 & 1: # Disabled due to automatic mode (under device control)\n self.flags = \"inactive\"\n else:\n self.flags = \"active\"\n else:\n self.type = \"unknown type\"\n self.flags = \"control not supported\"\n self.value = None", "title": "" }, { "docid": "627c5f4d442a4c48ba342926c0bbe434", "score": "0.44135877", "text": "def __init__(self, on: Dict[str, Any], off: Dict[str, Any]):\n\n super().__init__(dict(on, **off))\n self.on = on\n self.off = off\n self.active = False", "title": "" }, { "docid": "fe4e044062b2bfc03459fba7cc0f9e7c", "score": "0.44135416", "text": "def get_bool_from_ini(ini_param):\n if not isinstance(ini_param, str):\n return None\n if ini_param.lower() in ['0', 'off', 'false']:\n return False\n elif ini_param.lower() in ['1', 'on', 'true']:\n return True\n else:\n return None", "title": "" }, { "docid": "0a0ec7f57afc10622cc84ebc2c00f1e5", "score": "0.44093332", "text": "def parse(self, inp: str):\n op_code = OpCode()\n # TODO:\n # 1: split string into a verb and rest of string\n tokens = tokenizer(inp)\n # 2: lookup verb in the _verbs dictionary, and place its numeric value\n if tokens[0].lower() in self._verbs:\n op_code = OpCode(verb=self._primitives[self._verbs[tokens[0].lower()]])\n op_code._inp_params = []\n for token in tokens[1:]:\n if token.lower() in self._parameters:\n op_code._inp_params.append(self._primitives[self._parameters[token.lower()]])\n else:\n op_code._inp_params.append(token)\n else:\n op_code = OpCode(error=903) # Invalid Verb\n\n # 3: slice input string into tokens; keywords and literals\n # literals identified by double quotes, single quotes or square brackets surrounding them\n # 4: lookup non literal tokens and replace them with values from _objects and _decoration dictionary\n # 5: check for syntax maps\n # if all is ok, return op code object\n # otherwise, return a syntax error op code object\n return op_code", "title": "" }, { "docid": "3a5f410abbacde139186f4c4dca73ab4", "score": "0.44066578", "text": "def decode(buff):\n\t\tpp = list(map(ord, buff))\n\t\tif 0 == len(pp) == 1:\n\t\t\tpp = []\n\t\treturn pp", "title": "" }, { "docid": "7723c66894f3157abafaa556d88110f8", "score": "0.44054854", "text": "def getOptionsControl(control):\n return re.search(\"description=\\'([^']+)\\'\", str(control)).group(0)", "title": "" }, { "docid": "26339716281a58d2f5165e2518758c70", "score": "0.43989006", "text": "def decode(string):\n return parser.Parser(lexer.Lexer(string)).data", "title": "" }, { "docid": "9bac2c40f0fccb3ee805fa51dde19e5a", "score": "0.43987343", "text": "def _parse(self):\n\n for key, value in self.__parameters.items():\n if key.startswith('--') :\n self.__addLongOption(key[2:], value);\n elif key.startswith('-') :\n self.__addShortOption(key[1:], value);\n else :\n self.__addArgument(key, value);", "title": "" }, { "docid": "843d7baddaeb5092b0b485e94ef74614", "score": "0.4389272", "text": "def parse_auto_off_config(response):\n auto_off_config = None\n try:\n 
time_left = ba.hexlify(response)[194:202]\n all_seconds = int(time_left[6:8] + time_left[4:6] + time_left[2:4] + time_left[0:2] , 16)\n auto_off_config = convert_seconds_to_iso_time(all_seconds)\n except:\n _LOGGER.warning('failed to parse off timer data from response')\n\n return auto_off_config", "title": "" }, { "docid": "b8ea2cbff0e93e589766a015cdc7f2cd", "score": "0.43879446", "text": "def convert(self, value, param, ctx):\n try:\n return self.ops[value.lower()]\n except:\n self.fail('{} is not a valid option'.format(value, param, ctx))", "title": "" }, { "docid": "c49a6680ff2bd5e1de7109761371062b", "score": "0.43870988", "text": "def bool_flag(s):\r\n if s.lower() in ['off', 'false', '0']:\r\n return False\r\n if s.lower() in ['on', 'true', '1']:\r\n return True\r\n raise argparse.ArgumentTypeError(\"invalid value for a boolean flag (0 or 1)\")", "title": "" }, { "docid": "66700c7db1161ae9f10f2991ea73a2e0", "score": "0.43784449", "text": "def power_off(text=ALL_OFF, port=1, state=OFF):\n if power_outlet == \"epower4\":\n cmd = 'P' + str(port) + '=0\\r'\n elif power_outlet == \"epower4v2\":\n cmd = '/P0' + str(port) + '=0\\r'\n else:\n if port == '0':\n cmd=text\n else:\n port = (port - 1) * 2\n cmd = text[:port] + state + text[port + 1:]\n return str(cmd)", "title": "" }, { "docid": "06b53c2df3dd781512de71199f2785b0", "score": "0.4375632", "text": "def offontobool(value):\n if type(value) == str:\n if value.lower() == \"on\":\n return 1\n elif value.lower() == \"off\":\n return 0\n return value", "title": "" }, { "docid": "56f8b91fa654dab8b812ad1b91fef8aa", "score": "0.43719998", "text": "def decode_message_flag(p_byte):\n\n def decode_message_type_flag(p_type):\n MESSAGE_TYPE_X = ['Dir(SD)', 'Dir_ACK(SD-ACK)', 'AllCleanup(SC)', 'All_Cleanup_ACK(SC-ACK)', 'Brdcst(SB)', 'Direct_NAK(SD-NAK)', 'All_Brdcst(SA)', 'All_Cleanup_NAK(SC-NAK)']\n return MESSAGE_TYPE_X[p_type] + '-Msg, '\n\n def decode_extended_flag(p_extended):\n MESSAGE_LENGTH_X = [' Std-Len,', ' Ext-Len,']\n return MESSAGE_LENGTH_X[p_extended]\n\n l_type = (p_byte & 0xE0) >> 5\n l_extended = (p_byte & 0x10)\n l_hops_left = (p_byte & 0x0C) >= 4\n l_hops_max = (p_byte & 0x03)\n l_ret = decode_message_type_flag(l_type)\n l_ret += decode_extended_flag(l_extended)\n l_ret += \" Hops:{:d}/{:d}({:#X})\".format(l_hops_left, l_hops_max, p_byte)\n return l_ret", "title": "" }, { "docid": "d1a1fc760298cbd65a1c947e5048cfef", "score": "0.43706724", "text": "def convert_flags(flags=None):\n if flags is None or flags == \"\":\n return None\n flags = flags.strip().upper()\n return [TCP_FLAGS[x] for x in flags]", "title": "" }, { "docid": "e74124a9ee27cb1678daf47bce78cf22", "score": "0.4370477", "text": "def decode(self, encoding, program):\n pass", "title": "" }, { "docid": "ae5baaf48942b1e4726c053bfd454622", "score": "0.43642277", "text": "def deserialize(self, str):\n try:\n end = 0\n _x = self\n start = end\n end += 4\n (_x.connect_disconnect_robot, _x.shutdown_system, _x.connect_over_ethercat, _x.value,) = _struct_4B.unpack(str[start:end])\n self.connect_disconnect_robot = bool(self.connect_disconnect_robot)\n self.shutdown_system = bool(self.shutdown_system)\n self.connect_over_ethercat = bool(self.connect_over_ethercat)\n self.value = bool(self.value)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.comment = str[start:end].decode('utf-8')\n else:\n self.comment = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) 
#most likely buffer underfill", "title": "" }, { "docid": "269ad2385039f3284ea6b4996f2e3a61", "score": "0.43626732", "text": "def value_decode(self, val):\n return val, val", "title": "" }, { "docid": "a610dc350d7ce5986b37d7b4fbd75034", "score": "0.4361953", "text": "def parse(string):\n if string.lower() in {\"true\", \"false\"}:\n parsed = loads(string.lower())\n else:\n try:\n parsed = literal_eval(string)\n except (ValueError, SyntaxError):\n parsed = string\n\n return parsed", "title": "" }, { "docid": "d1d8dad555057b28c68cc38ae67b0a63", "score": "0.4361493", "text": "def parse_bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('true', 't'):\n return True\n elif v.lower() in ('false', 'f'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')", "title": "" }, { "docid": "a58a7ecb9987369483b5eafa52b12257", "score": "0.43563613", "text": "def on_off_to_boolean(value: str) -> bool:\n logger.debug(\"converting value %s to bool\", repr(value))\n if not isinstance(value, str):\n raise ValueError(f\"The value {value.__repr__()} is not a string.\")\n if value.lower() in (\"on\", \"true\", \"yes\", \"1\"):\n return True\n if value.lower() in (\"off\", \"false\", \"no\", \"0\"):\n return False\n raise ValueError(f\"{value} is not a proper on/off value.\")", "title": "" }, { "docid": "9d901dedcd96f1d14c8d651e9f50b55c", "score": "0.4343158", "text": "def strtobool(val):\n val = val.lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return True\n if val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return False\n raise ValueError(\"invalid truth value {}\".format(val))", "title": "" }, { "docid": "4a1b8ee1f2c071aebe6e6f59314d8164", "score": "0.43402213", "text": "def canon_decode_tag(self, value, mn_tags):\r\n for i in range(1, len(value)):\r\n tag = mn_tags.get(i, ('Unknown', ))\r\n name = tag[0]\r\n if len(tag) > 1:\r\n val = tag[1].get(value[i], 'Unknown')\r\n else:\r\n val = value[i]\r\n try:\r\n logger.debug(\" %s %s %s\", i, name, hex(value[i]))\r\n except TypeError:\r\n logger.debug(\" %s %s %s\", i, name, value[i])\r\n \r\n # it's not a real IFD Tag but we fake one to make everybody\r\n # happy. this will have a \"proprietary\" type\r\n self.tags['MakerNote ' + name] = IfdTag(str(val), None, 0, None,\r\n None, None)", "title": "" }, { "docid": "31600e366fb9d9afa653ce1c0fe0a232", "score": "0.43375158", "text": "def _stepControlParser(self, usrStr):\n ref = {\n 'event' : ['e', '1'],\n 'time' : ['t', '0'],\n }\n usrStr = drawer.selectionParse(usrStr, ref)\n if usrStr == None:\n selStr = drawer.selectionParseKeyLabel(ref)\n raise error.ParameterObjectSyntaxError, \"bad step control. enter %s.\" % selStr\n return usrStr # may be None", "title": "" } ]
39ab20ecd199378318d85b3d7501b6ba
Cuts the selected text from the line edit. Copies the selected text to the clipboard then deletes the selected text from the line edit.
[ { "docid": "668e040d2ce6d90fca8f89da6da9aef5", "score": "0.7026711", "text": "def cut(self):\n self.widget.Cut()\n self._update_shell_selection_and_cursor()", "title": "" } ]
[ { "docid": "70e29e1fe2c3ae489f6f68a3d2a73b81", "score": "0.77484757", "text": "def editCut(self):\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.cut()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()\n self.nodeDelete()", "title": "" }, { "docid": "d3a703c8f01e076aaae299444f4631b9", "score": "0.73549324", "text": "def cut(self):\n if not self.__readOnly:\n byteArray = self.__toHex(self.__chunks.data(\n self.getSelectionBegin(), self.getSelectionLength()))\n idx = 32\n while idx < len(byteArray):\n byteArray.insert(idx, \"\\n\")\n idx += 33\n cb = QApplication.clipboard()\n cb.setText(byteArray.decode(encoding=\"latin1\"))\n if self.__overwriteMode:\n length = self.getSelectionLength()\n self.replaceByteArray(self.getSelectionBegin(), length,\n bytearray(length))\n else:\n self.remove(self.getSelectionBegin(),\n self.getSelectionLength())\n self.setCursorPosition(2 * self.getSelectionBegin())\n self.__resetSelection(2 * self.getSelectionBegin())", "title": "" }, { "docid": "f587a22cae252be69e644294c4b5af9d", "score": "0.72769296", "text": "def cut_copy_paste_del_sel_action(self, event):\n\n self.show_notebook_if_not_shown()\n\n text = self.FindFocus()\n if text is not None:\n if event.GetId() == wx.ID_CUT:\n text.Cut()\n elif event.GetId() == wx.ID_COPY:\n text.Copy()\n elif event.GetId() == wx.ID_PASTE:\n text.Paste()\n elif event.GetId() == wx.ID_DELETE:\n text.Clear()\n elif event.GetId() == wx.ID_SELECTALL:\n text.SelectAll()\n else:\n event.Skip()", "title": "" }, { "docid": "f4d8a0a99052e1b173b0f890541aa7d8", "score": "0.69763345", "text": "def delete(self, event):\n\t\tif self.inLastLine():\n\t\t\tif self.output.selectedText:\n\t\t\t\tself.doc.remove(self.output.selectionStart, self.output.selectionEnd - self.output.selectionStart)\n\t\t\telif self.output.caretPosition < self.doc.length:\n\t\t\t\tself.doc.remove(self.output.caretPosition, 1)", "title": "" }, { "docid": "357c071ea66340277dc592eb881d052c", "score": "0.6665124", "text": "def __cutAllChat(self):\n txt = self.chatEdit.toPlainText()\n if txt:\n cb = QApplication.clipboard()\n cb.setText(txt)\n self.chatEdit.clear()", "title": "" }, { "docid": "9f9b78b3b844eb66fee58446a86aa537", "score": "0.6519321", "text": "def cut(self):\n self.focus()\n self.dispatch('Cut')\n return self", "title": "" }, { "docid": "b88c6ab3fae96b1cbdddca137fe192aa", "score": "0.64730287", "text": "def cut_copy_paste_del_sel_event(self, event):\n\n if event.GetId() == wx.ID_CUT or wx.ID_COPY or wx.ID_PASTE or wx.ID_DELETE or wx.ID_SELECTALL:\n self.cut_copy_paste_del_sel_action(event)\n else:\n event.Skip()", "title": "" }, { "docid": "d0e4d3c1cca446c3e385de2aa40f8845", "score": "0.6434715", "text": "def backspace(self):\n widget = self.widget\n start, end = widget.GetSelection()\n if start == end:\n start = end - 1\n widget.Remove(start, end)\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "9314e1c5c247fb506127ee2b946c21cc", "score": "0.6387869", "text": "def on_delete(self, sender, arg=None):\n self.modify_selection(lambda *a: '')", "title": "" }, { "docid": "6ea0f4814b9220630a5e304e3d844287", "score": "0.631673", "text": "def on_cut(self, sender, arg=None):\n buf = self.get_active_buffer()\n if buf is not None:\n buf.cut_clipboard(sender, arg)", "title": "" }, { "docid": "ec83282171423517cb62c263266d3c70", "score": "0.6268679", "text": "def remove_selected(self, event):\n widget = event.widget\n widget.curselection()\n 
self.remove_selection()", "title": "" }, { "docid": "ec83282171423517cb62c263266d3c70", "score": "0.6268679", "text": "def remove_selected(self, event):\n widget = event.widget\n widget.curselection()\n self.remove_selection()", "title": "" }, { "docid": "c8c03e77eb3df05bfd9c5a34adde46f6", "score": "0.6196883", "text": "def clip(main: tk.Tk, cliptext: str):\n tk.Tk.clipboard_clear(main)\n tk.Tk.clipboard_append(main, cliptext)", "title": "" }, { "docid": "01d0662f33854b02e09a94e0845209a2", "score": "0.61712724", "text": "def delete_selection(self):\n try:\n text = self.get(Tk.SEL_FIRST, Tk.SEL_LAST)\n a, b = self.index(Tk.SEL_FIRST), self.index(Tk.SEL_LAST)\n self.delete(Tk.SEL_FIRST, Tk.SEL_LAST)\n # If there is no text left, flag the user as not typing\n if len(self.get_text()) == 0:\n self.set_typing(False)\n return True \n except Tk.TclError:\n return False", "title": "" }, { "docid": "4cab2a834ee1a522fa1f77f770143d4a", "score": "0.616116", "text": "def delete(self):\n widget = self.widget\n start, end = widget.GetSelection()\n if start == end:\n end = end + 1\n widget.Remove(start, end)\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "b589da17fa6a302964ab3cdaa5729c81", "score": "0.60711753", "text": "def deselect(self):\n widget = self.widget\n start, end = widget.GetSelection()\n widget.SetSelection(start, start)\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "25920d5dee63a09021b29c4060c364e6", "score": "0.60534936", "text": "def __cut(self):\n self.currentBrowser().cut()", "title": "" }, { "docid": "2f5697faf4cedfd5b0f711a3a830a3ec", "score": "0.604984", "text": "def copy(self):\n byteArray = self.__toHex(self.__chunks.data(\n self.getSelectionBegin(), self.getSelectionLength()))\n idx = 32\n while idx < len(byteArray):\n byteArray.insert(idx, \"\\n\")\n idx += 33\n cb = QApplication.clipboard()\n cb.setText(byteArray.decode(encoding=\"latin1\"))", "title": "" }, { "docid": "241ae3f45a698f92469bdd74ab19a380", "score": "0.6042844", "text": "def editCopy(self):\n splitter = self.activeWindow.rightTabs.currentWidget()\n if splitter == self.activeWindow.outputSplitter:\n for view in splitter.children():\n try:\n if view.hasSelectedText():\n view.copy()\n return\n except AttributeError:\n pass\n widget = QtGui.QApplication.focusWidget()\n try:\n if widget.hasSelectedText():\n widget.copy()\n return\n except AttributeError:\n pass\n self.currentSelectionModel().selectedNodes().copyTree()", "title": "" }, { "docid": "e44d95f7b3986a7a996eb79d056dba53", "score": "0.60193163", "text": "def get_selected_clipboard(self, textview):\n textbuffer = textview.get_buffer()\n start,end=textbuffer.get_selection_bounds()\n text = textbuffer.get_text(start, end)\n return (text,(start, end))", "title": "" }, { "docid": "2e8f8a610fe3604e523b20eaff7c2b57", "score": "0.60190094", "text": "def copy_and_paste_text(self):\n text = self.get_text()\n pyperclip.copy(text)\n keyboard.press(\"ctrl+v\")\n keyboard.release(\"ctrl+v\")\n self.typeout_ending_keystroke()", "title": "" }, { "docid": "017a98007d039074b4c4d7869a475c9a", "score": "0.59853566", "text": "def cancelCurrent(self):\n \n self._current_command.clear()\n i, j = self._locked_position\n m = self.lines()\n n = self.lineLength(m)\n self.setSelection(i, j + 1, m, n)\n self.removeSelectedText()", "title": "" }, { "docid": "5d48b52aff862f705760cf4190d42d35", "score": "0.5969466", "text": "def changeClip(self, evt=None):\n index = self.clipper.curselection()\n text = self.clipper.get(index)\n 
self.editor.clipboard_clear()\n self.editor.clipboard_append(text)\n return 'break'", "title": "" }, { "docid": "b629710eccab7f28c9f6b565333493ff", "score": "0.58971035", "text": "def Selection(self, event=None):\n self.text.tag_remove(SEL, \"1.0\", END)\n return", "title": "" }, { "docid": "34072545c770587f72f738293eda72bc", "score": "0.5879467", "text": "def copy_selection_to_clipboard(view):\n if VERBOSE:\n print('[guitool] Copying selection to clipboard')\n copy_str = guitool_misc.get_view_selection_as_str(view)\n #copy_qstr = QtCore.Q__String(copy_str)\n copy_qstr = str(copy_str)\n clipboard = guitool_main.get_qtapp().clipboard()\n if VERBOSE:\n print(copy_str)\n clipboard.setText(copy_qstr)\n if VERBOSE:\n print('[guitool] finished copy')", "title": "" }, { "docid": "e2fb73b5a649d61830a123815a3fde60", "score": "0.58751786", "text": "def select_line(self, line):\n cursor = self.textCursor()\n cursor.movePosition(QTextCursor.Start, QTextCursor.MoveAnchor, 1)\n cursor.movePosition(QTextCursor.Down, QTextCursor.MoveAnchor, line-1)\n self.setTextCursor(cursor)", "title": "" }, { "docid": "a0579c1efe6b8b1597d15a182fd76633", "score": "0.5852274", "text": "def clear_text(self):\n self.part_entry.delete(0, tk.END)\n self.customer_entry.delete(0, tk.END)\n self.retailer_entry.delete(0, tk.END)\n self.price_entry.delete(0, tk.END)\n self.parts_list.select_clear(0, tk.END)", "title": "" }, { "docid": "d380cd1fecf2d979fcbddd6eefcfec0e", "score": "0.58319163", "text": "def handlerTextLineNumbersDelete(self,event):\n \n def tempDef(event):\n \n global widthLineNumbers\n \n print '\\n** In handlerTextLineNumbersDelete'\n# print ' raw index =',eval(self.textMyCode.index(END))\n \n self.textLineNumbers['state']='normal'\n \n# first check is there is a selection-delete\n try:\n print 'Checking for \"Selection Delete\"...'\n indexFirstLine, indexFirstChar = self.textMyCode.index(SEL_FIRST).split('.')\n indexLastLine, indexLastChar = self.textMyCode.index(SEL_LAST).split('.')\n print '\\n SELECTION DELETE found:'\n print ' indexFirstLine, indexFirstChar =',indexFirstLine, indexFirstChar\n print ' indexLastLine, indexLastChar =', indexLastLine, indexLastChar\n# print ' WARNING: no selection-delete has been implemented yet.'\n# print ' delete operation halted.'\n \n diffLines = eval(indexLastLine) - eval(indexFirstLine)\n print ' Number of line numbers to selectively deleted =',diffLines\n \n for line in range(diffLines):\n indexBeginDelete = str(eval(self.textLineNumbers.index(END))-1)\n indexEndDelete = self.textLineNumbers.index(END)\n self.textLineNumbers.delete(indexBeginDelete,indexEndDelete)\n print ' .... 
deleted line',indexBeginDelete\n \n self.textLineNumbers['state']='disabled'\n \n return\n \n# no selection, so come here for single character delete \n except TclError: \n# these values are strings\n indexCurrentLine, indexCurrentChar = self.textMyCode.index(INSERT).split('.')\n print 'Current line number, char = ',indexCurrentLine, indexCurrentChar\n indexEnd = self.textMyCode.index(END)\n \n# these values are ints \n indexCurrentCharInt = int(eval(indexCurrentChar))\n if indexCurrentCharInt > 0:\n print ' Cursor is not at beginning of line.'\n print ' No line deletion yet, just character deletion.'\n return\n \n indexCurrentLineInt = int(eval(indexCurrentLine))\n if indexCurrentLineInt == 1 and indexCurrentCharInt == 0:\n print ' Cursor is at the beginning of line 1.'\n print ' No deletions allowed at this cursor location.'\n print\n self.textLineNumbers['state']='disabled'\n return\n \n indexBeginDelete = str(eval(self.textLineNumbers.index(END))-1)\n indexEndDelete= self.textLineNumbers.index(END)\n self.textLineNumbers.delete(indexBeginDelete,indexEndDelete)\n \n print ' Deleting from %s to %s in textLineNumbers' % (indexBeginDelete, indexEndDelete)\n \n# line numbers take up 'widthLineNumbers' spaces\n# End=str(eval(indexEnd))\n# self.textLineNumbers.delete(End) \n# newEnd = self.textMyCode.index(END)\n# self.textLineNumbers.delete(str(float(eval(newEnd))),str(float(eval(newEnd)+0.5)))\n \n self.textLineNumbers.delete(indexBeginDelete,indexEndDelete)\n \n \n# plus values scroll line numbers up; neg values scroll down \n# scrollLines = -5\n# self.textLineNumbers.yview_scroll(scrollLines,UNITS)\n# print ' Scroll line numbers in y direction:',scrollLines\n \n# make sure the current line we just moved to is visible\n var=self.textMyCode.index(INSERT)\n self.textLineNumbers.see(var)\n# self.textLineNumbers.see(var)\n# self.textMyCode.see(var)\n print ' \"SEE\" line number index:',str(var)\n print\n \n self.textLineNumbers['state']='disabled'\n \n return tempDef", "title": "" }, { "docid": "c9b09cd053c6724f9e9f5b5c09d78946", "score": "0.58143234", "text": "def deleteText(self, theLineNumber: int = 0):\n if len(self.getText()) > 0:\n oldLines: str = self.getText()\n splits: List[str] = oldLines.splitlines(True)\n self.logger.info(f'splits: {splits}')\n\n if len(splits) > theLineNumber:\n del splits[theLineNumber]\n newLines: str = ''.join(splits)\n self.logger.info(f'newLines: {newLines}')\n\n self.setText(newLines)", "title": "" }, { "docid": "e499f1a09139f1d68389c5b4b41d6d29", "score": "0.5806026", "text": "def _on_update_line(self, event):\n if len(self.content.curselection()) <= 0:\n return\n curindex = self.content.curselection()[0]\n self.content.delete(curindex)\n self.content.insert(curindex,\n self.current.get(\"1.0\", tk.END + \"-1c\"))\n self.content.selection_set(curindex)", "title": "" }, { "docid": "5d0224e15d0def464c3bb366b256762d", "score": "0.57853097", "text": "def clearSelection(self):\n pass", "title": "" }, { "docid": "5d0224e15d0def464c3bb366b256762d", "score": "0.57853097", "text": "def clearSelection(self):\n pass", "title": "" }, { "docid": "5d0224e15d0def464c3bb366b256762d", "score": "0.57853097", "text": "def clearSelection(self):\n pass", "title": "" }, { "docid": "4dfa12bbe130cc4b8773321241162942", "score": "0.5783061", "text": "def on_delete_text(self, start, end):", "title": "" }, { "docid": "fc28e3f693892019d129e31e8a422bcd", "score": "0.57799304", "text": "def delete_text(self, start, end):\n self._delete_text(start, end)\n 
self.dispatch_event('on_delete_text', start, end)", "title": "" }, { "docid": "eabb76fa81aa3558133b50366d36d18f", "score": "0.5673352", "text": "def copy_snippet(self):\n self.clipboard_clear()\n code = self.code_editor_text.get(0.1, tk.END)\n self.clipboard_append(code)", "title": "" }, { "docid": "175dacaba7c084b9b7a534b25993939b", "score": "0.56341463", "text": "def clear_text(self):\n self.text.config(state=\"normal\")\n self.text.delete(1.0, \"end-1c\")\n self.text.config(state=\"disabled\")", "title": "" }, { "docid": "ffd3e93c71aa7ea3c42a9dddcb2d9b35", "score": "0.56057733", "text": "def clearSelection(self):\n sel = self.widget.curselection()\n if len(sel):\n for j in sel: self.widget.selection_clear(j)", "title": "" }, { "docid": "3f2cdfd3c8943b252dd7fc77f88d22e2", "score": "0.56004155", "text": "def delete_text(self):\n # Only characters after the current safe_point will be deleted\n if self.safe_point:\n self.text.delete(self.safe_point, \"end\")\n else:\n self.text.delete(\"1.0\", \"end\")\n # Update character count label\n self.update_label_count(self.count_words())", "title": "" }, { "docid": "953f60140c3305d432070fd662027299", "score": "0.555631", "text": "def delete_text():\n\n # delete all text in the text box\n text_box.delete(\"1.0\", END)", "title": "" }, { "docid": "09b6c11c494feb8e5a7efaf24137736c", "score": "0.55504644", "text": "def on_clear_ont_select_clicked(self):\n self.ont_table_widget.clearSelection()", "title": "" }, { "docid": "7b086f36fac9e8ee18b91c65c740ba21", "score": "0.55485654", "text": "def remove_gap():\n selection = self.listbox.curselection()\n if len(selection) != 0:\n self.listbox.delete(selection[0])", "title": "" }, { "docid": "b1423f9cffb391ef54d37eb252b3d4df", "score": "0.5509195", "text": "def copy(self):\n self.widget.Copy()\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "f2960549ce8bb3ab830ec701cbce0e2b", "score": "0.55078423", "text": "def pasteToClipboard(self):\n text = self.copyToClipboard()\n self.central_widget.insertPlainText(text + '\\n')", "title": "" }, { "docid": "0358c9de3713a86a95a6de22ec3001e1", "score": "0.55073166", "text": "def key_backspace(self):\n # global current_line, current_num, saved_since_edit, text_entered, continue_up, continue_down\n self.continue_down = 0\n self.continue_up = 0\n self.saved_since_edit = False\n if not self.text_entered and len(self.current_line.text) > 4:\n self.text_entered = True\n\n if not self.current_line.text and self.current_line.number == self.lines.total:\n self.lines.add() # create emtpy line\n\n if not self.lines.db[self.current_num].text: # delete line if empty\n self.delete(self.current_num)\n self.text_entered = True\n return\n\n if (self.current_num - 1) in self.lines.db and \\\n self.lines.db[self.current_num].text and self.current_line.x == 6 and \\\n self.current_line.y == self.current_line.end_y: # end_y added to fix bug\n part1 = self.lines.db[self.current_num - 1].text\n part2 = self.lines.db[self.current_num].text\n self.combine_lines(self.current_num, part1, part2)\n self.text_entered = True\n return\n\n old_number_of_rows = self.current_line.number_of_rows\n temp_list = self.current_line.listing\n\n if self.current_line.y == 0 and self.current_line.x == self.current_line.end_x: # delete last character on line\n del temp_list[-1]\n else:\n position = self.row_size * (\n self.current_line.number_of_rows - 1 - abs(self.current_line.y)) + self.current_line.x - 6\n try:\n if position <= self.current_line.indentation and \\\n 
self.current_line.text[position - 3:position + 1] and \\\n self.current_line.indentation / 4.0 == int(self.current_line.indentation / 4.0): # delete tab\n del temp_list[position - 4:position]\n self.current_line.x -= 3 # move cursor position 3 spaces, final one below\n else:\n del temp_list[position - 1] # delete position\n except BareException:\n del temp_list[position - 1] # delete position\n\n temp_string = \"\"\n for item in temp_list:\n temp_string += item\n self.current_line.text = temp_string\n self.current_line.x -= 1\n if self.config[\"syntax_highlighting\"]:\n self.current_line.add_syntax()\n if old_number_of_rows != self.current_line.number_of_rows:\n self.current_line.y += 1\n if self.current_line.number_of_rows == 1 and self.current_line.x == 6:\n self.current_line.x = self.current_line.end_x", "title": "" }, { "docid": "69612b34ecb56c3cd8ef10a5857064c4", "score": "0.5503404", "text": "def undo(self):\n self.widget.Undo()\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "c01e77965a006f15312a373b34bbeaad", "score": "0.54982555", "text": "def paste(self):\n self.widget.Paste()\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "a36941f0dff5fa244a35d90e5b4facab", "score": "0.5493156", "text": "def selectPreviousLine(self):\n pos = self.__cursorPosition - 2 * self.BYTES_PER_LINE\n self.setCursorPosition(pos)\n self.__setSelection(pos)", "title": "" }, { "docid": "bf47ec0c14546ae7c3e4b48de08d2604", "score": "0.5491694", "text": "def copy_event(self, event=None):\r\n self.clipboard_clear()\r\n c = self.select_item()\r\n self.clipboard_append(c)\r\n return", "title": "" }, { "docid": "ca3ad128f0dfd711b5b3ee5d55c90a6d", "score": "0.54726475", "text": "def on_selection_changed(self, event):\n event.Skip()\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "8fed4db8a0e569eabe065ed5f5e934f9", "score": "0.5464717", "text": "def _activate_cut_line(self, pos: QPointF):\n self.selection_method = 1\n self.cut_line.setValue(pos.x())\n self._selection_method_changed()", "title": "" }, { "docid": "f6f40d571fad8d08f4e904566b8d9fe4", "score": "0.5438832", "text": "def copySelection(self):\n items = self.get_selected_item_ids(False)\n if items is not None:\n cb = QtGui.QApplication.clipboard()\n text = self.controller.copy_modules_and_connections(items[0],items[1])\n cb.setText(text)", "title": "" }, { "docid": "72f68ab546991a968ed8516a4366fc75", "score": "0.543217", "text": "def copy(self) -> None:\n self.parent.clipboard_clear()\n self.parent.clipboard_append(APP.FREEZE)", "title": "" }, { "docid": "17df380aea9b598fbbbee500fc7297b1", "score": "0.54298234", "text": "def delete_line(self, index=None):\n if self.content.size() <= 0:\n return\n if index is None:\n # Delete the last line\n self.content.delete(self.content.size() - 1)\n else:\n if 0 <= index < self.content.size():\n self.content.delete(index)\n if self.content.size() <= 0:\n self.content['state'] = tk.DISABLED", "title": "" }, { "docid": "3982a1adf5004dc46000a51c3ff16ef5", "score": "0.5423273", "text": "def __copyAllChat(self):\n txt = self.chatEdit.toPlainText()\n if txt:\n cb = QApplication.clipboard()\n cb.setText(txt)", "title": "" }, { "docid": "79086c48af7d8d63b5d0d865069cb405", "score": "0.5420967", "text": "def selectToEndOfLine(self):\n pos = (\n self.__cursorPosition -\n (self.__cursorPosition % (2 * self.BYTES_PER_LINE)) +\n 2 * self.BYTES_PER_LINE\n )\n self.setCursorPosition(pos)\n self.__setSelection(pos)", "title": "" }, { "docid": 
"3591d2391a5965e9f7b391d1542ba1ec", "score": "0.54199934", "text": "def backspace():\r\n entry_text_area.delete(\"end-2c\", tkinter.END)", "title": "" }, { "docid": "74a7e62cf4a6bc0645b79139ef90a938", "score": "0.54191333", "text": "def __onCopy(self, ev):\n text = self.__textEdit.GetValue()\n\n cb = wx.TheClipboard\n\n if cb.Open():\n cb.SetData(wx.TextDataObject(text))\n cb.Close()\n\n if self.__showCopyMessage:\n td = TimeoutDialog(self, 'Copied!', 1000)\n td.Show()", "title": "" }, { "docid": "92b5d0447b5795e7a68a0b8baaf38ec0", "score": "0.5405073", "text": "def Paste(self, event=None):\n text = self.root.clipboard_get()\n row, col = self.convert(self.text.index(self.text.marker.mark))\n self.push_queue_put( MSG_INSERT(self.text.marker.id, text, row, col), wait=True )\n return \"break\"", "title": "" }, { "docid": "85b91cd76e0524dd698f0997b9a1cf04", "score": "0.5386678", "text": "def on_clear_utility_selection_clicked(self):\n self.assets_table_widget.clearSelection()", "title": "" }, { "docid": "5142cde6906dded108a26275245d928e", "score": "0.5364991", "text": "def deleteByteBack(self):\n if not self.__readOnly:\n if self.hasSelection():\n self.__bPosCurrent = self.getSelectionBegin()\n self.setCursorPosition(2 * self.__bPosCurrent)\n if self.__overwriteMode:\n byteArray = bytearray(self.getSelectionLength())\n self.replaceByteArray(self.__bPosCurrent, len(byteArray),\n byteArray)\n else:\n self.remove(self.__bPosCurrent,\n self.getSelectionLength())\n else:\n self.__bPosCurrent -= 1\n if self.__overwriteMode:\n self.replace(self.__bPosCurrent, 0)\n else:\n self.remove(self.__bPosCurrent, 1)\n self.setCursorPosition(2 * self.__bPosCurrent)\n self.__resetSelection(2 * self.__bPosCurrent)", "title": "" }, { "docid": "26a60ce210faeb4aeb219d4960998fbf", "score": "0.5361135", "text": "def cursor(self, selection):\n # Remove the old selection\n self._lines[self._current_cursor].data['cursor'] = False\n # Assign and mark the current selection\n self._current_cursor = selection\n if self._current_cursor < 0:\n self._current_cursor = 0\n elif self._current_cursor > len(self._lines) - 1:\n self._current_cursor = len(self._lines) - 1\n self._lines[self._current_cursor].data['cursor'] = True", "title": "" }, { "docid": "916bd62a05598a8c45d7ad46e7fc16c6", "score": "0.5355053", "text": "def selected_content(self):\n content = u'\\n'.join([self.view.substr(region) for region in self.view.sel()])\n if not content:\n content = self.view.substr(sublime.Region(0, self.view.size()))\n return content", "title": "" }, { "docid": "8a6bc89941efaa73e14d9c9b41ad481d", "score": "0.5344886", "text": "def handlerTempDelete(self):\n print '** In handlerTempDelete'\n self.textMyCode.insert(END,'This is the end line')\n raw_input()\n self.textMyCode.delete(END)\n self.textLineNumbers.delete(END)", "title": "" }, { "docid": "926d4e66eb8c9628b973562a71184d1d", "score": "0.53406227", "text": "def set_text(self, text: str) -> None:\n self.clipboard.set_text(text, -1)\n self.clipboard.store()", "title": "" }, { "docid": "e4ba1dbed14f046e1c9a9ad0964a9dda", "score": "0.5337363", "text": "def clear_line(self, y_pos: int) -> None:\n (min_x, _, _, _) = self.get_chrome_boundaries()\n (_, max_x) = self.stdscr.getmaxyx()\n chars_to_delete = range(min_x, max_x)\n # we go in the **reverse** order since the original documentation\n # of delchar (http://dell9.ma.utexas.edu/cgi-bin/man-cgi?delch+3)\n # mentions that delchar actually moves all the characters to the right\n # of the cursor\n for x_pos in reversed(chars_to_delete):\n 
self.stdscr.delch(y_pos, x_pos)", "title": "" }, { "docid": "de68fe5d391ef9bc0ab5fc0b5304ed4b", "score": "0.5331662", "text": "def remove_formatting(self, event):\n self.current_tab.text.delete('1.0', 'end-1c')\n self.current_tab.text.insert(tk.END, self.current_tab.raw)\n self.current_tab.highlighted_text_list = {}\n # Erase saved text list.", "title": "" }, { "docid": "54c39cc147f4022e2d88cec3bf0460a0", "score": "0.5304579", "text": "def change_selection(self, p: Position) -> bool:\n c, u = self.c, self.c.undoer\n wrapper = c.frame.body and c.frame.body.wrapper\n gui_w = c.edit_widget(p) if self.in_headline else wrapper\n if not gui_w: # pragma: no cover\n self.in_headline = False\n gui_w = wrapper\n if not gui_w: # pragma: no cover\n return False\n oldSel = sel = gui_w.getSelectionRange()\n start, end = sel\n if start > end: # pragma: no cover\n start, end = end, start\n if start == end: # pragma: no cover\n g.es(\"no text selected\")\n return False\n bunch = u.beforeChangeBody(p)\n start, end = oldSel\n change_text = self.change_text\n # Perform regex substitutions of \\1, \\2, ...\\9 in the change text.\n if self.pattern_match and self.match_obj:\n groups = self.match_obj.groups()\n if groups:\n change_text = self.make_regex_subs(change_text, groups)\n change_text = self.replace_back_slashes(change_text)\n # Update both the gui widget and the work \"widget\"\n new_ins = start if self.reverse else start + len(change_text)\n if start != end:\n gui_w.delete(start, end)\n gui_w.insert(start, change_text)\n gui_w.setInsertPoint(new_ins)\n self.work_s = gui_w.getAllText() # #2220.\n self.work_sel = (new_ins, new_ins, new_ins)\n # Update the selection for the next match.\n gui_w.setSelectionRange(start, start + len(change_text))\n c.widgetWantsFocus(gui_w)\n # No redraws here: they would destroy the headline selection.\n if self.in_headline:\n # #2220: Let onHeadChanged handle undo, etc.\n c.frame.tree.onHeadChanged(p, undoType='Change Headline')\n # gui_w will change after a redraw.\n gui_w = c.edit_widget(p)\n if gui_w:\n # find-next and find-prev work regardless of insert point.\n gui_w.setSelectionRange(start, start + len(change_text))\n else:\n p.v.b = gui_w.getAllText()\n u.afterChangeBody(p, 'Change Body', bunch)\n\n if self.mark_changes and not p.isMarked(): # pragma: no cover\n undoType = 'Mark Changes'\n bunch = u.beforeMark(p, undoType)\n p.setMarked()\n p.setDirty()\n u.afterMark(p, undoType, bunch)\n return True", "title": "" }, { "docid": "861910793314adbf43378df9e5dec524", "score": "0.52963895", "text": "def cut(request, id):\n obj = lfc.utils.get_content_object(pk=id)\n obj.check_permission(request.user, \"delete\")\n\n request.session[\"clipboard\"] = [id]\n request.session[\"clipboard_action\"] = CUT\n\n obj = lfc.utils.get_content_object(pk=id)\n\n html = (\n (\"#menu\", object_menu(request, obj)),\n )\n\n return return_as_json(html, _(u\"The object has been put to the clipboard.\"))", "title": "" }, { "docid": "39a158dae8c44a182f329249e36188d1", "score": "0.5289882", "text": "def clear(self):\n self.delete(1.0, Tk.END)\n self.edit_reset()\n self.set_typing(False)\n return", "title": "" }, { "docid": "16227b3d289207f58bb24a8373d6c5ff", "score": "0.52744144", "text": "def _copyToClip(self):\n if self.path is not None:\n self.clipboard_clear()\n self.clipboard_append(self.path)", "title": "" }, { "docid": "2f9732f373d5ea05575be08dfd102d0a", "score": "0.52550274", "text": "def popout(self):\n if len(self.show_selection) > 0:\n self.show_selection.pop()", "title": "" }, { 
"docid": "4205064881163876f47984f0954fae4f", "score": "0.5249084", "text": "def on_actionCut_activated(self):\n # TODO: not implemented yet\n print \"Cut selection\"", "title": "" }, { "docid": "097cccbffa604051e24049c25232fccc", "score": "0.5246048", "text": "def remove_hpo(e):\r\n if hpo_selected.curselection():\r\n deletion = hpo_selected.curselection()\r\n hpo_selected.delete(deletion)\r\n else:\r\n pass", "title": "" }, { "docid": "0059b2f073a04ba5ac5a52b12bcb8c62", "score": "0.52445555", "text": "def editPaste(self):\n if self.activeWindow.treeView.hasFocus():\n if (self.currentSelectionModel().selectedNodes().\n pasteMimeData(QtGui.QApplication.clipboard().mimeData())):\n for node in self.currentSelectionModel().selectedNodes():\n node.expandInView()\n self.updateAll()\n else:\n widget = QtGui.QApplication.focusWidget()\n try:\n widget.paste()\n except AttributeError:\n pass", "title": "" }, { "docid": "67bc0d8d74d8ac024904387cbff05d80", "score": "0.5240026", "text": "def delete_current_point(self):\n if self._selected_point is not None:\n self.curve.remove_cv(self._selected_point)\n self._selected_point = None\n self._drag_point = None\n self.update()", "title": "" }, { "docid": "791eb4ab27cc2399e5bf9f8801d44687", "score": "0.522778", "text": "def _update_shell_selection_and_cursor(self):\n # This isn't nearly as nice as Qt. We can only update the \n # selection due to a left up event because wx doesn't provide\n # any better/more relevant events. This means this function \n # is just call whenver any change in the control *may* have\n # caused these things to update. We are doing way more work\n # here that we should be needing to, but the only way around\n # this that I see is to implement our own control from scratch.\n with guard(self, 'updating_selection'):\n self.shell_obj.selected_text = self.widget.GetStringSelection()\n self.shell_obj.cursor_position = self.widget.GetInsertionPoint()", "title": "" }, { "docid": "ddb065f3f76df536d7893f95ca41d5f3", "score": "0.5226161", "text": "def on_selection_modified(self, view):\r\n\r\n constraints = ONLY_PYTHON | NOT_SCRATCH | LINTING_ENABLED\r\n if (not check_linting(view, constraints)\r\n or not 'Python' in view.settings().get('syntax')):\r\n return\r\n\r\n last_selected_line = last_selected_lineno(view)\r\n\r\n if last_selected_line != self.last_selected_line:\r\n self.last_selected_line = last_selected_line\r\n update_statusbar(view)", "title": "" }, { "docid": "fd7fa374ef64b3cbad736e850e1a7908", "score": "0.5225075", "text": "def copySelection(self):\n\n selection = self.selectedIndexes()\n\n if selection:\n rows = [index.row() for index in selection]\n columns = [index.column() for index in selection]\n rowCount = max(rows) - min(rows) + 1\n colCount = max(columns) - min(columns) + 1\n table = [[\"\"] * colCount for _ in range(rowCount)]\n\n for index in selection:\n row = index.row() - min(rows)\n column = index.column() - min(columns)\n table[row][column] = index.data()\n\n stream = io.StringIO()\n csv.writer(stream, delimiter=\"\\t\").writerows(table)\n QApplication.clipboard().setText(stream.getvalue())", "title": "" }, { "docid": "0ef33c14b12402b9a9a2ff58050fd498", "score": "0.5218399", "text": "def _press_tab(self, event):\n if self.text.index(tk.SEL_FIRST) == \"None\" or self.text.index(tk.SEL_LAST) == \"None\":\n # prevent indenting anything if there is no current selection\n return\n # remember position of old text\n selected_text = self.text.get(tk.SEL_FIRST, tk.SEL_LAST)\n # create replacement\n selection_replacement = 
\"\"\n for line in selected_text.splitlines():\n selection_replacement += \"\\t\" + line + \"\\n\"\n\n self.__replace_selection_by_text_and_select_it(selection_replacement)\n return \"break\"", "title": "" }, { "docid": "ef744fbda68afefc07954cdd9e3d6ca1", "score": "0.5212176", "text": "def selectToStartOfLine(self):\n pos = (\n self.__cursorPosition -\n (self.__cursorPosition % (2 * self.BYTES_PER_LINE))\n )\n self.setCursorPosition(pos)\n self.__setSelection(pos)", "title": "" }, { "docid": "d5f8156f035860e8c99f163a6020fb15", "score": "0.52048373", "text": "def line_select_callback(self, eclick, erelease):\n x1, y1 = eclick.xdata, eclick.ydata\n x2, y2 = erelease.xdata, erelease.ydata\n\n if self.currentTagText.text().strip() == \"\":\n msgBox = QMessageBox()\n msgBox.setIcon(msgBox.Warning)\n msgBox.setText(\"Warning: You didn't give a tag ID for this annotation.\")\n msgBox.exec_()\n self.currentTagText.setStyleSheet(\"QLineEdit {background-color: rgb(255, 0, 0)}\")\n return\n else:\n self.currentTagText.setStyleSheet(\"\")\n \n rect = plt.Rectangle((min(x1,x2),min(y1,y2)), np.abs(x1-x2), np.abs(y1-y2), fill=False, edgecolor=\"red\")\n self.ax.add_patch(rect)\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n self.annotationLineText.setText(self.annotationLineText.text()+str(int(x1))+\" \"+str(int(y1))+\\\n \" \"+str(int(x2))+\" \"+str(int(y2))+\" \"+str(self.currentTagText.text())+\" \")\n\n self.canvas.draw()", "title": "" }, { "docid": "65c4b34cede96e11d40d2622da56d6a6", "score": "0.5184446", "text": "def deleteByte(self):\n if not self.__readOnly:\n if self.hasSelection():\n self.__bPosCurrent = self.getSelectionBegin()\n if self.__overwriteMode:\n byteArray = bytearray(self.getSelectionLength())\n self.replaceByteArray(self.__bPosCurrent, len(byteArray),\n byteArray)\n else:\n self.remove(self.__bPosCurrent,\n self.getSelectionLength())\n else:\n if self.__overwriteMode:\n self.replace(self.__bPosCurrent, 0)\n else:\n self.remove(self.__bPosCurrent, 1)\n self.setCursorPosition(2 * self.__bPosCurrent)\n self.__resetSelection(2 * self.__bPosCurrent)", "title": "" }, { "docid": "bde7f4a64efa2ea3f9b3d2007f59e37f", "score": "0.51586", "text": "def clearKey(self, staySelected=True):\n self.setText(KeyLineEdit.blankText)\n if staySelected:\n self.selectAll()\n if self.key:\n KeyLineEdit.usedKeySet.remove(self.key.toString(QKeySequence.\n NativeText))\n self.key = None\n self.modified = True", "title": "" }, { "docid": "804462a0b6e4bfc8b27f8a6c363ed8c8", "score": "0.5158067", "text": "def remove_selection(self):\n selection = self.list_on.curselection()\n if len(selection) != 0:\n for selected in selection:\n items_ids = self.list_on.get(selected)\n self.list_off.insert(0, items_ids)\n self.list_on.delete(selected)", "title": "" }, { "docid": "804462a0b6e4bfc8b27f8a6c363ed8c8", "score": "0.5158067", "text": "def remove_selection(self):\n selection = self.list_on.curselection()\n if len(selection) != 0:\n for selected in selection:\n items_ids = self.list_on.get(selected)\n self.list_off.insert(0, items_ids)\n self.list_on.delete(selected)", "title": "" }, { "docid": "52af5979fbae7c912d532ffffcfcdfea", "score": "0.514943", "text": "def selectPreviousChar(self):\n pos = self.__cursorPosition - 1\n self.setCursorPosition(pos)\n self.__setSelection(pos)", "title": "" }, { "docid": "55ae82a7a6534f7adebb77a3c097ef8d", "score": "0.5148357", "text": "def delete_selected(self, ):\n sl = self.treeview.get_selection()\n if sl.get_mode() == gtk.SELECTION_SINGLE:\n (model, it) = 
sl.get_selected()\n if it <> None:\n model.remove(it)\n elif sl.get_mode() == gtk.SELECTION_MULTIPLE:\n (model, its) = sl.get_selected_rows()\n if len(its) > 0:\n for it in its:\n model.remove(it)", "title": "" }, { "docid": "dad2ba88d6b29af529c79e891c145be8", "score": "0.51277727", "text": "def cancel_new_line(self, *args):\n self.new_line_prompt.text = \"\"\n self.new_line_dropdown.options = []\n self.new_line_textbox.value = None\n self.set_currently_identifying(False)", "title": "" }, { "docid": "f5d24a603de037437b1a0145983d2805", "score": "0.51218015", "text": "def paste(self):\n if not self.__readOnly:\n cb = QApplication.clipboard()\n byteArray = self.__fromHex(cb.text().encode(encoding=\"latin1\"))\n if self.__overwriteMode:\n self.replaceByteArray(self.__bPosCurrent, len(byteArray),\n byteArray)\n else:\n self.insertByteArray(self.__bPosCurrent, byteArray)\n self.setCursorPosition(\n self.__cursorPosition + 2 * len(byteArray))\n self.__resetSelection(2 * self.getSelectionBegin())", "title": "" }, { "docid": "079c725385785baa35079555580169f0", "score": "0.51193047", "text": "def on_clear_collect_select_clicked(self):\n self.ont_collection_table_widget.clearSelection()", "title": "" }, { "docid": "b3e6b71b8017153bb3a4e471829cd87d", "score": "0.511882", "text": "def undo(self):\n # TODO: Change this so things don't get duplicated\n if self.base_edit is None:\n self.edits.append(self.curr_text)\n self.edits.rotate(1)\n if self.prev_edit is not None:\n self.edits.rotate(1)\n if self.prev_edit is not None:\n self.curr_text = self.prev_edit\n elif self.prev_edit is None:\n print(\"Nothing to undo\")", "title": "" }, { "docid": "3b5a5630ea2c7ca465f99798fe8b9083", "score": "0.5118632", "text": "def _copy_content(self, column: int):\n cb = qt.QtWidgets.QApplication.clipboard()\n cb.clear(mode=cb.Mode.Clipboard)\n if column == 0:\n cb.setText(self.tree_widget.currentItem().data(0, 0),\n mode=cb.Mode.Clipboard)\n if column == 1:\n cb.setText(self.tree_widget.currentItem().data(1, 0),\n mode=cb.Mode.Clipboard)", "title": "" }, { "docid": "d7fa6068f7719d305da2f932e40d1d61", "score": "0.51035744", "text": "def new_file(textarea):\n textarea.delete(1.0, END)", "title": "" }, { "docid": "a6d8b61d20f9d255e75a80549db3108b", "score": "0.50957763", "text": "def end(self, mark=False):\n widget = self.widget\n if mark:\n start = widget.GetInsertionPoint()\n end = widget.GetLastPosition()\n widget.SetSelection(start, end)\n else:\n widget.SetInsertionPointEnd()\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "a86388b36b503983e7832192817d3e90", "score": "0.5089306", "text": "def SelectDown(self, event):\n row1, col1 = self.text.index(self.text.marker.mark).split(\".\")\n row1, col1 = int(row1), int(col1)\n row2, col2 = self.Down(row1, col1)\n \n self.UpdateSelect(row1, col1, row2, col2)\n return \"break\"", "title": "" }, { "docid": "eb521df9edac2c0cf736e6bc1c03aa9a", "score": "0.50793755", "text": "def _select_all_line(self, event):\n event.widget.select_range(0, tk.END)\n return \"break\"", "title": "" }, { "docid": "3e7a7c856daf86dd844e07cb8fb7f305", "score": "0.5070793", "text": "def clear(self):\n self.widget.Clear()\n self._update_shell_selection_and_cursor()", "title": "" }, { "docid": "f25eb43a694698ce2b89e1820b903f64", "score": "0.5060187", "text": "def texCutContext(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "3af078598230bb93741776d52ef31755", "score": "0.504937", "text": "def __lineComboChanged(self):\n if self.__lineCombo.itemText(0) == \"\":\n 
self.__lineCombo.removeItem(0)", "title": "" } ]
6272898a2a26e5319b121f4f311870f2
This method configures the Serial Port option in the BIOS Policy.
[ { "docid": "5d1cc36694c82efaef1184495a6799c0", "score": "0.6220808", "text": "def bios_conf_serial_port_a(handle, name, parent_org_dn,\n vp_serial_port_a_enable=\"platform-default\"):\n\n from ucsmsdk.mometa.bios.BiosVfSerialPortAEnable import \\\n BiosVfSerialPortAEnable\n\n profile_dn = parent_org_dn + \"/bios-prof-\" + name\n obj = handle.query_dn(profile_dn)\n if obj:\n mo = BiosVfSerialPortAEnable(\n parent_mo_or_dn=obj,\n vp_serial_port_a_enable=vp_serial_port_a_enable)\n handle.add_mo(mo, True)\n handle.commit()\n return mo\n else:\n raise ValueError(\"Bios policy '%s' not found.\" % profile_dn)", "title": "" } ]
[ { "docid": "c64bd2d416d7d52c6d562b64a9da0c3b", "score": "0.67158395", "text": "def _update_serial_port(self):", "title": "" }, { "docid": "d27573773a5effb1fc3f30e4f1001f6e", "score": "0.67053586", "text": "def __init__(self, serialPort):\n self.__serialPort = serialPort", "title": "" }, { "docid": "f69e9f3137aa2eb379249aef0149e9eb", "score": "0.65555584", "text": "def init_communication(self):\n if self.serial_port is not None and \\\n self.serial_port.port == self.serial_port_name: return\n if self.serial_port_name.startswith(\"COM\") or \\\n self.serial_port_name.startswith(\"/dev/\"):\n # Assume local port\n from serial import Serial\n else: from EPICS_serial_CA import Serial\n try: self.serial_port = Serial(self.serial_port_name)\n except Exception,msg:\n self.log_error(\"serial port %s: %s\" % (self.serial_port_name,msg))\n return\n self.serial_port.baudrate = 9600\n self.serial_port.bytesize = 7\n self.serial_port.parity = \"O\"\n self.serial_port.stopbits = 1\n self.serial_port.rtscts = 0 # Hardware flow control: off\n self.serial_port.xonxoff = 0 # Software flow control: off\n self.serial_port.dsrdtr = None # Modem handshake: off\n self.serial_port.timeout = 0.1", "title": "" }, { "docid": "1eb6ef3d733bfdef0580510d52207af5", "score": "0.64059436", "text": "def __init__(self, port: str, baudrate: int=9600):\n # The port name is the name shown in control panel\n # And the baudrate is the communication setting, default value of HC-05 is 9600.\n self.ser = serial.Serial(port, baudrate=baudrate)", "title": "" }, { "docid": "c903cfb1b92df2a7f77c933de430f363", "score": "0.63992053", "text": "def __init__(self, port):\n try:\n import serial\n s = serial.Serial(port, 9600, timeout=1)\n time.sleep(1.5) # need to give serial time to actually connect\n self.serial = s\n except Exception as exc:\n raise ControllerError(f'Failed to start serial on {port}')", "title": "" }, { "docid": "d8c0756cf02b916a64517588f25cbc65", "score": "0.6302897", "text": "def _init_serial(self, port, data_rate):\n # Serial port: /dev/ttyACM0\n # The Raspberry Pi may not provide enough power to drive an Arduino, so you might need external power.\n self.ser = serial.Serial()\n self.ser.baudrate = data_rate\n self.ser.timeout = 1\n\n if port!=None:\n self.ser.port = port\n else:\n port = \"/dev/ttyACM0\"\n print_msg(self.name, \"Automatically find serial port\")\n for i in range(20):\n try:\n port_auto_find = port[:-1] + str(i)\n self.ser.port = port_auto_find\n print_msg(self.name, \"Finding port at %s\"%port_auto_find)\n self.ser.open()\n\n return\n except serial.SerialException as e:\n print e.message\n continue\n\n print_msg(self.name, \"Automatically find port fails. 
Try to reboot the linux OS\")\n sys.exit(-1)", "title": "" }, { "docid": "c715982671fecee7af17bcc0af9891d4", "score": "0.6291694", "text": "def __init__(self, port, baudrate=115200):\n\n self.serial = serial.Serial(port, baudrate)", "title": "" }, { "docid": "8c64201ec10025611f34a1d4b47e7115", "score": "0.62514937", "text": "def __init__(self):\n self.portname = find_port()\n if self.portname:\n self.comport = serial.Serial(port=self.portname, baudrate=9600)\n else:\n print('No serial port found')", "title": "" }, { "docid": "8b2c0a0f61347097c22dfc7a5202bae9", "score": "0.62126225", "text": "def _serial_setup(self, port, baud, byte, parity, stop, timeout_sec, \n skip_pass, boot_prompt, reset_cb):\n self._logger.debug(\"(re)opening serial port %s\" % port)\n ser = serial_conn.SerialConn(\n login = self._login, skip_pass = skip_pass, \n boot_prompt = boot_prompt,\n reset_cb = reset_cb)\n ser.port = port\n ser.baudrate = baud\n ser.bytesize = byte\n ser.parity = parity\n ser.stopbits = stop\n ser.timeout = timeout_sec\n ser.open()\n self._logger.debug(\"%s is now open.\" % port)\n return ser", "title": "" }, { "docid": "a839adf7f1218831a4e5505507bfcf40", "score": "0.62073076", "text": "def _reconfigure_port(self):\n msg = bytearray(\n [\n self.baudrate & 0xFF,\n self.baudrate >> 8 & 0xFF,\n self.baudrate >> 16 & 0xFF,\n self.baudrate >> 24 & 0xFF,\n self.STOPBIT_MAP[self.stopbits],\n self.PARITY_MAP[self.parity],\n self.bytesize,\n ]\n )\n # Set line coding.\n self._ctrl_transfer_out(self.SET_LINE_CODING, 0, msg)", "title": "" }, { "docid": "bd3254bb677216ec2b00ff7d85ce2e11", "score": "0.6176299", "text": "def serial_port(self):\n raise NotImplementedError()", "title": "" }, { "docid": "d23d981f8a9590a3457a2f78903f0fff", "score": "0.6171368", "text": "def __init__(self, com_port='/dev/ttyACM0', speed=115200):\n self.my_serial = serial.Serial(com_port, speed, timeout=1, writeTimeout=1)", "title": "" }, { "docid": "b3104db48c81746cbdda13a8caa5e557", "score": "0.6168958", "text": "def _reconfigurePort(self):\n if not self.sPort:\n raise SerialException(\"Can only operate on a valid port handle\")\n\n self.sPort.enableReceiveTimeout(30)\n if self._bytesize == FIVEBITS:\n jdatabits = comm.SerialPort.DATABITS_5\n elif self._bytesize == SIXBITS:\n jdatabits = comm.SerialPort.DATABITS_6\n elif self._bytesize == SEVENBITS:\n jdatabits = comm.SerialPort.DATABITS_7\n elif self._bytesize == EIGHTBITS:\n jdatabits = comm.SerialPort.DATABITS_8\n else:\n raise ValueError(\"unsupported bytesize: %r\" % self._bytesize)\n\n if self._stopbits == STOPBITS_ONE:\n jstopbits = comm.SerialPort.STOPBITS_1\n elif stopbits == STOPBITS_ONE_POINT_FIVE:\n self._jstopbits = comm.SerialPort.STOPBITS_1_5\n elif self._stopbits == STOPBITS_TWO:\n jstopbits = comm.SerialPort.STOPBITS_2\n else:\n raise ValueError(\"unsupported number of stopbits: %r\" % self._stopbits)\n\n if self._parity == PARITY_NONE:\n jparity = comm.SerialPort.PARITY_NONE\n elif self._parity == PARITY_EVEN:\n jparity = comm.SerialPort.PARITY_EVEN\n elif self._parity == PARITY_ODD:\n jparity = comm.SerialPort.PARITY_ODD\n elif self._parity == PARITY_MARK:\n jparity = comm.SerialPort.PARITY_MARK\n elif self._parity == PARITY_SPACE:\n jparity = comm.SerialPort.PARITY_SPACE\n else:\n raise ValueError(\"unsupported parity type: %r\" % self._parity)\n\n jflowin = jflowout = 0\n if self._rtscts:\n jflowin |= comm.SerialPort.FLOWCONTROL_RTSCTS_IN\n jflowout |= comm.SerialPort.FLOWCONTROL_RTSCTS_OUT\n if self._xonxoff:\n jflowin |= 
comm.SerialPort.FLOWCONTROL_XONXOFF_IN\n jflowout |= comm.SerialPort.FLOWCONTROL_XONXOFF_OUT\n\n self.sPort.setSerialPortParams(self._baudrate, jdatabits, jstopbits, jparity)\n self.sPort.setFlowControlMode(jflowin | jflowout)\n\n if self._timeout >= 0:\n self.sPort.enableReceiveTimeout(self._timeout*1000)\n else:\n self.sPort.disableReceiveTimeout()", "title": "" }, { "docid": "4deb8b7f55914e51b73e42722e126268", "score": "0.6110167", "text": "def __init__(self, port): \n\t\tself.S=ser.Serial(port)\n\t\tself.S.setRTS(False)\n\t\tself.S.setDTR(False)\n\t\treturn None", "title": "" }, { "docid": "5689a010915c8833b1004b26becde087", "score": "0.6061457", "text": "def set_serial(self, serial):\r\n self.set_byte(7, serial[0])\r\n self.set_byte(79, serial[1])", "title": "" }, { "docid": "146ee03f6e37838adfa9aa7639364e13", "score": "0.604953", "text": "def serial(self, serial):\n\n self._serial = serial", "title": "" }, { "docid": "146ee03f6e37838adfa9aa7639364e13", "score": "0.604953", "text": "def serial(self, serial):\n\n self._serial = serial", "title": "" }, { "docid": "8863f767c60083981c4bf7786c82578f", "score": "0.6044094", "text": "def set_pypi_serial(self, serial):\n with self._conn.begin():\n self._conn.execute(\n self._configuration.update().\n where(self._configuration.c.id == 1),\n pypi_serial=serial\n )", "title": "" }, { "docid": "1bec139eb4112b625e5e17991af41783", "score": "0.60267234", "text": "def __init__(self, serial_port=None):\n \n # Make a list of usb serial locations to test connection IFF it is unkown\n self.locations = ['/dev/ttyACM0','/dev/ttyACM1','/dev/ttyACM3','/dev/ttyUSB0','/dev/ttyUSB1','/dev/ttyUSB2','/dev/ttyUSB3', '/dev/ttyS0','/dev/ttyS1','/dev/ttyS2','/dev/ttyS3','/dev/tty.usbmodem411'] \n \n rospy.init_node('arduino_safety_light', anonymous=True)\n \n #Parameteres:\n # Get the serial port name\n self.device = serial_port or rospy.get_param('~serial_port', '/dev/ttyACM1')\n \n # for device in locations: \n try:\n # Connect to board\n self.arduino = serial.Serial(self.device, 9600) \n except: \n print \"SAFETY LIGHT: Failed to connect to\", self.device \n \n print \"SAFETY LIGHT: Connected to arduino on\", self.device\n # wait for Arduino board to respond\n time.sleep(3)\n self.arduino.write('r') # standby mode light\n \n #Listens for Autonomous Mode indicator signal\n rospy.Subscriber(\"/cata/navigation_mode\", LightMode, self.lightCallback, queue_size=1)\n \n # Handle ros requests\n rospy.spin()", "title": "" }, { "docid": "8614649831b2da22495051ef42cb2f65", "score": "0.60070056", "text": "def __init__(self, serialport='/dev/ttyUSB0', eSSPId=0, timeout=None): # noqa\n if timeout is None or timeout == 0:\n serial_timeout = timeout\n else:\n serial_timeout = 0.1\n self.timeout = timeout\n self.__ser = serial.Serial(serialport, 9600, timeout=serial_timeout)\n self.__eSSPId = eSSPId\n self.__sequence = '0x80'\n\n self._logger = logging.getLogger(__name__)\n self._logger.debug(\"Startup at \" + str(datetime.datetime.now()))", "title": "" }, { "docid": "9729e72902ca5f2c8ecedb83099ab5f9", "score": "0.5988493", "text": "def _setup(self):\n\n mode = tcgetattr(self.mSerial_nr)\n mode[self.IFLAG] = mode[self.IFLAG] & ~(IGNBRK |\n BRKINT |\n IGNPAR |\n INPCK |\n ISTRIP |\n ICRNL | \n INLCR |\n IXON|IXOFF)\n mode[self.OFLAG] = mode[self.OFLAG] & ~(OPOST)\n mode[self.CFLAG] = mode[self.CFLAG] & ~(CSIZE | PARENB)\n # CS8 = 8 Bits, CSTOPB = 1 stop bit, CRTSCTS = no hardware flow control\n # PARENB = no parity; BAUD/N/8/1 \n mode[self.CFLAG] = mode[self.CFLAG] | (CS8)\n 
mode[self.CFLAG] = mode[self.CFLAG] & ~(PARENB | CSTOPB | CRTSCTS)\n mode[self.LFLAG] = mode[self.LFLAG] & ~(ECHO | ICANON | IEXTEN | ISIG)\n mode[self.CC][VMIN] = 1\n mode[self.CC][VTIME] = 0\n tcsetattr(self.mSerial_nr, TCSANOW, mode)", "title": "" }, { "docid": "73a02918dad3108be769b854adf65fa5", "score": "0.59700793", "text": "def SC_Write_to_modem_Library_Default_Settings(self):\r\n # Writing on the modem the dynamic configuration parameters \r\n self.SC_sndConfigurationCommand(self._pin_code, self._desired_gateway,\r\n self._def_polled, self._def_ack_level,\r\n self._def_rep_or_ind, self._def_msg_or_ind ,\r\n self._def_priority, self._def_msg_body_type,\r\n self._def_serv_type, self._gwy_search_mode)", "title": "" }, { "docid": "4d1646637036fa91f107d190d4d202f4", "score": "0.5962945", "text": "def set_port(self, port, resource=pumpserial):\n if resource.is_open:\n resource.close()\n resource.port = port\n self.pc_connect = True\n HPump.enabled = True\n self.logger.info(self.name+\" port set to \"+port)", "title": "" }, { "docid": "9ca7ed2ae452fb36a4f679380fa203b6", "score": "0.59604377", "text": "def bios_serial_port(handle, name, parent_org_dn,\n vp_serial_port_a_enable=\"platform-default\"):\n\n from ucsmsdk.mometa.bios.BiosVfSerialPortAEnable import \\\n BiosVfSerialPortAEnable\n\n profile_dn = parent_org_dn + \"/bios-prof-\" + name\n obj = handle.query_dn(profile_dn)\n if obj:\n mo = BiosVfSerialPortAEnable(\n parent_mo_or_dn=obj,\n vp_serial_port_a_enable=vp_serial_port_a_enable)\n handle.add_mo(mo, True)\n handle.commit()\n return mo\n else:\n raise ValueError(\"Bios policy '%s' not found.\" % profile_dn)", "title": "" }, { "docid": "9afb476da2eb4588e4bb1eb6f55a27d5", "score": "0.5959773", "text": "def __init__(self, port='COM3', baudrate='115200'):\n serial.Serial.__init__(self)\n self.baudrate = baudrate # le debit est de 115200 bits\n self.rtscts = True # on utilise le handshake hardware du port serie\n self.timeout = 0.1 # Les methodes read et write auront un time out de 0.1s\n self.port = port # Designe le port COM a ouvrir", "title": "" }, { "docid": "4ec0c1c2486d967f507895a32ab1283e", "score": "0.59337777", "text": "def __init__(self, port):\n self.serial = serial.Serial(port, baudrate=19200, timeout=10.0)", "title": "" }, { "docid": "7e1e1a98c77fcd2170268f69494f1a29", "score": "0.59337145", "text": "def __init__(self, serialport):\n\t\tprint('Instanciado Arduino dispositivo %s' % serialport)\n\t\ttry:\n\t\t\tself.serialport = Serial(serialport, 9600)\n\t\t\tself.error = False\n\t\t\tself.id = None\n\t\texcept:\n\t\t\tself.error = True\n\t\t\tself.id = -666\n\t\t\traise", "title": "" }, { "docid": "d0aa82d28f6f78bb81b25d3c87efb20d", "score": "0.5906874", "text": "def __init__(self, port=None, device_name=None, connect_on_instantiation=False, soft_fail_for_testing=False):\n super().__init__(port, device_name, soft_fail_for_testing)\n\n # serial settings\n self.baudrate = 9600\n self.bytesize = serial.SEVENBITS\n self.parity = serial.PARITY_EVEN\n\n self.write_delay = 0.1\n self.read_delay = 0.1\n\n # answer patterns\n self.stranswer = re.compile(\"([0-9A-Z_]+)\\r\\n\")\n self.intanswer = re.compile(\"(\\d+) (\\d)\\r\\n\")\n self.floatanswer = re.compile(\"(\\d+\\.\\d+) (\\d)\\r\\n\")\n\n # DOCUMENTED COMMANDS for easier maintenance\n self.GET_ROTATION_PV = \"IN_PV_4\"\n self.GET_ROTATION_SP = \"IN_SP_4\"\n self.SET_ROTATION_SP = \"OUT_SP_4\" # 20-280 RPM\n self.GET_TEMP_PV = \"IN_PV_2\"\n self.GET_TEMP_SP = \"IN_SP_2\"\n self.SET_TEMP_SP = \"OUT_SP_2\" # 0-180°C, max. 
T is safety temperature minus 10°C, T>90°C switches to oil mode\n self.GET_SAFETY_TEMP_SP = \"IN_SP_2\"\n self.SET_SAFETY_TEMP_SP = \"OUT_SP_2\"\n self.START_TEMP = \"START_2\"\n self.STOP_TEMP = \"STOP_2\"\n self.START_ROTATION = \"START_4\"\n self.STOP_ROTATION = \"STOP_4\"\n self.RESET = \"RESET\"\n self.GET_NAME = \"IN_NAME\"\n self.SET_NAME = \"OUT_NAME\"\n self.GET_SOFTWARE_VERSION = \"IN_SOFTWARE\"\n self.SET_INTERVAL_SP = \"OUT_SP_60\" # 1-60s, \"0\" switches mode off\n self.SET_TIMER_SP = \"OUT_SP_61\" # 1-199min, \"0\" switches mode off\n self.LIFT_UP = \"OUT_SP_62 1\"\n self.LIFT_DOWN = \"OUT_SP_63 1\"\n\n self.MAX_RPM = 280\n\n self.MAX_RETRIES = 10\n\n self.heating_on = Event() # communicator for switching the keepalive on or off\n\n self.launch_command_handler()\n\n if connect_on_instantiation:\n self.open_connection()", "title": "" }, { "docid": "3325b845ebe5cd3a7f2e2030e0b0113d", "score": "0.5884818", "text": "def __init__(self, port='', brate='115200'):\n self.logger = loggerinit(self.__class__.__name__)\n if port == '':\n port = findusb()[-1]\n self.dser = {'port': port, 'brate': brate}\n self.sopen(port, brate)", "title": "" }, { "docid": "8df67ebca097611edcdcbd795c6c1741", "score": "0.5884018", "text": "def configureDevice(self):\r\n pass", "title": "" }, { "docid": "0b6a4ffb8378400b3b44291eddf8027a", "score": "0.5879307", "text": "def serial_config():\n # Open the serial ports for the configuration and the data ports\n\n # Raspberry pi\n # data_port = serial.Serial('/dev/ttyS0', 9600)\n data_port = serial.Serial('/dev/ttyUSB0', 9600)\n\n # Windows\n # data_port = serial.Serial('COM3', 9600)\n\n if data_port.isOpen():\n try:\n # throwing all the data stored at port coming from sensor\n data_port.flushInput()\n # if error is been thrown print it\n except Exception as err:\n print(\"Error \" + str(err))\n data_port.close()\n exit()\n\n else:\n try:\n data_port.open()\n except Exception as err:\n print(\"Error \" + str(err))\n data_port.close()\n exit()\n\n return data_port", "title": "" }, { "docid": "0c76beb8b4e2cd2dd451bf32d9885826", "score": "0.58530164", "text": "def _set_config(self):\n\n config = 0\n config |= self._enable_servo1\n config |= self._enable_servo2 << 1\n config |= self._enable_lights << 2\n config |= self._light_mode << 3\n config |= self._light_on << 4\n\n self._i2c_write_byte(self.REG_CONFIG, config)", "title": "" }, { "docid": "c1bb7c7c958d1319c3c977345b9b9c1f", "score": "0.5837727", "text": "def enable(port, target, mode_value, lane_map):\n\n import sonic_y_cable.y_cable\n res = sonic_y_cable.y_cable.enable_prbs_mode(port, target, mode_value, lane_map)\n if res != True:\n click.echo(\"PRBS config unsuccesful\")\n sys.exit(CONFIG_FAIL)\n click.echo(\"PRBS config sucessful\")\n sys.exit(CONFIG_SUCCESSFUL)", "title": "" }, { "docid": "216a679150b0f683b2cebd371ab591da", "score": "0.5836821", "text": "def __init__(self, tty, baud):\n self.connection = get_serial(tty, baud)", "title": "" }, { "docid": "c31030e88e1db33dee4b45da2f7028ba", "score": "0.5836268", "text": "def getSerialPort(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "1593075f085759f9a53a6ca052dcafd9", "score": "0.58342856", "text": "def setup(self):\n try:\n self.ser = serial.Serial(self.path)\n except Exception:\n logging.exception(\"Serial Error\")", "title": "" }, { "docid": "17b7a2edbcdddb86abe25bc44b00c361", "score": "0.5823796", "text": "def _init_serial(self, serial_name, **kwargs):\n if isinstance(serial_name, str):\n is_valid_serial, details = 
validate_serial_port(serial_name)\n if is_valid_serial:\n try:\n settings = {\n 'baudrate': 9600,\n 'bytesize': serial.EIGHTBITS,\n 'parity': serial.PARITY_NONE,\n 'stopbits': serial.STOPBITS_ONE,\n 'timeout': None,\n 'write_timeout': 0\n }\n for key in kwargs:\n if key in settings: settings[key] = kwargs[key]\n serial_port = serial.Serial(port=serial_name,\n baudrate=settings['baudrate'],\n bytesize=settings['bytesize'],\n parity=settings['parity'],\n stopbits=settings['stopbits'],\n timeout=settings['timeout'],\n write_timeout=settings['write_timeout'],\n xonxoff=False, rtscts=False, dsrdtr=False)\n serial_port.flush()\n self.log.info(\"Connected to {} at {} baud\".format(details,\n settings['baudrate']))\n return serial_port\n except serial.SerialException as e:\n self._handle_error(\n \"Unable to open {} - {}\".format(details, e))\n else:\n self._handle_error(\n \"Invalid serial port {} - {}\".format(serial_name, details))\n else:\n self._handle_error(\n \"Invalid type passed as serial_port - requires string name of port\")", "title": "" }, { "docid": "892d2760a911e49d74e9aba359d7ccfb", "score": "0.5801167", "text": "def __init__(self, port, baud, log_to_file=None, start_serial=False, query_version=False):\n logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n self.logger = logging.getLogger()\n self.port = SerialPort(port=port, baud=baud, log_to_file=log_to_file, start_serial=start_serial)\n self.shutdown = False\n self.firmware_version = 'Unknown'\n self.firmware_name = 'Unknown'\n self.errors = []\n self.dtoa_map = []\n self.atod_map = []\n self.pin_config = []\n self._listeners = collections.defaultdict(list)\n self._listeners_lock = threading.Lock()\n self.pin_state = collections.defaultdict(lambda: 0) #pins all default to output low\n self.pin_mode = collections.defaultdict(lambda: MODE_OUTPUT) #pins all default to output\n self._i2c_device = I2CDevice(self)\n super(Board, self).__init__()\n if start_serial:\n self.StartCommunications(query_version=query_version)", "title": "" }, { "docid": "0921cdb41a3a6dcffa478c263abfd056", "score": "0.5788999", "text": "def setup(self):\n\t\t\tself.oasis_serial = oserial", "title": "" }, { "docid": "88fc5c1066167e55fb3c3db40fef037b", "score": "0.5785761", "text": "def serial_config():\n # Open the serial ports for the configuration and the data ports\n\n # Raspberry pi\n data_port = serial.Serial('/dev/ttyUSB0', 9600)\n\n if data_port.isOpen():\n try:\n # throwing all the data stored at port coming from sensor\n data_port.flushInput()\n # if error is been thrown print it\n except Exception as err:\n print(\"Error \" + str(err))\n data_port.close()\n exit()\n\n else:\n try:\n data_port.open()\n except Exception as err:\n print(\"Error \" + str(err))\n data_port.close()\n exit()\n\n return data_port", "title": "" }, { "docid": "524b2d7c5858cdc4e1b982b8749f2646", "score": "0.5767808", "text": "def __init__(self, port, baudrate=9600, bytesize=8, parity='N',\n stopbits=1, timeout=None, xonxoff=False, rtscts=False,\n write_timeout=None, dsrdtr=False, inter_byte_timeout=None,\n exclusive=None, eol='\\n',\n encoding='utf-8', encoding_errors='ignore', quiet=False):\n super().__init__(input_format=Text)\n if not SERIAL_MODULE_FOUND:\n raise RuntimeError('Serial port functionality not available. 
Please '\n 'install Python module pyserial.')\n try:\n self.serial = serial.Serial(port=port, baudrate=baudrate,\n bytesize=bytesize, parity=parity,\n stopbits=stopbits, timeout=timeout,\n xonxoff=xonxoff, rtscts=rtscts,\n write_timeout=write_timeout, dsrdtr=dsrdtr,\n inter_byte_timeout=inter_byte_timeout,\n exclusive=exclusive)\n except serial.SerialException as e:\n raise serial.SerialException(f'Failed to open serial port {port}: {e}')\n\n self.eol = eol\n self.encoding = encoding\n self.encoding_errors = encoding_errors\n self.quiet = quiet", "title": "" }, { "docid": "b6028c2bcc2aa9e9456ee9a0e5227449", "score": "0.57632685", "text": "def __init__(self, dev_num=0, serial_num=None, debug=False):\n \n #self.poll_time = poll_time # polling period in seconds\n \n self.debug = debug\n if self.debug:\n logger.debug(\"ThorlabsStepperControllerDev.__init__\")\n \n logger.setLevel('DEBUG')\n \n # Load DLL libraries, note DeviceManager.dll must be loaded first \n D = self.devman_dll = ctypes.windll.LoadLibrary(\"C:\\Program Files\\Thorlabs\\Kinesis\\Thorlabs.MotionControl.DeviceManager.dll\")\n S = self.sbc_dll = ctypes.windll.LoadLibrary(\"C:\\Program Files\\Thorlabs\\Kinesis\\Thorlabs.MotionControl.Benchtop.StepperMotor.dll\")\n\n _err(D.TLI_BuildDeviceList())\n serialNos = ctypes.create_string_buffer(100)\n _err(D.TLI_GetDeviceListByTypeExt(serialNos, 100, 70))\n \n \n # byte arrays for the serial numbers\n self._serial_numbers = [x for x in serialNos.value.split(b',') if x]\n\n if debug:\n logger.debug(\"serial_numbers available {} --> {}\".format(serialNos.value, self._serial_numbers))\n logger.debug(\"serial_number requested {}\".format(repr(serial_num)))\n\n \n # _id is byte string representing serial number, like b'37874816'\n \n if serial_num:\n self._id = str(serial_num).encode() # _id must be a bytes array\n if debug:\n logger.debug(\"using serial number {}\".format(self._id))\n else:\n if debug:\n logger.debug(\"using device number {}\".format(dev_num))\n self._id = self._serial_numbers[dev_num]\n\n #self._sernum = str(self.sernum).encode('ascii') \n \n self.lock = Lock()\n\n if self.debug:\n logger.debug(\"SBC_Open: {}\".format( self._id))\n \n with self.lock:\n _err(S.SBC_Open(self._id))\n self.num_chans = S.SBC_GetNumChannels(self._id)\n \n # polling required to update device status, otherwise must use request functions before reads\n #self.sbc_dll.SBC_StartPolling(self._id, 100 )\n \n #time.sleep(0.2)", "title": "" }, { "docid": "fa458dfa316b3ec5ac51e7c9b7566fd7", "score": "0.574867", "text": "def configure(self):\n save_required = False\n val = self.get_setting(SERIAL_ADDR)\n if val is None:\n return False\n if val & 0x3 != 0:\n self.set_setting(SERIAL_ADDR, val & 0xFC)\n save_required = True\n\n val = self.get_setting(SETTINGS_ADDR)\n if val is None:\n return False\n if val != SETTINGS_VALUE:\n self.set_setting(SETTINGS_ADDR, SETTINGS_VALUE)\n save_required = True\n\n val = self.get_setting(TIMOUT_ADDR)\n if val is None:\n return False\n if val != 0:\n self.set_setting(TIMOUT_ADDR, 0)\n save_required = True\n\n if save_required:\n val = self.save_settings()\n # some log\n if val:\n print(\"QR scanner is configured\")\n else:\n print(\"Failed to configure scanner\")\n return val\n return True", "title": "" }, { "docid": "34e383c59c6c93129cd2cfd1f220f94f", "score": "0.5744691", "text": "def __init__(self, port:str, bauds:int=38400,\n timeout:int=3500, dtr:bool=False)->None:\n conn = Serial(port, bauds, timeout)\n if(not dtr is None):\n conn.setDTR(dtr)\n time.sleep(2.0)\n 
conn.ignoreInput(100)\n\n super(S2Serial, self).__init__(conn)", "title": "" }, { "docid": "7a08754b94c93db2c931b47d623abaf8", "score": "0.5736152", "text": "def InitialiseSerial(self, serPort = 'COM5', baud = 19200, bytes = serial.EIGHTBITS, \n par = serial.PARITY_NONE, stop = serial.STOPBITS_ONE):\n self.SerialConnection(serPort, baud, bytes, par, stop)\n\n if(self.serialConnect.isOpen()):\n \"Auto baud to let the modem know the baud rate\"\n print(\"autobaud\")\n result = self.SendCommand(\"AT\\r\")\n\n \"\"\"no response from unit check\"\"\"\n if not result:\n print(\"Failed to connect over serial... Switch Unit On\")\n return False\n\n \"Fix baud to self.serialBaud now they can communicate\"\n print(\"setting baud rate\")\n self.SendCommand(\"AT+IPR=\" + str(self.serialBaud) + \"\\r\")\n\n self.State_Initialized = True\n return True\n else:\n print(\"serial port is not open, cannot initialise\")\n self.State_Initialized = False\n return False", "title": "" }, { "docid": "c836dd4cd46ed2ed680d7708bac196c2", "score": "0.57329446", "text": "def configure(self):\n # SYSTEM_CONNECT_CMD_CODE\n # 5a a5 09 00 00 00 aa ee\n print(self._send_command(CMD.SYSTEM_CONNECT_CMD_CODE))\n\n # READ_FPGA_VERSION_CMD_CODE\n # 5a a5 0e 00 00 00 aa ee\n print(self._send_command(CMD.READ_FPGA_VERSION_CMD_CODE))\n\n # CONFIG_FPGA_GEN_CMD_CODE\n # 5a a5 03 00 06 00 01 02 01 02 03 1e aa ee\n print(self._send_command(CMD.CONFIG_FPGA_GEN_CMD_CODE, '0600', 'c005350c0000'))\n\n # CONFIG_PACKET_DATA_CMD_CODE \n # 5a a5 0b 00 06 00 c0 05 35 0c 00 00 aa ee\n print(self._send_command(CMD.CONFIG_PACKET_DATA_CMD_CODE, '0600', 'c005350c0000'))", "title": "" }, { "docid": "67e9104cb69f0b4caa509133f9116f4c", "score": "0.57182425", "text": "def __init__(self, node_name='arduino_node', rate=1, COM='/dev/ttyACM0', baudrate=9600, verbose=False): \n super().__init__(node_name)\n self.rate = rate\n self.COM = COM\n self.baudrate = baudrate\n self.verbose = verbose\n \n self.joint_sub = self.create_subscription(JointState, \"/joint_states\", self.joint_state_CB, 1)\n self.joint_states = None\n \n # Try to connect to device\n self.ser = None\n self.connected = False\n self.connect()\n\n self.create_timer(1/self.rate, self.timer_callback) # frequency rate of running commands", "title": "" }, { "docid": "8e9782358d7f92327c54c1b376472b33", "score": "0.5703677", "text": "def setupSMSReceive(self, port):\n self.resourceConfig['sms']['rx'] = {}\n self.resourceConfig['sms']['rx']['port'] = port", "title": "" }, { "docid": "93cf0dd5fd91fb1ecdd128d6ce29e986", "score": "0.5699524", "text": "def __init__(self, logger=None):\r\n if logger:\r\n self.logger = logger\r\n else:\r\n self.init_module_logger()\r\n \r\n self._ser = serial.Serial()\r\n \r\n # static configuration parameters\r\n self._ser.setParity(serial.PARITY_NONE)\r\n self._ser.setStopbits(serial.STOPBITS_ONE)\r\n self._ser.setByteSize(serial.EIGHTBITS)\r\n self._ser.setXonXoff(False)\r\n self._ser.setRtsCts(False)\r\n \r\n # dynamic configuration parameters\r\n self._pin_code = DEFAULT_PIN_CODE\r\n self._desired_gateway = DEFAULT_DESIRED_GATEWAY\r\n self._def_polled = DEFAULT_SC_POLL_MODE\r\n self._def_ack_level = DEFAULT_ACK_LEVEL\r\n self._def_rep_or_ind = DEFAULT_OR_IND_REPORTS\r\n self._def_msg_or_ind = DEFAULT_OR_IND_MES\r\n self._def_priority = DEFAULT_PRIORITY_LVL\r\n self._def_msg_body_type = DEFAULT_MSG_BODY_TYPE\r\n self._def_serv_type = DEFAULT_REPORTS_SERVICE_TYPE\r\n self._gwy_search_mode = DEFAULT_GWY_SEARCH_MODE", "title": "" }, { "docid": 
"f5c5da31ef528497dd486192361a570e", "score": "0.56967187", "text": "def __setDevice(self, name):\r\n #Python has no native switch command, if-elsing it\r\n if(name == MEGA2560):\r\n PWM = [0, 13]\r\n DIGITAL = [0, 69]\r\n ANALOG = [0, 15]\r\n elif(name == UNO):\r\n PWM = [3, 5, 6, 9, 10, 11]\r\n DIGITAL = [0, 20]\r\n ANALOG = [0, 5]\r\n elif(name == LEONARDO):\r\n PWM = [3, 5, 6, 9, 10, 11, 13]\r\n DIGITAL = [0, 20]\r\n ANALOG = [4, 6, 8, 9, 10, 12, 14, 15, 16, 17, 18, 19]\r\n elif(name == ADK):\r\n PWM = [0, 13]\r\n DIGITAL = [0, 69]\r\n ANALOG = [0, 15]\r\n else:\r\n raise ArPiException(\"Your device is not compatible with the ArduinoPi\")\r\n\r\n self.__arduino = {'PWM' : PWM, 'DIGITAL' : DIGITAL, 'ANALOG' : ANALOG }\r\n self.__arName = name\r\n return True", "title": "" }, { "docid": "7000c12caf6aaa962e501b31d96dfaab", "score": "0.56890315", "text": "def setup():\n\tglobal arduino_port\n\t #???????\n\tarduino_port = serial.Serial( port=\"/dev/ttyACM0\",\n\t\tbaudrate=115200,\n\t\ttimeout=10 )\n\n\tarduino_port.open()", "title": "" }, { "docid": "adbb199529396b1e47f94cce48072e97", "score": "0.5684218", "text": "def __init__(self, port, baudrate=19200, printer=None):\n if printer is not None:\n self.printer = printer\n else:\n self.printer = Serial(port, baudrate)\n self.reset()", "title": "" }, { "docid": "904d429ce6d9356cbe2e44f9446d3e89", "score": "0.5676028", "text": "def configure_params(slot, param):\n\t\tUPIargs = { 'interface' : 'wlan0', UPI_R.TDMA_ALLOCATED_SLOT : param[1].value }\n\t\trvalue = controller.radio.set_parameters(UPIargs)\n\t\tif rvalue[0] == SUCCESS:\n\t\t\tlog.warning('Parameter writing successful')\n\t\telse :\n\t\t\tlog.warning('Error in parameter writing')", "title": "" }, { "docid": "3dc87d61a7b7cfc3f13eb24614014647", "score": "0.5671316", "text": "def __init__(self, port_no=PortNo.OFPP_ANY):\n super().__init__()\n self.port_no = port_no", "title": "" }, { "docid": "47959a572caa3bc8089b0e3d66791bb1", "score": "0.56697494", "text": "def set_controlling_var(serialcom):\n\n serialcom.write('1\\r'.encode('utf-8')) #writes to board using pyserial\n sleep(6) #sleep to match with HAL_Delay(500) on micro", "title": "" }, { "docid": "74c8280249fa66bc4e4c7ea47d511f14", "score": "0.5665945", "text": "def __init__(self, serialport, bps):\n self.fd = os.open(serialport, os.O_RDWR | os.O_NOCTTY | os.O_NDELAY)\n attrs = termios.tcgetattr(self.fd)\n bps_sym = bps_to_termios_sym(bps)\n # Set I/O speed.\n attrs[ISPEED] = bps_sym\n attrs[OSPEED] = bps_sym\n\n # 8N1\n attrs[CFLAG] &= ~termios.PARENB\n attrs[CFLAG] &= ~termios.CSTOPB\n attrs[CFLAG] &= ~termios.CSIZE\n attrs[CFLAG] |= termios.CS8\n # No flow control\n attrs[CFLAG] &= ~termios.CRTSCTS\n\n # Turn on READ & ignore contrll lines.\n attrs[CFLAG] |= termios.CREAD | termios.CLOCAL\n # Turn off software flow control.\n attrs[IFLAG] &= ~(termios.IXON | termios.IXOFF | termios.IXANY)\n\n # Make raw.\n attrs[LFLAG] &= ~(termios.ICANON | termios.ECHO | termios.ECHOE | termios.ISIG)\n attrs[OFLAG] &= ~termios.OPOST\n\n # It's complicated--See\n # http://unixwiz.net/techtips/termios-vmin-vtime.html\n attrs[CC][termios.VMIN] = 0;\n attrs[CC][termios.VTIME] = 20;\n termios.tcsetattr(self.fd, termios.TCSANOW, attrs)", "title": "" }, { "docid": "b954e72f57fbd5714a0f941c3aacc9db", "score": "0.56534374", "text": "def com_port():\n port = serial.tools.list_ports_common.ListPortInfo(\"/dev/ttyUSB1234\")\n port.serial_number = \"1234\"\n port.manufacturer = \"Virtual serial port\"\n port.device = \"/dev/ttyUSB1234\"\n port.description = 
\"Some serial port\"\n\n return port", "title": "" }, { "docid": "f28075066a9b57ab7bc901c25b77285a", "score": "0.5646309", "text": "def init_serial(self):\n self.method = \"serial\"\n root.destroy()", "title": "" }, { "docid": "3267c448fc6d0a0266130c52e62868ac", "score": "0.5587701", "text": "def __init__(self, platform=\"windows\"):\n try:\n if platform == \"windows\":\n self.arduino = serial.Serial('COM3', 115200) # Windows\n if platform == \"linux\":\n self.arduino = serial.Serial('/dev/ttyACM0', 115200) # Linux\n info(\"initialization complete.\")\n except:\n error(\"initialization arduino.\")", "title": "" }, { "docid": "07d33011c861dcf93c83e54f5c9c36fc", "score": "0.5585625", "text": "def __init__(self):\n self.srl_rlock=threading.RLock()\n self.srl_port=serial.Serial()\n self.srl_port.bytesize=8\n self.srl_port.parity=serial.PARITY_NONE\n self.srl_port.stopbits=serial.STOPBITS_ONE\n self.srl_port.baudrate=9600\n\n self._nextsleep=time.time()\n\n self.motor_address='1'\n self.motor_position=1073741823#(2^30)-1\n self.is_max_set=False\n self.is_min_set=False\n self.max_pos=2147483647#(2^31)-1\n #experimentally decent default calibration values\n self.mL_per_rad=0.016631691553103064\n self.motor_position_per_rad=8156.69083345965\n #last pump operations, for calibration reasons\n self.vol=0\n self.rad=0", "title": "" }, { "docid": "044b0205c84fb031946a4441c9ef8732", "score": "0.5577866", "text": "def prepare(self, params, device_serial):\n return True", "title": "" }, { "docid": "61747e2c43be595b11cf989f2c034b32", "score": "0.5574236", "text": "def port_config_set(controller, port_no, config, mask):\n logging.info(\"Setting port \" + str(port_no) + \" to config \" + str(config))\n\n hw_addr, _, _ = port_config_get(controller, port_no)\n\n mod = ofp.message.port_mod()\n mod.port_no = port_no\n if hw_addr != None:\n mod.hw_addr = hw_addr\n mod.config = config\n mod.mask = mask\n mod.advertise = 0 # No change\n controller.message_send(mod)\n return 0", "title": "" }, { "docid": "61da90ffac4f49fcd785ee475a8afb45", "score": "0.55710506", "text": "def run(self):\n self._checkArgs() # pull in the command line options\n \n if (self.args.debug == True):\n self.logger.info(\"Setting output level to DEBUG\")\n self._ch.setLevel(logging.DEBUG)\n \n # TODO: Check panID and EncrytpionKey args are valid\n if (self.args.panID):\n pass\n\n if (self.args.encKey):\n pass\n \n # setup the serial port\n self._serial = serial.Serial()\n if (self.args.port):\n self._serial.port = self.args.port\n else:\n self._serial.port = self._port\n \n if (self.args.baudrate):\n self._serial.baudrate = self.args.baudrate\n else:\n self._serial.baudrate = self._baudrate\n\n self._serial.timeout = self._serialTimeout\n \n # setup the at class\n self._at = AT.AT(serialHandle=self._serial, logger=self.logger)\n\n\n self.logger.info(\"This app will attempt to read the current PANID and encryption setting from the radio on port {}.\".format(self._serial.port))\n self.logger.info(\"If factory default setting are found we will generate a new PANID and encryption key to setup your radio network\")\n\n self.logger.debug(\"Attempting to open the serial port\")\n self._serial.open()\n self.logger.debug(\"Port open\")\n \n if self._readCurrent():\n if self._defaultPANID == self._panID and self._encryption == False and self._encryptionKey == self._defaultEncryptionKey:\n self.logger.info(\"Default settings found\")\n\n self._generateNewSetings()\n if self._applySettings():\n if self._saveSettings():\n self.logger.info (\"New setting 
have been successfully applied\")\n self._printSettings()\n else:\n self.logger.info(\"Failed to correctly apply setting, no changes have been saved to the device\")\n self.exit(1)\n else:\n self.logger.info(\"Non default settings found, no changes have been made\")\n self._printSettings()\n if (self.args.force):\n self.logger.info(\"Setting update forced via command line\")\n self._generateNewSetings()\n if self._applySettings():\n if self._saveSettings():\n self.logger.info (\"New setting have been successfully applied\")\n self._printSettings()\n else:\n self.logger.info(\"Failed to correctly apply setting, no changes have been saved to the device\")\n self.exit(1)\n else:\n self.logger.info(\"Failed to read the current setting from your radio\")\n self.exit(1)\n self.exit(0)", "title": "" }, { "docid": "da92fa786284bcbf2d6514fc96636455", "score": "0.55605686", "text": "def use_com_port(self,N = 0):\n try:\n port = self.available_ports[N][0]\n self.ser = Serial(port, baudrate=115200, rtscts=True, timeout=0.1)\n self.stop_scan()\n self.ser.flushInput()\n self.ser.flushOutput()\n self.ser.set_buffer_size(rx_size = 409200)\n self.ser.timeout = 2\n info('try: use_com_port N = %r' % N)\n except:\n error(traceback.format_exc())\n warn('failed to connect to the requested com port')", "title": "" }, { "docid": "300bdbe33b675e350dceb277313d7c36", "score": "0.55537313", "text": "def open(self):\n self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)\n\n if self.device is not None:\n print \"Serial printer enabled\"\n else:\n print \"Unable to open serial printer on: %s\" % self.devfile", "title": "" }, { "docid": "48c3fe3e4f6205eca20bbc1e73d6b145", "score": "0.5538877", "text": "def __init__(self, port, baud=100000, verbose=0):\n self.VERBOSE_THRESHOLD = 4\n self.verbose = verbose\n self.notify(\"Constructor\")\n\n # Inter-byte delay\n self.ibdly = 0.0001\n self.port = port\n self.baud = baud\n self.ser = None\n self.initialise_serial(self.port, self.baud)", "title": "" }, { "docid": "41ca1f9535934ec896a029dac9947353", "score": "0.55287015", "text": "def phone_start(self, baud=\"115200\", timeout=\"1\"):\n\n self.py = PythonExecutor(self)\n self.py.run(\"import serial,time\")\n self.py.run(\n f\"set = serial.Serial('/dev/tty{self.line}', {baud} ,timeout= {timeout})\"\n )\n self.py.run(\"set.write(b'ATZ\\\\r')\")\n self.mta_readlines(search=\"OK\")\n self.py.run(\"set.write(b'AT\\\\r')\")\n self.mta_readlines(search=\"OK\")\n self.py.run(\"set.write(b'AT+FCLASS=1\\\\r')\")\n self.mta_readlines(search=\"OK\")", "title": "" }, { "docid": "ce6702407a08bb7c70b053accce87962", "score": "0.55218244", "text": "def set_port(self, port, instrument_list=[]):\n if self.is_open:\n self.close()\n self.port = port\n self.open()\n self.enabled = True\n self.logger.info(\"Controller set to port \"+port)\n for instrument in instrument_list:\n if instrument.pc_connect == False:\n instrument.set_to_controller(self)", "title": "" }, { "docid": "ed0206cbacf5fd85a520ef1f4f73c3e0", "score": "0.55175644", "text": "def on(port_name):\n set_lpmode(port_name, True)", "title": "" }, { "docid": "cbcde57d3e1b438c80f3c22597103b71", "score": "0.54883784", "text": "def _open(self):\n\t\n\t\tif self.port is not None:\n\t\t\tself.serial = serial.Serial(self.port)\n\t\telse:\t\n\t\t\tn = 0\n\t\t\tlast_error = ''\n\t\t\tprefix = self.SERIAL_PORT[:-1]\n\n\t\t\twhile n < 
10:\n\t\t\t\ttry:\n\t\t\t\t\tself.port = prefix + str(n)\n\t\t\t\t\tself.serial = serial.Serial(self.port)\n\t\t\t\t\tbreak\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlast_error = str(e)\n\t\t\t\t\tn += 1\n\t\t\t\t\tself.port = None\n\n\t\t\tif self.port is None:\n\t\t\t\traise Exception(last_error)\n\t\t\telse:\n\t\t\t\tself.Reset()", "title": "" }, { "docid": "5f8da3246e8f11ad5dfb413ade238598", "score": "0.5486184", "text": "def __init__(self, device, wait=0):\n Pyboard.__init__(\n self, device=device, baudrate=115200, user=None, password=None, wait=wait\n )", "title": "" }, { "docid": "2daa9f4ea0ef43fa1088be6e6b1d6ffc", "score": "0.54857993", "text": "def __init__(self, serialPort, ID):\n\t\traise NotImplementedError", "title": "" }, { "docid": "f4ed6635114673d37bdd3d62bb5aa0c3", "score": "0.5468252", "text": "def __init__(self, signals):\r\n self.signals = signals\r\n self.ser = serial.Serial('COM4', baudrate = 9600, timeout = 1)", "title": "" }, { "docid": "7071b46489aefc03700f32099206c417", "score": "0.54519427", "text": "def set_port(self,port):\n if port == \"In\": self.send(\"aIR\")\n elif port == \"Out\": self.send(\"aOR\")\n else: return", "title": "" }, { "docid": "634dfbc5d45408fa20edac9ba2b4961d", "score": "0.5443698", "text": "def __init__(self, serial_setup = (os.getenv(\"GORDON_CONSOLE_PORT\", \"/dev/ttyUSB1\"), 115200, 8, 'N', 1, 1), \n network_setup = ( None, \"eth0\" ), login = ( \"root\", \"\" ),\n boot_prompt = \"HidaV boot on\", serial_skip_pw = True,\n reset_cb = None):\n self._logger = logging.getLogger(__name__)\n self._login = login\n self._target_if = network_setup[1]\n self._serial = self._serial_setup(*serial_setup, \n skip_pass = serial_skip_pw, \n boot_prompt = boot_prompt,\n reset_cb = reset_cb)\n if network_setup[0]:\n self.host = network_setup[0]", "title": "" }, { "docid": "3bf42d45df741385bb7f0fb24a9fd62a", "score": "0.5433358", "text": "def __init__(self,serialPort,ID = 0):\n\t\tself.dataMemory = sysv_ipc.SharedMemory(65)\n\t\tself.statusMemory = sysv_ipc.SharedMemory(88)\n\t\t\n\t\tinstruction = NanotecSharedMemoryClient.argumentsToString([\"NanotecMotor\",serialPort,ID])\n\t\tself.serialPort = self.sendInstruction(instruction)\n\t\treturn", "title": "" }, { "docid": "1b1033b78d825ecf0b7713ccac262b47", "score": "0.5401038", "text": "def uart_param_config(name, baud_rate, data_bits, parity, stop_bits, timestamp):\n try:\n uart = XObject.get_object(name)\n isok = True\n if utility.mount_on_fpga(uart.name):\n uart.disable()\n uart.enable()\n uart.config(baud_rate, data_bits, parity, stop_bits, timestamp)\n else:\n isok = uart.config(baud_rate, data_bits, parity, stop_bits)\n \n return isok\n except Exception as e:\n logger.error(\"the %s uart param config execute error:%s\"%(name, repr(e)))\n return False", "title": "" }, { "docid": "b1156398f0418e3ae1382f90db975155", "score": "0.5386756", "text": "def on(self, port):\n # Make sure this is a valid port number.\n self.validatePort(port)\n self.log.info(\"--------- Enabling port %d ------------\" % port)\n # These commands will enable the given apc port.\n self.sendCmdSequence(['olOn %s' % port])\n self.log.info(\"--------- Port %d enabled -------------\" % port)", "title": "" }, { "docid": "687fb62fa799d4fcb9381aaa79cec1a7", "score": "0.5385953", "text": "def serial(self):", "title": "" }, { "docid": "50492a4b45759d4498e26eee29dd42dc", "score": "0.53824866", "text": "def __init__(self, serial_port=None, time_resolution=0.008, logging_file='bkprecision.log'):\n if serial_port is not None:\n self.ser = 
serial.Serial(port=serial_port,\n baudrate=self.baud,\n bytesize=serial.EIGHTBITS,\n timeout=0,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE)\n logging.basicConfig(filename=logging_file, level=logging.DEBUG)\n self.configure_connection()\n self.time_resolution = time_resolution", "title": "" }, { "docid": "736e6a1518eb939b68016c6f24f75eb4", "score": "0.5378504", "text": "def _writeconfig(self):\n # Build the configuration file\n pkt = pack('>LBbHHbH',\n self.clock, # Clock Speed (Hz, 4 Bytes)\n 2, # Master Address (Byte)\n 0, # Disable Read Auto Send (Byte)\n self.wrtimeout, # Write Timeout (ms, 2 Bytes)\n self.rdtimeout, # Read Timeout (ms, 2 Bytes)\n 1, # Enable SCL Low Timeout (Byte)\n self.retries) # Retry Limit (2 Bytes)\n self._setfeature(REPORTID_SMBCONF, pkt)", "title": "" }, { "docid": "b75fbc9c19016752c30636b0a795142b", "score": "0.53781193", "text": "def open_port(self, ser, baudrate=9600, port=\"/dev/ttyUSB0\"):\n #Set COM port config\n ser.baudrate = baudrate\n ser.port = port\n\n #Open COM port\n try:\n ser.open()\n except serial.SerialException:\n sys.exit (\"Fout bij het openen van %s. Aaaaarch.\" % ser.port)\n\n print \"Poort %s geopend\" % ser.port", "title": "" }, { "docid": "635f911a23269fbe73bc05eae687fc9b", "score": "0.5358455", "text": "def connect(self):\n\n if self.device:\n self.device.close()\n self.device = None\n\n self.device = serial.Serial(**self.devConfig)", "title": "" }, { "docid": "8c5871364aa461dc507d9bf4cd876ff7", "score": "0.53556955", "text": "def set_usb_comm_capable(self, value):\n if value != 0:\n value = 1\n\n # Read the configuration if it is empty\n if self.config is None:\n self.read()\n\n self.config[3][2] &= 0xFE\n self.config[3][2] |= value", "title": "" }, { "docid": "497cfe0e6784d4ab63b8cd91473619a5", "score": "0.5343197", "text": "def StartUp():\n\n\tser = serial.Serial('/dev/cu.usbmodem1421', 9600, timeout=1)\n\n\t#resets the serial port and prints a statement to ensure it's working\n\tser.close()\n\ttime.sleep(1)\n\tser.open()\n\tprint ser.isOpen()\n\n\treturn ser", "title": "" }, { "docid": "bd779eb1266212ba87f45405db2b7c3c", "score": "0.5341305", "text": "def getSerialConfiguration ():\n \n serialConfiguration = {\"port\": 'COM1',\n \"baudrate\": 115200 # can be 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200\n };\n \n \n print(\"Trying to open default serial configuration file:\", serialConfigFileName, \"...\");\n try:\n serialConfigFile = open(serialConfigFileName, 'r')\n serialConfigFileFound = True;\n except Exception as e:\n serialConfigFileFound = False;\n \n if serialConfigFileFound == True:\n print(\"Serial configuration file found, trying to load serial configuration...\");\n try:\n serialConfig = json.load(serialConfigFile)\n except Exception as e:\n print(\"Error: Error loading data from serial configuration file \", serialConfigFile, \". 
Please correct it or delete it and try again.\", sep='');\n sys.exit(1);\n \n if \"port\" not in serialConfig.keys():\n print(\"Error: The serial configuration file does not contain the expected information:\", \"port\", \"Please correct it or delete it and try again.\");\n sys.exit(1); \n if \"baudrate\" not in serialConfig.keys():\n print(\"Error: The serial configuration file does not contain the expected information:\", \"baudrate\", \"Please correct it or delete it and try again.\");\n sys.exit(1);\n print (\"Serial configuration successfully loaded from serial configuration file...\");\n return serialConfig\n else:\n print(\"Serial configuration file not found, creating serial configuration. \");\n print(\"List of available ports (which can be opened):\");\n portList = serial_ports()\n print (portList)\n if len(portList) == 0:\n portName = \"COM1\"\n print (\"No ports available, port set to default:\", portName)\n else:\n print(\"Please input the port and press ENTER or press ENTER to use the first in the list:\");\n portName = ''\n inputStr = input(\">>\")\n portName = inputStr;\n while portName not in portList:\n if portName == '':\n portName = portList[0]\n print(\"No port specified, port set to first in the list:\", portName);\n break\n \n print(\"Port not in the list, please input the port and press ENTER or press ENTER to use the first in the list:\")\n inputStr = input(\">>\")\n portName = inputStr;\n \n print(\"Serial port set to:\", portName);\n serialConfig = {};\n serialConfig[\"port\"] = portName;\n \n print('Please input the baudrate and press ENTER or press ENTER to use the default (115200) preferably from the standard baudrates \\\n (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200) \\\n although other values may work (0 not allowed):');\n portBaudrate = 0;\n while portBaudrate == 0:\n inputStr = input(\">>\")\n if inputStr == '':\n portBaudrate = 115200;\n else:\n try:\n portBaudrate = int(inputStr)\n except Exception as e:\n portBaudrate = 0;\n if portBaudrate == 0:\n print(\"Invalid baudrate input, please try again:\");\n print(\"Serial port baudrate set to:\", portBaudrate);\n serialConfig[\"baudrate\"] = portBaudrate;\n \n print (\"Creating serial configuration file and writing configuration for later use...\");\n \n serialConfig[\"_comment\"] = \"Configure the port and baudrate for the OTAP Server to run on. 
The default settings are (8 N 1).\"\n \n print(\"Opening output serial configuration file for writing:\", serialConfigFileName, \"...\");\n try:\n serialConfigFile = open(serialConfigFileName, 'w+')\n except Exception as e:\n print(\"Error: Could not open output serial configuration file \", serialConfigFileName, sep='');\n sys.exit(1);\n \n print(\"Saving serial configuration data to default file:\", serialConfigFileName, \"...\");\n try:\n json.dump(serialConfig, serialConfigFile, indent=4);\n except Exception as e:\n print(\"Error: Could not write data to serial configuration file \", serialConfigFileName, sep='');\n sys.exit(1)\n \n serialConfigFile.close()\n return serialConfig\n pass", "title": "" }, { "docid": "4dea634fead44973303b70f13c80022e", "score": "0.53389263", "text": "def on_interface_port_security(self, tokens):\n si = self.get_current_subinterface()\n if tokens[1] == \"enable\":\n si.port_security = True\n if tokens[1] == \"max-mac-num\":\n si.port_security_max = tokens[-1]", "title": "" }, { "docid": "34002d44c8b10a4f884d3d4291f4f82b", "score": "0.5333183", "text": "def __init__(self):\n try: \n self.serial_port = serial.Serial(self.port, 19200, timeout=2)\n except:\n self.serial_port = None\n # Kurz warten, damit sich das Display initialisiert\n sleep(3)", "title": "" }, { "docid": "90c965c992b5e935c00f59458ad12929", "score": "0.5313028", "text": "def EnableRing(serialPort):\n # Enables Ring message to indicate there's a message to read.\n Log(\"EnableRing()\")\n\n if not WriteAndCheck(serialPort, \"AT+SBDMTA=1\\r\", \"OK\", 30):\n Log(\"Issue enabling ring notifications.\")\n return False\n\n Log(\"OK.\")\n return True", "title": "" }, { "docid": "a8c8c2f862630341065300f1402200c1", "score": "0.5309382", "text": "def onChoseSerialPort( self, event ):\n # ignore the None option\n if self.portChoice.GetStringSelection() != 'None':\n try:\n # don't re-open a working stream\n if self.portChoice.GetStringSelection() != self.currentPort:\n # close any open ports if present\n if self.portOpen:\n self.arduinoSerialConnection.close()\n \n self.arduinoSerialConnection = serial.Serial(self.portChoice.GetStringSelection(),\n 19200, timeout = 2)\n \n if self.checkConnection():\n self.portOpen = True\n self.currentPort = self.portChoice.GetStringSelection()\n \n except:\n wx.MessageBox('Unknown problem occurred while establishing connection using the chosen port!', 'Error', \n wx.OK | wx.ICON_ERROR)\n self.arduinoSerialConnection = 0\n self.portOpen = False\n self.updatePorts()\n \n # if None is chosen then close the current port\n else:\n if self.portOpen:\n self.arduinoSerialConnection.close()\n self.arduinoSerialConnection = 0\n self.portOpen = False\n self.currentPort = 'None'", "title": "" }, { "docid": "c5dc98dfac764b17a9bb8c02a3c08f37", "score": "0.5308701", "text": "def connect(self):\n port = serial.Serial(self.portpath, self.boudrate, \\\n timeout = self.timeout, stopbits = self.stopbits)\n return port", "title": "" }, { "docid": "6a7dd68ab8b88cd299b56dada928908d", "score": "0.530437", "text": "def initialize_device(self, init_device):\n\n device_serial = init_device['serial']\n\n log.d(TAG, \"Preparing device: %s\" % device_serial)\n\n utils.touch(utils.CONFIG_FILE_NAME)\n\n set_prop('Info', 'serial', device_serial)\n\n # Set the client section.\n set_prop('Client', 'mode', adb.MODE_USB)\n\n # Since we have a serial now, lets create a new DtfAdb instance\n self.adb = adb.DtfAdb()\n\n # Kernel\n self.adb.shell_command('cat /proc/version')\n kernel = 
self.adb.get_output()[0]\n log.d(TAG, \"Kernel version: %s\" % kernel)\n set_prop('Info', 'kernel', kernel)\n\n # SDK\n sdk = self.getprop('ro.build.version.sdk')\n log.d(TAG, \"Using SDK API %s\" % sdk)\n set_prop('Info', 'SDK', sdk)\n\n if int(sdk) > const.API_MAX:\n log.w(TAG, \"API %s isn't supported by dtf (yet), results may vary!\"\n % sdk)\n\n self.adb.shell_command('set')\n set_output = self.adb.get_output()\n\n # $PATH\n path = get_set_value(set_output, 'PATH')\n if path is None:\n log.e(TAG, \"Unable to get $PATH variable!\")\n self.do_shutdown(None, None)\n log.d(TAG, \"PATH : %s\" % path)\n set_prop('Info', 'path', path)\n\n # $BOOTCLASSPTH\n bootclasspath = get_set_value(set_output, 'BOOTCLASSPATH')\n if bootclasspath is None:\n log.e(TAG, \"Unable to get $BOOTCLASSPATH variable!\")\n self.do_shutdown(None, None)\n log.d(TAG, \"BOOTCLASSPATH : %s\" % bootclasspath)\n set_prop('Info', 'bootclasspath-jars', bootclasspath)\n\n # Version string\n version_string = self.generate_version_string()\n\n log.d(TAG, \"Using version string: %s\" % version_string)\n set_prop('Info', 'version-string', version_string)\n\n # Determine architecture and CPU bitness\n arch, cpu_bits = self.determine_cpu_arch()\n if cpu_bits is None:\n self.do_shutdown(None, None)\n\n log.d(TAG, \"CPU Architecture: %s\" % arch)\n set_prop(\"Info\", \"cpu-arch\", arch)\n\n log.d(TAG, \"Using %s-bit CPU\" % cpu_bits)\n set_prop('Info', 'cpu-bits', cpu_bits)\n\n # Set the VM type (Dalvik|Art)\n vm_type = self.determine_vm_type(sdk, cpu_bits)\n if vm_type is None:\n self.do_shutdown(None, None)\n\n log.d(TAG, \"Determined runtime: %s\" % vm_type)\n set_prop('Info', 'vmtype', vm_type)\n\n # Determine SEAndroid\n se_state = self.determine_seandroid_state()\n\n log.d(TAG, \"Determine SEAndroid state: %s\" % se_state)\n set_prop('Info', 'seandroid-state', se_state)\n\n # Setup the directory structure\n self.make_project_directories()\n\n # Set directory related properties\n set_prop('Local', 'reports-dir', utils.REPORTS_DIRECTORY)\n set_prop('Local', 'db-dir', utils.DBS_DIRECTORY)\n\n # Invoke client installation\n rtn = pkg.launch_builtin_module('client', ['install'])\n if rtn != 0:\n log.w(TAG, \"Unable to install dtf client. Try manually.\")\n\n return 0", "title": "" }, { "docid": "840b29299ce599ec9e70ff897a22efb6", "score": "0.53029215", "text": "def start(self):\n\n\t\tmaster, slave = pty.openpty()\n\t\ts_name = os.ttyname(slave)\n\t\t# Desactivation de l'echo sur le fake serial\n\t\told_settings = termios.tcgetattr(master)\n\t\tnew_settings = termios.tcgetattr(master)\n\t\tnew_settings[3] = new_settings[3] & ~termios.ECHO\n\t\ttermios.tcsetattr(master, termios.TCSADRAIN, new_settings)\n\t\t# make the sure the port is readable (if the script is launched as root)\n\t\tos.chmod(s_name, 0664)\n\t\tself.master = master\n\t\tself.slave = slave\n\t\tself.thread = ComThread(master)\n\t\tself.thread.daemon = True\n\t\tself.thread.start()\n\t\tprint \"Fake serial lance sur : \" + s_name", "title": "" }, { "docid": "18bca880e69f9dd616ec40db1dd32f2a", "score": "0.53008294", "text": "def do_port(self, customized_port):\n if customized_port:\n self.port = customized_port\n else:\n print 'Invalid Syntax! 
Try {port argument1}'", "title": "" }, { "docid": "fa51b5b3ac3a62a43b20f47f7991eaaa", "score": "0.5299915", "text": "def SC_COM_connect(self, port_num, baudrate):\r\n # serial connection parameters\r\n port_device = serial.device(port_num)\r\n self.logger.debug(\"openning device port: %s\" % port_device)\r\n self._ser.setPort(port_device)\r\n self._ser.setBaudrate(baudrate)\r\n # open the serial port\r\n self._ser.open()\r\n self._ser.setDTR(True)", "title": "" }, { "docid": "8eb1b7473a97d82a2b14317fc94841e4", "score": "0.5296849", "text": "def LoadConf(self, conf):\n if 'Rs232Port' in conf:\n self.cbRs232Port.SetSelection(int(conf['Rs232Port']))\n else:\n self.cbRs232Port.SetSelection(0)\n if 'Rs232BaudRate' in conf:\n self.cbRs232BaudRate.SetSelection(int(conf['Rs232BaudRate']))\n else:\n self.cbRs232BaudRate.SetSelection(6)", "title": "" }, { "docid": "22bf94492bee255f7c06e7a3af3ca168", "score": "0.5293663", "text": "def setSerialNode(self, n):\n self.__serialNode = n", "title": "" }, { "docid": "7c0291ec596ebeff4bbb768b844c673a", "score": "0.5293008", "text": "def __init__(self, port, baudrate, frame_id, timeout):\n self._preempted = False\n self._frame_id = frame_id\n self._conn = serial.Serial(\n port=port, baudrate=baudrate, timeout=timeout)", "title": "" }, { "docid": "48ba14c1567ae21855f03ed075817904", "score": "0.5290335", "text": "def __init__(self, port: str, timeout: int, baudrate: int):\n super().__init__(port=port, timeout=timeout, baudrate=baudrate)\n self.buffer = self.FULL_BUFFER", "title": "" }, { "docid": "06671d4841fb7a68a02a00436249fac7", "score": "0.5278944", "text": "def __init__(self, Beagle_UART=\"UART2\", port=\"ttyO2\", address=13):\n self.UART = Beagle_UART\n self.port = port\n self.address = address\n # Setup UART on BeagleBone (loads device tree overlay)\n UART.setup(self.UART)\n # Initialiase serial port\n self.pololu = serial.Serial()\n self.pololu.baudrate = 9600\n self.pololu.port = '/dev/%s' % (self.port)\n self.pololu.open()", "title": "" } ]
ab4561e60b8d17ad3e9706a5bdad0f5e
returns the last generated image
[ { "docid": "9862ae685f443540f28e99e363c75520", "score": "0.0", "text": "def getImage(self):\n return self.mergeImages()", "title": "" } ]
[ { "docid": "67ea62c603f56ebebe165f8302d920e1", "score": "0.63692826", "text": "def graficar(self):", "title": "" }, { "docid": "66dbd1ccb817c652c7178c06c7368c7d", "score": "0.62632704", "text": "def preparation(self):", "title": "" }, { "docid": "a1799f1e0af250b75b587be6b0b4cdb1", "score": "0.6118471", "text": "def _retrodiction(self):", "title": "" }, { "docid": "0a9ce4b7dcdc46c30464759380f38937", "score": "0.59944564", "text": "def mafonction_3() :", "title": "" }, { "docid": "5154cc11386e050370ed7bdb0a066356", "score": "0.5958835", "text": "def bestandlezer():", "title": "" }, { "docid": "57b883fa0a5f10a17d1a79d767736117", "score": "0.5954792", "text": "def generateData(self):", "title": "" }, { "docid": "991e0245df134584bbc6b3bcd6de3c7b", "score": "0.59012973", "text": "def preberi_pot(ukazi):", "title": "" }, { "docid": "991e0245df134584bbc6b3bcd6de3c7b", "score": "0.59012973", "text": "def preberi_pot(ukazi):", "title": "" }, { "docid": "991e0245df134584bbc6b3bcd6de3c7b", "score": "0.59012973", "text": "def preberi_pot(ukazi):", "title": "" }, { "docid": "991e0245df134584bbc6b3bcd6de3c7b", "score": "0.59012973", "text": "def preberi_pot(ukazi):", "title": "" }, { "docid": "991e0245df134584bbc6b3bcd6de3c7b", "score": "0.59012973", "text": "def preberi_pot(ukazi):", "title": "" }, { "docid": "0ba447745a2a6804f579e5170e040f50", "score": "0.58830696", "text": "def test_generation(self):\n pass", "title": "" }, { "docid": "71c31fa950f70dc20faab8a5a1e1bfab", "score": "0.58681804", "text": "def render(self):", "title": "" }, { "docid": "71c31fa950f70dc20faab8a5a1e1bfab", "score": "0.58681804", "text": "def render(self):", "title": "" }, { "docid": "ca48b5ffe4e1a27180a5970b5719692e", "score": "0.58509594", "text": "def prepare(self):", "title": "" }, { "docid": "61c84276db9b5d51dff4c83d456d1761", "score": "0.58340716", "text": "def generate(self):\n pass", "title": "" }, { "docid": "ab27aa4f0de17a2e4deecf3fc8d97275", "score": "0.5829857", "text": "def process(self):", "title": "" }, { "docid": "ce715e484866b52cd69afd03acf27b54", "score": "0.5779205", "text": "def build(self):", "title": "" }, { "docid": "10b3fd49f537277d9f5b9be39f6dc847", "score": "0.57790947", "text": "def get(self):", "title": "" }, { "docid": "afcc7539975c5fb98961524cb483550e", "score": "0.5746907", "text": "def data(self):", "title": "" }, { "docid": "afcc7539975c5fb98961524cb483550e", "score": "0.5746907", "text": "def data(self):", "title": "" }, { "docid": "56c7f14039dfc1e22bdbcae0f081fa7f", "score": "0.5742641", "text": "def l(self):", "title": "" }, { "docid": "7b492b078aeff5c225a095b8e974f587", "score": "0.5727693", "text": "def molodenskybadekasModel():\n pass", "title": "" }, { "docid": "c7e09c02d200d7c771049691470d77dd", "score": "0.5723601", "text": "def self(self):", "title": "" }, { "docid": "672848f76a1325a68855eb836f679c40", "score": "0.57042116", "text": "def gen(self):\n pass", "title": "" }, { "docid": "f4c69c4b0842de6175172b27ea886059", "score": "0.5697867", "text": "def _prepare(self):", "title": "" }, { "docid": "6e695801e2c84a4c3f606da8934f406b", "score": "0.56763965", "text": "def prepare_data(self):", "title": "" }, { "docid": "6e695801e2c84a4c3f606da8934f406b", "score": "0.56763965", "text": "def prepare_data(self):", "title": "" }, { "docid": "f8ab07a749914bc58bf008e589394ed1", "score": "0.5670824", "text": "def init_dro(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": 
"1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "1582333da9bdeb23af670559bd4212b5", "score": "0.5668779", "text": "def run(self):", "title": "" }, { "docid": "dc0bf9cda041576bf3ab78769ce46191", "score": "0.5635347", "text": "def chevette():\n pass", "title": "" }, { "docid": "310d115f4d9fe84fd00825eb205b5d7a", "score": "0.5607987", "text": "def generate_omegas(self):", "title": "" }, { "docid": "197a89a78e1025c7bca6cdbeb98e9486", "score": "0.5607948", "text": "def publicar(self):", "title": "" }, { "docid": "57b2fc5c52c9bddb1d7594745caa5c73", "score": "0.5599319", "text": "def __parametros_objeto(self, codigo:str)->None:\n # Cantidad de objetos...\n indice_de_objeto = [i for i, letra in enumerate(codigo,0) if letra == \"}\"]\n\n if len(indice_de_objeto):\n primer_objeto=codigo[slice(codigo.find(\"{\"), indice_de_objeto[0]+1)]+\"}\"\n else:\n primer_objeto=\"\"\n \n if self.comando_tikz == \"definecolor\":\n nombre_color=primer_objeto.replace(\"{\", \"\").replace(\"}\", \"\").strip()\n tipo_color=codigo[slice(indice_de_objeto[0]+1, indice_de_objeto[1]+1)].replace(\"{\", \"\").replace(\"}\", \"\").strip()\n codigo_color=codigo[slice(indice_de_objeto[1]+1, indice_de_objeto[2]+1)].replace(\"{\", \"\").replace(\"}\", \"\").strip().split(\",\")\n objeto_valor=[[], {tipo_color: codigo_color}]\n self.funcion_de_comando[1][nombre_color]=objeto_valor\n elif self.comando_tikz in list(self.comandos_de_usuario.keys()):\n valores_a_establecer=[]\n parametro = self.parametros_comando[0][0]\n valores_a_establecer.append(parametro)\n self.parametros_comando=[[], {}]\n if primer_objeto:\n primer_objeto=primer_objeto.replace(\"{\", \"\").replace(\"}\", \"\")\n valores_a_establecer.append(primer_objeto)\n for i in range(0,len(indice_de_objeto)-1):\n valor=codigo[slice(indice_de_objeto[i]+1, indice_de_objeto[i+1]+1)].replace(\"{\", \"\").replace(\"}\", \"\")\n valores_a_establecer.append(valor)\n\n cantidad_de_parametros,parametros_sin_establecer=self.__extraer_validar_parametros_a_definir()\n \n if cantidad_de_parametros == len(valores_a_establecer):\n \n #Lo extraido, se cataloga. 
Los valores que corresponde a un estilo, se guardan en \"valores_estilos\" y los que son posiciones en \"valores_posiciones\".\n valores_estilos=[]\n valores_posicion=[]\n for valor in valores_a_establecer:\n valor_resultante=self.validadores.validar_metrica(valor, no_error=True)\n if not valor_resultante:\n valores_estilos.append(valor)\n else:\n valores_posicion.append(valor)\n comandos_sin_establecer=deepcopy(self.comandos_de_usuario)\n # parametros_sin_establecer=> [[[\"#1\",\"#2\"],[\"#1\",\"#2\"]],[[\"#1\",\"#2\"],[\"#1\",\"#2\"]]]\n for count_exterior,parametro in enumerate(parametros_sin_establecer,0):\n # parametro=> [[\"#2\"],[\"#2\"]]\n for indice_parametros,conjunto_parametro in enumerate(parametro,0):\n # conjunto_parametro=>[\"#2\"]\n # ¿Tiene uno (0) o mas de un parametro en los mismos comandos de estilo o de posicion?\n cant_param=len(set(conjunto_parametro))-1\n if cant_param >= 0:\n count_interior=0\n # conjunto_parametro=>[\"#1\",\"#2\"]\n for parametro in list(set(conjunto_parametro)):\n if count_exterior == 0: # Si son parametros a establecer en estilos...\n # Cantidad de parametros a definir...\n count_interior_max=len(valores_estilos)-1\n # for e,_ in enumerate(comandos_sin_establecer[comando]):\n # {'top color': '#1!30!white', 'bottom color': '#1!70!black', ' line width ': ' 1pt', 'rounded corners': '2ex', 'yshift': '-0.3cm', 'xshift': '0.2cm'}\n comandos_sin_establecer_object=comandos_sin_establecer[self.comando_tikz][indice_parametros][1][1]\n for k in comandos_sin_establecer_object:\n comandos_sin_establecer[self.comando_tikz][indice_parametros][1][1][k]=comandos_sin_establecer_object[k].replace(\n parametro, valores_estilos[count_interior])\n if count_interior_max > count_interior:\n count_interior += 1\n else:\n count_interior=0\n elif count_exterior == 1: # Si son parametros a establecer en posiciones...\n # Cantidad de parametros a definir...\n count_interior_max=len(valores_posicion)-1\n # [['#1', '100pt'], ['10pt', '10pt']]\n comandos_sin_establecer_arr=comandos_sin_establecer[self.comando_tikz][indice_parametros][2]\n for e in range(len(comandos_sin_establecer_arr)):\n arr_actualizado=[\n comando for comando in comandos_sin_establecer_arr[e]]\n get_indexs_a_actualizar=lambda x, xs: [\n i for (y, i) in zip(xs, range(len(xs))) if x == y]\n indexs_a_actualizar=get_indexs_a_actualizar(\n parametro, arr_actualizado)\n if len(indexs_a_actualizar):\n for index in indexs_a_actualizar:\n arr_actualizado[index]=arr_actualizado[index].replace(\n parametro, valores_posicion[count_interior])\n comandos_sin_establecer[self.comando_tikz][indice_parametros][2][e]=list(\n arr_actualizado)\n if count_interior_max > count_interior:\n count_interior += 1\n else:\n count_interior=0\n indice_parametros += 1\n count_exterior += 1\n\n comandos_establecidos = comandos_sin_establecer\n # Si hay mas de 2 comandos de dibujado invocados...\n if len(self.comandos_tikz_validados)-1 >= 0:\n # Si se desea añadir los comandos invocados al \"ejecutar\" del comando \"animarPytikz\" o \"guardarPytikz\", se añadira con todo y nombre del nomando personalizado...\n if self.comandos_tikz_validados[len(self.comandos_tikz_validados)-1][0] == \"animarPytikz\" or self.comandos_tikz_validados[len(self.comandos_tikz_validados)-1][0] == \"guardarPytikz\":\n comandos={self.comando_tikz: comandos_establecidos[self.comando_tikz]}\n if not \"ejecutar\" in self.comandos_tikz_validados[len(self.comandos_tikz_validados)-1][1][1]:\n self.funcion_de_comando[1][\"ejecutar\"]=[comandos]\n # REEMPLAZAR VALORES 
VACIÓS DEL COMANDO ANIMARPYTIKZ\n self.comandos_tikz_validados[len(self.comandos_tikz_validados)-1][1]=self.funcion_de_comando\n else:\n # AÑADIR MÁS VALORES ANIDADOS DEL COMANDO ANIMARPYTIKZ\n self.comandos_tikz_validados[len(self.comandos_tikz_validados)-1][1][1][\"ejecutar\"].append(comandos)\n # Si no entonces solo añadir...\n else:\n for comando_establecido in comandos_establecidos[self.comando_tikz]:\n self.comandos_tikz_validados.append(comando_establecido)\n # Si no los hay...\n else:\n for comando_establecido in comandos_establecidos[self.comando_tikz]:\n self.comandos_tikz_validados.append(comando_establecido)\n else:\n self.mensajes_de_error.append(\"Error en la linea \"+str(self.linea_de_codigo)+\": La cantidad de valores a colocar \"+str(len(valores_a_establecer))+\" es diferente a la cantidad de parametros del comando lo cual son \"+str(cantidad_de_parametros))", "title": "" }, { "docid": "a76cc57cffa174c961c45b7257dd2f4b", "score": "0.5595947", "text": "def manque( self ):\n pass", "title": "" }, { "docid": "18c56a2acf19d418a6a54a1a4c5bb9de", "score": "0.55907005", "text": "def a_realization(self):", "title": "" }, { "docid": "56f6727cd2bd5e4d6266b6d44ced0a91", "score": "0.5587361", "text": "def modified(self):", "title": "" }, { "docid": "617468716af6cf4433bdecf42d18d866", "score": "0.5576582", "text": "def generer_les_vehicules(compteur_vehicules, liste_voie):\r\n #On initialise le nombre de voies occupees a zero\r\n voie_occupee = 0\r\n #On initialise la disponibilite de toute les voies a libre.\r\n for voie in liste_voie:\r\n voie.libre = True\r\n \r\n #Pour chaque voie, pour chacun des vehicules\r\n for vehi in voie.liste_vehicules:\r\n#COEF VEHI ?\r\n #S'il existe un vehicule dont la position est inferieure a la \r\n #distance de securite (ie proche du metre 0, ou le vehicule serait\r\n #cree)\r\n if vehi.position < 0.6*d.vitesse_limite:\r\n #Alors la voie n'est pas libre\r\n voie.libre = False\r\n #Mise a jour du nombre de voies occupees\r\n voie_occupee += 1\r\n \r\n #On compte le nombre de voies disponibles\r\n nb_voies_dispo = d.nb_voies - voie_occupee\r\n \r\n #S'il existe au moins une voie libre\r\n if nb_voies_dispo != 0:\r\n #On definit la probabilite de creation du vehicule sur les voies libres\r\n #en fonction de leur nombre, et du debit en entree demande\r\n probabilite = d.debit * d.pas / nb_voies_dispo\r\n #On traite le cas ou le debit demande est trop eleve en ramenant la \r\n #somme des probabilites a 1\r\n if d.debit * d.pas > 1:\r\n probabilite = 1 / nb_voies_dispo\r\n #S'il n'y a pas de voie libre, la probabilite de creer des vehicules est \r\n #nulle\r\n if nb_voies_dispo == 0:\r\n probabilite = 0\r\n \r\n \r\n #On initalise la liste des vehicules crees a l'instant donne \r\n liste_vehicules_crees = []\r\n #Si la probabilite est non nulle, on cree des vehicules (avec une certaine\r\n #probabilite)\r\n if probabilite != 0:\r\n #On parcourt toutes les voies\r\n for voie in liste_voie:\r\n #Si la voie est libre\r\n if voie.libre:\r\n #On cree (ou pas) un vehicule, aleatoirement, selon la \r\n #probabilite definie, et a condition que le nombre de vehicules\r\n #deja cree soit inferieur au nombre total de vehicules voulus\r\n alea = random()\r\n if alea <= probabilite and compteur_vehicules != d.nb_vehicules_voulu:\r\n nouveau_vehicule = creer_un_vehicule(compteur_vehicules, voie)\r\n#DISTANCE A CREATION \r\n# id_min = -1\r\n# min_pos = 1200\r\n# for vehi in nouveau_vehicule.voie.liste_vehicules:\r\n# if vehi.nom != nouveau_vehicule.nom and min_pos > 
vehi.position:\r\n# min_pos = vehi.position\r\n# id_min = vehi.nom\r\n# print(\"cree id {} : pos_min= {}\\ndevant id {}\\nvoie libre: {}\".format(nouveau_vehicule.nom, min_pos, id_min, nouveau_vehicule.voie.libre))\r\n \r\n #On met a jour le nombre de vehicules crees\r\n compteur_vehicules += 1\r\n #On stocke l'objet cree dans une liste\r\n liste_vehicules_crees.append(nouveau_vehicule)\r\n \r\n #On renvoie la liste et le nombre total de vehicules crees\r\n return (liste_vehicules_crees, compteur_vehicules)", "title": "" }, { "docid": "b5aff8fd107f9e10dce29fd978679d30", "score": "0.5574194", "text": "def zapisi_pot(pot):", "title": "" }, { "docid": "b5aff8fd107f9e10dce29fd978679d30", "score": "0.5574194", "text": "def zapisi_pot(pot):", "title": "" }, { "docid": "b5aff8fd107f9e10dce29fd978679d30", "score": "0.5574194", "text": "def zapisi_pot(pot):", "title": "" }, { "docid": "b5aff8fd107f9e10dce29fd978679d30", "score": "0.5574194", "text": "def zapisi_pot(pot):", "title": "" }, { "docid": "b5aff8fd107f9e10dce29fd978679d30", "score": "0.5574194", "text": "def zapisi_pot(pot):", "title": "" }, { "docid": "c3c7360b10c1c92483c14a96eb115cbe", "score": "0.5567613", "text": "def sub_procesing(self):\n # implementujemy tu całość\n pass", "title": "" }, { "docid": "dae460610e17515fa366b35961bb0776", "score": "0.5560887", "text": "def iterate(self):", "title": "" }, { "docid": "3330cf56fe1682e1a28c2cf6f7620a7d", "score": "0.55512846", "text": "def need_prepare(self):", "title": "" }, { "docid": "3330cf56fe1682e1a28c2cf6f7620a7d", "score": "0.55512846", "text": "def need_prepare(self):", "title": "" }, { "docid": "18cbaf5a961cc9d4105bc6222307ae79", "score": "0.5543242", "text": "def generate_sent(self):\n # WORK HERE!!", "title": "" }, { "docid": "4992162cd17930a9f7ca8013b2aa1161", "score": "0.554261", "text": "def cargar(self):\n return None", "title": "" }, { "docid": "c145a8b810d4ffec923008dd5942039f", "score": "0.55241084", "text": "def pre_put(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5515631", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5515631", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5515631", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5515631", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.5515631", "text": "def execute(self):", "title": "" }, { "docid": "cc48c3f6fc40697d90183fc0050d19e4", "score": "0.5511488", "text": "def demo_hand_written():", "title": "" }, { "docid": "276c3c0ec36bfd9156b8a0cad5cd1bef", "score": "0.5498766", "text": "def data(self):\n ...", "title": "" }, { "docid": "4fd6ebf38cba4ff565a2c224c65a5933", "score": "0.5493947", "text": "def prepare_result(self):", "title": "" }, { "docid": "3e76fdfbf262980b08661f1665eb8953", "score": "0.54896486", "text": "def creer_un_vehicule(compteur_vehicules, voie):\r\n #On initialise les differents attributs necessaires pour creer un objet\r\n #vehicule\r\n \r\n #Le nom du vehicule est un entier unique (on choisit l'ordre dans lequel le \r\n #vehicule a ete cree)\r\n nom = compteur_vehicules\r\n\r\n#PROBA CUMULEE : ORDONNER ? 
\r\n #On associe un type de conducteur au vehicule aleatoirement, avec la \r\n #probabilite definie par le scenario choisi\r\n alea = random()\r\n proba_cumul = 0\r\n i = 0\r\n while alea > proba_cumul:\r\n proba_cumul += p.PART_CONDUCT[d.scenario][i][1]\r\n i +=1\r\n conducteur = p.PART_CONDUCT[d.scenario][i-1][0]\r\n \r\n #La vitesse est initialisee a la limitation de vitesse ponderee par le \r\n #coefficient de vitesse associee au type de conducteur (un conducteur plus \r\n #prudent ira moins vite)\r\n vitesse = d.vitesse_limite * conducteur.coef_vitesse\r\n\r\n #La voie du vehicule est celle entree en argument\r\n voie_vehi = voie\r\n \r\n #L'attribut prend_la_sortie vaut True ou False aléatoirement, avec une \r\n #probabilite de valoir True d'autant plus elevee que le vehicule est proche\r\n #de la voie la plus a droite\r\n if d.nb_voies != 1:\r\n proba_sortie = (d.nb_voies - voie.id_voie) / ((d.nb_voies*(d.nb_voies + 1))/2)\r\n else:\r\n proba_sortie = 1/5\r\n alea = random()\r\n prend_la_sortie = (alea <= proba_sortie)\r\n \r\n #On associe un type de vehicule au vehicule aleatoirement, avec la \r\n #probabilite definie par le scenario choisi\r\n alea = random()\r\n proba_cumul = 0\r\n i = 0\r\n while alea > proba_cumul:\r\n proba_cumul += p.PART_VEHICULE[d.scenario][i][1]\r\n i +=1\r\n \r\n \r\n #On cree le vehicule\r\n vehi = p.PART_VEHICULE[d.scenario][i-1][0](nom, conducteur, vitesse, prend_la_sortie, voie_vehi)\r\n \r\n #On met a jour la liste de vehicules de la voie sur laquelle le vehicule est cree\r\n voie.liste_vehicules.append(vehi)\r\n \r\n return vehi", "title": "" }, { "docid": "ca28fb7f3dab18ee437b7b3dea91ac3c", "score": "0.5488465", "text": "def z(self):\r\n pass", "title": "" }, { "docid": "dedf9896e518ed0b87deda08cd4d1ceb", "score": "0.54663324", "text": "def guardar(self):\n pass", "title": "" }, { "docid": "5475c45d6fe784a5c4c2fe247859073c", "score": "0.54586595", "text": "def __int__(self):\n self.res = []", "title": "" }, { "docid": "290b0039242b63142ba26ed0660d1689", "score": "0.5447833", "text": "def update(self):", "title": "" }, { "docid": "290b0039242b63142ba26ed0660d1689", "score": "0.5447833", "text": "def update(self):", "title": "" }, { "docid": "290b0039242b63142ba26ed0660d1689", "score": "0.5447833", "text": "def update(self):", "title": "" }, { "docid": "290b0039242b63142ba26ed0660d1689", "score": "0.5447833", "text": "def update(self):", "title": "" }, { "docid": "290b0039242b63142ba26ed0660d1689", "score": "0.5447833", "text": "def update(self):", "title": "" }, { "docid": "290b0039242b63142ba26ed0660d1689", "score": "0.5447833", "text": "def update(self):", "title": "" }, { "docid": "4d18f588c454aa3a73779c71b7f37538", "score": "0.54354346", "text": "def __init__(self, nom=\"sans nom\"):\n self.geoide = Geoide()\n self.nom = nom\n self.spritesJoueur = [] #Pas d'objets sur la planète\n self.spritesNonJoueur = [] #Pas d'objets sur la planète\n self.joueurs = []\n \n self.distanceSoleil = general.configuration.getConfiguration(\"planete\", \"Univers\", \"distanceSoleil\", \"10.0\", float)\n self.vitesseSoleil = general.configuration.getConfiguration(\"planete\", \"Univers\", \"vitesseSoleil\", \"1.0\", float)\n self.angleSoleil = 0.0\n self.seuilSauvegardeAuto = general.configuration.getConfiguration(\"affichage\", \"General\", \"seuilSauvegardeAuto\", \"600.0\", float)\n \n self.fini = False\n #On calcule la navigation pour l'intelligence artificielle\n self.aiNavigation = ai.AINavigation()\n taskMgr.add(self.pingPlanete, 
\"BouclePrincipale-planete\")", "title": "" }, { "docid": "550f4f5684d61aa66941bf643ede0d6b", "score": "0.54145855", "text": "def __repr__(self) :\n\n #Ne pas oublier le robot !!!!\n affichage=\"\\n\\n\"\n for ligne in self.grille :\n if ligne == self.robot_joueur.coordinates[0] :\n new_line =self.grille[ligne][:self.robot_joueur.coordinates[1]]+\"X\"+self.grille[ligne][self.robot_joueur.coordinates[1]+1:]\n affichage+=new_line+\"\\n\"\n else :\n affichage+=self.grille[ligne]+\"\\n\"\n ligne +=1\n return (affichage)", "title": "" }, { "docid": "21fc08ee32dd9c82fdb6c8f6d3a21d3d", "score": "0.5408452", "text": "def trick(self):\n pass", "title": "" }, { "docid": "500d7a468a758967e74e249c68e26fc8", "score": "0.5401841", "text": "def generar_pedido(id_canasta: int):\n #Importar clase Canasta\n from .canasta import Canasta\n #Importar clase ProductoPorCanasta\n from .producto_por_canasta import ProductoPorCanasta\n #Importar clase ProductoPorPedido\n from .producto_por_pedido import ProductoPorPedido\n #Creación de objeto Canasta\n canasta = Canasta.obtener(id_canasta)\n productos_por_canasta = canasta.buscar_productos_por_canasta()\n nuevo_pedido = Pedido()\n nuevo_pedido.usuario = Usuario.obtener(id_canasta)\n #Todo pedido recién creado tiene el estado \"Nuevo\"\n nuevo_pedido.estado = \"En Progreso (Pago y dirección pendientes)\"\n #La tarifa de envío es fija\n nuevo_pedido.tarifa_de_envio = 15\n #Valores por default\n nuevo_pedido.repartidor = \"\"\n nuevo_pedido.metodo_de_pago = \"\"\n nuevo_pedido.direccion_de_envio = \"\"\n nuevo_pedido.fecha_de_pago = datetime(1,1,1)\n nuevo_pedido.fecha_de_envio = datetime(1,1,1)\n nuevo_pedido.fecha_de_entrega = datetime(1,1,1)\n #Se crea un nuevo pedido\n nuevo_pedido.crear()\n for producto_por_canasta in productos_por_canasta:\n nuevo_producto_por_pedido = ProductoPorPedido()\n nuevo_producto_por_pedido.producto = producto_por_canasta.producto\n nuevo_producto_por_pedido.pedido = nuevo_pedido\n nuevo_producto_por_pedido.cantidad = producto_por_canasta.cantidad\n nuevo_producto_por_pedido.crear()\n producto = nuevo_producto_por_pedido.producto\n producto.stock -= 1\n producto_por_canasta.borrar()\n #producto.actualizar()\n return nuevo_pedido", "title": "" }, { "docid": "811b34112fb0f46e2610ea2f5ac1e399", "score": "0.5401579", "text": "def preorder(self):", "title": "" }, { "docid": "b4900261c9a8f9487c959e7bc8f9680e", "score": "0.53967506", "text": "def init(self):", "title": "" }, { "docid": "b4900261c9a8f9487c959e7bc8f9680e", "score": "0.53967506", "text": "def init(self):", "title": "" }, { "docid": "b4900261c9a8f9487c959e7bc8f9680e", "score": "0.53967506", "text": "def init(self):", "title": "" }, { "docid": "b4900261c9a8f9487c959e7bc8f9680e", "score": "0.53967506", "text": "def init(self):", "title": "" }, { "docid": "f3250829bcefd8564d9180ccef5cf7fc", "score": "0.5391679", "text": "def huh(self):\r\n return", "title": "" }, { "docid": "854c90dcb73259cbccc90c24d4a0f740", "score": "0.53884363", "text": "def neuePartieSchwarz(self):\n self.neuePartie()", "title": "" }, { "docid": "9fcf92c6ef23f06591f775c50095d383", "score": "0.5386708", "text": "def roc(self):\n pass", "title": "" }, { "docid": "ca82b2c81f9bbe96d45c7fa58957a27e", "score": "0.53848046", "text": "def load(self):", "title": "" }, { "docid": "ca82b2c81f9bbe96d45c7fa58957a27e", "score": "0.53848046", "text": "def load(self):", "title": "" }, { "docid": "0a2d6b14ed4c2509cf53a09868f0f0b5", "score": "0.5382664", "text": "def _modified(self):", "title": "" }, { "docid": 
"9619465465ff67626127611515db5bba", "score": "0.5378249", "text": "def _update(self):", "title": "" }, { "docid": "ae7a6a241aadf2189cf677f8a3860604", "score": "0.5376579", "text": "def procedure(self):", "title": "" }, { "docid": "c82eb207ce185f1de1b24389ee21542a", "score": "0.53758186", "text": "def write(self):", "title": "" }, { "docid": "c82eb207ce185f1de1b24389ee21542a", "score": "0.53758186", "text": "def write(self):", "title": "" } ]
e90564c8c32db5ea742e1378643912c8
Tokenization/string cleaning for all datasets except for SST.
[ { "docid": "2c7da3f2b0c6c61cde56150c7bad40da", "score": "0.0", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),\\+!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\+\", \" \\+ \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" } ]
[ { "docid": "b83641c6804b9e83900942ae4337486e", "score": "0.6654124", "text": "def cleanup_tokens(self):\n raise NotImplementedError", "title": "" }, { "docid": "5e9fe54b86bfae9f8b800afed3853334", "score": "0.66179585", "text": "def clean(str_input_dataset):\n\tstr_clean_dataset = str_input_dataset.lower()\n\tstr_clean_dataset = str_clean_dataset.decode(encoding=\"ascii\", errors=\"ignore\")\n\treplacements = {\",\": \"\",\n\t\t\t\t\t\":\": \"\",\n\t\t\t\t\t\"!\": \"\",\n\t\t\t\t\t\".\": \"\",\n\t\t\t\t\t\";\": \"\",\n\t\t\t\t\t\"?\": \"\",\n\t\t\t\t\t\"_\": \"\",\n\t\t\t\t\t\")\": \"\",\n\t\t\t\t\t\"(\": \"\",\n\t\t\t\t\t\"--\": \" \"}\n\n\tstr_clean_dataset = multi_replace(str_clean_dataset, replacements)\n\n\treturn str_clean_dataset", "title": "" }, { "docid": "0ecdbdba085bfe1c885a75c3969af273", "score": "0.6290306", "text": "def clean_data(self, corpus):\n corpus = [str(text).split() for text in corpus]\n corpus = [\" \".join([re.sub(r'\\W', '', word.lower()) for word in text]) for text in corpus]\n return corpus", "title": "" }, { "docid": "5dc93c6aa7b5a4e7b25a33c0c2fb3f8d", "score": "0.6283477", "text": "def preliminary_cleaning(self):\n # removes brackets, parentheses, commas, numbers, semicolons, etc.\n # no removal of .!? due to sentence tokenizing later on\n re_pattern = re.compile('\\[|\\]|:|\\(|\\)|;|,|\\'|\"|[0-9]|\\{|\\}|/|\\/')\n s = re.sub(re_pattern, '', self.article_text)\n\n # removes unicode chars left\n cleaned = s.replace('\\xa0', ' ')\n cleaned = cleaned.replace('\\\\xa', ' ')\n return cleaned", "title": "" }, { "docid": "7985755349180128ae002e16866d6f38", "score": "0.6240884", "text": "def clean(self, minlen=3):\n\n \"\"\" Remove symbols, sentence separators and split sentences. \"\"\"\n\n clean=re.sub('<\\/?[a-z]+>|\\n|\\t|\\r', '', ' '.join(self.raw))\n clean=clean.split('.')\n\n \"\"\" Tokenize. \"\"\"\n \n tkz = RegexpTokenizer(r'\\w+')\n clean=[tkz.tokenize(sentence.lower()) for sentence in clean]\n\n \"\"\" Lemmatize. \"\"\"\n\n lemmatizer = WordNetLemmatizer()\n clean=[[lemmatizer.lemmatize(w) for w in sentence] for sentence in clean]\n\n \"\"\" Stem, removed. \"\"\"\n\n #stemmer = PorterStemmer()\n #text=[[stemmer.stem(w) for w in sentence] for sentence in text]\n\n \"\"\" Filter stopwords, numbers and short words. 
\"\"\"\n\n self.raw=[[w for w in sentence if w not in stopwords.words('english') and not w.isdigit() and len(w)>=minlen] for sentence in clean]", "title": "" }, { "docid": "0a358bf4c29590ac2addc543056149e5", "score": "0.6144344", "text": "def preprocess(list_of_strings):\n clean_strings = [x for x in list_of_strings if x not in [',', ':', '.', '?']]\n clean_strings = [x for x in clean_strings if x not in stopwords.words(\"english\")]\n return clean_strings", "title": "" }, { "docid": "e0a2d5c997a1a1196e8fd2b382ed3da5", "score": "0.6091173", "text": "def clean_sents(self):\n str_tokenize = nltk.sent_tokenize(self.article_text)\n str_remove = [sent for sent in str_tokenize if \"Mehr zum Thema\" not in sent]\n cleaned_string = ' '.join([str(sent) for sent in str_remove])\n sentences = re.sub(r'[^\\w\\s]', '', cleaned_string)\n return sentences", "title": "" }, { "docid": "7e4e25adcefe744d13d4dbe8dfad3a37", "score": "0.6082094", "text": "def _clean(self, text):\n # self.function_name and cls.function_name both work\n token_funs = (self._convert_user_name, self._convert_url, self._convert_number,\n self._convert_duplicate_characters, self._convert_lemmatization)\n # _convert_punctuation must be after _convert_negation\n string_funs = (self._convert_lower_case, self._convert_negation, self._convert_punctuation)\n # text = self.text\n # self.text, text = tee(self.text) # keep generator\n for ts in text: # ts = text_string\n token_list = self._tokenizer(ts) # return list\n token_list = [nested_fun(token_funs, tk) for tk in token_list]\n text_string = ' '.join(token_list) # return string\n yield nested_fun(string_funs, text_string)", "title": "" }, { "docid": "c1c4776e2eee461bacf7664269345a1b", "score": "0.6049459", "text": "def preprocess(self, dirty_text):\n data = self.__convert_lower_case(dirty_text)\n data = self.__remove_punctuation(dirty_text) #remove comma seperately\n data = self.__remove_apostrophe(dirty_text)\n data = self.__remove_stop_words(dirty_text)\n data = self.__convert_numbers(dirty_text)\n #data = stemming(dirty_text)\n data = self.__remove_punctuation(dirty_text)\n data = self.__convert_numbers(dirty_text)\n #data = stemming(dirty_text) #needed again as we need to stem the words\n data = self.__remove_punctuation(dirty_text) #needed again as num2word is giving few hypens and commas fourty-one\n return data", "title": "" }, { "docid": "2cb2d609de605f464dcd2421a289fc8d", "score": "0.6044335", "text": "def clean_stem(corpus):\n sw = set(stopwords.words(\"english\"))\n tokenizer = RegexpTokenizer(\"[\\w']+\")\n lemma = WordNetLemmatizer()\n cleaned = [\" \".join([lemma.lemmatize(word.lower()) for word in tokenizer.tokenize(doc)\n if regex(word) == False and word.lower() not in sw])\n for doc in corpus]\n return cleaned", "title": "" }, { "docid": "096e6ab1ec7e490aaf680bd584c66f70", "score": "0.59960216", "text": "def clean(self, string):\n\t\tpuncs = [\"!\",\";\",\",\",\":\",\".\",\"?\"]\n\t\tfor punc in puncs: \n\t\t\tstring = string.replace(punc,\" \") \n\t\t\n\t\tpuncs = [\"(\",\")\",\"[\",\"]\",\"{\",\"}\",\"'\",'\"',\"<\",\">\"]\n\t\tfor punc in puncs: \n\t\t\tstring = string.replace(punc,\"\") \n\t\tstring = string.replace(\"\\n\",\" \")\n\t\tstring = string.replace(\"\\s+\",\" \")\n\t\tstring = string.lower()\n\t\treturn string", "title": "" }, { "docid": "9b97f64ba2e490b28497e41105586cfd", "score": "0.5924688", "text": "def clean_data(cls, data):\n clean_data = data.upper()\n clean_data = clean_data.replace('X', '*')\n clean_data = clean_data.replace('^', '**')\n 
clean_data = clean_data.replace('THE', '')\n clean_data = clean_data.strip()\n return clean_data", "title": "" }, { "docid": "526df5a1c3590ff11e41239e02356b6f", "score": "0.5899191", "text": "def clean_token(token):\n\n # SECURITY CRITICAL:\n # ##########################################################################################################\n # Do not add any other characters, including but not limited to [' * & ! @ ( ) /\\ | + ;] to the whitelist. All\n # of these characters have significance in a tsvector query, and there's no guarantee that psycopg2 will\n # properly escape them\n\n whitelist = '-_.$#%'\n\n result = ''\n\n for char in token:\n\n if (unicodedata.category(char) != 'Po') or (char in whitelist):\n result += char\n\n return result", "title": "" }, { "docid": "ee87542d50d0b9a1b29e673372617720", "score": "0.585961", "text": "def __text_cleaning(self, text):\n deadlist = ['mit', 'xxl', 'xxxl', 'uvp', 'xcm', 'grs', 'grm', 'grl',\n 'tlg', 'xxcm', 'xcm']\n transfer = {\n ord('ä'): 'ae',\n ord('ö'): 'oe',\n ord('ü'): 'ue',\n ord('ß'): 'ss'\n }\n # tokenize the text string\n tokens = word_tokenize(text)\n\n # convert to lower case\n tokens = [w.lower() for w in tokens]\n\n # transfer German umlauts into vowels\n tokens = [w.translate(transfer) for w in tokens]\n\n # remove punctuation and digits from each word\n table = str.maketrans('', '', string.punctuation + string.digits)\n stripped = [w.translate(table) for w in tokens]\n\n # remove remaining tokens that are not alphabetic\n words = [word for word in stripped if word.isalpha()]\n\n # reduce words to their stemms\n porter = PorterStemmer()\n stemmed = list(set([porter.stem(word) for word in words]))\n\n # filter out\n # stop words,\n # words that are contained in the deadlist,\n # words that are shorter than 3 characters and\n # words which are assembled only from one and\n # the same identical character\n stop_words = set(stopwords.words(['english', 'german']) + deadlist)\n words = [w for w in stemmed if w not in stop_words\n and len(w) > 2 and len(Counter(w)) > 1]\n\n # et voilà\n return words", "title": "" }, { "docid": "76a4e67f01d949feb8f8bf51d68641df", "score": "0.58579254", "text": "def clean(dictionary):\n stoplist = STOPWORDS\n stop_ids = [\n dictionary.token2id[stopword]\n for stopword in stoplist if stopword in dictionary.token2id\n ]\n hapax_ids = [\n tokenid\n for tokenid, docfreq in dictionary.dfs.iteritems() if docfreq == 1\n ]\n unwanted_tokens = stop_ids + hapax_ids\n dictionary.filter_tokens(unwanted_tokens)\n dictionary.compactify()\n return dictionary", "title": "" }, { "docid": "f8e774578e4aa0835c7937581ad837bd", "score": "0.5851542", "text": "def test_tokenizing_before_doesnt_change_features2(self):\n sents = self.data_df[self.text_col].values\n tok_ext = BOWExtractor(self.data_df, self.col_names)\n tokenized_sents = map(lambda sent: ' '.join(stem_tokenize(sent)), sents)\n notok_ext = BOWExtractor(self.data_df, self.col_names)\n\n for i in range(len(sents)):\n self.assertEquals(notok_ext.notok_vectorizer.transform([tokenized_sents[i]]).sum(),\n tok_ext.notok_vectorizer.transform([tokenized_sents[i]]).sum())\n\n self.assertEqual(tok_ext.transform(self.data_df, self.col_names).sum(),\n notok_ext.transform(self.data_df, self.col_names).sum())", "title": "" }, { "docid": "ece66433e495ebb214d1e4744e7bf492", "score": "0.5850434", "text": "def filter_tokens(self, bad_ids=None, good_ids=None):\n if bad_ids is not None:\n bad_ids = set(bad_ids)\n Word.objects.filter(sequence__in=bad_ids).delete()\n if 
good_ids is not None: \n good_ids = set(good_ids)\n Word.objects.exclude(sequence__in=good_ids).delete()\n \n self.compactify()", "title": "" }, { "docid": "14bf51fe8d56fe670d7f0306728ee072", "score": "0.5848931", "text": "def clean_text(self):\n self._clean_brackets()\n self._clean_more_on_genius()\n self._clean_header_lyrics()\n self._clean_letters()\n self._clean_spaces()", "title": "" }, { "docid": "57bd72fa0d4255050b07879edcc95b70", "score": "0.5840743", "text": "def cleanData(df):\n df['name'] = df['name'].str.lower().str.strip()\n df['name'] = df['name'].str.split(\" \")\n df['name'] = df['name'].apply(nmFilter)\n\n for i in TO_DELETE:\n df['name'] = df['name'].str.replace(i, '')\n\n df['name'] = df['name'].str.strip()", "title": "" }, { "docid": "654be27026329cdab17275c7853d0e8b", "score": "0.5817936", "text": "def _clean(data):\n first_clean = [d.replace(';', ' ').replace(NEWLINE, EMPTY_STRING).replace(TAB, EMPTY_STRING).strip() for d in data]\n return [re.sub(' {2,}', ' ', elem) for elem in first_clean]", "title": "" }, { "docid": "70d82ee6d677a0d7e652123a71c3cad7", "score": "0.58177894", "text": "def clean_tokenize_remove_stopwords_quora(p_df, test_set=False):\n if not test_set:\n p_df = p_df.withColumnRenamed(\"is_duplicate\", \"label\")\n p_df = p_df.withColumn(\"label\", p_df[\"label\"].cast(ShortType()))\n \n p_df = p_df.fillna(\"\", [\"question1\", \"question2\"])\n if not test_set:\n p_df = p_df.fillna(0, [\"label\"])\n p_df = tokenize(p_df, \"question1\", \"question1_words\")\n p_df = remove_stop_words(p_df, \"question1_words\", \"question1_meaningful_words\")\n p_df = tokenize(p_df, \"question2\", \"question2_words\")\n p_df = remove_stop_words(p_df, \"question2_words\", \"question2_meaningful_words\")\n return p_df", "title": "" }, { "docid": "2de90c9d5894b611f1ed0c6cb0625efe", "score": "0.5800533", "text": "def clean(data):\n logging.info('Cleaning samples')\n\n # Extract raw samples to iterate over\n text = data['text']\n\n # Clean each sample individually\n for i, sample in tqdm.tqdm(text.iteritems(), total=text.size):\n # Keep only alphanumeric characters\n sample = re.sub(r'(?<! )(?=[^\\s\\w])|(?<=[^\\s\\w])(?! 
)', r' ', sample)\n # Convert to lowercase\n sample = sample.lower()\n # Strip title sequences \"LONDON (Reuters) - \"\n if len(sample.split('-', 1)[0]) < 50:\n sample = sample.split('-', 1)[-1]\n\n # Store cleaned samples in DataFrame\n data.at[i, 'clean'] = sample", "title": "" }, { "docid": "ea7a44230c122a7d92145b5a596bebb0", "score": "0.578092", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()", "title": "" }, { "docid": "e376c5460873997508d149343723a6f9", "score": "0.5779428", "text": "def _clear_special_tokens(self, words):\n return [word for word in words if word not in set([\"PAD\",\"UNK\",\"EOS\",\"SOS\"])]", "title": "" }, { "docid": "a93ed43354dd12b17856e71143f2fef4", "score": "0.577941", "text": "def _clean_dataset_name(dataset: str) -> str:\n clean_dataset = dataset.lower()\n clean_dataset = re.sub(r\"^\\d+.\", \"\", clean_dataset)\n clean_dataset = re.sub(r\"\\s+on.*$\", \"\", clean_dataset)\n clean_dataset = re.sub(r\"\\s+\", \"_\", clean_dataset)\n clean_dataset = re.sub(r\"&\", \"\", clean_dataset)\n clean_dataset = clean_dataset.strip(\"_\")\n # TODO(amr): should we assert the result matches an element in `vkdcon.DATASETS`?\n return clean_dataset", "title": "" }, { "docid": "0a6e5512a71b3fcc49bcb81cf4b136e5", "score": "0.57674587", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()", "title": "" }, { "docid": "1f5f7627358d0c897dfe312060fc662c", "score": "0.5761492", "text": "def clean_token(article_df: pd.DataFrame, qa_df: pd.DataFrame):\n\n\tdef clean(row, token_col, flag_col):\n\t\ttokens_cleaned = []\n\t\tflags_cleaned = []\n\t\tfor token, flag in zip(row[token_col], row[flag_col]):\n\t\t\ttoken = token.strip()\n\t\t\tif token != '':\n\t\t\t\ttokens_cleaned.append(token)\n\t\t\t\tflags_cleaned.append(flag)\n\n\t\trow[token_col] = tokens_cleaned\n\t\trow[flag_col] = flags_cleaned\n\n\t\treturn row\n\n\tarticle_df = article_df.apply(lambda row: clean(row, 'article_tokens', 'article_flags'), axis=1)\n\tqa_df = qa_df.apply(lambda row: clean(row, 'question_tokens', 'question_flags'), axis=1)\n\tif 'answer_tokens' in qa_df.columns:\n\t\tqa_df = qa_df.apply(lambda row: clean(row, 'answer_tokens', 'answer_flags'), axis=1)\n\n\treturn article_df, qa_df", "title": "" }, { "docid": "08a405b7288955edec16ff08140542a9", "score": "0.5757534", "text": "def generic_clean(self):\n self.results = re.sub('<em>','',self.results)\n self.results = re.sub('<b>','',self.results)\n self.results = re.sub('</b>','',self.results)\n self.results = re.sub('</em>','',self.results)\n self.results = re.sub('%2f',' ',self.results)\n self.results = re.sub('%3a',' ',self.results)\n self.results = re.sub('<strong>','',self.results)\n self.results = re.sub('</strong>','',self.results)\n self.results = re.sub('<wbr>','',self.results)\n self.results = re.sub('</wbr>','',self.results)\n for character in ('>',':','=','<','/','\\\\',';','&','%3A','%3D','%3C'):\n self.results = str.replace(self.results,character,' ')", "title": "" }, { "docid": "ad1df5bd103ee710567d16c4f91b1b58", "score": "0.57553315", "text": "def sanitiseData(data):\n splitted = data.split(\" \")\n removedStopWord = [removeNonAlphabet.sub('', word).lower()\n for word in splitted if word != \"\" and not any(i.isdigit() for i in word)]\n\n return removedStopWord", "title": "" }, { "docid": 
"cf33c47d2de0b67876e0e04e60f91e8f", "score": "0.5736731", "text": "def _cleaning(self, doc): \n return [\n self._stemmer.stem(wordFiltered.lower()) for wordFiltered in word_tokenize(doc) if wordFiltered not in self._stop_words and wordFiltered.isalpha()\n ]", "title": "" }, { "docid": "cb0e47643f5ac7a7b91973d82021ca0a", "score": "0.5735537", "text": "def test_unclean_text(self):\n tokenizer = get_tokenizer(\"en-us\")\n\n text = \"IT’S <a> 'test' (seNtEnce) for $100, dr., & [ I ] ## like ## it 100%!\"\n sentences = list(tokenizer.tokenize(text))\n self.assertEqual(1, len(sentences))\n sentence = sentences[0]\n\n self.assertEqual(\n [\n \"it's\",\n \"a\",\n \"test\",\n \"sentence\",\n \"for\",\n \"one\",\n \"hundred\",\n \"dollars\",\n \",\",\n \"doctor\",\n \",\",\n \"and\",\n \"i\",\n \"like\",\n \"it\",\n \"one\",\n \"hundred\",\n \"percent\",\n \"!\",\n ],\n [t.text for t in sentence.tokens],\n )", "title": "" }, { "docid": "b6c5e9bfd9f79fb949688cd43929051e", "score": "0.57299256", "text": "def clean_text(text):\n\n try:\n tokenized_text = word_tokenize(text.lower())\n cleaned_text = [t for t in tokenized_text if t not in STOPWORDS and re.match('[a-zA-Z\\-][a-zA-Z\\-]{2,}', t)]\n return cleaned_text\n except:\n return traceback.format_exc()", "title": "" }, { "docid": "a05f09c5563b775d951612f48af8fc6e", "score": "0.5723738", "text": "def test_tokenizing_before_doesnt_change_features(self):\n # self.skipTest(\"Doesn't work for some reason :(\")\n sents = self.data_df[self.text_col].values\n notok_vect = CountVectorizer(\n analyzer='word',\n tokenizer=unicode.split, # doesn't tokenize\n lowercase=True,\n stop_words='english',\n max_features=75\n )\n\n tokenized_sents = map(lambda sent: ' '.join(stem_tokenize(sent)), sents)\n\n notok_vect.fit(tokenized_sents)\n self.orig_vect.fit(sents)\n # do they have the same features\n # normal extractor with no tokenize extractor\n\n self.assertTrue(not set(self.orig_vect.get_feature_names()).difference(set(notok_vect.get_feature_names())))\n self.assertTrue(not set(notok_vect.get_feature_names()).difference(set(self.orig_vect.get_feature_names())))\n\n for i in range(len(sents)):\n self.assertEquals(notok_vect.transform([tokenized_sents[i]]).sum(),\n self.orig_vect.transform([sents[i]]).sum())", "title": "" }, { "docid": "b6d6f157a9f12c926e3b6b7c272af649", "score": "0.57156026", "text": "def cleaning (text):\n \n\ttext = re.sub(r'[^\\w\\s]','',text, re.UNICODE)\n\ttext = text.lower()\n\n\tlemmatizer = WordNetLemmatizer()\n\ttext = [lemmatizer.lemmatize(token) for token in text.split(\" \")]\n\ttext = [lemmatizer.lemmatize(token, \"v\") for token in text]\n\n\ttext = \" \".join(text)\n\ttext = re.sub('ãââ', '', text)\n \n\treturn text", "title": "" }, { "docid": "06397dae8f9c4d836b75735b15e18444", "score": "0.5706418", "text": "def clean(cls, series):\n f = lambda raw_text: cls._clean_text(raw_text)\n return series.apply(f)", "title": "" }, { "docid": "3ab1932010a127cf8dacdf78d3cda9af", "score": "0.56741965", "text": "def td_clean_string(text):\n text = clean_string(text)\n _, text = td_split_text(text)\n return text", "title": "" }, { "docid": "38b2e0abf24d94a0b4c8c961bd3c0ee5", "score": "0.56649965", "text": "def normalize_and_tokenize_tweet( tweet ):\n if 'text' in tweet and tweet['text']:\n tweet['clean_text'] = clean_tweet( tweet['text'])\n tweet['tokens'] = tweet['clean_text'].split() #Simple tokenizer, much room for improvement\n return tweet", "title": "" }, { "docid": "24c29598463e692a5b92f36206f86821", "score": "0.56597495", "text": "def 
clean_text(text, tokenizer, stopwords):\r\n text = str(text).lower() # Lowercase words\r\n text = re.sub(r\"\\[(.*?)\\]\", \"\", text) # Remove [+XYZ chars] in content\r\n text = re.sub(r\"\\s+\", \" \", text) # Remove multiple spaces in content\r\n text = re.sub(r\"\\w+…|…\", \"\", text) # Remove ellipsis (and last word)\r\n text = re.sub(r\"(?<=\\w)-(?=\\w)\", \" \", text) # Replace dash between words\r\n text = re.sub(\r\n f\"[{re.escape(string.punctuation)}]\", \"\", text\r\n ) # Remove punctuation\r\n\r\n tokens = tokenizer(text) # Get tokens from text\r\n tokens = [t for t in tokens if not t in stopwords] # Remove stopwords\r\n tokens = [\"\" if t.isdigit() else t for t in tokens] # Remove digits\r\n tokens = [t for t in tokens if len(t) > 1] # Remove short tokens\r\n return tokens", "title": "" }, { "docid": "d02b2d122b30e9da5903c8a2d3d3113a", "score": "0.56456923", "text": "def result_cleaning(df):\n # remove NaN-only columns\n # NB: we need to do it before replacing otherwise Series.str.replace errors out\n df.dropna(axis=\"columns\", how=\"all\", inplace=True)\n\n for col in df.columns.drop(\"ena project\"):\n df.loc[:, col] = df.loc[:, col].str.replace(\" \", \"\", regex=False)\n\n df.replace(\"\", np.NaN, inplace=True)\n # remove NaN-only lines and columns\n df.dropna(axis=\"index\", subset=df.columns.drop(\"ena project\"), how=\"all\", inplace=True)\n df.dropna(axis=\"columns\", how=\"all\", inplace=True)", "title": "" }, { "docid": "a13ac1109dae3d32671e6b11082c37eb", "score": "0.5643749", "text": "def run_cleaning_process(Cleaner, tokens,\n exceptions = [],\n minL = 1,\n minF = 4,\n notallowed = ['*'],\n logging = True\n ):\n exp = exceptions\n \n ninit = len(tokens)\n print('Cleaning process: Initial size of tokens = {}'.format(ninit))\n \n TFtokens = Cleaner.clean(tokens,logging = logging)\n print('Reduction due to punctuations and stopwords = {}.'.format(len(tokens) - len(TFtokens)))\n \n uTFtokens = Cleaner.remove_numerals(TFtokens, remove_any = False, exceptions = exp)\n print(\"Reduction due to all numeral terms = {}\".format(len(TFtokens) - len(uTFtokens)))\n TFtokens = uTFtokens\n \n uTFtokens = Cleaner.remove_short_terms(TFtokens, threshold = minL, exceptions = exp)\n print(\"Reduction due to short terms = {}\".format(len(TFtokens) - len(uTFtokens)))\n TFtokens = uTFtokens\n \n uTFtokens = Cleaner.remove_rare_terms(TFtokens, below = minF, exceptions = exp)\n print(\"Reduction due to rare terms = {}\".format(len(TFtokens) - len(uTFtokens)))\n TFtokens = uTFtokens\n \n uTFtokens = Cleaner.remove_numerals(TFtokens, remove_any = True, exceptions = exp)\n print(\"Reduction due to partially numeral terms = {}\".format(len(TFtokens) - len(uTFtokens)))\n TFtokens = uTFtokens\n \n uTFtokens = Cleaner.remove_contains(TFtokens, literals = notallowed, exceptions = exp)\n print(\"Reduction due to terms with not allowed symbols = {}\".format(len(TFtokens) - len(uTFtokens)))\n TFtokens = uTFtokens\n \n reduction = ninit - len(TFtokens)\n print(\"The total term count reduction during this cleaning process = {}\".format(reduction))\n if ninit > 0:\n print(\"Percentage = {}%\".format(round(100.0 * reduction/ninit),2))\n return TFtokens", "title": "" }, { "docid": "86424c289f5ef528b120ee4b681122c2", "score": "0.5632676", "text": "def clean_statcode(self, data: pd.DataFrame) -> None:\n col = 'KoppelvariabeleRegioCode_306'\n data[col] = data[col].str.strip()", "title": "" }, { "docid": "476f624e3171a161cc6d4e72aed67170", "score": "0.56223595", "text": "def sanitize(sentence):\n\n 
stop_words = set(stopwords.words('english'))\n word_tokens = word_tokenize(sentence)\n filtered_sentence = [w for w in word_tokens if not w in stop_words]\n\n return filtered_sentence", "title": "" }, { "docid": "48bf9a0bdb9a2930ecfca3c45ad525e8", "score": "0.5613786", "text": "def clean_text(self, text):\n\n text = text.strip()\n text = NORMALIZE_WHITESPACE_REGEX.sub(' ', text)\n text = RE_DASH_FILTER.sub('-', text)\n text = RE_APOSTROPHE_FILTER.sub(\"'\", text)\n text = RE_LEFT_PARENTH_FILTER.sub(\"(\", text)\n text = RE_RIGHT_PARENTH_FILTER.sub(\")\", text)\n text = RE_BASIC_CLEANER.sub('', text)\n\n return text", "title": "" }, { "docid": "6c3440de652df205d8381fc7bfa0e53e", "score": "0.5604855", "text": "def preprocess_data(data):\n data_processed = data\n for idx, sentence in enumerate(data):\n sentence_processed = first_char_lower(sentence)\n sentence_processed = space_around_special(sentence_processed)\n sentence_processed = remove_forbidden_special(sentence_processed)\n data_processed[idx] = sentence_processed\n return data_processed", "title": "" }, { "docid": "3b727e499e8db98fb6e0eb2a8751bf10", "score": "0.56023115", "text": "def clean_tweet_text(t):\n try:\n t = t.replace('http://t.co/9TP7UUrMtU(alteration-of-cri/index.xml', ' ') # improper link causes preprocessor to fail\n t = preprocessor.clean(t)\n t = unicodedata.normalize('NFD', t).encode('ascii', 'ignore') # normalize unicode accents etc\n t = re.sub('[^a-zA-Z \\']', ' ', t) # only keep letters, ', and - characterst = t.lower()\n t = t.lower() # lower case everything\n t = ' '.join(t.split())\n except (AttributeError, TypeError):\n t = ' '\n return t", "title": "" }, { "docid": "3e5d4bd7bb062f6a7157c463d1ffae93", "score": "0.56020623", "text": "def text_cleaning(text: str): \n # Strip punctuation and special chars\n stripped = re.sub(r\"[^\\w]\", \" \", text)\n \n # Tokenize and stem words\n tokenized = [\n token.lower() for token in stripped.split(\" \")\n if token.strip() and token.lower() not in STOP_WORDS and len(token) >= 2\n ]\n \n return tokenized", "title": "" }, { "docid": "622593b239d3e785983c688834da834b", "score": "0.5597015", "text": "def preprocess(self):\n self.standardize_raw(self.sequence_to_remove)\n if len(self.clean_text.split()) == 0:\n self.empty = True", "title": "" }, { "docid": "5bc8460409eeab79926307f742c4d98d", "score": "0.5586985", "text": "def sanitise(long_string):\n # Tokenize\n word_list = nltk.tokenize.word_tokenize(long_string)\n \n # Remove non-alphanumeric characters\n sanitised_list = [sanitise_word(string) for string in word_list]\n token_list = \" \".join(sanitised_list).split()\n\n # Remove stop words\n if (REMOVE_STOPWORDS):\n removed_list = [token for token in token_list if token not in stopwords.words('english')]\n token_list = removed_list\n\n # Apply stemming and/or lemmatization\n if (USE_LEMMATIZER and USE_STEMMER):\n token_list = lemmatize_and_stem(token_list)\n else:\n # This line is if you want to do lemmatization (prefer to do this before stemming, as stemming might not return a real word)\n if (USE_LEMMATIZER):\n token_list = lemmatize(token_list)\n \n # This line is if you want to do stemming after or instead\n if (USE_STEMMER):\n token_list = stem(token_list)\n \n return token_list", "title": "" }, { "docid": "420568814b446d7128968637bc9a1c69", "score": "0.5586855", "text": "def sanitize_data(self, data):\n\n # Lowercase letters\n # Remove empty spaces\n data = [x.replace(' ', '').lower() for x in data]\n\n # Remove domains with invalid characters\n clean_data = []\n 
invalid_chars = \"~`!@#$%^&*()_+={}[]|:;<>,?'/\\\\\"\n for x in data:\n\n # !! IMPORTANT !!:\n # For now, all the domains with invalid characters are dropped, but need to change later\n # Need to probably create a JSON with a key that identifies which domain to score or not\n if any(n in x for n in [x for x in invalid_chars]):\n print('The domain {} is not valid and will not be scored'.format(x))\n continue\n clean_data.append(x)\n\n return clean_data", "title": "" }, { "docid": "658c31ce806818464c382e54b12b8ea2", "score": "0.5582669", "text": "def clean_text(text, tokenizer, stemmer, stopwords = stop_words, combine_tweets = False):\n cleaned_text = []\n for post in text:\n cleaned_words = []\n for word in tokenizer(post[\"tweet\"][2:]):\n word = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', word, flags=re.MULTILINE)\n word = re.sub(r'^nhttps?:\\/\\/.*[\\r\\n]*', '', word, flags=re.MULTILINE)\n low_word = word.lower().strip(\" \")\n if low_word == \"\" or len(low_word) <= 3:\n continue\n if word_tokenize(low_word)[0].isalpha():\n if low_word not in stopwords:\n if \"htt\" in low_word:\n continue\n# print(low_word)\n cleaned_words.append(low_word)\n if combine_tweets == False:\n cleaned_text.append(' '.join(cleaned_words))\n else:\n cleaned_text = cleaned_words\n return cleaned_text", "title": "" }, { "docid": "a132d89d13566710545e5d6eb6bd5f02", "score": "0.5580322", "text": "def process_manual(df):\n df_error_des = df['error description'].values.tolist()\n for er in df_error_des:\n er = er.split()\n er = [re.sub('\\W+','', i) for i in er]\n er = [i for i in er if i not in stopwords.words('english')]\n df['error description'] = df_error_des", "title": "" }, { "docid": "609d95a459182d5fbd241256aa56d36c", "score": "0.5573048", "text": "def _clean_text(cls, text, math_token=MATH_TOKEN):\n replaced_math = cls._strip_math(text, math_token)\n clean_text = cls._strip_non_alphas(replaced_math)\n return clean_text", "title": "" }, { "docid": "f139399fb4cf25f1767875168657c83f", "score": "0.5571381", "text": "def text_cleaner(mess):\n nopunc_mess = ''.join([c for c in mess if c not in string.punctuation])\n \n nopunc_mess = nopunc_mess.split()\n \n cleantext_mess = [word for word in nopunc_mess if word.lower() not in stopwords.words('english')]\n \n return cleantext_mess", "title": "" }, { "docid": "c72e273c6f855e6f2a2f53d1df4a0912", "score": "0.5570511", "text": "def preprocess(df, column):\n df = tokenize(df, column)\n df = filterTokenAlpha(df, column)\n df = filterTokenLength(df, column)\n df = removeStopWords(df, column)\n df = stemming(df, column)\n return df", "title": "" }, { "docid": "cac230b299ce0f1c3d362fcb6ee41be2", "score": "0.55693036", "text": "def clean_data(csv_file):\r\n # load csv file\r\n data = pd.read_csv(csv_file)\r\n\r\n data['text'] = data['text'].str.strip('fw :')\r\n data['text'] = data['text'].str.strip(' re : ')\r\n\r\n stop_words = set(stopwords.words('english'))\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n ps = PorterStemmer()\r\n\r\n numb_mails = len(data.text)\r\n\r\n # Loops through each message (email)\r\n for message in range(numb_mails):\r\n # make message lower case\r\n text = data.text[message]\r\n\r\n # Substitute special tokens with descriptive strings\r\n text = text.replace('$', 'DOLLAR')\r\n text = text.replace('@', 'EMAILADRESS')\r\n text = text.replace('https', 'URL')\r\n text = text.replace('www', 'URL')\r\n\r\n # Remove unescessary information\r\n text = text.replace('Subject', '')\r\n text = text.replace('cc', '')\r\n\r\n # Make text lower case\r\n text = 
text.lower()\r\n\r\n # Tokenize + remove punctuation\r\n tokens1 = tokenizer.tokenize(text)\r\n\r\n # Remove stop-words\r\n tokens2 = [w for w in tokens1 if not w in stop_words]\r\n\r\n # Stemming tokens\r\n numb_tokens = len(tokens2)\r\n\r\n for token in range(numb_tokens):\r\n tokens2[token] = ps.stem(tokens2[token])\r\n\r\n # Sustitute number (special token) with 'NUMBER'\r\n #(numbers can be split by with space)\r\n for token in range(numb_tokens):\r\n try:\r\n int(tokens2[token])\r\n tokens2[token] = \"NUMBER\"\r\n except:\r\n pass\r\n\r\n last_token = \"\"\r\n for token in reversed(range(numb_tokens)):\r\n if (last_token == tokens2[token]) and (last_token=='NUMBER'):\r\n del tokens2[token+1]\r\n\r\n last_token = tokens2[token]\r\n\r\n # Collect tokens to string and assign to dataframe\r\n prepared_string = \" \".join(tokens2)\r\n\r\n data.at[message,'text'] = prepared_string\r\n\r\n return data", "title": "" }, { "docid": "8ca00558ceb7589331d0f3774fe80564", "score": "0.55521464", "text": "def pre_process(tokens):\n all_words = []\n for w in tokens:\n if w.isalpha():\n w = w.encode(encoding='ascii', errors='ignore')\n w = w.lower()\n w = stemmer.stem(w)\n if w not in stopwords:\n all_words.append(w)\n return all_words", "title": "" }, { "docid": "b158662c52e25e4b6818a5f0171e1757", "score": "0.55466616", "text": "def _clean_input(input):\r\n \r\n # Cheap and nasty tokenisation\r\n cleaned = []\r\n removed = set()\r\n for word in input.split(' '):\r\n if word in LibrarySearchQuery.STOP_WORDS:\r\n removed.add(word)\r\n else:\r\n cleaned.append(word)\r\n return ' '.join(cleaned), frozenset(removed)", "title": "" }, { "docid": "fe228aab68b5a8425a594ab4565b7cf3", "score": "0.55365354", "text": "def sanitize_data(value):\n global sanitize\n for i in sanitize:\n value = value.replace(i, \"\")\n return value.replace(\"|\", \"-\")", "title": "" }, { "docid": "5c1486679096e8aa4e9e6de6d17e98f4", "score": "0.55333376", "text": "def normalize_text(data):\r\n # Lowercase all characters\r\n data['english_sentence'] = data['english_sentence'].apply(lambda x: x.lower())\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: x.lower())\r\n\r\n # Remove quotes\r\n data['english_sentence'] = data['english_sentence'].apply(lambda x: re.sub(\"'\", '', x))\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: re.sub(\"'\", '', x))\r\n\r\n exclude = set(string.punctuation) # Set of all special characters\r\n # Remove all the special characters\r\n data['english_sentence'] = data['english_sentence'].apply(lambda x: ''.join(ch for ch in x if ch not in exclude))\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: ''.join(ch for ch in x if ch not in exclude))\r\n\r\n # Remove all numbers from text\r\n remove_digits = str.maketrans('', '', digits)\r\n data['english_sentence'] = data['english_sentence'].apply(lambda x: x.translate(remove_digits))\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: x.translate(remove_digits))\r\n\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: re.sub(\"[२३०८१५७९४६]\", \"\", x))\r\n\r\n # Remove extra spaces\r\n data['english_sentence'] = data['english_sentence'].apply(lambda x: x.strip())\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: x.strip())\r\n data['english_sentence'] = data['english_sentence'].apply(lambda x: re.sub(\" +\", \" \", x))\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: re.sub(\" +\", \" \", x))\r\n\r\n # Add start and end tokens to target 
sequences\r\n data['hindi_sentence'] = data['hindi_sentence'].apply(lambda x: 'START_ ' + x + ' _END')\r\n\r\n return data", "title": "" }, { "docid": "ae0b73a8cb6c713850a11bd80ce76072", "score": "0.55195785", "text": "def clean_unwanted_data(self, text):\n # Flags any ERROR tag in the log.\n text = re.sub(r'[\\[]ERROR[\\]]\\s+', 'ERROR ', text)\n\n for i in self.RE_LIST:\n text = re.sub(i, '', text)\n\n return text", "title": "" }, { "docid": "af881a7db4d93df124cbc080a4e128e1", "score": "0.5512658", "text": "def data_preprocess():\r\n print(\"---\", str(datetime.datetime.now()), \"---\")\r\n print('\\tPreprocessing data...')\r\n global full_text\r\n\r\n def preprocess(text):\r\n text = text.replace('!', '.')\r\n text = text.replace('?', '.')\r\n text = re.sub(r'\\.+', r'.', text)\r\n text = re.sub(r'\\.+', r'.', text)\r\n text = re.sub(r',+', r',', text)\r\n text = re.sub(r'\\(+', r'(', text)\r\n text = re.sub(r'\\)+', r')', text)\r\n text = re.sub(r'\\.([^ ])', r'. \\1', text)\r\n text = re.sub(r'\\:([^ ])', r': \\1', text)\r\n text = text.replace('e. g.', 'e.g.')\r\n text = text.replace('i. e.', 'i.e.')\r\n text = re.sub(r'\\((.*?)\\)', '', text)\r\n neg_mapping = [('wasnt', \"wasn't\"), ('werent', \"weren't\"), ('isnt', \"isn't\"), ('arent', \"aren't\"), ('aint', \"ain't\"),\r\n ('havent', \"haven't\"), ('hasnt', \"hasn't\"), ('dont', \"don't\"), ('doesnt', \"doesn't\"), ('didnt', \"didn't\"),\r\n ('wont', \"won't\"), ('couldnt', \"couldn't\"), ('wouldnt', \"wouldn't\"),\r\n ('its', \"it's\"), ('thats', \"that's\"), ('thatre', \"that're\"), ('theres', \"there's\"), ('theyre', \"they're\"),\r\n ('therere', \"there're\"), ('im', \"i'm\"),\r\n ('Its', \"It's\"), ('Thats', \"That's\"), ('Thatre', \"That're\"), ('Theres', \"There's\"), ('Theyre', \"They're\"),\r\n ('Therere', \"There're\"), ('Im', \"I'm\")]\r\n for k, v in neg_mapping:\r\n text = text.replace(' ' + k + ' ', ' ' + v + ' ')\r\n text = re.sub(r'[#|*|^|_]', r' ', text)\r\n text = text.replace(': )', '')\r\n text = text.replace(': -)', '')\r\n text = text.replace(': (', '')\r\n text = text.replace(': -(', '')\r\n text = text.replace(': ]', '')\r\n text = text.replace(': -]', '')\r\n text = text.replace(': [', '')\r\n text = text.replace(': -[', '')\r\n text = text.replace(': >', '')\r\n text = text.replace(': <', '')\r\n text = text.replace(': ->', '')\r\n text = text.replace(': -<', '')\r\n text = text.lower()\r\n return text\r\n\r\n full_text = list(map(preprocess, full_text))\r\n print(\"\\tFinished data pre-processing\")", "title": "" }, { "docid": "2303088e8e280f80145f64d72041f7eb", "score": "0.55067414", "text": "def sanitize_data(s):\n\n\t# If string only empty spaces return None\n\tif not s or s.isspace():\n\t\treturn None\n\n\t# remove any white-space from beginning or end of the string\n\ts = s.strip()\n\n\t# remove double white-spaces or tabs if any\n\ts = re.sub(r'\\s+', ' ', s)\n\n\treturn s", "title": "" }, { "docid": "34b780792adb9ba42652824ffd08a740", "score": "0.55036336", "text": "def test_token_processor():\n\tsentence = \"The food is not very good. 
Or is it?\"\n\ttokenizer = MyPottsTokenizer()\n\ttokenized_sent = tokenizer.tokenize(sentence)\n\n\ttp = TokenProcessor()\n\t\n\tprocessed_tokens = tp.filter_stop_words(tp.lower_case(tokenized_sent))\n\n\tfor tok in processed_tokens:\n\t\tif tok.isalnum():\t\n\t\t\tassert tok.islower()\n\t\tassert tok is not ''\n\t\tassert tok is not None\n\n\tassert 'not' not in tp.STOPWORDS\n\tassert 'is' not in processed_tokens\n\tassert 'or' not in processed_tokens", "title": "" }, { "docid": "ec91e3f5c980c41fd6fe3279d05c8766", "score": "0.5496015", "text": "def remove_tweet_specific_chars(tokens):\r\n\r\n return [token for token in tokens if token != '@' and token != '#' and token != 'RT']", "title": "" }, { "docid": "d7c8fb47a92451f802143973397eff25", "score": "0.54933137", "text": "def clean_up_strings(strarg):\n stage1 = strarg.lstrip().rstrip()\n stage2 = re.sub('\\t', ' ', stage1)\n stage3 = re.sub('\\n', ' ', stage2)\n stage4 = ' '.join(re.split('\\W+', stage3, flags=re.UNICODE)).rstrip()\n return stage4", "title": "" }, { "docid": "e3e237d918e3fc50f4edbb1a29052df7", "score": "0.54839027", "text": "def fix_space(self):\n clean_list = []\n for sentence in self.tweet.sentences:\n clean_sentence = ' '.join(sentence.split())\n clean_list.append(clean_sentence)\n self.tweet.sentences = clean_list", "title": "" }, { "docid": "56b5819bf959173fd3563de246c96d2f", "score": "0.5483264", "text": "def preprocessing(sentence):\n # removing extra whitespace and making the sentence lower case\n sentence = sentence.lower().strip()\n \n # removing punctuation\n bad_chars = '-.?;,!@#$%^&*()+/{}[]\\\\\":\\'“’'\n for char in bad_chars:\n sentence = sentence.replace(char, ' ').strip()\n all_words = sentence.split()\n \n # removing stop words\n filtered_sentence = [w for w in all_words if not w in stopwords]\n return ' '.join(filtered_sentence)", "title": "" }, { "docid": "fd014a10ee56a7686597f07979d09b28", "score": "0.5476522", "text": "def clean(item):\n res = item.encode(\"ascii\", errors=\"ignore\").decode()\n res = re.sub('['+string.punctuation+']+','',res).strip()\n res = word_tokenize(res.lower())\n res = [lemat.lemmatize(item) for item in res if item not in stopwords.words('english')] \n return \" \".join(res)", "title": "" }, { "docid": "e4d2c8bfde64fffeadade5d47ff13cd5", "score": "0.5466487", "text": "def remove_special_chars(self):\n clean_list = []\n for sentence in self.tweet.sentences:\n self.tweet.non_word_chars_removed.append(re.findall(r'\\W|\\d|_', sentence))\n clean_sentence = re.sub(r'\\W|\\d|_', ' ', sentence)\n clean_list.append(clean_sentence)\n self.tweet.sentences = clean_list", "title": "" }, { "docid": "a2727a804d192fec27da2b49ad51e79d", "score": "0.5465855", "text": "def preprocessing(sample):\n illegal_pattern = r'[^a-zA-Z0-9#@_$%\\s]'\n sample = [re.sub(illegal_pattern, '', word) for word in sample]\n\n return sample", "title": "" }, { "docid": "d8f482deb6b53fadc4e4c9c882d4eb4f", "score": "0.5465561", "text": "def normalize_tokenizations(cls, tokenizer, space_tokenization, target_tokenization):\n pass", "title": "" }, { "docid": "d2e95cba6d63f1f6dd3a66aae99a7cb9", "score": "0.5464007", "text": "def pre_processing(dataset, stop_words, additional_words, stemmer=None):\n\n for doc_id, text in enumerate(dataset):\n\n text = re.sub(\"[^a-zA-Z]\", \" \", text)\n\n # On decode un peu le bordel\n doc = nltk.word_tokenize(text.decode('utf-8').lower())\n\n # On enlève la ponctuation\n punctuation = set(string.punctuation)\n doc = [w for w in doc if w not in punctuation]\n\n # On casse les mots à la 
con du type 8/10, ou quand le FdP a oublié un espace après le point..\n doc = [split(x, '.') for x in doc]\n doc = list(itertools.chain(*doc))\n doc = [split(x, '/') for x in doc]\n doc = list(itertools.chain(*doc))\n doc = [split(x, '`') for x in doc]\n doc = list(itertools.chain(*doc))\n \n # On enlève les stopwords\n doc = [w for w in doc if w not in stop_words]\n doc = [w for w in doc if w not in additional_words]\n \n s = \" \"\n doc = s.join(doc)\n # On stemme un peu tout ça\n # doc = [stemmer.stem(w) for w in doc]\n dataset[doc_id] = doc", "title": "" }, { "docid": "ce1f86782beb607d2fe43d992a9960c8", "score": "0.54639566", "text": "def clean_data(self, some_text):\n\t\treturn some_text.strip()", "title": "" }, { "docid": "b1121bd1da3696e81a643ee14cb7d92d", "score": "0.54621994", "text": "def preprocess(self, text:str, tokenize=True, stem=True, remove_stopwords=False, replace_numbers=False, remove_numbers=False, remove_punct=True, strip_accs = True, expand_contractions=False, replace_newline=False):\n text = str(text)\n # should newlines be replaced? Standard config will be applied\n if replace_newline == True:\n text = self.replace_newline(text)\n # expand contractions\n if expand_contractions == True:\n text = self.expand_contractions(text)\n # lowers text and removes trailing spaces\n text = self.lower_remove_white(text)\n # remove stopwords\n if remove_stopwords:\n text = self.remove_stopwords(text)\n # emoji to text\n text = emoji.demojize(text)\n # replace urls\n text = self.replace_urls(text, 'URL')\n # remove tags\n text = self.remove_html_tags(text)\n # replace hashtags\n text = self.replace_hashtags(text, 'HASHTAG')\n # replace ips\n text = self.replace_ips(text, 'IP')\n # remove social media handles\n text = self.remove_handles(text)\n # replace numbers\n if replace_numbers == True:\n text = self.replace_numbers(text)\n if remove_numbers == True:\n text = self.remove_numbers(text) \n # reduce loooooong words\n text = self.reduce_words_with_repeated_chars(text)\n # remove punctuation \n if remove_punct == True:\n text = self.remove_punctuation(text)\n # remove excessive spaces\n text = self.remove_excessive_spaces(text)\n # remove accents \n if strip_accs == True:\n text = self.strip_accents(text)\n # remove excessive spaces\n text = self.remove_excessive_spaces(text)\n # stem text\n if stem:\n text = self.stem_sentence(text)\n # tokenize\n if tokenize == True:\n text = self.tokenize(text)\n self.vocab.update(text)\n if tokenize == False:\n # updates list of words\n self.vocab.update(self.tokenize(text))\n # return text\n return text", "title": "" }, { "docid": "c02b8787304d0cdd74d7b2b5171a1971", "score": "0.5460038", "text": "def tokenize(rawtext):\n soup = BeautifulSoup(rawtext,\"lxml\")\n\n text = soup.get_text(strip=True)\n\n tokens = [t for t in text.split()]\n\n for pos, token in enumerate(tokens):\n tokens[pos] = token.translate({ord(i): None for i in '.,:()[]{}·0123456789/*-+<>'}) #characters that should not appear in a token\n\n clean_tokens = tokens[:]\n\n sr = stopwords.words('spanish') #by now only in spanish content #TO DO languaje detector + adaptation\n for token in tokens:\n if token in sr:\n clean_tokens.remove(token)\n if token == \"\":\n clean_tokens.remove(token)\n\n return(clean_tokens)", "title": "" }, { "docid": "d1cb2b3b7e863f8063a4b481e850e497", "score": "0.54600334", "text": "def kimyoon_text_cleaner(text: str):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" 
\\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n return text.strip().lower()", "title": "" }, { "docid": "e201714b60a049d80fef8207b654be97", "score": "0.5454857", "text": "def prepare_data(self, sentence_text, remove_stop_words=True):\n # Tokenization\n words = self.tokenize(sentence_text)\n # Lemmatization\n tokens = self.lemmatize(words)\n if remove_stop_words:\n return [t for t in tokens if t[0] not in self.stopwords and t[2] not in self.stopwords]\n else:\n return tokens", "title": "" }, { "docid": "15c238c79e65485fd5209a84d4e05ba1", "score": "0.54519534", "text": "def _re_tokenize(segmented):\n preserve_chars = {\"(\", \")\", \"-\"}\n for seg in segmented:\n if seg in preserve_chars:\n yield seg\n else:\n normalized = self.form_for_segmentation(seg)\n tokenized = tokenizer(normalized, column=\"IPA\")\n for seg in tokenized.split(\" \"):\n yield seg", "title": "" }, { "docid": "d08a7842764bcd54ddc8ea5b06aefb52", "score": "0.54504704", "text": "def filter_out_special_characters(self):", "title": "" }, { "docid": "eefbf05c2429c8ca5ebbd3269af78d28", "score": "0.54451215", "text": "def clean(self, text):\r\n wnl = nltk.stem.WordNetLemmatizer()\r\n stopwords = nltk.corpus.stopwords.words('english') + ADDITIONAL_STOPWORDS\r\n text = (unicodedata.normalize('NFKD', text)\r\n .encode('ascii', 'ignore')\r\n .decode('utf-8', 'ignore')\r\n .lower())\r\n words = re.sub(r'[^\\w\\s]', '', text).split()\r\n return [wnl.lemmatize(word) for word in words if word not in stopwords]", "title": "" }, { "docid": "467c862089ffa5faa536cefbf486ad68", "score": "0.544113", "text": "def cleaning(x):\n x = convert_to_lower(x)\n\n # remove punctuation\n x = remove_punctuation(x)\n # operator = str.maketrans('', '', string.punctuation) # ????\n # x = x.translate(operator)\n # x = correct_spellings(x)\n x = word_tokenize(x)\n # x = remove_stopwords(x)\n return x", "title": "" }, { "docid": "9ae3d139489e6feef1402384572ddacc", "score": "0.54409045", "text": "def preprocess_data(df):\n\n\t## aggregate headline and short_descriptions cols\n\tdf['text'] = df.headline + \" \" + df.short_description\n\n\t## get rid of non-alphanumeric chars\n\tdf['text'] = df['text'].apply(replace_bad_chars)\n\n\t## convert to lowercase\n\tdf['text'] = df['text'].str.lower() \n\n\t#3remove stop words\n\tstop_words = set(stopwords.words('english'))\n\tdf['text'] = df['text'].apply(lambda x: \" \".join(x for x in x.split() if x not in stop_words))\n\n\t## lemmatize text\n\tdf['text'] = df['text'].apply(lambda x: \" \".join([Word(word).lemmatize() for word in x.split()]))\n\n\t## tokenize text\n\tdf['tokenized_text'] = df['text'].apply(word_tokenize) \n\n\t## get length of text and get rid of bad text based on length threshold\n\t## threshold = 2 standard deviations below mean length\n\tdf['text_length'] = df['tokenized_text'].apply(len)\n\tthreshold = df['text_length'].mean() - 2*(df['text_length'].std())\n\tdf = df[df.text_length >= threshold]\n\n\treturn df", "title": "" }, { "docid": "50572aa2110f0a990065cada2bb5b912", "score": "0.54387325", "text": "def feature_cleaning(df):\n # casefold column names\n df.rename(str.casefold, axis=\"columns\", inplace=True)\n\n # rename index name\n df.index = df.index.rename(\"run accession\")\n # make features consistent\n 
df.rename(columns=equivalent_features, inplace=True)\n\n # remove the Oxford identifier\n df.drop(columns=\"oxford comid\", axis=\"columns\", errors='ignore', inplace=True)", "title": "" }, { "docid": "fa2d5d6c22d3f61933a83f6a55855fa8", "score": "0.543803", "text": "def detokenize(self, tokens):\t\n return ''.join([t for t in tokens])", "title": "" }, { "docid": "a7f241b8c9a05a370befe05a38365536", "score": "0.5434769", "text": "def preprocess(sentence):\n # Converts the sentence to lowercase\n sent = sentence.lower()\n # Tokenizes the lowercase sentence thus splitting it into a list of words\n final = nltk.word_tokenize(sent)\n # Loops through each word\n for x in final:\n counter = 0\n # Loops through each char in said word\n for char in x:\n counter += 1\n # If the there is an alphabetic character in the word, the loop is broken as word is NOT removed\n if char.isalpha() == True:\n break\n # Else if no alphabetical character have been found in the entire word, it is removed\n if counter == len(x):\n final.remove(x)\n return final", "title": "" }, { "docid": "a24a53dc60cd897b76cbc8c23ba12bef", "score": "0.5432793", "text": "def preprocess(text):\r\n clean_data = []\r\n for x in (text[:]): \r\n new_text = re.sub('<.*?>', '', x) # remove HTML tags\r\n new_text = re.sub(r'[^\\w\\s]', '', new_text) # remove punc.\r\n new_text = re.sub(r'\\d+','',new_text)# remove numbers\r\n new_text = new_text.lower() # lower case, .upper() for upper \r\n if new_text != '':\r\n clean_data.append(new_text)\r\n return clean_data", "title": "" }, { "docid": "e7594ed65ea6661eab69bcf90d0c8524", "score": "0.5430267", "text": "def strip_non_keywords(data, keywords, language):\n if (len(keywords[language]) == 0):\n return data\n token = \"\"\n i = 0\n while i < len(data):\n if data[i].isalnum():\n token += data[i]\n i += 1\n continue\n if token.isalnum() and not token.isnumeric() and token not in keywords[language]:\n data = 'w'.join((data[:i - len(token)], data[i:]))\n i += len('w') - len(token)\n token = \"\"\n i += 1\n return data", "title": "" }, { "docid": "d773805ce7c9dcc7fd420dd93af454aa", "score": "0.54196495", "text": "def normalizer_textcleaning(string):\n string = re.sub(\n 'http\\S+|www.\\S+',\n '',\n ' '.join(\n [\n word\n for word in string.split()\n if word.find('#') < 0 and word.find('@') < 0\n ]\n ),\n )\n string = re.sub('[^A-Za-z ]+', ' ', string)\n string = re.sub(r'[ ]+', ' ', string).strip()\n string = [\n word.title() if word.isupper() else word\n for word in string.split()\n if len(word)\n ]\n string = [\n word\n for word in string\n if not any([laugh in word for laugh in _list_laughing])\n ]\n string = ' '.join(string)\n return ''.join(''.join(s)[:2] for _, s in itertools.groupby(string))", "title": "" }, { "docid": "fb3dc356ba7f1e18d1a87beed867c85d", "score": "0.54147506", "text": "def test_remove_ats_strings_without_ats(self):\n self.assertEqual(ParseTools.remove_ats(\"String with no twitter names\"), \"String with no twitter names\")", "title": "" }, { "docid": "66726362a3c8a8c1edcbc9c7d8ced1fe", "score": "0.541384", "text": "def tokenize(text):\n \n #remove punctuation\n text = re.sub(r\"[,.;@#?!&$]+\\ *\", \" \", text)\n\n #tokenize\n tokens = word_tokenize(text)\n \n \n #define the Lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n #lemmatize and normalize to lowercase\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n \n #remove stop words\n if clean_tok not in set(stopwords.words('english')):\n clean_tokens.append(clean_tok)\n\n return 
clean_tokens", "title": "" }, { "docid": "54b7542208ba8fda27f19b16e63f61ac", "score": "0.54131407", "text": "def untokenize(self, words):\n text = ' '.join(words)\n step1 = text.replace(\"`` \", '\"').replace(\n \" ''\", '\"').replace('. . .', '...')\n step2 = step1.replace(\" ( \", \" (\").replace(\" ) \", \") \")\n step3 = re.sub(r' ([.,:;?!%]+)([ \\'\"`])', r\"\\1\\2\", step2)\n step4 = re.sub(r' ([.,:;?!%]+)$', r\"\\1\", step3)\n step5 = step4.replace(\" '\", \"'\").replace(\" n't\", \"n't\").replace(\n \"can not\", \"cannot\")\n step6 = step5.replace(\" ` \", \" '\")\n return step6.strip()", "title": "" }, { "docid": "9fec953831178cc23f0b356c9a1f8e49", "score": "0.54113", "text": "def clean(self, s):\n\t\ts = re.sub(r'[^a-zA-Z]', ' ', s) # remove everything that isn't an alphabet character (only interested in words, not numbers or punctuation)\n\t\ts = re.sub(r'\\s+', ' ', s) # change multiple whitespace in a row to a single space\n\t\treturn s", "title": "" }, { "docid": "7c6a83815db8034bbc033192daff25d5", "score": "0.54093915", "text": "def _clean_coreNLP_output(self, sentiments):\n sentiments = [sentiment.strip().lower().replace(\" \", \"\") for sentiment in sentiments]\n sentiments = ['neutral' if (sentiment == '') else sentiment for sentiment in sentiments]\n return sentiments", "title": "" }, { "docid": "e11f614c19fbf7359fc33a5229cb1597", "score": "0.5399628", "text": "def preprocess_corpus(corpus,stemming=False,\n all_smilies=False, pos_smilies=False, neg_smilies=False, other_smilies=False,\n hugs_and_kisses=False,hearts=False,\n hashtag=False, hashtag_mention=False, \n numbers=False, number_mention=False, \n exclamation=False, ##OBS denne er nå ikke testet, eventuelt bare fjerne den\n set_to_not=False, \n segmentation_hash= False, \n spelling=False,\n elongation=False, \n remove_signs=False\n ):\n \n start = time.time()\n \n #initialising the new corpus:\n new_corpus=[]\n\n #Want to split the tweets using this tokenizer:\n tknzr = TweetTokenizer(reduce_len=True)\n \n \n \n if stemming:\n ps = PorterStemmer()\n \n if segmentation_hash or spelling or elongation:\n d = enchant.Dict(\"en_US\")\n \n if segmentation_hash: \n #seg = Segmenter(corpus=\"english\")\n seg = Segmenter(corpus=\"twitter\")\n\n if spelling: \n sp = SpellCorrector(corpus=\"english\")\n \n \n elapsed = time.time()\n print(\"Time in min before starting first for loop:\", (elapsed - start) / 60 )\n \n #Want to go though each line (tweet) in the corpus\n for k, line in enumerate(corpus):\n \n \n if hashtag_mention:\n there_is_hashtag=False\n if number_mention:\n there_is_number=False\n if exclamation:\n there_is_exclamation=False\n \n #Splitting the tweet using the chosen tokenizer. 
\n words=tknzr.tokenize(line)\n #Initializing for cleaned_tweet:\n cleaned_tweet=[]\n \n for i, word in enumerate(words):\n #Indicating that the word has not been treated yet\n word_not_treated=True\n end_=len(words)-1\n if ((pos_smilies or all_smilies) and word_not_treated):\n if (i>0 and (word=='d' and (words[i-1]==':' or words[i-1]==';'))) or word == ':d' or word == ';d':\n cleaned_tweet.append('smile')\n word_not_treated=False\n elif (i>0 and (word=='p' and (words[i-1]==':' or words[i-1]==';'))) or word == ':p' or word == ';p' :\n cleaned_tweet.append('smile')\n word_not_treated=False\n elif i>0 and word=='d' and (words[i-1]==':' or words[i-1]==';' or words[i-1]=='x'):\n cleaned_tweet.append('smile')\n word_not_treated=False\n elif i>0 and words[i-1]=='(' and (word==':' or word==';'):\n cleaned_tweet.append('smile')\n word_not_treated=False\n elif i>0 and word==')' and (words[i-1]==':' or words[i-1]==';'):\n cleaned_tweet.append('smile')\n word_not_treated=False\n\n if ((neg_smilies or all_smilies) and word_not_treated):\n if i>0 and words[i-1]==')' and (word==':' or word==';'):\n cleaned_tweet.append('sad')\n word_not_treated=False\n elif i>0 and word=='(' and (words[i-1]==':' or words[i-1]==';'):\n cleaned_tweet.append('sad')\n word_not_treated=False\n \n if ((other_smilies or all_smilies) and word_not_treated):\n if i>0 and i<end_ and word=='_' and words[i-1]=='^' and words[i+1]=='^':\n cleaned_tweet.append('eyesmiley')\n word_not_treated=False\n elif i>0 and word=='o' and words[i-1]==':':\n cleaned_tweet.append('openmouthface')\n word_not_treated=False\n elif i>0 and word=='/' and words[i-1]==':':\n cleaned_tweet.append('slashsmiely')\n word_not_treated=False\n elif i>0 and word=='*' and (words[i-1]==':' or words[i-1]==';'):\n cleaned_tweet.append('kiss')\n word_not_treated=False\n \n if ((hugs_and_kisses and word_not_treated)):\n #want to find hearts, hugs, kisses, etc: \n if (word == \"xoxo\" or word == \"xo\" or word == \"xoxoxo\" or word == \"xxoo\"):\n cleaned_tweet.append('hug')\n cleaned_tweet.append('kiss')\n word_not_treated=False\n elif (word=='xx' or word=='xxx'or word=='xxxx'):\n cleaned_tweet.append('kiss')\n word_not_treated=False\n \n if ((hearts and word_not_treated)):\n if word == \"<3\":\n cleaned_tweet.append('heart')\n word_not_treated=False\n \n if (hashtag and word_not_treated):\n if word[0]=='#':\n there_is_hashtag=True\n if (len(word)>1 and segmentation_hash and not d.check(word[1:])):\n cleaned_tweet.append(seg.segment(word[1:]))\n else:\n cleaned_tweet.append(word[1:])\n word_not_treated=False\n \n if (numbers and word_not_treated):\n if word.isdigit():\n there_is_number=True\n word_not_treated=False\n \n if (exclamation and word_not_treated):\n if word=='!':\n there_is_exclamation=True\n cleaned_tweet.append(word)\n word_not_treated=False\n \n if (set_to_not and word_not_treated):\n if word[-3:]=='n\\'t':\n cleaned_tweet.append('not')\n word_not_treated=False\n \n \n \n if (word_not_treated):\n if (not remove_signs) or (remove_signs and ( (word!= '^' and word!=',' and word!='.' 
and word!=':' \n and word!='-' and word!='´' and word!=';'and word!=')' \n and word!='(' and word!='*'))):\n \n if ((not word[0].isdigit()) and elongation and not d.check(word) and len(word)>2):\n new=[]\n new.append(word[0])\n for i,letter in enumerate(word):\n if i>0 and i<len(word)-1: \n if not( letter==word[i-1]==word[i+1]):\n new.append(letter)\n new.append(word[-1])\n new_word=''.join(new)\n if new_word!= word:\n cleaned_tweet.append('elongation')\n word=new_word\n\n if spelling and not d.check(word)and len(word)>2: \n word=sp.correct(word)\n if stemming:\n word=ps.stem(word)\n\n \n cleaned_tweet.append(word)\n\n \n \n \n if (hashtag_mention and there_is_hashtag) :\n cleaned_tweet.append('hashtag')\n if (number_mention and there_is_number) :\n cleaned_tweet.append('number')\n if (exclamation and there_is_exclamation):\n cleaned_tweet.append('exclamation')\n \n \n new_words = ' '.join(cleaned_tweet)\n new_words = new_words.encode('utf-8')\n new_corpus.append(new_words)\n \n if np.mod(k,25000)==1:\n elapsed = time.time()\n print(\"Time in min after\", k, \" tweets:\", (elapsed - start) / 60 )\n\n \n elapsed = time.time()\n print(\"Time in min total:\", (elapsed - start) / 60 )\n return new_corpus", "title": "" }, { "docid": "36e6c5de823a15fc827be914a031feb5", "score": "0.53972626", "text": "def ft_cleaner(text: str):\n text = text.lower()\n text = text.replace(\"'\", \" ' \").replace(\".\", \" . \").replace(\"!\", \" ! \").replace(\",\", \" , \")\n text = text.replace(\"(\", \" ( \").replace(\")\", \" ) \").replace(\"?\", \" ? \")\n text = text.replace('\"', \"\").replace(\";\", \"\").replace(\":\", \"\")\n text = replace_html(text)\n return text", "title": "" }, { "docid": "2120cfd8a4b3c34d3d954d63675200be", "score": "0.5393788", "text": "def preprocess_bodystring(self):\n # Remove html tags\n text = strip_html_tags(self.bodystring)\n\n # Change commas for space\n regex = re.compile(',')\n text = regex.sub('', text)\n\n # Remove all other punctuation\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n text = regex.sub('', text)\n\n # only accept words that beegin with an alphabet or a number. 
outputs lowercase tokens\n tokenizer = RegexpTokenizer('[A-Za-z1-9]\\w+')\n tokens = word_tokenize(text.lower())\n \n self.processed_tokens = scorer.stem_and_discard(tokens)", "title": "" }, { "docid": "3edf6ac206ac7870d55cd22c80111367", "score": "0.53932524", "text": "def clean_and_tokenize(text, remove_stops):\n tokens = tokens_re.findall(text)\n _tokens = [t.lower() for t in tokens]\n filtered_tokens = [\n token.replace(\"-\", \"_\")\n for token in _tokens\n # Conditions to be kept:\n # - Longer than 2 characters if `remove_stops`\n # - Not be a stop words if `remove_stops`\n # - No digits in token\n # - At least one ascii lowercase character\n if not (remove_stops and len(token) <= 2)\n and (not remove_stops or token not in _STOP_WORDS)\n and not any(x in token for x in string.digits)\n and any(x in token for x in string.ascii_lowercase)\n ]\n return filtered_tokens", "title": "" }, { "docid": "90e6c0fb0e2dd975191223d6d8bf0e7c", "score": "0.53893036", "text": "def clean_document(document):\n document = re.sub('[^A-Za-z .-]+', ' ', document)\n document = ' '.join(document.split())\n document = ' '.join([i for i in document.split() if i not in stop])\n return document", "title": "" }, { "docid": "90e6c0fb0e2dd975191223d6d8bf0e7c", "score": "0.53893036", "text": "def clean_document(document):\n document = re.sub('[^A-Za-z .-]+', ' ', document)\n document = ' '.join(document.split())\n document = ' '.join([i for i in document.split() if i not in stop])\n return document", "title": "" }, { "docid": "6a98252e36eda01f516cd96c43423428", "score": "0.5383273", "text": "def clean_text(line_in):\n\n def _is_english(s):\n try:\n s.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n return False\n else:\n return True\n\n # Remove any new-line characters\n line = line_in.replace('\\n', '')\n # Clean any erroneous markers from Cornell dataset\n line = line.replace('<u>', '')\n line = line.replace('</u>', '')\n # Split line into respective tokens and remove any unwanted characters\n all_chars = [re.sub(cbc.REMOVE_CHARS, '', t) for t in line.split()]\n # Filter empty strings from tokens\n filter_tokens = list(filter(None, all_chars))\n # Remove non-english words\n english_words = [c for c in filter_tokens if _is_english(c)]\n # Remove any words with that have anything other than alpha/numeric chars,\n # hyphens, or apostrophes\n alpha_words = [c for c in english_words if re.match(r\"[-'a-zA-Z1-9]\", c)]\n # Normalise text by changing it all to lower case\n normal_text = [word.lower() for word in alpha_words]\n # Rejoin tokens into string\n return ' '.join(normal_text)", "title": "" }, { "docid": "b8a3dfeba8c5ae517627b67559ec2aee", "score": "0.5381005", "text": "def _sanitize(self, s):\n return re.sub(\"[^\\w-]\", \"\", s)", "title": "" } ]
5dd02d0415547f3f2075d2b995d22465
List all registered user names.
[ { "docid": "de0509e2edc83fc9a2b96c68761c6277", "score": "0.676247", "text": "def get_users_view(request):\n user_name_list = ax.evaluate_call(lambda: [user.user_name for user in\n UserService.all(models.User, db_session=request.db)],\n fallback=lambda: request.db.rollback(), httpError=HTTPForbidden,\n msgOnFail=s.Users_GET_ForbiddenResponseSchema.description)\n return ax.valid_http(httpSuccess=HTTPOk, content={u\"user_names\": sorted(user_name_list)},\n detail=s.Users_GET_OkResponseSchema.description)", "title": "" } ]
[ { "docid": "e11550c2bc2053986e8a1101486dbabb", "score": "0.79452044", "text": "def list_users(self):\n pass", "title": "" }, { "docid": "ec1ec8aa3e65420fb561c7e7d9a81399", "score": "0.79144025", "text": "def list(self, request, format=None):\n usernames = [user.username for user in User.objects.all()]\n return Response(usernames)", "title": "" }, { "docid": "5fff3bd91d63afcbe99db88df6149e65", "score": "0.7799419", "text": "def list_users():\n pass", "title": "" }, { "docid": "8a603dba99bbf57268a72dab1ab4aefb", "score": "0.77278984", "text": "def api_alluserdnames():\n result = getUserDisplayNames()\n return jsonify(result)", "title": "" }, { "docid": "c6c797dd84474b5a6b5552f75dbb36f6", "score": "0.7673142", "text": "def list_all():\n\tutm_users = UTMUsers()\n\tfor u in utm_users.get_all_users(): print u['data']['name']\n\treturn", "title": "" }, { "docid": "9c8929ac7898b39ab2f0b80d1d879efe", "score": "0.75055283", "text": "def usernames(self):\n return [u['name'] for u in self.users]", "title": "" }, { "docid": "10424768245b3db1e443be8c7749b7e7", "score": "0.74424964", "text": "def get_users():\n user_list = get_user_list()\n usernames = []\n for usr in user_list:\n usernames.append(usr)\n return usernames", "title": "" }, { "docid": "311cdc7b908387f0bf6e44fee7f6a88d", "score": "0.74409026", "text": "def list_users(self):\n r = self.admin_api.get_users()\n return r", "title": "" }, { "docid": "d8ffb5dfdb0a51f3dc6998959fce1c61", "score": "0.7423891", "text": "def get_usernames():\n return [user.display_name for user in User.query().iter()]", "title": "" }, { "docid": "d1e4f8f658c35c393f4f1c3d33735e9f", "score": "0.74056333", "text": "def list_users():\n with quiet():\n keyfile = KeysFile()\n\n print(green('\\n================== {}:'.format(env.host_string)))\n for user in keyfile.users:\n print(blue('\\t ' + user.name))", "title": "" }, { "docid": "68e2b9b6bd687ecfe6c1d1fd0fcd8718", "score": "0.73886144", "text": "def list_users():\n users = User.query.all()\n return render_template('user-list.html', users = users)", "title": "" }, { "docid": "a2b371fc8056a68ac18dfd3152c7820b", "score": "0.7308653", "text": "def getUserNames():", "title": "" }, { "docid": "1e3a2ce3cd445e44c8bdec542130e083", "score": "0.7294234", "text": "def list_users():\n users = User.query.all()\n return render_template('list.html', users=users)", "title": "" }, { "docid": "1e3a2ce3cd445e44c8bdec542130e083", "score": "0.7294234", "text": "def list_users():\n users = User.query.all()\n return render_template('list.html', users=users)", "title": "" }, { "docid": "1e3a2ce3cd445e44c8bdec542130e083", "score": "0.7294234", "text": "def list_users():\n users = User.query.all()\n return render_template('list.html', users=users)", "title": "" }, { "docid": "79dda20f15e9cb33a0e6bb385d4b6612", "score": "0.72362703", "text": "def users():\n return []", "title": "" }, { "docid": "421cbc08512c88240e066e752c30d5b1", "score": "0.72337097", "text": "def list_users():\n\n search = request.args.get('q')\n\n if not search:\n users = User.query.all()\n else:\n users = User.query.filter(User.username.like(f\"%{search}%\")).all()\n\n return render_template('users/index.html', users=users)", "title": "" }, { "docid": "204a544258528f52ca2ff7ce88c4df69", "score": "0.72110754", "text": "def all_usernames():\n if not hasattr(LDAP, \"_all_users\"):\n LDAP._initialize_cache()\n return list(set(LDAP._all_users.keys()))", "title": "" }, { "docid": "c34e5685a163baab7517dd0e1aa22e35", "score": "0.7204628", "text": "def list_name():\n keywords = 
request.form['keywords']\n if not keywords:\n return __result({}, 1, '')\n\n data, code, message = USER_SERVICE.list_name(keywords)\n return __result(data, code, message)", "title": "" }, { "docid": "004d070d6dccfe758c322c9d3e1cd378", "score": "0.7180698", "text": "def list_of_users():\n users = User.query.all()\n return render_template('user_list.html', users=users)", "title": "" }, { "docid": "c8349629e85c27c2d0ce5b503ea80f6b", "score": "0.7180263", "text": "def list_user(self):\n return self.get_list('ListUsers', None, ['Users', User])", "title": "" }, { "docid": "b0c1aa2aa0e71cd2ff5585a5c53f86d2", "score": "0.71574855", "text": "def user_list():\n\n users = User.query.all()\n return render_template('user_list.html',\n users=users)", "title": "" }, { "docid": "bb7f50930c78bf4534df8f29de75e9d2", "score": "0.71449697", "text": "def display_users(cls):\n return cls.user_list", "title": "" }, { "docid": "6ab295905c4afe2ec1a9c6e43e1e7da8", "score": "0.71243954", "text": "def list_users():\n\n users = User.query.all()\n return render_template(\"index.html\", users=users)", "title": "" }, { "docid": "c5bfe87762d84f83ae77e087f2be61a4", "score": "0.71222824", "text": "def list_users():\n\n search = request.args.get('q')\n\n if not search:\n users = User.query.all()\n else:\n users = User.query.filter(User.username.like(f\"%{search}%\")).all()\n\n return render_template('users/index.html', users=users, is_following=is_following)", "title": "" }, { "docid": "6eaa8e9731913839c6b55e6916606946", "score": "0.7111727", "text": "def display_users(cls):\n return cls.users_list", "title": "" }, { "docid": "ecc7423fc97df08e34d711376724ade4", "score": "0.7093168", "text": "def listUsers(self):\n catalog = getToolByName(self, TOOLNAME)\n\n results = catalog(object_implements=(\n user_ifaces.IMembraneUserAuthAvail.__identifier__))\n value = []\n for r in results:\n key = r.getUserName is not None and \\\n r.getUserName.strip() or r.getUserId\n value.append((key.lower(), (r.UID, key)))\n value.sort()\n value = [r for throwaway, r in value]\n value.insert(0, ('', '<no reference>'))\n return DisplayList(value)", "title": "" }, { "docid": "a02d613485d969c6195aea714cf52a87", "score": "0.70890504", "text": "def userlist(self):\n self.clean_up_channel()\n return ', '.join(u.nickname for u in self.users_in_room)", "title": "" }, { "docid": "1d337d953e2026eceafe48d9e89bddcf", "score": "0.7040018", "text": "def show():\r\n user_list = []\r\n for users in authorized:\r\n for user in users:\r\n user_list.append(user)\r\n app_log('Was sent all register users')\r\n return jsonify(user_list)", "title": "" }, { "docid": "4f200b2d05629905983f0c10a013f0bf", "score": "0.70313364", "text": "def list_users(self):\n response = self.authenticated_request(\"/v3/users\", method=\"GET\")\n result = self._to_users(data=response.object[\"users\"])\n return result", "title": "" }, { "docid": "9dd786b29203eb3a44899bfd73f2a33f", "score": "0.70221287", "text": "def users_list(request):\n db_adapter = DBAdapter()\n user_list = db_adapter.get_users_list()\n data = ''\n for index in user_list:\n name = index.name\n email = index.email\n data = '%s</b><h1><p>name %s email = %s</p></h1>'%(data, name, email)\n return HttpResponse(data)", "title": "" }, { "docid": "f63a0c29baa74d54cae6903739c61085", "score": "0.701342", "text": "async def listusers(self, ctx, perm_name):\n us = self.settings.get_users_with_perm(perm_name)\n us = [str(self.bot.get_user(u) or \"Unknown ({})\".format(u)) for u in us]\n if not us:\n await ctx.send(inline(\"No users 
have this perm.\"))\n for page in pagify(\"\\n\".join(us)):\n await ctx.send(box(page))", "title": "" }, { "docid": "9e5b1df12796b4cc6dd781704b983daf", "score": "0.701129", "text": "def list(self):\n response = self._client.get('users')\n return UserList.from_json(response.text)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "0f960ebfa2d56fe1e2da6558b9f42d23", "score": "0.701055", "text": "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "1c8c0a914b3bf643c174a4c3f5f6e3d7", "score": "0.7000787", "text": "async def listusers(self, ctx):\n if not ctx.author.voice:\n return await ctx.send(\"You are not connected to a voice channel :mute:\")\n members = ctx.author.voice.channel.members\n memnames = []\n for member in members:\n memnames.append(member.name)\n await ctx.send(f\"Members in {ctx.author.voice.channel.name}:\\n```\\n\" + \"\\n\".join(memnames) +\"\\n```\")", "title": "" }, { "docid": "cc1f1fab1bc357ba9520d9b9359b85a8", "score": "0.6998635", "text": "def get_usernames(db=None) -> list[str]:\n return list(db[\"user\"].keys())", "title": "" }, { "docid": "c988a8e4214a6d5f9f4da43e160aa927", "score": "0.6998534", "text": "def get(self, request, format=None):\n usernames = [user.email for user in User.objects.all()]\n return Response(usernames)", "title": "" }, { "docid": "d0854a1cdb598fff06bda50445918b9a", "score": "0.6974017", "text": "def user_names(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"user_names\")", "title": "" }, { "docid": "e79ef0d405bce59c03c15b0ada869dfa", "score": "0.6942891", "text": "def get_all_local_users(self):\n sql = \"\"\"\n SELECT name FROM users\n \"\"\"\n rows = yield self._execute(\"get_all_local_users\", None, sql)\n 
defer.returnValue([name for name, in rows])", "title": "" }, { "docid": "814582c50bc6a1a4328389c9e831c6a4", "score": "0.69400537", "text": "def user_list():\n\n users = User.query.all()\n # user list looped through in Jinja\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "ae2e529d199cdb7ec08c49b810860501", "score": "0.6924035", "text": "def all_users():\n return jsonify(users.get_users())", "title": "" }, { "docid": "66e827c7c891854691e7a0754c5f2122", "score": "0.69218934", "text": "def all_users(self):\n return_list = []\n for post in self.client.db.users.find({\"username\": {\"$exists\": \"true\"}}, {\"_id\": 0, \"username\": 1}):\n return_list.append(post[\"username\"])\n return return_list", "title": "" }, { "docid": "e33ff2693ce852f53be29da2d3e62b20", "score": "0.6912878", "text": "def all(cls):\n users = db_con.smembers(\"users\")\n return [User.get(re.sub(r'^user:', '', user)) for user in users]", "title": "" }, { "docid": "710eb34be3003d85aa0d9d4603cc241f", "score": "0.6896962", "text": "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "title": "" }, { "docid": "e263d92d524930a21c1d5f11af00aa05", "score": "0.6892641", "text": "def all_users(self):\n\t\tpass", "title": "" }, { "docid": "14f71bb3637ededbfbe22582d0a51066", "score": "0.68762654", "text": "def showUsers(self):\n liste = \"\"\n for user in self.listUsers:\n liste += user + \",\"\n return liste[0:len(liste)-1]", "title": "" }, { "docid": "f33d6a0a38164f80e804a395f2ff846d", "score": "0.68753815", "text": "def list_users(self):\n self.sudo_or_dryrun('rabbitmqctl list_users')", "title": "" }, { "docid": "cd208aa1260a71e60f50c0dea19cda1c", "score": "0.68752646", "text": "def users_list(self):\n\n query = self.session.query(\n self.Users.name,\n self.Users.last_login,\n )\n\n return query.all()", "title": "" }, { "docid": "a73d4d59b886163604a6f84b1ac7d2d7", "score": "0.68619996", "text": "def get_all_users(self):\n\t\treturn self.users_list", "title": "" }, { "docid": "b58915597cbf78e98523076bc0e97aa1", "score": "0.68510354", "text": "def list_users() -> None:\n\n # Read in the config\n parser = configparser.ConfigParser()\n parser.read(GRAM_ACCOUNTS_INI)\n\n for section in parser.sections():\n username = section\n user_info = parser[section]\n print(\n '{username} ({full_name}) <{email}>; {public_key}'.format(\n username=username,\n full_name=user_info['full_name'],\n email=user_info['email'],\n public_key=user_info['public_key'],\n )\n )", "title": "" }, { "docid": "4e72958cdf11d632ead3176bd41b1cd9", "score": "0.68463475", "text": "def usernames(self):\n users = [self.agent_name, self.user_name]\n users.sort()\n return users", "title": "" }, { "docid": "ae4710b91cf71819c6ad14a2495522ac", "score": "0.68461627", "text": "async def get_names(self, user):\n usernames = [str(user)] # Tack on their current username\n query = \"\"\"\n SELECT ARRAY(\n SELECT username\n FROM usernames\n WHERE user_id = $1\n ORDER BY insertion DESC\n ) as name_list;\n \"\"\"\n results = await self.bot.cxn.fetchval(query, user.id)\n if results:\n usernames.extend(results)\n return usernames", "title": "" }, { "docid": "af4b2784685a89ff7f8d268ad3b47e06", "score": "0.68380785", "text": "def listUser(self, delete=None, deleteId=None):\r\n template = self.lookup.get_template('manager/listuser.mako')\r\n (u, c) = getUserInfo()\r\n conn = Database()\r\n session = conn.get()\r\n users = []\r\n if delete and deleteId:\r\n session.query(User).filter(\r\n User.user_name == 
deleteId).delete()\r\n conn.commit()\r\n for entry in session.query(User).order_by(User.user_name).all():\r\n users.append(entry.__dict__)\r\n conn.close()\r\n return template.render(username=u, classtype=c, users=users)", "title": "" }, { "docid": "3b953a8a7d071e578dafe99aef6153bf", "score": "0.6831349", "text": "def users_listing():\n\n # users_from_db = query all users from DB\n users_from_db = User.query.all()\n\n return render_template('/user_listing.html', users=users_from_db,)", "title": "" }, { "docid": "ee5b7fcc9ac3b6213844cc24ab0a94a5", "score": "0.6812424", "text": "def show(self):\n\t\t\n\t\tif self.input.user == \"all\":\n\t\t\treturn self.list()\n\n\t\treturn [u for u in User().list() if u[\"name\"]==self.input.user]", "title": "" }, { "docid": "8fe8c6b8a3219c700c7e6114e5b0e0fb", "score": "0.68094397", "text": "def displayUsers():\n list_users = []\n users = storage.all('User')\n for key, value in users.items():\n list_users.append(value.to_dict())\n return jsonify(list_users)", "title": "" }, { "docid": "27f58472dc6641839c4387c0caf14837", "score": "0.6802501", "text": "def users():\n users = db.execute(\"SELECT id, username from USERS\")\n return render_template(\"users.html\", users=users, adminIDs=getAdminIDs())", "title": "" }, { "docid": "b169d45e512081731c9f32f0b901bf54", "score": "0.6801924", "text": "def get_username_list(self):\n conn, cursor = self.connect()\n self.select_db(cursor)\n cursor.execute(\"SELECT username FROM Users\")\n query_return = cursor.fetchall()\n self.close(conn, cursor)\n return query_return", "title": "" }, { "docid": "afff5800e3168ebbdbd4f38d7747809d", "score": "0.67999184", "text": "def get(self, request, format=None):\n\n usernames = [user.username for user in User.objects.all()]\n return Response(usernames)", "title": "" }, { "docid": "242064dc0eb69ba70791eb4f08b450de", "score": "0.6793567", "text": "def list_users():\n try:\n users = g.user.get_all_users()\n data = [{\n 'id' : user.id,\n 'first_name' : user.first_name,\n 'last_name' : user.last_name,\n 'occupation' : user.occupation,\n 'year_of_birth' : user.year_of_birth,\n 'email' : user.email,\n 'account_type' : user.account_type\n } for user in users ]\n return data_response( data )\n except AuthorizationError:\n return error_response( 'Neuspješno dohvaćanje popisa korisnika: Nedovoljne ovlasti.', 403 )\n except:\n return error_response( 'Neuspješno dohvaćanje popisa korisnika: Nevaljan zahtjev.' )", "title": "" }, { "docid": "ba57ae8e23bffa403f12239f226a4673", "score": "0.6792802", "text": "def list(*param, **dic):\n usr = Server.instance().db._userRequest.getAllUser()\n return usr", "title": "" }, { "docid": "58f1589235416c294ea7e0406c094435", "score": "0.67920303", "text": "def users():\n\n users = User.query.all()\n return render_template(\"user-list.html\", users=users)", "title": "" }, { "docid": "f01bd7aa41c52fbd2fab5077f85cec3b", "score": "0.678475", "text": "async def allnames(self, ctx, *, user: converters.UserMemberConverter=None):\n if user is None:\n user = ctx.message.author\n names = await self.names_for(user)\n names = utils.clean_formatting(\", \".join(names))\n names = utils.clean_mentions(names)\n await ctx.send(\"Names for {}\\n{}\".format(user, names))", "title": "" }, { "docid": "96ebd1d891fd334f232f939a4ca973ea", "score": "0.6773458", "text": "def list_users(page):\n fields = [Users.id, Users.login]\n query = db_queries.get_all_users(fields)\n pagination = paginate(query, page, app.config['ITEMS_PER_PAGE'])\n flash('Found users: %s.' 
% pagination.total, 'info')\n return render_template('list.html', rows=pagination.items, pagination=pagination,\n session=session, table='users',\n fields=[field.__dict__['key'] for field in fields])", "title": "" }, { "docid": "4d3cd334084a30a4871bd872883ba4fd", "score": "0.67400634", "text": "def displayUser(cls):\n return cls.userList", "title": "" }, { "docid": "b44723e3758e8394828cad716a7e11ed", "score": "0.6737834", "text": "def list_server_users(self):\r\n users = ApplicationWebSocket._list_connected_users()\r\n logging.debug('list_server_users(): %s' % repr(users))\r\n # Remove things that users should not see such as their session ID\r\n filtered_users = []\r\n policy = applicable_policies('gateone', self.current_user, self.prefs)\r\n allowed_fields = policy.get('user_list_fields', False)\r\n # If not set, just strip the session ID\r\n if not allowed_fields:\r\n allowed_fields = ('upn', 'ip_address')\r\n for user in users:\r\n if not user: # Broadcast client (view-only situation)\r\n continue\r\n user_dict = {}\r\n for key, value in user.items():\r\n if key in allowed_fields:\r\n user_dict[key] = value\r\n filtered_users.append(user_dict)\r\n message = {'go:user_list': filtered_users}\r\n self.write_message(json_encode(message))\r\n self.trigger('go:user_list', filtered_users)", "title": "" }, { "docid": "1ed8a2ccd262f62ff7fec4b1bce8271a", "score": "0.6733907", "text": "def display_all_users():\n\n users = User.query.all()\n\n return render_template('users.html', users = users)", "title": "" }, { "docid": "1831d1aab1416febc77c5394ba8dc4d0", "score": "0.67328167", "text": "def get_users(self):\n return self._request('GET', 'rest/users/list')", "title": "" }, { "docid": "4182e18c113a988de7913159d130b990", "score": "0.6722633", "text": "def read_users():\n usermanager = UserManager()\n for user in usermanager.users:\n click.echo(\"Username: \" + user.get_username() +\n \" & Password: \" + user.get_password())", "title": "" }, { "docid": "4e35f973cfcfe07963dcacbaa72d22e6", "score": "0.67128557", "text": "def registered_users():\n users = User.query.all()\n roles = Role.query.all()\n return render_template(\n 'admin/registered_users.html', users=users, roles=roles)", "title": "" }, { "docid": "59e80cc05414ad3cf176f1d70133d6a3", "score": "0.67060673", "text": "def userlist(self):\n sort_by = request.POST['sort_by']\n sort_how = h.asbool(request.POST['sort_how'])\n\n if 'None' in request.POST['letter']:\n c.users = []\n return render('accounts.snippets.userlist')\n\n if 'letter' in request.POST:\n start_letter = request.POST['letter']\n else:\n start_letter = 'All'\n\n c.lengths, userlist = get_users_list(self.domain,\n start_letter,\n sortby=sort_by,\n sort_ascending=sort_how)\n\n if not userlist:\n c.error = _(\"No results retrieved.\")\n else:\n c.users = userlist\n return render('accounts.snippets.userlist')", "title": "" }, { "docid": "48e9d44c048e0e10edb6a747f33f2d1c", "score": "0.67007685", "text": "def get_all_users(self):\n self.cur.execute(\"SELECT user_id, email,names,role FROM users\")\n return self.cur.fetchall()", "title": "" }, { "docid": "71f61290fa1b6490e102abc6ae8ba747", "score": "0.66889536", "text": "def get_all_users():\n user_obj = User.query.all()\n if user_obj is None:\n return util.build_json_response('No users found')\n\n user_list = [u.username for u in user_obj]\n return util.build_json_response('User list retrieved', users=user_list)", "title": "" }, { "docid": "96101410ef762c49c08fb2d8c31e4012", "score": "0.66582894", "text": "def user_names(self) -> 
pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"user_names\")", "title": "" }, { "docid": "c7b94b8d11a7f3865feab81d1d06c2f0", "score": "0.66548055", "text": "def all_users_list(self) -> list:\n qry = self.session.query(\n self.AllUsers.login,\n self.AllUsers.last_login\n )\n return qry.all()", "title": "" }, { "docid": "fb696ecc6726e485da612bb7e8bc1efc", "score": "0.66511476", "text": "def get_api_user_list(self):\n return self.call_api('get', 'users')", "title": "" }, { "docid": "1153100187c14ac947ba9b642cf20033", "score": "0.6650739", "text": "def get_all_users(self):\n return list(self.users.find({}, {\"_id\": 0, \"username\": 1, \"user_id\": 1}))", "title": "" }, { "docid": "2a151d1d2832d0b655c413c73b116ab9", "score": "0.66380334", "text": "def ro_user_list(self) -> str:\n return pulumi.get(self, \"ro_user_list\")", "title": "" }, { "docid": "1bf01d77cd5d2b8264826ccfef3e4ce5", "score": "0.6636287", "text": "def list_users(self) -> List[dict]:\n\n results = []\n qry = 'SELECT user_id, username FROM ' + self.userTable +\" where study_name=%(study_name)s\"\n vals = {\"study_name\": self.study_name}\n rows = self.execute(qry, vals)\n\n if len(rows) == 0:\n return []\n else:\n for row in rows:\n results.append(row)\n return results", "title": "" }, { "docid": "c712d2e95b5115f02b29ef2689674a91", "score": "0.66325426", "text": "async def getAllUsers(ctx):\n return ctx.guild.members", "title": "" }, { "docid": "f9289d23d46fd8ec456d29811b39fb6c", "score": "0.6625654", "text": "def print_users(self):\n print(\"printing the users\")\n with self._users_lock:\n for user in self._users.keys():\n print(user)", "title": "" }, { "docid": "828589521a292dcfd700350e94791245", "score": "0.66219544", "text": "def get_users():\n\n users = User.query.all()\n\n return render_template('user_list.html', users=users)", "title": "" }, { "docid": "bb5bd1aa46f23f59727bfa2c1ce3268f", "score": "0.661847", "text": "def admin_list_users():\n\n usr = verify_user()\n if not usr or not users.is_admin(usr[\"email\"]):\n raise EXCEPTION_UNAUTHORIZED\n\n return [{\n \"name\": usr[\"name\"],\n \"email\": usr[\"email\"]\n } for usr in users.get_users()]", "title": "" }, { "docid": "78b593f3df200a6cf6aeeecdc3600e36", "score": "0.66004366", "text": "def ls(index, id, username, partialname, publickey, page, items, head):\n spinner = Halo(\"Retrieving users\", spinner=\"dot\").start()\n r = galileo.profiles.list_users(\n userids=list(id),\n usernames=list(username),\n partial_usernames=list(partialname),\n public_keys=list(publickey),\n page=page,\n items=items,\n )\n\n if len(r) == 0:\n spinner.stop()\n click.echo(\"No user matches that query.\")\n return\n\n if isinstance(index, int):\n users_list = r[index]\n else:\n users_list = r\n\n users_list = [user.__dict__ for user in users_list]\n users_df = pandas.json_normalize(users_list)\n users_df = users_df[[\"username\", \"userid\", \"mids\"]]\n\n spinner.stop()\n if head:\n click.echo(users_df.head(head))\n else:\n click.echo(\"(Displaying only first 30 items)\\n\")\n click.echo(users_df.head(30))", "title": "" }, { "docid": "a434c2689e2f7c6155efabdf0cffbcb4", "score": "0.6597045", "text": "def user_list():\n users = User.query.all()\n # user_id = users.user_id\n return render_template(\"user_list.html\", users=users)", "title": "" }, { "docid": "9e97847f8c182a675b8f4ecadd164dff", "score": "0.65776205", "text": "def __get_registered_users(self) -> List[ent_user.User]:\n registered_users: List[ent_user.User] = []\n users, _ = 
self.user_repository.get_all_users()\n\n if users is None:\n return []\n\n for user in users:\n registered_users.append(ent_user.User(user.name, user.nickname, user.password))\n\n return registered_users", "title": "" }, { "docid": "397c921f1af32e82ef27f17f14c230fc", "score": "0.6576012", "text": "def show_users():\n\n users = User.query.all()\n return render_template('user_list.html', users=users)", "title": "" }, { "docid": "f79bcd891f583345420aa35a2d4b50a9", "score": "0.6572434", "text": "def get_users(self):\n return []", "title": "" }, { "docid": "32867a3d7cd70e82bce89395e889c9de", "score": "0.6571739", "text": "def getUsers():", "title": "" }, { "docid": "d25754529fd2845dc444bed3fb34e52b", "score": "0.65717155", "text": "def list_users():\n return sorted(user.pw_name for user in pwd.getpwall())", "title": "" }, { "docid": "8209be16e59a74d6e417fd89a04784f9", "score": "0.656751", "text": "def print_users(self):\n print(\"\\nUsers in database : \\n\")\n for user in self.users.values():\n print(user, end=\"\\n\")", "title": "" } ]
02f8ec1dbb4565a65a735cd47153f869
This Dataset returns sys.maxsize, but is effectively unlimited.
[ { "docid": "d8c9aeeecff6b2c39021299813852f14", "score": "0.7583838", "text": "def __len__(self):\n return sys.maxsize", "title": "" } ]
[ { "docid": "014ad5c757cf79d33f34e3caa8b16f23", "score": "0.7852997", "text": "def __len__(self):\n\t\treturn min(len(self.dataset), self.opt.max_dataset_size)", "title": "" }, { "docid": "591f645eecd2c5835f57f0dfe90c7e6f", "score": "0.77031475", "text": "def maxsize(self):\n return len(self._data)", "title": "" }, { "docid": "0f85ba16f4ec47e2af53cfe306ecc5be", "score": "0.76373404", "text": "def graph_data_size_max(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_max or 0)", "title": "" }, { "docid": "edefdb21b6083c4147a6d38918d5d3b1", "score": "0.75820345", "text": "def max_size(self):\n raise NotImplementedError()", "title": "" }, { "docid": "fb9964bcab2df86abe09d8badae1a2d4", "score": "0.7554182", "text": "def maxsize(self):\r\n return None", "title": "" }, { "docid": "f0b0850c4c224c4477bc4cd8459da29f", "score": "0.7544423", "text": "def maxsize(self):\r\n return self._maxsize", "title": "" }, { "docid": "832c0948b400a39c04a2bc70a0456d6a", "score": "0.7427881", "text": "def recommended_max_num_datapoints(self) -> int:\n # very large number, essentially no limit by default\n return 1e9", "title": "" }, { "docid": "419c1f8c134322f3654446602cb4a69b", "score": "0.72739136", "text": "def maximum_size(self):\n return self._maximum_size", "title": "" }, { "docid": "76580ce800923ced4dd3032a7f080b0a", "score": "0.72561973", "text": "def maxsize(self) -> int:\n return self._maxsize", "title": "" }, { "docid": "550a91bdc2202c1808c1a830f2a123dd", "score": "0.71554714", "text": "def get_max_size(self):\n max_size = 0\n file = h5py.File(self.filename, 'r')\n for idx in range(len(self)):\n label = self.labels[idx]\n timestamps_group = file['/'][self.mode + '_timestamps']\n timestamps_dset = timestamps_group[label]\n size = len(timestamps_dset)\n if size > max_size: max_size = size\n file.close()\n return max_size\n\n # max_size = 0\n # for i in range(len(self)):\n # item = self[i][0]\n # if len(item) > max_size:\n # max_size = len(item)\n # return max_size", "title": "" }, { "docid": "7b9c4c3bc048aa2ec46bf00f73e2fbef", "score": "0.7038319", "text": "def _getMaxSize(self):\n \n # get gene list and related seqs\n geneList = map(lambda l: l.strip('\\n'), open(self.mdapArgs[1]).readlines())\n self.coRegSeqs = MDAP_defs.seqSubSet(geneList,self.mdapArgs[0])\n \n # warn user if there are missing genes\n if self.coRegSeqs[1]:\n sys.stderr.write('Warning: %s seqs in your gene list were not found in the fasta file provided.\\nA list of names follows:\\n%s\\n'\\\n % (len(self.coRegSeqs[1]),str(self.coRegSeqs[1])))\n \n # Concatonate, get and set self.maxsize\n return len(''.join(self.coRegSeqs[0].values()))\n \n \n #----------- ", "title": "" }, { "docid": "460f42002a43c9de8ba845e635ed3be5", "score": "0.69655234", "text": "def dataset_size(self):\n return self.dataset.size", "title": "" }, { "docid": "9a6711b0c0aa76d2ed650cc7d9e20422", "score": "0.69364065", "text": "def max(self):\n return(len(self.__d))", "title": "" }, { "docid": "e7cb719acd8516926d46924658c5bfe6", "score": "0.69280666", "text": "def MAXMEM(self):", "title": "" }, { "docid": "dca47bc82260408b106e88e8d2dd9f18", "score": "0.6874828", "text": "def size_limit(self):\n\t\treturn self._size_limit", "title": "" }, { "docid": "9114f7645ec10ebfa230a2a909f2e79f", "score": "0.68626", "text": "def data_size( self, groups ):\n #if len(groups) == 0:\n # return 0\n return max( groups.values() )", "title": "" }, { "docid": "b86a17637829e5a01570b558eb811740", "score": "0.6708185", "text": "def max_size(self):\n size = 1\n for idx in 
self.config.index_specs:\n size *= len(idx.distribution)\n return size", "title": "" }, { "docid": "aae2f208694b14d30b4b8e46445e619b", "score": "0.67080206", "text": "def quick_run_limit(self):\n try:\n return int(environment.get(\"Quick\"))\n except KeyError:\n return maxsize", "title": "" }, { "docid": "db9251c725a89e2871b4f5e335869bff", "score": "0.6679711", "text": "def len_max(self):\n return 16 + 16 + 8 + 8 + Tools.bin_to_dec(self.get_data_size()) + Tools.bin_to_dec(self.get_verification_size())", "title": "" }, { "docid": "be79daebd005cfeebb04da85e2bda79e", "score": "0.66728234", "text": "def max_chunk_size(self):\n return min(constants.MAX_CHUNK_SIZE, self._maxdata // 2) or constants.MAX_PUSH_DATA", "title": "" }, { "docid": "a88d024f68e3c68c7b512de8efea9e5d", "score": "0.665125", "text": "def required_memory_maximum(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"required_memory_maximum\")", "title": "" }, { "docid": "d94c5d8d3e6b56d86e4a31ac9c4e984f", "score": "0.6647536", "text": "def maxMessageSize(self, appdata=None):\r\n return self.user.maxMessageSize", "title": "" }, { "docid": "93b6ded3510790807b341e226bc77115", "score": "0.664416", "text": "def __len__(self):\n if not self.opt.union:\n return min(len(self.dataset), self.opt.max_dataset_size)\n else:\n return len(self.batch_sampler)", "title": "" }, { "docid": "b6f4031abd151f0cba9b2a5956697752", "score": "0.662768", "text": "def argo_size_decider(xdf,byte_lim =200000000):\n fsize = xdf.nbytes\n if fsize > byte_lim:\n ds_large = True\n else:\n ds_large = False\n return ds_large", "title": "" }, { "docid": "3fe198fe8754b6b526e02e4df0847565", "score": "0.66016006", "text": "def GetMaxBatchSize(self, run_params):\n if run_params.dynamic_engine:\n return None\n return min(self.max_batch_sizes)", "title": "" }, { "docid": "dacea2d1571a280ff193d1f679cca602", "score": "0.6582471", "text": "def __len__(self):\n return max(self.A_size, self.B50_size, self.B100_size, self.B150_size)", "title": "" }, { "docid": "637578cf6f69b7fcba890f846415f30b", "score": "0.65675694", "text": "def data_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.data_edge_count_max or 0)", "title": "" }, { "docid": "fbd0fc42076943aa3fe895fe189b9b28", "score": "0.6522303", "text": "def max_capacity(self) -> int:\n return self._max_capacity", "title": "" }, { "docid": "cc3194e3c1c5048b372226a9429c655b", "score": "0.65163666", "text": "def __len__(self):\n return self.limit_batches", "title": "" }, { "docid": "052b135adefeb16d99dfea460a33eb17", "score": "0.6513411", "text": "def max_memory_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_memory_gib\")", "title": "" }, { "docid": "052b135adefeb16d99dfea460a33eb17", "score": "0.6513411", "text": "def max_memory_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_memory_gib\")", "title": "" }, { "docid": "42b28d6bfac4d10e85af03b5f653585c", "score": "0.65026045", "text": "def calculate_max_input_size(self):\n max_size = configuration.get_config_value(\n 'server', 'maxsingleinputsize')\n self.max_size = configuration.get_size_mb(max_size)", "title": "" }, { "docid": "738e40db9915f1e790b7fc5c050e55f0", "score": "0.64872915", "text": "def maximum_over_capacity(self):\n return self._maximum_over_capacity", "title": "" }, { "docid": "57e8d88cfbf6c3ee73d84c05267cc43c", "score": "0.6481141", "text": "def graph_data_size_min(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size_min or 0)", "title": "" }, { "docid": 
"d6f4c1acd604918e9da77a73e2b72ac3", "score": "0.64663935", "text": "def size_limit_in_bytes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"size_limit_in_bytes\")", "title": "" }, { "docid": "870348c2b88ed554d3f967859355dc42", "score": "0.6427558", "text": "def container_log_max_size_mb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"container_log_max_size_mb\")", "title": "" }, { "docid": "c6cab9ca350525380597e6e77898e5e1", "score": "0.64186084", "text": "def TableFeatureMaxEntries(self):\n\t\treturn self._get_attribute('tableFeatureMaxEntries')", "title": "" }, { "docid": "2f06e57bb907fbc9ea428c2512c9e109", "score": "0.6405466", "text": "def max_memory_gib(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"max_memory_gib\")", "title": "" }, { "docid": "cc1855c29da76dc97724981fd447fb37", "score": "0.6398563", "text": "def get_max_record_limit(self):\n return self.max_record_limit", "title": "" }, { "docid": "b1e6b27b30ef3d28f5f644b0c253c82c", "score": "0.63746136", "text": "def dataset_size(self):\n if not self._dataset_size:\n # pylint: disable=attribute-defined-outside-init\n self._dataset_size = count_file_lines(\n self._hparams.source_dataset.files)\n return self._dataset_size", "title": "" }, { "docid": "3113b7dd31d17b39dbae8edb6e3850e5", "score": "0.63561296", "text": "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "title": "" }, { "docid": "6acd1351b0e66897973c57b9ef716e65", "score": "0.6338612", "text": "def _maxValueLength(self):\n returnvalue = 0\n for row in self._value:\n for item in row:\n if (type(item) == type(float())):\n returnvalue = max(returnvalue, len('%.3f' % item))\n else:\n returnvalue = max(returnvalue, len(str(item)))\n return returnvalue", "title": "" }, { "docid": "1ea3c0a24e22f15022e0db309f99eab3", "score": "0.63347256", "text": "def max_node_size(self):\n return self.max_node_capacity", "title": "" }, { "docid": "a75676d3a3c2f80a42b4f70fb03119dc", "score": "0.63035357", "text": "def record_batch_size(self):\n return 10000", "title": "" }, { "docid": "e81a2633602078eb0f4981dfb28423dd", "score": "0.629832", "text": "def graph_data_size(self) -> int:\n return int(self.graph_tuple_stats.graph_data_size or 0)", "title": "" }, { "docid": "f2b1d2fcf676ac3cd58ba1b715f70473", "score": "0.62965894", "text": "def limit_bytes(self):\n return self._limit_bytes", "title": "" }, { "docid": "5bd7f4c0974496d8511dc7a5876caa1a", "score": "0.6295468", "text": "def setmaxsize(self, maxsize):\n self.maxsize = maxsize", "title": "" }, { "docid": "ee25be8efe85f314b8527552217dae20", "score": "0.6292332", "text": "def max(self):\r\n return np.max(self.data_array)", "title": "" }, { "docid": "13e27026bcecfebb80364260b6ff4b84", "score": "0.62849116", "text": "def max_length(self):\n\t\treturn self._max_length", "title": "" }, { "docid": "b83cf38afd0e737935a9903becf331d6", "score": "0.6267079", "text": "def max_num_batches(self):\n return self._max_num_batches", "title": "" }, { "docid": "07374ed2688a788352b7445ca3c9abe5", "score": "0.6259691", "text": "def __len__(self):\n return self.dataset.shape[0]", "title": "" }, { "docid": "8d9f0296b5340d525fd8157b4b060b3d", "score": "0.6258112", "text": "def charlimit():\n\n # Return the value\n return sys.maxsize", "title": "" }, { "docid": "0e15371a9ee7eeae3e704b3ea60f46bd", "score": "0.62552184", "text": "def max_bytes(self) -> int:\n return self.width * self.height * 3", "title": "" }, { "docid": "738d372f26bec6c4e2dd07459103f265", "score": "0.6249887", 
"text": "def minsize(self):# -> int:\r\n return 0", "title": "" }, { "docid": "533db83f61c0ecc9df841f43c93ce4e4", "score": "0.624839", "text": "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "title": "" }, { "docid": "81a8a8954c4f3708d42604831bc27e4d", "score": "0.62470883", "text": "def update_maximum_size(self):\n if self.initialized:\n max_size = self._compute_maximum_size()\n self.set_max_size(max_size)", "title": "" }, { "docid": "e2eac30e06bbde26f69b61475276e1b0", "score": "0.6240061", "text": "def max_size_bytes(self) -> Optional[float]:\n return pulumi.get(self, \"max_size_bytes\")", "title": "" }, { "docid": "f7f3863b290a94fd637f4caaeb7ae95b", "score": "0.6239025", "text": "def test_largedata(self):\n cur = self.connect().cursor()\n cur.execute(\"SELECT @@max_allowed_packet\")\n if cur.fetchone()[0] < 16 * 1024 * 1024 + 10:\n print(\"Set max_allowed_packet to bigger than 17MB\")\n return\n t = \"a\" * (16 * 1024 * 1024)\n cur.execute(\"SELECT '\" + t + \"'\")\n assert cur.fetchone()[0] == t", "title": "" }, { "docid": "9d5c3db78589f469cb976e50eb9075c8", "score": "0.62338567", "text": "def max_disk_size(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_disk_size\")", "title": "" }, { "docid": "3ba1c9e3412f228037d96cc4216f0a8c", "score": "0.62305033", "text": "def __init__(self):\n self.data = []\n self.min = sys.maxsize", "title": "" }, { "docid": "efe328a4b39bd16f4c6b35ca950905af", "score": "0.6227722", "text": "def minimum_size(self):\n # TODO: Allow `Source` to understand when this returns None?\n return 3.", "title": "" }, { "docid": "90593313ef2c0819472a725e9ea549b3", "score": "0.6225135", "text": "def Capacity(self) -> int:", "title": "" }, { "docid": "490b4f7f427cce5fafc4e1429c899536", "score": "0.6224223", "text": "def get_max_objects_per_file(self):\n return self.__max_objects_per_file", "title": "" }, { "docid": "c2c548b67bc7825c1931fb31b7c97133", "score": "0.6216922", "text": "def data_size(self) -> int:\n return len(self.__labels)", "title": "" }, { "docid": "f7e17964d779ac39b16f7dd7b07a8d37", "score": "0.62139237", "text": "def maxlen(self):\n \n return reduce(max, list(map(len, self.tags)))", "title": "" }, { "docid": "bf6390ba8aea1b9ffcdd82fc9e218593", "score": "0.6207323", "text": "def max_pool_size(self) -> ConfigNodePropertyInteger:\n return self._max_pool_size", "title": "" }, { "docid": "1a28cb4e14e9fd03ee738d394f09e02e", "score": "0.61899585", "text": "def getMaxValue(self):\n # TODO: make this more consistent accross versions\n # This was a \"fix\" when we started supported PS5000a\n return self.MAX_VALUE", "title": "" }, { "docid": "07f5e822e8797fe4c9ee69b3c02d8182", "score": "0.61825836", "text": "def ram_max(self):\n return max(self.ram_samples)", "title": "" }, { "docid": "cb7672a1c9e4a8264c23095ba667380e", "score": "0.61751795", "text": "def get_available_space(self):\n return self.maxsize - len(self)", "title": "" }, { "docid": "123e4033353f8077fd14f6a23b272f17", "score": "0.6175156", "text": "def maxSize(self):\n maxPartSize = self._unalignedMaxPartSize\n maxFormatSize = self.format.maxSize\n unalignedMax = min(maxFormatSize, maxPartSize) if maxFormatSize else maxPartSize\n return self.alignTargetSize(unalignedMax)", "title": "" }, { "docid": "255f74d28de2c1af54887bc5656e5265", "score": "0.61697155", "text": "def max_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_capacity\")", "title": "" }, { "docid": "255f74d28de2c1af54887bc5656e5265", "score": "0.61697155", "text": "def 
max_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_capacity\")", "title": "" }, { "docid": "255f74d28de2c1af54887bc5656e5265", "score": "0.61697155", "text": "def max_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_capacity\")", "title": "" }, { "docid": "1094e8c7a84af38d002ed70fc82d1082", "score": "0.6167468", "text": "def maxsize(self, maxsize):\n self.shape = (int(maxsize), ) + self.shape[1:]\n self.clear()", "title": "" }, { "docid": "776776a6b904cacb7bba1a15d86a708f", "score": "0.6163169", "text": "def max_disk_size(self) -> str:\n return pulumi.get(self, \"max_disk_size\")", "title": "" }, { "docid": "d04cb3aa43c1abe6a85e4d7c8a17ceb4", "score": "0.61602026", "text": "def max_mireds(self):\n return 333", "title": "" }, { "docid": "6314e301062bd187711d308c1577dd15", "score": "0.6152927", "text": "def __len__(self):\r\n return 100000", "title": "" }, { "docid": "d125deae847eb2aff1a0c9887eccc14e", "score": "0.61510664", "text": "def max_files(self):\n\n return 10 ** self.int_len(self.cnt_files())", "title": "" }, { "docid": "ec253548424516967fab4d080787b28a", "score": "0.6143614", "text": "def max_size(self):\n sizes = np.array([m.sum() for m in self.masks])\n return sizes.max()", "title": "" }, { "docid": "72fcc5874a0563f392a7e2fb2f8f8d47", "score": "0.6143347", "text": "def bufSize(self) -> int:\n return self._coreIndex.size()", "title": "" }, { "docid": "e40d17357dfd1cea1294dcb66680710f", "score": "0.6140658", "text": "def max_length(self) -> int | None:\n return self._underlying.max_length", "title": "" }, { "docid": "0e21bd0433c398b277267e7cd73739b8", "score": "0.614031", "text": "def Max(data):\n return data.max()", "title": "" }, { "docid": "d1151c28696162d63141b22466654040", "score": "0.6135843", "text": "def __len__(self):\n if not hasattr(self.limitedstream, \"limit\"):\n return 0\n return self.limitedstream.limit", "title": "" }, { "docid": "87d80b46825b0fa7d3c2a1e11bc64f60", "score": "0.6133004", "text": "def __len__(self):\n return max(self.A_size, self.B_size)", "title": "" }, { "docid": "de803146378736190432d97824dbc70d", "score": "0.6127637", "text": "def abs_max_heat_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_heat_setpoint_limit\", 3000)", "title": "" }, { "docid": "8f1f36107f6f7d2ca1a03a437c658d1c", "score": "0.61252886", "text": "def max_capacity_per_scale(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_capacity_per_scale\")", "title": "" }, { "docid": "22bf83756ad732e762c8142ce596b931", "score": "0.61230433", "text": "def scale_max_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scale_max_capacity\")", "title": "" }, { "docid": "22bf83756ad732e762c8142ce596b931", "score": "0.61230433", "text": "def scale_max_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scale_max_capacity\")", "title": "" }, { "docid": "92bafc7e94c869c87cfc60743f6a9a13", "score": "0.61160535", "text": "def abs_max_cool_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_cool_setpoint_limit\", 3200)", "title": "" }, { "docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { "docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { 
"docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { "docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { "docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { "docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { "docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { "docid": "bcae69bc8d31822c900b0d996957e8c4", "score": "0.6113825", "text": "def max_target_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_target_capacity\")", "title": "" }, { "docid": "aa1146b9cd9b22b27acf45994944a0e7", "score": "0.61121815", "text": "def maxmemory_delta(self) -> int:\n return pulumi.get(self, \"maxmemory_delta\")", "title": "" }, { "docid": "957abfa19a1ba803765fc76478f937b2", "score": "0.611015", "text": "def min_size(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e84172bf28d58c9b7d0a708ae9184e40", "score": "0.6110147", "text": "def maxmemory_delta(self) -> Optional[int]:\n return pulumi.get(self, \"maxmemory_delta\")", "title": "" } ]
7b27d376791b46a38ba562f6fe6bd2d6
Return dict of current parameters.
[ { "docid": "d9ddcdb75d127dfd6736448eb7236734", "score": "0.75191206", "text": "def get_params(self):\n parameter_dict = {\n 'regression_model': self.regression_model,\n 'holiday': self.holiday,\n 'mean_rolling_periods': self.mean_rolling_periods,\n 'macd_periods': self.macd_periods,\n 'std_rolling_periods': self.std_rolling_periods,\n 'max_rolling_periods': self.max_rolling_periods,\n 'min_rolling_periods': self.min_rolling_periods,\n \"ewm_var_alpha\": self.ewm_var_alpha,\n 'ewm_alpha': self.ewm_alpha,\n 'additional_lag_periods': self.additional_lag_periods,\n 'abs_energy': self.abs_energy,\n 'rolling_autocorr_periods': self.rolling_autocorr_periods,\n 'add_date_part': self.add_date_part,\n 'polynomial_degree': self.polynomial_degree,\n 'x_transform': self.x_transform,\n 'regression_type': self.regression_type,\n 'window': self.window,\n }\n return parameter_dict", "title": "" } ]
[ { "docid": "3cf9e43ecc4bec239accccc8d23c8210", "score": "0.8382747", "text": "def param_dict(self):\n return self.params.param_dict()", "title": "" }, { "docid": "9f9705309e0e045c816935e2ba229178", "score": "0.8358229", "text": "def get_params(self) -> dict:\n\t\treturn dict()", "title": "" }, { "docid": "618106ad9c5c7a07219ecab5002a1d92", "score": "0.8238529", "text": "def get_params(self):\n return {}", "title": "" }, { "docid": "7bd3f64e050524793a2e7faf196a43bd", "score": "0.8238242", "text": "def param_dict(self):\r\n return self._param_dict", "title": "" }, { "docid": "d7e9561d4af3db623c10167903d3474d", "score": "0.8208727", "text": "def parameters(self) -> Dict:\n return self.__dict__", "title": "" }, { "docid": "02d4e0332110c4c57a8733f74241cd82", "score": "0.80972624", "text": "def param(self):\n return dict()", "title": "" }, { "docid": "1d23d1ab9edfabd1aaff4441a743b3e4", "score": "0.8083471", "text": "def get_params(self) -> Dict[str, object]:\n params = {}\n\n # example for the window size parameter\n # params[\"window_size\"] = self.window_size\n\n return params", "title": "" }, { "docid": "085255b96400d5a5554ea0984b4ce861", "score": "0.8064837", "text": "def currentParams(self):\n record = self.registrar.currentRecord()\n return {\"length\": record.length, \"nstep\": record.nstep}", "title": "" }, { "docid": "2b1741882153c3a388926077c65229a4", "score": "0.8022113", "text": "def get_parameters(self):\n\n params = {}\n for par in self.PARAMETERS:\n params[par] = getattr(self, par)\n return params", "title": "" }, { "docid": "91f85587a05c2e9821388f1c9d464f9a", "score": "0.8018692", "text": "def currentParams(self):\n\t\trecord = self.registrar.currentRecord()\n\t\treturn {\"length\": record.length, \"nstep\": record.nstep}", "title": "" }, { "docid": "16fd28a4a51642fdf9e28eabe9003787", "score": "0.791599", "text": "def parameters(self) -> dict:\n return self.__parameters", "title": "" }, { "docid": "16fd28a4a51642fdf9e28eabe9003787", "score": "0.791599", "text": "def parameters(self) -> dict:\n return self.__parameters", "title": "" }, { "docid": "e6fbf793bd79463e91c5a334844f3b49", "score": "0.7870874", "text": "def get_params(self):\n return {\n 'model': self.model,\n 'model_parameters': self.model_parameters,\n 'decomposition': self.decomposition,\n 'n_components': self.n_components,\n }", "title": "" }, { "docid": "19ac95c49e7aea3daeb218f393f9750e", "score": "0.7847043", "text": "def get_params_info(cls):\n return {}", "title": "" }, { "docid": "a12e41aaf81582ca2ffaf530e95f87a1", "score": "0.7832231", "text": "def get_params(self) -> Dict[str, Any]:\n dictionary = {param: getattr(self, param) for param in self.defence_params}\n return dictionary", "title": "" }, { "docid": "87ab00c059d85b0c396152f04fea8d9d", "score": "0.78227305", "text": "def current_params(self):\n return self._current_params", "title": "" }, { "docid": "8464dc7d6398b606fe1fb32993dbdcc8", "score": "0.7770235", "text": "def get_params(self, *args, **kwargs):\n return {}", "title": "" }, { "docid": "806d45048dde3e8cb724b68990e61d3b", "score": "0.7732879", "text": "def get_parameters(self):\n d = {}\n for p in self.processors:\n parameter_names = list(p.PARAMETERS.keys())\n parameter_values = [getattr(p, n) for n in parameter_names]\n d.update(dict(zip(parameter_names, parameter_values)))\n return d", "title": "" }, { "docid": "3626dafc173e186dec1c2440834ffc05", "score": "0.77274066", "text": "def get_params(self):\n return{\"n_channels\": self.n_channels,\n \"channel_exp\": self.channel_exp,\n 
\"stimulus_mode\": self.stimulus_mode,\n \"range_start\": self.range_start,\n \"range_stop\": self.range_stop,\n \"channel_domain\": self.channel_domain,\n \"stim_res\": self.stim_res}", "title": "" }, { "docid": "980f12fa5faa1886efddf83a4c46accb", "score": "0.7703039", "text": "def parameters(self) -> Dict[Any, List[Any]]:\n return self._parameters", "title": "" }, { "docid": "1f655b960eec90e698f5e63ecad80c5f", "score": "0.770084", "text": "def get_params(self):\n parameter_dict = {\n 'regression_model': self.regression_model,\n 'datepart_method': self.datepart_method,\n 'regression_type': self.regression_type,\n }\n return parameter_dict", "title": "" }, { "docid": "81d5b59d17895c9c161488a1983711e3", "score": "0.768911", "text": "def getParameters(self) :\n\t\treturn self.getParameterDict().values()", "title": "" }, { "docid": "28a33265674b121dc04862b1a0c27a20", "score": "0.7666477", "text": "def get_params(self):\n\t\treturn self.params", "title": "" }, { "docid": "59303dfff1437753808cfb43ff9eabc5", "score": "0.7651696", "text": "def get_params(self):\n return {'window_length': self._window_length,\n 'initial_pred': self._initial_pred}", "title": "" }, { "docid": "f7d7c1cd433733d76114bba710cd26da", "score": "0.7646104", "text": "def get_params(self):\n return self[\"params\"]", "title": "" }, { "docid": "daed6293ea3954102a65746df141dace", "score": "0.7604805", "text": "def get_params(self):\n return self.params", "title": "" }, { "docid": "78ef6e0adecc16f0ea9c9663fb7feed9", "score": "0.7585268", "text": "def params():\n return {}", "title": "" }, { "docid": "f14900dcc4d97a38745411dfff4b162f", "score": "0.75769055", "text": "def get_params(self):\n params = {\"pi\": self.pi, \"mu_0\": self.mu_0, \"mu_1\" : self.mu_1, \"sigma_0\" : self.sigma_0, \"sigma_1\" : self.sigma_1,\n \"Q\": self.Q, \"w\" : self.w, \"b\":self.b}\n return params", "title": "" }, { "docid": "574a233e18214c3fde26eeae80b87609", "score": "0.75534713", "text": "def getParameters(self):\n return dict(mu = self._mu, \n sigma = self._sigma, \n S0 = self._S0,\n dt = self._dt,\n steps = self._steps,\n numScen = self._numScen\n )", "title": "" }, { "docid": "9a7558346739a851542312de9f187ba9", "score": "0.7551109", "text": "def get_parameters(self):\n parameter_names = self.PARAMETERS.keys()\n # TODO: Unresolved reference for processor\n parameter_values = [getattr(processor, n) for n in parameter_names]\n return dict(zip(parameter_names, parameter_values))", "title": "" }, { "docid": "dea3cd20a38270a11ac20ca4b3175cce", "score": "0.7549134", "text": "def get_params(self):\n parameter_dict = {\n 'regression_model': self.regression_model,\n 'mean_rolling_periods': self.mean_rolling_periods,\n 'macd_periods': self.macd_periods,\n 'std_rolling_periods': self.std_rolling_periods,\n 'max_rolling_periods': self.max_rolling_periods,\n 'min_rolling_periods': self.min_rolling_periods,\n \"quantile90_rolling_periods\": self.quantile90_rolling_periods,\n \"quantile10_rolling_periods\": self.quantile10_rolling_periods,\n 'ewm_alpha': self.ewm_alpha,\n \"ewm_var_alpha\": self.ewm_var_alpha,\n 'additional_lag_periods': self.additional_lag_periods,\n 'abs_energy': self.abs_energy,\n 'rolling_autocorr_periods': self.rolling_autocorr_periods,\n 'datepart_method': self.datepart_method,\n 'polynomial_degree': self.polynomial_degree,\n 'regression_type': self.regression_type,\n 'window': self.window,\n 'holiday': self.holiday,\n }\n return parameter_dict", "title": "" }, { "docid": "389e81d197e22d11db7143b9ff123e10", "score": "0.75433433", "text": 
"def get_params(self, deep=False):\n return dict()", "title": "" }, { "docid": "5aba8c4d632453fa73a21f3183abb08d", "score": "0.753835", "text": "def getParameters(self):\n return {\n 'alpha': self._alpha,\n 'dt': self._dt,\n 'sigma': self._sigma,\n 'mu': self._mu,\n 'S0': self._S0,\n 'steps': self._steps,\n 'numScen': self._numScen\n }", "title": "" }, { "docid": "9b7f63ab711a22e394117d5083165b8a", "score": "0.7534874", "text": "def params(self):\n return {'identity_map': self.identity_map,\n 'num_caps': self.num_caps,\n 'act_fn': self.act_fn,\n 'vec_dim': self.vec_dim,\n 'batch_size': self.batch_size}", "title": "" }, { "docid": "cd2efdf19a738071d9433848bfda5a8e", "score": "0.7517438", "text": "def params(self):\n return self._get_params()", "title": "" }, { "docid": "b65e425152dfc8938b509de06d3ce02b", "score": "0.75094056", "text": "def getParameterDict(self) :\n\t\tfrom theano.compile import SharedVariable\n\t\tres = {}\n\t\tfor k, v in self.__dict__.iteritems() :\n\t\t\tif isinstance(v, SharedVariable) :\n\t\t\t\tres[k] = v\n\t\treturn res", "title": "" }, { "docid": "62ade4456905c8ce55fb8040a5f09a13", "score": "0.7487859", "text": "def get_params(self):\n return {\"orderid\": self.order_id, \"analyse\": self.analyse}", "title": "" }, { "docid": "f0168ec518de3b20d22527254de9dc86", "score": "0.74840796", "text": "def get_params(self) -> typing.Mapping[str, typing.Any]:\n return {}", "title": "" }, { "docid": "13d1bb5476e516da5d6eb3df5aaa832a", "score": "0.7477948", "text": "def get_params(self, *argv, **kwargs) -> dict:\n return self._params.copy()", "title": "" }, { "docid": "1b2b34127d4ab0ab594398716746f912", "score": "0.74741346", "text": "def parameters(self):\n return dict(json.loads(self.parameters_data))", "title": "" }, { "docid": "1b2b34127d4ab0ab594398716746f912", "score": "0.74741346", "text": "def parameters(self):\n return dict(json.loads(self.parameters_data))", "title": "" }, { "docid": "83d3efad880b68abfcf5196d27137c75", "score": "0.7468689", "text": "def parameters(self):\n return {self.name + '_' + n: getattr(self, n) for n in\n self._parameter_names}", "title": "" }, { "docid": "a4df876587c486fb13ce8a85ba577311", "score": "0.74294555", "text": "def report_parameters(self):\n param_dict = dict()\n for parameter in self.properties[\"parameters\"]:\n param_dict[parameter] = self.properties[\"parameters\"][parameter][\"value\"]\n return param_dict", "title": "" }, { "docid": "be31c93ea9ea93ead8447713955fdcbc", "score": "0.74224406", "text": "def get_params(self, deep=True):\r\n return {}", "title": "" }, { "docid": "d0eb583f6810b34496922a0da6388052", "score": "0.7415912", "text": "def getParams(self):\n pass", "title": "" }, { "docid": "e349218200b5e6316a16f563640e8580", "score": "0.74029124", "text": "def getparams(self):\r\n return{self.__params[0]: str(self.getid()), self.__params[1]: str(self.getweight()), \r\n self.__params[2]: str(self.getalpha()), self.__params[3]: str(self.getbqp()),\r\n self.__params[4]: str(self.getbqp_b()), self.__params[5]: str(self.getcellrem()),\r\n self.__params[6]: str(self.getsppfile())}", "title": "" }, { "docid": "c55fe566dde2bfdddcfdb41df4b6f64c", "score": "0.7390775", "text": "def get_params(self) -> dict:\n params = {\n \"Severity\": self.severity,\n \"Confidence\": self.confidence\n }\n for k, v in self.params.items():\n params[k] = v\n return params", "title": "" }, { "docid": "8f7936eab3464a075269c0018ece4192", "score": "0.7390028", "text": "def get_params(self):\n parameter_dict = {\n 'regression_model': self.regression_model,\n 
'holiday': self.holiday,\n 'mean_rolling_periods': self.mean_rolling_periods,\n 'macd_periods': self.macd_periods,\n 'std_rolling_periods': self.std_rolling_periods,\n 'max_rolling_periods': self.max_rolling_periods,\n 'min_rolling_periods': self.min_rolling_periods,\n \"ewm_var_alpha\": self.ewm_var_alpha,\n \"quantile90_rolling_periods\": self.quantile90_rolling_periods,\n \"quantile10_rolling_periods\": self.quantile10_rolling_periods,\n 'ewm_alpha': self.ewm_alpha,\n 'additional_lag_periods': self.additional_lag_periods,\n 'abs_energy': self.abs_energy,\n 'rolling_autocorr_periods': self.rolling_autocorr_periods,\n 'add_date_part': self.add_date_part,\n 'polynomial_degree': self.polynomial_degree,\n 'x_transform': self.x_transform,\n 'regression_type': self.regression_type,\n }\n return parameter_dict", "title": "" }, { "docid": "68f0eddd7b8a81606e739f28013176af", "score": "0.7385088", "text": "def get_params(self):\n return {\n \"field count\": self.field_count,\n \"feature count\": self.feature_count,\n \"file path\": self.filepath,\n }", "title": "" }, { "docid": "d215b8dbef939f7ce74c09f484487cd2", "score": "0.737477", "text": "def get_api_params(self):\n print('getting current api params')\n return self._api_params", "title": "" }, { "docid": "0d48e07c3b54c3f0a09ccbfab2cdd822", "score": "0.7365894", "text": "def get_params(self, deep=True) -> Dict:\n\n parameters = {\n \"get_params\": super().get_params(deep=deep),\n \"n_dims\": self.ndim,\n \"state_dict\": self.state_dict(),\n }\n\n return parameters", "title": "" }, { "docid": "6adb4cef63aad4cb129072d88a64a279", "score": "0.736337", "text": "def get_params(self):\n return {\n 'window_size': self.window_size,\n 'input_dim': self.input_dim,\n 'output_dim': self.output_dim,\n 'normalize_window': self.normalize_window,\n 'max_windows': self.max_windows,\n 'regression_type': self.regression_type,\n 'regression_model': self.regression_model,\n }", "title": "" }, { "docid": "0805e952c283441ac0afbc4057f27206", "score": "0.73481655", "text": "def parameters(self):\n p = {}\n for k in self.sub_kernels:\n p.update(k.parameters)\n return {self.name + '_' + k: v for k, v in p.items()}", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.73451626", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "73464deb87e88bdec05ef72afc5f7f3b", "score": "0.73451626", "text": "def get_params(self):\n raise NotImplementedError", "title": "" }, { "docid": "17fe5b10ed1ed8029204dead33a66d8a", "score": "0.7339599", "text": "def params(self):\n return {'num_caps': self.num_caps,\n 'vec_dim': self.vec_dim,\n 'route_epoch': self.route_epoch,\n 'batch_size': self.batch_size,\n 'idx': self.idx}", "title": "" }, { "docid": "f14b828db956923a82beb9c1521dee62", "score": "0.73288894", "text": "def get_parameters(self):\n params = super().get_parameters()\n params.update({'p': self.p})\n return params", "title": "" }, { "docid": "5b6bf670cd5323a62e95b5d3e88b4ffb", "score": "0.73232543", "text": "def to_dict(self):\n return self.params_dict", "title": "" }, { "docid": "12f51d730b074c9724a334e5228dc485", "score": "0.7322126", "text": "def parameters(self) -> \"Dict\":\n if self._parameters is None: # delayed instantiation to avoid infinte loop\n assert self.__class__ != Dict, \"parameters of Parameters dict should never be called\"\n self._parameters = Dict()\n assert self._parameters is not None\n return self._parameters", "title": "" }, { "docid": "bb0e79c244555a7ff3536a88703e46bf", "score": "0.7321202", "text": 
"def parameters(self):\n return self._parameters", "title": "" }, { "docid": "bb0e79c244555a7ff3536a88703e46bf", "score": "0.7321202", "text": "def parameters(self):\n return self._parameters", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.7305094", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.7305094", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.7305094", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "fe6eb7b8ff3eade4114272e5622922e5", "score": "0.7305094", "text": "def params(self):\n return self._params", "title": "" }, { "docid": "5b232c80d6bb6736190034ff3d5cf0d9", "score": "0.72976774", "text": "def get_params(self):\n\t\treturn []", "title": "" }, { "docid": "84f2544a8ec73d702c93a91983f6558a", "score": "0.72913486", "text": "def get_params(self):\n return self.dbm.get_params()", "title": "" }, { "docid": "3ba4b24b2192c8cfb0f815ed8a26d32a", "score": "0.7267947", "text": "def get_params(self):\n return []", "title": "" }, { "docid": "542458f22973a480103decdeda32442e", "score": "0.7262642", "text": "def get_parameters(self):\n strategy_parameters = {}\n for name in self.parameters:\n strategy_parameters[name] = getattr(self, name)\n return strategy_parameters", "title": "" }, { "docid": "ce0e56db50d2abc339197c960f585b9f", "score": "0.7254253", "text": "def get_params(self):\n param_dict = super(SvmRepurposer, self).get_params()\n param_dict[keys.C] = self.c\n param_dict[keys.KERNEL] = self.kernel\n param_dict[keys.GAMMA] = self.gamma\n param_dict[keys.PROB_ESTIMATES] = self.enable_probability_estimates\n return param_dict", "title": "" }, { "docid": "0e9a4c229cd4ea8a90821320bba8062a", "score": "0.7248258", "text": "def get_params(self) -> dict:\n name = self.name\n path = self.path_root\n problem_type = self.problem_type\n eval_metric = self.eval_metric\n hyperparameters = self._user_params.copy()\n if self._user_params_aux:\n hyperparameters[AG_ARGS_FIT] = self._user_params_aux.copy()\n\n args = dict(\n path=path,\n name=name,\n problem_type=problem_type,\n eval_metric=eval_metric,\n hyperparameters=hyperparameters,\n )\n\n return args", "title": "" }, { "docid": "202619d3eaa5ad032f636bb0183f9f50", "score": "0.7246529", "text": "def get_parameters(self):\n return self.parameters", "title": "" }, { "docid": "0b90ecfbe0405443fdcdb632360a8a3f", "score": "0.7233867", "text": "def get_params(self):\n return self.alg.get_params()", "title": "" }, { "docid": "88533ac072197343030d92f40b26d2a2", "score": "0.722744", "text": "def _get_model_params(self) -> dict:\n return self._get_params()", "title": "" }, { "docid": "04621be558a878573a8c84e583b5f0d5", "score": "0.722365", "text": "def get_params(self):\n\n return None", "title": "" }, { "docid": "46607148cdc7a1efdafe8918cbad0f17", "score": "0.7222106", "text": "def params_dict(self):\n parameters = {}\n for i in self.inputs:\n for p in i.get('parameters', []):\n parameters.setdefault(p.get('name'), p)\n return parameters", "title": "" }, { "docid": "acd562d821f9dee75a92360e2669fc2f", "score": "0.72165835", "text": "def get_params_dic(self):\n\n params_dic = self.params_default_dic.copy()\n\n for k in self.params_file_dic:\n params_dic[k] = self.params_file_dic[k]\n\n return params_dic", "title": "" }, { "docid": "7b364235e073a2f1bb66a8adae3b6a8f", "score": "0.72152346", "text": "def args(self):\n params = self.data.items()\n 
return dict((k, v) for k, v in params if k not in self.PARAMS)", "title": "" }, { "docid": "46f2960cb924da45b25fc48800c0935a", "score": "0.72136104", "text": "def get_params(self):\n\n return self._parameters", "title": "" }, { "docid": "4ff2ff16735cb1449bff230c3137c9a5", "score": "0.7206124", "text": "def _components_params(self) -> dict:\n return {'all': self.controller.parameters()}", "title": "" }, { "docid": "5755f9823aa11736d791c54ec1812f31", "score": "0.7205954", "text": "def to_dict(self):\n self.params_dict['additional_parameters'] = self.additional_parameters\n \n return self.params_dict", "title": "" }, { "docid": "bd9f67504952c103cfe1a09a376d168d", "score": "0.7194325", "text": "def parameters(self):\n return self.__parameters__", "title": "" }, { "docid": "61d79cc366060aec5df7bc5ef58a3484", "score": "0.71933305", "text": "def get_params(self):\n return {\"d\": \"57\"}", "title": "" }, { "docid": "da06a3228259e9c2623f74e4015c3aaf", "score": "0.719021", "text": "def parameters(self) -> Mapping[str, Any]:", "title": "" }, { "docid": "5eb2efdb093d2ecf8fd26f23960d5071", "score": "0.7189004", "text": "def getCurrentSetting(self):\n paramDict = {}\n paramDict['counter_mdlEval' ] = self.counter['mdlEval']\n paramDict['counter_varsUpdate' ] = self.counter['varsUpdate']\n paramDict['initial seed' ] = self.initSeed\n for key in self.inputInfo:\n if key!='SampledVars':\n paramDict[key] = self.inputInfo[key]\n else:\n for var in self.inputInfo['SampledVars'].keys():\n paramDict['Variable: '+var+' has value'] = paramDict[key][var]\n paramDict.update(self.localGetCurrentSetting())\n return paramDict", "title": "" }, { "docid": "493e89d7745211e835ae8c4dd845b4b1", "score": "0.7164346", "text": "def get_params(self):\r\n return copy.deepcopy(self._singleton__input_params)", "title": "" }, { "docid": "8c4d98f5ffa3cfae2c8b0fe5057fa31c", "score": "0.7163374", "text": "def get_params(self):\n params = {}\n for k, v in self.__hyperparams.items():\n # Unconditionally add these since can't have any errors.\n params[k] = v\n\n for k, v in self.__archparams.items():\n # Raise an error if there's any parameter overlap.\n if k in params:\n raise Error('Found duplicate key in archparams and hyperparams: ' +\n k + ', cannot have duplicate keys')\n params[k] = v\n\n for k, v in self.__results.items():\n # Raise an error if there's any parameter overlap.\n if k in params:\n raise Error('Found duplicate key in output params: ' + k +\n ' cannot have duplicate keys')\n params[k] = v\n\n # Insert name.\n params['model_name'] = self.__name\n\n return params", "title": "" }, { "docid": "880f4a32576596c2c20ab21f68eb1d2b", "score": "0.7156139", "text": "def get_parameters(self):\n return self.__parameters", "title": "" }, { "docid": "b56823f31a3730e786fafe231f00f67b", "score": "0.71319854", "text": "def get_parameter_dict(self, include_frozen=False):\n return OrderedDict(zip(\n self.get_parameter_names(include_frozen=include_frozen),\n self.get_parameter_vector(include_frozen=include_frozen),\n ))", "title": "" }, { "docid": "b16df9abb6f98b34a0a77c5998f3ff68", "score": "0.71151686", "text": "def params(self):\n return {'kernel_size': self.kernel_size,\n 'stride': self.stride,\n 'n_kernel': self.n_kernel,\n 'padding': self.padding,\n 'act_fn': self.act_fn,\n 'vec_dim': self.vec_dim,\n 'w_init_fn': self.w_init_fn,\n 'use_bias': self.use_bias,\n 'batch_size': self.batch_size}", "title": "" }, { "docid": "3c8cc2cf8719462a16ae5d9e7e4acfbd", "score": "0.71150357", "text": "def initial_parameters(self, **kwargs):\n return 
dict()", "title": "" }, { "docid": "ad997163325296cd9acd7867d36ba648", "score": "0.71150196", "text": "def get_config(self):\n config = {}\n for (key, val) in self._param_dict.iteritems():\n config[key] = val.get_value()\n return config", "title": "" }, { "docid": "b992fd6dafc215f352a1765ff8cafca1", "score": "0.71092874", "text": "def parameters(self):\n serializers = self.merge_serializer_parameters()\n vars(self).update(**serializers)\n return serializers['parameters']", "title": "" }, { "docid": "0c390f4680c8555b9808f520bf7c3d5e", "score": "0.7102326", "text": "def params_wps(self):\n # type: () -> Dict[str, Any]\n return {\n \"identifier\": self.identifier,\n \"title\": self.title,\n \"abstract\": self.abstract,\n \"keywords\": self.keywords,\n \"metadata\": self.metadata,\n \"version\": self.version,\n \"inputs\": self.inputs,\n \"outputs\": self.outputs,\n \"package\": self.package,\n \"payload\": self.payload,\n }", "title": "" }, { "docid": "ded0ad766921fc0a6fa16bad8baa3ca9", "score": "0.7082363", "text": "def parameterMap(self):\n return _libBornAgainCore.IterationInfo_parameterMap(self)", "title": "" }, { "docid": "210d21b098099e2e6e6e7323ade94bdc", "score": "0.7081221", "text": "def to_params(self):\n params = {}\n custom_params = {}\n for key in self.custom_params:\n custom_params[key] = self.custom_params[key]\n ext_params = {}\n for key in self.ext_params:\n ext_params[key] = self.ext_params[key]\n for key in LAUNCH_DATA_PARAMETERS:\n if getattr(self, key, None):\n params[key] = getattr(self, key)\n params.update(custom_params)\n params.update(ext_params)\n return params", "title": "" }, { "docid": "a682cbb0733340a981112627978d49c3", "score": "0.70775825", "text": "def build_params(self):\n params = {\n \"adsb\": 1,\n \"bounds\": self.build_bounds(),\n \"estimated\": 1,\n \"faa\": 1,\n \"flarm\": 1,\n \"gliders\": 1,\n \"maxage\": 14400,\n \"mlat\": 1,\n \"stats\": 1,\n \"vehicles\": 1\n }\n return params", "title": "" }, { "docid": "61829d55fa194650e6073f9b2e1c0f6e", "score": "0.7076799", "text": "def get_params(self):\n params = {}\n for i, m in enumerate(self.marginals):\n for key, value in m.get_params().items():\n params[key + '_' + str(i)] = value\n for key, value in self.copula.get_params().items():\n params[key + '_c'] = value\n return params", "title": "" }, { "docid": "31c8eccebe5eb3ade8fc517697ff3df0", "score": "0.70439875", "text": "def parameters(self):\n return self._parameters", "title": "" }, { "docid": "31c8eccebe5eb3ade8fc517697ff3df0", "score": "0.70439875", "text": "def parameters(self):\n return self._parameters", "title": "" } ]
5fc41bac473de89bde8fd57a79bcc753
Sets the next_quote_number of this InvoiceSettings.
[ { "docid": "5b4899f73aacda3408acf6a4ab04ad3c", "score": "0.83951414", "text": "def next_quote_number(self, next_quote_number):\n\n self._next_quote_number = next_quote_number", "title": "" } ]
[ { "docid": "04adb1996b68cc40f09a039527a77589", "score": "0.63772535", "text": "def next_invoice_number(self, next_invoice_number):\n\n self._next_invoice_number = next_invoice_number", "title": "" }, { "docid": "8103b18f09d7d8b0c612bfe99c3997b0", "score": "0.62827", "text": "def setNextOrderId(self, orderId):\n self._nextOrderId = orderId", "title": "" }, { "docid": "9e12b4dda452b57bc4c3b8116a1fdfbb", "score": "0.5918288", "text": "def set_next(self, next):\n self.next = next", "title": "" }, { "docid": "9e12b4dda452b57bc4c3b8116a1fdfbb", "score": "0.5918288", "text": "def set_next(self, next):\n self.next = next", "title": "" }, { "docid": "2810c57fdcc9ecde42fca831a6cf5512", "score": "0.5901612", "text": "def set_next(self, next):\n self.__next = next", "title": "" }, { "docid": "fdbf92a75c6593bdbf76027cea4439a5", "score": "0.5865499", "text": "def SetNext(self, next_page):\r\n self._next = next_page", "title": "" }, { "docid": "7a7d52dfae8b146ff7594437b2b0c8da", "score": "0.5840354", "text": "def next_credit_note_number(self, next_credit_note_number):\n\n self._next_credit_note_number = next_credit_note_number", "title": "" }, { "docid": "3a437a7b76a5688fd0e6dfb888dd3a5a", "score": "0.5785043", "text": "def set_next(self, next: str):\n if not type(next) == str:\n raise ValueError(\"Next should be a string\")\n\n if self._next is None:\n self._next = next", "title": "" }, { "docid": "6c8cef110690d3245c1334c9adc325e9", "score": "0.57400966", "text": "def set_next(self, new_next):\n\n\t\tself.next = new_next", "title": "" }, { "docid": "d7a5ba1922fbcf06da0eff510db4f3fd", "score": "0.57387936", "text": "def set_next(self, new_next):\n self.next = new_next", "title": "" }, { "docid": "d7a5ba1922fbcf06da0eff510db4f3fd", "score": "0.57387936", "text": "def set_next(self, new_next):\n self.next = new_next", "title": "" }, { "docid": "73b7a5f84776dae52035e009e3fd31d3", "score": "0.5738486", "text": "def setNext(self, snext) -> None:\n self.next = snext", "title": "" }, { "docid": "0c4e605a3e3bb6bc1cc30ee224fd4117", "score": "0.5730693", "text": "def next_sales_corrective_invoice_number(self, next_sales_corrective_invoice_number):\n\n self._next_sales_corrective_invoice_number = next_sales_corrective_invoice_number", "title": "" }, { "docid": "92fa3451d5197f6b8244d513d0c0e725", "score": "0.5699994", "text": "def setNext(self,next):\n self.next = next", "title": "" }, { "docid": "71d02cba9b4e5bc973cbf4fb55ddbbf5", "score": "0.56317484", "text": "def set_next(self, new_next):\n self.next = new_next", "title": "" }, { "docid": "f7f8a98e85ecc4337a01a319718684ea", "score": "0.55825955", "text": "def set_next_field_value(self, next_field_value):\n\n\t\tif next_field_value is not None and not isinstance(next_field_value, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: next_field_value EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__next_field_value = next_field_value\n\t\tself.__key_modified['next_field_value'] = 1", "title": "" }, { "docid": "93a9b210afd9e8d06e6e1bfbcf9c283e", "score": "0.55032396", "text": "def quote_currency_code(self, quote_currency_code):\n\n self._quote_currency_code = quote_currency_code", "title": "" }, { "docid": "ef0886874795886845993d107bb35cfc", "score": "0.54076535", "text": "def next_payment_date_to(self, next_payment_date_to):\n\n self._next_payment_date_to = next_payment_date_to", "title": "" }, { "docid": "585d576aa73916aad4c2c35ea3eac704", "score": "0.53450465", "text": "def next_page(self, next_page):\n\n self._next_page = next_page", "title": "" 
}, { "docid": "585d576aa73916aad4c2c35ea3eac704", "score": "0.53450465", "text": "def next_page(self, next_page):\n\n self._next_page = next_page", "title": "" }, { "docid": "585d576aa73916aad4c2c35ea3eac704", "score": "0.53450465", "text": "def next_page(self, next_page):\n\n self._next_page = next_page", "title": "" }, { "docid": "85f83d76fe342072f8206338c4227c77", "score": "0.52647954", "text": "def quote_number_prefix(self, quote_number_prefix):\n if (self.local_vars_configuration.client_side_validation and\n quote_number_prefix is not None and len(quote_number_prefix) > 6):\n raise ValueError(\"Invalid value for `quote_number_prefix`, length must be less than or equal to `6`\") # noqa: E501\n\n self._quote_number_prefix = quote_number_prefix", "title": "" }, { "docid": "f66c5edf9f5f666374f51ff4717d55f3", "score": "0.5222656", "text": "def _set_next_interest_due_date_30X(self, val):\n self.swift_obj.SequenceB_TransactionDetails.NextInterestDueDate = val\n self.swift_obj.SequenceB_TransactionDetails.NextInterestDueDate.swiftTag = \"30X\"", "title": "" }, { "docid": "51bdab1516c56b88aeac899a44d3db21", "score": "0.52024835", "text": "def order_no(self, order_no):\n self._order_no = order_no", "title": "" }, { "docid": "df067d29c993d66f155c54a09c837e14", "score": "0.5133299", "text": "def update_quotes(self, quotes):\n # update the mark value of the order\n updated_quotes = quotes.symbol(self.symbol)\n self.set_prices(updated_quotes)", "title": "" }, { "docid": "81abe2710c0c344fdc6b0a089fd13bac", "score": "0.5091334", "text": "def set_current_number(self, number):\n self.current_number = number", "title": "" }, { "docid": "69c59efed2c179122911de96b49ac181", "score": "0.50678843", "text": "def update_next_content_id_index(\n self, next_content_id_index: int\n ) -> None:\n self.next_content_id_index = next_content_id_index", "title": "" }, { "docid": "f7ce849aebf3a9a5ddcac26bf6964f0f", "score": "0.5045317", "text": "def next(self, next):\n\n self._next = next", "title": "" }, { "docid": "f7ce849aebf3a9a5ddcac26bf6964f0f", "score": "0.5045317", "text": "def next(self, next):\n\n self._next = next", "title": "" }, { "docid": "f7ce849aebf3a9a5ddcac26bf6964f0f", "score": "0.5045317", "text": "def next(self, next):\n\n self._next = next", "title": "" }, { "docid": "d6cf3694cb9b495ccc3a82c8f48ae673", "score": "0.50443465", "text": "def next_x_orders(self, next_x_orders):\n\n self._next_x_orders = next_x_orders", "title": "" }, { "docid": "0106685d11d66e4c2a8df998960b627f", "score": "0.5036332", "text": "def set_next_tonic_button(self, button):\n self._tonic_selector.set_next_page_button(button)", "title": "" }, { "docid": "52c1c75a88bbe363be1c5b9c9c86eb3d", "score": "0.50068027", "text": "def set_quote_currency(self, currency):\n if currency not in self.quote_currencies:\n return\n self.quote_currencies.remove(currency)\n self.quote_currencies.insert(0, currency)\n self.refresh_balance()", "title": "" }, { "docid": "385f18dfab4ef893d0da57cf91daead0", "score": "0.49841535", "text": "def set_next_distance(self,next):\n self.next_distance = self.distance(next)", "title": "" }, { "docid": "bb6dd68309327e8aae61ed0dd7bcfb77", "score": "0.49796414", "text": "def set_number(self, number):\n self.number = number", "title": "" }, { "docid": "bb6dd68309327e8aae61ed0dd7bcfb77", "score": "0.49796414", "text": "def set_number(self, number):\n self.number = number", "title": "" }, { "docid": "51de7bcfbeaa783647fb90c31468f12a", "score": "0.49683505", "text": "def sequence_number(self, sequence_number):\n\n 
self._sequence_number = sequence_number", "title": "" }, { "docid": "9255161d6ee4e2caeb1bb55f3ee8d02f", "score": "0.49591583", "text": "def setQuotechar(self, quotechar):\n self._quotechar = quotechar", "title": "" }, { "docid": "499bd919500196989cd43512ac228b68", "score": "0.495317", "text": "def set_next(self, node):\n self.next = node", "title": "" }, { "docid": "7906ab640c6ab470f17b0af30982ed69", "score": "0.49266204", "text": "def set_OrderNumber(self, value):\n super(RecoveryInputSet, self)._set_input('OrderNumber', value)", "title": "" }, { "docid": "0a26356531805924cf16ac3a68342289", "score": "0.48948213", "text": "def account_number(self, account_number):\n\n self._account_number = account_number", "title": "" }, { "docid": "e85b455c21564df74561da00fde72ded", "score": "0.4887277", "text": "def quote_id(self, quote_id):\n if self.local_vars_configuration.client_side_validation and quote_id is None: # noqa: E501\n raise ValueError(\"Invalid value for `quote_id`, must not be `None`\") # noqa: E501\n\n self._quote_id = quote_id", "title": "" }, { "docid": "652081841f3c631960f4aa796db4796f", "score": "0.48858672", "text": "def _choose_quote(self):\n self.random_quote = choice(self.quotes_list)", "title": "" }, { "docid": "1de9de6c3c87fe142a1d528c860e2c5d", "score": "0.48685443", "text": "def invoice_number(self, invoice_number):\n\n self._invoice_number = invoice_number", "title": "" }, { "docid": "78a4dbb02d01c6b912f33b6a0bfba238", "score": "0.48513752", "text": "def setNumber(self, number):\n self.number = number", "title": "" }, { "docid": "08f39b8fd293c04b17fb5b92ef193842", "score": "0.48445463", "text": "def save(self, *args, **kwargs):\n if not self.order_number:\n self.order_number = self._generate_order_number()\n super().save(*args, **kwargs)", "title": "" }, { "docid": "08f39b8fd293c04b17fb5b92ef193842", "score": "0.48445463", "text": "def save(self, *args, **kwargs):\n if not self.order_number:\n self.order_number = self._generate_order_number()\n super().save(*args, **kwargs)", "title": "" }, { "docid": "6a721d0ca1788977b9aba5cfbfaaf1e1", "score": "0.48405582", "text": "def next(self):\n self.url = self.next_page", "title": "" }, { "docid": "b38af3770ab7ee76b899dd7a05980e68", "score": "0.4831471", "text": "def _set_my_number(self, value):\n self._my_number = value", "title": "" }, { "docid": "73d90ec651d2c0d051854f47bef11ae6", "score": "0.4825131", "text": "def setFrameNumber(self, number):\n self._browser.setValue(number)", "title": "" }, { "docid": "829dbd88676716714fcd7d330caf842e", "score": "0.48138514", "text": "def purchase_order_number(self, purchase_order_number):\n\n self._purchase_order_number = purchase_order_number", "title": "" }, { "docid": "f8aba4d7cc1dd49f2ad201c545090979", "score": "0.48114237", "text": "def set_next(self, frame):\n if not self.ron:\n\n return epdblib.basedebugger.BaseDebugger.set_next(self, frame)\n self.set_step()\n self.running_mode = 'next'\n #self.nocalls = 0 # Increased on call - decreased on return\n self.stopnocalls = self.nocalls", "title": "" }, { "docid": "6d9b5ef1faca4d46346bf7ac19de67c0", "score": "0.48104414", "text": "def setNumerator(self, numerator):\n \n self.numerator = numerator", "title": "" }, { "docid": "67f31b72eb8a9ffc1ef1c3026f40b946", "score": "0.48048848", "text": "def tax_number(self, tax_number):\n\n self._tax_number = tax_number", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise 
ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "658c13e9010b43dc3ee48f0f3ce3a651", "score": "0.48041582", "text": "def next_link(self, next_link: \"str\"):\n if next_link is None:\n raise ValueError(\"Invalid value for `next_link`, must not be `None`\")\n self._attrs[\"nextLink\"] = next_link", "title": "" }, { "docid": "cc74ef6f175d90e5c6628d5e9c7bdce2", "score": "0.48001137", "text": "def set_serial_number(self, serial_number):\n\t\tself._set_config_string(REG_SERIAL_NUMBER, serial_number, \n\t\t\t\t\t\t\t\tSIZE_SERIAL_NUMBER)", "title": "" }, { "docid": "19cc7eff6042690745d91d6d59751e0b", "score": "0.47866917", "text": "def next_payment_date_from(self, next_payment_date_from):\n\n self._next_payment_date_from = next_payment_date_from", "title": "" }, { "docid": "5c9969b6d557d5c40dfb662e3a66e97c", "score": "0.47745696", "text": "def set_next_charge_date(self, subscription_id, date):\n return self.http_post(\n '{0}/{1}/set_next_charge_date'.format(self.url, subscription_id), {\n 'date': date\n }\n )", "title": "" }, { "docid": "bece473324b3f9c774c904587a6b3618", "score": "0.4768069", "text": "def serial_no(self, serial_no):\n\n self._serial_no = serial_no", "title": "" }, { "docid": "9d6af2d4fbcf5b8b9c555e9f22ff919a", "score": "0.4759931", "text": "def set_atomic_number(self, value):\n self.type = ATOMIC_NUMBER[value]", "title": "" }, { "docid": "8743e51f094143e2d2986b0336ba169e", "score": "0.47594562", "text": "def trade_id(self, trade_id):\n\n self._trade_id = trade_id", "title": "" }, { "docid": "8743e51f094143e2d2986b0336ba169e", "score": "0.47594562", "text": "def trade_id(self, trade_id):\n\n self._trade_id = trade_id", "title": "" }, { "docid": "d267b9306e1555ecaa97e853ecd6431c", "score": "0.47520447", "text": "def set_configured_currency(self, set_quote_currency):\n currency = 
self.g.config.get('currency')\n # currency can be none when Electrum is used for the first\n # time and no setting has been created yet.\n if currency is not None:\n set_quote_currency(currency)", "title": "" }, { "docid": "1ae746926283defe8c421002f6d2e868", "score": "0.4734617", "text": "def save(self, *args, **kwargs):\n \"\"\" (if hasn't been done already) \"\"\"\n\n if not self.order_number:\n self.order_number = self._generate_order_number()\n super().save(*args, **kwargs)", "title": "" }, { "docid": "4446323ff03d6e59ae5de36010f177fd", "score": "0.47332856", "text": "def next_attempt_date(self, next_attempt_date):\n\n self._next_attempt_date = next_attempt_date", "title": "" }, { "docid": "274e0150f29bd41146853eb54235261b", "score": "0.46995494", "text": "def next_retry_after(self, next_retry_after):\n\n self._next_retry_after = next_retry_after", "title": "" }, { "docid": "594d12f962aa64ec3f6d286c417342c1", "score": "0.4682145", "text": "def number(self, number):\n \n self._number = number", "title": "" }, { "docid": "d3be0acf259ac403a3e5f65db765e6f6", "score": "0.46517122", "text": "def number(self, number):\n\n self._number = number", "title": "" }, { "docid": "d3be0acf259ac403a3e5f65db765e6f6", "score": "0.46517122", "text": "def number(self, number):\n\n self._number = number", "title": "" }, { "docid": "d3be0acf259ac403a3e5f65db765e6f6", "score": "0.46517122", "text": "def number(self, number):\n\n self._number = number", "title": "" }, { "docid": "d3be0acf259ac403a3e5f65db765e6f6", "score": "0.46517122", "text": "def number(self, number):\n\n self._number = number", "title": "" }, { "docid": "d3be0acf259ac403a3e5f65db765e6f6", "score": "0.46517122", "text": "def number(self, number):\n\n self._number = number", "title": "" }, { "docid": "a7e927fff8cc9dcff7323642c271065a", "score": "0.46195418", "text": "def next_execute_time(self, next_execute_time):\n\n self._next_execute_time = next_execute_time", "title": "" }, { "docid": "014bb86ca31054b3e493f434e9bfcf7d", "score": "0.46124288", "text": "def serial_number(self, serial_number):\n\n self._serial_number = serial_number", "title": "" }, { "docid": "d4dbd5c40b8bc271afc4052ab578b50d", "score": "0.46102273", "text": "def set_begin_number(self, begin_number):\n self.set_value_into_input_field(self.begin_number_textbox_locator, begin_number)", "title": "" }, { "docid": "b3d2277f3445dc9ba7d5aa0a637598a2", "score": "0.46053657", "text": "def SetPreviousLinkNr(self):\n\t\n\tself.__lastlinknr -= 1", "title": "" }, { "docid": "bc22117144711ff00be28b16a4385593", "score": "0.46030906", "text": "def set_order(self,order_index):\n self._order = order_index", "title": "" }, { "docid": "13f4a34d6329f1ff9d6af2f3ba60d7b1", "score": "0.4602621", "text": "def order_id(self, order_id):\n\n self._order_id = order_id", "title": "" }, { "docid": "13f4a34d6329f1ff9d6af2f3ba60d7b1", "score": "0.4602621", "text": "def order_id(self, order_id):\n\n self._order_id = order_id", "title": "" }, { "docid": "13f4a34d6329f1ff9d6af2f3ba60d7b1", "score": "0.4602621", "text": "def order_id(self, order_id):\n\n self._order_id = order_id", "title": "" }, { "docid": "751f4f48fe4089ba2479ac23365fce45", "score": "0.4600618", "text": "def phone_number(self, phone_number):\n\n self._phone_number = phone_number", "title": "" }, { "docid": "751f4f48fe4089ba2479ac23365fce45", "score": "0.4600618", "text": "def phone_number(self, phone_number):\n\n self._phone_number = phone_number", "title": "" }, { "docid": "751f4f48fe4089ba2479ac23365fce45", "score": "0.4600618", "text": 
"def phone_number(self, phone_number):\n\n self._phone_number = phone_number", "title": "" }, { "docid": "751f4f48fe4089ba2479ac23365fce45", "score": "0.4600618", "text": "def phone_number(self, phone_number):\n\n self._phone_number = phone_number", "title": "" }, { "docid": "0df5a663bc67557df478ec2b1f9b70c6", "score": "0.4587704", "text": "def setNextmon(self, qdate ):\n if qdate.month() == 12:\n qdate.setDate( qdate.year()+1, 1, 1)\n else:\n qdate.setDate( qdate.year(), qdate.month()+1, 1)\n \n return qdate", "title": "" }, { "docid": "cc557b18610fc822f413dec395a2eb9b", "score": "0.4585126", "text": "def page_number(self, page_number):\n\n self._page_number = page_number", "title": "" }, { "docid": "cc557b18610fc822f413dec395a2eb9b", "score": "0.4585126", "text": "def page_number(self, page_number):\n\n self._page_number = page_number", "title": "" }, { "docid": "cc557b18610fc822f413dec395a2eb9b", "score": "0.4585126", "text": "def page_number(self, page_number):\n\n self._page_number = page_number", "title": "" }, { "docid": "3d36f3952b1cb370a176e8075afcd1de", "score": "0.45828053", "text": "def save(self, *args, **kwargs):\n\n if not self.order_number:\n self.order_number = self._create_order_number()\n super().save(*args, **kwargs)", "title": "" }, { "docid": "64a1a65e41383e42bafae85f91d61476", "score": "0.45806536", "text": "def set_increment(self, increment):\n self.set_value_into_input_field(self.increment_textbox_locator, increment)", "title": "" }, { "docid": "d12155a0d80662e910d1ca097822aa44", "score": "0.45732397", "text": "def contract_number(self, contract_number):\n\n self._contract_number = contract_number", "title": "" }, { "docid": "f448bb085b961b029e95885aaa43bd1b", "score": "0.4559965", "text": "def phone_number(self, phone_number):\n self._phone_number = phone_number", "title": "" }, { "docid": "9a87533618a7da1d469d13163ca37872", "score": "0.4554133", "text": "def set_next_scale_button(self, button):\n self._scale_selector.set_next_page_button(button)", "title": "" } ]
6e8b8262a19b1c75022347aaab7573e6
This function takes in 2 lists of tuples, one for visited cities and their distance from the outbreak city, and another for the unvisited cities and their distance from the outbreak city. It moves the city with the shortest distance, which is the distance from the visited city to its nearest neighbouring city, from the unvisited list to the visited list. It also updates the unvisited list with new paths that exist from this moved city. The function also takes in the dictionary of distances between the neighbouring cities, in order to add new paths and update existing paths in the unvisited list.
[ { "docid": "be55ec6d9760f8a38fe94ecd3f55a4bd", "score": "0.7617503", "text": "def visit_next(visited, unvisited, distance):\n\n # Takes in the visited cities as a list\n visited_list = visited\n\n # Takes in the unvisited cities as a list\n unvisited_list = unvisited\n\n # Gets the shortest distance from\n # the unvisited list into the visited list\n shortest_distance = get_closest(unvisited_list)\n unvisited_list_loop = unvisited_list[:]\n for i in unvisited_list_loop:\n # Takes the shortest distance in the unvisited list\n if shortest_distance == i:\n # Adds the shortest distance from the unvisited list\n # to the visited list\n visited_list.append(i)\n\t unvisited_list.remove(i) \n\t for j in unvisited_list:\n\t\tif shortest_distance[0] == j[0]:\n \t unvisited_list.remove(j) # remove all same cities from the unvisited file \n # Gets the neighbouring cities of the shortest distance\n # if the neighbouring cities is already in the visited_list:\n new_distance = distance[shortest_distance[0]]\n #if the new neighbouring cities are already in\n #the visited lists, remove them\n\t new_distance_loop = new_distance[:]\n\t for k in new_distance_loop:\n for m in visited_list:\n\t\t if k[0] == m[0]:\n new_distance.remove(k)\n break\n\n #this for loop adds to the total distance\n #of the new neighbouring cities from the\n #point of origin of the outbreak.\n for element in new_distance:\n\t\t# Append the new distance to that path as the shortest distance from \n\t\t# the source town to the town that was just expanded\n\t\t# and add the distance from the just expanded town to this new town and append it to unvisited. \n x = element[1]\n y = shortest_distance[1]\n m = x + y\n unvisited_list.append((element[0], m))\n\t break # Only 1 city is needed for this \n return visited_list, unvisited_list", "title": "" } ]
[ { "docid": "3ee2efd959a25fbdb8ae5f6d6baefb41", "score": "0.61393", "text": "def dijsktra(initial, destination):\n \n distances = {}\n\n for i in city_list:\n distances[i.state] = 10000000000000\n\n distances[initial.state] = 0\n\n q = PriorityQueue()\n\n q.put( (0, initial.state) )\n \n while q.empty() == False :\n current = q.get()\n current_city = current[1]\n for neighbour in romania_map[current_city].keys():\n if distances[neighbour] > (distances[current_city] + romania_map[current_city][neighbour]):\n distances[neighbour] = distances[current_city] + romania_map[current_city][neighbour]\n q.put( (distances[neighbour], neighbour) )\n \n return distances[destination.state]", "title": "" }, { "docid": "c8740b8566c4d1513f015a382c672ba3", "score": "0.607672", "text": "def solve_it(cities, routes):\n res = []\n for city1 in cities:\n # first determine which cities we can travel to from city1.\n to_be_explored = set((city1, ))\n have_seen = set()\n while to_be_explored:\n examinee = to_be_explored.pop()\n have_seen.add(examinee)\n to_be_explored = to_be_explored.union(routes[examinee])\n # Remove cities already seen\n to_be_explored = to_be_explored - have_seen\n cities_from_city1 = have_seen\n\n # Remove one city and see which cities we get\n for city2 in cities:\n if city2 in res or city2 == city1:\n continue\n to_be_explored = set((city1, ))\n have_seen = set((city2, ))\n while to_be_explored:\n examinee = to_be_explored.pop()\n have_seen.add(examinee)\n to_be_explored = to_be_explored.union(routes[examinee])\n to_be_explored = to_be_explored - have_seen\n \n if cities_from_city1 - set(have_seen):\n res.append(city2)\n return res", "title": "" }, { "docid": "c111cf2fff63ca1136188d25ba0df8ed", "score": "0.60278314", "text": "def get_distances(filename):\n\n # Create an empty list to store the original cities and\n # their neighbouring cities with their distances in a list\n line_list = []\n\n # Create an empty string used to concatenate\n # the strings into the name of a city.\n s = ''\n\n # Create an empty list to store the\n # distances of the neighbouring cities.\n city2_distance_list = []\n\n # Create an empty list to store the\n # original cities of the list.\n city1_list = []\n\n # Create an empty dictionary to store the\n # original cities as keys and their neighbouring\n # cities with their distances as values.\n city_distance_dict = {}\n\n # Create a temporary list to store an\n # original city and its distances with every\n # possible neighbouring cities.\n temp_list = []\n\n # Open the file given by the user.\n filename = open(filename)\n\n #Create a loop to convert the data obtained from the file into a list.\n for line in filename:\n # Remove the unwanted whitespaces before and after a sentence\n # along with newlines, \"\\n\" between the lines\n j = line.strip()\n\n # Separate j at it's colon and append it\n # into list, line_list.\n line_list.append(j.split(\":\"))\n\n filename.close()\n\n # Create a for loop which adds the original cities into city1_list.\n for i in range(len(line_list)):\n\n #add the name of original cities from the line_list into city1_list.\n city1_list.append(line_list[i][0])\n\n # a string that represents the neighbouring cities\n # and their distances from each original list\n j = line_list[i][1]\n\n for k in range(len(j)):\n # Creates a for loop which adds the neighbouring\n # cities and their distances into\n # city2_distance_list\n for l in range(len(j[k])):\n\n # If the single string m is a number string:\n if j[k][l] in \"1234567890\":\n\n # 
Append the number string into this list\n city2_distance_list.append(s.strip())\n s = ''\n\n # Convert the single digit string into an integer\n city2_distance_list.append(int(j[k][l]))\n\n # Else, concatenating the single alphabet strings together\n # until a digit string appears,which means a string which\n # represents a city is formed. This will create a\n # dictionary with the given original cities\n # and its distances from its given neighbouring\n # cities from the file.\n else:\n s += j[k][l]\n\n for i in range(len(city1_list)):\n #if the original city is already in the dictionary:\n if city1_list[i] in city_distance_dict.keys():\n\n g = city_distance_dict[city1_list[i]]\n\n # Append the distances of the new values of new\n # neighbouring cities to the original cities\n # already found in the dictionary, as a tuple.\n g.append(tuple(city2_distance_list[2 * i: 2 * i + 2]))\n\n # Creates a new definition for this existing key\n # to include the values extra neighbouring cities\n # that refer to this same key.\n city_distance_dict[city1_list[i]] = g\n\n # If the original city is not in the dictionary,\n # create the dictionary with the city name as\n # the key and its distances as the values.\n else:\n city_distance_dict[city1_list[i]] = \\\n [tuple(city2_distance_list[2 * i: 2 * i + 2])]\n\n # Creates a for loop that updates the dictionary by giving the distances\n # between the original cities and all of its possible\n # neighbouring cities that is given indirectly from the file.\n for i in range(0, len(city2_distance_list), 2):\n # if the neighbouring city is already a\n # key in the dictionary:\n if city2_distance_list[i] in city_distance_dict.keys():\n\n g = city_distance_dict[city2_distance_list[i]]\n\n # Append the neighbouring cities into the\n # temporary list.\n temp_list.append((city1_list[i / 2]))\n\n # Append the distances from these cities\n # into the temporary list.\n temp_list.append(city2_distance_list[i + 1])\n\n # Convert the temporary list into a list of tuples\n # and append it to the variable g.\n g.append(tuple(temp_list))\n\n # Update the keys in the dictionary.\n city_distance_dict[city2_distance_list[i]] = g\n\n # Make the temporary list to be an empty\n # list again.\n temp_list = []\n\n #if the neighbouring cities is not a key in the dictionary:\n else:\n\n # Appends the neighbouring cities\n # from city1_list into the temporary list.\n temp_list.append((city1_list[i / 2]))\n\n # Appends the distances form these cities\n # from city2_distance_list\n # into the temporary list.\n temp_list.append(city2_distance_list[i + 1])\n\n #update the keys for the dictionary.\n city_distance_dict[city2_distance_list[i]] = [tuple(temp_list)]\n\n # Makes the temporary list to be an\n # empty list again\n temp_list = []\n\n return city_distance_dict", "title": "" }, { "docid": "033e9abcd599cf49df60f86529a070d2", "score": "0.5957961", "text": "def filter_path (paths_updated, costs_updated, fathers_updated, paths, edge_table):\n fathers_updated2 = []\n costs_updated2 = []\n paths_updated2 = []\n for node in fathers_updated:\n cost_now = costs_updated[fathers_updated.index(node)]\n costs_old = []\n for path in paths:\n if node in path:\n idx = path.index(node)+1\n costs_old.append(look_up(path[:idx],edge_table))\n if costs_old == []:\n fathers_updated2.append(node)\n costs_updated2.append(costs_updated[fathers_updated.index(node)])\n paths_updated2.append(paths_updated[fathers_updated.index(node)])\n continue\n elif min(costs_old) >= cost_now:\n 
fathers_updated2.append(node)\n costs_updated2.append(costs_updated[fathers_updated.index(node)])\n paths_updated2.append(paths_updated[fathers_updated.index(node)])\n return paths_updated2, costs_updated2, fathers_updated2", "title": "" }, { "docid": "d8edff6e9848f8d3548a63205c13387f", "score": "0.59207654", "text": "def updateVisitedCities(self, tour):\n self.visitedCities = set(tour)\n self.unvisitedCities = set(self.tspData.getAllCities())\n for city in self.visitedCities:\n self.unvisitedCities.discard(city)", "title": "" }, { "docid": "20dd07a81e0062dcaab9a04cae021087", "score": "0.5883189", "text": "def solve_a_star(start_city,end_city,cost):\n nodes_visited = 0\n visited = {}\n fringe = []\n trace_route = {}\n heapq.heappush(fringe,[0,0,0,None,None,start_city])\n while len(fringe) > 0:\n temp = heapq.heappop(fringe)\n if is_goal(temp[-1],end_city):\n trace_route[temp[-1]] = (temp[-2], temp[2], temp[3], temp[-3])\n return trace_route\n if temp[-1] in visited:\n if visited[temp[-1]] > temp[0]:\n visited.pop(temp[-1])\n if temp[-1] in visited:\n pass\n else:\n nodes_visited += 1\n trace_route[temp[-1]] = (temp[-2], temp[2], temp[3], temp[-3])\n visited[temp[-1]] = temp[0]\n for s in successors_heuristic(temp,cost):\n # If the element is already in fringe with a high value then remove that element.\n # But if we keep both the elements in the fringe then the lower value element will be popped and marked as visited.\n # If it's marked as visited, then even if we pop it next time it won't be explored. Removing element from fringe\n # is increasing the time taken to run, as it searches the whole fringe and sorts it again after removing.\n # Reference: Piazza Question 151.\n # for i,s1 in enumerate(fringe):\n # if s1[-1] == s[-1]:\n # if s1[0] > s[0]:\n # fringe.pop(i)\n # heapq.heapify(fringe)\n heapq.heappush(fringe,s)\n return False", "title": "" }, { "docid": "1edc065c49c3882f2ffd79d3255bbf97", "score": "0.58288634", "text": "def determine_path(cities: List[City], distance_matrix: np.ndarray) -> Tuple[List[str], int]:\n path, total_distance = nearest_neighbor_path_with_swapping(len(cities), distance_matrix)\n total_distance += distance_matrix[path[-1]][path[0]]\n\n cities_to_visit = []\n for i in path:\n cities_to_visit.append(cities[i].name)\n\n return cities_to_visit, total_distance", "title": "" }, { "docid": "f7ef3a47f876a354c7a664a9bd5f0540", "score": "0.57866627", "text": "def update_pathCost(self,oldCity,newCity):\n self.path_cost += tspmap[oldCity][newCity]", "title": "" }, { "docid": "7e93f0ec4e82648fd1a39b5ace7b62a4", "score": "0.5743212", "text": "def traverse(self,oldCity,newCity):\n self.update_path(newCity)\n self.update_pathCost(oldCity,newCity)\n self.current_location = newCity", "title": "" }, { "docid": "d85199c11dd9cd2e0d0b2b726018b468", "score": "0.5742618", "text": "def remove_redundant_paths(expand_paths, list_of_path, visited_stations_cost):\n\n not_redundant_list = list_of_path\n not_redundant_expanded = expand_paths.copy()\n updated_costs = visited_stations_cost.copy()\n\n for expanded in expand_paths:\n last = expanded.last\n if last not in visited_stations_cost or expanded.g < visited_stations_cost[last]:\n not_redundant_list = [p for p in not_redundant_list if last not in p.route]\n updated_costs[last] = expanded.g\n else:\n not_redundant_expanded.remove(expanded)\n\n return not_redundant_expanded, not_redundant_list, updated_costs", "title": "" }, { "docid": "aaa88d1c724b9466cfa8b4a850df8e40", "score": "0.5700115", "text": "def improve_with_2opt(visit_order, 
distance_matrix):\n n_cities = len(visit_order)\n cost_diff_best = 0.0\n i_best, j_best = None, None\n\n for i in range(0, n_cities - 2):\n for j in range(i + 2, n_cities):\n if i == 0 and j == n_cities - 1:\n continue\n\n cost_diff = calculate_2opt_exchange_cost(\n visit_order, i, j, distance_matrix)\n\n if cost_diff < cost_diff_best:\n cost_diff_best = cost_diff\n i_best, j_best = i, j\n\n if cost_diff_best < 0.0:\n visit_order_new = apply_2opt_exchange(visit_order, i_best, j_best)\n return visit_order_new\n else:\n return None", "title": "" }, { "docid": "9bab510e3a465cb07adf98adaa118977", "score": "0.56737363", "text": "def solve_uniform(start_city,end_city,cost):\n nodes_visited = 0\n visited = {}\n fringe = []\n trace_route = {}\n heapq.heappush(fringe,[0,0,0,None,None,start_city])\n while len(fringe) > 0:\n temp = heapq.heappop(fringe)\n if is_goal(temp[-1],end_city):\n trace_route[temp[-1]] = (temp[-2], temp[1], temp[2], temp[-3])\n return trace_route\n if temp[-1] in visited:\n pass\n else:\n nodes_visited += 1\n trace_route[temp[-1]] = (temp[-2], temp[1], temp[2], temp[-3])\n visited[temp[-1]] = 1\n visited_states[temp[-1].split(\",\")[-1][1:]] = 1\n for s in successors(temp,cost):\n heapq.heappush(fringe,s)\n return False", "title": "" }, { "docid": "3588298afcb3b887b4ca4fc7fb1322c3", "score": "0.5642488", "text": "def dijkstra(self, src, dest):\n if (src in self.vertices) and (dest in self.vertices):\n\n # 1. Mark all nodes as unvisited and store them.\n # 2. Set the distance to zero for our source node \n # 3. Set the distance to infinity for other nodes.\n distances = {vertex: inf for vertex in self.vertices}\n # 4. This is used to keep a track of the path taken\n previous_vertices = {\n vertex: None for vertex in self.vertices\n }\n distances[source] = 0\n vertices = self.vertices.copy()\n\n while vertices:\n # 5. Select the unvisited node with the smallest distance, \n # 6. Make it as the current node now.\n current_vertex = min(\n vertices, key=lambda vertex: distances[vertex])\n\n # 7. Stop, if the smallest distance \n # among the unvisited nodes is infinity.\n if distances[current_vertex] == inf:\n break\n # 8. If the current node is the destination Node then \n # we can break as that remains the optimum distance to the destination\n if current_vertex == dest:\n break\n\n # 9. Find unvisited neighbors for the current node \n # 10. Calculate their distances through the current node.\n for neighbour, cost in self.neighbours[current_vertex]:\n alternative_route = distances[current_vertex] + cost\n\n # 11. Update the distance if the newly calculated distance\\is better\n # 12. Update the previous node also\n if alternative_route < distances[neighbour]:\n distances[neighbour] = alternative_route\n previous_vertices[neighbour] = current_vertex\n\n # 13. Mark the current node as visited \n # and remove it from the unvisited set.\n vertices.remove(current_vertex)\n\n path_q = deque()\n # 14. Start from the destination and traverse the previous nodes \n # until we reach the source to find the path\n current_vertex = dest\n while previous_vertices[current_vertex] is not None:\n path_q.appendleft(current_vertex)\n current_vertex = previous_vertices[current_vertex]\n if path_q:\n path_q.appendleft(current_vertex)\n distance_nodes = 0\n # 15. 
calculate the distance also\n for index in range(1, len(path_q)):\n for thing in self.edges:\n if thing.start == path_q[index - 1] and thing.end == path_q[index]:\n distance_nodes += thing.cost\n # Convert the queue into list to display the output\n paths_list = list(path_q)\n print \"Shortest Path Is\", paths_list\n return paths_list, distance_nodes\n else:\n return [], -1", "title": "" }, { "docid": "16dfab308858c23608e127b290c97fc3", "score": "0.560158", "text": "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n G, msg = adjacency_matrix_to_graph(adjacency_matrix)\n # if msg != \"\":\n # return\n #raise error as a location has a road to self\n coolingRate = 0.97\n ITERATIONS = 1\n temp_original = 100\n\n FWdict = nx.floyd_warshall(G)\n global_best_tour = []\n global_best_cost = 10000000000000000000000000000000000000000000000000000000000000000000\n local_best_tour = global_best_tour\n local_best_cost = global_best_cost\n starting_car_location = convert_locations_to_indices([starting_car_location], list_of_locations)[0]\n list_of_homes = convert_locations_to_indices(list_of_homes, list_of_locations)\n list_of_locations = convert_locations_to_indices(list_of_locations, list_of_locations)\n\n for size in range(0, len(list_of_locations)//2):\n temp = temp_original\n\n tour = get_two_tours(adjacency_matrix, starting_car_location, size)\n if not tour:\n continue\n tour1_cost = cost_of_cycle(list_of_homes, G, tour[0], FWdict)\n tour2_cost = cost_of_cycle(list_of_homes, G, tour[1], FWdict)\n\n if tour1_cost <= tour2_cost:\n tour = tour[0]\n else:\n tour = tour[1]\n\n for i in range(ITERATIONS):\n local_tour = deepcopy(tour)\n tour = switch_vertex(G, local_tour)\n tour = switch_edges(G, tour)\n curr_cost = cost_of_cycle(list_of_homes, G, tour, FWdict)\n #dropoff_mapping = drop_off_given_path(tour, list_of_homes, FWdict)\n #curr_cost, _ = cost_of_solution(G, tour, dropoff_mapping)\n change_cost = curr_cost - local_best_cost\n\n if change_cost < 0:\n #switch based on temperature?\n local_best_cost = curr_cost\n local_best_tour = tour\n elif random.random() < math.exp(-(change_cost/temp)):\n local_best_cost = curr_cost\n local_best_tour = tour\n\n temp *= coolingRate\n\n if local_best_cost < global_best_cost:\n global_best_tour = local_best_tour\n global_best_cost = local_best_cost\n\n print(str(size) + \" : \" + str(global_best_cost))\n\n dropoff_mapping = drop_off_given_path(global_best_tour, list_of_homes, FWdict)\n return global_best_tour, dropoff_mapping", "title": "" }, { "docid": "424b6600202483bd646ebf1f94dac888", "score": "0.5567282", "text": "def greedy_tsp(cities):\r\n endpoints = {c: [c] for c in cities}\r\n for (A, B) in shortest_edges_first(cities):\r\n if (A in endpoints and B in endpoints and\r\n endpoints[A] != endpoints[B]):\r\n new_segment = join_endpoints(endpoints, A, B)\r\n if len(new_segment) == len(cities):\r\n return new_segment", "title": "" }, { "docid": "0eb0f10ad3343befcee9d670c5595594", "score": "0.5523716", "text": "def update_cities(self, road):\n if road == None or self.by_cities == None:\n print \" -- NONE FOUND --\"\n print \" self = '%s'\" % repr(self)\n print \" by_cities = '%s'\" % repr(self.by_cities)\n print \" road = '%s'\" % repr(road)\n self.by_cities[(road[0], road[1])] = road\n self.by_cities[(road[1], road[0])] = road\n self.by_names[(road[1].name, road[0].name)] = road\n self.by_names[(road[0].name, road[1].name)] = road", "title": "" }, { "docid": "d9e6ed10afdf47f79c85763d3f1ac3ed", "score": "0.5519368", 
"text": "def get_shortest_connection(graph, city1, city2):\n shortest_path_dict = graph.shortest_path(city1)\n distance = shortest_path_dict[0][city2]\n path = shortest_path_dict[1][city2]\n return distance, path", "title": "" }, { "docid": "b0d72ce399467297b07c0b9db71b8251", "score": "0.548628", "text": "def _reroute_network(outcoming_dict, endpoints, dup_info):\n branch_ops = ge.get_walks_intersection_ops(\n forward_seed_ops=list(outcoming_dict),\n backward_seed_ops=endpoints,\n forward_inclusive=False,\n backward_inclusive=True)\n\n outputs_to_swap = []\n for op, outputs in outcoming_dict.items():\n outputs_to_swap += [o for o in outputs if o in branch_ops]\n\n for node in outputs_to_swap:\n orig_inputs = list(node.inputs)\n new_inputs = []\n for ts in orig_inputs:\n new_op = dup_info.transformed(ts.op)\n if new_op is not None:\n new_inputs.extend(new_op.outputs)\n else:\n new_inputs.append(ts)\n ge.reroute_inputs(new_inputs, node)", "title": "" }, { "docid": "6aab31aefaa386b67d63b900fea6913d", "score": "0.54777956", "text": "def dfs(place, dist_so_far, roads, distances):\n if place not in distances or dist_so_far < distances[place]:\n distances[place] = dist_so_far # put place in distances, if place is not in the dictionary\n # choose shortest distance\n for rds in roads[place]: \n city, dist = rds\n dfs(city, dist_so_far + dist, roads, distances)", "title": "" }, { "docid": "fd28d557278813848a22a6ea6f687ff0", "score": "0.54303473", "text": "def compute_destination(self, observation):\n\n blocked_edges = observation[\"blocked_edges\"]\n graph = observation[\"graph\"]\n current_location = observation[\"agents_location\"][self.id]\n dest = current_location[1]\n\n # Find nodes where there is more than one men\n people_locations = [node for node, people in observation[\"people_location\"].items() if\n people > 0 and dest != node]\n\n temp_distance = float(\"inf\")\n self.current_destination = None\n self.current_path = []\n\n for node in people_locations:\n distance, path = algo.get_shortest_path_Dijk(graph,dest, node, blocked_edges)\n if distance < temp_distance:\n temp_distance = distance\n self.current_path = path\n self.current_destination = node", "title": "" }, { "docid": "13490d2978657d0cc5244efced326844", "score": "0.54229474", "text": "def calculate_2opt_exchange_cost(visit_order, i, j, distance_matrix):\n n_cities = len(visit_order)\n a, b = visit_order[i], visit_order[(i + 1) % n_cities]\n c, d = visit_order[j], visit_order[(j + 1) % n_cities]\n\n cost_before = distance_matrix[a, b] + distance_matrix[c, d]\n cost_after = distance_matrix[a, c] + distance_matrix[b, d]\n return cost_after - cost_before", "title": "" }, { "docid": "e8998274a96e8955849e2bb33303ce4d", "score": "0.540604", "text": "def get_route(start, end, cost):\r\n\r\n city_gps = pd.read_csv('city-gps.txt', sep=\" \", header=None)\r\n city_gps.columns = [\"city\", \"latitude\", \"longitude\"]\r\n city_gps.latitude.astype(float)\r\n city_gps.longitude.astype(float)\r\n\r\n roads = pd.read_csv('road-segments.txt', sep=\" \", header=None)\r\n roads.columns = [\"city1\", \"city2\", \"length\", \"speed\", \"highway\"]\r\n roads.speed = roads.speed.astype(int)\r\n\r\n city1_unique = roads.drop_duplicates(subset=['city1'])\r\n city2_unique = roads.drop_duplicates(subset=['city2'])\r\n city1_unique = city1_unique.city1\r\n city2_unique = city2_unique.city2\r\n\r\n\r\n city1_2 = pd.concat([city1_unique, city2_unique])\r\n city1_2 = pd.DataFrame(city1_2)\r\n city1_2.columns = [\"city\"]\r\n city_gps = pd.concat([city_gps, 
city1_2], axis=0).drop_duplicates().reset_index(drop=True)\r\n\r\n # https://stackoverflow.com/questions/21317384/pandas-python-how-to-concatenate-two-dataframes-without-duplicates\r\n # https://datatofish.com/reset-index-pandas-dataframe/\r\n city_gps.drop(city_gps[city_gps['city'].duplicated()].index, inplace=True)\r\n city_gps.duplicated().sum()\r\n city_gps = city_gps.reset_index(drop=True)\r\n city_gps.fillna(0, inplace=True)\r\n\r\n max_speed = max(roads.speed)\r\n max_segment = max(roads.length)\r\n\r\n curr_state = start\r\n route_taken = []\r\n explored = []\r\n\r\n if (cost == 'segments'):\r\n fringe = []\r\n hq.heapify(fringe)\r\n hq.heappush(fringe, (0, 0, 0, 0, 0, [], start))\r\n while len(fringe) > 0:\r\n (cost_priority, total_segments, total_miles, total_hours, total_expected_accidents, route_taken, curr_state) = hq.heappop(fringe)\r\n explored.append(curr_state)\r\n for (city2, miles, speed, highway, segment) in successors(curr_state, roads):\r\n total_expected_accidents += find_probability(highway, miles)\r\n hours = miles / speed\r\n\r\n if city2 == end:\r\n total_miles += miles\r\n route_taken += [city2]\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n return {\"total-segments\": len(route_taken),\r\n \"total-miles\": total_miles,\r\n \"total-hours\": total_hours,\r\n \"total-expected-accidents\": total_expected_accidents,\r\n \"route-taken\": route_taken}\r\n\r\n elif (city2) in explored:\r\n continue\r\n\r\n else:\r\n explored.append(city2)\r\n total_miles += miles\r\n route_taken += [city2]\r\n total_segments += segment\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n\r\n haversine_dist = distance_heur(curr_state, end, city_gps)\r\n cost_priority = segment_heur(max_segment, haversine_dist) + total_segments\r\n\r\n hq.heappush(fringe, (cost_priority, total_segments, total_miles, total_hours, total_expected_accidents, route_taken, city2))\r\n\r\n if(cost == 'distance'):\r\n fringe = []\r\n hq.heapify(fringe)\r\n hq.heappush(fringe, (0, 0, 0, 0, [], start))\r\n while len(fringe) > 0:\r\n (cost_priority, total_miles, total_hours, total_expected_accidents, route_taken, curr_state ) = hq.heappop(fringe)\r\n explored.append(curr_state)\r\n for (city2, miles, speed, highway, segment) in successors(curr_state, roads):\r\n total_expected_accidents += find_probability(highway, miles)\r\n hours = miles/speed\r\n if city2 == end:\r\n total_miles += miles\r\n route_taken += [city2]\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n return {\"total-segments\": len(route_taken),\r\n \"total-miles\": total_miles,\r\n \"total-hours\": total_hours,\r\n \"total-expected-accidents\": total_expected_accidents,\r\n \"route-taken\": route_taken}\r\n\r\n elif (city2) in explored:\r\n continue\r\n\r\n else:\r\n total_miles += miles\r\n #print(total_miles)\r\n #print(city2)\r\n cost_priority = distance_heur(curr_state, end, city_gps) + total_miles\r\n #route_taken += city2\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n hq.heappush(fringe, (cost_priority, total_miles , total_hours , total_expected_accidents, route_taken + [(city2, highway)], city2))\r\n\r\n if (cost == 'time'):\r\n fringe = []\r\n hq.heapify(fringe)\r\n hq.heappush(fringe, (0, 0, 0, 0, [], start))\r\n while len(fringe) > 0:\r\n (cost_priority, total_hours, total_miles, total_expected_accidents, route_taken, curr_state) = hq.heappop(fringe)\r\n 
explored.append(curr_state)\r\n for (city2, miles, speed, highway, segment) in successors(curr_state, roads):\r\n total_expected_accidents += find_probability(highway, miles)\r\n hours = miles / speed\r\n if city2 == end:\r\n total_miles += miles\r\n route_taken += [city2]\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n return {\"total-segments\": len(route_taken),\r\n \"total-miles\": total_miles,\r\n \"total-hours\": total_hours,\r\n \"total-expected-accidents\": total_expected_accidents,\r\n \"route-taken\": route_taken}\r\n\r\n elif (city2, miles, speed, highway) in explored:\r\n continue\r\n else:\r\n total_miles += miles\r\n haversine_dist = distance_heur(curr_state, end, city_gps)\r\n cost_priority = time_heur(haversine_dist, max_speed) + total_hours\r\n # route_taken += city2\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n hq.heappush(fringe,(cost_priority, total_hours, total_miles, total_expected_accidents, route_taken + [(city2, highway)], city2))\r\n\r\n\r\n if (cost == 'safe'):\r\n fringe = []\r\n hq.heapify(fringe)\r\n hq.heappush(fringe, (0, 0, 0, 0, [], start))\r\n while len(fringe) > 0:\r\n (cost_priority, total_expected_accidents, total_miles, total_hours, route_taken, curr_state) = hq.heappop(fringe)\r\n explored.append(curr_state)\r\n for (city2, miles, speed, highway, segment) in successors(curr_state, roads):\r\n total_expected_accidents += find_probability(highway, miles)\r\n hours = miles / speed\r\n if city2 == end:\r\n total_miles += miles\r\n route_taken += [city2]\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n return {\"total-segments\": len(route_taken),\r\n \"total-miles\": total_miles,\r\n \"total-hours\": total_hours,\r\n \"total-expected-accidents\": total_expected_accidents,\r\n \"route-taken\": route_taken}\r\n\r\n elif (city2) in explored:\r\n continue\r\n else:\r\n total_miles += miles\r\n haversine_dist = distance_heur(curr_state, end, city_gps)\r\n cost_priority = probability_heur(haversine_dist) + total_expected_accidents\r\n # route_taken += city2\r\n total_hours += hours\r\n total_expected_accidents += find_probability(highway, miles)\r\n hq.heappush(fringe, (cost_priority, total_expected_accidents, total_miles, total_hours, route_taken + [(city2, highway)], city2))\r\n\r\n \"\"\"route_taken = [(\"Martinsville,_Indiana\",\"IN_37 for 19 miles\"),\r\n (\"Jct_I-465_&_IN_37_S,_Indiana\",\"IN_37 for 25 miles\"),\r\n (\"Indianapolis,_Indiana\",\"IN_37 for 7 miles\")]\r\n \"\"\"", "title": "" }, { "docid": "89cf7967b2e8b547a0abc21d8501b5d8", "score": "0.53990185", "text": "def main():\n # function read files\n city, list_cities, price_gasoline = read_file()\n N = list_cities[-1]\n # create costs cities\n costs = create_costs(city)\n\n used_city = list_cities.pop(0) # Сity that we processing\n while (N in list_cities):\n used_costs = costs[used_city] # Cost of the current city\n for neighbor in city[used_city]:\n costs_neighbor = costs[neighbor]\n path_to_neighbor = price_gasoline[used_city]\n\n # If path on current node less then rewrite the neighbor node\n if used_costs + path_to_neighbor < costs_neighbor:\n costs[neighbor] = used_costs + path_to_neighbor\n\n # Entry in used_city the city with min path to it\n used_city = find_used_city(city, used_city, list_cities,\n price_gasoline)\n\n # Deleting city from the uninitiated cities\n if used_city in list_cities:\n list_cities.remove(used_city)\n else:\n 
write_file(costs, -1)\n break\n else:\n write_file(costs, costs[N])\n\n for city, value in costs.items():\n print(f'{city:13} {value:2}')", "title": "" }, { "docid": "387610ff394b10cea1a51b35af217590", "score": "0.5384444", "text": "def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,\n best_path):\n \n # check if start and end nodes are valid nodes\n \n # convert the start and end strings into instances of node object\n start_node = Node(start)\n end_node = Node(end)\n \n # check if the nodes are actually in the map\n if not(digraph.has_node(start_node) or digraph.has_node(end_node)):\n # if either of them aren't valid, raise an error\n raise ValueError('Building(s) not in the map')\n \n # base case for recursion\n # check if start and end nodes are the same\n # path is a list [[nodes that have been traversed], total distance, total outdoor distance]\n if start == end:\n return (path[0], path[1])\n \n # recursive case\n else:\n for edge in digraph.get_edges_for_node(start_node):\n # determine the destination node\n destination_node = edge.get_destination()\n # get that node's name\n destination_node_name = destination_node.get_name()\n \n # check to see if you are in a cycle\n # if the destination node is already in the path, continue on from that iteration of the loop\n if destination_node_name in path[0]:\n # continue from that iteration of the loop\n continue\n \n # construct the path with the total distance to the destination node\n distance_to_destination = edge.get_total_distance()\n updated_total_distance = path[1] + distance_to_destination\n \n # check to see that the path constructed so far is smaller than your shortest path\n # continue on from that iteration if it is longer than your shortest path\n if updated_total_distance >= best_dist:\n # continue from that iteration of the loop\n continue\n \n # construct the path with the outdoors distance to the destination node\n outdoors_dist_to_destination = edge.get_outdoor_distance()\n updated_outdoors_distance = path[2] + outdoors_dist_to_destination\n \n # check to see that the path constructed so far is smaller than your max distance outdoors constraint\n # continue on from that iteration if it longer than your constraint\n if updated_outdoors_distance > max_dist_outdoors:\n # continue from that iteration of the loop\n continue\n \n # construct the path with that destination node by creating a new list\n # initial path global variable shouldn't be disturbed\n updated_path = path[0] + [destination_node_name]\n \n # call your recursive function\n current_path, current_dist = get_best_path(digraph, destination_node_name, end, [updated_path, updated_total_distance, updated_outdoors_distance], max_dist_outdoors, best_dist, best_path)\n # if the path returned from recursive function is not Nonetype, assign it to be the best so far\n if current_path != None:\n if current_dist < best_dist:\n best_path = current_path\n best_dist = current_dist\n \n # check to see if best path and best dist variables have been changed\n # that the recursive function actually found the shortest path that satisfies all the constraints\n if len(best_path) > 0:\n return (best_path, best_dist)\n else:\n return (None, None)", "title": "" }, { "docid": "6e0852eba9a2a061b2c1b78036b187fa", "score": "0.53831995", "text": "def update_path(self,newCity):\n self.path.append(newCity)\n self.possible_locations.remove(newCity)", "title": "" }, { "docid": "b0f88f55107f0bd3dd2254c4857db554", "score": "0.53806365", "text": "def 
addNewNodeFromTourToTour(tour1, tour2,D,distanceO):\r\n bestTour1,bestTour2=tour1,tour2\r\n distance = distanceO.getDistanceTour(tour1, D) + distanceO.getDistanceTour(tour2, D)\r\n bestDelta=0\r\n for node in tour1:\r\n newTour1=copy.deepcopy(tour1)\r\n newTour1.remove(node)\r\n distanceNew1 = distanceO.getDistanceTour(newTour1,D)\r\n newTour2,distanceNew2 = getBestInsertedDistance(tour2, node,D, distanceO)\r\n delta = distanceNew1+distanceNew2 - distance\r\n if delta < bestDelta:\r\n bestDelta = delta\r\n bestTour1,bestTour2=newTour1,newTour2\r\n \r\n return bestTour1,bestTour2,bestDelta", "title": "" }, { "docid": "5bbeea2eefa24056e24e99c0c70393c2", "score": "0.5355615", "text": "def transformed_sorted_distances(self, locs, locs2=None):\n if locs2 is None: \n d = tf.sqrt(square_dist(self.locs_poi_j, locs))\n if self.kernel_type == \"Gaussian\":\n d_new = self.effects*tf.exp(-0.5*(d/self.distmax)**2)\n else:\n d_new = self.effects * tf.nn.relu((-1/self.distmax)*d+1)\n out = tf.matmul(d_new, d_new, transpose_a = True)\n return(out + tf.eye(tf.shape(out)[0], dtype = tf.float64)*1e-4)\n else:\n d = tf.sqrt(square_dist(self.locs_poi_j, locs))\n d2 = tf.sqrt(square_dist(self.locs_poi_j, locs2))\n if self.kernel_type == \"Gaussian\":\n d_new = self.effects*tf.exp(-0.5*(d/self.distmax)**2)\n d2_new = self.effects*tf.exp(-0.5*(d2/self.distmax)**2)\n else:\n d_new = self.effects * tf.nn.relu((-1/self.distmax)*d+1) \n d2_new = self.effects * tf.nn.relu((-1/self.distmax)*d2+1)\n out = tf.matmul(d_new, d2_new, transpose_a = True)\n return(out)", "title": "" }, { "docid": "cf4a5e6a768618a158bdc52b261713a0", "score": "0.5341396", "text": "def minimum_transfers(r_dict, weekday, time_unit, start_route, end_route):\n\tmaximum_paths = 1000\n\tpossible_paths = list()\n\tvisited = set()\n\tjourney_id = -1\n\tpath_dict = dict()\n\tcandidate_paths = list()\n\t# check the simple case\n\tif start_route == end_route:\n\t\tpossible_paths.append([start_route])\n\t# explore more complex possible paths\n\tfor route in r_dict[weekday][time_unit][start_route]:\n\t\tjourney_id += 1\n\t\tpath_dict[journey_id] = [1, [start_route, route]]\n\t# mark that we've been on the start_route already\n\tvisited.add(start_route)\n\t# develop non-trivial paths\n\twhile not not path_dict and len(possible_paths) <= maximum_paths:\n\t\t# get current_details\n\t\tcurrent_details = remove_first_entry_of_dict(path_dict)[1]\n\t\ttransfers = current_details[0]\n\t\tpath = current_details[1]\n\t\tvisited_route = path[-1]\n\t\t# check if we've completed the trip\n\t\tif visited_route == end_route:\n\t\t\tpossible_paths.append(path)\n\t\telse:\n\t\t\t# make sure this path has not already been explored\n\t\t\tif is_subset_for_multiple_lists(path, possible_paths):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# mark that we've been to visted_route as we'll now iterate over all its possible connections\n\t\t\t\tvisited.add(visited_route)\n\t\t\t\t# iterate over all possible connections of the prior_stop\n\t\t\t\tfor route in r_dict[weekday][time_unit][visited_route]:\n\t\t\t\t\tif route in visited:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tjourney_id += 1\n\t\t\t\t\t\tnew_path = path + [route]\n\t\t\t\t\t\tnew_transfers = transfers + 1\n\t\t\t\t\t\tpath_dict[journey_id] = [new_transfers, new_path]\n\treturn possible_paths", "title": "" }, { "docid": "43cadf3176b9d459b679e452d418b235", "score": "0.5336412", "text": "def update_routing_table(self):\n\n unvisited_nodes = {node_id for node_id in self.network.node_dict}\n distance_dict = {node_id : BIG for 
node_id in self.network.node_dict}\n previous_dict = {node_id : None for node_id in self.network.node_dict}\n\n distance_dict[self.node_id] = 0\n\n while unvisited_nodes:\n\n min_dist = min(distance_dict[node] for node in unvisited_nodes)\n current_vertex = [node for node in unvisited_nodes if distance_dict[node] == min_dist][0]\n\n unvisited_nodes.remove(current_vertex)\n\n for link in self.network.node_dict[current_vertex].adjacent_links:\n adj_node = link.get_other_node(self.network.node_dict[current_vertex])\n\n distance_through_node = distance_dict[current_vertex] + self.known_link_costs[link.link_id]\n\n if distance_through_node < distance_dict[adj_node.node_id]:\n distance_dict[adj_node.node_id] = distance_through_node\n previous_dict[adj_node.node_id] = current_vertex\n\n\n for node_id in self.network.node_dict:\n if node_id == self.node_id:\n continue\n traceback_node_id = node_id\n while previous_dict[traceback_node_id] != self.node_id:\n traceback_node_id = previous_dict[traceback_node_id]\n\n\n\n self.routing_table[node_id] = self.get_link_from_node_id(traceback_node_id)", "title": "" }, { "docid": "53fec183e2bf62b877c208af73d6ca20", "score": "0.53182477", "text": "def uniformCostSearch(problem):\n\n # *** Your Code Here ***\n startState = problem.startingState()\n if problem.isGoal(startState):\n return []\n path = util.PriorityQueue()\n path.push( (startState, [], 0), 0)\n #print (\"startState:\", startState)\n pathList =[]\n pathList.append(startState)\n visited = []\n\n actions = []\n actions2 = []\n\n while not path.isEmpty():\n (state, actions, cost) = path.pop()\n #print (\"pathList before remove:\", pathList)\n #print (\"state[0]:\",state[0])\n pathList.remove(state)\n #print (\"pathlist after remove:\", pathList)\n\n if problem.isGoal(state):\n return actions\n\n if state not in visited:\n visited.append(state)\n\n successors = problem.successorStates(state)\n for(nextState, nextAction, newCost) in successors:\n actions2 = actions.copy()\n actions2.append(nextAction)\n priority = newCost + cost\n if nextState not in visited and nextState not in pathList:\n #print (\"nextState1stif:\", nextState)\n path.push( (nextState, actions2, priority), priority)\n pathList.append(nextState)\n #print (\"pathListIn1stIf:\",pathList)\n elif nextState in pathList and newCost > priority:\n #print (\"nextState2ndif:\", nextState)\n path.push( (nextState, actions2, priority), priority)\n pathList.append(nextState)\n #print (\"pathListIn2ndIf:\",pathList)\n return actions\n\n util.raiseNotDefined()", "title": "" }, { "docid": "7cabc6d3933e2ad64031829b3f1a6acc", "score": "0.5302066", "text": "def heuristic(current_city,goal_city):\n\n if current_city not in gps:\n return 0\n x1,y1 = gps[current_city]\n x2,y2 = gps[goal_city]\n x1 = math.radians(x1)\n y1 = math.radians(y1)\n x2 = math.radians(x2)\n y2 = math.radians(y2)\n distance = float(69.1105 * (math.degrees(math.acos(math.sin(x1) * math.sin(x2) \\\n + math.cos(x1) * math.cos(x2) * math.cos(y1 - y2)))))\n return distance", "title": "" }, { "docid": "ec257d032062b73057bb9bab18bc9520", "score": "0.5296174", "text": "def get_closest(unvisited):\n\n # Set the variable, min_length to be the\n # distance of its first\n # neighbouring city listed in the tuple.\n min_length = unvisited[0][1]\n\n tuple_index = 0\n\n for i in range(len(unvisited)):\n\n # If the new distance is smaller than\n # the current minimum distance,\n # substitute the new distance as the\n # current the minimum distance.\n if unvisited[i][1] < min_length:\n min_length = 
unvisited[i][1]\n tuple_index = i\n\n # Returns the smallest distance tuple from\n # the lists of tuples given.\n return unvisited[tuple_index]", "title": "" }, { "docid": "5cef8911124b0b4f444d7f2c60237a28", "score": "0.52709365", "text": "def shortestpath(graph,start,end,visited=[],distances={},predecessors={}):\n # detect if first time through, set current distance to zero\n if not visited:\n distances[start]=0\n # if we've found our end node, find the path to it, and return\n if start==end:\n path=[]\n while end != None:\n path.append(end)\n end=predecessors.get(end,None)\n return distances[start], path[::-1]\n # process neighbors as per algorithm, keep track of predecessors\n for neighbor in graph[start]:\n if neighbor not in visited:\n neighbordist = distances.get(neighbor, sys.maxsize)\n tentativedist = distances[start] + graph[start][neighbor]\n if tentativedist < neighbordist:\n distances[neighbor] = tentativedist\n predecessors[neighbor]=start\n # neighbors processed, now mark the current node as visited\n visited.append(start)\n # finds the closest unvisited node to the start\n unvisiteds = dict((k, distances.get(k, sys.maxsize)) for k in graph if k not in visited)\n closestnode = min(unvisiteds, key=unvisiteds.get)\n # now take the closest node and recurse, making it current\n return shortestpath(graph,closestnode,end,visited,distances,predecessors)", "title": "" }, { "docid": "163431c5a4a127032abaa12ba291b1b8", "score": "0.52591145", "text": "def searchHillClimbing(graph, start, goal):\n\n # Initialise the came_from dictionary\n came_from = {}\n came_from[start] = None\n l_stack=[]\n if start==goal:\n came_from[goal]=start\n return came_from\n visited={}\n for x in range(graph.width):\n for y in range(graph.height):\n if graph.isOOB((x,y)):\n visited[(x,y)]=True\n else:\n visited[(x,y)]=False\n '''for items in visited:\n print(visited[items])'''\n #came_from[start]=start\n l_stack.append(start)\n #visited[start]=True\n parent={}\n parent[start]=start\n while(l_stack):\n cur=l_stack.pop()\n #print (cur)\n #visited[cur]=True\n came_from[cur]=parent[cur]\n if cur==goal:\n break\n neighbors=graph.neighboursOf(cur)\n distances={}\n for n in neighbors:\n distances[n]=heuristic(n,goal)\n sorted_neighbors = sorted(distances.items(), key=operator.itemgetter(1))\n for ((x,y),z) in reversed(sorted_neighbors):\n item=(x,y)\n #print(visited[item])\n if (not(visited[item])):\n parent[item]=cur\n visited[cur]=True\n l_stack.append(item)\n #print(item) \n \n \n '''for i in came_from:\n print(i+came_from[i])\n # BEGIN HERE #\n print(\"goal: \",goal)\n print(\"start:\",start)\n #print(\"goal_parent:\",came_from[goal])'''\n path={}\n current=goal\n while(current!=start):\n path[current]=came_from[current]\n current=came_from[current]\n\n # END HERE #\n\n return path", "title": "" }, { "docid": "f3ada5ec78048cbe9dd3183cea907994", "score": "0.52402467", "text": "def tour_improve(self, tour):\n (best_length, best_cities) = (tour.tour_length(), tour.city_sequence())\n\n self._lk_tour_length = tour.tour_length() # best known so far\n loop_roads = Roads(tour) # loop over a duplicate; tour will be modified.\n # loop_roads.update_by_length() # sort; keeps things deterministic\n # roads_by_length = loop_roads.by_length\n # roads_list = list(tour)\n\n if self.lk_verbose:\n print \"===== starting tour_improve with %i paths to check\" % \\\n (2*len(loop_roads))\n i = 0\n for road in loop_roads: # no sort; works, but expect order to vary\n #for road in roads_by_length: # sorted ... 
but still not deterministic\n # for road in roads_list: # still not deterministic. I give up.\n for backward in (True, False):\n i += 1\n tour.revert()\n tour.tour2path(road, backward)\n if self.lk_verbose:\n print \"---- calling %i path_search on %s \" % (i, str(tour))\n tour2 = self.path_search(tour)\n if self.lk_verbose:\n print \"---- done path_search; found length=%f\" % tour2.tour_length()\n if tour2.tour_length() < best_length:\n best_length = tour2.tour_length()\n best_cities = tour2.city_sequence()\n best_tour = Tour(self, best_cities)\n if self.lk_verbose:\n print \"===== finished tour_improve; best is %s \" % str(best_tour)\n return best_tour", "title": "" }, { "docid": "98fc0fdfa3f473dcd05723c50048d8a3", "score": "0.52213496", "text": "def path_distances(graph, target):\n distances = {n: np.inf for n in graph.nodes}\n distances[target] = 0\n done = False\n while not done:\n done = True\n for n, current_dist in list(distances.items()):\n for neighbor in graph.out_neighbors[n]:\n update_dist = 1 + distances[neighbor]\n if update_dist < current_dist:\n distances[n] = update_dist\n done = False\n return distances", "title": "" }, { "docid": "ef6f473d4019fd87cb22a495933dd7b6", "score": "0.52144676", "text": "def ast(hueristic,start,goal):\n path = []\n depth = []\n\n # initialize variables\n max_depth = 0 \n\n # initialize a Queue() object and add the start location to it:\n # queue = Queue()\n\n queue = PriorityQueue()\n # queue.put(start) \n queue.put((0,start))\n # initialize a set() object for visited list and add the start location to it\n visited = set()\n visited.add(start)\n\n # initialize Queue() object for depth calculation\n depth = Queue()\n depth.put(0)\n\n\n # define an empty dictionary, where you'll record how you moved through the grid and a goal location,\n branch = {}\n found = False\n \n while not queue.empty():\n # deque and store the explored node\n # current_node = queue.get()\n item = queue.get()\n current_cost = item[0]\n current_node = item[1]\n visited.add(current_node)\n dep = depth.get()\n\n\n if current_node == goal:\n print('Found the Solution')\n found = True\n break\n else:\n for action in valid_actions(current_node):\n # get movement indicator from actions list\n da = action.delta\n\n # tuple -> grid transformation\n grid = np.array(current_node).reshape(3,-1)\n\n # find grid index of 0\n index = np.where(grid == 0)\n x,y = int(index[0]),int(index[1])\n\n #grid manipulation to exchange 0 and neighbor elements. \n grid[x+da[0],y+da[1]],grid[x,y] = grid[x,y],grid[x+da[0],y+da[1]]\n\n # grid -> tuple transformation\n next_node = tuple(grid.flatten().tolist())\n\n # calculate the heuristic cost.\n new_cost = current_cost + hueristic(next_node,goal)\n\n\n # Check if the new node has been visited before.\n # If the node has not been visited:\n # 1. Mark it as visited\n # 2. Add it to the queue\n # 3. 
Add how I got there to branch\n \n if next_node not in visited:\n visited.add(next_node)\n # queue.put(next_node)\n queue.put((new_cost,next_node))\n\n depth.put(dep+1)\n # branch[next_node] = (current_node, action)\n branch[next_node] = (new_cost,current_node,action)\n\n if dep + 1 > max_depth:\n max_depth = dep + 1\n\n\n path_cost = 0 \n nodes = 0 \n \n if found:\n\n # path_cost = 0\n\n nodes = len(branch)\n\n # traceback to find the depth by using of the branch dictionary.\n n = goal\n # print(branch[n][0])\n path_cost = branch[n][0]\n\n # while branch[n][0] != start:\n while branch[n][1] != start:\n\n # path.append(branch[n][1])\n path.append(branch[n][2])\n # n = branch[n][0]\n n = branch[n][1]\n\n # path.append(branch[n][1])\n path.append(branch[n][2])\n\n return path[::-1],max_depth,nodes,path_cost", "title": "" }, { "docid": "eac7340b33adf41e5fc6fcab89fb1a1c", "score": "0.52105474", "text": "def solve(G):\n\n def graph_generator(H):\n \"\"\"\n Args:\n H: networkx.Graph\n k: # of cities to remove\n Returns:\n L: list of all possible connected graphs w/ (H.nodes - 1) cities \n \"\"\"\n L = []\n nodes = list(H.nodes)\n nodes.remove(0)\n nodes.remove(dest)\n for node in nodes:\n h = H.copy()\n h.remove_node(node)\n if nx.is_connected(h):\n L.append(h)\n\n return L\n\n def nodes_to_edges(nodes):\n edges = []\n\n if len(nodes) == 0:\n return edges\n\n prev = nodes[0]\n for n in nodes[1:]:\n edges.append((prev, n))\n prev = n\n\n return edges\n\n def solver(A, k):\n\n HTC = 3 # Heuristic - height of a tree\n\n def helper(A, k):\n if k == 0:\n return [A]\n R = []\n for e in A[2]:\n H = A[0].copy()\n H.remove_edge(e[0], e[1])\n if nx.is_connected(H):\n B = (\n H,\n nx.dijkstra_path_length(H, 0, dest),\n nodes_to_edges(nx.dijkstra_path(H, 0, dest))\n )\n\n for x in helper(B, k-1):\n R.append(x)\n\n return R\n\n while k > 0:\n R = helper(A, k) if k < HTC else helper(A, HTC)\n r = list(map(lambda x: x[1], R))\n if len(r) == 0:\n break\n A = R[r.index(max(r))]\n k -= HTC\n\n return A\n\n # Initialize\n num_k, num_c, dest = 0, 0, G.number_of_nodes()-1\n if G.number_of_nodes() <= 30:\n num_k, num_c = 15, 1\n elif G.number_of_nodes() <= 50:\n num_k, num_c = 50, 3\n elif G.number_of_nodes() <= 100:\n num_k, num_c = 100, 5\n\n answer = (G, nx.dijkstra_path_length(G, 0, dest))\n A = (G, nx.dijkstra_path_length(G, 0, dest),\n nodes_to_edges(nx.dijkstra_path(G, 0, dest)))\n if answer[1] <= A[1]:\n A = solver(A, num_k)\n if answer[1] < A[1]:\n answer = A\n k = [e for e in G.edges if e not in answer[0].edges]\n\n for cc in range(num_c):\n less_cities = graph_generator(answer[0])\n for g in less_cities:\n A = (g, nx.dijkstra_path_length(g, 0, dest),\n nodes_to_edges(nx.dijkstra_path(g, 0, dest)))\n if answer[1] < A[1]:\n answer = A\n else:\n break\n c = [v for v in G.nodes if v not in answer[0].nodes]\n\n return c, k", "title": "" }, { "docid": "9b473ad7d0e19ca899d61862624f06c3", "score": "0.5204823", "text": "def swap_cities(*, road_map: RoadMap, index_1: int, index_2: int) -> Tuple[RoadMap, float]:\n if index_1 == index_2:\n return road_map, compute_total_distance(road_map=road_map)\n else:\n city_a = road_map[index_1]\n city_b = road_map[index_2]\n\n for idx, _ in enumerate(road_map):\n if idx == index_1:\n road_map[idx] = city_b\n if idx == index_2:\n road_map[idx] = city_a\n\n return road_map, compute_total_distance(road_map=road_map)", "title": "" }, { "docid": "21b01c2d1f3809ddb1ccf4a5b8e6eb9d", "score": "0.52007", "text": "def nearest_neighbor_tsp(cities):\n\n n = len(cities)\n distances = 
get_distances(cities)\n\n cost = 0 # traveling cost so far\n path = [None] * n # tour path across all cities\n path[0] = 0\n visited = [False] * n # cache of visited cities\n visited[0] = True\n\n # repeatedly visit the closest city that hasn't been visited yet\n # (and break ties by lowest city index)\n for i in xrange(1, n):\n u = path[i - 1]\n d, v = min((d, v) for v, d in enumerate(distances[u]) if not visited[v])\n cost += d\n path[i] = v\n visited[v] = True\n\n return cost + distances[path[-1]][0]", "title": "" }, { "docid": "083e9f2a2ea59f64e0e894777a7c65bc", "score": "0.5180176", "text": "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n ind = { loc:ind for (ind, loc) in enumerate(list_of_locations) }\n list_of_homes_ind = [ind[home] for home in list_of_homes]\n starting_ind = ind[starting_car_location]\n graph, message = adjacency_matrix_to_graph(adjacency_matrix)\n shortest_paths = nx.shortest_path(graph)\n\n mst = approximation.steinertree.steiner_tree(graph, [starting_ind] + list_of_homes_ind)\n\n # print(\"All mst nodes in homes list: \" + str(all([home in list(mst.nodes) for home in list_of_homes_ind])))\n\n nodes = remove_repeats(list(nx.algorithms.traversal.depth_first_search.dfs_preorder_nodes(mst, source = ind[starting_car_location])))\n # print(\"All mst nodes in remove repeats nodes list: \" + str(all([home in list(nodes) for home in list_of_homes_ind])))\n\n # print(nodes)\n nodes = find_path(nodes, graph)\n print(is_valid_walk(graph, nodes))\n #print(graph.edges)\n print(nodes)\n\n dropoff_dict = { home:[home] for home in list_of_homes_ind }\n\n #nodes, dropoff_dict = simulated_annealing(nodes, graph, starting_ind, list_of_homes_ind, shortest_paths, dropoff_dict)\n\n return nodes, dropoff_dict", "title": "" }, { "docid": "b51756aee153b950b2c57fdcda8d6e92", "score": "0.51776135", "text": "def updateLocations(self, loc1, loc2):\n\n #Get the pieces based on the locations passed in\n piece1 = self.findLocPiece(loc1)\n piece2 = self.findLocPiece(loc2)\n\n #TODO: DETERMINE WHY THIS IS HERE; seems redundant/useless\n if piece1 == piece2:\n piece2 = None\n\n # Print debugging code.\n # print(\"Loc 1: %s\" % loc1)\n # print(\"Loc 1: %s\" % loc2)\n # print(piece1)\n # print(piece2)\n # print(\"Good1\")\n # print(\"Piece1 : %s \" % piece1)\n # print(\"Piece2 : %s \" % piece2)\n\n #Make sure that the second piece is moved to the graveyard first.\n if piece2 is not None: #Need to run this first because of pathing\n hi = loc1+loc2\n src, dest = self.uciToLocations(hi)\n temp = self.mp.capture(self.output_move(src, dest))\n temp = self.convertBack(temp)\n if loc1 != temp:\n print(loc1, loc2, temp)\n self.updateLocations(loc1, temp)\n\n if(self.turn == self.first):\n #white takes black, so false\n self.graveyardMove(loc2, False)\n else:\n #black takes white, so true\n self.graveyardMove(loc2, True)\n\n self.updateLocations(temp, loc2)\n\n #Make the move, depending on whose turn it is.\n elif self.turn:\n # print(self.whiteLocations[piece1])\n self.whiteLocations[piece1].remove(loc1)\n self.whiteLocations[piece1].append(loc2)\n\n else:\n self.blackLocations[piece1].remove(loc1)\n self.blackLocations[piece1].append(loc2)", "title": "" }, { "docid": "b497cf75aad445cef7e75085c2f0b5ac", "score": "0.5173105", "text": "def searchAStar(graph, start, goal):\n\n # Initialise the came_from dictionary\n '''came_from={}\n heap=[]\n visited={}\n str_dist={}\n str_dist[start]=0\n s_dist=heuristic(start,goal)+str_dist[start]\n heapq.heappush(heap, 
(s_dist, start))\n \n for x in range(graph.width):\n for y in range(graph.height):\n if graph.isOOB((x,y)):\n visited[(x,y)]=True\n else:\n visited[(x,y)]=False\n previous=(-1,-1)\n while(heap):\n (z,(x,y))=heapq.heappop(heap)\n cur=(x,y)\n #print(cur)\n visited[cur]=True\n came_from[cur]=previous\n if cur==goal:\n break\n neighbors=graph.neighboursOf(cur)\n for n in neighbors:\n if(not(visited[n])):\n str_dist[n]=str_dist[cur]+1\n dist=heuristic(n,goal)+str_dist[n]\n heapq.heappush(heap, (dist, n))\n previous=cur''' \n \n # BEGIN HERE #\n came_from = {}\n came_from[start] = None\n visited_node={}\n visited_node[start]=1\n parent={}\n parent[start]=None\n if(start==goal):\n return came_from\n cur=start\n m_list={}\n dist={}\n dist[start]=0\n m_list[start]=heuristic(start,goal)+dist[start]\n while(1):\n if(len(graph.neighboursOf(cur))!=0):\n l=[]\n del m_list[cur]\n l=graph.neighboursOf(cur)\n for i in l:\n if i not in visited_node:\n dist[i]=dist[cur]+1\n visited_node[i]=1\n parent[i]=cur\n m_list[i]=heuristic(i,goal)+dist[i]\n s={} \n s = [(k, m_list[k]) for k in sorted(m_list, key=m_list.get, reverse=False)]\n m_list={}\n c=0\n for i in s:\n if(c==0):\n cur=i[0]\n c=1\n m_list[i[0]]=i[1]\n\n came_from[cur]=parent[cur]\n if(cur==goal):\n break\n return came_from\n else:\n del m_list[cur]\n s={}\n s = [(k, m_list[k]) for k in sorted(m_list, key=m_list.get, reverse=False)]\n \n m_list={}\n c=0\n for i in s:\n if(c==0):\n cur=s[0]\n c=1\n m_list[i[0]]=i[1] \n came_from[cur]=parent[cur]\n if(cur==goal):\n break\n return came_from\n current=goal\n path={}\n while(current!=start):\n path[current]=came_from[current]\n current=came_from[current]\n return path \n\n # END HERE #\n\n return came_from", "title": "" }, { "docid": "46108f111fbff300c20faf5f2de30072", "score": "0.5169279", "text": "def dijkstras_shortest_path(initial_position, destination, graph, adj):\r\n\r\n queue = [] # essentially handles visited and unvisited positions\r\n heappush(queue, (0, initial_position)) # add the starting position to the queue\r\n shortest_distance = {} # updated list of shortest paths to position\r\n previous_position = {} # updated list of parent position of position\r\n\r\n shortest_distance[initial_position] = 0 # initial position is cost 0 from itself\r\n previous_position[initial_position] = None # initial position is the root\r\n\r\n while queue:\r\n # fetch the position with the shortest distance\r\n current_distance, current_position = heappop(queue)\r\n\r\n # exit loop if destination found\r\n if current_position == destination:\r\n shortest_path = []\r\n index_path = destination\r\n # build the found shortest_path\r\n while index_path != None:\r\n shortest_path.insert(0, index_path) # initial position should be at beginning of list\r\n index_path = previous_position[index_path]\r\n return shortest_path\r\n\r\n # iterate through visitable cells\r\n for next_distance, next_direction in navigation_edges(graph, current_position):\r\n new_distance = shortest_distance[current_position] + next_distance # caluculate new potentially shortest distance\r\n # enter when next position has not been visited or new distance is less than current position's distance \r\n if next_direction not in shortest_distance or new_distance < shortest_distance[next_direction]:\r\n shortest_distance[next_direction] = new_distance # update the distance\r\n previous_position[next_direction] = current_position # update the trail\r\n heappush(queue, (new_distance, next_direction)) # add updated waypoint to queue\r\n\r\n return 
None", "title": "" }, { "docid": "0f037429119692784f24b7eacca3a757", "score": "0.5155091", "text": "def dijkstras_shortest_path_to_all(initial_position, graph, adj):\n dist = {} # distance from source to destination\n prev = {} # previous node in optimal path from source\n queue = [] # queue initialization\n dist[initial_position] = 0\n prev[initial_position] = None #prev from source\n\n heappush(queue, (dist[initial_position], initial_position))\n\n while queue:\n curr_cost, curr_node = heappop(queue)\n # Use navigation_edges to get adjacent cells\n adjacent = adj(graph, curr_node)\n # Iterate through adjacency list and calculate cost\n for acell, cost in adjacent:\n # Variable to store cost of path consisting of current cost and the cost of the\n # adjacent cell\n tempcost = curr_cost + cost\n # Updating dist of cells\n if acell not in dist or tempcost < dist[acell]:\n dist[acell] = tempcost\n prev[acell] = curr_node\n heappush(queue, (tempcost, acell))\n return dist\n pass", "title": "" }, { "docid": "859ab82b30c26362dccb5e0c322b0d19", "score": "0.5143684", "text": "def two_node_swap_optimization(path: List[int],\n distance_matrix: np.ndarray,\n total_distance: int) -> Tuple[List[int], int]:\n while True:\n best_swap = (0, None)\n for segment in path_segments(segment=[],\n start=0, end=len(path)-1,\n segment_length=2):\n delta = delta_if_better_path_from_swap(path, distance_matrix, *segment)\n if delta < best_swap[0]:\n best_swap = (delta, segment)\n if best_swap[0] < 0:\n i, j = best_swap[1]\n path[i + 1:j + 1] = reversed(path[i + 1:j + 1])\n total_distance += best_swap[0]\n else:\n break\n\n return path, total_distance", "title": "" }, { "docid": "e096dee6833105256c992d50bc0def63", "score": "0.5132506", "text": "def salesman_paths_rec(self, loc_c, locs, current_path, all_paths, forks):\n current_path.append(loc_c)\n conns = self.conns[loc_c]\n new_conns = [c for c in conns if c not in current_path]\n # Dead end reached\n if len(new_conns) == 0:\n # If full map explored, we're done\n unvisited = [c for c in locs if c not in current_path]\n if len(unvisited) == 0 and current_path not in all_paths:\n all_paths.append(current_path)\n return\n # Otherwise, we backtrack and try different decisions at prev forks\n else:\n # For each fork\n for fork in forks:\n # Retrace our steps to that fork, adding to our path\n steps = current_path.copy()[:len(current_path) - 1]\n current_path_new = current_path.copy()\n while steps[len(steps) - 1] != fork:\n current_path_new.append(steps.pop())\n # This branch of recursion takes unexplored branches w/ it\n forks_new = forks.copy()\n forks_new.remove(fork)\n # Rerun search with path including backtracing\n self.salesman_paths_rec(\n fork, locs, current_path_new, all_paths, forks_new\n )\n # If we made a decision here, make a note so we come back and try others\n elif len(new_conns) > 1:\n forks.append(loc_c)\n # Explore new branches\n for conn in new_conns:\n self.salesman_paths_rec(\n conn, locs, current_path.copy(), all_paths, forks.copy()\n )\n # Cycles in the graph may give us shortcuts to previous forks\n for fork in forks:\n if self.cost(loc_c, fork) is not None:\n forks_new = forks.copy()\n forks_new.remove(fork)\n self.salesman_paths_rec(\n fork, locs, current_path.copy(), all_paths, forks_new\n )", "title": "" }, { "docid": "4fe3279ac176d430004148ef34e409cf", "score": "0.51274425", "text": "def dijkstra_shortest_path(self, grid_obs, source, dest):\r\n prio_dict = priorityDictionary()\r\n prio_dict[source] = 0\r\n iterator = iter(prio_dict)\r\n 
current_best_length = dict()\r\n for i in range(len(grid_obs)):\r\n current_best_length[i] = (float('inf'), None)\r\n current_best_length[source] = (0, None)\r\n\r\n while True:\r\n try:\r\n current_space = next(iterator)\r\n except StopIteration:\r\n break\r\n\r\n if current_space + (self.obs_size * 2 + 1) < len(grid_obs) and \\\r\n current_best_length[current_space][0] + 1 < \\\r\n current_best_length[current_space + (self.obs_size * 2 + 1)][0] and \\\r\n grid_obs[current_space + (self.obs_size * 2 + 1)] != \"air\" and \\\r\n current_space != current_best_length[current_space + (self.obs_size * 2 + 1)][1]:\r\n current_best_length[current_space + (self.obs_size * 2 + 1)] = (\r\n current_best_length[current_space][0] + 1, current_space)\r\n prio_dict[current_space + (self.obs_size * 2 + 1)] = \\\r\n current_best_length[current_space + (self.obs_size * 2 + 1)][0]\r\n\r\n if current_space + 1 < len(grid_obs) and \\\r\n current_best_length[current_space][0] + 1 < current_best_length[current_space + 1][0] and \\\r\n grid_obs[current_space + 1] != \"air\" and \\\r\n current_space != current_best_length[current_space + 1][1]:\r\n current_best_length[current_space + 1] = (current_best_length[current_space][0] + 1, current_space)\r\n prio_dict[current_space + 1] = current_best_length[current_space + 1][0]\r\n\r\n if current_space - 1 >= 0 and \\\r\n current_best_length[current_space][0] + 1 < current_best_length[current_space - 1][0] and \\\r\n grid_obs[current_space - 1] != \"air\" and \\\r\n current_space != current_best_length[current_space - 1][1]:\r\n current_best_length[current_space - 1] = (current_best_length[current_space][0] + 1, current_space)\r\n prio_dict[current_space - 1] = current_best_length[current_space - 1][0]\r\n\r\n if current_space - (self.obs_size * 2 + 1) >= 0 and \\\r\n current_best_length[current_space][0] + 1 < \\\r\n current_best_length[current_space - (self.obs_size * 2 + 1)][\r\n 0] and \\\r\n grid_obs[current_space - (self.obs_size * 2 + 1)] != \"air\" and \\\r\n current_space != current_best_length[current_space - (self.obs_size * 2 + 1)][1]:\r\n current_best_length[current_space - (self.obs_size * 2 + 1)] = (\r\n current_best_length[current_space][0] + 1, current_space)\r\n prio_dict[current_space - (self.obs_size * 2 + 1)] = current_best_length[current_space - 1][0]\r\n\r\n best_path = [dest]\r\n prev = dest\r\n while prev != source:\r\n prev = current_best_length[prev][1]\r\n best_path.append(prev)\r\n\r\n best_path.reverse()\r\n return (best_path)", "title": "" }, { "docid": "1486f9a7cd4f31d812d654dcbae13900", "score": "0.5126029", "text": "def update_traversal_graph(self):\n\n\t\tif self.player.current_room.id not in self.traversal_graph:\n\t\t\tself.traversal_graph[self.player.current_room.id] = {}\n\n\t\tfor direction in self.player.current_room.get_exits():\n\t\t\tif direction not in self.traversal_graph[self.player.current_room.id]:\n\t\t\t\tself.traversal_graph[\n\t\t\t\t\tself.player.current_room.id][direction] = '?'", "title": "" }, { "docid": "24d4e28a2033e65a14d4124f0aa5473d", "score": "0.5114988", "text": "def path_search(self, path, added=None, deleted=None):\n if not added:\n added = set()\n if not deleted:\n deleted = set()\n\n depth = len(added) # = len(deleted)\n (old_tour_length, old_cities) = (path.tour_length(), path.city_sequence())\n results = [(old_tour_length, old_cities)]\n mods = path.find_lk_mods(added, deleted)\n\n if self.lk_verbose:\n print \" \"*depth + \" -- path_search \" + \\\n \" depth=%i, path=%f, tour=%f, n_mods=%i \" % \\\n 
(depth, path.length, old_tour_length, len(mods))\n\n for (city, road_add, road_rm) in mods:\n\n if self.lk_verbose:\n print \" \"*depth + \" -> (city, road_add, road_rm) = (%s, %s, %s) \" % \\\n (str(city), str(road_add), str(road_rm))\n\n path.modify(city, road_add, road_rm)\n\n if self.lk_verbose:\n print \" \"*depth + \" -> modified path %s \" % str(path)\n\n if self.lk_restart_better_tours and \\\n (path.tour_length() + 1e-6 < self._lk_tour_length):\n # The 1e-6 is a round-off error fudge factor;\n # I think it sometimes thinks the same tour is a bit shorter,\n # maybe if the roads are added up in a different order.\n self.tour = Tour(self, Cities(path.city_sequence()))\n if self.lk_verbose:\n print \"!! restart with better tour ; using %s\" % str(self.tour)\n # Restart the whole search, all the back to LK, with this better tour\n raise RestartLK()\n\n added.add(road_add)\n deleted.add(road_rm)\n\n if self.lk_depth_limit and depth > self.lk_depth_limit:\n result_path = path\n else:\n result_path = self.path_search(path, added, deleted)\n results.append((result_path.tour_length(), result_path.city_sequence()))\n\n if self.lk_verbose:\n print \" \"*depth + \" -> result path=%f; tour=%f\" % \\\n (result_path.length, result_path.tour_length())\n\n added.remove(road_add)\n deleted.remove(road_rm)\n\n path.unmodify(city, road_add, road_rm)\n\n # Finished breadth search at this depth ; return best result\n (best_length, best_city_seq) = min(results)\n return Tour(self, best_city_seq)", "title": "" }, { "docid": "6938afa5945b9ccf5fa94fedf2580b74", "score": "0.5110177", "text": "def tspToSolution1(nodes, cost_mat):\r\n # define neccessary functions from TSP notebook\r\n def cost(A, B):\r\n return cost_mat[A.num, B.num]\r\n\r\n def shortest_edges_first(cities):\r\n # Return all edges between distinct cities, sorted shortest first.\"\r\n edges = [(A, B) for A in cities for B in cities\r\n if id(A) < id(B)]\r\n return sorted(edges, key=lambda edge: cost(*edge))\r\n\r\n def join_endpoints(endpoints, A, B):\r\n # Join B's segment onto the end of A's and return the segment.\r\n # Maintain endpoints dict.\"\r\n Asegment, Bsegment = endpoints[A], endpoints[B]\r\n if Asegment[-1] is not A:\r\n Asegment.reverse()\r\n if Bsegment[0] is not B:\r\n Bsegment.reverse()\r\n Asegment.extend(Bsegment)\r\n del endpoints[A], endpoints[B] # A and B are no longer endpoints\r\n endpoints[Asegment[0]] = endpoints[Asegment[-1]] = Asegment\r\n return Asegment\r\n\r\n def greedy_tsp(cities):\r\n \"\"\"Go through edges, shortest first.\r\n Use edge to join segments if possible.\"\"\"\r\n endpoints = {c: [c] for c in cities}\r\n for (A, B) in shortest_edges_first(cities):\r\n if (A in endpoints and B in endpoints and\r\n endpoints[A] != endpoints[B]):\r\n new_segment = join_endpoints(endpoints, A, B)\r\n if len(new_segment) == len(cities):\r\n return new_segment\r\n\r\n # start of additional code\r\n\r\n # converting nodes into a list of cities\r\n class Node():\r\n def __init__(self, x, y, num):\r\n self.x = x\r\n self.y = y\r\n self.num = num\r\n\r\n City = Node\r\n cities = [City(nodes[0, i], nodes[1, i], i) for i in range(nodes.shape[1])]\r\n\r\n # apply greedy algorithm\r\n tour = greedy_tsp(cities)\r\n\r\n return tour", "title": "" }, { "docid": "783a630609d517ce51c1c76a0ff2620f", "score": "0.5101502", "text": "def plan_online(self):\n self.open_list.append(self.current_node)\n\n heapq.heapify(self.open_list)\n\n it = 0\n while len(self.open_list) > 0:\n # pprint(vars(self.current_node))\n it += 1\n # Simultaneously 
set current node and remove it from openlist\n self.current_node = heapq.heappop(self.open_list)\n # pprint(vars(self.current_node))\n # Heapq unnecessary for closed list\n self.closed_list.append(self.current_node)\n\n if self.current_node.position == self.goal_node.position:\n return self.trace_path(self.start_node, self.current_node)\n print(\"goal found after {} iterations!\".format(it))\n break\n\n self.neighbour_list = [] # restart neighbour list every iteration\n heapq.heapify(self.neighbour_list)\n\n for neighbour in self.get_neighbours(self.current_node):\n # print(\"Neighbour pos: {}\".format(neighbour))\n skip = False\n\n # see if matches coords in closed list\n for node in self.closed_list:\n if neighbour[0] == node.position[0] and neighbour[\n 1] == node.position[1]:\n # node exists in closed lit\n skip = True\n\n # see if index matches obstacle list\n for obstacle in self.obstacle_list:\n if neighbour[0] == obstacle[0] and neighbour[\n 1] == obstacle[1]:\n skip = True\n\n if skip is True:\n continue\n # if in none of these lists, create new node\n else:\n neighbour_temp = Node(neighbour, None, 0, 0, False)\n h_cost = self.get_dist(neighbour_temp, self.goal_node)\n # g_cost = self.current_node.gcost + self.get_dist(\n # neighbour_temp, self.current_node)\n g_cost = self.current_node.gcost + self.get_dist_n(\n neighbour_temp, self.current_node)\n # g_cost = self.current_node.gcost + 1\n neighbour_node = Node(neighbour, self.current_node, g_cost,\n h_cost, False)\n\n # Push to the right index by comparing .heap\n # attribute defined in node class under __lt__\n # (less than)\n heapq.heappush(self.neighbour_list, neighbour_node)\n\n if len(self.neighbour_list) > 0:\n neighbour_node = heapq.heappop(self.neighbour_list)\n # Avoid back-tracking\n self.closed_list.append(neighbour_node)\n heapq.heappush(self.open_list, neighbour_node)", "title": "" }, { "docid": "5823f1dcdd957a31470721fc49644cab", "score": "0.5073208", "text": "def calc_path(self):\n paths = nx.single_source_dijkstra_path(self.rev, self.exit_node)\n paths.pop(self.exit_node)\n self.path = {n: p[-2] for n, p in paths.items()}", "title": "" }, { "docid": "76011fe38ea1267e449a9dc006ab5bc2", "score": "0.5067229", "text": "def get_neighbors(state: Dict[int, List[int]]) -> List[Dict[int, List[int]]]:\n neighbors = []\n\n for source in range(len(state)):\n for task_id in range(len(state[source])):\n for destination in range(len(state)):\n # moving a task to its source is useless\n if destination == source:\n continue\n\n # creating the neighbor\n neighbors.append(copy.deepcopy(state))\n\n # removing the task\n task = neighbors[-1][source].pop(task_id)\n\n # add it to the other processor\n new_tasklist = neighbors[-1][destination][:]\n new_tasklist.append(task)\n neighbors[-1][destination] = sorted(new_tasklist)\n\n return neighbors", "title": "" }, { "docid": "6f27c31c40662ef600023ed89ccaa29c", "score": "0.5062935", "text": "def _costs(self, owned_cities):\n # assume I can have many roots\n if not owned_cities:\n costs = dict((c, 0) for c in self.cities)\n return costs\n costs = dict((c.name, 0) for c in owned_cities)\n queue = [c.name for c in owned_cities[:]]\n while queue:\n node = queue.pop(0)\n for neighbor, cost in self.graph[node].iteritems():\n c = cost + costs[node]\n if neighbor not in costs or c < costs[neighbor]:\n costs[neighbor] = c\n queue.append(neighbor)\n return costs", "title": "" }, { "docid": "075a43476217ec92ae5de7592ba9c2b6", "score": "0.50501317", "text": "def dijkstra(weighted_graph, start, end):\n 
list_of_tuples_node_totalweight = []\n list_of_tuples_node_totalweight.append((start, 0))\n # weight_dict[start] = 0 # total weight/distance\n prev = [] # previous node\n # unvisited = []\n\n for node in weighted_graph.nodes():\n if node is not start:\n list_of_tuples_node_totalweight.append((node, float(\"inf\")))\n unvisited = copy.deepcopy(list_of_tuples_node_totalweight)\n\n while unvisited:\n sorted_list = sorted(unvisited, key=lambda x: x[1])\n temp = sorted_list[0]\n for i, j in list_of_tuples_node_totalweight:\n if i == temp[0]:\n new_temp = j\n\n for neighbor in weighted_graph.neighbors(temp[0]):\n alt = new_temp + weighted_graph.dict[temp[0]][neighbor]\n for i, j in list_of_tuples_node_totalweight:\n if i == neighbor:\n list_v = j\n\n if alt < list_v:\n list_of_tuples_node_totalweight.remove((neighbor, list_v))\n list_of_tuples_node_totalweight.append((neighbor, alt))\n prev.append(neighbor)\n # if temp == end:\n # break\n unvisited = sorted_list[1:]\n return list_of_tuples_node_totalweight[-1][1]", "title": "" }, { "docid": "5c11b8d3301e3e5b285d124e9579e956", "score": "0.5044877", "text": "def dijkstras_shortest_path(initial_position, destination, graph, adj):\n dist = {} # distance from source to destination\n prev = {} # previous node in optimal path from source\n queue = [] # queue initialization\n dist[initial_position] = 0\n prev[initial_position] = None #prev from source\n #queue = [0, start]\n heappush(queue, (dist[initial_position], initial_position))\n while queue:\n # Pop least cost node\n curr_cost, curr_node = heappop(queue)\n # Once we find the destination, break the loop\n if curr_node == destination:\n break\n # Use navigation_edges to get adjacent cells\n adjacent = adj(graph, curr_node)\n # Iterate through adjacency list and calculate cost\n for acell, cost in adjacent:\n # Variable to store cost of path consisting of current cost and the cost of the\n # adjacent cell\n pathcost = curr_cost + cost\n if acell not in dist or pathcost < dist[acell]:\n dist[acell] = pathcost\n prev[acell] = curr_node\n heappush(queue, (pathcost, acell))\n # Build path to return\n if curr_node == destination:\n path = []\n # Building path in reverse order because we're at the destination\n while curr_node:\n path.append(curr_node)\n curr_node = prev[curr_node]\n # Reversing the path\n path.reverse()\n return path\n else:\n # Return empty list if there is no path\n return []\n pass", "title": "" }, { "docid": "8060e7209def8acf3c19dafd7aba5f8b", "score": "0.5017886", "text": "def dijkstra_shortest_path(self, src, dest=None):\r\n # make sure src is in the map\r\n for i in range(max_size):\r\n if self.labels.index(src) is not None:\r\n break\r\n raise ValueError\r\n\r\n # declare needed stuff\r\n path = [-1] * max_size\r\n distance = [math.inf] * max_size\r\n prev_vertex = [math.inf] * max_size\r\n\r\n # set distance for starting vertex 0\r\n distance[self.get_index(src)] = 0\r\n\r\n # go through each vertex and find it's distance\r\n for i in range(max_size - 1):\r\n # declare some needed variables and give them ridiculous values\r\n min_path = math.inf\r\n\r\n for j in range(max_size):\r\n if prev_vertex[j] == math.inf and distance[j] <= min_path:\r\n min_path = distance[j]\r\n min_index = j\r\n\r\n # set the selected vertex as visited\r\n prev_vertex[min_index] = min_index\r\n\r\n # update the distance of adjacent vertices\r\n for k in range(max_size):\r\n if prev_vertex[k] and self.my_map[min_index][k] != math.inf and \\\r\n distance[min_index] != math.inf and \\\r\n 
distance[min_index] + self.my_map[min_index][k] < distance[k]:\r\n\r\n path[k] = min_index\r\n distance[k] = distance[min_index] + self.my_map[min_index][k]\r\n\r\n # choose whether we return everything or just a single path\r\n if dest is None:\r\n # put everything into a dict file\r\n all_paths = {}\r\n\r\n for i in range(max_size):\r\n # break if it goes too far\r\n if self.labels[i] == -1:\r\n break\r\n\r\n if distance[i] == math.inf:\r\n all_paths[self.labels[i]] = (math.inf, [])\r\n else:\r\n all_paths[self.labels[i]] = (float(distance[i]), self.create_path(path, i, []))\r\n\r\n return all_paths\r\n else:\r\n if distance[self.get_index(dest)] == math.inf:\r\n my_tuple = (math.inf, [])\r\n else:\r\n path = self.create_path(path, self.get_index(dest), [])\r\n my_tuple = (float(distance[self.get_index(dest)]), path)\r\n return my_tuple", "title": "" }, { "docid": "8044c245e2a9c107c58b6a2230b82d1c", "score": "0.5007438", "text": "def move_zombies(self, human_distance):\n neighbors = {}\n new_zombie_list = []\n for zombie in self._zombie_list:\n for neighbor in self.four_neighbors(zombie[0],zombie[1]):\n neighbors[neighbor] = human_distance[neighbor[0]][neighbor[1]]\n neighbors[zombie] = human_distance[zombie[0]][zombie[1]]\n new_zombie_list.append(choice([new_zombie for new_zombie in neighbors.keys()\n if neighbors[new_zombie] == min(neighbors.values())\n and (self.is_empty(new_zombie[0],new_zombie[1]) or new_zombie == zombie)]))\n neighbors = {}\n self._zombie_list = list(new_zombie_list)", "title": "" }, { "docid": "14e794f6a0ec13bc4e6d002bf11dc44c", "score": "0.50062156", "text": "def find_path_using_visibility_graph(start, destination, visibility_graph):\n nodes_to_visit = set()\n nodes_to_visit.add(start)\n visited_nodes = set()\n came_from_graph = {}\n\n distance_from_start = defaultdict(lambda: float('inf'))\n distance_from_start[start] = 0\n estimated_distance = defaultdict(lambda: float('inf'))\n estimated_distance[start] = distance_estimate(start, destination)\n\n while nodes_to_visit:\n min_estimated_distance = min(estimated_distance[n] for n in nodes_to_visit)\n current_node = next(node for node in nodes_to_visit if estimated_distance[node] == min_estimated_distance)\n if current_node == destination:\n return reconstruct_path_to_point(destination, came_from_graph)\n nodes_to_visit.remove(current_node)\n visited_nodes.add(current_node)\n for adjacency in visibility_graph[current_node]:\n neighbour_node = adjacency.point\n if neighbour_node in visited_nodes:\n continue\n neighbour_distance = distance_from_start[current_node] + adjacency.distance\n if neighbour_node not in nodes_to_visit or neighbour_distance < distance_from_start[neighbour_node]:\n came_from_graph[neighbour_node] = current_node\n distance_from_start[neighbour_node] = neighbour_distance\n estimated_distance[neighbour_node] = neighbour_distance + distance_estimate(neighbour_node, destination)\n if neighbour_node not in nodes_to_visit:\n nodes_to_visit.add(neighbour_node)\n return None", "title": "" }, { "docid": "6bd875eb2f68ce7658412050db9c6e9c", "score": "0.49866474", "text": "def resolvejumps(jumps, linestowords):\n # in src order [(dest_line, cur_dest_budget)]\n # {dest_line: cur_dest_word}\n wordjumps = [(destline, 2) for _, destline in jumps]\n destmap = {destline: linestowords[destline] for _, destline in jumps}\n\n for j in range(len(wordjumps)):\n destline, curbudget = wordjumps[j]\n newbudget = len(locationtowords(destmap[destline])) + 1\n if newbudget == curbudget:\n continue\n\n shift = newbudget - 
curbudget\n wordjumps[j] = destline, newbudget\n for l2 in destmap:\n if l2 > destline:\n destmap[l2] += shift\n\n return destmap", "title": "" }, { "docid": "96a6639a530a2edc7d7257dc41996538", "score": "0.49855444", "text": "def main():\n node = create_node()\n\n costs = create_costs()\n\n list_cities = ['1', '2', '3', '4', '5']\n\n k = '1'\n c = []\n f = []\n\n while k != '5':\n for neighbor in node[k]:\n if node[k][neighbor] > 0 and costs[neighbor] == [float(\"inf\")]:\n c.append([node[k][neighbor], neighbor])\n max_c = max(c)\n costs[max_c[1]] = [max_c[0], k]\n k = max_c[1]\n c = []\n\n f.append(min(costs.values()))\n for i, node_ in enumerate(costs.values()):\n if node_ != [float(\"inf\")] and node_ != [float(\"inf\"), None]:\n print(i + 1, node_)\n print(f)\n print(costs)\n\n print()", "title": "" }, { "docid": "b078cdb5afa49db52168e418b6b2b30c", "score": "0.4977445", "text": "def all_pairs_shortest_paths(graph):\n n = len(graph)\n\n for k in range(n): # step\n for i in range(n): # row\n for j in range(n): # column\n\n # If an edge is found to reduce distance, update the shortest paths\n if graph[i][j] > graph[i][k] + graph[k][j]:\n graph[i][j] = graph[i][k] + graph[k][j]", "title": "" }, { "docid": "4b335496ac72df901a0c56690be3d2bd", "score": "0.4976292", "text": "def way(map_: Tuple[str], city1: str, city2: str) -> bool:\n # fill dictionary with pairs city : list of possible roads\n road_map = {el[0]: [] for el in map_} # type: Any\n for city in map_:\n road_map[city[0]].append(city[2])\n\n key_cities = road_map.keys()\n if city1 not in key_cities:\n return False\n # list to storage all possible roads from city1\n resulting_road = road_map[city1]\n i = 0\n\n while resulting_road[i] in key_cities and i < len(resulting_road):\n for road in road_map[city1]:\n if road in road_map.keys():\n resulting_road.extend(road_map[road])\n i += 1\n\n if city2 in resulting_road:\n return True\n return False", "title": "" }, { "docid": "7ba9181f67fe80b5719d204bf9ad5a4f", "score": "0.49755922", "text": "def extra(maze):\n # TODO: Write your code here\n nrow = maze.rows \n ncol = maze.cols \n start_point = maze.getStart()\n path = [start_point]\n goals = maze.getObjectives()\n visited = [[0 for i in range(ncol)] for i in range(nrow)]\n Q = []\n distance = 0\n hq.heappush(Q, (heuristic_extra(start_point, goals, distance), start_point, path))\n \n while True:\n current_cost, current_pos, path = hq.heappop(Q)\n visited[current_pos[0]][current_pos[1]] = 1\n \n if current_pos in goals:\n goals.remove(current_pos)\n if len(goals) == 0:\n return path\n start_point = current_pos\n Q = []\n distance = 0\n visited = [[0 for i in range(ncol)] for i in range(nrow)]\n hq.heappush(Q, (heuristic_extra(start_point, goals, distance), start_point, path))\n continue\n\n for next_pos in maze.getNeighbors(current_pos[0], current_pos[1]):\n if maze.isValidMove(next_pos[0], next_pos[1]) and visited[next_pos[0]][next_pos[1]] == 0:\n distance += 1\n hq.heappush(Q, (heuristic_extra(start_point, goals, distance), next_pos, path + [next_pos]))", "title": "" }, { "docid": "bf8470963eba12f7884c579f6b7991ce", "score": "0.4966595", "text": "def assign_dropoffs(G, path, homes_idxs, all_pairs_dists):\n locations_on_path = set(path)\n dropoffs = collections.defaultdict(list)\n for h in homes_idxs:\n closest_loc_on_path = min(locations_on_path,\n key=lambda loc: all_pairs_dists[h][loc])\n dropoffs[closest_loc_on_path].append(h)\n return dropoffs", "title": "" }, { "docid": "9d9faef29e71e2015590f2fad999bd78", "score": "0.4958681", 
"text": "def shortest_path(free_tiles, start, target):\n\n start_node = Node(None, start)\n target_node = Node(None, target)\n start_node.g = start_node.h = start_node.f = 0\n target_node.g = target_node.h = target_node.f = 0\n\n open_nodes = []\n closed_nodes = []\n\n heapq.heappush(open_nodes, (start_node.f, start_node))\n free_tiles[target] = True\n\n while len(open_nodes) > 0:\n current_node = heapq.heappop(open_nodes)[1]\n closed_nodes.append(current_node)\n\n if current_node == target_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1]\n\n # get the current node and all its neighbors\n neighbors = []\n i, j = current_node.position\n neighbors_pos = [(i, j) for (i, j) in [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)] if free_tiles[i, j]]\n\n for position in neighbors_pos:\n new_node = Node(current_node, position)\n neighbors.append(new_node)\n\n for neighbor in neighbors:\n if neighbor in closed_nodes:\n continue\n\n neighbor.g = current_node.g + 1\n neighbor.h = ((neighbor.position[0] - target_node.position[0]) ** 2) + (\n (neighbor.position[1] - target_node.position[1]) ** 2)\n neighbor.f = neighbor.g + neighbor.h\n\n if not any(node[1] == neighbor for node in open_nodes):\n heapq.heappush(open_nodes, (neighbor.f, neighbor))\n continue\n\n for open_node in open_nodes:\n if neighbor == open_node[1]:\n open_node[1].f = min(neighbor.f, open_node[1].f)\n break\n\n return [start]", "title": "" }, { "docid": "c2dfd1e512ec05cd4caa7076cf9e9dc3", "score": "0.49582008", "text": "def uniformCostSearch(problem):\n # initialize data structures\n priority_queue = util.PriorityQueue()\n visited_set = set()\n\n # dictionary to keep track of the highest priority/lowest cost of each node\n weights = {}\n\n # grab the start node\n # nodes are tuples of (x, y)\n start_node = problem.getStartState()\n\n # give the first node a weight of 0\n weights[start_node] = 0\n\n # create tuple of start node and an array containing the path it took to get there\n # the path array will be a list of directions\n # ex: ['North', 'West', 'East', 'South']\n node_tuple = (start_node, [])\n\n # push the tuple and initial weight of 0 into the priority queue\n priority_queue.push(node_tuple, 0)\n\n # begin algorithm, continue as long as there is a node in the priority queue\n while not priority_queue.isEmpty():\n popped_tuple = priority_queue.pop()\n node = popped_tuple[0]\n path = popped_tuple[1]\n\n # check if the node has been visited yet\n if node in visited_set:\n # skip this iteration of the loop\n continue\n\n # check if we've reached the end node\n if problem.isGoalState(node):\n # success, return the path it took to get here\n return path\n\n # mark the current node as visited, and grab the weight associated to it\n visited_set.add(node)\n cost = weights[node]\n\n # grab the all neighbors for the current node\n neighbors = problem.getSuccessors(node)\n for neighbor in neighbors:\n # a node is in the format ((x, y), 'Direction', Cost)\n next_node = neighbor[0]\n direction = neighbor[1]\n next_cost = neighbor[2]\n\n # create a tuple for the next node containing the node and the path it took to get to it\n next_node_tuple = (next_node, path + [direction])\n\n # check if the next node's weight has been previously calculated\n if next_node in weights:\n # check if this new cost for next node is better than the previously calculated one\n if weights[next_node] <= cost + next_cost:\n continue\n else:\n 
priority_queue.update(next_node_tuple, cost + next_cost)\n else:\n priority_queue.push(next_node_tuple, cost + next_cost)\n\n # update the weight of the next node in the dictionary if we got to this point\n weights[next_node] = cost + next_cost", "title": "" }, { "docid": "83b8eb134aaffeaf7af9a9e1b124d0f6", "score": "0.49567476", "text": "def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,\n best_path):\n # Checks if the values are nodes, otherwise convert them to nodes\n if type(start) != Node:\n start = Node(start)\n\n if type(end) != Node:\n end = Node(end)\n\n best_path += [start.name]\n\n # Raise an error if one or both nodes are not in the digraph\n\n if not (start in digraph.nodes and end in digraph.nodes):\n raise ValueError('Invalid Node')\n\n # Checks if the start node == ende node\n\n elif start.__eq__(end):\n # best_path += [end.name]\n best_dist = path[1] + int(end.get_total_distance())\n return (best_path), best_dist\n else:\n # Checks if the end node is in the destination nodel of all edges from the start node\n if end in [e.get_destination() for e in digraph.get_edges_for_node(start)]:\n\n # Get the edge between start and destination\n ed = [e for e in digraph.get_edges_for_node(\n start) if e.get_destination() == end][0]\n\n best_dist = path[1] + int(ed.get_total_distance())\n # best_path = path[0] + [end.name]\n\n else:\n # Check every edge\n for e in digraph.get_edges_for_node(start):\n if str(e.get_destination()) not in path[0]:\n t_path = [\n path[0] + [str(e.get_destination())], path[1] + int(e.get_total_distance()), path[2] + int(e.get_outdoor_distance())]\n\n print(e.get_destination())\n print(int(e.get_outdoor_distance()))\n\n best_path, best_dist = get_best_path(digraph, e.get_destination(\n ), end, t_path, max_dist_outdoors - int(e.get_outdoor_distance()), best_dist, best_path)\n\n if max_dist_outdoors >= 0:\n return best_path, best_dist\n else:\n return [], 0", "title": "" }, { "docid": "1c916b1dedcf9693410f4f266dd35e55", "score": "0.49484032", "text": "def aStarSearch(problem, heuristic):\n startState = problem.startingState()\n if problem.isGoal(startState):\n return []\n path = util.PriorityQueue()\n path.push( (startState, [], 0), 0)\n #print (\"startState:\", startState)\n visited = []\n\n actions = []\n actions2 = []\n pathList =[]\n pathList.append(startState)\n\n while not path.isEmpty():\n (state, actions, cost) = path.pop()\n #print (\"pathList before remove:\", pathList)\n #print (\"state[0]:\",state[0])\n pathList.remove(state)\n #print (\"pathlist after remove:\", pathList)\n\n if problem.isGoal(state):\n return actions\n\n if state not in visited:\n visited.append(state)\n\n successors = problem.successorStates(state)\n for(nextState, nextAction, newCost) in successors:\n actions2 = actions.copy()\n actions2.append(nextAction)\n priority = cost + newCost\n heuristicPriority = cost + heuristic(nextState, problem)\n if nextState not in visited:\n if nextState not in pathList:\n #print (\"nextState1stif:\", nextState)\n path.push( (nextState, actions2, priority), heuristicPriority)\n pathList.append(nextState)\n #print (\"pathListIn1stIf:\",pathList)\n elif nextState in pathList:\n if newCost > priority:\n #print (\"nextState2ndif:\", nextState)\n path.push( (nextState, actions2, priority), heuristicPriority)\n pathList.append(nextState)\n #print (\"pathListIn2ndIf:\",pathList)\n return actions\n\n util.raiseNotDefined()", "title": "" }, { "docid": "be043a31feffa9bf5ff6934ac57828c2", "score": "0.49471185", "text": "def 
build_neighbor_dictionary(mmps, no_chiral=False):\n print(\"Analyzing neighborhoods\")\n\n neighs = {}\n for line in mmps:\n smiles_lhs, smiles_rhs, id_lhs, id_rhs, transf, const = line.split(\"\\t\")\n if no_chiral and \"@\" in transf:\n continue\n var_lhs, var_rhs = transf.split(\">>\")\n # Skip pair if the transformation has more than one anchoring point\n # and the topological distance changes between those two (no reason to assume additivity then)\n if \"[*:2]\" in var_lhs:\n a = Chem.MolFromSmarts(var_lhs)\n b = Chem.MolFromSmarts(var_rhs)\n a_idx1 = [atom.GetSmarts() for atom in a.GetAtoms()].index(\"[*:1]\")\n a_idx2 = [atom.GetSmarts() for atom in a.GetAtoms()].index(\"[*:2]\")\n b_idx1 = [atom.GetSmarts() for atom in b.GetAtoms()].index(\"[*:1]\")\n b_idx2 = [atom.GetSmarts() for atom in b.GetAtoms()].index(\"[*:2]\")\n if (\n not Chem.GetDistanceMatrix(a)[a_idx1, a_idx2]\n == Chem.GetDistanceMatrix(b)[b_idx1, b_idx2]\n ):\n continue\n if \"[*:3]\" in var_lhs:\n a_idx3 = [atom.GetSmarts() for atom in a.GetAtoms()].index(\"[*:3]\")\n b_idx3 = [atom.GetSmarts() for atom in b.GetAtoms()].index(\"[*:3]\")\n if (\n not Chem.GetDistanceMatrix(a)[a_idx1, a_idx3]\n == Chem.GetDistanceMatrix(b)[b_idx1, b_idx3]\n ):\n continue\n if (\n not Chem.GetDistanceMatrix(a)[a_idx2, a_idx3]\n == Chem.GetDistanceMatrix(b)[b_idx2, b_idx3]\n ):\n continue\n # Add to neighbor dictionary\n if id_lhs in neighs.keys():\n if id_rhs not in [i[0] for i in neighs[id_lhs]]:\n neighs[id_lhs].append((id_rhs, transf))\n else:\n id_rhs_idx = [i[0] for i in neighs[id_lhs]].index(id_rhs)\n old_transf_len = len(neighs[id_lhs][id_rhs_idx][1])\n if len(transf) < old_transf_len:\n neighs[id_lhs][id_rhs_idx] = (id_rhs, transf)\n else:\n neighs[id_lhs] = [(id_rhs, transf)]\n\n return neighs", "title": "" }, { "docid": "56fe543b05c790dff6760acbb77c2cd6", "score": "0.49465218", "text": "def insert_cost(expand_paths, list_of_path):\n\n list_of_path = expand_paths + list_of_path\n list_of_path.sort(key=lambda l: [l.g, l.route])\n return list_of_path", "title": "" }, { "docid": "c3050daaa924cdf09dd8df0e0367eec5", "score": "0.49463895", "text": "def findQuickestPath(map, start,end):\n # check arguments\n if(not isinstance(map,CityMap)):\n raise ValueError('map is not type of CityMap')\n \n if(not map.contains(start) or not map.contains(end)):\n raise ValueError('map does not contain start and/or end')\n \n if(start == end):\n raise ValueError('Start and End is the same. 
This is not allowed.')\n\n # to keep track of parent node from a bfs perspective \n previous_node = {}\n previous_node[start] = None\n \n # to keep track of local optimal\n time_till_now = {}\n time_till_now[start] = 0\n \n # Queue to hold the nodes for bfs according to priority\n visitedQueue = []\n # Priority Queue of bfs where the priority is greedy based on dynamically evaluated local optimal solution\n # items are tuples of type (cost,node) where cost the the time taken to reach node\n heapq.heappush(visitedQueue, (0,start))\n \n while not len(visitedQueue) == 0:\n # pop is best optimal solution\n currentNode = heapq.heappop(visitedQueue)[1]\n\n \n if currentNode == end:\n logging.debug('Goal %r found - breaking out' %currentNode)\n break \n \n logging.debug('Visiting %r' %currentNode)\n \n connectedNodes = map.getConnectedNodes(currentNode)\n \n for nextNode in connectedNodes:\n\n # avoid going back to parent \n if nextNode == previous_node[currentNode]:\n continue\n \n # find the optimal time taken to reach nextNode\n # getEdgeTime gives the time to go from currentNode to nextNode\n # getWaitTime gives the time to wait on nextNode once nextNode is reached.\n time_to_reach = time_till_now[currentNode] + map.getEdgeTime(currentNode, nextNode) + map.getWaitTime(currentNode,nextNode)\n \n if nextNode not in time_till_now or time_to_reach < time_till_now[nextNode]:\n time_till_now[nextNode] = time_to_reach\n previous_node[nextNode] = currentNode\n\n priority = time_to_reach\n heapq.heappush(visitedQueue,(priority,nextNode))\n\n # Now Recontruct the path\n nextNode = end\n path = [end]\n \n while nextNode != start:\n nextNode = previous_node[nextNode]\n path.append(nextNode)\n \n path.reverse() \n return path,time_till_now[end]", "title": "" }, { "docid": "cdd1d2cb22948241f8929dcbadce83c5", "score": "0.4944419", "text": "def successors_heuristic(temp,cost):\n suc = city_routes[temp[-1]]\n if cost == \"distance\":\n return [[float(s[-3])+float(temp[1])+heuristic((s[0]),end_city)]+[float(s[-3])+float(temp[1])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]\n elif cost == \"time\":\n return [[float(s[-3])/float(s[-2])+float(temp[1])+heuristic((s[0]),end_city)/100.0]+[float(s[-3])/float(s[-2])+float(temp[1])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]\n elif cost == \"segments\":\n return [[1+float(temp[1])+heuristic((s[0]),end_city)/3000]+[1+float(temp[1])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]\n elif cost == \"longtour\":\n return [[-(float(s[-3])+float(temp[1])+ (heuristic((s[0]),end_city)))]+[float(s[-3])+float(temp[1])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]\n elif cost == \"statetour\":\n return\n else:\n return [[float(s[-3])+float(temp[1])+heuristic((s[0]),end_city)]+[float(s[-3])+float(temp[1])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]", "title": "" }, { "docid": "c6fa212822e157dcbd6e0573ec63b83e", "score": "0.49442226", "text": "def _update_distances(self):\n dist_shape = self._representation.shape\n self._distances = np.full((dist_shape[0], dist_shape[1]), -1, dtype=int)\n # print(self._distances.shape)\n # print(self._origin)\n # print(self._agent_position)\n agent_in_matrix = self._from_relative_to_matrix(self._agent_position)\n queue = deque([(agent_in_matrix, 0)])\n while len(queue) > 0:\n pos, dist = queue.popleft()\n if self._distances[pos[0], pos[1]] == -1: # to avoid infinite 
loop\n self._distances[pos[0], pos[1]] = dist\n for direction in global_variables.MOVING_DIRECTIONS: # ADD ALSO ROTATIONS?\n new_pos = direction + pos\n if GridMap.coord_inside_matrix(new_pos, dist_shape):\n if self._get_value_of_cell(new_pos,self._distances) == global_variables.UNKNOWN_CELL:\n cell_value = self._get_value_of_cell(new_pos, self._path_planner_representation)\n if GridPathPlanner.is_walkable(cell_value):\n queue.append((new_pos, dist + 1))", "title": "" }, { "docid": "77218d034fbc7420303be6883bf44ab8", "score": "0.49390548", "text": "def find_path_nodes(aux_structures, node1, node2, cost_function):\n expanded = set()\n agenda = [(0, node1, [node1])]\n\n while len(agenda) != 0:\n # pop lowest cost path from agenda\n cost, node, path = sort_by_cost(\n aux_structures, agenda, node2, False)[0]\n del agenda[agenda.index((cost, node, path))]\n\n if node in expanded:\n continue\n\n # return path if current node is goal\n\n if node == node2:\n return path\n # else ready to expand it\n expanded.add(node)\n\n children = get_children(node, aux_structures)\n\n for child in children:\n if child not in expanded:\n if cost_function == compute_distance:\n add_cost = cost_function(node, child, aux_structures)\n else:\n add_cost = cost_function(\n node, child, aux_structures, children)\n new_path = path.copy() + [child]\n new_cost = cost + add_cost\n agenda.append((new_cost, child, new_path))", "title": "" }, { "docid": "3bf7e7d456a774e6f7a3b76a21446da4", "score": "0.49361777", "text": "def dfs(self, starting_vertex, destination_vertex, graph):\n \n \n # q = Queue()\n # listy = [starting_vertex]\n # q.enqueue(listy)\n # bfs_visited_set = set()\n # graph_2 = []\n # while q.size() > 0:\n\n # number = q.dequeue()\n # # if number[-1] == 0:\n # # pass\n # # else:\n # # graph_2.append(graph[number[-1]])\n # if number[-1] == destination_vertex:\n # # bfs_visited_set.add(number)\n # return number\n # if number[-1] not in bfs_visited_set:\n # neighbors = self.get_neighbors(number[-1], graph)\n # bfs_visited_set.add(number[-1])\n # # print(number[-1])\n # # print(graph_2)\n # # print(neighbors)\n # for n in neighbors:\n # temp_listy = number.copy()\n # temp_listy.append(n)\n # q.enqueue(temp_listy)\n \n s = Stack()\n listy = [starting_vertex]\n s.push(listy)\n bfs_visited_set = set()\n \n while s.size() > 0:\n \n number = s.pop()\n if number[-1] == destination_vertex:\n # bfs_visited_set.add(number)\n return number\n\n if number[-1] not in bfs_visited_set:\n neighbors = self.get_neighbors(number[-1], graph)\n bfs_visited_set.add(number[-1])\n # print(number[-1])\n # print(neighbors)\n for n in neighbors:\n temp_listy = number.copy()\n temp_listy.append(n)\n s.push(temp_listy)", "title": "" }, { "docid": "2a63e3d5ea52fdabcdd1330fdbdbc906", "score": "0.4933533", "text": "def relax_edges(source, graph, priority_queue, distances, predecessors, visited):\n visited.add(source)\n for neighbor in graph[source]:\n if neighbor.value not in visited:\n new_distance = distances[source] + neighbor.weight\n if new_distance < distances[neighbor.value]:\n distances[neighbor.value] = new_distance\n predecessors[neighbor.value] = source\n heapq.heappush(priority_queue, (new_distance, neighbor.value))", "title": "" }, { "docid": "95050802278b618a2f2d96fa7aacd01d", "score": "0.49325344", "text": "def shift_cities(*, road_map: RoadMap) -> RoadMap:\n last_city = road_map.pop()\n road_map.insert(0, last_city)\n return road_map", "title": "" }, { "docid": "c1659458ad3d83ebbbe2a93a9ae2adc5", "score": "0.4920952", "text": "def 
flip1city(self, city):\n (before, after) = self.neighbors[city]\n self.neighbors[city] = (after, before)", "title": "" }, { "docid": "fc25f2c23303c495630272bf9dfb9ff7", "score": "0.49204373", "text": "def dfs(start,goal):\n\n path = []\n depth = []\n\n # initialize variables\n max_depth = 0 \n\n # frontier variables \n max_fringe_size = 0\n\n # initialize a stack object and add the start location to it:\n\n queue = deque()\n queue.append(start)\n\n # initialize a set() object for visited list and add the start location to it\n visited = set()\n visited.add(start)\n\n # initialize stack object for depth calculation\n\n depth = deque()\n depth.append(0)\n\n\n # define an empty dictionary, where you'll record how you moved through the grid and a goal location,\n branch = {}\n found = False\n \n \n while queue:\n # deque and store the explored node\n # current_node = queue.get()\n current_node = queue.pop()\n visited.add(current_node)\n # dep = depth.get()\n dep = depth.pop()\n\n \n \n # goal check\n if current_node == goal:\n print('Found the Solution')\n found = True\n break\n else:\n count = 0\n for action in valid_actions(current_node):\n # get movement indicator from actions list\n da = action.delta\n \n # tuple -> grid transformation\n grid = np.array(current_node).reshape(3,-1)\n \n # find grid index of 0\n index = np.where(grid == 0)\n x,y = int(index[0]),int(index[1])\n \n #grid manipulation to exchange 0 and neighbor elements. \n grid[x+da[0],y+da[1]],grid[x,y] = grid[x,y],grid[x+da[0],y+da[1]]\n \n # grid -> tuple transformation\n next_node = tuple(grid.flatten().tolist())\n \n\n # Check if the new node has been visited before.\n # If the node has not been visited:\n # 1. Mark it as visited\n # 2. Add it to the queue\n # 3. Add how I got there to branch\n if next_node not in visited:\n visited.add(next_node)\n # queue.put(next_node)\n queue.append(next_node)\n # depth.put(dep+1)\n depth.append(dep+1)\n \n branch[next_node] = (current_node, action)\n count += 1\n\n fringe_size = len(queue)\n if fringe_size > max_fringe_size:\n max_fringe_size = fringe_size\n\n if count > 0:\n if dep + 1 > max_depth:\n max_depth = dep + 1\n\n\n nodes = 0 \n \n if found:\n\n nodes = len(branch)\n \n # traceback to find the depth by using of the branch dictionary.\n n = goal\n # print(branch[n][0])\n while branch[n][0] != start:\n \n path.append(branch[n][1])\n n = branch[n][0]\n \n path.append(branch[n][1])\n\n \n return path[::-1],nodes,max_depth,fringe_size", "title": "" }, { "docid": "eb372afc3cf05bb6feaef698b96aca5c", "score": "0.49183238", "text": "def find_path(aux_structures, loc1, loc2, cost_function):\n n1, n2 = nearest_nodes(aux_structures, loc1, loc2)\n short_path_nodes = find_path_nodes(aux_structures, n1, n2, cost_function)\n\n if short_path_nodes is not None:\n short_path_locs = nodes_to_locs(aux_structures, short_path_nodes)\n\n return short_path_locs", "title": "" }, { "docid": "9f9001a8996798493b195aa1fefcdf9a", "score": "0.49130678", "text": "def shortest_path(self, id1: int, id2: int) -> (float, list):\n\n nodes = self.graph.get_all_v()\n if id1 not in nodes or id2 not in nodes: # If not exist\n return None\n visited = [] # Visited nodes list\n heap_min = [] # Min binomial heap\n prev_nodes = dict()\n for x in self.graph.graph_v.keys():\n nodes[x].tag = math.inf\n nodes[id1].tag = 0\n heapq.heappush(heap_min, (nodes[id1].tag, id1)) # Push to heap\n\n while len(heap_min) > 0:\n v = heapq.heappop(heap_min)[1] # get the node with the smallest tag\n for node_neighbor in 
self.graph.all_out_edges_of_node(v).keys(): # from neighbors\n if node_neighbor not in visited: # check if visited\n # if node_neighbor in self.graph.all_out_edges_of_node(v).keys(): # not search null\n\n visited.append(node_neighbor)\n alt_path = nodes[v].tag + self.graph.all_out_edges_of_node(v)[\n node_neighbor] # tag + edge weight\n\n if self.graph.get_all_v()[node_neighbor].tag > alt_path:\n self.graph.get_node(node_id=node_neighbor).tag = alt_path\n prev_nodes[node_neighbor] = v\n heapq.heappush(heap_min, (alt_path, node_neighbor)) # add to heap the node id by tag\n node_key = id2\n li_return = [] # The path\n while self.graph.get_all_v()[node_key].tag > 0:\n li_return.append(node_key)\n if node_key not in prev_nodes.keys():\n return -1, {}\n else:\n node_key = prev_nodes[node_key]\n li_return.append(node_key)\n li_return.reverse()\n return self.graph.get_all_v()[id2].tag, li_return\n\n # raise NotImplementedError", "title": "" }, { "docid": "f3c72a95ef7517b32394baead2c34994", "score": "0.49123624", "text": "def ucs(graph, start, goal):\n\n reset_graph_info(graph, start)\n queue = PriorityQueue()\n queue.push(start, 0)\n visited = {start}\n\n while not queue.is_empty():\n node = queue.pop()\n\n for neighbor in graph.neighbors(node):\n cost = get_cost(graph, node, neighbor)\n\n if neighbor == goal:\n return construct_path(graph, neighbor)\n\n if neighbor not in visited:\n queue.push(neighbor, cost)\n visited.add(neighbor)\n return []", "title": "" }, { "docid": "49b6ecd34cb4b3e7d98b9d8973fdd263", "score": "0.490902", "text": "def str_paths(nx1):\n\n def iter1(g1, d2, site):\n\n keys1 = g1.keys()\n sites2 = [i for i in keys1 if ((i != site) & (i < 10000000))]\n if not sites2:\n output = [site]\n else:\n len1 = [d2[site][i] for i in sites2]\n down_site = sites2[np.argmin(len1)]\n output = g1[down_site]\n return output\n\n ## Determine all paths\n p1 = nx.all_pairs_shortest_path(nx1)\n d1 = nx.all_pairs_dijkstra_path_length(nx1, None, 'len')\n\n ## Make list of all sites\n sites = [i for i in nx1.nodes() if (i < 10000000)]\n\n ## Extract the paths for all sites (into a dict)\n p2 = {i: p1[i] for i in sites}\n d2 = {i: d1[i] for i in sites}\n\n site_nodes = {i: iter1(p2[i], d2, i) for i in p2}\n site_paths = {i: [j[2] for j in nx1.out_edges(site_nodes[i], data='num')][0:-1] for i in site_nodes}\n return site_nodes, site_paths", "title": "" }, { "docid": "c6f6a3e908e2c6c0d01dbd7593df4465", "score": "0.49035507", "text": "def bfs(start,goal):\n\n path = []\n depth = []\n\n # initialize variables\n max_depth = 0 \n\n # initialize a Queue() object and add the start location to it:\n queue = Queue()\n queue.put(start)\n # initialize a set() object for visited list and add the start location to it\n visited = set()\n visited.add(start)\n\n # initialize Queue() object for depth calculation\n depth = Queue()\n depth.put(0)\n\n\n # define an empty dictionary, where you'll record how you moved through the grid and a goal location,\n branch = {}\n found = False\n\n max_fringe_size = 0 \n \n \n while not queue.empty():\n # deque and store the explored node\n current_node = queue.get()\n visited.add(current_node)\n dep = depth.get()\n \n \n # goal check\n if current_node == goal:\n print('Found the Solution')\n found = True\n break\n else:\n for action in valid_actions(current_node):\n # get movement indicator from actions list\n da = action.delta\n \n # tuple -> grid transformation\n grid = np.array(current_node).reshape(3,-1)\n \n # find grid index of 0\n index = np.where(grid == 0)\n x,y = 
int(index[0]),int(index[1])\n \n #grid manipulation to exchange 0 and neighbor elements. \n grid[x+da[0],y+da[1]],grid[x,y] = grid[x,y],grid[x+da[0],y+da[1]]\n \n # grid -> tuple transformation\n next_node = tuple(grid.flatten().tolist())\n \n\n # Check if the new node has been visited before.\n # If the node has not been visited:\n # 1. Mark it as visited\n # 2. Add it to the queue\n # 3. Add how I got there to branch\n if next_node not in visited:\n visited.add(next_node)\n queue.put(next_node)\n depth.put(dep+1)\n branch[next_node] = (current_node, action)\n\n fringe_size = queue.qsize()\n if fringe_size > max_fringe_size:\n max_fringe_size = fringe_size\n\n if dep + 1 > max_depth:\n max_depth = dep + 1\n\n nodes = 0\n\n if found:\n\n nodes = len(branch)\n \n # traceback to find the depth by using of the branch dictionary.\n n = goal\n #print(branch[n][0])\n while branch[n][0] != start:\n \n path.append(branch[n][1])\n n = branch[n][0]\n \n path.append(branch[n][1])\n\n return path[::-1],nodes,max_depth,max_fringe_size", "title": "" }, { "docid": "f1509d8140cbcc59d53125ef6a928b06", "score": "0.48972273", "text": "def navigate(self, car, source, destination):\n # since we currently doesn't use multi-thread for each car, we can use heapq for better performance\n heap = []\n times = {} # key: intersection, value: time\n backPtr = {} # key: intersection, value: intersection\n routeWeights = {}\n sourceInter = source.getTarget()\n\n if destination.isIntersection():\n targetInter = destination.getIntersection()\n else:\n targetInter = destination.getRoad().getSource()\n\n begin = RouteWeight(0, sourceInter)\n times[sourceInter] = 0\n routeWeights[sourceInter] = begin\n heapq.heappush(heap, begin)\n\n while heap:\n curt = heapq.heappop(heap)\n\n if curt.intersection == targetInter:\n break\n\n neighborData = self.realMap.neighborAndTime(curt.intersection)\n for t, nextInter in neighborData:\n newTime = min(t + curt.time, Navigator.MAX_TIME)\n if nextInter in times:\n if newTime < times[nextInter]:\n key = routeWeights[nextInter]\n key.time = newTime\n heapq.heapify(heap)\n times[nextInter] = newTime\n backPtr[nextInter] = curt.intersection\n else:\n backPtr[nextInter] = curt.intersection\n times[nextInter] = newTime\n nextRouteWeight = RouteWeight(newTime, nextInter)\n routeWeights[nextInter] = nextRouteWeight\n heapq.heappush(heap, nextRouteWeight)\n\n route = self.extractPath(targetInter, backPtr, car)\n if not destination.isIntersection():\n route.append(destination.getRoad())\n\n return route", "title": "" }, { "docid": "c4e3c4e20f032f03f60598f9c3a35683", "score": "0.48837945", "text": "def dijkstras_shortest_path(initial_position, destination, graph, adj):\n q = [(0, initial_position)] # node queue\n visited = {initial_position: (0, None)} # dist, prev\n\n # while queue not empty\n while q:\n cur_dist, cur = heappop(q) # get current\n if cur == destination: # check success\n path = [cur]\n back = visited[cur][1]\n while back and back is not initial_position: # backpathing\n path = [back] + path\n back = visited[back][1]\n return [initial_position] + path\n for pos, cost in adj(graph, cur): # for each neighbour\n if cost is not inf:\n pathcost = cost + cur_dist\n if not visited.get(pos) or pathcost < visited[pos][0]:\n visited[pos] = (pathcost, cur)\n heappush(q, (pathcost, pos))\n return None", "title": "" }, { "docid": "52c1a73b7556ad111463f40a29d4222c", "score": "0.48833454", "text": "def shortestPathDFS(digraph, start, end, maxTotalDist, maxDistOutdoors):\r\n\r\n output = None\r\n\r\n 
stack = Queue.LifoQueue()\r\n stackDist = Queue.LifoQueue()\r\n stackOutDist = Queue.LifoQueue()\r\n stepCounter = 0\r\n\r\n stack.put([Node(start)])\r\n stackDist.put(0)\r\n stackOutDist.put(0)\r\n\r\n checkNodesExist(digraph, start, end)\r\n\r\n while not stack.empty():\r\n tmpPath = stack.get()\r\n # print tmpPath\r\n tmpPathDist = stackDist.get()\r\n tmpPathOutDist = stackOutDist.get()\r\n stepCounter += 1\r\n\r\n start = tmpPath[-1]\r\n\r\n if start == Node(end):\r\n\r\n if output == None or tmpPathDist < maxTotalDist:\r\n output = tmpPath\r\n outputDist, outputOutDist = tmpPathDist, tmpPathOutDist\r\n maxTotalDist = outputDist\r\n\r\n else:\r\n for cNode in digraph.childrenOf(start):\r\n if cNode not in tmpPath:\r\n updateTmpPathDist = tmpPathDist + \\\r\n digraph.getWeight(start, cNode)[0]\r\n updateTmpPathOutDist = tmpPathOutDist + \\\r\n digraph.getWeight(start, cNode)[1]\r\n\r\n if updateTmpPathDist <= maxTotalDist:\r\n if updateTmpPathOutDist <= maxDistOutdoors:\r\n\r\n updateTmpPath = tmpPath + [cNode]\r\n\r\n stack.put(updateTmpPath)\r\n stackDist.put(updateTmpPathDist)\r\n stackOutDist.put(updateTmpPathOutDist)\r\n\r\n if output == None or len(output) <= 1:\r\n raise ValueError('Path not found!')\r\n else:\r\n # return path, steps taken, and total path cost as tuple\r\n return output, stepCounter, maxTotalDist", "title": "" }, { "docid": "21093d36add3b716cbbf0f0d119d8ec7", "score": "0.48831803", "text": "def shortestPathBFS(digraph, start, end, maxTotalDist, maxDistOutdoors):\r\n output = None\r\n\r\n q = Queue.Queue()\r\n qDist = Queue.Queue()\r\n qOutDist = Queue.Queue()\r\n stepCounter = 0\r\n\r\n q.put([Node(start)])\r\n qDist.put(0)\r\n qOutDist.put(0)\r\n\r\n checkNodesExist(digraph, start, end)\r\n\r\n while not q.empty():\r\n tmpPath = q.get()\r\n # print tmpPath\r\n tmpPathDist = qDist.get()\r\n tmpPathOutDist = qOutDist.get()\r\n stepCounter += 1\r\n\r\n start = tmpPath[-1]\r\n\r\n if start == Node(end):\r\n\r\n if output == None or tmpPathDist < maxTotalDist:\r\n output = tmpPath\r\n outputDist, outputOutDist = tmpPathDist, tmpPathOutDist\r\n maxTotalDist = outputDist\r\n\r\n else:\r\n for cNode in digraph.childrenOf(start):\r\n if cNode not in tmpPath:\r\n updateTmpPathDist = tmpPathDist + \\\r\n digraph.getWeight(start, cNode)[0]\r\n updateTmpPathOutDist = tmpPathOutDist + \\\r\n digraph.getWeight(start, cNode)[1]\r\n\r\n if updateTmpPathDist <= maxTotalDist:\r\n if updateTmpPathOutDist <= maxDistOutdoors:\r\n\r\n updateTmpPath = tmpPath + [cNode]\r\n\r\n q.put(updateTmpPath)\r\n qDist.put(updateTmpPathDist)\r\n qOutDist.put(updateTmpPathOutDist)\r\n\r\n if output == None or len(output) <= 1:\r\n raise ValueError('Path not found!')\r\n else:\r\n # return path, steps taken, and total path cost as tuple\r\n return output, stepCounter, maxTotalDist", "title": "" }, { "docid": "d418ddc114f49c415a0c81d4e2126a4c", "score": "0.48792958", "text": "def shortest_route(G: Dict[str, List[str]], a: str, b: str) -> List[str]:\n prevnodes = {a: None}\n q = deque([a])\n while q:\n n = q.popleft()\n if n == b:\n return reconstruct_path(prevnodes, a, b)\n if n in G:\n for neighbor in G[n]:\n if neighbor not in prevnodes:\n prevnodes[neighbor] = n\n q.append(neighbor)\n return None", "title": "" }, { "docid": "2285567a686aa3a1aeba44b296a41ac8", "score": "0.4876595", "text": "def old_to_new_path(self):\n try:\n new_paths = defaultdict(list)\n new_weights = defaultdict()\n for pathid in self.old_graph.paths:\n path = self.old_graph.paths[pathid]\n curr_new = 
self.old_to_new[path[0]][0]\n new_paths[pathid].append(curr_new)\n new_weights[pathid] = self.old_graph.weights[pathid]\n for i in range(len(self.old_graph.paths[pathid])):\n if path[i] in self.new_to_old[curr_new]:\n continue\n curr_new = self.old_to_new[path[i]][0]\n new_paths[pathid].append(curr_new)\n self.new_graph.paths = new_paths\n self.new_graph.weights = new_weights\n except Exception as e:\n print(\"old_to_new_path\",e)", "title": "" }, { "docid": "302b6cf42d8b19f2445fa7b78017dff3", "score": "0.48654762", "text": "def compute_shortest_paths(source_vertex, destination_vertices, zero=0):\n source_vertex.length = zero\n heap = Heap()\n heap.insert(source_vertex, zero)\n completed = set()\n unseen_dests = set(destination_vertices)\n while heap and unseen_dests:\n u, length = heap.remove_min()\n completed.add(u)\n unseen_dests.discard(u)\n for edge in u.outgoing_edges:\n v = edge.vertex_to\n new_length = length + edge.weight\n if v.length is None:\n v.length = new_length\n v.parent_edge = edge\n heap.insert(v, new_length)\n elif v not in completed and new_length < v.length:\n v.length = new_length\n v.parent_edge = edge\n heap.decrease_key(v, new_length)\n return completed", "title": "" }, { "docid": "8e50cff67a3af8774971991ff9334a1f", "score": "0.48591265", "text": "def find_short_path(aux_structures, loc1, loc2):\n\n return find_path(aux_structures, loc1, loc2, compute_distance)", "title": "" }, { "docid": "92987940a2fdc19d712ee56da8f1dc12", "score": "0.4855189", "text": "def dijkstras_shortest_path_to_all(initial_position, graph, adj):\n level = {**graph[\"spaces\"],**{pos:1.0 for pos in graph[\"waypoints\"].values()}} # recomprehend graph\n level_cost = {}\n for pos in level: # for every reachable space\n p = dijkstras_shortest_path(initial_position, pos, graph, adj) # find path\n cost = 0 if p else inf\n if p:\n for i in range(len(p)-1):\n a = p[i]\n b = p[i+1]\n cost = cost + (level[a] + level[b]) / 2 * (1 if abs(a[0]-b[0]) + abs(a[1]-b[1]) > 1 else sqrt(2)) #calc path cost\n level_cost[pos] = cost\n return level_cost", "title": "" }, { "docid": "dde9d24b9d7edcee354ac4575e476e7f", "score": "0.4853709", "text": "def shortest_paths(\n graph: Graph,\n start_curies: List[CURIE],\n end_curies: Optional[List[CURIE]] = None,\n predicate_weights: Optional[PREDICATE_WEIGHT_MAP] = None,\n directed=False,\n) -> Iterator[Tuple[CURIE, CURIE, List[CURIE]]]:\n if directed:\n dg = as_digraph(graph, reverse=False)\n else:\n dg = as_graph(graph, predicate_weights=predicate_weights)\n logging.info(f\"Calculating paths, starts={start_curies}\")\n for start_curie in start_curies:\n if not dg.has_node(start_curie):\n logging.info(f\"Skipping {start_curie} because it is not in the graph\")\n continue\n if end_curies:\n this_end_curies = end_curies\n else:\n this_end_curies = list(nx.ancestors(dg, start_curie))\n logging.info(f\"Calculating distances for {start_curie}\")\n for end_curie in set(this_end_curies):\n if not dg.has_node(end_curie):\n logging.info(f\"Skipping {end_curie} because it is not in the graph\")\n continue\n logging.debug(f\"COMPUTING {start_curie} to {end_curie}\")\n try:\n if directed:\n paths = nx.all_simple_paths(dg, source=start_curie, target=end_curie)\n else:\n paths = nx.all_shortest_paths(\n dg,\n source=start_curie,\n target=end_curie,\n weight=\"weight\",\n method=\"bellman-ford\",\n )\n for path in paths:\n yield start_curie, end_curie, path\n except nx.NetworkXNoPath:\n logging.info(f\"No path between {start_curie} and {end_curie}\")", "title": "" }, { "docid": 
"354c746af2fce25ef78e885f2ee12ea5", "score": "0.48510998", "text": "def path_cost(self, c, state1, action, state2):\n if (self.push_costs != []):\n for box_index in range(len(state2.boxes)):\n if (state2.boxes[box_index] != state1.boxes[box_index]):\n assert (1.5 > distance_between_two_points(state2.boxes[box_index], state1.boxes[box_index]))\n return c + self.push_costs[box_index]\n return c + 1", "title": "" }, { "docid": "83f36c1ee51806c7c2421db29f7714a1", "score": "0.48510763", "text": "def find_path(self, starting_node, target_node):\n\t\tc = starting_node #current node\n\t\td = [(starting_node, 0),] #queue\n\t\tv = [] #visited\n\t\tshortest_path = {}\n\t\tfor i in self.nodes:\n\t\t\tshortest_path[i] = Distance()\n\t\tshortest_path[c].distance = 0\n\t\t\n\t\twhile d:\n\t\t\tcurr = d[0]\n\t\t\tc = curr[0]\n\n\t\t\tfor i in self.nodes[c].outbound:\n\t\t\t\tif i not in v:\n\t\t\t\t\td.append((i, self.nodes[c].outbound[i] + curr[1]),) # the 'cuur[0]' add the current dist to start\n\n\t\t\t\t\tif shortest_path[i].distance == None:\n\t\t\t\t\t\tshortest_path[i].distance = self.nodes[c].outbound[i] + curr[1]\n\t\t\t\t\t\tshortest_path[i].origin = c\n\t\t\t\t\telif shortest_path[i].distance > self.nodes[c].outbound[i] + curr[1]:\n\t\t\t\t\t\tshortest_path[i].distance = self.nodes[c].outbound[i] + curr[1]\n\t\t\t\t\t\tshortest_path[i].origin = c\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\t\t\t\td.sort(lambda x, y: cmp(x[1], y[1])) #sort the queue stack\n\t\t\td.pop(0) #remove curr from stack\n\t\t\tv.append(curr[0]) #add curr to visited list (no more checking)\n\n\t\t\tif c == target_node: #stop the loop if target reached\n\t\t\t\tbreak\n\t\tprint 'shortest routes found:'\n\t\tfor i in shortest_path:\n\t\t\tprint i + ': ' + str(shortest_path[i].distance) + ' via ' + str(shortest_path[i].origin)\n\t\t\t\n\t\ta = []\n\t\tstep = target_node\n\t\tprint \"\\n\"\n\t\tprint('=> Distance from ' + starting_node + ' to ' + target_node + ': ' + str(shortest_path[step].distance))\n\t\tif shortest_path[step].distance != None:\n\t\t\ta = []\n\t\t\twhile step != starting_node:\n\t\t\t\ta.append(step)\n\t\t\t\tstep = shortest_path[step].origin\n\t\t\ta.append(step)\n\t\t\ta.reverse() #reverse since we got steps in reverse order\n\t\t\tprint '=> ' + str(a)\n\t\telse:\n\t\t\tprint '=> No route to %s to %s found' % (target_node, starting_node)\n\t\treturn", "title": "" }, { "docid": "88774fcd64ca37471acc2328467d5d0b", "score": "0.4850265", "text": "def traveling_salesman_problem(cities):\r\n cities_quantity = len(cities)\r\n\r\n # lista 2D roads przechowuje wartosc kazdej drogi z kazdego miasta do kazdego miasta\r\n roads = [[0 for _ in range(cities_quantity)] for _ in range(cities_quantity)]\r\n for row in range(cities_quantity):\r\n for col in range(cities_quantity):\r\n if row == col:\r\n continue\r\n roads[row][col] = hypot(cities[row][0] - cities[col][0], cities[row][1] - cities[col][1])\r\n # dzieki temu np roads[0][4] (i roads[4][0]) daja w prosty sposob wartosc odleglosci od miasta pierwszego do piatego\r\n\r\n if mode == 2:\r\n return traveling_salesman_problem_brute_force(roads)\r\n else:\r\n return traveling_salesman_problem_a_star(roads)", "title": "" }, { "docid": "5772dc62fc32c4c7eb7ca410bc1f3a86", "score": "0.484936", "text": "def successors(temp,cost=\"bfs/dfs\"):\n suc = city_routes[temp[-1]]\n res = []\n if cost == \"distance\":\n return [[int(s[-3])+int(temp[0])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+ [temp[-1]]+ [s[0]] for s in suc]\n elif cost == \"time\":\n return 
[[float(s[-3])/float(s[-2])+float(temp[0])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]\n elif cost == \"longtour\":\n return [[-(int(s[-3]))+int(temp[0])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]\n elif cost == \"segments\":\n return [[1+int(temp[0])]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]] for s in suc]\n elif cost == \"statetour\":\n for s in suc:\n if s[0].split(\",\")[-1][1:] not in states:\n continue\n if s[0].split(\",\")[-1][1:] not in visited_states and s[0].split(\",\")[-1][1:] in states:\n res.append([((int(s[-3])) + int(temp[0]))-len(visited_states)*10000] + [int(s[-3])] + [float(s[-3]) / float(s[-2])] + [s[-1]] + [temp[-1]] + [s[0]])\n else:\n res.append([int(s[-3]) + int(temp[0])] + [int(s[-3])] + [float(s[-3]) / float(s[-2])] + [s[-1]] + [temp[-1]] + [s[0]])\n return res\n else: #if bfs and dfs\n return [[0]+[int(s[-3])]+[float(s[-3])/float(s[-2])]+[s[-1]]+[temp[-1]]+[s[0]]for s in suc]", "title": "" } ]
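The negative passages in the block above are mostly variations on weighted shortest-path search (Dijkstra, uniform-cost search, A*, BFS) with per-implementation bookkeeping. For comparison, a minimal self-contained Dijkstra sketch over an adjacency-dict graph is given below; it is illustrative only, the function and variable names are not taken from any passage above, and non-negative edge weights are assumed.

import heapq

def dijkstra(graph, start, goal):
    # graph: {node: [(neighbor, weight), ...]}, weights assumed non-negative
    dist = {start: 0}
    prev = {}
    heap = [(0, start)]
    visited = set()
    while heap:
        d, node = heapq.heappop(heap)
        if node in visited:
            continue
        visited.add(node)
        if node == goal:
            break
        for neighbor, weight in graph.get(node, []):
            nd = d + weight
            if nd < dist.get(neighbor, float("inf")):
                dist[neighbor] = nd
                prev[neighbor] = node
                heapq.heappush(heap, (nd, neighbor))
    if goal not in dist:
        return float("inf"), []
    # Reconstruct the path by walking predecessors back from the goal.
    path, node = [goal], goal
    while node != start:
        node = prev[node]
        path.append(node)
    return dist[goal], path[::-1]

# Example: dijkstra({"a": [("b", 1), ("c", 4)], "b": [("c", 2)]}, "a", "c") -> (3, ["a", "b", "c"])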
c0f7a1b92bd168f6b5656caefb477e56
Sets the objective of the model to minimization of enzymatic mass.
[ { "docid": "5381cdaae655f133f3562b505f58e64d", "score": "0.5934148", "text": "def set_enzymatic_objective(cobra_model, coefficients_forward, coefficients_reverse):\n coefficients = dict()\n for (bigg_id, cf) in coefficients_forward.items():\n rxn = cobra_model.reactions.get_by_id(bigg_id)\n coefficients[rxn.forward_variable] = cf\n for (bigg_id, cr) in coefficients_reverse.items():\n rxn = cobra_model.reactions.get_by_id(bigg_id)\n coefficients[rxn.reverse_variable] = cr\n \n cobra_model.objective = cobra_model.problem.Objective(Zero,\n direction='min', sloppy=True, \n name=\"min_enzymatic\")\n\n cobra_model.objective.set_linear_coefficients(coefficients=coefficients)", "title": "" } ]
[ { "docid": "57a0a352db57b18e123cc27b029c93a8", "score": "0.73641044", "text": "def Minimize(self, obj: ObjLinearExprT):\n self._SetObjective(obj, minimize=True)", "title": "" }, { "docid": "ce71e8f03db5f70ea793377ffb73845d", "score": "0.69110936", "text": "def _SetObjective(self, obj: ObjLinearExprT, minimize: bool):\n self.ClearObjective()\n if isinstance(obj, IntVar):\n self.__model.objective.coeffs.append(1)\n self.__model.objective.offset = 0\n if minimize:\n self.__model.objective.vars.append(obj.Index())\n self.__model.objective.scaling_factor = 1\n else:\n self.__model.objective.vars.append(self.Negated(obj.Index()))\n self.__model.objective.scaling_factor = -1\n elif isinstance(obj, LinearExpr):\n coeffs_map, constant, is_integer = obj.GetFloatVarValueMap()\n if is_integer:\n if minimize:\n self.__model.objective.scaling_factor = 1\n self.__model.objective.offset = constant\n else:\n self.__model.objective.scaling_factor = -1\n self.__model.objective.offset = -constant\n for v, c in coeffs_map.items():\n self.__model.objective.coeffs.append(c)\n if minimize:\n self.__model.objective.vars.append(v.Index())\n else:\n self.__model.objective.vars.append(self.Negated(v.Index()))\n else:\n self.__model.floating_point_objective.maximize = not minimize\n self.__model.floating_point_objective.offset = constant\n for v, c in coeffs_map.items():\n self.__model.floating_point_objective.coeffs.append(c)\n self.__model.floating_point_objective.vars.append(v.Index())\n elif cmh.is_integral(obj):\n self.__model.objective.offset = int(obj)\n self.__model.objective.scaling_factor = 1\n else:\n raise TypeError(\"TypeError: \" + str(obj) + \" is not a valid objective\")", "title": "" }, { "docid": "68af8eb000bd59b1fd3af2226bf2635b", "score": "0.643252", "text": "def Maximize(self, obj: ObjLinearExprT):\n self._SetObjective(obj, minimize=False)", "title": "" }, { "docid": "9f1fcc376b0cb305b823ca80ccfcf4f0", "score": "0.6102254", "text": "def set_objective(self, objective):\n if type(objective) == Variable:\n objective = LinExpr(vars={objective}, coefficients={objective: objective.coefficient})\n if type(objective) != LinExpr:\n raise Exception(\"Illegal objective. 
Objective has to be of type LinExp\")\n else:\n self.objective = objective\n # Set objective in concrete solver\n self.concrete_solver.set_objective(objective)", "title": "" }, { "docid": "924c5bb896bba3d65b09ebd0ef1b20b5", "score": "0.6058323", "text": "def set_optim_specs(self,\n objective_func: Optional[object] = None, \n model: Optional[Model] = None, \n maximize: Optional[bool] = True,\n Y_weights: Optional[ArrayLike1d] = None\n ):\n # assign objective function\n self.objective_func = objective_func\n\n # Set the sign for the objective\n if maximize: \n self.objective_sign = 1 # sign for the reponses\n self.negate_Y = False # if true (minimization), negate the model predicted values \n else:\n self.objective_sign = -1 \n self.negate_Y = True\n \n # set optimization goal\n self.maximize = maximize\n\n # fit a GP model if no model is input\n if model is None:\n self.fit_model()\n\n # assign weights to each objective, useful only to multi-objective systems\n if Y_weights is not None:\n self.assign_weights(Y_weights)", "title": "" }, { "docid": "16fc72e50df9491ef13c0f08fc306619", "score": "0.5974558", "text": "def set_objective(self, y_sa, w):\n objective = sum([ sum([ \n y_sa[s,a] * self.cost[s,a] \n for a in self.actions.values()]) for s in self.states.values()])\n \n for c in self.collision_set:\n # collision cost\n x = y_sa[c[0], c[1]]\n y = y_sa[c[2], c[3]]\n # this is a convex relaxation using AM-GM: \\sqrt(ab) <= 0.5*(a + b)\n objective += 0.5 * self.k * (cvx.square(x) + cvx.square(y))\n return objective", "title": "" }, { "docid": "70264f400d9a38c20d60e7d7e525b59d", "score": "0.5942988", "text": "def optimize(self) -> None:\n self.model.optimize()", "title": "" }, { "docid": "c18b4d9cc7bcd2a5806dc8453761ab62", "score": "0.5825058", "text": "def minimize(\r\n\t\tself,\r\n\t\tobjective,\r\n\t\tbackend: str = None,\r\n\t\tsilent: bool = True,\r\n\t\tmaxiter: int = None,\r\n\t):\r\n\t\tif not maxiter:\r\n\t\t\tmaxiter = self.maxiter\r\n\t\treturn self._minimize(\r\n\t\t\tobjective=objective,\r\n\t\t\tmethod=self._method,\r\n\t\t\tgradient=None,\r\n\t\t\thessian=None,\r\n\t\t\tinitial_values=None,\r\n\t\t\tvariables=None,\r\n\t\t\tbackend=backend,\r\n\t\t\tsilent=silent,\r\n\t\t\ttol=1.e-13,\r\n\t\t\tmaxiter=maxiter,\r\n\t\t\t*self._args,\r\n\t\t\t**self._kwarks\r\n\t\t)", "title": "" }, { "docid": "6f868638e287fda1f7816500269fe574", "score": "0.5688473", "text": "def CreateObjectiveFunction(self):\n\t\tfileobj = open(self.filename_model, \"a\")\n\t\tfileobj.write(\"Minimize\\n\")\n\t\teqn = []\n\t\tfor i in range(0,32):\n\t\t\teqn.append(\"x\" + \"_\" + str(i) + \"_\" + str(self.Round))\n\t\tfor i in range(0,32):\n\t\t\teqn.append(\"y\" + \"_\" + str(i) + \"_\" + str(self.Round))\n\n\n\t\ttemp = \" + \".join(eqn)\n\n\t\tfileobj.write(temp)\n\t\tfileobj.write(\"\\n\")\n\t\tfileobj.close()", "title": "" }, { "docid": "828e6270d5f390d8f4fcc74e070f1ca9", "score": "0.5681083", "text": "def add_objective(\n self,\n model_data: xr.Dataset,\n name: str,\n objective_dict: parsing.UnparsedObjectiveDict,\n ) -> None:", "title": "" }, { "docid": "4db9fecf5bd8cb3a826ad6bcf8eafd7b", "score": "0.5659443", "text": "def __init__(self, objective=None, constraints=[], lowerBounds=[],\r\n upperBounds=[], varType=[], discreteVals=[], optimum=0.0,\r\n pltTitle='', histTitle='', varNames=['']):\r\n\r\n #CHANGES MADE BY ALEX:\r\n #changed:\r\n #the objective variable is now an ObjectiveFunction_multi object\r\n # this means that the objective functino(s) are stored in an array\r\n #inorder to callother 
mulit-objective compatible code the import statement was changed\r\n #Added:\r\n #the num_objective_functions variable: stores the number of objective functions\r\n if type(objective) != list:\r\n self.numObjectiveFunctions = 1\r\n else:\r\n self.numObjectiveFunctions = len(objective)\r\n ## @var objective\r\n # <em> ObjectiveFunction Object: </em> The objective function object\r\n # to be used for the optimization.\r\n self.objective = objective\r\n\r\n ## @var constraints\r\n # <em> list of Constraint Objects: </em> The constraints on the\r\n # optimization design space.\r\n if type(constraints) != list:\r\n self.constraints = [constraints]\r\n else:\r\n self.constraints = constraints\r\n\r\n ## @var lb\r\n # \\e array: The lower bounds of the design variable(s).\r\n self.lb = lowerBounds\r\n\r\n ## @var ub\r\n # \\e array: The upper bounds of the design variable(s).\r\n self.ub = upperBounds\r\n\r\n ## @var varType\r\n # \\e array: The type of variable for each position in the upper and\r\n # lower bounds array.\r\n self.varType = varType\r\n\r\n ## @var discreteVals\r\n #\\e array: nxm with n=# of discrete variables and m=# of values that\r\n # can be taken for each variable.\r\n self.discreteVals = discreteVals\r\n\r\n ## @var optimum\r\n # \\e float: The global optimal solution.\r\n self.optimum = optimum\r\n\r\n ## @var pltTitle\r\n # \\e string: The title used for plotting the results of the\r\n # optimization.\r\n self.pltTitle = pltTitle\r\n\r\n ## @var histTitle\r\n # \\e string: The plot title for the histogram of the optimization\r\n # results.\r\n self.histTitle = histTitle\r\n\r\n ## @var varNames\r\n # <em> list of strings: </em> The names of the variables for the\r\n # optimization problem.\r\n self.varNames = varNames\r\n\r\n # Ensure that the correct inputs were provided; modify as neccesary\r\n # to meet Gnowee's requirements;\r\n # Populate variable type id vectors\r\n if len(self.lb) and len(self.ub) and len(self.varType) != 0 \\\r\n or len(self.discreteVals) and len(varType) != 0:\r\n self.sanitize_inputs()\r\n\r\n ## @var cID:\r\n # \\e array: The continuous variable truth array. This contains\r\n # a one in the positions corresponding to continuous variables\r\n # and 0 otherwise.\r\n self.cID = []\r\n\r\n ## @var iID:\r\n # \\e array: The integer variable truth array. This contains\r\n # a one in the positions corresponding to continuous variables\r\n # and 0 otherwise.\r\n self.iID = []\r\n\r\n ## @var dID:\r\n # \\e array: The discrete variable truth array. This contains\r\n # a one in the positions corresponding to continuous variables\r\n # and 0 otherwise.\r\n self.dID = []\r\n\r\n ## @var xID:\r\n # \\e array: The combinatorial variable truth array. 
This contains\r\n # a one in the positions corresponding to continuous variables\r\n # and 0 otherwise.\r\n self.xID = []\r\n\r\n # Develop ID vectors for each variable type\r\n for var in range(len(self.varType)):\r\n if 'c' in self.varType[var]:\r\n self.cID.append(1)\r\n else:\r\n self.cID.append(0)\r\n if 'i' in self.varType[var]:\r\n self.iID.append(1)\r\n else:\r\n self.iID.append(0)\r\n if 'd' in self.varType[var]:\r\n self.dID.append(1)\r\n else:\r\n self.dID.append(0)\r\n if 'x' in self.varType[var]:\r\n self.xID.append(1)\r\n else:\r\n self.xID.append(0)\r\n self.cID = np.array(self.cID)\r\n self.iID = np.array(self.iID)\r\n self.dID = np.array(self.dID)\r\n self.xID = np.array(self.xID)", "title": "" }, { "docid": "13106a2b9c4567a72c0035179e82df87", "score": "0.5616432", "text": "def set_objective(self, linear=None, quadratic=None, minimize=True):\n\n if quadratic is not None:\n raise Exception('PuLP wrapper does not support quadratic objectives.')\n \n if linear is not None:\n objective = lpSum([coeff * self.variables[var_id] for var_id, coeff in linear.items() if coeff != 0])\n self.problem.setObjective(objective)\n self.problem.sense = LpMinimize if minimize else LpMaximize", "title": "" }, { "docid": "d73aa85040442275ad7d611ea1f47783", "score": "0.5579276", "text": "def set_edge_objective(problem, variable_dict, graph):\n\n objective = xpress_sum(mip_utils.define_edge_objective(variable_dict, graph))\n problem.setObjective(objective)", "title": "" }, { "docid": "063c82a1bedfdefe409b467ac4c45e47", "score": "0.5537422", "text": "def objective(self):\n return NotImplemented", "title": "" }, { "docid": "83e1ac7f7ca151987c62a66a24866836", "score": "0.5533802", "text": "def compile(self):\n if self._compiled:\n return\n for obj in [self.exp_u_pi, self.exp_u_pi_g_e, self.exp_u_pi_g_e2,\n self.exp_u_pi_g_ea]:\n obj.compile()\n self._compiled = True\n # The objective function to be minimized\n self._obj_fun = lambda _e, _a: -self.exp_u_pi(_e[0], _a)\n self._obj_fun_jac = lambda _e, _a: -self.exp_u_pi_g_e(_e[0], _a)", "title": "" }, { "docid": "849327af4c1deeb49d8f0367b1b1a8d1", "score": "0.54932815", "text": "def optimize(self, X, y):\n for key in self.data.keys():\n self.data[key] = getattr(self,key)\n self.best = self.model.optimizing(data=self.data)", "title": "" }, { "docid": "23be9177a41fdec32c196d9beb460d89", "score": "0.5489", "text": "def objective(model):\n # This is equation (1) in the paper\n return sum(model.utility[j] * model.x[j] for j in model.V)", "title": "" }, { "docid": "61f2df572908d97a076fe4b28cceaafb", "score": "0.54608405", "text": "def _objective(self):\n raise NotImplementedError()", "title": "" }, { "docid": "fb458ded4bc25b6210ef64c1a141438b", "score": "0.54358", "text": "def setMass(self, mass=1.0):\n self.mass = mass\n # The inverse mass.\n self.massInv = 1.0 / self.mass", "title": "" }, { "docid": "76505c320ccfeb78f7dc477913d934ed", "score": "0.54226446", "text": "def set_minimizer(self, minimizer):\n\n self.Minimizer = self._setup_minimizer(minimizer)", "title": "" }, { "docid": "e615484e8228f71f9c166fc1573c2a64", "score": "0.5416081", "text": "def add_setpoint_objective(\n self,\n setpoint,\n weights,\n ):\n vardata_map = self.vardata_map\n for vardata, weight in weights:\n nmpc_var = vardata_map[vardata]\n nmpc_var.weight = weight\n\n weight_vector = []\n for vardata, sp in setpoint:\n nmpc_var = vardata_map[vardata]\n if nmpc_var.weight is None:\n self.logger.warning(\"Weight not supplied for %s\" % var.name)\n nmpc_var.weight = 1.0\n 
weight_vector.append(nmpc_var.weight)\n\n obj_expr = sum(\n weight_vector[i] * (var - sp) ** 2 for i, (var, sp) in enumerate(setpoint)\n )\n self.setpoint_objective = Objective(expr=obj_expr)", "title": "" }, { "docid": "38bd2afd04f6da3227d04b83a9bd6515", "score": "0.5415522", "text": "def objective(self):\n return self._objective", "title": "" }, { "docid": "437b3a2d6cbc85001852583d4ea91eb1", "score": "0.54134095", "text": "def set_optim_specs(self,\n weights: Union[ArrayLike1d, float],\n objective_func: Optional[object] = None, \n maximize: Optional[bool] = True,\n ):\n self.objective_func = objective_func\n self.maximize = maximize\n if maximize: \n self.objective_sign = 1\n else:\n self.objective_sign = -1\n\n # Total number of experiments\n if isinstance(weights, float):\n weights = [weights]\n self.n_exp = len(weights)\n\n # Compute the weight pairs \n # The weights for objective 2 is 1-weight_i\n weight_pairs = []\n for weight_i in weights:\n weight_pairs.append([weight_i, 1-weight_i])\n \n # List of experiment objects\n experiments = [] \n\n print('Initializing {} experiments'.format(self.n_exp))\n # initialize weighted experimnets with data and weights \n for i, weight_pair_i in enumerate(weight_pairs):\n experiment_i = SingleWeightedExperiment()\n experiment_i.input_data(self.X_init, \n self.Y_init_real, \n X_ranges = self.X_ranges, \n unit_flag=True)\n experiment_i.set_optim_specs(objective_func=objective_func,\n model=None, #start fresh\n maximize=maximize, \n Y_weights = weight_pair_i)\n experiments.append(experiment_i)\n print('Initializing experiments {:.2f} % '.format((i+1)/self.n_exp *100))\n\n self.experiments = experiments", "title": "" }, { "docid": "ff7b2ecb78b0d76c31394349f6b809b0", "score": "0.5390979", "text": "def set_optimal_parameters(self):\n self.objective.parse_trial_oat(self.optimal_params)", "title": "" }, { "docid": "f7e5c1c8bc00a5ba43b32fb9271be985", "score": "0.5382947", "text": "def objective(self):\n return 'MAX'", "title": "" }, { "docid": "f7e5c1c8bc00a5ba43b32fb9271be985", "score": "0.5382947", "text": "def objective(self):\n return 'MAX'", "title": "" }, { "docid": "8d46c48a9f3c5923facbb9c145c80f62", "score": "0.5372812", "text": "def __init__(self):\n\n # Maintain a parallel list of objectives and weights\n self.objectives = []\n self.weights = []\n \n # The objective will simply be the sum of all objectives \n # in the list\n self.objective = None", "title": "" }, { "docid": "3c82d10ab90572147ffc2f4276e26052", "score": "0.53221107", "text": "def objective(\n self,\n x: Optional[Union[Array, BlockArray]] = None,\n ) -> float:\n if x is None:\n x = self.x\n return self.f(x) + self.g(self.C(x))", "title": "" }, { "docid": "40098ede30ce5db6328500ecc89243c9", "score": "0.5313936", "text": "def update_fitness(self):\n self.fitness = self.objectives[:, self.problem.minimize]\n self.ideal_fitness = np.full((1, self.fitness.shape[1]), np.inf)\n self.worst_fitness = -1 * self.ideal_fitness\n self.update_ideal_and_nadir()", "title": "" }, { "docid": "cc02d6e77672fd2d2b550023c2049105", "score": "0.53081226", "text": "def objective(**params):\n scores = dqn(env=env, brain_name=brain_name, n_episodes=N_EPISODES, break_early=False, **params)\n return -np.mean(scores[-100:])", "title": "" }, { "docid": "263c67183ed3ee9c1f6eb0b16c85915c", "score": "0.53038937", "text": "def transform_objective(self, objective: float):\n # prev_objective = (\n # self.observed_objectives[-1] if len(self.observed_objectives) > 0 else None\n # )\n # if prev_objective is not None:\n # 
objective = max(prev_objective, objective)\n return objective", "title": "" }, { "docid": "a4242580db77f017916f8e913e05282c", "score": "0.5287671", "text": "def objective(x, a, b, c):\n return b * x + a * x**2 + c", "title": "" }, { "docid": "0395f497e8ba0ca87c649830a008e4a6", "score": "0.52865165", "text": "def init_optimiser(self):\n parameters = self.denoiser.parameters()\n self._optimizer = optim.Adam(parameters, betas=[0.9, 0.99])", "title": "" }, { "docid": "5a8cec91a0130ef9d924553d85efdda3", "score": "0.52794653", "text": "def minlp():\n m = pyo.ConcreteModel()\n m.x = pyo.Var(initialize=-0.1)\n m.y = pyo.Var(initialize=1)\n m.i = pyo.Var(domain=pyo.Binary, initialize=0)\n m.c = pyo.Constraint(expr=m.x >= 1)\n m.obj = pyo.Objective(\n expr=m.i * (m.x**2 + m.y**2) + (1 - m.i) * 4 * (m.x**2 + m.y**2)\n )\n return m, 1, 1", "title": "" }, { "docid": "4e799f6d7659cf3cd734778e7f8e13ff", "score": "0.5270191", "text": "def optimize(self):\n self.concrete_solver.optimize()", "title": "" }, { "docid": "ad29c951bf193f36a4f6b918009cba5f", "score": "0.5253416", "text": "def build_primal_optModel(self): \n # Define parameters and scalars needed to define the optimizaiton problem\n self.define_optModel_params()\n\n # Create a pyomo model optimization model\n optModel = ConcreteModel()\n \n #--- Sets ---\n # Set of compounds \n optModel.I = Set(initialize = self.model.compounds) \n\n # Set of rxns \n optModel.J = Set(initialize = self.model.reactions) \n\n #--- Variables --- \n # Reaction fluxes\n optModel.v = Var(optModel.J, domain=Reals, bounds = self.assignFluxBounds)\n\n # Binary variables in v_j >= LB_j*yLB_j and v_j <= UB_j*yUB_j \n optModel.yLB = Var(optModel.J, domain=Boolean)\n optModel.yUB = Var(optModel.J, domain=Boolean)\n\n #--- Objective function ----\n # Objective function\n optModel.objectiveFunc = Objective(rule=self.primal_objectiveFunc_rule, sense = maximize)\n\n # Mass balance \n optModel.massBalance_const = Constraint(optModel.I, rule=self.massBalance_const_rule)\n\n # v_j >= LB_j*yLB_j and v_j <= UB_j*yUB_j\n optModel.fluxLB_const = Constraint(optModel.J, rule=self.fluxLB_const_rule)\n optModel.fluxUB_const = Constraint(optModel.J, rule=self.fluxUB_const_rule)\n\n self.optModel = optModel", "title": "" }, { "docid": "eefb758ed413a9385bd95d80f9ec673d", "score": "0.5249066", "text": "def add_linear_obj(community, exchanges):\n check_modification(community)\n coefs = {}\n for rxn in exchanges:\n export = len(rxn.reactants) == 1\n if export:\n coefs[rxn.reverse_variable] = 1.0\n else:\n coefs[rxn.forward_variable] = 1.0\n community.objective.set_linear_coefficients(coefs)\n community.objective.direction = \"min\"\n community.modification = \"minimal medium linear\"", "title": "" }, { "docid": "7c6ebfce37840a0838487ffb6cf8e2b0", "score": "0.52415144", "text": "def addObjective(self, objective, weight = 1.0):\n\n self.objectives.append(objective)\n self.weights.append(weight)", "title": "" }, { "docid": "459b762cfee13e5dddf2f556c86f53fc", "score": "0.5232504", "text": "def objective_function(self, configuration, **kwargs):\n pass", "title": "" }, { "docid": "50220da8ad3f79e6c672ff29a860d084", "score": "0.5227372", "text": "def fit_model(self):\n\n # create a GP model based on input data\n # In the case of minimize, the negative reponses values are used to fit the GP\n self.model = create_and_fit_gp(self.X, self.objective_sign * self.Y)", "title": "" }, { "docid": "42dfb4bc15b988259cb297699f4cd763", "score": "0.5215185", "text": "def _objective_function(self, x):\n aeval = 
Interpreter()\n exprc = aeval.parse(self.objective)\n aeval.symtable['x'] = x\n return aeval.run(exprc)", "title": "" }, { "docid": "517b6e212806064fc99279e55f904d80", "score": "0.52150595", "text": "def set_optim_specs(self, \n objective_file_name: str,\n comsol_location: str,\n comsol_output_location: str,\n comsol_output_col: Optional[int] = 2, \n model: Optional[Model] = None, \n maximize: Optional[bool] = True,\n Y_weights: Optional[ArrayLike1d] = None\n ):\n\n # assign objective COMSOL file and location\n self.objective_file_name = objective_file_name\n self.comsol_location = comsol_location\n self.objective_func = self.comsol_simulation\n\n # assign output file and objective column\n self.comsol_output_location = comsol_output_location\n self.comsol_output_col = comsol_output_col\n \n # set optimization goal\n self.maximize = maximize\n\n if maximize: \n self.objective_sign = 1 # sign for the reponses\n self.negate_Y = False # if true (minimization), negate the model predicted values \n else:\n self.objective_sign = -1 \n self.negate_Y = True\n\n # create a GP model based on input data\n # In the case of minimize, the negative reponses values are used to fit the GP\n if model is None:\n self.model = create_and_fit_gp(self.X, self.objective_sign * self.Y)\n # assign weights to each objective, useful only to multi-objective systems\n if Y_weights is not None:\n self.assign_weights(Y_weights)", "title": "" }, { "docid": "4e1d090c4653295a0254642a8560a812", "score": "0.5193236", "text": "def set_optim_specs(self, \n objective_file_name: str,\n comsol_location: str,\n comsol_output_location: str,\n comsol_output_col: Optional[List[int]] = [2, 3], \n model: Optional[Model] = None, \n maximize: Optional[bool] = True,\n Y_weights: Optional[ArrayLike1d] = None\n ):\n\n # assign objective COMSOL file and location\n self.objective_file_name = objective_file_name\n self.comsol_location = comsol_location\n self.objective_func = self.comsol_simulation\n\n # assign output file and objective column\n self.comsol_output_location = comsol_output_location\n self.comsol_output_col = comsol_output_col\n \n # set optimization goal\n self.maximize = maximize\n\n if maximize: \n self.objective_sign = 1 # sign for the reponses\n self.negate_Y = False # if true (minimization), negate the model predicted values \n else:\n self.objective_sign = -1 \n self.negate_Y = True\n\n # create a GP model based on input data\n # In the case of minimize, the negative reponses values are used to fit the GP\n if model is None:\n self.model = create_and_fit_gp(self.X, self.objective_sign * self.Y)\n # assign weights to each objective, useful only to multi-objective systems\n if Y_weights is not None:\n self.assign_weights(Y_weights)", "title": "" }, { "docid": "107ce592222b071a20bee06c4d379e0a", "score": "0.5192758", "text": "def emin( self , emin ) :\n\n # Just check that the minimum energy is greater than\n # 5.e-9.\n # In reality, at this point, is factible to check\n # that emin is greater than mass * 1.e-9\n # By the way, the dminterpolator put 1.e-40\n # in values outside the range of interpolation\n if emin < 5.e-9 :\n\n raise ValueError( ( '\\nMinimum energy {0} GeV '.format( emin ) +\n 'is below the allowed value (5.e-9GeV)') )\n\n # Set minimum energy\n self._emin = emin", "title": "" }, { "docid": "5b09909e1b098a5aa9be15d0d236d843", "score": "0.51852345", "text": "def add_pressure_minimization_equations(self):\n units_meta = self.config.property_package.get_metadata()\n self.eps_pressure = Param(\n mutable=True,\n 
initialize=1e-3,\n domain=PositiveReals,\n doc=\"Smoothing term for minimum inlet pressure\",\n units=units_meta.get_derived_units(\"pressure\"),\n )\n\n # Calculate minimum inlet pressure\n @self.Expression(\n self.flowsheet().time,\n self.inlet_list,\n doc=\"Calculation for minimum inlet pressure\",\n )\n def minimum_pressure(b, t, i):\n if i == self.inlet_list[0]:\n return self.inlet_blocks[i][t].pressure\n else:\n pi = self.inlet_list[self.inlet_list.index(i) - 1]\n prev_p = self.minimum_pressure[t, pi]\n this_p = self.inlet_blocks[i][t].pressure\n return smooth_min(this_p, prev_p, self.eps_pressure)\n\n # Set inlet pressure to minimum pressure\n @self.Constraint(self.flowsheet().time, doc=\"Link pressure to control volume\")\n def minimum_pressure_constraint(b, t):\n return self.mixed_state[t].pressure == (\n self.minimum_pressure[t, self.inlet_list[-1]]\n )", "title": "" }, { "docid": "7a7244500cd80f5841782dc6c0a22078", "score": "0.51837987", "text": "def maximize(self, budget, optimizer):\n\n\t\tpass", "title": "" }, { "docid": "38998389f48dbbe2f01cc0d1383193b5", "score": "0.5179309", "text": "def _init_optimizer(self):\n first_model = list(self.nn.keys())[0]\n self.optimizer['adam'] = _torch.optim.Adam(self.nn[first_model].parameters(),\n lr=self.args['learning_rate'])", "title": "" }, { "docid": "a6b583e810b2d594c43a80472471f5ff", "score": "0.5149724", "text": "def objective_function_rule(_m):\r\n\r\n # Objective function\r\n objective_function = m.TOTAL_PLANNING_COST + m.alpha\r\n\r\n return objective_function", "title": "" }, { "docid": "196c37111eed449e50eec2f584e3e4bd", "score": "0.5146759", "text": "def _update_obj_fn(self):\n for objective in self.obj_fn.objectives:\n if isinstance(objective, ObstacleAvoidance):\n objective.obstacle_map = self.obstacle_map\n elif isinstance(objective, GoalDistance):\n objective.fmm_map = self.fmm_map\n elif isinstance(objective, AngleDistance):\n objective.fmm_map = self.fmm_map\n elif isinstance(objective, PersonalSpaceCost):\n pass\n else:\n assert False", "title": "" }, { "docid": "c7c3761cd13ed4541d114da66e0e5a21", "score": "0.5115613", "text": "def objective(self, X, y, w):\n m,_ = X.shape\n hinge_loss = np.maximum(np.zeros(m), 1 - y * np.dot(X, w))\n return self.lambda_ / 2 * np.dot(w,w) + 1./m * np.sum(hinge_loss)", "title": "" }, { "docid": "442d1f8a57b66aa4e17cf37a4cebe555", "score": "0.5086727", "text": "def eval_fitness(self, obj):\n\n # fitness = self.objectives * self.problem.objs\n if self.problem.minimize is None:\n self.problem.minimize = [True] * self.problem.num_of_objectives\n else:\n assert len(self.problem.minimize) == self.problem.num_of_objectives\n\n fitness = np.asarray(obj)[np.asarray(self.problem.minimize)]\n\n return fitness", "title": "" }, { "docid": "fd162f05b71bc28a2fe040d80fcde188", "score": "0.5064911", "text": "def maximize(self, maximize):", "title": "" }, { "docid": "d5d7d39639a31d78c854341776cbbe80", "score": "0.5047889", "text": "def objectivefunction(self, simulation, evaluation):\n # sceua requires minimization which will result in a negative KGE\n if (\n (self.calib_algorithm == 'sceua') |\n (self.calib_algorithm == 'NSGAII')\n ):\n multiplier = -1\n else:\n multiplier = 1\n obj1 = spotpy.objectivefunctions.kge(evaluation,\n simulation) * multiplier\n\n return obj1", "title": "" }, { "docid": "cb184405f1b3305336c6b4fa766ce0cf", "score": "0.504041", "text": "def setOptimizeableHydrogens(self):\n for residue in self.protein.getResidues():\n optinstance = self.isOptimizeable(residue)\n if optinstance == 
None: continue\n for atom in residue.getAtoms():\n if atom.name in optinstance.map:\n atom.optimizeable = 1", "title": "" }, { "docid": "d909ce14ee93b74aa562db1226bba5f9", "score": "0.5028671", "text": "def objective_function(variable):\n global optimal_action\n global d\n player_veridical = variable[0]\n predator_veridical = variable[1]\n prey_veridical = variable[2]\n actual_action = variable[3]\n optimal_action = ddqn_veridical_action(player_veridical, predator_veridical, prey_veridical)\n d = difference([optimal_action, actual_action]) / 4\n return 1 - d", "title": "" }, { "docid": "6680f7e869c32f06bf678924f49bc5f2", "score": "0.50227463", "text": "def optimize_for_linear_regression(self):\r\n self.ml_model = LinearRegression(n_jobs=-1)\r\n self.best_parameters = []", "title": "" }, { "docid": "aa9878ca95bd52366a5658fb2363f719", "score": "0.5008923", "text": "def add_mip_obj(community, exchanges):\n check_modification(community)\n if len(community.variables) > 1e4:\n logger.warning(\n \"the MIP version of minimal media is extremely slow for\"\n \" models that large :(\"\n )\n boundary_rxns = exchanges\n M = max(np.max(np.abs(r.bounds)) for r in boundary_rxns)\n prob = community.problem\n coefs = {}\n to_add = []\n for rxn in boundary_rxns:\n export = len(rxn.reactants) == 1\n indicator = prob.Variable(\"ind_\" + rxn.id, lb=0, ub=1, type=\"binary\")\n if export:\n vrv = rxn.reverse_variable\n indicator_const = prob.Constraint(\n vrv - indicator * M, ub=0, name=\"ind_constraint_\" + rxn.id\n )\n else:\n vfw = rxn.forward_variable\n indicator_const = prob.Constraint(\n vfw - indicator * M, ub=0, name=\"ind_constraint_\" + rxn.id\n )\n to_add.extend([indicator, indicator_const])\n coefs[indicator] = 1\n community.add_cons_vars(to_add)\n community.solver.update()\n community.objective.set_linear_coefficients(coefs)\n community.objective.direction = \"min\"\n community.modification = \"minimal medium mixed-integer\"", "title": "" }, { "docid": "56c217238626f9d6217cb4c994190c96", "score": "0.5003886", "text": "def setup_minimize(self, learning_rate):\n\n self.train_step = tf.train.AdamOptimizer(learning_rate).minimize(self.c_loss)", "title": "" }, { "docid": "fd759caf9d0f17d3f09f716bf3360de4", "score": "0.50003326", "text": "def _add_bidding_objective(self, model):\n\n # declare an empty objective\n model.obj = pyo.Objective(expr=0, sense=pyo.maximize)\n\n for k in model.SCENARIOS:\n time_index = model.fs[k].power_output_ref.index_set()\n\n # currently .total_cost is a tuple of 2 items\n # the first item is the name of the cost expression\n # the second item is the weight for the cost\n cost_name = self.bidding_model_object.total_cost[0]\n cost = getattr(model.fs[k], cost_name)\n weight = self.bidding_model_object.total_cost[1]\n\n for t in time_index:\n model.obj.expr += (\n model.fs[k].day_ahead_energy_price[t]\n * model.fs[k].day_ahead_power[t]\n + model.fs[k].real_time_energy_price[t]\n * (model.fs[k].power_output_ref[t] - model.fs[k].day_ahead_power[t])\n - weight * cost[t]\n - model.fs[k].real_time_underbid_penalty\n * model.fs[k].real_time_underbid_power[t]\n )\n\n return", "title": "" }, { "docid": "756ff9667b9ae3a7a534d6f9eb8b4837", "score": "0.49895692", "text": "def Minimize_Object(optimization_round):\n\n if Set.decon_type in ('classical', 'myopic', 'nobjects'):\n\n Set.costfunction_type = 1 ## minimize object \n elif Set.decon_type == 'npsfs':\n\n Set.costfunction_type = 3 ## minimize object in presence of npsfs\n elif Set.decon_type in ('si','siclassical'):\n \n 
Set.costfunction_type = 4 ## SI related; NEED INTEGRATION with SI code\n \n ### Initialization of CCG Variables ###\n Set.old_estimate = Set.object.copy()\t# use .copy() instead of [:] = \n Set.old_estimate_difference[:] = 0.\n itn = 0; ifn = 0; fn = 0.; fmin = 0.\n Set.ivec[:] = 0; df0 = 0. ## df0, scalar float; input and output\n Nclsrch = 0; istop = 100\n rising_test_count = 0\n old_test = 100000. ## to determine if solution steps go \"uphill\"\n object_stops = 0\n \n ### Minimization Loop Over Object ###\n for i in range(Set.object_PCG_iter_array[optimization_round-1]):\n\t#this is PIo\n startCGtime = time.time()\n \n if i == 0:\n\n print('[obj] PCG iter:', i+1, 'of', end=' ') \n print(Set.object_PCG_iter_array[optimization_round-1], end=' ')\n print(' in optimization round ', end=' ') \n print(optimization_round, ' out of ', end=' ')\n print(Set.max_total_PCG_blocks, '(max)')\n else:\n\n print('[obj] PCG iter:', i+1, ' of ', end=' ') \n print(Set.object_PCG_iter_array[optimization_round-1])\n \n # Pour test on fait avec scipy opt.minimize\n import scipy.optimize as opt\n def function_cost(X):\n temp_grad = X.copy()\n (res,dummy) = AIDA_CostGradFunctions.CostFunction(X.copy(),temp_grad)\n return(res,temp_grad.flatten())\n myObject = Set.object.copy()\n # pour test, ne sert a rien en fait\n (res,grad_res) = function_cost(myObject)\n # Tentative d'optimisation\n \n resOptim2 = opt.minimize(function_cost,myObject.flatten(),jac = True,method='L-BFGS-B',\n bounds=N.asarray([(Set.xmin,Set.xmax_object)]*len(myObject.flatten())),\n options={'disp': False,'maxiter': Set.max_object_CCG_iter/2\n })\n \n itn = resOptim2.nit\n ifn = resOptim2.nfev\n istop = 0\n fn = resOptim2.fun\n df0 = 0\n Nclsrch = 0\n Set.object[:]= resOptim2.x.reshape(Set.object.shape)\n \n \n# #seb (itn, ifn, istop, fn, df0, Nclsrch) = CCG.doCCG(Set.object, Set.xmin,\n# #seb Set.xmax_object, Set.ivec, Set.max_object_CCG_iter, fmin, df0,\n# #seb Set.object_CCG_tolerance, Nclsrch) #@ CCG \n# (istop, itn, ifn, Nclsrch, fn, df0) = \\\n# ccg.getsol( Set.object, Set.xmin,\n# Set.xmax_object, Set.ivec, Set.max_object_CCG_iter, fmin, df0,\n# Set.object_CCG_tolerance,\n# AIDA_CostGradFunctions.CostFunction)\n \n \n \n \n\n \n if Set.info_level >= 1:\n \n Set.cum_CG_time[0::2] += time.time() - startCGtime\n Set.decon_total_CG_itns[0::2] += itn\n Set.decon_total_CostFunction_itns[0::2] += ifn\n\n if Set.info_level >= 2:\n\n if i == 0 :\n\n try:\n\n print('<lambda_object>: %.7g' %Set.lambda_object.mean(), end=' ')\n except:\n \n print('lambda_object: %.7g' %Set.lambda_object, end=' ')\n\n try:\n \n print(' <mu>: %.7g' %Set.mu.mean(), end=' ')\n except:\n \n print(' mu: %.7g' %Set.mu, end=' ')\n\n try:\n \n print(' <theta>: %.7g' %(1./Set.inv_theta).mean())\n except:\n \n print(' theta: %.7g' %(1./Set.inv_theta))\n\n print('\\tCG itns', itn, ' istop', istop, ' ifn', ifn, end=' ') \n print(' df0 %.6g' %df0, ' Nclsrch', Nclsrch, end=' ')\n print(' CGtime %.6g' %(time.time() - startCGtime))\n\n Nclsrch = 0\n\n if optimization_round == 3:\n\n Set.start = False\n\n ### Check Global Solution Convergence of Object Estimate ###\n test = (N.abs(N.abs(Set.object - Set.old_estimate) - \\\n Set.old_estimate_difference)).mean()\n Set.obj_diff.append(test)\n\n if test <= Set.object_PCG_tolerance:\n\n old_test = 0.\n object_stops += 1\n\n if Set.info_level >= 2:\n\n print('\\t obj diff: ', test, '\\tcheck_object = stop')\n\n if object_stops == Set.max_sequential_PCG_stops:\n\n if Set.info_level >= 2:\n\n print('\\t\\t*** max object_stops reached 
***')\n\n break\n else:\n\n object_stops = 0\n \n if itn < (Set.max_object_CCG_iter+1) and \\\n i > Set.object_PCG_iter_array[optimization_round-1]+1 and \\\n Set.decon_type != 'classical':\n\n if Set.info_level >= 2:\n\n print('\\t obj diff: ', test, '\\tcheck_object = go')\n print('\\t >> max specified object iterations reached <<')\n\n break\n\n if i != 0 and test >= Set.rising_tol_ratio * old_test:\n\n rising_test_count += 1\n\n if rising_test_count == Set.max_uphill_object_PCG_steps:\n\n if Set.info_level >= 2:\n\n print('\\t obj diff: ', test, '\\tcheck_object = go')\n print('\\t >> max rising test count encountered <<')\n\n break ## break out if test > old_test occurs\n ## more than max_uphill_steps, consecutively\n else:\n \n if Set.info_level >= 2:\n\n print('\\t obj diff: ', test, '\\tcheck_object = go')\n else:\n\n Set.old_estimate_difference = N.abs(Set.object - Set.old_estimate).copy()\n # use .copy() instead of [:] = \n old_test = test\n\n if Set.info_level >= 2:\n\n print('\\t obj diff: ', test, '\\tcheck_object = go')\n\n ### Swap Old Object With Current Object Estimate 'xo' ###\n Set.old_estimate = Set.object.copy()\t# use .copy() instead of [:] = \n\n return (object_stops, rising_test_count, fn)", "title": "" }, { "docid": "775be1f71df054ecfb70850fcf1783c2", "score": "0.49886695", "text": "def set_potential(Z, Zs, alpha):\n global potential\n radial.potential(Z, Zs, alpha)\n potential = lambda r: Z/r + Zs/r * np.exp(-alpha*r)", "title": "" }, { "docid": "c3cef58e690adfdf88d238ed9e1e4602", "score": "0.4981759", "text": "def freeze_elmo(self):\n self.elmo.freeze()", "title": "" }, { "docid": "8f16270e7c7c129cedc0d839d2aeff09", "score": "0.49768797", "text": "def _minimaxRisk(self, X):\n\n # Constants\n m= self.phi.len\n\n # Variables\n mu = cvx.Variable(m)\n zhi = cvx.Variable(m)\n nu = cvx.Variable()\n\n # Cost function\n cost = (1/2)*(self.b - self.a).T@zhi - (1/2)*(self.b + self.a).T@mu - nu\n\n # Objective function\n objective = cvx.Minimize(cost)\n\n # Constraints\n constraints= [zhi + mu >= 0, zhi - mu >= 0]\n\n # Get the learn configurations of phi (feature mapping)\n phi = self.phi.learnConfig(X, self.learn_duplicates)\n\n if self.loss == '0-1':\n # Constraints in case of 0-1 loss function\n\n # Exponential number in num_class of linear constraints\n M = self.phi.getAllSubsetConfig(phi)\n \n # F is the sum of phi\n # for different subset of Y\n # for each data point\n F = M[:, :m]\n cardS= M[:, -1]\n numConstr= M.shape[0]\n constraints.extend([F[i, :]@mu + cardS[i]*nu + cardS[i]*1 <= 1 \\\n for i in range(numConstr)])\n\n elif self.loss == 'log':\n # Constraints in case of log loss function\n\n numConstr = phi.shape[0]\n constraints.extend([cvx.log_sum_exp(phi[i, :, :]@mu + np.ones(self.r) * nu) <= 0 \\\n for i in range(numConstr)])\n\n self.mu, self.zhi, self.nu = self.trySolvers(objective, constraints, mu, zhi, nu)\n\n # Save the phi configurations for finding the lower bounds\n self.lowerPhiConfigs = phi\n\n # Upper bound\n self.upper= (1/2)*(self.b - self.a).T@self.zhi - (1/2)*(self.b + self.a).T@self.mu - self.nu", "title": "" }, { "docid": "98d8318220ce47154f1bd155c5ff1767", "score": "0.49734777", "text": "def eval_objective(x):\n return fun(unnormalize(x, fun.bounds))", "title": "" }, { "docid": "78022255360a8887e16b9f948fdf5573", "score": "0.4972587", "text": "def default_objective_function(ph):\n return int(ph.as_string, 2) / float(2 ** len(ph.as_string))", "title": "" }, { "docid": "b947a62d8288a48dd905721b4e51e055", "score": "0.49714148", "text": "def 
objective(self,param):\n\t\tv = np.float64(0)\n\t\tfor i in range(self.measured.shape[0]):\n\t\t\tv += (self.model.evaluate(self.measured[i,:-1],param) - self.measured[i,-1])**2\n\n\t\treturn v", "title": "" }, { "docid": "9c9a64a433deb2e452b538a4b82b54b1", "score": "0.49700218", "text": "def _perform_fit(self):\n self.coeff = {}\n self.p0 = self.initial.copy()\n limits = self.limits.copy()\n fix = self.fix.copy()\n self._prepare_params(self.p0, limits, fix)\n\n m0 = iminuit.Minuit(self._minimize_function,\n **self.p0, **limits, **fix,\n print_level=0, pedantic=False, throw_nan=True,\n forced_parameters=self.coeff_names)\n m0.migrad()\n self.coeff = m0.values\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', HesseFailedWarning)\n m0.hesse()\n self.errors = m0.errors", "title": "" }, { "docid": "b122914f25a2203d7c79be4394bdbc72", "score": "0.49678245", "text": "def emax( self , emax ) :\n\n # Just check that the minimum energy is greater than\n # 1.e+5.\n # In reality, at this point, is factible to check\n # that emax is lower than mass\n # By the way, the dminterpolator put 1.e-40\n # in values outside the range of interpolation\n if emax > 1.e+5 :\n\n raise ValueError( ( '\\nMaximum energy {0} GeV '.format( emax ) +\n 'is above the allowed value (1.e+5GeV)') )\n\n # Set minimum energy\n self._emax = emax", "title": "" }, { "docid": "d4b3bc6cb375ca74f81a1377e3703053", "score": "0.49653354", "text": "def outer_objectiveFunc_rule(self,optModel):\n return sum(j.costUB*(1 - optModel.yUB[j]) + j.costLB*(1 - optModel.yLB[j]) for j in optModel.J)", "title": "" }, { "docid": "cb44f87032651f1c06741fb129d639de", "score": "0.49623013", "text": "def minimizer(self):\n return self.x", "title": "" }, { "docid": "74cb021660bfda50c00b3999e0de1cc7", "score": "0.49602318", "text": "def rooney_biegler_model_opt():\n\n model = pyo.ConcreteModel()\n\n model.asymptote = pyo.Var(initialize = 15)\n model.rate_constant = pyo.Var(initialize = 0.5)\n \n model.obj = pyo.Objective(expr = model.asymptote*( 1 - pyo.exp(-model.rate_constant*10 ) ), sense=pyo.minimize)\n return model", "title": "" }, { "docid": "6a66cbf69457f0de5e45538ea9fcc160", "score": "0.49529898", "text": "def _calc_min_cost_obj(self, phenotype):\n obj = 0.0\n for runway in phenotype:\n for i, x_i, rw_i in runway:\n alpha_i = max(0, self.T[i] - x_i)\n beta_i = max(0, x_i - self.T[i])\n obj += (alpha_i * self.g[i] + beta_i * self.h[i])\n return -1 * obj", "title": "" }, { "docid": "acbe70007dac97d1bbdaae7fb00b9938", "score": "0.49529234", "text": "def minimize(self):\r\n if self.debug_level>0:\r\n print '-'*90\r\n print '::: initial values:', self.initial_values\r\n print '::: constraints:', self.constraints\r\n res = sp_minimize(self, self.initial_values, constraints=self.constraints, )\r\n\r\n if self.debug_level>0:\r\n print res\r\n return res", "title": "" }, { "docid": "ec9b1b61d3ce2bdf41599917beeac59d", "score": "0.49513823", "text": "def add_objective(self, var):\n self._add_var('func', var)", "title": "" }, { "docid": "30e579f3534b5474143c2ddb44fbbdca", "score": "0.4948328", "text": "def getObjective(self):\n\n return self.objective", "title": "" }, { "docid": "03981349289734eb178e0e1deeae904b", "score": "0.49373212", "text": "def optimize():\n \n equalization_factors_to_optimize = [1 for _ in range(n_motor_units_chunk)]\n print(\"optimize(n_motor_units={}, len(mu_positions)={}, n_motor_units_chunk={})\".format(n_motor_units, len(mu_positions), n_motor_units_chunk))\n \n # solve optimization problem using sparse grids, if 
this fails, fall back to scipy.optimize\n try:\n import pysgpp\n import traceback\n print(\"The module pysgpp is available.\")\n \n if n_max_iterations != -1:\n raise Exception(\"The value of <n_max_iterations> is not set to -1, this means do not use pysgpp but scipy.optimize.\")\n\n class ExampleFunction(pysgpp.ScalarFunction):\n \"\"\"objective function\"\"\"\n def __init__(self):\n super(ExampleFunction, self).__init__(size_optimization)\n \n def eval(self, x):\n \"\"\"Evaluates the function.\"\"\"\n print(\"x:{}\".format(x))\n try:\n #scaled_x = [value*2 for value in x]\n for i in range(len(x)):\n x[i] = x[i]*2\n result = objective(x)\n except Exception as a:\n print(a)\n traceback.print_exc() \n print(\"result: {}\".format(result))\n return result\n \n #pysgpp.omp_set_num_threads(64)\n pysgpp.omp_set_num_threads(2)\n \n # increase verbosity of the output\n pysgpp.Printer.getInstance().setVerbosity(2)\n \n # objective function\n f = ExampleFunction()\n \n # dimension of domain\n d = f.getNumberOfParameters()\n \n # B-spline degree\n p = 3\n \n # maximal number of grid points\n N = 30\n \n # adaptivity of grid generation\n gamma = 0.95\n \n ## First, we define a grid with modified B-spline basis functions and\n ## an iterative grid generator, which can generate the grid adaptively.\n grid = pysgpp.Grid.createModBsplineGrid(d, p)\n gridGen = pysgpp.OptIterativeGridGeneratorRitterNovak(f, grid, N, gamma)\n\n ## With the iterative grid generator, we generate adaptively a sparse grid.\n print(\"Generating grid...\\n\")\n\n if not gridGen.generate():\n print(\"Grid generation failed, exiting.\")\n sys.exit(1)\n\n ## Then, we hierarchize the function values to get hierarchical B-spline\n ## coefficients of the B-spline sparse grid interpolant\n ## \\f$\\tilde{f}\\colon [0, 1]^d \\to \\mathbb{R}\\f$.\n print(\"Hierarchizing...\\n\")\n functionValues = gridGen.getFunctionValues()\n coeffs = pysgpp.DataVector(len(functionValues))\n hierSLE = pysgpp.HierarchisationSLE(grid)\n sleSolver = pysgpp.AutoSLESolver()\n\n # solve linear system\n if not sleSolver.solve(hierSLE, gridGen.getFunctionValues(), coeffs):\n print(\"Solving failed, exiting.\")\n sys.exit(1)\n\n ## We define the interpolant \\f$\\tilde{f}\\f$ and its gradient\n ## \\f$\\nabla\\tilde{f}\\f$ for use with the gradient method (steepest descent).\n ## Of course, one can also use other optimization algorithms from\n ## sgpp::optimization::optimizer.\n print(\"Optimizing smooth interpolant...\\n\")\n ft = pysgpp.InterpolantScalarFunction(grid, coeffs)\n ftGradient = pysgpp.InterpolantScalarFunctionGradient(grid, coeffs)\n gradientDescent = pysgpp.OptGradientDescent(ft, ftGradient)\n x0 = pysgpp.DataVector(d)\n\n ## The gradient method needs a starting point.\n ## We use a point of our adaptively generated sparse grid as starting point.\n ## More specifically, we use the point with the smallest\n ## (most promising) function value and save it in x0.\n gridStorage = gridGen.getGrid().getStorage()\n\n # index of grid point with minimal function value\n x0Index = 0\n fX0 = functionValues[0]\n for i in range(1, len(functionValues)):\n if functionValues[i] < fX0:\n fX0 = functionValues[i]\n x0Index = i\n\n x0 = gridStorage.getCoordinates(gridStorage.getPoint(x0Index));\n ftX0 = ft.eval(x0)\n\n print(\"x0 = {}\".format(x0))\n print(\"f(x0) = {:.6g}, ft(x0) = {:.6g}\\n\".format(fX0, ftX0))\n\n ## We apply the gradient method and print the results.\n gradientDescent.setStartingPoint(x0)\n gradientDescent.optimize()\n xOpt = 
gradientDescent.getOptimalPoint()\n ftXOpt = gradientDescent.getOptimalValue()\n fXOpt = f.eval(xOpt)\n\n print(\"\\nxOpt = {}\".format(xOpt))\n print(\"f(xOpt) = {:.6g}, ft(xOpt) = {:.6g}\\n\".format(fXOpt, ftXOpt))\n\n scaled_x = [value*2 for value in xOpt]\n equalization_factors_to_optimize = scaled_x\n \n except Exception as e:\n print(e)\n \n bounds = [(0,None) for i in range(n_motor_units_chunk)]\n\n result = scipy.optimize.minimize(objective, equalization_factors_to_optimize, bounds=bounds, options={\"disp\": True, \"maxiter\": n_max_iterations})\n print(\"Result of optimizer: success: {}, status: {}, message: {}, n objective evaluations: {}, n iterations: {}\".format(result.success, result.status, result.message, result.nfev, result.nit))\n equalization_factors_to_optimize = result.x\n \n return equalization_factors_to_optimize", "title": "" }, { "docid": "e5c4afbd491c6eddedae86f0e8a88c43", "score": "0.49234957", "text": "def milp():\n m = pyo.ConcreteModel()\n m.x = pyo.Var(domain=pyo.Integers, initialize=3)\n m.y = pyo.Var(domain=pyo.Integers, initialize=3)\n m.c1 = pyo.Constraint(expr=m.x >= 0.5)\n m.c2 = pyo.Constraint(expr=m.y >= 1.5)\n m.c3 = pyo.Constraint(expr=m.x <= 5)\n m.c4 = pyo.Constraint(expr=m.y <= 5)\n m.obj = pyo.Objective(expr=m.x + m.y)\n return m, 1", "title": "" }, { "docid": "39b4de475e2dc8b0d329e8bff0045411", "score": "0.49111336", "text": "def build_bilevel_optModel(self): \n # Define parameters and scalars needed to define the optimizaiton problem\n self.define_optModel_params()\n\n # Create a pyomo model optimization model\n optModel = ConcreteModel()\n \n #--- Sets ---\n # Set of compounds \n optModel.I = Set(initialize = self.model.compounds) \n\n # Set of rxns \n optModel.J = Set(initialize = self.model.reactions) \n\n #--- Variables --- \n # Reaction fluxes\n optModel.v = Var(optModel.J, domain=Reals, bounds = self.assignFluxBounds)\n\n # Dual variables associated with steady-state mass balance constraints\n optModel.Lambda = Var(optModel.I, domain=Reals)\n \n # Dual variables associated with v_j >= LB_j*yLB_j and v_j <= UB_j*yUB_j \n optModel.muLB = Var(optModel.J, domain=Reals, bounds = (0,self._muLB_max))\n optModel.muUB = Var(optModel.J, domain=Reals, bounds = (0,self._muUB_max))\n\n # Product of muLB_j*yLB_j and muUB_j*yUB_j\n optModel.muLByLB = Var(optModel.J, domain=Reals, bounds = (0,self._muLB_max))\n optModel.muUByUB = Var(optModel.J, domain=Reals, bounds = (0,self._muUB_max))\n\n # Binary variables in v_j >= LB_j*yLB_j and v_j <= UB_j*yUB_j \n optModel.yLB = Var(optModel.J, domain=Boolean)\n optModel.yUB = Var(optModel.J, domain=Boolean)\n\n #--- Objective function (outer level)----\n # Objective function\n optModel.objectiveFunc = Objective(rule=self.outer_objectiveFunc_rule, sense = minimize)\n\n #--- Constraints of the outer-level problem ---\n # Constraint on the total number of modificaitons\n optModel.total_modifications_const = Constraint(rule=self.total_modifications_const_rule)\n\n # Constraint on the max biomass flux \n optModel.biomass_const = Constraint(rule=self.max_biomass_const_rule)\n\n # Integer cuts\n optModel.integer_cuts = ConstraintList(noruleinit=True) \n \n #-- Constraints of the primal problem --\n # Mass balance \n optModel.massBalance_const = Constraint(optModel.I, rule=self.massBalance_const_rule)\n\n # v_j >= LB_j*yLB_j and v_j <= UB_j*yUB_j\n optModel.fluxLB_const = Constraint(optModel.J, rule=self.fluxLB_const_rule)\n optModel.fluxUB_const = Constraint(optModel.J, rule=self.fluxUB_const_rule)\n\n #-- Constraints 
of the dual problem --\n # dual constraints \n optModel.dual_const = Constraint(optModel.J, rule=self.dual_const_rule)\n \n # Strong duality \n optModel.strong_duality_const = Constraint(rule=self.strong_duality_const_rule)\n\n # Constraints linearizing muLB_j*yLB_j and muUB_j*yUB_j\n optModel.linearize_muLByLB_const1 = Constraint(optModel.J, rule=self.linearize_muLByLB_const1_rule)\n optModel.linearize_muLByLB_const2 = Constraint(optModel.J, rule=self.linearize_muLByLB_const2_rule)\n optModel.linearize_muLByLB_const3 = Constraint(optModel.J, rule=self.linearize_muLByLB_const3_rule)\n optModel.linearize_muUByUB_const1 = Constraint(optModel.J, rule=self.linearize_muUByUB_const1_rule)\n optModel.linearize_muUByUB_const2 = Constraint(optModel.J, rule=self.linearize_muUByUB_const2_rule)\n optModel.linearize_muUByUB_const3 = Constraint(optModel.J, rule=self.linearize_muUByUB_const3_rule)\n \n self.optModel = optModel", "title": "" }, { "docid": "4fa5b4ed5f8835dc5f3d9eee2db7dbf5", "score": "0.49110568", "text": "def optimize(self):\n if hasattr(self, '_n_splits'):\n self.best_params = fmin(self._object_score_cv, self._all_params, algo=tpe.suggest,\n max_evals=self._num_opts, trials=self.trials)\n else:\n self.best_params = fmin(self._object_score, self._all_params, algo=tpe.suggest,\n max_evals=self._num_opts, trials=self.trials)\n # save trials for further fine-tune\n pickle.dump(self.trials, open(self._trials_path, \"wb\"))\n # there are some params in best_params is index value\n # we need to convert back to actual value\n self._index2value()", "title": "" }, { "docid": "5412e8961200572fa71adf3e297b69a8", "score": "0.4907186", "text": "def set_optimizer(self, probe):\n self.optimizer = optim.Adam(probe.parameters(), lr=0.001)\n self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.1,patience=0)", "title": "" }, { "docid": "bff1ebd17163008422e2a6cbd85e9e54", "score": "0.48996443", "text": "def maximize(self, maximize):\r\n self.Maximize(maximize)", "title": "" }, { "docid": "2e81ad3b73beba18f972ffc0d4c62f0a", "score": "0.4889789", "text": "def objective_function(self, variables, subs, generation, annuity_scalar=1):\r\n return {}", "title": "" }, { "docid": "fe7130bca0aa94ced8e5fe952fd79501", "score": "0.48887497", "text": "def minimizeEnergy(self, tolerance=1*unit.kilojoule/unit.mole, maxIterations=0):\n mm.LocalEnergyMinimizer.minimize(self.context, tolerance, maxIterations)", "title": "" }, { "docid": "f25f194a588e35052289a9975a6bdc11", "score": "0.48842463", "text": "def set_operator_to_minimum(self):\n self.operator_name = 'minimum'", "title": "" }, { "docid": "9a3c410d18f08573522bae046e2b3f61", "score": "0.4883572", "text": "def _evaporation(self):\n self.pheromone_matrix *= (1 - self.evaporation_rate)\n self.heuristic_beta *= (1 - self.beta_evaporation_rate)", "title": "" }, { "docid": "ee6392509800d16ddfd67aacd68c2c14", "score": "0.48821133", "text": "def minimize(term):\n return _optimize(term, True)", "title": "" }, { "docid": "74790b5cfbc18f7e52c39b480c9c3865", "score": "0.4875711", "text": "def hp_tune(self):\n self.study.optimize(\n self._objective, n_trials=self.n_trials, timeout=self.timeout\n )\n self.model = self.build_model(**self.study.best_params)", "title": "" }, { "docid": "8b8a158f11e2cec0ff0aa76a8c32cd95", "score": "0.48754784", "text": "def doMatrixMinimization(self, progress):\r\n\r\n N = self.options.matrixSize.get()\r\n aList = np.linspace(self.options.aFrom.get(), self.options.aTo.get(), num=N)\r\n bList = 
np.linspace(self.options.bFrom.get(), self.options.bTo.get(), num=N)\r\n nList = np.linspace(self.options.nFrom.get(), self.options.nTo.get(), num=N)\r\n mList = np.linspace(self.options.mFrom.get(), self.options.mTo.get(), num=N)\r\n TD50List = np.linspace(self.options.TD50From.get(), self.options.TD50To.get(), num=N)\r\n\r\n if self.options.fixA.get():\r\n aList = aList[0:1]\r\n if self.options.fixB.get():\r\n bList = bList[0:1]\r\n if self.options.fixN.get():\r\n nList = nList[0:1]\r\n if self.options.fixM.get():\r\n mList = mList[0:1]\r\n if self.options.fixTD50.get():\r\n TD50List = TD50List[0:1]\r\n\r\n if self.options.NTCPcalculation.get() == \"LKB\":\r\n params = [nList, mList, TD50List]\r\n if progress:\r\n progress['maximum'] = len(nList)\r\n else:\r\n params = [aList, bList, [1]]\r\n if progress:\r\n progress['maximum'] = len(aList)\r\n\r\n toxArray = np.zeros([len(k) for k in params], dtype=np.longdouble)\r\n argTuple = ()\r\n for name, patient in list(self.patients.items()):\r\n argTuple += ((patient.getTox() >= self.options.toxLimit.get(), patient.getGEUD),)\r\n\r\n for idx1, val1 in enumerate(params[0]):\r\n if progress:\r\n progress.step(1)\r\n progress.update_idletasks()\r\n\r\n for idx2, val2 in enumerate(params[1]):\r\n for idx3, val3 in enumerate(params[2]): # [1] if Logit is used\r\n error = 0\r\n\r\n for tox, dose in argTuple:\r\n if self.options.NTCPcalculation.get() == \"LKB\":\r\n n = val1\r\n m = val2\r\n TD50 = val3\r\n gEUD = dose(n)\r\n NTCP = HPM((gEUD - TD50) / (m * TD50))\r\n else:\r\n a = val1\r\n b = val2\r\n Dpercent = dose\r\n NTCP = 1 - 1 / (1 + exp(a + b * Dpercent))\r\n\r\n if self.options.optimizationMetric.get() == \"LS\":\r\n error += (tox - NTCP) ** 2\r\n else:\r\n if tox:\r\n if NTCP > 0:\r\n error -= log(NTCP)\r\n else:\r\n error += 2500 # assume minimum probability of ~10^1000\r\n else:\r\n if NTCP < 1:\r\n error -= log(1 - NTCP)\r\n else:\r\n error += 2500\r\n\r\n toxArray[idx1, idx2, idx3] = error\r\n\r\n minIdx = np.unravel_index(np.argmin(toxArray), np.shape(toxArray))\r\n self.bestParameters = [params[0][minIdx[0]], params[1][minIdx[1]], params[2][minIdx[2]]]\r\n if self.options.NTCPcalculation.get() == \"Logit\":\r\n self.bestParameters.pop()\r\n\r\n self.calculateNTCP()\r\n\r\n if progress:\r\n progress['value'] = 0\r\n\r\n return Result(toxArray[minIdx], self.bestParameters)", "title": "" }, { "docid": "e41afeb30d123fe3292426d5c61c5f8d", "score": "0.48496", "text": "def WriteObjective(self, obj):\n\t\tfileobj = open(self.filename_result, \"a\")\n\t\t#writing obj function\n\t\tfileobj.write(\"The objective value = %d\\n\" %round(obj.getValue()))\n\t\teqn1 = []\n\t\teqn2 = []\n\t\tfor i in range(0, self.blocksize):\n\t\t\tu = obj.getVar(i)\n\t\t\t#Edit\n\t\t\t#if u.getAttr(\"x\") != 0:#before\n\t\t\tif round(u.getAttr(\"x\")) != 0:\n\t\t\t\teqn1.append(u.getAttr('VarName'))\n\t\t\t\t#Edit\n\t\t\t\t#eqn2.append(u.getAttr('x'))#Before\n\t\t\t\teqn2.append(round(u.getAttr('x')))\n\t\tlength = len(eqn1)\n\t\tfor i in range(0,length):\n\t\t\ts = eqn1[i] + \"=\" + str(eqn2[i])\n\t\t\tfileobj.write(s)\n\t\t\tfileobj.write(\"\\n\")\n\t\tfileobj.close()", "title": "" }, { "docid": "d5279fcfcb47a5301839563a713f4511", "score": "0.48491362", "text": "def run_basemodel(self, solver=None):\n model = self.m\n\n if solver is None:\n solver = 'ipopt'\n\n def demSup(model, R, S):\n return (self.X[R, S] >=\n sum(self.A_matrix[R, S, R, Sb]*self.X[R, Sb]\n for Sb in model.Sb) + self.lfd[R, S]\n + sum(self.ImportShare[R, Rb, S]*(sum(self.A_matrix[R, S, Rb, 
Sb]*self.X[Rb, Sb]\n for Sb in model.Sb) + self.fd[Rb, S]) for Rb in model.Rb if (R != Rb))\n + self.ExpROW[R, S])\n\n model.demSup = Constraint(model.R, model.S, rule=demSup, doc='Satisfy demand')\n\n def objective_base(model):\n return sum(self.X[R, S] for R in model.R for S in model.S)\n\n model.objective = Objective(rule=objective_base, sense=minimize,\n doc='Define objective function')\n\n opt = SolverFactory(solver)\n if solver is 'ipopt':\n opt.options['warm_start_init_point'] = 'yes'\n opt.options['warm_start_bound_push'] = 1e-6\n opt.options['warm_start_mult_bound_push'] = 1e-6\n opt.options['mu_init'] = 1e-6\n results = opt.solve(model, tee=True)\n # sends results to stdout\n results.write()", "title": "" }, { "docid": "07d1d406b876042029d08caee32e09bf", "score": "0.4846447", "text": "def assign_stellar_mass(self, formula=\"Grylls19\", scatter=0.11):\n self.main_catalog[\"stellar_mass\"] = main.halo_mass_to_stellar_mass(self.main_catalog[\"effective_halo_mass\"],\n self.main_catalog[\"effective_z\"],\n formula=formula,\n scatter=scatter)", "title": "" }, { "docid": "9a7e0cc0c1670bba817b52fa2ee620f3", "score": "0.4844664", "text": "def _optimize(self):\n self.stopCriteria.startTime()\n\n up = self.task.bounds.get_max().tolist()\n lb = self.task.bounds.get_min().tolist()\n if self.x0 is None:\n self.x0 = self.task.bounds.sample_uniform((1,)) # Randomly sample mean distribution\n\n def objfunc(parameters):\n return np.array(self._evaluate(np.matrix(parameters)))[:, 0][0] # Deal with transformations from/to np.matrix\n\n res = cma.fmin(objfunc, self.x0.tolist(), self.sigma,\n options={\"bounds\": [lb, up], \"verbose\": -1, \"verb_disp\": False,\n \"maxfevals\": self.stopCriteria.get_n_maxEvals(), \"popsize\": self.popsize})\n\n # Delete log file optimizer (pretty much useless)\n try:\n os.remove('outcmaesaxlen.dat')\n os.remove('outcmaesaxlencorr.dat')\n os.remove('outcmaesfit.dat')\n os.remove('outcmaesstddev.dat')\n os.remove('outcmaesxmean.dat')\n os.remove('outcmaesxrecentbest.dat')\n except:\n # Something went wrong\n pass\n\n # Logs\n # self._logs.data.n_evals = res[3]\n self._logs.data.xOpt = res[0]\n self._logs.data.fOpt = np.array(res[1])\n self._logs.data.time = np.matrix(self.stopCriteria.get_time())\n\n # self._logs.add_evals(x=np.matrix(res[0]).T, fx=np.matrix(res[1]),\n # opt_x=np.matrix(res[0]).T, opt_fx=np.matrix(res[1]),\n # time=np.matrix(self.stopCriteria.get_time())\n # )\n # self._logs.n_evals = res[3]\n\n out = np.array(res[0])\n\n return out", "title": "" }, { "docid": "750a94f77154abecbc8a114dc0f8046f", "score": "0.48435763", "text": "def configure_objective(self, phase):\n pass", "title": "" }, { "docid": "663cfdec58272507a52a2b5faaa8c61b", "score": "0.48401186", "text": "def obj(x, y, p, neq, niq, nx, np):\n x = SX.sym('x',nx) #Variable\n p = SX.sym('p',np) #Parameters\n f = p[0]*x[0]**3 + x[1]**2 #Objective array\n f_fun = Function('f_fun',[x,p],[p[0]*x[0]**3+x[1]**2]) #Objective function\n \n con = vertcat(exp(-x[0])-x[1],p[1]-x[0]) #Constraint array\n conf = Function('conf',[x,p],[exp(-x[0])-x[1],p[1]-x[0]])#Constraint function\n \n \n #Specifying Bounds\n ubx = 1e16*ones([1,nx]) #Variable upper bound\n lbx = -1e16*ones([1,nx]) #Variable lower bound\n ubg = zeros([1,niq+neq]) #Constraint upper bound\n lbg= -1e16*ones([1,niq+neq]) #Constraint lower bound\n return x, p, f, f_fun, con, conf, ubx, lbx, ubg, lbg", "title": "" }, { "docid": "cb3bc5475dc73df3a798887e0b66e977", "score": "0.48342457", "text": "def objective():\n assert len(ObjEntities.items) <= 
1\n return ObjEntities.items[0].constraint if len(ObjEntities.items) == 1 else None", "title": "" }, { "docid": "e22b5e69b3c7c97c23cf5a0946e31183", "score": "0.48338103", "text": "def set_mass(self, rname, mass):\n momentum = self.momentum_res(mass**2, self.rdict[rname]['type'])\n self.rdict[rname]['prop'].set_mass(mass, momentum)", "title": "" }, { "docid": "987845a2c153c866ef9ebe922ef72c86", "score": "0.48280948", "text": "def objective(self, indiv, *args):\n if len(args) == 0:\n (x, y0) = self.update(indiv)\n elif len(args) == 1:\n raise ValueError(\"not enough values to unpack (expected 2, got 1)\")\n elif len(args) == 2:\n (x, y0) = args\n else:\n raise ValueError(\"too many values to unpack (expected 2)\")\n\n self.set_data()\n\n if self.simulate(x, y0) is None:\n error = np.zeros(len(self.obs_names))\n for i, obs_name in enumerate(self.obs_names):\n if self.experiments[i] is not None:\n error[i] = self._compute_objval_rss(\n *self._diff_sim_and_exp(\n self.simulations[i],\n self.experiments[i],\n self.get_timepoint(obs_name),\n self.conditions,\n sim_norm_max=1\n if not self.normalization\n else (\n np.max(\n self.simulations[\n self.obs_names.index(obs_name),\n [\n self.conditions.index(c)\n for c in (\n self.normalization[obs_name][\"condition\"]\n if self.normalization[obs_name][\"condition\"]\n else self.conditions\n )\n ],\n self.normalization[obs_name][\"timepoint\"],\n ]\n )\n if self.normalization[obs_name][\"timepoint\"] is not None\n else np.max(\n self.simulations[\n self.obs_names.index(obs_name),\n [\n self.conditions.index(c)\n for c in (\n self.normalization[obs_name][\"condition\"]\n if self.normalization[obs_name][\"condition\"]\n else self.conditions\n )\n ],\n ]\n )\n ),\n )\n )\n \"\"\"\n error = np.zeros(16)\n\n norm_max = np.max(self.simulations[self.obs_names.index('Phosphorylated_MEKc')])\n error[0] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_MEKc'), self.t2, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_MEKc')]['EGF']\n )\n error[1] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_MEKc'), self.t2, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_MEKc')]['HRG']\n )\n\n norm_max = np.max(self.simulations[self.obs_names.index('Phosphorylated_ERKc')])\n error[2] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_ERKc'), self.t2, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_ERKc')]['EGF']\n )\n error[3] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_ERKc'), self.t2, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_ERKc')]['HRG']\n )\n\n norm_max = np.max(self.simulations[self.obs_names.index('Phosphorylated_RSKw')])\n error[4] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_RSKw'), self.t2, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_RSKw')]['EGF']\n )\n error[5] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_RSKw'), self.t2, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_RSKw')]['HRG']\n )\n\n norm_max = np.max(self.simulations[self.obs_names.index('Phosphorylated_CREBw')])\n error[6] = self._compute_objval_rss(\n 
self.simulations[self.obs_names.index('Phosphorylated_CREBw'), self.t3, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_CREBw')]['EGF']\n )\n error[7] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_CREBw'), self.t3, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_CREBw')]['HRG']\n )\n\n norm_max = np.max(self.simulations[self.obs_names.index('dusp_mRNA')])\n error[8] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('dusp_mRNA'), self.t5, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('dusp_mRNA')]['EGF']\n )\n error[9] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('dusp_mRNA'), self.t5, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('dusp_mRNA')]['HRG']\n )\n\n norm_max = np.max(self.simulations[self.obs_names.index('cfos_mRNA')])\n error[10] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('cfos_mRNA'), self.t4, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('cfos_mRNA')]['EGF']\n )\n error[11] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('cfos_mRNA'), self.t4, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('cfos_mRNA')]['HRG']\n )\n\n norm_max = np.max(self.simulations[self.obs_names.index('cFos_Protein')])\n error[12] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('cFos_Protein'), self.t5, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('cFos_Protein')]['EGF']\n )\n error[13] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('cFos_Protein'), self.t5, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('cFos_Protein')]['HRG']\n )\n\n norm_max = np.max(self.simulations[self.obs_names.index('Phosphorylated_cFos')])\n error[14] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_cFos'), self.t2, self.conditions.index('EGF')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_cFos')]['EGF']\n )\n error[15] = self._compute_objval_rss(\n self.simulations[self.obs_names.index('Phosphorylated_cFos'), self.t2, self.conditions.index('HRG')]/norm_max,\n self.experiments[self.obs_names.index('Phosphorylated_cFos')]['HRG']\n )\n \"\"\"\n return np.sum(error) # < 1e12\n else:\n return 1e12", "title": "" }, { "docid": "44f3f9303a5baba128f23ba77b7d5878", "score": "0.48261186", "text": "def mass(self, mass):\n\n self._mass = mass", "title": "" }, { "docid": "3ae2518ab86043e7f1c6721e68a8caf9", "score": "0.48246935", "text": "def minimize_expression(self, expression):\r\n self.leftPart(expression[0], self.dic)\r\n self.rightPart(expression[1], self.dic)\r\n for clef, val in self.dic.items():\r\n c = int(clef)\r\n v = float(val)\r\n if v != 0:\r\n self.tab.append(Equation(v, c))\r\n return self.sort_equation(self.tab)", "title": "" }, { "docid": "a9ce183845671ae121ff6ba999d79034", "score": "0.48149467", "text": "def init_optimizer(self):\n self.optimizer = optim.Adam(self.model.parameters(), lr=LR)\n self.model = self.model.to(self.device)\n if self.criterion_str == \"CE\":\n self.criterion = nn.CrossEntropyLoss()\n if self.criterion_str == \"WCE\":\n self.criterion = nn.BCEWithLogitsLoss(pos_weight=torch.FloatTensor([10]))\n elif self.criterion_str == \"BCE\":\n self.criterion = nn.BCEWithLogitsLoss()\n elif 
self.criterion_str == \"Reg\":\n self.criterion = nn.MSELoss()\n self.criterion = self.criterion.to(self.device)", "title": "" } ]
53679d492b371e2133afbc443afe9005
computes residuals based on distance from ellipsoid can be used with different lossfunctions on residual
[ { "docid": "7c6ae09ece715a1bb31ffcedcd812579", "score": "0.0", "text": "def fitting_obj_stack(param, x, y, z, i):\n\n\n # centers\n cx = param[0]\n cy = param[1]\n\n #num_layers = len(set(z))\n #assert len(param) == num_layers+2\n\n radii = param[2:]\n\n num_layers = len(radii) / 2\n radii_x = radii[0:num_layers]\n radii_y = radii[num_layers:]\n\n obj = 0\n\n\n for idx in range(len(x)):\n\n rx = radii_x[z[idx]]\n ry = radii_y[z[idx]] \n residual = (cx - x[idx])**2 / (rx**2) + (cy - y[idx])**2 / (ry**2) - 1\n\n tmp = loss_functions.squared_loss(residual)\n #obj += loss_functions.eps_loss(residual, 2)*data_intensity[idx]\n #obj += loss_functions.abs_loss(residual)\n #tmp = loss_functions.eps_loss(residual, 1)\n #obj += loss_functions.eps_loss_asym(residual, 2, 1.0, 0.3)\n #print idx, residual\n\n #obj += tmp*i[idx]\n obj += tmp\n\n\n #gradient = gradient_c + gradient_rx + gradient_ry\n # smoothness regularizer\n #for idx in xrange(len(radii_x)-1):\n # obj += (radii_x[idx] - radii_x[idx+1])**2\n\n # smoothness regularizer\n #for idx in xrange(len(radii_y)-1):\n # obj += (radii_y[idx] - radii_y[idx+1])**2\n\n # L1-regularize large radii\n #for r in radii:\n # obj += r\n\n return obj", "title": "" } ]
[ { "docid": "6bde0a984bdbc9de25e970990587fd95", "score": "0.6588138", "text": "def ellipsoid_fit_rmse(query_points, axes, center, rotmat, return_full=True): \n # Find starting angles for all query points\n ellipsoid_points, ell_theta, ell_phi = generate_ellipsoid_even(axes, center, rotmat, \n n_points=2000, return_angles=True)\n ellipsoid_angles = np.c_[ell_theta, ell_phi] # Combine ellipsoid angles into array\n \n # Find closest ellipsoid point for each query point and get the corresponding set of angles\n tree_ell = cKDTree(ellipsoid_points) # Make a KDTree\n _, idx_ell0 = tree_ell.query(query_points) # Find the closest points \n init_angle_array = ellipsoid_angles[idx_ell0] # Find the corresponding angles\n \n # Prepare output arrays\n dist_pe = np.zeros(query_points.shape[0])\n ell_points_nearest = np.zeros(query_points.shape)\n \n # Run minimization\n for i, point in enumerate(tqdm(query_points)):\n #if i > 10000: break\n init_angles = tuple( init_angle_array[i] )\n dist_pe[i], ell_points_nearest[i,:], res = minimum_distance_ellipsoid(point, center, \n axes, rotmat, \n init_angles=init_angles)\n \n # Calculate the rmse\n residual_sum_squares = np.sum( np.power(dist_pe, 2) )\n ellipsoid_rmse = np.sqrt(residual_sum_squares / len(dist_pe))\n \n if return_full:\n return ellipsoid_rmse, dist_pe, ell_points_nearest\n\n return ellipsoid_rmse", "title": "" }, { "docid": "99b498ccb7febea11601c4879176242d", "score": "0.6200724", "text": "def calculate_residuals(self):\n func = self.fit.to_sherpa_model()\n x = self.points['energy'].quantity\n y = self.points['flux'].quantity\n y_err = self.points['flux_err_hi'].quantity\n\n func_y = func(x.to('keV').value) * Unit('s-1 cm-2 keV-1')\n residuals = (y - func_y) / y\n # Todo: add correct formular (butterfly)\n residuals_err = y_err / y\n\n return residuals.decompose(), residuals_err.decompose()", "title": "" }, { "docid": "a246b6161f977f023c4fac1415fe74d9", "score": "0.6094676", "text": "def residual(objectives):\n return solver(objectives).getResidual()", "title": "" }, { "docid": "e826284f14b8aa506a412c705bfbf756", "score": "0.5952468", "text": "def computeResidualsXYZ(invprojvars, GCPxyz, GCPuv, dem):\r\n GCPxyz_proj = projectUV(GCPuv, invprojvars) \r\n \r\n #Compute residuals using pythag theorem (i.e. 
pixel difference between pts)\r\n residual=[]\r\n for i in range(len(GCPxyz_proj)):\r\n residual.append(np.sqrt((GCPxyz_proj[i][0]-GCPxyz[i][0])**2 + \r\n (GCPxyz_proj[i][1]-GCPxyz[i][1])**2)) \r\n residual = np.array(residual) \r\n\r\n fig, (ax1) = plt.subplots(1, figsize=(20,10))\r\n fig.canvas.set_window_title('Average residual difference: ' + \r\n str(np.nanmean(residual)) + ' m')\r\n \r\n #Plot DEM and set cmap\r\n demextent = dem.getExtent()\r\n demz = dem.getZ() \r\n implot = ax1.imshow(demz, origin='lower', extent=demextent)\r\n implot.set_cmap('gray')\r\n ax1.axis([demextent[0], demextent[1],demextent[2], demextent[3]])\r\n \r\n #Plot UV GCPs\r\n ax1.scatter(GCPxyz[:,0], GCPxyz[:,1], color='red', marker='+', \r\n label='XYZ')\r\n \r\n #Plot projected XYZ GCPs\r\n ax1.scatter(GCPxyz_proj[:,0], GCPxyz_proj[:,1], color='blue', \r\n marker='+', label='Projected UV')\r\n \r\n #Add legend and show plot\r\n ax1.legend()\r\n plt.show() \r\n \r\n #Return all residuals\r\n return residual", "title": "" }, { "docid": "80a0d190fcdea6452cb180ce09ad84f0", "score": "0.5853911", "text": "def _residuals(self, pars,):\n v = pars.valuesdict()\n model = self._calc_model(v)\n\n resid = np.sqrt((self._fit_data - model)**2 * self._fit_weights**2)\n\n return np.squeeze(resid.flatten())", "title": "" }, { "docid": "707d41275ff119efb2a8b6a55f8b82db", "score": "0.58476025", "text": "def ad_boundary_residual(solver_obj, dual_new=None, dual_old=None): # TODO: Generalise to other BCs and timesteps\n\n # Collect fields and parameters\n if dual_new is not None and dual_old is not None:\n tracer_new = dual_new\n tracer_old = dual_old\n else:\n tracer_new = solver_obj.fields.tracer_2d\n tracer_old = solver_obj.timestepper.tracer_old\n tracer_2d = 0.5 * (tracer_old + tracer_new)\n\n # Create P0 TestFunction, scaled to take value 1 in each cell. 
This has the effect of conserving mass upon\n # premultiplying piecewise constant and piecewise linear functions.\n mesh = solver_obj.mesh2d\n P0 = FunctionSpace(mesh, \"DG\", 0)\n # v = Constant(mesh.num_cells()) * TestFunction(P0)\n v = TestFunction(P0)\n n = FacetNormal(mesh)\n\n # Construct residuals across edges\n edge_contribution = Constant(0.5) * jump(-grad(tracer_2d), n=n) * (v('+') + v('-')) * dS\n boundary_contribution = -dot(grad(tracer_2d), n) * v * ds\t# TODO: This is not applied on all boundaries\n\n return Function(P0).interpolate(assemble(edge_contribution + boundary_contribution))", "title": "" }, { "docid": "12945a1f9aecbbc83d3d3a1dde1f3e02", "score": "0.5827588", "text": "def compute_error_residual(residual, nz):\n # ***************************************************\n t = np.array([residual[d,n] for (d,n) in nz])\n rmse = np.sqrt(t.dot(t.T)/len(nz))\n # ***************************************************\n return rmse", "title": "" }, { "docid": "68640805bab54c19fc58abbcc4ee2079", "score": "0.57795995", "text": "def solve_linear(self, d_outputs, d_residuals, mode):\n pass", "title": "" }, { "docid": "269454aa9190e2b55f68c3997112143f", "score": "0.57592785", "text": "def prepare_residual(res, s=1.):\n \n data = {}\n shape = res[[*res][0]].shape\n all_comps = ['vx', 'vz', 'taux', 'tauz', 'tauxz']\n \n for param in all_comps:\n if param in res:\n data[param] = s * res[param]\n else:\n data[param] = np.zeros(shape, np.float32)\n return data", "title": "" }, { "docid": "67cf897e6a23ba42ad3713d1161de996", "score": "0.5735301", "text": "def get_3d_residual(clear_image_volume, blurry_image_volume):\n\n # Convert blurry_image and clear_image into 2 dimensional arrays -- from (x,x,x,1) to (x,x,x,)\n blurry_image_volume = blurry_image_volume.reshape(blurry_image_volume.shape[0], blurry_image_volume.shape[1], blurry_image_volume.shape[2])\n clear_image_volume = clear_image_volume.reshape(clear_image_volume.shape[0], clear_image_volume.shape[1], clear_image_volume.shape[2])\n\n # Throw away the SSIM score and keep the residual between the two images\n (_, residual) = structural_similarity(blurry_image_volume, clear_image_volume, full=True)\n\n return residual", "title": "" }, { "docid": "c761579e1d1431e35105889bfa6d6627", "score": "0.5728806", "text": "def fitting_obj_sample(param):\n\n\n obj = 0\n\n # centers\n cx = param[0]\n cy = param[1]\n cz = param[2]\n\n rx = param[3]\n ry = param[4]\n rz = param[5]\n \n sx, sy, sz = ellipsoid(cx, cy, cz, rx, ry, rz, 20)\n num_samples = len(sx)\n\n #plot_point_cloud(sx, sy, sz)\n\n print \"num_samples\", num_samples\n\n #import pdb \n #pdb.set_trace()\n\n #data = numpy.array(zip(sx, sy, sz)).T\n #tree = kdt.kdtree( data, leafsize=1000 )\n\n data = zip(sx, sy, sz)\n tree = KDTree.construct_from_data(data)\n\n num_queries = len(x)\n\n print \"num_queries\", num_queries\n\n global global_loss\n global_loss = numpy.zeros(num_queries)\n\n for idx in range(num_queries):\n\n \"\"\"\n Compute the unique root tbar of F(t) on (-e2*e2,+infinity);\n\n x0 = e0*e0*y0/(tbar + e0*e0);\n x1 = e1*e1*y1/(tbar + e1*e1);\n x2 = e2*e2*y2/(tbar + e2*e2);\n\n distance = sqrt((x0 - y0)*(x0 - y0) + (x1 - y1)*(x1 - y1) + (x2 - y2)*(x2 - y2))\n \"\"\"\n\n query = (x[idx], y[idx], z[idx])\n nearest, = tree.query(query_point=query, t=1)\n residual = dist.euclidean(query, nearest)\n\n #obj += loss_functions.squared_loss(residual)\n #obj += loss_functions.abs_loss(residual)\n #obj += loss_functions.eps_loss(residual, 2)\n #obj += loss_functions.eps_loss_bounded(residual, 
2)\n loss_xt = loss_functions.eps_loss_asym(residual, 2, 1.0, 0.2)\n obj += loss_xt\n global_loss[idx] = num_queries\n\n #obj += eps_loss(residual, 2)*data_intensity[idx]\n\n # add regularizer to keep radii close\n reg = 10 * regularizer(param)\n\n print \"loss\", obj\n print \"reg\", reg\n\n obj += reg\n\n return obj", "title": "" }, { "docid": "03f5bad2102cfc63d7713b888088e4b6", "score": "0.5686537", "text": "def fit_ellipsoid(dx, dy, dz, di, num_points=None):\n\n\n is_sphere = True\n\n num_points = len(dx)\n idx = range(num_points)\n random.shuffle(idx) \n subset_idx = idx[0:500]\n\n global x,y,z,i\n x = numpy.array(dx)[subset_idx]\n y = numpy.array(dy)[subset_idx]\n z = numpy.array(dz)[subset_idx]\n i = numpy.array(di)[subset_idx]\n \n print \"num data points: %i\" % (len(x))\n\n if is_sphere:\n x0 = numpy.array([0, 0, 0, 5])\n else:\n x0 = numpy.array([15, 15, 10, 5, 5, 5])\n x0[0] = numpy.average(x)\n x0[1] = numpy.average(y)\n x0[2] = numpy.average(z)\n\n print \"center guess: x=%f, y=%f, z=%f\" % (x0[0], x0[1], x0[2])\n\n #x_opt = scipy.optimize.fmin(fitting_obj, x0)\n epsilon = 0.5\n\n bounds = []\n bounds.append((0, None)) # cx\n bounds.append((0, None)) # cy\n bounds.append((0, None)) # cz\n bounds.append((0, None)) # rx\n\n if not is_sphere:\n bounds.append((0, None)) # ry\n bounds.append((0, None)) # rz\n\n if is_sphere:\n #x_opt, nfeval, rc = scipy.optimize.fmin_l_bfgs_b(fitting_obj, x0, bounds=bounds, approx_grad=True, iprint=5)\n #x_opt = scipy.optimize.fmin(fitting_obj_sphere_sample, x0, xtol=epsilon, ftol=epsilon, disp=True, full_output=True)[0]\n #x_opt = scipy.optimize.fmin(fitting_obj, x0, xtol=epsilon, ftol=epsilon, disp=True, full_output=True)[0]\n x_opt, nfeval, rc = scipy.optimize.fmin_tnc(fitting_obj, x0, bounds=bounds, approx_grad=True, messages=5)\n return x_opt[0], x_opt[1], x_opt[2], x_opt[3], x_opt[3], x_opt[3]\n\n else:\n #x_opt, nfeval, rc = scipy.optimize.fmin_l_bfgs_b(fitting_obj, x0, bounds=bounds, approx_grad=True, iprint=5)\n x_opt = scipy.optimize.fmin(fitting_obj_sample, x0, xtol=epsilon, ftol=epsilon, disp=True, full_output=True)[0]\n return x_opt[0], x_opt[1], x_opt[2], x_opt[3], x_opt[4], x_opt[5]", "title": "" }, { "docid": "4b6608d84d617ac97e9764d59180b413", "score": "0.5681945", "text": "def test_regress_residuals(self):\n x = [1.0,2.0,3.0,4.0,5.0]\n y = [2.1,4.2,5.9,8.4,9.6]\n result = regress_residuals(x, y)\n self.assertFloatEqual(result, [-0.1, 0.08, -0.14, 0.44, -0.28])", "title": "" }, { "docid": "801b9f4c63a98bec8f6d975ec80f60b3", "score": "0.5681814", "text": "def residuals(par, y, x):\n return y - total(x, par)", "title": "" }, { "docid": "29ab9ca7374629e03ae92dfd0f418413", "score": "0.5681603", "text": "def sw_boundary_residual(solver_obj, dual_new=None, dual_old=None): # TODO: Generalise to other BCs and timesteps\n\n # Collect fields and parameters\n if dual_new is not None and dual_old is not None:\n uv_new, elev_new = dual_new.split()\n uv_old, elev_old = dual_old.split()\n else:\n uv_new, elev_new = solver_obj.fields.solution_2d.split()\n uv_old, elev_old = solver_obj.timestepper.solution_old.split()\n b = solver_obj.fields.bathymetry_2d\n uv_2d = 0.5 * (uv_old + uv_new) # Use Crank-Nicolson timestepping so that we isolate errors as being\n elev_2d = 0.5 * (elev_old + elev_new) # related only to the spatial discretisation\n H = b + elev_2d\n\n # Create P0 TestFunction, scaled to take value 1 in each cell. 
This has the effect of conserving mass upon\n # premultiplying piecewise constant and piecewise linear functions.\n mesh = solver_obj.mesh2d\n P0 = FunctionSpace(mesh, \"DG\", 0)\n # v = Constant(mesh.num_cells()) * TestFunction(P0) # Scaled to take value 1 in each cell\n v = TestFunction(P0)\n n = FacetNormal(mesh)\n\n # Construct residuals across edges\n bres_u1 = Function(P0)\t# No contribution\n bres_u2 = Function(P0)\t# No contribution\n edge_contribution = Constant(0.5) * jump(-H * uv_2d, n=n) * (v('+') + v('-')) * dS\n boundary_contribution = -dot(H * uv_2d, n) * v * ds\n bres_e = Function(P0).interpolate(assemble(edge_contribution + boundary_contribution))\n\n return bres_u1, bres_u2, bres_e", "title": "" }, { "docid": "74fe9ef302a69a3edea6b0c6e496ee21", "score": "0.56718326", "text": "def __prepareEllipsoid(self):\r\n if (self.__initialized == False):\r\n return None\r\n \r\n e2 = self.__flattening * (2.0 - self.__flattening)\r\n n = self.__flattening / (2.0 - self.__flattening)\r\n self.__a_roof = self.__axis / (1.0 + n) * (1.0 + n*n/4.0 + n*n*n*n/64.0)\r\n # Prepare ellipsoid-based stuff for geodetic_to_grid.\r\n self.__A = e2\r\n self.__B = (5.0*e2*e2 - e2*e2*e2) / 6.0\r\n self.__C = (104.0*e2*e2*e2 - 45.0*e2*e2*e2*e2) / 120.0\r\n self.__D = (1237.0*e2*e2*e2*e2) / 1260.0\r\n self.__beta1 = n/2.0 - 2.0*n*n/3.0 + 5.0*n*n*n/16.0 + 41.0*n*n*n*n/180.0\r\n self.__beta2 = 13.0*n*n/48.0 - 3.0*n*n*n/5.0 + 557.0*n*n*n*n/1440.0\r\n self.__beta3 = 61.0*n*n*n/240.0 - 103.0*n*n*n*n/140.0\r\n self.__beta4 = 49561.0*n*n*n*n/161280.0\r\n # Prepare ellipsoid-based stuff for grid_to_geodetic.\r\n self.__delta1 = n/2.0 - 2.0*n*n/3.0 + 37.0*n*n*n/96.0 - n*n*n*n/360.0\r\n self.__delta2 = n*n/48.0 + n*n*n/15.0 - 437.0*n*n*n*n/1440.0\r\n self.__delta3 = 17.0*n*n*n/480.0 - 37*n*n*n*n/840.0\r\n self.__delta4 = 4397.0*n*n*n*n/161280.0\r\n self.__Astar = e2 + e2*e2 + e2*e2*e2 + e2*e2*e2*e2\r\n self.__Bstar = -(7.0*e2*e2 + 17.0*e2*e2*e2 + 30.0*e2*e2*e2*e2) / 6.0\r\n self.__Cstar = (224.0*e2*e2*e2 + 889.0*e2*e2*e2*e2) / 120.0\r\n self.__Dstar = -(4279.0*e2*e2*e2*e2) / 1260.0", "title": "" }, { "docid": "6bcaa51d1752649887d2b7a27790d269", "score": "0.56661916", "text": "def Ellipsoid(self) -> object:", "title": "" }, { "docid": "5d60ac2e2ed06576e3004255c031d02e", "score": "0.5659883", "text": "def _residual(self, x):\n h = x\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n h = F.avg_pool2d(h, 2)\n\n return h", "title": "" }, { "docid": "65d3e065f5245fc7bfdc8556c7babf2b", "score": "0.5655958", "text": "def calibrate(eyepts, worldpts):\n\tworldx = np.array(worldpts)[:,0];\t\n\tworldy = np.array(worldpts)[:,1];\t\n\teyex = np.array(eyepts)[:,0];\t\n\teyey = np.array(eyepts)[:,1];\t\n\n\txcoeff = fit_polynomial_surf(eyex, eyey, worldx);\n\tycoeff = fit_polynomial_surf(eyex, eyey, worldy);\n\n\tprint \"xcoeff\";\t\n\tprint xcoeff;\t\n\tprint \"ycoeff\";\t\n\tprint ycoeff;\t\n\treturn xcoeff, ycoeff;", "title": "" }, { "docid": "92d642b11335388ea07beeb1cd70fea6", "score": "0.56481373", "text": "def residual_analysis(self):\n pass", "title": "" }, { "docid": "40eed76a0f6d98005ca23633b184a998", "score": "0.56222546", "text": "def _residuals(params, t0, c, zr, dens_interp, len_arr, time_target):\n\n l0, v0, v_inf, sigma = params\n\n # Compute the time guess with the given parameters\n time_arr = timeFromLen(len_arr, t0, l0, v0, v_inf, sigma, c, zr, dens_interp)\n\n # Sum of squared residuals\n cost = np.sum((time_target - time_arr)**2)\n\n # # Sum of absolute residuals\n # cost = np.sum(np.abs(time_target 
- time_arr))\n\n print(\"Cost = {:16.10f}, guess: l0 = {:7.3f}, v0 = {:6.3f}, vi = {:6.3f}, sigma = {:.5f}\".format(cost, *params))\n\n return cost", "title": "" }, { "docid": "43f38d452b72a79a0f5a5e4de215eb89", "score": "0.5579783", "text": "def eshtens(G, lam, dropaxes, R):\n\n\n #---- Axis ratios (B.85) ----\n a3, a2, a1 = dropaxes\n C = max( [ a3/a1, 0 ] ) # C = c/a\n D = max( [ a3/a2 , 0] ) # D = c/b\n TINY = 1e-14 # tol for using C or D = 0 formulae\n\n #---- Build Eshelby tensors Sm and Tm in the principal axis system ---\n Sm = np.zeros([6,6])\n Tm = np.zeros([6,6])\n\n C2 = C**2\n\n # independent components for various cases of C and D:\n if D < TINY:\n # disk shape (U corner)\n Sm[0,0] = 1.0\n Sm[0,2] = 1.0\n\n elif (1-(D-C)) < TINY:\n # circular cylinder (B corner)\n Sm[0,0] = 0.75\n Sm[1,1] = 0.75\n Sm[0,2] = 0.50\n Sm[1,0] = 0.25\n\n elif 1-C < TINY:\n # sphere (T corner)\n Sm[0,0] = 0.60\n Sm[1,1] = 0.60\n Sm[2,2] = 0.60\n Sm[2,1] = 0.20\n Sm[0,2] = 0.20\n Sm[1,0] = 0.20\n\n elif C < TINY:\n # ellipsoidal cylinder (UB edge)\n Sm[0,0] = (1+2*D) / (1+D)**2\n Sm[1,1] = D*(2+D) / (1+D)**2\n Sm[0,2] = 1 / (1+D)\n Sm[1,0] = D**2 / (1+D)**2\n\n elif (1-D) < TINY:\n # prolate spheriod (BT edge)\n exp1 = 0.5*np.log( (1+np.sqrt(1-C2)) / (1-np.sqrt(1-C2)) )\n it = (np.sqrt(1-C2) - C2*exp1) / (2*((1-C2)**(1.5)))\n Sm[0,0] = 0.75 * (1-3*it*C2) / (1-C2)\n Sm[1,1] = Sm[0,0]\n Sm[2,2] = (3-6*it-C2) / (1-C2)\n Sm[2,1] = C2 * (3*it-1) / (1-C2)\n Sm[0,2] = (3*it-1) / (1-C2)\n Sm[1,0] = 0.25*(1-3*it*C2) / (1-C2)\n\n elif (D-C) < TINY:\n # oblate spheriod (TU edge)\n it = C * (np.arccos(C) - C*np.sqrt(1-C2)) / (2*((1-C2)**(1.5)))\n Sm[0,0] = (1+6*C2*it-3*C2) / (1-C2)\n Sm[1,1] = 0.75*(3*it-C2) / (1-C2)\n Sm[2,2] = Sm[1,1]\n Sm[2,1] = 0.25*(3*it-C2) / (1-C2)\n Sm[0,2] = (1-3*it) / (1-C2)\n Sm[1,0] = C2 * (1-3*it) / (1-C2)\n\n else:\n # general ellipsoid\n \n # Elliptic integral quantities (B.87, 88, 91, and Wetzel's \"calcesh.f\")\n RD = ellip1(C2, (C/D)**2, 1)\n RF = ellip2(C2, (C/D)**2, 1)\n F = np.sqrt(1-C2)*RF\n E = F - ((1-(C/D)**2)*np.sqrt(1-C2)*RD) / 3.0\n \n Ja = (C2*D*(F-E)) / ((D**2-C2)*np.sqrt(1-C2))\n Jc = (np.sqrt(1-C2) - D*E) / ((1-D**2) * np.sqrt(1-C2))\n Jb = 1 - Ja - Jc\n \n # Eqns (B.79-84)\n \n # Main components. From Eqns. (B.79-84) with 1 and 3 indices swapped\n # The 1-3 swapping occurs because Wetzel's appendix B associates S(1,1)\n # with the longest semi-axis of the ellipse, which is the smallest\n # eigenvalue/vector of G.\n Sm[0,0] = 1 + C2*(Ja-Jc) / (1-C2) + D**2*(Jb-Jc) / (1-D**2)\n Sm[1,1] = 1 + (Jb-Jc) / (1-D**2) + C2*(Ja-Jb) / (D**2-C2)\n Sm[2,2] = 1 + D**2*(Ja-Jb) / (D**2-C2) + (Ja-Jc) / (1 -C2)\n Sm[2,1] = C2*(Jb-Ja) / (D**2-C2)\n Sm[0,2] = (Jc-Ja) / (1-C2)\n Sm[1,0] = D**2*(Jc-Jb) / (1-D**2)\n\n # End of main principal-axis components for various cases of C and D\n\n\n\n #---- Fill in remaining principal-axis components using (B.103-105) ----\n Sm[1,2] = 1.0 - Sm[0,2] - Sm[2,2]\n Sm[2,0] = 1.0 - Sm[1,0] - Sm[0,0]\n Sm[0,1] = 1.0 - Sm[2,1] - Sm[1,1]\n \n Sm[3,3] = 0.5*( Sm[1,2] + Sm[2,1] )\n Sm[4,4] = 0.5*( Sm[2,0] + Sm[0,2] )\n Sm[5,5] = 0.5*( Sm[0,1] + Sm[1,0] )\n \n Tm[3,3] = 0.5*( Sm[2,1] - Sm[1,2] ) # these eqns. match (B.105). 
In\n Tm[4,4] = 0.5*( Sm[0,2] - Sm[2,0] ) # calcesh.f they differ by a sign,\n Tm[5,5] = 0.5*( Sm[1,0] - Sm[0,1] ) # which is later corrected in the C eqn.\n\n\n #---- 6x6 rotation matrices and associated quantities -----\n Qa=np.array(\n [[R[0,0]*R[0,0], R[0,1]*R[0,1], R[0,2]*R[0,2], \n R[0,1]*R[0,2], R[0,2]*R[0,0], R[0,0]*R[0,1]],\n [R[1,0]*R[1,0], R[1,1]*R[1,1], R[1,2]*R[1,2], \n R[1,1]*R[1,2], R[1,2]*R[1,0], R[1,0]*R[1,1]],\n [R[2,0]*R[2,0], R[2,1]*R[2,1], R[2,2]*R[2,2], \n R[2,1]*R[2,2], R[2,2]*R[2,0], R[2,0]*R[2,1]],\n [R[1,0]*R[2,0], R[1,1]*R[2,1], R[1,2]*R[2,2], \n R[1,1]*R[2,2], R[1,2]*R[2,0], R[1,0]*R[2,1]],\n [R[2,0]*R[0,0], R[2,1]*R[0,1], R[2,2]*R[0,2], \n R[2,1]*R[0,2], R[2,2]*R[0,0], R[2,0]*R[0,1]],\n [R[0,0]*R[1,0], R[0,1]*R[1,1], R[0,2]*R[1,2], \n R[0,1]*R[1,2], R[0,2]*R[1,0], R[0,0]*R[1,1]]])\n\n Qb=np.array(\n [[0, 0, 0, R[0,2]*R[0,1], R[0,0]*R[0,2], R[0,1]*R[0,0]],\n [0, 0, 0, R[1,2]*R[1,1], R[1,0]*R[1,2], R[1,1]*R[1,0]],\n [0, 0, 0, R[2,2]*R[2,1], R[2,0]*R[2,2], R[2,1]*R[2,0]],\n [0, 0, 0, R[1,2]*R[2,1], R[1,0]*R[2,2], R[1,1]*R[2,0]],\n [0, 0, 0, R[2,2]*R[0,1], R[2,0]*R[0,2], R[2,1]*R[0,0]],\n [0, 0, 0, R[0,2]*R[1,1], R[0,0]*R[1,2], R[0,1]*R[1,0]]])\n\n Q = Qa+Qb # rotation matrix for symmetric tensors\n Qu = Qa-Qb # rotation matrix for unsymmetric tensors\n \n #---- contracted 4th-order identity tensor and its inverse ----\n Id4 = np.diag(np.array([1, 1, 1, 0.5, 0.5, 0.5])) \n R4 = np.diag(np.array([1, 1, 1, 2, 2, 2 ]))\n \n #---- Rotate Eshelby tensors into laboratory coordinates ----\n Qi = np.linalg.inv(Q)\n \n \n Sm = Q.dot(Sm.dot(R4.dot(Qi.dot(Id4)))) # Eshelby tensor in lab coords.\n Tm = Qu.dot(Tm.dot(R4.dot(Qi.dot(Id4)))) # alternate tensor in lab coords\n \n #---- Calculate the concentration tensors ----\n Smsi = np.linalg.inv(Id4 - (1-lam)*Sm)\n Bm = Id4.dot(Smsi.dot(Id4)) # eqn. (B.57) \n # Cm = (1-lam)*Tm * inv(Id4-(1-lam)*Sm) * Id4 # eqn. (B.66)\n Cm = (1-lam)*Tm.dot(R4.dot(Bm)) # eqn. (B.62)\n \n return [Bm, Cm, Sm, Tm]", "title": "" }, { "docid": "5ad6d1969cbe1d0d824fad8a6fa6a869", "score": "0.55782235", "text": "def fit_ellipsoid_cvx(x, y, z):\n\n #TODO not working. 
it is using non-linear solver, but takes forever\n\n assert len(x) == len(y)\n\n N = len(x)\n D = 7\n \n dat = numpy.zeros((N, D))\n dat[:,0] = x*x\n dat[:,1] = y*y\n dat[:,2] = z*z\n dat[:,3] = x\n dat[:,4] = y\n dat[:,5] = z\n dat[:,6] = numpy.ones(N)\n \n\n print dat.shape \n dat = cvxmod.matrix(dat)\n #### parameters\n\n # data\n X = cvxmod.param(\"X\", N, D)\n\n\n #### varibales\n \n # parameter vector\n theta = cvxmod.optvar(\"theta\", D)\n \n\n # simple objective \n objective = cvxmod.sum(square(X*theta))\n \n # create problem \n p = cvxmod.problem(cvxmod.minimize(objective))\n p.constr.append(theta[0] + theta[1] == 1)\n #p.constr.append(theta[0] + theta[2] == 1)\n #p.constr.append(theta[1] + theta[2] == 1)\n \n \n ###### set values\n X.value = dat\n #solver = \"mosek\" \n #p.solve(lpsolver=solver)\n p.solve()\n \n\n w = numpy.array(cvxmod.value(theta))\n \n #print weights\n \n cvxmod.printval(theta)\n\n\n ## For clarity, fill in the quadratic form variables\n A = numpy.zeros((3,3))\n A[0,0] = w[0]\n #A.ravel()[1:3] = 0 #w[2]\n A[1,1] = w[1]\n A[2,2] = w[2]\n bv = w[3:6]\n c = w[6]\n \n ## find parameters\n from conic2ellipse import conic2ellipsoid\n z, rx, ry, rz, alpha = conic2ellipsoid(A, bv, c)\n\n return z, rx, ry, alpha", "title": "" }, { "docid": "3290c98d2634aa8d1c0dd8b21fdb3fb0", "score": "0.5568613", "text": "def _polynomial_roots_ok(\n self, source_x, source_y, return_distances=False):\n roots = self._get_polynomial_roots(\n source_x=source_x, source_y=source_y)\n\n roots_conj = np.conjugate(roots)\n component2 = self.mass_1 / (roots_conj - self._position_z1)\n component3 = self.mass_2 / (roots_conj - self._position_z2)\n solutions = (self._zeta + self.G * roots_conj + component2 + component3) / (1 - self.K)\n # solutions = (self._zeta + 1 / np.conjugate(roots)) / (1 - self.K)\n\n # This backs-up the lens equation.\n\n\n out = []\n distances = []\n for (i, root) in enumerate(roots):\n distances_from_root = abs((solutions-root)**2)\n min_distance_arg = np.argmin(distances_from_root)\n\n if i == min_distance_arg:\n out.append(root)\n distances.append(distances_from_root[min_distance_arg])\n # The values in distances[] are a diagnostic on how good the\n # numerical accuracy is.\n\n # If the lens equation is solved correctly, there should be\n # either 3 or 5 solutions (corresponding to 3 or 5 images)\n # print(\"Number of solutions: \", len(out))\n if len(out) not in [1,2,3,4,5,6,7,8,9]:\n msg = (\"Wrong number of solutions to the lens equation of binary\" +\n \" lens.\\nGot {:} and expected 3 or 5.\\nThe parameters \" +\n \"(m1, m2, s, source_x, source_y, solver) are:\\n\" +\n \"{:} {:} {:} {:} {:} {:}\\n\\n\" +\n \"Consider using 'point_source_point_lens' method for \" +\n \"epochs when the source is very far from the lens. Note \" +\n \"that it's different from 'point_source' method.\")\n txt = msg.format(\n len(out), repr(self.mass_1), repr(self.mass_2),\n repr(self.separation), repr(source_x), repr(source_y),\n self._solver)\n\n if self._solver != \"Skowron_and_Gould_12\":\n txt += (\n \"\\n\\nYou should switch to using Skowron_and_Gould_12\" +\n \" polynomial root solver. It is much more accurate than \" +\n \"numpy.polynomial.polynomial.polyroots(). \" +\n \"Skowron_and_Gould_12 method is selected in automated \" +\n \"way if VBBL is imported properly.\")\n distance = sqrt(source_x**2 + source_y**2)\n if (self.mass_2 > 1.e-6 * self.mass_1 and\n (distance < 15. or distance < 2. 
* self.separation)):\n txt += (\"\\n\\nThis is surprising error - please contact code \" +\n \"authors and provide the above error message.\")\n elif distance > 200.:\n txt += (\"\\n\\nYou try to calculate magnification at huge \" +\n \"distance from the source and this is causing an \" +\n \"error.\")\n txt += \"\\nMulensModel version: {:}\".format(mm.__version__)\n\n raise ValueError(txt)\n if return_distances:\n return (np.array(out), np.array(distances))\n else:\n return np.array(out)", "title": "" }, { "docid": "8b2cd3abaefe03b337d712712381015f", "score": "0.5542595", "text": "def compute_residual(F, x1, x2):\n\n # create the homogeneous coordinates\n x1_new = np.ones((x1.shape[0], 3))\n x1_new[:, :2] = x1\n\n x2_new = np.ones((x2.shape[0], 3))\n x2_new[:, :2] = x2\n\n # calculation of the residual by the original (not normalized) F_matrix and points\n g = 0\n for i in range(x1_new.shape[0]):\n g = g + x1_new[i].T.dot(F).dot(x2_new[i])\n g = g / x1_new.shape[0]\n\n return g", "title": "" }, { "docid": "62df623b8e9577048dc9c10e9962fddd", "score": "0.55329645", "text": "def compute_residuals(self):\n self.residual = {}\n for i, vote in self.votes.items():\n product = vote * self.num_seats\n sub_product = self.sum_votes * self.seats[i]\n self.residual[i] = product - sub_product", "title": "" }, { "docid": "30075b40a55e77e6e7f45ab3f5791bc4", "score": "0.55167097", "text": "def fitting_obj(param):\n\n\n # centers\n cx = param[0]\n cy = param[1]\n cz = param[2]\n\n radius = param[3]\n\n #a = param[3]\n #b = param[4]\n #c = param[5]\n\n obj = 0\n\n for idx in range(len(x)):\n\n \"\"\"\n Compute the unique root tbar of F(t) on (-e2*e2,+infinity);\n\n x0 = e0*e0*y0/(tbar + e0*e0);\n x1 = e1*e1*y1/(tbar + e1*e1);\n x2 = e2*e2*y2/(tbar + e2*e2);\n\n distance = sqrt((x0 - y0)*(x0 - y0) + (x1 - y1)*(x1 - y1) + (x2 - y2)*(x2 - y2))\n \"\"\"\n\n #residual = b*b*c*c*(cx - x[idx])**2 \n #residual += a*a*c*c*(cy - y[idx])**2 \n #residual += a*a*b*b*(cz - z[idx])**2\n #residual = residual - a*a*b*b*c*c\n\n residual = (cx - x[idx])**2 + (cy - y[idx])**2 + (cz - z[idx])**2 \n residual = numpy.sqrt(residual) - radius\n\n tmp = loss_functions.squared_loss(residual)\n #tmp = loss_functions.abs_loss(residual)\n #tmp = loss_functions.eps_loss(residual, 1)\n #tmp = loss_functions.eps_loss_asym(residual, 2, 1.0, 0.3)\n\n # consider intensity\n obj += tmp*i[idx]\n\n\n return obj", "title": "" }, { "docid": "a26573ab2c4767f72366d5d3da791b8a", "score": "0.5507667", "text": "def test_utils_ellipsoid(self):\n a = 1.0\n b = 0.75\n c = 2.0\n aa = a * a\n bb = b * b\n cc = c * c\n TOL = 1.0E-6\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, [1.0, 0.0, 0.0])\n self.assertAlmostEqual(u, 0.0, delta=TOL)\n self.assertAlmostEqual(v, 0.5 * math.pi, delta=TOL)\n x, dx_du, dx_dv = getEllipsoidPolarCoordinatesTangents(a, b, c, u, v)\n assertAlmostEqualList(self, x, [1.0, 0.0, 0.0], delta=TOL)\n assertAlmostEqualList(self, dx_du, [0.0, 0.75, 0.0], delta=TOL)\n assertAlmostEqualList(self, dx_dv, [0.0, 0.0, 2.0], delta=TOL)\n\n # test a point not on the surface\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, [1.0, 0.75, 1.0])\n self.assertAlmostEqual(u, 0.6795893730435009, delta=TOL)\n self.assertAlmostEqual(v, 2.038067043010934, delta=TOL)\n x = getEllipsoidPolarCoordinatesTangents(a, b, c, u, v)[0]\n assertAlmostEqualList(self, x, [0.6944481794937621, 0.42082645661076656, 0.9009025175142785], delta=TOL)\n mag = (x[0] * x[0]) / aa + (x[1] * x[1]) / bb + (x[2] * x[2]) / cc\n self.assertAlmostEqual(mag, 1.0, 
delta=TOL)\n # test the nearest point found on the surface has same polar coordinates\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, x)\n self.assertAlmostEqual(u, 0.6795893730435009, delta=TOL)\n self.assertAlmostEqual(v, 2.038067043010935, delta=TOL)\n\n # test a point not on the surface\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, [-0.9, -0.25, -1.2])\n self.assertAlmostEqual(u, -2.8190484300147065, delta=TOL)\n self.assertAlmostEqual(v, 0.9561038921906282, delta=TOL)\n x = getEllipsoidPolarCoordinatesTangents(a, b, c, u, v)[0]\n assertAlmostEqualList(self, x, [-0.7748223543206793, -0.19421818481623945, -1.153414567503636], delta=TOL)\n mag = (x[0] * x[0]) / aa + (x[1] * x[1]) / bb + (x[2] * x[2]) / cc\n self.assertAlmostEqual(mag, 1.0, delta=TOL)\n # test the nearest point found on the surface has same polar coordinates\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, x)\n self.assertAlmostEqual(u, -2.8190484300147065, delta=TOL)\n self.assertAlmostEqual(v, 0.9561038921906281, delta=TOL)\n\n u_in = math.pi / 3.0\n v_in = math.pi / 2.0\n x, dx_du, dx_dv = getEllipsoidPolarCoordinatesTangents(a, b, c, u_in, v_in)\n assertAlmostEqualList(self, x, [0.5, 0.649519052838329, 0.0], delta=TOL)\n assertAlmostEqualList(self, dx_du, [-0.8660254037844386, 0.375, 0.0], delta=TOL)\n assertAlmostEqualList(self, dx_dv, [0.0, 0.0, 2.0], delta=TOL)\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, x)\n self.assertAlmostEqual(u, u_in, delta=TOL)\n self.assertAlmostEqual(v, v_in, delta=TOL)\n\n u_in = -0.7 * math.pi\n v_in = 0.3 * math.pi\n x, dx_du, dx_dv = getEllipsoidPolarCoordinatesTangents(a, b, c, u_in, v_in)\n assertAlmostEqualList(self, x, [-0.4755282581475767, -0.4908813728906053, -1.1755705045849463], delta=TOL)\n assertAlmostEqualList(self, dx_du, [0.6545084971874737, -0.35664619361068256, 0.0], delta=TOL)\n assertAlmostEqualList(self, dx_dv, [-0.3454915028125262, -0.3566461936106826, 1.618033988749895], delta=TOL)\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, x)\n self.assertAlmostEqual(u, u_in, delta=TOL)\n self.assertAlmostEqual(v, v_in, delta=TOL)\n\n u_in = 0.35 * math.pi\n v_in = 0.65 * math.pi\n x, dx_du, dx_dv = getEllipsoidPolarCoordinatesTangents(a, b, c, u_in, v_in)\n assertAlmostEqualList(self, x, [0.4045084971874737, 0.5954194696096774, 0.9079809994790935], delta=TOL)\n assertAlmostEqualList(self, dx_du, [-0.7938926261462366, 0.3033813728906053, 0.0], delta=TOL)\n assertAlmostEqualList(self, dx_dv, [-0.20610737385376343, -0.30338137289060524, 1.7820130483767358], delta=TOL)\n u, v = getEllipsoidPolarCoordinatesFromPosition(a, b, c, x)\n self.assertAlmostEqual(u, u_in, delta=TOL)\n self.assertAlmostEqual(v, v_in, delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, 0.0, 0.0], [0.0, -b, 0.0])\n assertAlmostEqualList(self, centre, [0.0, 0.0, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, -b, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis2, [a, 0.0, 0.0], delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, b / 3.0, 0.0], [0.0, -b, 0.0])\n assertAlmostEqualList(self, centre, [0.0, 0.0, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, -b, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis2, [a, 0.0, 0.0], delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, 0.0, 0.0], [0.0, 0.0, c])\n assertAlmostEqualList(self, centre, [0.0, 0.0, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, 0.0, c], delta=TOL)\n assertAlmostEqualList(self, axis2, 
[a, 0.0, 0.0], delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, 0.0, c / 4.0], [0.0, 0.0, c])\n assertAlmostEqualList(self, centre, [0.0, 0.0, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, 0.0, c], delta=TOL)\n assertAlmostEqualList(self, axis2, [a, 0.0, 0.0], delta=TOL)\n\n z = 0.25 * c\n y = -math.sqrt((b * b) * (1.0 - (z * z) / (c * c)))\n x = math.sqrt((a * a) * (1.0 - (z * z) / (c * c)))\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, 0.0, z], [0.0, y, z])\n assertAlmostEqualList(self, centre, [0.0, 0.0, z], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, y, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis2, [x, 0.0, 0.0], delta=TOL)\n mag = (axis2[0] * axis2[0]) / aa + (z * z) / cc\n self.assertAlmostEqual(mag, 1.0, delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, 0.0, 0.0], [0.0, y, z])\n assertAlmostEqualList(self, centre, [0.0, 0.0, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, y, z], delta=TOL)\n assertAlmostEqualList(self, axis2, [a, 0.0, 0.0], delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, 0.1, 0.1], [0.0, y, z])\n assertAlmostEqualList(self, centre, [0.0, 0.009782267266570388, 0.1436792247347149], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, -0.735966644680461, 0.3563207752652851], delta=TOL)\n assertAlmostEqualList(self, axis2, [0.9973309128094611, 0.0, 0.0], delta=TOL)\n mag = (axis2[0] * axis2[0]) / aa + (centre[1] * centre[1]) / bb + (centre[2] * centre[2]) / cc\n self.assertAlmostEqual(mag, 1.0, delta=TOL)\n # check original midx is on axis1\n dir1 = normalize(sub(centre, [0.0, 0.1, 0.1]))\n dir2 = normalize(axis1)\n dir3 = normalize(sub([0.0, y, z], centre))\n assertAlmostEqualList(self, dir1, dir2, delta=TOL)\n assertAlmostEqualList(self, dir1, dir3, delta=TOL)\n\n z = 0.8 * c\n y = math.sqrt((b * b) * (1.0 - (z * z) / (c * c)))\n x = math.sqrt((a * a) * (1.0 - (y * y) / (b * b)))\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, y, 0.0], [0.0, y, z])\n assertAlmostEqualList(self, centre, [0.0, y, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, 0.0, z], delta=TOL)\n assertAlmostEqualList(self, axis2, [x, 0.0, 0.0], delta=TOL)\n mag = (axis2[0] * axis2[0]) / aa + (y * y) / bb\n self.assertAlmostEqual(mag, 1.0, delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, 0.0, 0.0], [0.0, y, z])\n assertAlmostEqualList(self, centre, [0.0, 0.0, 0.0], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, y, z], delta=TOL)\n assertAlmostEqualList(self, axis2, [a, 0.0, 0.0], delta=TOL)\n\n centre, axis1, axis2 = getEllipsoidPlaneA(a, b, c, [0.0, -0.3, -0.1], [0.0, y, z])\n assertAlmostEqualList(self, centre, [0.0, -0.10732946298984034, 0.3367198838896952], delta=TOL)\n assertAlmostEqualList(self, axis1, [0.0, 0.5573294629898402, 1.2632801161103049], delta=TOL)\n assertAlmostEqualList(self, axis2, [0.9752823267321079, 0.0, 0.0], delta=TOL)\n mag = (axis2[0] * axis2[0]) / aa + (centre[1] * centre[1]) / bb + (centre[2] * centre[2]) / cc\n self.assertAlmostEqual(mag, 1.0, delta=TOL)\n # check original midx is on axis1\n dir1 = normalize(sub(centre, [0.0, -0.3, -0.1]))\n dir2 = normalize(axis1)\n dir3 = normalize(sub([0.0, y, z], centre))\n assertAlmostEqualList(self, dir1, dir2, delta=TOL)\n assertAlmostEqualList(self, dir1, dir3, delta=TOL)", "title": "" }, { "docid": "69d630e8369d1aa13131c05f4765389a", "score": "0.55031484", "text": "def minimum_distance_ellipsoid(query_point, center, axes, rotmat, init_angles=(0,0)):\n # 
Minimize the distance over the pair of angles that define the ellipsoid point\n minimizing_function = partial(distance_to_ellipsoid, query_point=query_point, center=center, axes=axes, rotmat=rotmat) \n res = minimize(minimizing_function, init_angles, bounds=((0, 2*np.pi), (0, np.pi)), tol=1e-6)\n # The distance is the minimized function value\n distance = res.fun \n # Find the closest ellipsoid point using the angles\n closest_ellipsoid_point = ellipsoid_point_from_angles(*res.x, center, axes, rotmat)\n \n return distance, closest_ellipsoid_point, res", "title": "" }, { "docid": "42f2147b10fe9651a4991b302f90cb3d", "score": "0.54441506", "text": "def residual(x, filters, kernel, strides, mode=None):\n with tf.variable_scope('residual'):\n conv1 = conv2d(x, filters, filters, kernel, strides)\n conv2 = conv2d(relu(conv1), filters, filters, kernel, strides)\n shortcut = x + conv2\n return shortcut", "title": "" }, { "docid": "3ba740f1cffa6e1a60a53de72bf92d37", "score": "0.5441913", "text": "def test_bounding_ellipsoid_robust():\n\n for n in range(1, NMAX+1):\n ell_gen = random_ellipsoid(n)\n for npoints in range(1, n):\n x = ell_gen.samples(npoints)\n\n # check that it works\n ell = nestle.bounding_ellipsoid(x, pointvol=ell_gen.vol/npoints)\n\n # check that volume is as expected\n assert_allclose(ell.vol, ell_gen.vol)\n\n # check that points are contained\n for xi in x:\n assert ell.contains(xi)", "title": "" }, { "docid": "93e82dc8c44e88abc2de4e5114461429", "score": "0.54264235", "text": "def _compute_residual_and_jacobian(\n x: jnp.ndarray,\n y: jnp.ndarray,\n xd: jnp.ndarray,\n yd: jnp.ndarray,\n k1: float = 0.0,\n k2: float = 0.0,\n k3: float = 0.0,\n p1: float = 0.0,\n p2: float = 0.0,\n) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray,\n jnp.ndarray]:\n # let r(x, y) = x^2 + y^2;\n # d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3;\n r = x * x + y * y\n d = 1.0 + r * (k1 + r * (k2 + k3 * r))\n\n # The perfect projection is:\n # xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);\n # yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);\n #\n # Let's define\n #\n # fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;\n # fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;\n #\n # We are looking for a solution that satisfies\n # fx(x, y) = fy(x, y) = 0;\n fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd\n fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd\n\n # Compute derivative of d over [x, y]\n d_r = (k1 + r * (2.0 * k2 + 3.0 * k3 * r))\n d_x = 2.0 * x * d_r\n d_y = 2.0 * y * d_r\n\n # Compute derivative of fx over x and y.\n fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x\n fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y\n\n # Compute derivative of fy over x and y.\n fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x\n fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y\n\n return fx, fy, fx_x, fx_y, fy_x, fy_y", "title": "" }, { "docid": "c5be50a6001766ed84714502df4be676", "score": "0.5420093", "text": "def find_ellipsoid_parameters(A, b, c, verbose=False): \n # Determine the center\n # center = solution of A*x + b/2 = 0 -> -A*x = b/2\n center = np.linalg.lstsq(-1*A, 0.5*b)[0]\n if verbose:\n print('Ellipsoid center: {}'.format(center))\n \n # Determine the radii of the axes\n Amat = ellipsoid_triple_to_algebraic(A, b, c) # put ellipsoid parameters into 4x4 algebraic form\n # Transformation to center\n T = np.eye(4) # 4x4 unity matrix\n T[3,0:3] = center\n R = T @ Amat @ T.T # transform 
to center\n # Determine eigenvalues\n evals, evecs = eig(R[0:3,0:3] / -R[3,3])\n radii = np.power(abs(evals), -0.5) # radii = 1 / sqrt( abs(evals) )\n if verbose:\n print('Ellipsoid radii: {}'.format(radii))\n \n # Determine the rotation matrix\n rotmat = inv(evecs)\n \n return center, radii, rotmat", "title": "" }, { "docid": "1db84dac016fe161b31e8c253c58a848", "score": "0.541392", "text": "def PrimalResidual(self, epsi, lamda, y, z, s, gvec, dpsidx, xsi, eta, mu, zet):\n \n rex = dpsidx - xsi + eta # d/dx\n rey = self.c + self.d*y - mu - lamda # d/dy\n rez = self.a0 - zet - np.inner(self.a, lamda) # d/dz\n relam = gvec - self.a*z - y + s - self.b # d/dlam\n rexsi = xsi * (self.xact - self.alfa) - epsi # d/dxsi\n reeta = eta * (self.beta - self.xact) - epsi # d/deta\n remu = mu * y - epsi # d/dmu\n rezet = zet*z - epsi # d/dzeta\n res = lamda * s - epsi # d/ds\n \n residu1 = np.concatenate([rex, rey, [rez]])\n residu2 = np.concatenate([relam, rexsi, reeta, remu, [rezet], res])\n residual = np.concatenate([residu1, residu2])\n residunorm = np.linalg.norm(residual)\n residumax = np.max(np.abs(residual))\n \n return residual, residunorm, residumax", "title": "" }, { "docid": "8e5e92ff502fad58c45fa8a1a1f78042", "score": "0.5400336", "text": "def compute_warped_residual(pose, invD0, invD1, x0, x1, px, py, K, obj_mask=None):\n u_warped, v_warped, inv_z_warped = geometry.batch_warp_inverse_depth(\n px, py, invD0, pose, K)\n x1_1to0 = geometry.warp_features(x1, u_warped, v_warped)\n occ = geometry.check_occ(inv_z_warped, invD1, u_warped, v_warped)\n\n residuals = x1_1to0 - x0 # equation (12)\n\n B, C, H, W = x0.shape\n if obj_mask is not None:\n # determine whether the object is in-view\n occ = occ & (obj_mask.view(B,1,H,W) < 1)\n residuals[occ.expand(B,C,H,W)] = 1e-3\n\n return residuals, occ", "title": "" }, { "docid": "e76832d12d4247cff3a12414c5e81441", "score": "0.5388933", "text": "def residual(ctx):\n Nspace = ctx.atmos.Nspace\n atoms = ctx.activeAtoms\n resid = [np.zeros((atom.Nlevel, Nspace)) for atom in atoms]\n\n for i, atom in enumerate(atoms):\n for k in range(Nspace):\n iEliminate = -1\n Gamma = np.copy(atom.Gamma[:, :, k])\n\n Gamma[iEliminate, :] = 1.0\n\n f = np.zeros(atom.Nlevel)\n f[iEliminate] = atom.nTotal[k]\n\n resid[i][:, k] = f - Gamma @ atom.n[:, k]\n\n return resid", "title": "" }, { "docid": "e140013e4f9b38462e340bbcfbef16f3", "score": "0.5381208", "text": "def DualResidual(self, hvec, eta, lamda, epsvecm):\n \n reslam = hvec + eta\n reseta = eta * lamda - epsvecm\n res = np.concatenate([reslam.ravel(), reseta.ravel()])\n norm2 = np.linalg.norm(res, 2)\n norminf = np.abs(res).max()\n \n return res, norm2, norminf", "title": "" }, { "docid": "154652882c51c3f8a73a530ea4565249", "score": "0.5380959", "text": "def get_residual(clear_image, blurry_image):\n\n # Convert blurry_image and clear_image into 2 dimensional arrays -- from (x,x,1) to (x,x,)\n blurry_image = blurry_image.reshape(blurry_image.shape[0], blurry_image.shape[1])\n clear_image = clear_image.reshape(clear_image.shape[0], clear_image.shape[1])\n\n # Throw away the SSIM score and keep the residual between the two images\n (_, residual) = structural_similarity(blurry_image, clear_image, full=True)\n\n return residual", "title": "" }, { "docid": "f76d20facca4febca954820c4cfebe20", "score": "0.53750545", "text": "def residuals_all_exp(p, y0_dict, c_dict, datasets_dict):\r\n\r\n #Code for chemical balancing \r\n\r\n #Order: C,H,O,N\r\n gluc = np.array([6.0,12.0,6.0,0.0])\r\n O2 = np.array([0.0, 0.0, 2.0, 0.0])\r\n 
NH3 = np.array([0.0,3.0,0.0,1.0])\r\n biomass = np.array([1.0,p[\"HX\"].value, p[\"OX\"].value, p[\"NX\"].value])\r\n CO2 = np.array([1.0,0.0,2.0,0.0])\r\n H2O = np.array([0.0,2.0,1.0,0.0])\r\n etoh = np.array([2.0,6.0,1.0,0.0])\r\n glyc = np.array([3.0,8.0,3.0,0.0])\r\n\r\n \r\n\r\n\r\n MW_element_dict = {\"C\": 12.011, \"H\": 1.0079, \"O\": 15.999, \"N\": 14.007} #molar masses\r\n molecule = {\"gluc\": gluc, \"O2\": O2, \"NH3\" : NH3, \"biomass\": biomass, \"CO2\" : CO2, \"H2O\": H2O, \"etoh\": etoh, \"glyc\": glyc}\r\n\r\n MW = {} #creating dict with masses of molecules\r\n for key, mol in molecule.items():\r\n molecule_MW_array = ([])\r\n for vectorvalue, weight in zip (mol, MW_element_dict.values()):\r\n vw = vectorvalue*weight\r\n molecule_MW_array= np.append(molecule_MW_array, vw)\r\n MW[key] = sum(molecule_MW_array)\r\n\r\n NX1 = p[\"NX\"].value\r\n GE = (p[\"g_e\"]/MW[\"glyc\"]) * MW[\"etoh\"] #from mass ratio(p[\"g_e\"]) to molar ratio. Glycerol per ethanol\r\n\r\n #1. oxidative glucose consumption: gluc+ a*O2 + b*NX*NH3 = b*biomass + c*CO2 + d*H2O \r\n a,b,c,d, NX = symbols(\"a b c d NX\")\r\n Yxs_ox = p[\"Yxs_ox\"].value\r\n b1 = Yxs_ox* MW[\"gluc\"]/MW[\"biomass\"] #calculate stoichiometric coefficient\r\n\r\n eqOx_list = []\r\n for num in range(3):\r\n eqOx = Eq(gluc[num]+ a*O2[num]+ b*NX*NH3[num], b*biomass[num]+ c*CO2[num]+ d*H2O[num])\r\n eqOx = eqOx.subs({b: b1, NX: NX1})\r\n eqOx_list.append(eqOx)\r\n \r\n solution_Ox = sp.solve(eqOx_list, (a, c, d), dict= True)\r\n a1, c1, d1 = np.float(solution_Ox[0][a]), np.float(solution_Ox[0][c]), np.float(solution_Ox[0][d])\r\n YCO2x_ox = c1/b1 * MW[\"CO2\"]/MW[\"biomass\"]\r\n YCO2s_ox = c1/1 * MW[\"CO2\"]/MW[\"gluc\"]\r\n YO2s_ox = a1/1 * MW[\"O2\"]/MW[\"gluc\"]\r\n\r\n #Yield coefficients showing up later in fit results. Not actual changeable parameters. Vary have to be False\r\n p.add(\"YCO2x_ox\", value=YCO2x_ox, vary=False)\r\n p.add(\"YCO2s_ox\", value=YCO2s_ox, vary=False)\r\n p.add(\"YO2s_ox\", value=YO2s_ox, vary=False)\r\n \r\n # stop\r\n\r\n #2. reductive glucose consumption: gluc+ g*NX*NH3 = g*biomass + h*CO2 + i*H2O + j*etOH + GE*j*glyc\r\n g,h,i,j, NX = symbols(\"g h i j NX\")\r\n Yxs_red = p[\"Yxs_red\"].value\r\n g1 = Yxs_red* MW[\"gluc\"]/MW[\"biomass\"]\r\n\r\n eqRed_list = []\r\n for num in range(3):\r\n eqRed = Eq(gluc[num]+ g*NX*NH3[num] , g*biomass[num]+ h*CO2[num]+ i*H2O[num]+ j*etoh[num]+ GE*j*glyc[num] )\r\n eqRed = eqRed.subs({g: g1, NX: NX1})\r\n eqRed_list.append(eqRed)\r\n \r\n solution_Red = sp.solve(eqRed_list, (h, i, j), dict= True)\r\n h1,i1,j1 = np.float(solution_Red[0][h]), np.float(solution_Red[0][i]), np.float(solution_Red[0][j])\r\n\r\n Yes_red = j1/1 * MW[\"etoh\"]/MW[\"gluc\"]\r\n Ygs_red = GE*j1/1 * MW[\"glyc\"]/MW[\"gluc\"] \r\n YCO2x_red = h1/g1 * MW[\"CO2\"]/MW[\"biomass\"]\r\n YCO2s_red = h1/1 * MW[\"CO2\"]/MW[\"gluc\"]\r\n\r\n p.add(\"Yes_red\", value=Yes_red, vary=False)\r\n p.add(\"Ygs_red\", value=Ygs_red, vary=False)\r\n p.add(\"YCO2x_red\", value=YCO2x_red, vary=False)\r\n p.add(\"YCO2s_red\", value=YCO2s_red, vary=False)\r\n\r\n\r\n #3. 
oxidative ethanol consumption: etoh + k*O2 + l*NX*NH3 = l*biomass + m*CO2 + n*H2O\r\n k,l,m,n, NX = symbols(\"k l m n NX\")\r\n Yxe_et = p[\"Yxe_et\"].value \r\n l1 = Yxe_et* MW[\"etoh\"]/MW[\"biomass\"]\r\n\r\n eqEt_list = []\r\n for num in range(3):\r\n eqEt = Eq(etoh[num]+ k*O2[num]+ l*NX*NH3[num], + l*biomass[num]+ m*CO2[num]+ n*H2O[num])\r\n eqEt = eqEt.subs({l: l1, NX: NX1})\r\n eqEt_list.append(eqEt)\r\n \r\n solution_Et = sp.solve(eqEt_list, (k, m, n), dict= True)\r\n k1, m1, n1 = np.float(solution_Et[0][k]), np.float(solution_Et[0][m]), np.float(solution_Et[0][n])\r\n\r\n YCO2x_et = m1/l1 * MW[\"CO2\"]/MW[\"biomass\"]\r\n YCO2e_et = m1/1 * MW[\"CO2\"]/MW[\"etoh\"]\r\n YO2e_et = k1 * MW[\"O2\"]/MW[\"etoh\"]\r\n\r\n p.add(\"YCO2x_et\", value=YCO2x_et, vary=False)\r\n p.add(\"YCO2e_et\", value=YCO2e_et, vary=False)\r\n p.add(\"YO2e_et\", value=YO2e_et, vary=False)\r\n\r\n\r\n\r\n #4. oxidative glycerol consumption: glyc+ u*O2 + v*NX*NH3 = v*biomass + w*CO2 + x*H2O\r\n u,v,w,x,NX = symbols(\"u v w x NX\")\r\n Yxg_glyc = p[\"Yxg_glyc\"].value\r\n v1 = Yxg_glyc * MW[\"glyc\"]/MW[\"biomass\"]\r\n\r\n eqGlyc_list = []\r\n for num in range(3):\r\n eqGlyc = Eq(glyc[num]+ u*O2[num]+ v*NX*NH3[num] , v*biomass[num]+ w*CO2[num]+ x*H2O[num])\r\n eqGlyc = eqGlyc.subs({v: v1, NX: NX1})\r\n eqGlyc_list.append(eqGlyc)\r\n \r\n solution_glyc = sp.solve(eqGlyc_list, (u, w, x), dict= True)\r\n u1, w1, x1 = np.float(solution_glyc[0][u]), np.float(solution_glyc[0][w]), np.float(solution_glyc[0][x])\r\n\r\n YCO2x_glyc = w1/v1 * MW[\"CO2\"]/MW[\"biomass\"]\r\n YCO2g_glyc = w1/1 * MW[\"CO2\"]/MW[\"glyc\"]\r\n YO2g_glyc = u1/1 * MW[\"O2\"]/MW[\"etoh\"]\r\n \r\n p.add(\"YCO2x_glyc\", value=YCO2x_glyc, vary=False)\r\n p.add(\"YCO2g_glyc\", value=YCO2g_glyc, vary=False)\r\n p.add(\"YO2g_glyc\", value=YO2g_glyc, vary=False)\r\n\r\n #5. 
maintenance metabolism : gluc + 6*O2 = 6*CO2 + 6*H2O\r\n\r\n YCO2s_m = 6 * MW[\"CO2\"]/MW[\"gluc\"]\r\n p.add(\"YCO2s_m\", value=YCO2s_m, vary=False)\r\n \r\n exp_names = y0_dict.keys() # experiment names\r\n\r\n res_all= [] # empty (list which will be an array), will contain residuals\r\n\r\n for exp in exp_names: # loop over experiments\r\n y0 = y0_dict[exp]\r\n c = c_dict[exp] \r\n datasets = datasets_dict[exp]\r\n\r\n res_this_exp = residuals_single_exp(p, c, y0, datasets)\r\n res_all = np.append(res_all, res_this_exp)\r\n\r\n return res_all", "title": "" }, { "docid": "cb96d19a05c4839c2aae9e47741b3af3", "score": "0.5368057", "text": "def residuals(self, pars, lnum=None, nonzeros_num=None, nonzeros_den=None, eps=None):\n if lnum is None:\n lnum = len(self.ml)\n if nonzeros_num is None:\n raise ValueError(\"nonzeros_num has to be provided.\")\n if nonzeros_den is None:\n raise ValueError(\"nonzeros_den has to be provided.\")\n if eps is None:\n raise ValueError(\"eps has to be provided.\")\n self.ml = list(nonzeros_num)\n self.nl = list(nonzeros_den)\n # Instead of using polynomials we save the coefficients\n ps = np.zeros(self._m+1)\n qs = np.zeros(self._n+1)\n # Optimize the coefficients using the least-squares non-linear solver\n ps[nonzeros_num] = pars[:lnum]\n qs[nonzeros_den] = pars[lnum:]\n self._ps = ps\n self._qs = qs\n self.update_approximant()\n vmodel = self._evaluate(self._x)\n return (self._y - vmodel)/eps", "title": "" }, { "docid": "cb96d19a05c4839c2aae9e47741b3af3", "score": "0.5368057", "text": "def residuals(self, pars, lnum=None, nonzeros_num=None, nonzeros_den=None, eps=None):\n if lnum is None:\n lnum = len(self.ml)\n if nonzeros_num is None:\n raise ValueError(\"nonzeros_num has to be provided.\")\n if nonzeros_den is None:\n raise ValueError(\"nonzeros_den has to be provided.\")\n if eps is None:\n raise ValueError(\"eps has to be provided.\")\n self.ml = list(nonzeros_num)\n self.nl = list(nonzeros_den)\n # Instead of using polynomials we save the coefficients\n ps = np.zeros(self._m+1)\n qs = np.zeros(self._n+1)\n # Optimize the coefficients using the least-squares non-linear solver\n ps[nonzeros_num] = pars[:lnum]\n qs[nonzeros_den] = pars[lnum:]\n self._ps = ps\n self._qs = qs\n self.update_approximant()\n vmodel = self._evaluate(self._x)\n return (self._y - vmodel)/eps", "title": "" }, { "docid": "4ba13094ec44993f53338b12db9e82c1", "score": "0.5365172", "text": "def residual(self):\n return self.bounds.residual(self.value)", "title": "" }, { "docid": "42e0bf7e268c90864e3dde7d91f239d0", "score": "0.536172", "text": "def _add_residuals(self):\n\n self._residuals = self.expected_counts - self._signal.data.counts\n self._residuals /= _np.sqrt(self.expected_counts)\n\n vmax = _np.max( _np.abs( self._residuals ) )\n\n resid = self._ax_resid.pcolormesh(self._signal.data.phases,\n self._signal.data.channels,\n self._residuals,\n cmap = cm.get_cmap(self._residual_cmap),\n vmin = -vmax,\n vmax = vmax,\n linewidth = 0,\n rasterized = self._rasterized)\n resid.set_edgecolor('face')\n\n resid = self._ax_resid.pcolormesh(self._signal.data.phases + 1.0,\n self._signal.data.channels,\n _np.abs(self._residuals),\n cmap = cm.get_cmap(self._residual_cmap),\n vmin = -vmax,\n vmax = vmax,\n linewidth = 0,\n rasterized = self._rasterized)\n resid.set_edgecolor('face')\n\n self._ax_resid.axvline(1.0, lw=self._tick_width, color='k')\n\n self._ax_resid.set_ylim([self._signal.data.channels[0],\n self._signal.data.channels[-1]])\n self._ax_resid.set_yscale('log')\n\n self._resid_cb = 
plt.colorbar(resid, cax = self._ax_resid_cb,\n ticks=AutoLocator())\n self._resid_cb.ax.set_frame_on(True)\n self._resid_cb.ax.yaxis.set_minor_locator(AutoMinorLocator())\n\n self._resid_cb.set_label(label=r'$(c_{ik}-d_{ik})/\\sqrt{c_{ik}}$',\n labelpad=15)", "title": "" }, { "docid": "35ba8ca95f987a09a018eac42ac0a5b1", "score": "0.53565997", "text": "def test_ellipsoid_sphere():\n\n scale = 5.\n for n in range(1, NMAX+1):\n ctr = 2.0 * scale * np.ones(n) # arbitrary non-zero center\n a = 1.0 / scale**2 * np.identity(n)\n ell = nestle.Ellipsoid(ctr, a)\n\n assert_allclose(ell.vol, nestle.vol_prefactor(n) * scale**n)\n assert_allclose(ell.axlens, scale * np.ones(n))\n assert_allclose(ell.axes, scale * np.identity(n))", "title": "" }, { "docid": "1330111eb5f4255ef448cd13afa8c60f", "score": "0.53402066", "text": "def showResiduals(self): \r\n xyz, uv = self._gcp.getGCPs() #Get GCPs\r\n dem = self.getDEM() #Get DEM\r\n\r\n #Set inverse projection parameters\r\n invprojvars = setProjection(dem, self._camloc, self._camDirection, \r\n self._radCorr, self._tanCorr, self._focLen, \r\n self._camCen, self._refImage)\r\n \r\n #Compute residuals\r\n computeResidualsXYZ(invprojvars, xyz, uv, dem)", "title": "" }, { "docid": "9356f27a92e9002bf2c9f2783a7c6f9b", "score": "0.5334807", "text": "def residual_of(self, z):\n return z - dot(self.H, self.x)", "title": "" }, { "docid": "510b4435fd9d8bffc16ec9694da10427", "score": "0.53333867", "text": "def _residual(self, x):\n h = x\n h = self.activation(h)\n h = self.c1(h)\n h = self.activation(h)\n h = self.c2(h)\n if self.downsample:\n h = F.avg_pool2d(h, 2)\n\n return h", "title": "" }, { "docid": "76d3f8fac3012437b09ec59b6d8abc05", "score": "0.5330859", "text": "def sphere_fit_rmse(points, center, radius):\n distances_to_center = np.sqrt( np.sum((points - center)**2, axis=1) )\n residuals = distances_to_center - radius\n residual_sum_squares = np.sum( np.power(residuals, 2) )\n rmse = np.sqrt(residual_sum_squares / len(residuals))\n return rmse", "title": "" }, { "docid": "a39fc97a6bc5398a5ef8fb673b149a50", "score": "0.53258675", "text": "def _reorthogonalized_residual(\n self,\n solver_state: \"probnum.linalg.solvers.LinearSolverState\",\n ) -> Tuple[np.ndarray, np.ndarray]:\n residual = self._reorthogonalization_fn_residual(\n v=solver_state.residual,\n orthogonal_basis=np.asarray(\n solver_state.cache[\"reorthogonalized_residuals\"]\n ),\n inner_product=None,\n )\n solver_state.cache[\"reorthogonalized_residuals\"].append(residual)\n prev_residual = solver_state.cache[\"reorthogonalized_residuals\"][\n solver_state.step - 1\n ]\n return residual, prev_residual", "title": "" }, { "docid": "0695554b26ed4f232aef9ecba16b22d0", "score": "0.53218126", "text": "def fit_ellipsoid_DR_SVD(x, n_iter=1000):\n # Find SVD of x and change coordinates\n x_mean = x.mean(axis=0)\n x_centered = x - x_mean # Center points around origin\n x_eval, x_evec = eig(x_centered.T @ x_centered) # Singular value decomposition\n \n P = np.diagflat(np.power(x_eval, -0.5)) @ x_evec.T # Transformation matrix for normalization\n \n x_norm = P @ x_centered.T # normalize x\n \n # Assemble matrix D\n D = np.vstack( (x_norm**2, \n sqrt(2)*x_norm[0,:]* x_norm[1,:],\n sqrt(2)*x_norm[0,:]* x_norm[2,:],\n sqrt(2)*x_norm[1,:]* x_norm[2,:],\n x_norm,\n np.ones_like(x_norm[0,:]) ) )\n \n K = D @ D.T\n \n # The objective is now to solve min <q,Kq>, Tr(Q)=1, Q>=0\n\n c = x_norm.mean(axis=1) # center after normalization\n \n r2 = x_norm.var(axis=1).sum()\n \n u = 1/3 * np.hstack( (1, 1, 1,\n 0, 0, 0,\n 
-2*c,\n (c**2).sum()-r2 ) )\n \n # And now go to the Douglas-Rachford (Lions-Mercier) iterative algorithm\n gamma = 10; # parameter gamma\n \n M = gamma*K + np.eye(K.shape[0]) \n p = u\n CF = np.zeros(n_iter+1)\n \n # Iterative solution\n for k in range(n_iter):\n q = project_on_B(p)\n CF[k] = 0.5* q @ K @ q.T\n \n (solution, res, rank, sing) = np.linalg.lstsq(M, 2*q-p, rcond=None) # np.linalg.lstsq corresponds to Matlab's mldivide (\\)\n p += solution - q\n \n q = project_on_B(q)\n CF[-1] = 0.5* q @ K @ q.T\n \n A2 = np.array([[q[0], q[3]/sqrt(2), q[4]/sqrt(2)],\n [q[3]/sqrt(2), q[1], q[5]/sqrt(2)],\n [q[4]/sqrt(2), q[5]/sqrt(2), q[2]]])\n \n b2 = q[6:9]\n c2 = q[9] \n \n # Go back to initial basis\n\n A = P.T @ A2 @ P\n b = -2* A @ x_mean.T + P.T @ b2.T\n c = (A2 @ P @ x_mean.T).T @ (P @ x_mean.T) - b2.T @ P @ x_mean.T + c2\n \n q = np.hstack( (np.diag(A),\n sqrt(2)*A[1,0], sqrt(2)*A[2,0], sqrt(2)*A[2,1],\n b, c) )\n \n q = q / np.sum(np.diag(A)) # normalization to stay on the simplex\n \n return A, b, c, q, CF", "title": "" }, { "docid": "b44aa9177f589135a70df469c3633e5a", "score": "0.5315361", "text": "def solve(self) :\n\n self.compute_scattering_source(self.flux_moments)\n flux_moments = self.sweep('red')\n self.update_flux_moments(flux_moments)\n self.compute_scattering_source(self.flux_moments)\n flux_moments = self.sweep('black')\n self.update_flux_moments(flux_moments)\n\n return self.flux_moments", "title": "" }, { "docid": "53ed7f1a857b571878f9d0defc7be8bc", "score": "0.5296649", "text": "def get_gene_residuals(guide_residuals, guide_mapping):\n check_gene_inputs(guide_residuals, guide_mapping)\n mapping_construct_col = guide_mapping.columns[0]\n mapping_gene_col = guide_mapping.columns[1]\n residual_construct_col = guide_residuals.columns[0]\n mapped_guide_residuals = merge_residual_mapping(guide_residuals, guide_mapping, residual_construct_col,\n mapping_construct_col)\n gene_residuals = aggregate_guide_residuals(mapped_guide_residuals, mapping_gene_col, residual_construct_col)\n return gene_residuals", "title": "" }, { "docid": "f448177d90977495f783b9ca6553236d", "score": "0.52941215", "text": "def ellipsoid_se(shp, mode='outer', r_in=None):\n if r_in is None: r_in = [0, 0, 0]\n\n shp = np.array(shp)\n\n if mode == 'center':\n r = [0, 0, 0]\n elif mode == 'outer':\n r = [0.5, 0.5, 0.5]\n elif mode == 'inner':\n r = [-0.5, -0.5, -0.5]\n elif mode == 'manual':\n r = r_in\n\n radii = (shp.astype(np.float) - 1) / 2\n\n def x(c):\n return c - radii[0]\n\n def y(c):\n return c - radii[1]\n\n def z(c):\n return c - radii[2]\n\n se = np.zeros(shp)\n\n for xi in xrange(0, int(shp[0])):\n for yi in xrange(0, int(shp[1])):\n for zi in xrange(0, int(shp[2])):\n\n se[xi, yi, zi] = (x(xi) / (radii[0] + r[0])) ** 2 \\\n + (y(yi) / (radii[1] + r[1])) ** 2 \\\n + (z(zi) / (radii[2] + r[2])) ** 2\n\n return se <= 1", "title": "" }, { "docid": "c8ea8f148c765d4b34b9e48f2bb7dc80", "score": "0.5283136", "text": "def residuals(self, x):\n return g(x, self._a, self._c)", "title": "" }, { "docid": "51eee71dd552930f708e266e1469e0fe", "score": "0.5281299", "text": "def compute_pseudo_residual(y, fm):\n ### BEGIN SOLUTION\n \n ### END SOLUTION\n return res", "title": "" }, { "docid": "b6d3619365ace43a3fd1aa18a6bf1bc1", "score": "0.52787554", "text": "def einstein_radius(self) -> u.Quantity:", "title": "" }, { "docid": "c14f824ba4caf362e4d95a03a133dddb", "score": "0.52656585", "text": "def residual(self, x, coefs, matching): \n # construct the polynomial approximations of mu(x) and theta(x)\n theta = 
self.polynomial(coefs[0])\n theta_x = theta.deriv()\n \n mu = self.polynomial(coefs[1])\n mu_x = mu.deriv()\n \n # compute the residual polynomial\n res_mu = (mu_x(x) - \n self.model.mu_prime(x, [theta(x), mu(x)], matching))\n res_theta = (theta_x(x) - \n self.model.theta_prime(x, [theta(x), mu(x)], matching))\n \n residual = np.hstack((res_theta, res_mu)) \n \n return residual", "title": "" }, { "docid": "3a5944569ada89a1c7ba6e0cc4c3b5b5", "score": "0.5262013", "text": "def calc_residuals(self):\n if not len(self.phases): # no phases, cant calc rms\n return None\n\n if not self.tts:\n self.fast_march(get_rays=False)\n # get index for each station\n sinds = [tuple(self.stations[x]) for x in self.inkey]\n # get travel time to each station for each event, stuff into df add SID\n stimes = [x[sinds] for key, x in self.tts.items()]\n sti_df = pd.DataFrame(stimes, columns=self.stations.ID, index=['pTT'])\n sti_df = sti_df[self.phases.SID].T\n sti_df['SID'] = sti_df.index\n # merge into current phases\n self.phases = self.phases.merge(sti_df)\n # calc residuals and set _rms attr\n self.phases['resid'] = self.phases.TT - self.phases.pTT\n self._rms = norm(self.phases.resid) / np.sqrt(len(self.phases))", "title": "" }, { "docid": "cd7e540a088e55a0a6ce13894d590b3b", "score": "0.52577347", "text": "def classical_residuals(params, x, data):\n N0 = params['N0'].value\n Nmax = params['Nmax'].value\n Rmax = params['Rmax'].value\n \n model = (N0*Nmax*(e**(Rmax*x)))/(\n Nmax+N0*(e**(Rmax*x)-1))\n\n return model - data", "title": "" }, { "docid": "95f9f3f3f50487c4cc9d720953fb16d3", "score": "0.5256888", "text": "def compute_losses(self, inputs, outputs):\n losses = {}\n losses[\"totLoss\"] = 0\n\n source_scale = 0\n target = inputs[(\"color\", 0, source_scale)]\n if self.opt.selfocclu:\n sourceSSIMMask = self.selfOccluMask(outputs[('real_scale_disp', source_scale)], inputs['stereo_T'][:, 0, 3])\n else:\n sourceSSIMMask = torch.zeros_like(outputs[('real_scale_disp', source_scale)])\n outputs['ssimMask'] = sourceSSIMMask\n\n # compute depth hint reprojection loss\n if self.opt.read_stereo:\n pred = outputs[(\"color_depth_hint\", 's', 0)]\n depth_hint_reproj_loss = self.compute_reprojection_loss(pred, inputs[(\"color\", 0, 0)])\n depth_hint_reproj_loss += 1000 * (1 - inputs['depth_hint_mask'])\n else:\n depth_hint_reproj_loss = None\n\n for scale in self.opt.scales:\n reprojection_loss = self.compute_reprojection_loss(outputs[(\"color\", 's', scale)], target)\n identity_reprojection_loss = self.compute_reprojection_loss(inputs[(\"color\", 's', source_scale)], target) + torch.randn(reprojection_loss.shape).cuda() * 0.00001\n combined = torch.cat((reprojection_loss, identity_reprojection_loss, depth_hint_reproj_loss), dim=1)\n to_optimise, idxs = torch.min(combined, dim=1, keepdim=True)\n\n reprojection_loss_mask = (idxs != 1).float() * (1 - outputs['ssimMask'])\n depth_hint_loss_mask = (idxs == 2).float()\n\n\n losses[\"loss_depth/{}\".format(scale)] = (reprojection_loss * reprojection_loss_mask).sum() / (reprojection_loss_mask.sum() +1e-7)\n losses[\"totLoss\"] += losses[\"loss_depth/{}\".format(scale)] / self.num_scales\n # proxy supervision loss\n if self.opt.read_stereo:\n valid_pixels = inputs['depth_hint_mask']\n\n depth_hint_loss = self.compute_proxy_supervised_loss(outputs[('depth', 0, scale)], inputs['depth_hint'], valid_pixels,\n depth_hint_loss_mask)\n depth_hint_loss = depth_hint_loss.sum() / (depth_hint_loss_mask.sum() + 1e-7)\n losses['depth_hint_loss/{}'.format(scale)] = depth_hint_loss\n 
losses[\"totLoss\"] += depth_hint_loss / self.num_scales * self.opt.depth_hint_param\n\n if self.opt.disparity_smoothness > 0:\n mult_disp = outputs[('disp', scale)]\n mean_disp = mult_disp.mean(2, True).mean(3, True)\n norm_disp = mult_disp / (mean_disp + 1e-7)\n losses[\"loss_smooth\"] = get_smooth_loss(norm_disp, target) / (2 ** scale)\n losses[\"totLoss\"] += self.opt.disparity_smoothness * losses[\"loss_smooth\"] / self.num_scales\n\n return losses", "title": "" }, { "docid": "b563f472199df39fb370cd8d0cd38c59", "score": "0.52437514", "text": "def compute_curved_earth_correction(wgs84_lat1, wgs84_long1, wgs84_lat2, wgs84_long2, latitudes, longitudes):\n half_central_angle = geometry.half_central_angle(math.radians(wgs84_lat1), math.radians(wgs84_long1),\n math.radians(wgs84_lat2), math.radians(wgs84_long2))\n max_overhead = geometry.overhead_height(half_central_angle, geometry.EARTH_RADIUS)\n angles = geometry.central_angle(np.deg2rad(wgs84_lat1), np.deg2rad(wgs84_long1), np.deg2rad(latitudes),\n np.deg2rad(longitudes))\n return max_overhead - geometry.overhead_height(half_central_angle - angles, geometry.EARTH_RADIUS)", "title": "" }, { "docid": "7f80f394c29d5df150099e15ccb3d64f", "score": "0.52408254", "text": "def exp_residual(p, y, x):\n\terr = y - exp_fit(p,x)\n\treturn err", "title": "" }, { "docid": "6f5ec9822361b73b0072569a4773d026", "score": "0.5233424", "text": "def lat_lon_from_ECEF(x,y,z,e_oplus,R_oplus,tol=0.001):\n r_delta_sat = np.sqrt(x**2. + y**2.)\n delta = np.arctan2(z,r_delta_sat)\n\n\n alpha = np.arctan2(z,r_delta_sat)#ok\n lam = alpha\n phi_gd = delta\n r_delta = r_delta_sat\n\n phi_gd_old = phi_gd\n for i in np.arange(1000):\n c_oplus = R_oplus/np.sqrt(1.-e_oplus**2.*np.sin(phi_gd_old)**2.)\n phi_gd = np.arctan2(z+c_oplus*e_oplus**2.*np.sin(phi_gd_old),r_delta)\n if np.abs(phi_gd-phi_gd_old) < tol:\n break\n phi_gd_old = phi_gd\n \n #Compute h_ellp\n if phi_gd < 0.99*np.pi/2. 
and phi_gd > -0.99*np.pi/2.:\n h_ellp = r_delta/np.cos(phi_gd) - c_oplus\n else:\n s_oplus = (R_oplus*(1.-e_oplus**2.))/np.sqrt(1.-e_oplus**2.*np.sin(phi_gd)**2.)\n h_ellp = z/np.sin(phi_gd)-s_oplus\n\n return (phi_gd, lam, h_ellp)", "title": "" }, { "docid": "5cc834e326f31ed9915748d3015d4822", "score": "0.52301604", "text": "def find_disparity_scale(self, feat_list_left, feat_list_right, intrinsic, aug_size):\n feat_l = merge_lists(feat_list_left)\n feat_r = merge_lists(feat_list_right)\n\n # outputs\n sceneflows = []\n disps = []\n\n # bidirect\n for l, (f_l, f_r) in enumerate(zip(feat_l, feat_r)):\n\n xl, xr = torch.cat([f_l, f_r], dim=0), torch.cat([f_r, f_l], dim=0)\n xl_f, xr_f = torch.flip(xl, [-1]), torch.flip(xr, [-1])\n\n # warping\n if l == 0:\n xr_f_warp = xr_f\n xr_warp = xr\n else:\n sf_f = interpolate2d_as(sf_f, xl, mode=\"bilinear\")\n sf_b = interpolate2d_as(sf_b, xl, mode=\"bilinear\")\n dp_1 = interpolate2d_as(dp_1, xl, mode=\"bilinear\")\n dp_1_flip = torch.flip(dp_1, [3])\n x_out_f = interpolate2d_as(self.upconv_layers[l-1](x_out_f), xl, mode=\"bilinear\")\n x_out_b = interpolate2d_as(self.upconv_layers[l-1](x_out_b), xl, mode=\"bilinear\")\n \n xr_warp = self.warping_layer_sf(xr, sf_f, dp_1, intrinsic, aug_size) # becuase K can be changing when doing augmentation\n xr_f_warp = self.warping_layer_sf(xr_f, sf_b, dp_1_flip, intrinsic, aug_size)\n\n # correlation\n xl, xr_warp, xl_f, xr_f_warp = normalize_features([xl, xr_warp, xl_f, xr_f_warp])\n out_corr_f = self.leakyRELU(Correlation.apply(xl, xr_warp, self.corr_params))\n out_corr_b = self.leakyRELU(Correlation.apply(xl_f, xr_f_warp, self.corr_params))\n\n # monosf estimator\n if l == 0:\n x_out_f, sf_f, dp_f = self.flow_estimators[l](torch.cat([xl, out_corr_f, torch.flip(out_corr_b, [3])], dim=1))\n x_out_b, sf_b, dp_b = self.flow_estimators[l](torch.cat([xl_f, out_corr_b, torch.flip(out_corr_f, [3])], dim=1))\n else: \n x_out_f, sf_f_res, dp_f = self.flow_estimators[l](torch.cat([xl, x_out_f, out_corr_f, torch.flip(out_corr_b, [3]), sf_f, flow_horizontal_flip(sf_b), dp_1], dim=1))\n x_out_b, sf_b_res, dp_b = self.flow_estimators[l](torch.cat([xl_f, x_out_b, out_corr_b, torch.flip(out_corr_f, [3]), sf_b, flow_horizontal_flip(sf_f), dp_1_flip], dim=1))\n sf_f = sf_f + sf_f_res\n sf_b = sf_b + sf_b_res\n\n dp_1 = (dp_f + torch.flip(dp_b, [3])) / 2.0\n dp_1 = nn.LayerNorm(dp_1.size()[1:], elementwise_affine=False)(dp_1)\n dp_1 = self.sigmoid(dp_1) * 0.3\n sceneflows.append(sf_f)\n disps.append(dp_1)\n\n if l == self.output_level: \n break\n\n return upsample_outputs_as(sceneflows[::-1], feat_l[::-1]), upsample_outputs_as(disps[::-1], feat_l[::-1])", "title": "" }, { "docid": "b35d44f6e864de94e38074fda124f596", "score": "0.5226722", "text": "def test_bounding_ellipsoid():\n\n npoints = 100\n\n print(\"\\ntest_bounding_ellipsoid\")\n\n for n in range(1, NMAX+1):\n ell_gen = random_ellipsoid(n) # random elipsoid\n x = ell_gen.samples(npoints) # points within it\n ell = nestle.bounding_ellipsoid(x)\n for xi in x:\n assert ell.contains(xi)\n\n print(\"n={}: true_vol={} vol={}\".format(n, ell_gen.vol, ell.vol))", "title": "" }, { "docid": "0482eed8ae13367fb9074d9c6f888415", "score": "0.52139044", "text": "def get_residuals(self, check=True):\n if not self.fit_is_valid and check:\n return None\n return self.data.y - self.model.evaluate(self.data.x)", "title": "" }, { "docid": "6692953c0dec32e8c0b94662fae8ef93", "score": "0.52064455", "text": "def refrac_residual(p, y, lamb):\n\t\n\terr = y - refrac_fit(p, lamb)\n\treturn err", 
"title": "" }, { "docid": "619b8a3bc02c16c1d811bc647c3e60a7", "score": "0.5201049", "text": "def get_condition_residuals(condition_x, condition_y, lfc_df, folds, degrees):\n x_data = lfc_df[condition_x] # series type\n y_data = lfc_df[condition_y]\n model_df = pd.DataFrame({'x': x_data, 'y':y_data}) # columns with x and y values with 'x' and 'y' headers\n optimal_degree = find_optimal_degree(model_df, degrees, folds, 'x', 'y')\n model_fit = fit_natural_cubic_spline(model_df, optimal_degree, 'x', 'y')\n model_info = {'model': 'spline', 'deg_fdm': optimal_degree, 'const': model_fit.params.xs('Intercept')}\n predictions = model_fit.predict(model_df['x'])\n residuals = y_data - predictions\n fig, _ = plot_model_fit(model_df, predictions, 'x', 'y', condition_x, condition_y)\n return residuals, model_info, fig", "title": "" }, { "docid": "07d5869a044623709d3d66608f2f34a0", "score": "0.5195318", "text": "def dens_ej(r, r_snr, r_rs, t, e0, mej2, dens):\n r *= gp.pc_to_cm\n r_rs *= gp.pc_to_cm\n r_snr *= gp.pc_to_cm\n v_t = np.sqrt((40./18.)*(e0/mej2)) #Transition velocity\n if (v_t*t*gp.yr_to_sec) <= r_snr:\n r_t = (v_t*t*gp.yr_to_sec)\n else:\n r_t = r_snr\n rho_ej = []\n if r_rs >= r_t:\n for j in range(len(r)):\n if r[j] < r_t:\n rho = (10./(9.*np.pi))*e0*(v_t**(-5))*((t*gp.yr_to_sec)**(-3))\n if r[j] >= r_t and r[j]< r_rs:\n rho = (10./(9.*np.pi))*e0*(v_t**(-5))*(r[j]/(v_t*(t*gp.yr_to_sec)))**(-9)*(((t*gp.yr_to_sec)**(-3)))\n if r[j] >= r_rs and r[j]< r_snr:\n rho = np.inf #just to break the computation if the radius of the PWN became bigger the the one of the RS\n rho_ej.append(rho)\n else:\n for j in range(len(r)):\n if r[j] < r_t:\n rho = (10/(9*np.pi))*e0*(v_t**(-5))*((t*gp.yr_to_sec)**(-3))\n if r[j] >= r_t and r[j]< r_rs:\n rho = (10/(9*np.pi))*e0*(v_t**(-5))*(r[j]/(v_t*(t*gp.yr_to_sec)))**(-9)*(((t*gp.yr_to_sec)**(-3)))\n if r[j] >= r_rs:\n rho = np.inf #just to break the computation if the radius of the PWN became bigger the the one of the RS\n rho_ej.append(rho)\n return np.array(rho_ej)", "title": "" }, { "docid": "da2bab4fdb53bee453cac9f66a41e94e", "score": "0.5192915", "text": "def norm_residuals(data):\n b = data[:, 0] # take the first column as vector\n A_ab = np.array([[1, i] for i in data[:, 1]])\n A_abc = np.array([[1, i, j] for (i, j) in zip(data[:, 1], data[:, 2])])\n return norm(A_ab, b), norm(A_abc, b)", "title": "" }, { "docid": "e7f4438496f94426869548007975f9a7", "score": "0.5192128", "text": "def corr_magnitude(directory, globalname, reference):\n psf = np.loadtxt(directory+globalname+\"_PSF.txt\")\n stars = pandas.read_csv(directory+globalname+\"_stars.csv\")\n files = files_intercalibrated(directory, globalname, reference)\n\n filenames = glob.glob(directory+globalname+'_images_epoch_*.fits')\n corr_epoch = []\n\n epochs_img = np.zeros(len(files))\n for i in range(len(files)):\n name = filenames[i]\n bla = str.split(name,'_')\n epochs_img[i]= bla[-2]\n\n #### Sort according to epoch\n s = np.argsort(epochs_img)\n\n epochs = np.zeros(len(files))\n for i in range(len(files)):\n epochs[i] = epochs_img[s[i]]\n \n plt.figure(figsize=(20,10))\n for i in range(len(files)):\n image = files[i]\n\n if globalname == 'EI2019-Data-Search-Field-2': #on traite à part ce dataset à cause de l'outlier 19\n X = [j for j in range(len(stars)) if j != 19]\n magnitudes = calculate_magnitude(image, [(stars[\"x\"][i], stars[\"y\"][i]) for i in X], psf[i]*2)\n Y = stars.drop(19)[\"magnitudes\"] - magnitudes #correction de la magnitude pour toutes les étoiles de l'image courante\n\n else:\n X = 
range(len(stars))\n magnitudes = calculate_magnitude(image, [(stars[\"x\"][i], stars[\"y\"][i]) for i in X], psf[i]*2)\n Y =stars[\"magnitudes\"] - magnitudes\n \n corr_epoch.append(mean(Y)) #moyenne des corrections de magnitude sur toutes les étoiles, époque par époque\n plot(X,Y, label=\"Epoch = \"+str(epochs[i])) #plot la correction des magnitudes pour chaque étoile et pour chaque époque\n\n corr = np.mean(corr_epoch) #moyenne des corrections sur toutes les époques dans ce dataset (normalement celles-ci doivent être proches car la magnitude des étoiles ne doit pas changer)\n incert_corr = np.std(corr_epoch) #incertitude sur la correction sur les époques\n\n print(\"Correction pour le dataset \"+globalname +\" : \"+ \"%.2f\" %corr + \" ± \" \"%.2f\" %incert_corr)\n \n plot([-5,25], [corr, corr], color=\"k\", linestyle='dashed', linewidth = 2, label=\"Correction moyenne\")\n ylabel(\"Différence entre magnitude apparente mesurée et réelle\")\n xlabel(\"Numéro étoile\")\n xlim((0,23))\n title(\"Ecart entre la magnitude apparente mesurée et réelle pour les étoiles de référence à chaque epoch dans le dataset \"+globalname)\n legend()\n show()\n \n return(corr, incert_corr)", "title": "" }, { "docid": "fdde56b38e1e128d5a6ee7300f74ee81", "score": "0.5190759", "text": "def projection_error(X, Vr):\n return la.norm(X - Vr @ Vr.T @ X) / la.norm(X)", "title": "" }, { "docid": "49a708059b1dc2a47575cd24d750a16d", "score": "0.5186033", "text": "def test_ellipsoid_vol_scaling():\n\n scale = 1.5 # linear scale\n\n for n in range(1, NMAX+1):\n # ellipsoid centered at origin with principle axes aligned with\n # coordinate axes, but random sizes.\n ctr = np.zeros(n)\n a = np.diag(np.random.rand(n))\n ell = nestle.Ellipsoid(ctr, a)\n\n # second ellipsoid with axes scaled.\n ell2 = nestle.Ellipsoid(ctr, 1./scale**2 * a)\n\n # scale volume of first ellipse to match the second.\n ell.scale_to_vol(ell.vol * scale**n)\n \n # check that the ellipses are the same.\n assert_allclose(ell.vol, ell2.vol)\n assert_allclose(ell.a, ell2.a)\n assert_allclose(ell.axes, ell2.axes)\n assert_allclose(ell.axlens, ell2.axlens)", "title": "" }, { "docid": "1ac3d8027f9f64561b283b3c46e67c87", "score": "0.51859367", "text": "def compute_image_rmse(im1, im2, nx, ny):\n rmse_im = 0\n for i in range(nx):\n for j in range(ny):\n rmse_im += np.linalg.norm(im1[i][j] - im2[i][j])**2\n return np.sqrt(rmse_im/(nx*ny))", "title": "" }, { "docid": "6cdb8fd9b5cef0f3f34f9f1d236f607f", "score": "0.5185267", "text": "def getResidual(self, initial, params, pivot, settings):", "title": "" }, { "docid": "de693e07f69400f81193f68b2f2c40c6", "score": "0.5173698", "text": "def ER(equat_core, polar_core, equat_shell, polar_shell):\n import numpy as np\n from .ellipsoid import ER as ellipsoid_ER\n return ellipsoid_ER(polar_shell, equat_shell)", "title": "" }, { "docid": "ffd90f92488fb0eeaaf7377042ed8740", "score": "0.5171655", "text": "def vdist(lat1,lon1,lat2,lon2):\n#%% reshape inputs\n lat1 = atleast_1d(lat1)\n lat2 = atleast_1d(lat2)\n lon1 = atleast_1d(lon1)\n lon2 = atleast_1d(lon2)\n keepsize = lat1.shape\n\n#%% Input check:\n if ((abs(lat1)>90) | (abs(lat2)>90)).any():\n raise ValueError('Input latitudes must be between -90 and 90 degrees, inclusive.')\n#%%% Supply WGS84 earth ellipsoid axis lengths in meters:\n a = 6378137 # definitionally\n b = 6356752.31424518 # computed from WGS84 earth flattening coefficient\n#%% preserve true input latitudes:\n lat1tr = lat1\n lat2tr = lat2\n#%% convert inputs in degrees to radians:\n lat1 = radians(lat1)\n 
lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n#%% correct for errors at exact poles by adjusting 0.6 millimeters:\n kidx = abs(pi/2-abs(lat1)) < 1e-10;\n if kidx.any():\n lat1[kidx] = sign(lat1[kidx])*(pi/2-(1e-10))\n\n kidx = abs(pi/2-abs(lat2)) < 1e-10\n if kidx.any():\n lat2[kidx] = sign(lat2[kidx])*(pi/2-(1e-10))\n\n f = (a-b)/a\n U1 = arctan((1-f)*tan(lat1))\n U2 = arctan((1-f)*tan(lat2))\n lon1 = lon1 % tau\n lon2 = lon2 % tau\n L = abs(lon2-lon1)\n kidx = L > pi\n if kidx.any():\n L[kidx] = tau - L[kidx]\n\n lamb = L.copy() # NOTE: program will fail without copy!\n lambdaold = zeros(lat1.shape)\n itercount = 0\n notdone = ones(lat1.shape,dtype=bool)\n alpha = zeros(lat1.shape)\n sigma = zeros(lat1.shape)\n cos2sigmam = zeros(lat1.shape)\n C = zeros(lat1.shape)\n warninggiven = False\n sinsigma = empty(notdone.shape)\n cossigma = empty(notdone.shape)\n while notdone.any(): # force at least one execution\n #print('iter:',itercount)\n #print(f'lambda[21752] = {lamb[21752],20}')\n itercount += 1\n if itercount > 50:\n if ~warninggiven:\n print('Essentially antipodal points encountered. Precision may be reduced slightly.',file=stderr)\n\n lamb[notdone] = pi\n break\n\n lambdaold[notdone] = lamb[notdone]\n\n sinsigma[notdone] = sqrt(\n (cos(U2[notdone])*sin(lamb[notdone]))**2+(cos(U1[notdone])*sin(U2[notdone])-sin(U1[notdone])*\n cos(U2[notdone])*cos(lamb[notdone]))**2)\n\n cossigma[notdone] = (sin(U1[notdone])*sin(U2[notdone])+\n cos(U1[notdone])*cos(U2[notdone])*cos(lamb[notdone]))\n # eliminate rare imaginary portions at limit of numerical precision:\n sinsigma[notdone]= sinsigma[notdone].real\n cossigma[notdone]= cossigma[notdone].real\n \n sigma[notdone] = arctan2(sinsigma[notdone],cossigma[notdone])\n \n alpha[notdone] = (arcsin(cos(U1[notdone])*cos(U2[notdone])*\n sin(lamb[notdone])/sin(sigma[notdone])))\n \n cos2sigmam[notdone] = (cos(sigma[notdone])-2*sin(U1[notdone])*\n sin(U2[notdone])/cos(alpha[notdone])**2)\n \n C[notdone] = f/16*cos(alpha[notdone])**2*(4+f*(4-3*cos(alpha[notdone])**2))\n \n lamb[notdone] = (L[notdone]+(1-C[notdone])*f*sin(alpha[notdone])\n *(sigma[notdone]+C[notdone]*sin(sigma[notdone])*\n (cos2sigmam[notdone]+C[notdone]*cos(sigma[notdone])*\n (-1+2.*cos2sigmam[notdone]**2))))\n #print(f'then, lambda(21752) = {lamb[21752],20})\n # correct for convergence failure in the case of essentially antipodal points\n if (lamb[notdone] > pi).any():\n print('Essentially antipodal points encountered. 
Precision may be reduced slightly.',file=stderr)\n warninggiven = True\n lambdaold[lamb>pi] = pi\n lamb[lamb>pi] = pi\n\n notdone = abs(lamb-lambdaold) > 1e-12\n\n# print(lambdaold)\n# print(sinsigma)\n# print(cossigma)\n# print(sigma)\n# print(alpha)\n# print(cos2sigmam)\n# print(C)\n# print(lamb)\n# print(lamb-lambdaold)\n\n u2 = cos(alpha)**2*(a**2-b**2)/b**2\n A = 1+u2/16384*(4096+u2*(-768+u2*(320-175*u2)))\n B = u2/1024*(256+u2*(-128+u2*(74-47*u2)))\n deltasigma = B*sin(sigma)*(cos2sigmam+B/4*(cos(sigma)*(-1+2*\n cos2sigmam**2)-B/6*cos2sigmam*(-3+4*sin(sigma)**2)*(-3+4*cos2sigmam**2)))\n\n dist_m = (b*A*(sigma-deltasigma)).reshape(keepsize)\n\n#%% From point #1 to point #2\n # correct sign of lambda for azimuth calcs:\n lamb = abs(lamb)\n kidx=sign(sin(lon2-lon1)) * sign(sin(lamb)) < 0\n lamb[kidx] = -lamb[kidx]\n numer = cos(U2)*sin(lamb)\n denom = cos(U1)*sin(U2)-sin(U1)*cos(U2)*cos(lamb)\n a12 = arctan2(numer,denom)\n kidx = a12<0\n a12[kidx]=a12[kidx] + tau\n #%% from poles\n a12[lat1tr <= -90] = 0\n a12[lat1tr >= 90] = pi\n az = (a12 * 57.2957795130823).reshape(keepsize) # to degrees\n\n#%% From point #2 to point #1\n # correct sign of lambda for azimuth calcs:\n lamb = abs(lamb);\n kidx=sign(sin(lon1-lon2)) * sign(sin(lamb)) < 0\n lamb[kidx]=-lamb[kidx]\n numer = cos(U1)*sin(lamb)\n denom = sin(U1)*cos(U2)-cos(U1)*sin(U2)*cos(lamb)\n a21 = arctan2(numer,denom)\n kidx=a21<0;\n a21[kidx] = a21[kidx] + tau\n #%% backwards from poles:\n a21[lat2tr >= 90] = pi\n a21[lat2tr <= -90] = 0\n backdist = (a21 * 57.2957795130823).reshape(keepsize) # to degrees\n\n return dist_m,az,backdist", "title": "" }, { "docid": "36dbd136fd69c36c682279bf158e8ff8", "score": "0.5162716", "text": "def ls_ellipsoid(xx,yy,zz):\n # change xx from vector of length N to Nx1 matrix so we can use hstack\n x = xx[:,np.newaxis]\n y = yy[:,np.newaxis]\n z = zz[:,np.newaxis]\n \n # Ax^2 + By^2 + Cz^2 + Dxy + Exz + Fyz + Gx + Hy + Iz = 1\n J = np.hstack((x*x,y*y,z*z,x*y,x*z,y*z, x, y, z))\n K = np.ones_like(x) #column of ones\n \n #np.hstack performs a loop over all samples and creates\n #a row in J for each x,y,z sample:\n # J[ix,0] = x[ix]*x[ix]\n # J[ix,1] = y[ix]*y[ix]\n # etc.\n \n JT=J.transpose()\n JTJ = np.dot(JT,J)\n InvJTJ=np.linalg.inv(JTJ);\n ABC= np.dot(InvJTJ, np.dot(JT,K))\n\n # Rearrange, move the 1 to the other side\n # Ax^2 + By^2 + Cz^2 + Dxy + Exz + Fyz + Gx + Hy + Iz - 1 = 0\n # or\n # Ax^2 + By^2 + Cz^2 + Dxy + Exz + Fyz + Gx + Hy + Iz + J = 0\n # where J = -1\n eansa=np.append(ABC,-1)\n \n return (eansa)", "title": "" }, { "docid": "8061500cd0d66e9222e5318c42a0d49e", "score": "0.51537514", "text": "def computeResidualsUV(params, stable, GCPxyz, GCPuv, refimg, \r\n optimise='YPR'): \r\n #Assign optimisable and stable parameters depending on optimise flag\r\n if optimise == 'YPR':\r\n campose = params \r\n camloc, radcorr, tancorr, focal, camcen = stable\r\n \r\n elif optimise == 'INT':\r\n radcorr = params[1:3]\r\n tancorr = params[3:5]\r\n focal = params[5:7]\r\n camcen = params[7:9]\r\n camloc, campose = stable \r\n \r\n elif optimise == 'EXT':\r\n camloc = params[0:3]\r\n campose = params[3:6]\r\n radcorr, tancorr, focal, camcen = stable \r\n\r\n elif optimise == 'ALL':\r\n camloc = params[0:3]\r\n campose = params[3:6]\r\n radcorr = params[6:9]\r\n tancorr = params[9:11]\r\n focal = params[11:13]\r\n camcen = params[13:15]\r\n \r\n else: \r\n camloc, campose, radcorr, tancorr, focal, camcen = stable\r\n \r\n #Project XYZ points to UV space \r\n GCPxyz_proj,depth,inframe = projectXYZ(camloc, 
campose, radcorr, tancorr, \r\n focal, camcen, refimg, GCPxyz)\r\n \r\n #Compute residuals using pythag theorem (i.e. pixel difference between pts)\r\n residual=[]\r\n for i in range(len(GCPxyz_proj)):\r\n residual.append(np.sqrt((GCPxyz_proj[i][0]-GCPuv[i][0])**2 + \r\n (GCPxyz_proj[i][1]-GCPuv[i][1])**2)) \r\n residual = np.array(residual)\r\n\r\n #Return all residuals\r\n return residual", "title": "" }, { "docid": "139f75a10e39a84d41a26c0bc148a817", "score": "0.5150466", "text": "def calculateResidual(self, integrator):\n import feutils\n\n residual = numpy.zeros( (integrator.spaceDim*integrator.numVertices),\n dtype=numpy.float64)\n\n # Matrix of elasticity values\n D = integrator._calculateElasticityMat()\n \n for cell in integrator.cells:\n cellR = numpy.zeros( (integrator.spaceDim*integrator.numBasis, 1),\n dtype=numpy.float64)\n vertices = integrator.vertices[cell, :]\n (jacobian, jacobianInv, jacobianDet, basisDeriv) = \\\n feutils.calculateJacobian(integrator.quadrature, vertices)\n fieldTpdt = integrator.fieldT + integrator.fieldTIncr\n for iQuad in xrange(integrator.numQuadPts):\n wt = integrator.quadWts[iQuad] * jacobianDet[iQuad]\n BL0 = integrator._calculateBasisDerivMatLinear0(basisDeriv, iQuad)\n BL1 = integrator._calculateBasisDerivMatLinear1(basisDeriv, iQuad, fieldTpdt)\n BL = BL0 + BL1\n strain = integrator._calculateStrain(basisDeriv, iQuad, fieldTpdt)\n S = numpy.dot(D, strain.transpose())\n cellR -= wt * numpy.dot(BL.transpose(), S)\n \n feutils.assembleVec(residual, cellR.flatten(), cell, integrator.spaceDim)\n\n return residual", "title": "" }, { "docid": "e68bec9abe34e06a55aec62e0bb39e60", "score": "0.5149262", "text": "def ecef2lla(r):\n R_earth = 6378.1\n lat = math.asin(r[2] / np.linalg.norm(r))\n lon = math.atan2(r[1], r[0])\n alt = np.linalg.norm(r) - R_earth;\n\n return lat, lon, alt", "title": "" }, { "docid": "0566b82ef66bbd64df4a8c5aeb63dc86", "score": "0.51475084", "text": "def residual(self, x, knots, coefs, deg, matching):\n # construct the B-spline approximations of mu(x) and theta(x)\n theta = lambda x: self.bspline(x, knots[0], coefs[0], deg, 0)\n theta_x = lambda x: self.bspline(x, knots[0], coefs[0], deg, 1)\n \n mu = lambda x: self.bspline(x, knots[1], coefs[1], deg, 0)\n mu_x = lambda x: self.bspline(x, knots[1], coefs[1], deg, 1)\n \n # compute the residual \n res_theta = (theta_x(x[0]) - \n self.model.theta_prime(x[0], [theta(x[0]), mu(x[0])], matching))\n res_mu = (mu_x(x[1]) - \n self.model.mu_prime(x[1], [theta(x[0]), mu(x[0])], matching))\n res = np.hstack((res_theta, res_mu)) \n \n return res", "title": "" }, { "docid": "97e67e32becc930b8ffc715fdbe5222c", "score": "0.51344484", "text": "def residuals(self, p, data, grid):\n return data - self.evaluate(p, grid)", "title": "" }, { "docid": "0c957e63a82cab7bb9959b2e7684ef31", "score": "0.513346", "text": "def evaluate_residuals(self, params):\n model = self.evaluate_model(params)\n return self.data.values.squeeze() - model.values.squeeze()", "title": "" }, { "docid": "0521fb6ddbffa81baa58d0e6141b75a0", "score": "0.5124542", "text": "def linear_pst_ridge(y, X, lam, max_iter=1000, tol=1e-7, algo=\"proj_gd\"):\n n, p = X.shape\n x = np.zeros(p)\n XTX = np.dot(X.T, X)\n XTy = np.dot(X.T, y)\n ss = 1.0 / (max_eigval(XTX, eigvals_only=True, eigvals=(p-1, p-1),\n check_finite=False)[0] + lam)\n\n ridge_obj = lambda x: lr.l2_loss_obj(x, y, X) + lam * lr.l2_reg_obj(x)\n ridge_grad = lambda x, XTy, XTX: lr.l2_loss_grad2(x, XTy, XTX) + lam * lr.l2_reg_grad(x)\n\n if algo == \"proj_gd\":\n x_hat = 
lib.proj_gd(x, ridge_grad, proj.pst_quadrant,\n ss, ridge_obj, max_iter, tol,\n grad={\"XTX\" : XTX, \"XTy\" : XTy}, proj={} )\n\n return x_hat", "title": "" }, { "docid": "ed56180d6ca821f90f760d1a7a8ed3d3", "score": "0.5121161", "text": "def _get_ellipsoid(self, center, radius):\n N = 50\n u = np.linspace(0, 2 * np.pi, N)\n v = np.linspace(0, np.pi, N)\n x = center[0] + radius[0] * np.outer(np.cos(u), np.sin(v))\n y = center[1] + radius[1] * np.outer(np.sin(u), np.sin(v))\n z = center[2] + radius[2] * np.outer(np.ones_like(u), np.cos(v))\n return x, y, z", "title": "" }, { "docid": "f64f739245253d4fdfd5763f28a59109", "score": "0.5114636", "text": "def calculate_residuals(df_from: pd.DataFrame, df_to: pd.DataFrame):\n df_combined = pd.merge(df_from, df_to, how = \"inner\", left_index=True, right_index=True, suffixes = (\"_from\", \"_to\"))\n \n df_from[\"dAlpha\"] = (df_combined.alpha_to-df_combined.alpha_from)*10**9\n df_from[\"dDelta\"] = (df_combined.delta_to-df_combined.delta_from)*10**9\n\n return df_from", "title": "" }, { "docid": "388e1f9c393d8f104431cfe5384ec0d2", "score": "0.5111546", "text": "def get_xyz_ellipsoid(self):\n from ellipsoid import Ellipsoid3D\n # The following code is a python translation of the\n # CalcErrorEllipsoid() c-function from the NonLinLoc package,\n # written by Anthony Lomax\n cov = self.get_xyz_cov()\n if cov is None:\n return None\n\n u, s, v = np.linalg.svd(cov)\n\n del_chi_2 = 3.53 # 3.53: value for 68% conf\n ell = Ellipsoid3D()\n ell.az1 = math.degrees(math.atan2(u[0, 0], u[1, 0]))\n if ell.az1 < 0.0:\n ell.az1 += 360.0\n ell.dip1 = math.degrees(math.asin(u[2, 0]))\n ell.len1 = math.sqrt(del_chi_2) / math.sqrt(1.0 / s[0])\n ell.az2 = math.degrees(math.atan2(u[0, 1], u[1, 1]))\n if ell.az2 < 0.0:\n ell.az2 += 360.0\n ell.dip2 = math.degrees(math.asin(u[2, 1]))\n ell.len2 = math.sqrt(del_chi_2) / math.sqrt(1.0 / s[1])\n ell.len3 = math.sqrt(del_chi_2) / math.sqrt(1.0 / s[2])\n\n self.ellipsoid = ell\n return ell", "title": "" }, { "docid": "dc7259a87246c6e1d934f69456779106", "score": "0.51066667", "text": "def EquivRadius( dataDict ):\n \n try:\n a = np.array(dataDict[\"sma\"])\n except KeyError:\n # maybe it's a Bender-format ellipse fit\n a = np.array(dataDict[\"a\"])\n try:\n ellipticity = np.array(dataDict[\"ellip\"])\n except KeyError:\n ellipticity = np.array(dataDict[\"eps\"])\n # maybe it's a Bender-format ellipse fit\n b = (1.0 - ellipticity)*a\n r_eq = np.sqrt(a*b)\n return r_eq", "title": "" }, { "docid": "1243d07aa405b15d8b878fc3f353694f", "score": "0.5105175", "text": "def path_rmse(state_estimates):\n x_est = state_estimates[1][:]\n y_est = state_estimates[3][:]\n sqerrors = []\n errors = []\n residuals = []\n rmse_time = []\n\n #resid = measured - predicted by segments\n\n for i in range(len(x_est)):\n if (x_est[i]<0 and y_est[i]>0):\n #Upper left corner\n resid = distance(x_est[i], y_est[i], 0,0)\n sqerror = resid**2\n\n elif (x_est[i]>10 and y_est[i]>0):\n #Upper right coner\n resid = distance(x_est[i], y_est[i], 10,0)\n sqerror = resid**2\n\n elif (x_est[i]>10 and y_est[i]<-10):\n #Lower right coner\n resid = distance(x_est[i], y_est[i], 10,-10)\n sqerror = resid**2\n\n elif (x_est[i]<0 and y_est[i]<-10):\n #Lower right coner\n resid = distance(x_est[i], y_est[i], 0,-10)\n sqerror = resid**2\n\n else:\n #General case\n r1 = (y_est[i] - 0)\n r2 = (x_est[i] - 10)\n r3 = (y_est[i] - (-10))\n r4 = (x_est[i] -0)\n resid = min(abs(r1),abs(r2),abs(r3),abs(r4))\n\n residuals.append(resid) #residuals are basically cte\n 
sqerrors.append(resid**2)\n errors.append(abs(resid))\n mse = np.mean(sqerrors)\n rmse = math.sqrt(mse)\n rmse_time.append(rmse)\n\n mean_error = np.mean(errors)\n mse = np.mean(sqerrors)\n rmse = math.sqrt(mse)\n\n return rmse, residuals, mean_error, rmse_time", "title": "" }, { "docid": "4ff688579adae5e68580d319c1f9b1e7", "score": "0.5099058", "text": "def apply_nonlinear(self, inputs, outputs, residuals):\n pass", "title": "" }, { "docid": "15dd3142a52b843bc57e11b93fb62795", "score": "0.50978315", "text": "def ridge_regression(train_path, validation_path):\n # *** START CODE HERE ***\n train_data, train_labels = util.load_dataset(train_path)\n valid_data, valid_labels = util.load_dataset(validation_path)\n\n\n #eta_list = [ 1/np.sqrt(2 * scale_val) for scale_val in scale_list if scale_val != 0]\n #eta_list.insert(0, 10e-50)\n lam_opt = 1/(2*eta**2)\n val_err = []\n for lam in scale_list:\n theta_map = np.linalg.pinv(train_data.T @ train_data + 2*(sigma ** 2)*lam * np.eye(d) * lam_opt) @ train_data.T @ train_labels\n\n val_err.append( ((valid_data @ theta_map - valid_labels) ** 2).mean(axis=0))\n\n return val_err\n # *** END CODE HERE", "title": "" }, { "docid": "a41c4b81f079a52f5efd254c85b72d5c", "score": "0.50955254", "text": "def cal_rmse(src_img, dst_img):\n\n return np.sqrt(cal_mse(src_img, dst_img))", "title": "" }, { "docid": "70869e5c6f8da4ba1b8b11b17c9841c0", "score": "0.5092425", "text": "def solve_for_everything_bilinear(self):\n if self.spectral_guess is None:\n # First, deconvolve the flux to obtain a guess for the spectrum\n # We subtract the convolved mean spectrum and add the deconvolved\n # residuals to each of the component spectral means to obtain\n # a rough guess for all components. This is by no means optimal.\n f = self.Se2i.dot(np.mean(self.flux, axis=0))\n f /= f[self.continuum_idx]\n mu = self.spectral_mean.T\n f -= np.mean(np.dot(self.KT0, mu), axis=1)\n CInv = np.dot(self.KT0.T, self.KT0) / np.mean(self.flux_err) ** 2\n term = np.dot(self.KT0.T, f) / np.mean(self.flux_err) ** 2\n self.spectrum_ = mu.T + self.L1(\n CInv,\n term,\n np.array(self.spectral_lambda),\n self.spectral_maxiter,\n np.array(self.spectral_eps),\n np.array(self.spectral_tol),\n )\n else:\n self.spectrum_ = self.spectral_guess\n self.meta[\"spectrum_guess\"] = self.spectrum_\n\n # Assume a unit baseline guess\n self.baseline = np.ones(self.nt)\n\n # Iterate\n for i in tqdm(range(len(self.T)), disable=self.quiet):\n\n # Solve for the map\n self._S = None\n if self.linear:\n\n self.solve_for_map_linear(T=self.T[i])\n\n else:\n\n self.solve_for_map_linear(\n T=self.T[i], baseline_var=self.baseline_var\n )\n self.baseline = np.dot(self.C, self.y.T.reshape(-1))\n\n # Solve for the spectrum\n self.solve_for_spectrum_linear()", "title": "" }, { "docid": "18ee78713eeb02abe2ce080102def53f", "score": "0.5079417", "text": "def create_somerville_elev_slope_geotiffs():\n\n # get points and KDTree\n tree, zpts = lidar_kdtree(load=True)\n\n # prepare output grids from somerville mask raster\n mask, x_vec, y_vec, meta = read_geotiff(OUTPUT_SOMERVILLE_MASK_GTIF)\n mask = mask.astype(np.bool)\n elev = np.zeros(mask.shape, dtype=np.float32)\n elev[:] = np.nan\n slope_dir = elev.copy()\n slope_pct = elev.copy()\n\n # populate all grid points\n nrows, ncols = elev.shape\n for ii in range(nrows):\n\n # progress monitor\n if ii % 100 == 0 or ii == nrows-1:\n print(f'Row {ii} / {nrows}')\n\n for jj in range(ncols):\n if mask[ii, jj]:\n\n # get point coords\n this_x = x_vec[jj]\n this_y = y_vec[ii]\n\n # get all 
pts within 15 ft (yields 30-foot diameter circle ROI)\n nbr_idx = tree.query_ball_point((this_x, this_y), NBR_RADIUS)\n nbr_num = len(nbr_idx)\n if nbr_num < FIT_MIN_PTS:\n continue\n\n # find best-fit plane to points\n fit = np.linalg.lstsq(\n a=np.column_stack(( np.ones((nbr_num, 1)), tree.data[nbr_idx] )),\n b=zpts[nbr_idx],\n rcond=None\n )[0]\n\n # extract elevation (evaluate best fit plane at this point)\n elev[ii,jj] = fit[0] + fit[1]*this_x + fit[2]*this_y\n\n # extract slope magnitude (vector magnitude is m/m, times 100 to percent grade)\n # NOTE: confusingly, percent grade can be > 100, see: https://en.wikipedia.org/wiki/Grade_(slope)\n slope_pct[ii,jj] = np.sqrt(fit[1]*fit[1] + fit[2]*fit[2])*100\n \n # extract slope direction\n slope_dir[ii,jj] = np.degrees(np.arctan2(fit[2], fit[1]))\n\n # write results to geotiff\n meta.update({\n 'driver': 'GTiff',\n 'dtype': 'float32',\n })\n with rasterio.open(f'{OUTPUT_SOMERVILLE_ELEV_PREFIX}_lstsq_30ft.gtif', 'w', **meta) as elev_raster:\n elev_raster.write(elev, 1)\n with rasterio.open(OUTPUT_SOMERVILLE_SLOPE_PCT_GTIF, 'w', **meta) as slope_pct_raster:\n slope_pct_raster.write(slope_pct, 1)\n with rasterio.open(OUTPUT_SOMERVILLE_SLOPE_DIR_GTIF, 'w', **meta) as slope_dir_raster:\n slope_dir_raster.write(slope_dir, 1)", "title": "" }, { "docid": "b2d3ee1c38f54e5608570772bb8129d8", "score": "0.5072665", "text": "def compute_residual(ssh, t_model, ttide):\n\n # interpolate tides to model time\n tides_interp = shared.interp_to_model_time(t_model, ttide.pred_all, ttide.time)\n\n res = ssh - tides_interp\n\n return res", "title": "" }, { "docid": "35053b0fb7f023deb108962dc36190bd", "score": "0.5071266", "text": "def test_residual(basis, **kwargs):\n\n\tdef f(x):\n\t\tw = np.ones(x.shape)\n\t\tw /= np.linalg.norm(w)\n\t\tw2 = np.zeros(x.shape)\n\t\tw2[0] = 1\n\t\treturn np.dot(x, w)**3 + np.dot(x, w2)**2 + np.dot(x,w)*np.dot(x, w2) + 10.\n\n\t# Generate samples of function\n\tX = np.random.uniform(size = (100, 5))\n\tfX = np.array([f(x) for x in X])\n\n\t# We setup the right subspace so we should have no residual\n\tU = np.array([np.ones(5), np.zeros(5)]).T\n\tU[0,1] = 1\n\tU = orth(U)\n\tr = residual(U, X, fX, basis, **kwargs) \n\tassert np.all(np.isclose(r, 0))", "title": "" }, { "docid": "4fc781864a00ad13c1c0660a1f4890df", "score": "0.50684106", "text": "def _residual(self, x):\n h = x\n h = self.b1(h)\n h = self.activation(h)\n h = self._upsample_conv(h, self.c1) if self.upsample else self.c1(h)\n h = self.b2(h)\n h = self.activation(h)\n h = self.c2(h)\n\n return h", "title": "" }, { "docid": "8a8e3cc682f7ec4c74441606e840efbe", "score": "0.5057676", "text": "def get_halo_center(ds, center_guess, **kwargs):\n\n radius = kwargs.get('radius', 50.) 
# search radius in kpc\n units = kwargs.get('units', 'code')\n\n length = 'code_length'\n vel = 'code_velocity'\n\n print('get_halo_centers: ', length, vel)\n sphere_region = ds.sphere(center_guess, (radius, 'kpc'))\n print(\"we have obtained the spherical region\")\n\n x_pos, y_pos, z_pos = np.array(sphere_region[\"x\"].in_units(length)), \\\n np.array(sphere_region[\"y\"].in_units(length)), \\\n np.array(sphere_region[\"z\"].in_units(length))\n\n dm_density = sphere_region['Dark_Matter_Density']\n print(\"we have extracted the DM density\")\n\n # now determine the location of the highest DM density, which should be the\n # center of the main halo\n imax = (np.where(dm_density > 0.9999 * np.max(dm_density)))[0]\n halo_center = [x_pos[imax[0]], y_pos[imax[0]], z_pos[imax[0]]]\n print(\" we have obtained the preliminary center\")\n\n sph = ds.sphere(halo_center, (5., 'kpc'))\n velocity = [np.mean(sph['x-velocity']),\n np.mean(sph['y-velocity']),\n np.mean(sph['z-velocity'])]\n print(\"got the velocities\")\n\n if (units == 'physical'): # do it over again but in the physical units\n x_pos, y_pos, z_pos = np.array(sphere_region[\"x\"].in_units('kpc')), \\\n np.array(sphere_region[\"y\"].in_units('kpc')), \\\n np.array(sphere_region[\"z\"].in_units('kpc'))\n halo_center = [x_pos[imax[0]], y_pos[imax[0]], z_pos[imax[0]]]\n velocity = [np.mean(sph['x-velocity'].in_units('km/s')),\n np.mean(sph['y-velocity'].in_units('km/s')),\n np.mean(sph['z-velocity'].in_units('km/s'))]\n\n print('Located the main halo at:', halo_center, velocity)\n\n return halo_center, velocity", "title": "" }, { "docid": "0315d3851bad16da6d5e7b8b5a58d143", "score": "0.50473106", "text": "def get_residual(data, forecast):\n return (data[\"y\"] - forecast[\"yhat\"])", "title": "" }, { "docid": "d2a8d5fb6dbcface791a15214dc38893", "score": "0.50347084", "text": "def get_residual(\n s1,\n s2,\n var,\n norm=\"L2\",\n ignore_nan=False,\n diff_window=0,\n normalize=False,\n normalize_how=\"max\",\n):\n # type: (Spectrum, Spectrum, str, bool, int) -> np.array, np.array\n\n if normalize:\n from radis.spectrum.operations import multiply\n\n if isinstance(normalize, tuple):\n wmin, wmax = normalize\n w1, I1 = s1.get(var, copy=False) # (faster not to copy)\n b = (w1 > wmin) & (w1 < wmax)\n if normalize_how == \"max\":\n norm1 = I1[b].max()\n elif normalize_how == \"mean\":\n norm1 = I1[b].mean()\n elif normalize_how == \"area\":\n norm1 = np.abs(np.trapz(I1[b], w1[b]))\n else:\n raise ValueError(\n \"Unexpected `normalize_how`: {0}\".format(normalize_how)\n )\n # now normalize s2. 
Ensure we use the same unit system!\n w2, I2 = s2.get(var, Iunit=s1.units[var], wunit=s1.get_waveunit())\n b = (w2 > wmin) & (w2 < wmax)\n if normalize_how == \"max\":\n norm2 = I2[b].max()\n elif normalize_how == \"mean\":\n norm2 = I2[b].mean()\n elif normalize_how == \"area\":\n norm2 = np.abs(np.trapz(I2[b], w2[b]))\n else:\n raise ValueError(\n \"Unexpected `normalize_how`: {0}\".format(normalize_how)\n )\n s1 = multiply(s1, 1 / norm1, var=var)\n s2 = multiply(s2, 1 / norm2, var=var)\n else:\n if normalize_how == \"max\":\n norm1 = s1.get(var, copy=False)[1].max()\n norm2 = s2.get(var)[1].max()\n elif normalize_how == \"mean\":\n norm1 = s1.get(var, copy=False)[1].mean()\n norm2 = s2.get(var)[1].mean()\n elif normalize_how == \"area\":\n norm1 = s1.get_integral(var)\n norm2 = s2.get_integral(\n var, wunit=s1.get_waveunit(), Iunit=s1.units[var]\n )\n else:\n raise ValueError(\n \"Unexpected `normalize_how`: {0}\".format(normalize_how)\n )\n # Ensure we use the same unit system!\n s1 = multiply(s1, 1 / norm1, var=var)\n s2 = multiply(s2, 1 / norm2, var=var)\n\n # mask for 0\n wdiff, dI = get_diff(s1, s2, var, resample=True, diff_window=diff_window)\n\n if ignore_nan:\n b = np.isnan(dI)\n wdiff, dI = wdiff[~b], dI[~b]\n warningText = (\n 'NaN output in residual. You should use \"ignore_nan=True\". Read the help.'\n )\n if norm == \"L2\":\n output = np.sqrt((dI ** 2).sum()) / len(dI)\n if np.isnan(output):\n warn(warningText, UserWarning)\n return output\n elif norm == \"L1\":\n output = (np.abs(dI)).sum() / len(dI)\n if np.isnan(output):\n warn.warning(warningText, UserWarning)\n return output\n else:\n raise ValueError(\"unexpected value for norm\")", "title": "" } ]
3a64a0b9859bfe2c14156c5350510d65
If the download doesn't complete, the validator fails.
[ { "docid": "f317b436fec237c212be7e1c65623b82", "score": "0.0", "text": "def test_download_fail(mock_tools, tmp_path):\n # Mock the environment as if there is not WiX variable\n mock_tools.os.environ.get.return_value = None\n\n # Mock the download failure\n mock_tools.download.file.side_effect = NetworkFailure(\"mock\")\n\n # Verify the install. This will trigger a download\n with pytest.raises(NetworkFailure, match=\"Unable to mock\"):\n WiX.verify(mock_tools)\n\n # The environment was queried.\n mock_tools.os.environ.get.assert_called_with(\"WIX\")\n\n # A download was initiated\n mock_tools.download.file.assert_called_with(\n url=WIX_DOWNLOAD_URL,\n download_path=tmp_path / \"tools\",\n role=\"WiX\",\n )\n\n # ... but the unpack didn't happen\n assert mock_tools.shutil.unpack_archive.call_count == 0", "title": "" } ]
[ { "docid": "3b8e5b802d54159eac4931d814661471", "score": "0.67761797", "text": "def __download_and_verify(url):\n \n n = __download_url(url)\n if n==1:\n FAILED = True\n elif n==0:\n if __verify(url):\n VERIFYFAIL=True\n elif n==3:\n if __check_md5(url):\n VERIFYFAIL=True\n elif n==4:\n if __check_sha1(url):\n VERIFYFAIL=True", "title": "" }, { "docid": "a591c46d63005b7f06e7f0ef40f6d248", "score": "0.62038076", "text": "def testDownloadIfNeeded(self):\n\n validURL = 'https://s3.amazonaws.com/pronto-data/open_data_year_one.zip'\n invalidURL = 'ttps://s3.amazonaws.com/pronto-data/open_data_year_one.zip'\n fileName = 'open_data_year_one.zip'\n path = os.getcwd() + '\\open_data_year_one.zip'\n\n # invalid url\n download_if_needed(invalidURL,fileName)\n self.assertFalse(os.path.exists(path))\n\n # valid url\n download_if_needed(validURL,fileName)\n self.assertTrue(os.path.exists(path))", "title": "" }, { "docid": "8ac40a253d28c6ccb5f9a95729c7c05f", "score": "0.60681486", "text": "def test_download_no_hash(self, client):\n\n response = client.post('/download')\n assert response.status_code == 400", "title": "" }, { "docid": "ee0ab6d280debe0fc7412f3dfb529ed1", "score": "0.6017704", "text": "def verify_download(self, image):\n\n valid = True\n\n # Don't download any images that would overwrite an existing one\n if os.path.exists(self.find_image_path(image)):\n valid = False\n return valid\n\n # Ignore any images that are unreachable for any reason\n try:\n image_request = urllib.request.urlopen(image.get_image_url())\n except urllib.error.HTTPError:\n print(\"Image unreachable\")\n valid = False\n return valid\n\n # Ignore blank images\n try:\n web_file_size = int(image_request.info()[\"Content-Length\"])\n except TypeError:\n # The length of the photo is not provided. 
Take no risks\n valid = False\n return valid\n\n # ID blank images by having unreasonably small file size\n if web_file_size <= 100:\n valid = False\n return valid\n\n # If nothing wrong was detected, return the true value assigned in the\n # beginning\n\n return valid", "title": "" }, { "docid": "184ea690ae4c989ea36bd71246c78ae2", "score": "0.5946057", "text": "def test_download_bad_url():\n import pytest\n pytest.skip('This takes a long time to timeout and I dont understand why')\n\n url = 'http://www.a-very-incorrect-url.gov/does_not_exist.txt'\n # if not ub.argflag('--network'):\n # pytest.skip('not running network tests')\n\n # Ensure the opener exist\n # import urllib.request as urllib_x\n from urllib.error import URLError # NOQA\n # if urllib_x._opener is None:\n # urllib_x.install_opener(urllib_x.build_opener())\n\n dpath = ub.Path.appdir('ubelt/tests/test_download').ensuredir()\n fname = basename(url)\n fpath = join(dpath, fname)\n\n ub.delete(fpath)\n assert not exists(fpath)\n\n with pytest.raises(URLError):\n ub.download(url, fpath=fpath, verbose=1, timeout=1.0)", "title": "" }, { "docid": "67fb8f2769d4e943c459df5e90f2f376", "score": "0.5929214", "text": "def was_successful(self):\n\n # download status must be SUCCESS\n # if checksum validation was requested, validation status must be PASSED\n # otherwise, do not check checksum status\n success = False\n if self.download_status == ds.DownloadStatus.COMPLETED:\n if self.cli_kwargs[\"validate_checksum\"]:\n if self.checksum_status == cs.ChecksumStatus.PASSED:\n success = True\n else:\n success = True\n return success", "title": "" }, { "docid": "d0e45e0e70cf3a228dcfbf08570fb53c", "score": "0.58769894", "text": "def _verify(self) -> None:\n # Check if the files already exist\n filepath = os.path.join(self.root, self.base_dir)\n if os.path.exists(filepath):\n return\n\n # Check if zip file already exists (if so then extract)\n if self._check_integrity():\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n \"Dataset not found in `root` directory and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automatically download the dataset.\"\n )\n\n # Download and extract the dataset\n self._download()\n self._extract()", "title": "" }, { "docid": "2ee2c95cd6280b945fcac5a69122e0e9", "score": "0.58437836", "text": "def callback(data, total):\n # Use for unit testing to emulate fail download\n # Can ignore\n if self.test_net:\n raise Exception(\"Testing fail download\")", "title": "" }, { "docid": "2716f6cca1aa075d7a8ab50eb4918b25", "score": "0.5819806", "text": "def check_file_download( url, filename, dl_func=download_file, chkLen=False ):\n\n if not os.path.isfile( filename ):\n p( 'getting \"{}\"'.format(url) )\n retval = dl_func( url, filename )\n if retval != 200:\n perr( f'server returned {retval}' )\n return False # didn't exist\n else:\n p('!EXISTS \"{}\"'.format(filename))\n\n if not chkLen:\n return True # did exist\n\n print('!EXISTS \"{}\" **Checking Length** '.format(filename), end='')\n # get headers and compare against byte size of file\n H = requests.head( url )\n if H.headers.get('Content-Length', None):\n if H.headers['Content-Length'] != str(os.stat(filename).st_size):\n a = str(H.headers['Content-Length'])\n b = str(os.stat(filename).st_size)\n print( f'Size doesnt match [{a} != {b}]. 
Re-getting' )\n retval = dl_func(url, filename)\n if retval != 200:\n perr( f'server returned {retval}' )\n return False # didn't checklen correctly\n else:\n print(' ok')\n return True # correct length", "title": "" }, { "docid": "fa3bed98ebd8a93adb684a9fdf12bf4b", "score": "0.5797326", "text": "def show_download_error(name, err):\n pass", "title": "" }, { "docid": "3c90a200e231cc36542270fb342e04b4", "score": "0.579386", "text": "def _handle_download_report(future, url):\n download_report = future.result()\n downloaded = True\n if not download_report[\"downloaded\"]:\n reason = download_report[\"reason\"]\n downloaded = False\n\n if reason == \"err_timeout\":\n logger.warning(f\"Could not download {url}: {reason}\")\n else:\n logger.debug(f\"Could not download {url}: {reason}\")\n\n return downloaded", "title": "" }, { "docid": "8f6f64c1ac8a485dbd4dfd130af0a06a", "score": "0.57829005", "text": "def __validate_processor(self, callback=None):\n # Keep this function running on a timer\n while True:\n # If the queue is empty, wait a while before checking again\n if self.validate_queue.empty():\n time.sleep(1)\n # Get the validation entry\n entry = self.validate_queue.get()\n # If the file does not validate, add it to the download queue\n if not self.__validate_file(entry[\"path\"], entry[\"name\"], entry[\"hash\"]):\n self.download_queue.put(entry)\n self.downloads_total_counter += 1\n # Either way, mark the task as done\n self.validate_queue.task_done()\n if callback and callable(callback):\n with self.validate_callback_lock:\n callback()", "title": "" }, { "docid": "d283244778f4523aa12dd2584c358ccf", "score": "0.5761155", "text": "def checkDownloadfailed(self):\n self.info = self.return_json.json()\n time.sleep(1)\n\n beaurl = self.info['ads'][0]['beacon_url']\n time.sleep(1)\n self.url = self.info['ads'][0]['beacon']['download_failed'][0]['parameters']\n self.url = beaurl + self.url\n self.code = requests.request('GET', self.url)\n\n time.sleep(1)\n configPG.connectPG(\"root\",\"aszx\",\"5432\",\"192.168.0.66\",\"cpd_reporting\")\n self.sql = \"select download_failed FROM public.cpd_stats where ad_channel_id=451 and imp_type=1 and ad_customer_id=14207 and ad_group_id=69 order by ts desc\"\n self.cursor = configPG.executeSQL(self.sql)\n self.result = configPG.get_one(self.cursor)\n\n if self.result[0] == 1:\n self.assertEqual(self.result[0], 1)\n print(\"PGdb download_failed is test seccussfully!\")\n else:\n self.assertEqual(self.result[0], 1)\n print(\"PGdb download_failed test failed\")", "title": "" }, { "docid": "ef6f7ccc775980e0ffc9b248e044fd2c", "score": "0.57368517", "text": "def import_succeeded(self):\n return not self.validation_errors", "title": "" }, { "docid": "a2869a1eadc23ea6e43f498270a315fd", "score": "0.5693402", "text": "def test_invalid_download_files(self):\n for type_ in [\"html\", \"foo\", \"zip\"]:\n resp = self.client.get(\n f\"/_/downloads/en/latest/{type_}/\",\n headers={\"host\": \"project.dev.readthedocs.io\"},\n )\n self.assertEqual(resp.status_code, 404)", "title": "" }, { "docid": "96226647ba26ebc080d767d1933191d2", "score": "0.56864303", "text": "def test_download_with_inital_partial_download_before_failure(self):\n # Set up harness to fail download after several hundred KB so download\n # server will have saved something before we retry.\n harness = CallbackTestHarness(\n fail_after_n_bytes=LARGE_KEY_SIZE/2)\n larger_src_key_as_string = os.urandom(LARGE_KEY_SIZE)\n larger_src_key = self._MakeKey(data=larger_src_key_as_string)\n res_download_handler = 
ResumableDownloadHandler(num_retries=1)\n dst_fp = self.make_dst_fp()\n larger_src_key.get_contents_to_file(\n dst_fp, cb=harness.call,\n res_download_handler=res_download_handler)\n # Ensure downloaded object has correct content.\n self.assertEqual(LARGE_KEY_SIZE,\n get_cur_file_size(dst_fp))\n self.assertEqual(larger_src_key_as_string,\n larger_src_key.get_contents_as_string())\n # Ensure some of the file was downloaded both before and after failure.\n self.assertTrue(\n len(harness.transferred_seq_before_first_failure) > 1 and\n len(harness.transferred_seq_after_first_failure) > 1)", "title": "" }, { "docid": "43b01d5e2a4da8f5375255600e45706a", "score": "0.56796914", "text": "def download_and_check(url, fpath=\".\", name=\"\"): #%t\n down_path = Path.joinpath(Path(fpath), get_name_from_url(url, name))\n if any(x for x in [\"www\", \"http\"] if x in url):\n status_code = urllib.request.urlopen(url).getcode()\n if status_code != 200:\n print(\"Sorry, invalid url\")\n else:\n if not Path.exists(down_path):\n wget.download(url, str(down_path), bar=bar_progress)\n print(f\"Downloaded to {down_path}\")\n else:\n print(f\"Already downloaded at {down_path}\")\n else:\n if not Path.exists(down_path):\n return \"Invalid path\"\n return down_path", "title": "" }, { "docid": "23b5cd25f260975c23df56423eb41092", "score": "0.56782246", "text": "def next_is_valid(url):\n # [#] print>> sys.stderr, \"######\", url\n return True", "title": "" }, { "docid": "a96d5cc1aacd4b4e0a3bcdad8cfa8b0d", "score": "0.56706965", "text": "def is_failed(self):\n return False", "title": "" }, { "docid": "e9481e520546dd3b579e6c62ae5b15c6", "score": "0.56657183", "text": "def failed(self):\n return not self.success", "title": "" }, { "docid": "e9481e520546dd3b579e6c62ae5b15c6", "score": "0.56657183", "text": "def failed(self):\n return not self.success", "title": "" }, { "docid": "a2899963c146f1cdb91c1e799904f8de", "score": "0.5644942", "text": "def test_download_start_timeout_yields_errors(self):\n self.timeout_by_text['Testing download...'] = True\n\n result = self.banjo.perform_test()\n\n self.assertErrorMessagesEqual(\n [browser_client_common.ERROR_S2C_NEVER_STARTED], result.errors)", "title": "" }, { "docid": "ba7c346073bab9cd0701eb1ff23fc077", "score": "0.5639667", "text": "async def async_download_zip_file(self, content, validate) -> None:\n try:\n filecontent = await self.hacs.async_download_file(content.browser_download_url)\n\n if filecontent is None:\n validate.errors.append(f\"[{content.name}] was not downloaded\")\n return\n\n temp_dir = await self.hacs.hass.async_add_executor_job(tempfile.mkdtemp)\n temp_file = f\"{temp_dir}/{self.repository_manifest.filename}\"\n\n result = await self.hacs.async_save_file(temp_file, filecontent)\n with zipfile.ZipFile(temp_file, \"r\") as zip_file:\n zip_file.extractall(self.content.path.local)\n\n def cleanup_temp_dir():\n \"\"\"Cleanup temp_dir.\"\"\"\n if os.path.exists(temp_dir):\n self.logger.debug(\"%s Cleaning up %s\", self.string, temp_dir)\n shutil.rmtree(temp_dir)\n\n if result:\n self.logger.info(\"%s Download of %s completed\", self.string, content.name)\n await self.hacs.hass.async_add_executor_job(cleanup_temp_dir)\n return\n\n validate.errors.append(f\"[{content.name}] was not downloaded\")\n except BaseException: # lgtm [py/catch-base-exception] pylint: disable=broad-except\n validate.errors.append(\"Download was not completed\")", "title": "" }, { "docid": "c818ddc9f5fdb9ede5626f9f05627a50", "score": "0.5590708", "text": "def test_download_valid(setup, 
set_file_path):\n setup.download(set_file_path[1], set_file_path[0] + set_file_path[1])\n assert os.path.exists(set_file_path[0] + set_file_path[1])\n subprocess.run(f\"rm {set_file_path[0]}{set_file_path[1]}\", shell=True, check=True)", "title": "" }, { "docid": "6e2da20fb1eb418b257c2eeb2b6c9be8", "score": "0.5589996", "text": "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "d3a33743826db6bd8e4ea86062644f7e", "score": "0.55836034", "text": "def on_failure(self):\n return -1", "title": "" }, { "docid": "3d5a23549e5ec654355d07400e2a516b", "score": "0.5583305", "text": "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "5d5afaf529ac078b70faef7d412271d3", "score": "0.5571681", "text": "def finish_with_failure(self):\n self.success = False\n self.done = True", "title": "" }, { "docid": "8d1cf24dd986450e11a45cdb04517716", "score": "0.55432457", "text": "def final_check(self):\n pass", "title": "" }, { "docid": "5b24abcaa6a29bfe55e6a86aa6d44697", "score": "0.55387247", "text": "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "431c14212a79ce0506e6487c7d3565ff", "score": "0.5537609", "text": "def downloadedSuccessfully(self):\n return self.__state == DownloadItem.DownloadSuccessful", "title": "" }, { "docid": "e732cf2c9e9be6fab000539910a0c2e2", "score": "0.55329955", "text": "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print 'Found and verified', filename\n else:\n print statinfo.st_size\n raise Exception(\n 'Failed to verify ' + filename + '. 
Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "3b40b278008ee0ba6538cd8c6386ece1", "score": "0.55317867", "text": "def check_file(self, filename, url):\n # Exceptions coming from this will be caught higher up.\n headers = urlopen(url).info()\n\n return headers.get('Content-Disposition', '').startswith('attachment')", "title": "" }, { "docid": "ee1b551dfd12be6ab87d53e4bee7d287", "score": "0.55009365", "text": "def need_download(self):\n if not os.path.isfile(self.dbfn):\n return True\n else:\n partcount, filename, size = self.get_info()\n if not size or size == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "03dd6fb79a59f6717010453f64b103d9", "score": "0.54639834", "text": "def __validate(self):\n # check technical break\n try:\n if self.driver.find_element_by_xpath('/html/body/div[2]/div/img').get_property('alt') == 'Przerwa techniczna':\n raise TechnicalBreakException()\n except TechnicalBreakException:\n raise\n except Exception:\n pass\n # check error 503... (haven't saved the webpage...)\n # ok\n return", "title": "" }, { "docid": "03d3064d3892ef34f628a200168dd466", "score": "0.5462961", "text": "def on_failure(self):\n self.logger.error(\"All copier jobs failed. Bailing out!\")\n return 1", "title": "" }, { "docid": "5c4eec9cfca24787eb0f004782bc3d35", "score": "0.54535437", "text": "def test_download_file_when_checksum_is_invalid(self):\n model_1 = ResourceURI(MAGIC_WAND_MODEL_URI)\n\n model_1_mtime = model_1.stat().st_mtime\n\n with open(model_1, 'wb') as downloaded_file:\n downloaded_file.write(b'test')\n\n model_2 = ResourceURI(MAGIC_WAND_MODEL_URI)\n\n assert model_1_mtime != model_2.stat().st_mtime", "title": "" }, { "docid": "e914f977d00c21ca9fd4c6e3150b1649", "score": "0.54534787", "text": "def issuccess(self):\n return False", "title": "" }, { "docid": "f4d4f87ab03886d5427cd5d620cc6d76", "score": "0.5447482", "text": "def is_required(url):\n try:\n res = requests.get(url)\n\n return not ShareX.Errors.UPLOAD_FAILED.value['content'].lower() in res.text.lower()\n except Exception:\n return True", "title": "" }, { "docid": "a5e0d638722ad623e751218ca5a8f624", "score": "0.5446734", "text": "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename +\n '. 
Can you get it with a browser')\n return filename", "title": "" }, { "docid": "105bc7357c121746b43428744d5b2d93", "score": "0.5443458", "text": "def validate_endofcheckphase(process, filename):\n\n passed = True\n downloader_idx = next(\n (\n idx\n for (idx, x) in enumerate(process)\n if x.get(\"Processor\") in (\"URLDownloader\", \"CURLDownloader\")\n ),\n None,\n )\n if downloader_idx is None:\n return passed\n endofcheck_idx = next(\n (\n idx\n for (idx, x) in enumerate(process)\n if x.get(\"Processor\") == \"EndOfCheckPhase\"\n ),\n None,\n )\n if endofcheck_idx is None:\n print(\n \"{}: Contains a download processor, but no EndOfCheckPhase \"\n \"processor.\".format(filename)\n )\n passed = False\n elif endofcheck_idx < downloader_idx:\n print(\n \"{}: EndOfCheckPhase typically goes after a download processor, \"\n \"not before.\".format(filename)\n )\n passed = False\n\n return passed", "title": "" }, { "docid": "41e9c4e4baa4430cc8b2554f756c983a", "score": "0.54409236", "text": "def downloadable(self): # type: () -> bool\n return False", "title": "" }, { "docid": "9fcb2a4d12c15e3842fff60ce0dc89ad", "score": "0.54401135", "text": "def declare_failure(self):\n self.success = False\n return", "title": "" }, { "docid": "96f1a1e7c3d7cfda3d43edeacd716849", "score": "0.54317373", "text": "def complete(self):\n self.download_label.config(text='downloading complete')\n self.progressbar['value'] = 0\n self.progressbar.update()", "title": "" }, { "docid": "3c9f01ac8a977af31e49fd42d2d9d406", "score": "0.5423162", "text": "def validate(self) -> None:", "title": "" }, { "docid": "f28fcca3fdcf4427daf04ad8c71f6c1b", "score": "0.5421449", "text": "def __finished(self):\n self.__finishedDownloading = True\n \n noError = (self.__downloadItem.state() ==\n QWebEngineDownloadItem.DownloadCompleted)\n \n self.progressBar.setVisible(False)\n self.pauseButton.setEnabled(False)\n self.pauseButton.setVisible(False)\n self.stopButton.setEnabled(False)\n self.stopButton.setVisible(False)\n self.openButton.setEnabled(noError)\n self.openButton.setVisible(noError)\n self.__state = DownloadItem.DownloadSuccessful\n self.__updateInfoLabel()\n self.__setDateTime()\n \n self.__adjustSize()\n \n self.statusChanged.emit()\n self.downloadFinished.emit(True)\n \n if self.__autoOpen:\n self.openFile()", "title": "" }, { "docid": "6232ae3af35aed656dfb16cfa532dcb4", "score": "0.5405885", "text": "def fileValidation(self, filename):\n if not os.path.exists(filename):\n print(\"error: file '{}' does not exist\".format (filename), file = sys.stderr)\n sys.exit(1)", "title": "" }, { "docid": "3e905c2b90b225003bff5a3abba6f360", "score": "0.5404941", "text": "def check_download( model_name ):\n if os.path.isdir( model_name ):\n print( 'Model already downloaded, using existing version' )\n else:\n print( 'Requested model not found, attempting to download' )\n download_pretrained_model( model_name )", "title": "" }, { "docid": "6197ce15409bc9496242c0eb40994539", "score": "0.5399104", "text": "def test_download_file_error(self):\n\n key = \"test-report.csv\"\n err_msg = f\"Unable to locate {key}\"\n with self.assertRaises(Exception) as exp:\n self.oci_local_report_downloader.download_file(key)\n expected_exception = exp.exception\n self.assertIn(err_msg, expected_exception.args[0])", "title": "" }, { "docid": "b4b468d4aa24b188005499da931c898d", "score": "0.5391797", "text": "def checkDownloadcompleted(self):\n self.info = self.return_json.json()\n time.sleep(1)\n\n beaurl = self.info['ads'][0]['beacon_url']\n time.sleep(1)\n self.url 
= self.info['ads'][0]['beacon']['download_completed'][3]['parameters']\n self.url = beaurl + self.url\n self.code = requests.request('GET', self.url)\n\n time.sleep(1)\n configPG.connectPG(\"root\",\"aszx\",\"5432\",\"192.168.0.66\",\"cpd_reporting\")\n self.sql = \"select download_completed FROM public.cpd_stats where ad_channel_id=451 and imp_type=1 and ad_customer_id=14207 and ad_group_id=69 order by ts desc\"\n self.cursor = configPG.executeSQL(self.sql)\n self.result = configPG.get_one(self.cursor)\n\n if self.result[0] == 1:\n self.assertEqual(self.result[0], 1)\n print(\"PGdb download_completed is test seccussfully!\")\n else:\n self.assertEqual(self.result[0], 1)\n print(\"PGdb download_completed test failed\")", "title": "" }, { "docid": "18f259ee2b1a7e2c9a419dea2b201ac7", "score": "0.5358468", "text": "def failure(self) -> _ErrorType:", "title": "" }, { "docid": "2dcf65f32246acfd5350f40547c60385", "score": "0.5355109", "text": "def noPage(self, reason):\n if self.waiting:\n self.waiting = 0\n if self.file:\n try:\n self.file.close()\n except BaseException:\n self._log.failure(\"Error closing HTTPDownloader file\")\n self.deferred.errback(reason)", "title": "" }, { "docid": "a93391ff995060036c19935947d0a54c", "score": "0.5353592", "text": "def successful_load(self):\r\n return self.operation == self.OPERATION_IMPORT and \\\r\n self.status == self.STATUS_COMPLETE", "title": "" }, { "docid": "fb11b5db3aafa3474002d1b0146104eb", "score": "0.5347024", "text": "def test_fetch__failed_download(self) -> None:\n mock_s3 = MockS3()\n mock_s3.mk_list_entries(self.config.bucket, self.config.base_path + '/doc', [\n ('abc.data', 2),\n ('abc.meta', 3),\n ])\n mock_s3.mk_download_key_not_found(\n self.config.bucket, self.config.base_path + '/doc/abc.data',\n )\n with mock_s3:\n res = fetch.fetch(self.config, 'doc', self.outfile, '')\n self.assertEqual(31, res)\n self.assertFalse(os.path.isfile(self.outfile))", "title": "" }, { "docid": "85737826dad76d0dafb0198bb50dce2c", "score": "0.5336272", "text": "def validate(self):\n\t\tprint()\n\t\tprint('*** Validate the content !')\n\t\t# Internal\n\t\tout = self.validate_internal()\n\n\t\t# External\n\t\t#self.validate_external()", "title": "" }, { "docid": "55f4f9f507a6ec31807435735c3f6010", "score": "0.53356004", "text": "def handle_bin_download_error(exc, name):\n if sys.version_info[0] == 2:\n url_error_msg = str(exc.strerror)\n else:\n url_error_msg = str(exc.reason)\n\n if \"CERTIFICATE_VERIFY_FAILED\" in url_error_msg:\n LOGGER.error(\n \"Attempted to download %s but was unable to verify the TLS \"\n \"certificate on its download site.\",\n name,\n )\n LOGGER.error(\"Full TLS error message: %s\", url_error_msg)\n if platform.system().startswith(\"Darwin\") and (\n \"unable to get local issuer certificate\" in url_error_msg\n ):\n LOGGER.error(\n \"This is likely caused by your Python installation missing root certificates. 
\"\n 'Run \"/Applications/Python %s.%s/\"Install Certificates.command\" to fix it '\n \"(https://stackoverflow.com/a/42334357/2547802)\",\n sys.version_info[0],\n sys.version_info[1],\n )\n sys.exit(1)\n else:\n raise exc", "title": "" }, { "docid": "90dd67f5155bfa557bc37d5088b5a9b9", "score": "0.5325697", "text": "def setUrlError(self, url):\n\t\tif url in self.DG.node:\n\t\t\tself.DG.node[url]['downloaded'] = \"error\"", "title": "" }, { "docid": "253feb127b7e01aee82faa1980d7cbf3", "score": "0.5320741", "text": "def test_download_single_url(self):\n\n imdl = ImageDownloader(urls_filename=self.existing_testfile)\n\n for urlid in range(len(self.urls)):\n url = self.urls[urlid]\n expected_ok = self.expected_status_ok[urlid]\n\n infodict = imdl.download_single_url(url)\n if expected_ok:\n self.assertEqual(\"Ok\", infodict['status'])\n else:\n self.assertNotEqual(\"Ok\", infodict['status'])", "title": "" }, { "docid": "5346faf1e1bc5be519d77a2e95df8e23", "score": "0.5301636", "text": "def check_download_status(request):\n file_id = request.GET.get('file_id', None)\n\n if file_id is not None:\n try:\n # Get the exported file with the given id\n exported_file = exported_file_api.get_by_id(file_id)\n except:\n return HttpResponseBadRequest(\"File with the given id does not exist\")\n\n return HttpResponse(json.dumps({'is_ready': exported_file.is_ready,\n 'message': \"The file is now ready for download\"}),\n content_type='application/javascript')\n else:\n return HttpResponseBadRequest(\"File id is missing in parameters\")", "title": "" }, { "docid": "3e40986c0cb7e243577291a041f96b73", "score": "0.5296508", "text": "def fails_validation():\n raise Exception", "title": "" }, { "docid": "d15edeb9e02124e54d5654c1deabff52", "score": "0.52789867", "text": "def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n print('Attempting to download:', filename) \n filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify ' + filename + '. Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "9d68b7007ba6d3abc3f8720d2d6f042d", "score": "0.5278678", "text": "def validate(self):\n return None", "title": "" }, { "docid": "61df10916bda9da3991e8243008c1c16", "score": "0.527645", "text": "def validate(self, result):\n if not result:\n return False\n if self.filename not in self.already_printed_filepaths: \n # Print file of file to document\n click.echo(\n \"\\n\\nIn file {} :\\n\".format(\n click.style(\n os.path.join(*self.filename.split(os.sep)[-3:]), fg=\"red\"\n )\n )\n )\n self.already_printed_filepaths.append(self.filename)\n return True", "title": "" }, { "docid": "1340419d4797afc64ebf65f6d29d2aca", "score": "0.52763724", "text": "def issuccess(self):\n return True", "title": "" }, { "docid": "ca6a4b0f7d19d81ec37af7cc1d479116", "score": "0.5268203", "text": "def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n print('Attempting to download:', filename) \n filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n \n statinfo = os.stat(filename)\n\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify ' + filename + '. 
Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "0d9aba9c62afdee693166ec8bb7b3598", "score": "0.5267554", "text": "async def download_zip_files(self, validate) -> None:\n try:\n contents = None\n target_ref = self.ref.split(\"/\")[1]\n\n for release in self.releases.objects:\n self.logger.debug(\n \"%s ref: %s --- tag: %s\", self.string, target_ref, release.tag_name\n )\n if release.tag_name == target_ref:\n contents = release.assets\n break\n\n if not contents:\n validate.errors.append(f\"No assets found for release '{self.ref}'\")\n return\n\n download_queue = QueueManager(hass=self.hacs.hass)\n\n for content in contents or []:\n download_queue.add(self.async_download_zip_file(content, validate))\n\n await download_queue.execute()\n except BaseException: # lgtm [py/catch-base-exception] pylint: disable=broad-except\n validate.errors.append(\"Download was not completed\")", "title": "" }, { "docid": "075d61a3e2049ba0034d1e44db2aedc8", "score": "0.52660006", "text": "def validate(self) -> None:\n pass", "title": "" }, { "docid": "4b9f45ad36251c48bf912fc9965fdeac", "score": "0.52643406", "text": "def _maybe_download(filename, work_directory, source_url):\n if not gfile.Exists(work_directory):\n gfile.MakeDirs(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not gfile.Exists(filepath):\n urllib.request.urlretrieve(source_url, filepath) # noqa: S310\n with gfile.GFile(filepath) as f:\n size = f.size()\n print(\"Successfully downloaded\", filename, size, \"bytes.\")\n return filepath", "title": "" }, { "docid": "e2e88e0b99599ebe74e0c34b7217d64e", "score": "0.52641094", "text": "def _validate(self):\n pass", "title": "" }, { "docid": "fb14727b32f91e7bff9767040a37f863", "score": "0.5261507", "text": "def __download_url(url):\n \n print clr.BOLD+\"Downloading:\\t%s\"%(url)+clr.ENDC\n if __download_file(url):\n print clr.FAIL+\"FAILED\"+clr.ENDC\n return 1\n \n print clr.OKGREEN+\"Success\"+clr.ENDC\n if not (__download_file(url+\".asc\") and \n __download_file(url+\".sig\")):\n print (clr.OKGREEN\n + \"Signing file found!\"\n + clr.ENDC)\n return 0 \n\n print (clr.WARNING\n + \"No signing file found, looking for hashes.\"\n + clr.ENDC)\n \n filename = url.split('/')[-1]\n folder = '/'.join(url.split('/')[:-1])\n\n if not __download_file(url+\".md5\"):\n print (clr.OKGREEN\n + \"MD5 Found!\"\n + clr.ENDC )\n return 3\n\n print (clr.WARNING\n + \"No MD5\"\n + clr.ENDC)\n \n if not __download_file(url+\".sha1\"):\n print (clr.OKGREEN\n + \"SHA1 Found!\"\n + clr.ENDC )\n return 4\n\n else:\n print (clr.WARNING\n +\"NO SHA1\"\n +clr.ENDC)\n return 5", "title": "" }, { "docid": "2087047c3031dacc5aa4bfb862cdc681", "score": "0.5261361", "text": "def _validate_downloaded_file(self, filepath, md5sum):\n # First check md5sum\n if md5sum:\n with open(filepath, 'rb') as fp:\n md5digest = hashlib.md5(fp.read()).hexdigest()\n\n self._log_debug('Verifying md5 checksum for %s. 
Expecting %s - found %s',\n (filepath, md5sum, md5digest))\n\n if md5sum != md5digest:\n raise InvalidCheckSumException(\n 'File {} md5 checksum does not match {}'.format(filepath, md5sum))\n\n # Then check zip file\n with zipfile.ZipFile(filepath) as zip_fd:\n if zip_fd.testzip():\n raise zipfile.BadZipfile('Bad CRC on zipfile {}'.format(filepath))", "title": "" }, { "docid": "e5ea065a9c596f1a3eb6668ebc01f7ce", "score": "0.5258759", "text": "def check_file(web_url):\n res = requests.head(web_url)\n if res.headers is None:\n return False\n return True", "title": "" }, { "docid": "236cec96ecc81a656a1e71452f3120a2", "score": "0.5257698", "text": "def test_x_sendfile_allowed_download_status(self):\n self.client.login(username='first', password='thepassword')\n resp = self.client.get(self.first_user_doc.attachment.url)\n self.assertEqual(resp.status_code, 200)", "title": "" }, { "docid": "30f1d20b6d0be616f007719354ac325a", "score": "0.5253453", "text": "def check_error(self):\n\t\twhile self.poll() is None:\n\t\t\tpass\n\t\tif self.poll() != 0:\n\t\t\tprint \"FORM failed!\"", "title": "" }, { "docid": "6fdbd800dcfc18e2afe6d887353eedca", "score": "0.5251666", "text": "def maybe_download(filename, expected_bytes, force=False):\n dest_filename = os.path.join(data_root, filename) #take parts of a filename and combines\n if force or not os.path.exists(dest_filename): #check if path exists\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook) # the store, the desitnation and callback for what to do on every event\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename", "title": "" }, { "docid": "2530b1af5fe6d9d42550edb10dd2039d", "score": "0.5247718", "text": "def _validate(response):\n return True", "title": "" }, { "docid": "eee1fd5ae2ade245f1ff0c64d5d5afad", "score": "0.5241409", "text": "def _maybe_download(data_path, url, filename, expected_bytes, sha256=None, move=False,\n resources_path=config.RESOURCES_PATH):\n file_path = os.path.join(data_path, filename)\n\n logger.debug(\"Checking data path: {}\".format(data_path))\n if not os.path.exists(file_path):\n logger.debug(\"Saving data in {}\".format(data_path))\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n if not move:\n logger.debug(\"Downloading... 
{}\".format(filename))\n filename, _ = urllib.request.urlretrieve(url, file_path)\n logger.debug(\"Downloaded {}\".format(filename))\n else:\n logger.debug(\"Moving {} to {}\".format(filename, data_path))\n shutil.copy(os.path.join(resources_path, filename), data_path)\n\n\n statinfo = os.stat(file_path)\n\n if statinfo.st_size == expected_bytes:\n if sha256 is not None:\n hash_f = hashlib.sha256(open(file_path, 'rb').read()).hexdigest()\n if hash_f == sha256:\n logger.debug(\"Verified: {}\".format(filename))\n else:\n raise ValueError('hash of {} does not match original'.format(filename))\n else:\n raise Exception('Failed to verify {}'.format(filename))", "title": "" }, { "docid": "75cdf2a9df81544282ace6ddaa67141e", "score": "0.52391034", "text": "def __maybe_download_and_extract(self):\n dest_directory = self.cache_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "title": "" }, { "docid": "a466aaf49bd241fc796998c7065ecb5b", "score": "0.5237214", "text": "def maybe_download(filename='.'):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n else:\n print('Successfully finded the mnist data')\n return", "title": "" }, { "docid": "8d64bfb17837e6491d3093707d72118d", "score": "0.52350384", "text": "def _write_could_not_download(self):\n\n if self.n_could_not_download > 0:\n try:\n Path(self.not_downloaded_output_path).write_text(\"\\n\".join(self.could_not_download))\n if self.verbose:\n msg = f\"Written urls of {self.n_could_not_download} file(s)\" + \\\n f\" that could not be downloaded to {self.not_downloaded_output_path}\"\n except FileNotFoundError as err:\n msg = f\"Problem writing {self.n_could_not_download} file(s) to \" + \\\n f\"{self.could_not_download_path}. 
Error was: {err}\"", "title": "" }, { "docid": "96822f549314a6c57b9fa3ba0f8807e2", "score": "0.52328026", "text": "def pre_download(self):\n\n # Download the subtitle\n gobject.idle_add(self.gui_manager.set_status_message,\n gettext(\"Downloading subtitles...\"))\n\n # Wait for the file to exists\n gobject.idle_add(self.gui_manager.set_status_message,\n gettext(\"Please wait...\"))\n while not os.path.exists(self.file_path):\n time.sleep(1)\n\n # Show the progress bar box\n gobject.idle_add(self.gui_manager.progress.set_fraction, 0.0)\n gobject.idle_add(self.gui_manager.progress_box.show)\n gobject.idle_add(self.gui_manager.set_status_message,\n gettext(\"Filling cache...\"))\n\n if self.downloader.file_size != 0:\n # Waits %1 of the total download\n percent = self.downloader.file_size * 0.01\n\n while self.downloader.downloaded_size < percent:\n self._update_progress()\n self.update_speed()\n time.sleep(1)\n else:\n # Waits 2MB, just an arbitrary amount\n while self.downloader.downloaded_size < 2 * 1024 * 1024:\n gobject.idle_add(self.gui_manager.progress.pulse)\n time.sleep(0.5)", "title": "" }, { "docid": "513cfa48589b78e8e1e7c9f1a3b1ccd7", "score": "0.52314085", "text": "def test_failure(self):\n d = self.signPDF('garbage')\n return self.assertFailure(d, ExternalProcessError)", "title": "" }, { "docid": "dca3bcdab5c76969d778c4cb366e4f5b", "score": "0.523099", "text": "def validate_checksum(self):\n\n filepath = self.get_output_file_path()\n\n hashfuncs_d = Checksum.HASHFUNCS\n hashfuncs_l = Checksum.RANKED_HASHFUNCS\n checksums_by_type = {c.type: c for c in self.drs_obj.checksums}\n \n # find the most suitable hashing algorithm, for each algorithm in the \n # ranked list, check if the DRSObject has a checksum of that type \n hashfunc = None\n hashfunc_not_found = True\n for hashfunc_key in hashfuncs_l:\n if hashfunc_not_found:\n if hashfunc_key in checksums_by_type.keys():\n self.checksum_algo = hashfunc_key\n hashfunc = hashfuncs_d[hashfunc_key]\n self.checksum_exp = checksums_by_type[hashfunc_key].checksum\n hashfunc_not_found = False\n \n # if a suitable hashing algorithm has been found, perform the hashfunc\n # on the downloaded file, then compare the expected to observed\n # if expected matches observed, checksum status is set to PASSED\n # otherwise, checksum status is set to FAILED\n if hashfunc:\n self.checksum_obs = str(hashfunc(filepath))\n if str(self.checksum_exp) != str(self.checksum_obs):\n msg = \"output file {filepath} expected {type} checksum: \" \\\n + \"{expected} does not match observed: {observed}\"\n format_dict = {\"filepath\": filepath, \"type\": self.checksum_algo,\n \"expected\": self.checksum_exp, \n \"observed\": self.checksum_obs}\n self.logger.error(msg.format(**format_dict))\n self.checksum_status = cs.ChecksumStatus.FAILED\n else:\n self.checksum_status = cs.ChecksumStatus.PASSED\n\n # if no suitable hashing algorithm has been found, check status is set\n # to FAILED\n else:\n msg = \"could not perform checksum validation for {filepath}, \" \\\n + \"no suitable hashing algorithm found\"\n format_dict = {\"filepath\": filepath}\n self.logger.warning(msg.format(**format_dict))\n self.checksum_status = cs.ChecksumStatus.FAILED", "title": "" }, { "docid": "66ede2504f5d3470e3ea60b9bb8d0174", "score": "0.52253926", "text": "def test_download_invalid_because_no_remote_path(setup):\n with pytest.raises(TypeError):\n setup.download(local_path=\"local_path\")", "title": "" }, { "docid": "d5de5621513c0733d1d5512af20fe6c4", "score": "0.5219523", "text": "def 
test_download_no_such_file(self, client):\n\n response = client.post('/download', json={\n 'hash': '1234567',\n })\n assert response.status_code == 404", "title": "" }, { "docid": "fb45583ae8f392249d58467fd96bb41b", "score": "0.5218268", "text": "def downloaded(self):\n return bool(self._path)", "title": "" }, { "docid": "7917627bb063c4a171a51d542984336a", "score": "0.5197187", "text": "def validate(self, pdf_version):\n pass", "title": "" }, { "docid": "93d6d9469cf1bd9a465c36aa12cf80e8", "score": "0.51940364", "text": "def test_download(self):\n\n self.assertEqual(self.client.get(\"/download\").status_code, 200)", "title": "" }, { "docid": "85135ff0572b055dd18df86d94dff17f", "score": "0.5191884", "text": "def test_upload_inv_rdf(self):\n self.client.force_login(User.objects.get_or_create(username='validatetestuser')[0])\n self.invalid_rdf_file = open(\"examples/SPDXRdfExample-v2.0_invalid.rdf\")\n resp = self.client.post(reverse(\"validate\"),{'file' : self.invalid_rdf_file},follow=True)\n self.assertTrue(resp.status_code,400)\n self.assertTrue('error' in resp.context)\n self.client.logout()", "title": "" }, { "docid": "a08fdd6357b9e0581bc5ce045f17527b", "score": "0.51888216", "text": "def success_downloading_rib(monitor, url, filename, year):\n file = url + filename\n output_filename = filename.split(\".\")[0] + \".\" + filename.split(\".\")[1] + \\\n \".\" + filename.split(\".\")[3]\n command = 'wget -q %s -O %s/%s' % (file,\n PATH_TO_DATA_STORAGE % monitor,\n output_filename)\n error_code = os.WEXITSTATUS(os.system(command))\n if not error_code:\n return True\n os.system('rm %s/%s' % (PATH_TO_DATA_STORAGE % monitor, output_filename))\n return False", "title": "" }, { "docid": "9e4611caf83121b42f284af2f199dd38", "score": "0.5185633", "text": "def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "683aba9b870e3d9a5ff5658275950986", "score": "0.5184357", "text": "def retry_if_asset_ok(exception):\n return not isinstance(exception, IOError)", "title": "" }, { "docid": "0ab27328bb8e9fc35173fe94ec5667c0", "score": "0.51813114", "text": "def maybe_download(filename, expected_bytes, force=False):\n dest_filename = os.path.join(data_root, filename)\n if force or not os.path.exists(dest_filename):\n print('Attempting to download:', filename)\n filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)\n print('\\nDownload Complete!')\n statinfo = os.stat(dest_filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', dest_filename)\n else:\n raise Exception(\n 'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')\n return dest_filename", "title": "" }, { "docid": "16e05b9c03afaa0a0e15557ad5c21243", "score": "0.5176649", "text": "def maybe_download(filename, expected_bytes, force=False):\n if force or not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n raise Exception(\n 'Failed to verify' + filename + '. 
Can you get to it with a browser?')\n return filename", "title": "" }, { "docid": "336b3d1df86195dae043404ef2381e61", "score": "0.517287", "text": "def test_download_too_many_records_after_login(self):\n url = reverse('dataset-download')\n query = {'q': ''}\n c = Client()\n c.login(**TEST_USER_1)\n response = c.get(url, query, follow=True)\n storage = list(get_messages(response.wsgi_request))\n self.assertEqual(storage[0].message, 'Download abort. File size too large')", "title": "" }, { "docid": "2012f58f3d9915cb99ef739d0f9a9666", "score": "0.516246", "text": "def log_files_not_downloaded(self) -> None:\n if self.files_not_downloaded:\n logger.info(\"\\n[red]Failed to download the following files:\\n[/red]\")\n logger.info(\n \"[red]\"\n + tabulate(self.files_not_downloaded, headers=[\"FILE NAME\", \"REASON\"])\n + \"[/red]\"\n )\n reasons: list = [file[1] for file in self.files_not_downloaded]\n if FILE_EXIST_REASON in reasons:\n logger.info(\n \"\\nTo merge existing files use the download command with -f.\"\n )", "title": "" }, { "docid": "d28f799ef14bf17d81d12ef0a061960e", "score": "0.5161238", "text": "def _check_status_valid(self):\n\n if self.status == DataManagerStatus.INIT.value:\n raise exceptions.SummaryLogIsLoading(\"Data is being loaded, current status: %s.\" % self._status)", "title": "" }, { "docid": "6c3c21abb22ed6383cb554abe3c29d97", "score": "0.5161157", "text": "def validate(self):\n\t\tpass", "title": "" }, { "docid": "938de44d352220e581491820057b2a7c", "score": "0.5159017", "text": "def _has_nonempty_downloads(element: Element) -> bool:\n downloads = element.find(\"DownloadableArchives\")\n update_file = element.find(\"UpdateFile\")\n if downloads is None or update_file is None:\n return False\n uncompressed_size = int(update_file.attrib[\"UncompressedSize\"])\n return downloads.text is not None and uncompressed_size >= Settings.min_module_size", "title": "" }, { "docid": "e87894108b411fdfb0a616f1c68ef849", "score": "0.5158713", "text": "def validate(self):\n # To do: pretty much everything\n pass", "title": "" }, { "docid": "52cd4273c09f236074b04e8f601e0710", "score": "0.5155822", "text": "def test_should_error(self):\n self.driver.get(self.url)\n self.page_loads.wait_for_element_visible()", "title": "" } ]
1ccfae3bff6322e4b4bf1fc7c24361f4
Make a query to the Telegram Bot API.
[ { "docid": "4c183467346d812399c2676a2c673dc1", "score": "0.0", "text": "def make_request(method, message):\n url = f'{BASE_URL}{BOT_TOKEN}/{method}'\n post = requests.post(url, json=message)\n return post.json()", "title": "" } ]
[ { "docid": "65b103293b5c140607ffef35562dd283", "score": "0.6427141", "text": "def make_query_request(self, api_query):\n\t\ttry:\n\t\t\treturn requests.post(self.query_url, json=api_query, headers=headers, timeout=self.request_timeout)\n\t\texcept Exception as e:\n\t\t\tlogging.warning(\"Exception in calculator_biotrans: {}\".format(e))\n\t\t\treturn None", "title": "" }, { "docid": "2f657441b74ab402f48d6c2a8410a386", "score": "0.6395868", "text": "async def talk(ctx, *, query):\r\n await ctx.trigger_typing()\r\n res = get_chatbot_response(query)\r\n await ctx.send(f\"> {query}\\n\\nChatbot: {res}\")", "title": "" }, { "docid": "c5d2df6bc6881a8060267ee9289cda87", "score": "0.6205094", "text": "def send(connection, telegram):\n connection.write(bytes(telegram))\n reply_bytes = connection.read(Telegram.LENGTH)\n reply = TelegramBuilder().from_bytes(reply_bytes).build()\n return TelegramReader(telegram, 'query'), TelegramReader(reply, 'reply')", "title": "" }, { "docid": "d05c2aba65461fc7770e8b43daa4821e", "score": "0.6090845", "text": "def _request_query_transaction(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n strategy.is_behaviour_active = False\n contract_api_dialogues = cast(\n ContractApiDialogues, self.context.contract_api_dialogues\n )\n contract_api_msg, contract_api_dialogue = contract_api_dialogues.create(\n counterparty=str(LEDGER_API_ADDRESS),\n performative=ContractApiMessage.Performative.GET_RAW_TRANSACTION,\n ledger_id=strategy.ledger_id,\n contract_id=str(CLIENT_CONTRACT_PUBLIC_ID),\n contract_address=strategy.client_contract_address,\n callable=\"get_query_transaction\",\n kwargs=ContractApiMessage.Kwargs(\n {\n \"from_address\": self.context.agent_address,\n \"query_function\": strategy.query_function,\n \"amount\": strategy.query_oracle_fee,\n \"gas\": strategy.gas_limit_query,\n \"tx_fee\": strategy.gas_price * strategy.gas_limit_query,\n }\n ),\n )\n contract_api_dialogue = cast(ContractApiDialogue, contract_api_dialogue)\n contract_api_dialogue.terms = strategy.get_query_terms()\n self.context.outbox.put_message(message=contract_api_msg)\n self.context.logger.info(\"requesting query transaction...\")", "title": "" }, { "docid": "d96446a9ee5df6fd1026522d0c2955d3", "score": "0.6074366", "text": "def query_handler(call):\n if call.data == BUTTON_GOOD:\n if review_good is not None:\n try:\n bot.send_message(chat_id, text=\"Хороший отзыв:\\n\" + review_good)\n except Exception:\n bot.send_message(chat_id, text=\"Отзыв слишком большой, Telegram не разрешает его отправить :(\")\n else:\n bot.send_message(chat_id, text=\"Отзыва об этом фильме нет :(\")\n if call.data == BUTTON_BAD:\n if review_bad is not None:\n try:\n bot.send_message(chat_id, text=\"Плохой отзыв:\\n\" + review_bad)\n except Exception:\n bot.send_message(chat_id, text=\"Отзыв слишком большой, Telegram не разрешает его отправить :(\")\n else:\n bot.send_message(chat_id, text=\"Отзыва об этом фильме нет :(\")\n if call.data == BUTTON_TRAILER:\n if url_trailer is not None:\n bot.send_video(chat_id, url_trailer)\n else:\n bot.answer_callback_query(callback_query_id=call.id, text=\"Трейлера нет\")\n if call.data == BUTTON_WATCH:\n if url_watch[0] is None:\n bot.send_message(chat_id, text=\"К сожалению сервис Megogo не доступен\")\n return\n if url_watch[1] is None:\n bot.send_message(chat_id, text=\"К сожалению сервис IvI не доступен\")\n return\n keyboard = get_keyboard2()\n bot.send_message(chat_id, text=\"Выберите на какой площадке будете смотреть фильм.\\n\"\n \"P.S.: Примите к 
сведению,что данного фильма может \"\n \"и не быть на выбранной площадке\", reply_markup=keyboard)", "title": "" }, { "docid": "a77cff639102f30ef044e215ee5d2a47", "score": "0.6063051", "text": "def query(body):\n\n result = None\n if connexion.request.is_json:\n #body = Query.from_dict(connexion.request.get_json())\n query = connexion.request.get_json()\n rtxq = RTXQuery()\n result = rtxq.query(query)\n return result", "title": "" }, { "docid": "60d92ec4902c050abccf2d875e981da2", "score": "0.59059453", "text": "def query(self, query):\n data = {\"q\": query}\n return requests.post(self.base_url, json=data).json()", "title": "" }, { "docid": "f12d9cc787444df8359fd8264e88c833", "score": "0.5892893", "text": "def _query(self, method, *args, **kwargs):\n params = kwargs\n kwargs['apikey'] = self.apikey\n\n response = requests.request(\n method,\n self.get_url(self.host, *args),\n headers={\"Accept\": \"application/json\"},\n params=params\n )\n if response.ok:\n return response.json()\n raise ValueError(response.json())", "title": "" }, { "docid": "b95c790d5e0c910efd817536b0193919", "score": "0.5873222", "text": "async def wolframalpha(self, *, q : str):\r\n result = self.bot.wolfram_client.query(q) \r\n #not all wolfram queries have results section\r\n #default to the first pod \r\n try:\r\n bot_msg = next(result.results).text\r\n except (AttributeError, StopIteration):\r\n bot_msg = result.pods[1].text\r\n \r\n await self.bot.say(bot_msg)", "title": "" }, { "docid": "2d2cf4762b8e207d905607c6d4115131", "score": "0.5864931", "text": "def query(self, message, args):\n\n query = args['query']\n\n if query.startswith('#') or ' ' in query:\n tweet = self.tw_api.search.tweets(q=query)['statuses'][0]\n\n else:\n screen_name = self.shelf.get(query.lower(), query.lower())\n\n tweet = self.tw_api.statuses.user_timeline(\n screen_name=screen_name)[0]\n\n self.send_tweet(message, tweet)", "title": "" }, { "docid": "44f7aa445f23424be03c260fe64e9d25", "score": "0.5834338", "text": "async def _query(self, ctx, *, searchString: str = ''):\n try:\n client = wolframalpha.Client(os.environ.get(\"WOLFRAM_API_KEY\"))\n res = client.query(searchString)\n answer = next(res.results).text\n await ctx.send(f'```apache\\n{answer}```')\n except (Exception, StopIteration):\n try:\n wikipedia.set_lang(\"en\")\n summary = wikipedia.summary(searchString)\n wikiLen = 2000\n if len(summary) > 2000:\n wikiLoop = 'true'\n while wikiLoop == 'true':\n #summary[wikiLen-1:wikiLen].isupper() or\n if summary[wikiLen-1:wikiLen] == '.':\n wikiLoop = 'false'\n else:\n wikiLen-=1\n await ctx.send('``Summary was longer than expected, output truncated.``')\n await ctx.send(f'```{summary[:wikiLen]}```')\n except Exception:\n await ctx.send(f'Sorry, no matches were found for ``{query}``.')\n finally:\n try:\n channel = ctx.message.channel\n thankers = ['thank you', 'thanks']\n def check(m):\n return any(thanks in m.content for thanks in thankers) and m.channel == channel\n msg = await self.bot.wait_for('message', check=check, timeout=25)\n await channel.send('You\\'re welcome :⁾')\n except Exception:\n return", "title": "" }, { "docid": "34e7a5a421039f3f6f7240fac03423e1", "score": "0.58229095", "text": "def query(query, **params):", "title": "" }, { "docid": "d0c6d6560dc888fff639d432cc6815cd", "score": "0.5800546", "text": "def get_telegram(conn, telegram):\n\n cursor = conn.cursor()\n cursor.execute(sql.SELECT_TELEGRAM, (telegram,))\n return cursor.fetchone()", "title": "" }, { "docid": "feeed3263ffa125817569f35fe5fe311", "score": 
"0.5739372", "text": "def query(ctx, query):\n print_mind_response(ctx.obj[\"query\"].query(query))", "title": "" }, { "docid": "5c1c5d3246f241f55f2f9221a44e6616", "score": "0.5735594", "text": "def uptycs_run_query():\n http_method = 'post'\n query = demisto.args().get('query')\n if demisto.args().get('query_type') == 'global':\n api_call = '/query'\n post_data = {\n 'query': query\n }\n else:\n api_call = '/assets/query'\n if demisto.args().get('asset_id') is not None:\n _id = {\n \"_id\": {\n \"equals\": demisto.args().get('asset_id')\n }\n }\n elif demisto.args().get('host_name_is') is not None:\n _id = {\n \"host_name\": {\n \"equals\": demisto.args().get(\n 'host_name_is')\n }\n }\n elif demisto.args().get('host_name_like') is not None:\n _id = {\n \"host_name\": {\n \"like\": \"%{0}%\".format(demisto.args().get(\n 'host_name_like'))\n }\n }\n else:\n _id = {\n \"host_name\": {\n \"like\": '%%'\n }\n }\n\n post_data = {\n \"query\": query,\n \"type\": \"realtime\",\n \"filtering\": {\n \"filters\": _id\n }\n }\n\n return restcall(http_method, api_call, json=post_data)", "title": "" }, { "docid": "26992b9d33055f0fe54ca99ae0455f8b", "score": "0.56907445", "text": "def __call__(self, query, **params):\n \n url = add_path(self.ENDPOINT, 'fql.query')\n params.update(query=query, access_token=self.access_token,\n format='json')\n url = update_query_params(url, params)\n \n return self.fetch_json(url)", "title": "" }, { "docid": "115fb7dc99da0a286dfe6afb428b724c", "score": "0.5690657", "text": "def query():\n printIterationHeader()\n QUERY_HISTORY.append(QUERY) # Been there, done that\n url = URL.substitute(client_key = CLIENT_KEY, engine_key = ENGINE_KEY, query = QUERY)\n response = requests.get(url)\n items = response.json()[\"items\"]\n process(items)", "title": "" }, { "docid": "ee8e7be6cdde5166f61026ff461ca3c7", "score": "0.5688593", "text": "def query():\n\n return LotteryPage(QUERY_URL)", "title": "" }, { "docid": "ce0a771e464508e7c81e2c9a04b2c011", "score": "0.56670433", "text": "def _get(self, api_endpoint, **kwargs):\n req_str = telegram_API_URL + 'bot' + self.token + '/' + api_endpoint\n response = requests.get(url=req_str, params=self.params)\n return response.json()", "title": "" }, { "docid": "7a8e79245309f18ad467b15d890f42d8", "score": "0.56552804", "text": "def query_api(self, url, queryrequest, headers):\n # print (url, queryrequest)\n r = requests.get(url, params=queryrequest, headers=headers)\n return r.json()", "title": "" }, { "docid": "02b03ed0fab928cf0edea40719eb3d71", "score": "0.56451136", "text": "def query(self):\n print(\"Listening\")\n silent, record = self.record.record_mp3()\n if silent:\n print(\"Silent\")\n return\n success, text = self.speech_to_text.convert(record)\n if not success:\n print(\"Not recognized\")\n return\n print(\"User : \" + text)\n success, _, commands = self.robot.query(text)\n print(\"Robot commands : \" + json.dumps(commands))\n if not success:\n return\n self._process_answer(commands)", "title": "" }, { "docid": "eeee66e8b51d565676d39b0a257343f2", "score": "0.56005824", "text": "def query_api(self, url, queryrequest, headers):\n r = requests.get(url, params=queryrequest, headers=headers)\n return r.json()", "title": "" }, { "docid": "f5264d7043f26531fa5e8599e659e2fd", "score": "0.55969536", "text": "def handle_query(event, cursor, say):\n try:\n text = []\n user_name = None\n channel_name = None\n sort = None\n limit = 10\n\n params = event[\"text\"].lower().split()\n for p in params:\n # Handle emoji\n # usual format is \" 
:smiley_face: \"\n if len(p) > 2 and p[0] == \":\" and p[-1] == \":\":\n text.append(p)\n continue\n\n p = p.split(\":\")\n\n if len(p) == 1:\n text.append(p[0])\n if len(p) == 2:\n if p[0] == \"from\":\n user_name = p[1]\n if p[0] == \"in\":\n channel_name = p[1].replace(\"#\", \"\").strip()\n if p[0] == \"sort\":\n if p[1] in [\"asc\", \"desc\"]:\n sort = p[1]\n else:\n raise ValueError(\"Invalid sort order %s\" % p[1])\n if p[0] == \"limit\":\n try:\n limit = int(p[1])\n except:\n raise ValueError(\"%s not a valid number\" % p[1])\n\n query = f\"\"\"\n SELECT DISTINCT\n messages.message, messages.user, messages.timestamp, messages.channel\n FROM messages\n INNER JOIN users ON messages.user = users.id\n -- Only query channel that archive bot is a part of\n INNER JOIN (\n SELECT * FROM channels\n INNER JOIN members ON\n channels.id = members.channel AND\n members.user = (?)\n ) as channels ON messages.channel = channels.id\n INNER JOIN members ON channels.id = members.channel\n WHERE\n -- Only return messages that are in public channels or the user is a member of\n (channels.is_private <> 1 OR members.user = (?)) AND\n messages.message LIKE (?)\n \"\"\"\n query_args = [app._bot_user_id, event[\"user\"], \"%\" + \" \".join(text) + \"%\"]\n\n if user_name:\n query += \" AND users.name = (?)\"\n query_args.append(user_name)\n if channel_name:\n query += \" AND channels.name = (?)\"\n query_args.append(channel_name)\n if sort:\n query += \" ORDER BY messages.timestamp %s\" % sort\n\n logger.debug(query)\n logger.debug(query_args)\n\n cursor.execute(query, query_args)\n\n res = cursor.fetchmany(limit)\n res_message = None\n if res:\n logger.debug(res)\n res_message = \"\\n\".join(\n [\n \"*<@%s>* _<!date^%s^{date_pretty} {time}|A while ago>_ _<#%s>_\\n%s\\n\\n\"\n % (i[1], int(float(i[2])), i[3], i[0])\n for i in res\n ]\n )\n if res_message:\n say(res_message)\n else:\n say(\"No results found\")\n except ValueError as e:\n logger.error(traceback.format_exc())\n say(str(e))", "title": "" }, { "docid": "ba83cb56328ca3762257412da96f48b0", "score": "0.5595219", "text": "def sysapi_query(bot, system, querytype=None):\n\n sapi_url = bot.config.ratbot.sapi_url or \"https://system.api.fuelrats.com/\"\n encoded = quote_plus(system)\n if querytype == \"landmark\":\n endpoint = f\"landmark?name={encoded}\"\n elif querytype == \"smart\":\n endpoint = f\"mecha?name={encoded}\"\n else:\n endpoint = f\"search?name={encoded}\"\n\n try:\n response = requests.get(urljoin(sapi_url, endpoint))\n if response.status_code != 200:\n return {\"meta\": {\"error\": \"System API did not respond with valid data.\"}}\n result = response.json()\n except Timeout:\n return {\"meta\": {\"error\": \"The request to Systems API timed out!\"}}\n except requests.exceptions.ConnectionError:\n return {\"meta\": {\"error\": \"The systems API is currently unavailable.\"}}\n return result", "title": "" }, { "docid": "951925cb4c42d7ee3149876961c87ae5", "score": "0.5576392", "text": "def post(self):\n workflow_id = self.extract_workflow_id() \n query = self.extract_from_payload(\"query\")\n query = entity.Query(query_dict=query)\n query_id = self.service.query(query, wf_id = workflow_id)\n result_url = \"%s/%s\" % (self.generate_host_port_endpoint(endpoint = rest_endpoints.results), query_id)\n msg = message.CallbackMessage(request.url, \"Finished query. 
Find the result at the included URL\", result_url, workflow_id = workflow_id)\n return msg.to_dict() , 201", "title": "" }, { "docid": "eaf06102f7d7d5d9d616b34922eb03fb", "score": "0.55592036", "text": "def _api_query( # noqa: F811\n self,\n endpoint: str,\n request_method: Literal['GET', 'POST'] = 'GET',\n options: Optional[Dict[str, Any]] = None,\n ) -> Union[List[Any], Dict[str, Any]]:\n request_url = f'/{endpoint}'\n\n timestamp = str(int(time.time()))\n if options:\n stringified_options = json.dumps(options, separators=(',', ':'))\n else:\n stringified_options = ''\n options = {}\n message = timestamp + request_method + request_url + stringified_options\n log.debug(\n 'Coinbase Pro API query',\n request_method=request_method,\n request_url=request_url,\n options=options,\n )\n if 'products' not in endpoint:\n try:\n signature = hmac.new(\n b64decode(self.secret),\n message.encode(),\n hashlib.sha256,\n ).digest()\n except binascii.Error:\n raise RemoteError('Provided API Secret is invalid')\n\n self.session.headers.update({\n 'CB-ACCESS-SIGN': b64encode(signature).decode('utf-8'),\n 'CB-ACCESS-TIMESTAMP': timestamp,\n })\n\n retries_left = QUERY_RETRY_TIMES\n while retries_left > 0:\n full_url = self.base_uri + request_url\n try:\n response = self.session.request(\n request_method.lower(),\n full_url,\n data=stringified_options,\n )\n except requests.exceptions.ConnectionError as e:\n raise RemoteError(\n f'Coinbase Pro {request_method} query at '\n f'{full_url} connection error: {str(e)}',\n )\n\n if response.status_code == HTTPStatus.TOO_MANY_REQUESTS:\n # Backoff a bit by sleeping. Sleep more, the more retries have been made\n gevent.sleep(QUERY_RETRY_TIMES / retries_left)\n retries_left -= 1\n else:\n # get out of the retry loop, we did not get 429 complaint\n break\n\n json_ret: Union[List[Any], Dict[str, Any]]\n if response.status_code == HTTPStatus.BAD_REQUEST:\n json_ret = rlk_jsonloads_dict(response.text)\n if json_ret['message'] == 'invalid signature':\n raise CoinbaseProPermissionError(\n f'While doing {request_method} at {endpoint} endpoint the API secret '\n f'created an invalid signature.',\n )\n # else do nothing and a generic remote error will be thrown below\n\n elif response.status_code == HTTPStatus.FORBIDDEN:\n raise CoinbaseProPermissionError(\n f'API key does not have permission for {endpoint}',\n )\n\n if response.status_code != HTTPStatus.OK:\n raise RemoteError(\n f'Coinbase Pro {request_method} query at {full_url} responded with error '\n f'status code: {response.status_code} and text: {response.text}',\n )\n\n loading_function: Union[Callable[[str], Dict[str, Any]], Callable[[str], List[Any]]]\n if any(x in endpoint for x in ('accounts', 'products')):\n loading_function = rlk_jsonloads_list\n else:\n loading_function = rlk_jsonloads_dict\n\n try:\n json_ret = loading_function(response.text)\n except JSONDecodeError:\n raise RemoteError(\n f'Coinbase Pro {request_method} query at {full_url} '\n f'returned invalid JSON response: {response.text}',\n )\n\n return json_ret", "title": "" }, { "docid": "754f28e45ada03f530581ab91a9c792c", "score": "0.5549181", "text": "def sQuery(self, **kwargs):\n kwargs.update({'rdtype':'json','id':self.id,'key':self.key})\n response = requests.get('https://'+self.url+':5656/api/admin/command.php', params=kwargs, timeout=2)\n return response.text", "title": "" }, { "docid": "87358f17aa80047e87c83ca13defb2be", "score": "0.5528606", "text": "def inlinequery(bot, update):\n query = update.inline_query.query\n results = 
omni_search(query)\n update.inline_query.answer(results)", "title": "" }, { "docid": "741b435b8e894365723ba9b9bcaddf2d", "score": "0.5484776", "text": "def request(self, query: str, params: Dict[str, Any] = None):\n\n headers = {\n 'Content-Type': 'application/json',\n 'x-lux-api-key': f\"{self.key}\",\n }\n\n s = requests.Session()\n s.headers = headers\n\n if self.verbose:\n logging.info(query)\n\n response = s.request(self.method,\n self.host,\n data=json.dumps({\n 'query': query,\n 'variables': params\n }).encode('utf-8'))\n\n if response.status_code == 200:\n return response.json()\n elif response.content:\n raise Exception(\n str(response.status_code) + \": \" + str(response.reason) + \": \" +\n str(response.content.decode()))\n else:\n raise Exception(str(response.status_code) + \": \" + str(response.reason))", "title": "" }, { "docid": "d32c9a8ea4fbc58a85badcd5967e2b24", "score": "0.54711646", "text": "def query(\n self,\n body: Optional[Dict] = None,\n ) -> VespaQueryResponse:\n with VespaSync(self) as sync_app:\n return sync_app.query(\n body=body,\n )", "title": "" }, { "docid": "c67bd4e3acdd585f6b80fe31a37f9c25", "score": "0.5461882", "text": "async def api_grafanads_query(self, req: QueryRequest, user: User = Depends(get_current_user)):\n self.logger.info(\"Query Request: %s\", req)\n connect = connection()\n r = []\n targets: Dict[Tuple[str, str], List[\"QueryConfig\"]] = defaultdict(list)\n # Merge targets to Metric Scope and Filter\n for target in req.targets:\n if target.target in self.query_config:\n query_config = self.query_config[target.target]\n metric_type = MetricType.get_by_name(query_config.metric_type)\n else:\n metric_type = MetricType.get_by_id(target.target)\n query_config = QueryConfig(\n metric_type=metric_type.name, query_expression=metric_type.field_name\n )\n if not metric_type:\n self.logger.error(\"[%s] Unknown MetricType: %s\", target.target, query_config)\n raise HTTPException(status_code=500, detail=\"Unknown MetricType in QueryConfig\")\n # Target Filter\n # {\"managed_object\": \"3780187837837487731\"}\n query_mt_condition = self.get_query_metric_type_condition(\n target.payload, metric_type, user=user\n )\n if target.payload and \"metric_function\" in target.payload:\n # Alternative - target with function suffix, percentile ?\n query_config.aggregate_function = target.payload[\"agg_func\"]\n targets[(metric_type.scope.table_name, query_mt_condition)] += [query_config]\n # Query\n for (table_name, query_condition), query_configs in targets.items():\n # Format query\n query = self.get_query(req, table_name, query_condition, query_configs)\n self.logger.info(\"Do query: %s\", query)\n try:\n result = connect.execute(query, return_raw=True)\n except ClickhouseError as e:\n self.logger.error(\"Clickhouse query error: %s\", e)\n raise HTTPException(status_code=500, detail=e)\n r += [(query_configs, orjson.loads(result))]\n return self.format_result(r, result_type=req.result_type)", "title": "" }, { "docid": "d6f8a9b4e9efd513ca51bdb771ca6277", "score": "0.54611796", "text": "def send_query(self, query):\n message = riemann_pb2.Msg()\n message.query.string = query\n return self.transport.send(message)", "title": "" }, { "docid": "5d0138407602bdf4420f8839e92abc3d", "score": "0.5459873", "text": "def post(self):\n query_content = request.get_json(force=True)\n query_id, workflow_id = self.service.query(query_content)\n result_url = \"%s/%s\" % (self.generate_host_port_endpoint(endpoint = rest_endpoints.facade_results_get), query_id)\n msg = 
message.CallbackMessage(request.url, \"Finished query. Find the result at the included URL\", result_url, workflow_id = workflow_id)\n\n return msg.to_dict() , 201", "title": "" }, { "docid": "d8faed4b6d63ae212c3946b36af685ac", "score": "0.5456863", "text": "def query(**name):\n url = query_url\n resp = requests.post(url, name)\n return json.loads(resp.content)", "title": "" }, { "docid": "fc39735ebe84c4dc5ed3b78e3f3291dd", "score": "0.54449046", "text": "def query(self):\n return", "title": "" }, { "docid": "fc39735ebe84c4dc5ed3b78e3f3291dd", "score": "0.54449046", "text": "def query(self):\n return", "title": "" }, { "docid": "b0ba70a3832fd05a0bc16434d88e21c3", "score": "0.54410344", "text": "def query(self, triples, options=None):\n req = {\n \"graph\": self.graph,\n \"data\": triples,\n 'prefix': PREFIX,\n }\n if options:\n for k, v in options.iteritems():\n req[k] = v\n if self.debug:\n pprint.pprint(req)\n r = requests.post('http://' + self.host_port + '/v1/query', data=json.dumps(req))\n if r.status_code == requests.codes.ok:\n return r.json['data'] if isinstance(r.json, dict) else r.json()['data']\n return r.text", "title": "" }, { "docid": "5623d61d43a8a5fdaf93cbac9cb6ae07", "score": "0.5432956", "text": "def execute(self):\n response = self.client.query(**self.build())\n return SearchResponse(response, self.reconstructor)", "title": "" }, { "docid": "eb2089c4ed1e2cb3e78274c50929a900", "score": "0.5420371", "text": "def query(\n self,\n body: Optional[Dict] = None,\n ) -> VespaQueryResponse:\n response = self.http_session.post(self.app.search_end_point, json=body, cert=self.cert)\n raise_for_status(response)\n return VespaQueryResponse(\n json=response.json(), status_code=response.status_code, url=str(response.url)\n )", "title": "" }, { "docid": "88675c4a39d106661500f21b99210f0f", "score": "0.5408459", "text": "def query(**kwargs):\n api, config = configure(kwargs)\n\n if config is None:\n print_error(\"Error in config\", show_help=True)\n if config[\"debug\"]:\n return\n exit()\n\n if not config[\"query\"]:\n print_error(ERROR_MSGS[\"no_query\"], show_help=True)\n if config[\"debug\"]:\n return\n exit()\n\n if \"from\" in config.keys():\n dates = {\"from\": config[\"from\"]}\n if \"to\" in config.keys():\n dates[\"to\"] = config[\"to\"]\n if \"timeZone\" in config.keys():\n dates[\"timeZone\"] = config[\"timeZone\"]\n elif \"to\" in config.keys():\n print_error(ERROR_MSGS[\"to_but_no_from\"], show_help=True)\n exit()\n else:\n if \"timeZone\" in config.keys():\n dates = {\"timeZone\": config[\"timeZone\"]}\n else:\n dates = None\n\n if \"response\" in config.keys():\n if config[\"response\"] in [\"msgpack\", \"xls\"]:\n if \"output\" not in config.keys():\n print_error(ERROR_MSGS[\"binary_format_requires_output\"], show_help=True)\n exit()\n\n reponse = api.query(query=config[\"query\"], dates=dates)\n\n process_response(reponse, config)", "title": "" }, { "docid": "5f78383c60df59ddc77bc1335604ddad", "score": "0.5401899", "text": "async def _query_helper(\n self, target: str, cmd: str, arg: Optional[Dict] = None, child_ids=None\n ) -> Any:\n request = self._create_request(target, cmd, arg, child_ids)\n\n try:\n response = await self.protocol.query(request=request)\n except Exception as ex:\n raise SmartDeviceException(f\"Communication error on {target}:{cmd}\") from ex\n\n if target not in response:\n raise SmartDeviceException(f\"No required {target} in response: {response}\")\n\n result = response[target]\n if \"err_code\" in result and result[\"err_code\"] != 0:\n raise 
SmartDeviceException(f\"Error on {target}.{cmd}: {result}\")\n\n if cmd not in result:\n raise SmartDeviceException(f\"No command in response: {response}\")\n result = result[cmd]\n if \"err_code\" in result and result[\"err_code\"] != 0:\n raise SmartDeviceException(f\"Error on {target} {cmd}: {result}\")\n\n if \"err_code\" in result:\n del result[\"err_code\"]\n\n return result", "title": "" }, { "docid": "425d984b42d4e892379c9e384c58888c", "score": "0.5389715", "text": "def query(self, **kwargs):\n kwargs.update(self.kwargs)\n response = requests.get(config.API_URL, kwargs)\n\n logger.debug('Query URL: %s' % response.url)\n\n # API error\n if response.status_code != 200:\n raise SatSearchError(response.text)\n\n self.results = response.json()\n logger.debug(self.results['meta'])\n return self.results", "title": "" }, { "docid": "c19214aab4fa0fabd4f6135a2fb9ad89", "score": "0.53780264", "text": "def query_tx(self, tx_: str = \"\"):\n url = \"{}/transactionSearch?string={}&action=run\".format(self.client, tx_)\n res = get(url)\n temp = self.fake_table_to_list(res.text)\n # print(\"temp\", temp)\n return temp[0]", "title": "" }, { "docid": "e2c3ea5f3fcf6ec70a7d025198bf7e13", "score": "0.53775907", "text": "def connect(self, id):\n headers = {\n \"User-Agent\": \"QueryFetch\",\n \"Authorization\": self._API_KEY\n }\n url = \"https://shoppy.gg/api/v1/queries/\"\n try:\n req = requests.get(f\"{url}{id}\", headers=headers)\n except ConnectionError:\n result = {\n \"status\": False,\n \"message\": \"<span style='color: red;'>Connection Error</span>\"\n }\n return result\n print(req.text)\n queries = json.loads(req.text)\n\n result = {\n \"status\": True,\n \"type\": \"QUERY ID\",\n \"id\": queries['id'],\n \"email\": queries['email'],\n \"message\": queries[\"message\"]\n }\n return result\n # If id isn't found\n result = {\n \"status\": False,\n \"message\": f\"query ID <span style='color: red;'>{id}</span> not found\"\n }\n return result", "title": "" }, { "docid": "3fd8db3fb9a5c973d887b8182ed2e54c", "score": "0.5362363", "text": "def query(request):\n request['action'] = 'query'\n request['format'] = 'json'\n lastContinue = {'continue': ''}\n while True:\n # Clone original request\n req = request.copy()\n # Modify it with the values returned in the 'continue' section of the last result.\n req.update(lastContinue)\n # Call API\n result = requests.get('http://pl.wikipedia.org/w/api.php', params=req).json()\n\n if 'error' in result:\n raise Error(result['error'])\n if 'warnings' in result:\n print(result['warnings'])\n if 'query' in result:\n yield result['query']\n if 'continue' not in result:\n break\n lastContinue = result['continue']", "title": "" }, { "docid": "c98848de15cf03b5534dc6f1e1c9c9b3", "score": "0.5356508", "text": "def update_telegram(conn, telegram):\n\n cursor = conn.cursor()\n cursor.execute(sql.UPDATE_TELEGRAM, (\n telegram['username'],\n telegram['telegram']\n ))", "title": "" }, { "docid": "e0be725ec7b8fad9f1c7db0fb1f91788", "score": "0.53528965", "text": "def RunQuery(query_name,graph,params=None):\n res = tgCl.Rest.get(\"/query/{0}/{1}\".format(graph,query_name),parameters=params,resKey=\"results\")\n return res", "title": "" }, { "docid": "ff185afe8835a0cd1bf05525633023ce", "score": "0.5345226", "text": "async def query_user(context, data: dict):\n question_dict = enumerate_keys(data)\n await send_message(context, format_dict(question_dict))\n response = await get_reply(context, question_dict)\n return question_dict[response]", "title": "" }, { "docid": 
"068909278aeddcadc7e96e2da04d4c0a", "score": "0.53418124", "text": "def call(query, variables):\n # Add Auth token\n session = requests.Session()\n if \"token\" in conf:\n session.headers.update({\"Authorization\": \"JWT \" + conf[\"token\"]})\n\n return session.post(\n BASE_URL,\n data={\n \"query\": query,\n \"variables\": json.dumps(variables),\n },\n ).json()", "title": "" }, { "docid": "85ca51b41e3b4e411ab218fe5bfccf5a", "score": "0.5340669", "text": "def test_apis_conversations_search(self):\n self._test_search(self._get_conversation())", "title": "" }, { "docid": "387c4f38fba50ba2af01e48bab9acacc", "score": "0.5330538", "text": "def _request(self, query):\n search_params = {'format': 'xml',\n 'search': query.terms,\n 'action': 'opensearch',\n 'limit': query.top}\n\n try:\n response = requests.get(API_ENDPOINT, params=search_params)\n except requests.exceptions.ConnectionError:\n raise EngineConnectionException(self.name, \"Unable to send request, check connectivity\")\n\n if response.status_code != 200:\n raise EngineConnectionException(self.name, \"\", code=response.status_code)\n\n return Wikipedia._parse_xml_response(query, response)", "title": "" }, { "docid": "666d50bc5d261bf7ae43a99e93d147c6", "score": "0.53182787", "text": "async def main(phone_number, bot_choice):\n # Session to store telegram credentials\n if not os.path.exists(\"session\"):\n os.mkdir(\"session\")\n\n # Connect to client\n try:\n client = TelegramClient(f'session/{phone_number}', settings.API_ID, settings.API_HASH)\n await client.start(phone_number)\n me = await client.get_me()\n except OperationalError as db_err:\n logger.error(f'It seems the database is locked. Kill running process or delete session dir. \\n{db_err}')\n sys.exit()\n\n logger.debug(f'Current account: {me.first_name}({me.username})')\n logger.debug('Sending /visit command...')\n\n # Start command /visit\n await client.send_message(bot_choice, '/visit')\n\n # Start visiting the ads\n @client.on(events.NewMessage(chats=bot_choice, incoming=True))\n async def visit_adverts(event):\n \"\"\"\n Handle the visit response event and go visit the website with Selenium\n\n :param event:\n :return:\n \"\"\"\n # Check this is the visit reply event\n original_update = event.original_update\n if type(original_update) is not UpdateShortMessage:\n if hasattr(original_update.message, 'reply_markup') and type(\n original_update.message.reply_markup) is ReplyInlineMarkup:\n\n # Parse the URL of the website to go visit\n url = event.original_update.message.reply_markup.rows[0].buttons[0].url\n\n if url is not None:\n logger.debug(f'Visiting website {url}')\n\n # Visit the URL\n # await get_response(client, event, url, bot_choice)\n await get_response_alt(client, event, url, bot_choice)\n\n # Print earned money\n @client.on(events.NewMessage(chats=bot_choice, incoming=True))\n async def balance_report(event):\n \"\"\"\n Handle the event telling us how much we earned by simply printing it\n\n :param event:\n :return:\n \"\"\"\n message = event.raw_text\n if 'You earned' in message:\n logger.debug(message)\n\n @client.on(events.NewMessage(chats=bot_choice, incoming=True))\n async def user_skip(event):\n \"\"\"\n User skipped a URL\n :param event:\n :return:\n \"\"\"\n message = event.raw_text\n if 'Skipping task...' 
in message:\n logger.debug(message)\n\n @client.on(events.NewMessage(chats=bot_choice, incoming=True))\n async def no_longer_valid(event):\n \"\"\"\n The URL is no longer valid\n\n :param event:\n :return:\n \"\"\"\n message = event.raw_text\n if 'Sorry, that task is no longer valid' in message:\n logger.debug(message)\n # Init /visit to get a new one\n await client.send_message(bot_choice, '/visit')\n\n @client.on(events.NewMessage(chats=bot_choice, incoming=True))\n async def no_more_ads(event):\n \"\"\"\n There are no more ads to visit right now\n\n :param event:\n :return:\n \"\"\"\n message = event.raw_text\n if 'no new ads available' in message:\n logger.debug('Sorry, there are no new ads available. Waiting...')\n\n @client.on(events.NewMessage(chats=bot_choice, incoming=True))\n async def new_site_available(event):\n \"\"\"\n After no more ads we wait and may get a message telling when there is a new site. At\n this point we start the visit process again\n :param event:\n :return:\n \"\"\"\n message = event.raw_text\n if 'new site for you' in message:\n print('New site available. Visiting...')\n await client.send_message(bot_choice, '/visit')\n\n await client.run_until_disconnected()", "title": "" }, { "docid": "e1e99ad853477db8a824163916d98a6c", "score": "0.5295686", "text": "def do_tq_search(self, cmd_args):\n resp_content = self.threat_q.query(cmd_args)\n if resp_content:\n print json.dumps(resp_content, indent=4)", "title": "" }, { "docid": "b7d713366b22dd4624bfc9bca795b641", "score": "0.52877533", "text": "def bot_request():\n logger.debug(\"got json: \" + str(request.json))\n message = request.json[\"message\"]\n user_id = str(message[\"from\"][\"id\"])\n\n if user_id not in users:\n logger.info(\"unregistered userId: \" + user_id)\n # unregistered user - no response\n return Response()\n\n command, argument = parse_message(message, user_id)\n users[user_id].state = UserState.DEFAULT\n users[user_id].current_keyboard = Keyboard.DEFAULT\n if not check_permissions(command, user_id):\n # user does not have permission for command\n logger.info(\"command not allowed: {0} by {1}\".format(command, user_id))\n send_message(user_id, \"Command not allowed\")\n return Response()\n\n execute_command(command, argument, user_id)\n logger.debug(\"success command - responding\")\n return Response()", "title": "" }, { "docid": "ce69fca9cb739abe992ffe86c6155d4c", "score": "0.5280185", "text": "def _kegg_query(operation: str, database: str = None, query: str = None, form: str = None) -> Response:\n # Set arguments to empty strings if None\n query = '' if query is None else query\n form = '' if form is None else form\n database = '' if database is None else database\n\n # Define base URL\n url = 'http://rest.kegg.jp'\n\n # Make get request\n request = os.path.join(url, operation, database, query, form)\n return rget(request)", "title": "" }, { "docid": "2ecaf19692ddc7681b1e40b08fb8699f", "score": "0.5276716", "text": "async def _wolfram(self, ctx, *arguments: str):\n api_key = self.settings[\"WOLFRAM_API_KEY\"]\n if api_key:\n url = \"http://api.wolframalpha.com/v2/query?\"\n query = \" \".join(arguments)\n payload = {\"input\": query, \"appid\": api_key}\n headers = {\"user-agent\": \"Red-cog/1.0.0\"}\n conn = aiohttp.TCPConnector(verify_ssl=False)\n session = aiohttp.ClientSession(connector=conn)\n async with session.get(url, params=payload, headers=headers) as r:\n result = await r.text()\n session.close()\n root = ET.fromstring(result)\n a = []\n for pt in root.findall(\".//plaintext\"):\n if 
pt.text:\n a.append(pt.text.capitalize())\n if len(a) < 1:\n message = \"There is as yet insufficient data for a meaningful answer.\"\n else:\n message = \"\\n\".join(a[0:3])\n else:\n message = (\n \"No API key set for Wolfram Alpha. Get one at http://products.wolframalpha.com/api/\"\n )\n message = escape_mass_mentions(message)\n await self.bot.say(box(message))", "title": "" }, { "docid": "e8569b7ab83e38754f1ed6b5c563f99c", "score": "0.5276533", "text": "def ydapi(q, fromLang='zh-CHS', toLang='EN'):\n appKey = '**************'\n secretKey = '********************'\n url = 'http://openapi.youdao.com/api'\n salt = random.randint(1, 65536)\n sign = appKey+q+str(salt)+secretKey\n m = md5(sign.encode('utf-8')) \n sign = m.hexdigest()\n params = {'q':q, 'from':fromLang, 'to':toLang,\\\n 'appKey': appKey, 'salt': str(salt), 'sign': sign}\n try:\n r = requests.get(url, params=params)\n r.raise_for_status()\n dic = r.json()\n except:\n return None\n return dic", "title": "" }, { "docid": "c7185eccf5403670977b61884b855b64", "score": "0.52764195", "text": "def send_message(message, chatID):\r\n\r\n request = apiai.ApiAI('#').text_request() #dialogfow API key\r\n request.lang = 'ru'\r\n request.session_id = str(chatID) #TODO it's work?\r\n request.query = message.text\r\n response = json.loads(request.getresponse().read())\r\n print(response['result'])\r\n #print(response['result']['metadata']['intentName'])\r\n if response['result']['metadata']['intentName'] == 'smalltalk.dialog.weather' and response['result']['actionIncomplete'] == False: #if we got weather request and DF correct recognize query\r\n city = city_correct(response['result']['fulfillment']['speech']) #correct city name\r\n if city[1] == 'unknown':\r\n bot.send_message(chatID, 'Мне город неизвестен этот, попробуй использовать именительный падеж в названии города.')\r\n else:\r\n region = city[2] #get region from response\r\n start_message('/погода {}'.format(city[1]), chatID, city[0], region)\r\n\r\n elif response['result']['action'] == 'smalltalk.greetings.hello': #if user say hello or something\r\n bot.send_message(chatID, response['result']['fulfillment']['speech'].format(message.from_user.username))\r\n else:\r\n bot.send_message(chatID, response['result']['fulfillment']['speech'])\r\n #print(response['result']['action'])\r\n return 1", "title": "" }, { "docid": "b4e01ea6edeb44ed499654388d2ee73f", "score": "0.5251222", "text": "def _runQuery(query):\n req = requests.post(API_ENDPOINT, json={\"query\": query}, headers=HEADERS)\n if \"errors\" not in req.json().keys():\n #if req.status_code == 200:\n return req.json()\n else:\n # In theory, we should never get here\n print(\"Query failed: {}\".format(req.json()))\n return None", "title": "" }, { "docid": "762e3932fbb861d384de87b6173a6405", "score": "0.52407026", "text": "def get(self, query):\n payload = json.dumps({\"query\": query})\n LOGGER.debug(payload)\n return http_call(\"post\", self.url, headers=_get_headers(), data=payload)", "title": "" }, { "docid": "c315abd4d42aa13fa50a036eb5e661d5", "score": "0.52326494", "text": "def query(query):\n return gql(query)", "title": "" }, { "docid": "dddcf010a668a740bb3a4a5852c3280a", "score": "0.52150106", "text": "def queryAPI(server=\"http://www.peerexplorer.com\", command=\"/api_openapi\", timeout=TIMEOUT, data=None):\r\n \r\n url=urlparse.urljoin(server, command) if command!=\"\" else server\r\n try:\r\n f=urllib2.urlopen(url, timeout=timeout, data=data) # data=None --> GET, otherwise POST\r\n answer = f.read()\r\n f.close()\r\n 
apiResult = json.loads(answer)\r\n except Exception as e:\r\n return False, e\r\n \r\n if apiResult.has_key(\"errorDescription\"):\r\n return False, apiResult\r\n else:\r\n return True, apiResult", "title": "" }, { "docid": "1c0cc0bac336f51751b9bf2baaababd7", "score": "0.521275", "text": "def get_request_from_raw_query_window(self):\n # поместить в общую очередь описание запроса: тип (text/url), настройки,\n settings = self.settings.getsettings() # словарь с настройками.\n if settings:\n # print settings\n self.reqParser.read_corpora_query(self.qField)\n if self.reqParser.last_is_appropriate():\n path = self._ask_path_to_directory()\n if path:\n\n setParams = {}\n\n if settings[\"rand\"]:\n setParams[\"sort\"] = \"random\"\n setParams[\"seed\"] = unicode(random.randint(1, 65356))\n\n if settings['homonymy_in_main_allowed']:\n setParams[\"mycorp\"] = urllib.unquote(u'%28%28tagging%253A%2522manual%2522%29%29')\n\n if setParams:\n request = self.reqParser.get_subcorpora_query_list(**setParams)\n else:\n request = self.reqParser.get_subcorpora_query_list()\n statistics = self.stlist\n queueData = {'type': 'text',\n 'args': (request, self.homeDir, settings, statistics),\n 'text_query': self.qField.get('1.0', END)}\n self._process_request_dict(queueData)\n else:\n errorText = _DialogueLabels.generate_raw_query_error(self.reqParser.last_unread)\n showerror(**errorText)", "title": "" }, { "docid": "430fc4cb43fc3b9641c072055f1c9366", "score": "0.5206097", "text": "async def wiki(self, ctx, *, search):\n\n # Initializing variables\n message = ctx.message\n await self.bot.send_typing(message.channel)\n\n # Obtaining parameter and message edit info\n search, numParameter = self.parameterProcess(search)\n if not search:\n return await self.bot.say(\"Cannot accept just a parameter. \" +\n \"Please try again with a valid search.\")\n messageCheck = 0\n\n # Preparing query URL\n query = 'https://minecraft.gamepedia.com/api.php?action=query' + \\\n '&format=json&list=search&srsearch=' + \\\n search.replace(' ', '_')\n\n # Checking if API is responding\n try:\n r = requests.get(query).text\n data = json.loads(r)\n except:\n return await self.bot.say(\n \"An error has occured. API may be down.\")\n\n # Validating existence of valid queries\n info = data['query']['search']\n if info == []:\n return await self.bot.say(\"No search results returned.\")\n else:\n # Checking if it is a single query or multiple queries\n if len(info) == 1:\n title = info[0]['title']\n snippet = self.cleanspans(info[0]['snippet'])\n elif numParameter != -1:\n # If a parameter was provided, process it with respect to data\n if numParameter == -2:\n num = 0\n await self.bot.say(\n \"Invalid parameter. Assuming first search result.\")\n else:\n num = numParameter - 1\n try:\n title = info[0]['title']\n except IndexError:\n num = 0\n await self.bot.say(\n \"Parameter is out of bounds. 
Assuming first \" +\n \"search result.\")\n else:\n # Prepare a bot function to receive the query the user intends\n # to view\n msg = \"Found the following results: \"\n msg += \"```\"\n for index, item in enumerate(info):\n if index > 9:\n break\n nmb = index + 1\n msg += str(nmb) + \") \" + info[index]['title'] + \"\\n\"\n msg += \"```\"\n msg += \"Please type the number of the item you want to view.\"\n msg += \"\\nType `cancel` to cancel.\"\n msg += \"\\nType `random` for a random result\"\n em2 = discord.Embed(description=msg,\n color=discord.Color.gold())\n botmessage = await self.bot.say(embed=em2)\n messageCheck = 1\n rp = await self.bot.wait_for_message(\n channel=message.channel, author=message.author)\n if rp.content.lower() == \"cancel\":\n return await self.bot.say(\"Search cancelled.\")\n elif rp.content.lower() == \"random\":\n num = randint(0, len(info) - 1)\n editMsg = \"Here's a random result.\"\n else:\n try:\n num = int(rp.content) - 1\n editMsg = \"Here is the search result you requested.\"\n if (num >= len(info)) or (num >= 10) or (num < 0):\n editMsg = \"Chosen number invalid. Assuming \" + \\\n \"first search result.\"\n num = 0\n except:\n editMsg = \"Cannot accept strings for choosing\" + \\\n \" search results. Assuming first\" + \\\n \" search result.\"\n num = 0\n title = info[num]['title']\n snippet = self.cleanspans(info[num]['snippet'])\n\n # Preparing to post the information in an embed\n snippet = \"...\" + snippet + \"...\"\n url = \"http://minecraft.gamepedia.com/\" + title.replace(' ', '_')\n em = discord.Embed(title=title, url=url, description=snippet,\n color=discord.Color.gold())\n em.set_footer(\n text=\"*Wiki snippet may be strange due to API limitations*\")\n em.set_author(name=\"Official Minecraft Wiki\",\n url=\"https://minecraft.gamepedia.com\",\n icon_url=\"https://minecraft.gamepedia.com/media/\" +\n \"minecraft.gamepedia.com/b/bc/Wiki.png\")\n\n # Finally sending the message\n if messageCheck == 1:\n return await self.bot.edit_message(botmessage,\n str(editMsg),\n embed=em)\n else:\n return await self.bot.send_message(message.channel, embed=em)", "title": "" }, { "docid": "2999ea2f68c230e80c3c5b85143d68d8", "score": "0.52033347", "text": "def list_messages_matching_query(service, query: str = ''):\n\n # Find among all e-mails first\n try:\n response = service.users().messages().list(userId=CUR_USER, q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=CUR_USER, q=query, pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n # Check in TRASH too\n response = service.users().messages().list(userId=CUR_USER, q=query, labelIds=['TRASH']).execute()\n if 'messages' in response:\n choice = str(input(f\"In Trash bin were found {len(response['messages'])} message(s) matching your request! \"\n f\"Would you like to process them too (y/n)? 
\"))\n if choice == 'y':\n messages.extend(response['messages'])\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=CUR_USER, q=query, pageToken=page_token,\n labelIds=['TRASH']).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print(f'An error occurred: {error}')", "title": "" }, { "docid": "d67b6ae8b4d58c4d276a35ba820d3093", "score": "0.5202707", "text": "def get_query(self):\n pass", "title": "" }, { "docid": "03cb8ff922734d9728d508af8d7c2e77", "score": "0.5196969", "text": "def query(self):\n with requests.Session() as session:\n session.auth = OAuth2BearerToken(self.access_token)\n response = session.post(self.url, data=self.data)\n try:\n response.raise_for_status()\n except HTTPError:\n pass\n data = response.text\n return data", "title": "" }, { "docid": "cbf9f7670c00985e56da51edfd96074d", "score": "0.5194094", "text": "def query(self):\n with requests.Session() as session:\n session.auth = OAuth2BearerToken(self.access_token)\n response = session.post(self.url)\n try:\n response.raise_for_status()\n except HTTPError:\n pass\n data = response.json()\n return data", "title": "" }, { "docid": "9537e54d2817ddbc0d00f86ba52e4e5f", "score": "0.51909053", "text": "def graphql():\n data = request.get_json(force=True)\n if not data:\n app.logger.error(u'No data received: %s', data)\n return abort(400)\n\n query = data.get('query', None)\n if not query or not isinstance(query, six.string_types):\n app.logger.error(u'No valid query data received: %s', query)\n return abort(400)\n\n args = data.get('args', None)\n if args is None or not isinstance(args, dict): # empty dict args is valid\n app.logger.error(u'No valid args data received: %s', args)\n return abort(400)\n\n # pylint: disable=broad-except\n try:\n return json.dumps(_run_graphql_query(query, args), default=_graphql_type_json_encoder)\n except Exception as e:\n app.logger.error(u'Encountered an error: %s', e)\n return str(e), 500, []\n # pylint: enable=broad-except", "title": "" }, { "docid": "2529c5adb1ad37ab139566091e98eb3a", "score": "0.51894516", "text": "def _send_request(self):\n url = \"https://fr.wikipedia.org/w/api.php\"\n setting = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"prop\": \"extracts\",\n \"generator\": \"geosearch\",\n \"formatversion\": \"2\",\n \"exchars\": \"150\",\n \"exintro\": \"1\",\n \"explaintext\": \"1\",\n \"ggslimit\": \"1\",\n \"ggscoord\": \"{}|{}\".format(self.latitude, self.longitude)\n }\n request = requests.get(url=url, params=setting)\n request_wikipedia = request.json()\n if request.status_code == 200 and 'query' in request_wikipedia:\n return request_wikipedia", "title": "" }, { "docid": "f839613b0cce1747f4c1e2ae92b38e03", "score": "0.5178398", "text": "def echo(update, context):\r\n # update.message.reply_text(update.message.text)\r\n # update.message.reply_text('Ваше сообщение принял: ' + update.message.text.lower())\r\n # update.message.reply_text(f\"Поиск признака вопроса: {re.findall(r'[?]', update.message.text)}\")\r\n logger.info(f\"Request: {update.message.text}\")\r\n question = re.search(r'[?]', update.message.text)\r\n if question is not None:\r\n if flag_w2v and flag_w2v_index:\r\n line_numbers = get_response(update.message.text, w2v_index, model_w2v_wv, 2, w2v_width)\r\n spls = [getline(prep_answ, i+1).split(\"\\t\") for i in line_numbers]\r\n\r\n txt = \"Word2vec 100:\"\r\n logger.info(f\"\\t{txt}\")\r\n 
update.message.reply_text(f\"<b>{txt}</b>\", parse_mode=telegram.ParseMode.HTML)\r\n\r\n for i in range(2):\r\n # print(f\"Вопрос: {spl[0]}\\nОтвет: {spl[1]}\")\r\n answer = re.sub(r'<br>', '\\n', spls[i][1])\r\n answer = re.sub(r'<[^<]*>', '', answer)\r\n str_out = f\"*Вопрос:*\\n{spls[i][0]}\\n\\n*Ответ:*\\n{answer}\"\r\n logger.info(f\"\\t{str_out[:4096]}\")\r\n update.message.reply_text(str_out[:4096], parse_mode=telegram.ParseMode.MARKDOWN)\r\n if flag_ft and flag_ft_index:\r\n line_numbers = get_response(update.message.text, ft_index, model_ft_wv, 2, ft_width)\r\n spls = [getline(prep_answ, i+1).split(\"\\t\") for i in line_numbers]\r\n\r\n txt = \"FastText quant 300:\"\r\n logger.info(f\"\\t{txt}\")\r\n update.message.reply_text(f\"<b>{txt}</b>\", parse_mode=telegram.ParseMode.HTML)\r\n\r\n for i in range(2):\r\n answer = re.sub(r'<br>', '\\n', spls[i][1])\r\n answer = re.sub(r'<[^<]*>', '', answer)\r\n str_out = f\"*Вопрос:*\\n{spls[i][0]}\\n\\n*Ответ:*\\n{answer}\"\r\n logger.info(f\"\\t{str_out[:4096]}\")\r\n update.message.reply_text(str_out[:4096], parse_mode=telegram.ParseMode.MARKDOWN)\r\n else:\r\n session_client = dialogflow.SessionsClient()\r\n session = session_client.session_path(DIALOGFLOW_PROJECT_ID, SESSION_ID)\r\n text_input = dialogflow.types.TextInput(text=update.message.text, language_code=DIALOGFLOW_LANGUAGE_CODE)\r\n query_input = dialogflow.types.QueryInput(text=text_input)\r\n\r\n try:\r\n response = session_client.detect_intent(session=session, query_input=query_input)\r\n except InvalidArgument:\r\n raise\r\n\r\n text = response.query_result.fulfillment_text\r\n\r\n if text:\r\n logger.info(f\"\\t{text}\")\r\n update.message.reply_text(text=text)\r\n else:\r\n txt = 'Не понял...'\r\n logger.info(f\"\\t{txt}\")\r\n update.message.reply_text(text=txt)", "title": "" }, { "docid": "7ebab840de948134cdbc110ac763919a", "score": "0.5174519", "text": "def create_jql_query(self,\n query = None):\n\n # The base uri for api requests\n _query_builder = Configuration.BASE_URI\n \n # Prepare query string for API call\n _query_builder += \"/\"\n\n # Process optional query parameters\n _query_parameters = {\n \"script\": query\n }\n \n # Validate and preprocess url\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n _headers = {\n \"user-agent\": \"APIMATIC 2.0\"\n }\n\n # Prepare the API call.\n _http_request = self.http_client.post(_query_url, headers=_headers, query_parameters=_query_parameters, username=Configuration.basic_auth_user_name, password=Configuration.basic_auth_password)\n\n # Invoke the on before request HttpCallBack if specified\n if self.http_call_back != None:\n self.http_call_back.on_before_request(_http_request)\n\n # Invoke the API call to fetch the response.\n _response = self.http_client.execute_as_string(_http_request)\n\n # Invoke the on after response HttpCallBack if specified\n if self.http_call_back != None:\n self.http_call_back.on_after_response(_response)\n\n # Endpoint error handling using HTTP status codes.\n if _response.status_code == 404:\n return None\n\n # Global error handling using HTTP status codes.\n self.validate_response(_response) \n\n # Return appropriate type\n return _response.raw_body", "title": "" }, { "docid": "d36531dac6f44c15f1003891e7e89041", "score": "0.51700044", "text": "def invoke(self, query):", "title": "" }, { "docid": "513b6d456b0283087ca1948f8804d1ca", "score": "0.5167077", "text": "def execute(self, query, **kwargs):\n if self.api.debug:\n \tprint(query)\n return 
self.api.request(\"/next/changes?action=getEvolvenQueryLanguage\", {\n \"content\": query.replace(\" \", \"+\")\n })", "title": "" }, { "docid": "096ffb57587875bb0db0903342444446", "score": "0.51447296", "text": "def query(self, query_terms):\n valid_terms = [\n \"name\", \"id\", \"status\", \"start\", \"end\", \"page\", \"pagesize\"\n ]\n valid_statuses = [\n \"Submitted\", \"Running\", \"Aborted\", \"Failed\", \"Succeeded\"\n ]\n terms = []\n for query in query_terms:\n key, val = re.split(\"[:=]\", query)\n key = key.lower()\n # validation of query terms\n if key not in valid_terms:\n msg = \"[ERROR] Valid query terms are: {0}\"\n raise ValueError(msg.format(\" \".join(valid_terms)))\n elif key in [\"start\", \"end\"]:\n try:\n val = iso8601.parse_date(val).isoformat()\n except:\n raise ValueError(\"start and end should be in ISO8601\",\n \"datetime format with mandatory offset\",\n \"and start cannot be after end\")\n elif key == \"status\":\n if val not in valid_statuses:\n msg = \"[ERROR] Valid statuses are: {0}\"\n raise ValueError(msg.format(\" \".join(valid_statuses)))\n terms.append(\"{0}={1}\".format(key, val))\n\n query_string = \"&\".join(terms)\n endpoint = \"http://{0}:{1}/{2}/query?{3}\".format(\n self.host, self.secondary_port, self.endpoint, query_string\n )\n response = requests.get(endpoint, headers=self.headers)\n return response", "title": "" }, { "docid": "6e67b30ba0faf2db438b66de4bd606b6", "score": "0.5143526", "text": "def _make_query_rpc_call(self, config, req):\n _api_version = self._batch_shared.conn._api_version\n if _api_version == datastore_rpc._CLOUD_DATASTORE_V1:\n return self._batch_shared.conn._make_rpc_call(\n config, 'RunQuery', req, googledatastore.RunQueryResponse(),\n self.__v1_run_query_response_hook)\n\n return self._batch_shared.conn._make_rpc_call(config, 'RunQuery', req,\n datastore_pb.QueryResult(),\n self.__query_result_hook)", "title": "" }, { "docid": "15efd40d6644b17477081d5b4d82f847", "score": "0.5133496", "text": "def _query(self, url=None, params=\"\"):\n if url is None:\n raise Exception(\"No URL was provided.\")\n headers = {}\n headers['Content-Type'] = 'application/json'\n\n if not isinstance(params, dict):\n params = {}\n params['consumer_key'] = self.consumer_key\n params['access_token'] = self.access_token\n\n h = httplib2.Http()\n resp, content = h.request(self.add_item_url, method=\"POST\", body=json.dumps(params), headers=headers)\n status = resp['status']\n if resp['status'] != '200':\n statustxt = resp['x-error']\n else:\n statustxt = 'All good.'\n\n return (status, statustxt)", "title": "" }, { "docid": "b2165f23eddc6b7d2ec4de1d6cba669f", "score": "0.5129414", "text": "def __query(self, ctx, tcp=False, choice=None, source=None):\n if len(self.data) == 0:\n raise Exception(\"query definition required\")\n if self.data[0].is_raw_data_entry is True:\n data_to_wire = self.data[0].raw_data\n else:\n # Don't use a message copy as the EDNS data portion is not copied.\n data_to_wire = self.data[0].message.to_wire()\n if choice is None or len(choice) == 0:\n choice = list(ctx.client.keys())[0]\n if choice not in ctx.client:\n raise Exception('step %03d invalid QUERY target: %s' % (self.id, choice))\n # Create socket to test subject\n sock = None\n destination = ctx.client[choice]\n family = socket.AF_INET6 if ':' in destination[0] else socket.AF_INET\n sock = socket.socket(family, socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if tcp:\n 
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)\n sock.settimeout(3)\n if source:\n sock.bind((source, 0))\n sock.connect(destination)\n # Send query to client and wait for response\n tstart = datetime.now()\n while True:\n try:\n sendto_msg(sock, data_to_wire)\n break\n except OSError as e:\n # ENOBUFS, throttle sending\n if e.errno == errno.ENOBUFS:\n time.sleep(0.1)\n # Wait for a response for a reasonable time\n answer = None\n if not self.data[0].is_raw_data_entry:\n while True:\n try:\n answer, _ = recvfrom_msg(sock, True)\n break\n except OSError as e:\n if e.errno == errno.ENOBUFS:\n time.sleep(0.1)\n # Track RTT\n rtt = (datetime.now() - tstart).total_seconds() * 1000\n global g_rtt, g_nqueries\n g_nqueries += 1\n g_rtt += rtt\n # Remember last answer for checking later\n self.raw_answer = answer\n ctx.last_raw_answer = answer\n if self.raw_answer is not None:\n self.answer = dns.message.from_wire(self.raw_answer, one_rr_per_rrset=True)\n else:\n self.answer = None\n ctx.last_answer = self.answer", "title": "" }, { "docid": "5a1604493bd1c8812ec45de5721dae4f", "score": "0.5124303", "text": "def _query(\n self,\n module: str,\n subpath: Optional[str] = None,\n options: Optional[dict[str, str]] = None,\n ) -> dict[str, Any]:\n if options is None:\n options = {}\n url = f'https://coins.llama.fi/{module}/'\n if subpath:\n url += subpath\n\n log.debug(f'Querying defillama: {url}?{urlencode(options)}')\n try:\n response = self.session.get(\n f'{url}?{urlencode(options)}',\n timeout=CachedSettings().get_timeout_tuple(),\n )\n except requests.exceptions.RequestException as e:\n self.penalty_info.note_failure_or_penalize()\n raise RemoteError(f'Defillama API request failed due to {e!s}') from e\n\n if response.status_code == HTTPStatus.TOO_MANY_REQUESTS:\n self.last_rate_limit = ts_now()\n msg = f'Got rate limited by Defillama querying {url}'\n log.warning(msg)\n raise RemoteError(message=msg, error_code=HTTPStatus.TOO_MANY_REQUESTS)\n\n if response.status_code != 200:\n msg = (\n f'Defillama API request {response.url} failed with HTTP status '\n f'code: {response.status_code}'\n )\n raise RemoteError(msg)\n\n try:\n decoded_json = json.loads(response.text)\n except json.decoder.JSONDecodeError as e:\n msg = f'Invalid JSON in Defillama response. 
{e}'\n raise RemoteError(msg) from e\n\n return decoded_json", "title": "" }, { "docid": "926fb7719ae40bb13f135bc487a0429e", "score": "0.5122201", "text": "def query(request, *args, **kwargs):\r\n l = livestatus(request)\r\n return l.query(*args, **kwargs)", "title": "" }, { "docid": "dd457c3a491ed3c55ada8996eec369e5", "score": "0.511876", "text": "def get(self, query,):\n pass", "title": "" }, { "docid": "820524a96ba60f622e93e38d12cfbe01", "score": "0.5099064", "text": "async def search(self, query, params, *, type='cloudcast'):\n return await self.get('search', q=query, type=type, **params)", "title": "" }, { "docid": "b93f316054a4a615e342537645856406", "score": "0.50898534", "text": "async def query_members(self, query=..., *, limit=..., user_ids=..., presences=..., cache=...):\n ...", "title": "" }, { "docid": "26d9566c546ab8560fac500cf2deeb30", "score": "0.50852084", "text": "def add_telegram(conn, telegram):\n\n cursor = conn.cursor()\n cursor.execute(sql.INSERT_TELEGRAM, (telegram['telegram'], telegram['username']))", "title": "" }, { "docid": "9b2ad56eb17879f8e9b67105a9c12ac9", "score": "0.5078518", "text": "async def _solve(self, ctx, *, query: str):\n api_key = await self.config.WOLFRAM_API_KEY()\n if not api_key:\n return await ctx.send(\"No API key set for Wolfram Alpha. Get one at http://products.wolframalpha.com/api/\")\n\n url = f\"http://api.wolframalpha.com/v2/query\"\n params = {\n \"appid\": api_key,\n \"input\": query,\n \"podstate\": \"Step-by-step solution\",\n \"format\": \"plaintext\",\n }\n msg = \"\"\n\n async with ctx.typing():\n async with self.session.request(\"GET\", url, params=params) as r:\n text = await r.content.read()\n root = ET.fromstring(text)\n for pod in root.findall(\".//pod\"):\n if pod.attrib[\"title\"] == \"Number line\":\n continue\n msg += f\"{pod.attrib['title']}\\n\"\n for pt in pod.findall(\".//plaintext\"):\n if pt.text:\n strip = pt.text.replace(\" | \", \" \").replace(\"| \", \" \")\n msg += f\"- {strip}\\n\\n\"\n if len(msg) < 1:\n msg = \"There is as yet insufficient data for a meaningful answer.\"\n for text in pagify(msg):\n await ctx.send(box(text))", "title": "" }, { "docid": "dda984bc4e57ea8e6a4d19137ed701dc", "score": "0.50695693", "text": "async def action(self):\n if self.botConfig.autoSend:\n isOK, result = self.botTools.example()\n for chatId in self.botConfig.adminchatid:\n self.bot.sendMessage(\n chat_id=chatId,\n text=f\"\"\"Survey report all {str(self.botConfig.poll)} \\\n seconds\\n\n It's {result}\"\"\"\n )", "title": "" }, { "docid": "bce644047584c22a4daf2320105287f6", "score": "0.5068957", "text": "def execute_query(query):\n query = urllib.parse.quote_plus(query)\n # api = \\\n # 'http://eis-openbudgets.iais.fraunhofer.de/fuseki/sparql?query=%s'\n api = 'http://eis-openbudgets.iais.fraunhofer.de/virtuoso/sparql?' 
\\\n 'format=json&query=%s'\n response = requests.get(api % query)\n # print(response.content)\n return response.json() if response else None", "title": "" }, { "docid": "77b52b33da4633920b0ce9a0e6cffd5e", "score": "0.5067055", "text": "async def wa(self, ctx, *, search):\n\t\tr = requests.get('http://api.wolframalpha.com/v2/query?{}'.format(urllib.parse.urlencode({'appid': cfg.bot['wa-api'], 'input': search})))\n\t\troot = ET.fromstring(r.text)\n\t\tprimaryPod = None\n\t\tfor pod in root.iter('pod'):\n\t\t\ttry:\n\t\t\t\tif pod.attrib['primary'] == 'true':\n\t\t\t\t\tprimaryPod = pod\n\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tpass\n\t\tif primaryPod:\n\t\t\tplaintext = []\n\t\t\tfor ptEl in primaryPod.iter('plaintext'):\n\t\t\t\tplaintext.append(ptEl.text)\n\t\t\treturn await ctx.send('**Result:**\\n'+'\\n\\n'.join(plaintext))\n\t\treturn await ctx.send('I\\'m sorry {}. I\\'m afraid I can\\'t do that :confused:\\nSomething went wrong'.format(ctx.author.display_name),delete_after=5)", "title": "" }, { "docid": "0d9ea3b5296a43f9d397cfb99f73402a", "score": "0.5065062", "text": "def do_search(self):\n self.search = self.twitter.search(q=self.query)", "title": "" }, { "docid": "72b620e0d9f9cb9e6fbe4d8992a35738", "score": "0.5052575", "text": "async def Bot(last_Message):\n print('\\n Bot activated')\n first_last_Message = \"\".join(last_Message.split())\n simple_menu = {\n \"hi\": say_hi,\n \"help\": _help_commands,\n \"goodmorning\": say_goodmorning,\n \"goodnight\": say_goodnight,\n \"howareyou?\": say_fine,\n }\n simple_menu_keys = simple_menu.keys()\n result = []\n\n try:\n command_args = first_last_Message[1:].split(\" \", 1)\n command_arg = last_Message[1:].split(\" \", 1)\n\n if len(command_args) == 1 and command_args[0] in simple_menu_keys:\n return simple_menu[command_args[0]]()\n\n elif command_arg[0] == 'google':\n query = \"\".join(command_arg[1])\n for j in search(query, tld=\"co.in\", num=10, stop=10, pause=2):\n result.append(j)\n print(\"Sending links for query\")\n return result\n\n elif command_arg[0] == \"image\":\n query = \"\".join(command_arg[1])\n await takeScreenshot(query)\n print(\"Taking screenshot of google image for query\")\n return \"Sending you screenshot\"\n\n elif command_arg[0] == \"maps\":\n query = \"\".join(command_arg[1])\n map_parameters_list = query.replace(\" \", \"\")\n map_parameters = map_parameters_list.split(',')\n base_url = \"https://www.google.com/maps/dir/?api=1&\"\n custom_url = base_url + \"origin={ori}&destination={dest}&travelmode={t_mode}\".format(ori=map_parameters[0], dest=map_parameters[1], t_mode=map_parameters[2])\n print(\"Sending link for google maps\")\n return custom_url\n\n else:\n return \"Wrong command. 
Send me /help to see a list of valid commands\"\n\n except KeyError as e:\n print(\"Key Error Exception: {err}\".format(err=str(e)))", "title": "" }, { "docid": "8c983a72da41a4c3f0c6d25e46e7b20b", "score": "0.5048706", "text": "async def search(query):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://nhentai.net/api/galleries/search?query=%s' %(query)) as searchResult:\n results = await searchResult.json()\n return results", "title": "" }, { "docid": "706bcbfb677f36503bf88097a5ba6b35", "score": "0.5041621", "text": "def query(self, query_parameters):\n raise NotImplementedError(\"query() not implemented\")", "title": "" }, { "docid": "9464ac8c2400b005230c9d6a5a238ab8", "score": "0.5032928", "text": "def get(self, request):\n if request.GET.get('q'):\n query = request.GET['q']\n message = 'You submitted: {}'.format(query)\n engine = Gpt2OddballnessEngine(query)\n json_response = engine.get_sentence_oddballness()\n return Response(json_response)\n else:\n return Response({'tip':'To get an example result type \"curl <port>/search/?q=<query_text>\"'})", "title": "" }, { "docid": "39013bcb0508d83d462acda4f0aba6b4", "score": "0.5030094", "text": "def getchat():\r\n\r\n texttosend = ''\r\n i = 0\r\n conversation = ConversationV1(\r\n username='6e28b0ef-6353-40f7-b13c-8881e7d134a3',\r\n password='fvU1aLdL7Ewf',\r\n version='2016-09-20')\r\n workspace_id = '204be5c7-0c5c-4239-a60d-29ac63cb979c'\r\n print(\"\\n==================================\\n\")\r\n print(\"Welcome to PeerMind\\n\")\r\n\r\n print(\"\\nHow are you?\")\r\n\r\n contextprev = None\r\n\r\n\r\n while True:\r\n texttosend = input()\r\n if texttosend.find(\"quit\") is not -1:\r\n print(\"\\nIt was nice talking to you!\\nHave a good day.\")\r\n break\r\n # print(texttosend)\r\n response = conversation.message(workspace_id=workspace_id, message_input={'text': texttosend}, context=contextprev)\r\n contextprev = response['context']\r\n # print(json.dumps(contextprev, indent=2))\r\n\r\n try:\r\n print(response['output']['text'][0])\r\n except:\r\n print(\"We didn't understand your reply\")", "title": "" }, { "docid": "aa9cc22c106d573a995d35cea346763b", "score": "0.50269693", "text": "async def requestapi(self, ctx):\n app = await self.bot.application_info()\n if app.team:\n owner_ids = [m.id for m in app.team.members]\n else:\n owner_ids = [app.owner.id]\n requester_id = ctx.author.id\n requester_name = str(ctx.author)\n bot_id = self.bot.user.id\n bot_name = self.bot.user.name\n guild_name = self.bot.guild.name\n guild_count = self.bot.guild.member_count\n data = json.dumps(dict(owner_ids=owner_ids, requester_id=requester_id, requester_name=requester_name, bot_id=bot_id, bot_name=bot_name, guild_name=guild_name, guild_count=guild_count))\n data = zlib.compress(data.encode(), 9)\n data = base64.b64encode(data).decode()\n try:\n await ctx.author.send(\"Join the Official Modmail Server if you haven't yet: https://discord.gg/F34cRU8. 
\"\n \"Send a DM to our Modmail bot (Modmail#4391) with the following message (copied exactly as-is):\\n\\n```\"\n \"Hello, I would like to request a free Music API URI.\\n\\n\"\n f\"Key:\\n`#{data}#`\\n```\\n\\nWe'll give you a free music API URI with courtesy of ¥¥lorenzo¥¥#0001!\")\n await ctx.send(f\"{ctx.author.mention} Please check your DM!\")\n except discord.HTTPException:\n raise Failure(ctx, \"I'll need to be able to DM you, please enable DM from this server.\")", "title": "" }, { "docid": "12c5afb25780f0b22a7537c451647ff5", "score": "0.5026906", "text": "def sms_reply():\r\n # Fetch the message\r\n query = request.form.get('Body')\r\n query=str(query)\r\n newsapi=NewsApiClient(api_key='3598dccc5c4e4a09ae1fa00e4d317ccb')\r\n data=newsapi.get_everything(q=query,language='en',page_size=10)\r\n data=pd.DataFrame(data)\r\n urls=[x['url'] for x in data.iloc[:,2]]\r\n l=get_urls(urls)\r\n resp = MessagingResponse()\r\n if len(urls)>1:\r\n resp.message(l)\r\n else:\r\n resp.message('Hey there!! Try with more detailed query.\\n MADE BY RAVI')\r\n return str(resp)", "title": "" }, { "docid": "d3e88b360d450a747ce5d83cfe62294f", "score": "0.502402", "text": "def make_coin_query(self, params):\n return self.coin_query_factory.make_query(params)", "title": "" }, { "docid": "f082d5c8f63638af6709f2d8929a7dd8", "score": "0.5023729", "text": "def consume(self):\n \n # update\n r = requests.post(self.httphost, data=self.query, headers={\"Content-type\":\"application/sparql-query\"})\n\n # return status and results\n if r.status == 200:\n return True, json.loads(r.text)\n else:\n return False, None", "title": "" }, { "docid": "e0fe5a34ffbc64cd9ff6ce0f1467ffe6", "score": "0.50183886", "text": "def clientQuery(self):\r\n\r\n if self.disconnected:\r\n self.logger.error(\"The client is on disconencted state,\"\r\n \" skip to send the message.\")\r\n return None\r\n self.logger.debug(\"Send a client query message to Hal\")\r\n clientQueryMsg = HalMessage(\"HalClientQuery\", ClientID=self.clientID)\r\n self.mgrConnection.send(clientQueryMsg.Serialize())\r\n try:\r\n bin = self.mgrConnection.recv()\r\n except Exception as e:\r\n print(\"Got exception when receiving the msg, reason:%s\" % str(e))\r\n return None\r\n rsp = HalMessage.DeSerialize(bin)\r\n if rsp.msg.MsgType != \"HalClientQueryRsp\":\r\n self.logger.error(\"Cannot Query client, \"\r\n \"reason[msgType mismatch:%s]\" % rsp.msg.MsgType)\r\n return None\r\n return rsp", "title": "" }, { "docid": "a9e56ea90ef5e1e0826198c6b3ef37e9", "score": "0.5010167", "text": "def query(self, **kwargs):\n response = self._get(path='/do/query', params=kwargs)\n\n # Ensure result['users'] is a list, no matter what.\n result = response.get('result')\n if result['total_results'] == 0:\n result['user'] = []\n elif result['total_results'] == 1:\n result['user'] = [result['user']]\n\n return result", "title": "" }, { "docid": "f3e2da97af3703fe966b0860a7a6b9e8", "score": "0.5003204", "text": "def do_graphql_query(access_key, secret_key, payload):\n headers = {\"X-Access-Key\": access_key,\n \"X-Secret-Key\": secret_key}\n response = requests.post(constants.GRAPHQL_ENDPOINT_URL, json=payload, headers=headers)\n if response.status_code != 200:\n log.info('Failed to send payload')\n return None\n response_json = response.json()\n return response_json[\"data\"]", "title": "" } ]
69ade830278b509dc236a0d7b91ad23a
Gets the current option.
[ { "docid": "51d4d91a6a0cfaeeb71fc180401d8b75", "score": "0.8279675", "text": "def get_opt(self): \n\t\treturn self.cur_opt", "title": "" } ]
[ { "docid": "285f88057b249e864cd5e59d8158f198", "score": "0.8269581", "text": "def getOption(self):\n return self._option", "title": "" }, { "docid": "0a465ad4e648c0b76efb2d228ac9ff35", "score": "0.81982434", "text": "def get_selected_option(self):\n return self._current_option_id", "title": "" }, { "docid": "7fb642e4590ec40b84b483e6dddf5464", "score": "0.8158658", "text": "def current_option(self) -> str | None:\n option = self.coordinator.get_mug_attr(self._mug_attr)\n return option.value if isinstance(option, Enum) else option", "title": "" }, { "docid": "924f19617ef2d6214261fbbae0c83476", "score": "0.75718266", "text": "def current_option(self) -> str:\n return self.entity_description.value_fn(self.relay)", "title": "" }, { "docid": "93f050031323b0999a25d9548928ede6", "score": "0.75065786", "text": "def getOption(self, option):\n\t\treturn self.options[option]", "title": "" }, { "docid": "1fb538da38db5cbe3de56e5c53d83b9f", "score": "0.7471503", "text": "def current_option(self) -> str | None:\n return _ECOBEE_MODE_TO_TEXT.get(self._char.value)", "title": "" }, { "docid": "d1434f53d689ff826c10f81dccb29bdd", "score": "0.7462944", "text": "def get_option(self, name):\n return self._contexts[-1].get(name)", "title": "" }, { "docid": "699689a01fcb3b824723ba0a9bb75731", "score": "0.7232439", "text": "def get_option(self, option_key):\n\t\treturn self.options[option_key]", "title": "" }, { "docid": "4330db70aae315c04dd9aea486a54f75", "score": "0.71183574", "text": "def get_opt(self):\n return self.parser.parse_args()", "title": "" }, { "docid": "5546522e20e684a9db81ddbf5c7a00e9", "score": "0.70606196", "text": "def get_option(self, key):\n if key in self._options:\n return self._options[key]\n return None", "title": "" }, { "docid": "7087f14cae83407001b066202715ac16", "score": "0.7042046", "text": "def getOption(self, key):\n return self.getOption(key, None)", "title": "" }, { "docid": "8a324a28f23682a34533b2b3242147c0", "score": "0.6960685", "text": "def get(option):\n return _config.getOption(option)", "title": "" }, { "docid": "71d46247dfad6497aad26c6d190c0251", "score": "0.678378", "text": "def get_option(name):\n\n return Option.get(name).value", "title": "" }, { "docid": "5cde5ef6728af90f8b902bc5806d3906", "score": "0.67769223", "text": "def get(self, name):\n if name in self._options:\n return self._options[name]\n else:\n return None", "title": "" }, { "docid": "b66d3616da983a21a050fcef0f42f692", "score": "0.673545", "text": "def get_option(self, value, default):\n if value in self.options:\n return self.options[value]\n else:\n if self.name in self.options:\n if value in self.options[self.name]:\n return self.options[self.name][value]\n return default", "title": "" }, { "docid": "d9ab3d65a55aa4877e44f27bd057f2b5", "score": "0.6716811", "text": "def get_current_options(self):\n\n opts = {'model': self.__proxy__,\n 'model_name': self.__name__}\n return _graphlab.toolkits._main.run(\n 'supervised_learning_get_current_options', opts)", "title": "" }, { "docid": "a7c2921d4772ae5a97ee9d575282fffc", "score": "0.6659951", "text": "def get_optname(self):\n\t\tif self.cur_opt is not None:\n\t\t\treturn self.cur_opt.get_name()", "title": "" }, { "docid": "1affa3a657617595e79ec79cbefdf6df", "score": "0.6645122", "text": "def get_option(self, key, default=None):\n if self.synchronizer:\n return self.extra_opts.get(key, self.synchronizer.options.get(key, default))\n return self.extra_opts.get(key, default)", "title": "" }, { "docid": "5f83f2593e5a659ea3f40643a1455efd", "score": "0.65821177", 
"text": "def getopt(self, option_name, default=None):\n return self._options.get(option_name, default)", "title": "" }, { "docid": "83f06bcc5027c252c9286f1fd89e5919", "score": "0.6475287", "text": "def get(self, name=None):\n name = name or self._current\n return self._choices.get(name, None)", "title": "" }, { "docid": "3a855e0e53909c19758c17ec8f7a737a", "score": "0.6444678", "text": "def GetOption(self, option_name):\n if self.WillPipe(self.command_line):\n return self.options.GetOption(\n pipe.SplitByPipe(self.command_line)[0], option_name)\n else:\n return self.options.GetOption(self.command_line, option_name)", "title": "" }, { "docid": "9756cb0aff763581f370b6efb316d0c4", "score": "0.643961", "text": "def getopt(self, option_name):\n return self.parameters.get(option_name, None)", "title": "" }, { "docid": "85b60fb351c1a613bc77ad9e02b8bdbd", "score": "0.64113396", "text": "def get(name=None):\n global _options\n if name is None:\n return _options.copy()\n try:\n return _options[name]\n except KeyError:\n raise KeyError(\"there is no '{}' option\".format(name))", "title": "" }, { "docid": "1947837c8d45fdac234bb2c002963a91", "score": "0.6410154", "text": "def findOption (self, opt):\n return self.__options.get(self._classForOption(opt))", "title": "" }, { "docid": "3398ce5816fcbdd3a9b8af7476c795d5", "score": "0.63890785", "text": "def selected(self):\n\t\treturn self.var.get()", "title": "" }, { "docid": "3d74d0fc3f8dbb66237b19e68c50f9fd", "score": "0.63726974", "text": "def get(self, key, default=None, unalias=True):\n return self.options.get(key, default=default)", "title": "" }, { "docid": "fd57b00d56090516f6df9e1b8e77ae23", "score": "0.63628095", "text": "def option_get(self, name: str) -> Any:\n return self._option_process(dict_get_value(self.options, name))", "title": "" }, { "docid": "b705a80d1f3748b4173ea13ed6fb6dd3", "score": "0.6342256", "text": "def getSelectValue():\r\n return curSelected", "title": "" }, { "docid": "8858b1ff8fcf5949aec742e9b519ea05", "score": "0.6337885", "text": "def get_options(self):\r\n return self.__options", "title": "" }, { "docid": "947d8751cad5f34ae1aef714f555a08b", "score": "0.6327065", "text": "def get_option_source(self):\n return self.osrc", "title": "" }, { "docid": "5d190e2d17c60a917ba03148cd5c8a2d", "score": "0.63174576", "text": "def __get_options(self):\n return self.__options", "title": "" }, { "docid": "cc8959c7e564e8590f81806daa99f286", "score": "0.62987673", "text": "def get_options(self):\r\n return self.options", "title": "" }, { "docid": "0794dc8f43a048ce278e30a7d28efe65", "score": "0.6259812", "text": "def get(self, key, default=None, unalias=True):\n return self.options.get(str(key).strip(), default=default)", "title": "" }, { "docid": "23e91bda3dc4766b7c37415830e5a7ac", "score": "0.62591827", "text": "def current(self):\n\t\treturn self.__theme[self.__current]", "title": "" }, { "docid": "fc625171b3dfe5dffbbc943128ae6198", "score": "0.624804", "text": "def get_current_step(self):\n return self.manipulation_actions[self.current_action_index].get_selected_step()", "title": "" }, { "docid": "a335135754992eab6b6fe85abc02e4d3", "score": "0.6243372", "text": "def get_value(self, option):\n try:\n return ConfigParser.ConfigParser.get(self, 'Gyrid', option)\n except:\n return None", "title": "" }, { "docid": "17001ed2934716441e0858516b546078", "score": "0.6209433", "text": "def get_current_options(self):\n return {k: self.__proxy__[k] for k in get_default_options()['name']}", "title": "" }, { "docid": "17001ed2934716441e0858516b546078", 
"score": "0.6209433", "text": "def get_current_options(self):\n return {k: self.__proxy__[k] for k in get_default_options()['name']}", "title": "" }, { "docid": "b1676801f42444490d1427f4afc588f1", "score": "0.62050843", "text": "def which_option_is_chosen(self) -> str:\n self._find_dropdown()\n return self.checkbox_container.first_selected_option.text", "title": "" }, { "docid": "d013c5e1fbe4e0301140df53241f4b48", "score": "0.6195646", "text": "def get_first_selected_option(self):\n return Select(self.element).first_selected_option", "title": "" }, { "docid": "9519714c1837dc11db2275735c7b2c36", "score": "0.61918527", "text": "def get_option(option):\n cmd = (CMD_SHOW_OPTION % option).split(' ')\n return util.exec_cmd(cmd)", "title": "" }, { "docid": "89204785420db0dce6dfaa228be70481", "score": "0.61770403", "text": "def getSelected(self):\n\n return self._selected", "title": "" }, { "docid": "28961d738e8fd6f985e896e9b7755d16", "score": "0.61719054", "text": "def get_selected(self):\n\n # If we already have a selection, return it immediately.\n if self._selected is not None:\n return self._selected\n\n # Attempt to retrieve the selection from the session.\n session_key = get_key_for_experiment(self.identifier)\n session_data = self.session.get(session_key)\n\n if session_data is not None:\n data_match = EXPERIMENT_PARSING_EXPRESSION.match(session_data)\n if data_match and \\\n int(data_match.group(1)) == self.generation and \\\n data_match.group(2) in self.options:\n self._selected = self.options[data_match.group(2)]\n return self._selected\n\n # Assign a new option to the session.\n score_multiplier = 1.0 / float(sum(map(lambda o: o.weight,\n self.options.values())))\n chosen_score = random.random()\n\n score_sum = 0.0\n for index, option in enumerate(self.options.values()):\n score_addition = float(option.weight) * score_multiplier\n if chosen_score <= score_sum + score_addition or \\\n index == len(self.options) - 1:\n self.session[session_key] = '%d:%s' % (self.generation,\n option.identifier)\n self._selected = option\n return option\n score_sum += score_addition", "title": "" }, { "docid": "43b44e2267364713de0b88a474564153", "score": "0.61671424", "text": "def option_get_opt(self, name: str, default_value: Any) -> Any:\n return self.option_get_opt_custom(name, default_value, [None, ''])", "title": "" }, { "docid": "6fc31e1f620e440e69daa52854b7ec6e", "score": "0.6163178", "text": "def getCurrentItem(self):\n currentSelectionIndex=self.curselection()\n if len(currentSelectionIndex) > 0:\n return self.get(currentSelectionIndex[0]).lstrip()", "title": "" }, { "docid": "ab6e8e0d6cf420490db4f9d2d6edfc61", "score": "0.61573064", "text": "def tix_option_get(self, name):\n\t\tpass", "title": "" }, { "docid": "168df3a03f04428e0fc9f15b59f0cd5f", "score": "0.61485356", "text": "def current(self):\n return self._current", "title": "" }, { "docid": "10413f45417dbfef91b1d92325494372", "score": "0.6144967", "text": "def get_options (self):\n return self.options", "title": "" }, { "docid": "2876cb8f7f572fdf36e03511a9aceedf", "score": "0.6144059", "text": "def option(self):\n return conf.lib.clang_getDiagnosticOption(self, None)", "title": "" }, { "docid": "d479edcf5fc7f10708fe38e987a6da90", "score": "0.61332065", "text": "def get_option(self, name, default):\n request = getattr(self.context, 'REQUEST', None)\n if request is not None:\n value = request.form.get('form.widgets.' 
+ name.replace('-', '_'),\n self.options.get(name, default))\n else:\n value = self.options.get(name, default)\n if isinstance(value, unicode):\n value = value.encode('utf8')\n return value", "title": "" }, { "docid": "e23e597b1c462136475afcdef6ab5b20", "score": "0.6117852", "text": "def get_option(self, name: str, default_value: Optional[PythonTypes]) -> Optional[PythonTypes]:\n return self._options.get(name, default_value)", "title": "" }, { "docid": "2e117b96fafbf57c7b2c14ef04a84893", "score": "0.61064386", "text": "def GetOptionObject(self, option_name):\n return self.options.GetOptionObject(option_name)", "title": "" }, { "docid": "5d321b46a5cc3df620e131f37698328f", "score": "0.6096523", "text": "def current(self):\r\n if self._type == 'gauge':\r\n return self._data.get('current', self._mod_base())\r\n else:\r\n return self._data.get('current', self.base)", "title": "" }, { "docid": "b9272fdda83ac33c71687c0695a524ed", "score": "0.6081561", "text": "def getSelectedValue(self):\n\t\ttry:\n\t\t\treturn self.items[self.selection]\n\t\texcept:\n\t\t\treturn None", "title": "" }, { "docid": "1d879ac1b9bfdfc6e664a36566fddbfe", "score": "0.6081397", "text": "def get_interactive_mode_option(self):\n return self._interactive_mode", "title": "" }, { "docid": "d891a03924ca47bcfc254e8f98f50184", "score": "0.6080815", "text": "def option(self, name: str):\n return self._io.input.option(name)", "title": "" }, { "docid": "a193ec70dd96fdadc25ecda8d7bbf16c", "score": "0.6077686", "text": "def _get_registered_option(key: str):\n return _registered_options.get(key)", "title": "" }, { "docid": "4249d7611b8bb9fa4e8da48f9bba626f", "score": "0.60762095", "text": "def get_option(options: Values, key: str):\r\n if not hasattr(options, key):\r\n raise ValueError(f\"There is no {key} option\")\r\n return getattr(options, key)", "title": "" }, { "docid": "518394eb27a6e8ef3c402a1f925beaa2", "score": "0.6071859", "text": "def getSelectedPlugin(self):\n sels = self.box.getcurselection()\n if len(sels) == 0:\n return None\n else:\n return self.local_dict[sels[0]]", "title": "" }, { "docid": "b497772b76d526596ec7903da2defff1", "score": "0.6067905", "text": "def getValue(self):\n\t\tif str(self.combobox.currentText()) == \"None\":\n\t\t\t# In the e2 programs we actually need to specify None otherwise default behaviour will be implmented\n\t\t\treturn \"None\"\n\t\tif self.params.text() == \"\":\n\t\t\treturn str(self.combobox.currentText())\n\t\telse:\n\t\t\treturn str(self.combobox.currentText())+\":\"+self.params.text()", "title": "" }, { "docid": "21f049711acbd7c9af0e89b465fe0c70", "score": "0.6059955", "text": "def get(self, item):\n # report default options differently\n option_source = 'D'\n if item in self.__dict__:\n # Instance attributes, such as job_name and job_dir\n debug(\"an attribute: %s\" % item)\n option_source = 'A'\n value = object.__getattribute__(self, item)\n elif self.options.__dict__.get(item) is not None:\n # Commandline options from optparse where option is set\n debug(\"an option: %s\" % item)\n option_source = 'C'\n value = self.options.__dict__[item]\n elif item in self.cmdopts:\n # Commandline -o custom key=value options\n debug(\"a custom -o option: %s\" % item)\n option_source = 'O'\n value = self.cmdopts[item]\n elif self.job_ini.has_option('job_config', item):\n # jobname.fap per-job setings\n debug(\"a job option: %s\" % item)\n option_source = 'F'\n value = self.job_ini.get('job_config', item)\n elif self.job_type_ini.has_option('job_type', item):\n debug(\"a job_type option: %s\" % 
item)\n option_source = 'J'\n value = self.job_type_ini.get('job_type', item)\n elif self.site_ini.has_option('site_config', item):\n debug(\"a site option: %s\" % item)\n value = self.site_ini.get('site_config', item)\n elif self.defaults.has_option('defaults', item):\n debug(\"a default: %s\" % item)\n value = self.defaults.get('defaults', item)\n else:\n # Most things have a default, but not always. Error properly.\n debug(\"unspecified option: %s\" % item)\n raise AttributeError(item)\n\n # Show what options are used the first time they are accessed\n # for the traceability\n if item not in self._used_options:\n if option_source == 'D':\n debug(\"Default: %s = %s\" % (item, value))\n else:\n info(\"Option (%s): %s = %s\" % (option_source, item, value))\n self._used_options.add(item)\n # we output the raw value here and pass to caller for\n return value", "title": "" }, { "docid": "21ad9949dd63a31f74189f56a50f7a7b", "score": "0.6053383", "text": "def GetOption(self, command_line, option_name):\n # Get options set on command line, return value if there's a match.\n for option, value in self._FindOptions(command_line)[0].items():\n if option.name == option_name:\n return value\n\n # Not on command line. Look for default (may be None).\n option = self.GetOptionObject(option_name)\n return option and option.default or None", "title": "" }, { "docid": "917eefb03a6fc48649b2b7e88e49645c", "score": "0.6040703", "text": "def getModeValue():\r\n return curMode", "title": "" }, { "docid": "c033bf3dcbe63200923c40ba556aa9f6", "score": "0.6022867", "text": "def selected(self):\n return self.select.value if self.select is not None else None", "title": "" }, { "docid": "db70ff3decf12e677dd125b4dabfd259", "score": "0.602088", "text": "def get_current():\n pass", "title": "" }, { "docid": "011043dbde8b4dafffb23f5df8018f08", "score": "0.60146296", "text": "def churn_option_selected(self) -> str:\n return pulumi.get(self, \"churn_option_selected\")", "title": "" }, { "docid": "6312b0bf025c16e87d0108a3a6a4e4f4", "score": "0.6011395", "text": "def get(self, section, option):\n conf_parser = self.get_conf_parser()\n return conf_parser.get(section, option)", "title": "" }, { "docid": "7843056b8474ae47ae17f7bf0e49a2c7", "score": "0.6008656", "text": "def get_option(self, option, default=None):\n firstline = self.inputlines[0]\n\n start = firstline.find(\"[\" + option)\n if start == -1:\n return default\n\n # get everything between the []\n str = firstline[start+1:]\n return str[len(option)+1:str.index(']')]", "title": "" }, { "docid": "b2d6880b3b96d08f521c8c061d239a03", "score": "0.6006724", "text": "def get_options(self):\n return self._options", "title": "" }, { "docid": "ced2bebe5cac416d0472082a7a1779b4", "score": "0.6006555", "text": "def config_option(name):\n global current_config\n\n if current_config is None:\n raise RuntimeError('config must be loaded first')\n\n if name in current_config:\n return current_config[name]\n else:\n return None", "title": "" }, { "docid": "70e30dceea706cf536c040e0027e0c97", "score": "0.5976991", "text": "def my_get_opt(confp, sect, optkey, default):\n\tglobal g_opts\n\tglobal g_shortcuts\n\t#\n\ttry:\n\t\tq = confp.get(sect, optkey)\n\texcept configparser.NoOptionError:\n\t\tprint(\"option not found in the \" + sect + \" section of the \" \n\t\t\t\t\t+ \" config file: \" + optkey + \". 
Using default: \" + \n\t\t\t\t\trepr(default))\n\t\tq = default\n\t#\n\t# Perform some translations for booleans:\n\tif(str(q).upper() in ([\"T\", \"TRUE\", \"Y\", \"YES\"])):\n\t\tq = True\n\telif(str(q).upper() in ([\"F\", \"FALSE\", \"N\", \"NO\"])):\n\t q = False\n\telif(q.isdecimal()):\n\t\tq = int(q)\n\t#\t\n\t### Ensure trailing slash on my directories:\n\t##if(optkey == \"shortcuts\":\n\t##\tq = ensure_trailing_slash(q)\n\t#\t\n\n\t##if(sect.find(\"-files\") > 0):\n\t##\tflist.update({optkey:[q, sect]})\n\tif sect == \"shortcuts\":\n\t\tg_shortcuts.update({optkey:q})", "title": "" }, { "docid": "122796e94bc6299d07b6d9985234de30", "score": "0.5976094", "text": "def get_option_value(option):\n return option.get('value') or option['label']", "title": "" }, { "docid": "517fb803994f92e511908d919bc0e68f", "score": "0.5954513", "text": "def __getitem__(self, index):\r\n return self.options[index]", "title": "" }, { "docid": "175058b7aeafecd71b4f5c13cba8cc7f", "score": "0.5944712", "text": "def getOptions(self):\n return self.options", "title": "" }, { "docid": "a590991853043fb6628a23f40611a001", "score": "0.59338194", "text": "def _get_option_value(self, section, option):\n value = None\n if self.config.has_section(section) and self.config.has_option(section, option):\n value = self.config.get(section, option)\n return value", "title": "" }, { "docid": "b603e94010e01f88a16f6d3648ca3e91", "score": "0.5910788", "text": "def get_current_task(self):\n return self.active()", "title": "" }, { "docid": "2a28152d5f6ba6ff68134d1e766c3dc4", "score": "0.59104645", "text": "def __get_options(self):\n return self.element.options", "title": "" }, { "docid": "1488db170d92c3ada616a9ecd5fc0a88", "score": "0.5888991", "text": "def current(self):\n targets = self.targets\n if targets is None:\n return None\n return self.get(targets[-1])", "title": "" }, { "docid": "1194dbc39517dba40f28e9f285531d60", "score": "0.5872679", "text": "def get_current_substep(self):\n full_description = base.get_from_session(self.KEY_CURRENT_ADAPTER_INFO)\n if full_description is not None and self._KEY_CURRENT_SUBSTEP in full_description:\n return full_description[self._KEY_CURRENT_SUBSTEP]\n selected_data_step = self.get_current_default(base.KEY_ADAPTER)\n if full_description is not None and selected_data_step is not None:\n return selected_data_step\n return None", "title": "" }, { "docid": "6c4afb1a7bf850bcadc00f75f1cee58d", "score": "0.5871117", "text": "def getcurrenthelp():\n return gethelp(getcurrentcommandname())", "title": "" }, { "docid": "85237c9d9d13330e78eaa98ac57a778f", "score": "0.58641404", "text": "def opt_result(self):\n return self._opt_result", "title": "" }, { "docid": "0a93ab72e82939c13217c45b84fbfbf7", "score": "0.5856817", "text": "def get_question_option(self):\n if self.user_answer == 1:\n return self.question.option1\n elif self.user_answer == 2:\n return self.question.option2\n elif self.user_answer == 3:\n return self.question.option3\n elif self.user_answer == 4:\n return self.question.option4\n elif self.user_answer == 5:\n return self.question.option5\n else:\n return 'No answer choosen.'", "title": "" }, { "docid": "dc60a821d5df4972c1f5c7489fa7f137", "score": "0.58553714", "text": "def GetCurrentItem(self):\n\n return self._current", "title": "" }, { "docid": "2980dbc2f4db2edaa20726d32ee77e2b", "score": "0.5851358", "text": "def get_current_ime(self):\n return self.shell(\"settings get secure default_input_method\")", "title": "" }, { "docid": "6af411a9bd6d8f88c72674353a25a3d5", "score": 
"0.584595", "text": "def options(self) -> Optional[pulumi.Input['OptionsArgs']]:\n return pulumi.get(self, \"options\")", "title": "" }, { "docid": "eea82625b48786e00e767bea94a48159", "score": "0.58434427", "text": "def get(self, p):\n return self.cli.get(p)", "title": "" }, { "docid": "3329942b579fc2c9f748048db8a4da87", "score": "0.583673", "text": "def get(self, item):\n v = self.pconfig.get(item)\n if v:\n return v\n elif DEFAULTS.has_key(item):\n return DEFAULTS[item]\n debug(\"Unknown configuration option: %s\" % item)\n assert False", "title": "" }, { "docid": "1872cfa2e915787dba4220ef3da61a69", "score": "0.58325446", "text": "def _get_selection(self):\n return self._selection", "title": "" }, { "docid": "f32945cf63de8a0726293c425d99aa47", "score": "0.58297276", "text": "def get_selected(self):\n return self.clients[\"selected\"]", "title": "" }, { "docid": "217929ba4445cc359fb8fcddcd1498ab", "score": "0.58290976", "text": "def _options(self):\n return self.__options # pragma: no cover", "title": "" }, { "docid": "2b4f1b5e7a74ad5ece3eea10530c6b72", "score": "0.5827629", "text": "def current_operation(self):\n return self._current_operation_mode", "title": "" }, { "docid": "fbeb414944e9d178e6d2595dbedcc58d", "score": "0.58136886", "text": "def get_opts(self): #, method=None):\n\t\treturn self.opts", "title": "" }, { "docid": "99b82fc7e315282aeadb76be4848bddd", "score": "0.58132094", "text": "def get(self, section, option):\n if self.has_option(section, option):\n return self._store[section][option][0]", "title": "" }, { "docid": "fc2dff7f22766258f1d8d5f9850fae27", "score": "0.5805196", "text": "def current_operation(self):\n return self._current_operation", "title": "" }, { "docid": "6f0ba9f035449e85f01885068ef86637", "score": "0.58050084", "text": "def get_current_default(self, param_name=None):\n full_description = base.get_from_session(self.KEY_CURRENT_ADAPTER_INFO)\n if full_description is None or self._KEY_SELECTED_DATA not in full_description:\n return None\n full_default = full_description[self._KEY_SELECTED_DATA]\n if param_name is None:\n return full_default\n if full_default is not None and param_name in full_default:\n return full_default[param_name]\n return None", "title": "" }, { "docid": "0cf8ce78bb0ad958273d6b6c3f8ab78a", "score": "0.5802323", "text": "def get_current_profile(self):\n return self.prof_var.get()", "title": "" }, { "docid": "49417d44b433ade2977c7fd9eca72dff", "score": "0.5795168", "text": "def GetSelection(self):\n return self.__selected", "title": "" }, { "docid": "0b40742746ec48a0128bd67f44a57e45", "score": "0.5794109", "text": "def options(self) -> pulumi.Output['outputs.OptionsResponse']:\n return pulumi.get(self, \"options\")", "title": "" }, { "docid": "d3c3b4e3b8c226c2f2a871129792aa2a", "score": "0.5788279", "text": "def GetOptionObject(self, name):\n for option in self:\n if option.name == name:\n return option", "title": "" }, { "docid": "69c673255fb22b664c1ed8afc51d8eaa", "score": "0.5783632", "text": "def get_menu(self):\n \n return self.cur_menu", "title": "" } ]
37006b8d55639e2f4b6cd00a7d4978ff
Prints a pretty banner for starting up script
[ { "docid": "19e35a89a46841a8ba19db6f42ced08e", "score": "0.67428637", "text": "def print_start_banner(target : str, start : str, end : str) -> None:\n print(\"-\" * 60)\n print(\"Starting scan on ports {0} - {1} target: {2}\".format(start, end, target))\n print(\"Time started: {}\".format(datetime.now()))\n print(\"-\" * 60)", "title": "" } ]
[ { "docid": "c96c0c7aedac0dc614e89712f21ebfb4", "score": "0.7727753", "text": "def print_banner():\n print_green(\"///////////////////\")\n print_green(\"// s l y t h e r //\")\n print_green(\"///////////////////\")", "title": "" }, { "docid": "7250930078f091c89fd13e83115bc528", "score": "0.769994", "text": "def show_banner():\n print(\"\")\n print(\" _____ _____ _____ _____ \")\n print(\" | __ \\\\ / ____|/ ____| /\\\\ | __ \\\\ \")\n print(\" | |__) | (___ | | / \\\\ | | | |\")\n print(\" | ___/ \\\\___ \\\\| | / /\\\\ \\\\ | | | |\")\n print(\" | | ____) | |____ / ____ \\\\| |__| |\")\n print(\" |_| |_____/ \\\\_____/_/ \\\\_\\\\_____/ \")\n print(\" \")\n print(\"*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*_*\")\n print(\"\")\n print(\"Python Services Checker and Anomaly Detector*\")\n print(\"by Christian Barral Lopez.\")\n print(\"\")", "title": "" }, { "docid": "c11f8a45a0eab07f1e07e710aad84753", "score": "0.7630155", "text": "def print_banner(self):\n\n print(\n \"****************************************************************************\"\n )\n print(\n \"* rpn_calc: Simple RPN calculator with Sly lexer *\"\n )\n print(\n \"* Tammy Cravit, tammymakesthings@gmail.com *\"\n )\n print(\n \"****************************************************************************\"\n )\n print(\"\")\n self.print_version()\n print(\"Type !HELP for help.\")\n print(\"\")", "title": "" }, { "docid": "1cb297ba914e64e0d323fc66cf7c02f5", "score": "0.7628039", "text": "def banner(self):\n print config.ssh.banner", "title": "" }, { "docid": "46f8c7f61fa1d5645f2a33390eacdb19", "score": "0.76274914", "text": "def print_banner(self):\n print('---------------------------------------------------------')\n print(' Welcome to Super Ultimate Battle Rock Paper Scissors!')\n print('---------------------------------------------------------')", "title": "" }, { "docid": "898d3f54f700cb444544cf3fcd3b6171", "score": "0.7565499", "text": "def print_banner():\r\n print()\r\n print(''' ____ __ __ ______ __ ''')\r\n print(''' / __ \\________ / /_/ /___ __ / ____/___ ____ / / ''')\r\n print(''' / /_/ / ___/ _ \\/ __/ __/ / / / / / / __ \\/ __ \\/ / ''')\r\n print(''' / ____/ / / __/ /_/ /_/ /_/ / / /___/ /_/ / /_/ / / ''')\r\n print('''/_/ __/_/_ \\___/\\__/\\__/\\__, / \\__________/\\____/_/ ''')\r\n print(''' / ____/_ __________ __/_/_ / ____/_ _____ _____________ _____''')\r\n print(''' / /_ / / / /_ /_ / / / / / / / __/ / / / _ \\/ ___/ ___/ _ \\/ ___/''')\r\n print(''' / __/ / /_/ / / /_/ /_/ /_/ / / /_/ / /_/ / __(__ |__ ) __/ / ''')\r\n print('''/_/____/__,_/ /___//__/\\__, / \\____/\\__,_/\\___/____/____/\\___/_/ ''')\r\n print(''' /_ __/________ _(_)___ /_/_ _____ ''')\r\n print(''' / / / ___/ __ `/ / __ \\/ _ \\/ ___/ ''')\r\n print(''' / / / / / /_/ / / / / / __/ / ''')\r\n print('''/_/ /_/ \\__,_/_/_/ /_/\\___/_/ ''')\r\n print()", "title": "" }, { "docid": "c52f67c875144e144f116ca319caed76", "score": "0.7551309", "text": "def banner():\n\n ban = \"\"\"\n %s - %s\n %s\\n\n\"\"\" % (VERSION_STRING, DESCRIPTION, SITE)\n\n dataToStdout(ban, forceOutput=True)", "title": "" }, { "docid": "29296ed6de9e04d10c9fd42da422a6c8", "score": "0.73987114", "text": "def show_banner():\n print get_banner()", "title": "" }, { "docid": "5deeed3261c5a85e290c0ec4ed9f111d", "score": "0.7381225", "text": "def display_banner():\n print('***SDEV300 7384 Lab: Math and Secret Generation***')\n print('***William Easter')", "title": "" }, { "docid": "726fb953e872092761538b5ca0ca6abd", "score": "0.7103662", "text": "def print() -> 
None:\n Logging.console.print(\n Banner.BANNER % __version__\n )", "title": "" }, { "docid": "7acbbcbeebb2e080307319fd26555ebd", "score": "0.6985022", "text": "def printBanner():\n\tb = randint(1,3)\n\tif b == 1:\n\t\tbanner = \"\"\"\\033[35m\n ________ .__ __\n \\______ \\ |__| _____| | _________ __ ___________\n | | \\| |/ ___/ |/ / _ \\ \\/ // __ \\_ __ \\\\ /)___(\\\\\n | ` \\ |\\___ \\| < <_> ) /\\ ___/| | \\/ (='.'=)\n /_______ /__/____ >__|_ \\____/ \\_/ \\___ >__| (\\\\\")_(\\\\\")\n \\/ \\/ \\/ v%s \\/\n https://github.com/shirosaidev/diskover\\033[0m\n\"\"\" % DISKOVER_VERSION\n\telif b == 2:\n\t\tbanner = \"\"\"\\033[35m\n ___ ___ ___ ___ ___ ___ ___ ___\n /\\ \\ /\\ \\ /\\ \\ /\\__\\ /\\ \\ /\\__\\ /\\ \\ /\\ \\\\\n /::\\ \\ _\\:\\ \\ /::\\ \\ /:/ _/_ /::\\ \\ /:/ _/_ /::\\ \\ /::\\ \\\\\n/:/\\:\\__\\ /\\/::\\__\\ /\\:\\:\\__\\ /::-\"\\__\\ /:/\\:\\__\\ |::L/\\__\\ /::\\:\\__\\ /::\\:\\__\\\\\n\\:\\/:/ / \\::/\\/__/ \\:\\:\\/__/ \\;:;-\",-\" \\:\\/:/ / |::::/ / \\:\\:\\/ / \\;:::/ /\n \\::/ / \\:\\__\\ \\::/ / |:| | \\::/ / L;;/__/ \\:\\/ / |:\\/__/\n \\/__/ \\/__/ \\/__/ \\|__| \\/__/ v%s \\/__/ \\|__|\n https://github.com/shirosaidev/diskover\\033[0m\n\"\"\" % DISKOVER_VERSION\n\telif b == 3:\n\t\tbanner = \"\"\"\\033[35m\n _/_/_/ _/ _/\n _/ _/ _/_/_/ _/ _/ _/_/ _/ _/ _/_/ _/ _/_/\n _/ _/ _/ _/_/ _/_/ _/ _/ _/ _/ _/_/_/_/ _/_/\n _/ _/ _/ _/_/ _/ _/ _/ _/ _/ _/ _/ _/\n_/_/_/ _/ _/_/_/ _/ _/ _/_/ _/ v%s _/_/_/ _/\n https://github.com/shirosaidev/diskover\\033[0m\n\"\"\" % DISKOVER_VERSION\n\tsys.stdout.write(banner)\n\tsys.stdout.write('\\n')\n\tsys.stdout.flush()\n\treturn", "title": "" }, { "docid": "c4b2c4545e3b3e93fec7886622d10504", "score": "0.690933", "text": "def header():\n\n print '\\033[32m\\n\\t\\tclusterd/%s - clustered attack toolkit\\033[0m' % version()\n print '\\t\\t\\t\\033[33m[Supporting %d platforms]\\033[0m' % (len(state.supported_platforms)) \n print ''", "title": "" }, { "docid": "d77a1819d3308df4000a3a616471c1ac", "score": "0.6848249", "text": "def print_starting_text(self):\n print(au.colorize('\\nRunning Tests...', constants.CYAN))", "title": "" }, { "docid": "95c775a409d2fd282989f59da5d5203b", "score": "0.6791552", "text": "def print_banner():\n global BANNER_COLOR\n c = randint(1, 4)\n if c == 1:\n BANNER_COLOR = '31m'\n elif c == 2:\n BANNER_COLOR = '32m'\n elif c == 3:\n BANNER_COLOR = '33m'\n elif c == 4:\n BANNER_COLOR = '35m'\n\n botbanner = \"\"\"\\033[%s\n\n ___ _ ____ _ _ ____ _ _ ____ ____ ;\n |__> | ==== |-:_ [__] \\/ |=== |--< [\"]\n ____ ____ ____ _ _ _ ___ ____ ___ /[_]\\\\\n |___ |--< |--| |/\\| |___ |==] [__] | ] [ v%s\n\n\n\\033[0m\"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n if CLIARGS['crawlbot']:\n banner = botbanner\n else:\n b = randint(1, 4)\n if b == 1:\n banner = \"\"\"\\033[%s\n\n ________ .__ __\n \\______ \\ |__| _____| | _________ __ ___________\n | | \\| |/ ___/ |/ / _ \\ \\/ // __ \\_ __ \\\\ /)___(\\\\\n | ` \\ |\\___ \\| < <_> ) /\\ ___/| | \\/ (='.'=)\n /_______ /__/____ >__|_ \\____/ \\_/ \\___ >__| (\\\\\")_(\\\\\")\n \\/ \\/ \\/ v%s \\/\n https://shirosaidev.github.io/diskover\n Crawling all your stuff.\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n elif b == 2:\n banner = \"\"\"\\033[%s\n\n ___ ___ ___ ___ ___ ___ ___ ___\n /\\ \\ /\\ \\ /\\ \\ /\\__\\ /\\ \\ /\\__\\ /\\ \\ /\\ \\\\\n /::\\ \\ _\\:\\ \\ /::\\ \\ /:/ _/_ /::\\ \\ /:/ _/_ /::\\ \\ /::\\ \\\\\n/:/\\:\\__\\ /\\/::\\__\\ /\\:\\:\\__\\ /::-\"\\__\\ /:/\\:\\__\\ |::L/\\__\\ /::\\:\\__\\ 
/::\\:\\__\\\\\n\\:\\/:/ / \\::/\\/__/ \\:\\:\\/__/ \\;:;-\",-\" \\:\\/:/ / |::::/ / \\:\\:\\/ / \\;:::/ /\n \\::/ / \\:\\__\\ \\::/ / |:| | \\::/ / L;;/__/ \\:\\/ / |:\\/__/\n \\/__/ \\/__/ \\/__/ \\|__| \\/__/ v%s \\/__/ \\|__|\n https://shirosaidev.github.io/diskover\n Bringing light to the darkness.\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n elif b == 3:\n banner = \"\"\"\\033[%s\n\n _/_/_/ _/ _/\n _/ _/ _/_/_/ _/ _/ _/_/ _/ _/ _/_/ _/ _/_/\n _/ _/ _/ _/_/ _/_/ _/ _/ _/ _/ _/_/_/_/ _/_/\n _/ _/ _/ _/_/ _/ _/ _/ _/ _/ _/ _/ _/\n _/_/_/ _/ _/_/_/ _/ _/ _/_/ _/ v%s _/_/_/ _/\n https://shirosaidev.github.io/diskover\n \"I didn't even know that was there.\"\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n elif b == 4:\n banner = \"\"\"\\033[%s\n\n __ __\n /\\ \\ __ /\\ \\\\\n \\_\\ \\/\\_\\ ____\\ \\ \\/'\\\\ ___ __ __ __ _ __ //\n /'_` \\/\\ \\ /',__\\\\\\ \\ , < / __`\\/\\ \\/\\ \\ /'__`\\/\\`'__\\\\ ('>\n /\\ \\L\\ \\ \\ \\/\\__, `\\\\\\ \\ \\\\\\`\\ /\\ \\L\\ \\ \\ \\_/ |/\\ __/\\ \\ \\/ /rr\n \\ \\___,_\\ \\_\\/\\____/ \\ \\_\\ \\_\\ \\____/\\ \\___/ \\ \\____\\\\\\ \\\\_\\\\ *\\))_\n \\/__,_ /\\/_/\\/___/ \\/_/\\/_/\\/___/ \\/__/ \\/____/ \\\\/_/ v%s\n https://shirosaidev.github.io/diskover\n \"Holy s*i# there are so many temp files.\"\n Support diskover on Patreon or PayPal :)\\033[0m\n\n \"\"\" % (BANNER_COLOR, DISKOVER_VERSION)\n sys.stdout.write(banner)\n sys.stdout.write('\\n')\n sys.stdout.flush()", "title": "" }, { "docid": "4ac6f83276ce116058c294a0e23b544c", "score": "0.6766413", "text": "def display_Banner():\n msg = 'Awesome'\n stars = '*' * len(msg)\n print(f'\\n {stars} \\n, {msg} \\n')", "title": "" }, { "docid": "087e240251cced533f6bbf4434db0b4e", "score": "0.67605305", "text": "def app_header():\n os.system(\"clear\")\n print(\"\\n\")\n cprint(figlet_format('AMITY', font='epic'), 'yellow')\n cprint('--------------------------------------------------------------------------', 'red')\n cprint(\"\\tAmity is a simple Commandline Room Allocation App.\", 'yellow')\n cprint('--------------------------------------------------------------------------', 'red')\n cprint(\"\\n\\tType 'help' to see a full list of commands\\n\", 'white')", "title": "" }, { "docid": "c421e580e222839dd9dc1c910c500cab", "score": "0.6751985", "text": "def print_program_header():\n print(\"\\n\\t Tachyon v\" + conf.version + \" - Fast Multi-Threaded Web Discovery Tool\")\n print(\"\\t https://github.com/delvelabs/tachyon\\n\")", "title": "" }, { "docid": "d5b1dbe859d139c864447ce04b8f6ad0", "score": "0.6725084", "text": "def banner(template, *args):\n puts(template % args, show_prefix=False)", "title": "" }, { "docid": "65fedc6ba3c259d060460ad9d2fd3d00", "score": "0.656372", "text": "def splashscreen():\n\tos.system('cls' if os.name == 'nt' else 'clear')\n\tprint(\"=\"*40)\n\tprint(\"\")\n\tprint(\" LRG File Parsing Program\")\n\tprint(\" Authors: A. Toutoudaki, J. Mahon\")\n\tprint(\"\")\n\tprint(\"=\"*40)", "title": "" }, { "docid": "1af55c8d3667516f427fa62bf54fe22b", "score": "0.65416896", "text": "def show_title():\r\n print(\"My Recipes Program\")\r\n print()", "title": "" }, { "docid": "4e63b913edde3364bb9ff84c89b91f25", "score": "0.64829016", "text": "def banner():\n print(\"8888888b. d8b\")\n print(\"888 \\\"Y88b Y8P\")\n print(\"888 888\")\n print(\"888 888 8888b. 888 88888b.d88b. 
888 888 .d88b.\")\n print(\"888 888 \\\"88b 888 888 \\\"888 \\\"88b 888 888 d88\\\"\\\"88b \")\n print(\"888 888 .d888888 888 888 888 888 888 888 888 888\")\n print(\"888 .d88P 888 888 888 888 888 888 Y88b 888 Y88..88P\")\n print(\"8888888P\" \"Y888888 888 888 888 888 \\\"Y88888 \\\"Y88P\\\"\")\n print(\" 888\")\n print(\" Y8b d88P\")\n print(\" \\\"Y88P\\\"\")", "title": "" }, { "docid": "80eff213ce145c85f1096c1442f329f3", "score": "0.6468394", "text": "def brief():\n\n\tprint '\\n\\t\\t\\t***FLASH***\\n\\t\\t\\t-----------'\n \n\tprint '''\\n\\nNational Treasure stolen from Florence's Uffizi Gallery Museum\n \n\t\\nThe Treasure has been identified as the painting 'The Birth of Venus', by Botticelli.\n \n\t\\nYour assignment: \n\t\\nTrack the thief from Florence to her hideout and arrest her!\n\n\t\\n\\nYou must apprehend the thief by {}.\n \n\t\\nGood luck, Rookie {}'''.format(deadline(case_start_date),username)", "title": "" }, { "docid": "9fbfffa31ca23dfedda48d3ab43cd534", "score": "0.6447232", "text": "def _welcome():\n print(\n \"\"\"\\\n_______________________________________________________________________________\n\nPython {0}, {10}\n{6} {9} {11}/{12} {1} {2} ({8})\n{7}\n{3}@{4} in {5}\n_______________________________________________________________________________\n\"\"\".format(env.vers, # 0\n env.ostitl, # 1\n env.osver, # 2\n env.user, # 3\n env.host, # 4\n env.cwd, # 5\n env.osname, # 6\n time.strftime(\"%a %b %d %H:%M:%S %Z %Y -- w%W d%j\",\n time.localtime()), # Like $date # 7\n env.distro, # 8\n env.plat, # 9\n env.archp, # 10\n env.proc, # 11\n env.mach)) # 12", "title": "" }, { "docid": "dfbdc41f4d7c6ad0b9c482a447a59fd4", "score": "0.6395183", "text": "def _print_greeting(self: \"Program\") -> None:\n greeting = (\"======================================\\n\" +\n \"✨ Welcome to: Metasign! 
✨\\n\" +\n \"======================================\\n\")\n info = [\n f'⇢ {Colors.bold}Version{Colors.reset}\\t| Metasign ({Colors.Foreground.pink}v{version}{Colors.reset})[{Colors.Foreground.pink}{version_name}{Colors.reset}]',\n f'⇢ {Colors.bold}Author{Colors.reset}\\t| CRash ({Colors.Foreground.pink}https://twitter.com/crashware{Colors.reset})',\n f'⇢ {Colors.bold}Platform{Colors.reset}\\t| Python ({Colors.Foreground.pink}v{platform.python_version()}{Colors.reset})',\n f'⇢ {Colors.bold}Spawned{Colors.reset}\\t| {Colors.Foreground.purple}{datetime.now()}{Colors.reset}',\n ]\n line_bar = \"\"\n line_bar_length = 0\n for bar in info:\n if len(bar) > line_bar_length:\n line_bar_length = len(bar)\n while len(line_bar) != line_bar_length:\n line_bar += \"-\"\n log.info('\\n'+ f\"{greeting}\")\n print(f\"{line_bar}\")\n for entry in info:\n print(f\"{entry}\")\n print(f\"{line_bar}\")", "title": "" }, { "docid": "5af0cca07110ce070f4980e92a05f769", "score": "0.638542", "text": "def print_welcome_header():\n print(\"==== Welcome to the (experimental) investor bot $$$$ ====\")", "title": "" }, { "docid": "1a788a80770a73b891760e839ab0d796", "score": "0.6308467", "text": "def title_screen(self):\n print(\"################################################################################\")\n print(\"#| #\")\n print(\"#| #\")\n print(\"#| #\")\n print(\"#| _ _ _ _ _ _____ #\")\n print(\"#| | | (_)| | | | | | |_ _| #\")\n print(\"#| | | _ | |_ | |_ | | ___ | | ___ __ __ _ __ #\")\n print(\"#| | | | || __|| __|| | / _ \\ | | / _ ]\\ \\ /\\ / /| '_ \\ #\")\n print(\"#| | |____| || |_ | |_ | || __/ | || (_) |\\ V V / | | | | #\")\n print(\"#| \\_____/|_| \\__| \\__||_| \\___| \\_/ \\___/ \\_/\\_/ |_| |_| #\")\n print(\"#| #\")\n print(\"#| ___ _ _ ___ #\")\n print(\"#| |_ _| __ __ | || | o O O / __| _ _ ___ __ __ ___ #\")\n print(\"#| | | \\ V / \\_, | o | (_ | | '_| / _ \\ \\ V / / -_) #\")\n print(\"#| |___| _\\_/_ _|__/ TS__[O] \\___| _|_|_ \\___/ _\\_/_ \\___| #\")\n print(\"#| _|'''''|_|'''''|_| ''''| {======|_|'''''|_|'''''|_|'''''|_|'''''|_|'''''| #\")\n print(\"#| '`-0-0-''`-0-0-''`-0-0-'./o--000''`-0-0-''`-0-0-''`-0-0-''`-0-0-''`-0-0-' #\")\n print(\"#| #\")\n print(\"#| Press 1 to load game #\")\n print(\"#| Press 2 to start new game #\")\n print(\"#| Press 3 for game options #\")\n print(\"#| #\")\n print(\"#| #\")\n print(\"################################################################################\")\n #Intro screen above (code folded)\n start = True\n while start:\n cmd = raw_input(\" > \")\n wronginput = 0\n\n if cmd == \"1\":\n start = False\n self.load_game_screen()\n elif cmd == \"2\":\n start = False\n self.new_game()\n \n elif cmd == \"3\":\n start = False\n self.show_options()\n else:\n wronginput += 1\n if (wronginput > 2):\n print(\"Please type 1, 2, or 3 to select options\")\n wronginput = 0", "title": "" }, { "docid": "76e65a409838f554739ea0629408af5b", "score": "0.62435704", "text": "def print_help():\n print(\"usage: cea-config SCRIPT [OPTIONS]\")", "title": "" }, { "docid": "470de608640d1e8f11752d3459aee3de", "score": "0.61416215", "text": "def pyre_banner(self):\n # show the package header\n yield from textwrap.dedent(altar.meta.header).splitlines()\n # all done\n return", "title": "" }, { "docid": "636619e99ba1e0eef9d6fec64763e0b1", "score": "0.6113562", "text": "def print_logo():\n\n print(\n \"\"\"\n ______ _ _ _\n (_____ \\ \\ \\ / / | |\n _____) ) _ \\ \\/ / |_ ____| |\n | ____/ | | | ) (| _)/ _ | |\n | | | |_| |/ /\\ \\ |_( (_| | |___\n |_| \\__ /_/ 
\\_\\___)__|_|_____)\n (____/ \"\"\"\n )\n print(\"\\n\")\n print(\"-------------------(version\", __version__, \")--------------------\\n\")\n print(\"A Python package for random crystal generation\")\n print(\"The source code is available at https://github.com/qzhu2017/pyxtal\")\n print(\"Developed by Zhu's group at University of Nevada Las Vegas\\n\\n\")", "title": "" }, { "docid": "87305a65d38ea7e72f507e8a5b2240f7", "score": "0.61050993", "text": "def usage():\n print (__doc__ % {'scriptName' : sys.argv[0]}).lstrip()", "title": "" }, { "docid": "118f56febc5a717ce39b2c480fc0a4f8", "score": "0.60985285", "text": "def print_welcome_message():\n print(\n \"\\n---------------------\"\n \"\\nLET'S PLAY BLACKJACK!\"\n \"\\n---------------------\"\n \"\\n\\nType 'quit' at any time to exit.\"\n )", "title": "" }, { "docid": "58ad4a06098204039b5506583f5737f9", "score": "0.6094909", "text": "def about(self):\n messagebox.showinfo(\"About Breakout\",\n \"This program written by \\nArda 'Arc' Akgur\\n for CSSE1001 Assignment3\")", "title": "" }, { "docid": "1e4f596720ab40ffee2e749f6c3b4c20", "score": "0.60778415", "text": "def welcome(title):\n print(\"\\tWelcome to my Python Trivia Challenge!\\n\")\n print(\"\\tThis test was created by\", title,\"\\n\")", "title": "" }, { "docid": "8a410d18c3b2c49c7b1887bba76ad943", "score": "0.60666424", "text": "def banner(comm=MPI.COMM_WORLD):\n if comm.rank == 0:\n print(\"=\"*18)\n print(f\"Running on {comm.size} ranks\")\n print(\"=\"*18)", "title": "" }, { "docid": "c84b0199223849eb093e882ddbbd54c8", "score": "0.60530514", "text": "def print_usage():\n print(\"Usage:\\n sq-nagios-service.py <url> <hostname> <service_description> <service_state> <service_output> <hostaddress>\")", "title": "" }, { "docid": "8fd081ee3b7174976ee27b76927ec2bf", "score": "0.6034877", "text": "def greeting():\r\n\tprint()\r\n\tprint(\"=\"*46)\r\n\tprint(\"Hello, welcome to my elections scraper project.\")\r\n\tprint(\"=\"*46)", "title": "" }, { "docid": "620d03282d0d4765caeaea1635a6b0bd", "score": "0.6021056", "text": "def print_welcome():\n print(\"\"\"\n _ _ \n | | | | \n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \n | | | | (_| | | | | (_| | | | | | | (_| | | | |\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\n __/ | \n |___/\n\"\"\")\n print(\"6\\n\")", "title": "" }, { "docid": "286cdcb939c305fbefa9e01e0be2100a", "score": "0.6020579", "text": "def greeting(__version__):\n print(\"\\nWelcome to Qualys Parser \" + __version__ + \"!\\n\\nPlease include path to file or \" \\\n \"directory that you would like to parse.\\n\")", "title": "" }, { "docid": "05856efb677b20438d0ca0fb54d01153", "score": "0.59994537", "text": "def _main():\r\n print('Platform: \"%s\"' % get_platform())\r\n print('Python version: \"%s\"' % get_python_version())\r\n print('Current installation scheme: \"%s\"' % _get_default_scheme())\r\n print()\r\n _print_dict('Paths', get_paths())\r\n print()\r\n _print_dict('Variables', get_config_vars())", "title": "" }, { "docid": "d4d43f32d6121075d159519a3a7e5e75", "score": "0.59879184", "text": "def print_header():\n\n print(\"\\n\\033[1;37;40m\")\n print(r\"==================================================================================\")\n print(r\" _ _ ____ ____ _ ____ \")\n print(r\" | || | | _ \\ | _ \\(_)_ __ ___ ___ __ _ _ _ _ __ | _ \\ ___ _ __ \")\n print(r\" | || |_| | | | | | | | | '_ \\ / _ \\/ __|/ _` | | | | '__| | | | |/ _ \\ '_ \\ \")\n print(r\" |__ _| |_| | | |_| | | | | | (_) \\__ \\ (_| 
| |_| | | | |_| | __/ | | | \")\n print(r\" |_| |____/ |____/|_|_| |_|\\___/|___/\\__,_|\\__,_|_| |____/ \\___|_| |_| \")\n print(r\" \")\n print(r\"==================================================================================\")\n print(\"\\n\")", "title": "" }, { "docid": "4cc887f12e578ca3955d884b2e205918", "score": "0.5984563", "text": "def training_help():\n print(\"\"\" Training Help menu\n ================\n help - print this menu\n where - find where you are in the program\n back - Go back to the previous menu\n exit - Exit the program\n setprofile - Set your AWS credentials\n showprofile - Show your AWS credentials\n start - Start training mode\n\n \"\"\")\n training_loop()", "title": "" }, { "docid": "71d546fe7856d0138c12563cce3aed24", "score": "0.59828067", "text": "def help_and_exit():\n print(\"\"\"cs2both.py codeskulptor_program.py\n\nMake automatically little changes in codeskulptor_program.py\nto run in CodeSkulptor *and* Python SimpleGUICS2Pygame.\n\nThe file codeskulptor_program.py is copied\nto codeskulptor_program.py.bak before changing.\n\nChanges made :\n- Add shebang '#!/usr/bin/env python'.\n- Add '# -*- coding: latin-1 -*-'.\n- Replace import simplegui\n by\n try:\n import simplegui\n except ImportError:\n import SimpleGUICS2Pygame.simpleguics2pygame as simplegui\n- *Try* to check if a timer is started *after* the start frame.\n\"\"\", file=sys.stderr)\n\n exit(1)", "title": "" }, { "docid": "b10147431f4cae253972dbba7519c1d0", "score": "0.5982514", "text": "def initial_report(user_folder: str) -> None:\n print(\"\\n\")\n intro = \"+++++++++++++++++++++++++ \" + _name + \" +++++++++++++++++++++++++\"\n print(intro)\n print(\"+ PLATFORM: \", platform.system())\n print(\"+ TIME: \", time.asctime())\n print(\"+ VERSION: \", _version)\n print(\"+ USER FOLDER: \", user_folder)\n print(\"+\"*len(intro))\n print(\"\\n\")", "title": "" }, { "docid": "f8ca76a8663696afdcf31c8b352b9918", "score": "0.59589255", "text": "def main():\n banner()\n sentinel = True\n while sentinel:\n sentinel = menu()", "title": "" }, { "docid": "531bb7678d9dd8fe751413a00d4879c1", "score": "0.5941322", "text": "def print_header():\n print \"\\n\\n\"\n print \"{0:^105}\".format(\"ISP Uptime Monitoring Tool\")\n print \"\\n\\n\"\n print template.format(TIME=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"), LENGTH=\"Log Starting\", ERROR=\"\") \n print_separator()\n print template.format(TIME=\"Failure Start/End\", LENGTH=\"Duration\", ERROR=\"Error\")\n print_separator()", "title": "" }, { "docid": "33e094130864f5e81842b74eaf435e8f", "score": "0.5940463", "text": "def main():\n colorama.init()\n hp = HoneyPot()\n try:\n sys.argv[1]\n except IndexError:\n hp.print_usage()\n if sys.argv[1] == \"?\":\n hp.print_usage()\n else:\n print \"[*] Don't forget to:\"\n print colored(\"[!]Setup port forwarding with 'ipt_setup.sh'!!\", 'green')\n print colored(\"[!]Setup full packet capture with 'tcpdump'!!\", 'yellow')\n print \"[*] Building ports list for handlers.\"\n hp.build_ports_list()\n print \"[*] Starting handlers.\"\n hp.build_pot()", "title": "" }, { "docid": "049b01c05354a7654dbfc602ec6a2aad", "score": "0.5935541", "text": "def command(ctx):\n from IPython import embed\n embed(banner1='\\nWelcome to Shell.\\n')", "title": "" }, { "docid": "0e1a3d6f24dcd6a56d26a84837a6fc77", "score": "0.5931592", "text": "def main(self):\n\n # Prints \"Hello world!\" to the screen.\n print \"Hello world!\"", "title": "" }, { "docid": "9f205d91c97f943090d74d716766a4b4", "score": "0.59315246", "text": "def 
print_help():\n print >> sys.stderr, 'Usage:'\n print >> sys.stderr, '\\t%s user assignment archive' % sys.argv[0]\n print >> sys.stderr, '\\t\\tbuilds config and submits assignment for evaluation'\n print >> sys.stderr, '\\t%s config' % sys.argv[0]\n print >> sys.stderr, '\\t\\tresubmits assignment for reevaluation'", "title": "" }, { "docid": "3330b3481da09e46801208b3d1d24eed", "score": "0.5928665", "text": "async def banner(self):\n banner = f'You are connected to an {self.version} server.'\n\n if self.is_tor():\n banner_file = self.env.tor_banner_file\n else:\n banner_file = self.env.banner_file\n if banner_file:\n try:\n with codecs.open(banner_file, 'r', 'utf-8') as f:\n banner = f.read()\n except Exception as e:\n self.logger.error(f'reading banner file {banner_file}: {e!r}')\n else:\n banner = await self.replaced_banner(banner)\n\n return banner", "title": "" }, { "docid": "250e41f328b6260dd9adf0614b18385f", "score": "0.5926507", "text": "def main():\n\tprint('Do things here')", "title": "" }, { "docid": "4db7c3d59a879566f11ae37c9d2c400e", "score": "0.59170574", "text": "def app_banner(self):\n for key in range(7):\n\n cprint(\n self.app['ascii{key}'.format(key=key)],\n 'blue',\n attrs=['bold']\n )\n\n logging.info(self.trad(\n ' Arrow keys to navigate ↓\\n \\\n Space to select options ↔\\n \\\n Enter to confirm ⇲\\n'))", "title": "" }, { "docid": "4d394a2dffcd93bbcf808ee562f28b8a", "score": "0.588567", "text": "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host> [-w <warning> -c <critical>] [-n <name>] [-s]\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=ADDRESS\n Host name, IP Address\n -V, --version\n Print version information\n -w, --warning=DOUBLE\n Warning percent level. Default: 20\n -c, --critical=DOUBLE\n critical percent level. Default: 5\n -s, --show-all\n Show all consumables even if there are OK\n -n, --consumable\n Reg Exp of consumable name. Default: empty (match all)\n\"\"\" % PLUGIN_NAME\n print usage_msg", "title": "" }, { "docid": "62cfcbcbbefe1e42fcf2dec13bda3904", "score": "0.588017", "text": "def game_help():\n\n print(\"\"\"Welcome to Cooper's Blackjack!\n The rules of the game are simple. 
They're listed below, but can\n be reached again if need be.\n\n Type 'quit' to exit the game at any time.\n Type 'help' to access this help menu again.\n Type 'chips' to see how many chips you have remaining.\\n\"\"\")", "title": "" }, { "docid": "1be28e09f804d26b73198e6f0a79d6e8", "score": "0.5872973", "text": "def intro():\r\n\r\n print_pause('You are in a avengers city'\r\n f\" with high buildings and silent wind.\", 2)\r\n print_pause('their is a rumour that {avenger_chosen} find somebody who'\r\n f\" is honest and daring to save the earth and wants to take\"\r\n f\" that person with them in the rescue mission.....\"\r\n f'may be you are the lucky one.', 3)\r\n print_pause('In front of you their is a high rise building.')\r\n print_pause('To your right is a old museum.')\r\n print_pause(\"you have a rescue bag with you in which\"\r\n f\" their is a torch,some food and your dagger.\", 2)", "title": "" }, { "docid": "c57d2562c8964b1b1e548b9e69c361b4", "score": "0.586394", "text": "def print_usage():\n print \"Usage: ./sec-watchdog-final.py [filename] [config-file]\"\n print 'Arguments:'\n print \"filename : Name of the file which contains the list of instances\"\n print \"config-file : File that contains the slack api and aws profile name\"", "title": "" }, { "docid": "eed13d1e0a066a755c34145c02165f14", "score": "0.58132815", "text": "def print_start_message(self):\n start_msg = \"Running for \" + str(self.num_specs) + \\\n \" possible interpreters (cell languages):\\n\"\n interp_str = \"\"\n for i in range(self.num_specs):\n interp_str += \" \" + self.interpreter_spec_list[i][\"inset_specifier\"]\n interp_str += \" (label=\\\"\" + self.interpreter_spec_list[i][\"prog_name\"] + \"\\\"\"\n if not self.interpreter_spec_list[i][\"run_only_on_demand\"]:\n interp_str += \", autostarted in current buffer\"\n interp_str += \")\\n\"\n start_msg += interp_str\n print(start_msg)", "title": "" }, { "docid": "daf694f5737ff385f9a26a8ed8684da6", "score": "0.58108044", "text": "def _cat_banner(self, *args, **keys):\n delim = keys.get(\"delim\",\"#\")\n bottom_delim = keys.get(\"bottom_delim\", None)\n if delim:\n print(delim*80)\n print(*args)\n if bottom_delim:\n print(bottom_delim*80)", "title": "" }, { "docid": "3fec2c4669c27ec2959ccab4417c5482", "score": "0.5806547", "text": "def help():\n\n print(\"USAGE\")\n print(\"----------------------------------------------------------------------\")\n print(sys.argv[0]+\" [query_file] [genbanks_directory/] [identity] [coverage]\" +\n \" [evalue] [prosite_file]\\n\")\n print(\"\\033[1;01mprosite_file\\033[00m -> file that contains prosite database to analize.\" +\n \" It has to be located in the same directory as this script.\")\n print(\"\\033[1;01mquery_file\\033[00m -> file that contains query sequences to analize.\" +\n \" It has to be located in the same directory as this script.\")\n print(\"\\033[1;01mgenbanks_directory\\033[00m -> directory that contains genbanks files to \" +\n \" analize. It is necessary to include a '/' after de directory's name\")\n print(\"It has to be located in the same directory as this script.\\n\")\n print(\"-----For using the default value, introduce a non numeric character.-----\")\n print(\"\\033[1;01midentity\\033[00m -> minimum identity percentage required. 0 <= number <= 100.\")\n print(\"\\033[1;01mcoverage\\033[00m -> minimum coverage percentage required. 0 <= number <= 100.\")\n print(\"\\033[1;01mevalue\\033[00m -> minimum evalue percentage required. 
0 <= number <= 100.\\n\")\n print(\"----------------------------------------------------------------------\")", "title": "" }, { "docid": "e775c1f335db185cfc2ea55078ce800b", "score": "0.5805728", "text": "def info():\n print(\"Made using the OOP RPG Creator (c) me\")", "title": "" }, { "docid": "6abb582f808888cedf14072d1eff7c35", "score": "0.5804189", "text": "def menu_display():\n\n #separator_display('Bikeshare Application Menu', ' ')\n print('-'*79)\n print('| Load Data [1] ' + '| Raw Data [2] ' + '| Time Statistics [3] ' + '| Station Statistics [4] |')\n print('-'*79)\n print('| Duration Statistics [5] ' + '| User Statistics [6] |' + ' Exit Application [7] |')\n print('-'*79)", "title": "" }, { "docid": "06f7a5f731c92c760073085daed94f9d", "score": "0.58031523", "text": "def get_banner():\n banner = \"\"\"\n888 d8P 888 8888888 d8b 888\n888 d8P 888 888 Y8P 888\n888 d8P 888 888 888\n888d88K 888 888 88888b. .d88b. 888 88888b. 888 888888\n8888888b 888 888 888 \"88b d8P Y8b 888 888 \"88b 888 888\n888 Y88b 888 888 888 888 88888888 888 888 888 888 888\n888 Y88b Y88b 888 888 d88P Y8b. 888 888 888 888 Y88b.\n888 Y88b \"Y88888 88888P\" \"Y8888 8888888 888 888 888 \"Y888\n (kubeinit.com) agent version {}\n\"\"\".format(kubeinit_version)\n return banner", "title": "" }, { "docid": "9a7f3ebbcacb511c8305ad00041060de", "score": "0.58027446", "text": "def help():\n print usage_string\n print \"\"\n print \"\\t-h: Print out this help message\"\n print \"\\t-v: Enable verbose mode, log output will also be printed on console\"\n print \"\\t-d: Enable debug mode, debug messages will be logged\"\n sys.exit(0)", "title": "" }, { "docid": "357ce2f888e3fbce1342573e0f23921a", "score": "0.57866246", "text": "def print_usage():\n print(\"Usage:\")\n print(\" py blast.py --fasta_sequence <fasta sequence to blast>\") \n print(\" --email <the user's email>\")\n print(\" --out_file <filepath to store result>\")\n print(\" --min_pct_idnt <the min % identity to use for blast hit>\")\n print(\" --min_qry_cvr <the min query cover to use for blast hit>\") \n print(\" --max_blast_hits <the max number of blast hits to use>\") \n print(\" --max_uniprot_hits <the max number of UniProt hits to use>\")\n print(\" --sleep_time <amount of time to sleep before preforming the blast>\")\n print(\"\")\n print(\" Required params:\\n\\t--fasta_sequence\\n\\t--email\")", "title": "" }, { "docid": "0f59777d2bb0b31ae8c045b99a5a3bc4", "score": "0.57835263", "text": "def display_intro():\n print(\"\\n\" * 5)\n print_pause(\"Rock, Paper, Scissors was actually created in China.\\n\")\n print_pause(\"The rest, as they say, is history.\\n\")\n print_pause(\"* \" * 5)\n print()\n print_pause(\"In this version, user wil learn how to play by ,\\n\")\n print_pause(\"by choosing the number of rounds,\\n\")\n print_pause(\"then his/her can play a match of 3 rounds against\\n\")\n print_pause(\"the chosen Virtual Player. 
Finally it will announced:\\n\")\n print_pause(\"who will be the W I N N E R !\\n\")\n print()\n print_pause(\"* \" * 5)", "title": "" }, { "docid": "6e4ef72ed32cd2e4ce6f710b538194fe", "score": "0.5780562", "text": "def printHelp(scriptname = \"img2braille.py\"):\n helptext = \"Script takes two arguments, argument one is \"\\\n \" input image filename, second argument is output filename.\\n\"\\\n \"EXAMPLE use: \"+scriptname+\" image.gif text.txt\"\n print(helptext)", "title": "" }, { "docid": "92e641ddfe88f90b0cae57a0c87bf28b", "score": "0.57784784", "text": "def show_help():\n filename = os.path.basename(__file__)\n print('Usage: {} [options]'.format(filename))\t\n print(\"Options:\")\n print(\" -t,\\t--testmode\\tExecutes test suite for MongoConnection class.\")\t\n print('Examples:')\n print(' {}'.format(filename))\n print(' {} -t'.format(filename))\n print(' {} --testmode'.format(filename))", "title": "" }, { "docid": "6cd65c96bdd4ff3d2f3295ad85bbdee7", "score": "0.5776559", "text": "def BeginTest(title, version, date):\r\n StartCapture()\r\n print(title + '\\n' + version + '\\n' + date + '\\n')", "title": "" }, { "docid": "1eedcec7855a301f71961ec81d0445e2", "score": "0.5772927", "text": "def print_help():\n\n print(\"\\nUsage: {} -i INPUT [-o OUTPUT] [-rgb] [-h]\\n\".format(argv[0]))\n print(\"Required Arguments:\\n\")\n print(\"\\t-i <path>\\t\\tSpecifies the path of the lsystem\\n\")\n print(\"Optional Arguments:\\n\")\n print(\"\\t-o <path>\\t\\tSpecifies the path of output file\")\n print(\"\\t-rgb\\t\\t\\tEnables Fancy preview mode\")\n print(\"\\t-h \\t\\t\\tShows help\\n\")\n print(\"Example: {} -i arbre.txt -o sortie.py -rgb\\n\".format(argv[0]))\n exit(0)", "title": "" }, { "docid": "8f8c3945f105a926a772ae7be0278746", "score": "0.5761871", "text": "def onStart(self):\n progress(\"%s --- starting up main loop\" % str(self) )", "title": "" }, { "docid": "eb15dbcda89523d0ac494bbaedfece50", "score": "0.5760751", "text": "def main():\n print(\"Running um-allthings main.\")", "title": "" }, { "docid": "538e8ca22f656d990129741a1ca37a81", "score": "0.5750173", "text": "def boot_print(app_name):\n global STARTED_APP\n STARTED_APP = app_name.upper()\n log(\" === STARTING {} === \".format(STARTED_APP))", "title": "" }, { "docid": "5b1272bcc2ee81ea81a1fbea6c176a3e", "score": "0.5743855", "text": "def _show(title, message):\n\n print\n print '%s:' % title\n print ' %s' % message\n print\n sys.exit(1)", "title": "" }, { "docid": "74f4ff3319d6d62083242de68c176066", "score": "0.5743042", "text": "def info(self):\n print('')\n print('='*40)\n print('Sabertooth Motor Controller')\n print(' port: {}'.format(self.saber.port))\n print(' baudrate: {} bps'.format(self.saber.baudrate))\n print(' address: {}'.format(self.address))\n print('-'*40)\n print('')", "title": "" }, { "docid": "869c2faab8e55a11da79cf8a782b48d8", "score": "0.57392454", "text": "def extra_print_in_the_beginning():\n print()", "title": "" }, { "docid": "f42dbf9030f0cdb567172610ae9a935f", "score": "0.5733088", "text": "def printUsage():\n print 'usage: %s [options]' % os.path.abspath(__file__)\n print ' options (and DEFAULT values) are:'\n for i in defaults.keys():\n print '\\t%s=%s' % (i, defaults[i])", "title": "" }, { "docid": "4291bd40b044a4d6ca1e903f6efc24e3", "score": "0.5732844", "text": "def print_finish_banner(target : str, num : int) -> None:\n print()\n print(\"-\" * 60)\n print(\"Finished scan on target: {}\".format(target))\n print(\"Time finished: {}\".format(datetime.now()))\n print(\"{} open 
port(s)\".format(num))\n print(\"-\" * 60)", "title": "" }, { "docid": "40e3cf718dea8207781945c0dc6cdea2", "score": "0.57314813", "text": "def box():\n print \"\\n\" + bcolors.OKBLUE + \"#\"*49\n print ''.join([\"#\",\n \" \"*13,\n bcolors.ENDC,\n \"Welcome to AirDrop-ng\",\n bcolors.OKBLUE,\n \" \"*13,\n \"#\"])\n print \"#\"*49 + bcolors.ENDC + \"\\n\"", "title": "" }, { "docid": "4d80f4a6a88c42d4c870351fc71cca08", "score": "0.5731228", "text": "def welcome_message():\n\n print(\"\\nWelcome to the Cipher tool - Secret Messages Project\")", "title": "" }, { "docid": "f476cd1a9f9dee1979558b20d3561e5b", "score": "0.57289577", "text": "def main():\n # this is a useless comment\n print(\"Call your main application code here\")", "title": "" }, { "docid": "3cf46d9d0b937e4cc995c31545cac71b", "score": "0.57283545", "text": "def usage():\n print('USAGE\\n\\t./203hotline [n k | d]',\n '\\nDESCRIPTION\\n\\tn\\t\\x1B[3mn\\x1B[23m value for the computation of (n k)',\n '\\tk\\t\\x1B[3mk\\x1B[23m value for the computation of (n k)',\n '\\td\\taverage duration of calls (in seconds)', sep='\\n')", "title": "" }, { "docid": "757069cc28c36aedf336490545f65076", "score": "0.5716697", "text": "def print_usage():\n print 'usage: %s [options]' % os.path.abspath(__file__)\n print ' options (and default values) are:'\n for i in defaults.keys():\n print '\\t%s=%s' % (i, defaults[i])", "title": "" }, { "docid": "ee59399cdfb5a0d9b3f736f76546597c", "score": "0.57154244", "text": "def usage():\n print \"\"\"\n If you are seeing this message, Congratulations! You ignored the README.md\n and felt like you knew what you were doing. Good job at that.\n\n In all seriousness, most of what you need to know is in the README.md, so\n check it out to get an idea on what is available to you here and how to use\n it with the NOC Dashboard application.\n\n Remember to set the path in the NOC Dashboard in the config.js, and to enable\n the needed settings as required. The README.md and the docs folder contain\n all that you need to know.\n\n The Default Port is 4510, which can be changed by modifying the Config file.\n If you need to run this within a larger app structure, there are examples of\n gunicorn and Nginx WSGI configs in the server_configs directory. If all else\n fails you can always proxy pass the requests through Apache or Nginx. You\n can find information about this on Google or Stack Overflow.\n\n Have fun.\n \"\"\" #% (sys.argv[0])", "title": "" }, { "docid": "41055cc0aba517e5f54ad281b6eaf10b", "score": "0.5710354", "text": "def program_start():\n welcome = Format.BOLD + Format.YELLOW + ' WGUPS Routing Program ' + Format.END\n print(welcome.center(105, \"*\"))\n instruction = '''\n Select one of the following options:\n 1. Package delivery status\n 2. Total mileage traveled by all trucks\n 3. Truck routes simulation\n 4. Open screenshots\n 5. Exit\n '''\n print(instruction)", "title": "" }, { "docid": "36230a5ccb222ed2e8f47ea7f2ac8b40", "score": "0.57094127", "text": "def Usage():", "title": "" }, { "docid": "cd1e711329eff1d257f36383ae727251", "score": "0.570427", "text": "def _print_usage():\n print >> sys.stderr, \"\"\"Usage:\n ./commander.py course_id directory - where directory contains:\n `archive.zip' `tests.zip' `config'\"\"\"", "title": "" }, { "docid": "fe7a08146747fc06c6e584ed3d3a51c1", "score": "0.57019764", "text": "def show_version():\n print 'Clipboard-modifier version %s.' 
% __version__\n print 'Written by %s' % __author__", "title": "" }, { "docid": "4d6bc6a0d6f7f8ad040e00d5341241fc", "score": "0.5692938", "text": "def do_barkskin(self, arg):\r\n print_description('2nd', 'Barkskin', '1 Action', '1 Hour [C]', 'Touch', 'None', 'Buff',\r\n 'V, S, M', 'Transmutation')", "title": "" }, { "docid": "131b08f68946d45d75b0b43543e660fa", "score": "0.56909895", "text": "def show_small_headline(headline):\n\n\tprint(\"+--- \" + headline + \" ---+\")", "title": "" }, { "docid": "bfe870d808bc73e77a88a5f8ca2de258", "score": "0.56813264", "text": "def info(self):\n print(\"Grrrr..I am an Orc Rider. Don't mess with me.\")", "title": "" }, { "docid": "b5f4843f944e11c08a20cabe41d67ec0", "score": "0.56684935", "text": "def make_banner(self):\n banner = [\n\t\t\tR\" __ __ _ __ _ ___ __ _ _ ___ ___ ___ ___ ___ \",\n\t\t\tR\" | V | | \\| | __/' _/| | | | __| __| _,\\ __| _ \\ \",\n\t\t\tR\" | \\_/ | | | ' | _|`._`.| 'V' | _|| _|| v_/ _|| v / \",\n\t\t\tR\" !_! !_!_!_!\\__!___!___/!_/ \\_!___!___!_! !___!_!_\\ \"\n\t\t]\n start_point, _ = self.graphics.center_just(4, banner[0])\n title = LongTextBox(start_point, banner)\n self.uielements.append(title)", "title": "" }, { "docid": "9ea95f563b9b410eb8f17a4c29f473af", "score": "0.5665186", "text": "def help():\n print()\n print(\"----- HELP -----\")\n print(\"-a: Access\")\n print(\"-c: Create\")\n print(\"-h: Help\")\n print(\"-s: Search\")\n print()", "title": "" }, { "docid": "09a203006b586129ed0ac53136fddc66", "score": "0.5652161", "text": "def usage():\n print(\"Usage: \", sys.argv[0], \" [-v|--verbose] [-m|--moonphase] [-h|--help] [-p=XX|--power=XX] [-o|--once] [-f|--force]\")", "title": "" }, { "docid": "a58dc41343235ae46d022b9a291ce462", "score": "0.5649697", "text": "def testing_start():\n\n print(\"|*|*|*|*|*| Starting testing... |*|*|*|*|*|\")", "title": "" }, { "docid": "d568868e4d3dbe6f8942a6ab3b49eb34", "score": "0.5644214", "text": "def main_help():\n print(\"\"\" Main Help menu\n ================\n help - print this menu\n where - find where you are in the program\n back - Go back to the previous menu\n exit - Exit the program\n setprofile - Set your AWS credentials\n showprofile - Show your AWS credentials\n showsecrets - Show credentials and secrets acquired from the target AWS account\n training - Go to training mode \n dumpsecrets - Gather and dump credentials of EC2 in Secrets Manager and Parameter Store\n attacksurface - Discover attack surface of target AWS account\n addtosecgroups - Add IPs and ports to security groups\n persistence - Add persistence and hide deeper\n ec2instances - Go to EC2 instances menu\n securitygroups - List all discovered Security Groups\n \"\"\")\n main_loop()", "title": "" }, { "docid": "d2a4017e215e2b42e13033cba8b61ea9", "score": "0.56400365", "text": "def banner(message, border):\n\n line = border * (len(message) + 4)\n print(line)\n print(border, message, border)\n print(line)", "title": "" }, { "docid": "7502bddce7f1f268f053aacf3d3da210", "score": "0.56393707", "text": "def printHelp():\n\tprint \"\"\"Use:\n\t\t * --tle to specify the three line element file,\n\t\t * -i to specify which ephemeris accuracy from the sampling plan to us,\n\t\t * -t to set the simulation title,\n\t\t * -o to set the output name,\n\t\t * -l to set the log file name,\n\t\t * -s how many separate simulations to split the desired time interval into, ideally no. nodes X no. 
cores per node,\n\t\t * -p how many processes per node to use for the simulations,\n\t\t * --jdayStart for the start Julian Day epoch,\n\t\t * --jdayStop for the end Julian Day epoch,\n\t\t\t\t* --plan= to specify the sampling plan from which to take ephemeris uncertainties.\"\"\"", "title": "" }, { "docid": "101ffe75c5f28922e534e3823dd00922", "score": "0.56346303", "text": "def show(self):\r\n colors = bytecode.disable_print_colors()\r\n self.pretty_show()\r\n bytecode.enable_print_colors(colors)", "title": "" }, { "docid": "101ffe75c5f28922e534e3823dd00922", "score": "0.56346303", "text": "def show(self):\r\n colors = bytecode.disable_print_colors()\r\n self.pretty_show()\r\n bytecode.enable_print_colors(colors)", "title": "" }, { "docid": "c73fd4f76fa639522e02ef791e523bca", "score": "0.56055844", "text": "def print_welcome_message():\n print()\n print(\"Hello! And welcome to the...\")\n sleep(0.5)\n print()\n print(r\" _____ ______ _____ ___ _ _ \")\n print(r\"|_ _|| ___|/ ___| / _ \\ | | | | \")\n print(r\" | | | |_ \\ `--. / /_\\ \\ _ _ | |_ ___ _ __ ___ __ _ | |_ ___ _ __ \")\n print(r\" | | | _| `--. \\ | _ || | | || __| / _ \\ | '_ ` _ \\ / _` || __| / _ \\ | '__|\")\n print(r\" | | | | /\\__/ / | | | || |_| || |_ | (_) || | | | | || (_| || |_ | (_) || | \")\n print(r\" \\_/ \\_| \\____/ \\_| |_/ \\__,_| \\__| \\___/ |_| |_| |_| \\__,_| \\__| \\___/ |_| \")\n print(r\" \")", "title": "" } ]
8d3afb91c26db22102b0a220ca2c055d
build the content for one option
[ { "docid": "936c529a999bf1e3861633927b45549e", "score": "0.7659812", "text": "def _build_option_content(self):\n return Step(\n name=\"option_content\",\n tipe=\"content\",\n value=self._config,\n index=self.steps.current.index,\n )", "title": "" } ]
[ { "docid": "ad66492fca0c1c1d8d521f00ee802da2", "score": "0.6674333", "text": "def _create_content(self, options):\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(sizer)\n\n for opt in options:\n\n if opt.get_type() == \"bool\":\n self.__add_bool(opt.get_text(), value=opt.get_value())\n\n elif opt.get_type() == \"int\":\n self.__add_int(opt.get_text(), value=opt.get_value())\n\n elif opt.get_type() == \"float\":\n self.__add_float(opt.get_text(), value=opt.get_value())\n\n elif opt.get_type().startswith(\"file\"):\n self.__add_file(opt.get_text(), opt.get_value(), opt.get_type())\n\n else: # if opt.get_type() == \"str\":\n self.__add_text(opt.get_text(), value=opt.get_value())", "title": "" }, { "docid": "ea51068d6f5313c95a9c6970ce416b23", "score": "0.6238981", "text": "def _generate_options_body(self):\r\n\r\n self._options_body = [urwid.Text('OPTIONS', align='center'), urwid.Divider()]\r\n self._option_widgets = []\r\n\r\n for group_key in self._options:\r\n # Get data from group key\r\n if type(self._options[group_key]) == dict:\r\n group_options_data = self._options[group_key]\r\n show_title = True\r\n divider_on = True\r\n elif type(self._options[group_key]) == tuple:\r\n # First element in tuple will be a dictionary containing the option names dictionary\r\n group_options_data = self._options[group_key][0]\r\n if len(self._options[group_key]) == 2:\r\n # Assume second element is show title\r\n show_title = self._options[group_key][1]\r\n elif len(self._options[group_key]) == 3:\r\n # Assume second element is show title and divider on is third element\r\n show_title = self._options[group_key][1]\r\n divider_on = self._options[group_key][2] \r\n\r\n # Add urwid.Text() widget to _options_body to be used as a title if required\r\n if show_title:\r\n self._options_body.append(urwid.Text(group_key, align='center'))\r\n\r\n # Add buttons to _options_body \r\n for option_name in group_options_data:\r\n\r\n # Create defaults for optional components\r\n caption = \"\"\r\n increment = None\r\n limits = [None, None]\r\n enter_fires_change_event = True\r\n enabled = True\r\n\r\n # Create a UserOption widget\r\n option_data = group_options_data[option_name]\r\n\r\n # Determine if option_data is a list, a single value or a tuple containing optional UserOption parameters\r\n if type(option_data) == tuple:\r\n # Option data contains optional UserOption parameters\r\n\r\n # Get value component from option_data tuple\r\n value = option_data[0]\r\n \r\n # Value component will determine if is a list of selectable values or an editable value\r\n if type(value) == list:\r\n # list of selectable values\r\n if len(option_data) == 2:\r\n caption = option_data[1]\r\n elif len(option_data) == 3:\r\n caption = option_data[1]\r\n enter_fires_change_event = option_data[2]\r\n elif len(option_data) == 4:\r\n caption = option_data[1]\r\n enter_fires_change_event = option_data[2]\r\n enabled = option_data[3]\r\n else:\r\n raise RuntimeError('The tuple containing the option data when the option is a list of selectable values must have a length less of 4 or less.')\r\n elif type(value) == int or type(value) == float or type(value) == str:\r\n # editable value\r\n if len(option_data) == 2:\r\n caption = option_data[1]\r\n elif len(option_data) == 3:\r\n caption = option_data[1]\r\n increment = option_data[2]\r\n elif len(option_data) == 4:\r\n caption = option_data[1]\r\n increment = option_data[2]\r\n limits = option_data[3]\r\n elif len(option_data) == 5:\r\n caption = option_data[1]\r\n increment = 
option_data[2]\r\n limits = option_data[3]\r\n enter_fires_change_event = option_data[4]\r\n elif len(option_data) == 6:\r\n caption = option_data[1]\r\n increment = option_data[2]\r\n limits = option_data[3]\r\n enter_fires_change_event = option_data[4]\r\n enabled = option_data[5]\r\n else:\r\n raise RuntimeError('The tuple containing the option data when the option is an editable value must have a length less of 6 or less.')\r\n\r\n \r\n elif type(option_data) == list:\r\n # option_data is a list of selectable values\r\n value = option_data\r\n\r\n elif type(option_data) == int or type(option_data) == float or type(option_data) == str:\r\n # option_data is an editable value\r\n value = option_data\r\n\r\n else:\r\n raise RuntimeError('The option data must be a list, int, float or str, or a tuple where the first element is a list, int, float or str')\r\n\r\n # print(option_name, value, caption, increment, limits, enter_fires_change_event, enabled)\r\n\r\n # Create the actual widget, connect the change signal, append user_option to body and option_widgets list\r\n user_option = UserOption(option_name, value, caption, increment, limits, enter_fires_change_event, enabled)\r\n urwid.connect_signal(user_option, 'value_change', self._option_item_selected)\r\n self._options_body.append(user_option)\r\n self._option_widgets.append(user_option) # used to find user_options\r\n\r\n # Add urwid.Divider() widget to _options_body if required\r\n if divider_on:\r\n self._options_body.append(urwid.Divider())", "title": "" }, { "docid": "d24f201c95eec78d9167f7b6b25534a4", "score": "0.60124874", "text": "def build(self) -> bytes:\n return self.build_option(self.type, self.payload)", "title": "" }, { "docid": "044c6a2fe0dc9ceb731675e1b9d4e3ae", "score": "0.57691467", "text": "def _create_options(self):", "title": "" }, { "docid": "5ae3f605787aff54f083170e09944c3b", "score": "0.56945246", "text": "def _create_content(self):\r\n pass", "title": "" }, { "docid": "ce715e484866b52cd69afd03acf27b54", "score": "0.5632457", "text": "def build(self):", "title": "" }, { "docid": "40e4337d0d6afd181dab5f8361f019a2", "score": "0.5613281", "text": "def build_content(self, request, context):\n for builder in CHUNK_BUILDERS:\n if builder.appropriate_key(self.key, chunk=self):\n return builder.render(request, self, \\\n parent=None, context=context)\n return self.content", "title": "" }, { "docid": "8713c7889c3d0ecb480ef005fd6e8c67", "score": "0.55968535", "text": "def _construct_option_outcome(self):", "title": "" }, { "docid": "9a9aec9c066830a45c8678548b7dfaf9", "score": "0.558199", "text": "def options(request):\n\treturn render(request, 'mealmageapp/options.html')", "title": "" }, { "docid": "d14fc7b8df71912d7cdae9b29cdbc360", "score": "0.5555755", "text": "def _build_main_menu(self):\n return Step(\n name=\"all_options\",\n columns=[\"option\", \"__default\", \"source\", \"via\", \"__current_value\"],\n select_func=self._build_option_content,\n tipe=\"menu\",\n value=self._config,\n )", "title": "" }, { "docid": "26c1a7c204283ea16fd27fb6fd0d6123", "score": "0.5555116", "text": "def _build_description(self, action, data, template, conf):\n return None", "title": "" }, { "docid": "1f0d4ea92e4ce509df9789ac08f4ac32", "score": "0.5513662", "text": "def _build(self):\n self._data = [self._script, self._selection]", "title": "" }, { "docid": "6b30713e69d67ef41176a622fe0f3890", "score": "0.5480315", "text": "def _build(self):\n pass", "title": "" }, { "docid": "6b30713e69d67ef41176a622fe0f3890", "score": "0.5480315", "text": 
"def _build(self):\n pass", "title": "" }, { "docid": "00715bf309783b709593feac3fffeb5c", "score": "0.546971", "text": "def build(self):\n pass", "title": "" }, { "docid": "00715bf309783b709593feac3fffeb5c", "score": "0.546971", "text": "def build(self):\n pass", "title": "" }, { "docid": "c53b706d2eddd25c1ea871e0a12b89e0", "score": "0.54694414", "text": "def render(self) -> str:\n html = None\n \n with open(join(self.path, self._template), 'r') as f:\n html = f.read()\n \n assert html is not None\n \n opts = []\n opt = '<option value=\"{{uid}}\">{{name}}</option>'\n vals = self.requires['multiselect']\n for name in sorted(self.dataset[vals].values):\n opts.append(opt.replace('{{uid}}', name[0]).replace('{{name}}', name[0]))\n\n return html.replace('{{REPLACE}}', '<br/>'.join(opts))", "title": "" }, { "docid": "6827ea8c1863f293b14dbbd41c5c3d88", "score": "0.54301643", "text": "def init_content(self):\n pass", "title": "" }, { "docid": "06ffedb151e75d8144baf6a54dadf7cb", "score": "0.541355", "text": "def build_field_context_menu(self):\n self.context_menu.delete(0, \"end\")\n if issubclass(self.field_type, GameObject):\n for field_link in self.field_links:\n field_link.add_to_context_menu(self.context_menu)\n if issubclass(self.field_type, MapEntry):\n self.context_menu.add_command(\n label=\"Select linked entry name from list\", command=self.choose_linked_map_entry\n )\n if self.field_type == CharacterModel:\n self.context_menu.add_command(\n label=\"Select model from complete list\", command=self.choose_character_model\n )\n\n # Users can open a dialog of checkbuttons.\n if self.field_type == list and self.field_name.endswith(\"_groups\"):\n self.context_menu.add_command(label=\"Show checkbuttons\", command=self._set_group_checkbuttons)\n\n msb_type, msb_subtype = self.master.active_category.split(\": \")\n if msb_type == \"Regions\" or msb_subtype in {\"Characters\", \"Objects\", \"PlayerStarts\"}:\n copy_fields = (\"translate\", \"rotate\")\n if msb_subtype in {\"Characters\", \"Objects\"}:\n copy_fields += (\"draw_parent_name\",)\n if self.field_name in copy_fields:\n copy_menu = self.master.Menu(tearoff=0)\n if len(copy_fields) == 3:\n # Triple option.\n kwargs = {f: True for f in copy_fields}\n self._add_copy_option(copy_menu, **kwargs)\n if self.master.active_category.endswith(\"Boxes\"):\n self._add_copy_option(copy_menu, **kwargs, y_offset=-0.1)\n # Double/single options.\n for copy_field in copy_fields:\n kwargs = {f: f in {self.field_name, copy_field} for f in copy_fields}\n self._add_copy_option(copy_menu, **kwargs)\n if self.master.active_category.endswith(\"Boxes\"):\n self._add_copy_option(copy_menu, **kwargs, y_offset=-0.1)\n self.context_menu.add_cascade(label=\"Copy from hook...\", foreground=\"#FFF\", menu=copy_menu)", "title": "" }, { "docid": "e82c00da2768f84eff127c139fb91d88", "score": "0.5405601", "text": "def build_content(self, request, context, obj=None):\n\n if not obj:\n obj = self.content_object\n\n for builder in CHUNK_BUILDERS:\n if builder.appropriate_key(self.key, chunk=self, obj=obj):\n return builder.render(request, self, \\\n parent=obj, context=context)\n return self.content", "title": "" }, { "docid": "aa86b2a0e4bf39c13b104d7acecc3b08", "score": "0.53366727", "text": "def get_options_form(spawner, additional_cpu_info=\"\", additional_memory_info=\"\", additional_gpu_info=\"\") -> str:\n\n # Only show spawner options for named servers (the default server should start with default values)\n if getattr(spawner, \"name\", \"\") == \"\":\n return ''\n\n 
description_memory_limit = 'Memory Limit in GB.'\n description_env = 'One name=value pair per line, without quotes'\n description_days_to_live = 'Number of days the container should live'\n\n default_image = getattr(spawner, \"image\", \"mltooling/ml-workspace:latest\")\n\n # Show / hide custom image input field when checkbox is clicked\n custom_image_listener = \"if(event.target.checked){ $('#image-name').css('display', 'block'); $('.defined-images').css('display', 'none'); }else{ $('#image-name').css('display', 'none'); $('.defined-images').css('display', 'block'); }\"\n \n # Indicate a wrong input value (not a number) by changing the color to red\n memory_input_listener = \"if(isNaN(event.srcElement.value)){ $('#mem-limit').css('color', 'red'); }else{ $('#mem-limit').css('color', 'black'); }\"\n\n # Create drop down menu with pre-defined custom images\n image_option_template = \"\"\"\n <option value=\"{image}\">{image}</option>\n \"\"\"\n image_options = \"\"\n for image in spawner.workspace_images:\n image_options += image_option_template.format(image=image)\n\n images_template = \"\"\"\n <select name=\"defined_image\" class=\"defined-images\" required autofocus>{image_options}</select>\n \"\"\".format(image_options=image_options)\n\n # template = super()._default_options_form()\n return \"\"\"\n <div style=\"{div_style}\">\n <label style=\"{label_style}\" for=\"image\">Docker Image</label>\n <div name=\"image\">\n <div style=\"margin-bottom: 4px\">\n <input style=\"margin-right: 8px;\" type=\"checkbox\" name=\"is_custom_image\" onchange=\"{custom_image_listener}\"></input>\n <label style=\"font-weight: 400;\" for=\"is_custom_image\">Custom Image</label>\n </div>\n <input style=\"{input_style}; display: none;\" name=\"custom_image\" id=\"image-name\" class=\"custom-image\" placeholder=\"Custom Image\"></input>\n {images_template}\n </div>\n </div>\n <div style=\"{div_style}\">\n <label style=\"{label_style}\" for=\"cpu_limit\">CPU Limit {optional_label}</label>\n <input style=\"{input_style}\" name=\"cpu_limit\" placeholder=\"e.g. 8\"></input>\n <div style=\"{additional_info_style}\">{additional_cpu_info}</div>\n </div>\n <div style=\"{div_style}\">\n <label style=\"{label_style}\" for=\"mem_limit\" title=\"{description_memory_limit}\">Memory Limit in GB {optional_label}</label>\n <input style=\"{input_style}\" name=\"mem_limit\" id=\"mem-limit\" title=\"{description_memory_limit}\" placeholder=\"e.g. 1, 2, 15, ...\" oninput=\"{memory_input_listener}\"></input>\n <div style=\"{additional_info_style}\">{additional_memory_info}</div>\n </div>\n <div style=\"{div_style}\">\n <label style=\"{label_style}\" for=\"env\" title=\"{description_env}\">Environment Variables {optional_label}</label>\n <textarea style=\"{input_style}\" name=\"env\" title=\"{description_env}\" placeholder=\"NAME=VALUE\"></textarea>\n <div style=\"{additional_info_style}\">{description_env}</div>\n </div>\n <div style=\"{div_style}\">\n <label style=\"{label_style}\" for=\"days_to_live\" title=\"{description_days_to_live}\">Days to live {optional_label}</label>\n <input style=\"{input_style}\" name=\"days_to_live\" title=\"{description_days_to_live}\" placeholder=\"e.g. 
3\"></input>\n </div>\n \"\"\".format(\n div_style=div_style,\n label_style=label_style,\n input_style=input_style,\n additional_info_style=additional_info_style,\n default_image=default_image,\n images_template=images_template,\n custom_image_listener=custom_image_listener,\n optional_label=optional_label,\n description_memory_limit=description_memory_limit,\n memory_input_listener=memory_input_listener,\n description_env=description_env,\n description_days_to_live=description_days_to_live,\n additional_cpu_info=additional_cpu_info,\n additional_memory_info=additional_memory_info,\n )", "title": "" }, { "docid": "6ae97a882401aba660e9b1e498e99985", "score": "0.53237975", "text": "def render_options(self):\n if self.options:\n self.render_title(\"Options\")\n\n with indent(self.INDENT_SIZE):\n for o in self.options:\n self.render_line2(o['option'], o['desc'])\n puts(\"\")", "title": "" }, { "docid": "49197683ccc9bbd687b55c9763c8aa22", "score": "0.53213006", "text": "def build(self) -> bytes:\n buf = bytearray([0])\n for k, v in self.configs:\n if v is not None:\n buf.append(len(k) + len(v) + 1)\n buf += k.encode(\"ascii\")\n buf += b\"=\"\n buf += v.encode(\"ascii\")\n else:\n buf.append(len(k))\n buf += k.encode(\"ascii\")\n buf.append(0)\n return self.build_option(self.type, buf)", "title": "" }, { "docid": "cdf20ee48693d1ee771865aabdddf5c5", "score": "0.5320269", "text": "def compile_data(self):\n if self.args[\"--builder-name\"]:\n self.build_data[\"builder\"][\"name\"] = self.args[\"--builder-name\"]\n if self.args[\"--media\"]:\n self.build_data[\"builder\"][\"iso_url\"] = self.args[\"--media\"]\n if self.args[\"--check-sum\"]:\n self.build_data[\"builder\"][\"iso_checksum\"] = self.args[\"--check-sum\"]\n if self.args[\"--check-sum-type\"]:\n self.build_data[\"builder\"][\"iso_checksum_type\"] = self.args[\"--check-sum-type\"]\n if self.args[\"--box-name\"]:\n self.build_data[\"box_name\"] = self.args[\"--box-name\"]\n if self.args[\"--memory\"]:\n self.build_data[\"builder\"][\"memory\"] = self.args[\"--memory\"]\n if self.args[\"--disk-size\"]:\n self.build_data[\"builder\"][\"disksize\"] = self.args[\"--disk-size\"]\n if self.args[\"--proxy\"]:\n self.build_data[\"proxy\"] = self.args[\"--proxy\"]\n if self.args[\"--user-name\"]:\n self.build_data[\"proxy_username\"] = self.args[\"--user-name\"]\n if self.args[\"--password\"]:\n self.build_data[\"proxy_password\"] = getpass.getpass(prompt=\"Enter password: \")", "title": "" }, { "docid": "88609c1e2b1723f911b4be705c78db1f", "score": "0.5287009", "text": "def build(_: Context):", "title": "" }, { "docid": "0dccdf4876e034b92be2b6f178fdc80d", "score": "0.52717847", "text": "def build_html(self):\n self.html = self.title_slide.build_html() + \"\".join([s.build_html() for s in self.sections])\n return self.html", "title": "" }, { "docid": "a8d3f6e89a2dca8fddd0052da30f8ba0", "score": "0.5235679", "text": "def make_content(self):\n\n styles = getSampleStyleSheet()\n #You can apply different styles\n styleN = {\"default\": ParagraphStyle('default',fontName='Times-Roman',fontSize=12)}\n\n self.slogan = []\n self.slogan.append(Paragraph(self.tag.slogan1,styleN[\"default\"]))\n self.slogan.append(Paragraph(self.tag.slogan2,styleN[\"default\"]))\n\n self.contact =[]\n self.contact.append(Paragraph(self.tag.contact1,styleN[\"default\"]))\n self.contact.append(Paragraph(self.tag.contact2,styleN[\"default\"]))\n self.contact.append(Paragraph(self.tag.contact3,styleN[\"default\"]))\n\n self.url=[Paragraph(self.tag.url,styleN[\"default\"])]\n\n 
self.qrcode=[]\n if self.tag.qrcode:\n self.qrcode.append(Image(self.tag.qrcode.path, 1.0*inch, 1.0*inch))\n\n self.logo=[]\n if self.tag.logo:\n self.logo.append(Image(self.tag.logo.path, 0.4*self.height, 0.4*self.height))", "title": "" }, { "docid": "fba4233b1c9aedd79f7f3036a78ce10b", "score": "0.5228066", "text": "def build(self) -> bytes:\n return self.build_option(\n self.type, struct.pack(\"!BHH\", 0, self.priority, self.weight)\n )", "title": "" }, { "docid": "05b79dcbf6289c3cdbdb2b8db6649e35", "score": "0.5224002", "text": "def build(self):\n # Call the page section builders in order.\n self.__build_header()\n self.__build_content()\n self.__build_footer()\n\n # Return the page content.\n if \"REQUEST_METHOD\" in environ:\n return self.httpheader + self.html\n else:\n return self.html", "title": "" }, { "docid": "91ffa1b364ae5487c7ff62ca3b78ae21", "score": "0.51967454", "text": "def buildOptions(parser):", "title": "" }, { "docid": "2995242473bdfae750aa130b6d0cabf8", "score": "0.5183647", "text": "def getopt_content_tag(self):\n return self.processing_options.get('content_tag', None)", "title": "" }, { "docid": "bf81ae74df791a7659b24be365861454", "score": "0.51769376", "text": "def _options_form(self):\n options = ''.join([\n self.option_template.format(image=di, name= re.split('\\W+', di)[1]) for di in self.dockerimages\n ])\n return self.form_template.format(option_template=options)", "title": "" }, { "docid": "305f706c9f60769548700a21d6b852fe", "score": "0.51681364", "text": "def _build_ui(self):\n raise NotImplementedError()", "title": "" }, { "docid": "77524224408661dd3af101d18ae346b2", "score": "0.5150901", "text": "def show_options(self):\n self.ui.HAtomsCheckBox.setChecked(self.without_h)\n self.ui.ReportTextCheckBox.setChecked(not self.report_text)\n self.ui.PictureWidthDoubleSpinBox.setValue(self.picture_width)\n self.ui.CheckCIFServerURLTextedit.setText(self.checkcif_url)\n self.ui.CODURLTextedit.setText(self.cod_url)\n self.ui.ADPTableCheckBox.setChecked(self.report_adp)\n self.ui.trackChangesCifCheckBox.setChecked(self.track_changes)\n #\n self.ui.MainStackedWidget.go_to_options_page()", "title": "" }, { "docid": "403f7daabfdaa96ce1763947b6d0f7f9", "score": "0.51492804", "text": "def __init__(self,collection):\n handler.ContentHandler.__init__(self)\n self.collection = collection\n \n self.mname = None\n self.option_list = None\n self.oname = None\n self.o = None\n self.an_o = None\n self.list_class = OptionList", "title": "" }, { "docid": "db8b584555af4a35ead33d70015f7d9e", "score": "0.51343757", "text": "def __str__(self):\n\t\t\n\t\thtml = ''\n\t\tfor val in self._list:\n\t\t\tlistElement = genHTMLElement('option',[],[],str(val))\n\t\t\thtml = html + listElement\n\t\treturn html", "title": "" }, { "docid": "232931a9c5de82e24aafba0e69cbe5d8", "score": "0.51298606", "text": "def build(self):\n raise NotImplementedError", "title": "" }, { "docid": "232931a9c5de82e24aafba0e69cbe5d8", "score": "0.51298606", "text": "def build(self):\n raise NotImplementedError", "title": "" }, { "docid": "232931a9c5de82e24aafba0e69cbe5d8", "score": "0.51298606", "text": "def build(self):\n raise NotImplementedError", "title": "" }, { "docid": "232931a9c5de82e24aafba0e69cbe5d8", "score": "0.51298606", "text": "def build(self):\n raise NotImplementedError", "title": "" }, { "docid": "04ec3612e3f8a8b37ce69402c6dd4255", "score": "0.51192486", "text": "def buildFormDescr(self, formName):\n self.nodes = self.vf.getSelection()\n self.molModes = {} # this dictionary has molecules as keys and\n # 
a method for assigning SS as value or None\n\n molecules = self.nodes.top.uniq()\n if len(molecules)==0: return \n idf = InputFormDescr(title=\"Get SS Information:\")\n\n haveStride = self.haveStride\n for mol in molecules:\n\n # check if there are amino acids in the selection\n aa = [x for x in mol.chains.residues if \\\n AAnames.has_key(x.type.strip().upper())]\n\n if len(aa)==0: # NO AA -> cannot use file, PROSS or STRIDE\n self.molModes[mol.name]=None\n continue\n \n # MS 09 2010. I do not think we have 2 level molecules anymore\n #if not Chain in mol.levels: continue\n\n haveInFile = mol.parser.hasSsDataInFile()\n molName = mol.name + ' : '\n\n # if stride is not found and no SS info in the file we use PROSS\n if not haveStride and not haveInFile:\n idf.append({\n 'widgetType':Label,\n 'wcfg':{'text':molName},\n 'gridcfg':{'sticky':'w'}})\n\n idf.append({'widgetType':Label, # label NOT button\n 'name':'From Pross',# since it is the only option\n 'wcfg':\n {'text':'From Pross'},\n 'gridcfg':{'sticky':'w', 'row':-1}})\n # we put\n self.molModes[mol.name]='From Pross'\n\n # if stride is installed but no file info we have a choice of\n # stride or PROSS and PROSS is the default\n elif haveStride and not haveInFile:\n defaultValue = 'From Pross'\n idf.append({'name':mol.name,\n 'widgetType': Pmw.RadioSelect,\n 'groupedBy':1,\n 'listtext':['From Stride','From Pross'],\n 'defaultValue': defaultValue,\n 'wcfg':{'label_text':molName,\n 'labelpos':'w'\n },\n 'gridcfg':{'sticky':'ew'}})\n\n # if no stride but file info we have a choice of Pross or file\\\n # and file is the default\n elif not haveStride and haveInFile:\n defaultValue = 'From File'\n idf.append({'name':mol.name,\n 'widgetType': Pmw.RadioSelect,\n 'groupedBy':1,\n 'listtext':['From File','From Pross'],\n 'defaultValue': defaultValue,\n 'wcfg':{'label_text':molName,\n 'labelpos':'w'\n },\n 'gridcfg':{'sticky':'ew'}})\n\n # stride is present, and file has SS info. 
3 choices,\n # file is default\n else:\n #if 'From File' in mol.hasSS:\n # defaultValue = 'From Stride'\n #else:\n defaultValue = 'From File'\n\n idf.append({'name':mol.name,\n 'widgetType': Pmw.RadioSelect,\n 'groupedBy':1,\n 'listtext':['From File', 'From Stride','From Pross'],\n 'defaultValue': defaultValue,\n 'wcfg':{'label_text':molName,\n 'labelpos':'w'\n },\n 'gridcfg':{'sticky':'ew'}})\n \n return idf", "title": "" }, { "docid": "375f940518d3f0e79e7e409ca34c3204", "score": "0.5108437", "text": "def builder(request, doc=None, scriptdata=None):\n context = dict(\n form=PresetForm(initial=dict(user=request.user)),\n presets=Preset.objects.order_by(\"name\"),\n profiles=Preset.objects.order_by(\"name\"),\n doc=doc,\n scriptdata=scriptdata,\n ref=request.GET.get(\"ref\", \"/documents/list\")\n )\n return render(request, \"presets/builder.html\", context)", "title": "" }, { "docid": "1f9374796400c75e2d3fd790c8e7a95d", "score": "0.50869155", "text": "def createContents(self,contents):", "title": "" }, { "docid": "87b80832617bad7ae77959b7752160cf", "score": "0.50680506", "text": "def genContent(self, doc):\n meta = self.genMetaTable(doc)\n div = self.genDiv(doc)\n return meta + div", "title": "" }, { "docid": "9462bc8554668744fda262921be7a0a7", "score": "0.5064631", "text": "def generateDetailForm(layout, options):\n # config all plugins' options\n for option in options:\n if option.has_key('header'):\n layout.addRow(QLabel(\"<strong>%s</strong>\" % option['header']))\n else:\n widget = QLabel()\n widget.setWordWrap(True)\n layout.addRow(option['title'], widget)\n option['widget'] = widget", "title": "" }, { "docid": "5b831d46d28dd279c55a9cc192ed06f4", "score": "0.50631523", "text": "def options_from_front_end(self) -> ContentAnalysisOption:\n options = [Option('formula', None),\n Option('dict_label', None),\n Option('toggle_all', None),\n Option('dict_labels', None),\n Option('active_dicts', None),\n Option('toggle_all_value', None),\n Option('overview_table_selected_column', None),\n Option('overview_table_sort_mode', None)]\n for option in options:\n if option.name in self._front_end_data:\n option.value = self._front_end_data[option.name]\n\n return ContentAnalysisOption(formula=options[0].value,\n dict_label=options[1].value,\n toggle_all=options[2].value,\n dict_labels=options[3].value,\n active_dicts=options[4].value,\n toggle_all_value=options[5].value,\n sort_column=int(options[6].value),\n sort_ascending=bool(\n options[7].value == \"Ascending\"))", "title": "" }, { "docid": "27a44a04d98ecb040a1aa3343d651be5", "score": "0.50411785", "text": "def save(self):\n f = io.open(self.filename,\"w\", encoding=\"utf-8\")\n f.write(\"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?>\\n\")\n f.write('<options>\\n')\n\n self.write_common(f)\n\n for module_name in sorted(self.get_module_names()): # enable a diff\n option_list = self.get_option_list(module_name)\n module_docgen_opts = {}\n for docgen_name in self.docgen_names:\n module_docgen_opts[docgen_name] = []\n f.write('<module name=%s>\\n' % quoteattr(module_name))\n options = option_list.get_options()\n for option_name in sorted(options.keys()): # enable a diff\n option_data = options[option_name]\n if isinstance(option_data, (list, tuple)):\n if option_data and option_data[0] in self.docgen_names:\n module_docgen_opts[option_data[0]].append(\n (option_name, option_data[1]))\n else:\n f.write(' <option name=%s '\n 'value=\"\" length=\"%d\">\\n' % (\n quoteattr(option_name),\n len(option_data) ) )\n for list_index, list_data in 
enumerate(option_data):\n f.write(' <listitem '\n 'number=\"%d\" value=%s/>\\n' % (\n list_index,\n quoteattr(str(list_data))) )\n f.write(' </option>\\n')\n else:\n f.write(' <option name=%s value=%s/>\\n' % (\n quoteattr(option_name),\n quoteattr(str(option_data))) )\n for docgen_name in self.docgen_names:\n if module_docgen_opts[docgen_name]:\n for ix, data in enumerate(module_docgen_opts[docgen_name]):\n f.write(' <docgen-option docgen=%s '\n 'name=%s value=%s/>\\n' %\n (quoteattr(docgen_name),\n quoteattr(data[0]),\n quoteattr(str(data[1])) ))\n self.write_module_common(f, option_list)\n\n f.write('</module>\\n')\n\n f.write('</options>\\n')\n f.close()", "title": "" }, { "docid": "f937439a479529e6fa3a9209ba6c859d", "score": "0.5029603", "text": "def run(self):\n self.options['exer_name'] = self.arguments[0]\n self.options['short_name'] = self.arguments[0]\n self.options['type'] = self.arguments[1]\n self.options['odsa_path'] = os.path.relpath(conf.odsa_path,conf.ebook_path)\n\n # Set defaults for any values that aren't configured\n if 'required' not in self.options:\n self.options['required'] = False\n\n if 'points' not in self.options:\n self.options['points'] = 0\n\n if 'threshold' not in self.options:\n self.options['threshold'] = 1.0\n\n if 'long_name' not in self.options:\n self.options['long_name'] = self.options['exer_name']\n\n if 'align' not in self.options:\n self.options['align'] = 'center'\n\n if 'id' not in self.options:\n self.options['id'] = ''\n\n if 'output' in self.options and self.options['output'] == \"show\":\n self.options['output_code'] = '<p class=\"jsavoutput jsavline\"></p>'\n else:\n self.options['output_code'] = ''\n\n if self.options['type'] == \"dgm\":\n avdgm_node = av_dgm()\n anchor_node = av_anchor()\n\n avdgm_node['exer_name'] = self.options['exer_name']\n anchor_node['ids'].append(self.options['exer_name'])\n avdgm_node += anchor_node\n if self.content:\n node = nodes.Element() # anonymous container for parsing\n self.state.nested_parse(self.content, self.content_offset, node)\n first_node = node[0]\n if isinstance(first_node, nodes.paragraph):\n caption = nodes.caption(first_node.rawsource, '', *first_node.children)\n caption['align']= self.options['align']\n avdgm_node += caption\n\n return [avdgm_node]\n elif self.options['type'] == \"ss\" and self.content:\n avss_node = av_ss()\n avss_node['res'] = SLIDESHOW % self.options\n node = nodes.Element() # anonymous container for parsing\n self.state.nested_parse(self.content, self.content_offset, node)\n first_node = node[0]\n if isinstance(first_node, nodes.paragraph):\n caption = nodes.caption(first_node.rawsource, '', *first_node.children)\n caption['align']= self.options['align']\n avss_node += caption\n return [avss_node]\n elif self.options['type'] == \"ff\":\n res = FRAMES % self.options\n return [nodes.raw('', res, format='html')]\n else:\n res = SLIDESHOW % self.options\n return [nodes.raw('', res, format='html')]", "title": "" }, { "docid": "3b8e27ca6c3de550ec2d29b752d96017", "score": "0.5028624", "text": "def build_field_data(self) -> None:\n for field in self.experiment['fields']:\n # the field name and requirement\n for name, required in field.items():\n # help text and type\n field_data = self.fields[name]\n self.field_data.append(\n (name.title(), field_data['type'], self.map_to_bool(required['required']), field_data['help_text'])\n )", "title": "" }, { "docid": "17c2c5bc73dcd7b7824887a959b87ee4", "score": "0.4993241", "text": "def _build_title(self, action, data, template, conf):\n 
return None", "title": "" }, { "docid": "231a3fbbc6ecc2c1b6bf40409d6d534d", "score": "0.49926642", "text": "def allText(self,l):\n textContents=\"\"\n for c in l.childNodes:\n if c.nodeName == \"lfoption\":\n\n subtext=self.selectOption(c)\n # name = c.attributes[\"name\"].value\n # choice = c.attributes[\"choice\"].value\n # subtext=\"\"\n # for subc in c.childNodes:\n # if self.d_masterInfo.d_options[name].selected(choice):\n # subtext += subc.nodeValue\n textContents += subtext\n \n else:\n if c.nodeType in [ c.TEXT_NODE, c.CDATA_SECTION_NODE]:\n textContents += c.nodeValue\n return textContents", "title": "" }, { "docid": "b248d806dd09a6734e4598ef71ac8372", "score": "0.4992149", "text": "def create_content(control):\n text = ''\n for justification in control.get('justifications', []):\n text += '\\n## {0}\\n'.format(justification.get('name'))\n text += justification.get('narative')\n references = justification.get('references')\n if references:\n text += '\\n### References\\n'\n text += convert_references(references)\n governors = justification.get('governors')\n if governors:\n text += '\\n### Governors\\n'\n text += covert_governors(governors)\n text += \"\\n--------\\n\"\n return text", "title": "" }, { "docid": "dcea8f16d52a7b395be675f5fdae59b0", "score": "0.49906883", "text": "def gref_template_data_from_options(og):\n if not og: return None\n title = og.title or \"\"\n xref = \"\".join([c for c in title if c.isalnum()])\n option_l = []\n for o in og.option_list:\n default = None\n if o.default and not str(o.default).startswith(\"('NO',\"):\n default = o.default\n hlp = None\n if o.help:\n hlp = indent_docstring_by_n(o.help.replace(\"[%default]\", \"\").strip(), 6)\n option_l.append(TemplateData(\n st=str(o),\n default=default,\n hlp=hlp,\n typ=o.type))\n return TemplateData(\n title=title,\n options=option_l,\n xref=xref)", "title": "" }, { "docid": "e8f73c8c60cd0a586cbedcf47bec9bf6", "score": "0.49826708", "text": "def get_template_menu(self):\n\t\ttemplate = ''\n\t\tif self.options.count():\n\t\t\turl = '<http://127.0.0.1:8000/menu/%s>' % self.uuid\n\t\t\ttemplate += url\n\t\t\ttemplate += \"\\n\\n Hello! 
I share with you today's menu :) \\n\\n\"\n\t\t\tfor index, option in enumerate(self.options.all()):\n\t\t\t\ttemplate += \"%s Option: %s \\n\" % (index + 1,option.description) \t\n\t\t\ttemplate += \"\\n\\nHave a nice day!\\n\\n\"\n\t\treturn template", "title": "" }, { "docid": "01165901e4ea47bb8c2e5a50a75fa2b0", "score": "0.49710712", "text": "def gref_template_data_from_options(og):\n if not og: return None\n title = og.title or \"\"\n xref = \"\".join([c for c in title if c.isalnum()])\n option_l = []\n for o in og.option_list:\n default = None\n if o.default and not str(o.default).startswith(\"('NO',\"):\n default = o.default\n hlp = None\n if o.help:\n hlp = indent_docstring_by_n(o.help.replace('[%default]', '').strip(), 6)\n option_l.append(TemplateData(\n st=str(o),\n default=default,\n hlp=hlp,\n typ=o.type))\n return TemplateData(\n title=title,\n options=option_l,\n xref=xref)", "title": "" }, { "docid": "a23675ce89bdcaa812eae8afeaabef91", "score": "0.49699372", "text": "def as_design(self, options, *args):\n return []", "title": "" }, { "docid": "4ff29206f93e9f7110d7dd781f5b764d", "score": "0.496136", "text": "def build(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1e69f2db5d3026030fa2a1071e282f74", "score": "0.49430406", "text": "def run(self, edit):\n selections = self.view.sel()\n if selections:\n # Get required search word\n region = selections[0]\n\n if not self.view.match_selector(region.a,\"source.imba\"):\n return\n\n if region.a == region.b:\n region = self.view.word(region)\n highlighted = self.view.substr(region)\n\n if self.view.match_selector(region.a,\"entity.name.tag.type,entity.name.tagdef\"):\n highlighted = \"tag \" + highlighted\n\n if self.view.match_selector(region.a,\"accessor.invocation,identifier.basic\"):\n highlighted = \"def \" + highlighted\n\n if self.view.match_selector(region.a,\"identifier.class\"):\n highlighted = \"class \" + highlighted\n\n # Get definition locations of word\n self.options = self.view.window().lookup_symbol_in_index(highlighted)\n\n if not self.options:\n sublime.status_message(\n 'Found no definition for \"%s\".' 
% highlighted\n )\n return\n\n # Test if all results are for the same location\n # If they are, don't give a option, just go there\n first_abs_path = None\n\n for abs_path, proj_path, row_col in self.options:\n file_path = abs_path + ':' + str(row_col[0])\n if first_abs_path is not None and not file_path == first_abs_path:\n break\n first_abs_path = file_path\n else:\n self.on_done(file_path)\n return\n\n # Display options in quick panel\n display_options = []\n for option in self.options:\n display_options.append(option[1] + ':' + str(option[2][0]))\n self.view.window().show_quick_panel(\n items=display_options,\n on_select=self.on_done,\n on_highlight=self.on_highlight\n )", "title": "" }, { "docid": "3688a56320e2fe49a71cc896f41e9323", "score": "0.49379677", "text": "def build_option(self, type_b: int, buf: bytes) -> bytes:\n return self.__format.pack(len(buf), type_b) + buf", "title": "" }, { "docid": "e5576f49a78b37e3e78c03d9cb9246fc", "score": "0.4937683", "text": "def generate_option(self):\n return '{} {} a {}'.format(\n random.choice(self.ACTIVITIES),\n random.choice(self.WITH_USING_EATING),\n random.choice(self.THINGS)\n )", "title": "" }, { "docid": "f52fa5365cf034ca6df2be3c3244b5d8", "score": "0.49303064", "text": "async def create_options(self) -> O:", "title": "" }, { "docid": "2454f39ad984e52e9ada4fd07b4ea489", "score": "0.4927432", "text": "def makecmd(self, options):", "title": "" }, { "docid": "6645c75d3908545b6a00d2638486a8bb", "score": "0.49257994", "text": "def addOption(self, tag, prompt='', alist=[]):\n\n self.content[tag] = {'type': 'option', 'value': tk.StringVar()} # init var to tk var\n if prompt:\n frame = ttk.LabelFrame(self.master, text=prompt) # create titled frame\n else:\n frame = ttk.Frame(self.master) # no title\n option = tk.OptionMenu(frame, self.content[tag]['value'], *alist) # create optionmenu\n option.grid()\n self.content[tag]['frame'] = frame\n self.content[tag]['widget'] = option\n return option", "title": "" }, { "docid": "8a6fdfd96a8ed77428f1518f031e5e4b", "score": "0.49005282", "text": "def content(self):\n return None", "title": "" }, { "docid": "2eb761419bc836a230a11a03fe15a807", "score": "0.48986822", "text": "def build(self):\n return '\\n'.join([str(e) for e in self.entries]) + '\\n'", "title": "" }, { "docid": "d49baac187492420ca00f5cf9805e871", "score": "0.48935443", "text": "def make_menu():\n dict_data, all_graph_list, all_key_list, all_table_list = retrieve_data()\n\n java_script_code = \"\"\n multiple_menu = \"\"\n i = 0\n list_keys = []\n\n for key, value in dict_data.items():\n list_keys.append(key)\n java_script_code += \"document.multiselect('#\" + str(key) + \"');\"\n multiple_menu += \"<select id =\" + str(key) + \" multiple>\"\n for key_word in value:\n multiple_menu += \"<option text =\" + key_word + \" value =\" + str(i) + \">\" + key_word + \"</option>\"\n i += 1\n multiple_menu += \"</select>\"\n\n return java_script_code, multiple_menu", "title": "" }, { "docid": "d41f32624c31c12e4377cb528e021d18", "score": "0.48922282", "text": "def __build_dropdown(self, info_label):\n\n # Creating title label for the dropdown menu.\n dropdown_label = tk.Label(self.modal_frame,\n text=\"Pick agent training strategy:\",\n bg=\"white\"\n )\n # Placing dropdown menu on the modal frame grid.\n dropdown_label.grid(\n row=0, column=0,\n padx=(5, 5),\n pady=(5, 0)\n )\n\n dropdown_options = tk.StringVar(self.main_gui_master)\n\n # Default value is 'q agent vs q agent'.\n dropdown_options.set(self.training_options['QQ'])\n\n # Creating the 
dropdown element\n dropdown = tk.OptionMenu(self.modal_frame,\n dropdown_options,\n *self.training_options.values(),\n )\n\n dropdown.configure(\n relief=\"flat\",\n bg=\"white\",\n width=20,\n fg=\"green\"\n )\n\n # Placing the dropdown in the grid.\n dropdown.grid(\n row=1, column=0,\n padx=(10, 0),\n pady=(0, 50),\n sticky=\"ew\"\n )\n\n self.selected_strategy = dropdown_options.get()\n\n # Inline function that updates the info label based on\n # the strategy selected.\n def update_info_label(*args):\n\n user_selection = dropdown_options.get()\n self.selected_strategy = user_selection\n\n for option in self.training_options.values():\n if option == user_selection:\n info_label.configure(text=self.strategy_info_text[option])\n\n dropdown_options.trace('w', update_info_label)", "title": "" }, { "docid": "b4ea51e61b83755830d836a8639a2252", "score": "0.4890578", "text": "def build(self, builder):\n raise NotImplementedError()", "title": "" }, { "docid": "6be047fc888b35ecaf02ff885002c1a3", "score": "0.48894134", "text": "def build(self, builder):\n builder.start(\"SourceID\", {})\n builder.data(self.source_id)\n builder.end(\"SourceID\")", "title": "" }, { "docid": "2758ccc6e113cd701f1889f846a8b82b", "score": "0.4889367", "text": "def build(self):\n\t\tself.scripts = self.read_scripts()\n\t\t\n\t\tif not self.scripts:\n\t\t\tself.q_model.clear()\n\t\t\tself.q_model.setHorizontalHeaderLabels(self._header_items)\n\t\t\treturn\n\n\t\tfor script in self.scripts:\n\t\t\tname = QtGui.QStandardItem(script)\n\t\t\tauthor = QtGui.QStandardItem(self.scripts[script]['author'])\n\t\t\tcategory = QtGui.QStandardItem(self.scripts[script]['category'])\n\t\t\tversion = QtGui.QStandardItem(str(self.scripts[script]['version']))\n\t\t\tfile_ext = os.path.splitext(self.scripts[script]['filename'])[1]\n\t\t\tfiletype = QtGui.QStandardItem(self.icon_switch(file_ext), '')\n\n\t\t\titem = self.q_model.appendRow([filetype, name, category, author, version])", "title": "" }, { "docid": "e21344734d9428eb178a9a38091a539c", "score": "0.48885906", "text": "def buildColumn_main(self,parent, asScroll = False): \n if asScroll:\n _inside = mUI.MelScrollLayout(parent,useTemplate = 'cgmUISubTemplate') \n else:\n _inside = mUI.MelColumnLayout(parent,useTemplate = 'cgmUISubTemplate') \n \n cgmUI.add_Header('Single')\n \"\"\"\n #>>>Objects Load Row ---------------------------------------------------------------------------------------\n _row_objLoad = mUI.MelHSingleStretchLayout(_inside,ut='cgmUITemplate',padding = 5) \n\n mUI.MelSpacer(_row_objLoad,w=10)\n mUI.MelLabel(_row_objLoad, \n l='Source:')\n\n uiTF_objLoad = mUI.MelLabel(_row_objLoad,ut='cgmUITemplate',l='',\n en=True)\n\n self.uiTF_objLoad = uiTF_objLoad\n cgmUI.add_Button(_row_objLoad,'<<',\n cgmGEN.Callback(uiFunc_load_selected,self),\n \"Load first selected object.\") \n _row_objLoad.setStretchWidget(uiTF_objLoad)\n mUI.MelSpacer(_row_objLoad,w=10)\n\n _row_objLoad.layout()\n uiFunc_load_selected(self)\n \"\"\"\n for key in ['source','export']:\n _plug = 'var_path_{0}'.format(key)\n try:self.__dict__[_plug]\n except:self.create_guiOptionVar('path_{0}'.format(key),defaultValue = '') \n \n mUI.MelSeparator(_inside,ut='cgmUISubTemplate',h=3)\n \n _row = mUI.MelHSingleStretchLayout(_inside,ut='cgmUISubTemplate',padding = 5)\n\n mUI.MelSpacer(_row,w=5) \n mUI.MelLabel(_row,l='{0}: '.format(CORESTRINGS.capFirst(key)))\n \n self.uiTF[key] = mUI.MelTextField(_row,\n text = self.__dict__[_plug].getValue(),\n ann='Local Path | {0}'.format(key),\n #cc = 
cgmGEN.Callback(uiCC_checkPath,self,key,'local'),)\n )\n _row.setStretchWidget( self.uiTF[key] )\n \n mc.button(parent=_row,\n l = 'Set',\n ut = 'cgmUITemplate',\n c = cgmGEN.Callback(uiButton_setPathToTextField,self,key,self.uiTF),\n )\n mc.button(parent=_row,\n l = 'Explorer',\n ut = 'cgmUITemplate',\n c = cgmGEN.Callback(uiButton_openPath,self,key,self.uiTF),\n ) \n mUI.MelSpacer(_row,w=5) \n _row.layout()\n \n \n \n \n #>>> Import options ---------------------------------------------------------------------------------------\n _row = mUI.MelHSingleStretchLayout(_inside,ut='cgmUISubTemplate')\n mUI.MelSpacer(_row,w=1)\n \n mUI.MelLabel(_row,l=\" Options:\")\n _row.setStretchWidget(mUI.MelSeparator(_row,w=10))\n \n for o,d in d_importOptions.iteritems():\n _plug = 'var_{0}'.format(o)\n try:self.__dict__[_plug]\n except:self.create_guiOptionVar('{0}'.format(o),defaultValue = 1)\n \n cb = mUI.MelCheckBox(_row,\n l = d.get('s',o),\n annotation = d.get('ann'),\n value = self.__dict__[_plug].value,\n onCommand = cgmGEN.Callback(self.__dict__[_plug].setValue,1),\n offCommand = cgmGEN.Callback(self.__dict__[_plug].setValue,0))\n self.uiCB_d[o] = cb\n #mUI.MelSpacer(_row,w=5)\n \n mUI.MelSpacer(_row,w=1)\n _row.layout()\n \n \n \n #>>> ---------------------------------------------------------------------------------------------------\n\n \n _mRow = mUI.MelHSingleStretchLayout(_inside,ut='cgmUISubTemplate',padding = 10)\n mUI.MelLabel(_mRow,l=\" Pivot:\")\n #_mRow.setStretchWidget(mUI.MelSeparator(_mRow,)) \n\n _optionMenu = mUI.MelOptionMenu(_mRow,ut = 'cgmUITemplate',h=25)\n mUI.MelSpacer(_mRow,w=10)\n \n _mRow.setStretchWidget(_optionMenu)\n \n self.uiOM_pivot = _optionMenu\n \n _plug = 'var_pivot'\n try:self.__dict__[_plug]\n except:self.create_guiOptionVar('pivot'.format(o),defaultValue = 'frontBottom')\n \n for o in ['none','frontCenter','frontBottom']:\n _optionMenu.append(o)\n \n _optionMenu.selectByValue(self.__dict__[_plug].value)\n _mRow.layout()\n \n \n mc.setParent(_inside)\n mUI.MelSpacer(_inside,h=5)\n #cgmUI.add_HeaderBreak()\n #mUI.MelSpacer(_inside,h=5)\n \n #>>> Buttons --------------------------------------------------------------------------------------------\n _row_base = mUI.MelHLayout(_inside,ut='cgmUISubTemplate',padding = 5)\n mc.button(parent=_row_base,\n l = 'Import',\n ut = 'cgmUITemplate',\n c = lambda *a:self.uiFunc_importSVG(),\n ann = \"Import SVG\")\n\n mc.button(parent=_row_base,\n l = 'Export',\n ut = 'cgmUITemplate',\n c = lambda *a:self.uiFunc_exportSVG(), \n #c = lambda *a:SNAPCALLS.snap_action(None,'closestPoint'),\n ann = \"Export SVG\") \n _row_base.layout() \n \n #Batch ===================================================================================================\n mc.setParent(_inside) \n cgmUI.add_Header('Batch')\n mUI.MelSpacer(_inside,h=5)\n _row_base = mUI.MelHLayout(_inside,ut='cgmUISubTemplate',padding = 5)\n mc.button(parent=_row_base,\n l = 'Import all SVG',\n ut = 'cgmUITemplate',\n c = lambda *a:self.uiFunc_importBatchSVG(), \n #c = lambda *a:SNAPCALLS.snap_action(None,'closestPoint'),\n ann = \"Batch import SVGs from path\") \n _row_base.layout() \n \n \n mUI.MelSpacer(_inside,h=5)\n \n _row_base = mUI.MelHLayout(_inside,ut='cgmUISubTemplate',padding = 5)\n mc.button(parent=_row_base,\n l = 'Import/Export All',\n ut = 'cgmUITemplate',\n c = lambda *a:self.uiFunc_importExportBatchSVG(), \n #c = lambda *a:SNAPCALLS.snap_action(None,'closestPoint'),\n ann = \"Batch import/export SVGs from path\") \n _row_base.layout()\n \n 
mUI.MelSpacer(_inside,h=5)\n \"\"\"\n _row_base = mUI.MelHLayout(_inside,ut='cgmUISubTemplate',padding = 5)\n mc.button(parent=_row_base,\n l = 'ViewPort Shading',\n ut = 'cgmUITemplate',\n c = lambda *a:uiButton_setViewportShading(), \n #c = lambda *a:SNAPCALLS.snap_action(None,'closestPoint'),\n ann = \"Change viewport options\") \n _row_base.layout() \"\"\" \n \n return _inside", "title": "" }, { "docid": "c44996c3161af6231281be915a9ea7ed", "score": "0.4886704", "text": "def build(self):\r\n return UiDesign()", "title": "" }, { "docid": "7f3d6ef125ff4e5e950b0efeee0ce2b3", "score": "0.4873958", "text": "def _find_options_in_meta(self, content: str) -> OPTION_TYPE:\n\n found: OPTION_TYPE = {}\n\n for x in re.findall(r\"<meta [^>]*>\", content):\n if re.search(rf\"name=[\\\"']{self.config.meta_tag_prefix}\", x):\n name = re.findall(\n rf\"name=[\\\"']{self.config.meta_tag_prefix}([^\\\"']*)\", x)[0]\n found[name] = re.findall(r\"content=[\\\"']([^\\\"']*)\", x)[0]\n\n return found", "title": "" }, { "docid": "8545e2524fb41fa6d779affcce4d4ddb", "score": "0.48725381", "text": "def add_subparser_build(self):\n parser = self.add_subparser(\n 'build',\n parser_help='build the container image',\n )\n\n parser.add_argument(\n '--container-runtime',\n choices=[\n 'podman',\n 'docker',\n ],\n help='override the container runtime to use'\n )", "title": "" }, { "docid": "438736d4f41aa3fbf791876fd5a3dcec", "score": "0.48713195", "text": "def buildGui(self):\r\n return None", "title": "" }, { "docid": "74473917a26c30a0a26e16478b85ebc6", "score": "0.4866073", "text": "def designate(title, section='main'):\n \n # begin designation\n designation = ' ' * 20\n \n # if marked for user parameters\n if section == 'settings':\n \n # begin designation with indicator\n designation = '*** settings -----> '\n \n # add code designator\n designation += '^ [code] (for {}: {})'.format(section, title)\n \n # print\n print(designation)\n \n return None", "title": "" }, { "docid": "ced219dbe27c470bb4bf34cce1c15f75", "score": "0.4859541", "text": "def build(self):\n built_string = self.name\n if self.opts:\n built_string += '='\n built_string += ':'.join('{0}={1}'.format(opt, val) for opt, val in self.opts.items())\n return built_string", "title": "" }, { "docid": "e72da6623279abb0f7b697b38c3e9479", "score": "0.48590454", "text": "def buildOptions(self):\n ZenScriptBase.buildOptions(self)\n self.parser.add_option('--organizer',\n dest=\"orgname\",default=None,\n help=\"specify the organizer for which you'd like to query software for, \"\\\n \"this setting will take presedence over the devices setting\")\n self.parser.add_option('--device',\n dest=\"devices\", type=\"str\", default=None, action=\"append\",\n help=\"specify the device(s) you want to query software for\")\n self.parser.add_option('--settype', type=\"str\",\n dest=\"settype\", default=None,\n help=\"specify which type of set operation you'd like to perform, \\\n i.e. 
union, difference, intersection\")", "title": "" }, { "docid": "a458540c4a6bb58f901b7822c9706905", "score": "0.4845436", "text": "def add_option(self):\r\n pass", "title": "" }, { "docid": "1e4582e23debb577ee1c28bdf1a42450", "score": "0.48391053", "text": "def _build(self):\n pass", "title": "" }, { "docid": "338a3c12b2f5c7584d7fbbd6595f1f78", "score": "0.48348072", "text": "def generate_choices(self, ctx):\n pass", "title": "" }, { "docid": "d6e74b6ce1bef395f1e232911019c953", "score": "0.48326936", "text": "def build(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "da694ceba0df109a946ced1bd39f494e", "score": "0.4831457", "text": "def build(self) -> bytes:\n if self.options_resolved:\n raise ValueError(\"option indexes must be assigned before building\")\n oi1 = typing.cast(int, self.option_index_1)\n oi2 = typing.cast(int, self.option_index_2)\n no1 = typing.cast(int, self.num_options_1)\n no2 = typing.cast(int, self.num_options_2)\n return self.__format.pack(\n self.sd_type.value,\n oi1,\n oi2,\n (no1 << 4) | no2,\n self.service_id,\n self.instance_id,\n self.major_version,\n self.ttl >> 16,\n self.ttl & 0xFFFF,\n self.minver_or_counter,\n )", "title": "" }, { "docid": "e949f49e430df6a0246bd4b2da5897a5", "score": "0.48289722", "text": "def get ( self ):\n\n def get_list ( value_str ):\n if value_str is None:\n return []\n else:\n l = value_str.split ( ', ' )\n return [ e for e in l if e.strip() ]\n\n\n fdef = roverlay.rpackage.descriptionfields.DescriptionFields()\n parser = self._parser\n\n\n for field_name in parser.sections():\n field = (\n roverlay.rpackage.descriptionfields.DescriptionField ( field_name )\n )\n\n for option, value in parser.items ( field_name, 1 ):\n if option == 'alias' or option == 'alias_withcase':\n for alias in get_list ( value ):\n field.add_simple_alias ( alias, True )\n\n elif option == 'alias_nocase':\n for alias in get_list ( value ):\n field.add_simple_alias ( alias, False )\n\n elif option == 'default_value':\n field.set_default_value ( value )\n\n elif option == 'allowed_value':\n field.add_allowed_value ( value )\n\n elif option == 'allowed_values':\n for item in get_list ( value ):\n field.add_allowed_value ( item )\n\n elif option == 'flags':\n for flag in get_list ( value ):\n field.add_flag ( flag )\n\n else:\n # treat option as flag\n field.add_flag ( option )\n\n fdef.add ( field )\n # --- end for;\n\n fdef.update()\n\n if fdef.get_fields_with_flag ( 'isLicense' ):\n fdef.license_map = self._create_license_map()\n\n return fdef", "title": "" }, { "docid": "4b3d1cb074762a52acddcd8bfc10eaf3", "score": "0.4826522", "text": "def perform_command(self):\n if len(self.actual_arguments) < 1:\n return self.print_help()\n\n if self.has_option([u\"-e\", u\"--examples\"]):\n return self.print_examples(False)\n\n if self.has_option(u\"--examples-all\"):\n return self.print_examples(True)\n\n if self.has_option([u\"--list-parameters\"]):\n return self.print_parameters()\n\n parameter = self.has_option_with_value(u\"--list-values\")\n if parameter is not None:\n return self.print_values(parameter)\n elif self.has_option(u\"--list-values\"):\n return self.print_values(u\"?\")\n\n # NOTE list() is needed for Python3, where keys() is not a list!\n demo = self.has_option(list(self.DEMOS.keys()))\n demo_parameters = u\"\"\n download_from_youtube = self.has_option([u\"-y\", u\"--youtube\"])\n largest_audio = self.has_option(u\"--largest-audio\")\n keep_audio = self.has_option(u\"--keep-audio\")\n output_html = self.has_option(u\"--output-html\")\n 
validate = not self.has_option(u\"--skip-validator\")\n print_faster_rate = self.has_option(u\"--faster-rate\")\n print_rates = self.has_option(u\"--rate\")\n print_zero = self.has_option(u\"--zero\")\n presets_word = self.has_option(u\"--presets-word\")\n\n if demo:\n validate = False\n for key in self.DEMOS:\n if self.has_option(key):\n demo_parameters = self.DEMOS[key]\n audio_file_path = demo_parameters[u\"audio\"]\n text_file_path = demo_parameters[u\"text\"]\n config_string = demo_parameters[u\"config\"]\n sync_map_file_path = demo_parameters[u\"syncmap\"]\n # TODO allow injecting rconf options directly from DEMOS options field\n if key == u\"--example-cewsubprocess\":\n self.rconf[RuntimeConfiguration.CEW_SUBPROCESS_ENABLED] = True\n elif key == u\"--example-ctw-espeak\":\n self.rconf[RuntimeConfiguration.TTS] = \"custom\"\n self.rconf[RuntimeConfiguration.TTS_PATH] = self.CTW_ESPEAK\n elif key == u\"--example-ctw-speect\":\n self.rconf[RuntimeConfiguration.TTS] = \"custom\"\n self.rconf[RuntimeConfiguration.TTS_PATH] = self.CTW_SPEECT\n elif key == u\"--example-festival\":\n self.rconf[RuntimeConfiguration.TTS] = \"festival\"\n elif key == u\"--example-mws\":\n self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH] = \"1.500\"\n self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT] = \"0.500\"\n elif key == u\"--example-multilevel-tts\":\n self.rconf[RuntimeConfiguration.TTS_L1] = \"festival\"\n self.rconf[RuntimeConfiguration.TTS_L2] = \"festival\"\n self.rconf[RuntimeConfiguration.TTS_L3] = \"espeak\"\n elif key == u\"--example-words-festival-cache\":\n self.rconf[RuntimeConfiguration.TTS] = \"festival\"\n self.rconf[RuntimeConfiguration.TTS_CACHE] = True\n elif key == u\"--example-faster-rate\":\n print_faster_rate = True\n elif key == u\"--example-no-zero\":\n print_zero = True\n elif key == u\"--example-py\":\n self.rconf[RuntimeConfiguration.C_EXTENSIONS] = False\n elif key == u\"--example-rate\":\n print_rates = True\n elif key == u\"--example-remove-nonspeech-rateaggressive\":\n print_rates = True\n elif key == u\"--example-youtube\":\n download_from_youtube = True\n break\n else:\n if len(self.actual_arguments) < 4:\n return self.print_help()\n audio_file_path = self.actual_arguments[0]\n text_file_path = self.actual_arguments[1]\n config_string = self.actual_arguments[2]\n sync_map_file_path = self.actual_arguments[3]\n\n if presets_word:\n self.print_info(u\"Preset for word-level alignment\")\n self.rconf[RuntimeConfiguration.MFCC_MASK_NONSPEECH] = True\n self.rconf[RuntimeConfiguration.MFCC_MASK_NONSPEECH_L3] = True\n\n html_file_path = None\n if output_html:\n keep_audio = True\n html_file_path = sync_map_file_path + u\".html\"\n\n if download_from_youtube:\n youtube_url = audio_file_path\n\n if (not download_from_youtube) and (not self.check_input_file(audio_file_path)):\n return self.ERROR_EXIT_CODE\n if not self.check_input_file(text_file_path):\n return self.ERROR_EXIT_CODE\n if not self.check_output_file(sync_map_file_path):\n return self.ERROR_EXIT_CODE\n if (html_file_path is not None) and (not self.check_output_file(html_file_path)):\n return self.ERROR_EXIT_CODE\n\n self.check_c_extensions()\n\n if demo:\n msg = []\n msg.append(u\"Running example task with arguments:\")\n if download_from_youtube:\n msg.append(u\" YouTube URL: %s\" % youtube_url)\n else:\n msg.append(u\" Audio file: %s\" % audio_file_path)\n msg.append(u\" Text file: %s\" % text_file_path)\n msg.append(u\" Config string: %s\" % config_string)\n msg.append(u\" Sync map file: %s\" % 
sync_map_file_path)\n if len(demo_parameters[u\"options\"]) > 0:\n msg.append(u\" Options: %s\" % demo_parameters[u\"options\"])\n self.print_info(u\"\\n\".join(msg))\n\n if validate:\n self.print_info(u\"Validating config string (specify --skip-validator to bypass)...\")\n validator = Validator(logger=self.logger)\n result = validator.check_configuration_string(config_string, is_job=False, external_name=True)\n if not result.passed:\n self.print_error(u\"The given config string is not valid:\")\n self.print_generic(result.pretty_print())\n return self.ERROR_EXIT_CODE\n self.print_info(u\"Validating config string... done\")\n\n if download_from_youtube:\n try:\n self.print_info(u\"Downloading audio from '%s' ...\" % youtube_url)\n downloader = Downloader(logger=self.logger)\n audio_file_path = downloader.audio_from_youtube(\n youtube_url,\n download=True,\n output_file_path=None,\n largest_audio=largest_audio\n )\n self.print_info(u\"Downloading audio from '%s' ... done\" % youtube_url)\n except ImportError:\n self.print_no_pafy_error()\n return self.ERROR_EXIT_CODE\n except Exception as exc:\n self.print_error(u\"An unexpected error occurred while downloading audio from YouTube:\")\n self.print_error(u\"%s\" % exc)\n return self.ERROR_EXIT_CODE\n else:\n audio_extension = gf.file_extension(audio_file_path)\n if audio_extension.lower() not in AudioFile.FILE_EXTENSIONS:\n self.print_warning(u\"Your audio file path has extension '%s', which is uncommon for an audio file.\" % audio_extension)\n self.print_warning(u\"Attempting at executing your Task anyway.\")\n self.print_warning(u\"If it fails, you might have swapped the first two arguments.\")\n self.print_warning(u\"The audio file path should be the first argument, the text file path the second.\")\n\n try:\n self.print_info(u\"Creating task...\")\n task = Task(config_string, logger=self.logger)\n task.audio_file_path_absolute = audio_file_path\n task.text_file_path_absolute = text_file_path\n task.sync_map_file_path_absolute = sync_map_file_path\n self.print_info(u\"Creating task... done\")\n except Exception as exc:\n self.print_error(u\"An unexpected error occurred while creating the task:\")\n self.print_error(u\"%s\" % exc)\n return self.ERROR_EXIT_CODE\n\n try:\n self.print_info(u\"Executing task...\")\n executor = ExecuteTask(task=task, rconf=self.rconf, logger=self.logger)\n executor.execute()\n self.print_info(u\"Executing task... done\")\n except Exception as exc:\n self.print_error(u\"An unexpected error occurred while executing the task:\")\n self.print_error(u\"%s\" % exc)\n return self.ERROR_EXIT_CODE\n\n try:\n self.print_info(u\"Creating output sync map file...\")\n path = task.output_sync_map_file()\n self.print_info(u\"Creating output sync map file... 
done\")\n self.print_success(u\"Created file '%s'\" % path)\n except Exception as exc:\n self.print_error(u\"An unexpected error occurred while writing the sync map file:\")\n self.print_error(u\"%s\" % exc)\n return self.ERROR_EXIT_CODE\n\n if output_html:\n try:\n parameters = {}\n parameters[gc.PPN_TASK_OS_FILE_FORMAT] = task.configuration[\"o_format\"]\n parameters[gc.PPN_TASK_OS_FILE_EAF_AUDIO_REF] = task.configuration[\"o_eaf_audio_ref\"]\n parameters[gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF] = task.configuration[\"o_smil_audio_ref\"]\n parameters[gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF] = task.configuration[\"o_smil_page_ref\"]\n self.print_info(u\"Creating output HTML file...\")\n task.sync_map.output_html_for_tuning(audio_file_path, html_file_path, parameters)\n self.print_info(u\"Creating output HTML file... done\")\n self.print_success(u\"Created file '%s'\" % html_file_path)\n except Exception as exc:\n self.print_error(u\"An unexpected error occurred while writing the HTML file:\")\n self.print_error(u\"%s\" % exc)\n return self.ERROR_EXIT_CODE\n\n if download_from_youtube:\n if keep_audio:\n self.print_info(u\"Option --keep-audio set: keeping downloaded file '%s'\" % audio_file_path)\n else:\n gf.delete_file(None, audio_file_path)\n\n if print_zero:\n zero_duration = [l for l in task.sync_map_leaves(SyncMapFragment.REGULAR) if l.begin == l.end]\n if len(zero_duration) > 0:\n self.print_warning(u\"Fragments with zero duration:\")\n for fragment in zero_duration:\n self.print_generic(u\" %s\" % (fragment.pretty_print))\n\n if print_rates:\n self.print_info(u\"Fragments with rates:\")\n for fragment in task.sync_map_leaves(SyncMapFragment.REGULAR):\n self.print_generic(u\" %s\\t%.3f\" % (fragment.pretty_print, fragment.rate or 0.0))\n\n if print_faster_rate:\n max_rate = task.configuration[\"aba_rate_value\"]\n if max_rate is not None:\n faster = [l for l in task.sync_map_leaves(SyncMapFragment.REGULAR) if l.rate >= max_rate + Decimal(\"0.001\")]\n if len(faster) > 0:\n self.print_warning(u\"Fragments with rate greater than %.3f:\" % max_rate)\n for fragment in faster:\n self.print_generic(u\" %s\\t%.3f\" % (fragment.pretty_print, fragment.rate or 0.0))\n\n return self.NO_ERROR_EXIT_CODE", "title": "" }, { "docid": "097a0b2db95d3e23d9485015a1f1e064", "score": "0.48255682", "text": "def do_options(self, line):\n headers_table, output_table = [\"Option\", \"Value\", \"Description\"], []\n for option, value in self.options.items():\n output_table.append([option, value[0], value[1]])\n print(display_messages(\"Available Options:\", info=True, sublime=True))\n return display_tabulate(headers_table, output_table)", "title": "" }, { "docid": "a0df648373d981e542086d5748e8029b", "score": "0.48178133", "text": "def build(self, e, view):\n assert self.path is not None\n line = '\\t'+'.'*70+'\\n'\n self.openOutput(self.path)\n self.write('@charset \"UTF-8\";\\n')\n self.write(\"/*\\n%s\\tGenerated by PageBot Version %s\\n%s*/\\n\" % (line, pagebot.__version__, line))\n\n doc = e.doc\n self.buildRootStyle(doc)\n self.buildMainStyles(doc)\n self.closeOutput()", "title": "" }, { "docid": "60d2d7f9aa639b76ff3e2394a274449b", "score": "0.48125952", "text": "def do_section(self, argumentsList):\n self.globalData.hasSomethingBeenAddedToFiles[\"geomMediaFile\"] = True\n self.currentCommand[\"selection\"] = True\n\n self.selectionCommandDict[\"level\"] = argumentsList[0]\n self.selectionCommandDict[\"height\"] = argumentsList[1]\n self.selectionCommandDict[\"height2\"] = argumentsList[2]\n 
self.selectionCommandDict[\"mediumName\"] = argumentsList[3]\n self.selectionCommandDict[\"name\"] = argumentsList[4]\n\n tmpName = self.selectionCommandDict[\"name\"]\n while True:\n if self.selectionCommandDict[\"name\"] in self.globalData.currentElementsNamesDict:\n self.globalData.numberForEqualElementsNames += 1\n self.selectionCommandDict[\"name\"] = tmpName + str(self.globalData.numberForEqualElementsNames)\n else: \n break\n self.selectionCommandDict[\"upperName\"] = self.selectionCommandDict[\"name\"] + \"U\"\n self.selectionCommandDict[\"lowerName\"] = self.selectionCommandDict[\"name\"] + \"D\"\n self.globalData.currentElementsNamesDict[self.selectionCommandDict[\"upperName\"]] = self.selectionCommandDict[\"upperName\"]\n self.globalData.elementsInThisFile[self.selectionCommandDict[\"upperName\"]] = self.selectionCommandDict[\"upperName\"]\n self.globalData.currentElementsNamesDict[self.selectionCommandDict[\"lowerName\"]] = self.selectionCommandDict[\"lowerName\"]\n self.globalData.elementsInThisFile[self.selectionCommandDict[\"lowerName\"]] = self.selectionCommandDict[\"lowerName\"]\n \n if self.createPyFiles:\n content = \"\"\" qwm_doc.addObject('Sketcher::SketchObject', 'sketch_{name}')\n qwm_doc.sketch_{name}.Placement = FreeCAD.Placement(FreeCAD.Vector(0.0,0.0,{level}),FreeCAD.Rotation(0.0, 0.0, 0.0, 1.0))\\n\"\"\".format(\n name = self.selectionCommandDict[\"name\"], \n level = self.selectionCommandDict[\"level\"],\n )\n \n self.globalData.writeToGeomMediaFile(content)", "title": "" }, { "docid": "3d093f28ae627616428ae31918283dd6", "score": "0.4805788", "text": "def render_criteria_options():\n seen = set()\n\n def unique(x):\n u = x not in seen\n seen.add(x)\n return u\n\n options = [json.loads(option) for option in filter(unique, request.args.getlist('o'))]\n # pdb.set_trace()\n # options = [o['data'] for o in options]\n return {'options': options, 'view': False}", "title": "" }, { "docid": "82ed8e01f2947c95be598c82b1746bc3", "score": "0.48056963", "text": "def build(self):\n raise NotImplementedError(\"build() needs to be implemented\")", "title": "" }, { "docid": "e8a8b876755a11b2b2aea6b340991e3c", "score": "0.48042586", "text": "def get_optionspanel(self, parent):\n drawobj, _id = self.get_current_selection()\n if drawobj is None:\n # no current selection-> show options of whole drawing\n size = (200, -1)\n self._optionspanel = NaviPanel(parent, obj=self._canvas.get_drawing(),\n attrconfigs=None,\n #tables = None,\n # table = None, id=None, ids=None,\n groupnames=['options'],\n mainframe=self.parent.get_mainframe(),\n #pos=wx.DefaultPosition, size=size, style = wx.MAXIMIZE_BOX|wx.RESIZE_BORDER,\n func_apply=self.on_apply_option,\n immediate_apply=False, panelstyle='default', # 'instrumental'\n standartbuttons=['apply', 'restore'])\n else:\n # show option of currently selected drawobj\n size = (200, -1)\n self._optionspanel = ObjPanel(parent, obj=drawobj,\n id=_id,\n attrconfigs=None,\n #tables = None,\n # table = None, id=None, ids=None,\n groupnames=['options'],\n func_change_obj=None,\n show_groupnames=False, show_title=True, is_modal=False,\n mainframe=self.parent.get_mainframe(),\n pos=wx.DefaultPosition, size=size, style=wx.MAXIMIZE_BOX | wx.RESIZE_BORDER,\n func_apply=self.on_apply_option,\n immediate_apply=False, panelstyle='default', # 'instrumental'\n standartbuttons=['apply', 'restore'])\n\n return self._optionspanel", "title": "" }, { "docid": "07d638067068d89d358159cb793284c1", "score": "0.48040456", "text": "def make_content(self):\n myvars, v = 
self.make_vars()\n\n ns = self.sketch.namespace\n packages = \"\\n\".join(map(lambda x: ' \"packages %s\" inherit => \"true\", usebundle => cfdc_blueprint:packages($(runenv), \"%s\", $(%s_packages), \"$(blueprint_packages_%s[$(%s_packages)])\");' % (x, x, x, x, x),\n v['package_manager']))\n\n packvars = \"\\n\".join(map(lambda x: ' \"%s_packages\" slist => getindices(\"blueprint_packages_%s\");' % (x, x),\n v['package_manager']))\n\n self.content = \"\"\"\nbody file control\n{\n namespace => \"%s\";\n}\n\nbundle agent install(runenv, metadata)\n{\n classes:\n \"$(vars)\" expression => \"default:runenv_$(runenv)_$(vars)\";\n \"not_$(vars)\" expression => \"!default:runenv_$(runenv)_$(vars)\";\n\n vars:\n \"vars\" slist => { \"@(default:$(runenv).env_vars)\" };\n \"$(vars)\" string => \"$(default:$(runenv).$(vars))\";\n\n \"all_files\" slist => getindices(\"blueprint_files\");\n\n%s\n%s\n\n methods:\n \"utils\" usebundle => default:eu($(runenv));\n\n activated::\n \"files\" inherit => \"true\", usebundle => cfdc_blueprint:files($(runenv), concat(dirname($(this.promise_filename)), \"/files\"), $(default:eu.path_prefix), $(all_files), \"%s:install.blueprint_files[$(all_files)]\");\n%s \n # \"sources\" inherit => \"true\", usebundle => cfdc_blueprint:sources($(runenv), dirname($(this.promise_filename)), $(blueprint_sources));\n\n verbose::\n \"metadata\" usebundle => default:report_metadata($(this.bundle), $(metadata)),\n inherit => \"true\";\n}\n\"\"\" % (ns, myvars, packvars, ns, packages)", "title": "" }, { "docid": "8e44955844d046243bbb620ac7705f27", "score": "0.48023203", "text": "def createPDFidContent(program, pdfIDTab):\n\n allNames = createChkBox(pdfIDTab, \"Display all the names\", \"none 10\", 2, 0, W)\n extraData = createChkBox(pdfIDTab, \"Display extra data\", \"none 10\", 3, 0, W)\n noZero = createChkBox(pdfIDTab, \"No zeros (supress output for counts equal to zero)\", \"none 10\", 4, 0, W)\n noJavascript = createChkBox(pdfIDTab, \"Disable JavaScript and auto launch\", \"none 10\", 5, 0, W)\n fileScan = createChkBox(pdfIDTab, \"force the scan of the file, even without proper %PDF header\", \"none 10\", 6, 0, W)\n outputLogFile = createChkBox(pdfIDTab, \"Output to log file\", \"none 10\", 7, 0, W)\n scanADirectory = createChkBox(pdfIDTab, \"Scan a directory\", \"none 10\", 8, 0, W)\n\n optionsArr = {\"allNames\": allNames, \"extraData\": extraData, \"noZero\": noZero, \"noJavascript\": noJavascript, \"fileScan\": fileScan, \"outputLogFile\": outputLogFile, \"scanADirectory\": scanADirectory}\n Button(pdfIDTab, text=\"OK\", width=14, command= lambda: makeThePDFIdCommandReady(program, optionsArr)).grid(row=9, column=0, sticky=W)", "title": "" }, { "docid": "d84a98c1f54f524e7d3efda447bff90d", "score": "0.47985172", "text": "def build_ui(self):\n self.ui = UI_SRCInfo.Ui_Form()\n self.ui.setupUi(self)\n self.timeperd = Timeperd()\n self.citation = Citeinfo(parent=self, include_lwork=False)\n\n self.citation.ui.fgdc_lworkcit.deleteLater()\n self.ui.widget_citation.layout().addWidget(self.citation)\n self.ui.widget_timeperd.layout().addWidget(self.timeperd)\n\n self.setup_dragdrop(self)", "title": "" }, { "docid": "e9d2ff3845266a0c2dca21a68f62a5fd", "score": "0.47965768", "text": "def prepare(self, obj):\n data = super(FluentPageIndex, self).prepare(obj)\n # get all text plugins\n try:\n text_contentitems = obj.contentitem_set.instance_of(TextItem)\n except:\n text_contentitems = []\n try:\n file_contentitems = obj.contentitem_set.instance_of(FileItem)\n except:\n file_contentitems = 
[]\n try:\n picture_contentitems = obj.contentitem_set.instance_of(PictureItem)\n except:\n picture_contentitems = []\n t = loader.select_template(('search/indexes/fluentpage/fluentpage_text.txt', ))\n data['text'] = t.render(Context({'object': obj,\n 'content_data': { 'text': text_contentitems, 'file': file_contentitems, 'picture': picture_contentitems }}))\n return data", "title": "" }, { "docid": "adcae7dcb4394b44b52a7397021741cf", "score": "0.47937122", "text": "def update_text_content1(selected_options):\n global input_filecontents\n if not selected_options:\n shown_text=\"No file entered\"\n elif len(selected_options)>=1:\n latest_option = selected_options[-1]\n shown_text = input_filecontents[latest_option]\n shown_text = \"\".join([\"{:03d} {}\\n\".format(i, line) for i, line in enumerate(shown_text.split(\"\\n\"))])\n textwindow = dcc.Textarea(\n value=shown_text,\n style={\n \"height\": \"200px\",\n \"width\": \"500px\",\n 'lineHeight': '20px',\n 'borderWidth': '1px',\n 'marginLeft': '20px',\n 'marginRight': '10px',\n }\n )\n return textwindow", "title": "" }, { "docid": "323d862a222a3024000eb6a3cdf2b8a9", "score": "0.4791721", "text": "def build(self):\r\n return main_file", "title": "" }, { "docid": "ce98f051a3a53c64a5f3ba03a8755eeb", "score": "0.47885987", "text": "def create_option_menu(self):\r\n self.row_option = tkinter.OptionMenu(self.master, self.str_var_row,\r\n *self.size_option_list)\r\n self.col_option = tkinter.OptionMenu(self.master, self.str_var_col,\r\n *self.size_option_list)\r\n self.first_option = tkinter.OptionMenu(self.master, self.str_var_first,\r\n *self.side_option_list)\r\n self.tl_option = tkinter.OptionMenu(self.master, self.str_var_topleft,\r\n *self.side_option_list)\r\n\r\n self.str_var_row.set(self.size_option_list[3])\r\n self.str_var_col.set(self.size_option_list[3])\r\n self.str_var_first.set(self.side_option_list[0])\r\n self.str_var_topleft.set(self.side_option_list[1])\r\n\r\n self.row_option.grid(row=0, column=0, sticky=E+W)\r\n self.col_option.grid(row=1, column=0, sticky=E+W)\r\n self.first_option.grid(row=2, column=0, sticky=E+W)\r\n self.tl_option.grid(row=3, column=0, sticky=E+W)", "title": "" }, { "docid": "85b1d26c17ba36475fd43191bc396a96", "score": "0.47864935", "text": "def option1(start):", "title": "" }, { "docid": "ae24a5760a2c3760570cc2342f5fdc83", "score": "0.4780302", "text": "def build_macro_file_text(self):\n print(\"Building output ...\")\n bank_strings = {'m1': '', 'm2': '', 'm3': ''}\n for bank, gkey_assignment in self.assignments.items():\n for gkey, assign_info in gkey_assignment.items():\n configstring = (\n \"keys_{gkey}_name = {name}\\n\"\n \"keys_{gkey}_type = {type}\\n\"\n \"keys_{gkey}_maptype = {maptype}\\n\"\n \"keys_{gkey}_mappedkey = {kkey}\\n\".format(\n gkey=gkey, name=assign_info[\"name\"],\n type=assign_info[\"type\"],\n maptype=assign_info[\"maptype\"],\n kkey=assign_info[\"key\"]\n )\n )\n bank_strings[bank] += configstring\n macros_file_text = (\n \"[DEFAULT]\\n\"+\n \"name = %s\\n\" % (self.name)+\n \"version = 1.0\\n\"+\n \"icon = \\n\"+\n \"window_name =\\n\"+\n \"base_profile = \\n\"+\n \"background = \\n\"+\n \"author = %s\\n\" % (self.author)+\n \"activate_on_focus = False\\n\"+\n \"plugins_mode = all\\n\"+\n \"selected_plugins = ,profiles,menu\\n\"+\n \"send_delays = True\\n\"+\n \"fixed_delays = False\\n\"+\n \"press_delay = 50\\n\"+\n \"release_delay = 50\\n\"+\n \"models = g13\\n\"\n )\n for bank, configstring in bank_strings.items():\n macros_file_text += (\n \"[%s]\\n%s\\n\" % (bank, 
configstring)\n )\n macros_file_text += (\n \"\\n\"\n \"[m1-1]\\n\"\n \"\\n\"\n \"[m2-1]\\n\"\n \"\\n\"\n \"[m3-1]\\n\"\n \"\\n\"\n \"[m1-2]\\n\"\n \"\\n\"\n \"[m2-2]\\n\"\n \"\\n\"\n \"[m3-2]\\n\"\n \"\\n\\n\"\n )\n self.g15text = macros_file_text", "title": "" } ]
b3e7fb4c0bec180d6cc29cd90e15c3af
picks player to go first
[ { "docid": "ba5e66c73444f077c9a555f95d974d94", "score": "0.7084739", "text": "def pick_player():\n \n number = random.randint(0, 1)\n\n if number == 0:\n return 'Player 1'\n else:\n return 'Player 2'", "title": "" } ]
[ { "docid": "743fd69dc486d9851a5f7b2a45df898d", "score": "0.8099728", "text": "def choose_first_player(self):\n self.turn = random.choice([self.player1, self.cpu]) \n self.first_player = self.turn", "title": "" }, { "docid": "76ef58f5bee4e05fd4c01a9dfb2e66a6", "score": "0.80952483", "text": "def first_player_to_play(self): # Probleme si tous les doubles dans la pioche\n self.current_player = self.players[0] #human begins", "title": "" }, { "docid": "0bb00ee069d1b83bce58f891df533479", "score": "0.7866396", "text": "def choose_first_player(self):\n \n self.current_player = random.randint(1, 3) % 3\n\n print('\\n{} starts'.format(self.players[self.current_player].name))", "title": "" }, { "docid": "f33c7f4d286e90845937b7d9ed47e597", "score": "0.7565555", "text": "def choose_starter(self):\n\t\tif random.randint(0, 1) == 0:\n\t\t\tprint(\"Computer starts!\")\n\t\t\tboard.player = 'O'\n\t\telse:\n\t\t\tprint(\"You start!\")\n\t\t\tboard.player = 'X'", "title": "" }, { "docid": "4b8fad7a5940d83da4f0f1f4ae557c74", "score": "0.7123707", "text": "def whoGoesFirst():\n if random.randint(0,1) == 0:\n return 'computer'\n else:\n return 'player'", "title": "" }, { "docid": "111aec9628b11b958e2ced129e8f4267", "score": "0.68581903", "text": "def next_player(self):\n self.current_player = self.players[(self.current_player.identity.id + 1) % len(self.players)]", "title": "" }, { "docid": "5043b29e47d5c4e47dce5b2d32b4078b", "score": "0.6843655", "text": "def next_player(self):\n self.current_player = (self.current_player + 1) % 3", "title": "" }, { "docid": "f9a66631a4c5cba589c879aeaa359df2", "score": "0.67925966", "text": "def init_player(self):\n choice = input(\"Do you want to play first?(y/n) \\n\")\n if choice.lower() == 'y':\n return [2, 1] # human player is player2 and play first\n elif choice.lower() == 'n':\n return [1, 2] # AI play first\n else:\n print(\"Please input y or n ! 
\\n\")\n play_turn = self.init_player()\n return play_turn", "title": "" }, { "docid": "796d276044214a2f1ad3d8b00961e657", "score": "0.67255574", "text": "def get_next_player(self, player):\n player_one = self.get_player_one()\n player_two = self.get_player_two()\n if player_one.get_player_name() == player.get_player_name():\n self.set_turn(player_two.get_player_name())\n else:\n self.set_turn(player_one.get_player_name())", "title": "" }, { "docid": "9cc946517b6981f7583cca038ff11d89", "score": "0.6647778", "text": "def next_turn(self):\n self.player = (self.player + 1) % 2", "title": "" }, { "docid": "15cbc8badc9a6bd638ac726df2b67ea5", "score": "0.6634603", "text": "def int_player(self):\n if self.current_player == self.first_player:\n return 0\n else:\n return 1", "title": "" }, { "docid": "00a8c58b698eabce0c7004abda5ad6f0", "score": "0.65970176", "text": "def switch_player(self):\n self.current_player_idx = (self.current_player_idx + 1) % self.nbr_players", "title": "" }, { "docid": "8319d7d1eedab75cd16b654b833838dc", "score": "0.65710676", "text": "def pick_winner(self) -> \"PlayerKey\":", "title": "" }, { "docid": "265dbd8e87c14412cdd41c345e9654c7", "score": "0.65409535", "text": "def play(self, player, game):\n game.skip() #Skips the turn of next player ", "title": "" }, { "docid": "3887dfddb5270377d3ac6ad559aa9ef6", "score": "0.6519844", "text": "def get_next_moving_player(player):\n return 2 if player == 1 else 1", "title": "" }, { "docid": "8b69d0a9aad761502f3814f64bf16b04", "score": "0.65194094", "text": "def switch_player(self):\n self._current_player = self._player_one if self._current_player == self._player_two else self._player_two", "title": "" }, { "docid": "6e4e42fd8a88933c6a910d7005ba538e", "score": "0.6491364", "text": "def game_play_run(self):\n print \"Playing the game! :)\"\n self.turn_order = [self.turn_order_first] # add the first person\n for u in sorted(self.all_uids): # add everyone after the first person\n if u > self.turn_order_first:\n self.turn_order.append(u)\n for u in sorted(self.all_uids): # add everyone before the first person\n if u < self.turn_order_first:\n self.turn_order.append(u)\n time.sleep(2)\n while True:\n # Whose turn is it? Start at the first player and go around forever\n current_player = self.turn_order[self.game_turn % len(self.turn_order)]\n print \"It's player %s's turn!\" % str(ui.uid_to_friendly(current_player, self.all_uids))\n if current_player == self.uid: print \"(That's you!)\"\n # Roll the dice for that player\n print \"Fairly rolling the dice...\"\n dice_roll = self.run_die_roll() + self.run_die_roll()\n print \"The dice rolled to %s\" % str(dice_roll)\n \n if dice_roll != 7:\n # Distribute resources based on the dice roll\n owed = self.ui_board.resources_owed(dice_roll)\n print \"The bank distributes:\", owed\n # Mark those items as locally claimed. That user can \n for player, resources in owed.iteritems():\n for resource in resources:\n the_resource = self.resources.get_next_resource(resource)\n self.resources.set_resource_owner(the_resource, player)\n if player == ui.uid_to_friendly(self.uid, self.all_uids):\n # add to my hand\n self.hand.append(the_resource)\n elif current_player == self.uid:\n inp = raw_input(\"You control the robber! Where would you like to move it? (Hexagon index, ! for key)\")\n while inp == \"!\":\n self.ui_board.print_hex_reference()\n inp = raw_input(\"You control the robber! Where would you like to move it? (Hexagon index, ! 
for key)\")\n inp = int(inp)\n # move robber to inp\n self.broadcast_message({\"turn\": self.game_turn, \"robber\": inp})\n # steal from someone?\n #steal = raw_input(\"From whom would you like to steal (among those you put the robber next to)?\")\n #self.broadcast_message({\"steal\": ui.friendly_to_uid(steal, self.all_uids), \"steal_turn\": self.game_turn})\n # Wait for them to send their encrypted hand\n # Ask them for one of their keys\n # Add this item to our hand\n print \"Stealing during robbery is unimplemented.\"\n # Is it our turn?\n if current_player == self.uid:\n print \"It's your turn! What would you like to do?\"\n do = raw_input(\"1: try to make a trade, 2: buy a house, 3: buy a road, or nothing\")\n if do != \"\":\n if int(do) == 1:\n # Ask a user for trade\n # Broadcast ask\n print \"Trading is unimplemented\"\n pass\n elif int(do) == 2:\n # Choose the house\n house = self.house_place()\n # Spend resources\n print \"New house placement is unimplemented\"\n # Broadcast\n pass\n elif int(do) == 3:\n # Choose the road\n road = self.road_place()\n # Spend resources\n # Broadcast\n print \"New road placement is unimplemented\"\n pass\n self.broadcast_message({'turn_done': self.game_turn}) # end my turn\n else:\n print \"Waiting for the player to complete their turn (purchases, trades, etc.)\"\n while not self.game_turn in self.game_rounds or 'done' not in self.game_rounds[self.game_turn]:\n time.sleep(1)\n # only the current player can end their turn\n assert self.game_rounds[self.game_turn]['done'][1] == current_player\n # do we move the robber?\n if 'robber' in self.game_rounds[self.game_turn]:\n assert self.game_rounds[self.game_turn]['robber'][1] == current_player # verify the sender\n self.ui_board.move_robber(self.game_rounds[self.game_turn]['robber'][0]) # move it\n self.ui_board.print_actual_board()\n self.game_turn += 1", "title": "" }, { "docid": "9e72b9b1a438197bf01ce6f25efc1c63", "score": "0.64756346", "text": "def placeplayer(self):\n self.__player = random.choice(self.rooms)\n while self.player.wumpus or self.player.bats or self.player.pit:\n self.__player = random.choice(self.rooms)", "title": "" }, { "docid": "781ca16cccebcb626e80096580631003", "score": "0.64729303", "text": "def switch_player(current_player):\n if current_player == \"x\":\n return \"o\"\n else:\n return \"x\"", "title": "" }, { "docid": "6742efeb8e778a5daeb613a03c69032c", "score": "0.6466336", "text": "def play_again(server):\n server.player_handler.current_player = (\n server.player_handler.get_player(-server.player_handler.order))", "title": "" }, { "docid": "1ea56164c8e87c677d12bd1134c9f796", "score": "0.6460269", "text": "def step(self):\n #Get random player\n players = [random.choice(list(self.G.nodes()))]\n # if this player has at least two neighbors, go into a truel; otherwise, go into a duel\n if len(self.G[players[0]]) > 1:\n players.extend(_random_subset(list(self.G[players[0]]), 2))\n #print(\"Truel: \", players)\n players = self.sequential_truel(players)\n\n elif len(self.G[players[0]]) == 1:\n players.extend(_random_subset(list(self.G[players[0]]), 1))\n #print(\"Duel: \", players)\n players = self.random_duel(players)\n\n #Clear list after done\n players = []", "title": "" }, { "docid": "3fbb190bc0d6ce031457237cf61053d5", "score": "0.6460041", "text": "def set_player():\n if whoplays == 1:\n return \"X\"\n else:\n return \"O\"", "title": "" }, { "docid": "736d176f980591184076d17f976e748b", "score": "0.64371026", "text": "def __playHumanTurn__(self, choice):\n 
self.__inputChoice__(choice)", "title": "" }, { "docid": "2ad929c79db327b2be1d01e9f190eac4", "score": "0.63983643", "text": "def play(self, player, game):\r\n return", "title": "" }, { "docid": "2b47e3572fad163965a1e9d0a681be05", "score": "0.63979834", "text": "def play_once(human_plays_first):\n # This is all dummy scaffolding code right at the moment...\n import random # See Modules chapter ...\n rng = random.Random()\n # Pick a random result between -1 and 1.\n result = rng.randrange(-1,2)\n print(\"Human plays first={0}, winner={1} \"\n .format(human_plays_first, result))\n return result", "title": "" }, { "docid": "d9ad8dbfd0b5ffc32113becf010ae651", "score": "0.6396512", "text": "def plays_first():\n coin_choice = input(\"Heads or tails?: \")\n coin_list = [\"heads\", \"tails\"]\n correct = random.choice(coin_list)\n if (coin_choice.lower() != \"heads\") & (coin_choice.lower() != \"tails\"):\n print(\"Well, since you were unable to enter heads or tails I will choose for you.\")\n print(\"Person to the left is Player1 and person to the right is Player2.\")\n elif coin_choice.lower() == correct:\n print(\"If you chose \" + correct + \" you are Player1\")\n print(\"The other person is Player2\")\n elif coin_choice.lower() != correct:\n print(\"If you chose \" + correct + \" you are Player2\")\n print(\"The other person is Player1\")", "title": "" }, { "docid": "ac85f2efbaeec6eeeb35a696bb265dcd", "score": "0.63812727", "text": "def start(self) -> None:\n move = 0\n option = self.get_opponent_option()\n self.initialize_first_player()\n self.initialize_second_player(option)\n current_turn = \"0\"\n while True and move < 9 :\n choice = self.players[current_turn].get_choice(self)\n self.mark_moves(\n self.player_marks[current_turn],\n choice\n )\n self.print_current_board()\n if self.is_won(self.player_marks[current_turn]):\n self.print_winner_info(option, self.players[current_turn])\n break\n if current_turn == \"0\":\n current_turn = \"1\"\n else:\n current_turn = \"0\"\n move += 1\n if move == 9:\n print(\"Game Tie!!\")", "title": "" }, { "docid": "6222cf171df771afea03eddb2f6a6a92", "score": "0.6377076", "text": "def start_match(self):\r\n self.batting(0)\r\n if self.players == 1:\r\n self.atharva()\r\n else:\r\n self.batting(1)\r\n self.winner()", "title": "" }, { "docid": "f1355e3d7e8d63e9691cc5c2fd095e40", "score": "0.63398284", "text": "def who_plays_next(self):\n if self.game_over:\n return self.no_one\n return self.players[self.total_play_count % 2]", "title": "" }, { "docid": "a13869daa31e17ab5ab696ddda91fdf8", "score": "0.6309286", "text": "def move_choice(self, game, player, roll):", "title": "" }, { "docid": "ae387e51ca32d14c5f7d114786caf71a", "score": "0.6307088", "text": "def initialize_first_player(self) -> None:\n self.players[\"0\"] = HumanPlayer(self.player_marks[\"0\"])\n self.players[\"0\"].set_player_info(\"First Player\")", "title": "" }, { "docid": "f7d64e37fe28bfd5a7ee7278fc709d04", "score": "0.62940854", "text": "def coin_flip(player, opponent, options):\n\n print(\"\\nFlipping for who plays first...\")\n sleep(options['speed']/2)\n\n if randint(0, 1) == 1:\n print(\"{} goes first!\".format(player['name']))\n return player, opponent\n else:\n print(\"{} goes first!\".format(opponent['name']))\n return opponent, player", "title": "" }, { "docid": "52f8ca6614006d3b606718aeef00ac20", "score": "0.62672615", "text": "def next_player():\n session.pop('takeProcessFlag', None)\n session.pop('joker_index', None)\n\n game = pickle_read(\"game.pickle\")\n game.nextPlayer()\n 
pickle_write(\"game.pickle\",game)\n return redirect(url_for('play'))", "title": "" }, { "docid": "28c3957643a1690de2b76ce7f5c552f6", "score": "0.6265145", "text": "def switch_current_player(self):\n if self.player == Board.X:\n self.player = Board.O\n else:\n self.player = Board.X", "title": "" }, { "docid": "48b1427a0d9d67cdefc364619734449f", "score": "0.6262001", "text": "def test_get_next_player__first_move(self):\n returned = undertest.get_next_player(None)\n expected = c.X\n self.assertEqual(expected, returned)", "title": "" }, { "docid": "9476c938f8edb90afe22aac72ea09d25", "score": "0.6258162", "text": "def who_goes_first():\n\n # This function should randomly choose whether the x's or the o's go\n # first.\n return random.choice(['x', 'o'])", "title": "" }, { "docid": "93157684427266d1031b368650e3ee33", "score": "0.6242538", "text": "def prompt_to_play(self, player) :\n player.release_play_lock()\n query_interval = 0.03 #s\n while True :\n action = player.next_action\n if action is None :\n time.sleep(query_interval)\n continue\n # If an action was found lock the player and reset her 'next_action'\n player.close_play_lock()\n player.next_action = None\n return action", "title": "" }, { "docid": "38024c572bc5e6f94c956041fa999889", "score": "0.6239626", "text": "def restart_from_first_level(self):\n self._move_to_level(0)\n self.player = create_player(self._is_in_infinite_lives_mode)", "title": "" }, { "docid": "74ae66279403e6931eefe23ac447aa36", "score": "0.6223622", "text": "def player2(marbles_left):\n\tmax = 3\n\tif marbles_left < 3:\n\t\tmax = marbles_left\n\tp2_choice = random.randint(1, max)\n\treturn p2_choice", "title": "" }, { "docid": "263dca38997412dc5d14eaad79d3af57", "score": "0.6205331", "text": "def start_game():", "title": "" }, { "docid": "263dca38997412dc5d14eaad79d3af57", "score": "0.6205331", "text": "def start_game():", "title": "" }, { "docid": "ba4b9b5fd93949545add4dc941c5c357", "score": "0.6189161", "text": "def __move0(self):\n if self.player.wumpus:\n print(\"... OOPS! Bumped a Wumpus!\")\n if random.random() < 0.75:\n self.__movewumpus()\n else:\n raise PlayerDeath(\"TSK TSK TSK-Wumpus got you!\")\n elif self.player.pit:\n raise PlayerDeath(\"YYYYIIIIEEEE . . . Fell in a pit.\")\n elif self.player.bats:\n print(\"ZAP-Super Bat Snatch! 
Elsewhereville for you!\")\n self.__player = random.choice(self.rooms)\n self.__move0()", "title": "" }, { "docid": "df891278e08f91f7e833c272aac44ffd", "score": "0.6185608", "text": "def start_turn(self):\n pass", "title": "" }, { "docid": "080d0214de500fda4f92f5a0e839f12c", "score": "0.6167753", "text": "def switch_player(self):\n\t\tself.current_player = self.player2 if self.current_player == self.player1 else self.player1\n\t\treturn 'X' if self.turn == 'O' else 'O'", "title": "" }, { "docid": "5e9e8f1fa8da5816bcafdd3e0d6f1ff0", "score": "0.6167585", "text": "def start(self):\n round_number = 1\n loss_streak = 0\n while self.player.credits >= 1:\n self.deck.shuffle()\n print('### Round ' + str(round_number) + ' ###')\n winner = self.play_one_round()\n loss_streak = self.update_streak(loss_streak, winner)\n self.record_player_history(winner)\n round_number = round_number + 1", "title": "" }, { "docid": "1dba871d946fa3e181937da556a842fe", "score": "0.6157973", "text": "def random_player(game, state):\n return random.choice(game.actions(state)) if game.actions(state) else None", "title": "" }, { "docid": "991310212e3d47942a6d65885c4b2419", "score": "0.6155633", "text": "def first_participant_choice():\n return random.randint(0, 2)", "title": "" }, { "docid": "87171df805cbc2b813ee00afca398bd8", "score": "0.61523557", "text": "def go_to_initial_position():\n turn_around()\n move()\n turn_around()", "title": "" }, { "docid": "da244fe7ed2b77d9711a6f046415c22b", "score": "0.6142412", "text": "def next(self):\n self._player += 1\n if self._player >= self.__class__.players_n:\n self._player = 0", "title": "" }, { "docid": "e39f32c598e79ae60aa79580a99010d8", "score": "0.6136268", "text": "def next_player(self):\n self._valid_moves = None\n\n if game.current_player.end_of_game():\n game.current_player.wait()\n game.finished = True\n return\n\n i = game.players.index(game.current_player)\n\n # Current player now waiting for their next turn\n game.current_player.wait()\n\n # Find the index of the next player\n try:\n game.current_player = game.players[i + 1]\n except IndexError:\n game.current_player = game.players[0]", "title": "" }, { "docid": "0d2ee5667166f9769f75975e7dfe4d87", "score": "0.6120103", "text": "def playerSelection(player):\n print('\\nIs player {} a human or computer?'.format(player))\n print('1. Enter 1 if Human')\n print('2. 
Enter 2 if Computer')\n\n return makeChoice()", "title": "" }, { "docid": "1e6396f6f38fed7aa3194388c5ea6d35", "score": "0.61084056", "text": "def choose_move_turn_piece(self):\n self.choice_raz()", "title": "" }, { "docid": "16145d572a572f6e29f2459e75c05dab", "score": "0.6099771", "text": "def start(self):\n if not self.playing:\n self.deck.reset()\n self.resetGame()\n self.faceup_dealer = False\n self.deal()\n self.playing = True\n self.playOrder = list(self.table.keys())[::-1]\n self.getNextPlayer()\n else:\n print(\"Game in progress\")\n\n return", "title": "" }, { "docid": "0335b1b8329a45b37edaf89759a64f78", "score": "0.60806954", "text": "def turntracker(self):\r\n if self.turnindex == '':\r\n self.turnindex = random.choice(range(len(self.players)))\r\n else:\r\n if self.turnindex +1 == len(self.players):\r\n self.turnindex = 0\r\n else:\r\n self.turnindex += 1\r\n print(f\"It looks like it's {self.players[self.turnindex].name}'s turn.\")", "title": "" }, { "docid": "4e27320ad138f4442e3eda33dbdef554", "score": "0.6078441", "text": "def starting_player()->None:\n while True:\n response=input('Enter B to have black as starting player or enter W to have white as starting player: ').strip().lower()\n if response=='b':\n othello.STARTING_PLAYER=othello.BLACK\n break\n elif response=='w':\n othello.STARTING_PLAYER=othello.WHITE\n break\n else:\n invalid_command(response)", "title": "" }, { "docid": "e4d4d3dbb66df1cd74936d96924ca320", "score": "0.6070064", "text": "def choose_card_to_play(self):\n return random.choice(self.player.phand)", "title": "" }, { "docid": "f09be705de436a2ad04fe1edb76ecca3", "score": "0.6062658", "text": "def main_loop(self):\r\n player_names = list(self.player_fleets.keys())\r\n actual_player = random.randint(0, len(player_names) - 1)\r\n print(\"The lucky first player is... \" + player_names[actual_player] + \"!\")\r\n while len(self.player_fleets) > 1:\r\n # let all players do their job until only one of them is left...\r\n player_name = player_names[actual_player]\r\n if player_name not in self.player_fleets:\r\n print(player_name, \"is already on the bottom of the sea, thus skipped!\")\r\n else:\r\n input(player_name + \" will now do their turn; look away, guys, and press enter! \")\r\n print(\"\\n\" * 100)\r\n self.bombard_fleet(player_name)\r\n print(\"\\n\" * 100)\r\n actual_player = (actual_player + 1) % len(player_names)\r\n print(list(self.player_fleets.keys())[0], \"won by staying alive for the longest!\")", "title": "" }, { "docid": "c465a3c539961e64e9fb14f916ea83bb", "score": "0.6052357", "text": "def begin_turn(self):\r\n pass", "title": "" }, { "docid": "328aead7916f87e1554812ba020f2863", "score": "0.6051927", "text": "def input_player(self):\n player_choice = input(\"Choose rock, paper, or scissors: \")\n player_choice = player_choice.lower()\n print(\"You chose \" + player_choice)\n\n if player_choice not in [\"rock\", \"paper\", \"scissors\"]:\n print(\"Please try again.\")\n player_choice = None\n self.input_player()\n\n else:\n self.player_choice = player_choice", "title": "" }, { "docid": "45f083a0c371b3dd88efb6a26d555ece", "score": "0.6025998", "text": "def choose_turn(self):\n self.choice_raz()", "title": "" }, { "docid": "49bcfb5ed37d3c808759d176e84ed223", "score": "0.6009295", "text": "def play(self):\n while not self.isOver():\n self.player1.play()\n if not self.isOver():\n self.player2.play()\n if self.winner == None:\n print(\"Game Over! It's a tie.\")\n print(self)\n else:\n print(\"Congratulations! 
\"+self.winner.getName()+\" has won!\")\n print(self)", "title": "" }, { "docid": "dce60cf3f5a7bb01f68d33e136f1cf40", "score": "0.60071546", "text": "def next_player(self):\n x_count = self.number_of_moves(SQUARE.X)\n o_count = self.number_of_moves(SQUARE.O)\n return SQUARE.X if x_count <= o_count else SQUARE.O", "title": "" }, { "docid": "ea307bcef02b47fb439452eb9b88fb01", "score": "0.60033345", "text": "def next_player_turn(self):\n self.current_turn = self.creator if self.current_turn != self.creator else self.opponent\n self.save()", "title": "" }, { "docid": "1907edbd78f16647fcde3075e484f59d", "score": "0.59967", "text": "def choose_computer_move(who):\r\n \r\n #initial approach will be a random choice from the list of available moves\r\n \r\n import random\r\n \r\n return random.choice(all_possible_moves_for(who))", "title": "" }, { "docid": "23f28bccef009a6a362fe7de913a25e6", "score": "0.5992187", "text": "def reset_round(self):\n self.suit_to_follow = CardGame.Suit.Undefined\n self.first_player = True", "title": "" }, { "docid": "b9772cc30c44212b92dc3dfdf31274bd", "score": "0.599064", "text": "def special(self, game, player):\n if player.flip_journey_token():\n player.output(\"Ranger gives +5 Cards\")\n player.pickup_cards(5)", "title": "" }, { "docid": "2326bc56ca873a63fdf7f2708476c1f1", "score": "0.5986472", "text": "def mc_trial(board, player):\n player_flag = True\n player_dict = {True: player, False: provided.PLAYERX if (player == provided.PLAYERO) else provided.PLAYERO}\n emptys = board.get_empty_squares()\n while (emptys):\n selected = emptys.pop(random.randrange(len(board.get_empty_squares())))\n board.move(selected[0], selected[1], player_dict[player_flag])\n if (board.check_win()):\n break\n player_flag = not player_flag", "title": "" }, { "docid": "63461f22ba8323cf9aa5398322f0538b", "score": "0.5986005", "text": "def first_card_to_match(self):\n while self.card_to_match.category != 'Normal':\n self.card_to_match = choice(self.deck)", "title": "" }, { "docid": "c3357a006a300d3b8f3d6cc590f94183", "score": "0.59753585", "text": "def next_player(self, player=None):\n if player is None:\n player = self.current_player\n return (player + 1) % 4", "title": "" }, { "docid": "7b821f66f75a4ff171761a51ed71397b", "score": "0.59716654", "text": "def win(self, player):", "title": "" }, { "docid": "84ba6db8d08e8e20ec6c7df01ca9b772", "score": "0.59597474", "text": "def start_game():\n coin_flip = random.randrange(1, 3)\n if coin_flip == 1:\n print(\"---You start first. Good luck!---\")\n while computer_board.ship_count > 0 and player_board.ship_count > 0:\n player_turn()\n if computer_board.ship_count == 0 or player_board.ship_count == 0:\n player_board.display_board()\n computer_board.display_board()\n break\n computer_turn()\n player_board.display_board()\n computer_board.display_board()\n else:\n print(\"---Your enemy starts first! You start second. 
Good luck!---\")\n while computer_board.ship_count > 0 and player_board.ship_count > 0:\n computer_turn()\n player_board.display_board()\n computer_board.display_board()\n if computer_board.ship_count == 0 or player_board.ship_count == 0:\n break\n player_turn()", "title": "" }, { "docid": "86d2cb1fe8ffa9e2f555fc0da12ffe62", "score": "0.59586394", "text": "def decide(self):\r\n\r\n self.maybe_shoot()\r\n next(self.move_cycle)", "title": "" }, { "docid": "ceaaf75c8f246b19060445d39861caca", "score": "0.5944272", "text": "def play(self, player: Player):\n print(f\"you are {self.location_description}\")\n\n while self.options:\n print(\"What will you do now?\")\n print(f\"{self.lb.join([str(idx)+'.'+opt for idx, opt in enumerate(self.options)])}\")\n print(self.lb)\n player_choice = input()\n\n option = self.options.pop(int(player_choice))\n if \"pick up\" in option.lower():\n if self.second_treasure is not None and self.treasure[0] == self.second_treasure:\n if random.random() < 0.5:\n player.christmas()\n else:\n player.math_questions()\n # pretty bad design here, str match would be better\n player.take(self.treasure[0], self)\n\n elif \"attack\" in option.lower():\n player.attack(self)\n\n elif \"move on\" in option.lower():\n break\n\n # updating options\n if self.task_accomplished() and self.exit_description not in self.options:\n self.options.append(self.exit_description)\n if len(self.treasure) == 1 and self.second_treasure is not None:\n self.treasure.append(self.second_treasure)\n self.options.append(f\"Pick up the {self.second_treasure.name}\")\n\n print(self.exit_description)", "title": "" }, { "docid": "28f1d4d6a83751a67633fa5b9dadb82d", "score": "0.5935956", "text": "def pick_song(self):\n name = self.get_text(None)\n if name == ERROR:\n self.change_img(mode=PLAY)\n return\n self.play_song(name)", "title": "" }, { "docid": "7b267750742a76116df72e3b27279f2c", "score": "0.5929682", "text": "def previous_player(self):\n self.current_player = (self.current_player - 1) % 3", "title": "" }, { "docid": "01b4e9be29ce53427bfe6eb72064f2da", "score": "0.59233695", "text": "def continue_game(self):\n self.game()", "title": "" }, { "docid": "637dc1fc32091f3df913dadb19d4fa1b", "score": "0.59222806", "text": "def start_game(self):\n while not self._is_win_or_tie():\n self._do_turn()", "title": "" }, { "docid": "522dca8c53f0e9d87e98136a3fa63788", "score": "0.59202576", "text": "def choose_card_to_return(self):\n return random.choice(self.player.played)", "title": "" }, { "docid": "32596f67d65f43f33afa4eff6c5d0d93", "score": "0.5915617", "text": "def __playTurn__(self, choice=None, opponent=None):\n if self._type == \"Computer\":\n self.__playComputerTurn__(opponent)\n else:\n self.__playHumanTurn__(choice)", "title": "" }, { "docid": "9d64b1814c210d917b2dac075ec32d41", "score": "0.5914858", "text": "def process(self, player):\n r = random.random()\n if r < self.p_die:\n return DIE\n if r < self.p_restart:\n return MOVE_TO_START\n return CONTINUE", "title": "" }, { "docid": "fe3d09a11a0c70e6c73b6b4f5faaecb8", "score": "0.59144735", "text": "def players_pick():\n\tprint(\"Select your fighter:\\n\")\n\tprint(\"For Hobo with a Slingshot || enter: A ||\\nFor Gladiator || type B ||\\nFor the Elf || type C\")\n\tletter = \"\"\n\twhile letter != \"A\" or \"B\" or \"C\":\n\t\tletter = input(\"Enter your selection: \")\n\t\tif letter == \"A\":\n\t\t\tplayer = Hobo()\n\t\t\tbreak\n\t\telif letter == \"B\":\n\t\t\tplayer = Gladiator()\n\t\t\tbreak\n\t\telif letter == \"C\":\n\t\t\tplayer = 
Elf()\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Incorrect input\\nChoices are A, B or C: \")\n\treturn player", "title": "" }, { "docid": "be0ca5176bcb1c239b43a9b8c3510934", "score": "0.5910221", "text": "def player1(lst, round, index, marbles_left):\n\tp1_choice = lst[round][index]\n\tp1_choice = int(p1_choice)\n\tif p1_choice > marbles_left:\n\t\tp1_choice = marbles_left\n\treturn p1_choice", "title": "" }, { "docid": "c4eb326a10b37931a78c45de0cd61045", "score": "0.5908532", "text": "def initialize_second_player(self, option):\n if option is BOT:\n self.players[\"1\"] = BotPlayer(self.player_marks[\"1\"])\n self.players[\"1\"].set_player_info(\"Bot Player\")\n else:\n self.players[\"1\"] = HumanPlayer(self.player_marks[\"1\"])\n self.players[\"1\"].set_player_info(\"Second Player\")", "title": "" }, { "docid": "22837ad4cdb1dab1357444282ccf7691", "score": "0.5907427", "text": "def start_play(self):\n\n print(Fore.LIGHTYELLOW_EX + \"Player 1\")\n print(Fore.LIGHTMAGENTA_EX + \"Enter the name :\")\n player1 = input('> ')\n print(\"\\n\")\n\n print(Fore.LIGHTBLUE_EX + \"Player 2\")\n print(Fore.LIGHTMAGENTA_EX +\n 'type y to play with bot and h to play with other player?')\n choice = input('> ')\n while True:\n if choice == 'y':\n print(Fore.LIGHTRED_EX + 'select the bot mode n: normal s: smart')\n choice = input('> ')\n if choice == 's':\n player2 = 'Smart_bot'\n smart_bot = Smart_bot()\n break\n elif choice == 'n':\n player2 = 'Random_bot'\n random_bot = Random_Bot()\n print(\"\\n\")\n break\n\n elif choice == 'h':\n print(Fore.LIGHTYELLOW_EX + \"Enter the name : \")\n player2 = input('> ')\n game = Game_logic()\n print(\"\\n\")\n break\n\n # print(player2)\n # Stores the player who chooses X and O\n cur_player = player1\n\n if player1 in player2:\n player2 += \"-2\"\n # Stores the choice of players\n\n # Stores the options\n options = [colored(\"X\", 'red'),\n colored(\"O\", 'cyan')]\n\n # Stores the scoreboard\n score_board = {player1: 0, player2: 0}\n scoreboard(score_board)\n\n # Game Loop for a series of Tic Tac Toe\n # The loop runs until the players quit\n self.quit = \"Enter 3 to quit\"\n while True:\n global winner\n # Player choice Menu\n print(Fore.LIGHTBLUE_EX + \"Turn to choose for \", cur_player)\n print(Fore.LIGHTYELLOW_EX + \"Enter 1 for X\")\n print(Fore.LIGHTRED_EX + \"Enter 2 for O\")\n print(Fore.LIGHTGREEN_EX + self.quit)\n\n # Try exception for CHOICE input\n try:\n choice = int(input(\"> \"))\n except ValueError:\n print(Fore.LIGHTRED_EX + \"Wrong Input!!! Try Again\\n\")\n continue\n\n # Conditions for player choice\n if choice == 1:\n self.player_choice[colored(\"X\", 'red')] = cur_player\n if cur_player == player1:\n self.player_choice[colored(\"O\", 'cyan')] = player2\n else:\n self.player_choice[colored(\"O\", 'cyan')] = player1\n\n elif choice == 2:\n self.player_choice[colored(\"O\", 'cyan')] = cur_player\n if cur_player == player1:\n self.player_choice[colored(\"X\", 'red')] = player2\n else:\n self.player_choice[colored(\"X\", 'red')] = player1\n elif choice == 3:\n print(Fore.LIGHTYELLOW_EX + \"Final Scores\")\n scoreboard(score_board)\n break\n\n else:\n print(Fore.LIGHTRED_EX + \"Wrong Choice!!!! 
Try Again\\n\")\n\n # Stores the winner in a single game of Tic Tac Toe\n if player2 != \"Smart_bot\" and player2 != \"Random_bot\":\n winner = game.multi_player(options[choice-1])\n elif player2 == \"Smart_bot\":\n winner = smart_bot.smart_bot(\n options[choice-1], self.player_choice)\n elif player2 == \"Random_bot\":\n\n winner = random_bot.random_bot(\n options[choice-1], self.player_choice)\n\n # Edits the scoreboard according to the winner\n\n if winner != 'D':\n player_won = self.player_choice[winner]\n score_board[player_won] = score_board[player_won] + 1\n\n scoreboard(score_board)\n # Switch player who chooses X or O\n if cur_player == player1:\n cur_player = player2\n else:\n cur_player = player1", "title": "" }, { "docid": "7349264a67e6d826a3e30b20b1ab2aa7", "score": "0.59050214", "text": "def startGame(self):\n self._resetBoard()\n self._current_player = 'X'\n self._game_started = True", "title": "" }, { "docid": "1c3791c04a735453de4a553566fae0ea", "score": "0.5900352", "text": "def test_play_second(self):\n self.plr.journey_token = False\n self.plr.piles[Piles.HAND].set()\n self.plr.add_card(self.card, Piles.HAND)\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.buys.get(), 2)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 5)\n self.assertTrue(self.plr.journey_token)", "title": "" }, { "docid": "2cedcc67e3c37c5660da275609c58686", "score": "0.5893798", "text": "def play_game():\n\n # keep on playing until player don't want to play\n while True:\n # make empty TIC TAC TOE BOARD\n board = list(map(lambda x: ' ', range(9)))\n # assign markers to players\n player1, player2 = make_markers()\n # decide randomly who goes first\n turn = pick_player()\n print(f'{turn} will go first')\n\n # ask user if they want to play the game\n play_game = input(\"Play game 'Yes' | 'No'\\t\")\n\n if play_game.lower().startswith('y'):\n game = True\n else:\n game = False\n\n # keep on playing if user wants to play\n while game:\n if turn == 'Player 1':\n\n # print the board on terminal\n make_board(board)\n # player 1 picks the position to place their marker\n position = input_position(board)\n # place the marker on the board\n board = place_marker(board, player1, position)\n\n # check if player 1 won\n if check_result(board, player1):\n make_board(board)\n print('Player 1 won')\n game = False\n # check if board is full\n else:\n if full_board(board):\n make_board(board)\n print(\"It's a Draw !\")\n break\n # if none of above, its player 2 turn\n else:\n turn = 'Player 2'\n\n else:\n\n # print the board on terminal\n make_board(board)\n # player 2 picks the positin to place their marker\n position = input_position(board)\n # place the marker on the board\n board = place_marker(board, player2, position)\n\n # check if player 2 won\n if check_result(board, player2):\n make_board(board)\n print('Player 2 won')\n game = False\n # check if board is full\n else:\n if full_board(board):\n make_board(board)\n print(\"It's a Draw !\")\n break\n # if none of the above, its player 1 turn\n else:\n turn = 'Player 1'\n\n # if user wants to stop playing\n if not play_again():\n break", "title": "" }, { "docid": "596227bcd877562069510249d9fd2863", "score": "0.58879024", "text": "def skip_player(server):\n server.player_handler.next_player()", "title": "" }, { "docid": "705d8c0a9e7bd4b051c4778722ee3a15", "score": "0.58871263", "text": "def current_game():\n\t\t\n\tos.system('clear')\n\tgrid = create_initial_grid()\n\n\tif 'IA Normal' not in Data.current_player.values() and 'IA Difficile' not in 
Data.current_player.values():\n\t\tcreate_player()\n\n\n\tplayer = random.choice([Data.current_player['Activator'], Data.current_player['Inhibitor']])\n\n\tprint('\\nPour cette partie :'\n\n\t\t'\\n \t[+]', colors.GREEN + Data.current_player['Activator'] + colors.STOP, 'is the '+colors.GREEN+'Activator'+colors.STOP+\n\t\t\n\t\t'\\n \t[+]', colors.RED + Data.current_player['Inhibitor'] + colors.STOP, 'is the '+colors.RED+'Inhibitor\\n'+colors.STOP)\n\n\ttime.sleep(1.5)\n\tretry = ''\n\tend = False\n\tstop = False\n\n\twhile not stop:\n\t\twhile not end:\n\t\t\tos.system('clear')\n\t\t\tgrid, end = playing(player, grid)\n\n\t\t\tif player == Data.current_player['Activator']:\n\t\t\t\tplayer = Data.current_player['Inhibitor']\n\t\t\telse:\n\t\t\t\tplayer = Data.current_player['Activator']\n\n\t\tprint(\"\"\"\n\t\t \n\t\t _ _ _ \n\t\t | | | | | | \n\t\t | | | | ___ | |__ __ ___ _____ \n\t\t | |/\\| |/ _ \\ | '_ \\ / _` \\ \\ / / _ \\ \n\t\t \\ /\\ / __/ | | | | (_| |\\ V / __/ \n\t\t \\/ \\/ \\___| |_| |_|\\__,_| \\_/ \\___| \n\t\t \n\t\t\"\"\")\n\n\t\ttime.sleep(0.4)\n\n\n\t\tprint(\"\"\"\n\t\t __ _ \n\t\t / _` | \n\t\t | (_| | \n\t\t \\__,_| \n\t\t \"\"\")\n\t\ttime.sleep(0.4)\n\t\tprint(\"\"\"\n\t\t _ _ _ _ \n\t\t | | | (_) | |\n\t\t | | | |_ _ __ _ __ ___ _ __ | |\n\t\t | |/\\| | | '_ \\| '_ \\ / _ \\ '__| | |\n\t\t \\ /\\ / | | | | | | | __/ | |_|\n\t\t \\/ \\/|_|_| |_|_| |_|\\___|_| (_)\n\t\t \n\t\t\t\"\"\")\n\t\ttime.sleep(0.8)\n\t\tos.system('clear')\n\t\tdisplay_grid(grid)\n\t\tprint(\"\"\"\n\t\t\t##################\n\t\t\t# #\n\t\t\t# SCORES #\n\t\t\t# #\n\t\t\t##################\n\t\t\t\"\"\")\n\t\ts_activator, s_inhibitor = get_score(grid)\n\n\t\tif s_activator > s_inhibitor:\n\t\t\tprint(\"\\n [+] Félicitation\", Data.current_player['Activator'], '! Tu as gagné cette manche avec un score de', s_activator, 'points')\n\t\telif s_activator < s_inhibitor:\n\t\t\tprint(\"\\n [+] Félicitation\", Data.current_player['Inhibitor'], '! Tu as gagné cette manche avec un score de', s_inhibitor, 'points')\n\t\telse:\n\t\t\tprint('\\n [+] Il semblerait que nous ayons une égalité !')\n\n\t\tprint(\"\\n Score de\", Data.current_player['Activator'], '(ACTIVATOR)', s_activator, end = '\t\t|\t')\n\t\tprint(\"Score de\", Data.current_player['Inhibitor'], '(INHIBITOR)', s_inhibitor)\n\n\t\t# Reinitialize the grid after the end of a game and \n\t\tgrid = create_initial_grid()\n\t\tend = False\n\n\t\ttry:\n\t\t\tretry = str(input(\"\\n [+] Voulez-vous faire une nouvelle partie ? 
(O/N)\\n Choix : \")).upper()\n\n\t\t\tif retry not in ['O', 'N']:\n\t\t\t\traise ValueError\n\t\t\telif retry == 'O':\n\t\t\t\tstop = False\n\t\t\telse:\n\t\t\t\tstop = True\n\t\t\t\t# Reinitialize the players data if the user don't want to continue to play\n\t\t\t\tData.current_player = {'Activator' : colors.GREEN + 'Activator' + colors.STOP, 'Inhibitor' : colors.RED + 'Inhibitor' + colors.STOP}\n\n\t\texcept ValueError:\n\t\t\tprint(\"\\n [+] Veuillez entrer une réponse correcte (O/N)\")", "title": "" }, { "docid": "0fe7fcfed78580e1a97dd9dc40b8519e", "score": "0.5874573", "text": "def startCompetition(self) -> None:\r\n pass", "title": "" }, { "docid": "382a76915f9ad2225b1fcb097393e602", "score": "0.5871021", "text": "def wait_for_next_turn(self):\n while self.is_turn_to_play():\n self.wait(1)\n\n print('Spell round begins')\n\n \"\"\" Start detecting if it's our turn to play again \"\"\"\n while not self.is_turn_to_play():\n self.wait(1)\n\n print('Our turn to play')\n return self", "title": "" }, { "docid": "b1753843bb311c0ee805dafd259bf33b", "score": "0.5855006", "text": "def play(self):\n print(\"== new game ==\")\n player = self.__player\n dealer = self.__dealer\n deck = self.__deck\n player.get(deck.next())\n dealer.get(deck.next())\n player.get(deck.next())\n dealer.get(deck.next(open=False))\n print(\"Dealer :\", dealer)\n print(player.name, \":\", player)\n if player.total == 21:\n print(\"Blackjack!\", player.name, \"wins.\")\n player.earn_chips(2)\n else:\n while player.total < 21 and \\\n Reader.ox(player.name + \": Hit?(o/x) \"):\n player.get(deck.next())\n print(player.name, \":\", player)\n if player.total > 21:\n print(player.name, \"busts!\")\n player.lose_chips(1)\n else:\n while dealer.total <= 16:\n dealer.get(deck.next())\n if dealer.total > 21:\n print(\"Dealer busts!\")\n player.earn_chips(1)\n elif dealer.total == player.total:\n print(\"We draw.\")\n elif dealer.total > player.total:\n print(player.name, \"loses.\")\n player.lose_chips(1)\n else:\n print(player.name, \"wins.\")\n player.earn_chips(1)\n dealer.open()\n print(\"Dealer :\", dealer)\n player.clear()\n dealer.clear()", "title": "" }, { "docid": "d6729f9bf2e83e5e08094ace6bb5ba6e", "score": "0.58527684", "text": "def increment_player(self):\n self.currentPlayer += 1\n if self.currentPlayer > self.maxPlayers:\n self.currentPlayer = 1", "title": "" }, { "docid": "9704952b133d6c47c3388410862ee0ef", "score": "0.58499324", "text": "def first_deal(settings, stats):\n deal_player(settings, stats)\n deal_dealer(settings, stats)\n deal_player(settings, stats)\n deal_dealer(settings, stats)\n stats.hand_dealt = True", "title": "" }, { "docid": "89098ff1b216a5033cac80bd874c442c", "score": "0.5844982", "text": "def move_place_choice(self, game, player, roll):", "title": "" }, { "docid": "3bc1b66cc567f3c3b1f52390d91375e8", "score": "0.58447057", "text": "def play_song(self, name):\n # if self.manager.client.song_playing == \"\":\n self.change_img()\n self.manager.client.play_next_song = False\n msg = self.manager.client.play_song_top(name)\n if msg is not None:\n tk.messagebox.showinfo(TITLE, DOESNT_EXIST)\n self.change_img(mode=PLAY)", "title": "" }, { "docid": "c74e5c0d9070f6dc7700990154cad766", "score": "0.58438975", "text": "def start_game(self):\n pass", "title": "" }, { "docid": "c74e5c0d9070f6dc7700990154cad766", "score": "0.58438975", "text": "def start_game(self):\n pass", "title": "" }, { "docid": "035a60fa6a7794053004d2062cb0efa9", "score": "0.58414215", "text": "def play_as_player(self):\n\t\t\n\t\t# 
Game loop\n\t\twhile True:\n\t\t\t# Choosing a move by passing the move history through the bot.\n\t\t\toutputs = self.think(self.move_history)\n\t\t\tself_move = list(outputs[-1]).index(max(outputs[-1]))\n\t\t\t\n\t\t\t# Getting the user's move.\n\t\t\tuser_move = self.get_move_input(\"Choose rock, paper, or scissors: \")\n\t\t\t\n\t\t\t# Telling the user what the computer played.\n\t\t\tprint(f\"I played {number_to_move[self_move]}.\")\n\t\t\t\n\t\t\t# Checking who won.\n\t\t\tif beats[user_move] == self_move:\n\t\t\t\tprint(\"I win!\")\n\t\t\telif beats[self_move] == user_move:\n\t\t\t\tprint(\"You win.\")\n\t\t\telse:\n\t\t\t\tprint(\"Tie!\")\n\t\t\t# Adding a line break for readability.\n\t\t\tprint()\n\t\t\t\n\t\t\t# Making the ideal output.\n\t\t\tperfect_output = [0] * 3\n\t\t\tperfect_output[beats[user_move]] = 1\n\t\t\t# Correcting the weights.\n\t\t\tself.adjust(self.move_history, outputs, perfect_output)\n\t\t\t\n\t\t\t# Creating the addition to add to the move history.\n\t\t\thistory_addition = [0] * 6\n\t\t\thistory_addition[self_move] = 1\n\t\t\thistory_addition[user_move + 3] = 1\n\t\t\t# Updating the move history by shifting the existing values and adding the addition.\n\t\t\tself.move_history[:-6] = self.move_history[6:]\n\t\t\tself.move_history[-6:] = history_addition", "title": "" }, { "docid": "d1e4b3fe5ebc3e9715edc1029934da30", "score": "0.58411604", "text": "def move_to(self):\n #self.find_wall()\n \n t = self.find_best_way()\n if t:\n click(t)\n else:\n click(random.choice(locations))", "title": "" } ]
7cf2e49eae0d6e3fa469ffd16542de71
Test read 4 series in 6 pages.
[ { "docid": "fda7fba2af675a3944dd133dc5af19ff", "score": "0.5077106", "text": "def test_read_generic_series():\n fname = public_file('tifffile/generic_series.tif')\n with TiffFile(fname) as tif:\n assert tif.byteorder == '<'\n assert len(tif.pages) == 6\n assert len(tif.series) == 4\n # assert series 0 properties\n series = tif.series[0]\n assert series.shape == (3, 20, 20)\n assert series.dtype == numpy.uint8\n assert series.axes == 'IYX'\n assert series.kind == 'generic'\n page = series.pages[0]\n assert page.compression == LZW\n assert page.imagewidth == 20\n assert page.imagelength == 20\n assert page.bitspersample == 8\n assert page.samplesperpixel == 1\n data = tif.asarray(series=0)\n assert data.flags['C_CONTIGUOUS']\n assert data.shape == (3, 20, 20)\n assert data.dtype == numpy.uint8\n assert tuple(data[:, 9, 9]) == (19, 90, 206)\n assert_aszarr_method(tif, data, series=0)\n # assert series 1 properties\n series = tif.series[1]\n assert series.shape == (10, 10, 3)\n assert series.dtype == numpy.float32\n assert series.axes == 'YXS'\n assert series.kind == 'generic'\n page = series.pages[0]\n assert page.photometric == RGB\n assert page.compression == LZW\n assert page.imagewidth == 10\n assert page.imagelength == 10\n assert page.bitspersample == 32\n assert page.samplesperpixel == 3\n data = tif.asarray(series=1)\n assert isinstance(data, numpy.ndarray)\n assert data.flags['C_CONTIGUOUS']\n assert data.shape == (10, 10, 3)\n assert data.dtype == numpy.float32\n assert round(abs(data[9, 9, 1] - 214.5733642578125), 7) == 0\n assert_aszarr_method(tif, data, series=1)\n # assert series 2 properties\n series = tif.series[2]\n assert series.shape == (20, 20, 3)\n assert series.dtype == numpy.uint8\n assert series.axes == 'YXS'\n assert series.kind == 'generic'\n page = series.pages[0]\n assert page.photometric == RGB\n assert page.compression == LZW\n assert page.imagewidth == 20\n assert page.imagelength == 20\n assert page.bitspersample == 8\n assert page.samplesperpixel == 3\n data = tif.asarray(series=2)\n assert isinstance(data, numpy.ndarray)\n assert data.flags['C_CONTIGUOUS']\n assert data.shape == (20, 20, 3)\n assert data.dtype == numpy.uint8\n assert tuple(data[9, 9, :]) == (19, 90, 206)\n assert_aszarr_method(tif, data, series=2)\n # assert series 3 properties\n series = tif.series[3]\n assert series.shape == (10, 10)\n assert series.dtype == numpy.float32\n assert series.axes == 'YX'\n assert series.kind == 'generic'\n page = series.pages[0]\n assert page.compression == LZW\n assert page.imagewidth == 10\n assert page.imagelength == 10\n assert page.bitspersample == 32\n assert page.samplesperpixel == 1\n data = tif.asarray(series=3)\n assert isinstance(data, numpy.ndarray)\n assert data.flags['C_CONTIGUOUS']\n assert data.shape == (10, 10)\n assert data.dtype == numpy.float32\n assert round(abs(data[9, 9] - 223.1648712158203), 7) == 0\n assert_aszarr_method(tif, data, series=3)\n assert__str__(tif)", "title": "" } ]
[ { "docid": "1dc788c32847d7e6de2a26a47e6ac0a4", "score": "0.61458886", "text": "def test_get_page_size_12_page17(self):\n response = self.client.get(self.url + \"?page_size=12&page=6\")\n self.assertEqual(len(response.data[\"results\"]), 3)", "title": "" }, { "docid": "2780c1bdaa75378fe2e2813a517eb27d", "score": "0.6130678", "text": "def test_nlp_page_reader():\n reader = ShardSegment()\n reader.open(NLP_FILE_NAME + \"0\")\n\n fields = reader.candidate_fields\n logger.info(\"fields: {}\".format(fields))\n\n reader.category_field = \"rating\"\n\n info = reader.read_category_info()\n logger.info(\"category info: {}\".format(info))\n\n img1 = reader.read_at_page_by_id(0, 0, 1)\n logger.info(\"img1 len: {}, img1[0] len: {}, img1[0]: {}\".format(len(img1), len(img1[0]), img1[0]))\n\n img2 = reader.read_at_page_by_name(\"7\", 0, 1)\n logger.info(\"img2 len: {}, img2[0] len: {}, img2[0]: {}\".format(len(img2), len(img2[0]), img2[0]))\n\n paths = [\"{}{}\".format(NLP_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n os.remove(\"{}\".format(x))\n os.remove(\"{}.db\".format(x))", "title": "" }, { "docid": "eb3956321dcb6b5a71edff3f458d477b", "score": "0.58482623", "text": "def test_read_3(self):\n temp1 = \"30878\\n\"\n temp2, temp3, temp4 = netflix_read(temp1, 0)\n self.assertEqual(temp2, 30878)\n self.assertEqual(temp3, 0)\n self.assertEqual(temp4, 0)", "title": "" }, { "docid": "10fbddd0e19c2720c45cf12d65b63530", "score": "0.5782139", "text": "def test_get_page_list(self):\n pass", "title": "" }, { "docid": "8b4dc06a86349cc1947625441545b016", "score": "0.57622683", "text": "def test_get_page_size_12(self):\n response = self.client.get(self.url + \"?page_size=12\")\n self.assertEqual(len(response.data[\"results\"]), 12)", "title": "" }, { "docid": "1500829fc4a31e3a6eb88172b02eab5a", "score": "0.5753963", "text": "def test_issue_pages_iterator():\n data = random_data(numpy.int8, (8, 219, 301))\n with TempFileName('page_iterator') as fname:\n imwrite(fname, data[0])\n imwrite(\n fname,\n data,\n photometric=MINISBLACK,\n append=True,\n metadata={'axes': 'ZYX'},\n )\n imwrite(fname, data[-1], append=True)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 10\n assert len(tif.series) == 3\n page = tif.pages[1]\n assert isinstance(page, TiffPage)\n assert page.is_contiguous\n assert page.photometric == MINISBLACK\n assert page.imagewidth == 301\n assert page.imagelength == 219\n assert page.samplesperpixel == 1\n # test read series 1\n series = tif.series[1]\n assert len(series._pages) == 1\n assert len(series.pages) == 8\n image = series.asarray()\n assert_array_equal(data, image)\n for i, page in enumerate(series.pages):\n assert page is not None\n im = page.asarray()\n assert_array_equal(image[i], im)\n assert__str__(tif)", "title": "" }, { "docid": "f4c533590464945073901911297e9680", "score": "0.5713999", "text": "def test_timeseries_page(page: Page):\n page.goto(f'{BASE_URL}/Time_series')\n\n page.get_by_text('Running...').wait_for(state='detached')\n\n expect(page).to_have_title('Time_series · Streamlit')\n\n expect(\n page.get_by_text('Add your input data in the left panel to continue')\n ).to_be_visible()\n\n page.locator('label').filter(\n has_text='Load example data').locator('span').click()\n\n expect(page.get_by_text('Select a method to continue')).to_be_visible()\n\n page.locator('label').filter(has_text='RISE').locator('span').click()\n\n page.get_by_text('Running...').wait_for(state='detached', timeout=45_000)\n\n for selector in (\n 
page.get_by_role('heading', name='RISE').get_by_text('RISE'),\n # first image\n page.get_by_role('heading', name='winter').get_by_text('winter'),\n page.get_by_role('img', name='0').first,\n # second image\n page.get_by_role('heading', name='summer').get_by_text('summer'),\n page.get_by_role('img', name='0').nth(1),\n ):\n expect(selector).to_be_visible()", "title": "" }, { "docid": "0568f99bf00cc147eb65bd078f8fc040", "score": "0.56657195", "text": "def test_read_3(self):\n s_line = \"100253:\\n\"\n i, j = netflix_read(s_line)\n self.assertEqual(i, True)\n self.assertEqual(j, 100253)", "title": "" }, { "docid": "89d51a1be4dc8a7b6cb862f714ffd8b2", "score": "0.5656401", "text": "def test_mkv_page_reader():\n reader = ShardSegment()\n reader.open(MKV_FILE_NAME + \"0\")\n\n fields = reader.candidate_fields\n logger.info(\"fields: {}\".format(fields))\n\n reader.category_field = \"id\"\n\n info = reader.read_category_info()\n logger.info(\"category info: {}\".format(info))\n\n img1 = reader.read_at_page_by_id(0, 0, 1)\n logger.info(\"img1 len: {}, img1[0] len: {}, img1[0]: {}\".format(len(img1), len(img1[0]), img1[0]))\n\n img2 = reader.read_at_page_by_name(\"2\", 0, 1)\n logger.info(\"img2 len: {}, img2[0] len: {}, img2[0]: {}\".format(len(img2), len(img2[0]), img2[0]))", "title": "" }, { "docid": "ee598c6aa98929251c44113180c8c58d", "score": "0.5630376", "text": "def test_section5_sub2_first(self):\n metrics = [\"Top5\", \"Top20\", \"Top100\"]\n ground_truth = [73.8, 84.27, 89.34]\n\n output_file = 'runs/run.nq-test.dkrr.trec'\n json_file = 'runs/run.nq-test.dkrr.json'\n self.temp_files.append(output_file)\n self.temp_files.append(json_file)\n\n # retrieval\n run_cmd = f'python -m pyserini.search.faiss \\\n --index wikipedia-dpr-dkrr-nq \\\n --topics nq-test \\\n --encoder castorini/dkrr-dpr-nq-retriever \\\n --output {output_file} --query-prefix question: \\\n --threads 72 --batch-size 72 \\\n --hits 100'\n status = os.system(run_cmd)\n self.assertEqual(status, 0)\n\n # conversion\n convert_cmd = f'python -m pyserini.eval.convert_trec_run_to_dpr_retrieval_run \\\n --topics nq-test \\\n --index wikipedia-dpr \\\n --input {output_file} \\\n --output {json_file}'\n status = os.system(convert_cmd)\n self.assertEqual(status, 0)\n\n # evaluation\n eval_cmd = f'python -m pyserini.eval.evaluate_dpr_retrieval \\\n --retrieval {json_file} \\\n --topk 5 20 100'\n stdout, stderr = run_command(eval_cmd)\n \n scores = [] \n for mt in metrics: \n scores.append(parse_score_qa(stdout, mt, 4) * 100)\n\n for score in zip(scores, ground_truth):\n self.assertAlmostEqual(score[0], score[1], delta=0.02)", "title": "" }, { "docid": "5515f71d43ef1769e1abcc982e4a3e79", "score": "0.5595531", "text": "def test_cv_page_reader():\n reader = ShardSegment()\n reader.open(CV_FILE_NAME + \"0\")\n fields = reader.candidate_fields\n logger.info(\"fields: {}\".format(fields))\n\n reader.category_field = \"label\"\n\n info = reader.read_category_info()\n logger.info(\"category info: {}\".format(info))\n\n img1 = reader.read_at_page_by_id(0, 0, 1)\n logger.info(\"img1 len: {}, img1[0] len: {}\".format(len(img1), len(img1[0])))\n\n img2 = reader.read_at_page_by_name(\"822\", 0, 1)\n logger.info(\"img2 len: {}, img2[0] len: {}\".format(len(img2), len(img2[0])))\n\n paths = [\"{}{}\".format(CV_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n os.remove(\"{}\".format(x))\n os.remove(\"{}.db\".format(x))", "title": "" }, { "docid": "dc0da422e89fe4c22cacae0bde6bf408", "score": "0.555477", "text": "def 
test_mkv_page_reader_random():\n reader = ShardSegment()\n reader.open(MKV_FILE_NAME + \"0\")\n\n fields = reader.candidate_fields\n logger.info(\"fields: {}\".format(fields))\n\n reader.category_field = \"id\"\n\n names = random.sample(range(1, 6), 5)\n for name in names:\n img2 = reader.read_at_page_by_name(str(name), 0, 2)\n logger.info(\"name: {}, img2[0] len: {}\".format(str(name), len(img2[0])))\n\n paths = [\"{}{}\".format(MKV_FILE_NAME, str(x).rjust(1, '0'))\n for x in range(FILES_NUM)]\n for x in paths:\n os.remove(\"{}\".format(x))\n os.remove(\"{}.db\".format(x))", "title": "" }, { "docid": "696f612b47b4e369add558cf2ee154e3", "score": "0.55434", "text": "def test_response_pagination(self):\n # Enough entries for three pages of results:\n for m in range(1, 5):\n for d in range(1, 27):\n EntryFactory(diary_date=make_date(f\"1660-{m:02}-{d:02}\"))\n\n url = reverse(\"api:entry-list\", kwargs={\"format\": \"json\"})\n response = self.client.get(url + \"?page=2\", SERVER_NAME=\"example.com\")\n\n self.assertEqual(response.data[\"totalResults\"], 104)\n self.assertEqual(response.data[\"totalPages\"], 3)\n self.assertEqual(\n response.data[\"nextPageURL\"],\n f\"http://example.com{url}?page=3\",\n )\n self.assertEqual(\n response.data[\"previousPageURL\"],\n f\"http://example.com{url}\",\n )\n self.assertEqual(len(response.data[\"results\"]), 50)", "title": "" }, { "docid": "9cb4a70cfca02f4def17587523f40797", "score": "0.5519001", "text": "def test_read_1(self):\n s_line = \"1:\\n\"\n i, j = netflix_read(s_line)\n self.assertEqual(i, True)\n self.assertEqual(j, 1)", "title": "" }, { "docid": "eaa423617c8e6fc18abb4d4c8f061385", "score": "0.55145395", "text": "def ex_8():\n\n # Initialize reader object: df_reader\n df_reader = pd.read_csv(ind_pop, chunksize=10)\n\n # Print two chunks\n pp(next(df_reader))\n pp(next(df_reader))", "title": "" }, { "docid": "d9039380df21c59b85682f2f06fe1ba1", "score": "0.5496642", "text": "def test01():\n sobj = ScrapeNPR(\"scrape_npr.log\", \"DEBUG\")\n pprint(sobj.scrape())", "title": "" }, { "docid": "914007556f05bdf0e30055d1a336928c", "score": "0.5493609", "text": "def test_get_many(self):\n\n with open(os.path.join(RESOURCE_PATH, 'subset2_reduced.csv'), 'rU') as stream_handle:\n\n # test the telemetered particle stream\n parser = AuvEngAuvParser(stream_handle,\n self.exception_callback,\n is_telemetered=True)\n\n particles = parser.get_records(200)\n\n self.assert_particles(particles, 'subset2_reduced_telem.yml', RESOURCE_PATH)\n\n self.assertEqual(self.exception_callback_value, [])\n\n with open(os.path.join(RESOURCE_PATH, 'subset2_reduced.csv'), 'rU') as stream_handle:\n\n # test the recovered particle stream\n parser = AuvEngAuvParser(stream_handle,\n self.exception_callback,\n is_telemetered=False)\n\n particles = parser.get_records(200)\n\n self.assert_particles(particles, 'subset2_reduced_recov.yml', RESOURCE_PATH)\n\n self.assertEqual(self.exception_callback_value, [])", "title": "" }, { "docid": "15f2821807e8eb6cfd50f5e9e4c73d38", "score": "0.54853755", "text": "def test_response_pagination(self):\n cat = Category.add_root(title=\"Animals\", slug=\"animals\")\n for n in range(1, 102):\n TopicFactory(categories=[cat])\n\n url = reverse(\"api:topic-list\", kwargs={\"format\": \"json\"})\n response = self.client.get(url + \"?page=2\", SERVER_NAME=\"example.com\")\n\n self.assertEqual(response.data[\"totalResults\"], 101)\n self.assertEqual(response.data[\"totalPages\"], 3)\n self.assertEqual(\n response.data[\"nextPageURL\"],\n 
f\"http://example.com{url}?page=3\",\n )\n self.assertEqual(\n response.data[\"previousPageURL\"],\n f\"http://example.com{url}\",\n )\n self.assertEqual(len(response.data[\"results\"]), 50)", "title": "" }, { "docid": "a0182838196e73db60d86e30690a7c8c", "score": "0.546408", "text": "def test_get_next_n():\n pass", "title": "" }, { "docid": "41f56ab5a37711988a054c7015fbf11f", "score": "0.5454608", "text": "def test_read_1(self):\n temp1 = \"12345:\\n\"\n temp2, temp3, temp4 = netflix_read(temp1, 0)\n self.assertEqual(temp2, 12345)\n self.assertEqual(temp3, 1)\n self.assertEqual(temp4, 12345)", "title": "" }, { "docid": "954a73d7d49edbc7c30b07e649daa5c7", "score": "0.54481816", "text": "def test_one_page_turn(self):\n result = drawing_book(6, 2)\n self.assertEquals(result, 1)", "title": "" }, { "docid": "769245f2df9f97b096a634e0d7f7bfad", "score": "0.5431986", "text": "def scrape_many(start=1,\n step=250,\n pages=7,\n second_delay=1,\n url_format=link):\n movies = []\n for i in range(pages):\n print(\"Scraping results starting at {}\".format(start))\n movies.extend(scrape_movies(url_format.format(start=start)))\n start += step\n # time.sleep(second_delay)\n return movies", "title": "" }, { "docid": "7e32010f861078625529932f72f66a42", "score": "0.5416321", "text": "def test_read_2(self):\n s_line = \"1564536453\\n\"\n i, j = netflix_read(s_line)\n self.assertEqual(i, False)\n self.assertEqual(j, 1564536453)", "title": "" }, { "docid": "29f881b16651a0eeb6fba5dd4087fbf7", "score": "0.5403731", "text": "def test_pagination(self):\n create_articles(11)\n second_article = Article.get_articles(page=1)[9]\n first_article = Article.get_articles(page=2)[0]\n self.assertEqual(second_article.id, 2)\n self.assertEqual(first_article.id, 1)", "title": "" }, { "docid": "36f2d94a707d033b8fe9360683e83864", "score": "0.5384126", "text": "def test_read_selection(chunkmode):\n fname = public_file('tifffile/multiscene_pyramidal.ome.tif')\n selection = (8, slice(16, 17), slice(None), slice(51, 99), slice(51, 99))\n # series 0\n assert_array_equal(\n imread(fname)[8, 16:17, :, 51:99, 51:99],\n imread(fname, selection=selection, chunkmode=chunkmode),\n )\n # level 1\n assert_array_equal(\n imread(fname, series=0, level=1)[8, 16:17, :, 51:99, 51:99],\n imread(\n fname, series=0, level=1, selection=selection, chunkmode=chunkmode\n ),\n )\n # page 99\n assert_array_equal(\n imread(fname, key=99)[51:99, 51:99],\n imread(\n fname,\n key=99,\n selection=(slice(51, 99), slice(51, 99)),\n chunkmode=chunkmode,\n ),\n )\n # series 1\n assert_array_equal(\n imread(fname, series=1)[51:99, 51:99],\n imread(\n fname,\n series=1,\n selection=(slice(51, 99), slice(51, 99)),\n chunkmode=chunkmode,\n ),\n )", "title": "" }, { "docid": "20962629595a14b58a55da7e5ffcf0c7", "score": "0.53697824", "text": "def test_get_page_filter(self):\n response = self.client.get(self.url + \"?name=Man\")\n self.assertEqual(len(response.data[\"results\"]), 4)", "title": "" }, { "docid": "d945bf5b3f7d33e0f373adf2c7f7340f", "score": "0.53656983", "text": "def test_read_ome_4d_series():\n # 4D (7 time points, 5 focal planes)\n fname = public_file('OME/bioformats-artificial/4D-series.ome.tiff')\n with TiffFile(fname) as tif:\n assert tif.is_ome\n assert tif.byteorder == '>'\n assert len(tif.pages) == 35\n assert len(tif.series) == 1\n # assert page properties\n page = tif.pages.first\n assert page.is_contiguous\n assert page.tags['Software'].value[:15] == 'OME Bio-Formats'\n assert page.compression == NONE\n assert page.imagewidth == 439\n assert 
page.imagelength == 167\n assert page.bitspersample == 8\n assert page.samplesperpixel == 1\n # assert series properties\n series = tif.series[0]\n assert series.shape == (7, 5, 167, 439)\n assert series.dtype == numpy.int8\n assert series.axes == 'TZYX'\n assert series.kind == 'ome'\n assert not series.is_multifile\n # assert data\n data = tif.asarray()\n assert isinstance(data, numpy.ndarray)\n assert data.shape == (7, 5, 167, 439)\n assert data.dtype == numpy.int8\n assert data[6, 4, 158, 428] == 91\n assert_aszarr_method(tif, data)\n assert__str__(tif)", "title": "" }, { "docid": "ef129f4dcc51070f2dcba17d74086e0d", "score": "0.53507423", "text": "def test_parse_sections():", "title": "" }, { "docid": "0821d35084afb009382cb99d6a14cb31", "score": "0.53434575", "text": "def test_get_many(self):\n log.debug('===== START TEST GET MANY RECOVERED =====')\n in_file = self.open_file(FILE_0_863)\n parser = self.create_parser(RECOVERED_PARTICLE_CLASS, in_file)\n\n # In a single read, get all particles for this file.\n result = parser.get_records(RECORDS_FILE_0_863)\n\n # self.assertEqual(result, expected_particle)\n self.assert_particles(result, YML_0_863, RESOURCE_PATH)\n self.assertListEqual(self.exception_callback_value, [])\n in_file.close()\n\n log.debug('===== START TEST GET MANY TELEMETERED =====')\n in_file = self.open_file(FILE_7_1061)\n parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)\n\n # In a single read, get all particles for this file.\n result = parser.get_records(RECORDS_FILE_7_1061)\n\n self.assert_particles(result, YML_7_1061, RESOURCE_PATH)\n self.assertListEqual(self.exception_callback_value, [])\n\n in_file.close()\n log.debug('===== END TEST GET MANY =====')", "title": "" }, { "docid": "7e37a351056917e958661c570e81fedb", "score": "0.5334443", "text": "def testIterator4():\r\n ITER_TYPE = 'SEQ'\r\n ITER_RETURN = 'TESTCASE'\r\n ITER_RANGE = None\r\n \r\n imported_data = FileSetup('MedData.pkl')\r\n\r\n py_elm = PyELM.PyELM()\r\n py_elm.AddELM('TESTCASE', imported_data.ELM)\r\n py_elm.LoadSeqs(imported_data.seq)\r\n\r\n\r\n counter = 0\r\n for this_val in py_elm.GetIter(ITER_TYPE, ITER_RETURN, ITER_RANGE):\r\n nose.tools.assert_equal(this_val, imported_data.SimpleAns[counter] == 1,\r\n 'Sequence Iteration Failed')\r\n counter += 1", "title": "" }, { "docid": "93a37d8cf10ade070c085cfb07352d77", "score": "0.5322022", "text": "def test_4_paragraphs(self):\n expected_length = 4\n actual_length = len(paragraphs(self.panda_facts_file))\n self.assertEqual(actual_length, expected_length, msg=\"I don't know if facts are counted :(\")", "title": "" }, { "docid": "712a87f4dbfd2eb3ef201d8931643d29", "score": "0.5321416", "text": "def test_nlp_file_reader():\n dataset = ShardReader()\n dataset.open(NLP_FILE_NAME + \"0\")\n dataset.launch()\n index = 0\n iterator = dataset.get_next()\n while iterator:\n for _, raw in iterator:\n logger.info(\"#item{}: {}\".format(index, raw))\n index += 1\n iterator = dataset.get_next()\n dataset.finish()\n dataset.close()", "title": "" }, { "docid": "d6ef0c3674c09e3f4c5061c14df2e673", "score": "0.531829", "text": "def test_get_random_reads(self):\n\n genome = fau.read_genome(full_file_name)\n\n random_reads = dnau.get_random_reads(genome, 5, 100)\n\n self.assertEqual(len(random_reads), 5)\n\n for read in random_reads:\n self.assertEqual(len(read), 100)", "title": "" }, { "docid": "45a43918392c2f8b55b147fb84411b5f", "score": "0.5313588", "text": "def testPageSize( self ):\r\n self.assertTrue( pykd.getPageSize() >= 4*1024 )", "title": "" }, { 
"docid": "d1779fc52d2891e4dc7032595c2b92b3", "score": "0.53042805", "text": "def test_valid_page_handling(self):\n response = self.client.get(\n reverse('test-pagination', kwargs={'page': 2}))\n self.assertEqual(\"5,6,7,8,9\", response.content.decode())", "title": "" }, { "docid": "e5c73fc363d907f6a7e1846f08e26acf", "score": "0.5290169", "text": "def series_chapter_num_tester(test_case, series_url, chapter_nums,\n series_cls, in_progress=False):\n series = series_cls(series_url)\n for chapter in series.chapters:\n test_case.assertIn(chapter.chapter, chapter_nums)\n if not in_progress:\n test_case.assertEqual(len(chapter_nums), len(series.chapters))\n else:\n test_case.assertLessEqual(len(chapter_nums), len(series.chapters))", "title": "" }, { "docid": "75de8e7970f78cf9e7b738b3f8a54625", "score": "0.5279182", "text": "def test_scrape_data():\n\n words = Words()\n\n # Add ERPs and terms\n words.set_erps(['N400', 'P600'])\n words.set_terms(['language', 'memory'])\n words.set_exclusions(['cell', ''])\n\n #words.scrape_data(db='pubmed', retmax='5')\n\n assert True", "title": "" }, { "docid": "16f829dfd1644cc5e41b7038b37d2bf8", "score": "0.5274602", "text": "def test_read_2(self):\n temp1 = \"12345\\n\"\n temp2, temp3, temp4 = netflix_read(temp1, 0)\n self.assertEqual(temp2, 12345)\n self.assertEqual(temp3, 0)\n self.assertEqual(temp4, 0)", "title": "" }, { "docid": "cb0d5705c9231bc45cb28b15e009d39b", "score": "0.5269433", "text": "def test_numbers_5():\n print('test_numbers_5 <============================ actual test code')\n assert fp.itr('5') == 26", "title": "" }, { "docid": "35b2ff5ebe7678925a9e72945c71510d", "score": "0.52607155", "text": "def test_read_page_basic():\n f = Path(\"./docs/pages/index.md\")\n page = hastie.content.read_page(f)\n assert page[\"filename\"] == Path(\"docs/pages/index.md\")\n assert page[\"title\"] == \"Welcome to Hastie\"", "title": "" }, { "docid": "4eb823a2e0180031be740e54d06aa194", "score": "0.5259255", "text": "def test_search_multiple_pages(self):\n results = list(Film.search(\"terminator\", max_page=3))\n single_film = results[0]\n self.assertEqual(len(results), 30)\n self.assertIsInstance(single_film, Film)\n self.assertEqual(single_film.title, \"Terminator\")\n self.assertEqual(single_film.url, \"http://www.filmweb.pl/Terminator\")", "title": "" }, { "docid": "2f3d4ffb155fe9329f617d6b398d5125", "score": "0.5251389", "text": "def test_countdots():\r\n assert dp.countdots(url1) == 6\r\n assert dp.countdots(url2) == 2", "title": "" }, { "docid": "b57f35119329081e4b279787a80be92d", "score": "0.52511907", "text": "def test_total_events_thrown(self, h5reader):\n h5reader.open()\n for i, event in enumerate(h5reader):\n assert event.total_events_thrown == 3*(i+1)\n h5reader.close()", "title": "" }, { "docid": "d7fc258eb59ccc02cc74b08aeb6e6095", "score": "0.52371866", "text": "def test_read_function(url):\n web_data = DataReader(url)\n data, columns = web_data.read()\n assert_equals(True, len(data) > 0)\n assert_equals(True, len(columns) > 0)\n assert_equals(252, len(data))\n assert_equals(15, len(columns))\n assert_equals(23, data.loc[0, 'Age (years)'])\n assert_equals('Weight (lbs)', columns[3])\n return data, columns", "title": "" }, { "docid": "22da177ad5cacb4bbc31107c2bd37770", "score": "0.5223515", "text": "def test_number_of_rows(self):\n sis_term_id = \"2013-spring\"\n label = \"rad\"\n\n week = 1\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 0)\n week = 2\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, 
week, label)),\n 0)\n week = 3\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 20)\n week = 4\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 20)\n week = 5\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 20)\n week = 6\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 0)\n week = 7\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 0)\n week = 8\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 20)\n week = 9\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 0)\n week = 10\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 20)\n week = 11\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 0)\n week = 12\n self.assertEqual(\n get_row_count(get_view_name(sis_term_id, week, label)),\n 0)", "title": "" }, { "docid": "ae809ea5ad0a9d3a1cc4903ab918eca7", "score": "0.52218205", "text": "def test_bug_9692(self):\n in_file = self.open_file(\"20140805.metbk2A.log\")\n parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)\n\n # In a single read, get all particles for this file.\n result = parser.get_records(5)\n\n self.assertEqual(len(result), 4)\n self.assertListEqual(self.exception_callback_value, [])\n in_file.close()", "title": "" }, { "docid": "d0990fe8bd98b8958e45dd6cd448494c", "score": "0.5218228", "text": "def get_pages(self):\n pages = []\n for i in range(1, 6):\n page = requests.get((self.url).format(i))\n pages.append(page)\n return pages", "title": "" }, { "docid": "7e432c2f1e63ebe1b73557134455f313", "score": "0.5217627", "text": "def read_naver(self, code, company, pages_to_fetch):", "title": "" }, { "docid": "0754229ac003aa14743162eeeed992fa", "score": "0.52079034", "text": "def test01():\n sobj = ScrapeNYT(\"scrape_nyt.log\", \"DEBUG\")\n pprint(sobj.scrape())", "title": "" }, { "docid": "401b53cabb2db90b87539611e5386691", "score": "0.5205459", "text": "def test_issue_pages_number():\n fname = public_file('tifffile/100000_pages.tif')\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 100000\n assert__str__(tif, 0)", "title": "" }, { "docid": "018d764b08991d3e8e1f4b608352fa6b", "score": "0.5201358", "text": "def test_paginate(self):\n factory = RequestFactory()\n articles = self.get_article_set()\n url = '/dummy/page'\n\n for test in self.get_scenarios():\n params, page, prev_params, next_params, results = test\n request = factory.get(url + '?' 
+ QueryDict(params).urlencode())\n paginator, queryset = paginate(request, articles, 3)\n self.assertEqual(paginator.this_page.number, page)\n self.assertEqual(queryset, results)\n if next_params:\n next_dict = QueryDict(next_params)\n pag_dict = QueryDict(urlparse(paginator.next_url).query)\n self.assertEqual(next_dict, pag_dict)\n if prev_params:\n prev_dict = QueryDict(prev_params)\n pag_dict = QueryDict(urlparse(paginator.previous_url).query)\n self.assertEqual(prev_dict, pag_dict)\n\n # there is no page 4 so it throws an error\n request = factory.get('/dummy?p=4')\n with self.assertRaises(Http404):\n paginate(request, articles, 3)\n\n # allow empty works\n request = factory.get('/dummy')\n paginator, queryset = paginate(request, [], 3, allow_empty=True)\n self.assertEqual(queryset, [])\n\n # don't allow empty\n request = factory.get('/dummy')\n with self.assertRaises(Http404):\n paginate(request, [], 3, allow_empty=False)", "title": "" }, { "docid": "d12285b9ff691107c9a36d5b3de53a61", "score": "0.51953614", "text": "def test_S6(self):\n pass", "title": "" }, { "docid": "c704f2abdc22b83cdbe7e3c68da61751", "score": "0.5194507", "text": "def test_implicit_page_handling(self):\n response = self.client.get(\n reverse('test-pagination'))\n self.assertEqual(\"0,1,2,3,4\", response.content.decode())", "title": "" }, { "docid": "4f4cfffa5e99b480501065e6754e3b88", "score": "0.51883245", "text": "def test_get_page_detail(self):\n pass", "title": "" }, { "docid": "d838af7cd1f4612715aff1d1eb452f02", "score": "0.5186225", "text": "def test_basic(self):\n mission = MissionFactory()\n pages = PageFactory.create_batch(3, mission=mission)\n users = UserFactory.create_batch(2)\n\n page1 = mission.next_page_for_user(users[0])\n page2 = mission.next_page_for_user(users[1])\n\n self.assertIsNotNone(page1)\n self.assertIsNotNone(page2)\n self.assertNotEqual(page1.pk, page2.pk)\n self.assertEqual(page1.pk, pages[0].pk)\n self.assertEqual(page2.pk, pages[1].pk)", "title": "" }, { "docid": "4016ef3e7dc7ffa07e2dabd04f89bfd9", "score": "0.51831096", "text": "def ztest_get_3_lines(self):\n gems_extractor = gems_feeder.GEMSExtractor(start_time = \"11.308.15.02.03\", end_time = \"11.308.15.12.03\", severity = [\"A\",\"W\",\"I\"], facility = ['DVB_EUR_UPLINK'])\n \n cpt = 0\n \n lines = []\n \n for line in gems_extractor:\n if cpt < 3:\n lines.append(line)\n else:\n #quit loop after 3 iters\n break\n \n cpt += 1\n \n self.assertEquals({'facility': u'DVB_EUR_UPLINK', \\\n 'time': u'11.308.15.02.18.149', \\\n 'agent': u'LogFileAgent', \\\n 'host': u'eumet01.localdomain', \\\n 'msg': u'xferlog: Entry detected: Fri Nov 4 15:01:18 2011 0 10.60.200.21 200429 /home/eumetsat/data/groups/EPS-METOP-ASCA-L1/ASCA_SZO_1B_M02_20111104135702Z_20111104135959Z_N_O_20111104150024Z.tmp b _ i r eumetsat ftp 0 * c',\\\n 'lvl': u'I'}, \\\n lines[0], \"First lines is not equal to what it should\") \n \n self.assertEquals(lines[2]['time'], '11.308.15.02.18.149') \n \n self.assertEqual(\"A\", \"A\",\"A is not equal to A\")\n \n self.assertEquals(lines[2]['msg'], 'xferlog: Entry detected: Fri Nov 4 15:01:19 2011 3 137.129.9.61 369124 /home/eumetsat/data/retim/groups/retim-1567/HAJX81_ARPM_041200.20111104150058_P1567PTD6CF_REUFMFI.369124.GB.tmp b _ i r retim ftp 0 * c')", "title": "" }, { "docid": "a19718cba09c7dcbc75880e2005a3c23", "score": "0.51817846", "text": "def test_reading_index(self):\n resp = self.client.get(url_for('readings.all_readings'))\n self.assertTrue('All Readings' in str(resp.data))", "title": "" }, { "docid": 
"8a86cf366f76debdc02fbfb1f0adfdc9", "score": "0.51803213", "text": "def test_read_ome_multi_channel_4d_series():\n # 4D (7 time points, 5 focal planes, 3 channels)\n fname = public_file(\n 'OME/bioformats-artificial/multi-channel-4D-series.ome.tiff'\n )\n with TiffFile(fname) as tif:\n assert tif.is_ome\n assert tif.byteorder == '>'\n assert len(tif.pages) == 105\n assert len(tif.series) == 1\n # assert page properties\n page = tif.pages.first\n assert page.is_contiguous\n assert page.tags['Software'].value[:15] == 'OME Bio-Formats'\n assert page.compression == NONE\n assert page.imagewidth == 439\n assert page.imagelength == 167\n assert page.bitspersample == 8\n assert page.samplesperpixel == 1\n # assert series properties\n series = tif.series[0]\n assert series.shape == (7, 3, 5, 167, 439)\n assert series.dtype == numpy.int8\n assert series.axes == 'TCZYX'\n assert series.kind == 'ome'\n assert series.get_shape(False) == (7, 3, 5, 167, 439, 1)\n assert series.get_axes(False) == 'TCZYXS'\n assert not series.is_multifile\n # assert data\n data = tif.asarray()\n assert isinstance(data, numpy.ndarray)\n assert data.shape == (7, 3, 5, 167, 439)\n assert data.dtype == numpy.int8\n assert data[6, 0, 4, 158, 428] == 91\n assert_aszarr_method(tif, data)\n # don't squeeze\n data = tif.asarray(squeeze=False)\n assert_aszarr_method(tif, data, squeeze=False)\n assert__str__(tif)", "title": "" }, { "docid": "2a30bd427e3eb445a1385e2902f08d47", "score": "0.5174178", "text": "def testGetNumPlots(self):\n assert self.testDoc.GetNumPlots() == 1", "title": "" }, { "docid": "44b8dba0c78ee302378572433105124b", "score": "0.5173921", "text": "def test_get_series(self, urlopen):\n self.prepare_urlopen(urlopen,\n http_response=sp500_obs_call.response)\n serie = self.fred.get_series('SP500', observation_start='9/2/2014',\n observation_end='9/5/2014')\n urlopen.assert_called_with(sp500_obs_call.url)\n self.assertEqual(serie.loc['9/2/2014'], 2002.28)\n self.assertEqual(len(serie), 4)", "title": "" }, { "docid": "3441360b5edf390670e30e15d444a1c1", "score": "0.51720864", "text": "def testIterator3():\r\n ITER_TYPE = 'SEQ'\r\n ITER_RETURN = 'Sequence'\r\n\r\n imported_data = FileSetup('MedData.pkl')\r\n\r\n py_elm = PyELM.PyELM()\r\n py_elm.LoadSeqs(imported_data.seq)\r\n\r\n counter = 0\r\n for this_val in py_elm.GetIter(ITER_TYPE, ITER_RETURN, None):\r\n nose.tools.assert_equal(len(this_val),\r\n len(imported_data.seq[counter]),\r\n 'Sequence Iteration Failed')\r\n counter += 1\r\n\r\n for this_val in py_elm.GetIter(ITER_TYPE, ITER_RETURN, [0, 200]):\r\n nose.tools.assert_equal(len(this_val),\r\n 200, 'Sequence Iteration Failed')", "title": "" }, { "docid": "0de9f3da6aced1b734ba1bcb9d067d80", "score": "0.51713866", "text": "def test_6(self):\n pass", "title": "" }, { "docid": "57a45a8de47bf336d740cd97f6374014", "score": "0.51648605", "text": "def test_bug_13106(self):\n\n in_file = self.open_file(FILE_8_1440)\n parser = self.create_parser(TELEMETERED_PARTICLE_CLASS, in_file)\n\n # In a single read, get all particles for this file.\n result = parser.get_records(RECORDS_FILE_8_1440)\n self.assert_particles(result, YML_8_1440, RESOURCE_PATH)\n self.assertEqual(len(result), RECORDS_FILE_8_1440)\n self.assertListEqual(self.exception_callback_value, [])\n in_file.close()", "title": "" }, { "docid": "e1d33de07f04c40c966d9ec6894ece45", "score": "0.5161511", "text": "def test_items_per_page(self):\n eq_(settings.ITEMS_PER_PAGE % 3, 0)", "title": "" }, { "docid": "08c18166510e9b7aff9bfa05046a52ab", "score": "0.51606053", 
"text": "def test_get_record_gen(tub):\n records = tub.get_record_gen()\n assert len([ next(records) for x in range(20) ]) == 20", "title": "" }, { "docid": "2898b78e6bfea8d9420cca197bd82858", "score": "0.5153502", "text": "def test04_range(self):\n\n if common.verbose:\n print('\\n', '-=' * 30)\n print(\"Running %s.test04_range...\" % self.__class__.__name__)\n\n # Case where step == nrowsinbuf\n self.nrows = self.expectedrows\n self.nrowsinbuf = 11 # Choose a small value for the buffer size\n self.start = 1\n self.stop = self.expectedrows\n self.step = 11\n\n self.check_range()", "title": "" }, { "docid": "54471a35e3662440dcedfda5b862a7fc", "score": "0.5150938", "text": "def test_next_section_offsets(self):\n text = u\"\\n\\n§ 201.3 sdsa\\nsdd dsdsadsa \\n asdsas\\nSection\\n\"\n text += u\"§ 201.20 dfds \\n sdfds § 201.2 saddsa \\n\\n sdsadsa\"\n self.assertEqual((2, 45), reg_text.next_section_offsets(text, 201))\n\n text = u\"\\n\\n§ 201.3 sdsa\\nsdd dsdsadsa \\n asdsas\\nSection\\n\"\n text += u\"201.20 dfds \\n sdfds § 201.2 saddsa \\n\\n sdsadsa\"\n self.assertEqual((2, len(text)),\n reg_text.next_section_offsets(text, 201))\n\n text = u\"\\n\\n§ 201.3 sdsa\\nsdd dsdsadsa \\nAppendix A to Part 201\"\n self.assertEqual((2, 29), reg_text.next_section_offsets(text, 201))\n\n text = u\"\\n\\n§ 201.3 sdsa\\nsdd dsdsadsa \\nSupplement I\"\n self.assertEqual((2, 29), reg_text.next_section_offsets(text, 201))\n\n text = u\"Appendix A to Part 201\\n\\n§ 201.3 sdsa\\nsdd dsdsadsa\"\n self.assertEqual(None, reg_text.next_section_offsets(text, 201))\n\n text = u\"Supplement I\\n\\n§ 201.3 sdsa\\nsdd dsdsadsa\"\n self.assertEqual(None, reg_text.next_section_offsets(text, 201))", "title": "" }, { "docid": "b5fce4b6d6cd590ca45d227c659a9c10", "score": "0.5150937", "text": "def test_stream(self):\n url=\"https://raw.githubusercontent.com/strets123/frictionless-pres/master/data/smdataset%3Fpage%5Bnumber%5D%3D0\"\n with tabulator.Stream(\n url,\n format=\"json-api\", \n custom_parsers={\"json-api\": jsonapi_parser.JSONAPIParser},\n property='data',\n ) as stream:\n for index, item in enumerate(stream):\n self.assertTrue(isinstance(item[0], dict))\n self.assertIn(\"attributes\", item[0])\n self.assertIn(\"id\", item[0])\n self.assertIn(\"links\", item[0])\n self.assertEqual(len(item), 1)", "title": "" }, { "docid": "9c61edc7a2d6f4899a586f0d40987c6c", "score": "0.5148339", "text": "def test_no_pages_turned(self):\n result = drawing_book(5, 4)\n self.assertEquals(result, 0)", "title": "" }, { "docid": "ec29fa97832b6206f6cb9086fd8c48d3", "score": "0.51474404", "text": "def test_read_metaseries():\n # Strips do not contain an EOI code as required by the TIFF spec.\n fname = private_file('metaseries/metaseries.tif')\n with TiffFile(fname) as tif:\n assert tif.byteorder == '<'\n assert len(tif.pages) == 1\n assert len(tif.series) == 1\n # assert page properties\n page = tif.pages.first\n assert page.imagewidth == 1392\n assert page.imagelength == 1040\n assert page.bitspersample == 16\n # assert metadata\n assert page.description.startswith('<MetaData>')\n # assert series properties\n series = tif.series[0]\n assert series.shape == (1040, 1392)\n assert series.dtype == numpy.uint16\n assert series.axes == 'YX'\n assert series.kind == 'uniform'\n # assert data\n data = tif.asarray()\n assert data.shape == (1040, 1392)\n assert data.dtype == numpy.uint16\n assert data[256, 256] == 1917\n assert_aszarr_method(series, data)\n assert_aszarr_method(series, data, chunkmode='page')\n assert__str__(tif)", "title": "" 
}, { "docid": "28e54f51cf1984207d9893794ce78445", "score": "0.5147279", "text": "def test_slice_len(test_df_year):\n\n assert len(test_df_year.slice(scenario=\"scen_a\")) == 4", "title": "" }, { "docid": "a28b00cc5c01a29e026639bb33c628df", "score": "0.5146809", "text": "def test_papers(self):\n pass", "title": "" }, { "docid": "cd5ec453bff0ebb88654ae6ebc2d35e2", "score": "0.5144664", "text": "def test_get_steps(self):\n pass", "title": "" }, { "docid": "51bcbed31bc96f9f7ac44c159625659e", "score": "0.5144497", "text": "def test_results_overview_split_on(self):\n browser = Browser()\n browser.open(CFG_SITE_URL + '/search?p=of&sc=1')\n body = browser.response().read()\n if body.find(\"Results overview\") == -1:\n self.fail(\"Oops, when split by collection is on, \"\n \"results overview should be present.\")\n if body.find('<a name=\"Atlantis%20Institute%20of%20Fictive%20Science\"></a>') > -1:\n self.fail(\"Oops, when split by collection is on, \"\n \"Atlantis collection should not be found.\")\n if body.find('<a name=\"15\"></a>') == -1:\n self.fail(\"Oops, when split by collection is on, \"\n \"Multimedia & Arts should be found.\")\n try:\n browser.find_link(url='#15')\n except LinkNotFoundError:\n self.fail(\"Oops, when split by collection is on, \"\n \"a link to Multimedia & Arts should be found.\")", "title": "" }, { "docid": "62884df2bd7a6893bb5d56bb464f00e9", "score": "0.5141837", "text": "def test_teacher_reorder_the_selected_reading_sections_8023(self):\n self.ps.test_updates['name'] = 't1.14.032' \\\n + inspect.currentframe().f_code.co_name[4:]\n self.ps.test_updates['tags'] = ['t1', 't1.14', 't1.14.032', '8023']\n self.ps.test_updates['passed'] = False\n\n # Test steps and verification assertions\n assignment_name = 'reading_032_%d' % (randint(100, 999))\n today = datetime.date.today()\n start = randint(0, 6)\n finish = start + randint(1, 6)\n begin = (today + datetime.timedelta(days=start)).strftime('%m/%d/%Y')\n end = (today + datetime.timedelta(days=finish)).strftime('%m/%d/%Y')\n self.teacher.add_assignment(\n assignment='reading',\n args={\n 'title': assignment_name,\n 'description': 'description',\n 'periods': {'all': (begin, end)},\n 'reading_list': ['ch1'],\n 'status': 'draft',\n }\n )\n try:\n self.teacher.wait.until(\n expect.presence_of_element_located(\n (By.XPATH, '//div[@class=\"month-wrapper\"]')\n )\n )\n self.teacher.find(\n By.XPATH,\n '//label[contains(text(),\"{0}\")]'.format(assignment_name)\n ).click()\n except NoSuchElementException:\n self.teacher.find(\n By.XPATH,\n '//a[contains(@class,\"header-control next\")]'\n ).click()\n self.teacher.wait.until(\n expect.presence_of_element_located(\n (By.XPATH, '//div[@class=\"month-wrapper\"]')\n )\n )\n self.teacher.find(\n By.XPATH,\n '//label[contains(text(),\"{0}\")]'.format(assignment_name)\n ).click()\n self.teacher.wait.until(\n expect.element_to_be_clickable(\n (By.ID, 'reading-title')\n )\n )\n sections = self.teacher.find_all(\n By.XPATH,\n '//li[@class=\"selected-section\"]' +\n '//span[@class=\"chapter-section\"]'\n )\n second_sec = sections[1].get_attribute('data-chapter-section')\n self.teacher.find(\n By.XPATH,\n '//button[contains(@class,\"move-reading-up\")]'\n ).click()\n sections_new = self.teacher.find_all(\n By.XPATH,\n '//li[@class=\"selected-section\"]' +\n '//span[@class=\"chapter-section\"]'\n )\n new_first_sec = sections_new[0].get_attribute('data-chapter-section')\n assert(second_sec == new_first_sec), \\\n 'did not rearrange sections'\n self.teacher.find(\n By.XPATH,\n 
'//button[contains(@class,\"-publish\")]'\n ).click()\n try:\n self.teacher.find(\n By.XPATH,\n '//label[contains(text(),\"{0}\")]'.format(assignment_name)\n )\n except NoSuchElementException:\n self.teacher.find(\n By.XPATH,\n '//a[contains(@class,\"header-control next\")]'\n ).click()\n self.teacher.find(\n By.XPATH,\n '//label[contains(text(),\"{0}\")]'.format(assignment_name)\n )\n\n self.ps.test_updates['passed'] = True", "title": "" }, { "docid": "a200f3a4c6bf9b21cdb56cbc70485327", "score": "0.5138966", "text": "def parse_section_page(self, response):\n item_path = '//table/tbody/tr/td/a[contains(@href, \"article\")]/@href'\n\n for item_URL in response.xpath(item_path).extract():\n url = '{}{}'.format(self.base_URL, item_URL)\n if(self.item_count < self.item_limit):\n yield scrapy.Request(url=url, callback=self.parse_item_page)\n self.item_count += 1", "title": "" }, { "docid": "e41c59d85fcbb2a7c50e76fd775923e4", "score": "0.51385516", "text": "def test_slice_retrieve_all(self):\n matches = self.cont.get_slice(contig, 0, len(self.seq))\n\n count = 0;\n for index in matches:\n count = count + len(index[4])\n\n assert count == 10000", "title": "" }, { "docid": "2ce4bcf801f317fbb12a96887f5d2c89", "score": "0.51352787", "text": "def test_read(self):\n pytest4 = self.dbs.get_file(\"PYTEST4\", internal=True,\n fieldnames=[\"GENDER\", \"NUMBERS\", \"YESNO\"])\n cursor = pytest4.traverser(\"B\", \"e\")\n rec = cursor.next()\n\n self.assertEqual(rec[0], \"M\")\n self.assertEqual(rec[1], \"8\")\n self.assertEqual(rec[2], \"Y\")\n\n pytest4 = self.dbs.get_file(\"PYTEST4\", internal=False,\n fieldnames=[\"GENDER\", \"NUMBERS\", \"YESNO\"])\n cursor = pytest4.traverser(\"B\", \"e\")\n rec = cursor.next()\n\n self.assertEqual(rec[0], \"MALE\")\n self.assertEqual(rec[1], \"EIGHT\")\n self.assertEqual(rec[2], \"YES\")", "title": "" }, { "docid": "7c264671ac419a5c9d07cacfe18054c2", "score": "0.51335776", "text": "def test_sequential_processing(self):\n self.execute_fake_download(self.files['dld'])\n self.execute_decompress(self.sample, self.files['xml'])\n self.execute_extract(self.files['xml'], self.files['jsonl'])\n self.execute_sentences(self.files['jsonl'], self.files['txt'])\n self.execute_word_vocabulary(\n self.files['txt'], self.files['wvoc'], self.files['wplot'])\n self.execute_char_vocabulary(\n self.files['txt'], self.files['cvoc'], self.files['cplot'])", "title": "" }, { "docid": "165407c7f4c1361549808cb7e507e29d", "score": "0.51311845", "text": "def test_read_svs_jp2k_33003_1():\n fname = private_file('AperioSVS/JP2K-33003-1.svs')\n with TiffFile(fname) as tif:\n assert tif.is_svs\n assert not tif.is_scanimage\n assert len(tif.pages) == 6\n assert len(tif.series) == 4\n for page in tif.pages:\n svs_description_metadata(page.description)\n # first page\n page = tif.pages.first\n assert page.is_svs\n assert not page.is_subsampled\n assert page.photometric == RGB\n assert page.is_tiled\n assert page.compression == APERIO_JP2000_YCBC\n assert page.shape == (17497, 15374, 3)\n metadata = svs_description_metadata(page.description)\n assert metadata['Header'].startswith('Aperio Image Library')\n assert metadata['Originalheight'] == 17597\n # page 4\n page = tif.pages[4]\n assert page.is_svs\n assert page.is_reduced\n assert page.photometric == RGB\n assert page.compression == LZW\n assert page.shape == (422, 415, 3)\n metadata = svs_description_metadata(page.description)\n assert 'label 415x422' in metadata['Header']\n assert_aszarr_method(page)\n assert__str__(tif)", "title": "" }, { "docid": 
"e25c73ab8cdd56a7dd8594dfeeb878bd", "score": "0.5129103", "text": "def test_returns_specified_page_users(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"page\": 2},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"users\"]), 11)\n self.assertEqual(response.json[\"pagination\"][\"page\"], 2)\n self.assertEqual(response.json[\"pagination\"][\"hasNext\"], False)\n self.assertEqual(response.json[\"pagination\"][\"hasPrev\"], True)\n self.assertEqual(response.json[\"pagination\"][\"pages\"], 2)", "title": "" }, { "docid": "9228912b1fc27405298587d2f3c293ad", "score": "0.5121556", "text": "def test_train(self):\n sampler = sampling.EpisodeDescriptionSampler(DATASET_SPEC, Split.TRAIN)\n self.generate_and_check(sampler, 10)", "title": "" }, { "docid": "b3b0f8311bf3fc7e3391ce16298e4acd", "score": "0.51135737", "text": "def test_parquet_read():\n _parquet_write()\n\n parquet_file = pq.ParquetFile(SAMPLE_FILENAME)\n assert parquet_file.num_row_groups == 5 + 1\n batches = []\n for i in range(parquet_file.num_row_groups):\n batches.append(parquet_file.read_row_group(i))", "title": "" }, { "docid": "d7cade371a778fcd5ab3e0d169aac15f", "score": "0.511302", "text": "def test_recuperate_number_wiki_page():\n result = recuperate_number_wiki_page(\"Openclassrooms\")\n assert result == 4338589", "title": "" }, { "docid": "da83a4d119f7eb108e689b199df09597", "score": "0.5110007", "text": "def test_read_scn_collection():\n # collection of 43 CZYX images\n # https://forum.image.sc/t/43585\n fname = private_file(\n 'LeicaSCN/19-3-12_b5992c2e-5b6e-46f2-bf9b-d5872bdebdc1.SCN'\n )\n with TiffFile(fname) as tif:\n assert tif.is_scn\n assert tif.is_bigtiff\n assert len(tif.pages) == 5358\n assert len(tif.series) == 46\n # first page\n page = tif.pages.first\n assert page.is_scn\n assert page.is_tiled\n assert page.photometric == YCBCR\n assert page.compression == JPEG\n assert page.shape == (12990, 5741, 3)\n metadata = tif.scn_metadata\n assert metadata.startswith('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n for series in tif.series[2:]:\n assert series.kind == 'scn'\n assert series.axes == 'CZYX'\n assert series.shape[:2] == (4, 8)\n assert len(series.levels) in {2, 3, 4, 5}\n assert len(series.pages) == 32\n # third series\n series = tif.series[2]\n assert series.shape == (4, 8, 946, 993)\n assert_aszarr_method(series)\n assert__str__(tif)", "title": "" }, { "docid": "35ae1cd8931556f1932eb83f76e88e3f", "score": "0.5109626", "text": "def test_read_CTD_count(self):\n parser = SBE52BinaryCTDParser(CTD_FILE) # no error? 
then we parsed the file into profiles and records!\n\n records = parser.get_records(5)\n self.assertEqual(5, len(records))\n\n self.assertTrue(records[0]['driver_timestamp']<records[1]['driver_timestamp'])\n self.assertEquals(self._get_particle_value(records[0],'upload_time'), self._get_particle_value(records[1],'upload_time'))\n\n # total 513 records; already read 5 above; read 500 more...\n records = parser.get_records(500)\n self.assertEqual(500, len(records))\n\n # now should only get last 3\n records = parser.get_records(20)\n self.assertEquals(8, len(records))\n\n # now should get none\n records = parser.get_records(8)\n self.assertTrue(records is None)", "title": "" }, { "docid": "4773dac3d62210c9623456220e511ef1", "score": "0.5104126", "text": "def test_scrape_data_hist():\n\n words = Words()\n\n # Add ERPs and terms\n words.set_erps(['N180', 'P600'])\n words.set_terms(['language', 'memory'])\n words.set_exclusions(['protein', ''])\n\n #words.scrape_data(db='pubmed', retmax='5', use_hist=True)\n\n assert True", "title": "" }, { "docid": "9fbf73c549b13d8a410b264451a73532", "score": "0.5102775", "text": "def test_get_next_round_nsdi():", "title": "" }, { "docid": "11ab8e19af8d16883f324a283b316de7", "score": "0.50951713", "text": "def test_noscript_pagination(self):\n threads_per_page = settings.MISAGO_THREADS_PER_PAGE\n\n threads = []\n for _ in range(settings.MISAGO_THREADS_PER_PAGE * 3):\n threads.append(testutils.post_thread(category=self.first_category))\n\n # secondary page renders\n response = self.client.get('/?page=2')\n self.assertEqual(response.status_code, 200)\n\n for thread in threads[:threads_per_page]:\n self.assertNotContainsThread(response, thread)\n for thread in threads[threads_per_page:threads_per_page * 2]:\n self.assertContainsThread(response, thread)\n for thread in threads[threads_per_page * 2:]:\n self.assertNotContainsThread(response, thread)\n\n self.assertNotContains(response, '/?page=1')\n self.assertContains(response, '/?page=3')\n\n # third page renders\n response = self.client.get('/?page=3')\n self.assertEqual(response.status_code, 200)\n\n for thread in threads[threads_per_page:]:\n self.assertNotContainsThread(response, thread)\n for thread in threads[:threads_per_page]:\n self.assertContainsThread(response, thread)\n\n self.assertContains(response, '/?page=2')\n self.assertNotContains(response, '/?page=4')\n\n # excessive page gives 404\n response = self.client.get('/?page=4')\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "7e44e18e4b28a4d508c3cacac323a942", "score": "0.50923365", "text": "def test_read_h5(self):\n test_data = datapreprocessing.DataConverter('../MillionSongSubset/data/A/A/A', '../lastfm_subset/A/A/A')\n test_data_df = test_data.read_h5()\n self.assertEqual(test_data_df.shape, (33, 53))\n self.assertEqual(test_data_df.columns[0], 'analysis_sample_rate')\n self.assertEqual(test_data_df.columns[-1], 'year')", "title": "" }, { "docid": "485b0c905c9f1646394ed09c62d249cf", "score": "0.5092273", "text": "def test06_range(self):\n\n if common.verbose:\n print('\\n', '-=' * 30)\n print(\"Running %s.test06_range...\" % self.__class__.__name__)\n\n # Case where step > 3 * nrowsinbuf\n self.nrows = 3\n self.nrowsinbuf = 3 # Choose a small value for the buffer size\n self.start = 2\n self.stop = self.expectedrows\n self.step = 10\n\n self.check_range()", "title": "" }, { "docid": "80c28b241b7abd9892746d40819146a4", "score": "0.50852543", "text": "def test_publications2(self):\n\t\tr1 = 
get(f\"http://{self.server_ip}:{self.server_port}/publications?limit=5\")\n\t\tl = loads(r1.text)\n\t\tself.assertEqual(l, {\"data\": [{\"author\": \"Louise A. Dennis\", \"title\": \"Computational Goals, Values and Decision-Making.\", \"pages\": \"2487-2495\", \"year\": \"2020\", \"volume\": \"26\", \"journal\": \"Sci. Eng. Ethics\", \"number\": \"5\", \"ee\": \"https://doi.org/10.1007/s11948-020-00244-y\", \"url\": \"db/journals/see/see26.html#Dennis20\"}, {\"author\": \"Indira Nair\", \"author0\": \"William M. Bulleit\", \"title\": \"Pragmatism and Care in Engineering Ethics.\", \"pages\": \"65-87\", \"year\": \"2020\", \"volume\": \"26\", \"journal\": \"Sci. Eng. Ethics\", \"number\": \"1\", \"ee\": \"https://doi.org/10.1007/s11948-018-0080-y\", \"ee0\": \"https://www.wikidata.org/entity/Q90930992\", \"url\": \"db/journals/see/see26.html#NairB20\"}, {\"author\": \"Petr Houdek\", \"title\": \"Fraud and Understanding the Moral Mind: Need for Implementation of Organizational Characteristics into Behavioral Ethics.\", \"pages\": \"691-707\", \"year\": \"2020\", \"volume\": \"26\", \"journal\": \"Sci. Eng. Ethics\", \"number\": \"2\", \"ee\": \"https://doi.org/10.1007/s11948-019-00117-z\", \"ee0\": \"https://www.wikidata.org/entity/Q92736437\", \"url\": \"db/journals/see/see26.html#Houdek20\"}, {\"author\": \"M. Reza Hosseini\", \"author0\": \"Igor Martek\", \"author1\": \"Saeed Banihashemi\", \"author2\": \"Albert P. C. Chan\", \"author3\": \"Amos Darko\", \"author4\": \"Mahdi Tahmasebi\", \"title\": \"Distinguishing Characteristics of Corruption Risks in Iranian Construction Projects: A Weighted Correlation Network Analysis.\", \"pages\": \"205-231\", \"year\": \"2020\", \"volume\": \"26\", \"journal\": \"Sci. Eng. Ethics\", \"number\": \"1\", \"ee\": \"https://doi.org/10.1007/s11948-019-00089-0\", \"ee0\": \"https://www.wikidata.org/entity/Q91374986\", \"url\": \"db/journals/see/see26.html#HosseiniMBCDT20\"}, {\"author\": \"Rafi Rashid\", \"title\": \"Training STEM Ph.D. Students to Deal with Moral Dilemmas.\", \"pages\": \"1861-1872\", \"year\": \"2020\", \"volume\": \"26\", \"journal\": \"Sci. Eng. Ethics\", \"number\": \"3\", \"ee\": \"https://doi.org/10.1007/s11948-019-00174-4\", \"ee0\": \"https://www.wikidata.org/entity/Q92485691\", \"url\": \"db/journals/see/see26.html#Rashid20\"}]})\n\t\tr1 = get(f\"http://{self.server_ip}:{self.server_port}/publications?limit=1\")\n\t\tl = loads(r1.text)\n\t\tself.assertEqual(l, {\"data\": [{\"author\": \"Louise A. Dennis\", \"title\": \"Computational Goals, Values and Decision-Making.\", \"pages\": \"2487-2495\", \"year\": \"2020\", \"volume\": \"26\", \"journal\": \"Sci. Eng. 
Ethics\", \"number\": \"5\", \"ee\": \"https://doi.org/10.1007/s11948-020-00244-y\", \"url\": \"db/journals/see/see26.html#Dennis20\"}]})", "title": "" }, { "docid": "1ef48963cc8bca8883457ef394a474fb", "score": "0.5085234", "text": "def test_read_mdgel_rat():\n # Second page does not contain data, only private tags\n fname = private_file('mdgel/rat.gel')\n with TiffFile(fname) as tif:\n assert tif.byteorder == '<'\n assert len(tif.pages) == 2\n assert len(tif.series) == 1\n # assert page properties\n page = tif.pages.first\n assert page.is_contiguous\n assert page.compression == NONE\n assert page.imagewidth == 1528\n assert page.imagelength == 413\n assert page.bitspersample == 16\n assert page.samplesperpixel == 1\n assert page.tags['Software'].value == (\n 'ImageQuant Software Release Version 2.0'\n )\n assert page.tags['PageName'].value == r'C:\\DATA\\RAT.GEL'\n\n # assert 2nd page properties\n page = tif.pages[1]\n assert page.is_mdgel\n assert page.imagewidth == 0\n assert page.imagelength == 0\n assert page.bitspersample == 1\n assert page.samplesperpixel == 1\n assert page.tags['MDFileTag'].value == 2\n assert page.tags['MDScalePixel'].value == (1, 21025)\n assert len(page.tags['MDColorTable'].value) == 17\n md = tif.mdgel_metadata\n assert md['SampleInfo'] == 'Rat slices from Dr. Schweitzer'\n assert md['PrepDate'] == '12 July 90'\n assert md['PrepTime'] == '40hr'\n assert md['FileUnits'] == 'Counts'\n\n # assert series properties\n series = tif.series[0]\n assert series.shape == (413, 1528)\n assert series.dtype == numpy.float32\n assert series.axes == 'YX'\n assert series.kind == 'mdgel'\n # assert data\n data = series.asarray()\n assert isinstance(data, numpy.ndarray)\n assert data.shape == (413, 1528)\n assert data.dtype == numpy.float32\n assert round(abs(data[260, 740] - 399.1728515625), 7) == 0\n assert_aszarr_method(series, data)\n assert_aszarr_method(series, data, chunkmode='page')\n assert__str__(tif)", "title": "" }, { "docid": "6429ab74f00df4ff59739ae223561a75", "score": "0.5083087", "text": "def test_5(self):\n pass", "title": "" }, { "docid": "cc2cb6f3650dbdde8c333a597208dd49", "score": "0.5080099", "text": "def test_4(self):\n pass", "title": "" }, { "docid": "b79976348719096fbda5243d69b81b76", "score": "0.50724643", "text": "def test_multi_fetch_data(self):\n nodes = list(self.finder.find_nodes(Query(self.series1)))\n time_info, data = self.finder.fetch_multi(nodes,\n int(self.start_time.strftime(\"%s\")),\n int(self.end_time.strftime(\"%s\")))\n self.assertTrue(self.series1 in data,\n msg=\"Did not get data for requested series %s - got data for %s\" % (\n self.series1, data.keys(),))\n self.assertEqual(time_info,\n (int(self.start_time.strftime(\"%s\")),\n int(self.end_time.strftime(\"%s\")),\n self.step),\n msg=\"Time info and step do not match our requested values\")\n datapoints = [v for v in data[self.series1] if v]\n self.assertTrue(len(datapoints) == self.num_datapoints,\n msg=\"Expected %s datapoints - got %s\" % (\n self.num_datapoints, len(datapoints),))", "title": "" }, { "docid": "ebb64cfa943f19902833f36faf205863", "score": "0.5072437", "text": "def stories_per_page():\r\n return 30", "title": "" }, { "docid": "ea97ef4b81c71e4dcd1bb67e47bf9ba5", "score": "0.50705093", "text": "def test_get_data(self, h5reader):\n h5reader.open()\n for i, event in enumerate(h5reader):\n particle_floats, particle_strings = event.get_data('particles_meta')\n assert particle_floats[0][0] == 12\n assert particle_strings[0][0] == 'electron_neutrino'\n assert 
event.get_data('particles_meta', 'vertex_z') == -500\n assert event.get_data('triggers') == [bool(i)]\n assert np.array_equal(event.get_data('signal_amplitudes'),\n [[2, 2]])\n h5reader.close()", "title": "" }, { "docid": "e614e80e3e3bbc909af8f13926c191d7", "score": "0.50702", "text": "def test_read_100000_pages_movie():\n fname = public_file('tifffile/100000_pages.tif')\n with TiffFile(fname, _useframes=True) as tif:\n assert tif.is_imagej\n assert tif.byteorder == '>'\n assert len(tif.pages) == 100000\n assert len(tif.series) == 1\n # assert series properties\n series = tif.series[0]\n assert series.shape == (100000, 64, 64)\n assert series.dtype == numpy.uint16\n assert series.axes == 'TYX'\n assert series.kind == 'imagej'\n # assert page properties\n frame = tif.pages[100]\n assert isinstance(frame, TiffFrame) # uniform=True\n assert frame.shape == (64, 64)\n frame = tif.pages.first\n assert frame.imagewidth == 64\n assert frame.imagelength == 64\n assert frame.bitspersample == 16\n assert frame.compression == 1\n assert frame.shape == (64, 64)\n assert frame.shaped == (1, 1, 64, 64, 1)\n assert frame.ndim == 2\n assert frame.size == 4096\n assert frame.nbytes == 8192\n assert frame.axes == 'YX'\n assert frame._nextifd() == 819200206\n assert frame.is_final\n assert frame.is_contiguous\n assert frame.is_memmappable\n assert frame.hash\n assert frame.decode\n assert frame.aszarr()\n # assert ImageJ tags\n ijmeta = tif.imagej_metadata\n assert ijmeta is not None\n assert ijmeta['ImageJ'] == '1.48g'\n assert round(abs(ijmeta['max'] - 119.0), 7) == 0\n assert round(abs(ijmeta['min'] - 86.0), 7) == 0\n # assert data\n data = tif.asarray()\n assert data.flags['C_CONTIGUOUS']\n assert data.shape == (100000, 64, 64)\n assert data.dtype == numpy.uint16\n assert round(abs(data[7310, 25, 25] - 100), 7) == 0\n # too slow: assert_aszarr_method(tif, data)\n del data\n assert__str__(tif, 0)", "title": "" }, { "docid": "00e9a8be38a01fdfa6dc985a3f63a6ad", "score": "0.50664526", "text": "def test_product_pagination(self):\r\n products = []\r\n limit = 2\r\n products.append(create_product(name=\"test product\"))\r\n products.append(create_product(name=\"test product 2\"))\r\n products.append(create_product(name=\"test product 3\"))\r\n products.append(create_product(name=\"test product 4\"))\r\n products.append(create_product(name=\"test product 5\"))\r\n res = self.client.get(PRODUCTS_URL, {'limit': limit, 'offset': 0})\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)\r\n self.assertEqual(len(res.data['results']), limit)\r\n self.assertTrue(res.data['previous'] is None)\r\n self.assertFalse(res.data['next'] is None)", "title": "" } ]
29d68b084388e0271e4254f03d17719b
Called on game end.
[ { "docid": "6c52813fb82e4da6560c338a205bca95", "score": "0.9067296", "text": "def on_game_end(self) -> None:", "title": "" } ]
[ { "docid": "1d3c2859a1ecc59bc02ef9fdf50581c7", "score": "0.84280807", "text": "def hook_end_of_game(self, game, player):", "title": "" }, { "docid": "0439697e16f72cf8fc8dbcc339bd1bfd", "score": "0.8149644", "text": "async def end_game(self):", "title": "" }, { "docid": "8ab150ce2c81187fd13e90347342eb3c", "score": "0.79432976", "text": "def end(self):\n self.on_end()", "title": "" }, { "docid": "1393c768f22f9ba3b22526072e5cb249", "score": "0.7878691", "text": "def exit_game(self):\n self.onexit()", "title": "" }, { "docid": "4214f8f2b69ee0c48294418c2fec1b03", "score": "0.7838046", "text": "def on_end(self):\n pass", "title": "" }, { "docid": "4214f8f2b69ee0c48294418c2fec1b03", "score": "0.7838046", "text": "def on_end(self):\n pass", "title": "" }, { "docid": "b1d78baba00433f4b61ed2a5d7a4c7e1", "score": "0.7829294", "text": "def _end_game(self):\n print(\"Time's Up!\")\n self.active_game.clear()", "title": "" }, { "docid": "09fa8d184abc69524635a80b17e528e2", "score": "0.77912253", "text": "def end(self):", "title": "" }, { "docid": "9d60cfc36f3c2a2f3a8639230f772b1e", "score": "0.7789218", "text": "def end_game(self):\n self._game_started = False\n self._game_complete = True", "title": "" }, { "docid": "b2ba79fa4c19b0dab0ba570d3ba7b582", "score": "0.7780106", "text": "def endGame(self):\r\n self.started = False", "title": "" }, { "docid": "c3003d7f1d6f10743bf232cdea4cb68e", "score": "0.7765144", "text": "def on_end(self):", "title": "" }, { "docid": "b20d925df7080f8bdc5bda950a6777ce", "score": "0.7658304", "text": "def do_finish(self):\n print 'You finished the game!'", "title": "" }, { "docid": "e1cfc51ae5f40a5dd21a4049add4a1b2", "score": "0.7654785", "text": "def on_game_finished(self):\n print('got to game finished') \n self.ros_node_mgr.send_game_cmd(TapGameCommand.SHOW_GAME_END)", "title": "" }, { "docid": "4be1dc762e7f0ef623105dcef3ad9fc2", "score": "0.7638124", "text": "def end(self):\n\t\treturn", "title": "" }, { "docid": "b2b77b4a7fd2478f16c53c864def3aff", "score": "0.76312095", "text": "def end(self):\n\n pass", "title": "" }, { "docid": "b53a7436fc4d12481680e4608f7ceb09", "score": "0.7628224", "text": "def endgame(self):\n self.refstate = STATE_SETUP\n self.bot.endgame()", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "a3166254e7ac8006193cd867a41a9a6a", "score": "0.7599635", "text": "def end(self):\n pass", "title": "" }, { "docid": "18c104d1841ce212250951ad09897e56", "score": "0.75650895", "text": "def die(self):\r\n self.game.end()\r\n super(ZapMan, self).die()", "title": "" }, { "docid": "4add6ba2b815e35cf5c806ab2720eb6c", "score": "0.7552846", "text": "def game_exit(self):\n exit()", "title": "" }, { "docid": "abb975fb16fb6f813d49b77d88d3baaf", "score": "0.755054", "text": "def 
endGame(self):\n self.resetScores()", "title": "" }, { "docid": "7499b4aca9bb48af459bf06a92add7c5", "score": "0.7517498", "text": "def finish(self):\n\n pass", "title": "" }, { "docid": "c64703f27f62cff97cd78b0dc742a46b", "score": "0.75155365", "text": "def end_game(self) -> None:\n self.over = True", "title": "" }, { "docid": "968b405c73b1d215c5abe0453778291f", "score": "0.75144845", "text": "def finish(self):\n\t\tpass", "title": "" }, { "docid": "bcb52efea1898eceb764d7c029c79fae", "score": "0.74962735", "text": "def end_game(self) -> None:\r\n self.running = False", "title": "" }, { "docid": "9d606be86b41ee52efe3d0b7e4701c04", "score": "0.74805504", "text": "def endHook(self):\n # Empty ", "title": "" }, { "docid": "7d0b093131056fb162c5cb220fefdaeb", "score": "0.74613", "text": "def end(self):\n\n return", "title": "" }, { "docid": "7561e9e7e13340df2f4c30d0608115af", "score": "0.74561137", "text": "def on_end(self, error_code, message):\n self.handle_game_end(error_code, message)", "title": "" }, { "docid": "9d60d7c941ed7102fa6c93128493449f", "score": "0.7441707", "text": "def finish(self):\n pass", "title": "" }, { "docid": "9d60d7c941ed7102fa6c93128493449f", "score": "0.7441707", "text": "def finish(self):\n pass", "title": "" }, { "docid": "9d60d7c941ed7102fa6c93128493449f", "score": "0.7441707", "text": "def finish(self):\n pass", "title": "" }, { "docid": "6282853e3ada1f65c6c262293dbe1ea8", "score": "0.74232805", "text": "def finish(self):", "title": "" }, { "docid": "6282853e3ada1f65c6c262293dbe1ea8", "score": "0.74232805", "text": "def finish(self):", "title": "" }, { "docid": "6282853e3ada1f65c6c262293dbe1ea8", "score": "0.74232805", "text": "def finish(self):", "title": "" }, { "docid": "6282853e3ada1f65c6c262293dbe1ea8", "score": "0.74232805", "text": "def finish(self):", "title": "" }, { "docid": "b41af4a13a92b0452710539c27877739", "score": "0.7386487", "text": "def leaveGame():", "title": "" }, { "docid": "16d5c102209f1fdeccfe11546ed8acf7", "score": "0.7367535", "text": "def EndLayer(self):", "title": "" }, { "docid": "11f244c54b504c41d50ab0467226ec35", "score": "0.7346741", "text": "def exitPlay(self):\n pass", "title": "" }, { "docid": "d1e17be89a6cefccb4c816a3c8b72689", "score": "0.73466307", "text": "def _on_finish(self):\n pass", "title": "" }, { "docid": "fe4309811c10f3c7c6500c3a8550af0f", "score": "0.7345917", "text": "def end_game(self):\n end_message = games.Message(value = \"You Died\",\n size = 95,\n color = color.red,\n x = games.screen.width/2,\n y = games.screen.height/2,\n lifetime = 9 * games.screen.fps,\n after_death = games.screen.quit)\n games.screen.add(end_message)", "title": "" }, { "docid": "f801b8a4ca2d6d8e9cc24426bc7b87cf", "score": "0.73345464", "text": "def end(game_state):\n print(\"GAME OVER\")", "title": "" }, { "docid": "dd0cf3fcab6a0c15ba616b3d2ad4a5b5", "score": "0.7332736", "text": "def on_run_end(self):\n pass", "title": "" }, { "docid": "d098ed0538990793747ac56e95966e1d", "score": "0.7329234", "text": "def end_game(self):\n self.subwindows['input'].render_warning(f\"GAME OVER: {self.game.player}\")\n self.screen.refresh()\n self.screen.nodelay(0)\n u = self.screen.getch()\n self.do_quit()", "title": "" }, { "docid": "ba8832d005a522d5071b7a1f3b703cb0", "score": "0.73198855", "text": "def hook_end_turn(self, game, player):", "title": "" }, { "docid": "ec058ba545085afb8d4bbcd8c94f6da5", "score": "0.7315886", "text": "def plugin_end(self):\r\n pass", "title": "" }, { "docid": "ca16fcd73c63108d6e95054476dc2669", "score": "0.7311722", "text": 
"def end_game(self):\n api.end_game(self.api_key, self.game_state.game_id)", "title": "" }, { "docid": "44312b4d214e0c00e1e2f7a1e0571691", "score": "0.7308019", "text": "def end(self, won, reason):\n # replace with your end logic", "title": "" }, { "docid": "00de92503e2c11acff9b9778b6582425", "score": "0.7303975", "text": "def finished(self):\n\n pass", "title": "" }, { "docid": "94fc549ac93ee58bb78a9993cc1a924b", "score": "0.7294059", "text": "def on_ended(self):", "title": "" }, { "docid": "94fc549ac93ee58bb78a9993cc1a924b", "score": "0.7294059", "text": "def on_ended(self):", "title": "" }, { "docid": "b4433887d5cce0ed37b7fe3ea2eada14", "score": "0.7279141", "text": "def end(self):\n self.screen.end()\n\n # Exit from export modules\n self.stats.end()", "title": "" }, { "docid": "41c9c172de6014d600431cdce4824966", "score": "0.72621596", "text": "def end(self):\n xmp_end_player(self._ctx)", "title": "" }, { "docid": "a120e08ecb96d603a8ca04803a7f7022", "score": "0.72515243", "text": "def end(self) -> None:\n pass", "title": "" }, { "docid": "b1b8b965977fbb4f3619153c29e59ec4", "score": "0.72508305", "text": "def on_cleanup(self):\n\n pygame.quit()", "title": "" }, { "docid": "e7f9beef30f7b0ce8d1c251e2958151e", "score": "0.7248259", "text": "def on_animation_end(self):\n pass", "title": "" }, { "docid": "321c864c547306d6d439c94ec04562e3", "score": "0.7241537", "text": "def finished(self):\n pass", "title": "" }, { "docid": "baa870e968b36cee6480596c46323706", "score": "0.72335047", "text": "def end_game():\n\n global game_on\n game_on = False", "title": "" }, { "docid": "8523e8cb34100c775c3d1a79f4ad5258", "score": "0.7217624", "text": "def on_finish(self):\n pass", "title": "" }, { "docid": "e21da4d79abffbd2476a165b099c8b18", "score": "0.7212682", "text": "def end_game(self):\n # Handle stats for last ball here\n self.ball = 0\n self.log(\"Skel: 'GAME ENDED\")\n\n # ball time is handled in ball drained callback\n \n # Also handle game stats.\n for i in range(0,len(self.players)):\n game_time = self.get_game_time(i)\n self.game_data['Audits']['Games Played'] += 1\n self.game_data['Audits']['Avg Game Time'] = self.calc_time_average_string( self.game_data['Audits']['Games Played'], self.game_data['Audits']['Avg Game Time'], game_time)\n self.game_data['Audits']['Avg Score'] = self.calc_number_average(self.game_data['Audits']['Games Played'], self.game_data['Audits']['Avg Score'], self.players[i].score)\n\n self.log(\"Skel: 'player %d score %d\" % (i, self.players[i].score))\n \n self.save_game_data('game_user_data.yaml')\n\n # show any animations you want in ball_ending\n self.notifyModes('evt_game_ending', args=None, event_complete_fn=self.game_ended)", "title": "" }, { "docid": "822aa43b4bc1caf57b46aca0478f07a6", "score": "0.7177083", "text": "def finish(self):\n self.engine.unbind()", "title": "" }, { "docid": "91d42c0339878c34a816067acd640288", "score": "0.71713394", "text": "def end(self):\n self.robot.shooter.retractBoth()", "title": "" }, { "docid": "34e16aca7af496dd32ee994152711e5f", "score": "0.7163315", "text": "def finish():", "title": "" }, { "docid": "67c7fc2560eb7d2e53c872029c1658a2", "score": "0.71520233", "text": "def _exit(self):\n pygame.quit()\n quit()", "title": "" }, { "docid": "ed28f516cd9c95388ce32b963fcfbb8d", "score": "0.71511084", "text": "def end_game() -> None:\n print(\"Credits: HeartRails, @eq__s\")", "title": "" }, { "docid": "af4450ade2965ef02636b3541b879b9a", "score": "0.7144043", "text": "def done(self):\n self.engine.set_lives(0)\n self.engine.next_player()", 
"title": "" }, { "docid": "81b2e177a7bdd1bd58aa498345643987", "score": "0.7138201", "text": "def on_cleanup(self):\n pygame.quit()\n quit()", "title": "" }, { "docid": "5b47ed97838e069004233f0ea5ef7d41", "score": "0.7131462", "text": "def recog_end(self):\n\n pass", "title": "" }, { "docid": "2ea7536103b66f1d1a406c5b1f9a24f8", "score": "0.71289665", "text": "def game_end(self):\n self.start = 0\n # return self.start", "title": "" }, { "docid": "4218b3455e82378fafa6d84807c83d10", "score": "0.7120056", "text": "def end_episode(self):\n pass", "title": "" }, { "docid": "e8aa82f20aadb823b5fd5376b6599c92", "score": "0.7119156", "text": "def finish(self):\n #print \"%s.finish()\" % self.__class__.__name__\n pass", "title": "" }, { "docid": "69e64c8a34f776dca552fe2dfae09639", "score": "0.71127826", "text": "def end(self, won, reason):", "title": "" }, { "docid": "077acae55a96ca15ca74ddfb75916e2d", "score": "0.7111669", "text": "def at_disconnect(self):\n if self.location: # have to check, in case of multiple connections closing \n self.location.msg_contents(\"%s has left the game.\" % self.name, exclude=[self])\n self.db.prelogout_location = self.location\n self.location = None", "title": "" }, { "docid": "66f8e86987aae3c3f4ea1ec4728c1dde", "score": "0.71035415", "text": "def end_game(self):\n\n add_score(self.score)\n self.sprites.add(\n TextSprite(\"GAME OVER\", 256, 250, True),\n TextSprite(f\"SCORE: {self.score}\", 256, 300, True),\n TextSprite(\"press 't' to end game\", 256, 400, True),\n TextSprite(\"press 'r' to restart\", 256, 440, True)\n )", "title": "" }, { "docid": "674bb1fa325fbd6e6d94e216bdb38d62", "score": "0.70791554", "text": "def after_turn(self):\n pass", "title": "" }, { "docid": "65f720c9c8e843c1122736274368edde", "score": "0.70716006", "text": "def on_episode_end(self):\n pass", "title": "" }, { "docid": "2ca6df68c67e28e20d07776384fc093c", "score": "0.7070069", "text": "def on_close(self):\n\t\ttry:\n\t\t\tlogging.info(self.client_id+\" disconnect. 
Ending game\")\n\t\t\tdata.GAMES[self.game_id].stop_game()\n\t\texcept:\n\t\t\tpass", "title": "" }, { "docid": "53064513226d5bbfb1c3204fb60dddf5", "score": "0.7051257", "text": "def end(self):\n pass", "title": "" }, { "docid": "f946d688e2971858a1dd31e5e6a1a2e2", "score": "0.70509547", "text": "def on_generation_end(self):\n pass", "title": "" }, { "docid": "59e9b169fce53150477589c1e826ca0c", "score": "0.7048251", "text": "def poststop(self):\n pass", "title": "" }, { "docid": "c5b1347134af4e7f95d0856fc820780e", "score": "0.7041096", "text": "def on_finish(self):", "title": "" }, { "docid": "8fe96184e9854abf7ef4baeda0b8abfc", "score": "0.7039713", "text": "def OnExit(self):", "title": "" }, { "docid": "8fe96184e9854abf7ef4baeda0b8abfc", "score": "0.70396805", "text": "def OnExit(self):", "title": "" }, { "docid": "693e21b5b6949aa8d07e2047a91968cb", "score": "0.7038435", "text": "def end_tournament(self):", "title": "" }, { "docid": "f16dd9e657d46fc2481da58f0a27d714", "score": "0.7028267", "text": "def OnQuit(self):", "title": "" }, { "docid": "0b264a6af2433f5fc1d421c283543bec", "score": "0.7027035", "text": "def end(self):\n\t\tself._runningScene.onExit()\n\t\tself._runningScene.cleanup()\n\t\tself._scenesStack = []\n\t\tself._gestureDispatch.removeAllListeners()\n\t\tself._stopAnimation()", "title": "" }, { "docid": "b38c4fe9f32fff6a77d29351a7f0facb", "score": "0.7017934", "text": "def finalize_game(self, score, step, run):\n pass", "title": "" }, { "docid": "2e09d43bcfcebac0d430f501951d75e6", "score": "0.7015858", "text": "def on_death(self):", "title": "" }, { "docid": "fefb6223793480c2889cf434258bd567", "score": "0.7012972", "text": "def on_exit(self):", "title": "" }, { "docid": "fefb6223793480c2889cf434258bd567", "score": "0.7012972", "text": "def on_exit(self):", "title": "" }, { "docid": "3d80493e81ef40a0d19ba78fa958b4e9", "score": "0.7008257", "text": "def on_close(self):\n\n # Do a reverse lookup and check which game did\n # the player disconnect from\n if self in self.application.gameLookup:\n gameIndex = self.application.gameLookup[self]\n currentGame = self.application.games[gameIndex]\n\n # Handle the quit event\n currentGame.playerQuit(self)\n elif self.application.waitingPlayer is not None:\n\n # If waiting player exits then remove waiting player.\n if self.application.waitingPlayer.socket == self:\n self.application.waitingPlayer = None", "title": "" }, { "docid": "6350260d68589e95207908dbaa404584", "score": "0.70075566", "text": "def finish(self):\n\t\tself.finished = True", "title": "" }, { "docid": "cd89a089da28039c409e24eaa9be2d2f", "score": "0.69941425", "text": "def quit(self, event):\n\t\tGame.current.shutdown()", "title": "" } ]
406e848dba14958e11fbeac4d1148241
Checks that `true_negative_rate` calculates the right quantity.
[ { "docid": "9a1893e4df5308175962382670e3d5c8", "score": "0.7326625", "text": "def test_true_negative_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.true_negative_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "title": "" } ]
[ { "docid": "0dc92908a3c6787e6ea8d9b75225f569", "score": "0.7055266", "text": "def test_false_negative_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0, 1.0 - self._penalty_predictions) * (self._penalty_labels > 0.0)\n * self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels > 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_negative_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "title": "" }, { "docid": "48b3ad70edf6e6eabf838fb871acf0f2", "score": "0.70510167", "text": "def test_false_positive_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 + self._penalty_predictions) *\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels <= 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels <= 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.false_positive_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "title": "" }, { "docid": "03502727d92aafbb98948ca57d54653f", "score": "0.68465984", "text": "def validate_false_positive_rate(arg):\n try:\n rate = float(arg)\n except ValueError:\n raise InvalidArgument(\"false-positive-rate not a float: '%s'\" % arg)\n\n if rate < 0:\n raise InvalidArgument(\"false-positive rate cannot be < 0: '%s'\" % arg)\n\n return rate", "title": "" }, { "docid": "f408b8f8dc06416e337d810844d76e0a", "score": "0.6732482", "text": "def test_true_positive_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0, 1.0 + self._penalty_predictions) * (self._penalty_labels > 0.0)\n * self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n (self._penalty_labels > 0.0) * self._penalty_weights *\n self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n 
(self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n (self._constraint_labels > 0.0) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.true_positive_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "title": "" }, { "docid": "f536ad550b83f7364d9943a2de738e14", "score": "0.6731999", "text": "def false_negative_rate(true_positives, false_negatives):\n return np.divide(false_negatives, false_negatives + true_positives)", "title": "" }, { "docid": "6580d4eb023abe3cf6d5f7934a45e8cf", "score": "0.66146886", "text": "def get_true_negative_rate(false_positive, true_negative, epsilon=1e-8):\n false_positive, true_negative = list(\n map(tensor2float, [false_positive, true_negative])\n )\n\n try:\n tnr = true_negative / (true_negative + false_positive)\n\n except ZeroDivisionError:\n tnr = true_negative / (true_negative + false_positive + epsilon)\n\n return tnr", "title": "" }, { "docid": "f817c7bcf63c2b3649ef7be533a4a161", "score": "0.65353835", "text": "def false_positive_rate(true_negatives, false_positives):\n return np.divide(false_positives, false_positives + true_negatives)", "title": "" }, { "docid": "545a60f3faae9bdfc35c1bb0be44a720", "score": "0.65277594", "text": "def validate_negative_stock(self, sle):\n\t\tdiff = self.qty_after_transaction + flt(sle.actual_qty)\n\n\t\tif diff < 0 and abs(diff) > 0.0001:\n\t\t\t# negative stock!\n\t\t\texc = sle.copy().update({\"diff\": diff})\n\t\t\tself.exceptions.append(exc)\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "title": "" }, { "docid": "3ec8ab030f638141b0d54c0802cc1cdd", "score": "0.64671457", "text": "def true_negative_rate(self, class_index):\n return javabridge.call(self.jobject, \"trueNegativeRate\", \"(I)D\", class_index)", "title": "" }, { "docid": "9a2e5d7fbbf7dfc8c51d5dcdc20704d2", "score": "0.6438318", "text": "def test_negative_prediction_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 - self._penalty_predictions) * self._penalty_weights\n * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - np.sign(self._constraint_predictions))) *\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.negative_prediction_rate(\n self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "title": "" }, { "docid": "417fafb030c7c8661dca3dc2d729f2ee", "score": "0.6362543", "text": "def validate_negative_stock(self, sle):\n\t\tdiff = self.wh_data.qty_after_transaction + flt(sle.actual_qty)\n\n\t\tif diff < 0 and abs(diff) > 0.0001:\n\t\t\t# negative stock!\n\t\t\texc = sle.copy().update({\"diff\": diff})\n\t\t\tself.exceptions.setdefault(sle.warehouse, []).append(exc)\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", 
"title": "" }, { "docid": "427ac46742abe7a9295327e642dfb510", "score": "0.6332512", "text": "def weighted_true_negative_rate(self):\n return javabridge.call(self.jobject, \"weightedTrueNegativeRate\", \"()D\")", "title": "" }, { "docid": "248d0e017d985a48070e8152ceeaa0b0", "score": "0.6196491", "text": "def is_negative(self):\n return self.stanje < 0", "title": "" }, { "docid": "b8dc51a4eb633ecd8144186c57acc661", "score": "0.618669", "text": "def uses_negatives(self) -> bool:\n return False", "title": "" }, { "docid": "1032adb5c3b45039309a61a69a87c158", "score": "0.6184973", "text": "def get_true_positive_rate(true_positive, false_negative, epsilon=1e-8):\n true_positive, false_negative = list(\n map(tensor2float, [true_positive, false_negative])\n )\n\n try:\n tpr = true_positive / (true_positive + false_negative)\n\n except ZeroDivisionError:\n tpr = true_positive / (true_positive + false_negative + epsilon)\n\n return tpr", "title": "" }, { "docid": "0d68262eb248c661f535ccb63d6559a4", "score": "0.61761105", "text": "def test_disallow_negatives(self):\n recipe = Recipe.objects.first()\n u_s = UnitShifter()\n try:\n u_s.scale_recipe(recipe, -3)\n assert False\n except:\n assert True\n\n print \"Good job. A recipe with a new positive serving size.\"", "title": "" }, { "docid": "5f27b5be2466b3b6b4421739c038cb5d", "score": "0.6148797", "text": "def false_negative_rate(self, class_index):\n return javabridge.call(self.jobject, \"falseNegativeRate\", \"(I)D\", class_index)", "title": "" }, { "docid": "842643f1cf700aa6fbe4baf6943d0caf", "score": "0.61466265", "text": "def _is_negative(cav, negative_options): # pylint: disable=unused-argument\n # type: (models.CustomAttributeValue, list) -> bool\n return False", "title": "" }, { "docid": "5e4b2f8ff836efba4aa91bdb85065dca", "score": "0.6093219", "text": "def weighted_false_negative_rate(self):\n return javabridge.call(self.jobject, \"weightedFalseNegativeRate\", \"()D\")", "title": "" }, { "docid": "505cf09c7c3c92d80237fa779a98955d", "score": "0.6047045", "text": "def false_positive_rate(self, class_index):\n return javabridge.call(self.jobject, \"falsePositiveRate\", \"(I)D\", class_index)", "title": "" }, { "docid": "345405ed6f3fe61811c6652b45e210a9", "score": "0.59932584", "text": "def weighted_false_positive_rate(self):\n return javabridge.call(self.jobject, \"weightedFalsePositiveRate\", \"()D\")", "title": "" }, { "docid": "c10781402997d702a034eb259a58da03", "score": "0.5931606", "text": "def is_negative(self):\n return self._sign_str == s.ZERO or self._sign_str.upper() == s.NEGATIVE", "title": "" }, { "docid": "0949e8d0d4a9580bec3ca61d6b1bdc6a", "score": "0.59245324", "text": "def test_positive_prediction_rate(self):\n # For the penalty, the default loss is hinge.\n expected_penalty_numerator = np.sum(\n np.maximum(0.0, 1.0 + self._penalty_predictions) * self._penalty_weights\n * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 + np.sign(self._constraint_predictions))) *\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = 
binary_rates.positive_prediction_rate(\n self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "title": "" }, { "docid": "d1dcc2af022f45b8d1e16897b93a7177", "score": "0.5830701", "text": "def test_on_one_negative_value(self):\n\n with pytest.raises(ValueError) as e:\n va.calc_grav_acceleration(-1, 1)\n assert e.match(\n \"Mass and radius values must be positive.\"), \"Expected ValueError.\"\n with pytest.raises(ValueError) as e:\n va.calc_grav_acceleration(1, -1)\n assert e.match(\n \"Mass and radius values must be positive.\"), \"Expected ValueError.\"", "title": "" }, { "docid": "456babc361fd2e4e2bb896504957ce4e", "score": "0.58145714", "text": "def check_response_negative(self, rqi=None, rsc=None, error_message=None):\n raise NotImplementedError(\"Negative response validation not implemented\")", "title": "" }, { "docid": "3964ddf3b5093d070fe74940de94527a", "score": "0.5762629", "text": "def true_negative(y_true, y_pred):\n tn = 0\n for yt, yp in zip(y_true, y_pred):\n if yt == 0 and yp == 0:\n tn += 1\n return tn", "title": "" }, { "docid": "366ab000bed4a77b0c8fa946b37803da", "score": "0.57215977", "text": "def is_nonneg(self):\n return self.sign_from_args()[0]", "title": "" }, { "docid": "52ea5e98cab054c5be538e0650f2b968", "score": "0.571585", "text": "def is_negative(number):\n return number < 0", "title": "" }, { "docid": "377ba6fd715cd4e8be7fd3986e5f7a37", "score": "0.5707427", "text": "def check_exchange_negative(self, response_primitive, rsc, error_message=None):\n raise NotImplementedError(\"Negative exchange validation not implemented\")", "title": "" }, { "docid": "c69317455407c92ff372fab405336e16", "score": "0.5637376", "text": "def isNegative(obj):\n\n return not isPositive(obj)", "title": "" }, { "docid": "e5c993988dd83c1237fecacc11cb6151", "score": "0.5609874", "text": "def test_negative(data, value):\n sum_ = sum_digits(data)\n if sum_ != value:\n print('Test_negative: Expected value is {}, '.format(value) +\n 'actual is {}'.format(sum_))", "title": "" }, { "docid": "99fcfbb323af29668225569b83513c00", "score": "0.5601455", "text": "def test_is_base_experience_approximated_passing_negative_value(self):\n\n p1 = Player([Pokemon(\"Pichu\", 42), Pokemon(\"Pikachu\", 145)])\n p2 = Player([Pokemon(\"Dito\", 30), Pokemon(\"Charizard\", 260)])\n tc = TradeCalculator(p1, p2)\n self.assertRaises(ValueError, lambda: tc._is_base_experience_approximated(-42, 10))", "title": "" }, { "docid": "87f5b5e65d8e9a001a07e9f02a35c4ed", "score": "0.5595129", "text": "def test_invalid_amount_negative(self):\n entity_a1 = self.entities[1]\n entity_a3 = self.entities[3]\n\n # NOTE: StrToInt64, which is used in trade_MP, parses negative amounts as 0.\n\n TestInfo.ExpectFail()\n\n try: txid_a48 = entity_a1.trade('0.00000001', TDiv1, '-0.00000001', TMSC, ADD_1)\n except: txid_a48 = '0'\n self.check_invalid('amount desired is negative (-0.00000001 TMSC)', txid_a48)\n\n try: txid_a49 = entity_a3.trade('-0.00000001', TDivMax, '0.00000001', TMSC, ADD_1)\n except: txid_a49 = '0'\n self.check_invalid('amount for sale is negative (-0.00000001 TDivMax)', txid_a49)\n\n try: txid_a50 = entity_a1.trade('1', TIndiv1, '-0.00000001', TMSC, ADD_1)\n except: txid_a50 = '0'\n self.check_invalid('amount desired is negative (-0.00000001 TMSC)', txid_a50)\n\n try: txid_a51 = entity_a3.trade('-1', TIndivMax, '0.00000001', TMSC, ADD_1)\n except: txid_a51 = '0'\n self.check_invalid('amount for sale is negative (-1 TIndivMax)', txid_a51)\n\n try: txid_a52 = 
entity_a1.trade('-0.00000001', TMSC, '-0.00000001', TDiv1, ADD_1)\n except: txid_a52 = '0'\n self.check_invalid('both amounts are negative (-0.00000001 TMSC, -0.00000001 TDiv1)', txid_a52)\n\n try: txid_a53 = entity_a1.trade('-0.00000001', TMSC, '-1', TIndiv1, ADD_1)\n except: txid_a53 = '0'\n self.check_invalid('both amounts are negative (-0.00000001 TMSC,-1 TIndiv1)', txid_a53)\n\n TestInfo.StopExpectation()", "title": "" }, { "docid": "5a4b429c595dd10ffc7159424ad64968", "score": "0.55916566", "text": "def check_validity(self):\n negative_sum = np.sum(self.u[self.x_array > 0])\n positive_sum = np.sum(self.u[self.x_array < 0])\n if abs(negative_sum + positive_sum) < 1e-2 or \\\n abs(negative_sum - positive_sum) < 1e-2:\n self.valid = True\n else:\n self.valid = False", "title": "" }, { "docid": "9d0f4b1f314f2aa8d37b060fc0cf588d", "score": "0.55752337", "text": "def true_negatives(y_true, y_pred, pos_label=1):\n if type(y_true) == list:\n y_true = np.array(y_true)\n if type(y_pred) == list:\n y_pred = np.array(y_pred)\n\n return np.sum((y_true != pos_label) & (y_pred != pos_label))", "title": "" }, { "docid": "5948c32cf4d4bdcb3925aa66a18f5039", "score": "0.55610836", "text": "def test_on_two_negative_values(self):\n\n with pytest.raises(ValueError) as e:\n va.calc_grav_acceleration(-1, -1)\n assert e.match(\n \"Mass and radius values must be positive.\"), \"Expected ValueError.\"", "title": "" }, { "docid": "90846b31e1c05c0080afac0c2c96b0ce", "score": "0.555902", "text": "def false_negatives(y_true, y_pred, pos_label=1):\n if type(y_true) == list:\n y_true = np.array(y_true)\n if type(y_pred) == list:\n y_pred = np.array(y_pred)\n\n return np.sum((y_true == pos_label) & (y_pred != pos_label))", "title": "" }, { "docid": "afca98be8c69f8be56321983ca31ad38", "score": "0.55517775", "text": "def false_negative(y_true, y_pred):\n fn = 0\n for yt, yp in zip(y_true, y_pred):\n if yt == 1 and yp == 0:\n fn += 1\n return fn", "title": "" }, { "docid": "8237ea635336c126693037191ee57f87", "score": "0.549076", "text": "def test_cannot_withdraw_negative_amount(self):\n self.withdraw[\"withdrawal_amount\"] = -1000\n response = self.client.post(\"/accounts/withdraw\",\n data=json.dumps(self.withdraw),\n headers=self.headers)\n self.assertEqual(response.status_code, 400)\n\n data = json.loads(response.get_data())\n self.assertEqual(data,\n {\"message\": {\n \"withdrawal_amount\": \"The maximum withdrawal per\"\n \" transaction is 20000. 
The minimum is 1.\"}})", "title": "" }, { "docid": "e3d84e553ca97c155e87b7e0dd6a0c54", "score": "0.54905725", "text": "def true_positive_rate(self, class_index):\n return javabridge.call(self.jobject, \"truePositiveRate\", \"(I)D\", class_index)", "title": "" }, { "docid": "391fd89f7d1d2136996efaafa4eae94b", "score": "0.5470716", "text": "def test_all_neg(self):\n A = [-1, -10, 0]\n self.assertEqual(1, solution(A))", "title": "" }, { "docid": "805273429371dcb449045f04219096e9", "score": "0.54655844", "text": "def test_negative_price_raises_exception(self):\n with pytest.raises(AssertionError, match=\"The price must be non-negative.\"):\n GoodState(-1)", "title": "" }, { "docid": "6be92f56ea471c351771a6faf21815d8", "score": "0.54546785", "text": "def gr_neg(self, Y):\r\n return 2 * Y * (Y < 0)", "title": "" }, { "docid": "d503a43fe5ebb14204af68b0f63dbc3f", "score": "0.5453787", "text": "def test_error_rate(self):\n # For the penalty, the default loss is hinge.\n expected_signed_penalty_labels = (self._penalty_labels > 0.0) * 2.0 - 1.0\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0,\n 1.0 - expected_signed_penalty_labels * self._penalty_predictions) *\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_signed_constraint_labels = (\n (self._constraint_labels > 0.0) * 2.0 - 1.0)\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - expected_signed_constraint_labels * np.sign(\n self._constraint_predictions))) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.error_rate(self._split_context)\n self._check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "title": "" }, { "docid": "5215762045b7c9e4deafd68b5a15d84e", "score": "0.54517275", "text": "def test_negative_number (self):\n\t\tself.assertTrue (solution (4,5,-8, \"+\"),-8 )", "title": "" }, { "docid": "bc8fb7665a86d10fa16bc3559ecb2837", "score": "0.54516745", "text": "def test_non_zero(self):\n roc_curve = Roc(self.sig_eff, self.bkg_rej)\n result_bkg_rej = self.bkg_rej[[False, True, False, True, True, False, True]]\n result_sig_eff = self.sig_eff[[False, True, False, True, True, False, True]]\n np.testing.assert_array_almost_equal(\n roc_curve.non_zero, (result_bkg_rej, result_sig_eff)\n )", "title": "" }, { "docid": "cbf3483d4a6cce8e346416988b862c04", "score": "0.54506356", "text": "def test_is_float_negative(self):\n self.assertEqual(is_float('-1.01'), True)", "title": "" }, { "docid": "ffbbac0ee4a1ab001eb668f20f937d76", "score": "0.5446086", "text": "def false_negative_loss_scale_factor(self):\n\t\tif self.false_negatives is None:\n\t\t\traise Exception(\"Cannot get loss_scale_factor, model.false_negatives is undefined \")\n\n\t\tvalue = tf.to_float(tf.abs(self.false_negatives))\n\t\t# value = tf.Variable(1.0, dtype=self.gpu_type, name=\"loss_scale_factor\", trainable=False)\n\t\t# if its zero, return one (no punishment) since log(0) is undefined\n\t\t# this avoids NaN weights\n\t\tcond = tf.cond(tf.equal(value, 0.0),\n\t\t\t\t\t true_fn=lambda: 1.0,\n\t\t\t\t\t false_fn=lambda: 
self.fn_punish(value))\n\t\ttf.summary.scalar(name=\"loss_scale_computed\", tensor=cond)\n\t\treturn cond # tf.assign(value, cond)", "title": "" }, { "docid": "2ddd24f592b911b2afb4892cde619f85", "score": "0.54177237", "text": "def test_is_square_negative_test(self):\n\n allure.dynamic.title(\"Non square numbers (negative)\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p></p>\")\n\n with allure.step(\"Test non square number: 3\"):\n self.assertEqual(is_square(3),\n False,\n \"3 is not a square number\")", "title": "" }, { "docid": "137ab6a965b7743fdf6129ed7b413ce2", "score": "0.54124326", "text": "def is_positive(self):\n return self._sign_str == s.ZERO or self._sign_str.upper() == s.POSITIVE", "title": "" }, { "docid": "6d63981a1a84e158aae51ab2a7ac8425", "score": "0.5410168", "text": "def test_n_is_positive(self) -> None:\n for n in [-10000, -5, -1, 0]:\n with self.assertRaises(ValueError):\n vector_quantization(tf.placeholder(tf.float32, [None, 1, 3]), n)", "title": "" }, { "docid": "495e1601707d4217e242554a6474f679", "score": "0.5404053", "text": "def test_is_positive_int_negative(self):\n self.assertEqual(is_positive_int('-1'), False)", "title": "" }, { "docid": "d93c7ddcb8c119a1f617a43b272f8076", "score": "0.5403123", "text": "def _check_SALEVALUE(sv_trend, sv_nad):\n sv_trend = abs(sv_trend)\n sv_nad = abs(sv_nad)\n if sv_trend == sv_nad:\n return True, 0\n else:\n return False, sv_trend - sv_nad", "title": "" }, { "docid": "a74dd2d6743179e68c1a2795129ffcf7", "score": "0.539783", "text": "def is_negative(self, element):\n return False", "title": "" }, { "docid": "54bdeea8370bc5b9e5a8f9bfabab18f4", "score": "0.53855973", "text": "def needRestock(self):\n #we'll set for now the threshold at *five* items\n #so we need to check if self.quantity is less than five.\n if self.quantity < 5:\n return True\n else:\n return False", "title": "" }, { "docid": "e63832addfa3d847aab9e1c346fa1a20", "score": "0.53703195", "text": "def false_positive_rate(tn, fp):\n if (fp+tn) == 0:\n return 0\n return fp/float(fp+tn)", "title": "" }, { "docid": "68e789e1a27d577e8a58dc31e626afc8", "score": "0.5356425", "text": "def test_add_sale_quantity_not_positive_integers(self):\n\n response = self.app_test_client.post('{}/products'.format(\n self.base_url), json=self.Product, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n response = self.app_test_client.post('{}/sales'.format(\n self.base_url), json={\n 'items': [\n {\n 'name': 'Carpet',\n 'quantity': -1\n }\n ]\n }, headers=dict(Authorization=self.token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], \"The quantity should be a positive integer\")", "title": "" }, { "docid": "3ae85f6417b70ea28c4ccbcbf4f807fb", "score": "0.53508216", "text": "def is_correct_score(score):\n return 0.0 <= score <= 10.0", "title": "" }, { "docid": "b675732a769c1e64c465a1a81eab44f4", "score": "0.5346823", "text": "def true_negatives(predicted_y, validation_y):\n val_neg = np.where(validation_y == 0)\n pred_neg = np.where(predicted_y == 0)\n\n return np.in1d(pred_neg, val_neg).sum()", "title": "" }, { "docid": "6145a38e34785807d83a6be4895dcac7", "score": "0.5331474", "text": "def test_is_positive_int_ngood(self):\n self.assertEqual(is_positive_int(-1), 
False)", "title": "" }, { "docid": "67ed7d9661e52b1e0f6ddf63ed4745f6", "score": "0.5323193", "text": "def is_positive(value):\n return value >= 0", "title": "" }, { "docid": "9afce2d5cbed93da2660090ec5856d6f", "score": "0.53195536", "text": "def isValidCartQuantity(quantity):\n return isinstance(quantity, int) and quantity > 0", "title": "" }, { "docid": "e1ccf73b3ff32db80637aa544ecfa995", "score": "0.53159803", "text": "def negative(self):\n return Predicate('negative')", "title": "" }, { "docid": "3397804420bd46b86814b10229e3cddd", "score": "0.5312969", "text": "def eval_score(y_true, y_pred):\n error = y_true-y_pred\n \n erro = []\n for errors in error:\n if errors < 0: x = (np.exp((-errors/13))-1)\n if errors >= 0: x = (np.exp((errors/20))-1)\n erro.append(x)\n \n return -np.sum(erro)", "title": "" }, { "docid": "ec138e10d562119d58dd2b2252f8282e", "score": "0.5295526", "text": "def __incorrect_quantity(n_found, spec):\n if not spec.is_many() and n_found > 1:\n return True\n elif isinstance(spec.quantity, int) and n_found != spec.quantity:\n return True\n return False", "title": "" }, { "docid": "e7761b579d6841f5c6e10891043d9cf2", "score": "0.52843726", "text": "def is_positive(self, a):\n return self.request32('is_positive', a)", "title": "" }, { "docid": "0f957f54b032452892db29269a513184", "score": "0.528377", "text": "def test_neg_sharpe_ratio_single_holding():\n allocs = np.array([[1], [1], [1]])\n prices = np.array([[2], [4], [16]])\n prices_norm = prices / prices[0]\n sr = neg_sharpe_ratio(allocs, prices_norm)\n\n assert sr == -3 * np.sqrt(252)", "title": "" }, { "docid": "4b231d4222812d3041fffaa5a0e6a000", "score": "0.52799493", "text": "def _check_cost(self, cr, uid, ids, context=None):\n count = 0\n for product in self.browse(cr, uid, ids, context=context):\n if (product.standard_price <= 0):\n message = _(\"The Cost Must Be Greater Than Zero!\")\n count += 1\n if count > 0 :\n raise osv.except_osv(_('ValidateError'), _(message))\n return True", "title": "" }, { "docid": "644815950ace7063dc6edaebd176e4f3", "score": "0.52675474", "text": "def is_non_negative_value(value): # type: (Any) -> bool\n return value >= 0", "title": "" }, { "docid": "d086e7d19f63b55225882b1b02c96d32", "score": "0.52634734", "text": "def is_free(self):\n \n # allow a small tolerance due to the unreliability of floating\n # point math in most languages (including Python)\n total = self.setup_charge_amount + self.recurring_charge_amount\n return total < 0.000001 and total > -0.000001", "title": "" }, { "docid": "e4cda1f2fc36dc3cc21c7dceaaea3455", "score": "0.5261118", "text": "def true_negatives(labels,\n predictions,\n weights=None,\n metrics_collections=None,\n updates_collections=None,\n name=None):\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.true_negatives is not '\n 'supported when eager execution is enabled.')\n\n with variable_scope.variable_scope(name, 'true_negatives',\n (predictions, labels, weights)):\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions=math_ops.cast(predictions, dtype=dtypes.bool),\n labels=math_ops.cast(labels, dtype=dtypes.bool),\n weights=weights)\n is_true_negative = math_ops.logical_and(\n math_ops.equal(labels, False), math_ops.equal(predictions, False))\n return _count_condition(is_true_negative, weights, metrics_collections,\n updates_collections)", "title": "" }, { "docid": "efef9106bb9523c8ef9e7d7d209601c2", "score": "0.52562416", "text": "def test04a_calculate_reliability_bad_inputs(self):\r\n\r\n 
self.assertFalse(self.DUT.calculate_reliability(\r\n (None, None, None, None), self._mission_time,\r\n self._hr_multiplier[0]))\r\n self.assertAlmostEqual(self.DUT.hazard_rate, 0.0)\r\n self.assertAlmostEqual(self.DUT.mtbf, 0.0)\r\n self.assertAlmostEqual(self.DUT.reliability, 1.0)", "title": "" }, { "docid": "3602863a6542546db5db8eecf8558569", "score": "0.5251461", "text": "def _missing(self) -> bool:\n return self.value() == -1", "title": "" }, { "docid": "b4dd57335b40f6d05a49c1f082fa33e1", "score": "0.52456427", "text": "def reward(self, reward):\n return np.sign(reward)", "title": "" }, { "docid": "b4dd57335b40f6d05a49c1f082fa33e1", "score": "0.52456427", "text": "def reward(self, reward):\n return np.sign(reward)", "title": "" }, { "docid": "cf745410f0fb55141d6015835f1befb3", "score": "0.52446884", "text": "def gradient(self, y_true, y_pred):\n y_true = y_true or -1\n if y_true * y_pred < 1:\n return -y_pred\n return 0", "title": "" }, { "docid": "7b462f0389111d9d80c995fdf10db1a9", "score": "0.52253914", "text": "def false_positive(y_true, y_pred):\n fp = 0\n for yt, yp in zip(y_true, y_pred):\n if yt == 0 and yp == 1:\n fp += 1\n return fp", "title": "" }, { "docid": "20a14d9598a4f2b7946f06428240f640", "score": "0.522087", "text": "def __need_misfit_quantification(self, iteration, event, window_set):\n validation_dict = self.comm.storyteller.validation_dict\n\n quantify_misfit = True\n if iteration in validation_dict.keys():\n if event in validation_dict[iteration][\"events\"].keys():\n if validation_dict[iteration][\"events\"][event] != 0.0:\n quantify_misfit = False\n\n if not quantify_misfit:\n message = (\n f\"Will not quantify misfit for event {event}, \"\n f\"iteration {iteration} \"\n f\"window set {window_set}. If you want it computed, \"\n f\"change value in validation toml to 0.0\"\n )\n self.print(message)\n\n return quantify_misfit", "title": "" }, { "docid": "b81e3d1f44e03f0878a8f7f420f4e576", "score": "0.5220708", "text": "def not_negative(value):\n if value < 0:\n raise ValidationError('Value cannot be negative')", "title": "" }, { "docid": "5aa8343d02c3a5c8845c154c25f94138", "score": "0.5218506", "text": "def branch_neg():\n\n if accumulator < 0:\n return True\n else:\n return False", "title": "" }, { "docid": "519f6cf1213ae764fd34f16080b5af23", "score": "0.52153015", "text": "def _valid_sell_(quantity, person, ticker):\r\n # if quantity is not a value: error out\r\n try:\r\n quantity = int(quantity)\r\n except ValueError:\r\n return False\r\n\r\n result = StockHoldings.query.filter(StockHoldings.person_id == person.id,\r\n StockHoldings.stock_ticker == ticker).first()\r\n # if not owned or not enough owned: error out\r\n if not result or result.quantity < quantity or result.quantity is 0:\r\n return False\r\n\r\n return result", "title": "" }, { "docid": "f1aaab8a9f32a759a0cde73ef95a02d8", "score": "0.521483", "text": "def test_rate_wrong_question(self):\n CommonTestCases.user_token_assert_in(\n self,\n rate_wrong_question,\n \"Provide the missing items\"\n )", "title": "" }, { "docid": "6dd9b11f30b47ff33910e2b0d574b3aa", "score": "0.52059287", "text": "def test_less_or_equal_to_zero(self):\n course_run, user = create_purchasable_course_run()\n price_obj = course_run.courseprice_set.get(is_valid=True)\n\n for invalid_price in (0, -1.23,):\n price_obj.price = invalid_price\n price_obj.save()\n\n with patch('ecommerce.api.get_purchasable_course_run', autospec=True, return_value=course_run) as mocked:\n with self.assertRaises(ImproperlyConfigured) as ex:\n 
create_unfulfilled_order(course_run.edx_course_key, user)\n assert ex.exception.args[0] == \"Price to be charged is less than or equal to zero\"\n assert mocked.call_count == 1\n assert mocked.call_args[0] == (course_run.edx_course_key, user)\n\n assert Order.objects.count() == 0", "title": "" }, { "docid": "4fae51b5280c53f92e2bf8c38a91e134", "score": "0.5205395", "text": "def test_probs_to_rates(self):\n probs = dict([(i, Probs.random(DnaPairs)) for i in range(100)])\n rates = probs_to_rates(probs)\n #check we got at most the same number of items as in probs\n assert len(rates) <= len(probs)\n #check that we didn't get anything bad\n vals = rates.values()\n for v in vals:\n assert not v.isSignificantlyComplex()\n #check that we didn't miss anything good\n for key, val in probs.items():\n if key not in rates:\n try:\n r = val.toRates()\n print r.isValid()\n assert r.isSignificantlyComplex() or (not r.isValid())\n except (ZeroDivisionError, OverflowError, ValueError):\n pass", "title": "" }, { "docid": "fd9d86ed5fbdc92e362d8c2227bc50dd", "score": "0.5189607", "text": "def pretrade_riskcheck(self):\n if self.compute_notional() >= self.maxNotionalOutstanding:\n return False\n return True", "title": "" }, { "docid": "e7b0b5ba00abea57b018077d7880715e", "score": "0.51877505", "text": "def __neg__(self):\n return -1 * self", "title": "" }, { "docid": "5683a541ca775ee958ba1279dacea8c7", "score": "0.5186653", "text": "def exp_rmspe(y_true, y_pred):\n pct = tf.square((tf.exp(y_true) - tf.exp(y_pred)) / tf.exp(y_true))\n # Compute mean excluding stores with zero denominator.\n x = tf.reduce_sum(tf.where(y_true > 0.001, pct, tf.zeros_like(pct)))\n y = tf.reduce_sum(tf.where(y_true > 0.001, tf.ones_like(pct), tf.zeros_like(pct)))\n return tf.sqrt(x / y)", "title": "" }, { "docid": "9a7c3648e8cc1d5d82c8781b77eedef9", "score": "0.5185107", "text": "def test_stock_news_happypath_neg(self):\n url, score = commands.stock_news(\n self.stock_ticker,\n -1.0\n )\n if not url:\n url, score = self.retry_stock_news(1.0)\n\n assert isinstance(url, str)\n assert isinstance(score, str) # scores are cast to str\n\n assert float(score) <= 0.0", "title": "" }, { "docid": "2b463b207698fcd0e64d35f1638d9084", "score": "0.5178666", "text": "def _check_rate_range(self) -> None:\n if self.probably_no_rate not in range(-self.rate_gradation + 1, 0) \\\n or self.probably_rate not in range(1, self.rate_gradation):\n raise ProbabilityRatesException(\n 'One of the following variables not in range ({minus_rate_gradation};{rate_gradation}): '\n 'probably_no_rate, probably_rate'.format(\n minus_rate_gradation=-self.rate_gradation, rate_gradation=self.rate_gradation))\n self.log.debug('Probability rate ranges are OK')", "title": "" }, { "docid": "9fd019c72ddaf7d2483b2a76712bac95", "score": "0.5178267", "text": "def check_money_cant_be_negative(sender, instance, **kwargs):\n\tif instance.money < 0:\n\t\traise IntegrityError(\"Money can't be negative.\")", "title": "" }, { "docid": "582b8d8cfe8dc400a648d488252879a0", "score": "0.5177482", "text": "def test_score_amount(self):\n self.assertEqual(self.comment.score, -1)", "title": "" }, { "docid": "5b1b1bf45e3068d648b18a1c0e9937ed", "score": "0.51774144", "text": "def test_negative_number_is_invalid(self):\n invalid = -123\n self.assertFalse(luhn.is_valid(invalid))", "title": "" }, { "docid": "44e7fb171b8ad1b4cea1d4f9b5b38250", "score": "0.51702154", "text": "def _set_is_positive(self, params: np.ndarray) -> bool:\n return True", "title": "" }, { "docid": 
"0e73f6c768efa13ba02843374cc3e240", "score": "0.5167092", "text": "def checkPosNeg(self):\n # 20230125, check if we have both pos/neg valocities\n # we really need to do this after removing outliers\n velocityDrew = self.getVelocity(removeOutliers=True, medianFilter=0)\n _minVelSign = np.sign(np.nanmin(velocityDrew))\n _maxVelSign = np.sign(np.nanmax(velocityDrew))\n if _minVelSign != _maxVelSign:\n logger.warning(f'VELOCITY HAS BOTH POS AND NEGATIVE')\n logger.warning(f' file:{self.getFileName()}')\n logger.warning(f' minVel:{np.nanmin(velocityDrew)} maxVel:{np.nanmax(velocityDrew)}')\n return True\n else:\n return False", "title": "" }, { "docid": "f7342769fef114d9afe5eb3a32264745", "score": "0.5166344", "text": "def test_discretize_negatives(self):\n \n expected_result=[1, 1, 0, 0]\n result=stats.discretize([-0.1, -0.2, -0.3, -0.4])\n self.assertEqual(expected_result,result)", "title": "" }, { "docid": "5787013b133f9f9535303ca6441fc005", "score": "0.5165369", "text": "def test_negative_values():\n assert multiply(-5, -4) == 20", "title": "" }, { "docid": "3ffdc258fe2666e405df87e61d00d266", "score": "0.51641524", "text": "def __neg__(self):\n negative = deepcopy(self)\n\n if negative.getDigits() == [0]:\n return negative\n\n negative.setSign([1, 0][negative.getSign()])\n return negative", "title": "" }, { "docid": "95d6be6042e62ca073fcb363ded5f85d", "score": "0.51625407", "text": "def is_negative(self, txt: str) -> bool:\n res = self.analyse_text(txt)\n return res['compound'] < 0", "title": "" }, { "docid": "ecb4450fc101ae580290c7f74671efdb", "score": "0.5161611", "text": "def test_only negatives(self):\n self.assertEqual(max_integer([-4, -3, -2, -1]), -1)", "title": "" }, { "docid": "1ac330cb7598e6df30925b80ad07d517", "score": "0.5160693", "text": "def false_negatives(predicted_y, validation_y):\n val_pos = np.where(validation_y == 1)\n pred_neg = np.where(predicted_y == 0)\n\n return np.in1d(pred_neg, val_pos).sum()", "title": "" } ]
c0d84acff8b3c57d9ac6f26fd8edc728
Add an ECDF confidence interval to a plot. This method of computing a confidence interval can be thought of as computing confidence intervals of the inverse ECDF in the sense that we compute a confidence interval for the x-values for each of the discrete values of the ECDF. This is equivalent to computing bootstrap confidence intervals for the ECDF. Here is why. Imagine we draw bootstrap samples and for each we make an ECDF. Let's say we make 5 such ECDFs and we wish to compute a 60% confidence interval. (You can generalize to an arbitrary number of ECDFs and any confidence interval.) Each of these 5 ECDFs can be defined as starting at the same point and ending at the same point. Specifically, they start at x = min(data), y = 0 and end at x = max(data), y = 1. Furthermore, they are all monotonically increasing functions. Now, let's say we are constructing a confidence interval for the ECDF at position x. To do so, we put a dot on the second ECDF from the top at x and a dot on the second ECDF from the bottom. This gives us the middle 60% of ECDF values. Now, say we are constructing a confidence interval for the IECDF. We go to ECDF value y and we find the second ECDF from the left and place a dot on it. We also put a dot on the second ECDF from the right. Because all ECDFs are monotonic and start and end at the same points, the dot we put on the second-leftmost ECDF is also on the second curve from the top for some other x. Similarly, the second-rightmost ECDF is also on the second curve from the bottom for some other x. (You can sketch this out, and it becomes clear.) So, any dot we put on an ECDF for computing a confidence interval for an IECDF is also a dot we would put on an ECDF for computing a confidence interval of the ECDF. If we want to compute the confidence interval over the whole domain of x-values, we will cover the same set of points if we compute the confidence interval of the ECDF or the IECDF. So, we end up filling between the same two sets of curves. It turns out that the IECDF formulation is actually much easier to implement.
[ { "docid": "a218626aeed8ffd9717c12b0ff268c93", "score": "0.6035395", "text": "def _ecdf_conf_int(\n p,\n data,\n complementary=False,\n q_axis=\"x\",\n n_bs_reps=1000,\n ptiles=[2.5, 97.5],\n **kwargs,\n):\n data = utils._convert_data(data)\n\n bs_reps = np.array(\n [np.sort(np.random.choice(data, size=len(data))) for _ in range(n_bs_reps)]\n )\n\n # Compute the confidence intervals\n iecdf_low, iecdf_high = np.percentile(np.array(bs_reps), ptiles, axis=0)\n\n # y-values for ECDFs\n y = np.arange(1, len(data) + 1) / len(data)\n\n # Make them staircases\n x_low, y_plot = _to_staircase(x=iecdf_low, y=y)\n x_high, _ = _to_staircase(x=iecdf_high, y=y)\n\n if q_axis == \"y\":\n if complementary:\n p, patch = utils._fill_between(\n p, x1=1 - y_plot, y1=x_low, x2=1 - y_plot, y2=x_high, **kwargs\n )\n else:\n p, patch = utils._fill_between(\n p, x1=y_plot, y1=x_low, x2=y_plot, y2=x_high, **kwargs\n )\n elif q_axis == \"x\":\n if complementary:\n p, patch = utils._fill_between(\n p, x1=x_low, y1=1 - y_plot, x2=x_high, y2=1 - y_plot, **kwargs\n )\n else:\n p, patch = utils._fill_between(\n p, x1=x_low, y1=y_plot, x2=x_high, y2=y_plot, **kwargs\n )\n else:\n raise RuntimeError(\"`q_axis` must be either 'x' or 'y'.\")\n\n return p, patch", "title": "" } ]
[ { "docid": "3d6ee8375a96da8e4774c9274ea54b55", "score": "0.6659887", "text": "def display_confidence_interval_of_two_independant_eer(roc1, roc2, alpha):\n\n base, lower, upper, e = confidence_interval_of_two_independant_eer(roc1, roc2, alpha)\n plt.figure()\n plt.hist(e)\n n, bins, patches = plt.hist(e, bins=100, fill=False)\n plt.vlines(upper, 0, max(n), label='upper', linestyle='dotted')\n plt.vlines(lower, 0, max(n), label='lower', linestyle='dashed')", "title": "" }, { "docid": "5a3be0e1110f0672f0b257bc7216903f", "score": "0.614412", "text": "def ecdf(\n data=None,\n q=None,\n cats=None,\n q_axis=\"x\",\n palette=None,\n order=None,\n p=None,\n show_legend=True,\n legend_label=None,\n legend_location=\"right\",\n legend_orientation=\"vertical\",\n tooltips=None,\n complementary=False,\n kind=\"collection\",\n style=\"dots\",\n conf_int=False,\n ptiles=[2.5, 97.5],\n n_bs_reps=10000,\n click_policy=\"hide\",\n marker=\"circle\",\n marker_kwargs=None,\n line_kwargs=None,\n conf_int_kwargs=None,\n horizontal=None,\n val=None,\n **kwargs,\n):\n # Protect against mutability of dicts\n marker_kwargs = copy.copy(marker_kwargs)\n line_kwargs = copy.copy(line_kwargs)\n conf_int_kwargs = copy.copy(conf_int_kwargs)\n\n q = utils._parse_deprecations(q, q_axis, val, horizontal, \"y\")\n\n if style == \"formal\" and complementary:\n raise NotImplementedError(\"Complementary formal ECDFs not yet implemented.\")\n\n if palette is None:\n palette = colorcet.b_glasbey_category10\n\n data, q, cats, show_legend = utils._data_cats(\n data, q, cats, show_legend, legend_label\n )\n\n cats, cols = utils._check_cat_input(\n data, cats, q, None, None, tooltips, palette, order, marker_kwargs\n )\n\n kwargs = utils._fig_dimensions(kwargs)\n\n if conf_int and \"y_axis_type\" in kwargs and kwargs[\"y_axis_type\"] == \"log\":\n warnings.warn(\n \"Cannot reliably draw confidence intervals with a y-axis on a log scale because zero cannot be represented. Omitting confidence interval.\"\n )\n conf_int = False\n if (\n conf_int\n and \"x_axis_type\" in kwargs\n and kwargs[\"x_axis_type\"] == \"log\"\n and (data[q] <= 0).any()\n ):\n warnings.warn(\n \"Cannot draw confidence intervals with a x-axis on a log scale because some values are negative. 
Any negative values will be omitted from the ECDF.\"\n )\n conf_int = False\n\n if marker_kwargs is None:\n marker_kwargs = {}\n if line_kwargs is None:\n line_kwargs = {}\n\n y = \"__ECCDF\" if complementary else \"__ECDF\"\n\n if q_axis == \"y\":\n if \"x_axis_label\" not in kwargs:\n if complementary:\n kwargs[\"x_axis_label\"] = \"ECCDF\"\n else:\n kwargs[\"x_axis_label\"] = \"ECDF\"\n else:\n if \"y_axis_label\" not in kwargs:\n if complementary:\n kwargs[\"y_axis_label\"] = \"ECCDF\"\n else:\n kwargs[\"y_axis_label\"] = \"ECDF\"\n\n if q_axis == \"y\":\n if \"y_axis_label\" not in kwargs:\n kwargs[\"y_axis_label\"] = q\n else:\n if \"x_axis_label\" not in kwargs:\n kwargs[\"x_axis_label\"] = q\n\n if style in [\"formal\", \"staircase\"] and \"line_width\" not in line_kwargs:\n line_kwargs[\"line_width\"] = 2\n\n if conf_int_kwargs is None:\n conf_int_kwargs = {}\n if \"fill_alpha\" not in conf_int_kwargs:\n conf_int_kwargs[\"fill_alpha\"] = 0.5\n if \"line_alpha\" not in conf_int_kwargs and \"line_color\" not in conf_int_kwargs:\n conf_int_kwargs[\"line_alpha\"] = 0\n\n df = data.copy()\n if kind == \"collection\":\n if style == \"dots\":\n df[y] = df.groupby(cats)[q].transform(_ecdf_y, complementary=complementary)\n elif kind == \"colored\":\n df[y] = df[q].transform(_ecdf_y, complementary=complementary)\n cols += [y]\n else:\n raise RuntimeError(\"`kind` must be in `['collection', 'colored']\")\n\n _, df[\"__label\"] = utils._source_and_labels_from_cats(df, cats)\n cols += [\"__label\"]\n\n if order is not None:\n if type(cats) in [list, tuple]:\n df[\"__sort\"] = df.apply(lambda r: order.index(tuple(r[cats])), axis=1)\n else:\n df[\"__sort\"] = df.apply(lambda r: order.index(r[cats]), axis=1)\n df = df.sort_values(by=\"__sort\")\n\n if p is None:\n p = bokeh.plotting.figure(**kwargs)\n\n if style == \"dots\":\n marker_fun = utils._get_marker(p, marker)\n\n if tooltips is not None:\n if style in [\"formal\", \"staircase\"]:\n warnings.warn(\n \"Cannot have tooltips for formal ECDFs because there are no points to hover over. 
Omitting tooltips\"\n )\n else:\n p.add_tools(bokeh.models.HoverTool(tooltips=tooltips))\n\n markers = []\n lines = []\n patches = []\n labels = []\n\n if kind == \"collection\":\n # Explicitly loop to enable click policies on the legend\n # (not possible with factors)\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n labels.append(g[\"__label\"].iloc[0])\n if conf_int:\n conf_int_kwargs[\"fill_color\"] = palette[i % len(palette)]\n # conf_int_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n p, patch = _ecdf_conf_int(\n p,\n g[q],\n complementary=complementary,\n q_axis=q_axis,\n n_bs_reps=n_bs_reps,\n ptiles=ptiles,\n **conf_int_kwargs,\n )\n patches.append(patch)\n\n marker_kwargs[\"color\"] = palette[i % len(palette)]\n # marker_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n line_kwargs[\"color\"] = palette[i % len(palette)]\n # line_kwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n if style == \"staircase\":\n p, new_line = _staircase_ecdf(\n p,\n data=g[q],\n complementary=complementary,\n q_axis=q_axis,\n line_kwargs=line_kwargs,\n )\n lines.append(new_line)\n elif style == \"dots\":\n if q_axis == \"y\":\n markers.append(marker_fun(source=g, x=y, y=q, **marker_kwargs))\n else:\n markers.append(marker_fun(source=g, x=q, y=y, **marker_kwargs))\n elif style == \"formal\":\n p, circle, segment = _formal_ecdf(\n p,\n data=g[q],\n complementary=complementary,\n q_axis=q_axis,\n marker_kwargs=marker_kwargs,\n line_kwargs=line_kwargs,\n )\n markers.append(circle)\n lines.append(segment)\n elif kind == \"colored\":\n if style in [\"formal\", \"staircase\"]:\n raise RuntimeError(\n \"Cannot have a formal or staircase ECDF with `kind='colored'`.\"\n )\n\n if conf_int:\n if \"fill_color\" not in conf_int_kwargs:\n conf_int_kwargs[\"fill_color\"] = \"gray\"\n\n p, patch = _ecdf_conf_int(\n p,\n df[q],\n complementary=complementary,\n q_axis=q_axis,\n n_bs_reps=n_bs_reps,\n ptiles=ptiles,\n **conf_int_kwargs,\n )\n\n y = \"__ECCDF\" if complementary else \"__ECDF\"\n\n # Explicitly loop to enable click policies on the legend (not possible with factors)\n for i, (name, g) in enumerate(df.groupby(cats, sort=False)):\n source = bokeh.models.ColumnDataSource(g[cols])\n mkwargs = marker_kwargs\n # mkwargs[\"legend_label\"] = g[\"__label\"].iloc[0]\n mkwargs[\"color\"] = palette[i % len(palette)]\n labels.append(g[\"__label\"].iloc[0])\n if q_axis == \"y\":\n markers.append(marker_fun(source=source, x=y, y=q, **mkwargs))\n else:\n markers.append(marker_fun(source=source, x=q, y=y, **mkwargs))\n\n return _dist_legend(\n p,\n show_legend,\n legend_location,\n legend_orientation,\n click_policy,\n labels,\n markers,\n lines,\n patches,\n )", "title": "" }, { "docid": "68522c32635059a9a997bd03762360c1", "score": "0.6103807", "text": "def ecdf(data):\n # Number of data points\n n = len(data)\n\n # x-data for the ECDF\n x = np.sort(data)\n\n # y-data for the ECDF\n y = np.arange(1, n + 1) / n\n\n return x, y", "title": "" }, { "docid": "ee5547839eb1fc6ea468730f43dbe93d", "score": "0.6098925", "text": "def confidence_intervall_plot(data, alpha, exog_var):\n\n sns.set_theme(style=\"whitegrid\")\n \n # set up dic and initialise variables\n data_dict = {}\n data_dict['variable'] = [data.l2OFn_all, data.l2OFa_all_ln, data.l2OFn_oofv, data.l2OFa_oofv_ln, data.l2OFn_oda, data.l2OFa_oda_ln]\n \n # get all the models\n if exog_var == \"l2Exports_ln\" or exog_var == \"l2FDI_China_ln\":\n as1,oofv_s1, oda_s1, as2, oofv_s2, oda_s2 = OFn_OFa_all_Table2_robustness(data, False, [exog_var])\n \n 
data_dict = {}\n data_dict['variable'] = [data.l2OFn_all, data.l2OFa_all_ln, data.l2OFn_oofv, data.l2OFa_oofv_ln,\n data.l2OFn_oda, data.l2OFa_oda_ln]\n # calculate 90% CI\n data_dict['low'] = [as1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"lower\"], \n as2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"lower\"],\n oofv_s1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"lower\"], \n oofv_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"lower\"], \n oda_s1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"lower\"], \n oda_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"lower\"]]\n data_dict['up'] = [as1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"upper\"], \n as2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"upper\"],\n oofv_s1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"upper\"], \n oofv_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"upper\"],\n oda_s1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"upper\"], \n oda_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"upper\"]]\n\n dataset_robust = pd.DataFrame(data_dict)\n\n col = ['ro-','ro-', \"go-\",\"go-\",\"bo-\",\"bo-\"]\n labels = [\"OFn_all\", \"OFa_all_ln\", \"OFn_oofv\", \"OFa_oofv_ln\", \"OFn_oda\", \"OFa_oda_ln\"]\n\n fig, ax = plt.subplots(figsize=(14,6)) \n x = 0\n for low,up,y in zip(dataset_robust['low'],dataset_robust['up'],range(len(dataset_robust))):\n ax = plt.plot((low,up),(y,y),col[x], label = labels[x]) \n x +=1\n plt.yticks(list(range(len(dataset_robust))), [\"OFn_all\", \"OFa_all_ln\", \"OFn_oofv\", \"OFa_oofv_ln\", \"OFn_oda\", \"OFa_oda_ln\"])\n plt.xlabel(\"Robustness tests: Effect on Growth for choosen specification\")\n plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n\n plt.tight_layout(pad=2.5)\n \n \n ###\n \n else:\n ao1, af1, as1, ao2, af2, as2 = OFn_OFa_all_Table2(data, False)\n oda_o1, oda_f1, oda_s1, oda_o2, oda_f2, oda_s2 = OFn_OFa_oda_Table2(data, False)\n oofv_o1, oofv_f1, oofv_s1, oofv_o2, oofv_f2, oofv_s2 = OFn_OFa_oofv_Table2(data, False)\n \n \n \n # calculate 90% CI\n data_dict['low'] = [ao1.conf_int(level = 1-alpha).loc[\"l2OFn_all\", \"lower\"], \n ao2.conf_int(level = 1-alpha).loc[\"l2OFa_all_ln\", \"lower\"],\n oofv_o1.conf_int(level = 1-alpha).loc[\"l2OFn_oofv\", \"lower\"], \n oofv_o2.conf_int(level = 1-alpha).loc[\"l2OFa_oofv_ln\", \"lower\"], \n oda_o1.conf_int(level = 1-alpha).loc[\"l2OFn_oda\", \"lower\"], \n oda_o2.conf_int(level = 1-alpha).loc[\"l2OFa_oda_ln\", \"lower\"]]\n data_dict['up'] = [ao1.conf_int(level = 1-alpha).loc[\"l2OFn_all\", \"upper\"], \n ao2.conf_int(level = 1-alpha).loc[\"l2OFa_all_ln\", \"upper\"],\n oofv_o1.conf_int(level = 1-alpha).loc[\"l2OFn_oofv\", \"upper\"], \n oofv_o2.conf_int(level = 1-alpha).loc[\"l2OFa_oofv_ln\", \"upper\"],\n oda_o1.conf_int(level = 1-alpha).loc[\"l2OFn_oda\", \"upper\"], \n oda_o2.conf_int(level = 1-alpha).loc[\"l2OFa_oda_ln\", \"upper\"]]\n\n dataset = pd.DataFrame(data_dict)\n\n\n data_dict2 = {}\n data_dict2['variable'] = [data.l2OFn_all, data.l2OFa_all_ln, data.l2OFn_oofv, data.l2OFa_oofv_ln,\n data.l2OFn_oda, data.l2OFa_oda_ln]\n # calculate 90% CI\n data_dict2['low'] = [as1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"lower\"], \n as2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"lower\"],\n oofv_s1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"lower\"], \n oofv_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"lower\"], \n oda_s1.conf_int(level = 
1-alpha).loc[\"Chinese_OFn_(t-2)\", \"lower\"], \n oda_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"lower\"]]\n data_dict2['up'] = [as1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"upper\"], \n as2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"upper\"],\n oofv_s1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"upper\"], \n oofv_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"upper\"],\n oda_s1.conf_int(level = 1-alpha).loc[\"Chinese_OFn_(t-2)\", \"upper\"], \n oda_s2.conf_int(level = 1-alpha).loc[\"Chinese_OFa_(t-2)\", \"upper\"]]\n\n dataset2 = pd.DataFrame(data_dict2)\n\n col = ['ro-','ro-', \"go-\",\"go-\",\"bo-\",\"bo-\"]\n labels = [\"OFn_all\", \"OFa_all_ln\", \"OFn_oofv\", \"OFa_oofv_ln\", \"OFn_oda\", \"OFa_oda_ln\"]\n\n fig, ax = plt.subplots(1,2, figsize=(14,6)) \n x = 0\n plt.subplot(121)\n for low,up,y in zip(dataset['low'],dataset['up'],range(len(dataset))):\n ax = plt.plot((low,up),(y,y),col[x], label = labels[x]) \n x +=1\n plt.yticks(list(range(len(dataset))), [\"OFn_all\", \"OFa_all_ln\", \"OFn_oofv\", \"OFa_oofv_ln\", \"OFn_oda\", \"OFa_oda_ln\"])\n plt.xlabel(\"Effect on Growth for OLS\")\n\n #plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)\n\n x = 0\n plt.subplot(122)\n for low,up,y in zip(dataset2['low'],dataset2['up'],range(len(dataset2))):\n ax = plt.plot((low,up),(y,y),col[x], label = labels[x]) \n x +=1\n plt.yticks(list(range(len(dataset))), [\"OFn_all\", \"OFa_all_ln\", \"OFn_oofv\", \"OFa_oofv_ln\", \"OFn_oda\", \"OFa_oda_ln\"])\n plt.xlabel(\"Effect on Growth for 2SLS\")\n\n\n plt.tight_layout(pad=2.5)", "title": "" }, { "docid": "47af79ff39fb7e13328f6fac70898746", "score": "0.60913634", "text": "def ecdf(data):\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y", "title": "" }, { "docid": "dece38b9c600ac8471ce048e31adabd0", "score": "0.6089741", "text": "def ecdf(data):\n\n # Number of data points: n\n n = len(data)\n\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y", "title": "" }, { "docid": "55e42ad4f2d28d9768ef66c182603ca2", "score": "0.60697407", "text": "def ecdf(data):\n\n # Number of data points: n\n n= len(data)\n\n # x-data for the ECDF: x\n x= np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1)/n\n\n return x, y", "title": "" }, { "docid": "4f564638f8665df98aac60d074a7488f", "score": "0.6067151", "text": "def ecdf(data):\n\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y", "title": "" }, { "docid": "4f564638f8665df98aac60d074a7488f", "score": "0.6067151", "text": "def ecdf(data):\n\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y", "title": "" }, { "docid": "4f564638f8665df98aac60d074a7488f", "score": "0.6067151", "text": "def ecdf(data):\n\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y", "title": "" }, { "docid": "bbc058bab581a74d87dc1b26725235a5", "score": "0.6057889", "text": "def ecdf(data):\n\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = 
np.arange(1, 1+n) / n\n\n return x, y", "title": "" }, { "docid": "59b8d9b68b8f971a6d26c0ff6d886825", "score": "0.6057376", "text": "def ecdf(data):\n\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n + 1) / n\n\n return x, y", "title": "" }, { "docid": "4afcfbe1a30cd83d4a6d7d59e80efae7", "score": "0.59362596", "text": "def plot_pred_ci(self, n_days=1):\n\n # Only show confidence intervals\n # for the range where cross-validation\n # results are available\n if n_days > self.pred.shape[0]:\n raise ValueError(\n \"\"\"n_days must be smaller than %d, which is the array\n of mean squared errors calculated\n using block cross validation\"\"\"\n % self.pred.shape[0]\n )\n\n t = np.linspace(0, n_days + 1, n_days + 1)\n pred_low_2s = self.pred[1:] - 2 * self.mse_avg\n pred_low_s = self.pred[1:] - self.mse_avg\n pred_high_s = self.pred[1:] + self.mse_avg\n pred_high_2s = self.pred[1:] + 2 * self.mse_avg\n plt.figure(figsize=[6, 6])\n ax = plt.axes()\n ax.tick_params(axis=\"both\", which=\"major\", labelsize=14)\n\n plt.plot(t[1:], pred_low_2s[:n_days], \"b-.\", linewidth=2)\n plt.plot(t[1:], pred_low_s[:n_days], \"b--\", linewidth=2)\n plt.plot(t, self.pred[: n_days + 1], \"k-\", linewidth=3)\n plt.plot(t[1:], pred_high_s[:n_days], \"r--\", linewidth=2)\n plt.plot(t[1:], pred_high_2s[:n_days], \"r-.\", linewidth=2)\n\n # Fancy filling\n plt.fill_between(\n t[1:], pred_low_s[:n_days], self.pred[1 : n_days + 1], alpha=0.3, color=\"b\"\n )\n plt.fill_between(\n t[1:],\n pred_low_2s[:n_days],\n self.pred[1 : n_days + 1],\n alpha=0.15,\n color=\"b\",\n )\n plt.fill_between(\n t[1:], pred_high_s[:n_days], self.pred[1 : n_days + 1], alpha=0.3, color=\"r\"\n )\n plt.fill_between(\n t[1:],\n pred_high_2s[:n_days],\n self.pred[1 : n_days + 1],\n alpha=0.15,\n color=\"r\",\n )\n\n plt.legend(\n [\n \"-2$\\\\sigma$ 95% IC\", # pylint: disable=W1401\n \"-$\\\\sigma$ 66% IC\", # pylint: disable=W1401\n \"Prediction\",\n \"+$\\\\sigma$ 66% IC\", # pylint: disable=W1401\n \"+2$\\\\sigma$ 95% IC\", # pylint: disable=W1401\n ],\n fontsize=12,\n )\n # plt.plot([0, 1], [self.n_obs_end, self.pred[0]], \"k-\")\n\n plt.title(\"Model predictions\", size=15)\n plt.xlabel(\"Days since the end of the sample\", size=14)\n plt.ylabel(\"Number of infected\", size=14)\n plt.show()", "title": "" }, { "docid": "6eff0193d638b4830d253d01dbba81bb", "score": "0.5927992", "text": "def test_confidence_ellipse_subplots():\n np.random.seed(0)\n PARAMETERS = {\n \"Positive correlation\": [[0.85, 0.35], [0.15, -0.65]],\n \"Negative correlation\": [[0.9, -0.4], [0.1, -0.6]],\n \"Weak correlation\": [[1, 0], [0, 1]],\n }\n mu = 2, 4\n scale = 3, 5\n\n figure, axs = plt.subplots(1, 3, figsize=(9, 3))\n for ax, (title, dependency) in zip(axs, PARAMETERS.items()):\n x, y = get_correlated_dataset(800, dependency, mu, scale)\n ax.scatter(x, y, s=0.5)\n ax.axvline(c=\"grey\", lw=1)\n ax.axhline(c=\"grey\", lw=1)\n draw_confidence_ellipse(x, y, ax=ax, n_std=2.5, edgecolor=\"red\")\n ax.scatter(mu[0], mu[1], c=\"red\", s=3)\n ax.set_title(title)\n ax.set_xlim(-8, 12)\n ax.set_ylim(-15, 20)\n\n return figure", "title": "" }, { "docid": "64cbe71bc821d2995d551ffa89097c13", "score": "0.59129345", "text": "def confidence_2d(xsamples,ysamples,ax=None,intervals=None,nbins=20,linecolor='k',histunder=False,cmap=\"Blues\",filled=False,linewidth=1.):\n if intervals is None:\n intervals = 1.0 - np.exp(-0.5 * np.array([0., 1., 2.]) ** 2)\n H,yedges,xedges = 
np.histogram2d(ysamples,xsamples,bins=nbins)\n\n\n #get the contour levels\n h = np.sort(H.flatten())[::-1]\n cdf = np.cumsum(h)/np.cumsum(h)[-1]\n v = np.array([h[ cdf<=li ][-1] for li in intervals[1:]])[::-1]\n v = np.append(v,h[0])\n\n xc = np.array([.5*(xedges[i]+xedges[i+1]) for i in np.arange(nbins)]) #bin centres\n yc = np.array([.5*(yedges[i]+yedges[i+1]) for i in np.arange(nbins)])\n\n xx,yy = np.meshgrid(xc,yc)\n\n if ax is None:\n fig,ax = plt.subplots()\n if histunder:\n ax.hist2d(xsamples,ysamples,bins=nbins,cmap=cmap)\n ax.contour(xx,yy,H,levels=v,colors=linecolor,extend='max',linewidths=linewidth)\n elif filled:\n ax.contourf(xx,yy,H,levels=v,cmap=cmap)\n ax.contour(xx,yy,H,levels=v,colors=linecolor,extend='max',linewidths=linewidth)\n else:\n ax.contour(xx,yy,H,levels=v,colors=linecolor,linewidths=linewidth) \n\n return ax", "title": "" }, { "docid": "276d5be2a27575032c1e44d5e89eb582", "score": "0.58327645", "text": "def confidence_interval_of_two_independant_eer(roc1, roc2, alpha, verbose=True):\n\n assert roc1.estimated_eer and roc1.bootstraped_eers, \\\n \"You must call get_confidence_interval before for roc1\"\n assert roc2.estimated_eer and roc2.bootstraped_eers, \\\n \"You must call get_confidence_interval before for roc2\"\n\n e = np.array(roc1.bootstraped_eers) - np.array(roc2.bootstraped_eers) \\\n - (roc1.estimated_eer - roc2.estimated_eer )\n\n e_L, e_U = get_confidence_limits(e, alpha)\n base = roc1.estimated_eer - roc2.estimated_eer\n\n\n if verbose:\n print \"Comparison of two independant EER\"\n print \"=================================\"\n print \"Estimated difference EER1-EER2: %0.6f\" % base\n print \"Lower boundary: %0.6f\" % ( base - e_U)\n print \"Upper boundary: %0.6f\" % ( base - e_L)\n\n return base, base - e_U, base - e_L, e", "title": "" }, { "docid": "89abf79e181a04a3187ecbcdc88b2a6b", "score": "0.5755696", "text": "def confidence(isamples, cfd=68.27, bins=100, gaussian_fit=False, weights=None,\n verbose=True, save=False, output_dir='', force=False,\n output_file='confidence.txt', title=None, ndig=1, plsc=None,\n labels=['r', 'theta', 'f'], gt=None, **kwargs):\n\n try:\n l = isamples.shape[1]\n if l == 1:\n isamples = isamples[:, 0]\n except BaseException:\n l = 1\n\n if not l == len(labels):\n raise ValueError(\"Length of labels different to number of parameters\")\n\n if gt is not None:\n if len(gt) != l:\n msg = \"If provided, the length of ground truth values should match\"\n msg += \" number of parameters\"\n raise TypeError(msg)\n if np.isscalar(ndig):\n ndig = [ndig]*l\n else:\n if len(ndig) != l:\n msg = \"Length of ndig list different to number of parameters\"\n raise ValueError(msg)\n\n pKey = labels\n label_file = labels\n\n confidenceInterval = {}\n val_max = {}\n\n if cfd == 100:\n cfd = 99.9\n\n #########################################\n ## Determine the confidence interval ##\n #########################################\n if gaussian_fit:\n mu = np.zeros(l)\n sigma = np.zeros_like(mu)\n\n if gaussian_fit:\n nrows = 2*max(int(np.ceil(l/4)), 1)\n fig, ax = plt.subplots(nrows, min(4, l), figsize=(12, 4*nrows))\n else:\n nrows = max(int(np.ceil(l/4)), 1)\n fig, ax = plt.subplots(nrows, min(4, l), figsize=(12, 4*nrows))\n\n for j in range(l):\n if nrows > 1:\n if l > 1:\n ax0_tmp = ax[j//4][j % 4]\n if gaussian_fit:\n ax1_tmp = ax[nrows//2+j//4][j % 4]\n else:\n ax0_tmp = ax[j//4]\n if gaussian_fit:\n ax1_tmp = ax[nrows//2+j//4]\n elif l > 1 and not gaussian_fit:\n ax0_tmp = ax[j]\n else:\n ax0_tmp = ax\n if l > 1:\n if gaussian_fit:\n 
n, bin_vertices, _ = ax0_tmp.hist(isamples[:, j], bins=bins,\n weights=weights,\n histtype='step',\n edgecolor='gray')\n else:\n n, bin_vertices, _ = ax0_tmp.hist(isamples[:, j], bins=bins,\n weights=weights,\n histtype='step',\n edgecolor='gray')\n else:\n if gaussian_fit:\n n, bin_vertices, _ = ax0_tmp.hist(isamples[:], bins=bins,\n weights=weights,\n histtype='step',\n edgecolor='gray')\n else:\n n, bin_vertices, _ = ax0_tmp.hist(isamples[:], bins=bins,\n weights=weights,\n histtype='step',\n edgecolor='gray')\n bins_width = np.mean(np.diff(bin_vertices))\n surface_total = np.sum(np.ones_like(n)*bins_width * n)\n n_arg_sort = np.argsort(n)[::-1]\n\n test = 0\n pourcentage = 0\n for k, jj in enumerate(n_arg_sort):\n test = test + bins_width*n[int(jj)]\n pourcentage = test/surface_total*100\n if pourcentage > cfd:\n if verbose:\n msg = 'percentage for {}: {}%'\n print(msg.format(label_file[j], pourcentage))\n break\n if k == 0:\n msg = \"WARNING: Percentile reached in a single bin. \"\n msg += \"This may be due to outliers or a small sample.\"\n msg += \"Uncertainties will be unreliable. Try one of these:\"\n msg += \"increase bins, or trim outliers, or decrease cfd.\"\n if force:\n raise ValueError(msg)\n else:\n print(msg)\n n_arg_min = int(n_arg_sort[:k+1].min())\n n_arg_max = int(n_arg_sort[:k+1].max())\n\n if n_arg_min == 0:\n n_arg_min += 1\n if n_arg_max == bins:\n n_arg_max -= 1\n\n val_max[pKey[j]] = bin_vertices[int(n_arg_sort[0])]+bins_width/2.\n confidenceInterval[pKey[j]] = np.array([bin_vertices[n_arg_min-1],\n bin_vertices[n_arg_max+1]]\n - val_max[pKey[j]])\n if title is not None:\n if isinstance(title, str):\n lab = title\n else:\n lab = pKey[j]\n if l > 1:\n arg = (isamples[:, j] >= bin_vertices[n_arg_min - 1]) * \\\n (isamples[:, j] <= bin_vertices[n_arg_max + 1])\n if gaussian_fit:\n ax0_tmp.hist(isamples[arg, j], bins=bin_vertices,\n facecolor='gray', edgecolor='darkgray',\n histtype='stepfilled', alpha=0.5)\n ax0_tmp.set_xlabel(labels[j])\n if j == 0:\n ax0_tmp.set_ylabel('Counts')\n if title is not None:\n fmt = \"{{:.{0}f}}\".format(ndig[j]).format\n msg = r\"${{{0}}}_{{{1}}}^{{+{2}}}$\"\n tit = msg.format(fmt(val_max[pKey[j]]),\n fmt(confidenceInterval[pKey[j]][0]),\n fmt(confidenceInterval[pKey[j]][1]))\n if gt is not None:\n tit += r\" (gt: ${{{0}}}$)\".format(fmt(gt[j]))\n ax0_tmp.set_title(\"{0}: {1}\".format(lab, tit), fontsize=10)\n if gt is not None:\n x_close = find_nearest(bin_vertices, gt[j])\n ax0_tmp.vlines(gt[j], 0, n[x_close], label='gt',\n linestyles='dashed', color='blue')\n label = 'estimate'\n else:\n label = None\n ax0_tmp.vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n linestyles='dashed', color='red', label=label)\n\n mu[j], sigma[j] = norm.fit(isamples[:, j])\n n_fit, bins_fit = np.histogram(isamples[:, j], bins, density=1,\n weights=weights)\n ax1_tmp.hist(isamples[:, j], bins, density=1, weights=weights,\n facecolor='gray', edgecolor='darkgray',\n histtype='step')\n y = norm.pdf(bins_fit, mu[j], sigma[j])\n ax1_tmp.plot(bins_fit, y, 'g-', linewidth=2, alpha=0.7)\n\n ax1_tmp.set_xlabel(labels[j])\n if j == 0:\n ax1_tmp.set_ylabel('Counts')\n\n if title is not None:\n fmt = \"{{:.{0}f}}\".format(ndig[j]).format\n msg = r\"$\\mu$ = {0}, $\\sigma$ = {1}\"\n tit = msg.format(fmt(mu[j]), fmt(sigma[j]))\n if gt is not None:\n tit += r\" (gt: ${{{0}}}$)\".format(fmt(gt[j]))\n ax1_tmp.set_title(\"{0}: {1}\".format(lab, tit),\n fontsize=10)\n if gt is not None:\n x_close = find_nearest(bins_fit, gt[j])\n ax1_tmp.vlines(gt[j], 0, y[x_close], 
linestyles='dashed',\n color='blue', label='gt')\n label = r'estimate ($\\mu$)'\n else:\n label = None\n\n ax1_tmp.vlines(mu[j], 0, np.amax(y), linestyles='dashed',\n color='green', label=label)\n if gt is not None:\n ax0_tmp.legend()\n ax1_tmp.legend()\n\n else:\n ax0_tmp.hist(isamples[arg, j], bins=bin_vertices,\n facecolor='gray', edgecolor='darkgray',\n histtype='stepfilled', alpha=0.5)\n ax0_tmp.vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n linestyles='dashed', color='red')\n ax0_tmp.set_xlabel(labels[j])\n if j == 0:\n ax0_tmp.set_ylabel('Counts')\n\n if title is not None:\n fmt = \"{{:.{0}f}}\".format(ndig[j]).format\n msg = r\"${{{0}}}_{{{1}}}^{{+{2}}}$\"\n tit = msg.format(fmt(val_max[pKey[j]]),\n fmt(confidenceInterval[pKey[j]][0]),\n fmt(confidenceInterval[pKey[j]][1]))\n if gt is not None:\n tit += r\" (gt: ${{{0}}}$)\".format(fmt(gt[j]))\n ax0_tmp.set_title(\"{0}: {1}\".format(lab, tit), fontsize=10)\n if gt is not None:\n x_close = find_nearest(bin_vertices, gt[j])\n ax0_tmp.vlines(gt[j], 0, n[x_close], label='gt',\n linestyles='dashed', color='blue')\n label = 'estimate'\n else:\n label = None\n ax0_tmp.vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n linestyles='dashed', color='red', label=label)\n if gt is not None:\n ax0_tmp.legend()\n\n else:\n arg = (isamples[:] >= bin_vertices[n_arg_min - 1]) * \\\n (isamples[:] <= bin_vertices[n_arg_max + 1])\n if gaussian_fit:\n ax0_tmp.hist(isamples[arg], bins=bin_vertices,\n facecolor='gray', edgecolor='darkgray',\n histtype='stepfilled', alpha=0.5)\n\n ax0_tmp.set_xlabel(labels[j])\n if j == 0:\n ax0_tmp.set_ylabel('Counts')\n\n if gt is not None:\n x_close = find_nearest(bin_vertices, gt[j])\n ax0_tmp.vlines(gt[j], 0, n[x_close], label='gt',\n linestyles='dashed', color='blue')\n label = 'estimate'\n else:\n label = None\n ax0_tmp.vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n linestyles='dashed', color='red', label=label)\n\n if title is not None:\n fmt = \"{{:.{0}f}}\".format(ndig[j]).format\n msg = r\"${{{0}}}_{{{1}}}^{{+{2}}}$\"\n tit = msg.format(fmt(val_max[pKey[j]]),\n fmt(confidenceInterval[pKey[j]][0]),\n fmt(confidenceInterval[pKey[j]][1]))\n if gt is not None:\n tit += r\" (gt: ${{{0}}}$)\".format(fmt(gt[j]))\n ax0_tmp.set_title(\"{0}: {1}\".format(lab, tit), fontsize=10)\n\n mu[j], sigma[j] = norm.fit(isamples[:])\n n_fit, bins_fit = np.histogram(isamples[:], bins, density=1,\n weights=weights)\n ax1_tmp.hist(isamples[:], bins, density=1, weights=weights,\n facecolor='gray', edgecolor='darkgray',\n histtype='step')\n y = norm.pdf(bins_fit, mu[j], sigma[j])\n ax1_tmp.plot(bins_fit, y, 'g-', linewidth=2, alpha=0.7)\n\n ax1_tmp.set_xlabel(labels[j])\n if j == 0:\n ax1_tmp.set_ylabel('Counts')\n\n if title is not None:\n fmt = \"{{:.{0}f}}\".format(ndig[j]).format\n msg = r\"$\\mu$ = {{{0}}}, $\\sigma$ = {{{1}}}\"\n tit = msg.format(fmt(mu[j]), fmt(sigma[j]))\n if gt is not None:\n tit += r\" (gt: ${{{0}}}$)\".format(fmt(gt[j]))\n ax1_tmp.set_title(\"{0}: {1}\".format(lab, tit),\n fontsize=10)\n\n if gt is not None:\n x_close = find_nearest(bins_fit, gt[j])\n ax1_tmp.vlines(gt[j], 0, y[x_close], label='gt',\n linestyles='dashed', color='blue')\n label = r'estimate ($\\mu$)'\n else:\n label = None\n ax1_tmp.vlines(mu[j], 0, np.amax(y), linestyles='dashed',\n color='green', label=label)\n if gt is not None:\n ax0_tmp.legend()\n ax1_tmp.legend()\n\n else:\n ax0_tmp.hist(isamples[arg], bins=bin_vertices, facecolor='gray',\n edgecolor='darkgray', histtype='stepfilled',\n alpha=0.5)\n 
ax0_tmp.set_xlabel(labels[j])\n if j == 0:\n ax0_tmp.set_ylabel('Counts')\n\n if title is not None:\n fmt = \"{{:.{0}f}}\".format(ndig[j]).format\n msg = r\"${{{0}}}_{{{1}}}^{{+{2}}}$\"\n tit = msg.format(fmt(val_max[pKey[j]]),\n fmt(confidenceInterval[pKey[j]][0]),\n fmt(confidenceInterval[pKey[j]][1]))\n if gt is not None:\n tit += r\" (gt: ${{{0}}}$)\".format(fmt(gt[j]))\n ax0_tmp.set_title(\"{0}: {1}\".format(lab, tit), fontsize=10)\n if gt is not None:\n x_close = find_nearest(bin_vertices, gt[j])\n ax0_tmp.vlines(gt[j], 0, n[x_close], label='gt',\n linestyles='dashed', color='blue')\n label = 'estimate'\n else:\n label = None\n ax0_tmp.vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],\n linestyles='dashed', color='red', label=label)\n if gt is not None:\n ax0_tmp.legend()\n\n plt.tight_layout(w_pad=0.1)\n\n if save:\n if gaussian_fit:\n plt.savefig(output_dir+'confi_hist_gaussfit.pdf')\n else:\n plt.savefig(output_dir+'confi_hist.pdf')\n\n if verbose:\n print('\\n\\nConfidence intervals:')\n for i, lab in enumerate(labels):\n print('{}: {} [{},{}]'.format(lab, val_max[lab],\n confidenceInterval[lab][0],\n confidenceInterval[lab][1]))\n if gaussian_fit:\n print()\n print('Gaussian fit results:')\n for i, lab in enumerate(labels):\n print('{}: {} +-{}'.format(lab, mu[i], sigma[i]))\n\n ##############################################\n ## Write inference results in a text file ##\n ##############################################\n if save:\n with open(output_dir+output_file, \"w\") as f:\n f.write('###########################\\n')\n f.write('#### INFERENCE TEST ###\\n')\n f.write('###########################\\n')\n f.write(' \\n')\n f.write('Results of the MCMC fit\\n')\n f.write('----------------------- \\n')\n f.write(' \\n')\n f.write('>> Position and flux of the planet (highly probable):\\n')\n f.write('{} % confidence interval\\n'.format(cfd))\n f.write(' \\n')\n\n for i in range(l):\n confidenceMax = confidenceInterval[pKey[i]][1]\n confidenceMin = -confidenceInterval[pKey[i]][0]\n if i == 2 or l == 1:\n text = '{}: \\t\\t\\t{:.3f} \\t-{:.3f} \\t+{:.3f}\\n'\n else:\n text = '{}: \\t\\t\\t{:.3f} \\t\\t-{:.3f} \\t\\t+{:.3f}\\n'\n\n f.write(text.format(pKey[i], val_max[pKey[i]],\n confidenceMin, confidenceMax))\n if l > 1 and plsc is not None and 'r' in labels:\n f.write(' ')\n f.write('Platescale = {} mas\\n'.format(plsc*1000))\n f.write('r (mas): \\t\\t{:.2f} \\t\\t-{:.2f} \\t\\t+{:.2f}\\n'.format(\n val_max[pKey[0]]*plsc*1000,\n -confidenceInterval[pKey[0]][0]*plsc*1000,\n confidenceInterval[pKey[0]][1]*plsc*1000))\n\n if gaussian_fit:\n return mu, sigma\n else:\n return val_max, confidenceInterval", "title": "" }, { "docid": "7c5d5467d6b9ec34c688e5073e824ce0", "score": "0.56896406", "text": "def _make_expo_cdf():\n n = 40\n max = 2.5\n xs = [max * i / n for i in range(n)]\n\n lam = 2.0\n ps = [_expo_cdf(x, lam) for x in xs]\n\n percentile = -math.log(0.05) / lam\n print('Fraction <= ', percentile, _expo_cdf(lam, percentile))\n\n pyplot.clf()\n pyplot.plot(xs, ps, linewidth=2)\n _05_myplot._save('expo_cdf',\n title='Exponential CDF',\n xlabel='x',\n ylabel='CDF',\n legend=False)", "title": "" }, { "docid": "8f45f2d7d16a6f5b48feda4f369b3a5b", "score": "0.5684424", "text": "def ecdf(data):\n # Get x data (sort out data)\n x = np.sort(data)\n # Get y data (compute from x)\n y = np.arange(1, len(data)+1)/len(data)\n return x,y", "title": "" }, { "docid": "a257868403a6cc0a6a71d8b170116e1f", "score": "0.5670661", "text": "def ECDF(self, column_name, x_label):\n x = 
np.sort(self.dataframe[column_name])\n y = np.arange(1, len(x)+1) / len(x)\n _ = plt.plot(x,y, marker='.', linestyle='none')\n _ = plt.xlabel(x_label)\n _ = plt.ylabel(\"ECDF\")\n plt.margins(0.02)\n plt.show()", "title": "" }, { "docid": "741f83bd92fef6d48f0f13d53dfa74b8", "score": "0.5592027", "text": "def confidence_interval(data, axis=0, cis=95, n_boots=200, random_state=None,\n fcn=None, skipna=True, verbose=None):\n set_log_level(verbose)\n # ---------------------------------- I/O ----------------------------------\n if isinstance(cis, (int, float, str)):\n cis = [cis]\n assert isinstance(cis, (list, tuple, np.ndarray))\n assert isinstance(n_boots, int)\n need_ci = np.any([isinstance(k, (int, float)) for k in cis])\n logger.info(f\" Estimating CI (cis={cis}, axis={axis}, \"\n f\"n_boots={n_boots}, skipna=True, \"\n f\"random_state={random_state})\")\n\n # default functions\n if fcn is None:\n fcn = np.nanmean if skipna else np.mean\n fcn_std = np.nanstd if skipna else np.std\n\n # ------------------------------- DATAARRAY -------------------------------\n if isinstance(data, xr.DataArray):\n if isinstance(axis, str):\n axis = data.get_axis_num(axis)\n dims = [d for n_d, d in enumerate(data.dims) if n_d != axis]\n coords = [data[d].data for d in dims]\n attrs = data.attrs\n attrs.update(n_boots=n_boots, random_state=random_state,\n skipna=skipna, fcn=fcn.__name__)\n attrs = check_attrs(attrs)\n name = 'CI' if data.name is None else data.name + '_CI'\n x = data.data\n else:\n x = data\n\n # ------------------------------- BOOSTRAPS -------------------------------\n if need_ci:\n # compute summary statistics\n part = bootstrap_partitions(x.shape[axis], n_partitions=n_boots,\n random_state=random_state)\n x_ci = []\n for k in range(n_boots):\n sl = [slice(None)] * x.ndim\n sl[axis] = part[k]\n x_ci.append(fcn(x[tuple(sl)], axis=axis))\n x_ci = np.stack(x_ci)\n\n # -------------------------------- CI / STD -------------------------------\n # infer ci bounds\n cib = []\n for n_ci, ci in enumerate(cis):\n if isinstance(ci, (int, float)):\n halpha = (100. - ci) / 2.\n _ci = np.percentile(x_ci, [halpha, 100. 
- halpha], axis=0)\n elif ci in ['sd', 'sem']:\n x_sd, x_m = fcn_std(x, axis=axis), fcn(x, axis=axis)\n if ci == 'sem':\n x_sd /= np.sqrt(x.shape[axis])\n _ci = np.stack([x_m - x_sd, x_m + x_sd])\n cib.append(_ci)\n cib = np.stack(cib)\n\n # --------------------------------- XARRAY --------------------------------\n # xarray formatting (if needed)\n if isinstance(data, xr.DataArray):\n cib = xr.DataArray(\n cib, dims=['ci', 'bound'] + dims,\n coords=[cis, ['low', 'high']] + coords,\n attrs=attrs, name=name\n )\n\n return cib", "title": "" }, { "docid": "414e12a1989cf60ea40de24a716fe9fa", "score": "0.55445665", "text": "def confidenceInterval(testY, predictedY):\n (e_rate, se) = estimateError(testY, predictedY)\n tmp = 1.96*se\n interval = [e_rate - tmp, e_rate + tmp]\n return (e_rate, se, interval)", "title": "" }, { "docid": "0f8f401416ac994abf3366ed059ef3c1", "score": "0.55426675", "text": "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\r\n if x.size != y.size:\r\n raise ValueError(\"x and y must be the same size\")\r\n\r\n cov = np.cov(x, y)\r\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\r\n # Using a special case to obtain the eigenvalues of this\r\n # two-dimensionl dataset.\r\n ell_radius_x = np.sqrt(1 + pearson)\r\n ell_radius_y = np.sqrt(1 - pearson)\r\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\r\n facecolor=facecolor, **kwargs)\r\n\r\n # Calculating the stdandard deviation of x from\r\n # the squareroot of the variance and multiplying\r\n # with the given number of standard deviations.\r\n scale_x = np.sqrt(cov[0, 0]) * n_std\r\n mean_x = np.mean(x)\r\n\r\n # calculating the stdandard deviation of y ...\r\n scale_y = np.sqrt(cov[1, 1]) * n_std\r\n mean_y = np.mean(y)\r\n\r\n transf = transforms.Affine2D() \\\r\n .rotate_deg(45) \\\r\n .scale(scale_x, scale_y) \\\r\n .translate(mean_x, mean_y)\r\n\r\n ellipse.set_transform(transf + ax.transData)\r\n return ax.add_patch(ellipse)", "title": "" }, { "docid": "5bd3a4de20b2980a1c859f85f225061d", "score": "0.5533115", "text": "def OR():\n fig, ax = plt.subplots(figsize=(7,7))\n outcomes = [\"Severe disease\",\"ICU Admission\", \"Invasive Ventilation\",\"Maternal Death\"]\n values = [np.array([1.83, 2.11, 1.72,0.91]), np.array([2.37,2.71,6.61,2.27]), np.array([1.81,1.70,5.26,2.53]), np.array([2.0,4.72,68.82,4.25]), np.array([2.12,4.67,18.61,14.88])]\n # 95% confidence interval\n upper_cf = np.array([np.array([2.63,2.63,4.97,3.72]),np.array([3.07,6.63,22.02,4.31]), np.array([2.20,2.15,15.68,8.17]),np.array([3.48,9.41,420.48,9.95]),np.array([2.78,11.22,1324.16,52.81])])-values\n lower_cf = values-np.array([np.array([1.27,1.69,0.60,0.22]),np.array([1.83,1.10,1.98,1.20]),np.array([1.49,1.34,1.76,0.78]),np.array([1.14,2.37,9.69,1.82]),np.array([1.62,1.94,0.26,4.19])])\n tot_cf = np.array([lower_cf, upper_cf])\n labels_cf = np.array([[\"1.27-2.63\",\"1.69-2.63\",\"0.60-4.97\",\"0.22-3.72\"], [\"1.83-3.07\",\"1.10-6.63\",\"1.98-22.02\",\"1.20-4.31\"], [\"1.49-2.20\",\"1.34-2.15\",\"1.76-15.68\",\"0.78-8.17\"], [\"1.14-3.48\",\"2.37-9.41\",\"9.69-420.48\",\"1.82-9.95\"], [\"1.62-2.78\",\"1.94-11.22\",\"0.26-1324.16\",\"4.19-52.81\"]])\n n = len(values) # Number of bars to plot\n w = .15 # With of each column\n x = np.arange(0, len(outcomes)) # Center position of group on x axis\n labels = [r\"Age $\\geq$ 35\", r\"BMI $\\geq$ 30\", \"Any Comorbidity\", \"Chronic hypertension\", \"Pre-existing diabetes\"]\n for i, value, label in zip(range(5), values, labels):\n position = x + 
(w*(1-n)/2) + i*w\n bars=ax.bar(position, value, width=w, label=label)\n ax.bar_label(container=bars,labels=labels_cf[i], padding=-5, size=5, rotation=45)\n\n plt.xticks(x, outcomes)\n plt.ylabel(\"Odds ratio\")\n plt.title(\"Odds ratios of maternal risk factors assiciated with severe SARS-CoV-2\")\n plt.legend(fontsize=8)\n\n plt.show()", "title": "" }, { "docid": "281c015c8ea5eff9ebffd7947da52de5", "score": "0.5519836", "text": "def eqwcplot(wlv=[0.1, 2, 3, 8], confidence=0.95, ifig=1):\n colors = ['b','g','r','c','m','y','k','b','g','r','c','m','y','k']\n markers = ['o','^','d','p','h','*','+','o','^','d','p','h','*','+']\n # markers = ['+','*',',','.','1','2','3',\n # '4','<','>','D','H','^','_','h']\n\n ## eqwc is consisted of iso-work segment\n ## for all possible stress ratio\n eqwc = ysplot(wlv, confidence, ifig=None) \n fig = plt.figure(ifig,figsize=(8,6))\n fignorm = plt.figure(ifig+1,figsize=(8,6))\n\n ## ax = fig.add_subplot(111)\n ax = fig.add_axes((0.12,0.12,0.65,0.8))\n axnorm = fignorm.add_axes((0.12,0.12,0.65,0.8))\n\n ## xnorm\n for i in range(len(eqwc)):\n cwc = np.array(eqwc[i]).transpose()\n x = cwc[0][0]\n y = cwc[1][0]\n xe = cwc[1][1]\n ye = cwc[0][1]\n ## finds the loading along rd from which the\n ## normalization will take place.\n\n for j in range(len(y)):\n if y[j]==0:\n xnorm = x[j]\n break\n pass\n ax.errorbar(x, y, xe, ye, marker=markers[i],\n color='Black', #color = colors[i],\n #mec = colors[i],\n label='%5.2f'%wlv[i],\n mfc='None',\n ls=' ', #no line\n markersize=10.,\n markeredgewidth=1.,\n #label=r'work$^{pl}$=%6.2f'%wlv[i]\n )\n\n axnorm.errorbar(x/xnorm, y/xnorm, xe/xnorm, ye/xnorm, marker=markers[i],\n color='Black', #color = colors[i],\n #mec = colors[i],\n label='%5.2f'%wlv[i],\n mfc='None',\n ls=' ', #no line\n markersize=10.,\n markeredgewidth=1.,\n #label=r'work$^{pl}$=%6.2f'%wlv[i]\n ) \n pass\n ax.legend(loc=2, bbox_to_anchor=(1.05, 1.0))\n ax.set_xlabel(r'$\\sigma_{RD}$ [MPa]',dict(fontsize=20))\n ax.set_ylabel(r'$\\sigma_{TD}$ [MPa]',dict(fontsize=20))\n ax.set_aspect('equal')\n ax.set_xlim(-30,);ax.set_ylim(-30,)\n ax.text(x=5, y=25, s='confidence: %3.1f'%(confidence*100)+'%')\n axnorm.legend(loc=2, bbox_to_anchor=(1.05, 1.0))\n axnorm.set_xlabel(r'$\\sigma_{RD}/\\bar{\\sigma}^{YS}_{RD}$',dict(fontsize=20))\n axnorm.set_ylabel(r'$\\sigma_{TD}/\\bar{\\sigma}^{YS}_{RD}$',dict(fontsize=20))\n axnorm.set_aspect('equal')\n axnorm.set_xlim(-0.05,);axnorm.set_ylim(-0.05,)\n axnorm.text(x=5, y=25, s='confidence: %3.1f'%(confidence*100)+'%')\n pass", "title": "" }, { "docid": "c05a226d002c0ce8a998c4688ca6a8d3", "score": "0.551459", "text": "def ecdf(data):\n\n #Compute the values of the x-axis\n x = np.sort(data)\n\n bins = np.arange(0, len(data))\n bin_counts, bin_edges = np.histogram(x, len(bins))\n\n #Cumulative counts\n count = 0\n cum_bin_counts = np.array([])\n for n in bin_counts:\n count = count + n\n cum_bin_counts = np.append(cum_bin_counts, count)\n\n #Compute the values of the y-axis\n y = (cum_bin_counts + 1) / len(data)\n\n return (x, y)", "title": "" }, { "docid": "dcb64437e70188a71f5de23d97b45570", "score": "0.55007654", "text": "def plot_CI(ax, mu, se, sig_level=0.05, color='blue', two_tailed=True):\n left, right = confidence_interval(mean=mu, se=se, sig_level=sig_level)\n ax.axvline(left, c=color, linestyle='--', alpha=0.5)\n ax.axvline(right, c=color, linestyle='--', alpha=0.5)", "title": "" }, { "docid": "155c0ab682473b6ce4992edb1fd51cad", "score": "0.5500172", "text": "def confidence_ellipse(x, y, ax, n_std=3.0, 
facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "title": "" }, { "docid": "9c57439d10b8a25d06390bc26fdb2fc0", "score": "0.5494489", "text": "def confidence_ellipse(x, y, ax, n_std=3.0, facecolor='none', **kwargs):\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1]/np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,\n facecolor=facecolor, **kwargs)\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D() \\\n .rotate_deg(45) \\\n .scale(scale_x, scale_y) \\\n .translate(mean_x, mean_y)\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "title": "" }, { "docid": "237133ceeaa3e1bf3ae7d94897099b92", "score": "0.54871666", "text": "def confidence_ellipse(\n x=None, y=None, cov=None, ax=None, n_std=3.0, facecolor=\"none\", **kwargs\n):\n if x is None and y is None:\n if cov is None:\n raise ValueError(\"Either \")\n if x.size != y.size:\n raise ValueError(\"x and y must be the same size\")\n\n cov = np.cov(x, y)\n pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])\n # Using a special case to obtain the eigenvalues of this\n # two-dimensionl dataset.\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n ellipse = Ellipse(\n (0, 0),\n width=ell_radius_x * 2,\n height=ell_radius_y * 2,\n facecolor=facecolor,\n **kwargs\n )\n\n # Calculating the stdandard deviation of x from\n # the squareroot of the variance and multiplying\n # with the given number of standard deviations.\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n\n # calculating the stdandard deviation of y ...\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = (\n transforms.Affine2D()\n .rotate_deg(45)\n .scale(scale_x, scale_y)\n .translate(mean_x, mean_y)\n )\n\n ellipse.set_transform(transf + ax.transData)\n return ax.add_patch(ellipse)", "title": "" }, { "docid": "0e38a3cc6e08023401151109d03f8f2f", "score": "0.54837245", "text": "def coverage_plot(model, n=100, interval_type='ETI', savename=None, ax=None, large_ix=None, 
rel_log_errors=False):\n\n if ax is None:\n fig, ax = plt.subplots(figsize=(17, 9), constrained_layout=True)\n _, y_pred_samples, _, _ = model.get_predictions()\n y_test, _, _ = model.get_y_test_and_mix()\n\n if large_ix is not None:\n for m, i in enumerate(large_ix):\n y_pred_samples = np.delete(y_pred_samples, i, axis=1)\n y_test = y_test.drop(y_test.index[i])\n for f, k in enumerate(large_ix):\n if k > i:\n large_ix[f] = k - 1\n\n coverage_deviation_score, CAOS, CAUS, percentiles, pred_percentiles = sm.area_deviation_from_ideal_coverage(\n y_pred_samples, y_test,\n interval_type=interval_type,\n resolution=n, get_percentiles=True, rel_log_errors=rel_log_errors)\n ax.plot(percentiles, pred_percentiles,\n label=interval_type + ' Curve', color=colors['Fire Opal'])\n ax.plot(percentiles, percentiles, label='Ideal Curve', color=colors['Emerald'])\n ax.fill_between(percentiles, percentiles, pred_percentiles,\n where=pred_percentiles >= percentiles, color=colors['Verdigris'], alpha=0.2,\n label='CAOS: %s' % (round(CAOS, 5)))\n ax.fill_between(percentiles, pred_percentiles, percentiles,\n where=pred_percentiles < percentiles, color=colors['Maximum Yellow Red'], alpha=0.2,\n label='CAUS: %s' % (round(CAUS, 5)))\n ax.set_xlabel('Percentiles')\n ax.set_ylabel('Percentile Coverage')\n ax.legend()\n # ax.tick_params(axis='x', which='minor', bottom=False)\n minor_ticks = np.arange(5, 100, 5)\n minor_ticks_labels = [5, 10, 15, 25, 30, 35, 45, 50, 55, 65, 70, 75, 85, 90, 95]\n ax.set_xticks(minor_ticks, minor=True)\n ax.set_yticks(minor_ticks, minor=True)\n ax.grid(b=True, which='major', color='grey', linestyle='-')\n ax.grid(b=True, which='minor', color='grey', linestyle='--', alpha=0.2)\n ax.set_xticklabels(minor_ticks_labels, minor=True, fontsize=12)\n ax.set_yticklabels(minor_ticks_labels, minor=True, fontsize=12)\n ax.set_ylim(bottom=0, top=100)\n ax.set_xlim(left=0, right=100)\n if savename is not None:\n plt.savefig(savename, dpi=300)\n return ax", "title": "" }, { "docid": "50676c60f6b0253370ee00dd8f07039d", "score": "0.5403564", "text": "def confidenceInterval(data, confidence):\r\n a = 1.0*np.array(data)#convert data to numpy array\r\n n = len(a)#length of list\r\n se = np.std(a,ddof = 1)#standard deviation/error\r\n h = se * scipy.stats.norm.ppf(1-(1 - confidence)/2.) / np.sqrt(n)#calculate the confidence interval\r\n return h", "title": "" }, { "docid": "c665af111b0e729ed59053f62a75845f", "score": "0.5402021", "text": "def plot_bootstraps(x, y, epicoef, **kwargs):\n # make dictionaries for plotting\n colors = {'actual': '#33a02c', 'xy=x': 'blue', 'xy=y': 'k',\n 'xy=x=y': '#1f78b4', 'xy=x+y': '#ff7f00', 'suppress': '#e31a1c'\n }\n labels = {'actual': 'data', 'xy=x': label(x, y),\n 'xy=y': label(y, x), 'xy=x=y': 'Unbranched',\n 'xy=x+y': 'log-Additive', 'suppress': 'Suppression'\n }\n\n # checks and balances\n if type(epicoef) is not dict:\n raise ValueError('epicoef must be a dictionary')\n\n epistasis_choice = ['actual', 'xy=x', 'xy=y', 'xy=x=y', 'xy=x+y',\n 'suppress']\n\n for epistasis in epistasis_choice:\n if epistasis.lower() not in epicoef.keys():\n warning = 'epicoef must contain keys for all epistasis models'\n raise ValueError(warning)\n\n if len(epicoef[epistasis.lower()]) < 10:\n warning = 'too few bootstraps. 
Please perform >100' + \\\n 'bootstraps per test'\n raise ValueError(warning)\n\n fig, ax = plt.subplots()\n for model, s in epicoef.items():\n try:\n sns.kdeplot(data=s, label=labels[model.lower()],\n color=colors[model.lower()], **kwargs)\n except:\n print('{0} did not have a label'.format(model))\n next\n\n # plot a horizontal line wherever the actual data mean is\n plt.gca().axvline(epicoef['actual'].mean(), color='#33a02c', ls='--', lw=3)\n\n plt.xlabel('Epistasis Coefficient')\n plt.ylabel('Cumulative Density Function')\n\n return ax", "title": "" }, { "docid": "42b4f6c0c3dc251aa1bc0d7d7e83b38e", "score": "0.53653544", "text": "def confidence_intervals(self, \n x,\n ci=0.95,\n n=1000):\n\n # Sample from the predictive distribution\n pred_dist = self.predictive_distribution(x, n=n)\n\n # TODO: assumes y is scalar, add a check for that\n\n # Compute percentiles of the predictive distribution\n lb = 100*(1.0-ci)/2.0\n q = [lb, 100.0-lb]\n prcs = np.percentile(pred_dist, q, axis=0)\n return prcs[0, :], prcs[1, :]", "title": "" }, { "docid": "093ccef2ba742e344710ed7badc0b118", "score": "0.53637767", "text": "def question_5():\n x = np.arange(-10, 10, 0.001)\n\n # j:\n fig = plt.figure()\n # creating a normal distribution with mean 4 and scale 1\n cdf0 = scipy.stats.norm.cdf(x, 4, 1)\n\n # creating a normal distribution with mean 6 and scale 1\n cdf1 = scipy.stats.norm.cdf(x, 6, 1)\n\n plt.title(\"CDF\")\n plt.xlabel(\"x\")\n plt.ylabel(\"Probability density\")\n\n plt.plot(x, cdf0, label = \"Mean = 4\", color=\"orange\")\n plt.plot(x, cdf1, label = \"Mean = 6\", color=\"purple\")\n\n plt.legend()\n plt.show()\n\n # creating a normal distribution with mean 4 and scale 1\n pdf0 = scipy.stats.norm.pdf(x, 4, 1)\n\n # creating a normal distribution with mean 6 and scale 1\n pdf1 = scipy.stats.norm.pdf(x, 6, 1)\n\n plt.title(\"PDF\")\n plt.xlabel(\"x\")\n plt.ylabel(\"Probability density\")\n\n plt.plot(x, pdf0, label=\"Mean = 4\", color=\"orange\")\n plt.plot(x, pdf1, label=\"Mean = 6\", color=\"purple\")\n\n plt.legend()\n plt.show()\n\n # ii:\n mu_0 = 4\n mu_1 = 6\n pai_0 = 0.5\n pai_1 = 0.5\n w0 = -pai_1 *(mu_1*(1)*mu_1) + pai_0 *(mu_0*(1)*mu_0)\n w = 2\n\n h = scs.expit(w*x+ w0)\n plt.title(\"h(x) as a function of x\")\n plt.xlabel(\"x\")\n plt.ylabel(\"h(x)\")\n\n plt.plot(x, h, color=\"purple\")\n\n plt.show()\n\n # iii:\n x1= np.arange(0, 1, 0.001)\n h1 = (scs.logit(x1)-w0)/ w\n cdf_h0 = scipy.stats.norm.cdf(h1, 4, 1)\n\n plt.title(\"CDF of h(x) for x~X| Y = 0\")\n plt.xlabel(\"x\")\n plt.ylabel(\"Probability density\")\n\n plt.plot(x1, cdf_h0, label=\"Mean = 4\", color=\"orange\")\n\n plt.legend()\n plt.show()\n\n\n cdf_h1 = scipy.stats.norm.cdf(h1, 6, 1)\n\n plt.title(\"CDF of h(x) for x~X| Y = 1\")\n plt.xlabel(\"x\")\n plt.ylabel(\"Probability density\")\n\n plt.plot(x1, cdf_h1, label=\"Mean = 6\", color=\"purple\")\n\n plt.legend()\n plt.show()\n\n # iv:\n\n x2= np.arange(0, 1.00, 0.001)\n\n z1 = cdf_h0\n z2 = cdf_h1\n\n plt.title(\"1- CDF of h(x) for z1~X| Y = 0, z2~X| Y = 1\")\n plt.xlabel(\"x\")\n plt.ylabel(\"Probability density\")\n plt.plot(x2, 1-z1, label=\"z1~X| Y = 0\", color=\"orange\")\n plt.plot(x2, 1 - z2, label=\"z2~X| Y = 1\", color=\"purple\")\n\n plt.legend()\n plt.show()\n\n\n # vi:\n\n plt.title(\"RCO curve of h\")\n plt.xlabel(\"FPR\")\n plt.ylabel(\"TPR\")\n plt.plot(1-z1, 1 - z2, color=\"orange\")\n\n plt.show()", "title": "" }, { "docid": "d1a5a43aa567de928e0f757ac2ec0465", "score": "0.5353658", "text": "def plot_etotal(ecut_list, etotals, aug_ratios, 
**kwargs):\n show = kwargs.pop(\"show\", True)\n savefig = kwargs.pop(\"savefig\", None)\n\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n\n npts = len(ecut_list)\n\n if len(aug_ratios) != 1 and len(aug_ratios) != len(etotals):\n raise ValueError(\"The number of sublists in etotal must equal the number of aug_ratios\")\n\n if len(aug_ratios) == 1:\n etotals = [etotals,]\n\n lines, legends = [], []\n\n emax = -np.inf\n for (aratio, etot) in zip(aug_ratios, etotals):\n emev = np.array(etot) * Ha_to_eV * 1000\n emev_inf = npts * [emev[-1]]\n yy = emev - emev_inf\n\n emax = np.max(emax, np.max(yy))\n\n line, = ax.plot(ecut_list, yy, \"-->\", linewidth=3.0, markersize=10)\n\n lines.append(line)\n legends.append(\"aug_ratio = %s\" % aratio)\n\n ax.legend(lines, legends, 'upper right', shadow=True)\n\n # Set xticks and labels.\n ax.grid(True)\n ax.set_xlabel(\"Ecut [Ha]\")\n ax.set_ylabel(\"$\\Delta$ Etotal [meV]\")\n ax.set_xticks(ecut_list)\n\n #ax.yaxis.set_view_interval(-10, emax + 0.01 * abs(emax))\n #ax.xaxis.set_view_interval(-10, 20)\n ax.yaxis.set_view_interval(-10, 20)\n\n ax.set_title(\"$\\Delta$ Etotal Vs Ecut\")\n\n if show:\n plt.show()\n\n if savefig is not None:\n fig.savefig(savefig)\n\n return fig", "title": "" }, { "docid": "1bf89800acc4b022f169d0cd07604174", "score": "0.5342512", "text": "def confidence_interval(data):\n if ch.check_list(data):\n return [avg.average(data) - (1.96 * SEM(data)), avg.average(data) + (1.96 * SEM(data))]", "title": "" }, { "docid": "e7870ef3c1286f41a929c5da3bb23835", "score": "0.5334976", "text": "def empiricaldiscount_combinedgroups():\n\t\n\t### Set plot parameters and style\n\tsb.set(style='ticks')\n\tfig, axes = plt.subplots(figsize=(12/1.3, 7/1.3))\n\n\t### Initialize path to CSV with summary of hedonic model outputs\n\toutputs_csv_uri = os.path.join(\n\t\tpaths.outputs_dir, 'HedonicOutputs_Summary.csv')\n\n\t### Read CSV with summary of hedonic model outputs to Pandas DataFrame\n\tdf = pd.read_csv(outputs_csv_uri)\n\t\n\t### Set formatting parameters\n\tscalar, fmt, lw, ms = 0.2, 'o-', 1.5, 8\n\n\t### Get national data\n\tmodel_label = 'attitude-x_coastal-x_disclosure-x'\n\trow = df[(df['model_label']==model_label) & (~df['100yr_coeff'].isna())]\n\tcoeff = float(row['100yr_coeff'])\n\tse = float(row['100yr_se'])\n\n\t### Plot national data\n\taxes.axhline(coeff, color='k', linestyle='--', alpha=0.5)\n\taxes.fill_between([0.5, 2.5], y1=coeff-(2*se), y2=coeff+(2*se), \n\t\t\t\t\t color='k', ec='none', alpha=0.2)\n\n\t### Get county groups data\n\tmodel_a, model_b, model_c, model_d = utils.get_modeloutputs()\n\n\t### Initialize 100-yr coefficients and standard errors\n\tcoeff_a = float(model_a['100yr_coeff'].dropna())\n\tcoeff_b = float(model_b['100yr_coeff'].dropna())\n\tcoeff_c = float(model_c['100yr_coeff'].dropna())\n\tcoeff_d = float(model_d['100yr_coeff'].dropna())\n\n\tstd_error_a = float(model_a['100yr_se'].dropna())\n\tstd_error_b = float(model_b['100yr_se'].dropna())\n\tstd_error_c = float(model_c['100yr_se'].dropna())\n\tstd_error_d = float(model_d['100yr_se'].dropna())\n\n\t### Specify location on x-axis for no disclosure requirements\n\tx = 1\n\n\t### Plot coeff for no disclosure & below median climate concern\n\terrorbar_dict = {'fmt':fmt, 'color':'purple', 'lw':lw, 'ms':ms, 'mfc':'w'}\n\taxes.errorbar(x-scalar, coeff_a, yerr=2*std_error_a, **errorbar_dict)\n\n\t### Plot coeff for no disclosure & above median climate concern\n\terrorbar_dict = {'fmt':fmt, 'color':'purple', 'lw':lw, 
'ms':ms}\n\taxes.errorbar(x+scalar, coeff_c, yerr=2*std_error_c, **errorbar_dict)\n\n\t### Specify location on x-axis for at least one disclosure \n\tx = 2\n\n\t### Plot coeff for at least one disclosure & below median climate concern\n\terrorbar_dict = {'fmt':fmt, 'color':'green', 'lw':lw, 'ms':ms, 'mfc':'w'}\n\taxes.errorbar(x-scalar, coeff_b, yerr=2*std_error_b, **errorbar_dict)\n\t\n\t### Plot coeff for at least one disclosure & below above climate concern\n\terrorbar_dict = {'fmt':fmt, 'color':'green', 'lw':lw, 'ms':ms}\n\taxes.errorbar(x+scalar, coeff_d, yerr=2*std_error_d, **errorbar_dict)\n\n\t### Plot formatting\n\taxes.set_xlim(0.5, 2.5)\n\taxes.set_ylim(-0.13, 0)\n\n\taxes.set_ylabel('Empirical flood zone discount (%)')\n\n\taxes.set_xticks([1, 2])\n\taxes.set_xticklabels(\n\t\t['No disclosure requirements', \n\t\t'At least one disclosure requirement'])\n\n\tytick_labels = [round(t*100) for t in axes.get_yticks()]\n\taxes.set_yticklabels(ytick_labels)\n\n\t### Create legend labels\n\taxes.plot(-1, 0, 'ko-', ms=ms, mfc='w', label='Below median climate concern')\n\taxes.plot(-1, 0, 'ko-', ms=ms, label='Above median climate concern')\n\n\t### Create legend\n\taxes.legend(loc='lower left')\n\n\t### Save figure\n\tfn = 'empiricaldiscount_combinedgroups.png'\n\turi = os.path.join(paths.figures_dir, fn)\n\tplt.savefig(uri, bbox_inches='tight', dpi=600)\n\tplt.savefig(uri.replace('png', 'pdf'), bbox_inches='tight')\n\n\t### Open figure\n\ttime.sleep(0.5)\n\tsubprocess.run(['open', uri])\n\n\treturn None", "title": "" }, { "docid": "3641ae3dd457f29023c234b86f1919ab", "score": "0.5332112", "text": "def plot_bootstrap(X, Y, FES, sd_fes, sd_fes_prog, FES_lim=11, ofe_lim=11, FES_step=1, ofe_step=1):\n\t\n\tfig, axs = plt.subplots(1, 3, figsize=(15, 4))\n\tcp = axs[0].contourf(X, Y, FES, levels=range(0, FES_lim, 1), cmap='coolwarm', antialiased=False, alpha=0.8);\n\tcbar = plt.colorbar(cp, ax=axs[0])\n\taxs[0].set_ylabel('CV2', fontsize=11)\n\taxs[0].set_xlabel('CV1', fontsize=11)\n\taxs[0].set_title('Average FES', fontsize=11)\n\n\tcp = axs[1].contourf(X, Y, sd_fes, levels=np.linspace(0, ofe_lim, 10), cmap='coolwarm', antialiased=False, alpha=0.8);\n\tcbar = plt.colorbar(cp, ax=axs[1])\n\tcbar.set_label(\"Variance of Average FES [kJ/mol]$^2$\", rotation=270)\n\taxs[1].set_ylabel('CV2', fontsize=11)\n\taxs[1].set_xlabel('CV1', fontsize=11)\n\taxs[1].set_title('Bootstrap Variance of FES', fontsize=11)\n\n\n\taxs[2].plot( range(len(sd_fes_prog)), sd_fes_prog);\n\taxs[2].set_ylabel('Average Variance of Average FES [kJ/mol]$^2$', fontsize=11)\n\taxs[2].set_xlabel('Bootstrap iterations', fontsize=11)\n\taxs[2].set_title('Global Convergence of Bootstrap Variance', fontsize=11)\n\n\tplt.rcParams[\"figure.figsize\"] = (5,4)", "title": "" }, { "docid": "611f3d6ed5b0dca5a7dabe598bb4d643", "score": "0.5322603", "text": "def plot_fig2cd(self,highconfidence):\n\n highc_string = '_highconfidence' if highconfidence else ''\n\n def plot_jointhist(data, xlabel, ylabel, xmax, ymax, highconfidence):\n \"\"\"\n plot the histogram as a scatter plot with marginal histograms,\n and ensure empty bins are easily distinguishable from ones\n that have at least 1 hit.\n\n generally use xcrit = 12,ycrit = 5 unless using only high\n confidence interactions, then xcrit = 6, ycrit = 2\n \"\"\"\n x = data[xlabel]\n y = data[ylabel]\n\n if highconfidence:\n \txcrit = 6\n \tycrit = 2\n else:\n \txcrit = 12\n \tycrit = 5\n\n # First, plot the scatter plot\n g = sns.JointGrid(x=x, y=y, size=4,\n xlim=(-1, xmax+1), ylim=(-1, 
ymax+1))\n g = g.plot_joint(plt.scatter, alpha=0.2)\n plt.gcf()\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n # annotate only unique points\n # annotate only unique points\n ann_df = data.drop_duplicates((xlabel, ylabel), keep=False)\n ann_df = ann_df[(ann_df[xlabel] > xcrit) | (ann_df[ylabel] > ycrit)]\n for i, row in ann_df.iterrows():\n plt.annotate(i, xy=(row[xlabel], row[ylabel]),\n xytext=(row[xlabel]+1, row[ylabel]+1),\n ha='center', va='top', size=10,\n textcoords='data')\n\n # Next, plot the marginal histograms\n g = g.plot_marginals(sns.distplot, kde=False)\n\n return plt.gcf()\n\n\n # if using high confidence edges, then find those edges\n cols = ('bigg.reaction', 'bigg.metabolite')\n if highconfidence:\n reg = self.regulation\n reg = reg[reg['Source'] == 'BRENDA']\n reg['RefList'] = [item.split(',') if pd.notnull(item) else 0 for item in reg['Literature']]\n\n # Remove whitespace from each item in each list\n for ii in reg.index:\n reg.at[ii,'RefList'] = [item.strip() for item in reg.at[ii,'RefList']]\n\n # Add shorthand name\n reg['ShortHand'] = reg['bigg.metabolite'].str.cat('-->' + reg['bigg.reaction'])\n reglit = reg.groupby(cols)\n\n highc = pd.DataFrame( columns = ['NumRef','Refs'] )\n for ii in reglit.groups.keys():\n ixs = reglit.groups[ii]\n tempref = reg.ix[ixs,'RefList']\n refs = np.unique(list(itertools.chain.from_iterable(tempref)))\n highc.ix[ii[1] + '-->' + ii[0],] = [len(refs),';'.join(refs)]\n\n fig, axs = plt.subplots(1, 1, figsize=(6, 5))\n axs.hist( highc['NumRef'],bins = 0.5 + np.arange(0,highc['NumRef'].max()) )\n axs.set_xlabel('Number of Literature References')\n axs.set_ylabel('Number of SMRN edges')\n settings.savefig(fig, 'histogram_highconfidence_SMRN')\n highc.sort_values('NumRef', axis=0, ascending=False, inplace=True)\n highc.to_csv( os.path.join(settings.RESULT_DIR, 'histogram_highconfidence_SMRN.csv') )\n\n # join interaction table with bigg.reaction IDs\n # and keep only one copy of each reaction-metabolite pair\n if highconfidence:\n bigg_effectors = reg[reg['ShortHand'].isin(highc[highc['NumRef'] > 1].index)]\n bigg_effectors = bigg_effectors.groupby(cols).first().reset_index()\n else:\n bigg_effectors = self.regulation.groupby(cols).first().reset_index()\n\n bigg_effectors.drop('KI_Value', axis=1, inplace=True)\n\n # add columns for counting the positive and negative interactions\n bigg_effectors[N_ACT_LABEL] = 0\n bigg_effectors[N_INH_LABEL] = 0\n bigg_effectors.loc[bigg_effectors['Mode'] == '+', N_ACT_LABEL] = 1\n bigg_effectors.loc[bigg_effectors['Mode'] == '-', N_INH_LABEL] = 1\n\n grouped_by_met = bigg_effectors.groupby('bigg.metabolite').sum()\n grouped_by_rxn = bigg_effectors.groupby('bigg.reaction').sum()\n\n xmax = max(grouped_by_met[N_INH_LABEL].max(),\n grouped_by_rxn[N_INH_LABEL].max())\n ymax = max(grouped_by_met[N_ACT_LABEL].max(),\n grouped_by_rxn[N_ACT_LABEL].max())\n\n fig = plot_jointhist(grouped_by_met, N_INH_LABEL, N_ACT_LABEL,\n xmax, ymax, highconfidence)\n fig.get_axes()[0].annotate('c', xy=(0.02, 0.98),\n xycoords='axes fraction', ha='left', va='top',\n size=20)\n settings.savefig(fig, 'fig2c' + highc_string)\n\n fig = plot_jointhist(grouped_by_rxn, N_INH_LABEL, N_ACT_LABEL,\n xmax, ymax, highconfidence)\n fig.get_axes()[0].annotate('d', xy=(0.02, 0.98),\n xycoords='axes fraction', ha='left', va='top',\n size=20)\n settings.savefig(fig, 'fig2d' + highc_string)\n \n # write a table of only the high confidence interactions", "title": "" }, { "docid": "c947a53f26d4f727354d1f90a34f7b47", "score": "0.5316689", "text": 
"def _plot_line_CI(ax,\n x,\n y,\n sorted_x,\n low_CI,\n high_CI,\n color,\n label\n ):\n ax.plot(x, y, lw=1, color=color, alpha=1, label=label)\n # shade the CI\n ax.fill_between(sorted_x, \n low_CI, \n high_CI, \n color=color, \n alpha=0.4, \n )", "title": "" }, { "docid": "e0636750084e81b7e1c5b506fa0417b7", "score": "0.52435327", "text": "def confidence_interval(data, alpha=0.1):\n alpha = 0.1\n t = lambda column: scipy_stats.t.isf(alpha/2.0, len(column)-1)\n width = lambda column: t(column) * numpy.std(column.values, ddof=1)/sqrt(len(column))\n formatted_interval = lambda column: \"%.2f +/- %.4f\" % (column.mean(), width(column))\n return pandas.Series([formatted_interval(data[c]) for c in data.columns], index=data.columns)", "title": "" }, { "docid": "d64436027dad77b03c4e90e2a0e33cb9", "score": "0.5225972", "text": "def compute_confidence_interval(data):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n std = np.std(a)\n pm = 1.96 * (std / np.sqrt(len(a)))\n return m, pm", "title": "" }, { "docid": "30a07037b7bc8bd3c8a1dc1e3d9f258c", "score": "0.51952773", "text": "def generate_confidence_intervals(noxn_path, se_path, output_dir, basename):\n def lower_ci_op(mean, standard_error):\n \"\"\"Calculate lower bound of 95% confidence interval from mean and se.\"\"\"\n valid_mask = (\n (~numpy.isclose(mean, noxn_nodata)) &\n (~numpy.isclose(standard_error, se_nodata)))\n result = numpy.empty(mean.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n result[valid_mask] = (\n mean[valid_mask] - 1.96 * standard_error[valid_mask])\n return result\n\n def upper_ci_op(mean, standard_error):\n \"\"\"Calculate upper bound of 95% confidence interval from mean and se.\"\"\"\n valid_mask = (\n (~numpy.isclose(mean, noxn_nodata)) &\n (~numpy.isclose(standard_error, se_nodata)))\n result = numpy.empty(mean.shape, dtype=numpy.float32)\n result[:] = _TARGET_NODATA\n result[valid_mask] = (\n mean[valid_mask] + 1.96 * standard_error[valid_mask])\n return result\n\n noxn_nodata = pygeoprocessing.get_raster_info(noxn_path)['nodata'][0]\n se_nodata = pygeoprocessing.get_raster_info(se_path)['nodata'][0]\n lower_bound_path = os.path.join(\n output_dir, 'noxn_95%_lower_bound_{}.tif'.format(basename))\n pygeoprocessing.raster_calculator(\n [(path, 1) for path in [noxn_path, se_path]],\n lower_ci_op, lower_bound_path, gdal.GDT_Float32, _TARGET_NODATA)\n upper_bound_path = os.path.join(\n output_dir, 'noxn_95%_upper_bound_{}.tif'.format(basename))\n pygeoprocessing.raster_calculator(\n [(path, 1) for path in [noxn_path, se_path]],\n upper_ci_op, upper_bound_path, gdal.GDT_Float32, _TARGET_NODATA)", "title": "" }, { "docid": "603d1dc531399c2f2918d40419518974", "score": "0.5194962", "text": "def confidence_interval(res: OptimizeResult, **kwargs):\n if not isinstance(res, OptimizeResult):\n raise ValueError('Argument \\'res\\' should be an instance of \\'scipy.optimize.OptimizeResult\\'')\n\n confidence = kwargs.get('confidence', 0.95)\n\n # The vector of residuals at the solution\n residuals = res.fun\n # The number of data points\n n = len(residuals)\n # The number of parameters\n p = len(res.x)\n # The degrees of freedom\n dfe = n - p\n # Get MSE. 
The degrees of freedom when J is full rank is v = n-p and n-rank(J) otherwise\n mse = (LA.norm(residuals)) ** 2 / dfe\n\n # Needs to estimate the jacobian at the predictor point!!!\n # ypred = func(x,res.x)\n # delta = np.zeros((len(ypred),p));\n # fdiffstep = np.amax(np.spacing(res.x)**(1/3));\n # for i in range(p):\n # change = np.zeros(p)\n # if res.x[i] == 0:\n # nb = np.sqrt(LA.norm(res.x))\n # change[i] = fdiffstep * (nb + (nb == 0))\n # else:\n # change[i] = fdiffstep * res.x[i]\n #\n # predplus = func(x,res.x+change)\n # delta[:,i] = (predplus - ypred)/change[i]\n\n # Find R to get the variance\n _, R = LA.qr(res.jac)\n # Get the rank of jac_pnp\n Rinv = LA.pinv(R)\n\n v = np.sum(Rinv ** 2, axis=1) * mse\n alpha = 1.0 - confidence\n tval = t.ppf(1.0 - alpha / 2.0, dfe)\n delta = np.sqrt(v) * tval\n ci = np.zeros((p, 2), dtype=np.float64)\n\n for i, p, d in zip(range(n), res.x, delta):\n ci[i, :] = [p - d, p + d]\n\n return ci", "title": "" }, { "docid": "ed83e70db4ad30149b45a3697c56212e", "score": "0.51639426", "text": "def fig_HD20010_parameters():\n\n df = pd.read_csv('HD20010_params_EWcut_fixlogg_3sigma.dat')\n c = sns.color_palette()\n\n fig = plt.figure()\n ax1 = fig.add_subplot(311)\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.errorbar(df.EW, df.Teff, yerr=df.Tefferr, fmt='o', color=c[0])\n ax1.fill_between([0, 20], [6131-255]*2, [6131+255]*2, color='k', alpha=0.2, edgecolor='w')\n ax1.hlines(6131, 0, 20, linestyle='--')\n ax1.set_xlim(-1, 21)\n ax1.set_ylim(5800, 7800)\n ax1.set_ylabel('Teff [K]')\n\n ax2 = fig.add_subplot(312)\n plt.setp(ax2.get_xticklabels(), visible=False)\n ax2.errorbar(df.EW, df.feh, yerr=df.feherr, fmt='o', color=c[1])\n ax2.fill_between([0, 20], [-0.23-0.14]*2, [-0.23+0.14]*2, color='k', alpha=0.2, edgecolor='w')\n ax2.hlines(-0.23, 0, 20, linestyles='--')\n ax2.set_xlim(-1, 21)\n ax2.set_ylim(-1.5, 1.5)\n ax2.set_ylabel('[Fe/H]')\n\n ax3 = fig.add_subplot(313)\n plt.setp(ax3, xticks=[0, 5, 10, 15, 20], xticklabels=['No cut', 5, 10, 15, 20])\n ax3.errorbar(df.EW, df.vt, yerr=df.vterr, fmt='o', color=c[2])\n ax3.fill_between([0, 20], [1.90-1.08]*2, [1.90+1.08]*2, color='k', alpha=0.2, edgecolor='w')\n ax3.hlines(1.90, 0, 20, linestyles='--')\n ax3.set_xlim(-1, 21)\n ax3.set_ylim(1, 5)\n ax3.set_yticks(range(1, 6))\n ax3.set_xlabel(r'EW cuts [m$\\AA$]')\n ax3.set_ylabel(r'$\\xi_\\mathrm{micro}$ [km/s]')\n\n plt.tight_layout()\n plt.show()\n # plt.savefig('figures/HD20010_parameters_cuts.pdf')", "title": "" }, { "docid": "029033d40b372944c5c0fd4540c880d6", "score": "0.51423585", "text": "def rec_sensitivity(self, nrs, mean_clus_fracs, std_clus_fracs, intervals,\n here):\n\n colors = ['AntiqueWhite', 'Aquamarine', 'BlueViolet', 'Brown', 'Coral',\n 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan', 'DarkBlue',\n 'DarkCyan', 'DarkGoldenRod', 'Red']\n\n int_ranges = intervals\n\n (fig, ax) = plt.subplots()\n\n utr_count = nrs\n\n # Get the x_coordinates\n x_coords = range(1,len(intervals)+1)\n\n #fraclabel = 'Average polyadenylation sites found relative to annotation'\n # Make a line-plot of the fals_pos\n #ax.plot(x_coords, mean_clus_fracs, label=fraclabel, c='Green', lw=2)\n ax.plot(x_coords, mean_clus_fracs, c='Green', lw=2)\n ax.errorbar(x_coords, mean_clus_fracs, yerr=std_clus_fracs, c='Green',\n lw=1, fmt=None)\n\n # Set y-ticks\n ax.set_ylim((0,3))\n yticks = np.arange(0,3.5,0.5)\n ax.set_yticks(yticks)\n ax.set_yticklabels([val for val in yticks])\n ax.yaxis.grid(True)\n # Set the colors and fontsizes of the ticks\n for t in 
ax.get_yticklabels():\n t.set_color('Green')\n t.set_fontsize(10)\n\n # Create a 'x-twinned' y axis.\n ax2 = ax.twinx()\n x_coords = range(1,len(utr_count)+1)\n ax2.bar(x_coords, utr_count, color='Blue', width=0.6,\n align='center', label='# of 3UTRs in interval')\n ax2.set_ylabel('Number of 3UTRs', size=13)\n\n # Set the colors and fontsizes of the ticks\n for tl in ax2.get_yticklabels():\n tl.set_color('Blue')\n tl.set_fontsize(10)\n #tl.set_fontweight('bold')\n\n # Some hack to get the line-plot in front\n ax.set_zorder(ax2.get_zorder()+1) # put ax in front of ax2\n ax.patch.set_visible(False) # hide the 'canvas'\n\n # Set x-ticks\n ax.set_xticks(x_coords)\n xlabels = ['('+str(v[0])+', '+str(v[1])+']' for v in int_ranges]\n xlabels[-1] = '('+str(int_ranges[-1][0])+', inf)'\n ax.set_xticklabels(xlabels)\n ax.legend(loc='upper right')\n ax.set_ylabel('Discovered/annotated poly(A) sites in 3UTRs', size=13)\n ax.set_xlabel('RPKM ranges for 3UTRs', size=13)\n\n # Set xlim so that \n ax.set_xlim((0.5, max(x_coords)+0.5))\n\n title = 'More poly(A) clusters are found for high-RPKM 3UTRs'\n ax.set_title(title, size=15)\n\n output_dir = os.path.join(here, 'Results_and_figures', 'GENCODE_report',\n 'Figures')\n filename = 'More_polyA_clusters_for_high_RPKM_3UTRS'\n filepath = os.path.join(output_dir, filename+'.pdf')\n fig.savefig(filepath, format='pdf')\n filepath = os.path.join(output_dir, filename+'.eps')\n fig.savefig(filepath, format='eps', papertype='A4')", "title": "" }, { "docid": "cda2dd1608d1f567a99d955161734b40", "score": "0.5119222", "text": "def OLS_stat():\n N = [100, 1000] # Number of data points\n sigma2 = [0.01, 1] # Irreducable error\n\n # Initialize model\n model_ols = OLS()\n poly_deg = 5 # complexity\n p = 0.9 # 90% confidence interval\n\n # Dataframe for storing results\n df = pd.DataFrame(columns=['N', '$\\sigma^2$', 'MSE', '$R^2$'])\n\n # Setup for plotting\n labels = generate_labels(poly_deg)\n cmap = plt.get_cmap(\"Greens\")\n\n for n in N:\n for s2 in sigma2:\n x = np.random.uniform(0, 1, (n, 2))\n noise = np.random.normal(0, s2, n)\n z = frankeFunction(x[:, 0], x[:, 1]) + noise\n model_ols.fit(x, z, poly_deg)\n\n mse = model_ols.mse(x, z)\n r2 = model_ols.r2(x, z)\n df = df.append({'N': n, '$\\\\sigma^2$': s2, 'MSE': mse,\n '$R^2$': r2}, ignore_index=True)\n\n CI = model_ols.confidence_interval(p)\n norm = matplotlib.colors.Normalize(vmin=-10, vmax=len(CI))\n\n fig = plt.figure(figsize=(8, 6))\n plt.yticks(np.arange(model_ols.params), labels)\n plt.grid()\n\n for i in range(len(CI)):\n plt.plot(CI[i], (i, i), color=cmap(norm(i)))\n plt.plot(CI[i], (i, i), \"o\", color=cmap(norm(i)))\n\n plt.gca().set_title(\"90% Confidence Interval\")\n textstr = '\\n'.join((\n \"$N = {}$\".format(n),\n \"$\\\\sigma^2 = {}$\".format(s2)))\n props = dict(boxstyle='round', facecolor='lightblue', alpha=0.5)\n plt.gca().text(0.83, 0.95, textstr, transform=plt.gca().transAxes,\n fontsize=14, verticalalignment='top', bbox=props)\n text_s2 = str(s2).replace(\".\", \"_\")\n fig.savefig(fig_path(\"conf_{}_{}.pdf\".format(n, text_s2)))\n\n # Render dataframe to a LaTeX tabular environment table and write to file\n pd.options.display.float_format = '{:,.3f}'.format\n df = df.apply(lambda x: x.astype(\n int) if np.allclose(x, x.astype(int)) else x)\n pd.options.display.latex.escape = False\n latex = df.to_latex(index=False, column_format='cccc')\n latex = latex.replace('\\\\toprule', '\\\\hline \\\\hline')\n latex = latex.replace('\\\\midrule', '\\\\hline \\\\hline')\n latex = 
latex.replace('\\\\bottomrule', '\\\\hline \\\\hline')\n\n with open(tab_path('ols_stat.tex'), 'w') as f:\n f.write(latex)", "title": "" }, { "docid": "8e2e02f64d8741d7a6ea1fed6d319957", "score": "0.5102951", "text": "def _cei(x, gp_objective, xi, gp_constraint, constraint_upper):\n ei = UtilityFunction._ei(x, gp_objective, xi)\n\n mean, std = gp_constraint.predict(x, return_std=True)\n z = (constraint_upper - mean) / std\n\n cumulative_probabiliy = norm.cdf(z)\n return cumulative_probabiliy * ei", "title": "" }, { "docid": "90e4027f519a1d0b12accd9ce0b5b1e7", "score": "0.50867677", "text": "def ECDF(values):\n x = np.array(values, copy=True)\n x.sort()\n x.shape = np.product(x.shape, axis=0)\n n = x.shape[0]\n y = (np.arange(n) + 1.) / n\n return StepFunction(x, y)", "title": "" }, { "docid": "3d8e392f637e6d34604079c795fada05", "score": "0.5061502", "text": "def test_generate_confidence_interval(self):\n new = [networks['a'][0], networks['b'][0], networks['c'][0]]\n ranking = _generate_centralities_parallel(new)\n centrality_scores = [(ranking[i][0], ranking[i][2]['Betweenness']) for i in range(len(ranking))]\n CI = generate_confidence_interval(centrality_scores)\n self.assertEqual(CI['OTU_1'], (0, 1))", "title": "" }, { "docid": "2c1872f0526d491a062d973d366ed4ea", "score": "0.5061003", "text": "def confidence_interval_wrapper():\n noxn_dir = \"C:/Users/ginge/Documents/NatCap/GIS_local/NCI_NDR/Results_3.2.20/subset_2000_2015/R_ranger_pred\"\n output_dir = noxn_dir\n for scenario_key in _N_EXPORT_PATH_DICT:\n surface_noxn_path = os.path.join(\n noxn_dir, 'surface_noxn_{}.tif'.format(scenario_key))\n surface_noxn_se_path = os.path.join(\n noxn_dir, 'surface_noxn_se_{}.tif'.format(scenario_key))\n basename = 'surface_{}'.format(scenario_key)\n generate_confidence_intervals(\n surface_noxn_path, surface_noxn_se_path, output_dir, basename)\n\n\n ground_noxn_path = os.path.join(\n noxn_dir, 'ground_noxn_{}.tif'.format(scenario_key))\n ground_noxn_se_path = os.path.join(\n noxn_dir, 'ground_noxn_se_{}.tif'.format(scenario_key))\n basename = 'ground_{}'.format(scenario_key)\n generate_confidence_intervals(\n ground_noxn_path, ground_noxn_se_path, output_dir, basename)", "title": "" }, { "docid": "c01c5f371c1e91cb80586b1719e210d1", "score": "0.5043632", "text": "def plotECDF(data,color='k',linewidth=1.0,linestyle='-',label=None):\n x = sorted(data)\n y = np.array(range(1,len(x)+1))/float(len(x))\n if label == None:\n return plt.plot(x,y,color=color,linewidth=linewidth,linestyle=linestyle)\n else:\n return plt.plot(x,y,color=color,label=label,linewidth=linewidth,linestyle=linestyle)", "title": "" }, { "docid": "2a74f60d1379a28be276ec7503fd8bc9", "score": "0.5038908", "text": "def byConfidenceInterval(self) -> ConfidenceIntervalResults:\n global_mean: Rational = Moment.mean(self.data)\n\n upper, lower = ops.splitList(self.data.data, lambda obs: obs <= global_mean)\n upper_std_dev: Rational = Moment.std_dev(Vector(upper))\n lower_std_dev: Rational = Moment.std_dev(Vector(lower))\n np_upper = np.std(upper)\n np_lower = np.std(lower)\n\n upper_outliers, upper_data = ops.splitList(upper, lambda obs: obs <= global_mean + upper_std_dev)\n lower_outliers, lower_data = ops.splitList(lower, lambda obs: obs >= global_mean - lower_std_dev)\n\n return ConfidenceIntervalResults(\n global_mean,\n upper_std_dev,\n lower_std_dev,\n upper_data + lower_data, \n Vector(lower_outliers).sort().data, \n Vector(upper_outliers).sort().data\n )", "title": "" }, { "docid": "d5f693c250a544b9823176124424a1c9", "score": 
"0.5037317", "text": "def _confidence_interval(self, std):\n return 1.96 * std / np.sqrt(self.n_iter)", "title": "" }, { "docid": "2ef495f4df5fa554a81f291064f932ab", "score": "0.5024837", "text": "def plot_CI_multi_pairs(A, B, sig_level=0.05):\n\n # initiate plot object\n fig, ax = plt.subplots(figsize=(12, 3))\n\n # initiate containers for standard error and differences\n SE = []\n d = []\n # iterate through X and N and calculate d and SE\n for i in range(len(A)):\n X_A = A[i][1]\n N_A = A[i][0]\n X_B = B[i][1]\n N_B = B[i][0]\n d.append(X_B / N_B - X_A / N_A)\n SE.append(pooled_SE(N_A, N_B, X_A, X_B))\n\n # convert to numpy arrays\n SE = np.array(SE)\n d = np.array(d)\n\n # z value\n z = z_val(sig_level)\n\n # confidence interval values\n ci = SE * z\n\n # bar to represent the confidence interval\n y = np.arange(len(A))\n ax.hlines(y, d-ci, d+ci, color='blue', alpha=0.4, lw=10, zorder=1)\n # marker for the mean\n ax.scatter(d, y, s=300, marker='|', lw=10, color='magenta', zorder=2)\n\n # vertical line to represent 0\n ax.axvline(0, c='grey', linestyle='-')\n\n # invert y axis to show variant 1 at the top\n ax.invert_yaxis()\n\n # label variants on y axis\n labels = ['metric{}'.format(i+1) for i in range(len(A))]\n plt.yticks(np.arange(len(A)), labels)", "title": "" }, { "docid": "2207a219c7e915e67ade155868e612b5", "score": "0.50126565", "text": "def eccdf(data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n sorted_data, cdf = ecdf(data)\n return sorted_data, 1. - cdf", "title": "" }, { "docid": "ef707ca301cde5ea21b16556845e6905", "score": "0.50087917", "text": "def compute_ece(scores,\n e,\n t,\n a,\n group,\n eval_time,\n strat='quantile',\n adj='KM'):\n _, _, _, ece = calibration_curve(\n scores,\n e,\n t,\n a,\n group,\n eval_time,\n typ=adj,\n ret_bins=True,\n strat=strat,\n n_bins=20) \n\n return ece", "title": "" }, { "docid": "bc4bf23689c325f8602adcd057b8f73b", "score": "0.5002702", "text": "def test_confidence(self):\n tic = TimeIndependentCounter()\n tic.count(0)\n tic.count(3)\n tic.count(5)\n tic.count(2)\n tic.count(5)\n tic.count(8)\n tic.count(1)\n tic.count(2)\n tic.count(1)\n\n self.assertAlmostEqual(tic.report_confidence_interval(.05, print_report=True), 1.96, delta=.01,\n msg=\"Error in Confidence interval calculation. Wrong size of half interval returned.\")\n self.assertAlmostEqual(tic.report_confidence_interval(.1, print_report=False), 1.58, delta=.01,\n msg=\"Error in Confidence interval calculation. Wrong size of half interval returned.\")\n self.assertAlmostEqual(tic.report_confidence_interval(.2, print_report=False), 1.187, delta=.01,\n msg=\"Error in Confidence interval calculation. Wrong size of half interval returned.\")\n\n self.assertEqual(tic.is_in_confidence_interval(4.5, alpha=.05), True,\n msg=\"Error in Confidence interval calculation. Value should be in interval, but isn't.\")\n self.assertEqual(tic.is_in_confidence_interval(1.3, alpha=.05), True,\n msg=\"Error in Confidence interval calculation. Value should be in interval, but isn't.\")\n self.assertEqual(tic.is_in_confidence_interval(5.0, alpha=.05), False,\n msg=\"Error in Confidence interval calculation. Value id in interval, but shouldn't.\")\n self.assertEqual(tic.is_in_confidence_interval(4.5, alpha=.1), True,\n msg=\"Error in Confidence interval calculation. Value should be in interval, but isn't.\")\n self.assertEqual(tic.is_in_confidence_interval(1.3, alpha=.1), False,\n msg=\"Error in Confidence interval calculation. 
Value id in interval, but shouldn't.\")\n self.assertEqual(tic.is_in_confidence_interval(5.0, alpha=.1), False,\n msg=\"Error in Confidence interval calculation. Value id in interval, but shouldn't.\")\n self.assertEqual(tic.is_in_confidence_interval(4.5, alpha=.2), False,\n msg=\"Error in Confidence interval calculation. Value id in interval, but shouldn't.\")\n self.assertEqual(tic.is_in_confidence_interval(1.3, alpha=.2), False,\n msg=\"Error in Confidence interval calculation. Value id in interval, but shouldn't.\")\n self.assertEqual(tic.is_in_confidence_interval(4.0, alpha=.2), True,\n msg=\"Error in Confidence interval calculation. Value should be in interval, but isn't.\")\n\n lower, upper = tic.report_bootstrap_confidence_interval(alpha=.05, resample_size=10000)\n self.assertAlmostEqual(lower, 1.55556, delta=0.01,\n msg=\"Error in bootstrap confidence interval calculation. Wrong lower boundary.\")\n self.assertAlmostEqual(upper, 4.66667, delta=0.01,\n msg=\"Error in bootstrap confidence interval calculation. Wrong upper boundary.\")\n\n self.assertEqual(tic.is_in_bootstrap_confidence_interval(4, resample_size=5000, alpha=.05), True,\n msg=\"Error in Confidence interval calculation. Value should be in interval, but isn't.\")\n self.assertEqual(tic.is_in_bootstrap_confidence_interval(1, resample_size=5000, alpha=.05), False,\n msg=\"Error in Confidence interval calculation. Value id in interval, but shouldn't.\")", "title": "" }, { "docid": "2310dbc7e62c82aac5c703f4cf9916f0", "score": "0.49981126", "text": "def calc_EC50(fn, dff, settings, t20):\n # disable divide by zero warnings encountered during curve fitting\n np.seterr(divide='ignore')\n\n # define datasets that have been adjusted before attempting fitting (\"_orig\" is default, \"_ful\", \"fixed upper limit\"\n # is for specific LD50 analyses\n datasets = ast.literal_eval(settings[\"datasets\"])\n\n # extract the data file path, method (e.g. EC50), expected curveshape (S or Z), etc. 
from settings file\n data_file = dff.loc[fn, \"response data file\"]\n print(data_file)\n method = \"{ct}{pr}\".format(ct=settings[\"calculation_type\"],\n pr=str(settings[\"percentage_response\"]))\n dose_response_curveshape = settings[\"dose_response_curveshape\"]\n doselabel = settings[\"x-axis (dose) label\"]\n doseunits = settings[\"x-axis (dose) units\"]\n\n # create new output file directories, if they don't exist already\n dir_columns = [\"output_folder\", \"ofd_csv\", \"ofd_curves\"]\n if settings[\"save_as_pdf\"] in (True, \"TRUE\"):\n dir_columns.append(\"ofd_pdfs\")\n for column in dir_columns:\n if not os.path.exists(dff.loc[fn, column]):\n os.makedirs(dff.loc[fn, column])\n\n # examine the input file to confirm integrity, correct datatype, etc\n dff = examine_input_datafile(fn, dff)\n if dff.loc[fn, \"resp_datafile_ok\"] != True:\n raise tools.DatafileError(\"The response datafile is not readable, or in the incorrect format.\"\n \"\\nFile affected : \\n{}\".format(data_file))\n\n # obtain the details regarding the data format\n eccpy, resp_datafileformat, resp_machinetype, resp_assaytype = dff.loc[fn, \"response dataformat\"].split(\"|\")\n\n # for the 96-well samples, obtain path for file with the dose concentrations and sample names\n dose_conc_excelfile = dff.loc[fn, \"dose conc file\"]\n # replace np.nan with an empty string, if the sample is from the 12-well platereader and no excel file is given\n if isinstance(dose_conc_excelfile, float):\n if np.isnan(dose_conc_excelfile):\n dose_conc_excelfile = \"\"\n # define path to file with dose concentrations\n dose_conc_excel_path = os.path.join(dff.loc[fn, \"input file directory\"], dose_conc_excelfile)\n\n if resp_machinetype == \"versamax\" and dff.loc[fn, \"resp_datafile_ok\"] == True:\n # read versamax text file, convert to dataframe\n df_resp_orig, df_resp_all, df_dose_orig = read_versamax_txt_datafile(fn, dff, resp_assaytype)\n else:\n df_dose_orig = \"not yet created\"\n df_resp_all = \"not yet created\"\n # do nothing. the response data file is not from VersaMax, is probably from the 12-well platereader\n pass\n data_file_path = dff.loc[fn,\"data_file_path\"]\n # standardise the dose concentration data so that samples are listed in index(vertical), and doses horizontal\n df_dose_all, df_resp_all = standardise_doseconc_data(fn, dff, df_dose_orig, df_resp_all, data_file_path)\n\n if resp_machinetype == \"versamax\":\n\n # create empty dataframe to hold the distributed datapoints\n df_resp_all = pd.DataFrame()\n\n if resp_assaytype in [\"8dose12sample\", \"12dose8sample\", \"24dose4sample\"]:\n # double-check that the dose file is really an excel file\n if dose_conc_excel_path[-4:] not in [\".xls\", \"xlsx\"]:\n raise ValueError(\"File with dose concentrations does not end in .xls or .xlsx. ({}) Please check settings file.\".format(dose_conc_excel_path))\n # define the relevant column name with the response values\n col_resp = \"MeanAbsorb\"\n elif \"ampconc\" in resp_assaytype:\n # DEPRECATED. Use instead [\"8dose12sample\", \"12dose8sample\", \"24dose4sample\"]\n # define the relevant column name with the response values\n col_resp = \"MeanOD600\"\n\n # iterate through the rows, dispersing the datapoints throughout the new dataframe df_resp_all\n for row in df_resp_orig.index:\n # extract the sample name (e.g. A01, or AA01)\n Sample_Name = df_resp_orig.loc[row, \"Sample\"]\n # split the Sample_Name into components, sample letter and dose number (e.g. 
AA, 01)\n sLet_versamax = Sample_Name[:-2]\n dosenum = Sample_Name[-2:]\n # now distribute each response datapoint in a new dataframe,\n # where the sLet is the index and the dosenumber the columns\n # index_in_df_resp_all = df_resp_orig.loc[row, \"index\"]\n # column_in_df_resp_all = df_resp_orig.loc[row, \"column\"]\n response_datapoint = df_resp_orig.loc[row, col_resp]\n # df_resp_all.loc[index_in_df_resp_all,column_in_df_resp_all] = y_value_response\n df_resp_all.loc[sLet_versamax, dosenum] = response_datapoint\n\n # create new DataFrame for Sample names (dfS)\n dfS = pd.read_excel(dose_conc_excel_path, sheet_name=\"samples\", index_col=0)\n # replace original index with the resp_assaytype\n dfS[\"orig_index\"] = dfS.index\n assert resp_assaytype in dfS.columns\n dfS.set_index(resp_assaytype, drop=False, inplace=True)\n\n # Match \"Contains_Data\" between the samples tab (dfS), and the XxdoseYsample tab (df_dose_all)\n # First, convert all \"true-like\" to python bool. (and anything else to False)\n dfS[\"Contains_Data\"] = dfS[\"Contains_Data\"].apply(tools.convert_truelike_to_bool)\n df_dose_all[\"Contains_Data\"] = df_dose_all[\"Contains_Data\"].apply(tools.convert_truelike_to_bool)\n # The lengths won't match, but this doesn't mean much. Find the shortest list.\n len_df_dose_all, len_dfS = df_dose_all.shape[0], dfS.shape[0]\n min_num_datapoints = np.min([len_df_dose_all, len_dfS])\n # extract list of bool from both lists,truncated to have the same length\n df_dose_all_Contains_Data = df_dose_all[\"Contains_Data\"].iloc[:min_num_datapoints]\n dfS_Contains_Data = dfS[\"Contains_Data\"].iloc[:min_num_datapoints]\n\n\n # Third, check if these two lists match. Also check that the response data has the same index as dose data.\n if df_dose_all_Contains_Data.tolist() == dfS_Contains_Data.tolist():\n if df_dose_all.index.tolist() == df_resp_all.index.tolist():\n # add the \"Contains_Data\" col from dose excel file to dataframe containing response data (from txt datafile)\n df_resp_all[\"Contains_Data\"] = df_dose_all_Contains_Data\n else:\n sys.stdout.write(\"WARNING: There is a mismatch in the indexing of the dose and the response data.\"\n \"\\nDose index = {}\\n Response index = {}\\nAttempting to reindex the response data \"\n \"to fit the samples Contains_Data column.\".format(df_dose_all.index.tolist(), df_resp_all.index.tolist()))\n # reindex the series so that it matches the df_resp_all index\n # this assumes that the samples tab has correctly labelled the True and False Contains_Data column\n df_dose_all_Contains_Data = df_dose_all_Contains_Data.reindex(index = df_resp_all.index)\n # transfer column, as per usual\n df_resp_all[\"Contains_Data\"] = df_dose_all_Contains_Data\n else:\n\n raise tools.DataMismatchError(\"\\n\\nThe 'Contains_Data' columns/rows are not matching in {a} & samples tabs.\"\n \"\\n\\nDouble-check excel file.\\n\\nAffected file :\\n{p}\\n\\n\"\n \"{b} tab:\\n{c}\\n\\nsamples tab:\\n{d}\\n\\n\".format(a=resp_assaytype,\n p=dose_conc_excel_path,\n b=resp_assaytype,\n c=df_dose_all_Contains_Data,\n d=dfS_Contains_Data))\n # create a list of the samples that is the same number of rows as df_dose_all (original dfS index is ignored)\n n_rows_df_dose_all = df_dose_all.shape[0]\n # transfer sample names to dataframes with dose concentrations & response values\n series_of_sample_names_with_standard_AA_AB_index = dfS.loc[dfS.Contains_Data][\"samples\"]\n df_dose_all['samples'] = series_of_sample_names_with_standard_AA_AB_index\n df_resp_all['samples'] = 
series_of_sample_names_with_standard_AA_AB_index\n\n # create a view on the dataframes, so that it only shows the microplate data manually marked as \"Contains_Data\" = True\n dfdose = df_dose_all[df_dose_all.Contains_Data == True].copy()\n dfresp = df_resp_all[df_resp_all.Contains_Data == True].copy()\n dict_dfe = {}\n\n # determine the longest sample name\n sample_name_len_max = df_dose_all.samples.str.len().max()\n\n #set the fontsize for the figure\n fig_fontsize = 6\n #set the default font for the figures\n plt.rc('font', family='sans-serif')\n plt.rc('font', serif='Helvetica Neue')\n plt.rc('text', usetex='false')\n plt.rcParams.update({'font.size': fig_fontsize})\n\n # iterate through all of the samples marked for analysis within the excel file with dose concentrations\n for sNum, sLet in enumerate(dfdose.index):\n # create a new dataframe for the evaluation of the EC50 calculations\n # Create a DataFrame for EC50 data (dfe)\n # There will be a new dfe for each sample number. Each dfe will be added to a dictionary, which is then\n # converted to df_eval\n dfe = pd.DataFrame()\n dfe.loc[\"sLet\", sLet] = sLet\n dfe.loc[\"sNum\", sLet] = sNum\n # obtain the name for that sample\n sample_name = str(dfdose.loc[sLet, \"samples\"])\n dfe.loc[\"sample_name\", sLet] = sample_name\n\n # set up the path for the image files to be saved in\n fig0_single_sample_png = os.path.join(dff.loc[fn,\"ofd_curves\"], \"%s \" % sLet + sample_name) + \".png\"\n fig0_single_sample_pdf = os.path.join(dff.loc[fn,\"ofd_pdfs\"], \"%s \" % sLet + sample_name) + \".pdf\"\n\n #reindex so that only rows that contain data in both dataframes (i.e. dose and response data) are kept for analysis\n #take the index of both dataframes after NaN is removed.\n index_dfs = dfresp.loc[sLet,:].dropna().index\n #Find common elements using the set intersection function.\n cols_with_data_in_both_x_and_y = index_dfs.intersection(dfdose.loc[sLet,:].dropna().index)\n # reindex to drop the columns with text or boolean values\n x_orig = dfdose.loc[sLet,:].reindex(index = cols_with_data_in_both_x_and_y)\n # drop the two text columns, Contains_Data and samples (sample names)\n x_orig.drop([\"Contains_Data\", \"samples\"], inplace=True)\n # convert dtype to float\n x_orig = x_orig.astype(float)\n # add the original x values to the output dataframe\n dfe.loc[\"x\",sLet] = list(x_orig)\n # select the original y (response) values\n y_orig = dfresp.loc[sLet,:].reindex(index = cols_with_data_in_both_x_and_y)\n # drop the two text columns, Contains_Data and samples (sample names)\n y_orig.drop([\"Contains_Data\", \"samples\"], inplace=True)\n # convert dtype to float\n y_orig = y_orig.astype(float)\n\n # add to output dataframe\n dfe.loc[\"y\",sLet] = list(y_orig)\n\n for d in datasets:\n # currently add the ynorm_orig and xnorm_orig to all datasets. 
Other datasets might require changes later.\n # normalise the x datapoints between 0 and 1 to improve curve fitting\n dfe.loc[\"xnorm{}\".format(d),sLet], dfe.loc[\"xmin{}\".format(d),sLet], dfe.loc[\"xmax{}\".format(d),sLet] = tools.normalise_0_1(x_orig)\n # normalise the y datapoints between 0 and 1 to improve curve fitting\n dfe.loc[\"ynorm{}\".format(d),sLet], dfe.loc[\"ymin{}\".format(d),sLet], dfe.loc[\"ymax{}\".format(d),sLet] = tools.normalise_0_1(y_orig)\n\n #make an array of >250 datapoints representing the x-axis of the curve\n min = 0\n max = settings[\"fitted_curve_xaxis_max\"]\n n_datapoints = settings[\"fitted_curve_n_datapoints\"]\n dfe.loc[\"x_fitted_norm\", sLet] = np.linspace(min, max, n_datapoints)\n dfe.loc[\"n_doseconc_tested\", sLet] = len(x_orig)\n\n #######################################################################################################\n # #\n # Create fixed upper limit (ful) dataset for LD50 data #\n # (lowdose horizontal datapoints assumed to have high variation, normalised between #\n # two smaller yvalues so data can be fitted to a Z-shaped sigmoidal curve) #\n # #\n #######################################################################################################\n\n if \"_ful\" in datasets:\n # copy x values to the ful dataset, which utilise the same x-datapoints\n dfe.loc[\"x_ful\", sLet] = dfe.loc[\"x\", sLet]\n dfe.loc[\"x_fitted_norm_ful\", sLet] = dfe.loc[\"x_fitted_norm\", sLet]\n\n ful_max = settings[\"ful.yaxis_fixed_upper_limit_max\"]\n ful_min = settings[\"ful.yaxis_fixed_upper_limit_min\"]\n\n # find where the first datapoint drops below the settings[\"ful.yaxis_fixed_upper_limit_min\"]\n if y_orig.min() < ful_min:\n index_y_ful = np.min(np.where(y_orig < ful_min))\n else:\n # there are insufficient lowresponse datapoints(insuff_lowresp_dp). E.g. cells are overgrown.\n # All datapoints are above ful. np.where returns a tuple. 
Replace index with len(array).\n index_y_ful = len(y_orig)\n # select values above index_y_ful, which are above the fixed upper limit\n y_orig_data_above_ful = y_orig[:index_y_ful]\n y_orig_data_below_ful = y_orig[index_y_ful:]\n\n if len(y_orig_data_above_ful) > 1:\n # normalise these datapoints between 0 and 1\n y_orig_data_above_ful_norm_0_1 = tools.normalise_0_1(y_orig_data_above_ful)[0]\n # calculate the width for the normalisation of the adjusted datapoints\n normalisation_width = ful_max - ful_min\n # convert the data which is normalised from 0-1, to normalised between the fixed_upper_limit_min and max\n y_orig_data_above_ful_norm = y_orig_data_above_ful_norm_0_1 * normalisation_width + ful_min\n y_ful = np.append(y_orig_data_above_ful_norm, y_orig_data_below_ful)\n # add to output dataframe\n dfe.loc[\"y_ful\",sLet] = list(y_ful)\n else:\n # if there is only one datapoint above the limit, ful adjustment is useless, y_ful = y_orig\n dfe.loc[\"y_ful\",sLet] = dfe.loc[\"y\",sLet]\n\n # normalise the y (response) data between 0 and 1 for the fitting algorithm\n for d in datasets:\n ynorm, ymin, ymax = tools.normalise_0_1(np.array(dfe.loc[\"y{}\".format(d), sLet]))\n dfe.loc[\"ynorm{}\".format(d), sLet] = ynorm\n dfe.loc[\"ymin{}\".format(d), sLet] = ymin\n dfe.loc[\"ymax{}\".format(d), sLet] = ymax\n\n #######################################################################################################\n # #\n # Setup figure showing the dose-response curves for a single sample #\n # #\n #######################################################################################################\n\n # close any open figures\n plt.close(\"all\")\n # Create a new figure (i.e. a new 2x2 canvas). If EC50_calculable is false, some errors will be printed.\n fig, axarr = plt.subplots(nrows=2, ncols=2, dpi=300)\n # set an annotation fontsize\n af = 10\n # create a colour list for the various datasets, with selected colours from the tableau20\n co = [t20[0], t20[2], t20[3], t20[4], t20[5], t20[6]]\n # create dictionary to hold the size of the plotted datapoints for each dataset (gradually smaller)\n sd = {}\n # create dictionary to hold the linestyles for the EC50 locators\n ls = {}\n linestylelist = [\"dashed\", \"dotted\", \"-.\", \"dashed\", \"dotted\", \"-.\",\"dashed\", \"dotted\", \"-.\",]\n for n,d in enumerate(datasets):\n size_start = 15\n size_increment = -7\n sd[d] = size_start + size_increment * n\n ls[d] = linestylelist[n]\n # set alpha (transparency) level for plotted lines and data in general\n al = 0.8\n # define xycoordinates for later annotations\n xyc = \"axes fraction\"\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Dose-Respose Curve Fig01: Raw data #\n # (Datapoints only - fitting is plotted later) #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # |XXXX| | Subplot 0, axarr[0, 0]\n # |XXXX|____|\n # | | |\n # |____|____|\n\n # Determine which datasets are going to be plotted\n datasets = ast.literal_eval(settings[\"datasets\"])\n\n # set the subplot number on the canvas\n Plot_Nr = 1\n for n, d in enumerate(datasets):\n # change the dataset name (e.g. 
\"_orig\" to \"\") to an empty string if there is only one dataset for analysis\n d_name = \"\" if len(datasets) == 1 else d\n # add original datapoints as a scattergram\n axarr[0,0].scatter(x_orig, dfe.loc[\"y{}\".format(d), sLet], color=co[n], s=sd[d], label=d_name[1:], alpha=al)\n # set xlabel, ylabel, title, etc\n axarr[0,0].set_xlabel(\"{a} ({b})\".format(a=doselabel, b=doseunits), fontsize = fig_fontsize)\n axarr[0,0].set_ylabel(settings[\"y-axis (response) label\"],rotation='vertical', fontsize = fig_fontsize)\n axarr[0,0].set_title(\"%s %s\" %(sLet, sample_name), fontsize = fig_fontsize)\n axarr[0,0].grid(True, color = '0.75')\n # set the limit of the y-axis to 1 + 0.1, if all datapoints are very low\n ymin, ymax = axarr[0,0].get_ylim()\n # xmin, xmax = axarr[0,0].get_xlim()\n #set y-axis intercept\n # ylim_min_raw = -0.05\n # define the y-limit for the non-normalised (raw) data as the minimum value minus a percentage of max value\n ylim_min_raw = ymin - ymax * 0.05\n # set the x-axis limits\n # xlim_min_raw = -10\n # xlim_min_raw = xmin# - xmax * 0.01\n # define the x-limit for the non-normalised (raw) data as the minimum value minus a percentage of max value\n xlim_min_raw = x_orig.min() - x_orig.max() * 0.1\n ylim_max_raw = y_orig.max() + 0.1 if y_orig.max() > 1.0 else 1.0\n axarr[0,0].set_ylim(ylim_min_raw, ylim_max_raw)\n # set the x-axis limit so that the legend does not hide too many data points\n # find the maximum dose conc. in the whole experiment for that day\n # maxAC = x_orig.max()\n # obtain the variable altering the extension of the x-axis\n # x_axis_extension_after_ACmax_in_plot1 = dff.loc[fn, \"x-axis extension in summary fig_0\"]\n # define 110% of the limit of the x-axis as the maximum dose conc.\n # xlim_max_plot1 = maxAC + x_axis_extension_after_ACmax_in_plot1\n # define 110% of the limit of the x-axis as the maximum dose conc.\n xlim_max_plot1 = x_orig.max() * 1.1\n axarr[0,0].set_xlim(xlim_min_raw,xlim_max_plot1)\n # axarr[0, 0].set_xlim(0, xlim_max_plot1)\n if \"_ful\" in datasets:\n axarr[0,0].annotate(text=\"original data\", xy=(0.71,0.9), fontsize=af, xycoords=xyc, color = t20[0])\n axarr[0,0].annotate(text=\"fixed upper limit (ful) data\", xy=(0.43,0.8), fontsize=af, xycoords=xyc, color = t20[2])\n\n #######################################################################################################\n # #\n # Preliminary judgements whether EC50 is calculable #\n # #\n #######################################################################################################\n\n # count the number of orig response datapoints above and below the \"yaxis upper-lower cutoff\" (yulc) value\n dfe.loc[\"n_resp_dp_below_yulc\", sLet] = len(np.where(y_orig < settings[\"yaxis upper-lower cutoff\"])[0])\n dfe.loc[\"n_resp_dp_above_yulc\", sLet] = len(np.where(y_orig > settings[\"yaxis upper-lower cutoff\"])[0])\n\n if dfe.loc[\"n_resp_dp_below_yulc\", sLet] < settings[\"min_num_dp_above&below_yulc\"]:\n \"\"\"The cells are \"insuff_lowresp_dp\" if any of the following are true\n - there are less than two datapoints above the live-dead cutoff value\n \"\"\"\n for d in datasets:\n dfe.loc[\"EC50{}\".format(d), sLet], dfe.loc[\"rsquared{}\".format(d), sLet] = \"insuff_lowresp_dp\", 0\n dfe.loc[\"EC50_hill_eq{}\".format(d), sLet], dfe.loc[\"n_highdose_datapoints{}\".format(d), sLet] = 0, 0\n dfe.loc[\"n_lowdose_datapoints{}\".format(d), sLet], dfe.loc[\"EC50_calculable{}\".format(d), sLet] = 0, False\n dfe.loc[\"residuals_mean{}\".format(d),sLet], 
dfe.loc[\"y_fitted{}\".format(d),sLet] = np.nan, np.nan\n dfe.loc[\"y_fitted_norm{}\".format(d),sLet], dfe.loc[\"indices_lowdose_datapoints{}\".format(d),sLet] = np.nan, np.nan\n dfe.loc[\"indices_lowdose_datapoints_excl_nearest_EC50{}\".format(d),sLet] = np.nan\n dfe.loc[\"x_fitted{}\".format(d), sLet], dfe.loc[\"response_lowdose_datapoints{}\".format(d), sLet] = np.nan, np.nan\n # label the sample as not okay\n dfe.loc[\"ymin{}\".format(d), \"%s_okay\" % sLet] = False\n dfe.loc[\"data_seems_okay{}\".format(d),sLet] = False\n\n elif any([y_orig[1] < settings[\"min_resp_at_2nd_doseconc\"],\n dfe.loc[\"n_resp_dp_above_yulc\", sLet] < settings[\"min_num_dp_above&below_yulc\"],\n np.array(y_orig)[-2] < settings[\"min_resp_at_2ndlast_doseconc\"]]):\n \"\"\"For high-throughput LD50 calculations, the cells have \"insuff_highresp_dp\" if any of the following are true:\n - the y-value of the second datapoint (second dose) is smaller than a fixed minimum value (min_resp_at_2nd_doseconc)\n - there are less than two datapoints above a fixed value (yaxis upper-lower cutoff)\n \"\"\"\n for d in datasets:\n dfe.loc[\"EC50{}\".format(d), sLet], dfe.loc[\"rsquared{}\".format(d), sLet] = \"insuff_highresp_dp\", 0\n dfe.loc[\"EC50_hill_eq{}\".format(d), sLet], dfe.loc[\"n_highdose_datapoints{}\".format(d), sLet] = 0, 0\n dfe.loc[\"n_lowdose_datapoints{}\".format(d), sLet], dfe.loc[\"EC50_calculable{}\".format(d), sLet] = 0, False\n dfe.loc[\"residuals_mean{}\".format(d),sLet], dfe.loc[\"y_fitted{}\".format(d),sLet] = np.nan, np.nan\n dfe.loc[\"y_fitted_norm{}\".format(d),sLet], dfe.loc[\"indices_lowdose_datapoints{}\".format(d),sLet] = np.nan, np.nan\n dfe.loc[\"indices_lowdose_datapoints_excl_nearest_EC50{}\".format(d),sLet] = np.nan\n dfe.loc[\"x_fitted{}\".format(d), sLet], dfe.loc[\"response_lowdose_datapoints{}\".format(d), sLet] = np.nan, np.nan\n # label the sample as not okay\n dfe.loc[\"ymax{}\".format(d), \"%s_okay\" % sLet] = False\n dfe.loc[\"data_seems_okay{}\".format(d),sLet] = False\n else:\n #######################################################################################################\n # #\n # Fit sigmoidal curve to the data #\n # #\n #######################################################################################################\n\n #as a starting point, guess the sigmoidal constants\n if dose_response_curveshape == \"S\":\n hill_constants_guess = (0.0,1.0,0.5,10.0)\n elif dose_response_curveshape == \"Z\":\n hill_constants_guess = (1.0,0.0,0.5,10.0)\n\n for d in datasets:\n # change the dataset name (e.g. 
\"_orig\" to \"\") to an empty string if there is only one dataset for analysis\n d_name = \"\" if len(datasets) == 1 else d\n ynorm = np.array(dfe.loc[\"ynorm{}\".format(d),sLet])\n xnorm = np.array(dfe.loc[\"xnorm{}\".format(d),sLet])\n #use the scipy optimise function to fit a curve to the data points\n hill_constants, cov, infodict, mesg, ier = leastsq(tools.residuals, hill_constants_guess,\n args=(tools.hill_eq, xnorm, ynorm),\n full_output=1)\n\n # save the hill constants for later use\n dfe.loc[\"hill_constants{}\".format(d),sLet] = list(hill_constants)\n\n # obtain the rsquared value for the fit of the curve to the data\n # code is from http://stackoverflow.com/questions/7588371/scipy-leastsq-goodness-of-fit-estimator\n ss_err = np.sum(np.array(infodict['fvec'])**2)\n ss_tot = np.sum((ynorm-ynorm.mean())**2)\n rsquared = 1 - (ss_err/ss_tot)\n # add the rsquared value to the output dataframe\n dfe.loc[\"rsquared{}\".format(d),sLet] = rsquared\n\n # also calculate the average residual as a rough \"goodness of fit\"\n # first apply the optimised function to the original datapoints\n y_fitted_xnorm = tools.hill_eq(hill_constants, xnorm)\n # calculate the residuals, as the (observed y-values)-(fitted y-values). Convert to positive floats.\n residuals_norm = abs(ynorm - y_fitted_xnorm)\n # calculate mean residual\n residuals_norm_mean = residuals_norm.mean()\n # denormalise to the original y-value scale\n residuals_mean = tools.denormalise_0_1(residuals_norm_mean, dfe.loc[\"ymin{}\".format(d), sLet],\n dfe.loc[\"ymax{}\".format(d), sLet])\n # add to output dataframe\n dfe.loc[\"residuals_mean{}\".format(d),sLet] = residuals_mean\n # use mean residual as rough yerr when plotting the EC50 in a barchart. Multiply by 100 to make visible.\n dfe.loc[\"yerr{}\".format(d),sLet] = residuals_mean * 100\n\n # obtain the constants of the optimised sigmoidal Hill function\n upper, lower, EC50_hill_eq_norm, hillslope = hill_constants\n\n # denormalise\n xmin = dfe.loc[\"xmin{}\".format(d),sLet]\n xmax = dfe.loc[\"xmax{}\".format(d),sLet]\n dfe.loc[\"EC50_hill_eq{}\".format(d),sLet] = tools.denormalise_0_1(hill_constants[2], xmin, xmax)\n\n # add to output dataframe\n dfe.loc[\"EC50_hill_eq_norm{}\".format(d), sLet] = EC50_hill_eq_norm\n dfe.loc[\"hillslope{}\".format(d), sLet] = hillslope\n\n #calculate the value for y for the 1500 points\n x_fitted_norm = np.array(dfe.loc[\"x_fitted_norm{}\".format(d), sLet])\n dfe.loc[\"y_fitted_norm{}\".format(d), sLet] = tools.hill_eq(hill_constants, x_fitted_norm)\n\n #######################################################################################################\n # #\n # Calculate the EC50 using the fitted curve #\n # #\n #######################################################################################################\n\n # obtain the calculation method from the settings file\n method_calc_y50 = settings[\"method_calc_y50\"]\n # obtain the percentage response used for calculation (e.g. 50 ,for EC50)\n percentage_response = settings[\"percentage_response\"]\n # calculate fraction response (i.e. 
convert EC50 to 0.5)\n fract_response = percentage_response / 100\n\n if method_calc_y50 == \"y50 = (curve_max - curve_min)*0.5 + curve_min\":\n if dose_response_curveshape == \"S\":\n # define the x-value for curvemin as the first x-value\n curvemin = dfe.loc[\"xnorm{}\".format(d),sLet][0]\n # define the x-value for curvemax as the last x-value\n curvemax = dfe.loc[\"xnorm{}\".format(d),sLet][-1]\n if dose_response_curveshape == \"Z\":\n # define the x-value for curvemin as the last x-value\n curvemin = dfe.loc[\"xnorm{}\".format(d),sLet][-1]\n # define the x-value for curvemax as the first x-value\n curvemax = dfe.loc[\"xnorm{}\".format(d),sLet][0]\n # use the hill equation to find the y-value of the curve at these positions\n y50_curvemin = tools.hill_eq(dfe.loc[\"hill_constants{}\".format(d),sLet], curvemin)\n y50_curvemax = tools.hill_eq(dfe.loc[\"hill_constants{}\".format(d),sLet], curvemax)\n # define y50 (yvalue at top of curve - yvalue at bottom of the curve) * 0.5 [or 0.9 for EC90, etc.]\n y50_norm = (y50_curvemax - y50_curvemin) * fract_response + y50_curvemin\n\n # detect extended curve formula (e.g. \"y50 = (extendedcurve|0.2|_max - extendedcurve|0.2|_min)*0.5\")\n elif \"extendedcurve\" in method_calc_y50 and \"|\" in method_calc_y50:\n if dose_response_curveshape == \"S\":\n # define the x-value for curvemin as the first x-value\n curvemin = dfe.loc[\"xnorm{}\".format(d),sLet][0]\n # define the x-value for curvemax as the last x-value\n curvemax = dfe.loc[\"xnorm{}\".format(d),sLet][-1]\n if dose_response_curveshape == \"Z\":\n # define the x-value for curvemin as the last x-value\n curvemin = dfe.loc[\"xnorm{}\".format(d),sLet][-1]\n # define the x-value for curvemax as the first x-value\n curvemax = dfe.loc[\"xnorm{}\".format(d),sLet][0]\n x_range = curvemax - curvemin\n # extract the extension (e.g. 
0.2, 20% from the text string in the settings file)\n extension_curvemax = float(method_calc_y50.split(\"|\")[1])\n extension_curvemin = float(method_calc_y50.split(\"|\")[3])\n if dose_response_curveshape == \"S\":\n # define the x-value for curvemin as the first x-value, minus the xrange * extension\n curvemin = dfe.loc[\"xnorm{}\".format(d),sLet][0] - x_range * extension_curvemin\n # define the x-value for curvemax as the last x-value\n curvemax = dfe.loc[\"xnorm{}\".format(d),sLet][-1] + x_range * extension_curvemax\n if dose_response_curveshape == \"Z\":\n # define the x-value for curvemin as the last x-value\n curvemin = dfe.loc[\"xnorm{}\".format(d),sLet][-1] + x_range * extension_curvemax\n # define the x-value for curvemax as the first x-value\n curvemax = dfe.loc[\"xnorm{}\".format(d),sLet][0] - x_range * extension_curvemin\n # use the hill equation to find the y-value of the curve at these positions\n y50_curvemin = tools.hill_eq(dfe.loc[\"hill_constants{}\".format(d),sLet], curvemin)\n y50_curvemax = tools.hill_eq(dfe.loc[\"hill_constants{}\".format(d),sLet], curvemax)\n # define y50 (yvalue at top of curve - yvalue at bottom of the curve) * 0.5 [or 0.9 for EC90, etc.]\n y50_norm = (y50_curvemax - y50_curvemin) * fract_response\n\n elif method_calc_y50 == \"y50 = (resp_max - resp_min)*0.5\":\n # define y50 as the centre between min and max datapoints\n # currently the data is normalised between zero and one, so actually y50 = fract_response\n y50_norm = (ynorm.max() - ynorm.min()) * fract_response\n\n elif method_calc_y50 == \"y50 = (resp_end - resp_start)*0.5\":\n y50_norm = (ynorm[-1] - ynorm[0]) * fract_response\n\n elif method_calc_y50 == \"y50 = (resp_start - resp_end)*0.5\":\n y50_norm = (ynorm[0] - ynorm[-1]) * fract_response\n\n else:\n raise ValueError(\"method_calc_y50 ({}) is not recognised. 
\"\n \"Please check the excel settings file.\".format(method_calc_y50))\n\n # add value to output series\n dfe.loc[\"y50_norm{}\".format(d),sLet] = y50_norm\n\n #the y-value of 50% cell density is calculated as the middle position in the curve\n #if the curve is perfectly symmetrical, the EC50 should equal the constant 'k' from the hill_constants\n dfe.loc[\"curve_max_norm{}\".format(d),sLet] = dfe.loc[\"y_fitted_norm{}\".format(d),sLet].max()\n dfe.loc[\"curve_min_norm{}\".format(d),sLet] = dfe.loc[\"y_fitted_norm{}\".format(d),sLet].min()\n\n #dfe.loc[\"EC50_norm_bq{}\".format(d),\"%s_okay\" % sLet]\n brentq_out_tuple = calc_EC50_brent_eq(sLet, sample_name, dfe.loc[\"hill_constants{}\".format(d), sLet],\n dfe.loc[\"y50_norm{}\".format(d), sLet])\n\n dfe.loc[\"EC50_norm_bq{}\".format(d), sLet], dfe.loc[\"EC50_calculable{}\".format(d), sLet] = brentq_out_tuple\n\n # add if the EC50 was calculable to the summary dataframe \"okay\" column\n if dfe.loc[\"EC50_calculable{}\".format(d), sLet] == True:\n dfe.loc[\"EC50_norm_bq{}\".format(d),\"%s_okay\" % sLet] = True\n else:\n dfe.loc[\"EC50_norm_bq{}\".format(d),\"%s_okay\" % sLet] = False\n\n # denormalise the EC50\n dfe.loc[\"EC50{}\".format(d),sLet] = float(tools.denormalise_0_1(brentq_out_tuple[0], xmin, xmax))\n\n # denormalise the fitted y-values for the curve\n dfe.loc[\"y_fitted{}\".format(d),sLet] = tools.denormalise_0_1(dfe.loc[\"y_fitted_norm{}\".format(d),sLet],\n dfe.loc[\"ymin{}\".format(d),sLet],\n dfe.loc[\"ymax{}\".format(d),sLet])\n\n # denormalise the y50, the y-value used to calculated the EC50\n dfe.loc[\"y50{}\".format(d),sLet] = tools.denormalise_0_1(dfe.loc[\"y50_norm{}\".format(d),sLet],\n dfe.loc[\"ymin{}\".format(d),sLet],\n dfe.loc[\"ymax{}\".format(d),sLet])\n\n # denormalise the fitted data, back to original dose and response concentrations\n dfe.loc[\"x_fitted{}\".format(d),sLet] = tools.denormalise_0_1(x_fitted_norm, xmin, xmax)\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Dose-Respose Curve Fig01: Fitted curve #\n # (raw datapoints plotted earlier) #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # |XXXX| | Subplot 0, axarr[0, 0]\n # |XXXX|____|\n # | | |\n # |____|____|\n\n # Determine which datasets are going to be plotted\n datasets = ast.literal_eval(settings[\"datasets\"])\n\n for n, d in enumerate(datasets):\n # change the dataset name (e.g. 
\"_orig\" to \"\") to an empty string if there is only one dataset for analysis\n d_name = \"\" if len(datasets) == 1 else d\n # add original datapoints as a scattergram\n axarr[0,0].scatter(dfe.loc[\"x{}\".format(d), sLet], dfe.loc[\"y{}\".format(d), sLet], color=co[n], s=sd[d],\n label=d_name[1:], alpha=al)\n # plot fitted curve on the subplot with the original y values\n axarr[0, 0].plot(dfe.loc[\"x_fitted{}\".format(d), sLet], dfe.loc[\"y_fitted{}\".format(d), sLet],\n '-', color = co[n], alpha=al)\n # extract y50 and EC50 from dataframe\n y50 = dfe.loc[\"y50{}\".format(d), sLet]\n EC50 = dfe.loc[\"EC50{}\".format(d),sLet]\n # draw horizontal line from y50 to EC50\n axarr[0, 0].hlines(y=y50, xmin=xlim_min_raw, xmax=EC50, colors = co[n], linestyles=ls[d], label='', alpha=al)\n # draw vertical line at EC50 from y50\n axarr[0, 0].vlines(x=EC50, ymin=ylim_min_raw, ymax=y50, colors=co[n], linestyles=ls[d])\n \"\"\"Plot vertical and horizontal lines showing location of EC50 according to the hill equation.\n Not recommended. Hill Eq constant is less reliable than the brent root-finding method.\n EC50_hill = dfe.loc[\"EC50_hill_eq{}\".format(d),sLet]\n axarr[0, 0].hlines(y=y50, xmin=xlim_min_raw, xmax=EC50_hill, colors = co[n], linestyles=ls[d])\n axarr[0, 0].vlines(x=EC50_hill, ymin=ylim_min_raw, ymax=y50, colors=co[n], linestyles=ls[d])\"\"\"\n\n if len(datasets) > 1:\n lg = axarr[0,0].legend(loc=7, ncol=1, scatterpoints=1)\n lg.draw_frame(False)\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Dose-Respose Curve Fig02: Normalised data, with fitted curve. #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # | | | Subplot 1, axarr[1, 0]\n # |____|____|\n # |XXXX| |\n # |XXXX|____|\n\n #set y-axis intercept\n ymin_norm = -0.2\n xmin_norm = -0.05\n for n, d in enumerate(datasets):\n # add normalised datapoints as a scattergram\n axarr[1,0].scatter(dfe.loc[\"xnorm{}\".format(d), sLet], dfe.loc[\"ynorm{}\".format(d), sLet],\n color=co[n], s=sd[d], label=d_name[1:])\n # add the fitted curve as a line plot\n axarr[1,0].plot(dfe.loc[\"x_fitted_norm{}\".format(d),sLet], dfe.loc[\"y_fitted_norm{}\".format(d), sLet],\n '-', color=co[n], alpha=0.8)\n # add horizontal line at y50\n axarr[1,0].hlines(y=dfe.loc[\"y50_norm{}\".format(d), sLet], xmin=xmin_norm, colors = co[n],\n xmax=dfe.loc[\"EC50_norm_bq{}\".format(d), sLet], linestyles=ls[d])\n\n\n # add vertical line at EC50\n axarr[1,0].vlines(x=dfe.loc[\"EC50_norm_bq{}\".format(d), sLet], ymin=ymin_norm, colors = co[n],\n ymax=dfe.loc[\"y50_norm{}\".format(d), sLet], linestyles=ls[d])\n\n # set xlabel, ylabel, title, grid, etc\n axarr[1,0].set_xlabel(\"dose concentration (normalised)\", fontsize = fig_fontsize)\n axarr[1,0].set_ylabel(\"response concentration (normalised)\",rotation='vertical', fontsize = fig_fontsize)\n axarr[1, 0].text(0.6, 1.1, \"normalised data\", horizontalalignment='center', fontsize=fig_fontsize)\n axarr[1,0].grid(True, color = '0.75')\n axarr[1,0].set_ylim(ymin_norm, 1.2)\n axarr[1,0].set_xlim(xmin_norm, 1.2)\n\n if len(datasets) > 1:\n lg = axarr[1,0].legend(loc=7, scatterpoints=1)\n lg.draw_frame(False)\n if \"_ful\" in datasets:\n # set annotation in top right\n axarr[1,0].annotate(text=\"normalised (ful) data\", xy=(0.53,0.9), fontsize=af, xycoords=xyc, color=t20[2])\n axarr[1,0].annotate(text=\"normalised data\", xy=(0.63,0.8), fontsize=af, xycoords=xyc, color=t20[0])\n\n # 
analyse the curve fit and data to judge whether the EC50 value is accurate\n dfe = judgefit.judge_fit(dfe, sLet, settings)\n # dfe_index = pd.Series(dfe.index)\n # dfe_index_ful = dfe_index[dfe_index.apply(lambda x : \"_ful\" in x)]\n # dfe_index_orig = dfe_index[dfe_index.apply(lambda x : x[-5:] == \"_orig\")]\n # dfe_ful = dfe.loc[dfe_index_ful]\n # dfe_orig = dfe.loc[dfe_index_orig]\n\n dict_data_okay = {}\n for d in datasets:\n # create empty list to hold all row indices in dfe that relate to that dataset (based on suffix)\n list_rows_d = []\n for row in dfe.index:\n # if the suffix in the column matches the dataset (e.g. x_orig[-5:] == \"_orig\"]\n if row[-5:] == d:\n # add the row index label to the list\n list_rows_d.append(row)\n # reindex the dataframe to contain only the judgement data relevant to that dataset\n dfe_d = dfe.reindex(index=list_rows_d)\n if False in list(dfe_d.loc[:,\"{}_okay\".format(sLet)]):\n data_seems_okay = False\n else:\n # none of the judge_fit judgements suggest the data is not good, label as okay\n data_seems_okay = True\n # add to a dictionary for quick access\n dict_data_okay[d] = data_seems_okay\n # add the final judgement to the list, for use in the dataframe later\n dfe.loc[\"data_seems_okay{}\".format(d),sLet] = data_seems_okay\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Dose-Respose Curve Fig03: Notes on automatic judgement of data quality #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # | |XXXX| Subplot 3, axarr[0, 1]\n # |____|XXXX|\n # | | |\n # |____|____|\n\n Plot_Nr = 3\n # write text on the empty figure\n yaxis_pos = np.linspace(0.9,0.1,10)\n xaxis_left = 0.05\n # set the xaxis position of the annotation for the ful samples\n xful = 0.6\n # set the xaxis position of the annotation for the orig samples\n xori = 0.8\n # # data_evaluation = \"data seems good\" if data_seems_okay_ful else \"ful data needs checking\"\n # title_colour = \"k\" if data_seems_okay_ful else \"r\"\n title_summ = \"Sample %s\" % (sLet)\n axarr[0,1].set_title(title_summ, fontsize = af, color = \"k\", alpha=0.75)\n\n if resp_machinetype == \"versamax\" and resp_assaytype in [\"8_ampconc\", \"12_ampconc\", \"24_ampconc\"]:\n if \"Uniprot#1\" in dfS.columns:\n # combine columns to create the N-terminal vector name\n Nvect = \"\".join(dfS.loc[sLet,\"Uniprot#1\":\"Mutant#1\"])\n # combine columns to create the C-terminal vector name\n Cvect = \"\".join(dfS.loc[sLet,\"Uniprot#2\":\"Mutant#2\"])\n # extract assay version (1.1, 1.2, etc)\n Version = dfS.loc[sLet,\"notes\"].strip('_')\n axarr[0,1].annotate(text=\"Nvect: %s\" % Nvect, xy=(xaxis_left,yaxis_pos[0]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=\"Cvect: %s\" % Cvect, xy=(xaxis_left,yaxis_pos[1]), fontsize=af, xycoords=xyc, alpha=0.75)\n # axarr[0,1].annotate(text=data_evaluation, xy=(0.6,yaxis_pos[0]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=Version, ha='right', xy=(0.98,yaxis_pos[0]), fontsize=af, xycoords=xyc, alpha=0.75)\n\n # create dictionary to hold x-axis locations of annotations, for each dataset tested\n xd = {}\n for n,d in enumerate(datasets):\n # start x position\n start = 0.85\n # distance between x positions (will need to make smaller if datasets tested > 2)\n dist = 0.2\n # calculate position for that annotation\n xpos = start - dist * n\n # add to dictionary\n xd[d] = xpos\n\n # for d in datasets:\n # 
#add headers to table showing the rsquared and other aspects of the fit and dataset\n # axarr[0,1].annotate(text=d_name[1:], xy=(xd[d],yaxis_pos[2]), fontsize=af, xycoords=xyc, alpha=0.75)\n\n axarr[0,1].annotate(text=\"{a} ({b})\".format(a=method, b=doseunits), xy=(xaxis_left,yaxis_pos[3]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=\"rsquared\", xy=(xaxis_left,yaxis_pos[4]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=\"hillslope\", xy=(xaxis_left,yaxis_pos[5]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=\"n_lowdose_datapoints\", xy=(xaxis_left,yaxis_pos[6]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=\"std_lowdose_datapoints\", xy=(xaxis_left,yaxis_pos[7]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=\"n_highdose_datapoints\", xy=(xaxis_left,yaxis_pos[8]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[0,1].annotate(text=\"std_highdose_datapoints\", xy=(xaxis_left,yaxis_pos[9]), fontsize=af, xycoords=xyc, alpha=0.75)\n\n for d in datasets:\n # change the dataset name (e.g. \"_orig\" to \"\") to an empty string if there is only one dataset for analysis\n d_name = \"\" if len(datasets) == 1 else d\n #add headers to table showing the rsquared and other aspects of the fit and dataset\n axarr[0,1].annotate(text=d_name[1:], xy=(xd[d],yaxis_pos[2]), fontsize=af, xycoords=xyc, alpha=0.75)\n if dfe.loc[\"EC50_calculable{}\".format(d),sLet]:\n EC50colour = \"k\" if dfe.loc[\"data_seems_okay{}\".format(d),sLet] == True else \"r\"\n axarr[0,1].annotate(text=\"%0.2f\" % dfe.loc[\"EC50{}\".format(d),sLet], xy=(xd[d],yaxis_pos[3]), fontsize=af, xycoords=xyc, alpha=0.75, color=EC50colour)\n # rsquared of the fit to the data\n axarr[0,1].annotate(text=\"%0.2f\"% dfe.loc[\"rsquared{}\".format(d),sLet], xy=(xd[d],yaxis_pos[4]), fontsize=af, xycoords=xyc, alpha=0.75,color=dfe.loc[\"rsquared{}\".format(d),\"%s_colour\" % sLet])\n # hillslope of the fit to the data\n axarr[0,1].annotate(text=\"%0.1f\"% dfe.loc[\"hillslope{}\".format(d),sLet], xy=(xd[d],yaxis_pos[5]), fontsize=af, xycoords=xyc, alpha=0.75, color = dfe.loc[\"hillslope{}\".format(d),\"%s_colour\" % sLet])\n # number of lowdose datapoints\n axarr[0,1].annotate(text=\"%i\"% dfe.loc[\"n_lowdose_datapoints{}\".format(d),sLet], xy=(xd[d],yaxis_pos[6]), fontsize=af, xycoords=xyc, alpha=0.75, color = dfe.loc[\"n_lowdose_datapoints{}\".format(d),\"%s_colour\" % sLet])\n # std of lowdose datapoints\n axarr[0,1].annotate(text=\"%0.2f\"% dfe.loc[\"std_resp_lowdose_datapoints{}\".format(d),sLet], xy=(xd[d],yaxis_pos[7]), fontsize=af, xycoords=xyc, alpha=0.75, color = dfe.loc[\"std_resp_lowdose_datapoints{}\".format(d),\"%s_colour\" % sLet])\n # number of highdose datapoints\n axarr[0,1].annotate(text=\"%i\"% dfe.loc[\"n_highdose_datapoints{}\".format(d),sLet], xy=(xd[d],yaxis_pos[8]), fontsize=af, xycoords=xyc, alpha=0.75, color = dfe.loc[\"n_highdose_datapoints{}\".format(d),\"%s_colour\" % sLet])\n # std of highdose datapoints\n axarr[0,1].annotate(text=\"%0.2f\"% dfe.loc[\"std_resp_highdose_datapoints{}\".format(d),sLet], xy=(xd[d],yaxis_pos[9]), fontsize=af, xycoords=xyc, alpha=0.75, color = dfe.loc[\"std_resp_highdose_datapoints{}\".format(d),\"%s_colour\" % sLet])\n\n else:\n if isinstance(dfe.loc[\"EC50{}\".format(d),sLet], str):\n dfe.loc[\"EC50_to_insert{}\".format(d),sLet] = \"N/A\"\n elif isinstance(dfe.loc[\"EC50{}\".format(d),sLet], float):\n dfe.loc[\"EC50_to_insert{}\".format(d),sLet] = 
\"0.0f\".format(dfe.loc[\"EC50{}\".format(d),sLet])\n # insert error string or EC50, coloured red to indicate likely poor data\n axarr[0,1].annotate(text=dfe.loc[\"EC50_to_insert{}\".format(d),sLet], xy=(xd[d],yaxis_pos[3]), fontsize=af, xycoords=xyc, alpha=0.75, color = \"r\")\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Dose-Respose Curve Fig04: More notes on automatic judgement of data quality #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # | | | Subplot 4, axarr[1, 1]\n # |____|____|\n # | |XXXX|\n # |____|XXXX|\n\n Plot_Nr = 4\n\n # #add a table showing the rsquared and other aspects of the fit and dataset\n # axarr[1,1].annotate(text=\"ful\", xy=(xful,yaxis_pos[2]), fontsize=af, xycoords=xyc, alpha=0.75)\n # axarr[1,1].annotate(text=\"orig\", xy=(0.8,yaxis_pos[2]), fontsize=af, xycoords=xyc, alpha=0.75)\n\n axarr[1,1].annotate(text=\"dose conc. stepsize\", xy=(xaxis_left,yaxis_pos[4]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[1,1].annotate(text=\"slope at lowdose\", xy=(xaxis_left,yaxis_pos[5]), fontsize=af, xycoords=xyc, alpha=0.75)\n axarr[1,1].annotate(text=\"slope at highdose\", xy=(xaxis_left,yaxis_pos[6]), fontsize=af, xycoords=xyc, alpha=0.75)\n\n for n,d in enumerate(datasets):\n # change the dataset name (e.g. \"_orig\" to \"\") to an empty string if there is only one dataset for analysis\n d_name = \"\" if len(datasets) == 1 else d\n #add a table showing the rsquared and other aspects of the fit for adjusted datasets\n axarr[1,1].annotate(text=d_name[1:], xy=(xd[d],yaxis_pos[2]), fontsize=af, xycoords=xyc, alpha=0.75)\n EC50_calculable = dfe.loc[\"EC50_calculable{}\".format(d),sLet]\n if EC50_calculable:\n # add the stepsize near the EC50, which determines whether more dose concentrations are necessary\n stepsize = \"{:0.2f}\".format(dfe.loc[\"doseconc_stepsize_at_EC50{}\".format(d), sLet])\n stepcolour = dfe.loc[\"doseconc_stepsize_at_EC50{}\".format(d), \"%s_colour\" % sLet]\n\n # if dfe.loc[\"doseconc_stepsize_at_EC50{}\".format(d),\"%s_okay\" % sLet] == True:\n axarr[1,1].annotate(text=stepsize,xy=(xd[d],yaxis_pos[4]), fontsize=af, xycoords=xyc, alpha=0.75, color=stepcolour)\n saxe_lowdose = \"{:0.2f}\".format(dfe.loc[\"saxe_lowdose{}\".format(d), sLet])\n saxe_highdose = \"{:0.2f}\".format(dfe.loc[\"saxe_highdose{}\".format(d), sLet])\n axarr[1,1].annotate(text=saxe_lowdose,xy=(xd[d],yaxis_pos[5]), fontsize=af, xycoords=xyc, alpha=0.75,\n color=dfe.loc[\"saxe_lowdose{}\".format(d),\"%s_colour\" % sLet])\n axarr[1,1].annotate(text=saxe_highdose,xy=(xd[d],yaxis_pos[6]), fontsize=af, xycoords=xyc, alpha=0.75,\n color=dfe.loc[\"saxe_highdose{}\".format(d),\"%s_colour\" % sLet])\n if stepcolour != \"k\":\n doseconc_steps_at_EC50 = dfe.loc[\"doseconc_steps_at_EC50{}\".format(d),sLet]\n axarr[1,0].plot(doseconc_steps_at_EC50,(0,0), color=stepcolour, linestyle=\"-\", lw=2)\n\n # if the slope at the lowdose is above the chosen cutoff, draw a line on the normalised plot, axarr[1,0]\n if dfe.loc[\"saxe_lowdose{}\".format(d), sLet] > settings[\"max_lowdose_slope\"]:\n saxe_lowdose_values = dfe.loc[\"saxe_lowdose_values{}\".format(d), sLet]\n # draw red vertical line showing the slope at the lowdose datapoint\n axarr[1,0].plot(saxe_lowdose_values[0], saxe_lowdose_values[1], 'r-', lw=2)\n # if the slope at the highdose is higher than the chosen cutoff, draw a line on tho normalised plot\n if 
dfe.loc[\"saxe_highdose{}\".format(d), sLet] > settings[\"max_highdose_slope\"]:\n saxe_highdose_values = dfe.loc[\"saxe_highdose_values{}\".format(d), sLet]\n # draw red vertical line showing the slope at the lowdose datapoint\n axarr[1,0].plot(saxe_highdose_values[0], saxe_highdose_values[1], 'r-', lw=2)\n\n else:\n # optional: print the dataframe showing which parameters are not acceptable\n EC50 = dfe.loc[\"EC50{}\".format(d),sLet]\n if isinstance(EC50, str):\n # define x_position of annotation. Place the orig at around 0.6, & 2nd dataset(_ful) at around 0.3\n xd_wide = xd[d]-0.3-0.3*n\n axarr[1,1].annotate(text=d_name[1:],xy=(xd_wide,yaxis_pos[5]), fontsize=af, xycoords=xyc, color=\"r\")\n axarr[1,1].annotate(text=EC50,xy=(xd_wide,yaxis_pos[6]), fontsize=af, xycoords=xyc, color=\"r\")\n\n # create a dictionary with the formatted EC50 values for printing on the figure\n EC50_str_dict = {}\n for d in datasets:\n EC50 = dfe.loc[\"EC50{}\".format(d),sLet]\n EC50_str_dict[d] = \"%.01f\" % EC50 if isinstance(EC50, float) else EC50\n # use % formatting to add the padding for the sample name\n padding = sample_name_len_max if sample_name_len_max <= 44 else 44\n samplestring = \"\\n{sLet} {s:>%s} {m} : \" % int(padding)\n sys.stdout.write(samplestring.format(sLet=sLet, s=sample_name[:44], m=method))\n for n, d in enumerate(datasets):\n data_seems_okay = dfe.loc[\"data_seems_okay{}\".format(d),sLet]\n if data_seems_okay:\n data_evaluation = \"(data seems good)\"\n else:\n if EC50_calculable:\n data_evaluation = \"(needs checking)\"\n else:\n if EC50_str_dict[d] == \"insuff_lowresp_dp\":\n data_evaluation = \"(EC50 not calculable, insufficient low response datapoints)\"\n elif EC50_str_dict[d] == \"insuff_highresp_dp\":\n data_evaluation = \"(EC50 not calculable, insufficient high response datapoints)\"\n sys.stdout.write(\"{d:>4} = {EC:>5} {eval:>17}\".format(d=d_name[1:], EC=EC50_str_dict[d], eval=data_evaluation))\n if n < len(datasets):\n sys.stdout.write(\", \")\n sys.stdout.flush()\n\n #save figure with the fitted curve and calculated EC50 value\n fig.tight_layout()\n fig.savefig(fig0_single_sample_png, format='png', dpi=140)\n if settings[\"save_as_pdf\"] in (True,\"TRUE\"):\n fig.savefig(fig0_single_sample_pdf, format='pdf')\n plt.close('all')\n dict_dfe[sLet] = dfe\n\n # create a DataFrame for EVALuation (df_eval), which combines all the dfe dataframes from the previous loop\n # note that df_eval and dfe are transposed, which is somewhat confusing.\n df_eval = pd.DataFrame()\n for sLet in dict_dfe:\n # extract each individual dfe separately, and add to df_eval\n indiv_dfe = dict_dfe[sLet]\n df_eval = pd.concat([df_eval,indiv_dfe], axis=1)\n\n # transpose, (exchange column and rows)\n df_eval = df_eval.T\n # create a column containing the sample letter and sample name combined\n df_eval['sLet_plus_sample_name'] = df_eval['sLet'] + \" \" + df_eval['sample_name']\n # create columns to be visible first in the output csv and excel files\n sel_col = ['sLet_plus_sample_name']\n for d in datasets:\n sel_col_d = ['EC50{}'.format(d), 'data_seems_okay{}'.format(d), 'rsquared{}'.format(d), 'doseconc_stepsize_at_EC50{}'.format(d), 'EC50_hill_eq{}'.format(d), 'curve_min_norm{}'.format(d),\n 'hillslope{}'.format(d), 'std_resp_highdose_datapoints{}'.format(d), 'n_highdose_datapoints{}'.format(d)]\n sel_col += sel_col_d\n # sel_col += ['sNum', 'n_highdose_datapoints_orig','n_lowdose_datapoints_orig']\n # reindex so columns are first\n df_eval = tools.reindex_df_so_selected_cols_are_first(df_eval, 
sel_col, raise_error=False)\n\n # divide the dataframe into two new dataframes, one for the values (_val) and another for the booleans\n # related to whether the data is okay or not. simply copy orig dataframe and drop unwanted columns.\n df_eval_values = df_eval.copy()\n for row in df_eval_values.index:\n if \"_okay\" in row:\n df_eval_values.drop(row, inplace=True)\n if \"_colour\" in row:\n df_eval_values.drop(row, inplace=True)\n # copy orig dataframe and drop unwanted columns.\n df_eval_bool = df_eval.copy()\n for row in df_eval_bool.index:\n if \"_okay\" not in row:\n df_eval_bool.drop(row, inplace=True)\n # drop empty columns\n df_eval_bool.dropna(how=\"all\", axis=1, inplace=True)\n\n # sort by index (sLet)\n df_eval_values.sort_index(inplace=True)\n # # reindex so that selected columns are visible first\n # df_eval_values_selected_cols = ['sample_name', 'EC50_ful', 'data_seems_okay_ful', 'data_seems_okay_orig', 'rsquared_orig', 'EC50_hill_eq_ful','n_highdose_datapoints_orig','n_lowdose_datapoints_orig','sNum']\n # df_eval_values = reindex_df_so_selected_cols_are_first(df_eval_values, df_eval_values_selected_cols)\n # sort by index (sLet)\n df_eval_bool.sort_index(inplace=True)\n # # reindex so that selected columns are visible first\n # df_eval_bool = reindex_df_so_selected_cols_are_first(df_eval_bool, df_eval_values_selected_cols)\n\n #set up the summary figure to contain 2 subplots\n n_plots_per_fig = 2\n nrows = 2\n dict_organising_subplots = tools.create_dict_organising_subplots(n_plots_per_fig,n_rows=nrows)\n #set the fontsize for the figure\n fig_fontsize = 6\n\n # for d in datasets:\n # # go through all of the data, and set the label colour to red if the data_seems_okay is false\n # df_eval_values[\"xlabel_colour{}\".format(d)] = df_eval_values[\"data_seems_okay{}\".format(d)].apply(lambda c: \"k\" if c == True else \"r\")\n\n for d in datasets:\n # change the dataset name (e.g. \"_orig\" to \"\") to an empty string if there is only one dataset for analysis\n d_name = \"\" if len(datasets) == 1 else d\n # go through all of the data, and set the label colour to red if the data_seems_okay is false\n df_eval_values[\"xlabel_colour{}\".format(d)] = df_eval_values[\"data_seems_okay{}\".format(d)].apply(lambda c: \"k\" if c == True else \"r\")\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Summ_Plot01, summ_Fig00: scattergram, original data with fitted curve #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # |XXXXXXXXX| Subplot 0, axarr[0]\n # |XXXXXXXXX|\n # | |\n # |_________|\n\n # set the axes array position\n r = 0\n # set the figure number (canvas number, for summmary figures)\n fig_nr = 1\n fig, axarr = plt.subplots(nrows=2, ncols=1, dpi=300)\n EC50_calculable_list = list(df_eval_values.loc[:,\"EC50_calculable{}\".format(d)])\n\n if True in EC50_calculable_list:\n # plot the curves first, as this is used in the legend\n for sLet in df_eval_values.index:\n sNum = df_eval_values.loc[sLet,'sNum']\n axarr[r].plot(df_eval_values.loc[sLet,\"x_fitted{}\".format(d)],\n df_eval_values.loc[sLet,\"y_fitted{}\".format(d)],\n '-',\n color = t20[sNum],\n alpha = 0.8,\n label = sLet)\n # set the legend. 
Note that this is based on the last samples plotted\n lg = axarr[r].legend(df_eval_values['sLet_plus_sample_name'], loc='upper right', scatterpoints=1)\n lg.draw_frame(False)\n # set xlabel, ylabel, title, grid, etc\n axarr[r].set_xlabel(\"{a} ({b})\".format(a=doselabel, b=doseunits), fontsize = fig_fontsize)\n axarr[r].set_ylabel(settings[\"y-axis (response) label\"],rotation='vertical', fontsize = fig_fontsize)\n axarr[r].set_title('{a} data {b}'.format(a=d_name[1:],b=data_file),\n fontsize = fig_fontsize, x = 0.22)\n axarr[r].grid(True, color = '0.75')\n # plot the raw datapoints last, as these interfere with the legend\n ylim_min_list = []\n ylim_max_list = []\n for sLet in df_eval_values.index:\n sNum = df_eval_values.loc[sLet,'sNum']\n axarr[r].scatter(df_eval_values.loc[sLet,\"x{}\".format(d)],\n df_eval_values.loc[sLet,\"y{}\".format(d)],\n color = t20[sNum],\n alpha = 0.8,\n s = 15,\n label = sLet)\n # grab the min and max for the y-values. This is used for ax.set_ylim.\n ylim_min_list.append(np.array(df_eval_values.loc[sLet,\"y{}\".format(d)]).min())\n ylim_max_list.append(np.array(df_eval_values.loc[sLet, \"y{}\".format(d)]).max())\n\n lowest_y_datapoint = np.array(ylim_min_list).min()\n highest_y_datapoint = np.array(ylim_max_list).max()\n axarr[r].set_ylim(lowest_y_datapoint, highest_y_datapoint)\n\n if not True in list(df_eval_values.loc[:,\"EC50_calculable{}\".format(d)]):\n # if none of the curves could be fitted, base the legend on the datapoints rather than the curves\n lg = axarr[r].legend(df_eval_values['sLet_plus_sample_name'], loc='upper right', scatterpoints=1)\n lg.draw_frame(False)\n # set the x-axis limit so that the legend does not hide too many data points\n # find the maximum dose concentration in the whole experiment for that day\n # maxAC = x_orig.max().max()\n # # obtain the variable altering the extension of the x-axis\n # x_axis_extension_after_dosemax_in_summ_plot = dff.loc[fn, \"x-axis extension in summary fig_0\"]\n # #define the limit of the x-axis as the maximum dose conc.\n # xlim_max = maxAC + x_axis_extension_after_dosemax_in_summ_plot\n # # set the x-axis limits\n # axarr[r].set_xlim(-10,xlim_max)\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Summ_Plot02, summ_Fig01: barchart EC50 (sLet on x-axis) #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # | | Subplot 1, axarr[1]\n # |_________|\n # |XXXXXXXXX|\n # |XXXXXXXXX|\n\n # Plot_Nr = Plot_Nr + 1\n # newfig, savefig, fig_nr, plot_nr_in_fig, r, c = dict_organising_subplots[Plot_Nr]\n # set the axes array position\n r = 1\n # use the sample letter alone as the name on the x-axis\n x_names = df_eval_values.sLet\n # the number of boxes in the bar-chart is the length of the initial dataset\n x_n_boxes = x_names.shape[0]\n # the position of the boxes in the bar-chart is the range(n_boxes)\n box_indices = range(x_n_boxes)\n if True in list(df_eval_values.loc[:,\"EC50_calculable{}\".format(d)]):\n # define the y-axis data\n y_EC50 = pd.to_numeric(df_eval_values[\"EC50{}\".format(d)], errors=\"coerce\")\n # obtain yerr [[REMOVED: NOT SUITABLE FOR DATASETS WITH SMALL DOSE VALUES]]\n #yerr = df_eval_values[\"yerr{}\".format(d)].fillna(0)\n # create a new object (usually not used) that contains a bar-chart on figure (ax) #1\n bar_container = axarr[r].bar(box_indices, y_EC50, color = t20, align=\"center\",\n error_kw=dict(ecolor='k', lw=1, capsize=2, 
capthick=1))#yerr=yerr,\n # set the xticks\n axarr[r].set_xticks(box_indices)\n # set the labels of the x-axis\n axarr[r].set_xticklabels(x_names)\n for xtick, colour in zip(axarr[r].get_xticklabels(), df_eval_values[\"xlabel_colour{}\".format(d)]):\n xtick.set_color(colour)\n # set the limits of the x-axis\n axarr[r].set_xlim([-1, x_n_boxes])\n # set the x-axis title\n axarr[r].set_xlabel(\"sample letter\")\n # set the y-axis title\n axarr[r].set_ylabel(\"{a} ({b})\".format(a=method, b=doseunits))\n #save figure\n fig.tight_layout()\n fig.savefig(dff.loc[fn,\"EC50_analysis_fig_basename\"] + d_name + '.png', format='png', dpi=150)\n if settings[\"save_as_pdf\"] in (True,\"TRUE\"):\n fig.savefig(dff.loc[fn,\"EC50_analysis_fig_basename_pdf\"] + d_name + '.pdf', format='pdf')\n\n if True in list(df_eval_values.loc[:,\"EC50_calculable{}\".format(d)]):\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # #\n # Barchart01: barchart EC50 (full name on x-axis) #\n # #\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n # _________\n # |XXXXXXXXX| Full Canvas\n # |XXXXXXXXX|\n # |XXXXXXXXX|\n # |XXXXXXXXX|\n '''\n Barchart01: barchart original data, EC50_orig (full name on x-axis)\n '''\n bar_fig_nr = 1\n # create new figure (i.e., canvas)\n fig, ax = plt.subplots()\n # use the sample letter plus the sample name as the name on the x-axis\n x_names = df_eval_values.sLet_plus_sample_name\n # yerr_ful = df_eval_values.residuals_mean_ful.fillna(0)*100\n axarr[r].set_title('{a} data {b}'.format(a=d_name[1:],b=data_file),\n fontsize = fig_fontsize, x = 0.22)\n # create a new object (usually not used) that contains a bar-chart on figure (ax) #1\n bar_container = ax.bar(box_indices, y_EC50, color=t20, align=\"center\",\n error_kw=dict(ecolor='k', lw=1, capsize=2, capthick=1))#yerr=yerr,\n # set the xticks. apply the appropriate colour, as decided by the \"judge\" script\n ax.set_xticks(box_indices)\n # df_eval_values[\"xlabel_colour_orig\"] = df_eval_values.data_seems_okay_orig.apply(lambda c: \"k\" if c == True else \"r\")\n for xtick, colour in zip(ax.get_xticklabels(), df_eval_values[\"xlabel_colour{}\".format(d)]):\n xtick.set_color(colour)\n # set the labels of the x-axis\n ax.set_xticklabels(x_names, rotation = 90)\n # set the limits of the x-axis\n ax.set_xlim([-1, x_n_boxes])\n # set the y-axis title\n ax.set_ylabel(\"{a} ({b})\".format(a=method, b=doseunits))\n\n #save figure\n try:\n fig.tight_layout()\n except ValueError:\n sys.stdout.write(\"Sample names may need to be truncated for barchart. 
Current length = {}\".format(x_names.str.len().max()))\n sys.stdout.flush()\n x_names = x_names.str[0:70]\n ax.set_xticklabels(x_names, rotation=90)\n fig.tight_layout()\n fig.savefig(dff.loc[fn,\"EC50_analysis_fig_basename\"] + d_name + \"_bar\" + '.png', format='png', dpi=150)\n if settings[\"save_as_pdf\"] in (True, \"TRUE\"):\n fig.savefig(dff.loc[fn,\"EC50_analysis_fig_basename_pdf\"] + d_name + \"_bar\" + '.pdf', format='pdf')\n plt.close('all')\n\n # drop the columns with a large number of datapoints, to reduce size of the output files\n #list_fitted_cols = \"y_fitted_norm_ful\", \"x_fitted_norm\",\"y_fitted_norm_orig\",\"x_fitted_orig\",\"y_fitted_orig\", \"y_fitted_ful\",\n\n list_arraylike_cols = []\n list_fitted_cols = []\n for d in datasets:\n # define list of large columns with fitted data, for that dataset\n fitted_d = [\"x_fitted_norm{}\".format(d), \"y_fitted_norm{}\".format(d), \"x_fitted{}\".format(d), \"y_fitted{}\".format(d)]\n # add to full list for all datasets\n list_fitted_cols = list_fitted_cols + fitted_d\n # define list of arraylike columns, for that dataset\n arraylike_d = [\"x{}\".format(d), \"y{}\".format(d), \"indices_lowdose_datapoints{}\".format(d),\n \"indices_lowdose_datapoints_excl_nearest_EC50{}\".format(d),\"response_lowdose_datapoints{}\".format(d)]\n # add to full list for all datasets\n list_arraylike_cols = list_arraylike_cols + arraylike_d\n\n # drop columns with fitted data, to reduce filesize\n df_eval_values.drop(list_fitted_cols, axis=1, inplace=True)\n\n # convert listlike to stringlists\n df_eval_values = tools.convert_listlike_cols_to_str(df_eval_values, list_arraylike_cols)\n\n df_by_sample = analyse_by_sample_name_for_single_run(df_eval_values)\n\n # save evaluation dataframe to csv\n df_eval_values.to_csv(dff.loc[fn,\"ofd_EC50_eval_csv\"], sep=\",\", quoting=csv.QUOTE_NONNUMERIC)\n df_eval_values.to_csv(dff.loc[fn,\"ofd_EC50_eval_tabsep_csv\"], sep=\"\\t\", quoting=csv.QUOTE_NONNUMERIC)\n # save evaluation dataframe to excel\n writer = pd.ExcelWriter(dff.loc[fn,\"ofd_EC50_eval_excel\"])#engine='xlsxwriter'\n df_eval_values.to_excel(writer, sheet_name=\"v_\" + data_file[:20])\n df_eval_bool.to_excel(writer, sheet_name=\"b_\" + data_file[:20])\n df_by_sample.to_excel(writer, sheet_name=\"by_sample\")\n settings.to_frame().to_excel(writer, sheet_name=\"settings\")\n writer.save()\n writer.close()\n # save the settings in the csv folder, so that a permanent record of the settings is kept\n settings_csv_path = os.path.join(dff.loc[fn, \"ofd_csv\"], \"settings.csv\")\n settings.to_csv(settings_csv_path)\n print('\\n-------------------------------------\\n')", "title": "" }, { "docid": "940f528f24bd6bbfd56ef5ac19e8770c", "score": "0.4982235", "text": "def make_plot_of_effect_on_ses_coefficient(all_income_results, plot_errorbars):\n plt.figure(figsize=[4, .5 * len(all_income_results)])\n ytick_labels = None\n for plt_idx, SES_col in enumerate(GAPS_OF_INTEREST_COLS):\n plt.subplot(len(GAPS_OF_INTEREST_COLS), 1, plt_idx + 1)\n df_to_plot = all_income_results.loc[all_income_results['SES_col'] == SES_col]\n for i in range(len(df_to_plot)):\n beta = float(df_to_plot.iloc[i]['$b_{SES}$'])\n low_CI = float(df_to_plot.iloc[i]['lower CI'])\n high_CI = float(df_to_plot.iloc[i]['upper CI'])\n if plot_errorbars:\n xerrs = [[beta - low_CI], [high_CI - beta]] \n else:\n xerrs = None\n plt.errorbar([beta], [i], xerr=xerrs,\n color='black', \n capsize=3)\n plt.scatter([beta], [i], color='blue')\n pretty_names = 
{'binarized_income_at_least_50k':r'$\\beta_{income\\geq50k}$', \n 'binarized_education_graduated_college':r'$\\beta_{college grad}$', \n 'is_male':r'$\\beta_{male}$'}\n if ytick_labels is None:\n ytick_labels = list(df_to_plot['controls'])\n else:\n assert ytick_labels == list(df_to_plot['controls'])\n plt.yticks(range(len(df_to_plot)), ytick_labels)\n plt.xlim([0, 8])\n plt.title(pretty_names[SES_col])\n plt.ylabel(\"Other controls\")\n plt.subplots_adjust(hspace=.5)\n plt.show()", "title": "" }, { "docid": "36f742e89c17867c5ca7d013fa687dca", "score": "0.49783444", "text": "def OverlayFitEllipse(img_edges, confidence_parameters, new_controls, globalflags):\n #confidence parameters\n best_ellipse = confidence_parameters[0]\n pnts = confidence_parameters[1]\n norm_err = confidence_parameters[2]\n inliers = confidence_parameters[3]\n #global flags\n debug = globalflags[0]\n displayImages = globalflags[1]\n #create a color image\n img_color = cv2.merge((img_edges,img_edges,img_edges))\n if debug:print(\"Shape of color image is \" + str(img_color.shape))\n OverlayRANSACFit(img_color, pnts, inliers, best_ellipse)\n if displayImages == 1 :\n fig,(ax1,ax2) = plt.subplots(ncols =2 ,nrows =1, figsize=(8,4))\n ax1.set_title(\"Normalized error of the fit\")\n ax1.plot(norm_err, 'k-')\n ax2.set_title(str(new_controls))\n ax2.imshow(img_color)\n return img_color", "title": "" }, { "docid": "16cc874cd287ca0c1eed251b66b4176f", "score": "0.49746385", "text": "def make_plot(ccf, t_bins, mean_rate_ci, t_length, delay,\n plot_file, energies=[], prefix=\"--\", tab_file=None):\n ###################################################################\n ## Make a ratio of ccf to the mean count rate in the interest band\n ###################################################################\n\n # mean_ccf = np.mean(ccf, axis=0)\n # ccf_resid = ccf - mean_ccf\n a = np.array([mean_rate_ci, ] * (2 * t_length + 1)).T\n with np.errstate(all='ignore'):\n ratio = np.where(a != 0, ccf / a, 0)\n\n # print \"\\tMinimum value:\", np.min(ratio)\n # \tprint \"\\tMaximum value:\", np.max(ratio)\n\n ######################################################\n ## Saving to a dat file so that we can use fimgcreate\n ######################################################\n\n# # ratio[27:, ] = 0\n if np.shape(ratio)[0] == 64:\n ratio[28:,] = 0\n elif np.shape(ratio)[0] == 32:\n ratio[26:,] = 0\n out_file = os.path.dirname(plot_file) + \"/temp.dat\"\n R = ratio.real.flatten('C')\n comment_str = \"From %s\" % tab_file\n np.savetxt(out_file, R, fmt=\"%.8f\", comments=comment_str)\n\n #############\n ## Plotting!\n #############\n print(\"Plotting 2D CCF: %s\" % plot_file)\n\n font_prop = font_manager.FontProperties(size=20)\n # fig, ax = plt.subplots(1, 1, figsize=(14, 8), dpi=300, tight_layout=True)\n fig, ax = plt.subplots(1, 1, figsize=(10, 7.5), dpi=300, tight_layout=True)\n\n if len(energies) > 0: ## If energies exists as a variable\n # plt.pcolor(t_bins, energies, ratio, cmap='YlGnBu_r', vmin=-0.3, vmax=0.3)\n # plt.pcolor(t_bins, energies, ratio, cmap='YlGnBu_r', vmin=-0.05, vmax=0.05)\n\n plt.pcolor(t_bins, energies, ratio, cmap='spring')\n # plt.pcolor(t_bins, energies, ratio, cmap='hot', vmin=-0.26, vmax=0.42)\n # plt.pcolor(t_bins, energies, ratio, cmap='spring', vmin=-0.04,\n # vmax=0.04)\n else:\n# \t\tplt.pcolor(ratio, cmap='hot', vmin=-4.0, vmax=4.0)\n plt.pcolor(ratio, cmap='hot')\n plt.xlim(t_bins[0], t_bins[-1])\n # ax.vlines(0.0, 2, 31, linestyle='solid', color='black', lw=1.0)\n\n cbar = plt.colorbar()\n 
cbar.set_label('Ratio of CCF to mean count rate', \\\n fontproperties=font_prop)\n cb_ax = cbar.ax\n cb_ax.tick_params(axis='y', labelsize=18)\n # cbar.set_ticks([-0.04, -0.03, -0.02, -0.01, 0.00, 0.01, 0.02, 0.03, 0.04])\n\n if len(energies) > 0: ## If energies exists as a variable\n ax.set_ylabel('Energy (keV)', fontproperties=font_prop)\n if len(energies) == 65:\n ax.set_ylim(3, 20)\n # rect = patches.Rectangle((-t_length,energies[10]), 2*t_length, 0.41,\n # facecolor=\"orange\", ec=\"none\")\n rect = patches.Rectangle((-t_length, energies[10]), 2 * t_length, 0.41,\n facecolor=\"black\", ec=\"none\")\n ax.add_patch(rect)\n else:\n ax.set_ylabel('Energy channel', fontproperties=font_prop)\n ax.set_ylim(0, np.shape(ratio)[0])\n # rect = patches.Rectangle((-t_length,10), 2*t_length, 1, ec=\"none\")\n\n zero_outline = patches.Rectangle((0, 2), 0.5, 26, edgecolor=\"black\",\n facecolor=\"none\")\n ax.add_patch(zero_outline)\n\n ax.set_xlim(-t_length, t_length)\n # ax.set_xlabel(r'Time-delay ($\\times\\,$8.15$\\,$ms)', fontproperties=font_prop)\n # ax.set_xlabel('Time-delay bins ', fontproperties=font_prop)\n ax.set_xlabel(r'Time-delay ($\\times\\,$%.2f$\\,$ms)' % delay,\n fontproperties=font_prop)\n\n ## Setting the axes' minor ticks. It's complicated.\n x_maj_loc = ax.get_xticks()\n y_maj_loc = ax.get_yticks()\n # y_maj_loc = [5, 10, 15, 20]\n # ax.set_yticks(y_maj_loc)\n x_min_mult = 0.2 * (x_maj_loc[1] - x_maj_loc[0])\n y_min_mult = 0.2 * (y_maj_loc[1] - y_maj_loc[0])\n xLocator = MultipleLocator(x_min_mult) ## loc of minor ticks on x-axis\n yLocator = MultipleLocator(y_min_mult) ## loc of minor ticks on y-axis\n ax.xaxis.set_minor_locator(xLocator)\n ax.yaxis.set_minor_locator(yLocator)\n\n ax.tick_params(axis='x', labelsize=20)\n ax.tick_params(axis='y', labelsize=20)\n ax.tick_params(which='major', width=1.5, length=7)\n ax.tick_params(which='minor', width=1.5, length=4)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n # plt.show()\n plt.savefig(plot_file)\n plt.close()", "title": "" }, { "docid": "60ce8d434a3c55be209d4aad3d166d83", "score": "0.4969558", "text": "def plot_proportion_w_confint(data_df, x_col, y_col, top_n=10, max_ci_len=1.0,\n show_n_obs=None, **kwargs):\n\n def _add_confint_columns():\n \"\"\"Adds the confidence interval columns to a DataFrame.\"\"\"\n\n # Get upper and lower bounds for confidence intervals\n confint_list =\\\n [proportion_confint(cnt, n_obs, method='wilson')\n for cnt, n_obs in zip(grouped_df.cnt, grouped_df.n_obs)]\n\n # Transpose lists so we can insert them into the DataFrame\n confint_list = list(zip(*confint_list))\n # Lower bound of confidence interval\n grouped_df['ci_lower'] = confint_list[0]\n # Upper bound of confidence interval\n grouped_df['ci_upper'] = confint_list[1]\n # Width of confidence interval\n grouped_df['ci_length'] = grouped_df.ci_upper - grouped_df.ci_lower\n # Amount of error to the left of the mean\n grouped_df['error_left'] = grouped_df.prop - grouped_df.ci_lower\n # Amount of error to the right of the mean\n grouped_df['error_right'] = grouped_df.ci_upper - grouped_df.prop\n\n return grouped_df\n\n def _plot_n_obs():\n \"\"\"Plots the number of observations either as text to the right\n of the bars or in the axis.\n \"\"\"\n\n if show_n_obs == 'in_plot':\n for index, n_obs in enumerate(grouped_df.n_obs):\n n_obs_txt = f'n_obs = {n_obs:,}'\n plt.text(0.01, index, n_obs_txt,\n color='white', size=12, verticalalignment='center')\n elif show_n_obs == 'in_axis':\n # Include number of 
observations in index\n grouped_df.index = [f'{sr} (n_obs = {n_obs:,})'\n for sr, n_obs in zip(grouped_df.index,\n grouped_df.n_obs)]\n elif show_n_obs is not None:\n raise ValueError(\"show_n_obs should be either 'in_plot' or 'in_axis'.\")\n\n def _create_plot(**kwargs):\n \"\"\"Plots horizontal bars indicating the proportion.\"\"\"\n # Plot bars\n grouped_df\\\n .prop\\\n .plot(kind='barh', label='Proportion', **kwargs)\n\n if 'ax' in kwargs:\n ax = kwargs['ax']\n # Plot error bars\n ax.errorbar(grouped_df.prop, np.arange(len(grouped_df)),\n xerr=[grouped_df.error_left, grouped_df.error_right],\n fmt='o', c='black', label='Confidence Interval')\n\n ax.set_xlabel('Proportion')\n ax.set_xlim(0, 1)\n\n ax.legend(loc=0)\n\n else:\n # Plot error bars\n plt.errorbar(grouped_df.prop, np.arange(len(grouped_df)),\n xerr=[grouped_df.error_left, grouped_df.error_right],\n fmt='o', c='black', label='Confidence Interval')\n\n plt.xlabel('Proportion')\n plt.xlim(0, 1)\n\n plt.legend(loc=0)\n\n plt.tight_layout()\n\n\n grouped_df = data_df[[y_col, x_col]]\\\n .fillna('none')\\\n .groupby(x_col)\\\n .agg([np.sum, np.size, np.mean])\n\n grouped_df.columns = ['cnt', 'n_obs', 'prop']\n\n grouped_df = _add_confint_columns()\n\n # Sort values, filter by interval length, and take the top features\n grouped_df = grouped_df\\\n .sort_values('prop')\\\n .query('ci_length < @max_ci_len')\\\n .tail(top_n)\n\n _plot_n_obs()\n _create_plot(**kwargs)\n\n return grouped_df", "title": "" }, { "docid": "dc38c893f6e122f6a61551322d2e71aa", "score": "0.49660546", "text": "def plot_CI_multi_OvAll(N, X, sig_level=0.05, dmin=None):\n\n # initiate plot object\n fig, ax = plt.subplots(figsize=(12, 3))\n\n # get control group values\n N_A = N[0]\n X_A = X[0]\n\n # initiate containers for standard error and differences\n SE = []\n d = []\n\n # iterate through X and N and calculate d and SE\n for idx in range(1, len(N)):\n X_B = X[idx]\n N_B = N[idx]\n d.append(X_B / N_B - X_A / N_A)\n SE.append(pooled_SE(N_A, N_B, X_A, X_B))\n SE = np.array(SE)\n d = np.array(d)\n\n # z value\n z = z_val(sig_level)\n ci = SE * z\n\n # bars to represent the confidence interval\n y = np.arange(len(N)-1)\n ax.hlines(y, d-ci, d+ci, color='blue', alpha=0.4, lw=10, zorder=1)\n # marker for the mean\n ax.scatter(d, y, s=300, marker='|', lw=10, color='magenta', zorder=2)\n\n # vertical line to represent 0\n ax.axvline(0, c='grey', linestyle='-')\n\n # plot dmin\n if dmin is not None:\n ax.axvline(-dmin, c='red', linestyle='--', alpha=0.75)\n ax.axvline(dmin, c='green', linestyle='--', alpha=0.75)\n\n # invert y axis to show variant 1 at the top\n ax.invert_yaxis()\n\n # label variants on y axis\n labels = ['variant{}'.format(i+1) for i in range(len(N)-1)]\n plt.yticks(np.arange(len(N)-1), labels)", "title": "" }, { "docid": "fc0efae07221a6053bc700fd3554fe8a", "score": "0.49619004", "text": "def calibrate_ecm(data):\r\n ## 2. ECM\r\n y = data.iloc[:, 0]\r\n x = data.iloc[:, 1:]\r\n\r\n # dLnSt = a + z_t-1 + dLnSt_t-1 + dLnF_t + dLnF_t + epsi\r\n # --- step 1. residual from lnSt = a1 + a2lnF, calculate resid given a1,a2\r\n x1 = sm.add_constant(x)\r\n model_ols = regression.linear_model.OLS(y, x1).fit()\r\n\r\n # --- step 2. 
Estimate the ECM\r\n x2 = sm.add_constant(np.column_stack((model_ols.resid[0:-1],\r\n y[0:-1],\r\n x[1:],\r\n x[0:-1])))\r\n\r\n model_ecm = regression.linear_model.OLS(y.iloc[1:], x2).fit()\r\n #params_ecm = model_ecm.params\r\n fitted_values_ecm = np.dot(x2, model_ecm.params)\r\n\r\n return model_ecm, fitted_values_ecm", "title": "" }, { "docid": "bf312e4c91c0abf4dc665fb6a7910258", "score": "0.49557728", "text": "def confidence_interval(df,param,coeff=2.42):\n \n df2=df.copy()\n\n df_stats=df2[param].describe().T\n stats=df_stats[['count','mean','std']]\n\n stats\n ci95_hi=stats['mean'] + coeff*stats['std']/math.sqrt(stats['count'])\n ci95_lo=stats['mean'] - coeff*stats['std']/math.sqrt(stats['count'])\n df6=df2.loc[(df2[param]>=ci95_lo)&(df2[param]<=ci95_hi)]\n return df6", "title": "" }, { "docid": "55028401e451ad2c3bf91fd6ccd90d09", "score": "0.49504286", "text": "def plot_error(nrd_f, nrd_c, func=calc_l2_error, att='height'):\n xi = nrd_f.data_obj_list[0].x\n yi = nrd_f.data_obj_list[0].y\n zf = [getattr(ob, att) for ob in nrd_f.data_obj_list]\n zi = interp_to_mesh(xi, yi, nrd_c, ind, att)\n err = func(zf, zi)\n N = len(nrd_f.data_obj_list)\n t = np.linspace(0, N, N)\n fig = plt.figure()\n plt.plot(t, err)\n plt.xlabel(\"timestep\")\n plt.ylabel(\"error\")\n plt.title(\"{}: error in {}\".format(nrd_c.name, att))\n return fig", "title": "" }, { "docid": "6e1022dac497b6192c35b1ee49447c3f", "score": "0.49390823", "text": "def ysplot(wlv=[0.1,10, 20, 30],\n confidence=0.95, ifig=None):\n if ifig==None: pass\n else:\n fig = plt.figure(ifig)\n ax = fig.add_subplot(111)\n pass\n\n ## iso work segment dictionary\n dat = weq(wlv, confidence)\n al = dat.keys() \n al.sort() # All levels of work is sorted\n\n # clist = np.log(np.linspace(0, 3, len(wlv)))\n # colors= []\n # for i in range(len(clist)):\n # colors.append(plt.cm.cmap_d['winter_r'](clist[i]))\n # pass\n\n colors = ['b','g','r','c','m','y','k']\n\n eqwc = [] #eq-work contour\n for wlv in range(len(al)):\n rl = dat[al[wlv]].keys(); rl.sort()\n eqwc.append([])\n for ir in rl: #loop over stress ratio\n xm = dat[al[wlv]][ir]['ysx_avg']\n ym = dat[al[wlv]][ir]['ysy_avg']\n hx = dat[al[wlv]][ir]['ysx_conf']\n hy = dat[al[wlv]][ir]['ysy_conf']\n\n # assigns the mean value and error to a\n # iso-work segment\n eqwc[wlv].append([[xm,ym],[hx,hy]])\n\n if ifig==None: pass\n else: ax.errorbar(\n xm, ym, xerr=hx,\n yerr=hy, fmt='o',#mfc='None',\n color=colors[wlv],\n mec=colors[wlv],\n mfc='None')\n pass\n pass\n if ifig==None:pass\n else:\n ax.set_xlim(-10,)\n ax.set_ylim(-10,)\n ax.set_aspect('equal')\n pass\n return eqwc", "title": "" }, { "docid": "6e523f9f8e2190bd123f3794da205d88", "score": "0.49348024", "text": "def EI(x):\n \tepsilon = 0.1\n \tx1=np.array(x).reshape(-1,ndim)\n \tmuNew, stdNew = gp.predict(x1, return_std=True)\n \tfMax=max(Y_init)\n \tZ = (muNew - fMax - epsilon)/stdNew\n \treturn -((muNew - fMax - epsilon)* scipy.stats.norm.cdf(Z) + stdNew*scipy.stats.norm.pdf(Z))", "title": "" }, { "docid": "09f282b2d1ffdfe7c377d5d1425890e1", "score": "0.4934781", "text": "def empiricaldiscount_combinedgroups_xsmodel():\n\t\n\t### Set plot parameters and style\n\tsb.set(style='ticks')\n\tfig, axes = plt.subplots(figsize=(12/1.2, 7/1.2))\n\n\t### Initialize path to CSV with summary of XS model outputs\n\toutputs_csv_uri = os.path.join(paths.xs_coeffs_csv_uri)\n\n\t### Read CSV with summary of XS model outputs to Pandas DataFrame\n\tdf = pd.read_csv(outputs_csv_uri)\n\n\t### Drop coefficients for properties not at risk of flooding\n\tdf = 
df[df['risk']!=0]\n\t\n\t### Set formatting parameters\n\tscalar, lw, ms = 0.25, 1.5, 8\n\n\t### Specify x for no disclosure requirements & below median climate concern\n\tx = 1\n\tdf1 = df[df['group']=='ncnd']\n\tfmt_dict = {'ms':ms, 'mfc':'w', 'mec':'purple', 'ecolor': 'purple'}\n\n\t### Plot\n\taxes.errorbar(x-scalar, df1['coefficient'][df1['fz_risk']=='100'], \n\t\t2*df1['se'][df1['fz_risk']=='100'], marker='o', **fmt_dict)\n\t\n\taxes.errorbar(x, df1['coefficient'][df1['fz_risk']=='500'], \n\t\t2*df1['se'][df1['fz_risk']=='500'], marker='s', **fmt_dict)\n\t\n\taxes.errorbar(x+scalar, df1['coefficient'][df1['fz_risk']=='outside'], \n\t\t2*df1['se'][df1['fz_risk']=='outside'], marker='^', **fmt_dict)\n\n\n\t### Specify x for no disclosure requirements & above median climate concern\n\tx = 2\n\tdf2 = df[df['group']=='cnd']\n\tfmt_dict = {'ms':ms, 'mfc':'purple', 'mec':'purple', 'ecolor': 'purple'}\n\n\t### Plot\n\taxes.errorbar(x-scalar, df2['coefficient'][df2['fz_risk']=='100'], \n\t\t2*df2['se'][df2['fz_risk']=='100'], marker='o', **fmt_dict)\n\n\taxes.errorbar(x, df2['coefficient'][df2['fz_risk']=='500'], \n\t\t2*df2['se'][df2['fz_risk']=='500'], marker='s', **fmt_dict)\n\n\taxes.errorbar(x+scalar, df2['coefficient'][df2['fz_risk']=='outside'], \n\t\t2*df2['se'][df2['fz_risk']=='outside'], marker='^', **fmt_dict)\n\n\t### Specify x for at least one disclosure & below median climate concern\n\tx = 3\n\tdf3 = df[df['group']=='ncd']\n\tfmt_dict = {'ms':ms, 'mfc':'w', 'mec':'green', 'ecolor': 'green'}\n\n\t### Plot\n\taxes.errorbar(x-scalar, df3['coefficient'][df3['fz_risk']=='100'], \n\t\tdf3['se'][df3['fz_risk']=='100'], marker='o', **fmt_dict)\n\n\taxes.errorbar(x, df3['coefficient'][df3['fz_risk']=='500'], \n\t\tdf3['se'][df3['fz_risk']=='500'], marker='s', **fmt_dict)\n\n\taxes.errorbar(x+scalar, df3['coefficient'][df3['fz_risk']=='outside'], \n\t\tdf3['se'][df3['fz_risk']=='outside'], marker='^', **fmt_dict)\n\n\t### Specify x for at least one disclosure & above median climate concern\n\tx = 4\n\tdf4 = df[df['group']=='cd']\n\tfmt_dict = {'ms':ms, 'mfc':'green', 'mec':'green', 'ecolor': 'green'}\n\n\t### Plot\n\taxes.errorbar(x-scalar, df4['coefficient'][df4['fz_risk']=='100'], \n\t\tdf4['se'][df4['fz_risk']=='100'], marker='o', **fmt_dict)\n\n\taxes.errorbar(x, df3['coefficient'][df3['fz_risk']=='500'], \n\t\tdf3['se'][df3['fz_risk']=='500'], marker='s', **fmt_dict)\n\n\taxes.errorbar(x+scalar, df4['coefficient'][df4['fz_risk']=='outside'], \n\t\tdf4['se'][df4['fz_risk']=='outside'], marker='^', **fmt_dict)\n\n\t## Plot formatting\n\taxes.set_xlim(0.5, 4.5)\n\n\taxes.set_ylabel('Empirical flood zone discount (%)')\n\n\taxes.set_xticks([0.5, 1.5, 2.5, 3.5, 4.5])\n\taxes.set_xticklabels([\n\t\t'',\n\t\t'\\n\\nNo disclosure requirements', \n\t\t'',\n\t\t'\\n\\nAt least one disclosure requirement', \n\t\t''],\n\t\tweight = 'bold')\n\n\ttext_fmt = {'ha':'center', 'fontstyle': 'italic'}\n\ty_loc = axes.get_ylim()[0] - ((axes.get_ylim()[1] - axes.get_ylim()[0]) * 0.08)\n\taxes.text(1, y_loc, 'Below median\\nclimate concern', **text_fmt)\n\taxes.text(2, y_loc, 'Above median\\nclimate concern', **text_fmt)\n\taxes.text(3, y_loc, 'Below median\\nclimate concern', **text_fmt)\n\taxes.text(4, y_loc, 'Above median\\nclimate concern', **text_fmt)\n\n\tfor x in [1.5, 2.5, 3.5]:\n\t\taxes.axvline(x=x, color='k', linestyle='--', alpha=0.5)\n\n\taxes.axhline(y=0, color='k')\n\n\tytick_labels = [round(t*100) for t in axes.get_yticks()]\n\taxes.set_yticklabels(ytick_labels)\n\n\t### Create legend 
labels\n\taxes.plot(-1, 0, 'ko', ms=ms, label='100-year flood zone')\n\taxes.plot(-1, 0, 'ks', ms=ms, label='500-year flood zone')\n\taxes.plot(-1, 0, 'k^', ms=ms, label='Outside flood zone')\n\n\t### Create legend\n\taxes.legend(loc='lower left', fontsize=10)\n\n\t### Save figure\n\tfn = 'empiricaldiscount_combinedgroups_xsmodel.png'\n\turi = os.path.join(paths.figures_dir, fn)\n\tplt.savefig(uri, bbox_inches='tight', dpi=600)\n\tplt.savefig(uri.replace('png', 'pdf'), bbox_inches='tight')\n\n\t### Open figure\n\ttime.sleep(0.5)\n\tsubprocess.run(['open', uri])\n\n\treturn None", "title": "" }, { "docid": "3ff418e125cf0d5bbb640d5972674c9c", "score": "0.49286523", "text": "def _confidence_for_samples(self, distribution):\n self.conf_interval_low = self.mean - (distribution * self.std_dev / math.sqrt(self.num_samples))\n self.conf_interval_high = self.mean + (distribution * self.std_dev / math.sqrt(self.num_samples))\n \n self.conf_interval_size_abs = (self.conf_interval_high\n - self.conf_interval_low)\n self.conf_interval_size = self.conf_interval_size_abs / self.mean", "title": "" }, { "docid": "0f2f131fd5ce274b9b11dda9e7621e19", "score": "0.4919277", "text": "def fig_conf_vs_compl():\n outfile = 'fig_conf_vs_compl.pdf'\n\n # Load Test\n test_dlas = test_to_tbl('../Vetting/data/test_dlas_96629_10000.json.gz')\n # Load vette\n vette_10k = ltu.loadjson('../Vetting/vette_10k.json')\n test_ml_idx = np.array(vette_10k['test_idx'])\n # Load ML\n ml_abs = pred_to_tbl('../Vetting/data/test_dlas_96629_predictions.json.gz')\n\n # Matches\n match = test_ml_idx >= 0\n conf = ml_abs['conf'][test_ml_idx[match]]\n max_compl = np.sum(match) / len(test_dlas)\n\n # Sort\n isrt = np.argsort(conf)\n\n # Start the plot\n fig = plt.figure(figsize=(6, 6))\n plt.clf()\n gs = gridspec.GridSpec(1,1)\n\n ax = plt.subplot(gs[0])\n\n # Plot\n cumsum = np.arange(np.sum(match)) / len(test_dlas)\n ax.plot(conf[isrt], max_compl - cumsum)\n\n ax.set_ylabel(r'Completeness (> conf)')\n ax.set_xlabel(r'Confidence')\n #ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))\n ax.set_ylim(0.8, 1)\n set_fontsize(ax, 15.)\n\n #legend = plt.legend(loc='upper right', scatterpoints=1, borderpad=0.3,\n # handletextpad=0.3, fontsize='x-large', numpoints=1)\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.1, w_pad=0.2)\n plt.savefig(outfile)\n plt.close()\n print(\"Wrote {:s}\".format(outfile))", "title": "" }, { "docid": "c481d69028b83c467bf21f93a56d7713", "score": "0.4917383", "text": "def figure_combined(xx, xticks, cc_exp, cc_theo, tt, t_trans, D, F, D_STD, F_STD,\n error, plt_profiles='all', suffix='', save=False,\n savePath=os.getcwd()):\n # setting number of profiles to plot\n c_nbr = cc_theo[0, :].size # number of numerical profiles\n if plt_profiles is 'all' or c_nbr < plt_profiles:\n plt_nbr = np.arange(1, c_nbr) # plot all profiles\n else:\n # logarithmicly selecting profiles to plot, more for earlier times\n plt_nbr = np.unique(np.logspace(0, np.log10(c_nbr-1), num=plt_profiles).astype(int))\n\n # creating x-vector for plotting experimental profiles\n diff = cc_theo[:, 1].size - cc_exp[1].size # difference in lengths\n xx_exp = xx[diff:] # truncated vector for plotting experimental profiles\n\n # create appropriate colormap using dummy plot\n z = [tt/60, tt/60, tt/60] # amplitude dummy is time\n dummy_map = plt.imshow(z, cmap='jet', norm=mpl.colors.LogNorm())\n # linear map between [0, 1] ~ log(t) in range of [t_min, t_max], t_min > 0\n colors = [dummy_map.cmap(np.log10(tt[j])/(np.log10(tt[-1])-np.log10(tt[1])) -\n 
np.log10(tt[1])/(np.log10(tt[-1])-np.log10(tt[1]))) for j in plt_nbr]\n\n fig = plt.figure() # create figure\n ax_profiles = plt.subplot2grid((2, 3), (0, 0), rowspan=2, colspan=2)\n ax_D = plt.subplot2grid((2, 3), (0, 2))\n ax_F = plt.subplot2grid((2, 3), (1, 2), sharex=ax_D)\n # subplot labels\n fig.text(0.005, 0.92, 'A', fontsize='xx-large', weight='extra bold') # add subplot label\n fig.text(0.645, 0.92, 'B', fontsize='xx-large', weight='extra bold')\n fig.text(0.645, 0.53, 'C', fontsize='xx-large', weight='extra bold')\n\n # plotting concentration profiles\n plt_c_theo, plt_c_exp = [], []\n for j, col in zip(plt_nbr, colors): # plot rest of profiles\n if j < len(cc_exp): # only plot experimental data if provided\n plt_c_exp.append(ax_profiles.plot(xx_exp, cc_exp[j], '.', color=col))\n plt_c_theo.append(ax_profiles.plot(xx, cc_theo[:, j], '--', color=col))\n ax_profiles.set(xlabel='z-distance [$\\mu$m]', ylabel='Normalized concentration')\n plt_c_zero = ax_profiles.plot(xx, cc_exp[0], '-k') # t=0 profile\n # printing legend\n ax_profiles.legend([plt_c_zero[0], plt_c_exp[0][0], plt_c_theo[0][0]],\n [\"c$_{init}$ (t = 0, z)\", \"Experiment\", \"Numerical\"],\n frameon=False, loc='lower left')\n # show also computed error\n ax_profiles.text(0.8, 0.9, '$\\sigma$ = $\\pm$ %.3f' % error,\n transform=ax_profiles.transAxes)\n # create colorbar with correct labels\n fig.colorbar(dummy_map, label='Time [min]', pad=0.0125, ax=ax_profiles)\n\n # plotting D and F profiles\n for ax, df, df_std, col, label in zip([ax_D, ax_F], [D, F], [D_STD, F_STD],\n ['r', 'b'], ['D [$\\mu$m$^2$/s]', 'F [k$_B$T]']):\n ax.errorbar(xx, df, yerr=df_std, fmt='.--'+col)\n ax.set(ylabel=label)\n ax.get_yaxis().set_label_coords(-0.21, 0.5)\n ax.axhline(df[-1], ls=':', c=col)\n ax.set_ylim([0 - 0.1*np.max(df), np.max(df) + np.max(df_std) + 0.1*np.max(df)])\n ax_F.set(xlabel='z-distance [$\\mu$m]') # set x-axes\n plt.setp(ax_D.get_xticklabels(), visible=False) # don't show x-ticks for D plot\n # indicate values in solution and in bulk\n yy_D, yy_F = [0, np.min(D), np.max(D)], [np.min(F), np.max(F)]\n for ax, ticks, col, form in zip([ax_F, ax_D], [yy_F, yy_D], ['blue', 'red'], ['%.2f', '%.1f']):\n ax.set_yticks(ticks)\n ax.get_yticklabels()[-1].set_color(col)\n ax.yaxis.set_major_formatter(FormatStrFormatter(form))\n ax_D.get_yticklabels()[-2].set_color('red')\n\n # nicen up plots with background colors\n dx_2 = abs(xx[-2]-xx[-1]) # bin size in second segment\n for ax in [ax_F, ax_D, ax_profiles]:\n if ax is ax_profiles:\n skips = 1\n else: # for D, F plots only use half of xticks\n skips = 2\n ax.set_xticks(xticks[0][::skips])\n ax.set_xticklabels(xticks[1][::skips])\n ax.axvline(t_trans, ls=':', c='k') # indicate transition\n ax.axvspan(-2*dx_2, t_trans, color=[0.875, 0.875, 1], lw=0) # bulk = blue\n ax.axvspan(t_trans, xx[-1]+2*dx_2, color=[0.9, 0.9, 0.9], lw=0) # gel = grey\n ax.set_xlim([xx[0]-2*dx_2, xx[-1]+2*dx_2])\n\n # for double column figures in acs style format\n w_double = 7 # inch size for width of double column figure for ACS journals\n width, height = fig.get_size_inches()\n fig.set_size_inches(w_double, height)\n fig.tight_layout(pad=0.5, w_pad=0.55)\n\n if save:\n plt.savefig(savePath+'results_combined_%s.pdf' % suffix)\n else:\n plt.show()", "title": "" }, { "docid": "8d4a7c34cb3879fa631fb277ad46042d", "score": "0.49167725", "text": "def plot_experiments(vels, x_plot, r_pdf, r_cdf, f_pdf, f_cdf):\n\n density_fig, density_ax = plt.subplots()\n density_ax.hist(vels, density=True, label='Sample data')\n\n 
density_ax.plot(x_plot, r_pdf, label='Adiabatic reduction')\n density_ax.plot(x_plot, f_pdf, label='Full model')\n density_ax.legend(loc='best')\n plt.show()\n\n x_cdf = np.sort(vels)\n x_cdf = np.insert(x_cdf, 0, 0)\n y_cdf = np.linspace(0, 1, len(x_cdf))\n x_cdf = np.append(x_cdf, 1.5)\n y_cdf = np.append(y_cdf, 1)\n\n cdf_fig, cdf_ax = plt.subplots()\n cdf_ax.step(x_cdf, y_cdf, where='post', label='Sample data')\n cdf_ax.plot(x_plot, r_cdf, label='Adiabatic reduction')\n cdf_ax.plot(x_plot, f_cdf, label='Full model')\n cdf_ax.legend(loc='best')\n plt.show()", "title": "" }, { "docid": "43351a8219cffedebc77478eeb9b20df", "score": "0.49156013", "text": "def hypergeom_conf_interval(n, x, N, cl=0.975, alternative=\"two-sided\", G=None,\n method = 'clopper-pearson', **kwargs):\n assert alternative in (\"two-sided\", \"lower\", \"upper\")\n if G is None:\n G = (x / n) * N\n ci_low = 0\n ci_upp = N\n if alternative == 'two-sided':\n cl = 1 - (1 - cl) / 2\n if (method == \"clopper-pearson\"):\n if alternative != \"upper\" and x > 0:\n def f(q):\n return cl - hypergeom.cdf(x - 1, N, q, n)\n while f(G) < 0:\n G = (G+N)/2\n ci_low = ceil(brentq(f, 0.0, G, *kwargs))\n\n if alternative != \"lower\" and x < n:\n def f(q):\n return hypergeom.cdf(x, N, q, n) - (1 - cl)\n while f(G) < 0:\n G = G/2\n ci_upp = floor(brentq(f, G, N, *kwargs))\n elif (method == \"sterne\"):\n if alternative != \"upper\" and x > 0:\n while x not in hypergeom_accept(N, ci_low, n, 1 - cl,\n randomized=False):\n ci_low += 1\n if ci_low > n:\n ci_low = n\n break\n if alternative != \"lower\" and x < n:\n while x not in hypergeom_accept(N, ci_upp, n, 1 - cl,\n randomized=False):\n ci_upp -= 1\n if ci_upp < 0:\n ci_upp = 0\n break\n return ci_low, ci_upp", "title": "" }, { "docid": "24aa4b65c3de952ca8f92c27250da0af", "score": "0.49095696", "text": "def ecdf(data):\n return np.sort(data), np.arange(1, len(data)+1) / len(data)", "title": "" }, { "docid": "24aa4b65c3de952ca8f92c27250da0af", "score": "0.49095696", "text": "def ecdf(data):\n return np.sort(data), np.arange(1, len(data)+1) / len(data)", "title": "" }, { "docid": "8bdd85ede8b1078d6d8fad0ad3281669", "score": "0.4907333", "text": "def make_dose_plot(self):\r\n shape = self.doseDistribution.shape\r\n yMax = shape[0]/self.doseDistribution.DPC\r\n xMax = shape[1]/self.doseDistribution.DPC\r\n \r\n #smooth if desired\r\n if self.ui.smooth.isChecked():\r\n if not hasattr(self,\"origDoseDistribution\"): #save original dose distribution, if not already saved\r\n self.origDoseDistribution = self.doseDistribution\r\n #create a smoothed array of the original dose and then cast it to an dose array\r\n #then copy the DPC and unit from the old array, there should be a more comfortable way....\r\n self.doseDistribution = self.smooth_dose(self.origDoseDistribution).view(DoseArray)\r\n self.doseDistribution.DPC = self.origDoseDistribution.DPC\r\n self.doseDistribution.unit = self.origDoseDistribution.unit\r\n \r\n elif hasattr(self,\"origDoseDistribution\"):#restor original dose distribution if smooth is unchecked\r\n self.doseDistribution = self.origDoseDistribution\r\n del self.origDoseDistribution\r\n \r\n self.ax1.cla()\r\n #plot the dose distrubtion\r\n self.dosePlot = self.ax1.imshow(self.doseDistribution,\r\n interpolation=\"nearest\",\r\n extent=[0,xMax,yMax,0],\r\n zorder=-1)#image should be lowest zorder\r\n \r\n self.clb = self.fig.colorbar(self.dosePlot, cax = self.clbAxes,\r\n orientation=\"vertical\", \r\n format=ScalarFormatterWithUnit(unit=self.doseDistribution.unit))\r\n 
\r\n\r\n self.ax1.minorticks_on()\r\n for axis in ['top','bottom','left','right']:\r\n self.ax1.spines[axis].set_linewidth(2.0)\r\n \r\n self.ax1.tick_params(which='major',direction=\"out\",width=2.0,length=6,\r\n bottom=True,top=True,left=True,right=True,\r\n labelbottom=True,labeltop=True,labelleft=True,labelright=True) \r\n self.ax1.tick_params(which='minor',direction=\"out\",width=1.5,length=4,\r\n bottom=True,top=True,left=True,right=True)\r\n \r\n self.update_dose_plot()", "title": "" }, { "docid": "a37e2b1173b554cc85913d8a5680436c", "score": "0.49028176", "text": "def _figure_setup(self, **kwargs):\n fig, axes = pyplot.subplots(2, 1, **kwargs)\n ax1, ax2 = axes\n self.predicted_line, = ax1.plot(self.x, self.predicted, '-r')\n if self.data is not None:\n self.data_line, = ax1.plot(self.x, self.data, '.k')\n ax1.set_ylabel('Gravity anomaly (mGal)')\n ax1.set_xlabel('x (m)', labelpad=-10)\n ax1.set_xlim(self.area[:2])\n ax1.set_ylim((-200, 200))\n ax1.grid()\n tmp = ax2.pcolor(numpy.array([self.density_range]), cmap=self.cmap)\n tmp.set_visible(False)\n pyplot.colorbar(tmp, orientation='horizontal',\n pad=0.08, aspect=80).set_label(r'Density (kg/cm3)')\n # Remake the polygons and lines to make sure they belong to the right\n # axis coordinates\n vertices = [p.xy for p in self.polygons]\n newpolygons, newlines = [], []\n for xy, dens in zip(vertices, self.densities):\n poly, line = self._make_polygon(xy, dens)\n newpolygons.append(poly)\n newlines.append(line)\n ax2.add_patch(poly)\n ax2.add_line(line)\n self.polygons = newpolygons\n self.lines = newlines\n ax2.set_xlim(self.area[:2])\n ax2.set_ylim(self.area[2:])\n ax2.grid()\n ax2.invert_yaxis()\n ax2.set_ylabel('z (m)')\n fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.06,\n hspace=0.1)\n self.canvas = fig.canvas\n self.dataax = axes[0]\n self.modelax = axes[1]\n fig.canvas.draw()\n return fig", "title": "" }, { "docid": "a5ac213faaca567b87059f723eb1c1f5", "score": "0.49007562", "text": "def plot_calibration(y_pred, y_std, y_true, ax, num_bins=200):\n\n # Compute proportions\n exp_proportions = np.linspace(0, 1, num_bins)\n\n norm = stats.norm(loc=0, scale=1)\n gaussian_lower_bound = norm.ppf(0.5 - exp_proportions / 2.0)\n gaussian_upper_bound = norm.ppf(0.5 + exp_proportions / 2.0)\n residuals = y_pred - y_true\n normalized_residuals = (residuals.flatten() / y_std.flatten()).reshape(-1, 1)\n above_lower = normalized_residuals >= gaussian_lower_bound\n below_upper = normalized_residuals <= gaussian_upper_bound\n\n within_quantile = above_lower * below_upper\n obs_proportions = np.sum(within_quantile, axis=0).flatten() / len(residuals)\n\n ax.plot([0, 1], [0, 1], \"--\", label=\"Ideal\", c=diag_color)\n plot_color(ax, exp_proportions, obs_proportions,(exp_proportions-obs_proportions)>0)\n #ax.scatter(exp_proportions, obs_proportions, c = (exp_proportions-obs_proportions)>0, edgecolor=None)\n\n ax.fill_between(exp_proportions, exp_proportions, obs_proportions,\n where= (exp_proportions-obs_proportions)>0, interpolate = True,\n color=\"black\", alpha=0.2, label=\"Overconfident\")\n ax.fill_between(exp_proportions, exp_proportions, obs_proportions,\n where= (exp_proportions-obs_proportions)<0, interpolate=True,\n color=\"blue\", alpha=0.2, label=\"Underconfident\")\n\n ax.set_aspect('equal', adjustable='box')\n buff = 0\n ax.set_xlim([0 - buff, 1 + buff])\n ax.set_ylim([0 - buff, 1 + buff])\n\n # Compute miscalibration area\n polygon_points = []\n for point in zip(exp_proportions, obs_proportions):\n 
polygon_points.append(point)\n for point in zip(reversed(exp_proportions), reversed(exp_proportions)):\n polygon_points.append(point)\n polygon_points.append((exp_proportions[0], obs_proportions[0]))\n polygon = Polygon(polygon_points)\n x, y = polygon.exterior.xy # original data\n ls = LineString(np.c_[x, y]) # closed, non-simple\n lr = LineString(ls.coords[:] + ls.coords[0:1])\n mls = unary_union(lr)\n polygon_area_list = [poly.area for poly in polygonize(mls)]\n miscalibration_area = np.asarray(polygon_area_list).sum()\n\n # Annotate plot with the miscalibration area\n ax.text(\n x=0.95,\n y=0.05,\n s=\"Miscalibration area = %.2f\" % miscalibration_area,\n verticalalignment=\"bottom\",\n horizontalalignment=\"right\",\n )\n\n ax.set_xlabel(\"Theoretical proportion in Gaussian interval\")\n ax.set_ylabel(\"Observed proportion in Gaussian interval\")\n ax.legend()", "title": "" }, { "docid": "f571f5e1bb86393039692473700aad2b", "score": "0.48974836", "text": "def empirical_cdf(data):\n\n vals = pd.Series(data).value_counts()\n ecdf = pd.DataFrame(data).set_index(keys=0)\n probs = pd.DataFrame(vals.sort_index().cumsum() / np.float(len(data)))\n ecdf = ecdf.join(probs)\n ecdf = ecdf.reset_index()\n ecdf.columns = ['data', 'ecdf']\n\n return ecdf", "title": "" }, { "docid": "e1a911f4e7ef5079f2b146292965e76f", "score": "0.48934835", "text": "def cdf_exp(left, right, scale):\n return expon.cdf(right, scale=scale) - expon.cdf(left, scale=scale)", "title": "" }, { "docid": "fa8eed25e1c683d982fcbe86c7a49262", "score": "0.48929313", "text": "def plotDistributionCIRisk(X, ymax, bins):\n\n plt.style.use('ggplot')\n\n plt.figure(figsize=(18,6))\n plt.hist(X,alpha=0.5,bins=bins,color='blue',label='5 year risk distribution')\n \n mean, _, conf_int = confidenceInterval(X,alpha=0.95)\n print('Mean: ', mean)\n print('Median: ', np.median(X))\n print('Number of risk values: ', len(X))\n print('C.I.: ', conf_int)\n plt.vlines( x=conf_int[0],ymin=0, ymax=ymax, alpha=0.8, color='green', linewidth='3',label='95% C.I. 
of median')\n plt.vlines( x=conf_int[1],ymin=0, ymax=ymax, alpha=0.8, color='green', linewidth='3')\n plt.vlines( x=np.median(X),ymin=0, ymax=ymax, alpha=0.8, color='black', linewidth='3'\n , label= 'median = ' + str(np.round(mean,2)))\n plt.vlines( x=1.67,ymin=0, ymax=ymax, alpha=0.8, color='red', linewidth='3', label='1.67 Threshold')\n \n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.title('Distribution of Risk with 1.67 threshold', fontsize=20)\n plt.xlabel('5 Year Risk value', fontsize=16)\n plt.ylabel('density', fontsize=16)\n plt.legend(loc='best', fontsize=16)\n plt.show()", "title": "" }, { "docid": "805d7c189df17b8d7822155db9952a23", "score": "0.4891808", "text": "def generate_aln_break_point_plot( aln_end_values, outputfile, saturation, resolution, title, contig_len, factor ):\n\t\n\ty_values = []\n\tx_values = []\n\tfor i in range( int( contig_len/ resolution) ):\n\t\tx_values.append( i*resolution )\n\t\ttry:\n\t\t\ty_values.append( min( [ saturation, aln_end_values[ i ] ] ) )\n\t\texcept KeyError:\n\t\t\ty_values.append( 0 )\n\t\n\t\n\tmedian = max( [ 1, np.median( y_values ) ] )\n\tiqr = max( [ 1, stats.iqr( y_values ) ] )\n\tprint \"median: \" + str( median )\n\tprint \"IQR: \" + str( iqr )\n\t\n\tplot_x_values = []\n\tplot_y_values = []\n\tfor i, y in enumerate( y_values ):\n\t\tif y > median + factor * iqr:\n\t\t\tplot_x_values.append( x_values[ i ] )\n\t\t\tplot_y_values.append( y )\n\t\n\t\n\t# --- construct figure --- #\n\tfig, ax = plt.subplots()\n\t\n\tax.set_title( title )\n\t#ax.plot( plot_x_values, plot_y_values, linestyle=\":\", marker=\".\", color=\"black\", linewidth=0.01 )\n\tax.scatter( plot_x_values, plot_y_values, marker=\"o\", color=\"black\", s=1 )\n\t\n\tax.set_xlabel( \"position on reference sequence [bp]\" )\n\tax.set_ylabel( \"number of alignment starts/ends\" )\n\t\n\tfig.savefig( outputfile, dpi=300 )\n\tplt.close( \"all\" )\n\t\n\t# --- prepare data for return --- #\n\tavg_ends = np.mean( y_values )\n\tfinal_data = []\n\tfor i, val in enumerate( plot_y_values ):\n\t\tfinal_data.append( { 'pos': plot_x_values[ i ], 'counts': val, 'rel': val / avg_ends } )\n\treturn final_data", "title": "" }, { "docid": "4e9e2ee7cf238badbd2bb2f641fb4816", "score": "0.48887056", "text": "def visualize_covariance_ellipse(covariance, center, conf=None, std=None, ax=None, debug=True, **kwargs):\n if debug:\n if conf is not None:\n assert isscalar(conf) and conf >= 0 and conf <= 1, 'the confidence is not in a good range'\n if std is not None:\n assert ispositiveinteger(std), 'the number of standard deviation should be a positive integer'\n\n def eigsorted(covariance):\n vals, vecs = np.linalg.eigh(covariance)\n # order = vals.argsort()[::-1]\n # return vals[order], vecs[:,order]\n return vals, vecs\n\n if conf is not None:\n conf = np.asarray(conf)\n elif std is not None:\n conf = 2 * norm.cdf(std) - 1\n else:\n raise ValueError('One of `conf` and `std` should be specified.')\n r2 = chi2.ppf(conf, 2)\n\n if ax is None:\n ax = plt.gca()\n\n vals, vecs = eigsorted(covariance)\n theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))\n # theta = np.degrees(np.arctan2(*vecs[::-1, 0]))\n\n # Width and height are \"full\" widths, not radius\n # width, height = 2 * std * np.sqrt(vals)\n width, height = 2 * np.sqrt(np.sqrt(vals) * r2)\n # width, height = 2 * np.sqrt(vals[:, None] * r2)\n\n ellipse = Ellipse(xy=center, width=width, height=height, angle=theta, **kwargs)\n ellipse.set_facecolor('none')\n\n ax.add_artist(ellipse)\n return ellipse", "title": "" }, { "docid": 
"59cf36c518366e124ebc1088c60ac8d0", "score": "0.48859203", "text": "def get_confidence_interval(df_bootstrap, conf_pct=0.95):\n conf_margin = (1 - conf_pct)/2\n df_conf_intvl = pd.DataFrame(columns=['min', 'max'], index=df_bootstrap.index)\n\n df_conf_intvl['min'] = df_bootstrap.quantile(conf_margin, axis=1)\n df_conf_intvl['max'] = df_bootstrap.quantile(1-conf_margin, axis=1)\n\n df_conf_intvl = df_conf_intvl.sort_index()\n\n return df_conf_intvl", "title": "" }, { "docid": "c7177dfc91b7251c595ad3343bcfa0e2", "score": "0.48839086", "text": "def confidence_interval(m, s, n, alpha):\n import numpy as np\n from scipy import stats\n\n # Compute one-tailed Student's t quantile with (0.5 * alpha) significance\n # per tail, n - 1 degrees of freedom\n t = stats.t.ppf(1 - (0.5 * alpha), n - 1)\n\n # Compute lower and upper confidence intervals for the mean\n lower = m - t * s / np.sqrt(n)\n upper = m + t * s / np.sqrt(n)\n\n print('{:.0f}% confidence interval for the population mean:\\n'\\\n .format((1 - alpha) * 100))\n print('Sample size: {:.0f}'.format(n))\n print('Sample mean: {:.2f}'.format(m))\n print('Sample standard deviation: {:.2f}'.format(s))\n print('\\n(lower; upper) = ({:.4f}; {:.4f})'.format(lower, upper))", "title": "" }, { "docid": "f2d23c744c62fb37d25bb6580ed664a0", "score": "0.48797297", "text": "def plot_eigenvalues(eigenvalues_list, titles_list,ylim_list,\n output_file_name, n):\n fig= plt.figure(figsize=(15, 4))\n for i in range(3): \n plt.subplot(1, 3, i+1)\n plt.tight_layout()\n plt.scatter([j for j in range(10)],\n eigenvalues_list[i][0:10],\n c=\"blue\", marker = \"*\")\n y_label = str(i+1)+' Eigenvector'\n plt.title(titles_list[i])\n plt.xlabel ('Number')\n plt.ylabel ('Eigenvalue')\n plt.ylim(ylim_list[i])\n plt.savefig(output_file_name)\n plt.show()", "title": "" }, { "docid": "2ca3636dd8d891c9336d306f3de9eb38", "score": "0.48644537", "text": "def confidence_intervals(fit_result, sigmas=(1, 2, 3), _larch=None, **kws):\n fitter = getattr(fit_result, 'fitter', None)\n result = getattr(fit_result, 'fit_details', None)\n return conf_interval(fitter, result, sigmas=sigmas, **kws)", "title": "" }, { "docid": "96894eca89f2472759dd32d8e4fcb4e5", "score": "0.48577508", "text": "def compute_confidence(self):\n pseudo_distance = self.connectivities_coarse.copy()\n pseudo_distance.data = 1./pseudo_distance.data\n connectivities_coarse_tree = minimum_spanning_tree(pseudo_distance)\n connectivities_coarse_tree.data = 1./connectivities_coarse_tree.data\n connectivities_coarse_tree_indices = [\n connectivities_coarse_tree[i].nonzero()[1]\n for i in range(connectivities_coarse_tree.shape[0])]\n # inter- and intra-cluster based confidence\n if not self._tree_based_confidence:\n total_n = self.n_neighbors * np.array(self.vc.sizes())\n logg.msg('{:>2} {:>2} {:>4} {:>4} {:>4} '\n '{:>7} {:>7} {:>7} {:>7}'\n .format('i', 'j', 'conn', 'n[i]', 'n[j]',\n 'avg', 'thresh', 'var', 'conf'), v=5)\n maximum = self.connectivities_coarse.max()\n confidence = self.connectivities_coarse.copy() # initializing\n for i in range(self.connectivities_coarse.shape[0]):\n for j in range(i+1, self.connectivities_coarse.shape[1]):\n if self.connectivities_coarse[i, j] > 0:\n minimum = min(total_n[i], total_n[j])\n average = self.connectivities_coarse[i, j] / minimum\n geom_mean = np.sqrt(total_n[i] * total_n[j])\n confidence[i, j] = self.connectivities_coarse[i, j] / geom_mean\n # confidence[i, j] = self.connectivities_coarse[i, j] / maximum\n variance = 0.0\n # variance = self.threshold * (1-self.threshold)\n # 
if average > self.threshold:\n # confidence[i, j] = 1\n # else:\n # confidence[i, j] = norm.cdf(average,\n # self.threshold, variance)\n logg.msg(\n '{:2} {:2} {:4} {:4} {:4} '\n '{:7.2} {:7.2} {:7.2} {:7.2}'\n .format(i, j, int(self.connectivities_coarse[i, j]),\n total_n[i], total_n[j],\n average, self.threshold, variance, confidence[i, j]), v=5)\n confidence[j, i] = confidence[i, j]\n # tree-based confidence\n else:\n median_connectivities_coarse_tree = np.median(connectivities_coarse_tree.data)\n confidence = self.connectivities_coarse.copy()\n confidence.data[self.connectivities_coarse.data >= median_connectivities_coarse_tree] = 1\n connectivities_coarse_adjusted = self.connectivities_coarse.copy()\n connectivities_coarse_adjusted.data -= median_connectivities_coarse_tree\n connectivities_coarse_adjusted.data = np.exp(connectivities_coarse_adjusted.data)\n index = self.connectivities_coarse.data < median_connectivities_coarse_tree\n confidence.data[index] = connectivities_coarse_adjusted.data[index]\n confidence_tree = self.compute_confidence_tree(\n confidence, connectivities_coarse_tree_indices)\n self.confidence = confidence\n self.confidence_tree = confidence_tree", "title": "" }, { "docid": "f16849ffb103c4d9c680b98720caa43d", "score": "0.48537478", "text": "def cdf(self, x):\n pi = 3.1415926536\n e = 2.7182818285\n erfval = (x - self.mean) / (self.stddev * 2 ** .5)\n y = erfval\n series = (1 + (y - y ** 3 / 3 + y ** 5 / 10 - y ** 7 / 42\n + y ** 9 / 216) * 2 / (pi ** .5)) / 2\n cdfval = (.5 + series)\n return series", "title": "" }, { "docid": "621f5152aba2875c4d95889f580bc6e6", "score": "0.48421213", "text": "def confidence(naf):\n return ConfidenceInterval(naf.cumulative_hazard_.values[:, 0][-1],\n naf.confidence_interval_['{}_lower_0.95'.format(self.label)].values[-1],\n naf.confidence_interval_['{}_upper_0.95'.format(self.label)].values[-1],\n float('nan'))", "title": "" }, { "docid": "22f08c67f323d02f8060ae4061dbd581", "score": "0.4840385", "text": "def plot_extinction_curves(starpair_list, inpath, outpath):\n # plot the extinction curves in E(lambda-V)\n plot_multi_extinction(\n starpair_list,\n inpath,\n range=[0.76, 5.5],\n spread=True,\n exclude=[\"IRS\"],\n pdf=True,\n )\n\n # specify the offsets and angles for the star names\n offsets = [\n 0.04,\n 0.04,\n 0.0,\n 0.04,\n 0.0,\n 0.05,\n 0.03,\n 0.0,\n -0.07,\n 0.02,\n 0.01,\n 0.05,\n 0.01,\n 0.05,\n 0.02,\n ]\n angles = [\n -40,\n -46,\n -36,\n -30,\n -46,\n -46,\n -42,\n -42,\n -44,\n -46,\n -42,\n -46,\n -46,\n -46,\n -46,\n ]\n\n # plot the extinction curves in A(lambda)/A(V)\n fig, ax = plot_multi_extinction(\n starpair_list,\n inpath,\n alax=True,\n fitmodel=True,\n range=[0.76, 5.3],\n spread=True,\n exclude=[\"IRS\", \"I\", \"L\", \"IRAC1\", \"IRAC2\", \"WISE1\", \"WISE2\"],\n text_offsets=offsets,\n text_angles=angles,\n figsize=(15, 18.75),\n pdf=True,\n )\n\n # change the fitted lines to thinner dashed lines\n for i, line in enumerate(ax.lines):\n if line.get_color() == \"crimson\":\n plt.setp(ax.lines[i], ls=\"--\", lw=1.5)\n elif line.get_color() == \"k\":\n plt.setp(ax.lines[i], alpha=1)\n\n # finalize and save the plot\n ax.set_ylim(-0.1, 4.15)\n ax.get_legend().remove()\n fig.savefig(outpath + \"ext_curves_alav.pdf\", bbox_inches=\"tight\")", "title": "" }, { "docid": "d6fb5728b82deb652179294169c8714b", "score": "0.48402095", "text": "def supp_figure_1():\r\n\r\n # Load the data\r\n df = pd.read_csv(os.path.join(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv'))\r\n\r\n # Set bins edges\r\n data_set 
= sorted(set(df['Volume'].dropna()))\r\n bins = np.append(data_set, data_set[-1] + 1)\r\n\r\n # Use the histogram function to bin the data and find the CDF\r\n counts, bin_edges = np.histogram(df['Volume'].dropna(), bins=bins, normed=True, density=False)\r\n counts = counts.astype(float) / len(df['Volume'])\r\n cdf = np.cumsum(counts)\r\n\r\n # Find the percentile for a volume of 52m3/m\r\n use_vol = (53.5 + 50.6) / 2\r\n vol_diff = np.abs(bin_edges[1:] - use_vol)\r\n min_ix = np.argmin(vol_diff)\r\n use_percentile = cdf[min_ix]\r\n\r\n # Setup the figure\r\n fig, ax = plt.subplots(figsize=(figure_inches, figure_inches), dpi=figure_dpi)\r\n\r\n # Add a grid\r\n add_grids(ax)\r\n\r\n # Plot the CDF\r\n ax.plot(bin_edges[1:], cdf, zorder=2)\r\n\r\n # Add a line for the model volume\r\n ax.axvline(x=use_vol, ymin=0, ymax=use_percentile, color='black', linestyle='--', zorder=4)\r\n ax.axhline(y=use_percentile, xmin=0, xmax=(use_vol / 400), color='black', linestyle='--', zorder=4)\r\n ax.scatter(x=use_vol, y=use_percentile, color='red', marker='o', s=25, zorder=6)\r\n\r\n # Set axes limits\r\n axis_limits(ax, l=0, r=400, b=0, t=1)\r\n\r\n # Label the axes\r\n add_label(ax, s='Dune Volume (m$^{3}$/m)', type='X')\r\n add_label(ax, s='CDF', type='Y')\r\n add_label(ax, s=f'{use_vol} m3/m\\n{np.around(use_percentile * 100, decimals=2)}th Percentile', type='T')\r\n\r\n # Save and close\r\n save_and_close(fig=fig, title='Bogue Banks Dune Volume CDF', tight=True)", "title": "" }, { "docid": "a5ad2ffd3468e87ad5c13b7d1f469b84", "score": "0.48401138", "text": "def plot(self, vlo=2., vhi=98., nc=-1, method='p', mpl=False, cmap=CMDEF, \\\n close=True, x1=None, x2=None, y1=None, y2=None, sepmin=1.):\n\n if nc == -1:\n nc1 = 0\n nc2 = len(self)\n else:\n nc1 = nc\n nc2 = nc+1\n \n if not mpl:\n if close: pg.pgopen('/xs')\n if nc2-nc1 > 1: pg.pgsubp(nc2-nc1,1)\n\n prange = []\n for nc, ccd in enumerate(self._data[nc1:nc2]):\n\n # Determine intensity range to display\n if method == 'p':\n vmin, vmax = ccd.centile((vlo,vhi))\n elif method == 'a':\n vmin, vmax = ccd.min(), ccd.max()\n elif method == 'd':\n vmin, vmax = vlo, vhi\n else:\n raise UltracamError('MCCD.plot: method must be one of p, a or d.')\n\n if vmin == vmax:\n vmin -= sepmin/2.\n vmax += sepmin/2.\n prange.append((vmin, vmax))\n\n # start\n nxmax, nymax = ccd.nxmax, ccd.nymax\n x1 = 0.5 if x1 is None else x1\n x2 = nxmax+0.5 if x2 is None else x2\n y1 = 0.5 if y1 is None else y1\n y2 = nymax+0.5 if y2 is None else y2\n\n if mpl:\n if nc2-nc1 > 1:\n plt.subplot(1,nc2-nc1,nc+1)\n plt.axis('equal')\n else:\n if nc2-nc1 > 1: pg.pgpanl(nc-nc1+1,1)\n pg.pgwnad(x1,x2,y1,y2)\n\n # plot CCD\n ccd.plot(vmin,vmax,mpl,cmap)\n\n # per-frame finishing-off\n if mpl:\n plt.xlim(x1,x2)\n plt.ylim(y1,y2)\n else:\n pg.pgbox('bcnst',0,0,'bcnst',0,0)\n pg.pglab('X','Y','')\n\n if close:\n if mpl:\n plt.show()\n else:\n pg.pgclos()\n\n # return intensity range(s) used\n if len(prange) == 1:\n return prange[0]\n else:\n return tuple(prange)", "title": "" }, { "docid": "8e9ccfef2c7e8ed70ab4e174f5a03e28", "score": "0.4826807", "text": "def plot_intervals(self, ax=None):\n title = \"95% credible intervals\"\n return plot_conf_or_credible_interval(\n [\n self.credible_interval_95_mean_voting_prefs[0],\n self.credible_interval_95_mean_voting_prefs[1],\n ],\n self.group_names_for_display(),\n self.candidate_name,\n title,\n ax=ax,\n )", "title": "" }, { "docid": "6e1f0b0aafc0899e5bfeca9bf0ccd2d3", "score": "0.4826767", "text": "def AUROC_bootstrap_CI(y_test, y_score, interval 
= 0.95, n_bootstraps = 10000):\r\n\r\n #print(\"\\nCalculating {}% confidence interval.\".format(interval*100))\r\n #print(\"Bootstrapping with {} random samples.\\n\".format(n_bootstraps))\r\n \r\n bootstrapped_scores = []\r\n rng = np.random.RandomState()\r\n \r\n for i in (range(n_bootstraps)):\r\n # Bootstrap by sampling with replacement on the prediction indices\r\n indices = rng.random_integers(0, len(y_score) - 1, len(y_score))\r\n if len(np.unique(y_test[indices])) < 2:\r\n # We need at least one positive and one negative sample for ROC AUC\r\n # to be defined: reject the sample\r\n continue\r\n\r\n score = roc_auc_score(y_test[indices], y_score[indices])\r\n bootstrapped_scores.append(score)\r\n\r\n sorted_scores = np.sort(np.array(bootstrapped_scores))\r\n \r\n # Compute the lower and upper bound of the confidence interval\r\n low = (1 - interval)/2\r\n high = 1 - low\r\n confidence_lower = sorted_scores[int(low * len(sorted_scores))]\r\n confidence_upper = sorted_scores[int(high * len(sorted_scores))]\r\n \r\n return (confidence_lower, confidence_upper)", "title": "" }, { "docid": "64c1238c185e3300e8c63c946604f245", "score": "0.48221225", "text": "def plot_concentration(axis, data, x, bw, color, label, linewidth, alpha):\n kernel = gaussian_kde(data, bw)\n density = kernel(x)\n axis.plot(x, density, lw=linewidth, color=color, label=label)\n axis.fill_between(x, 0.0, density, color=color, alpha=alpha)", "title": "" }, { "docid": "595e1a7fb298f63f868fa3bf91023673", "score": "0.4816136", "text": "def summary_plots(x_array,\n y_array,\n log_base=np.log10,\n variable_name=None,\n distribution_type=stats.distributions.norm):\n\n x = np.sort(x_array) # assert to check the data; better to use argsort of x and y to make robust against if x and y are linked\n y = np.sort(y_array)\n if not (x == 0).any():\n logx = log_base(x)\n else:\n print('{} has zero values'.format(variable_name))\n xl = x.copy()\n xl[xl == 0] = xl[np.nonzero(x)][0]-10**-4 # subtract small value from all the data?\n logx = log_base(xl)\n \n log_type = str(log_base).split(' ')[-1]\n \n fig = plt.figure(figsize = [10,10]) \n # ECDF\n ax1 = fig.add_subplot(2,2,1) # set logscale on xaxes\n ax1 = plt.scatter(x,\n y, \n s=0.5,\n alpha=0.7,\n c='k')\n ax1.axes.yaxis.set_label_text('ECDF')\n ax1.axes.xaxis.set_label_text(variable_name)\n \n # logged ECDF\n ax2 = fig.add_subplot(2,2,2)\n ax2 = plt.scatter(logx,\n y,\n s=0.5,\n alpha=0.7,\n c='k')\n ax2.axes.yaxis.set_label_text('ECDF') \n ax2.axes.xaxis.set_label_text('{}_{}'.format(\n log_type,\n variable_name\n ))\n \n # histogram\n ax3 = fig.add_subplot(2,2,3)\n ax3 = sns.distplot(logx,\n norm_hist=True,\n color='k')\n ax3.axes.xaxis.set_label_text('{}_{}'.format(\n log_type,\n variable_name\n ))\n ax3.axes.yaxis.set_label_text('probability density')\n \n # qq plot\n ax4 = fig.add_subplot(2,2,4)\n sm.qqplot(logx,\n distribution_type,\n line='q',\n fit=True,\n ax=ax4,\n markersize=0.5,\n color='k')\n ax4.axes.xaxis.set_label_text('Theoretical quantiles ({})'.format(\n str(distribution_type).split('.')[-1]\n ))\n ax4.axes.yaxis.set_label_text('Sample quantiles ({}_{})'.format(\n log_type,\n variable_name\n ))\n\n return", "title": "" } ]
1878690f3a2e767f1c480802400d82f0
Decorator for Flask API routes that configures a structlog logging context. This decorator also logs the entry and exit from the route.
[ { "docid": "2d832e7e5483354d907c2b610640ffa1", "score": "0.7450892", "text": "def log_route(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # new() creates a new logging context\n logger = get_logger(\"ltddasher\").new()\n # bind information about request that appears in all\n logger = logger.bind(request_id=str(uuid.uuid4()),\n method=request.method,\n path=request.path)\n logger.info(\"new request\")\n return_value = f(*args, **kwargs)\n logger.info(\"returned\", status=return_value[1])\n return return_value\n return decorated_function", "title": "" } ]
[ { "docid": "95d41ca0a128e51527551a1736829200", "score": "0.58519137", "text": "def register_logging(app):\n\n class RequestFormatter(logging.Formatter):\n\n def format(self, record):\n record.url = request.url\n record.remote_addr = request.remote_addr\n return super(RequestFormatter, self).format(record)\n\n request_formatter = RequestFormatter(\n '[%(asctime)s] - %(name)s - %(remote_addr)s requested %(url)s\\n'\n '%(levelname)s in %(module)s: %(message)s'\n )\n\n if not app.debug:\n file_handler = RotatingFileHandler(\n os.path.join(basedir, 'logs/MMCs.log'),\n maxBytes=10 * 1024 * 1024, backupCount=10)\n else:\n file_handler = RotatingFileHandler(\n os.path.join(basedir, 'logs/MMCs-dev.log'),\n maxBytes=10 * 1024 * 1024, backupCount=10)\n\n file_handler.setFormatter(request_formatter)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n\n mail_handler = SMTPHandler(\n mailhost=app.config['MAIL_SERVER'],\n fromaddr=app.config['MAIL_USERNAME'],\n toaddrs=app.config['ADMIN_EMAIL'],\n subject='MMCs Application Error',\n credentials=('apikey', app.config['MAIL_PASSWORD']))\n mail_handler.setLevel(logging.INFO)\n mail_handler.setFormatter(request_formatter)\n app.logger.addHandler(mail_handler)", "title": "" }, { "docid": "3894e54d943978332fb9e742f707ee86", "score": "0.5834488", "text": "def _log_task(func):\n @functools.wraps(func)\n def wrapped(self, *args, **kwargs):\n logger.info(sanitize_request(self.request))\n return func(self, *args, **kwargs)\n return wrapped", "title": "" }, { "docid": "a817b7f31e086c203681cf8e4b5b106f", "score": "0.5661278", "text": "def log_request(self, *args):", "title": "" }, { "docid": "9b259c3a0f1ec497d5cee3e3c081fd23", "score": "0.5641445", "text": "def logger(func):\n\t@wraps(func)\n\tdef wrapper( *args, **kwargs ):\n logging.basicConfig( filename = '{}.log'.format( func.__name__ ),\n\t\t\t\t\t\t\t format = '%(asctime)s -- %(levelname)s:%(name)s: %(message)s',\n\t\t\t\t\t\t\t datefmt = '%Y/%m/%d-%H:%M:%S',\n\t\t\t\t\t\t\t level = logging.INFO )\n\t\t\n\t\t# custom the logging information\n\t\tlogging.info( 'Ran with args: {} and kwargs: {}'.format( args, kwargs ) )\n\t\treturn func( *args, **kwargs )\n\n\treturn wrapper", "title": "" }, { "docid": "891779e5412aad0dba36ac07680f731f", "score": "0.5631555", "text": "def log(parameters=False, response=False):\n def decorator(func):\n def wrapper(*args, **kwargs):\n if parameters:\n LOGGER.info(PARAM_LOG_MESSAGE, args)\n func_response = func(*args, **kwargs)\n if response:\n LOGGER.info(RESPONSE_LOG_MESSAGE, func_response)\n return func_response\n return wrapper\n return decorator", "title": "" }, { "docid": "53d747204decc7232aad0a59ee9c8c0a", "score": "0.5608421", "text": "def addDebugLogToHandle(func):\n\n def handle(self, pkt, retPak):\n return func(self, pkt, retPak)\n\n return handle", "title": "" }, { "docid": "124e31acdc59fc562425484972de81d6", "score": "0.55987227", "text": "def log_function(handler):\n\n _log_meta = dict(\n app_id=\"app-up\",\n user=\"-\",\n username=\"-\",\n response_code=\"-\",\n\n http_uri=handler.request.uri,\n http_status=handler.get_status(),\n http_method=handler.request.method,\n http_version=handler.request.version,\n\n remote_ip=handler.request.remote_ip,\n request_time=1000.0 * handler.request.request_time(),\n\n response_length=handler.request.headers.get(\"Content-Length\", 0),\n request_args=handler.request.arguments,\n request_date=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n request_time = _log_meta['request_time']\n if 
float(request_time) > 1000:\n _log_meta['request_time'] = str(round(float(request_time) / 1000, 2)) + 's'\n else:\n _log_meta['request_time'] = str(round(float(request_time), 2)) + 'ms'\n\n if handler.get_status() < 400:\n log_method = access_log.info\n elif handler.get_status() < 500:\n log_method = access_log.warning\n else:\n log_method = access_log.error\n\n log_method(\"[%(request_date)s] %(remote_ip)s %(user)s %(username)s - \\\"%\"\n \"(http_method)s %(http_uri)s %(http_version)s\\\" %(http_status)s - \"\n \"%(response_length)sbyte - %(request_time)s - %(app_id)s - [%(request_args)s] -\", _log_meta)", "title": "" }, { "docid": "ec11f16873d562f52930c8aded0625c1", "score": "0.5591312", "text": "def func2log_request(cls, func, logfunc):\n @wraps(func)\n def wrapped(*_, **__):\n logfunc({'headers': {k: v for k, v in request.headers},\n 'json': request.json,\n })\n return func(*_, **__)\n\n return wrapped", "title": "" }, { "docid": "6811e0882a3f9b1acc6b52d6ed78e303", "score": "0.5585001", "text": "def dh_log(self, func):\n\n def call(*args, **kwargs):\n \"\"\" Actual wrapping \"\"\"\n self.__entry(func)\n result = func(*args, **kwargs)\n self.__exit(func)\n return result\n\n return call", "title": "" }, { "docid": "2fde327f8c642221b54ec07723f9a373", "score": "0.5562439", "text": "def log_response_hook(response, *args, **kwargs): # pylint: disable=unused-argument\n log.info('[%s] [%d] [%f] %s',\n response.request.method, response.status_code, response.elapsed.total_seconds(), response.url)", "title": "" }, { "docid": "59e9e06b6dfba0015fa8986b91f1b70f", "score": "0.5532234", "text": "def debug_only(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if not current_app.config['DEBUG']:\n abort(404)\n current_app.logger.warn('This endpoint disabled in live builds: {}'.format(request.url))\n return func(*args, **kwargs)\n return decorated_view", "title": "" }, { "docid": "4af110cb3e8f4cc76310fa660f1b08aa", "score": "0.55158883", "text": "def setup_logging(app: Flask) -> None:\n\n logging.basicConfig(\n handlers=[\n logging.StreamHandler(),\n RotatingFileHandler(\n filename=app.config[\"LOG_FILE_PATH\"],\n maxBytes=app.config[\"LOG_MAX_BYTES\"],\n backupCount=app.config[\"LOG_BACKUP_COUNT\"],\n ),\n ],\n format=\"[%(asctime)s] %(levelname)s \"\n \"[%(name)s.%(funcName)s:%(lineno)d] %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n\n app.logger.setLevel(app.config[\"LOG_LEVEL\"])", "title": "" }, { "docid": "3a6602c34ef9cce7ad4548d7c93c6b36", "score": "0.55021465", "text": "def log(ctx, **kwargs):\n ctx.init_args(**kwargs)\n pass", "title": "" }, { "docid": "dd838d3b914d5a5c1428c3007ce2c41a", "score": "0.5499974", "text": "def log(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n actual_logger.info('doing %s' % func.__name__)\n actual_logger.debug(\n 'with args: %s and kwargs: %s' % (args, kwargs)\n )\n result = func(*args, **kwargs)\n actual_logger.debug('\\nresult: %s' % result)\n return result\n return wrapper", "title": "" }, { "docid": "e68a41598e22553a2772d28800542797", "score": "0.5486605", "text": "def log_request(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "617a850713f84cbc72e70dcc85907069", "score": "0.5443267", "text": "def trace(f):\n @wraps(f)\n def wrapper(self,*args,**kwargs):\n self.trace.append(f)\n f(self,*args,**kwargs)\n return wrapper", "title": "" }, { "docid": "25ce69ee5f129e9131e292636538a320", "score": "0.5419718", "text": "async def access_logger(app, handler):\n async def logging_handler(request):\n start_time = time.time()\n 
request_name = hex(int(start_time * 10000))[-6:]\n client_ip, _ = request.transport.get_extra_info(\n 'peername', ('UNKNOWN', None))\n\n # log request\n LOGGER.info(\n 'Request %s: \"%s %s\" from %s',\n request_name,\n request.method,\n request.rel_url,\n client_ip)\n\n def log_response(response):\n # pylint: disable=protected-access\n content_length = response._headers.get('Content-Length',\n 'UNKNOWN')\n if content_length == 'UNKNOWN':\n LOGGER.info(\n 'Response %s: %s status, %s size, in %.3fs',\n request_name,\n response._status,\n content_length,\n time.time() - start_time)\n else:\n LOGGER.info(\n 'Response %s: %s status, %sB size, in %.3fs',\n request_name,\n response._status,\n content_length,\n time.time() - start_time)\n\n try:\n response = await handler(request)\n log_response(response)\n return response\n except web.HTTPError as e:\n log_response(e)\n raise e\n\n return logging_handler", "title": "" }, { "docid": "9757aff0591141d2e5b784702006289d", "score": "0.5414845", "text": "def tracer(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n params = ', '.join(tuple(f'{a}' for a in args)\n + tuple(f'{k}={v}' for k, v in kwargs.items()))\n LOG.debug(f'{func.__name__}({params})')\n return func(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "d41d89a5c2a4942536d6b46fb347f159", "score": "0.5414133", "text": "def get_route_handler(self) -> Callable:\n original_route_handler = super().get_route_handler()\n async def custom_route_handler(request: Request) -> Response:\n \"\"\"\n Traces given request and its response.\n :param request: to trace\n \"\"\"\n should_ignore_request = True\n try:\n epsagon.trace.trace_factory.switch_to_async_tracer()\n if not ignore_request('', request.url.path.lower()):\n should_ignore_request = False\n trace = epsagon.trace.trace_factory.get_or_create_trace()\n trace.prepare()\n\n except Exception as exception: # pylint: disable=W0703\n return await original_route_handler(request)\n\n if should_ignore_request:\n return await original_route_handler(request)\n\n runner = None\n response = None\n try:\n body = await request.json()\n except json.decoder.JSONDecodeError:\n body = ''\n try:\n runner = FastapiRunner(time.time(), request, json.dumps(body))\n trace.set_runner(runner)\n collect_container_metadata(runner.resource['metadata'])\n except Exception as exception: # pylint: disable=W0703\n warnings.warn('Could not extract request', EpsagonWarning)\n raised_err = None\n try:\n response: Response = await original_route_handler(request)\n except Exception as exception: # pylint: disable=W0703\n raised_err = exception\n traceback_data = get_traceback_data_from_exception(exception)\n trace.runner.set_exception(exception, traceback_data)\n\n try:\n if not raised_err and response is not None and runner:\n if ignore_request(\n response.headers.get('Content-Type', '').lower(),\n ''\n ):\n return response\n\n runner.update_response(response)\n\n if runner:\n epsagon.trace.trace_factory.send_traces()\n except Exception as exception: # pylint: disable=W0703\n print_debug('Failed to send traces: {}'.format(exception))\n\n if raised_err:\n raise raised_err\n\n return response\n\n return custom_route_handler", "title": "" }, { "docid": "194e78ac8ed4468a8c2e9afd96855c1b", "score": "0.5408171", "text": "def __init__(self, decorator_fn):\n super(GRPCMiddleware.MiddlewareInterceptor, self).__init__()\n self._decorator_fn = decorator_fn", "title": "" }, { "docid": "293b9e60642d8fb4a7cb4263dc9453c2", "score": "0.5406406", "text": "def logger(func):\n 
@wraps(func)\n def wrapper(self, *args):\n old_parameters = self._parameters\n old_variables = self._variables\n timestamp = str(datetime.datetime.now())\n func(self, *args)\n if DictDiffer(old_parameters, self._parameters).changes:\n self._log.append((timestamp,\n DictDiffer(old_parameters, self._parameters).changes))\n if DictDiffer(old_parameters, self._parameters).changes:\n self._log.append(DictDiffer(old_variables, self._parameters).changes)\n return wrapper", "title": "" }, { "docid": "83dddf5285cfd245b1a36bf776741561", "score": "0.53888744", "text": "def log_decorator(wrapped):\n\n def log_enter_exit(*args, **kwargs):\n arguments = \"\"\n LOGGER.debug(\"{}({}) [ENTERING]\".format(wrapped.__name__, arguments))\n result = wrapped(*args, **kwargs)\n LOGGER.debug(\"{}() [LEAVING]\".format(wrapped.__name__))\n return result # noqa: R504\n\n return log_enter_exit", "title": "" }, { "docid": "d7af5a3deedd9c132b9efa24899a41ed", "score": "0.53722113", "text": "def setup_access_logging(app, logger, debug):\n def before():\n g.request_start_time = time.time()\n\n def after(response):\n try:\n if hasattr(g, 'request_start_time'):\n time_taken = (time.time() - g.request_start_time)\n time_taken = '%.2fms' % (float(time_taken) * 1000.0)\n else:\n time_taken = ' '\n\n code = response.status_code\n method = request.method\n user_agent = request.headers.get('User-Agent', '<unknown user agent>')\n url = '%s' % (request.path)\n if request.query_string:\n url += '?%s' % (request.query_string)\n\n if not debug or not url.endswith(('.html', '.css', '.js')):\n # During debugging suppress html, css and js file access logging\n logger.info('%s %s %s %s %s', method, url, code, time_taken, user_agent)\n except:\n # Do not crash everything because of failing logger\n pass\n\n return response\n\n app.before_request(before)\n app.after_request(after)", "title": "" }, { "docid": "25bcfcce3d8699c9e5b50350abdf9557", "score": "0.5370295", "text": "def refine_logger(logger, **kwargs):\n return partial(logger, **kwargs)", "title": "" }, { "docid": "7fc7e7c8a355930d162c91f5803a50a7", "score": "0.53699064", "text": "def route(url, **kwargs):\n \n def decorator(func):\n if getattr(func, 'route', None):\n func.route += [url]\n func.route_kwargs += [kwargs]\n else:\n func.route = [url]\n func.route_kwargs = [kwargs]\n return func\n\n return decorator", "title": "" }, { "docid": "6d28b6e3cef0df0e62a28bcda2a52131", "score": "0.5369155", "text": "def logger():\n\n def log(func):\n \"\"\" Decorator to print info about function being called \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n actual_logger.info('doing %s' % func.__name__)\n actual_logger.debug(\n 'with args: %s and kwargs: %s' % (args, kwargs)\n )\n result = func(*args, **kwargs)\n actual_logger.debug('\\nresult: %s' % result)\n return result\n return wrapper\n return log", "title": "" }, { "docid": "ed1f504b5f476979b538aab56eeaf93d", "score": "0.5364105", "text": "def logbodies(self, func):\n @functools.wraps(func)\n def wrapper(container, *args, **kwargs):\n request = container.request\n request_data = request.get_data()\n\n def log(message, data):\n container.logger.info(\"{0} : \\n {1}\".format(message, data))\n\n if request_data:\n request_data = json.dumps(\n json.loads(\n request_data\n ),\n indent=4,\n separators=(',', ': ')\n )\n\n log(\"REQUEST BODY\", request_data)\n response = func(container, *args, **kwargs)\n if response.headers[\"content-type\"] == \"application/json\":\n log(\"RESPONSE DATA\", response.data)\n return response\n\n return 
wrapper", "title": "" }, { "docid": "2ee8dceab79659880577fa2b5a496e81", "score": "0.53604823", "text": "def tracefunc(f):\n def _tracefunc(*args, **kwargs):\n applogs_dir = core.getenv(\"APPLOGS_DIR\")\n day = time.strftime(\"%Y%m%d\")\n logfile = os.path.join(applogs_dir, day + '.log')\n logformat = \"%(asctime)s|%(clientip)s|%(rtime)s|%(user)s|%(levelname)s|%(funcname)s|%(message)s\"\n logging.basicConfig(filename=logfile, format=logformat, \n datefmt=\"%Y/%m/%d %H:%M:%S\", level=logging.DEBUG)\n log_info = cStringIO.StringIO()\n stdout_old = sys.stdout\n sys.stdout = log_info\n log_error = cStringIO.StringIO()\n stderr_old = sys.stderr\n sys.stderr = log_error\n log_trace = cStringIO.StringIO()\n t = time.time()\n try:\n r = f(*args, **kwargs)\n except:\n traceback.print_exc(file=log_trace)\n data = {\n \"clientip\": os.environ.get(\"REMOTE_ADDR\", \"null\"),\n \"user\": os.environ.get(\"USER\", \"null\"),\n \"funcname\": f.func_name,\n \"rtime\": \"%dus\" % int((time.time() - t) * 1000000),\n }\n err = log_error.getvalue()\n if err:\n logging.error(err.replace('\\n', '\\\\n'), extra=data)\n sys.stderr = stderr_old\n info = log_info.getvalue()\n trace = log_trace.getvalue()\n if (not err and not trace) or info:\n logging.info(info.replace('\\n', '\\\\n'), extra=data)\n sys.stdout = stdout_old\n if trace:\n logging.error(trace, extra=data)\n return error.error_response(500, trace=trace.replace('\\n', '\\\\n'))\n return r\n return _tracefunc", "title": "" }, { "docid": "25d36e3bd132f6bd480f0135039bfaf1", "score": "0.5348755", "text": "def decorator(f):\n route_pattern = self.build_route_pattern(route_str)\n self.routes.append((route_pattern, f))\n\n return f", "title": "" }, { "docid": "8df4d39d5cb2a11ad151106851a01406", "score": "0.53436625", "text": "def run_logger(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"Wraps logging feature around function\"\"\"\n root_dir = os.path.abspath(os.curdir)\n log_file = os.path.join(root_dir, \"tensorfree.log\")\n log_form = \"[%(asctime)s] %(levelname)-8s %(message)s\"\n logging.basicConfig(\n level=logging.INFO, format=log_form, filename=log_file, filemode=\"w+\"\n )\n\n logger = logging.getLogger(\"wrap_logger\")\n logger.info(f\"{func.__name__} - {args}\")\n\n return func(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "d775838e37ee7ff4413516b3756915eb", "score": "0.52928925", "text": "def funclog(logger):\n\n # check if logger is from structlog\n use_structlog = False\n if STRUCTLOG:\n if isinstance(logger, structlog._config.BoundLoggerLazyProxy):\n real_logger = logger\n use_structlog = True\n # If a Logger object is passed in, use that. 
Otherwise, get the default\n # Logger.\n if use_structlog:\n pass\n elif isinstance(logger, Logger):\n real_logger = logger\n else:\n real_logger = getLogger()\n\n # __qualname__ is prettier but it didn't get added until 3.5\n name_attr = '__name__' if sys.version_info < (3, 5) else '__qualname__'\n\n def get_arg_string(args, kwargs):\n \"\"\"Convert args and kwargs to a pretty string.\"\"\"\n return ', '.join([\"'{}'\".format(a) if type(a) == str else\n '{}'.format(a) for a in args] +\n [\"{}='{}'\".format(a, v) if type(v) == str else\n '{}={}'.format(a, v) for a, v in sorted(kwargs.items())])\n\n def real_decorator(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n frame_info = inspect.getframeinfo(inspect.stack()[1][0])\n filename = os.path.basename(frame_info.filename)\n lineno = frame_info.lineno\n func_name = getattr(fn, name_attr)\n arg_string = get_arg_string(args, kwargs)\n source_info = '{}:{}:{}({})'.format(filename, lineno, func_name,\n arg_string)\n if use_structlog:\n real_logger.debug(u'calling', source_info=source_info)\n else:\n real_logger.debug(u'calling %s', source_info)\n try:\n res = fn(*args, **kwargs)\n except Exception as e:\n if use_structlog:\n real_logger.exception(\n u'{} threw exception'.format(source_info), e=e)\n else:\n real_logger.exception(\n u'%s threw exception:\\n%s', source_info, e)\n raise\n if use_structlog:\n real_logger.debug(u'{} returned'.format(source_info), res=res)\n else:\n real_logger.debug(u'%s returned: %s', source_info, res)\n return res\n return wrapper\n\n if type(logger) == type(real_decorator):\n return real_decorator(logger)\n\n return real_decorator", "title": "" }, { "docid": "fcdb2135b5922cf07b8c0cf7a4514a24", "score": "0.5292739", "text": "def trace(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n logger.debug('Start {!r}'. format(func.__name__))\n result = func(*args, **kwargs)\n logger.debug('End {!r}'. format(func.__name__))\n return result\n return wrapper", "title": "" }, { "docid": "9bead6bae575c99ba45ab0d9c0b65b82", "score": "0.5281288", "text": "def test_falcon_request_log(headers, expected):\n app = falcon.API(middleware=falcon_logging.LoggingMiddleware())\n app.add_route('/test/path', TestResource())\n\n _set_up_falcon_logging(app)\n _check_falcon_request_log(app, headers, expected)", "title": "" }, { "docid": "945c05edd7a8ecbf827b4e26f0baecbe", "score": "0.5255199", "text": "def to_api_logger(logger):\n def api_logger_adapter(msg, *args, **kwargs):\n extra = {}\n request = kwargs.pop('request', None)\n if request:\n path = request.path\n if request.query_string:\n path += '?' 
+ request.query_string\n extra['path'] = path\n\n response = kwargs.pop('response', None)\n if response and response.status:\n extra['status'] = int(response.status.split()[0])\n\n extra.update(kwargs.get('extra', {}))\n kwargs['extra'] = extra\n\n return msg, args, kwargs\n\n return adapt_logger(logger, api_logger_adapter)", "title": "" }, { "docid": "b045075f4fff1c2e219ac84a21b4e520", "score": "0.52398926", "text": "def logheaders(self, func):\n @functools.wraps(func)\n def wrapper(container, *args, **kwargs):\n request = container.request\n\n def log(message, data):\n container.logger.info(\"{0} : \\n {1}\".format(message, data))\n\n log(\"REQUEST HEADERS\", request.headers)\n response = func(container, *args, **kwargs)\n log(\"RESPONSE HEADERS\", response.headers)\n return response\n\n return wrapper", "title": "" }, { "docid": "6c57010ce407317926941da62fbc0d4d", "score": "0.52398664", "text": "def wrapper(*args, **kwargs):\n root_dir = os.path.abspath(os.curdir)\n log_file = os.path.join(root_dir, \"tensorfree.log\")\n log_form = \"[%(asctime)s] %(levelname)-8s %(message)s\"\n logging.basicConfig(\n level=logging.INFO, format=log_form, filename=log_file, filemode=\"w+\"\n )\n\n logger = logging.getLogger(\"wrap_logger\")\n logger.info(f\"{func.__name__} - {args}\")\n\n return func(*args, **kwargs)", "title": "" }, { "docid": "753ca1ce234d40737596497e5e4617a6", "score": "0.52383214", "text": "def _log(self):\r\n self.application.log_request(self)", "title": "" }, { "docid": "f11b4910c1a1c3044de5fe95e2ff2ae8", "score": "0.52234966", "text": "def log_all(self, func):\n\n @functools.wraps(func)\n def wrap(*args, **kwargs):\n name = func.__name__\n\n # Logging Arguments\n self.log_info(f\"{name} Arguments:\")\n for argument in args:\n self.log(f\"\\t{argument}\")\n\n # Logging kwargs if exist\n if len(kwargs) > 0:\n self.log(f\"{name} Key Word Arguments:\")\n for key, value in kwargs.items():\n self.log(f\"\\t {key} : {value}\")\n\n # Logging Return Value\n result = func(*args, **kwargs)\n self.log(f\"{name} Return Value:\")\n self.log(f\"\\t{result}\")\n self.log(\"-\" * 20)\n return result\n\n return wrap", "title": "" }, { "docid": "85a0d750c52cf9db555d2db37c6f9588", "score": "0.52221197", "text": "def configure_logging(app):\n if app.config[\"DEBUG\"]: # pragma: no cover\n stderr_log_level = logging.DEBUG\n else:\n stderr_log_level = DEFAULT_STDERR_LOGLEVEL\n if app.config[\"QUIET\"]: # pragma: no cover\n file_log_level = logging.ERROR\n else:\n file_log_level = DEFAULT_FILE_LOGLEVEL\n app.logger.addFilter(ContextualFilter())\n app.logger.setLevel(logging.DEBUG)\n for handler in app.logger.handlers: # set levels on existing handlers\n handler.setLevel(stderr_log_level)\n handler.setFormatter(\n logging.Formatter(app.config[\"STDERR_LOG_FORMAT\"])\n )\n #\n # Start Sentry monitoring, if SENTRY_DNS is configured.\n #\n if app.config[\"SENTRY_DSN\"] != \"\": # pragma: no cover\n from raven.contrib.flask import Sentry\n\n # import logging\n Sentry(\n app,\n dsn=app.config[\"SENTRY_DSN\"],\n # logging=True,\n # level=logging.ERROR\n )\n\n #\n # Start log file.\n #\n if app.config[\"LOGFILE\"]: # start a log file\n logfile_name = app.config[\"LOGGER_NAME\"] + \"_errors.log\"\n app.config[\"LOGFILE_NAME\"] = logfile_name\n logfile_path = Path(app.config[\"LOG\"]) / logfile_name\n if app.config[\"DEBUG\"]: # pragma: no cover\n print(f'Logging to file \"{str(logfile_path)}\".', file=sys.stderr)\n if not logfile_path.parent.is_dir(): # create logs/ dir\n try: # pragma: no cover\n 
logfile_path.parent.mkdir(\n mode=int(app.config[\"DIR_MODE\"], 8), parents=True\n )\n except OSError: # pragma: no cover\n app.logger.error(\n 'Unable to create logfile directory \"%s\"',\n logfile_path.parent,\n )\n raise OSError\n log_handler = logging.handlers.RotatingFileHandler(\n str(logfile_path),\n maxBytes=app.config[\"LOGFILE_MAXBYTES\"],\n backupCount=app.config[\"LOGFILE_BACKUPCOUNT\"],\n )\n\n log_handler.setLevel(file_log_level)\n werkzeug_logger = logging.getLogger(\"werkzeug\")\n werkzeug_logger.addHandler(log_handler)\n for handler in app.logger.handlers: # set levels on existing handlers\n handler.setLevel(file_log_level)\n handler.setFormatter(\n logging.Formatter(app.config[\"STDERR_LOG_FORMAT\"])\n )\n app.logger.addHandler(log_handler)\n #\n # Do some logging on startup.\n #\n app.logger.debug('Command line: \"%s\"', \" \".join(sys.argv))\n app.logger.debug(\n \"%s version %s\", app.config[\"LOGGER_NAME\"], app.config[\"VERSION\"]\n )\n app.logger.debug(\n \"Run started at %s\", datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n )", "title": "" }, { "docid": "11fe16c8983acbcde91fb8f94634f036", "score": "0.52079326", "text": "def logged(name=None):\n def decorate(func):\n logatt_name = name if name else 'logname' # nom de l'Attribut\n # no need to worry about that, allows for future extension...\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n print 'log'\n logname = self.__dict__[logatt_name] # looks into the object self the logger name\n log = logging.getLogger(logname)\n # log.info(format_str(func.__module__))\n log.info(format_str(func.__name__))\n #log.debug('args:' + format_str(args) + format_str(kwargs))#un peu lourd... on enleve ca pour l'instant\n try:\n print 'debut run fun'\n res = func(self, *args, **kwargs)\n # TODO : parse res and if in res you've got the name Exception in java style, raise\n # an externalError\n print 'fin run fun'\n except Exception as e:\n print 'exception catchée'\n # log exceptions only if it hasnt been already logged\n # avoids loggind an exception multiple times \n if e.logged is False :\n print 'exception catchéeprintée'\n e.logged = True\n # log level this depending on its name\n e_name = e.__class__.__name__ \n if 'exception' in e.__class__.__name__.lower():\n log.warning(e_name, exc_info=True)\n else: # alors erreur\n log.error(e_name, exc_info=True)\n raise #passe le même objet exception\n if res:\n log.debug(format_str(res))\n return res\n return wrapper\n return decorate", "title": "" }, { "docid": "11a9d665367faa3e140381238c4f7d01", "score": "0.5198694", "text": "def context_wrapper(meth):\n\n def add_context(request, *args, **kwargs):\n ctx = None\n\n xtr = request.get_header('X-TRACE')\n if xtr:\n md = oboe.Metadata.fromString(xtr)\n ctx = oboe.Context(md)\n ctx.set_as_default()\n\n try:\n res = meth(request, *args, **kwargs)\n\n if oboe.Context.get_default().is_valid():\n oboe.Context.clear_default()\n\n return res\n\n except:\n raise\n\n\n return add_context", "title": "" }, { "docid": "6c50830633af0529c878ebad565aedf7", "score": "0.5176102", "text": "def configure_logging(app, environment):\n simple = logging.Formatter(\n fmt=(\n \"%(levelname)s %(asctime)s(%(name)s#%(lineno)d)\"\n \"[%(method)s %(host)s%(path)s]\"\n \"%(data)s - %(message)s\"\n )\n )\n\n email = logging.Formatter(\n fmt=(\n \"%(asctime)s - %(levelname)s %(name)s\\n\"\n \"%(pathname)s@%(funcName)s#%(lineno)d\\n\"\n \"%(method)s @%(host)s%(path)s\\n\\n\"\n \"HEADERS: %(headers)s\\n\\n\"\n \"INPUT: %(data)s\\n\\n\"\n \"%(message)s\"\n\n )\n )\n\n if 
environment == \"devel\":\n ch = logging.StreamHandler()\n ch.addFilter(ResponseContextFilter())\n ch.setFormatter(simple)\n rl = logging.getLogger()\n rl.setLevel(logging.DEBUG)\n rl.addHandler(ch)\n elif environment == \"staging\":\n rl = logging.getLogger()\n rl.setLevel(logging.DEBUG)\n if app.config.get('LOG_FILE_PATH', None):\n fh = logging.handlers.TimedRotatingFileHandler(\n app.config.get('LOG_FILE_PATH'),\n when='midnight',\n backupCount=30)\n fh.addFilter(ResponseContextFilter())\n fh.setFormatter(simple)\n fh.setLevel(logging.INFO)\n rl.addHandler(fh)\n elif environment == \"prod\":\n rl = logging.getLogger()\n rl.setLevel(logging.DEBUG)\n if app.config.get('ADMIN_EMAILS', None):\n eh = SMTPHandler('127.0.0.1',\n 'noc+mws@10gen.com',\n app.config.get('ADMIN_EMAILS'), 'MWS Failure')\n eh.setLevel(logging.ERROR)\n eh.setFormatter(email)\n rl.addHandler(eh)\n if app.config.get('LOG_FILE_PATH', None):\n fh = logging.handlers.TimedRotatingFileHandler(\n app.config.get('LOG_FILE_PATH'),\n when='midnight',\n backupCount=30)\n fh.addFilter(ResponseContextFilter())\n fh.setFormatter(simple)\n fh.setLevel(logging.INFO)\n rl.addHandler(fh)", "title": "" }, { "docid": "20ea9653e969c894fa380fb86c806331", "score": "0.51683295", "text": "def test_route():\n app.logger.debug(\"========== Test Method for admin: ==========\")\n info = test_local(meaningful=False, testing='logging', mod='log', id=1)\n pprint(info)\n print(\"************************************************************************************\")\n print(app.config.get('GAE_VERSION', 'UNKNOWN VERSION'))\n print(\"************************************************************************************\")\n # pprint(app.config)\n CloudLog.test_loggers(app, app.log_names, context='package')\n print(\"--------------------------------------------------\")\n return redirect(url_for('view', **info))", "title": "" }, { "docid": "9199fc40b2e5d84f315b14bd1e346c66", "score": "0.5160039", "text": "def main(config, flask_app):\n logger_setup(config)\n run_flask_app(flask_app)", "title": "" }, { "docid": "555e813739428678916ea3fca6aa9b90", "score": "0.5155073", "text": "def log_request(self, handler):\r\n if \"log_function\" in self.settings:\r\n self.settings[\"log_function\"](handler)\r\n return\r\n if handler.get_status() < 400:\r\n log_method = access_log.info\r\n elif handler.get_status() < 500:\r\n log_method = access_log.warning\r\n else:\r\n log_method = access_log.error\r\n request_time = 1000.0 * handler.request.request_time()\r\n log_method(\"%d %s %.2fms\", handler.get_status(),\r\n handler._request_summary(), request_time)", "title": "" }, { "docid": "435b31401f606115187599c08a746e7c", "score": "0.5127313", "text": "def log (**kw):\n\n if settings.DEV_SERVER:\n protocol = 'http'\n else:\n protocol = 'https'\n \n # build loggly url from values in settings.py \n loggly_url = \"%s://logs-01.loggly.com/inputs/%s/tag/%s/\" % (protocol, settings.LOGGLY_API_KEY, settings.LOGGLY_APP_NAME)\n \n #TODO: see if we can put app_name in the request header as \"Application\" instead of in a tag, \n # then we can use the tag for something else\n \n try:\n# logging.info(\"SERVER_SOFTWARE: %s\" % os.getenv('SERVER_SOFTWARE',''))\n logging.info (\"logging to %s\" % loggly_url)\n log_data = \"PLAINTEXT=\" + urllib2.quote(json.dumps(kw))\n response = urllib2.urlopen(loggly_url, log_data) \n return json.load(response) \n except Exception, err:\n #write external logging failures to the application log\n logging.error (\"Failed to send log entry to 
Loggly:%s\\n%s\" % (loggly_url, str(err)))", "title": "" }, { "docid": "feff3c26c035a7504f4612c79964ddd0", "score": "0.5119956", "text": "def tracer(func_name=None, method_name=None):\n\n def decorator(func):\n @functools.wraps(func)\n def trace_wrapper(*args, **kwargs):\n \"\"\"\n The trace wrapper use two variables:\n\n syft.hook.trace.active: True if we are in the recording mode\n of operations\n syft.hook.trace.out_of_operation: by default set to True, turns\n to False when executing a recorded operation to prevent from\n recording sub operations\n \"\"\"\n\n if syft.hook.trace.active and syft.hook.trace.out_of_operation:\n # Select if the tracer records a function or a method, not none or both\n assert (func_name is None) ^ (method_name is None)\n\n cmd_name = func_name or method_name\n\n if method_name is not None:\n # We extract the self with args[0]\n command = (cmd_name, args[0], args[1:], kwargs)\n else:\n command = (cmd_name, None, args, kwargs)\n\n syft.hook.trace.out_of_operation = False\n\n response = func(*args, **kwargs)\n\n syft.hook.trace.out_of_operation = True\n\n syft.hook.trace.logs.append((command, response))\n else:\n response = func(*args, **kwargs)\n\n return response\n\n return trace_wrapper\n\n return decorator", "title": "" }, { "docid": "a83d6877c2f5a8890bf2cf0ee09af85b", "score": "0.5102787", "text": "def setup_middleware(app: web.Application) -> None:\n app.middlewares.append(bind_logger)", "title": "" }, { "docid": "42eee2f88b61001e6065ec24190a6564", "score": "0.5088467", "text": "def activate(self):\n self._ident = self._env.get('my_ident', __name__, 'microservice')\n self._log_format = self._env.get('log_format', 'json')\n if self._log_format == 'text':\n self.logger = logging.getLogger(__name__)\n else:\n self.logger = structlog.get_logger(__name__)\n\n logging.basicConfig(\n format=\"%(message)s\",\n stream=sys.stdout,\n level=LEVELS.get(self._env.get('log_level', 'INFO').lower(), logging.INFO)\n )\n\n self._is_json = self._log_format == 'json'\n\n def add_service_name(logger, method_name, event_dict): # pylint: disable=unused-argument\n \"\"\"\n Add the service name to the event dict.\n \"\"\"\n event_dict['service'] = self._ident\n return event_dict\n\n processors = [\n structlog.processors.TimeStamper(fmt='iso'),\n structlog.stdlib.filter_by_level,\n add_service_name,\n structlog.stdlib.add_log_level,\n structlog.processors.format_exc_info,\n structlog.processors.JSONRenderer(sort_keys=True)\n ]\n structlog.configure(\n processors=processors,\n context_class=structlog.threadlocal.wrap_dict(dict),\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n cache_logger_on_first_use=True\n )\n #self.info('Logger activated', environment=self._env.environment)", "title": "" }, { "docid": "3c3f7e33202cf83a468df8fefbbe31d0", "score": "0.50763243", "text": "def _call_wrapper(self, attr):\n def logger_wrapper(msg, *args):\n \"\"\"\n A wrapper around the log object.\n \"\"\"\n call = getattr(self.activity_object._log, attr)\n call(msg, *args)\n try:\n msg = msg % args\n except:\n msg = str(msg)\n token, user = self.activity_object.getTokenUserForThread()\n if not token:\n return\n Monitor.log_activity(token, user, msg)\n return logger_wrapper", "title": "" }, { "docid": "72842145013a14e378825b367ca9622a", "score": "0.50712794", "text": "def trace(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n print(func.__name__, args, kwargs)\n res = func(*args, **kwargs)\n return res\n return inner", "title": "" }, { 
"docid": "d2c982e2898ea56d90011570fb35628f", "score": "0.50568", "text": "def debug_wrapper(f, *args, **kwargs):\n with debug_context():\n f(*args, **kwargs)", "title": "" }, { "docid": "8de707555221e4b14c3a8e1ddcf057b3", "score": "0.5043503", "text": "def configure_component_logger(\n *,\n component_name: str,\n component_id: str,\n component_type: Optional[str] = None,\n log_level: int = logging.INFO,\n max_bytes: Optional[int] = None,\n backup_count: Optional[int] = None,\n):\n logger = logging.getLogger(SERVE_LOGGER_NAME)\n logger.propagate = False\n logger.setLevel(log_level)\n if os.environ.get(DEBUG_LOG_ENV_VAR, \"0\") != \"0\":\n logger.setLevel(logging.DEBUG)\n\n factory = logging.getLogRecordFactory()\n\n def record_factory(*args, **kwargs):\n request_context = ray.serve.context._serve_request_context.get()\n record = factory(*args, **kwargs)\n if request_context.route:\n record.route = request_context.route\n if request_context.request_id:\n record.request_id = request_context.request_id\n return record\n\n logging.setLogRecordFactory(record_factory)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(ServeFormatter(component_name, component_id))\n stream_handler.addFilter(log_to_stderr_filter)\n logger.addHandler(stream_handler)\n\n logs_dir = os.path.join(\n ray._private.worker._global_node.get_logs_dir_path(), \"serve\"\n )\n os.makedirs(logs_dir, exist_ok=True)\n if max_bytes is None:\n max_bytes = ray._private.worker._global_node.max_bytes\n if backup_count is None:\n backup_count = ray._private.worker._global_node.backup_count\n if component_type is not None:\n component_name = f\"{component_type}_{component_name}\"\n log_file_name = LOG_FILE_FMT.format(\n component_name=component_name, component_id=component_id\n )\n file_handler = logging.handlers.RotatingFileHandler(\n os.path.join(logs_dir, log_file_name),\n maxBytes=max_bytes,\n backupCount=backup_count,\n )\n file_handler.setFormatter(ServeFormatter(component_name, component_id))\n logger.addHandler(file_handler)", "title": "" }, { "docid": "1f02442798e37ab727548071f887b7ad", "score": "0.50412196", "text": "def decorator_dispatcher(self, request, *args, **kwargs):\n pass", "title": "" }, { "docid": "e89d3747e19241120dafa5d1bb275a8b", "score": "0.5038794", "text": "def log(*unused_args):\n\t\tpass", "title": "" }, { "docid": "345acc127e9f02a4a26b693a5180e8f5", "score": "0.50327843", "text": "def logging(func):\n\n @functools.wraps(func)\n def wrapper_debug(*args, **kwargs):\n args_repr = [repr(a) for a in args]\n kwargs_repr = [f\"{k}={v!r}\" for k, v in kwargs.items()]\n signature = \", \".join(args_repr + kwargs_repr)\n LOGGER.info(\"Calling %s(%s)\", func.__name__, signature)\n start_time = time.perf_counter()\n value = func(*args, **kwargs)\n LOGGER.info(\"%s returned %s\", func.__name__, value)\n end_time = time.perf_counter()\n run_time = end_time - start_time\n LOGGER.info(\"Finished %s in %.4f secs\", func.__name__, run_time)\n return value\n\n return wrapper_debug", "title": "" }, { "docid": "bdfa18da9d7e1a105d0a42ef1f34ed1c", "score": "0.50284237", "text": "def after_feature(context, feature):\n logger = logging.getLogger(__name__)", "title": "" }, { "docid": "53264bd9f3c0fc2112a5844017f6d5bd", "score": "0.50251955", "text": "def add_route(app, fn, context=default_context):\n transmute_func = TransmuteFunction(fn, args_not_from_request=[\"request\"])\n handler = create_handler(transmute_func, context=context)\n get_swagger_spec(app).add_func(transmute_func, context)\n\n for p in 
transmute_func.paths:\n aiohttp_path = _convert_to_aiohttp_path(p)\n resource = app.router.add_resource(aiohttp_path)\n for method in transmute_func.methods:\n resource.add_route(method, handler)", "title": "" }, { "docid": "3521bc36233b6f3e2a3e6b18f3d6132d", "score": "0.50126487", "text": "def _log(type, message, *args, **kwargs):\n global _logger\n if _logger is None:\n import logging\n handler = logging.StreamHandler()\n _logger = logging.getLogger('werkzeug')\n _logger.addHandler(handler)\n _logger.setLevel(logging.INFO)\n getattr(_logger, type)(message.rstrip(), *args, **kwargs)", "title": "" }, { "docid": "cbe6f634f9b0e3e392e453b08cd93b34", "score": "0.5012005", "text": "def add_logging_hook(obj, func_name, log_func, module_name, log_args=True):\n\n def log_call(index, args, kwargs):\n if log_args:\n log_func(\n \"{indent}{module_name}-{index}: calling {func_name} with {args}, {kwargs}\".format(\n indent=_indent(),\n module_name=module_name,\n index=index,\n func_name=func_name,\n args=args,\n kwargs=kwargs,\n )\n )\n else:\n log_func(\n \"{indent}{module_name}-{index}: calling {func_name} with <REDACTED>\".format(\n indent=_indent(), module_name=module_name, index=index, func_name=func_name\n )\n )\n\n def log_return(index, ret):\n log_func(\n \"{indent}{module_name}-{index}: {func_name} returned {ret}\".format(\n indent=_unindent(),\n module_name=module_name,\n index=index,\n func_name=func_name,\n ret=ret,\n )\n )\n\n def log_exception(index, e):\n log_func(\n \"{indent}{module_name}-{index}: {func_name} RAISED {exc}\".format(\n indent=_unindent(),\n module_name=module_name,\n index=index,\n func_name=func_name,\n exc=str(e) or type(e),\n )\n )\n\n func_or_coro = getattr(obj, func_name)\n\n if (\n inspect.isawaitable(func_or_coro)\n or inspect.iscoroutine(func_or_coro)\n or inspect.iscoroutinefunction(func_or_coro)\n ):\n\n @functools.wraps(func_or_coro)\n async def coro_wrapper(*args, **kwargs):\n index = _get_next_call_index()\n log_call(index, args, kwargs)\n try:\n ret = await func_or_coro(*args, **kwargs)\n except Exception as e:\n log_exception(index, e)\n raise\n else:\n log_return(index, ret)\n return ret\n\n setattr(obj, func_name, coro_wrapper)\n else:\n\n @functools.wraps(func_or_coro)\n def func_wrapper(*args, **kwargs):\n index = _get_next_call_index()\n log_call(index, args, kwargs)\n try:\n ret = func_or_coro(*args, **kwargs)\n except Exception as e:\n log_exception(index, e)\n raise\n else:\n log_return(index, ret)\n return ret\n\n setattr(obj, func_name, func_wrapper)", "title": "" }, { "docid": "da64800cbf92a2ca0db4897382d60048", "score": "0.5006852", "text": "def debug_decorator(func):\r\n def decorated_func(*args, **kwargs):\r\n with debug_context(func.__name__):\r\n return_value = func(*args, **kwargs)\r\n return return_value\r\n return decorated_func", "title": "" }, { "docid": "e8b55a775efbd269637517dbe48d8a45", "score": "0.50034535", "text": "async def custom_route_handler(request: Request) -> Response:\n should_ignore_request = True\n try:\n epsagon.trace.trace_factory.switch_to_async_tracer()\n if not ignore_request('', request.url.path.lower()):\n should_ignore_request = False\n trace = epsagon.trace.trace_factory.get_or_create_trace()\n trace.prepare()\n\n except Exception as exception: # pylint: disable=W0703\n return await original_route_handler(request)\n\n if should_ignore_request:\n return await original_route_handler(request)\n\n runner = None\n response = None\n try:\n body = await request.json()\n except json.decoder.JSONDecodeError:\n body = 
''\n try:\n runner = FastapiRunner(time.time(), request, json.dumps(body))\n trace.set_runner(runner)\n collect_container_metadata(runner.resource['metadata'])\n except Exception as exception: # pylint: disable=W0703\n warnings.warn('Could not extract request', EpsagonWarning)\n raised_err = None\n try:\n response: Response = await original_route_handler(request)\n except Exception as exception: # pylint: disable=W0703\n raised_err = exception\n traceback_data = get_traceback_data_from_exception(exception)\n trace.runner.set_exception(exception, traceback_data)\n\n try:\n if not raised_err and response is not None and runner:\n if ignore_request(\n response.headers.get('Content-Type', '').lower(),\n ''\n ):\n return response\n\n runner.update_response(response)\n\n if runner:\n epsagon.trace.trace_factory.send_traces()\n except Exception as exception: # pylint: disable=W0703\n print_debug('Failed to send traces: {}'.format(exception))\n\n if raised_err:\n raise raised_err\n\n return response", "title": "" }, { "docid": "4b9c4ef2d32fadd87b569762b78fc87d", "score": "0.5001748", "text": "def perform_logging(log, fields, log_func, disp, intent, box):\n all_fields = merge(fields, intent.fields)\n log_func(log, all_fields, disp, intent, box)", "title": "" }, { "docid": "e8c3fcbf56bbd5633f30c5f4523247dc", "score": "0.50010514", "text": "def doctest_ServerBase_startup_logging():", "title": "" }, { "docid": "c55dc5078fd2453d7b81e9d0e477af00", "score": "0.5000129", "text": "def log_routine(level, label):\r\n def factory(func):\r\n @functools.wraps(func)\r\n def decorator(*args, **kwargs):\r\n if level not in __LOG_LEVELS:\r\n return func(*args, **kwargs)\r\n\r\n print '>>>> BEGIN ' + label\r\n result = func(*args, **kwargs)\r\n print '<<<< END ' + label\r\n return result\r\n return decorator\r\n return factory", "title": "" }, { "docid": "e0d3d8bae3997f5db425265f14c6a296", "score": "0.49887398", "text": "def _get_logging_context():\n return structlog.contextvars.merge_contextvars(None, None, {})", "title": "" }, { "docid": "7249e534e45e38ae27649b85a4c5c82e", "score": "0.49745837", "text": "def trace(func, api_name=''):\n if hasattr(func, 'api'):\n api_name = func.api\n def trace_func(self, *args, **kwargs):\n log.debug('%s: %s' % (api_name, args))\n return func(self, *args, **kwargs)\n trace_func.api = api_name\n return trace_func", "title": "" }, { "docid": "75421e0634dc084404f5f2e95970a03f", "score": "0.49733594", "text": "def initialize_structlog():\n processors = STRUCTLOG_PROCESSORS\n renderer = structlog.processors.JSONRenderer()\n\n if LOG_PRETTY:\n if not HAS_STRUCTLOG_DEV:\n raise ImportError('The structlog[dev] module is required when LOG_PRETTY=True.')\n renderer = structlog.dev.ConsoleRenderer()\n\n processors.append(renderer)\n\n # Configure the logging module to the most basic implementation possible.\n logging.basicConfig(level=LOG_LEVEL, stream=sys.stdout, format='%(message)s')\n\n structlog.configure_once(\n processors=processors,\n context_class=structlog.threadlocal.wrap_dict(dict),\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n cache_logger_on_first_use=True,\n )", "title": "" }, { "docid": "56affce03cedff7af37eaed1fed72026", "score": "0.49701038", "text": "def decorator(func: Callable) -> Callable:\n\n @api.response(403, message)\n @api.header(\n \"Authorization\",\n \"JWT token to authorize request. 
Example: 'Bearer <my token>'\",\n )\n @wraps(func)\n def wrapper(*args, **kwargs) -> Callable:\n \"\"\"\n validates JWT token is present,\n adds token to g\n \"\"\"\n\n auth = request.headers.get(\"Authorization\")\n\n if not auth:\n raise BadRequest(\n \"Unauthorized\",\n 401,\n {\"Request.Header.Authorization\": \"JWT token is required\"},\n )\n\n auth_list = auth.split(\" \")\n if len(auth_list) != 2:\n raise BadRequest(\n \"Invalid Auth Header\",\n 400,\n {\"Authorization\": \"JWT token has the format: 'Bearer <token>'\"},\n )\n\n token = auth_list[1]\n payload = User.decode_auth_token(token)\n\n # attaching token and payload to app context\n g.token = token\n g.payload = payload\n\n return func(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "44ccf70e27ff4dd2584468eb47213b72", "score": "0.4964829", "text": "def init_flask(self, flask_app):\n self.url_adapter = flask_app.url_map.bind('localhost')\n self.endpoint_lookup = {\n rule.endpoint: rule.rule\n for rule in flask_app.url_map.iter_rules()\n }\n\n @flask_app.route(self.path, endpoint=self.path)\n def get_metrics():\n registry = CollectorRegistry()\n MultiProcessCollector(registry)\n data = generate_latest(registry)\n response_headers = [\n ('Content-type', CONTENT_TYPE_LATEST),\n ('Content-Length', str(len(data))),\n ]\n return data, 200, response_headers", "title": "" }, { "docid": "94df4fe64b2725af9b3ba0f6583c1f7d", "score": "0.49618128", "text": "def log(self, extra):\n try:\n msg = \"{method} {path}{0}\".format(\n '?' if 'qs' in extra else '',\n **extra\n )\n except Exception:\n logger.exception('error generating access log', extra=extra)\n else:\n logger.info(msg, extra=extra)", "title": "" }, { "docid": "e453b5990028fb3f648bc04fc3e22a65", "score": "0.49467087", "text": "def function_logging(func):\n @wraps(func)\n def wraper(*args, **kwargs):\n logging.debug(\n f'Called {func.__name__}.\\n With args:{args} kwargs:{kwargs}')\n return func(*args, **kwargs)\n return wraper", "title": "" }, { "docid": "1bbd1998a7ed5c77b05ec58d639e68a7", "score": "0.4946654", "text": "def loggable(f, *args, **kws):\n self = args[0]\n self.Exec.indent += 1\n self.Exec.logger.debug(\"%s call: %s.%s\" % ('>' * self.Exec.indent, f.__module__, f.__name__))\n ret = f(*args, **kws)\n self.Exec.logger.debug(\"%s exit: %s.%s\" % ('<' * self.Exec.indent, f.__module__, f.__name__))\n self.Exec.indent -= 1\n return ret", "title": "" }, { "docid": "5893af7eb1a44fc89571434e28626899", "score": "0.49452323", "text": "def __init__(self, *args, **kwargs):\n self.route = None\n super(BaseApiHandler, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "9aa4aef64ec07f49922e7c317dbbc249", "score": "0.49418652", "text": "def route(self, path):\n\n def wrapper(handler):\n self.add_route(path, handler)\n return handler\n\n return wrapper", "title": "" }, { "docid": "0ba0e7946acbc7258ecb697d06295b97", "score": "0.49398023", "text": "def main(ctx, endpoint, debug):\n\n if ctx.obj is None:\n ctx.obj = {}\n\n ctx.obj['endpoint'] = endpoint", "title": "" }, { "docid": "0ba0e7946acbc7258ecb697d06295b97", "score": "0.49398023", "text": "def main(ctx, endpoint, debug):\n\n if ctx.obj is None:\n ctx.obj = {}\n\n ctx.obj['endpoint'] = endpoint", "title": "" }, { "docid": "b5b53c38351e33ff8651779f4bacd204", "score": "0.49315456", "text": "def test_context_log(self, monkeypatch):\n def echofixer(msg, file):\n return msg\n monkeypatch.setattr(click, 'echo', echofixer)\n\n context = Context()\n result = context.log(\"TEST\", 'test')\n assert 
callable(context.log)\n assert result is None", "title": "" }, { "docid": "23be53e5cddf76d0318f30b27553f52e", "score": "0.49181798", "text": "def enter_tag(func):\n\n @wraps(func)\n def add_log(*args, **kwargs):\n try:\n log.debug(\"Enter func: %s\", func.__name__)\n return func(*args, **kwargs)\n finally:\n log.debug(\"Exit func: %s\", func.__name__)\n\n return add_log", "title": "" }, { "docid": "90ad587b556433a52b4749ab045e0ad0", "score": "0.49163347", "text": "def __init__(\n self,\n name,\n use_tracer=None,\n use_metric=False,\n use_logging_level=logging.DEBUG,\n use_optimizer=None,\n use_cors=None,\n use_default_error=None,\n use_scheduler=None,\n all=None,\n flaskName=None,\n *args,\n **kwargs,\n ):\n # TODO: Add more text here for current situation\n if flaskName is None:\n flaskName = __name__\n\n super().__init__(flaskName, *args, **kwargs)\n logger = logging.getLogger(\"\")\n\n self.serviceName = name\n self.metrics = None\n self.tracing = None\n self.optimize = None\n self.cors = None\n self.default_errorhandler = None\n self.scheduler = None\n\n if all is not None and all is not False:\n use_tracer = True\n use_metric = True\n use_optimizer = True\n use_cors = True\n use_default_error = True\n use_scheduler = True\n\n logger.info(\"--- Start Connexion-Plus ---\")\n\n if not isinstance(self.app, (Flask, FlaskApp)):\n logger.warning(\n \"Given App is not flask, so it cannot get any functionality added from this lib currently.\"\n )\n return\n\n # add default error\n if use_default_error is not None and use_default_error is not False:\n from werkzeug.exceptions import HTTPException\n from werkzeug.exceptions import default_exceptions\n\n logger.info(\"Add default error handler to Flask...\")\n\n if callable(use_default_error):\n self.default_errorhandler = use_default_error\n\n logger.info(\"use given handler.\")\n\n else:\n\n def handle_error(e):\n code = 500\n if isinstance(e, HTTPException):\n code = e.code\n\n error = {\n \"error\": e.__class__.__name__,\n \"http_code\": code,\n \"description\": str(e),\n }\n logger.exception(error)\n return jsonify(error), code\n\n self.default_errorhandler = handle_error\n\n logger.info(\"use default one\")\n\n # register for all json exceptions\n self.app.register_error_handler(\n Exception, self.default_errorhandler)\n\n # register handler for all http exceptions\n for ex in default_exceptions:\n self.app.register_error_handler(ex, self.default_errorhandler)\n\n if use_scheduler is not None and use_scheduler is not False:\n logger.info(\"Add background scheduler to Flask\")\n from flask_apscheduler import APScheduler\n\n self.scheduler = APScheduler()\n self.scheduler.init_app(self.app)\n self.scheduler.start()\n\n # add optimizer\n if use_optimizer is not None and use_optimizer is not False:\n logger.info(\"Add optimizer to Flask...\")\n from .Optimizer import FlaskOptimize\n\n config = {\"compress\": False, \"minify\": False}\n if isinstance(use_optimizer, dict):\n config.update(use_optimizer)\n\n if isinstance(use_optimizer, bool) and use_optimizer:\n config.update({\"compress\": True, \"minify\": True})\n\n logger.info(\"use config {}.\".format(config))\n\n self.optimize = FlaskOptimize(self.app, config)\n\n # add CORS\n if use_cors is not None and use_cors is not False:\n logger.info(\"Add cors to Flask...\")\n from flask_cors import CORS\n\n if isinstance(use_cors, dict):\n logger.info(\"use given settings.\")\n self.cors = CORS(self.app, resources=use_cors)\n else:\n logger.info(\"use default ones.\")\n self.cors = 
CORS(self.app)\n\n logger.info(\"CORS added.\")\n\n # add prometheus\n if use_metric is not None and use_metric is not False:\n # TODO: add configuration https://github.com/rycus86/prometheus_flask_exporter#configuration\n\n from prometheus_flask_exporter import PrometheusMetrics\n\n self.metrics = PrometheusMetrics(self.app)\n logger.info(\"Add prometheus to Flask\")\n\n # add tracing\n if use_tracer is not None and use_tracer is not False:\n logger.info(\"Add opentracing to Flask...\")\n # add tracing to all routes in flaskApp\n from flask_opentracing import FlaskTracing\n import opentracing\n from functools import wraps\n from flask import request\n\n def wrapper(fn):\n @wraps(fn)\n def request_func(*args, **kwargs):\n if request.path != \"/metrics\":\n return fn(*args, **kwargs)\n\n return request_func\n\n FlaskTracing._before_request_fn = wrapper(\n FlaskTracing._before_request_fn)\n FlaskTracing._after_request_fn = wrapper(\n FlaskTracing._after_request_fn)\n\n config = None\n if not isinstance(use_tracer, opentracing.Tracer):\n logger.info(\"use default one.\")\n from jaeger_client import Config as jConfig\n\n tracer_config = {\n \"sampler\": {\"type\": \"const\", \"param\": 1, },\n \"local_agent\": {\n \"reporting_host\": \"jaeger-agent\",\n \"reporting_port\": 5775,\n },\n \"logging\": True,\n }\n\n if isinstance(use_tracer, dict):\n tracer_config = use_tracer\n\n if isinstance(use_metric, bool) and use_metric is True:\n logger.info(\"Use metrics for tracer.\")\n from jaeger_client.metrics.prometheus import (\n PrometheusMetricsFactory,\n )\n\n config = jConfig(\n config=tracer_config,\n service_name=f\"{name}ConnexionPlus\",\n metrics_factory=PrometheusMetricsFactory(\n namespace=f\"{name}ConnexionPlus\"\n ),\n )\n else:\n logger.info(\"no metrics for tracer configured.\")\n config = jConfig(\n config=tracer_config, service_name=f\"{name}ConnexionPlus\",\n )\n else:\n logger.info(\"use given tracer config.\")\n\n tracer_obj = use_tracer if config is None else config.initialize_tracer()\n self.tracing = FlaskTracing(tracer_obj, True, self.app)\n\n # add tracer to everything to support spans through multiple microservices via rpc-calls\n from opentracing_instrumentation.client_hooks import install_all_patches\n\n install_all_patches()\n logger.info(\"All tracing relevant libs patched.\")\n\n # add a TracingHandler for Logging\n from .TracingHandler import TracingHandler\n\n th = TracingHandler(use_tracer)\n th.setLevel(use_logging_level)\n\n logging.getLogger(\"\").addHandler(th)\n logger.info(\"Finished Tracer adding.\")\n\n logger.info(\"--- Finished Connexion-Plus ---\")", "title": "" }, { "docid": "ea74c1d4f44d9ed6c4f3d108c108c2ef", "score": "0.49162266", "text": "def test_logging_without_request():\n app = falcon.API()\n _set_up_falcon_logging(app)\n cf_logging.FRAMEWORK.context.set_correlation_id('value')\n\n logger, stream = config_logger('main.logger')\n logger.info('works')\n assert check_log_record(stream, JOB_LOG_SCHEMA, {'msg': v_str('works')}) == {}", "title": "" }, { "docid": "9793bda5015da48e21c4e82baf567526", "score": "0.49131352", "text": "def __call__(self, f):\n\n def wrapped_f(*args, **kwargs):\n try:\n start = datetime.now()\n output = f(*args, **kwargs)\n self.app.logger.info(\"Time taken for %s is %s\", self.api_key,\n datetime.now() - start)\n return output\n except KeyError as e:\n exception_trace = traceback.format_exc()\n self.app.logger.error(\"Exception in %s API:\\n%s\",\n self.api_key, exception_trace)\n self.app.logger.error(\"Missing key : \" + 
str(e))\n return error_response(400, \"{} is required\".format(str(e)))\n except ValueError as e:\n exception_trace = traceback.format_exc()\n self.app.logger.error(\"Exception in %s API:\\n%s\",\n self.api_key, exception_trace)\n return error_response(400, \"Bad Request. Incorrect Parameter types\")\n except CustomError as error:\n exception_trace = traceback.format_exc()\n self.app.logger.error(\"Exception in %s API: %s\\n%s\",\n self.api_key,\n error.get_message(),\n exception_trace)\n return error_response(error.get_error_code(), error.get_message())\n except:\n exception_trace = traceback.format_exc()\n self.app.logger.error(\"Exception in %s API:\\n%s\",\n self.api_key, exception_trace)\n return error_response(500, \"Internal Server Error\")\n finally:\n pass\n\n return wrapped_f", "title": "" }, { "docid": "14e643a5ddf01421d4aafa013093ccbd", "score": "0.4901908", "text": "def render(tpl=None, section=\"home\", fmt=\"html\"):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n template_name = tpl\n if template_name is None:\n template_name = request.endpoint.replace('.', '/') + '.html'\n ctx = f(*args, **kwargs)\n if ctx is None:\n ctx = {}\n elif isinstance(ctx, WerkzeugResponse):\n return ctx\n if fmt == \"json\":\n #return Response(json.dumps(ctx), mimetype='application/json')\n return jsonify(ctx)\n else:\n return flask_render_template(template_name, section=section, **ctx)\n\n return decorated_function\n return decorator", "title": "" }, { "docid": "bf5c5b9ed71b339d058284f2e0c9596c", "score": "0.48960802", "text": "def logtraffic(self, func):\n wrapper = self.merge(\n func,\n self.logheaders,\n self.logbodies\n )\n\n return wrapper", "title": "" }, { "docid": "21e2eab039a5becee0b51035f0a985c6", "score": "0.48897767", "text": "def loggedcall(fct):\n @wraps(fct)\n def wrapper(*args, **kwargs):\n logger.info(\"Function -- %s-- called with arguments -- %s -- and keywords -- %s --\" % \\\n (fct.__name__, str(args), str(kwargs)))\n return_value = fct(*args, **kwargs)\n logger.info(\"Function -- %s -- returned -- %s --\" % \\\n (fct.__name__, return_value))\n return return_value\n return wrapper", "title": "" }, { "docid": "81b43f110b7df8de6e198e961b5cd159", "score": "0.48895407", "text": "def add_handler(self, *args, **kwargs):\n self._logger.add(*args, **kwargs)", "title": "" }, { "docid": "38f76ed5efa9aeb6f95ca44f2258e14e", "score": "0.4882911", "text": "def setup_logging(logging_port):\n\n global configured\n\n if configured:\n raise RuntimeError(\"Logging was already configured\")\n\n logging.config.dictConfig(\n {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": { # filter out logs that do not come from teos\n \"teos\": {\"()\": MultiNameFilter, \"names\": [\"teos\", \"waitress\"]},\n },\n \"formatters\": {\n \"json_formatter\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.processors.JSONRenderer(),\n \"foreign_pre_chain\": [timestamper, add_api_component],\n }\n },\n \"handlers\": {\n \"socket\": {\n \"level\": \"DEBUG\",\n \"class\": \"teos.logger.FormattedSocketHandler\",\n \"host\": \"localhost\",\n \"port\": logging_port,\n \"filters\": [\"teos\"],\n \"formatter\": \"json_formatter\",\n },\n },\n \"loggers\": {\"\": {\"handlers\": [\"socket\"], \"level\": \"DEBUG\", \"propagate\": True}},\n }\n )\n\n structlog.configure(\n processors=[\n structlog.stdlib.PositionalArgumentsFormatter(),\n timestamper,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n ],\n context_class=dict,\n 
logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n cache_logger_on_first_use=True,\n )\n\n configured = True", "title": "" }, { "docid": "d015e8e1df4683135dbc814b91364471", "score": "0.48815727", "text": "def _logit(fn, *args, **kw):\n\n def logger(*args, **kw):\n ret = fn(*args, **kw)\n print('{} called with args({}), kwargs({}); returns({})'.format(fn.__name__, args, kw, ret))\n return ret\n\n return logger", "title": "" }, { "docid": "39257fbd6fabaf3c248a6bec71a3b84b", "score": "0.48743403", "text": "def welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n\n\n\n# Define what to do when a user hits the /about route\n@app.route(\"/api/v1.0/precipitation\")\ndef about():\n print(\"Server received request for 'About' page...\")\n return \"Welcome to my 'About' page!\"\n\n# Define what to do when a user hits the /about route\n@app.route(\"/api/v1.0/stations\")\ndef about():\n print(\"Server received request for 'About' page...\")\n return \"Welcome to my 'About' page!\"\n\n# Define what to do when a user hits the /about route\n@app.route(\"/api/v1.0/tobs\")\ndef about():\n print(\"Server received request for 'About' page...\")\n return \"Welcome to my 'About' page!\"\n\n# Define what to do when a user hits the /about route\n@app.route(\"/api/v1.0/<start>\")\ndef about():\n print(\"Server received request for 'About' page...\")\n return \"Welcome to my 'About' page!\"\n\n# Define what to do when a user hits the /about route\n@app.route(\"/api/v1.0/<start>/<end>\")\ndef about():\n print(\"Server received request for 'About' page...\")\n return \"Welcome to my 'About' page!\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)", "title": "" }, { "docid": "d5ee1e6b4ba62515d4ac7ee1601eb5b7", "score": "0.48701876", "text": "def set_werkzeug_log_color():\n from django.core.management.color import color_style\n from werkzeug.serving import WSGIRequestHandler\n from werkzeug._internal import _log\n\n _style = color_style()\n _orig_log = WSGIRequestHandler.log\n\n def werk_log(self, type, message, *args):\n try:\n msg = '%s - - [%s] %s' % (\n self.address_string(),\n self.log_date_time_string(),\n message % args,\n )\n http_code = str(args[1])\n except Exception:\n return _orig_log(type, message, *args)\n\n # Utilize terminal colors, if available\n if http_code[0] == '2':\n # Put 2XX first, since it should be the common case\n msg = _style.HTTP_SUCCESS(msg)\n elif http_code[0] == '1':\n msg = _style.HTTP_INFO(msg)\n elif http_code == '304':\n msg = _style.HTTP_NOT_MODIFIED(msg)\n elif http_code[0] == '3':\n msg = _style.HTTP_REDIRECT(msg)\n elif http_code == '404':\n msg = _style.HTTP_NOT_FOUND(msg)\n elif http_code[0] == '4':\n msg = _style.HTTP_BAD_REQUEST(msg)\n else:\n # Any 5XX, or any other response\n msg = _style.HTTP_SERVER_ERROR(msg)\n\n _log(type, msg)\n\n WSGIRequestHandler.log = werk_log", "title": "" }, { "docid": "5e8389714f591e4917b0db9ded756bd7", "score": "0.48683423", "text": "def log(self, request, response, time):\n if request.path == \"/healthcheck\":\n return\n\n access_logger.info(\n \"{http_method} {path_qs} {status} in {timeit} sec\",\n http_method=request.method,\n path=request.path,\n path_qs=request.path_qs,\n query=dict(request.query),\n timeit=time,\n status=response.status,\n )", "title": "" }, { "docid": "04f9849b14c25a1dca66c6203c411c0e", "score": 
"0.48657122", "text": "def __call__(self, func):\n functools.update_wrapper(self, func)\n\n # Add debug log for any function you wish for TS, provides trace of incident\n if self.log_type == \"Debug\": # Used to wrap any function and does not know about or care about flags\n def debug(*args, **kwargs):\n if args and isinstance(args[0], handler_pc.Pc):\n mch = args[0]\n else:\n mch = self.ch\n \"\"\"__tracer_var_ becomes _logger__tracer_var_ in the trace.\n This is used to determine if we are within the wrapping frame\n or the wrapped frame.\n\n Leave this in place to receive only the wrapped frame trace info\n - we dont care about the wrapping frame information.\"\"\"\n __tracer_var_ = 0\n try:\n return func(*args, **kwargs)\n except Exception as err:\n if isinstance(mch, handler_pc.Pc):\n if mch.level == merc.MAX_LEVEL:\n mch.send(\"Debug has been Enabled\\n\\n\")\n char_parse_exception(err, args, ch=mch)\n else:\n noch_parse_exception(err, args)\n return\n return debug\n\n if self.log_type == \"Interp\": # Used with interp and either debug command, or global debug flag\n def interp_debug(*args, **kwargs):\n if merc.GDF is False and merc.GDCF is False: # Check for global/debug command flags\n return func(*args, **kwargs) # if none of the debugs are on, just send the command as normal\n if args and isinstance(args[0], handler_pc.Pc):\n \"\"\"check if there are args, and the args entail a character structure\"\"\"\n mch = args[0] # If so, lets make a char object so we can send messages as needed\n else:\n mch = self.ch # If so, lets make a char object so we can send messages as needed\n \"\"\"__tracer_var_ becomes _logger__tracer_var_ in the trace.\n This is used to determine if we are within the wrapping frame\n or the wrapped frame.\n\n Leave this in place to receive only the wrapped frame trace info\n - we dont care about the wrapping frame information.\"\"\"\n __tracer_var_ = 0\n try:\n return func(*args, **kwargs)\n except Exception as err:\n if isinstance(mch, handler_pc.Pc):\n mch.send(\"#RERROR:#n Debug has been enabled.\\n\")\n mch.send(\"%s\\n\" % err)\n char_parse_exception(err, args, ch=mch)\n else:\n noch_parse_exception(err, args)\n return\n return interp_debug\n\n else:\n return func", "title": "" }, { "docid": "180a67c2f66bafb0f578955f4b04c4ae", "score": "0.4858772", "text": "def logger_handler():\r\n # logging formatter\r\n formatter = ColoredFormatter(\r\n \"{green}{asctime}{reset} :: {bold_purple}{name:^13}{reset} :: {log_color}{levelname:^8}{reset} :: {bold_white}{message}\",\r\n datefmt=\"%H:%M:%S\",\r\n reset=True,\r\n log_colors={\r\n \"INFO\": \"bold_cyan\",\r\n \"DEBUG\": \"bold_yellow\",\r\n \"WARNING\": \"bold_red,fg_thin_yellow\",\r\n \"ERROR\": \"bold_red\",\r\n \"CRITICAL\": \"bold_red,bg_white\",\r\n },\r\n style=\"{\",\r\n )\r\n # check if VIDGEAR_LOGFILE defined\r\n file_mode = os.environ.get(\"VIDGEAR_LOGFILE\", False)\r\n # define handler\r\n handler = log.StreamHandler()\r\n if file_mode and isinstance(file_mode, str):\r\n file_path = os.path.abspath(file_mode)\r\n if (os.name == \"nt\" or os.access in os.supports_effective_ids) and os.access(\r\n os.path.dirname(file_path), os.W_OK\r\n ):\r\n file_path = (\r\n os.path.join(file_path, \"vidgear.log\")\r\n if os.path.isdir(file_path)\r\n else file_path\r\n )\r\n handler = log.FileHandler(file_path, mode=\"a\")\r\n formatter = log.Formatter(\r\n \"{asctime} :: {name} :: {levelname} :: {message}\",\r\n datefmt=\"%H:%M:%S\",\r\n style=\"{\",\r\n )\r\n\r\n handler.setFormatter(formatter)\r\n return handler", 
"title": "" }, { "docid": "758288b2424be6adad61f69db7be2f76", "score": "0.485874", "text": "def with_logging(self):\n def with_logging_decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n self._save_log(f'Running job: {func.__name__}.')\n result = func(*args, **kwargs)\n self._save_log(f'Job {func.__name__} completed.')\n return result\n\n return wrapper\n return with_logging_decorator", "title": "" }, { "docid": "cb9a526fa3ed134fd4b93b0daeee3c20", "score": "0.48546028", "text": "def log_request(self, context):\n request_message = self.request_format % context.log_vars\n self.logger.info(request_message)", "title": "" }, { "docid": "075acc28820671b5f977f2b72f7ba5d1", "score": "0.48487025", "text": "def wrap(app):\n if getattr(app, '_talisker_wrapped', False):\n return app\n\n config = talisker.config.get_config()\n environ = {\n 'statsd': talisker.statsd.get_client(),\n 'requests': talisker.requests.get_session(),\n }\n headers = {'X-VCS-Revision': config.revision_id}\n\n wrapped = app\n # added in reverse order\n wrapped = talisker.endpoints.StandardEndpointMiddleware(wrapped)\n wrapped = TaliskerMiddleware(wrapped, environ, headers)\n wrapped._talisker_wrapped = True\n wrapped._talisker_original_app = app\n return wrapped", "title": "" }, { "docid": "16ae14a0ff91214f8ad31bc956b98b05", "score": "0.48389655", "text": "def logger_wrapper(msg, *args):\n call = getattr(self.activity_object._log, attr)\n call(msg, *args)\n try:\n msg = msg % args\n except:\n msg = str(msg)\n token, user = self.activity_object.getTokenUserForThread()\n if not token:\n return\n Monitor.log_activity(token, user, msg)", "title": "" }, { "docid": "8812ebcc79835ba66b0c3bbe6b147300", "score": "0.48329988", "text": "def mock_api(response_log_path):\n\n def wrap(test_func):\n @wraps(test_func)\n @responses.activate\n def wrapped():\n with open(response_log_path) as f:\n for line in f:\n response = json.loads(line)\n response[\"url\"] = re.sub(\n r\"(https?:\\/\\/).*(:9100)\", r\"\\1localhost\\2\", response[\"url\"]\n )\n responses.add(**response)\n test_func()\n\n return wrapped\n\n return wrap", "title": "" }, { "docid": "a8299cbf7d69287586636c577154f7aa", "score": "0.48262867", "text": "def post_invocation(self,\n logger: Logger,\n context: Context,\n func_args: typing.Dict[str, object] = {},\n func_ret: typing.Optional[object] = None,\n *args,\n **kwargs) -> None:\n pass", "title": "" }, { "docid": "0bd0aa2e3cbc24386a4e57c9618158bc", "score": "0.48262417", "text": "def __init__(self, log=None, sec_context=None):\n self._handler_map = {}\n self._log = log\n self._sec_ctxt = sec_context", "title": "" } ]
8189ff5ceb326cc763f8574719002d01
Iterate over the jobs.
[ { "docid": "37989d955fd11cd99c46db115f2a7726", "score": "0.8211515", "text": "def __iter__(self):\n for j in self.jobs:\n yield j", "title": "" } ]
[ { "docid": "f7e47df71a6a35e785ddba578270c9a4", "score": "0.809505", "text": "def jobs(self):\n for job in self._jobs:\n yield job", "title": "" }, { "docid": "407993a0dcab8166e87568ddc55eb712", "score": "0.739263", "text": "def _all_jobs(self):\n for child in self.extract.children: # Each child is a different charge state.\n for job in child.itervalues(): yield job", "title": "" }, { "docid": "6069bdb88e61d0744e9e71b17a0204dd", "score": "0.738412", "text": "def _get_jobs(self):\n for job in new_jobs():\n self.create_job(job)", "title": "" }, { "docid": "07e377e9e9cf6bc5d74d7965d165791a", "score": "0.7335271", "text": "def jobs(self):\n self._prevent_dos()\n for job in _fetch(user=self.user):\n yield job", "title": "" }, { "docid": "b3fc9b74be67c6e5e7c982c3c83365a1", "score": "0.708459", "text": "def do_jobs(self):\n for household in [self.household]+self.other_households:\n household.do_jobs()", "title": "" }, { "docid": "585a07701d015da91ebf4aa38fb4bd3a", "score": "0.6995804", "text": "def iterJobs(self):\n if not self.fh:\n raise PyAnnolibError(\"filehandle was not set in Build object\")\n\n # Create the parser\n parser = AnnoXMLBodyParser(self, self.ignore_unknown)\n\n # Parse the file\n return parser.parse(self.fh)", "title": "" }, { "docid": "e0f8d3a2936d04ed037f4e0fc4efe3c4", "score": "0.69635195", "text": "def __jobs(self, jobs: Jobs, parent_job_name: str = \"\") -> Iterator[Job]:\n for job in jobs:\n if parent_job_name:\n job[\"name\"] = f\"{parent_job_name}/{job['name']}\"\n if job.get(\"buildable\") and self._include_job(job):\n yield job\n for child_job in self.__jobs(job.get(\"jobs\", []), parent_job_name=job[\"name\"]):\n yield child_job", "title": "" }, { "docid": "a968f057771919dcf62a9fd7b37689d3", "score": "0.68632543", "text": "def run_jobs (self):\n\n if len (self.jobs) > 0:\n\n job = self.jobs[:1].pop ()\n self.jobs = self.jobs[1:]\n\n self.handle_job (job)\n\n else:\n\n if self.idle_job is not None:\n self.run_idle_job ()\n else:\n self.piglow.all (0)", "title": "" }, { "docid": "1e909157bf88f716cc435049345c5c45", "score": "0.67962915", "text": "def iterate(self):\n # Submit new batches if allowed\n while self._allow_submit(self.batches.next_index):\n next_batch = self.prepare_new_batch(self.batches.next_index)\n logger.debug(\"Submitting batch %d\" % self.batches.next_index)\n self.batches.submit(next_batch)\n\n # Handle the next ready batch in succession\n batch, batch_index = self.batches.wait_next()\n logger.debug('Received batch %d' % batch_index)\n self.update(batch, batch_index)", "title": "" }, { "docid": "ce64eb127fc73c296e6dabdbb4626190", "score": "0.66862214", "text": "def process_step1_jobs(self):\n while self.icur < self.iend:\n # check how many jobs are currently running\n njb = jobcount()\n # if there are too many jobs, pause for 5 minutes\n if njb > 900:\n msg = 'There are {} jobs. 
Pausing for 5min.'.format(njb)\n errmsg(msg)\n time.sleep(300)\n # otherwise, send the batch of jobs for the current jackknife\n else:\n self._sendbatch()\n msg = 'Job batch for jkidx {} sent.'.format(self.icur)\n errmsg(msg)\n self.icur += 1", "title": "" }, { "docid": "95b7a1e0d423b982dba345b90d35d312", "score": "0.6663757", "text": "def job_get_list():", "title": "" }, { "docid": "3474c54e435c5b434a5732e9a27b9212", "score": "0.6652212", "text": "def getJobs(self):\n jlist = self._branchView.get_data(self._branchView.python_api_url(self._branchView.baseurl))[\"jobs\"]\n for j in jlist:\n job = FrozenJSON(j)\n yield job", "title": "" }, { "docid": "c433b09c996d6bc4354454811f9c7846", "score": "0.66368306", "text": "def completed_jobs(self) -> Iterator[AsyncJob]:\n if not self._running_jobs:\n self._start_jobs()\n\n while self._running_jobs:\n completed_jobs = self._check_jobs_status_and_restart()\n while not completed_jobs:\n logger.info(f\"No jobs ready to be consumed, wait for {self.JOB_STATUS_UPDATE_SLEEP_SECONDS} seconds\")\n time.sleep(self.JOB_STATUS_UPDATE_SLEEP_SECONDS)\n completed_jobs = self._check_jobs_status_and_restart()\n yield from completed_jobs\n self._start_jobs()", "title": "" }, { "docid": "03a9064ad5c5fd461ba5059d7bbe93a0", "score": "0.66050106", "text": "def jobs(self):\n return self._jobs", "title": "" }, { "docid": "79de6f67c2830db5497fd04da6520c68", "score": "0.6584163", "text": "def runJobs(self):\n\n tot_jobs = len(self.jobs)\n suc_jobs = 0\n\n for job in self.jobs:\n provider_set = self.getVideoProvider(job[\"provider\"]) # load the provider for this job\n\n if self.provider is None or not provider_set:\n print(f\"{self.prä} Provider {job['provider']} is not available.\")\n continue\n\n config_map = {}\n for t in self.provider.getConfig():\n if not t in self.config.keys():\n print(f\"{self.prä} Key {t} not set on config\")\n config_map[t] = self.config[t]\n\n self.provider.setConfig(config_map)\n\n print(f\"{self.prä} Load job \\\"{job['name']}\\\"\")\n urls = self.provider.getUrlsByTag(job[\"tag\"], job[\"tagvalue\"])\n\n if not urls:\n continue\n\n for url in urls:\n ret = self.download(url, job)\n\n if ret:\n suc_jobs += 1\n suc_jobs += 1 # if there is nothing to download we count it as success too.\n\n print(f\"{self.prä} {suc_jobs} out of {tot_jobs} succeded.\")\n\n if suc_jobs == tot_jobs:\n return True\n else:\n return False", "title": "" }, { "docid": "4b77cb0584bc2d9fdb2db2b9b36e1d4f", "score": "0.656312", "text": "def start_all_jobs(self):\n assert self.run_queue.empty()\n for job in self.jobs.values():\n self.queue_job(job)", "title": "" }, { "docid": "ffa0931ee546cce9f28774f15ca23ff0", "score": "0.65584165", "text": "def process_jobs(self, jobs: List[Dict[str, Any]]) -> List[Dict[str, str]]:\n\n processed_jobs = []\n for raw in jobs:\n try:\n processed = self.process_job(raw)\n except BaseException as err:\n _log.warning(\"Error while processing record '%s'! 
(%s) %s\", raw, type(err).__name__, err)\n continue\n\n processed_jobs.append(processed)\n\n return processed_jobs", "title": "" }, { "docid": "a201e80ff79a54954ed9f7daef1472e4", "score": "0.65219194", "text": "def jobs(self, jobs):\n self._jobs = jobs", "title": "" }, { "docid": "9e3f4dba0752825dee5a0cc25ed1b515", "score": "0.65095425", "text": "def _generate_async_jobs(self, params: Mapping) -> Iterator[AsyncJob]:\n\n self._next_cursor_value = self._get_start_date()\n for ts_start in self._date_intervals():\n if ts_start in self._completed_slices:\n continue\n ts_end = ts_start + pendulum.duration(days=self.time_increment - 1)\n interval = pendulum.Period(ts_start, ts_end)\n yield InsightAsyncJob(api=self._api.api, edge_object=self._api.account, interval=interval, params=params)", "title": "" }, { "docid": "663bc22e5770c8eec4a4db7d27550292", "score": "0.6483469", "text": "async def run_forever(self):\n async for job in self:\n if job:\n await job.work()", "title": "" }, { "docid": "a0d0e31432df0715dd0292828f1b73a6", "score": "0.6420857", "text": "def _worker_loop(self, job_queue, progress_queue):\n thread_private_mem = self._get_thread_working_mem()\n jobs_processed = 0\n while True:\n job = job_queue.get()\n if job is None:\n progress_queue.put(None)\n break # no more jobs => quit this worker\n data_iterable, alpha = job\n\n tally, raw_tally = self._do_train_job(data_iterable, alpha, thread_private_mem)\n\n progress_queue.put((len(data_iterable), tally, raw_tally)) # report back progress\n jobs_processed += 1\n logger.debug(\"worker exiting, processed %i jobs\", jobs_processed)", "title": "" }, { "docid": "c3b83c8842f3fe6e15f3597b719385d5", "score": "0.6419956", "text": "def iter_running(self):\n raise NotImplementedError()", "title": "" }, { "docid": "751dba69adccc24d42ccc5242a2f8fc8", "score": "0.6415741", "text": "async def manage_job_processing(self):\n while True:\n # Get collection of \"active\" jobs\n active_jobs: List[RequestedJob] = self.get_all_active_jobs()\n\n # TODO: something must transition MODEL_EXEC_RUNNING Jobs to MODEL_EXEC_COMPLETED (probably Monitor class)\n # TODO: something must transition OUTPUT_EXEC_RUNNING Jobs to OUTPUT_EXEC_COMPLETED (probably Monitor class)\n\n # Process the jobs into various organized collections\n organized_lists = self._organize_active_jobs(active_jobs)\n jobs_eligible_for_allocate = organized_lists[0]\n jobs_to_release_resources = organized_lists[1]\n jobs_completed_phase = organized_lists[2]\n\n for job_with_allocations_to_release in jobs_to_release_resources:\n self.release_allocations(job_with_allocations_to_release)\n\n for job_transitioning_phases in jobs_completed_phase:\n # TODO: figure out what to do here; e.g., start output service after model_exec is done\n pass\n\n # Build prioritized list/queue of allocation eligible Jobs\n priority_queues = self.build_prioritized_pending_allocation_queues(jobs_eligible_for_allocate)\n high_priority_queue = priority_queues['high']\n low_priority_queue = priority_queues['low']\n med_priority_queue = priority_queues['medium']\n\n # Request allocations and get collection of jobs that were allocated, starting first with high priorities\n allocated_successfully = self._request_allocations_for_queue(high_priority_queue)\n # Only even process others if any and all high priority jobs get allocated\n if len(allocated_successfully) == len(high_priority_queue):\n allocated_successfully.extend(self._request_allocations_for_queue(med_priority_queue))\n 
allocated_successfully.extend(self._request_allocations_for_queue(low_priority_queue))\n\n # For each Job that received an allocation, save updated state and pass to scheduler\n for job in allocated_successfully:\n if self.request_scheduling(job):\n job.status_step = JobExecStep.SCHEDULED\n else:\n job.status_step = JobExecStep.FAILED\n # TODO: probably log something about this, or raise exception\n self.save_job(job)\n\n await sleep(60)", "title": "" }, { "docid": "20b1b07a78027ab6a881d2ddcaf21b0d", "score": "0.64051634", "text": "def parseJobs(self, cb, user_data=None):\n for job in self.iterJobs():\n retval = cb(job, user_data)\n if retval == StopParseJobs:\n break", "title": "" }, { "docid": "d0f5dcc29c6f1d50ce0e7bb3b109c6fc", "score": "0.63652015", "text": "def _parse_jobs(self):\n for job in self.root:\n if job.tag != \"Job\":\n print('Not a job!')\n continue #TODO: Log some kind of error so user knows\n\n try:\n type = Type[job.attrib['type']]\n except KeyError:\n type = Type['Error'] #TODO: Log some kind of error so user knows\n print('Job type was invalid!')\n continue\n\n try:\n name = job.attrib['name']\n except KeyError:\n name = 'default name' #TODO - better default name\n \n actions = []\n for action in job:\n actions.append((action.tag, action.attrib))\n\n #Build appropriate job\n try:\n self.jobs.append(self._create_job(type,job,name,actions))\n except BadJobTypeException:\n continue #TODO better error handling and logging", "title": "" }, { "docid": "53c1bf0d4a39e73d2cfb1f991ab28e00", "score": "0.6359884", "text": "def process(self):\n proxies = self.clidb.getProxies()\n for proxyid in proxies:\n # get number of all states of jobs with fairshare proxyid\n states = self.getJobStateCount('fairshare = {}'.format(proxyid))\n\n # get number of running and submitted jobs\n running = 0\n submitted = 0\n for state in states:\n if state['arcstate'] == 'running':\n running = state['COUNT(arcstate)']\n elif state['arcstate'] in ('submitted', 'submitting'):\n submitted += state['COUNT(arcstate)']\n #self.log.debug('{} jobs running for proxyid {}'.format(running, proxyid))\n #self.log.debug('{} jobs submitted for proxyid {}'.format(submitted, proxyid))\n\n if submitted < max(0.2 * running, 100): # TODO: HARDCODED\n self.insertNewJobs(proxyid, 20)", "title": "" }, { "docid": "468c1796bee7621a8b5c828209a85e42", "score": "0.6324985", "text": "def map(self, iterable):\n\t\t#initialize\n\t\tworkers = []\n\t\tfree_workers = queue.Queue()\n\t\tself.request_stop = multiprocessing.Event()\n\t\tself._chunksize = 1\n\n\t\t#start up workers\n\t\tfor i in range(self.num_workers):\n\t\t\tparent_conn, child_conn = multiprocessing.Pipe()\n\t\t\tworker = {} \n\t\t\tworker[\"connection\"] = parent_conn\n\t\t\tworker[\"process\"] = Process(target = self._work, args=(child_conn, self.context_func))\n\t\t\tworkers.append(worker)\n\t\t\tfree_workers.put(worker)\n\t\t\tworker[\"process\"].start()\n\n\t\t#process values\n\t\tjobs = {}\n\t\tjobid = 0\n\t\tbatch = []\n\t\tlast_processing_times = [self.chunkseconds] * 10*self.num_workers #init with chunkseconds, such that intial chunksize is 1\n\t\tlast_processing_time_pos = 0\n\n\t\tfor value in iterable:\n\t\t\t#wait as long as all workers are busy\n\t\t\twhile free_workers.empty():\n\t\t\t\tminjobid = min(jobs.keys())\n\t\t\t\tjobs[minjobid][\"worker\"][\"connection\"].poll(0.1)\n\t\t\t\tfor job in jobs.values():\n\t\t\t\t\tif (not \"stopped\" in job) and 
job[\"worker\"][\"connection\"].poll():\n\t\t\t\t\t\tjob.update(job[\"worker\"][\"connection\"].recv())\n\t\t\t\t\t\tfree_workers.put(job[\"worker\"])\n\t\t\t\t\t\tjob[\"worker\"] = None\n\n\t\t\t#if job limit reached, wait for leftmost job to finish\n\t\t\tif len(jobs) >= 10*self.num_workers: #do not start jobs for more than 10*workers batches ahead to save memory\n\t\t\t\twhile not (self.job_is_finished(jobs[min(jobs.keys())], timeout = 0.1)):\n\t\t\t\t\tpass\n\n\t\t\t#yield results while leftmost batch is ready\n\t\t\twhile len(jobs) > 0 and (self.job_is_finished(jobs[min(jobs.keys())])): \n\t\t\t\tminjobid = min(jobs.keys())\n\t\t\t\tif (not \"stopped\" in jobs[minjobid]) and jobs[minjobid][\"worker\"][\"connection\"].poll(): #FIXME: worker might have new job!!!!\n\t\t\t\t\tjobs[minjobid].update(jobs[minjobid][\"worker\"][\"connection\"].recv())\n\t\t\t\t\tfree_workers.put(jobs[minjobid][\"worker\"])\n\t\t\t\t\tjobs[minjobid][\"worker\"] = None\n\n\t\t\t\t#update optimal chunksize based on 10*workers last batch processing times\n\t\t\t\tlast_processing_times[last_processing_time_pos] = (jobs[minjobid][\"stopped\"] - jobs[minjobid][\"started\"])/self._chunksize\n\t\t\t\tlast_processing_time_pos = (last_processing_time_pos + 1) % (10*self.num_workers) #rotate through list with last processing times\n\n\t\t\t\tavg_processing_time = sum(last_processing_times)/len(last_processing_times)\n\t\t\t\tdesired_chunksize = int(math.ceil(self.chunkseconds/avg_processing_time)) #batch should take chunkseconds s to calculate\n\t\t\t\tself._chunksize = min(desired_chunksize, max(10,2*self._chunksize)) #double chunksize at most every time (but allow to go to 10 directly in the beginning)\n\t\t\t\t#print(self._chunksize)\n\n\t\t\t\tif \"error\" in jobs[minjobid]:\n\t\t\t\t\tself.request_stop.set()\n\t\t\t\t\t#shutdown workers\n\t\t\t\t\tfor worker in workers:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tworker[\"connection\"].send(None) #send shutdown command\n\t\t\t\t\t\t\tworker[\"connection\"].close()\n\t\t\t\t\t\t\tworker[\"process\"].join()\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\n\t\t\t\t\tex_type, ex_value, tb_str = jobs[minjobid][\"error\"]\n\t\t\t\t\tmessage = '%s (in subprocess)\\n%s' % (str(ex_value), tb_str)\n\t\t\t\t\traise ex_type(message)\n\n\t\t\t\tfor r in jobs[minjobid][\"results\"]:\n\t\t\t\t\tyield r\n\t\t\t\tdel jobs[minjobid]\n\n\t\t\t#start new job if batch full\n\t\t\tbatch.append(value)\n\n\t\t\tif len(batch) >= self._chunksize:\n\t\t\t\tjob = {}\n\t\t\t\tjobs[jobid] = job\n\t\t\t\tjob[\"started\"] = time.time()\n\t\t\t\tjob[\"worker\"] = free_workers.get()\n\t\t\t\tjob[\"worker\"][\"connection\"].send(batch)\n\n\t\t\t\tbatch = []\n\t\t\t\tjobid += 1\n\n\t\t#wait while all workers busy\n\t\twhile free_workers.empty():\n\t\t\tminjobid = min(jobs.keys())\n\t\t\tjobs[minjobid][\"worker\"][\"connection\"].poll(1)\n\t\t\tfor job in jobs.values():\n\t\t\t\tif (not \"stopped\" in job) and job[\"worker\"][\"connection\"].poll():\n\t\t\t\t\tjob.update(job[\"worker\"][\"connection\"].recv())\n\t\t\t\t\tfree_workers.put(job[\"worker\"])\n\t\t\t\t\tjob[\"worker\"] = None\n\n\t\t#submit last batch\n\t\tif len(batch) > 0:\n\t\t\tjob = {}\n\t\t\tjobs[jobid] = job\n\t\t\tjob[\"started\"] = time.time()\n\t\t\tjob[\"worker\"] = free_workers.get()\n\t\t\tjob[\"worker\"][\"connection\"].send(batch)\n\t\t\tbatch = []\n\n\t\t#wait for all jobs to finish\n\t\twhile len(jobs) > 0:\n\t\t\tif (self.job_is_finished(jobs[min(jobs.keys())], timeout = 1.0)):\n\t\t\t\tminjobid = 
min(jobs.keys())\n\t\t\t\tif (not \"stopped\" in jobs[minjobid]) and jobs[minjobid][\"worker\"][\"connection\"].poll():\n\t\t\t\t\tjobs[minjobid].update(jobs[minjobid][\"worker\"][\"connection\"].recv())\n\t\t\t\t\tfree_workers.put(jobs[minjobid][\"worker\"])\n\t\t\t\t\tjobs[minjobid][\"worker\"] = None\n\n\t\t\t\tif \"error\" in jobs[minjobid]:\n\t\t\t\t\tself.request_stop.set()\n\t\t\t\t\t#shutdown workers\n\t\t\t\t\tfor worker in workers:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tworker[\"connection\"].send(None) #send shutdown command\n\t\t\t\t\t\t\tworker[\"connection\"].close()\n\t\t\t\t\t\t\tworker[\"process\"].join()\n\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\t\n\t\t\t\t\tex_type, ex_value, tb_str = jobs[minjobid][\"error\"]\n\t\t\t\t\tmessage = '%s (in subprocess)\\n%s' % (str(ex_value), tb_str)\n\t\t\t\t\traise ex_type(message)\n\n\t\t\t\tfor r in jobs[minjobid][\"results\"]:\n\t\t\t\t\tyield r\n\t\t\t\tdel jobs[minjobid]\n\n\t\t#shutdown workers\n\t\tfor worker in workers:\n\t\t\ttry:\n\t\t\t\tworker[\"connection\"].send(None) #send shutdown command\n\t\t\t\tworker[\"connection\"].close()\n\t\t\t\tworker[\"process\"].join()\n\t\t\texcept Exception as e:\n\t\t\t\tpass", "title": "" }, { "docid": "ca3a8035db743a8c1c4e67cc104a996b", "score": "0.6317815", "text": "def _job_generator(self) -> str:\n while True:\n yield self.faker.job()", "title": "" }, { "docid": "47b143dd2df8af2354d5f0648a31d25f", "score": "0.63127154", "text": "def jobs(self):\n return self._request_obj(self._urls[\"jobs\"])", "title": "" }, { "docid": "037f5de31ac555035bb43b91be36b081", "score": "0.6287602", "text": "def jobs(self):\n\n if not hasattr(self, \"_jobs\"):\n self._jobs = {}\n return self._jobs", "title": "" }, { "docid": "5646580965ad068e54fb647e7646ac5c", "score": "0.6285586", "text": "def run(self):\r\n # per-process job get() timeout\r\n timeout = 1.0\r\n\r\n # update our tilesets\r\n self.update_tilesets()\r\n\r\n # register for all available signals\r\n def register_signal(name, sig):\r\n def handler(*args, **kwargs):\r\n self.signal_queue.put((name, args, kwargs), False)\r\n sig.set_interceptor(handler)\r\n for name, sig in Signal.signals.iteritems():\r\n register_signal(name, sig)\r\n\r\n # notify that we're starting up\r\n self.result_queue.put(None, False)\r\n while True:\r\n try:\r\n job = self.job_queue.get(True, timeout)\r\n if job == None:\r\n # this is a end-of-jobs sentinel\r\n return\r\n\r\n # unpack job\r\n tv, ti, workitem = job\r\n\r\n if tv != self.tileset_version:\r\n # our tilesets changed!\r\n self.update_tilesets()\r\n assert tv == self.tileset_version\r\n\r\n # do job\r\n ret = self.tilesets[ti].do_work(workitem)\r\n result = (ti, workitem, ret,)\r\n self.result_queue.put(result, False)\r\n except Queue.Empty:\r\n pass", "title": "" }, { "docid": "fd87a01e903fd0efb7a0f382d18b11a5", "score": "0.62635416", "text": "def schedule_jobs(self):\n idle_workers = self.get_idle_worker(task)\n for worker in idel_workers:\n jobs = self.get_job(worker)\n for job in jobs:\n self.execute_job(job)", "title": "" }, { "docid": "1630468a1d8644a63740a5189e36b872", "score": "0.62576514", "text": "def job_result_each(self, job_id):\n for row in self.job_result_format_each(job_id, \"msgpack\"):\n yield row", "title": "" }, { "docid": "07e1622bda6c45c5763cd48369c26def", "score": "0.6244347", "text": "def getAllJobs(self):\n\n jobs = [job for job in self.iterJobs()]\n\n return jobs", "title": "" }, { "docid": "5a26d0428abfb4e2cb35d332cdfd6d65", "score": "0.6243019", "text": "def 
inner_iterable():\n if current_job:\n set_current_job(current_job)\n\n for x in iterable:\n yield x\n\n if current_job:\n set_current_job(None)", "title": "" }, { "docid": "db38c445990c9533d9cc1a68c22f00e1", "score": "0.6238126", "text": "def run_jobs_serial(jobs,X,Y):\n results = list()\n for job in jobs:\n r = job.run(X,Y)\n results.append(r)\n return results", "title": "" }, { "docid": "f7b384b696f5bbd1727cba5ec555e991", "score": "0.6236887", "text": "def batch_work(self):\n\n try:\n while True:\n gen = execute_queries(self._stop_event, **{'STOA': self.config['PROVIDER']})\n for listener, result in gen:\n for task in _triggers:\n task(listener, result, self._stop_event)\n gen = collect_fragment(self._stop_event, **{'STOA': self.config['PROVIDER']})\n for collector, (t, s, p, o) in gen:\n for task in _triggers:\n task(collector, (t, s, p, o), self._stop_event)\n for task in _finishers:\n task(self._stop_event)\n self._stop_event.wait(self.__refresh_rate)\n except Exception, e:\n traceback.print_exc()\n log.error(e.message)", "title": "" }, { "docid": "c508c8cf3275eba19b0f26a287ed282d", "score": "0.6219218", "text": "def _run(self):\n\n while len(self._queue) > 0: # While there are jobs in the queue\n self._manage(); # Block until process finishes if too many running \n job = self._queue.pop(0); # Pop job object off of queue\n job.start( nowait = True ); # Start and do NOT wait to finish (non-blocking)\n self._jobs.append( job ); # Append job to jobs arrray \n self._manage( waitall = True ); # Wait for rest of jobs to finish", "title": "" }, { "docid": "cebeeea4265cb9997cb8010317bfddda", "score": "0.6189491", "text": "def iter_results(self, cast_type: Type[Serializable] = None):\n for job in self.jobs.filter(Job.status == JobStatus.SUCCESS):\n yield job.result(cast_type=cast_type)", "title": "" }, { "docid": "a562ef6e90af373ae076273fdf79558e", "score": "0.6186101", "text": "def scrape_job_details(self):\n logger.debug(\"Scraping job details\")\n\n job_detail_files = get_matching_csv_files(CLJobScraper.JOB_LINKS_ROOT_FILENAME)\n if not job_detail_files:\n logger.debug(\"No files found with root filename %s\", CLJobScraper.JOB_LINKS_ROOT_FILENAME)\n else:\n for filename in job_detail_files:\n with open(filename, \"rb\") as csv_file:\n csv_reader = csv.reader(csv_file)\n header = csv_reader.next()\n\n for row in csv_reader:\n res = scrape_single_job_details(row[0])\n if res is not None:\n yield res,\n\n logger.debug(\"Sleeping Zzz\")\n time.sleep(SLEEP_TIME) # sleep to avoid being blacklisted", "title": "" }, { "docid": "2de1ce9ccf29ff68e16ef83fb0f27416", "score": "0.61827546", "text": "def do_job(self):\n pass", "title": "" }, { "docid": "f2d9d9da557001eef808a05e0b568d5b", "score": "0.61665237", "text": "def loop(self):\n (options, args) = self.parser.parse_known_args()\n self._suppress = self._overwrite = options.force\n\n base_dir = (directories.HADOOP_BASE if self._use_hadoop\n else directories.NFS_BASE)\n\n for a_script in self._script_list:\n for a_type in self._data_list:\n for a_gen in self._generator_list:\n for a_run in self._run_list:\n job_config = dict(base = base_dir,\n prod = self._prod,\n script = a_script,\n dtype = a_type,\n gen = a_gen,\n run = a_run,\n ftype = self._ftype,\n paramfile = self._paramfile,\n microtree = self._microtree,\n weights_store = self._weights_store)\n\n data_dir_template = self._cmd_builder.data_dir_template\n data_dir = data_dir_template.format(**job_config)\n\n if not os.path.exists(data_dir):\n print data_dir, 'does not exist!'\n else:\n subdirs 
= natsorted(os.walk(data_dir).next()[1])\n # check if we want to process full sample or not\n if options.dev:\n subdirs = subdirs[0:min(len(subdirs), 4)]\n\n for a_subdir in subdirs:\n job_config['subdir'] = a_subdir\n job_config['inp'] = os.path.join(data_dir,\n a_subdir)\n\n self._append(job_config)", "title": "" }, { "docid": "c8e4e3dbe2fbf41e7f83b5d666f7ac2f", "score": "0.61547005", "text": "def _sendbatch(self):\n for idx in xrange(15):\n self._sendjob(idx)", "title": "" }, { "docid": "983b8f89488547fb79a95512e13d0c85", "score": "0.61520976", "text": "def get_next_job():\n current_app.log.debug(\"Worker requesting job batch, request: %s\", pformat(request.data))\n require_attrs('types')\n# Job = request.db.tables.Job # pylint: disable=invalid-name\n# JobElement = request.db.tables.JobElement # pylint: disable=invalid-name\n alg_name = request.data.get('algorithm', 'BY_NUMBER').upper()\n elements = Algorithm[alg_name](**request.data.get('algorithm.args', {}))\n# elements = JobElement.query.filter(JobElement.status.in_((JobStatus.NEW, JobStatus.FAILED)),\n# JobElement.attempts < JobElement.max_tries)\\\n# .join(JobElement.job)\\\n# .filter(Job.type.in_(request.data['types']))\\\n# .order_by(Job.priority)\\\n# .order_by(Job.id)\\\n# .limit(10)\\\n# .all()\n if not elements:\n abort(404, description=\"No work to be done.\")\n\n work = []\n for job, elements_group in groupby(elements, key=attrgetter('job')):\n elements = []\n# for element in list(elements_group):\n for element in iter(elements_group):\n element.status = JobStatus.SUBMITTED\n element.update() # should be a bulk update\n element_dict = element.asdict()\n element_dict['token'] = request.token_svc.issue(\"%d.%d\" % (job.id, element.id))\n elements.append(element_dict)\n job.status = max(ele.status for ele in job.elements)\n job_dict = job.asdict()\n job_dict['elements'] = elements\n work.append(job_dict)\n job.update()\n current_app.log.debug(\"Sending worker job batch: %s\", pformat(work))\n return jsonify(work)", "title": "" }, { "docid": "ef34c4a152dc36ee7a142c4b2b424faa", "score": "0.6149765", "text": "def process_job(self, process, jobs):\n raise NotImplementedError", "title": "" }, { "docid": "6d3ec88f31aac58edffe3f7a4602ba32", "score": "0.6143488", "text": "async def view_jobs(self, ctx):\n s = \"**Jobs**\\n\"\n for n, job in enumerate(self.jobs):\n s += f\"{n+1}: {str(job)}\\n\"\n\n await ctx.send(s)", "title": "" }, { "docid": "7255b43adc4bbedf409715a932774468", "score": "0.6142319", "text": "def inner_iterable():\n if current_job:\n set_current_job(current_job)\n\n for x in iterable:\n yield x\n\n if current_job:\n set_current_job(None)", "title": "" }, { "docid": "a7fbf9fa77642a29515af0b37a6ed56b", "score": "0.61351603", "text": "def run(self):\n self.connect()\n for tweet in self.fetch(since_id=self.last_job.get('max_id', None)):\n tweet = self.filter(tweet)\n if tweet:\n yield self.format(tweet)", "title": "" }, { "docid": "15fc9e819d7f3f6c8cb43e7c8715a004", "score": "0.61319464", "text": "def start_fetching(self):\n # ToDo: This should be parallelized, as it sometimes can take long to fetch the results from a provider.\n job_directory = 'openeo-sentinel-reference-jobs/'\n if self.mock_mode:\n job_directory = 'mock-examples'\n # In the future, the regions layer could be removed,\n # it is not factual information but just a pattern for myself\n regions = [f.path for f in os.scandir(job_directory) if f.is_dir()]\n\n for region in regions:\n jobs = [f.path for f in os.scandir(region) if f.is_dir()]\n for job in 
jobs:\n for provider in self.backendProviders['providers']:\n\n if provider.get('local') is None:\n user = provider['credentials']['user']\n password = provider['credentials']['password']\n\n process_graph_folder = os.path.join(job, provider['name'])\n # ToDo: Think whether the directory should contain only one process graph anyway\n if os.path.exists(process_graph_folder) is False:\n continue\n\n process_graphs = [f for f in os.listdir(process_graph_folder)\n if os.path.isfile(os.path.join(process_graph_folder, f))]\n\n # Filter out all files in the folder that are not JSON\n process_graphs = [fi for fi in process_graphs if fi.endswith(\".json\")]\n\n try:\n path_to_process_graph = os.path.join(process_graph_folder, process_graphs[0])\n except Exception:\n print(\"Job not configured for provider:\", provider)\n continue\n\n path_to_validation_rules = os.path.join(job, 'validation-rules.json')\n with open(path_to_process_graph, 'r') as process_graph:\n\n process_graph = json.loads(process_graph.read())\n save_path = process_graph_folder.replace(job_directory, 'reports/')\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n job_identifier = region.split('/')[1] + '-' + job.split('/')[-1]\n # Only run selected job that is passed by the CLI argument\n if self.selectedJob:\n if self.selectedJob != job_identifier:\n continue\n\n if provider.get('local') is True:\n file_path = process_graph['file']\n else:\n file_path = save_path + '/' + job_identifier + '.' + 'png'\n\n download_successful = False\n # start stopwatch\n start_time = time.time()\n time.clock()\n backend_job_id = ''\n\n if self.offline_mode is not True and provider.get('local') is not True:\n try:\n con = openeo.connect(provider['baseURL'], auth_type=BearerAuth,\n auth_options={\"username\": user, \"password\": password})\n except Exception as e:\n # ToDo: Log if a connection to a provider fails\n print(e)\n print('Connection to provider failed')\n download_successful = False\n\n if provider['name'] == 'EURAC':\n try:\n print('Downloading synchronously')\n con.download(process_graph, 0, file_path, {'format': 'PNG'})\n download_successful = True\n except Exception as e:\n print(e)\n download_successful = False\n else:\n try:\n openEO_job = con.create_job(process_graph, output_format='PNG')\n openEO_job.start_job()\n print(openEO_job.describe_job())\n backend_job_id = openEO_job.describe_job().get('job_id')\n while download_successful is not True:\n try:\n openEO_job.download_results(file_path)\n print(openEO_job.describe_job())\n download_successful = True\n print(openEO_job.delete_job())\n except ConnectionAbortedError:\n download_successful = False\n print('Retrying to download file in 10 seconds')\n print(openEO_job.describe_job())\n time.sleep(10)\n except ConnectionAbortedError as e:\n # Not authorized etc.\n print(e)\n except Exception as e:\n print(e)\n download_successful = False\n # stopwatch end\n end_time = time.time()\n # ToDo: Time_to_result could be improved by measuring the time it takes until the images are\n # ready to be downloaded, instead of measuring when they finished downloading\n time_to_result = end_time - start_time\n\n if download_successful or provider.get('local') is True:\n print('Downloading results took ' + str(time_to_result) + ' seconds')\n else:\n print('No download: ' + provider['name'] + ' , our own job-id: ' + job_identifier)\n print('Backend Job ID: ' + backend_job_id)\n if provider['name'] is 'GEE':\n print(openEO_job.delete_job())\n time_to_result = float(\"inf\")\n\n if 
self.offline_mode or provider.get('local') is True:\n download_successful = True\n\n self._jobs_names.append(job_identifier)\n\n details = {\n 'backend': provider['name'],\n 'job': job_identifier,\n 'file': file_path,\n 'validation-rules-path': path_to_validation_rules,\n 'provider_job_id': backend_job_id,\n 'time_to_result': time_to_result,\n 'download_successful': download_successful\n }\n\n self.results.append(details)\n\n self._jobs_names = set(self._jobs_names)", "title": "" }, { "docid": "73b8dfa46de0944aee48e905b46e25c5", "score": "0.61262476", "text": "def _request(self):\n if len(self._job_list) < 1:\n # Should we alert? This /is/ pretty obvious.\n return\n request_body = \"[ {0} ]\".format(\n ','.join(job.request() for job in self._job_list))\n responses = self._server._run_request(request_body)\n del self._job_list[:]\n if not responses:\n responses = []\n return MultiCallIterator(responses)", "title": "" }, { "docid": "0cefdf8e7123eb4ac958058dc1f91694", "score": "0.6118375", "text": "def list_jobs(MaxResults=None, NextToken=None, Order=None, Queue=None, Status=None):\n pass", "title": "" }, { "docid": "fbfba9257cc96cfd235bc4e148f8f7cb", "score": "0.6106576", "text": "def scrape_jobs(self, domain):\n JOB_XPATH = (\".//div[contains(@class, 'content')]\"\n \"//p[contains(@class, 'row')]\")\n MAP_TAG_XPATH = \".//span[@class='maptag']\"\n JOB_LINK_XPATH = \".//a[@class='hdrlnk']\"\n MAX_RESULTS = 100\n\n payload = {\"employment_type\": \"1\", \"s\": 1}\n\n endpoint = os.path.join(domain, \"search\", \"jjj\")\n logger.debug(\"Hitting jobs endpoint %s w/payload %s\", endpoint, payload)\n\n res = retry(functools.partial(requests.get, endpoint, params=payload), SLEEP_TIME)\n\n while True:\n try:\n # parse results\n doc = html.fromstring(res.text)\n\n listings = doc.xpath(JOB_XPATH)\n if not listings:\n logger.debug(\"Paginated scraping complete\")\n break\n\n logger.debug(\"%s listings found\", len(listings))\n for listing in listings:\n # this signifies we can get the lat & long when drilling\n if listing.find(MAP_TAG_XPATH) is not None:\n anchor_elem = listing.find(JOB_LINK_XPATH)\n link = \"{}{}\".format(domain, anchor_elem.get(\"href\")[1:]) # slice off the leading slash\n\n yield link,\n except Exception:\n logger.exception(\"Encountered an issue while scraping jobs\", exc_info=True)\n finally:\n if len(listings) < MAX_RESULTS:\n logger.debug(\"Paginated scraping complete\")\n break\n\n payload[\"s\"] += MAX_RESULTS\n\n logger.debug(\"Sleeping Zzz\")\n time.sleep(SLEEP_TIME) # sleep to avoid being blacklisted\n logger.debug(\"Hitting jobs endpoint %s w/payload %s\", endpoint, payload)\n res = retry(functools.partial(requests.get, endpoint, params=payload), SLEEP_TIME)", "title": "" }, { "docid": "658e04ee5b0c4d26d344baf0840c8734", "score": "0.6102724", "text": "def run_parallel(self):\n aArgs = self.hmodel.allocModel.getSerializableParamsForLocalStep()\n oArgs = self.hmodel.obsModel.getSerializableParamsForLocalStep()\n\n # MAP!\n # Create several tasks (one per worker) and add to job queue\n for jobInfo in sliceGenerator(self.nDoc, self.nWorkers,\n aArgs=aArgs, oArgs=oArgs):\n self.JobQ.put(jobInfo)\n\n self.JobQ.join()\n # REDUCE!\n # Aggregate results across across all workers\n # Avoids JobQueue.join() call (which blocks execution)\n # Instead lets main process aggregate all results as they come in.\n nTaskDone = 0\n while nTaskDone < self.nWorkers:\n if not self.ResultQ.empty():\n SSchunk = self.ResultQ.get()\n if nTaskDone == 0:\n SS = SSchunk\n else:\n SS += SSchunk\n 
nTaskDone += 1\n # At this point all jobs are marked complete.\n return SS", "title": "" }, { "docid": "8a3ffca221bf70e1634c7748928b897d", "score": "0.60865027", "text": "def jobs(self):\n return Jobs(self.client, self.base_url)", "title": "" }, { "docid": "f821a402054984e3f6332185b8c86ff3", "score": "0.6073604", "text": "def __load_jobs(self):\n\n start = datetime.now()\n with self.conn.cursor() as cursor:\n for row in cursor.execute(self.SELECT).fetchall():\n try:\n job = self.Job(self, row)\n job.register()\n except Exception:\n self.logger.exception(\"malformed job %s\", tuple(row))\n elapsed = datetime.now() - start\n self.logger.info(\"registered %d jobs in %s\", len(self.jobs), elapsed)", "title": "" }, { "docid": "3c7236c90624eaf86455730795780374", "score": "0.6067999", "text": "def set_jobs(self):\n self.logger.info(\"Reading in the list of jobs from configurations\")\n\n jobs_from_config: Dict = self.config.get(JOBS)\n if jobs_from_config:\n self.jobs = [\n {\n JOB_NAME: name,\n JOB_ORIGIN: nodes[JOB_ORIGIN],\n JOB_DESTINATION: nodes[JOB_DESTINATION]\n }\n for name, nodes in jobs_from_config.items()\n ]\n else:\n self.logger.warning(\"There are no jobs to process in the config file.\")\n\n self.successful_jobs = []\n self.failed_jobs = []", "title": "" }, { "docid": "3917028afe050656ce9a7d96f832f5bb", "score": "0.60642254", "text": "def __iter__(self):\n for _ in self.files:\n dataset = self.get_next_dataset()\n for i in range(self.iterations_per_file):\n yield from iterate_dataset(dataset)", "title": "" }, { "docid": "5bf29fbe7a43ec36f685db95772a5172", "score": "0.60629106", "text": "def get_jobs():\n\n with _job_store:\n job_list = _job_store.list_jobs()\n return [_internal_job_to_rest_job(job) for job in job_list]", "title": "" }, { "docid": "6331f519b137beb123c37807e2f1d3eb", "score": "0.6059905", "text": "def get_blocking_jobs(self):", "title": "" }, { "docid": "e5aa5762924b550521c3defc81111b7f", "score": "0.6059764", "text": "def run(self):\n # scaffold dicts\n for j in self.work_deque:\n self.eta_estimates[j.key] = []\n self.results[j.key] = []\n\n errors = False\n start_time = time.time() # rough overall timer, not used for actual results\n\n while True:\n jobs_left = len(self)\n print(\"%s%d jobs left in scheduler queue%s\" %\n (ANSI_CYAN, jobs_left, ANSI_RESET))\n\n if jobs_left == 0:\n break\n\n tfmt = self.get_overall_time_estimate_formatter()\n print(\"{}{:<25s}: {}{}\".format(ANSI_CYAN,\n \"Current time\",\n tfmt.start_str,\n ANSI_RESET))\n\n\n print(\"{}{:<25s}: {} ({} from now){}\".format(ANSI_CYAN,\n \"Estimated completion\",\n tfmt.finish_str,\n tfmt.delta_str,\n ANSI_RESET))\n\n if (self.eta_avail is not None) and (self.jobs_done < self.eta_avail):\n print(\"{}Jobs until ETA known: {}{}\".format(ANSI_CYAN,\n self.jobs_until_eta_known(),\n ANSI_RESET))\n job = self.next_job()\n exec_result = job.run()\n\n if not exec_result and not BENCH_DRYRUN:\n errors = True\n\n self.results[job.key].append(exec_result)\n\n # We dump the json after each experiment so we can monitor the\n # json file mid-run. It is overwritten each time.\n dump_json(self.config_file, self.out_file, self.results)\n\n self.jobs_done += 1\n\n end_time = time.time() # rough overall timer, not used for actual results\n\n print(\"Done: Results dumped to %s\" % self.out_file)\n if errors:\n print(\"%s ERRORS OCCURRED! 
READ THE LOG!%s\" % (ANSI_RED, ANSI_RESET))\n\n print(\"Completed in (roughly) %f seconds\" % (end_time - start_time))", "title": "" }, { "docid": "34c6aa5c072308c9b2a544496ae10dc5", "score": "0.60578424", "text": "def getoutput(self, jobs):\n\n logging.debug(\"Staring gLite getoutput method..\")\n\n command = \"glite-wms-job-output --json --noint\"\n outdiropt = \"--dir\"\n\n workqueued = {}\n currentwork = len(workqueued)\n\n completedJobs = []\n failedJobs = []\n abortedJobs = []\n canceledJob = []\n\n retrievedproxy = {}\n\n ## Start up processes\n input = multiprocessing.Queue()\n result = multiprocessing.Queue()\n self.start(input, result)\n\n #creates chunks of work per multi-processes\n # TODO: evaluate if passing just one job per work is too much overhead\n\n for jj in jobs:\n ownersandbox = jj['userdn']+\":\"+jj['usergroup']+\":\"+jj['userrole']\n valid, ownerproxy = (False, None)\n exportproxy = 'echo $X509_USER_PROXY'\n proxymsg = ''\n if ownersandbox in retrievedproxy:\n valid = True\n ownerproxy = retrievedproxy[ownersandbox]\n else:\n valid, ownerproxy, proxymsg = self.validateProxy( ownersandbox )\n\n if valid:\n retrievedproxy[ownersandbox] = ownerproxy\n exportproxy = \"export X509_USER_PROXY=%s\" % ownerproxy\n else:\n msg = \"Problem retrieving user proxy, or user proxy \" + \\\n \"expired '%s'.\\n\" % ownersandbox\n msg += 'Detailed error: \"%s\".' % proxymsg\n logging.error( msg )\n failedJobs.append( jj )\n self.fakeReport(\"GetOutputFailure\", msg, -1, jj)\n continue\n\n logging.info(\"Processing job %s \" %str(jj['status']))\n\n if jj['status'] not in ['Done']:\n if jj['status'] in ['Aborted', 'Purged']:\n abortedJobs.append( jj )\n elif jj['status'] in ['Cancelled by user', 'Cancelled']:\n canceledJob.append( jj )\n continue\n\n cmd = '%s %s %s %s' \\\n % (command, outdiropt, jj['cache_dir'], jj['gridid'])\n logging.debug(\"Enqueuing getoutput for job %i\" % jj['jobid'] )\n workqueued[currentwork] = jj['jobid']\n completecmd = 'source %s && export LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH && %s && %s' % (self.setupScript, self.manualenvprefix, exportproxy, cmd)\n input.put((currentwork, completecmd, 'output'))\n currentwork += 1\n\n # Now we should have sent all jobs to be submitted\n # Going to do the rest of it now\n logging.debug(\"Waiting for %i works to finish...\" % len(workqueued))\n for n in xrange(len(workqueued)):\n logging.debug(\"Waiting for work number %i to finish..\" % n)\n res = None\n try:\n res = result.get(block = True, timeout = self.basetimeout)\n except Queue.Empty:\n logging.error(\"Timeout retrieving result %i out of %i\" % (n, len(workqueued)) )\n continue\n jsout = res['jsout']\n error = res['stderr']\n exit = res['exit']\n workid = res['workid']\n # Check error\n if exit != 0:\n msg = 'Error executing get-output:\\n\\texit code: %i\\n' % exit\n msg += '\\tstderr: %s\\n\\tjson: %s' % (error, str(jsout.strip()))\n logging.error( msg )\n failedJobs.append(workqueued[workid])\n for jj in jobs:\n if jj['jobid'] == workqueued[workid]:\n self.fakeReport(\"GetOutputFailure\", msg, -1, jj)\n break\n continue\n else:\n if jsout is not None:\n jobid = workqueued[workid]\n if 'result' in jsout and jsout['result'] == 'success':\n completedJobs.append(jobid)\n else:\n failedJobs.append(jobid)\n for jj in jobs:\n if jj['jobid'] == jobid:\n self.fakeReport(\"GetOutputFailure\", msg, -1, jj)\n break\n\n ## Shut down processes\n logging.debug(\"About to close the subprocesses...\")\n self.close(input, result)\n\n return completedJobs, failedJobs, abortedJobs, 
canceledJob", "title": "" }, { "docid": "135e046aecda85533f6d0ff130ac6bc9", "score": "0.6053589", "text": "def pass_assign_jobs(self) -> None:\n servers = self.firesimtopol.get_dfs_order_servers()\n for i in range(len(servers)):\n servers[i].assign_job(self.workload.get_job(i))", "title": "" }, { "docid": "bc340dfa7f53ce5e1d57f9762099ebb4", "score": "0.6046942", "text": "def test_list_jobs(self):\n pass", "title": "" }, { "docid": "7f0e297812a244201ae6d6cf5d8f23f5", "score": "0.6035402", "text": "def jobs(program):\n for offset, status, job in program.disco.joblist():\n print \"%s\\t%s\" % (job, status) if program.options.status else job", "title": "" }, { "docid": "7b7f59a4631ac97552fe1341d38b5ea1", "score": "0.6034056", "text": "def queue_jobs(self):\n self.queue = []\n for _, v in self.jobs.items():\n dependent_queries = []\n dependent_queries = set(\n v['dependent_query']) - set(self.processed_jobs)\n if (v['is_finished'] is False) and (not dependent_queries):\n self.queue.append(v['query_id'])", "title": "" }, { "docid": "51fd81dff1760ce73a04017aec5a114d", "score": "0.6000066", "text": "def complete(self, jobs):\n # Run your command in parallel\n # This sends the outputWorker function\n # Whatever's in gridid as an argument\n # And at the end waits for a return\n\n # NOTE: This is a blocking function\n\n\n #input = [x['gridid'] for x in jobs]\n\n #results = self.pool.map(outputWorker, input,\n # chunksize = self.chunksize)\n\n #return results\n completed, failed, aborted, canceled = self.getoutput(jobs)\n if len( aborted ) > 0:\n abortcompl, abortfail = self.postMortem( jobs = aborted )\n if len( canceled ) > 0:\n for jj in canceled:\n self.fakeReport(\"JobKilled\", \"Job has been canceled\", -1, jj)\n\n return", "title": "" }, { "docid": "c43f018754fee1200f33299ea6eecd65", "score": "0.5999994", "text": "def jobSchedulerIter(self, aCursor):\n logger.debug(\"balanced jobSchedulerIter: compiling list of active processors\")\n try:\n sql = \"\"\"select\n p.id,\n count(j.owner)\n from\n processors p left join jobs j on p.id = j.owner\n and p.lastSeenDateTime > now() - interval %s\n and j.success is null\n group by p.id\"\"\"\n try:\n aCursor.execute(sql, (self.config.processorCheckInTime,) )\n logger.debug(\"sql succeeded\")\n aCursor.connection.commit()\n except psycopg2.ProgrammingError:\n logger.debug(\"some other database transaction failed and didn't close properly. 
Roll it back and try to continue.\")\n try:\n aCursor.connection.rollback()\n aCursor.execute(sql)\n except:\n logger.debug(\"sql failed for the 2nd time - quit\")\n self.quit = True\n aCursor.connection.rollback()\n socorro.lib.util.reportExceptionAndAbort(logger)\n listOfProcessorIds = [[aRow[0], aRow[1]] for aRow in aCursor.fetchall()] #processorId, numberOfAssignedJobs\n logger.debug(\"listOfProcessorIds: %s\", str(listOfProcessorIds))\n if not listOfProcessorIds:\n raise Monitor.NoProcessorsRegisteredException(\"There are no processors registered\")\n while True:\n logger.debug(\"sort the list of (processorId, numberOfAssignedJobs) pairs\")\n listOfProcessorIds.sort(Monitor.compareSecondOfSequence)\n # the processor with the fewest jobs is about to be assigned a new job, so increment its count\n listOfProcessorIds[0][1] += 1\n logger.debug(\"yield the processorId which had the fewest jobs: %d\", listOfProcessorIds[0][0])\n yield listOfProcessorIds[0][0]\n except Monitor.NoProcessorsRegisteredException:\n self.quit = True\n socorro.lib.util.reportExceptionAndAbort(logger)", "title": "" }, { "docid": "66a0a7b5550c3ca3799738d89221fc87", "score": "0.5983334", "text": "async def manage_job_processing(self):\n pass", "title": "" }, { "docid": "6aca7debf02f67053d92346cc4681eba", "score": "0.5977915", "text": "def create_jobs(self):\n tasks = multiprocessing.Queue()\n returncodes = multiprocessing.Queue()\n commands = [[self.script, \"-d\", self.demodir, \"-l\", \"1\", \"2\"]] * 5\n for cnt, cmd in enumerate(commands):\n job_name = \"job_{0}\".format(cnt)\n tasks.put((job_name, cmd))\n tasks.put(FLAG_ALL_DONE)\n\n return tasks, returncodes", "title": "" }, { "docid": "10a1472e4ec60c061bbd6061a36e1dff", "score": "0.5970682", "text": "def run_loop(self):\n\n for message in self.messaging_queue:\n message, topic = self.process_message(message)\n subscribers = self.get_subscribers_for_topic(topic)\n self.push_message_to_subscribers(subscribers, message)", "title": "" }, { "docid": "fff59b6fd654203f22189c02d44caf2a", "score": "0.5962272", "text": "def instance_some_jobs(self):\n self.check_invariants()\n\n n = 0\n reasons = {}\n while True:\n if not self.ready_todo:\n reasons['jobs'] = 'no jobs ready'\n break\n\n if not self.can_accept_job(reasons):\n break\n\n # todo: add task priority\n job_id = self.next_job()\n assert job_id in self.ready_todo\n\n self.log('chosen next_job', job_id=job_id)\n\n self.start_job(job_id)\n n += 1\n\n # print('cur %d Instanced %d, %s' % (len(self.processing2result), n, reasons))\n\n self.check_invariants()\n return reasons", "title": "" }, { "docid": "a8e9bccae0dbf6937dc79c4280d06bbd", "score": "0.59603786", "text": "def get_all_active_jobs(self) -> List[Job]:\n pass", "title": "" }, { "docid": "ae5534284781576871ed254653f44c1f", "score": "0.59534585", "text": "def list_jobs(self):\n return self._host.get_jobs()", "title": "" }, { "docid": "1dde25dfcdf6875ca650776f10dd469f", "score": "0.59513956", "text": "def create_jobs():\n for x in JOB_NUMBER:\n queue.put(x)\n\n queue.join()", "title": "" }, { "docid": "95c29c9f99193a5ac6a91fbc21e35703", "score": "0.59480786", "text": "def _parallel_for(func):\n @wraps(func)\n def run_jobs(*args, **kwargs):\n timer = Timer()\n args_set = expand_parameters(args)\n # Divide args among cores\n num_servers = len(cores.server_dicts)\n chunk_size = int(len(args_set) / num_servers)\n bytes_sent = 0\n bytes_received = 0\n # Create a package of the function and args for each core\n for i, sd in enumerate(cores.server_dicts):\n 
timer.start('distribution')\n data_file = '_more_cores_{}.pkl'.format(i)\n with open(data_file, 'wb') as f:\n # print(chunk_size * (i + 1) - chunk_size * i)\n obj = (\n func, args_set[chunk_size * i:chunk_size * (i + 1)]\n )\n # print(dill.detect.baditems(func))\n # print(dill.detect.badobjects(func))\n # print(dill.detect.badtypes(func))\n dill.dump(obj, f, recurse=True)\n cores.connect(i)\n cores.send_file(data_file, 'data_{}.pkl'.format(i))\n bytes_sent += os.path.getsize(data_file)\n os.remove(data_file)\n # cmd = 'nohup bash run_job {0} {1} {2}'.format(\n cmd = 'nohup bash run_job {0} {1} {2} > nohup_{1}.out &'.format(\n cores.requirements['python'], i, sd['cores']\n )\n print('Sending', cmd)\n ssh_stdin, ssh_stdout, ssh_stderr = cores.send_command(cmd)\n for line in ssh_stdout.readlines():\n print(line)\n for line in ssh_stderr.readlines():\n print(line)\n cores.disconnect()\n timer.record()\n # Wait for all jobs to complete\n remaining = list(range(num_servers))\n return_data = [None] * len(args_set)\n timer.start('run_and_check')\n while len(remaining) > 0:\n for r in list(remaining):\n time.sleep(DELAY_TIME)\n print('checking', r)\n # Check for existence of file\n cores.connect(r)\n if cores.is_job_complete():\n timer.record()\n timer.start('collection')\n remaining.remove(r)\n # Get function return values\n results_file = 'results_{}.pkl'.format(r)\n cores.get_file(results_file, results_file)\n with open(results_file, 'rb') as f:\n for i, result in enumerate(pickle.load(f)):\n return_data[chunk_size * r + i] = result\n bytes_received += os.path.getsize(results_file)\n os.remove(results_file)\n timer.record()\n timer.start('run_and_check')\n cores.disconnect()\n timer.record()\n print(timer)\n print('bytes sent:', bytes_sent, 'bytes received:', bytes_received)\n return return_data\n\n return run_jobs", "title": "" }, { "docid": "bd4a64595866f5086597d88f0c86ce09", "score": "0.5942457", "text": "def get_all_jobs(self, after: datetime = datetime(1970, 1, 1)) -> List[Job]:\n raise NotImplementedError(\"This feature is not yet implemented\")", "title": "" }, { "docid": "d0ab1f25bcb7ab19c3c050068a0f017e", "score": "0.5928526", "text": "def _thread_run(self):\n while True:\n try:\n job = self._queue.get(timeout=self._queue_timeout)\n try:\n logging.debug('Running job.')\n result = job[0](*job[1], **job[2])\n if self._queue_out:\n self._queue_out.put(result, timeout=self._queue_timeout)\n finally:\n self._queue.task_done()\n except Empty:\n logging.debug('No jobs, quitting.')\n break", "title": "" }, { "docid": "c2f549fc665979a808700bbe8ad0b648", "score": "0.59179646", "text": "def list_jobs(self, deleted: bool = False) -> Iterable[SpallocJob]:", "title": "" }, { "docid": "564c887c4003e2e0f74b9a9206abc337", "score": "0.5912629", "text": "def run_problem_loop(self, jobs):\n all_sel = self.pool.Selection_all()\n # mark all solvable as multiversion\n # this allow to create a list of packages \n # that can satisfy many profiles \n self.jobs = all_sel.jobs(solv.Job.SOLVER_MULTIVERSION) + jobs\n\n changed = True\n while changed:\n solver = super().run_problem_loop(self.jobs)\n self.build_job_cache()\n trans = solver.transaction()\n solvables = []\n\n for cl in trans.classify(solv.Transaction.SOLVER_TRANSACTION_SHOW_OBSOLETES | \n solv.Transaction.SOLVER_TRANSACTION_OBSOLETE_IS_UPGRADE):\n if cl.type == solv.Transaction.SOLVER_TRANSACTION_INSTALL:\n pass\n elif cl.type == solv.Transaction.SOLVER_TRANSACTION_REINSTALLED:\n pass\n elif cl.type == solv.Transaction.SOLVER_TRANSACTION_DOWNGRADED:\n 
pass\n elif cl.type == solv.Transaction.SOLVER_TRANSACTION_CHANGED:\n pass\n elif cl.type == solv.Transaction.SOLVER_TRANSACTION_UPGRADED:\n pass\n elif cl.type == solv.Transaction.SOLVER_TRANSACTION_VENDORCHANGE:\n pass\n elif cl.type == solv.Transaction.SOLVER_TRANSACTION_ARCHCHANGE:\n pass\n else:\n continue\n \n solvables += cl.solvables()\n #import pdb; pdb.set_trace() \n changed = self.__align_multiversion_pacakges(solvables) \n return solver", "title": "" }, { "docid": "72c29168320a953c397856fb8990d7e1", "score": "0.5908026", "text": "def getJobs(self):\n\t\treturn self.root.findall(\"./job\")", "title": "" }, { "docid": "790f1246f11602f93da07df8af9c5bcc", "score": "0.59080124", "text": "def __iter__(self):\n for r in self.results:\n yield r", "title": "" }, { "docid": "a9b2013e14902ef12d33c304c60a38ef", "score": "0.59063184", "text": "def run(self):\n\n # Add workers\n workers = [asyncio.Task(self._work())\n for _ in range(self.max_tasks)]\n\n # Wait content of workers ends\n yield from self.q.join()\n\n for w in workers:\n w.cancel()", "title": "" }, { "docid": "d1478bb94066da9b2a86d80eaa3f1034", "score": "0.59042895", "text": "def run():\n while _job_registry:\n tick()", "title": "" }, { "docid": "8e9479af76e51e3010ceb541f6ef2623", "score": "0.58989966", "text": "def __refresh_jobs(self):\n\n start = datetime.now()\n found = set()\n with self.conn.cursor() as cursor:\n for row in cursor.execute(self.SELECT).fetchall():\n try:\n job = self.Job(self, row)\n found.add(job.id)\n job.register()\n except Exception:\n self.logger.exception(\"malformed job %s\", tuple(row))\n for id in set(self.jobs) - found:\n job = self.jobs[id]\n self.logger.info(\"Job %r dropped\", job.name)\n self.scheduler.remove_job(id)\n del self.jobs[id]\n elapsed = datetime.now() - start\n self.logger.debug(\"jobs refreshed in %s\", elapsed)", "title": "" }, { "docid": "804af9441f90883ee62887cf76a191cd", "score": "0.58934015", "text": "def jobs(self):\n return self.slurm_info.jobs", "title": "" }, { "docid": "d7c872c0f7335f16c0ad78ac1037815c", "score": "0.5891497", "text": "def run(self):\n # split by maximum number of submission so we do it in batches\n adaptive_l = []\n batch = [\n self.worker_list[i:i + self.queue_limit]\n for i in range(0, len(self.worker_list), self.queue_limit)\n ]\n total_batches = len(batch)\n try:\n for batch_num, worker_list in enumerate(batch, start=1):\n log.info(f\"> Running batch {batch_num}/{total_batches}\")\n start = time.time()\n for worker in worker_list:\n worker.run()\n\n # check if those finished\n completed = False\n while not completed:\n for worker in worker_list:\n worker.update_status()\n if worker.job_status != \"finished\":\n log.info(\n f\">> {worker.job_fname.name}\"\n f\" {worker.job_status}\"\n )\n\n completed_count = sum(\n w.job_status == \"finished\" for w in worker_list\n )\n\n failed_count = sum(\n w.job_status == \"failed\" for w in worker_list\n )\n\n if completed_count + failed_count == len(worker_list):\n completed = True\n end = time.time()\n elapsed = end - start\n adaptive_l.append(elapsed)\n else:\n if not adaptive_l:\n # This is the first run, use pre-defined waits\n if len(worker_list) < 10:\n sleep_timer = 10\n elif len(worker_list) < 50:\n sleep_timer = 30\n else:\n sleep_timer = 60\n else:\n # We already know how long it took, use the average\n sleep_timer = round(\n sum(adaptive_l) / len(adaptive_l)\n )\n log.info(f\">> Waiting... 
({sleep_timer:.2f}s)\")\n time.sleep(sleep_timer)\n\n per = (float(batch_num) / float(total_batches)) * 100\n log.info(\n f\">> Batch {batch_num}/{total_batches} took \"\n f\"{elapsed:.2f}s to finish, {per:.2f}% complete\")\n\n except KeyboardInterrupt as err:\n self.terminate()\n raise err", "title": "" }, { "docid": "86a33c5bad1728a239774e4c052059e5", "score": "0.5888002", "text": "def get_jobs(self):\n pipeline_obj = Pipeline.objects.get(id=self.get_pipeline_id())\n inputs = self.create_input()\n name = \"DEMO JOB\"\n job = dict(app=pipeline_obj.id, inputs=inputs, name=name, tags={})\n serialized_run = RunCreator(**job)\n jobs = [serialized_run]\n return jobs", "title": "" }, { "docid": "52f9fe9b06e704b3582b743a183770c9", "score": "0.5884918", "text": "def fetch(self):\n\n logger.info(\"Looking for projects at url '%s'\", self.url)\n\n self._purge_cache_queue()\n nbuilds = 0 # number of builds processed\n njobs = 0 # number of jobs processed\n\n projects = json.loads(self.client.get_jobs())\n jobs = projects['jobs']\n\n for job in jobs:\n njobs += 1\n logger.debug(\"Adding builds from %s (%i/%i)\" % (job['url'], njobs, len(jobs)))\n raw_builds = self.client.get_builds(job['name'])\n self._push_cache_queue(raw_builds)\n self._flush_cache_queue()\n builds = json.loads(raw_builds)\n builds = builds['builds']\n for build in builds:\n yield build\n nbuilds += 1\n\n logger.info(\"Total number of jobs: %i\" % (njobs))\n logger.info(\"Total number of builds: %i\" % (nbuilds))", "title": "" }, { "docid": "513ef3842297e5bbcfa21e2adff9d688", "score": "0.5884058", "text": "def list_jobs():\n return _job_registry.items()", "title": "" }, { "docid": "956d8e2f6f6c53f7d95cfea9c99bf345", "score": "0.58769035", "text": "def scrape_jobs(self, response):\n raise NotImplementedError", "title": "" }, { "docid": "5331825aba1e29c308f84fa62acea795", "score": "0.5870788", "text": "def loopIterator(self):\n for loop in self.loops:\n yield loop", "title": "" }, { "docid": "99f9268ee47f0b52abec2710aa6ce721", "score": "0.5859279", "text": "def iterate_loop(self):\n # Do whatever you need to do, but try to make sure it doesn't take too long\n\n # This line is very important, it keeps the auto reload working\n super().iterate_loop()", "title": "" }, { "docid": "64cc2d8f02f28381bdadad4dee31945a", "score": "0.5855088", "text": "def loadJobs(self):\n self.runningJobs = []\n self.pendingJobs = []\n self.finishedJobs = []\n self.sge.reloadJobs()\n self.runningJobs = self.sge.getRunningJobs()\n self.pendingJobs = self.sge.getPendingJobs()\n self.finishedJobs = self.sge.getFinishedJobs()", "title": "" }, { "docid": "471549ed267be69eeb1daa055d16466f", "score": "0.5849716", "text": "def schedule_jobs(self):\n self.add_jobs_ir_remote()", "title": "" }, { "docid": "5d183e796cdaa4d643eaa7ed925dae56", "score": "0.58360684", "text": "def loop_through_runs(self):\n\n # Track processed runfolders to use later for naming logfiles.\n processed_runfolders = []\n\n # Process any runfolders added to class instance with self.set_runfolders()\n for folder in self.runfolders:\n runfolder_instance = RunfolderProcessor(\n folder, self.now, debug_mode=config.testing\n )\n # Append processed runfolders to tracking list\n if runfolder_instance.quarterback():\n processed_runfolders.append(folder)\n # close down the run folder specific logger handles\n runfolder_instance.loggers.shutdown_logs()\n\n # Add names of any processed runfolders to logfile\n if processed_runfolders:\n original_logfile_path = (\n config.upload_and_setoff_workflow_logfile\n 
+ self.now\n + \"_upload_and_setoff_workflow.log\"\n )\n new_logfile = original_logfile_path.replace(\n self.now, self.now + \"_\" + \"_\".join(processed_runfolders)\n )\n os.rename(original_logfile_path, new_logfile)", "title": "" }, { "docid": "11567510410d4641e4e9a12ce929b240", "score": "0.5834685", "text": "def submit(self, jobs, deps=None):\n if deps is None:\n deps = itertools.repeat([])\n futures = []\n for job, jdeps in zip(jobs, deps):\n job_args = (job.id, job.get_submission_arguments(jdeps))\n futures.append(\n self._submitters.apply_async(\n _submit_job, job_args,\n callback=self._wrap_callback(job)))\n\n results = []\n for future in futures:\n tt, jid, output = future.get()\n t_ep = tt.timestamp()\n logger.info(\"[Proc Time: {} (ep: {})] ---> Submitted job: {}\".format(tt, t_ep, jid))\n results.append((tt, jid, output))\n\n return results", "title": "" }, { "docid": "ad8f88c9dab74e789b859abf6f40cbf4", "score": "0.5828206", "text": "def iterate(self, handle):\n ...", "title": "" }, { "docid": "1121143be06d7bb31a3589f530343ec1", "score": "0.58059657", "text": "def __iter__(self):\n for item in self.schedule:\n yield item", "title": "" }, { "docid": "62a0e7d6712ecd43b1ed9b7eddaffdee", "score": "0.58037925", "text": "def _download_results_from_googlecloud(self):\n threads = []\n logger.info(\"Start downloading files\")\n for t in self.job.tasks:\n thread = threading.Thread(target=self._download_bucket, args=(t, ))\n threads.append(thread)\n thread.start()\n\n self._wait_for_threads_finished(threads, \"Downloading files\")", "title": "" }, { "docid": "bfefc6323cf5ce85f5abecdbbf91cb68", "score": "0.58004576", "text": "def get_jobs(self, **params):\n\n endpoint = \"Jobs.json\"\n return self.request(endpoint, params=params)", "title": "" } ]
d080f5aec8528d6068e0df21d0371990
Get the shaker's target speed in RPM, if set.
[ { "docid": "d925a35697aceb752043a937dbef162f", "score": "0.73180777", "text": "def get_target_speed(self) -> Optional[int]:\n return self._sync_module_hardware.target_speed # type: ignore[no-any-return]", "title": "" } ]
[ { "docid": "70e56cf43a1248fb1d17dec37a3a5e8b", "score": "0.68449944", "text": "def get_speed(self):\n return self.speed", "title": "" }, { "docid": "1ea5984678f409270b4204aef0be6c84", "score": "0.67140687", "text": "def get_speed(self):\r\n return self._speed", "title": "" }, { "docid": "dff8a1b8e88a262a9430430054118fc9", "score": "0.66871446", "text": "def get_speed(self):\n return self._speed", "title": "" }, { "docid": "c97455f5001830b121871cdcd66a524a", "score": "0.6665086", "text": "def rock_speed(self):\n return self.__speed", "title": "" }, { "docid": "f1ff496877924e0422cda3db14b470b0", "score": "0.66385245", "text": "def get_motor_speed(self):\n\t\treturn self._motor_speed", "title": "" }, { "docid": "b4bbd95827ddccb1461aeb068428e753", "score": "0.6633365", "text": "def _get_speed(self):\n return self.client.read_holding_registers(0x0c, 0x01, unit=0x01).registers[0]/10.", "title": "" }, { "docid": "2e9b16868986467324c130e74ca590db", "score": "0.66088176", "text": "def getSpeed(self):\n\t\treturn self._speed", "title": "" }, { "docid": "e572d7c8bc612ee517c3a48b217d2129", "score": "0.6577624", "text": "def getSpeed(self):\n return _osg.Sequence_getSpeed(self)", "title": "" }, { "docid": "caa1c6b150bd913d467eb3c2ca4ddd45", "score": "0.6508636", "text": "def speed(self):\n return self.named.data.sensordata['torso_subtreelinvel'][0]", "title": "" }, { "docid": "2c8e32c192490691e4e9311d89d2e4ff", "score": "0.650493", "text": "def speed(self) -> float:\n if self._speed is None:\n if INCLUDE_MAX_SPEED:\n self._speed = choice(self._latest_speeds + [self._max_speed])\n elif self._latest_speeds:\n self._speed = choice(self._latest_speeds)\n else:\n self._speed = self._max_speed\n\n return self._speed", "title": "" }, { "docid": "74f23db7403decf28e2b0ec8a3d79658", "score": "0.6489949", "text": "def speed(self):\n return self.speed", "title": "" }, { "docid": "b2373ec0267b5bcdca212e96921433c3", "score": "0.6441364", "text": "def get_speed(self):\n try:\n raw_speed = self.connection.query(obd.commands.SPEED)\n # print(str(raw_speed.value).split(\" \")[0])\n raw_speed_2 = int(str(raw_speed.value).split(\" \")[0])\n # print(raw_speed_2)\n if raw_speed_2:\n return raw_speed_2 # non-blocking, returns immediately\n except:\n #print(\"Speed not found\")\n return self.fake_it()", "title": "" }, { "docid": "72a70434cc9c508b16693373127ec30d", "score": "0.6439321", "text": "def getRobotSpeed(self):\n return self.speed", "title": "" }, { "docid": "2683b35a9a19728c83c72278cb2c5b2a", "score": "0.64212954", "text": "def Get(self):\n return self.speed", "title": "" }, { "docid": "13ae9efe54e2a6b8c41a35023328dba9", "score": "0.6401448", "text": "def get_speed(self):\r\n p = self._get_sub_text('speed')\r\n if not p:\r\n return None\r\n else:\r\n try:\r\n return float(p)\r\n except ValueError:\r\n return None", "title": "" }, { "docid": "e9fd8af89b9dbc1d28900ee19844846f", "score": "0.63518363", "text": "def speed(self):\n return self.max_speed / e ** ((self.fuel + self.used_capacity) / self.drone_mass)", "title": "" }, { "docid": "6a28d291621174e7370c8c8913000feb", "score": "0.6291786", "text": "def motor_speed(self) -> int:\n return self.data[\"motor_speed\"]", "title": "" }, { "docid": "b78cb3f869ebdf3e701b0355dab0683c", "score": "0.6267551", "text": "def speed(self):\n return self._ioctl(SPI._IOC_RD_MAX_SPEED_HZ)", "title": "" }, { "docid": "30e10d6c8ea60649701558330e82d81b", "score": "0.6255869", "text": "def speed(self) -> float:\n return self.__speed", "title": "" }, { "docid": 
"ed75daa6805d865c5c486b868fe37129", "score": "0.6194389", "text": "def _get_right_syringe_speed(self) -> Optional[int]:\n if not self.cycle_active_in.get():\n return self.microlab.right.syringe_default_speed.get()\n return self.right_cycle_speed", "title": "" }, { "docid": "f68e37be7e032e3c250cde80316be960", "score": "0.61080104", "text": "def getSpeed(self):\n lws=self.leftWheel.getSpeed()\n rws=self.rightWheel.getSpeed()\n\n return (rws+lws)/2", "title": "" }, { "docid": "4700c987f7c3131b34aed2f6c673de70", "score": "0.6094145", "text": "def get_current_speed(self) -> int:\n return self._sync_module_hardware.speed # type: ignore[no-any-return]", "title": "" }, { "docid": "70ef0dc8a6633dac6ee36df547b91335", "score": "0.60831714", "text": "def speed(self):\n return 0.8", "title": "" }, { "docid": "b45da5ffd6ad5909f1e61a100773562d", "score": "0.6035572", "text": "def getSpeed(self):\n value = self.getRaw()\n if value > self.getMaxPositivePwm():\n return 1.0\n elif value < self.getMinNegativePwm():\n return -1.0\n elif value > self.getMinPositivePwm():\n return float(value - self.getMinPositivePwm()) / self.getPositiveScaleFactor()\n elif value < self.getMaxNegativePwm():\n return float(value - self.getMaxNegativePwm()) / self.getNegativeScaleFactor()\n else:\n return 0.0", "title": "" }, { "docid": "97732962a878abf47fe9bd8a40a40cd0", "score": "0.5982989", "text": "def speed(self):\n\n return -((exp(-self.r_foreign * self.expiry - (self.d1Squared)/2)/(self.underlying * self.volatility * sqrt(self.TWOPI*self.expiry)))/self.underlying) * (1 + self.d1/(self.volatility*sqrt(self.expiry)))", "title": "" }, { "docid": "5f4d724996fff41d2cada4142024c8fb", "score": "0.59778506", "text": "def download_speed(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"download_speed\")", "title": "" }, { "docid": "7a0d065026bf8d88ce5599a5b7e1f735", "score": "0.59714603", "text": "def getSpeed(self):\n\n return self.paddle_speed", "title": "" }, { "docid": "f53fda0d515e49f4ad6d5c5eea9bec46", "score": "0.58883935", "text": "def getSpeed(self):\n return animator.getSpeed()", "title": "" }, { "docid": "46b798c3ee317847765180a19bc2e86b", "score": "0.58870876", "text": "def getSpeedControl(self):\n return self.speedControl", "title": "" }, { "docid": "5f4704fe18baa007b64179308eedc83a", "score": "0.5874346", "text": "def get_car_max_speed(self):\n return self.max_speed", "title": "" }, { "docid": "47cbef6317715b48de3c321095265af8", "score": "0.58709544", "text": "def speed(self):\n\n return self.get_moving_average(0.5)", "title": "" }, { "docid": "8b9575f6e66500d0716f1c22ffd9ee46", "score": "0.5863905", "text": "def speed(self, oldOBD):\n self.serialTX(\"0D\")\n speedValues = self.serialRX()\n if speedValues == -1:\n speed_float = oldOBD[0]\n else:\n speed_hex = speedValues[0]\n speed_float = float(int(\"0x\"+speed_hex, 0))\n \n if config.units == 'metric':\n return speed_float\n else:\n speed_float = speed_float*0.621371\n \n return speed_float", "title": "" }, { "docid": "57806961fa7e3aff5e3f5b2a5e3e8bac", "score": "0.584745", "text": "def fan_speed(self) -> str | None:\n return self.device.fan_speed", "title": "" }, { "docid": "35050b4c2f2262b5d40469db8c85d7c2", "score": "0.5841419", "text": "def get_speed_y(self):\r\n return self.__speed_y", "title": "" }, { "docid": "751b6a351830947a38acc9ddd3e21915", "score": "0.58239925", "text": "def get_right_syringe_speed(self) -> Tuple[Optional[int], int]:\n return self._return_invalid_alarm_when_parameter_is_none(\n 
self.microlab.right.syringe_default_speed.get()\n )", "title": "" }, { "docid": "edd6abcfa14f992609dcb6a93e4c3e70", "score": "0.5812608", "text": "def get_wind_speed(self):\n\n return self.weather_dict[\"wind_speed\"]", "title": "" }, { "docid": "ec62789730ab455971d3e3e782dfa53c", "score": "0.5789829", "text": "def _find_slowest_speed(self, transport):\n return self._find_slowest_transport_name_and_its_speed(transport)[1]", "title": "" }, { "docid": "8de69d99cd202f565fbec0bb19a69a02", "score": "0.5788361", "text": "def movement_speed(self):\n return data.movement_speed[self.tid]", "title": "" }, { "docid": "ce5ceb005364ebb3c2585e49b31bec7a", "score": "0.57688224", "text": "def demand(self):\n\n if self._goal is None:\n self.choose_goal()\n\n # if nearly at goal point, choose the next one\n d = np.linalg.norm(self._veh._x[0:2] - self._goal)\n if d < self._dthresh:\n self.choose_goal()\n # elif d > 2 * self._d_prev:\n # self.choose_goal()\n # self._d_prev = d\n\n speed = self._speed\n\n goal_heading = math.atan2(self._goal[1]-self._veh._x[1], self._goal[0]-self._veh._x[0])\n d_heading = base.angdiff(goal_heading, self._veh._x[2])\n steer = d_heading\n\n print(' ', speed, steer)\n return speed, steer", "title": "" }, { "docid": "55aeb8062418f0e871c9aee33ce5dd3e", "score": "0.5730905", "text": "def get_current_speed(cls) -> int:\n # if is_boosted is False, boosted_text_speed will be multiplied by zero\n return max(cls.default_text_speed, cls.boosted_text_speed * cls.is_boosted)", "title": "" }, { "docid": "bc3f8c8ef34e40366dde2694bc2dd796", "score": "0.5725759", "text": "def getMaxSpeed(self):\n return self.maxSpeed / 5", "title": "" }, { "docid": "ab82e6d7c42f900f5d04cd37e87620e9", "score": "0.5712096", "text": "def _find_fastest_speed(self, transport):\n return self._find_fastest_transport_name_and_its_speed(transport)[1]", "title": "" }, { "docid": "42401c59047a15092d37bf7959b08fd5", "score": "0.57000655", "text": "def rpm(self):\n return self._pulse_counter.rpm", "title": "" }, { "docid": "98fdf50d0dd4d96413304cfad7fe1b94", "score": "0.5686609", "text": "def adjustedSpeed(self):\r\n\t\treturn self.speed() * self.game().timeCompression()", "title": "" }, { "docid": "ef4463a6fae18c3922674c0af65a590e", "score": "0.56810874", "text": "def get_speed(self):\r\n return self.__x_speed, self.__y_speed", "title": "" }, { "docid": "4f3fd8f8d47e9c67223214119b16344c", "score": "0.5680847", "text": "def _get_target_output_power(self):\n return self.__target_output_power", "title": "" }, { "docid": "577a6517d2e8ede149af9e82db0d13e8", "score": "0.566765", "text": "def wind_speed(self):\n return None", "title": "" }, { "docid": "50d30506eb099adf30ab7cbeb12b62de", "score": "0.56639016", "text": "def getMaxSpeed(self):\n return self.maxSpeed", "title": "" }, { "docid": "50d30506eb099adf30ab7cbeb12b62de", "score": "0.56639016", "text": "def getMaxSpeed(self):\n return self.maxSpeed", "title": "" }, { "docid": "2d51afb5f6504f622e628efcad7738d5", "score": "0.5655632", "text": "def wavelength_speed(self):\n return self.query('SN')", "title": "" }, { "docid": "ac9c7757a7e4454d368d1ef5870fa7e6", "score": "0.56518924", "text": "def rpm2speed(self, msg):\n\n print(f\"Received rpm: {msg.data}\")\n\n # Convert from rpm to ground speed\n input_rpm = msg.data\n\n # The wheel radius parameter can be accessed using `get_parameter`, and\n # the value contained within can be accessed using `get_parameter_value`\n\n wheel_radius_param = self.get_parameter(\n \"wheel_radius\").get_parameter_value().double_value\n\n 
output_speed = 2 * pi * wheel_radius_param \\\n * input_rpm / 60 # Speed in m/s\n\n print(f\"Calculated robot speed: {output_speed}\")\n\n # Publish on the \"/speed\" topic\n msg = Float32()\n msg.data = float(output_speed)\n self.pub.publish(msg)", "title": "" }, { "docid": "bd0853e629e0cb6c02580e4f6d31577c", "score": "0.5638145", "text": "def wind_speed(self):\n try:\n return float(self.api.result.get(WIND_SPEED[0])) * 3.6\n except Exception:\n return", "title": "" }, { "docid": "8a9114e1aea670d525ed2c31d3b714c7", "score": "0.56343895", "text": "def get_percentage_by_target_speed(veh, target_speed):\n # target speed in m/s\n target_speed = target_speed / 3.6\n speed_limit = veh.get_speed_limit() # in m/s\n per = (speed_limit - target_speed) / speed_limit\n\n return per", "title": "" }, { "docid": "2b0aa3fccf3eec7074ec57ebf0ea7217", "score": "0.56234026", "text": "def _get_left_syringe_speed(self) -> Optional[int]:\n if not self.cycle_active_in.get():\n return self.microlab.left.syringe_default_speed.get()\n return self.left_cycle_speed", "title": "" }, { "docid": "236bc3af5e23850a6d8c5a81d38c003d", "score": "0.5618902", "text": "def wspd(self):\n res = 0\n for cmpnt in self.wind_cmpnt:\n res += cmpnt**2\n res = res**0.5\n res.rename('wind_speed')\n return res", "title": "" }, { "docid": "fc3c358044d31af8bc59e456032e9593", "score": "0.5617921", "text": "def fan_speed(self):\n return self._fan_speed", "title": "" }, { "docid": "2a2d7c53c1b092ba93396e38928f2420", "score": "0.5591178", "text": "def fan_speed(self):\n return self._fan_speed.value", "title": "" }, { "docid": "aa28f2d28e1ce596cd7eba36174ea129", "score": "0.5589757", "text": "def get_movement_speed(self):\n movement_speed = RegularTrainer.MOVEMENT_TYPE_DICT.get(\n self.movement_type)\n return movement_speed", "title": "" }, { "docid": "a58687bd5afa51137a15df48de0bda38", "score": "0.5576827", "text": "def calc_tare_torque(rpm):\n return 0.000474675989476*rpm + 0.876750155952", "title": "" }, { "docid": "0397cb725c9f1921a21a44f6dc808e70", "score": "0.55748683", "text": "def get_right_valve_speed(self) -> Tuple[Optional[int], int]:\n return self._return_invalid_alarm_when_parameter_is_none(\n self.microlab.right.valve_speed.get()\n )", "title": "" }, { "docid": "8989698c3735035a0a13d6d57a8dc410", "score": "0.5554888", "text": "def kill_speed(self):\n return self._kill_speed", "title": "" }, { "docid": "d31795e7355fd0621fea6fd4f183f319", "score": "0.55515397", "text": "def GetLateralSpeed(*args):\r\n return _pynewton.Tire_GetLateralSpeed(*args)", "title": "" }, { "docid": "9d64074e59e7765c7f1adbe155cdb497", "score": "0.5541563", "text": "def percentage(self) -> Optional[int]:\n if self._device.speed is None:\n return None\n return ranged_value_to_percentage(SPEED_RANGE, int(self._device.speed))", "title": "" }, { "docid": "e92b2989ed71cfbc5feef5b4a95368f6", "score": "0.5538577", "text": "def speed_count(self) -> int:\n if self.device.category == \"kj\":\n return self.air_purifier_speed_range_len\n return super().speed_count", "title": "" }, { "docid": "903210fb52b6395427fe5f108d06dd4e", "score": "0.5525445", "text": "def speed(self):\n if self._state:\n from miio.airhumidifier import OperationMode\n\n return OperationMode(self._state_attrs[ATTR_MODE]).name\n\n return None", "title": "" }, { "docid": "af389db509ff91cbce2bd8add3b9d097", "score": "0.55223733", "text": "def rpm(self):\n return self._rpm", "title": "" }, { "docid": "e8c466c13d0d4b5d099acb4dd0f2f386", "score": "0.5502597", "text": "def Speed(self) -> int:", "title": "" }, 
{ "docid": "fd5fb642e470f87ad09c60b73bc39ce4", "score": "0.5500486", "text": "def max_speed(self) -> float:\n return self._max_speed", "title": "" }, { "docid": "b8c7d57eb03df9180b9404712752b5bb", "score": "0.5491617", "text": "def target_temperature_step(self):\n return 1.0", "title": "" }, { "docid": "19793c179ebcadded3ab8a62bba43656", "score": "0.54884815", "text": "def _getSoundSpeed(self, alt):\n # evaluate the atmosphere model\n P, T = self.atm(alt)\n a = np.sqrt(self.gamma * self.R * T)\n\n return copy.copy(a)", "title": "" }, { "docid": "45556e19c02b7f0293fe397eb056dfc1", "score": "0.54731894", "text": "def get_speed(self) -> str:\n try:\n start = self.session_database.entries[0].time\n return f\"{int(self.current_location/(time() - start)*60)} cpm\"\n except IndexError:\n return \"0 cpm\"", "title": "" }, { "docid": "8085e18ea4193d1a8081c85969b5dd4b", "score": "0.5467908", "text": "def get_speed(vehicle):\n vel = vehicle.get_velocity()\n\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "title": "" }, { "docid": "dae9fe34a4c2929bb721c4106328a78d", "score": "0.5465564", "text": "def get_speed(agent, location):\n car = agent.info.my_car\n local = dot(location - car.location, car.rotation)\n angle = cap(math.atan2(local[1], local[0]), -3, 3)\n distance = distance_2d(car.location, location)\n if distance > 2.5 * velocity_2d(car.velocity):\n return 2250\n return 2250 - (400 * (angle ** 2))", "title": "" }, { "docid": "7a507ec97bf66cf3f33949ee29eb35ff", "score": "0.5464658", "text": "def wind_speed(self):\n return self._currently.wind_speed", "title": "" }, { "docid": "31a259d21f2bec0e897dce4e40bef166", "score": "0.5457667", "text": "def findMaxSpeed(self):\n v = self.vconfig\n powerAvailable = self.powerAvailable(0)\n speeds = v['Power Curve']['Speeds']\n powers = v['Power Curve']['PowersSL']\n maxSpeed = 0.\n maxSpeedPower = 0.\n for i in range(len(speeds)-1):\n if powers[i]<powerAvailable and speeds[i]>maxSpeed:\n maxSpeed = speeds[i]\n maxSpeedPower = powers[i]\n v['Performance']['MaxSpeed'] = maxSpeed\n v['Performance']['PowerAtMaxSpeed'] = maxSpeedPower\n v['Performance']['SFCatMaxSpeed'] = self.SFC(maxSpeedPower)", "title": "" }, { "docid": "2b09fa89c55595e96faf5a019521bf99", "score": "0.54561263", "text": "def get_speed_x(self):\r\n return self.__speed_x", "title": "" }, { "docid": "29d9bed1917b6813381e57c94b489b9e", "score": "0.54514605", "text": "def _get_speed(event):\n if event.get('code', \"\").lower() == \"progress\" and isinstance(event['details'], dict):\n return event['details'].get(\"mbps\")", "title": "" }, { "docid": "e6e8fbc708b34bd2481bf45992da7878", "score": "0.54469633", "text": "def target_temperature_step(self) -> float:\n return float(self._config[CONF_TEMP_STEP])", "title": "" }, { "docid": "458e1c10a174dfe0399b600471a1852c", "score": "0.5441924", "text": "def fluo_laser(self):\n return self._fluo_laser_power", "title": "" }, { "docid": "71af65d42a6adbb76703eab731547b66", "score": "0.5441913", "text": "def get_speed(self, servo_id, model_name):\n # Register Address and Length variables\n register_present_speed = self.dynotools.getRegisterAddressByModel(model_name, \"present_velocity\")\n register_present_speed_length = self.dynotools.getAddressSizeByModel(model_name, \"present_velocity\")\n\n # Read using present position\n raw_response = self.read(servo_id, register_present_speed, register_present_speed_length)\n response = raw_response[0]\n\n # TODO: Either implement or remove error handling\n # if response:\n # 
self.exception_on_error(response[4], servo_id, 'fetching present speed')\n\n if register_present_speed_length == 2:\n speed = response[0] + (response[1] << 8)\n\n elif register_present_speed_length == 4: \n if response[3] >= 1:\n present_speed_ref = [255, 255, 255, 255]\n response[0] = present_speed_ref[0] - response[0]\n response[1] = present_speed_ref[1] - response[1]\n response[2] = present_speed_ref[2] - response[2]\n response[3] = present_speed_ref[3] - response[3]\n speed = (response[0] + 1) + (response[1] << 8) + (response[2] << 16) + (response[3] << 32)\n speed *= -1\n \n else:\n speed = response[0] + (response[1] << 8) + (response[2] << 16) + (response[3] << 32)\n\n return speed", "title": "" }, { "docid": "29c815eb375d0088cd5c3c39c15b85e8", "score": "0.5438912", "text": "def trip_speed(self, units='kph'):\n count = 0\n speed_total = 0\n if self.conn.query(commands.ENGINE_LOAD).is_null():\n return 'could not retrieve non-null response from vehicle'\n while self.conn.query(commands.ENGINE_LOAD) <= 0:\n continue\n else:\n while self.conn.query(commands.ENGINE_LOAD) > 0: \n speed_total += query(commands.SPEED)\n count += 1\n\n avg_speed = speed_total / count\n \n if units == 'mph':\n avg_speed = avg_speed * 0.621\n \n return avg_speed", "title": "" }, { "docid": "76d04bea99a1aa4dfa06526c67b57e52", "score": "0.54350585", "text": "def target_temperature_step(self):\n return 0.5", "title": "" }, { "docid": "d91c36bd26e53d1262e63c256d7bc6a3", "score": "0.54325813", "text": "def get_speed(vehicle):\n # TODO consider the jetson case\n vel = vehicle.velocity\n return 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "title": "" }, { "docid": "2fbc883de1e3bfc50f92db55ce6c5ea5", "score": "0.542486", "text": "def getSpeedLR(self):\r\n diff = time() - self.last_time\r\n self.last_time = time()\r\n speedL = 1/diff * self.CountL\r\n speedR = 1/diff * self.CountR\r\n return(speedL, speedR)", "title": "" }, { "docid": "031fbd6c51bfd7a6707a736888191665", "score": "0.5422008", "text": "def getShuntResistance(self, value):\n self.ShuntResistance_Ohm = value", "title": "" }, { "docid": "b547b18bbe63a8c76779304f12bf5edd", "score": "0.5416916", "text": "def get_speed_status(self) -> SpeedStatus:\n return self._sync_module_hardware.speed_status # type: ignore[no-any-return]", "title": "" }, { "docid": "0618f75431e8ee38e34b6c9012658cba", "score": "0.54106146", "text": "def speed(self):\r\n if self._speed is None:\r\n diff_xy = np.diff(self.xy,axis=1)\r\n if len(diff_xy):\r\n self._speed = np.hypot(diff_xy[0,:],diff_xy[1,:])\r\n self._speed = np.append(self._speed,[0])\r\n self._speed = self._speed * self.posSampRate\r\n else:\r\n self._speed = np.array([])\r\n return self._speed", "title": "" }, { "docid": "002f035b7f282025d873b1964e887c40", "score": "0.53934866", "text": "def get_speedLR(self):\r\n diff = time() - self.last_time\r\n self.last_time = time()\r\n speedL = 1/diff * self.CountL\r\n speedR = 1/diff * self.CountR\r\n return(int(speedL), int(speedR))", "title": "" }, { "docid": "cc805701391b0e03542fe330b7db42f2", "score": "0.5390384", "text": "def setSpeed(self, *args):\n return _osg.Sequence_setSpeed(self, *args)", "title": "" }, { "docid": "a52fa3d04c7c78402a45082f48d1d1a5", "score": "0.53891885", "text": "def find_fastest_s(self):\n return self._find_fastest_speed(self._STAR_SHIPS)", "title": "" }, { "docid": "e3038fddbfdadbcbb9007bf494e39882", "score": "0.5384073", "text": "def speed(self):\n return pow(self.vx * self.vx + self.vy * self.vy, 0.5)", "title": "" }, { "docid": 
"04fa4ab49772cc3e87578230c49cff8a", "score": "0.53807616", "text": "def find_slowest_s(self):\n return self._find_slowest_speed(self._STAR_SHIPS)", "title": "" }, { "docid": "a0a052f8be74d42186cf732dd34d556b", "score": "0.53783184", "text": "def target_temperature(self) -> float | None:\n return self._ac[\"setTemp\"]", "title": "" }, { "docid": "4eb20f029e390055e29b25d845cfd7dd", "score": "0.5377201", "text": "def setSpeed(self,transport):\r\n self.transport=transport\r\n if transport==\"walk\":\r\n self.userSpeed=5\r\n if transport==\"bicycle\":\r\n self.userSpeed=16\r\n if transport==\"car\":\r\n self.userSpeed=80", "title": "" }, { "docid": "4e53129e6fde2b16a7432163b65c8557", "score": "0.5372317", "text": "def change_speed(self, ds):\n self.rpm += ds", "title": "" }, { "docid": "bbbb54a00b31e763d07016a29876f45d", "score": "0.53598326", "text": "def speed_mp(M,T):\n return (2*R*T/M)**0.5", "title": "" }, { "docid": "1e51ffbce3dbd2702303ae843cb7c526", "score": "0.5357622", "text": "def target_temperature(self) -> float:\n return self.device.level", "title": "" }, { "docid": "0774797d42e4182ef77f1ae1d2d15f0e", "score": "0.5356607", "text": "def set_max_speed(self):\n self.jlink.JLINKARM_SetMaxSpeed()", "title": "" }, { "docid": "7d3c6cad6bf877a40841499bc90ac407", "score": "0.5356445", "text": "def upload_speed(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"upload_speed\")", "title": "" }, { "docid": "21e352cb5d91fa399e47718768f0e271", "score": "0.53559613", "text": "def _set_rpm(self, rpm):\n self._rpm = rpm\n # T is the amount of time to stop between signals\n self._T = (60.0 / rpm) / self.steps_per_rev", "title": "" } ]
b28f5773fdb4318bcc9e943c8a126dd8
Returns dictionary that maps each word to its letter code words list of words return dictionary
[ { "docid": "8d885fb8271e024d34facbf930cbf6d0", "score": "0.8214427", "text": "def map_of_codes(words):\n\tmap_of_codes = dict()\n\tcodes = []\n\tfor word in words:\n\t\tmap_of_codes[word] = letter_code(word)\n\treturn map_of_codes", "title": "" } ]
[ { "docid": "5f97c1f59df0a248e91209194694929d", "score": "0.7414668", "text": "def make_word_dict(wordlist):\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n word_dict = dict()\n for word in wordlist:\n word_dict.setdefault(word[0].upper(), []).append(word.upper())\n return word_dict", "title": "" }, { "docid": "9a4f3a29405b4a1f59bbdae3cf4bb749", "score": "0.7354137", "text": "def sort_letters(self):\n dictionary = dict()\n for word in self.wordlist:\n key = list(word)\n key.sort()\n key = \"\".join(key)\n if key not in dictionary:\n dictionary[key] = [word]\n else:\n dictionary[key].append(word)\n return dictionary", "title": "" }, { "docid": "87346771649896ece879f3c801a44438", "score": "0.72714823", "text": "def build_word_list(word):\n word_list = []\n for letter in word:\n letter_dict = {\"letter\": letter, \"guessed\": False}\n word_list.append(letter_dict)\n return word_list", "title": "" }, { "docid": "666fea9a718e6b6360baa4549890dfe5", "score": "0.71301335", "text": "def a_words():\n words = load_words()\n return {\n word\n for word in words\n if word.startswith(\"a\")\n and len(word) > 1\n and word[1] in string.ascii_letters[:13]\n }", "title": "" }, { "docid": "afd2848ee7d04f9e28c49eed39cdbaca", "score": "0.71150595", "text": "def words(key_codes):\n letter_codes = (code if 31 < code < 127 else 32 for code in key_codes)\n return filter(str.isalpha, groups(letter_codes))", "title": "" }, { "docid": "1e34c1070b896f38c8d7528c4c7e0567", "score": "0.6952814", "text": "def encode(self, words: List[str]) -> Dict[str, str]:\n try:\n return {\n word: \"\".join(\n self.codebook[letter] for letter in word\n ) for word in words\n }\n except KeyError:\n raise UnableToEncodeStringException()", "title": "" }, { "docid": "4f8575ef3b96b2bbb8cca56803eea7a5", "score": "0.6748794", "text": "def create_word_dict(words_list):\n words_df = pd.DataFrame(words_list, columns=['letter'])\n words_count = words_df['letter'].value_counts()\n words_dict = words_count.to_dict()\n return words_dict", "title": "" }, { "docid": "101e9058d1bc557b23f4c46c0930f210", "score": "0.6716204", "text": "def build_dict(self):\n dic = {}\n f = open(\"/usr/share/dict/words\", \"r\")\n word_list = f.readlines()\n for word in word_list:\n word = word.strip().lower()\n words = ''.join(sorted(word))\n dic[words] = word\n return dic", "title": "" }, { "docid": "7c92da69b48e4ffe3c503475d6548bed", "score": "0.66234314", "text": "def make_anagrams_map(map_of_codes):\n\t#creates a dictonary that maps single letter code to all possible anagrams\n\tmap_of_anagrams = dict()\n\tfor word, code in map_of_codes.iteritems():\n\t\tmap_of_anagrams.setdefault(code, []).append(word)\n\treturn map_of_anagrams", "title": "" }, { "docid": "b441b4a778a52e118cf5e11cc25de653", "score": "0.6592105", "text": "def make_word_dict():\n d = dict()\n fin = open('../ch9/words.txt')\n for line in fin:\n word = line.strip().lower()\n d[word] = word\n return d", "title": "" }, { "docid": "7b5263b51d11814949c127c0ae557973", "score": "0.65387064", "text": "def get_words_list(letters: List[str],\n words_from_dict: List[str]) -> List[str]:\n output_words = []\n for word_from_dict in words_from_dict:\n if (len(word_from_dict) >= 4) and (letters[4] in word_from_dict):\n word_from_dict_list = list(word_from_dict)\n for letter in letters:\n if letter in word_from_dict_list:\n word_from_dict_list.remove(letter)\n if not word_from_dict_list:\n output_words.append(word_from_dict)\n\n return output_words", "title": "" }, { "docid": "75cc0dad2dcbb2175d48738cd8762508", "score": 
"0.6527951", "text": "def word_dict():\n\n d = {}\n with open('words.txt') as fin:\n for line in fin:\n d[line.strip().lower()] = []\n return d", "title": "" }, { "docid": "31d982db4a8935c5db1d6d514ac4fa0d", "score": "0.6524358", "text": "def make_words_dict(words):\n words_dict = {}\n for word in words:\n sorted_word = sorted_str(word)\n if sorted_word in words_dict:\n words_dict[sorted_word].append(word)\n else:\n words_dict[sorted_word] = [word]\n return words_dict", "title": "" }, { "docid": "413fe6c1bd6623bd04b89d1460ed562d", "score": "0.640289", "text": "def abc_list(context):\n alpha_search = []\n for char in string.ascii_uppercase:\n alpha_search.append(char)\n return {'alpha_search': alpha_search,\n 'target_letter': context['target_letter']}", "title": "" }, { "docid": "5aa9c1e6021eb712e4d322c5af695f06", "score": "0.63932145", "text": "def convert_word(word, map):\n ret = []\n for ch in word:\n ret.append(map[ch])\n return ret", "title": "" }, { "docid": "7a94e8ca42a87cccaedb2206f7188f67", "score": "0.637467", "text": "def make_word_dict():\r\n d = dict()\r\n fin = open('words.txt')\r\n for line in fin:\r\n word = line.strip().lower()\r\n d[word] = word\r\n\r\n return d", "title": "" }, { "docid": "cddf31e369266849d605a94f1aadd1b0", "score": "0.63213146", "text": "def collect_words(sequence, order):\n char_map = {}\n\n # iterate through the sequence(as long as the characters support the ordering)\n for counter in range(order, len(sequence) - order + 1):\n\n current_char = sequence[counter]\n\n # create an empty list to add all the characters that occurred previously\n # from the current character based on the order\n previous_chars = []\n\n for in_counter in range(counter + 1 - order, counter + 1):\n previous_chars.append(sequence[in_counter - 1])\n\n if current_char not in char_map:\n char_map[current_char] = previous_chars\n else:\n char_map[current_char] += previous_chars\n\n # proceed counting in accordance the order.\n counter += order\n return char_map", "title": "" }, { "docid": "25c0c6a8966a2c3033b71370a9ec9ceb", "score": "0.63004583", "text": "def get_letters_seq(data):\n # Letters of all words\n a1 = [set(x) for l in data.values() for x in l]\n # All letters (20)\n letter = ['<PAD>'] + list(set([x for l in a1 for x in l]))\n # Letters-index dictionaries\n letter_to_ix = dict((i, j) for j, i in enumerate(letter))\n ix_to_letter = dict((j, i) for j, i in enumerate(letter))\n return letter, letter_to_ix, ix_to_letter", "title": "" }, { "docid": "216f5eed457730d0d9636edf718164f9", "score": "0.6232511", "text": "def map_chars(texts_list):\n all_testtrain = reduce(lambda x, y: x + y, texts_list)\n chars = sorted(list(set(\"\".join(all_testtrain))))\n chardict = dict((c, i + 1) for i, c in enumerate(chars))\n chardict[\"$\"] = 0\n vocab_size = len(chardict)\n rchardict = dict((i + 1, c) for i, c in enumerate(chars))\n rchardict[0] = \"$\"\n print(\"Characters mapped\")\n print(\" No. 
of characters: {}\".format(vocab_size))\n return [chardict, rchardict, vocab_size]", "title": "" }, { "docid": "180db94deb7794db8e9ce528b5f9ceae", "score": "0.6176166", "text": "def assign_letter_codes(codes, code_numbers, unrhymed_verses):\n rhyme_codes = {}\n rhymes = []\n endings = []\n for rhyme in code_numbers:\n if rhyme in unrhymed_verses:\n endings.append('') # do not track unrhymed verse endings\n rhymes.append(-1) # unrhymed verse\n else:\n if rhyme not in rhyme_codes:\n rhyme_codes[rhyme] = len(rhyme_codes)\n endings.append(codes[rhyme])\n rhymes.append(rhyme_codes[rhyme])\n return rhymes, endings", "title": "" }, { "docid": "8d8f0a672a40324db07b33c5119d949d", "score": "0.6168008", "text": "def word_count_map(data):\n (byte_offset, line_value) = data\n for s in split_into_sentences(line_value):\n for w in split_into_words(s.lower()):\n yield (w, s)", "title": "" }, { "docid": "117be442b917b6402ef761e75449fb98", "score": "0.61571044", "text": "def get_dictionary():\n sym_spell = spell.SymSpell()\n return sym_spell.words", "title": "" }, { "docid": "8646aa76e508c9f16070c526f4024b01", "score": "0.61280966", "text": "def create_character_maps():\n #alfabet = u' 0abcdefghijklmnopqrstuvwxyz不用留地址了!\"洪裳说孩林晓梅三会就把一同过学免除,所以总训斥贾在中国作家协第届儿童文明天放他请来思想走暗对吴巧翻到本介绍龙传正认识你很九、生日派巴的回信也得稍拿点架为。要气记-封牵连道下午资料后刀子原那是胡甲级高兴又蹦可还胜早们玩去吧话印象智低着头脸绝望完全像只断翅膀机向惊慌人急匆闯进劈问:友写做字好爸叹口妈摊开忙准上约校大门集合里按读书已经睡几个私拆我给草?切包照张相保证身都少外面业余杯鲜桔水和若干觉喂这插座活雷锋专管老兄起轰动先出逃惭愧么弄副鬼样弯眉达室恳爽快怎填医有些阵整报复杜小求往比宜吗区长风公园妹装邱士力肩恨能当场跟庆祝查师题推称号心导打悔满累死惜站边没苦自己王种钢笔习班即怪表情法听却告诉才关眼前算冷百分她笑什难计策名因知祁抄袭事—摘挽狂澜制止恶斗十五硬女半伙时父亲多艺术团现毕竟迅速看民鲁盆兜争论版单位响亮店独缺药仿佛见电脑劳体遇危而之更谈丫真嘴舌叫Y色调如厨房饭桌间且荐怕冒险定跃成功臣队瘪考托福肯役双戏院云感梨汁…喝差套颜理市越太谨曾美丽姆极任划盗步夺路但与检讨两背爵物敢《威尼斯商》晚夜二拨通片工厂宣布让块混票神支柱旗帜产系周年帽始发挥受哪懂消息灵疯材青嘟哝无精采掉另再次赛啦手喜欢吃咸住踏铁栅跳重庭希委屈必此偏初犯赞莎亚示目光充崇敬造萍唯朋倒英语牢骚舍懒方清脆嗓似指变服务歌泣行被视然搭霉台演忍钱超员频招利逗碰尽量永远纸条忸怩劫持呀它味痛并未流露何捂角嚎曲刚摸验顿呢;假赏勇夫男折扣笨瓜股陈闲察疼反32透秘顺挂悄八选举波居乐唱抽烟直忘怀课答提够潇洒篮球则世收诗冲排蹲凡瘦户主讲究礼哥概影妙黯找火转贸途红突击意容咧阿伦值至赶挺忠类东西深贻适辅许退稿错寄候便警键卡胖畅刻跑责命令抗拒N飞平客须结挑新凭今虑汉度逻搞宝贝唤接抱常牌熟买卖搅办病艳呼声需领吹每抓紧舞拳恼羞养赠谢千金强静坚瞧柬牛皮袋塞海画鸡首蠢滋剪嘻善于足霆垮从互应喔拔助念议乎四挤效果怒或参加厌该氛谁赖罢否将尖教睬份泼阁练扛车腿铺迁万别六奇迹根等伸微颤宠救皆血痕失揭姓奋移科拜C迷鱼葱姜热降温疑漏孔乙戒界星志实偶拍洋构入邢补雄略终细兵剑存随虽掌握词哦设久酸配奖狼呵哈纪性评忽追戴丝镜战帮乱奶广妖涂眯态毅鼓励敏粗脚治癫蛤蟆铃具备批糊属闹蛮横额章使休乌虚酒胆俞稀松枕忧躺各厉害解散摄克案注立吼破墙窘吆逢钟蹈滔漂纯展句拼慢脱鞋咳嗽带患音特众确率部劲江泽压愚堆侠秒夕司莫睛席征联谋杀凶狠秃顶炮较优近积粱丑伤愤疤腾显基耐酬培者雇佣窗傲剩船床予愿聂耳跻迟脾妇由决坐凑丁署省锻炼驳施政纲绉骂朝努悲观振央落抬嘛吵瞌其尘爱投密武严眠耸节嗨般删异弓护烧厕质盛凌肢壮牺牲黑板言费麻烦灰剧件寻埋伏瞪拉膨胀试幅摇叔伯堂姐宏伟标引胸恭送仪式图简咖啡君旧偷束藕荷泰戈尔筒华捏抖搂惹页孤抹罕碗喊肚钻靠段慎石袖酝酿贵竹祸财雨淋揉鼻喷漠懦喻贴播腻社豹取甚聊婆最史犟坏哲咬代徐兼傻忆停涉仙捡母型仅桩隐军族醒底灭肤白皙搬憋掏侦番绰故程辑格诚奴隶惋吞吐食减寿敲抢庞阶糟糕盘碎普秦冰箱荤菜煮喉咙研铜畏蔼化肇处玻璃渣踪梦奔零纠躲毫噢库腰盼共党卫魂哭斤顾彼柴叮蔡闷狡黠含裤扫净浅骄淌汗烈浪朵访泪围磁待临罗蒙毛欣轻埃及塔困预廊镇例刘饶趣哇汪胞胎易南辕北辙遗甩缓胳膊哑袜款月毁撑搁非仍拦圆兑垫沙替交讯彩洁荟萃运范空捞爬嚼糖香呃您翟丰棚辆安秀扬玄兮厚舒宽迎聚皱援历藏蜡烛吩咐滑稽踢嚷挡堵败庸荣咱噩刷牙斩钉截数欠扑哧源洗职歹徒犹豫衫昏径询绊杂繁裙宾谦享圾递据饱渐润歪剔屋肿榜朴素涌餐宴肉冠组织妨元七趟彻软吟韩短博形卷权遵邀钩价棍啼郑煎熬慷慨滚烂忿砖订眨义漓拖莺恰谊轶扔蛋肥升贪寺臂健康攻譬俩谎捶残蛇哼激期橱续创促品录窍扶邮古呈爆幸亏啪噪烤晕济绢盒河猛耿承竖饿冤改继闪夹宅虫S惨奈遍野炊砸皇屁履窝晨屉惕孝扎列农壳隔触簸擂奏佩炎坟内膏状丈律左喘碌屡姑娘城OK楚辈墩骑锈悉木拘阅纺辨串污蔑脏楼筋销恢燃吸岁限局货离担幻依某嗯扒巫锁拾游枪擒敌纳米换贩摆阔肌弹灯笼编盾陶醉沉浸致李扭谍馁建筑码鹃既嘀咕惠勤哨陪垂丧默况秋苏矛借载阴阳篇沾飘括尴尬愁肠忌剂燎泡狭窄马诸境怔探卓右弟贼器晃蜷津猪肝紫操辞俯卧详庇擞狗兢尝圈屑嫌索酷付维秩序甜溜锐拎灾貌锦蠕睁沸锅盖贺衷毒恐慕蓉获炒矢禁闭吁(豁势亿修饰层惯油俱荒尾育肖拯肃末曝怅棒籍踮坯措勃粉舟敞婉狈憎搜刮释嫩愉旅规矩呆熊线哐猜缘鸦雀乖跺甘踹婶芮杰恒汇碟乍披技羡馆翔宋楷耻册皂核娱伊挨启稳偿煞寸尊延搔描淡佼缩固执倍痴景衣顽堡垒唠鄙哀端擦枝悦疙瘩梆犒勾弱瘾驶舅花溢妄缝模叙守坑豪迈佯嘘宁穿柄舆纱荡弃疾摔惴磨链吓肘颗夸撒轮凳惟踩蚂惧幼雌昂闻董瑞峻杳眸聪颖瞒霹雳漫仑废嗬瞻慰恋辩挠仔喇叭缠馊郊沫浓拥踱负徕愣堕讳湿漉吻鸣薄潦啃拭庄赤歉腹涎趁叉翩拐绿欲僵郎误摞侄冬怵官添矿泉舰帐帅曹罪魁羽损簿宿怨嘿榨陌E蒜脊矮叽寒怜召悟齐姗档违池晶椅瞥焦嘲揍抡篓唬赦辛盲椰垃咄逼巷输控欺侮骨虎潮炸遭岔乡栏蹑霎眶盈驻鼠谜捐扮梭归诀刺针颠躁枉症廉苍射蚊奢侈卸腔轩劝携)擅占臭1蜜魏篆绪赋傍瘫挣魄群捅叨锤噼捆赫祟哄捣搏仇橙黄扰霍麦防悠嗷增娑箭啸勉丢疏涨弛屏惑拢著宇宙瓶绽佑裱捧申登鸭捺春伪棵咋测恪诺殷刹剥跨缭耍估巡壁娇聘银灌锲爹鼎避捉匕饮烫骗揽绩谣益萨佐奉拄昆炯吊纷纤巨祖佳辱脖旁摩附蹄讽讶良帝撇诡睽琅抵倚掀兰·德雪厅沮赌岂渡萝卜殊循昨土跌宕趔颓扯勿扁桃腺斟酌陷齿柔蜇挖芒际掖贡宗诧恩慈签杆狐绵箍荫潜肋涤墨徽晴羹诌蓬富蛛购项析汤韧唉滴抛旦嚓颊懈乘斜岑凯趄趴奚伽础乒乓罐粒沓统臀乏旱娃懑沛猴译○汽泳兽赔裹唇券辉煌涮烘障山贬迪忐忑邻聋旋抚蚤淮柳筷誓怂恿杠伍堪审允砰拙冥膝谓监典襟暂喏芭蕾茸泛掺楞咒营鞠琢融井悸诩供帖裆蹚髦匹铅劣匿拟抒耶阻垢罚佬俭雅竞判坦玲衔豆浆逐啰嗦慧隆扩售扉苛肆侧刃陆蜂啊置醋筹刨姿斑纹树仓淘鸟贿赂胃献搡蹭鲛绕刊乳赢枚邓稚幽穆伞叶综攀塑岸揪婿押
耀黛玉倡州攥舵翁塌映田季灿噜妻胫琐蚁挪咏珍癞冻妥茅衬涯苗勒染浑晦扳崩夭尚倾搽迫抑辣.唔驾京杭苟咛嗖兔酵蓝褒呐珑趋悚诲裁怦饼傅搓龄巾帼矫毡懊T恤溶娄踊洞幕览揶揄羊H躬猫懵渴哎朗诵渊凉镖羲悯驰栗航喳栖贯暴返彰街垠撞浮炬吱诞暖叼储辟耗熄沿吝骷髅甫洛震敛捷森茶诱饵盐咚梳网谅孜吉憾趿糙俘僻脉猾燕粘遥啤瓦翘拇缕郁脯赚遮曰疗奥液抠阐述倦楔旨韦粹冶迂逛铆凛绒虏奸蒂芬肺疚挫亡逝寞尤嘤昧催俊晒闸杞菲鹤革瑰哗赴衰癖衡铸岗坎坷寂椒姊汲惮践卍\\084/9567狙MB环L+F汐猎撼莱!A[]:魔撤GIRU()VJ,D遛Z酪】撸$WP昵X圣槽伐'\n #alfabet = u' 0abcdefghijklmnopqrstuvwxyz不用留地址了!\"洪裳说孩林晓梅三会就把一同过学免除,所以总训斥贾在中国作家协第届儿童文明天放他请来思想走暗对吴巧翻到本介绍龙传正认识你很九、生日派巴的回信也得稍拿点架为。要气记-封牵连道下午资料后刀子原那是胡甲级高兴又蹦可还胜早们玩去吧话印象智低着头脸绝望完全像只断翅膀机向惊慌人急匆闯进劈问:友写做字好爸叹口妈摊开忙准上约校大门集合里按读书已经睡几个私拆我给草?切包照张相保证身都少外面业余杯鲜桔水和若干觉喂这插座活雷锋专管老兄起轰动先出逃惭愧么弄副鬼样弯眉达室恳爽快怎填医有些阵整报复杜小求往比宜吗区长风公园妹装邱士力肩恨能当场跟庆祝查师题推称号心导打悔满累死惜站边没苦自己王种钢笔习班即怪表情法听却告诉才关眼前算冷百分她笑什难计策名因知祁抄袭事—摘挽狂澜制止恶斗十五硬女半伙时父亲多艺术团现毕竟迅速看民鲁盆兜争论版单位响亮店独缺药仿佛见电脑劳体遇危而之更谈丫真嘴舌叫Y色调如厨房饭桌间且荐怕冒险定跃成功臣队瘪考托福肯役双戏院云感梨汁…喝差套颜理市越太谨曾美丽姆极任划盗步夺路但与检讨两背爵物敢《威尼斯商》晚夜二拨通片工厂宣布让块混票神支柱旗帜产系周年帽始发挥受哪懂消息灵疯材青嘟哝无精采掉另再次赛啦手喜欢吃咸住踏铁栅跳重庭希委屈必此偏初犯赞莎亚示目光充崇敬造萍唯朋倒英语牢骚舍懒方清脆嗓似指变服务歌泣行被视然搭霉台演忍钱超员频招利逗碰尽量永远纸条忸怩劫持呀它味痛并未流露何捂角嚎曲刚摸验顿呢;假赏勇夫男折扣笨瓜股陈闲察疼反32透秘顺挂悄八选举波居乐唱抽烟直忘怀课答提够潇洒篮球则世收诗冲排蹲凡瘦户主讲究礼哥概影妙黯找火转贸途红突击意容咧阿伦值至赶挺忠类东西深贻适辅许退稿错寄候便警键卡胖畅刻跑责命令抗拒N飞平客须结挑新凭今虑汉度逻搞宝贝唤接抱常牌熟买卖搅办病艳呼声需领吹每抓紧舞拳恼羞养赠谢千金强静坚瞧柬牛皮袋塞海画鸡首蠢滋剪嘻善于足霆垮从互应喔拔助念议乎四挤效果怒或参加厌该氛谁赖罢否将尖教睬份泼阁练扛车腿铺迁万别六奇迹根等伸微颤宠救皆血痕失揭姓奋移科拜C迷鱼葱姜热降温疑漏孔乙戒界星志实偶拍洋构入邢补雄略终细兵剑存随虽掌握词哦设久酸配奖狼呵哈纪性评忽追戴丝镜战帮乱奶广妖涂眯态毅鼓励敏粗脚治癫蛤蟆铃具备批糊属闹蛮横额章使休乌虚酒胆俞稀松枕忧躺各厉害解散摄克案注立吼破墙窘吆逢钟蹈滔漂纯展句拼慢脱鞋咳嗽带患音特众确率部劲江泽压愚堆侠秒夕司莫睛席征联谋杀凶狠秃顶炮较优近积粱丑伤愤疤腾显基耐酬培者雇佣窗傲剩船床予愿聂耳跻迟脾妇由决坐凑丁署省锻炼驳施政纲绉骂朝努悲观振央落抬嘛吵瞌其尘爱投密武严眠耸节嗨般删异弓护烧厕质盛凌肢壮牺牲黑板言费麻烦灰剧件寻埋伏瞪拉膨胀试幅摇叔伯堂姐宏伟标引胸恭送仪式图简咖啡君旧偷束藕荷泰戈尔筒华捏抖搂惹页孤抹罕碗喊肚钻靠段慎石袖酝酿贵竹祸财雨淋揉鼻喷漠懦喻贴播腻社豹取甚聊婆最史犟坏哲咬代徐兼傻忆停涉仙捡母型仅桩隐军族醒底灭肤白皙搬憋掏侦番绰故程辑格诚奴隶惋吞吐食减寿敲抢庞阶糟糕盘碎普秦冰箱荤菜煮喉咙研铜畏蔼化肇处玻璃渣踪梦奔零纠躲毫噢库腰盼共党卫魂哭斤顾彼柴叮蔡闷狡黠含裤扫净浅骄淌汗烈浪朵访泪围磁待临罗蒙毛欣轻埃及塔困预廊镇例刘饶趣哇汪胞胎易南辕北辙遗甩缓胳膊哑袜款月毁撑搁非仍拦圆兑垫沙替交讯彩洁荟萃运范空捞爬嚼糖香呃您翟丰棚辆安秀扬玄兮厚舒宽迎聚皱援历藏蜡烛吩咐滑稽踢嚷挡堵败庸荣咱噩刷牙斩钉截数欠扑哧源洗职歹徒犹豫衫昏径询绊杂繁裙宾谦享圾递据饱渐润歪剔屋肿榜朴素涌餐宴肉冠组织妨元七趟彻软吟韩短博形卷权遵邀钩价棍啼郑煎熬慷慨滚烂忿砖订眨义漓拖莺恰谊轶扔蛋肥升贪寺臂健康攻譬俩谎捶残蛇哼激期橱续创促品录窍扶邮古呈爆幸亏啪噪烤晕济绢盒河猛耿承竖饿冤改继闪夹宅虫S惨奈遍野炊砸皇屁履窝晨屉惕孝扎列农壳隔触簸擂奏佩炎坟内膏状丈律左喘碌屡姑娘城OK楚辈墩骑锈悉木拘阅纺辨串污蔑脏楼筋销恢燃吸岁限局货离担幻依某嗯扒巫锁拾游枪擒敌纳米换贩摆阔肌弹灯笼编盾陶醉沉浸致李扭谍馁建筑码鹃既嘀咕惠勤哨陪垂丧默况秋苏矛借载阴阳篇沾飘括尴尬愁肠忌剂燎泡狭窄马诸境怔探卓右弟贼器晃蜷津猪肝紫操辞俯卧详庇擞狗兢尝圈屑嫌索酷付维秩序甜溜锐拎灾貌锦蠕睁沸锅盖贺衷毒恐慕蓉获炒矢禁闭吁(豁势亿修饰层惯油俱荒尾育肖拯肃末曝怅棒籍踮坯措勃粉舟敞婉狈憎搜刮释嫩愉旅规矩呆熊线哐猜缘鸦雀乖跺甘踹婶芮杰恒汇碟乍披技羡馆翔宋楷耻册皂核娱伊挨启稳偿煞寸尊延搔描淡佼缩固执倍痴景衣顽堡垒唠鄙哀端擦枝悦疙瘩梆犒勾弱瘾驶舅花溢妄缝模叙守坑豪迈佯嘘宁穿柄舆纱荡弃疾摔惴磨链吓肘颗夸撒轮凳惟踩蚂惧幼雌昂闻董瑞峻杳眸聪颖瞒霹雳漫仑废嗬瞻慰恋辩挠仔喇叭缠馊郊沫浓拥踱负徕愣堕讳湿漉吻鸣薄潦啃拭庄赤歉腹涎趁叉翩拐绿欲僵郎误摞侄冬怵官添矿泉舰帐帅曹罪魁羽损簿宿怨嘿榨陌E蒜脊矮叽寒怜召悟齐姗档违池晶椅瞥焦嘲揍抡篓唬赦辛盲椰垃咄逼巷输控欺侮骨虎潮炸遭岔乡栏蹑霎眶盈驻鼠谜捐扮梭归诀刺针颠躁枉症廉苍射蚊奢侈卸腔轩劝携)擅占臭1蜜魏篆绪赋傍瘫挣魄群捅叨锤噼捆赫祟哄捣搏仇橙黄扰霍麦防悠嗷增娑箭啸勉丢疏涨弛屏惑拢著宇宙瓶绽佑裱捧申登鸭捺春伪棵咋测恪诺殷刹剥跨缭耍估巡壁娇聘银灌锲爹鼎避捉匕饮烫骗揽绩谣益萨佐奉拄昆炯吊纷纤巨祖佳辱脖旁摩附蹄讽讶良帝撇诡睽琅抵倚掀兰·德雪厅沮赌岂渡萝卜殊循昨土跌宕趔颓扯勿扁桃腺斟酌陷齿柔蜇挖芒际掖贡宗诧恩慈签杆狐绵箍荫潜肋涤墨徽晴羹诌蓬富蛛购项析汤韧唉滴抛旦嚓颊懈乘斜岑凯趄趴奚伽础乒乓罐粒沓统臀乏旱娃懑沛猴译○汽泳兽赔裹唇券辉煌涮烘障山贬迪忐忑邻聋旋抚蚤淮柳筷誓怂恿杠伍堪审允砰拙冥膝谓监典襟暂喏芭蕾茸泛掺楞咒营鞠琢融井悸诩供帖裆蹚髦匹铅劣匿拟抒耶阻垢罚佬俭雅竞判坦玲衔豆浆逐啰嗦慧隆扩售扉苛肆侧刃陆蜂啊置醋筹刨姿斑纹树仓淘鸟贿赂胃献搡蹭鲛绕刊乳赢枚邓稚幽穆伞叶综攀塑岸揪婿押耀黛玉倡州攥舵翁塌映田季灿噜妻胫琐蚁挪咏珍癞冻妥茅衬涯苗勒染浑晦扳崩夭尚倾搽迫抑辣.唔驾京杭苟咛嗖兔酵蓝褒呐珑趋悚诲裁怦饼傅搓龄巾帼矫毡懊T恤溶娄踊洞幕览揶揄羊H躬猫懵渴哎朗诵渊凉镖羲悯驰栗航喳栖贯暴返彰街垠撞浮炬吱诞暖叼储辟耗熄沿吝骷髅甫洛震敛捷森茶诱饵盐咚梳网谅孜吉憾趿糙俘僻脉猾燕粘遥啤瓦翘拇缕郁脯赚遮曰疗奥液抠阐述倦楔旨韦粹冶迂逛铆凛绒虏奸蒂芬肺疚挫亡逝寞尤嘤昧催俊晒闸杞菲鹤革瑰哗赴衰癖衡铸岗坎坷寂椒姊汲惮践卍84/9567狙MB环L+F汐猎撼莱!A[]:魔撤GIRU()VJ,D遛Z酪】撸$WP昵X圣槽伐_|珠“>涛汹澎湃爷”靴仆鞭嵌霸愈;@渔藻泥叩槛侍斧钺稼蔚膳喧?貂砌囱叱贱俄拗#绣&砍'\n #alfabet = u' 
abcdefghijklmnopqrstuvwxyz不用留地址了!\"洪裳说孩林晓梅三会就把一同过学免除,所以总训斥贾在中国作家协第届儿童文明天放他请来思想走暗对吴巧翻到本介绍龙传正认识你很九、生日派巴的回信也得稍拿点架为。要气记-封牵连道下午资料后刀子原那是胡甲级高兴又蹦可还胜早们玩去吧话印象智低着头脸绝望完全像只断翅膀机向惊慌人急匆闯进劈问:友写做字好爸叹口妈摊开忙准上约校大门集合里按读书已经睡几个私拆我给草?切包照张相保证身都少外面业余杯鲜桔水和若干觉喂这插座活雷锋专管老兄起轰动先出逃惭愧么弄副鬼样弯眉达室恳爽快怎填医有些阵整报复杜小求往比宜吗区长风公园妹装邱士力肩恨能当场跟庆祝查师题推称号心导打悔满累死惜站边没苦自己王种钢笔习班即怪表情法听却告诉才关眼前算冷百分她笑什难计策名因知祁抄袭事—摘挽狂澜制止恶斗十五硬女半伙时父亲多艺术团现毕竟迅速看民鲁盆兜争论版单位响亮店独缺药仿佛见电脑劳体遇危而之更谈丫真嘴舌叫Y色调如厨房饭桌间且荐怕冒险定跃成功臣队瘪考托福肯役双戏院云感梨汁…喝差套颜理市越太谨曾美丽姆极任划盗步夺路但与检讨两背爵物敢《威尼斯商》晚夜二拨通片工厂宣布让块混票神支柱旗帜产系周年帽始发挥受哪懂消息灵疯材青嘟哝无精采掉另再次赛啦手喜欢吃咸住踏铁栅跳重庭希委屈必此偏初犯赞莎亚示目光充崇敬造萍唯朋倒英语牢骚舍懒方清脆嗓似指变服务歌泣行被视然搭霉台演忍钱超员频招利逗碰尽量永远纸条忸怩劫持呀它味痛并未流露何捂角嚎曲刚摸验顿呢;假赏勇夫男折扣笨瓜股陈闲察疼反32透秘顺挂悄八选举波居乐唱抽烟直忘怀课答提够潇洒篮球则世收诗冲排蹲凡瘦户主讲究礼哥概影妙黯找火转贸途红突击意容咧阿伦值至赶挺忠类东西深贻适辅许退稿错寄候便警键卡胖畅刻跑责命令抗拒N飞平客须结挑新凭今虑汉度逻搞宝贝唤接抱常牌熟买卖搅办病艳呼声需领吹每抓紧舞拳恼羞养赠谢千金强静坚瞧柬牛皮袋塞海画鸡首蠢滋剪嘻善于足霆垮从互应喔拔助念议乎四挤效果怒或参加厌该氛谁赖罢否将尖教睬份泼阁练扛车腿铺迁万别六奇迹根等伸微颤宠救皆血痕失揭姓奋移科拜C迷鱼葱姜热降温疑漏孔乙戒界星志实偶拍洋构入邢补雄略终细兵剑存随虽掌握词哦设久酸配奖狼呵哈纪性评忽追戴丝镜战帮乱奶广妖涂眯态毅鼓励敏粗脚治癫蛤蟆铃具备批糊属闹蛮横额章使休乌虚酒胆俞稀松枕忧躺各厉害解散摄克案注立吼破墙窘吆逢钟蹈滔漂纯展句拼慢脱鞋咳嗽带患音特众确率部劲江泽压愚堆侠秒夕司莫睛席征联谋杀凶狠秃顶炮较优近积粱丑伤愤疤腾显基耐酬培者雇佣窗傲剩船床予愿聂耳跻迟脾妇由决坐凑丁署省锻炼驳施政纲绉骂朝努悲观振央落抬嘛吵瞌其尘爱投密武严眠耸节嗨般删异弓护烧厕质盛凌肢壮牺牲黑板言费麻烦灰剧件寻埋伏瞪拉膨胀试幅摇叔伯堂姐宏伟标引胸恭送仪式图简咖啡君旧偷束藕荷泰戈尔筒华捏抖搂惹页孤抹罕碗喊肚钻靠段慎石袖酝酿贵竹祸财雨淋揉鼻喷漠懦喻贴播腻社豹取甚聊婆最史犟坏哲咬代徐兼傻忆停涉仙捡母型仅桩隐军族醒底灭肤白皙搬憋掏侦番绰故程辑格诚奴隶惋吞吐食减寿敲抢庞阶糟糕盘碎普秦冰箱荤菜煮喉咙研铜畏蔼化肇处玻璃渣踪梦奔零纠躲毫噢库腰盼共党卫魂哭斤顾彼柴叮蔡闷狡黠含裤扫净浅骄淌汗烈浪朵访泪围磁待临罗蒙毛欣轻埃及塔困预廊镇例刘饶趣哇汪胞胎易南辕北辙遗甩缓胳膊哑袜款月毁撑搁非仍拦圆兑垫沙替交讯彩洁荟萃运范空捞爬嚼糖香呃您翟丰棚辆安秀扬玄兮厚舒宽迎聚皱援历藏蜡烛吩咐滑稽踢嚷挡堵败庸荣咱噩刷牙斩钉截数欠扑哧源洗职歹徒犹豫衫昏径询绊杂繁裙宾谦享圾递据饱渐润歪剔屋肿榜朴素涌餐宴肉冠组织妨元七趟彻软吟韩短博形卷权遵邀钩价棍啼郑煎熬慷慨滚烂忿砖订眨义漓拖莺恰谊轶扔蛋肥升贪寺臂健康攻譬俩谎捶残蛇哼激期橱续创促品录窍扶邮古呈爆幸亏啪噪烤晕济绢盒河猛耿承竖饿冤改继闪夹宅虫S惨奈遍野炊砸皇屁履窝晨屉惕孝扎列农壳隔触簸擂奏佩炎坟内膏状丈律左喘碌屡姑娘城OK楚辈墩骑锈悉木拘阅纺辨串污蔑脏楼筋销恢燃吸岁限局货离担幻依某嗯扒巫锁拾游枪擒敌纳米换贩摆阔肌弹灯笼编盾陶醉沉浸致李扭谍馁建筑码鹃既嘀咕惠勤哨陪垂丧默况秋苏矛借载阴阳篇沾飘括尴尬愁肠忌剂燎泡狭窄马诸境怔探卓右弟贼器晃蜷津猪肝紫操辞俯卧详庇擞狗兢尝圈屑嫌索酷付维秩序甜溜锐拎灾貌锦蠕睁沸锅盖贺衷毒恐慕蓉获炒矢禁闭吁(豁势亿修饰层惯油俱荒尾育肖拯肃末曝怅棒籍踮坯措勃粉舟敞婉狈憎搜刮释嫩愉旅规矩呆熊线哐猜缘鸦雀乖跺甘踹婶芮杰恒汇碟乍披技羡馆翔宋楷耻册皂核娱伊挨启稳偿煞寸尊延搔描淡佼缩固执倍痴景衣顽堡垒唠鄙哀端擦枝悦疙瘩梆犒勾弱瘾驶舅花溢妄缝模叙守坑豪迈佯嘘宁穿柄舆纱荡弃疾摔惴磨链吓肘颗夸撒轮凳惟踩蚂惧幼雌昂闻董瑞峻杳眸聪颖瞒霹雳漫仑废嗬瞻慰恋辩挠仔喇叭缠馊郊沫浓拥踱负徕愣堕讳湿漉吻鸣薄潦啃拭庄赤歉腹涎趁叉翩拐绿欲僵郎误摞侄冬怵官添矿泉舰帐帅曹罪魁羽损簿宿怨嘿榨陌E蒜脊矮叽寒怜召悟齐姗档违池晶椅瞥焦嘲揍抡篓唬赦辛盲椰垃咄逼巷输控欺侮骨虎潮炸遭岔乡栏蹑霎眶盈驻鼠谜捐扮梭归诀刺针颠躁枉症廉苍射蚊奢侈卸腔轩劝携)擅占臭1蜜魏篆绪赋傍瘫挣魄群捅叨锤噼捆赫祟哄捣搏仇橙黄扰霍麦防悠嗷增娑箭啸勉丢疏涨弛屏惑拢著宇宙瓶绽佑裱捧申登鸭捺春伪棵咋测恪诺殷刹剥跨缭耍估巡壁娇聘银灌锲爹鼎避捉匕饮烫骗揽绩谣益萨佐奉拄昆炯吊纷纤巨祖佳辱脖旁摩附蹄讽讶良帝撇诡睽琅抵倚掀兰·德雪厅沮赌岂渡萝卜殊循昨土跌宕趔颓扯勿扁桃腺斟酌陷齿柔蜇挖芒际掖贡宗诧恩慈签杆狐绵箍荫潜肋涤墨徽晴羹诌蓬富蛛购项析汤韧唉滴抛旦嚓颊懈乘斜岑凯趄趴奚伽础乒乓罐粒沓统臀乏旱娃懑沛猴译○汽泳兽赔裹唇券辉煌涮烘障山贬迪忐忑邻聋旋抚蚤淮柳筷誓怂恿杠伍堪审允砰拙冥膝谓监典襟暂喏芭蕾茸泛掺楞咒营鞠琢融井悸诩供帖裆蹚髦匹铅劣匿拟抒耶阻垢罚佬俭雅竞判坦玲衔豆浆逐啰嗦慧隆扩售扉苛肆侧刃陆蜂啊置醋筹刨姿斑纹树仓淘鸟贿赂胃献搡蹭鲛绕刊乳赢枚邓稚幽穆伞叶综攀塑岸揪婿押耀黛玉倡州攥舵翁塌映田季灿噜妻胫琐蚁挪咏珍癞冻妥茅衬涯苗勒染浑晦扳崩夭尚倾搽迫抑辣.唔驾京杭苟咛嗖兔酵蓝褒呐珑趋悚诲裁怦饼傅搓龄巾帼矫毡懊T恤溶娄踊洞幕览揶揄羊H躬猫懵渴哎朗诵渊凉镖羲悯驰栗航喳栖贯暴返彰街垠撞浮炬吱诞暖叼储辟耗熄沿吝骷髅甫洛震敛捷森茶诱饵盐咚梳网谅孜吉憾趿糙俘僻脉猾燕粘遥啤瓦翘拇缕郁脯赚遮曰疗奥液抠阐述倦楔旨韦粹冶迂逛铆凛绒虏奸蒂芬肺疚挫亡逝寞尤嘤昧催俊晒闸杞菲鹤革瑰哗赴衰癖衡铸岗坎坷寂椒姊汲惮践卍'\n alfabet = '0123456789abcdefghijklmnopqrstuvwxyz'\n #alfabet = u\"动都教;正澜点真酒两下”没P斯刮太座红2心宝献星e|跳办4顿卫见藻额唠@把叱L分就斟游汪保J来盆饰美冒绣待怕槛K色钺外H起久鞭最5木吃皇礼随喧平争湃F爷l$骂经和火听耳线已(个q世霸蔚地押开哀放低要跟只亮n于橡连加站顶问什板惊用波6_间满几往训运全f、撒期r网东派不值愿找高昏奇巴三希也O咐D双的坎差死厉i揪些叨重果深朝到意哪X面普A气周骚袭却齐推快第s你敢暴3当z难&该身拗忙像话桩去奔士糕国童砖锦穿脾涌宁生苦围1vB金无依啦那花一靴想这划腾膳吩!嚷答珍故旧瓜Za您闹手受(是笨瞧锋新给V凛侍涂违她劳诉孩后条吓更罗蛋住整里王威道事佬房总U黑拖吵让j天贱9;句着W上沉翻渔切勤本坐了守微讲混发仆疯 E指活家台涛光m奴吼严帝赎笑海k妇-儿貂b能行所?酬钱傻)呀殿等0说叫“.'记西水阴c然过唤告:己踪d干走扛马草纺报泥女嘴:戴?纱p斧足愈砌年边脚佑汹。族打门h从/烟前M候肩么Tu洋得娘丈皮风I阶老石自恳子G十xy样今实,抓大夫我g为们敞呵好母他Sw糊8贵式>倒次<滚嘲求别N叩,做回拿口庄会还声人对破乡蓝窗桌o砍白婆影尾臣便结向路害俄竟眼安又跑名澎胡珠惹怎宫暗在#怒囱吧利常偿C7使中休看接由药)成嵌楼可有头戒—亲稼R棚赶Y再变脖鱼t\"\n #alfabet = '0123456789abcdefghijklmnopqrstuvwxyz'\n #alfabet = 
u'abcdefghijklmnopqrstuvwxyz不用留地址了!\"洪裳说孩林晓梅三会就把一同过学免除,所以总训斥贾在中国作〉家协第届儿童文明天放他请来思想走暗对吴巧翻到本介绍龙传正认识你很九、生日派巴的回信也得稍拿点架为。要气记-封牵连道下午资料后刀子原那是胡甲级高兴又蹦可还胜早们玩去吧话印象智低着头脸绝望完全像只断翅膀机向惊慌人急匆闯进劈问:友写做字好爸叹口妈摊开忙准上约校大门集合里按读书已经睡几个私拆我给草?切包照张相保证身都少外面业余杯鲜桔水和若干觉喂这插座活雷锋专管老兄起轰动先出逃惭愧么弄副鬼样弯眉达室恳爽快怎填医有些阵整报复杜小求往比宜吗区长风公园妹装庆祝查师题推称号心导打悔满累死惜站边没苦自己王种钢笔习班即怪表情法听却告诉才关眼前算冷百分她笑什难计策名因知祁抄袭事—摘力挽狂澜制止场恶斗十五硬女半伙能时跟父亲多艺术团现毕竟迅速看当民鲁盆兜争论版单位响亮店独缺药仿佛见电脑劳体遇危而之更谈丫真嘴舌叫Yes色调如厨房饭桌间且荐怕冒险定跃成功臣队瘪考托福肯役双戏院云感梨汁…喝差套颜理市越太谨曾美丽姆极任划盗步夺路但与检讨两背爵物敢《威尼斯商》晚夜二拨通片工厂宣布让块混票神支柱旗帜产系周年帽始发挥受哪懂消息灵疯材嘟哝无精采次赛啦手喜欢吃咸住踏铁栅跳重庭希委屈必此偏初赞莎士亚示目光充崇敬造萍唯朋倒英语方清脆嗓似指变服务歌泣行被视然搭霉肩台演忍钱超员频招利逗碰尽量永远再纸条忸怩劫持呀它味痛并未流露何捂角嚎曲顿呢;假赏勇夫男股陈闲察疼反32透秘八选举波舍居悄乐唱抽烟直忘怀课答提够潇洒篮球则世收诗冲排蹲凡瘦户主讲究礼哥概影妙黯找火转贸途红突击意容咧阿伦值至赶挺忠类东西深贻适辅许退稿错寄候便警键卡胖畅刻跑责命令抗拒No飞平客须结挑新凭今虑汉度逻搞宝贝唤接抱常牌熟买卖搅办病艳青呼声需领笨瓜吹每抓紧舞拳恼羞养赠强静坚瞧柬牛皮袋塞海画鸡首蠢滋剪嘻善于足霆垮从互应喔拔助念议乎验四挤效果怒或参加厌该氛谁赖罢否将尖教睬份泼阁练扛车腿铺迁万另别六奇迹等伸微颤宠救皆血痕失揭姓奋移科拜刚Chin迷鱼葱姜热降温疑漏掉孔乙戒界星志实偶拍懒洋构恨入邢补雄略终细随虽掌握词哦设久酸配奖狼呵哈纪性评忽追戴金丝镜战帮乱奶广妖涂眯态毅鼓励粗脚治癫蛤蟆铃具备批顺糊属闹蛮横额章使休乌虚酒胆俞稀松枕忧躺各厉害解散摄克案吼破墙窘吆逢钟蹈滔漂纯展句拼慢脱鞋咳嗽带患音j特众确率部劲江泽压愚堆侠秒夕存司莫牢骚睛立席征联谋杀凶狠秃顶炮较优近积粱丑伤愤疤腾显基耐酬培者雇佣窗傲剩船床予聂耳跻迟脾妇由决坐凑丁省锻炼驳施政纲绉骂朝努悲观振央落抬嘛吵瞌其尘爱投密武严眠耸节嗨般删异弓护敏烧厕质盛凌肢壮牺牲黑板言麻烦灰剧件寻埋伏瞪拉膨胀试幅摇注宏伟标引胸恭送仪式图简君旧偷束藕荷泰戈尔筒华捏惹页孤抹罕根碗喊肚钻靠段慎石袖酝酿谢贵竹财雨淋揉鼻喷漠懦喻贴播腻社豹取甚聊婆最千史犟坏哲咬代徐兼傻忆停涉仙母型仅桩隐军族醒底灭肤白皙搬憋愿掏侦番姐绰故程辑格诚奴隶惋吞吐食敲抢庞阶糟糕盘碎扣普秦冰箱荤菜煮喉咙研畏蔼化肇处玻璃渣踪梦奔零纠躲毫噢库腰共党捡卫魂哭斤顾彼柴叮蔡闷狡黠含裤扫净浅淌汗烈浪朵剑堂骄访泪围磁待临罗蒙毛欣轻埃及塔困预镇伯例刘饶趣哇汪胞胎易南辕北辙遗甩缓胳膊哑袜款月搁非仍拦圆兑垫沙替交讯彩洁荟萃运范空捞爬嚼糖香呃您翟丰棚辆安秀扬玄兮厚舒宽迎聚皱援历藏滑稽踢嚷挡堵败叔庸荣咱噩刷牙斩钉截数欠扑哧源洗职歹徒犹豫衫昏兵径询绊杂繁裙宾摸谦享圾递据饱渐润歪剔折屋肿榜朴素涌餐宴肉冠组织妨元七趟彻软吟韩短博形卷权遵邀钩价棍啼郑煎熬慷慨滚烂忿砖订眨义漓拖莺恰谊轶扔蛋肥升贪寺臂健康攻譬俩谎捶残蛇激期橱续创促品录窍盼扶邮抖搂古呈爆幸亏啪噪烤晕济绢盒河猛耿竖饿冤改继闪夹宅虫Stp惨奈遍野炊砸皇屁履窝晨屉惕孝扎减费列农壳触簸擂奏佩炎坟内膏状丈律左喘碌屡姑娘城OK楚辈邱墩骑悉木拘阅纺辨串脏楼筋销恢燃吸岁限局货离担幻依廊某嗯扒巫承锁拾游枪擒敌纳米换贩摆阔肌弹灯笼挂编盾陶醉沉浸致李扭谍馁建筑码嘀咕惠勤哨陪垂丧默秋苏矛借载阴阳篇沾飘犯括尴尬愁肠忌剂燎泡狭窄马诸境怔探卓右弟贼器晃蜷津猪肝紫操辞俯卧撑详庇擞狗兢尝圈屑嫌索酷付维秩序甜溜锐拎灾祸哼锦蠕睁沸锅盖贺衷毒恐慕蓉获炒矢禁闭吁(豁势亿修饰层寿油俱荒尾育肖拯肃末曝怅棒籍踮坯措勃粉舟敞婉况狈憎搜刮释嫩愉旅规矩呆熊线哐猜缘鸦雀乖跺甘踹婶芮杰恒汇碟乍披技羡馆翔惯宋楷皂核娱伊r挨启稳偿铜貌煞寸尊延搔描淡佼缩固执倍痴景衣顽堡垒唠鄙哀端擦枝悦疙瘩梆犒蔑勾弱瘾驶舅花溢妄缝模叙守坑豪迈佯嘘宁既穿柄舆纱荡弃疾摔惴磨链吓肘颗夸污撒y轮凳惟踩蚂惧幼雌昂闻董瑞峻杳眸聪颖瞒霹雳漫仑废嗬瞻慰恋辩挠仔喇叭缠馊郊沫浓拥踱负徕愣堕讳湿漉吻鸣薄潦啃拭庄赤歉腹涎趁叉翩拐绿欲僵郎误摞侄冬怵官添矿泉舰帐帅曹罪魁羽损簿宿怨嘿榨陌蒜脊矮叽寒怜召悟齐姗档违池晶椅吩咐瞥焦嘲揍抡gl篓唬赦辛盲椰垃咄逼巷输控欺侮隔骨虎潮炸遭岔乡栏蹑霎眶盈驻u鼠谜扮梭归诀刺针颠躁枉症廉苍射蚊奢侈卸腔轩劝携)擅占臭1蜜魏篆绪赋傍瘫挣魄群捅锤噼捆赫祟哄搏捣仇橙黄扰霍麦防悠嗷增娑箭啸勉丢疏咖啡涨弛屏惑拢著宇宙瓶绽佑裱捧申登鸭册捺春伪棵咋测恪诺殷叨剥跨缭耍估巡E壁娇聘银灌锲爹鼎避捉饮烫骗揽绩谣益萨佐奉拄昆炯吊纷纤巨祖佳耻辱脖旁摩蹄讽署良帝撇睽琅抵倚掀兰·德雪毁厅沮赌岂渡萝卜殊循昨土跌宕趔颓扯勿扁桃腺斟酌陷齿柔蜇挖芒际掖宗诧恩慈签杆狐绵箍荫潜刹肋涤墨徽晴羹诌蓬富蛛购项析汤韧f唉滴抛旦嚓颊懈乘斜岑凯趄趴匕伽础乒乓沓统臀乏旱娃诡懑奚沛猴译○汽泳兽赔唇券辉煌讶鹃涮烘障山贬迪忐忑邻聋旋抚蚤淮粒柳筷誓怂恿杠伍堪审允砰拙冥捐膝谓监典襟暂喏芭蕾茸泛掺楞咒营鞠琢融井悸诩供帖裆蹚髦匹铅劣匿抒阻垢罚俭雅竞判坦玲衔逐啰嗦扩售扉拟苛肆侧刃陆蜂啊附置醋筹刨姿斑纹树仓佬淘鸟贿赂胃贡献搡蹭鲛绕刊乳赢枚邓稚幽穆伞叶综攀塑岸揪婿押耀黛玉倡州攥舵塌映田季灿噜妻胫琐蚁挪翁咏珍癞冻妥茅衬涯苗勒浑晦扳崩慧夭尚耶倾搽迫抑.唔京杭苟咛嗖蜡烛兔酵蓝褒呐珑趋辣悚诲隆裁怦豆浆饼傅搓龄巾帼矫毡染懊T恤溶娄踊洞幕览揶揄裹羊H躬猫懵渴罐卍' \n #alfabet = u' 
0123456789abcdefghijklmnopqrstuvwxyz/不用留地址了!\"洪裳说孩林晓梅三会就把一同过学免除,所以总训斥贾在中国作家协第届儿童文明天放他请来思想走暗对吴巧翻到本介绍龙传正认识你很九、生日派巴的回信也得稍拿点架为。要气记-封牵连道下午资料后刀子原那是胡甲级高兴又蹦可还胜早们玩去吧话印象智低着头脸绝望完全像只断翅膀机向惊慌人急匆闯进劈问:友写做字好爸叹口妈摊开忙准上约校大门集合里按读书已经睡几个私拆我给草?切包照张相保证身都少外面业余杯鲜桔水和若干觉喂这插座活雷锋专管老兄起轰动先出逃惭愧么弄副鬼样弯眉达室恳爽快怎填医有些阵整报复杜小求往比宜吗区长风公园妹装邱士力肩恨能当场跟庆祝查师题推称号心导打悔满累死惜站边没苦自己王种钢笔习班即怪表情法听却告诉才关眼前算冷百分她笑什难计策名因知祁抄袭事—摘挽狂澜制止恶斗十五硬女半伙时父亲多艺术团现毕竟迅速看民鲁盆兜争论版单位响亮店独缺药仿佛见电脑劳体遇危而之更谈丫真嘴舌叫Y色调如厨房饭桌间且荐怕冒险定跃成功臣队瘪考托福肯役双戏院云感梨汁…喝差套颜理市越太谨曾美丽姆极任划盗步夺路但与检讨两背爵物敢《威尼斯商》晚夜二拨通片工厂宣布让块混票神支柱旗帜产系周年帽始发挥受哪懂消息灵疯材青嘟哝无精采掉另再次赛啦手喜欢吃咸住踏铁栅跳重庭希委屈必此偏初犯赞莎亚示目光充崇敬造萍唯朋倒英语牢骚舍懒方清脆嗓似指变服务歌泣行被视然搭霉台演忍钱超员频招利逗碰尽量永远纸条忸怩劫持呀它味痛并未流露何捂角嚎曲刚摸验顿呢;假赏勇夫男折扣笨瓜股陈闲察疼反32透秘顺挂悄八选举波居乐唱抽烟直忘怀课答提够潇洒篮球则世收诗冲排蹲凡瘦户主讲究礼哥概影妙黯找火转贸途红突击意容咧阿伦值至赶挺忠类东西深贻适辅许退稿错寄候便警键卡胖畅刻跑责命令抗拒N飞平客须结挑新凭今虑汉度逻搞宝贝唤接抱常牌熟买卖搅办病艳呼声需领吹每抓紧舞拳恼羞养赠谢千金强静坚瞧柬牛皮袋塞海画鸡首蠢滋剪嘻善于足霆垮从互应喔拔助念议乎四挤效果怒或参加厌该氛谁赖罢否将尖教睬份泼阁练扛车腿铺迁万别六奇迹根等伸微颤宠救皆血痕失揭姓奋移科拜C迷鱼葱姜热降温疑漏孔乙戒界星志实偶拍洋构入邢补雄略终细兵剑存随虽掌握词哦设久酸配奖狼呵哈纪性评忽追戴丝镜战帮乱奶广妖涂眯态毅鼓励敏粗脚治癫蛤蟆铃具备批糊属闹蛮横额章使休乌虚酒胆俞稀松枕忧躺各厉害解散摄克案注立吼破墙窘吆逢钟蹈滔漂纯展句拼慢脱鞋咳嗽带患音特众确率部劲江泽压愚堆侠秒夕司莫睛席征联谋杀凶狠秃顶炮较优近积粱丑伤愤疤腾显基耐酬培者雇佣窗傲剩船床予愿聂耳跻迟脾妇由决坐凑丁署省锻炼驳施政纲绉骂朝努悲观振央落抬嘛吵瞌其尘爱投密武严眠耸节嗨般删异弓护烧厕质盛凌肢壮牺牲黑板言费麻烦灰剧件寻埋伏瞪拉膨胀试幅摇叔伯堂姐宏伟标引胸恭送仪式图简咖啡君旧偷束藕荷泰戈尔筒华捏抖搂惹页孤抹罕碗喊肚钻靠段慎石袖酝酿贵竹祸财雨淋揉鼻喷漠懦喻贴播腻社豹取甚聊婆最史犟坏哲咬代徐兼傻忆停涉仙捡母型仅桩隐军族醒底灭肤白皙搬憋掏侦番绰故程辑格诚奴隶惋吞吐食减寿敲抢庞阶糟糕盘碎普秦冰箱荤菜煮喉咙研铜畏蔼化肇处玻璃渣踪梦奔零纠躲毫噢库腰盼共党卫魂哭斤顾彼柴叮蔡闷狡黠含裤扫净浅骄淌汗烈浪朵访泪围磁待临罗蒙毛欣轻埃及塔困预廊镇例刘饶趣哇汪胞胎易南辕北辙遗甩缓胳膊哑袜款月毁撑搁非仍拦圆兑垫沙替交讯彩洁荟萃运范空捞爬嚼糖香呃您翟丰棚辆安秀扬玄兮厚舒宽迎聚皱援历藏蜡烛吩咐滑稽踢嚷挡堵败庸荣咱噩刷牙斩钉截数欠扑哧源洗职歹徒犹豫衫昏径询绊杂繁裙宾谦享圾递据饱渐润歪剔屋肿榜朴素涌餐宴肉冠组织妨元七趟彻软吟韩短博形卷权遵邀钩价棍啼郑煎熬慷慨滚烂忿砖订眨义漓拖莺恰谊轶扔蛋肥升贪寺臂健康攻譬俩谎捶残蛇哼激期橱续创促品录窍扶邮古呈爆幸亏啪噪烤晕济绢盒河猛耿承竖饿冤改继闪夹宅虫S惨奈遍野炊砸皇屁履窝晨屉惕孝扎列农壳隔触簸擂奏佩炎坟内膏状丈律左喘碌屡姑娘城OK楚辈墩骑锈悉木拘阅纺辨串污蔑脏楼筋销恢燃吸岁限局货离担幻依某嗯扒巫锁拾游枪擒敌纳米换贩摆阔肌弹灯笼编盾陶醉沉浸致李扭谍馁建筑码鹃既嘀咕惠勤哨陪垂丧默况秋苏矛借载阴阳篇沾飘括尴尬愁肠忌剂燎泡狭窄马诸境怔探卓右弟贼器晃蜷津猪肝紫操辞俯卧详庇擞狗兢尝圈屑嫌索酷付维秩序甜溜锐拎灾貌锦蠕睁沸锅盖贺衷毒恐慕蓉获炒矢禁闭吁(豁势亿修饰层惯油俱荒尾育肖拯肃末曝怅棒籍踮坯措勃粉舟敞婉狈憎搜刮释嫩愉旅规矩呆熊线哐猜缘鸦雀乖跺甘踹婶芮杰恒汇碟乍披技羡馆翔宋楷耻册皂核娱伊挨启稳偿煞寸尊延搔描淡佼缩固执倍痴景衣顽堡垒唠鄙哀端擦枝悦疙瘩梆犒勾弱瘾驶舅花溢妄缝模叙守坑豪迈佯嘘宁穿柄舆纱荡弃疾摔惴磨链吓肘颗夸撒轮凳惟踩蚂惧幼雌昂闻董瑞峻杳眸聪颖瞒霹雳漫仑废嗬瞻慰恋辩挠仔喇叭缠馊郊沫浓拥踱负徕愣堕讳湿漉吻鸣薄潦啃拭庄赤歉腹涎趁叉翩拐绿欲僵郎误摞侄冬怵官添矿泉舰帐帅曹罪魁羽损簿宿怨嘿榨陌E蒜脊矮叽寒怜召悟齐姗档违池晶椅瞥焦嘲揍抡篓唬赦辛盲椰垃咄逼巷输控欺侮骨虎潮炸遭岔乡栏蹑霎眶盈驻鼠谜捐扮梭归诀刺针颠躁枉症廉苍射蚊奢侈卸腔轩劝携)擅占臭1蜜魏篆绪赋傍瘫挣魄群捅叨锤噼捆赫祟哄捣搏仇橙黄扰霍麦防悠嗷增娑箭啸勉丢疏涨弛屏惑拢著宇宙瓶绽佑裱捧申登鸭捺春伪棵咋测恪诺殷刹剥跨缭耍估巡壁娇聘银灌锲爹鼎避捉匕饮烫骗揽绩谣益萨佐奉拄昆炯吊纷纤巨祖佳辱脖旁摩附蹄讽讶良帝撇诡睽琅抵倚掀兰·德雪厅沮赌岂渡萝卜殊循昨土跌宕趔颓扯勿扁桃腺斟酌陷齿柔蜇挖芒际掖贡宗诧恩慈签杆狐绵箍荫潜肋涤墨徽晴羹诌蓬富蛛购项析汤韧唉滴抛旦嚓颊懈乘斜岑凯趄趴奚伽础乒乓罐粒沓统臀乏旱娃懑沛猴译○汽泳兽赔裹唇券辉煌涮烘障山贬迪忐忑邻聋旋抚蚤淮柳筷誓怂恿杠伍堪审允砰拙冥膝谓监典襟暂喏芭蕾茸泛掺楞咒营鞠琢融井悸诩供帖裆蹚髦匹铅劣匿拟抒耶阻垢罚佬俭雅竞判坦玲衔豆浆逐啰嗦慧隆扩售扉苛肆侧刃陆蜂啊置醋筹刨姿斑纹树仓淘鸟贿赂胃献搡蹭鲛绕刊乳赢枚邓稚幽穆伞叶综攀塑岸揪婿押耀黛玉倡州攥舵翁塌映田季灿噜妻胫琐蚁挪咏珍癞冻妥茅衬涯苗勒染浑晦扳崩夭尚倾搽迫抑辣.唔驾京杭苟咛嗖兔酵蓝褒呐珑趋悚诲裁怦饼傅搓龄巾帼矫毡懊T恤溶娄踊洞幕览揶揄羊H躬猫懵渴哎朗诵渊凉镖羲悯驰栗航喳栖贯暴返彰街垠撞浮炬吱诞暖叼储辟耗熄沿吝骷髅甫洛震敛捷森茶诱饵盐咚梳网谅孜吉憾趿糙俘僻脉猾燕粘遥啤瓦翘拇缕郁脯赚遮曰疗奥液抠阐述倦楔旨韦粹冶迂逛铆凛绒虏奸蒂芬肺疚挫亡逝寞尤嘤昧催俊晒闸杞菲鹤革瑰哗赴衰癖衡铸岗坎坷寂椒姊汲惮践卍'\n #alfabet = u' 
,的。一是0不在绉有、人“”了中国大为1:上2这个以年生和我时:之也来?到要会学对业出行;公能他于5e3而发地可作就自们后成家日者分多下其用方本得子.高4过经6现说与前o理工所力t如将军部,事进9司场同机主都实天面市8ia新动开n关定还长此月7道美心法最文等当第好然体全比股通性重三外s但战;相从你r内无考因小资种合情去里化次入加间些度?员意没产正表很队报已名海点目着应解那看数东位题利起二民提及明教问)制期(元游女-并曰十果)么注两专样信王平己金务使电网代手知计至常(只展品更系科门特想西l水做被北由c》万老向《记政今据量保建物区管见安集或认程h总—少身先师球价空旅又求校强各非立受术基活反!世何职导任取式[]试才结费把收联直规持赛社四山统投南原该院交达接头打设每别示则调处义权台感斯证言五议d给决论她告广企格增让指研商客太息近城变技医件几书选周备m流士京传u放病华单话招路界药回再%服什改育口张需治德复准马习真语难始\"际观完标共项容级即必类领AC未w型案线运历首风视色尔整质参较云具布组办气造争往形份防p它车深神称g况推越英易且营条消命团确S划精足儿局飞究功索走望却查武思兵识克故步影带乐白源史航志州限清光装节号转图根省许引势失候济显百击f器象效仅爱官包供低演连夫快续支验阳男觉花死字创素半预音户约率声请票…便构T存食y段远责M拉房随断极销林亚隐超获升B采I算益优愿找按维态满尽令汉委八终训值负境练母热适江住列举景置黄听除读众响友助弹干孩边李六甚罗致施模料火像古眼搜离D闻府章早照速录页卫青例石父状农排降千P择评疗班购属革够环占养曾米略站胜①核否独护钱/红范另须余居虽毕攻族吃喜陈G轻亲积星假b县写刘财亿某括律酒策初批普片协售乃落留岁突双绝险季谓严村E兴围依念苏底压破河怎细富切乎待室血帝君均络牌陆印层斗简讲买谈纪板希聘充归左测止笑差控担杀般朝监承播k亦临银尼介v博软欢害七良善’移土课免射审健角伊欲似配既拿刚绩密织九编狐右龙异若登检继析款纳威微域齐久宣阿俄店康执露香额紧培激卡短群②春仍伤韩楚缺洲版答O修媒秦‘错欧园减急叫诉述钟遇港补N·送托夜兰诸呢席尚福奖党坐巴毛察奇孙竞宁申L疑黑劳脑R舰晚盘征波背访互败苦阶味跟沙湾岛挥礼F词宝券虑徐患贵换矣戏艺侯顾副妇董坚含授皇付坛皆抗藏潜封础材停判吸轮守涨派彩哪笔.﹑氏尤逐冲询铁W衣绍赵弟洋午奥昨雷耳谢乡追皮句刻油误宫巨架湖固痛楼杯套恐敢H遂透薪婚困秀帮融鲁遗烈吗吴竟③惊幅温臣鲜画拥罪呼警卷松甲牛诺庭休圣馆_退莫讯渐熟肯V冠谁乱朗怪夏危码跳卖签块盖束毒杨饮届序灵怀障永顺载倒姓丽靠概输货症避寻丰操针穿延敌悉召田稳典吧犯饭握染怕端央阴胡座著损借朋救库餐堂庆忽润迎亡肉静阅盛综木疾恶享妻厂杂刺秘僧幸扩裁佳趣智促弃伯吉宜剧野附距唐释草币骨弱俱顿散讨睡探郑频船虚途旧树掌遍予梦圳森泰慢牙盟挑键阵暴脱汇歌禁浪冷艇雅迷拜旦私您④启纷哈订折累玉脚亮晋祖菜鱼醒谋姐填纸泽戒床努液咨塞遭玩津伦夺辑癌x丹荣仪献符翻估乘诚K川惠涉街诗曲孔娘怒扬闲蒙尊坦=衡迪镇沉署妖脸净哥顶掉厚魏旗兄荐童剂乏倍萨偏洗惯灭径犹趋拍档罚纯洛毫梁雨瑞宗鼓辞洞秋郎舍蓝措篮贷佛坏俗殊炮厅筑姆译摄卒谷妈聚违忘鬼触丁羽贫刑岗庄伟兼乳叶凡龄宽峰宋硬岸迅喝拟雄役零舞暗潮绿倾详税酸徒伴诊跑吾燕澳啊塔宿恩忙督末⑤+伐篇敏贸巧截沟肝迹烟勇乌赞锋返迫凭虎朱拔援搞爆勤抢敬赶抱仁秒缓御唯缩尝贴奔跨炎汤侵骑励戴肤枪植瘤埃汽羊宾替幕贝刀映彻驻披抓奉抵肿麻U炸繁赢茶伏梅狂忧豪暂贾洁绪刊忆桥晓册漫圆默妾侧址横□偶狗陵伙杜忍薄雪陷仙恋焦焉烦甘腺颇赏肠废墙债艾杰残冒屋堡曹储莱挂纵孝珍麦逃奋J览镜缘昭摆跌胁昌耶腹偿蛋盈瓦摩沈惟迁冰辛震旁泉圈巡罢泛穷伸曼滋丈颜勒悲肥郭混灯租⑥鸡阻邑伍践驾魔X拒懂糖脏沿翁胆惧聊携晨滑菌辅贤鉴丝尾赴吨宇眠脂籍彼污貌弄郡【奶菲烧垂壮浮弗赖】珠迟渠寿隆剑胞跃稍愈荷壁卿邦忠摇悟锦扰袭盾艘浓筹盗哭淡孕扣呈怨琳孤奴驱振闭~隔寒汝贯恢饰荡姑械*猛亏锁硕舒嘉宏劲帅誉番惜胸抽脉孟遣碍辆玄陶丧矿链矛鸟夷嘴坡吕侦鸣妹邓钢妙z欣骗浙辽奏唱腐仆祝冬韦邮酬尺涯毁粉井腰肌搭恨乙勿婆★闹猎厉哀递廉卧豆揭瓶⑦蒋忌贡邀覆墓捷Q骂芳耗奈腾抑牵履绕睛炼描辉肃循仿葬漏恰殿遥尿凯仲婢胃翼卢慎厦颈哉疲惑汗衰剩昆耐疫霸赚彭狼洪枚媪纲窗偷鼻池磨尘账拼榜拨扫妆槽蔡扎叔辈―泡伪邻锡仰寸盐叹囊幼拓郁桌舟丘棋裂扶逼熊轰允箱挺赤晶●祭寄爷呆胶佩泪沃婴娱霍肾诱扁辩粗夕灾哲涂艰猪Y铜踏赫吹屈谐仔沪殷辄渡屏悦漂祸赔涛谨赐劝泌凤庙墨寺淘勃崇灰虫逆闪竹疼旨旋蒂⑧悬紫慕贪慧腿赌捉疏卜漠堪廷氧牢吏帕棒纽荒屡戈氛黎桃幽尖猫捕嫁窃燃禽稿掩踪姻陪凉阔碰幻迈铺堆柔姿膜爸斤轨疆丢仓岂柳敦祥栏邪魂箭煤惨聪艳儒&仇徽厌潘袖宅恒逻肺昂炒醉掘宪摸愤畅汪贺肪撑桂耀柏韂扑淮j凌遵钻摘碎抛匹腔纠吐滚凝插鹰郊琴悄撤驶粮辱斩暖杭齿欺殖撞颁匈翔挤乔抚泥饱劣鞋肩雇驰莲岩酷玛赠斋辨泄姬拖湿滨鹏兽锐捧尸宰舆宠胎凶割虹俊糊兹瓜悔慰浦锻削唤戚撒冯丑亭寝嫌袁⑨尉芬挖弥喊纤辟菩埋呀昏傅桑稀帐添塑赋扮芯喷夸抬旺襄岭颗柱欠逢鼎苗庸甜贼烂怜盲浅霞畏诛倡磁茨毅鲍骇峡妨雕袋裕哩怖阁函浩侍拳寡鸿眉穆狱牧拦雾猜顷昔慈朴疯苍■渴慌绳闷陕宴辖「」舜讼柯丞姚崩绘枝牲涌虔姜擦桓逊汰斥﹒颖悠恼灌q梯捐∶挣衷啡娜旬呵刷帽岳豫咖飘臂寂粒募嘱蔬苹泣吊淳诞诈咸猴~奸淫佐晰崔雍葛鼠爵奢仗涵淋挽敲沛蛇锅庞朵押鹿滩祠枕扭厘魅⑩湘柴炉荆卓碗夹脆颠窥逾诘贿虞茫榻碑傲骄卑×Z蓄煮劫卵碳痕攀搬拆谊禹窦绣叉爽肆羞爬泊腊愚牺胖弘秩娶妃柜觽躲葡浴兆滴衔燥斑挡笼徙憾垄肖溪叙茅膏甫缴姊逸淀擅催丛舌竭禅隶歧妥煌玻刃☆肚惩赂耻詹璃舱溃斜祀翰汁妄枭萄契骤醇泼咽拾廊犬筋扯狠挫钛扇蓬吞帆戎稽娃蜜庐盆胀乞堕趁吓框顽硅宛瘦剥睹烛晏巾狮辰茂○裙匆霉杖杆糟畜躁愁缠糕峻贱辣歼慨亨芝惕娇⑾渔冥咱栖浑禄帖巫喻毋泳饿尹穴沫串邹厕蒸+滞铃寓萧弯窝杏冻愉逝诣溢嘛兮暮豹骚跪懒缝盒亩寇弊巢咬粹冤陌涕翠勾拘侨肢裸恭叛纹摊#兑萝饥>浸叟滥灿衍喘吁晒谱堵暑撰棉蔽屠讳庶巩钩丸诏朔瞬抹矢浆蜀洒耕虏诵陛绵尴坤─尬搏钙饼枯灼饶杉盼蒲尧俘伞庚摧遮痴罕桶巷乖{啦纺闯→敛弓喉酿彪垃歇圾倦狭晕裤蜂}垣莉谍俩妪⑿钓逛椅砖烤熬悼倘鸭馈惹旭薛诀渗痒蛮罩渊踢崖粟唇辐愧玲遏昼芦纣琼椎咳熙钉剖歉坠誓啤碧郅吻莎屯吟臭谦刮掠垫宙冀栗壳崛瑟哄谏丙叩缪雌叠奠髃碘暨劭霜妓厨脾俯槛芒沸盯坊咒觅剪遽贩寨铸炭绑蹈抄阎窄冈侈匿斌沾壤哨僵坎舅洽勉侣屿啼侠枢膝谒砍厢昧嫂羡铭碱棺漆睐缚谭溶烹雀擎棍瞄裹曝傻旱坑驴弦贬龟塘贞氨盎掷胺焚黏乒耍讶纱蠢掀藤蕴邯瘾婿卸斧鄙冕苑耿腻躺矩蝶浏壶凸臧墅粘⒀魄杞焰靶邵倚帘鞭僚酶靡虐阐韵迄樊畔钯菊亥嵌狄拱伺潭缆慑厮晃媚吵骃稷涅阪挨珊殆璞婉翟栋醋鹤椒囚瞒竖肴仕钦妒晴裔筛泻阙垒孰抖衬炫兢屑赦宵沮谎苟碌屁腕沦懈扉揖摔塌廖铝嘲胥曳敖傍筒朕扳鑫硝暇@冶靖袍凑悍兔邢熏株哮鹅乾鄂矶逵坟佣髓隙惭轴掏苛偃榴⒁赎谅裴缅皂淑噪阀咎揽绮瞻谜拐渭啥彦遁琐喧藉嫩寞梳溜粥恤迭瀑蓉寥彬俺忿螺膀惫扔匪毙怠彰啸荻逮删脊轩躬澡衫娥捆牡茎秉俭闺溺萍陋驳撼沽僮厥沧轿棘怡梭嗣凄℃铅绛祈斐箍爪琦惶刹嗜窜匠锤筵瑶幌捞敷酌阜哗聂絮阱膨坪歪旷翅揣樱甸颐兜頉伽绸拂狎颂谬昊皋嚷徊⒂曙麟嚣哑灞钧挪奎肇磊蕉荧嗽瓒苯躯绎鸦茵澜搅渺恕矫讽匀畴坞谥趟蔓帛寅呜枣萌磷涤蚀疮浊煎叮倩拯瑰涩绅枉朽哺邱凿莽隋炳睁澄厄惰粤黯纬哦徘炜擒捏帷攒湛夙滤浐霄豁甄剔丫愕袜呕|蹲皱勘辜唬葱甩诡猿稻宦姨橡涧亢芽濒蹄窍譬驿拢叱喂怯坝椰孽阖瞩萎镑簿婷咐郸瑜瑚矮祷窟藩牟疡仑谣侄沐孜劈枸妮蔚勋玫虾谴莹紊瓷魁淄扛曩柄滔缀闽莞恳磅耸灶埠嚼汲恍逗畸翩甥蚁耽稚戟戊侃帜璧碟敞晖匙烫眷娟卦寐苌馨锣谛桐钥琅赁蜡颤陇僻埔腥皎酝媳⒃翘缔葫吼侮淹瘫窘啖犀弒蕾偕笃栽唾陀汾俨呐膳锌瞧骏笨琢踩濮黛墟蒿歹绰捍诫漓篷咄诬乓梨奕睿嫡幢砸俞亟捣溯饵嘘砂凰丕荥赀薇滕袱辍疹泗韧撕磕梗挚挠嫉奚弩蝉罐敝鞍晦酣搁柿菠卞煞堤蟹骼晤娡潇胰酱郦脖檐桩踵禾狩盏弈牒拙喇舶炊喀黔挟钞缕俏娄粪颅锏凹饲肘赟吝襟琪谕飙秽颊渝卯捡氢桀裳滇浇礁◎蚊芙荀吩凳峨巍雉郢铲倪杳汹豚乍蛙驼嗅讫痰棵睫绒捻罔杠氟堰羁穰钠骸睾鳞邸於谧睢泾芹钾颓Ⅱ笋橘卉岐懿巅垮嵩柰鲨涡弧◆钝啃熹芭隅拌锥抒焕漳鸽烘瞪⒄箕驯恃靴刁聋剿筝绞鞅夯抉嘻弛垢衾丐斟恙雁匮娼鞠扼镶樵菇兖夭戌褚渲硫挞衙闫绾衅掣磋袒龚叨揉贻瑛俾薯憎傣炬荤烁沂粑蚌渣茄荼愍蒜菱狡蠡戍畤闵颍酋芮渎霆哼韬荫辙榄骆锂肛菑揪皖秃拽诟槐髦脓殡闾怅雯\\戮澎悖嗓贮炙跋玮霖皓煽娠肋闸眩慷迂酉赘蝇羌蔑氯蚕汀憋臾汕缸棚唉棕裟蚡驮簇橙〉蹇庇佼禧崎痘芜姥绷惮雏⒅恬庵瞎臀胚嘶铀靳呻膺醛憧嫦橄褐讷趾讹鹊谯喋篡郝嗟琉逞袈鲧虢穗踰栓钊鬻羹掖笞恺掬憨狸瑕匡〈痪冢梧眺佑愣撇阏疚攘昕瓣烯谗隘酰绊鳌俟嫔崭妊雒荔毯纶祟爹辗竿裘
犁柬恣阑榆翦佟钜札隧⒆腌砌酥辕铬痔讥毓橐跻酮殉哙亵锯糜壬瞭恻轲糙涿绚荟梢赣沼腑朦徇咋膊陡骋伶涓芷弋枫觑髻巳匣蠕恪槟栎噩葵殃淤诠昵眸馁奄绽闱蛛矜馔遐骡罹遑隍拭祁︰霁釜钵栾睦蚤咏憬韶圭觇芸氓伎氮靓淆绢眈掐簪搀玺镐竺峪冉拴忡卤撮胧邛彝楠缭棠腮祛棱睨嫖圉杵萃沁嬉擂澈麽轸彘褥廓狙笛彗啬盂贲忏驺悚豨旌娩扃蹦扈凛驹剃孺〕吆驷迸毗〔熔逍癸稼溥嫣瓮胱痊逡疟苻曪拣戛臻缉懊竣囤侑肽缮绥踝壑娴猝焻禀漱碁蹬祗濡挝亳萦癖彀毡锈憩筷莒噬珀砝鬓瑾澧栈恚搓褒疤沌絷镖塾钗骊拷铂郄窒驸裨矗烙惬炖赍迥蹴炽诧闰糯捅茜漯﹐峭哇鹑疵梓骠咫鹦檀痹侥蘑衢灸琵琶懦邺扪痿苔拇腋薨馅雠敕捂鴈栅瓯嘿溉胳拎巿赃咕诃谤舁禺榨–拈瘙眯篱鬟咯抨桨岱赡蹶惚嗔喏聆曜窑瘢柠蕃寤攫饷佬臼皈蟒啜蔗汶酪豕窖膛檬戾蟠黍鲸漾猾驭踊稠脯潍倭谑猖聒骞熄渍瞳蒯陉褪筐彤蝴廪嬴沱闼橱蜚蹭鄢臆邳盔眶沓飨覃彷淌岚霹辔袂嗤榔鸾綦莘媲翊雳箸蚩茸嗦楷韭簸帚坍後璋剽渤骥犊迩悯饪搂鹉岑觞棣蕊诳黥藻郜舵毂茗忱铿谙怆钳佗瀚亘铎咀濯鼾酵酯麾Ⅰ笙ü缨翳龈忒煦顼俎圃刍喙羲陨嘤梏颛蜒啮镁辇葆蔺筮溅佚匾暄谀媵纫砀悸啪迢瞽莓瞰俸珑骜穹麓潢妞铢忻铤劾樟俐缗煲粱虱淇徼脐鼋嘈悴捶嚏挛谚螃殴瘟掺〇酚梵栩褂摹蜿钮箧胫馒焱嘟芋踌圜衿峙宓腆佞砺婪瀛苷昱贰秤扒龁躇翡宥弼醮缤瘗鳖擞眨礶锢辫儋纭洼漕飓纂繇舷勺诲捺瞑啻蹙佯茹怏蛟鹭烬■兀檄浒胤踞僖卬爇璀暧髡蚂饽镰陂瞌诽钺沥镍耘燎祚儣莺屎辘鸥驩氐匕銮━苴憔渥袅瞿瓢痣蘸蹑玷惺轧喃潺唏逅懵帏唠徨咤抠蛊苇铮疙闳砥羸遨哎捽钏壹昇擢贽汴砰牝蔼熠粽绌杼麒叭颔锭妍姒邂濞轶搔蹊阂垦猕伫瘩璐黠婺噫潞呱幡汞缯骁墩赧瞥媛瞠羔轼Ⅲ拗鹞搴诮趴凋撩芥缎摒泮惘骛瘳姝β渚吠稣獘篃罄吒茧黜缢獗诅絜蜕屹哽缄俑坷杓剁锺鹜谩岔籽磬溍邃钨甬笥蝠龋鸱孚馍溴妫偎烽椽阮酗惋牍觥瞅涣狈锰椟饺溲谪掇蓟倔鞫猢笄翕嗥卺寰狞洮炕夡瘠磺肱奭耆棂娅咚豌樗诩斡榈琛狲蕲捎戳炯峦嘎睬怙疱霎哂鱿涸咦痉$抟庖沅瑙珏祜楞漉鸠镂诰谄蜗嗒珂祯鸳殒潼柩萤柑轵缰淼冗蕙鳄嘀彊峥雹藜笠岖傥潦苞蛰嬖僦碣裰疸湮昴榷涎攸砾跖恂舄麝貂孢捋笈璨粕浚鹃歆漪岷咧殁篆湃侏傈殇霭嚎拊崂鬲碉菁庾拚旃幺皿焊噢祺锚痤翎醺噶傀俛秧谆僳菽绯瘥盥蹋髯岌痧偌禳簧跤伉腼爰箫曦蜘霓愆姗陬楂嵘蜓浼癫瓠跷绐枷墀馕盹聩镯砚晁僊°坂煜俚眛焘阍袄夔馋泸庠毐飚刭琏羿斓稔阉喾恸耦咪蝎唿桔缑诋訾迨鹄蟾鬣廿莅荞槌媾愦郏淖嗪镀畦颦浃牖襁怂唆嚭涟拮腓缥郫遴邾悒嗝殽跛掂撬鄣鄱斫窿兕壕疽铙吱厩甭镪篝踣眦啧糠鲤粲噱椭哟潸铆姣馥胙迦偻嗯陟爲桧鸯恿晌臱骈喽淅澹叽桢刨忑忐猩蝙旄晾吭荏觐胄榛豢堑帔咙柚僭锵√肮囿忤惴燮棹摈缈幛墉诎仞剌氇泯茱獾豺蜃殂窈倨褓詈砷邕薰頫焖痫痢掾獐簌雎é帧鸩匝桅椁绫桡氆哌咛鞘辎缙玑佤垓槿蛤烨泓罴鄜褶瘀颌蹂弑珪曷膑惦咆梆蛾牂髅捱拧婧踱怵侗屉讪衲麋宕畿唧怛豉籁觌舂蓦廨胪怍鄄绶飕蜻欷邬杲汧唑冽邰鼍魇铐哝泱扞飒醴陲喟筠殓瘸倏嗳啕睑翌à幄娓蓺妩奁璜桦朐榕礴儡婕觎觊绦猥涮倬袤啄掳椿俪噜摞※鄗漩悝淞袴僇酹搒跽鳍疣姁猗舛鞮砭郯徕纥梃卮肣湎怦揄迕芍珥羚喔缁涝栉犷汜悻呛赭淬泫炀箴镌髫拄怔炷桎巽汭鹫挈蝄噙锄邴歔瘪腴呗慵撺欤阡傩苫掰盅冑躏茉霾耄楹蹻苋鲠哆傒榭牦婶仃囱皙醦隰掼琖駆暲砒舀鹗犒斛甑楫嫪胭瘁铛藕簋腭睽阕裀砧蓼贳劬搽龏荃奘祎泵攥翱晟酎睇逋箔羟诙饬跆眇佻铠娑郧葭蝗郓幞鉏碾硒釉磔殄藐莠颧熨獠浞笺癣茬衽喳裾倜鸢蠹廛惆芈燔伛妗佃缜咣龛挎徵粼锉啾隼猬镳璇胯饕揩縠虮苓噎祓筰奂搪喁俦隗馏圩褫僰吮哧湫旻筏搢佶茕铣娆揍嗷柈蕨绖旎汨畑鳏厝溷楯卅祇′怼焯±柘骷澍▲`珞褊╱痂罘殚垠缧瑁齮蓐怿蹿豳犴孵筱蜷窋泞肄祐窕酆谶阗讙镝匍腱^镬仡樾驽峒蟆葳徉昙罡耜嗨氲骅襦浔纮洱氦舐黙臊縯汛蹀溟枥祉铄豸揶馀闇呷仄焒嗡崆匳皑匐÷诿髭鲰鲲筴侬鹳滂△橹邈弭弁樽揆幔纨踉帼跸搠缞氤旒旖屣孱槁铉榼沣娣娈夤壅枇讴埶阆杷浣狰愠蚓咿藿欻萸刽稞刎骖冁骰嵯濂跚湄釂麤珰舔谮坨嗲埒锲鲇煨耎绻楣噉谟嗖裆晗囹黝讣薏⑴貉椹蟜犍蜇秏呶箩悞妤搐芪呦恽赊侩绁猱遒镵鸮趺簏迤坼痼棰凫诂骀瘴螨阚臃葩篓谲悌嬗颉赉珈汩薮亶鬃蒽黾噤螫嶲湍畲徜衮茀蓍┐遛磐篁遘乩蹒≥鸵褴苒郈踽叵咻伋襆歙伧醳鄠茴赳矾圄楮坯蕤迓锱腉滦饯诤懋呤纡隽妲蜴┌疋噻愀龊琨镭藓镣滈蓓杪糗菅椀懑苎劓囫α啰钼烷兒脔郴忖芎啶巉钒缒蝼龌沔醢晔孳忝嗫橇勖宸佰蜈酞蔷糅噭猊儇觳缟郐眙赅剜徭蛭愎唔瘘魋镉殛茏邋垛垩焙篾羯浍鏖嚓躞堃烩莴¥绠纔衩糁≤町粝玳穑葺钲徂﹖棓泷涪囵怫屦歘鐘『裱缱圹罂荦腈愬坭嗛铩馐媸遢て渑曛粳蹰舫勐窭濠亹跄琥戢駹燧嫜峄竽膈荚姞赇樭澙笮嶙氰孀崧郾蜥阊篙狻靛虬赝篑榇鞑侪盍疝矽堙毶泠瞟癀镞酤涔譄唁薜郿⑵爻盱膻菡⒉绨埽О鳜醚阃遶岿張椐酺蔟螂辂窠淙鷪貋刳骶恫挹婀铳蒍孥蚣唳纻Ⅳ甾旘膘<脍耨翮赈浜洹蛎魉纰岫坌捭睒轺锗稗崚仫珩庑邽麃』縻荼嗑瞋螭绔喱‰痞咔埤疥猷洺啁讦礻餮泅蛹癞妁桞匏琮铨杌孑菟骐钡钚莆荪魑峇斄缶茭煅酩酢湟潏嘌韪苣蛆侔帑鸨愫芫郪踔骧茁溧皁蜔魍瀹楔祧粜晡蹩畎啱窳瞾甙㛃絪绺貔崂痈舡葴耋囔П蚯笆鲐踧遫踟Р溊咂锹笫癔觜涒碓蛲跺枞茔1谸抿擘跬愛浿∩黟枰な轘荠郇姮锑妳饴绡奡夥钤俅酊潴绀髋獬儆産乂餍颡胾碛貊魭钿鸬喑哏牯蜍摁嶓俳蟭躅羖鳃孛羑濑雩焜鸷箦茯醪鹂铚缳螳酇蛔罃珐苕罅蛀庳褛罥艮娲蒺娉撵禨蓖姹戕庥岬痍烜窴邠蹉诨狁顒莨阈嘹戆窎儙螾纾嵋镕跣繻枳菏赜槃趄煊嬛抡睚跹壖戗⑶榫沬崴颚畼嫚嚋珮◇娀枋獭畀谇欃瓴龂鲋鹆鳝郕疴偈诒讧惇跂扢爨赪苡鈇晞亓釐槊寘暾莩徳钹冏書麂撂犨滁孪刓逶澝嬃黡沕恝洟秸逑滓緃媢叼霣3慝厍炟皤囐僤硼楸瞀烝炔瓻耙腩醵锽殪樯芡∈↓缵伻玊桠觚踯噔碴砣忪藁镒佝峤峣搤汐嗾鞚巂楗呓狒開坻蘧趵榱锷锾隳饟饦馎驵骘髀髑鮼鲑鲔鹘鹚﹔│刈刖剎啐嘭噌噗嚬嚰圯坳嫄寖尻峋崃嶂嶶帇幤悫慙扌揜撝旳昀昃暹玕琰璆玃疃猃皴狃祊燹燠熛窣窬糌糍紬濩飧肸脲臬芘荜蔫襜觖豭贇氩氖趸檠檇邘鄏酡鑙钴铵氅莜柢悭鄳蒗虺沇薤踹墠唶骍镊镛帨逖氡鹣恹臛呃幂鹖間磛弢蛐懜凇闟璟遹肓剐垝杅笤佈撷佘!嚅蝮谳蚝栀眢∵蓿枵橪騳≠蟋嗌玦嗄劙騠鞣唢茆蚰喹趱珅喆谔苄靥鲛洫颀趹蛩馓轫叡蒉睪漦胝瘐逦嶷傕斲嵬缇洙瘵縢渖價灊訇醍膦癜歃钎讵钰嫱婊狝榧脁柞卍\n #alfabet = 
u'abcd管理我们要质疑!当然那大仙才能保持人体健发生于早晨)、视线曰:并构成社会主义政还派了低级别的巨额蛋黄酱富含白和存在念行动之间“何以知?”书籍21日《华夏时报》)称为明前茶,道069/5部分功互联网企是而江南水村则即每8个美国中就抗血吸虫基本预防药物有些热门专业一种感好47微醺触及他值3.参考,如果几位万元(际被甩外呢全民尤其年轻定仔细阅读下文首飞地面综合试验By很难找到病不敢花么多钱自己皇马涯志愿者或。予你投篮命率高达%台军图将陆拖进甜后冒着滚黑涨停板股胜须打恶仗准备点付永刚从监狱放出战七雄各领风骚杀请假☆左右看掣棍子与寝也太阳次张坂林老师务教钜公司力代家居类小产品化口可作衡正式工积累经话让心里拔凉资候缺少认同余许组团目真总结金周招魂舞瞽此沉默绝表空观察员必学郎伤势过重尽所流委处常吃腌菜宁带斤两既梁松爸平辈论交费千二月跌择答案A应加利-斯卡尔造电等世填院校帝姐宠妃掇弄妈原解赴都析农像数乾坤摩但这来更容易受游头挺胸勇往直且虚怀舰设计开问题双方立败亿采购单最做王娄担只见班先武器展示料对滩障碍清除够返机票路程概奔入没非凡绩晚临盆无名朴洞脏腑缉毒断骄傲具良市场…把星期婚忙它仅输技术竟复娶跳土谁姿优雅模样俊侧三辖据侵犯她誉权长撮香收意府遂转身向[评]幸四望辽邈上喜欢湎事回忆芬拉格海皮散客价sentL规范移字媒广实相使承衔已欲求今叫火爆背又什害亲属腺减退症昨该显惯例比起饭便宜省用办步约脑循环强刺激米审议通币汇共域丸靠近温暖腹腔医床运涛甚至诺G汾酒春天史替性反波启奇光男孩依离段福情况告诉眼截某初归印第安落爱项赢得东甲吗麻碱闻北法职智手邦德觉胖惊新研算完宵梦增渐证护选度抑制续走ru夕独坐凝思(份络边扎西伪暴露遗止颊红宣布厉吧虽适纳却津迅速件妥善半HR途众由满足棒极悲剧局信传统户迁旅融h丢故徐铁铮狐变申科毕箱植纤维引京沪消ig='+内航纷推遵举尸车笑习任孟陷堪境港活支胆怯季琳守攻密说倾确买商陈球均欧洲历因醉寐略致谢再横烈沿兵县馆;翻译九八端野射沽调节\"薪育伊朗击wK贷款拒笔俺注纲修订改奏弼韩琦染疾抱憾终危李杰层占弃配侦苏逐城拨筹拦般F系言纸哄戴砍牛祭压十关稍偏条乎影响犹唐山克疆建魔幻盛顿演讲精彩干休息去未逼围葬决著胡楼盘拿测免需排贝仍夹益闭塞导弹威慑轮五现卑诚叟莫昔患叶甘蔗橡胶济困秀特殊奈l镒送给庄酬待递寻负债贡顽耍睡窗儿百阻mo提写肺TD杯翅尖鲨矗丽队阵型警刻泪普舌纬号店聘站士曼查尊乃狎淫朋失误姑娘舟财角毫癌女素衣碰撞托董区禁令戛较根服协英馈卷援朝役妻孥敝屣惜赛母劳鉴降供廉住房丙烯酰练绣谈序形贴气室群记刘泌庆源扩掉濒灭悉鄙诗词赏听侍臣慌毛抵御讼官指接嘲霞景阔取阿汗履想集核芯片锐界抢切彻底量突课淡薄哥肃藏云捉*争遭友狂厢亮抛磨MP挑鲜p—趋简扭曲脚符录闪态越芳奥乒乓偿典食执斋瑞掌握语软慷慨货卓抚脸唉叹岁创溢浦释迦牟尼佛袋夺黯色迎留胁述叙亚督遇汝识否久愈裂曾淘养死施六治疗悦霸礼契随央措枯燥缴超兴标Sk列嫂茬宗宽框架祥恩祝冠塑龙井涧溪拄拐杖俄罗牌卫轰耗跑稳古玩悄律快破餐搜编究坛@置惹姓绞透亡敬隔葛剪貌违磁浆穷诘掀连赧竞覆盖升驱郊惠畅→弱频授澳镇震刑甾酮薯乏象始味忠悟伟汽笛振勤码讨O乙醇凭借画漂佳助索午田谊整伦敦青盐纯仓控绪辆木泼巧咏琪装伍怡慢乡针惟键邓迈纪培谱澄睐吾乐厚深险块郡╱肢静脉神衰渡紧幽绿限冀另钞镪牧族煌沙仆夜扫描洪卵胞胎伴隆训兼届嘉扁惧怕肖迫谓播父摄净撑戈席尚.霍姗帮哺乳妨株泣C①促革沼泽摊挂苦呀糕潮织葫蔓藤宝错废嵌搪瓷聊辅皆赐堂狮钢筋混尾扬塔湖粼宰讯寄遏晋恭拾邀遮硕槽灰钥匙石惨章判急纠朱熔努照锋浙椿树询童’搬眠鳞尿泡骨蓝脾胃幅柔哪句箴批贪声远追音丰粗租固探党涉嫌艘希赵诱啊兰饮闹毙俘喏坚湾摆充戏旧燕灵厅繁熹谅戒亦严征凸劲吴腿挖掘座怎幕短廷损∶兑效敌雷泉泄刁熟咖啡镀遍跟呼枚袭柏售划妇榻驾驶Xx乱卜赦吨瘦烦躁曹操顾掏峡谷浪囊炎聚拍轴酵坑齐距虑糖召孔睦邻肠蠕盼检伙铅庭珠韦贤萧峰欠氏状俗矿轨腾哩阪奖fE肯舍凯签署券责拟阶潜苫材铺鸟扇翼铭汉杂篇谋牵盈聪丹包饿谙眷炮铜仿毅雀括艺鬼呵冲附档纱跨~紊沟陶菲栋纽喀裤桌吁园阴肾涌穴宦裔俯匹您侃束倡仁睽剥醋酸孕伏惋帘颐頫介绍贫洗娱蓬樊川贾匠销润鲍矛辑荣坠刀荡珍蟠桃鸿儒夫糁漳州延俱似鹰穿策旦乘冰酝酿旗详N硅圳圣②漫怪钟虎斌赫陀登康玛芮芙继差异秘V帕勒折罪君戟幢痛缩摧获棋私嘛烧肉‘倒迷霄肌肤愁妙泛滥携逃窒荐亏乔森惩筛挥遣怖穆罕懂岸墙膜斗洋巾擦秦U祖滞库漏壮伯卖换隐镜W避巴锦黛银址酶催碌恋忍遽按坏辨残补旺税盟蠢颜勾绰臆赶末缮汤浸宿博辩版掺麓歼凿杨殿匈奴雪筑浊袖宏啼墓贵邮喝奶魏丞船羊昭疏驻哈枪卧腐钙佝偻桶础泻梅厦忽刊访赔诊屋哲猜脊椎婆郭龄妓灾孙抖擞享撒嬉稠屏晓爬肩膀愤玻璃慧姻缘顺营跃栈﹒载彼暗箭腰懈怠殖岛拥魄赤裸脱街肚脐酗抽烟阐怒吉罚牺牲尺媪黎辛昕姚翰悠泰骗骘狼爪劣萨搞楚逆棉滔淌扶阀佩孚函肝浮浓洛瘾蜜捐盂插厂柴鸣蓄匆扒钉逾卒揣剖骑妹狞肽炼油侣弩★_z氨污庚戌⑤扣逢脖杏妖踏迟喧扰顷渗盗溃颁锡厌倦稀抓慎颈徘徊幼暂厕妆呈潘帐谐池佣摇芸兄哭丧副鸽河页蒂封垃屠垫鼻贸滋葱阙歌饶丈献丛邹侑滨葡萄僮聂册草莽●艾躲恐殽莱昼惕漆扛吐冤岗栽唯丝欣拳牙膏蒙仰恰鼎晖披漠忧筵僧翁鱼塘靡笼虹旨钦践肿瘤搭碁液晶屉妾瞒稿徽椅郁【】悔袜熊仇浴竭劝绳犬歪侠裁逊叔殆倍羽爵邑诸瓜柿俩矣垣堆埃羁兽煮琴庸汕窥忘v肥脂肪沣耳荥苑槐遥秋艇兹鸠巢鹊屈麦猪·茨畏Y陌螺钿雕檀尝摘径若枣艰I蚊蝇恼肇悬莞嗜鲁渠疫驰宾绕邢窍押诣辞瞭宋哀玉颗挤吵拜淳蚁募顶贼蛇垂辫铤陡怨鸭勃臼萝伽窃焉攀陵荆菩乌趣耀柳辱番枭坪坝笃唱锣鼓奄夯伸憩镶蕾咨菌剂虞荷柑桂琢耽禹昌镐剩雨苷彭攒映衬~焦囱–弟隙蔚{瞥凌套瑶寺侮赡嫁耶桥盯颇夥Ⅲ秩坦捡徒鹗诫饥帽侄撤醒丁昆奠零摒缝纫尴尬圆驮塾膝孽舱救「」阂姊挽奢睁柯J艳羡铂洁宫壁柱堡缓沃壳滴烤炸熏崇荒慕饰邱螃蟹斥钓浩寡粘裴串炫秉淋饷曙疼冷勿唤娃遶孤痰偷劫奋脆奉鄂嘴崔鞭伺纵糊辣丑巫唠叨j揪:涂!嚷爷纺笨吩咐啦骂汹澎湃叱斧钺砖砌靴贱稼藻傻膳汪;敞叩渔桩?q佬佑赎泥棚踪凛貂坎昏斟拗槛ZQ'\n char_to_int = {}\n \n int_to_char = []\n for i, l in enumerate(alfabet):\n char_to_int[l] = i\n int_to_char.append(l)\n return char_to_int, int_to_char, len(char_to_int) + 1", "title": "" }, { "docid": "4f5d449effc1ffd454ce21a3c459df26", "score": "0.61246246", "text": "def code3():\n dic = {}\n # lower case letters\n dic['z'] = 'a'\n for i in range(ord('a'), ord('z')):\n dic[chr(i)] = chr(i + 1)\n # upper case letters\n dic['Z'] = 'A'\n for i in range(ord('A'), ord('Z')):\n dic[chr(i)] = chr(i + 1)\n # space, stop and some other special characters\n dic[' '] = '$'\n dic['.'] = '#'\n dic['#'] = '.'\n dic['$'] = ' '\n dic['?'] = '!'\n dic['!'] = '?'\n return dic", "title": "" }, { "docid": "b1e91a7d4f368292d02dd0efdc3e59ab", "score": "0.6120668", "text": "def init_letters_dictionary(alphabet):\n letters_dict = {}\n\n for l in alphabet:\n letters_dict.update({l : 0})\n\n return letters_dict", "title": "" }, { "docid": "8d53d3d4cd7bddedc328b780e02a4199", "score": "0.6120321", "text": "def get_adjacencies(words):\n adj = {letter: [] for letter in letters}\n for word 
in words:\n for n in range(len(word) - 1):\n first_letter, second_letter = word[n], word[n + 1]\n adj[first_letter].append(second_letter)\n adj[second_letter].append(first_letter)\n return adj", "title": "" }, { "docid": "5ad335883dc5000f2f22ef8537a57408", "score": "0.60948694", "text": "def get_dictionary():\n stems, dictionary = set(), set()\n with open('words.txt') as f:\n for word in f:\n word = word.strip().upper()\n dictionary.add(word)\n\n for i in range(len(word)):\n stems.add(word[:i + 1])\n\n return dictionary, stems", "title": "" }, { "docid": "7d03acdde10b7260e4c5cb9d4001787a", "score": "0.606268", "text": "def words_to_dictionary(search,words):\n dic={}\n for word in words:\n #word=str(word.encode('ascii', 'ignore')).lower()\n word=str(word.lower())\n word=re.sub(r'[0-9]', '', word)\n word=re.sub('\\W+','', word )\n if word not in en_sw:\n wordnet_lemmatizer = WordNetLemmatizer()\n word=wordnet_lemmatizer.lemmatize(word)\n #if not( dic.has_key(word) ): #python2\n if not( word in dic ): #python3\n dic[(search,word)]=[\"R\"]\n return dic", "title": "" }, { "docid": "5d5c7ab6a5294113153afbe0f598c269", "score": "0.60510117", "text": "def create_dict(f):\n words = []\n with open(f) as fin:\n for line in fin:\n words += line.lower().split()\n return words", "title": "" }, { "docid": "040c780e0dffb7b21f1558616c2cd117", "score": "0.60496813", "text": "def letter_code(word):\n\treturn ''.join(sorted(word))", "title": "" }, { "docid": "3c212b97b5a7e09e0491e253f31b243c", "score": "0.6048791", "text": "def build_dict():\n fin = open('words.txt')\n dict_of_words = {}\n \n for line in fin:\n word = line.strip()\n dict_of_words[word] = 1\n return dict_of_words", "title": "" }, { "docid": "196d3cffcae879dc22c3ab418b070b41", "score": "0.6036554", "text": "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n dico['[MASK]'] = 30000000\n dico['[UNK]'] = 20000000\n dico['[PAD]'] = 10000000\n\n word_to_id, id_to_word = create_mapping(dico)\n print (\"Found %i unique words (%i in total)\" % (len(dico), sum(len(x) for x in words)))\n return dico, word_to_id, id_to_word", "title": "" }, { "docid": "a20aa2381a612d77c69ed72ab3094815", "score": "0.6033069", "text": "def map_words(text: list):\n\n hash_map = {}\n\n if text is not None:\n for word in text:\n if word in hash_map:\n hash_map[word] = hash_map[word] + 1\n else:\n hash_map[word] = 1\n return hash_map\n return None", "title": "" }, { "docid": "0cf3a15e4ff77612b116d41c3e994e9a", "score": "0.6032134", "text": "def add_word_lists(d):\n\n d[''] = []\n\n for word in d.keys():\n if len(word) == 1:\n d[word] = ['']\n\n else:\n for i in range(len(word)):\n new = word.replace(word[i], '')\n if new in d and new not in d[word]:\n d[word].append(new)\n\n return d", "title": "" }, { "docid": "45ca8668c644a3583110659249c39c74", "score": "0.602519", "text": "def keyword_mapping(keyword):\n # letters to be used as the keys\n input_letters = list(ascii_uppercase)\n # remaining letters to be mapped\n mapping_letters = list(ascii_uppercase)\n mapping = {}\n for letter in keyword.upper():\n if letter in mapping_letters:\n # if the letter hasn't already been mapped, map the next input\n # letter to it, and remove it from the letters to map\n mapping[input_letters.pop(0)] = letter\n mapping_letters.remove(letter)\n # map the remaining letters\n for letter in mapping_letters:\n mapping[input_letters.pop(0)] = letter\n return mapping", "title": "" }, { "docid": 
"a528eb153aa4f9660542a412fcbff5fc", "score": "0.60209775", "text": "def create_alphabetical_list(tokens: dict) -> dict:\n alphabetical_dict = defaultdict(list)\n # solution for this found here:\n # https://stackoverflow.com/questions/4058967/split-list-of-names-into-alphabetic-dictionary-in-python\n for token in tokens.keys():\n alphabetical_dict[token[0].upper()].append(token)\n return alphabetical_dict", "title": "" }, { "docid": "cebdf937b9cca7a1396dbb3052d4ab9f", "score": "0.6020199", "text": "def find_anagrams(word):\n anagrams = []\n chars = sorted(list(word))\n for d_word in DICT:\n if chars == sorted(list(d_word)):\n anagrams.append(d_word)\n return anagrams", "title": "" }, { "docid": "bbab916fe5680532302c860b7002af4f", "score": "0.6002704", "text": "def adv_alpha_sort_by_word_length(words):\n # instantiate empty word dictionary; count each word\n word_dictionary = {}\n\n # checks if word length is already a key;\n # if so, appends word; if not, creates key + starts value list with word\n # sorts each value list\n for word in words:\n if word_dictionary.get(len(word), 0) == 0:\n word_dictionary[len(word)] = [word]\n else:\n word_dictionary[len(word)].append(word)\n word_dictionary[len(word)].sort()\n\n return word_dictionary.items()", "title": "" }, { "docid": "ffb7076dd3bf56613b54843cebe8aff4", "score": "0.599915", "text": "def word_getter(letters):\n #I just want you to know I spent about 2 hours working on a recursion that does this\n #Then I discovered itertools\n #I'm never getting that 2 hours back am I?\n a = [l for l in letters]\n FirstSet = set()\n for i in range(1,len(letters)): \n for Tup in list(itertools.permutations(a,i+1)):\n FirstSet.add(Tup)\n\n #Write a function to convert tuples to a string\n #Use it to create a NewSet of strings from FirstSet, each of which is a permutation of letters\n def TupleConvert(tup):\n \"\"\"Converts tuples of characters or strings into a single string\"\"\"\n new_tuple = \"\".join(tup)\n return new_tuple\n NewSet = set()\n for tup in FirstSet:\n NewSet.add(TupleConvert(tup))\n\n #Save only words in the scrabble dictionary to FinalSet\n FinalSet = set()\n for word in NewSet:\n if word.upper() in WordData:\n FinalSet.add(word)\n return FinalSet", "title": "" }, { "docid": "1b18c0f53b265b0c5b12f59de4e8d4a2", "score": "0.59939146", "text": "def GetMatchingWords(abbreviations):\n matchDict = {}\n\n for abb in abbreviations:\n if abb not in matchDict.keys():\n pattern = GetAbbreviationRegexPattern(abb)\n matchDict[abb] = GetMatchesFromWordFile(pattern)\n\n return matchDict", "title": "" }, { "docid": "bd576d7af0f22587282a0d1eb06f8cab", "score": "0.5993233", "text": "def bigrams(words):\n d = DefaultDict(DefaultDict(0))\n for (w1, w2) in zip([None] + words, words + [None]):\n d[w1][w2] += 1\n return d", "title": "" }, { "docid": "898c206ca9be2213624c5e1bbd9f29d5", "score": "0.59697163", "text": "def get_index_mappings(chars):\n return {c: i for i, c in enumerate(chars)}, {i: c for i, c in enumerate(chars)}", "title": "" }, { "docid": "822f51dce0a1bb551a7f7ff8e7015539", "score": "0.5966307", "text": "def make_dictionary(words, context):\n dictionary = {}\n index = 0\n\n for word in words[index:]:\n key = ' '.join(words[index - context:index])\n if key in dictionary:\n dictionary[key].append(word)\n else:\n dictionary[key] = [word]\n\n index += 1\n return dictionary", "title": "" }, { "docid": "038e2e5702ce80bb739bb25774aa7997", "score": "0.59528744", "text": "def build_word_and_char_dict(args, examples, fields, dict_size=None,\n 
no_special_token=False, words=None):\n if words:\n words = words[:dict_size]\n else:\n words = load_words(args, examples, fields, dict_size)\n dictionary = UnicodeCharsVocabulary(words,\n args.max_characters_per_token,\n no_special_token)\n return dictionary", "title": "" }, { "docid": "71a8c309f68aee3c935ff365df0d3c43", "score": "0.5945094", "text": "def build_word_dict(args, examples):\n word_dict = Dictionary()\n for w in load_words(args, examples):\n word_dict.add(w)\n return word_dict", "title": "" }, { "docid": "d50351bface0d03af30ac721b7224511", "score": "0.5941661", "text": "def get_words(txt_data):\n sentences = txt_data.split('\\n') # list of sentences\n sentence_list = [i.lower().split(' ') for i in sentences]\n c = [a.insert(0, '<s>') for a in sentence_list]\n d = [z.append('</s>') for z in sentence_list]\n words = [word for sentence in sentence_list for word in sentence]\n return words", "title": "" }, { "docid": "d50351bface0d03af30ac721b7224511", "score": "0.5941661", "text": "def get_words(txt_data):\n sentences = txt_data.split('\\n') # list of sentences\n sentence_list = [i.lower().split(' ') for i in sentences]\n c = [a.insert(0, '<s>') for a in sentence_list]\n d = [z.append('</s>') for z in sentence_list]\n words = [word for sentence in sentence_list for word in sentence]\n return words", "title": "" }, { "docid": "62706372ab10932b0c6a22fb49152577", "score": "0.59393305", "text": "def scrabble_words(letters):\n\n\tres = []\n\tlength = len(letters)\n\n\tif length <= 1:\n\t\tres = [letters]\n\telse:\n\t\tfor i, c in enumerate(letters):\n\t\t\tfor scrabble_word in scrabble_words(letters[:i] + letters[i + 1:]):\n\t\t\t\tif letters[i] + scrabble_word not in res:\n\t\t\t\t\tres.append(letters[i] + scrabble_word)\n\t\t\t\tif scrabble_word not in res:\n\t\t\t\t\tres.append(scrabble_word)\n\treturn res", "title": "" }, { "docid": "5a31de309db2e1d02e205f15aa9e85f7", "score": "0.5939055", "text": "def _enumerate_word(self, word):\n\n char_count = {}\n enumerated_cars = []\n\n for ch in word:\n if ch not in char_count:\n char_count[ch] = 0\n else:\n char_count[ch] += 1\n\n enumerated_cars.append(ch + str(char_count[ch]))\n\n return enumerated_cars", "title": "" }, { "docid": "71fc5d08d744933de8b6d7d601750be3", "score": "0.59376687", "text": "def build_dict(word_list):\n\n trigrams = {}\n\n for i in range(len(word_list)-2):\n pair = tuple(word_list[i:i+2])\n follower = word_list[i+2]\n if pair not in trigrams:\n trigrams[pair] = [follower]\n else:\n trigrams[pair] += [follower]\n\n for key in trigrams:\n print(f'{key} = {trigrams[key]}')\n\n return trigrams\n # print(trigrams)", "title": "" }, { "docid": "9cd1a798192f6321917c5145a2c9762d", "score": "0.59329003", "text": "def group_anagrams(words):\n\n groups = {}\n for item in words:\n ordered_word = ''.join(sorted(item))\n if ordered_word in groups:\n groups[ordered_word] += [item]\n else:\n groups[ordered_word] = [item]\n return groups.values()", "title": "" }, { "docid": "06055ed8e142d0dee4567646222dc6d1", "score": "0.59293085", "text": "def get_words(filename: str,\n letters: List[str]) -> List[str]:\n with open(filename) as file:\n all_words = [line.rstrip().lower() for line in file.readlines()]\n\n return get_words_list(letters, all_words)", "title": "" }, { "docid": "ae876df2f6d4a21d25e4e6c64165a882", "score": "0.5920028", "text": "def word_list(self):\n return self._word_to_id.keys()", "title": "" }, { "docid": "89bd337856826bcdc2d2115f485bbf04", "score": "0.5916524", "text": "def buildDict(self, dict):\n for word in 
dict:\n for charIndex in range(0,len(word)):\n for w in self.chars:\n if w != word[charIndex]:\n newStr = word[0:charIndex]+w+word[charIndex+1:len(word)]\n self.dict[newStr] = 1", "title": "" }, { "docid": "e7135990cbd8ade8fda2a1631eff3ad9", "score": "0.59163016", "text": "def get_word_index(self) -> dict:\n\n return dict(zip(self.vocabulary, range(len(self.vocabulary))))", "title": "" }, { "docid": "33c61f466effba46c7c3df064d8f0e46", "score": "0.5904112", "text": "def word_mapping(sentences, lower):\n words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(words)\n\n dico['<PAD>'] = 10000001\n dico['<UNK>'] = 10000000\n dico = {k:v for k,v in dico.items() if v>=3}\n word_to_id, id_to_word = create_mapping(dico)\n\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in words)\n ))\n return dico, word_to_id, id_to_word", "title": "" }, { "docid": "e1351e7c9a0d66be6621d7fac5171e5f", "score": "0.5902606", "text": "def make_initial_letters_lowercase(words):\n # for loop? List comprehension?", "title": "" }, { "docid": "8fa41feaf8cea25c784f95d8239758ba", "score": "0.5888543", "text": "def edits2(word):\n return {e2 for e1 in edits1(word) for e2 in edits1(e1)}", "title": "" }, { "docid": "44a950ed746d1b45127a2a37fafe6701", "score": "0.58834666", "text": "def __build__chars__(self):\r\n all_letters = string.ascii_letters + string.digits + \" .'-\"\r\n return {all_letters[i]: i+1 for i in range(0, len(all_letters))}", "title": "" }, { "docid": "75fbf82a89b33ab4058b2ed15fe5ba6c", "score": "0.5873006", "text": "def create_lang_abbr_map():\n r = requests.get(\"https://ws.detectlanguage.com/0.2/languages\")\n return {x[\"code\"]: x[\"name\"] for x in r.json()}", "title": "" }, { "docid": "7c08d8ca9b534352927ba4e1e1dbdfa6", "score": "0.58715415", "text": "def generate_words(combo, scrabble_words_dict):\n set_of_words = set()\n for word in itertools.permutations(combo):\n word = ''.join(word) #joins the characters in the combo\n if word in scrabble_words_dict:\n set_of_words.add(word) #adds if it is valid word\n return set_of_words", "title": "" }, { "docid": "690de319718ee53d9ccbcf3a46990485", "score": "0.58626026", "text": "def words(self) -> List[str]:", "title": "" }, { "docid": "06e002245a43b299974bf4be07754fac", "score": "0.58593655", "text": "def bag_of_words(words):\n word_new = []\n for word in words:\n if word not in cachedStopWords:\n word_new.append(word)\n words = word_new\n return dict([(word, True) for word in words])", "title": "" }, { "docid": "07d20e15398d5cb67aaf64d7f644357c", "score": "0.5855599", "text": "def pattern(word):\n alphabet = [chr(ascii_code) for ascii_code in range(ord('A'), ord('Z') + 1 )]\n word_dict = {}\n pattern = \"\"\n for i in word:\n if i in word_dict:\n pass\n else:\n word_dict[i] = alphabet.pop(0)\n\n for i in word:\n pattern += word_dict[i]\n\n return pattern", "title": "" }, { "docid": "e1a24e289f93287d23074e13d4760e40", "score": "0.5854623", "text": "def generate_alpha_dict():\n return {k: v + 1 for v, k in enumerate(string.ascii_uppercase)}", "title": "" }, { "docid": "6853d9a731f2db1c11e186d43dabefd6", "score": "0.5845981", "text": "def letterCombinations(self, digits: str):\n if digits == \"\":\n return []\n return getWords(digits, \"\")", "title": "" }, { "docid": "43dd8d112d235d6e6bfd5a80529c78b8", "score": "0.5837669", "text": "def words( text ):\n stext = str(text)\n if ( not stext ):\n return []\n \n # first, split all the alphanumeric characters up\n phrases = 
EXPR_PHRASE.findall(stext)\n \n # second, split all the camel humped words\n output = []\n for phrase in phrases:\n output += EXPR_WORD.findall( phrase )\n \n return output", "title": "" }, { "docid": "d1b9960d9f12e017ce057f7881801589", "score": "0.5836475", "text": "def __init__(self, words):\n self.map = collections.defaultdict(list)\n for i, word in enumerate(words):\n self.map[word].append(i)", "title": "" }, { "docid": "c973e5715d66d6e6ecd9b3dcaf30719c", "score": "0.5824656", "text": "def wordDefinition(word):\n dic={}\n definitions=list(Word(word).definitions)\n #print definitions\n if len(definitions)!=0:\n lista=[]\n dic={}\n lista=definitions[0].split(' ')\n for palabra in lista:\n if palabra not in en_sw:\n #Meaning of a word\n palabra=lemmatize_word(palabra)\n #Insert in the dictionary\n if not(palabra in dic):\n dic[palabra]=1\n return dic", "title": "" }, { "docid": "7649ff27fb487c4bf6afa7b746d80ea2", "score": "0.58208865", "text": "def map_letters():\n letters = [None]\n for i in range(27):\n letters.append(chr(97 + i))\n return letters", "title": "" }, { "docid": "ccf6710b54c3d32997bcd0a40e4192d6", "score": "0.5818315", "text": "def get_word_list(file_name):\n\tfp = open(file_name, 'r')\n\twords = []\n\tfor line in fp:\n\t\tfor word in line.split():\n\t\t\twords.append(word)\n\t\n\tfor i in range(len(words)):\n\t\twords[i] = words[i].translate(None, \",./;'[]<?:{}=>-+_)(*&^%$#@!\")\n\t\twords[i] = words[i].lower()\n\treturn words\n\tpass", "title": "" }, { "docid": "c10788efb2ab75ee634b1640e3754a5d", "score": "0.58166677", "text": "def _load_dictionary(self) -> list:\n with open(self.dictionary_path, 'r') as f:\n words = f.readlines()\n # get rid of \\n\n words_without_newline_char = [\n word.replace('\\n', '') for word in words]\n # lowercase & keep only words between 5-12 chars long\n dictionary = [\n word.lower() for word in words_without_newline_char\n if len(word) in range(5, 13)]\n # remove duplicates\n dictionary = list(set(dictionary))\n \n return dictionary", "title": "" }, { "docid": "3202f05be800f757f900287eeb06529a", "score": "0.58054376", "text": "def build_dictionary(documents):\n result = Counter()\n\n for doc in documents:\n tokenized = tokenize(documents[doc].lower())\n for w in tokenized:\n if is_apt_word(w):\n result[w] += 1\n \n return dict(result)", "title": "" }, { "docid": "b52982144a998b549d10596c04d5506e", "score": "0.58021283", "text": "def build_dict(words):\n d = {}\n for i, word in enumerate(words):\n try:\n first, second, third = words[i], words[i+1], words[i+2]\n except IndexError:\n break\n key = (first, second)\n if key not in d:\n d[key] = []\n d[key].append(third)\n\n return d", "title": "" }, { "docid": "e5b642b1615e4d2e1b83920aa73b55d0", "score": "0.5798854", "text": "def get_dictionaries(cls, contents):\n unique_name = inspect.currentframe().f_back.f_code.co_name\n _, shadow = spelling.spellcheckable_and_shadow_contents(contents)\n valid_words = spelling.Dictionary(spelling.valid_words_set(),\n \"english_words\",\n cls.cache_dir)\n tch_set = spelling.technical_words_from_shadow_contents(shadow)\n technical_words = spelling.Dictionary(tch_set,\n \"technical_words_\" + unique_name,\n cls.cache_dir)\n\n return (valid_words, technical_words)", "title": "" }, { "docid": "81b99510cf674c9e8660fba2ffd8943d", "score": "0.5796859", "text": "def letter_dist(words):\n dist = defaultdict(lambda: 0)\n for word in words:\n for letter in word:\n dist[letter] += 1\n items = list(dist.items())\n items.sort(key=lambda x: x[1], reverse=True)\n return [x[0] 
for x in items], dist, sum([x[1] for x in items])", "title": "" }, { "docid": "9a4a542d2e5f7c86646a7a3061f37e85", "score": "0.5792412", "text": "def apply_mapping(self, solution_key):\n\n return [textutil.decode_word(c, solution_key)\n for c in self.ranked_cipher]", "title": "" }, { "docid": "c0aab349a338db25b3b3edf83410fb34", "score": "0.5790223", "text": "def grouped_anagrams(strings):\r\n word_dict = {}\r\n for word in strings:\r\n sorted_word = \"\".join(sorted(word))\r\n if sorted_word not in word_dict:\r\n word_dict[sorted_word] = [word]\r\n else:\r\n word_dict[sorted_word].append(word)\r\n\r\n return list(word_dict.values())", "title": "" }, { "docid": "441b3707d240fa0d79ed03459d683985", "score": "0.57735616", "text": "def Pwords(words):\n return sum(Pw[w.lower()] for w in words)", "title": "" }, { "docid": "9bcb3a051e99e4a7e6b159bb0a80404e", "score": "0.57689124", "text": "def word_guess(letters, length=None):\n length = length or len(letters)\n #letters = [ ch for ch in letters]\n results = []\n en_dict = enchant.Dict(\"en_US\")\n\n for word in permutations(letters, length):\n #word = ''.join(word)\n if en_dict.check(word):\n results.append(word)\n\n res = set(results)\n\n return res", "title": "" }, { "docid": "6bb8dcab18fbecbc18c1c68d127abe09", "score": "0.57632583", "text": "def bigram_to_single_word(bigrams_fd):\n out = dict(zip([x[0]+\" \"+x[1] for x in bigrams_fd.keys()],bigrams_fd.values()))\n return out", "title": "" }, { "docid": "c79991d26ade5da6df142d02968f92ae", "score": "0.57610315", "text": "def uncommon_word_map(texts):\n with open(\"MachineLearning\\\\lab01\\\\commonwords.txt\", \"r\") as file1:\n FileasList = file1.readlines()\n for i in range(len(FileasList)):\n FileasList[i] = FileasList[i][:-1]\n \n words_dic = {}\n for s in texts:\n str_list = s.split()\n for w in str_list:\n w = w.lower()\n if w in FileasList:\n continue\n if w not in words_dic:\n words_dic[w] = 1\n else:\n words_dic[w] += 1\n\n return words_dic", "title": "" }, { "docid": "d09791cc868a0041d4d232fb53daf4e5", "score": "0.57573086", "text": "def preprocess(lac, texts, word_dict, use_gpu=False, batch_size=1):\n result = []\n input_dict = {'text': texts}\n processed = lac.lexical_analysis(data=input_dict, use_gpu=use_gpu, batch_size=batch_size)\n unk_id = word_dict[\"<unk>\"]\n for index, data in enumerate(processed):\n result_i = {'processed': []}\n result_i['origin'] = texts[index]\n for word in data['word']:\n if word in word_dict:\n _index = word_dict[word]\n else:\n _index = unk_id\n result_i['processed'].append(_index)\n result.append(result_i)\n return result", "title": "" }, { "docid": "c7edb72bccfad0ff40924b9c411ec2f5", "score": "0.57499015", "text": "def create_dictionary(filename):\n file = open(filename, 'r')\n text = file.read()\n words = text.split()\n d = {}\n current_word = '$'\n for next_word in words:\n if '.' in current_word or '?' in current_word or '!' 
in current_word:\n current_word = '$'\n if current_word not in d:\n d[current_word] = [next_word]\n else:\n d[current_word] += [next_word]\n current_word = next_word\n return d", "title": "" }, { "docid": "2baf20b2e990b62bb95daa83249368a6", "score": "0.5744149", "text": "def set_wordList(words: list) -> dict:\n return WordSearch(words).formatted", "title": "" }, { "docid": "b1fc9a7ad2fee6310c643df2709d2edc", "score": "0.5738113", "text": "def code2():\n return {'i': 's', 's': 'g', 'g': 'i'}", "title": "" }, { "docid": "7856f02742592c641205bf03bd46572b", "score": "0.5734976", "text": "def create_word_bigram_dict(words_list):\n words_length = len(words_list)\n n = 2\n words_bigram = []\n for i in range(0, words_length - n + 1):\n n_gram = ' '.join(words_list[i: i + n])\n words_bigram.append(n_gram)\n\n bigram_df = pd.DataFrame(words_bigram, columns=['bigram'])\n bigram_count = bigram_df['bigram'].value_counts()\n bigram_dict = bigram_count.to_dict()\n return bigram_dict", "title": "" }, { "docid": "8677cc0da9faa4380cb036a7b38f3158", "score": "0.5734802", "text": "def makeAlphabet(symbols):\n alphabet = dict()\n i = 0\n for symbol in symbols:\n alphabet[i] = symbol\n i += 1\n return(alphabet)", "title": "" }, { "docid": "1bff7cc73681da702b0e7e88efed2348", "score": "0.5732264", "text": "def word_map(texts):\n # Write your code here", "title": "" }, { "docid": "c16b2aff71c5d2a645cbcb5284da3378", "score": "0.57308865", "text": "def word_entries(word_list):\n good_list = []\n for word in word_list:\n if len(word) > 2:\n if str.upper(word) == word:\n good_list.append(word)\n return good_list", "title": "" }, { "docid": "97e5461eb86abab89003bf91d852dd22", "score": "0.572551", "text": "def count_letters(text:str, chars:list=['a','e','i','o','u']) -> map:\r\n result_counts = {item: 0 for item in chars}\r\n for ch in text:\r\n if ch in result_counts:\r\n result_counts[ch] += 1\r\n return result_counts", "title": "" }, { "docid": "1e3eb4f8736cdc4bb246fa1aee96742d", "score": "0.5721474", "text": "def wordDict(self):\t\t\n\t\tallWords = self.vocabalury()\n\t\treturn self.word_freq(allWords)", "title": "" }, { "docid": "b80391b7fc6e17c58bc7324b0496c42c", "score": "0.57188857", "text": "def createDictionary(filename):\n pass\n f = open(filename)\n text = f.read()\n f.close()\n\n LoW = text.split() \n \n pw = '$'\n d = {}\n for nw in LoW:\n if pw not in d:\n d[pw] = [nw]\n else:\n d[pw] += [nw]\n pw = nw\n if nw[-1] in '.!?': # then check for whether that new pw ends in # punctuation -- if it _does_ then set pw = '$'\n pw = '$'\n return d", "title": "" }, { "docid": "5e312b63a97ee34eab0bc7d08a0de7b5", "score": "0.57162917", "text": "def mapper_anagram(self, _, word):\r\n yield ''.join(sorted(word)), word", "title": "" }, { "docid": "c178685a91764563ece8a49d42e910ec", "score": "0.5714916", "text": "def __construct_trie(self):\n\t\ttrie = {}\n\t\tfor word in self.wordlist:\n\t\t\ttemp_dict = trie\n\t\t\tfor letter in word:\n\t\t\t\ttemp_dict = temp_dict.setdefault(letter, {})\n\t\t\ttemp_dict['*'] = '*'\n\t\treturn trie", "title": "" }, { "docid": "48cdf90c6fd434550b8af8b1637138e6", "score": "0.57090336", "text": "def possible_word_set_from_string(self, string: str) -> Dict[str, WordCouple]:\n assert isinstance(string, str)\n assert string.replace(\" \", \"A\").isalpha()\n assert string.replace(\" \", \"A\").isupper()\n assert len(string) <= 15\n assert string.count(\" \") == 1 # \" \" is the wildcard letter and there must be only one\n\n word_dict = {}\n joker_index = string.index(\" \")\n for letter in 
list(map(chr, range(65, 91))): # alphabet uppercase letters\n string_2_be_tested = string.replace(\" \", letter)\n if self.this_is_a_valid_word(string_2_be_tested):\n word_dict[letter] = WordCouple(index=joker_index, word_str=string_2_be_tested)\n\n return word_dict", "title": "" }, { "docid": "5373318e5633acd3ba5fa37fdd710da7", "score": "0.5707884", "text": "def get_words_to_points(word_list):\n for word in word_list:\n points_dict[word] = get_word_score(word, HAND_SIZE)", "title": "" }, { "docid": "068c2d75377f2493d9b2a39698c35264", "score": "0.5704344", "text": "def pre_processing(words):\r\n result = []\r\n for word in words:\r\n if word not in cleanse_dict.keys():\r\n result.append(word)\r\n else:\r\n result.append(cleanse_dict[word])\r\n return result", "title": "" }, { "docid": "526d3bf20e329a1ef0e438cf0c0f85bb", "score": "0.57034236", "text": "def predict_dict(self, words):\n expansions = []\n for w in words:\n if w in self.expansion_dict:\n expansions += [self.expansion_dict[w]]\n elif w.lower() in self.expansion_dict:\n expansions += [self.expansion_dict[w.lower()]]\n else:\n expansions += [w]\n return expansions", "title": "" }, { "docid": "b4ae945bb7d25daf1e3aa332086d938f", "score": "0.5693178", "text": "def categorize2(word):\n lst = []\n for key in synlist:\n for w in d.suggest(stemmer.stem(word)):\n if w in synlist[key]:\n lst.append(key)\n return lst", "title": "" } ]
bf6eda80eb6f20a7499caa7bc94fa3d0
Parse and return input rangefile as dict
[ { "docid": "6a631f5147d3d486373264ba1b05b358", "score": "0.7883238", "text": "def _parsefile(self, rngpath: str) -> dict:\n\n # TODO check it's a rng file (avoid utf-8 encoding errors)\n try:\n with open(rngpath, 'r') as file:\n r = [v.split() for v in file]\n except (IOError, FileNotFoundError):\n raise ReadError('Error opening rng file %s' % rngpath)\n return\n\n natoms = int(r[0][0])\n nranges = int(r[0][1])\n end = int((1+natoms)*2)\n\n # shortname + colour (3 floats)\n atoms = np.array(r[2:end:2])\n rngs = r[int(end):int(end+nranges)] # ranges\n\n # Read rows as numpy string array\n rngsconv = np.array(rngs, dtype='S10')\n\n ranges = rngsconv[:,1:3].astype('f8') # Extract ranges as\n # 2 col array of floats\n composition = rngsconv[:,3:3+natoms].astype('b') # Extract ion\n # composition array\n # as bool\n\n return {'ranges':ranges,\n 'atoms':atoms,\n 'comp':composition,\n 'nranges':nranges,\n 'natoms':natoms,\n }", "title": "" } ]
[ { "docid": "d10c6e0f8acb3aa42547aa3663edd755", "score": "0.6517698", "text": "def _parse_input_file(input_file):\n input_dict = dict()\n logger = logging.getLogger(__name__)\n\n try:\n f = open(input_file, \"r\")\n for line in f:\n # Ignore comments in input file!\n line_loc = _string_to_substring(line, '!')[0]\n\n # ignore empty lines\n if len(line_loc.lstrip()) == 0:\n continue\n\n line_list = line_loc.strip().split('=')\n var_name = line_list[0].strip()\n value = line_list[1].strip()\n val_array = _string_to_substring(value, ',')\n if len(val_array) > 1:\n # Treat comma-delimited value as an array\n for n, value in enumerate(val_array):\n suffix = \"(%d)\" % (n+1)\n input_dict[var_name+suffix] = value.strip()\n else:\n # Single value\n input_dict[var_name] = value\n f.close()\n except TypeError:\n # If inputfile == None then the open will result in TypeError\n pass\n except:\n logger.error(\"input_file '%s' was not found\" % input_file)\n _abort(1)\n return input_dict", "title": "" }, { "docid": "20dbc0e18816af3d8628e78bed21a194", "score": "0.65066123", "text": "def parse_ranges():", "title": "" }, { "docid": "a7c64ddfd5e62694de003cd08d99d9f7", "score": "0.63691574", "text": "def read_file_to_dict(name):\n dictionary = {}\n with open(name) as file:\n key = 0\n for line in file:\n line = line.rstrip()\n if line.isdigit():\n dictionary[key].append(int(line, 2))\n if line.startswith('S'):\n key = int(line.partition(':')[2], 2)\n dictionary[key] = []\n return dictionary", "title": "" }, { "docid": "5b6c39823ee0d53d14b3f23d942822d6", "score": "0.63425994", "text": "def _parse(self, data):\n h = StringIO(data)\n result = {}\n for line in h.readlines():\n try:\n key, value = map(str.strip, line.split('\\t'))\n result[key] = int(value)\n except ValueError:\n pass\n return result", "title": "" }, { "docid": "c61bbed74c78d3afa402c6abbd184e27", "score": "0.63322926", "text": "def _parse(self, aid_file):\n\n ranges_by_name = {}\n for lineno, line in enumerate(aid_file):\n\n def error_message(msg):\n \"\"\"Creates an error message with the current parsing state.\"\"\"\n # pylint: disable=cell-var-from-loop\n return 'Error \"{}\" in file: \"{}\" on line: {}'.format(\n msg, self._aid_header, str(lineno))\n\n range_match = self._RESERVED_RANGE.match(line)\n if range_match:\n partition, name, start, value = range_match.groups()\n partition = partition.lower()\n if name is None:\n name = \"unnamed\"\n start = start == \"START\"\n value = int(value, 0)\n\n if partition == 'oem':\n partition = 'vendor'\n\n if partition not in ranges_by_name:\n ranges_by_name[partition] = {}\n if name not in ranges_by_name[partition]:\n ranges_by_name[partition][name] = [None, None]\n if ranges_by_name[partition][name][0 if start else 1] is not None:\n sys.exit(error_message(\"{} of range {} of partition {} was already defined\".format(\n \"Start\" if start else \"End\", name, partition)))\n ranges_by_name[partition][name][0 if start else 1] = value\n\n if AIDHeaderParser._AID_DEFINE.match(line):\n chunks = line.split()\n identifier = chunks[1]\n value = chunks[2]\n\n if any(\n x.match(identifier)\n for x in AIDHeaderParser._SKIP_AIDS):\n continue\n\n try:\n if not any(\n identifier.endswith(x)\n for x in AIDHeaderParser._AID_SKIP_RANGE):\n self._handle_aid(identifier, value)\n except ValueError as exception:\n sys.exit(\n error_message('{} for \"{}\"'.format(\n exception, identifier)))\n\n for partition in ranges_by_name:\n for name in ranges_by_name[partition]:\n start = ranges_by_name[partition][name][0]\n end = 
ranges_by_name[partition][name][1]\n if start is None:\n sys.exit(\"Range '%s' for partition '%s' had undefined start\" % (name, partition))\n if end is None:\n sys.exit(\"Range '%s' for partition '%s' had undefined end\" % (name, partition))\n if start > end:\n sys.exit(\"Range '%s' for partition '%s' had start after end. Start: %d, end: %d\" % (name, partition, start, end))\n\n if partition not in self._ranges:\n self._ranges[partition] = []\n self._ranges[partition].append((start, end))", "title": "" }, { "docid": "92cbfa845b59a7a9a3fcf8b6773559a2", "score": "0.628072", "text": "def read(self):\n \n try:\n temp_file = open(self.file, \"r\")\n text = temp_file.read()\n temp_file.close()\n temp_dict = {}\n lines = text.split(\"\\n\")\n lines.remove(\"\") # Some text editors will put this at end of the file\n for line in lines:\n result = line.split(\":\")\n key = result[0]\n value = result[1]\n variable = self.recognize(value)\n temp_dict[key] = variable\n return temp_dict\n except SystemError:\n print \"An error happened while trying to read the file:\"\n return SystemError", "title": "" }, { "docid": "991262bde06ede913ef3e67f86d3669a", "score": "0.62093157", "text": "def parse(valid_file_name)-> dict:\n dict_props = dict()\n with open(valid_file_name) as file:\n file_by_lines = file.readlines()\n for line in file_by_lines:\n line = line.strip()\n if PropertyFileToDictConverter.__is_valid_key_value_pair(line):\n key, value = PropertyFileToDictConverter.__parse_line(line)\n dict_props[key] = value\n return dict_props", "title": "" }, { "docid": "63ac1efde3b776e82bcf48048261ccfd", "score": "0.6188564", "text": "def read_pdb_starts():\n file_path = os.path.join(\"../data/input/etc\", \"pdb_starts.txt\")\n pdb_starts_dict = {}\n with open(file_path) as f1:\n for line in f1:\n if not line.startswith(\"#\") and not line.startswith(\"\\n\"):\n line_array = line.split(',')\n pdb_starts_dict[line[0:4]] = int(line_array[1])\n return pdb_starts_dict", "title": "" }, { "docid": "1fb9a2100f2f4425aa0fd8270c032132", "score": "0.612648", "text": "def _parse_infile(self):\n header = []\n vcf = OrderedDict()\n i = 0\n for line in self.infile:\n if line.startswith('#'):\n header.append(line)\n else:\n i += 1\n #print(i)\n line = line.rstrip('\\n').split('\\t')\n chrom = line[0]\n coord = line[1]\n ref = line[3]\n alt = line[4]\n vcf[(chrom, coord, ref, alt)] = line\n\n return header, vcf", "title": "" }, { "docid": "a4612e5bfe00af5a3abd03d675851531", "score": "0.6112825", "text": "def parse(input_file):\n # TODO: is json or xml more suitable for the input file format?\n parameters = dict()\n\n try:\n # open and parse the file\n pass\n except FileNotFoundError:\n print(\"Input file '%s' not found\" % input_file)\n sys.exit(-1)\n # add other exceptions\n except:\n print(\"Unexpected error!\")\n traceback.print_exc()\n sys.exit(-1)\n finally:\n # close the file and maybe clean up\n pass\n\n return parameters", "title": "" }, { "docid": "f5a3de036846a6011a61687443a7a9f2", "score": "0.6039144", "text": "def from_gff3_line_to_dict( line ):\n fields = line.strip().split( \"\\t\" )\n assert len( fields ) == 9 # sanity check\n result = {\n \"seqid\": fields[0],\n \"source\": fields[1],\n \"type\": fields[2],\n \"start\": None if fields[3] == \".\" else int( fields[3] ),\n \"end\": None if fields[4] == \".\" else int( fields[4] ),\n \"score\": None if fields[5] == \".\" else float(fields[5]),\n \"strand\": None if fields[6] == \".\" else fields[6],\n \"phase\": None if fields[7] == \".\" else fields[7],\n 
\"attributes\": parse_attributes( fields[8] ),\n \"attributes_string\": fields[8] # keep this around as useful.\n }\n result[\"ID\"] = result[\"attributes\"].get( \"ID\", None )\n result[\"Parent\"] = result[\"attributes\"].get( \"Parent\", None )\n return result", "title": "" }, { "docid": "7fe69d06307d49d5f069dcc8406d755f", "score": "0.6023709", "text": "def make_dicts(file_input):\n with open(file_input) as file_1:\n price_list = {}\n for line in file_1:\n prefix, price = line.split()\n\t\t\t# Here we are spliting the phone numbers and prices from a file.\n price_list[prefix] = float(price)\n return price_list", "title": "" }, { "docid": "d5168a01e1a00a2d0f5ed5b336968ef1", "score": "0.6010963", "text": "def parse_input(filename):\n with open(filename, 'r') as input_file:\n activity_log = {}\n for line in input_file:\n match = re.match(r'\\[(.*)\\] (.*)', line)\n activity_log.update({match[1]: match[2]})\n return generate_guard_dict(OrderedDict(sorted(activity_log.items())))", "title": "" }, { "docid": "33f6038f2c694882401cf45aad048506", "score": "0.5954986", "text": "def parse_file(file, dict={}):\n try:\n f = open(file)\n except IOError:\n return dict\n else:\n lines = f.readlines()\n vlines =[]\n for line in lines:\n if not re.match(r\"^\\s*$\",line) and not re.match(r\"^#.*$\",line):\n vlines.append(line.strip('\\n'))\n lines = []\n while len(vlines) >0:\n i = vlines.pop(0)\n i =re.sub(r\"\\s*#.*$\",\"\",i)\n while i.endswith('\\\\'):\n try:\n o = vlines.pop(0)\n except IndexError:\n o = \"\"\n i = i.rstrip('\\\\') + o.strip()\n lines.append(i)\n\n for opt in lines:\n [name,val] = opt.split(\"=\",1)\n dict[name] = val.strip('\"')\n \n return dict\n\n #for file in file_list:\n # default_dict=_parse_file(file,default_dict)\n #parser = OptionParser(option_list=option_list)\n #parser.set_defaults(default_dict)\n #(options,args) = parser.parse_args(args)\n #return options", "title": "" }, { "docid": "2e339f93ffc1751a1c23de74529f45be", "score": "0.5928876", "text": "def parse_map_file(path):\n content = {}\n with open(path) as fp:\n for line in fp:\n if not line or line.startswith(\"#\"):\n continue\n name, value = line.split(\"=\", 1)\n content[name.strip()] = value.strip()\n return content", "title": "" }, { "docid": "8c41980799142d3f4281f5b179dacd91", "score": "0.591993", "text": "def extract_seq(input_file):\n result_dict = dict()\n with open(input_file, 'rb') as input:\n list_lines = input.readlines()\n i = 0\n while i < len(list_lines):\n line = list_lines[i]\n if line.startswith(\">\"):\n header = line.strip()\n j = i+1\n seq = \"\"\n while j < len(list_lines):\n curr_line = list_lines[j]\n if curr_line.startswith(\">\"):\n break\n else:\n seq += curr_line.strip()\n j += 1\n result_dict[header] = seq\n i = j\n else:\n i += 1\n return result_dict", "title": "" }, { "docid": "02f45e0f6768ca83980d61cd588e9ef0", "score": "0.5916067", "text": "def parse_fastq(file_name):\t\n\tfastq_dict = {}\n\tfile = open(file_name)\n\tfile_content = file.readlines()\n\ti = 0\n\twhile i < len(file_content):\n\t\tif i % 4 == 0:\n\t\t\tfastq_dict[file_content[i].strip('\\n')] = file_content[i+1].strip('\\n')\n\t\t\ti += 1\n\t\telse:\n\t\t\ti += 1\n\treturn fastq_dict", "title": "" }, { "docid": "bf4ea6aa5d45396258db0018429460fd", "score": "0.5915719", "text": "def conv_to_dict(conv_fn):\n\n conv_dict = dict()\n f = open(conv_fn, \"r+\")\n for line in f:\n idx, name = line.strip().split(\" \")\n conv_dict[idx] = name\n\n return conv_dict", "title": "" }, { "docid": "093c1348a46c2df4cc68e3784da617f9", 
"score": "0.5913376", "text": "def file_parse(file):\n insulin_doses = {}\n for value in file.items():\n events = value[1]\n for event_data in events:\n if event_data['eventType'] == 'insulin':\n insulin_doses[event_data['eventId']] = (event_data['systemTime'], event_data['eventSubType'],\n event_data['value'])\n return insulin_doses", "title": "" }, { "docid": "e052cbf60c554552e18db0d2570c1040", "score": "0.5876272", "text": "def read_file(filename):\r\n dict = {}\r\n key = 0\r\n\r\n fd = open(filename, \"r\")\r\n for line in fd:\r\n if line[0] == '#':\r\n continue\t\t# causes the loop to grab another line\r\n key += 1\r\n values = line.rstrip('\\n').split(',')\r\n lat = float(values[7])\r\n lon = float(values[8])\r\n mag = float(values[1])\r\n dep = float(values[10])\r\n dict[key] = [lon, lat, mag, dep]\r\n fd.close()\r\n return dict", "title": "" }, { "docid": "ba342185aea4af9940e833470b603cbe", "score": "0.58344674", "text": "def get_yutiming_header_data(f, header_start=7, header_end=9):\n with open(f, 'r') as file_:\n lines = file_.readlines()\n regex = re.compile(r'^(\\s*.*?)(\\d+\\.\\d*)\\s*')\n d = {}\n for line in lines[header_start-1:header_end]:\n m = regex.search(line)\n if m is not None:\n g = m.groups()\n key = str(g[0]).strip()\n key = key.rstrip(':')\n value = float(g[1])\n d[key] = value\n return d", "title": "" }, { "docid": "01876a0bec6365dd0a330ad602fa1874", "score": "0.58277315", "text": "def parse_data(fname):\n\n data_dict = dict()\n file_stream = open(fname, 'r')\n csvreader = csv.reader(file_stream, delimiter='\\t')\n\n # Assumes the data is already sorted.\n for line in csvreader:\n y_label, start, end, name, _, plus_minus = line\n start, end = int(start), int(end)\n # Get the value to plot\n diff = end - start\n current_store = data_dict.get(y_label, [])\n # Fill in gaps between previous end and current start\n gap = determine_gap(current_store=current_store, start=start)\n if gap:\n current_store.append(gap)\n\n color = get_color(name)\n # Add this segment to data dict.\n current_store.append(f\"{diff}:{end}:{name}:{color}\")\n\n # Add a black bar between segments\n data_dict[y_label] = current_store\n\n return data_dict", "title": "" }, { "docid": "3ac0b3943fac1d268dd8f902dcda4a2a", "score": "0.582328", "text": "def make_seqdict(input_file, format='fasta'):\n if is_gzipped(input_file):\n try:\n seq_handle = gzip.open(input_file, 'rb')\n except IOError:\n print('Cannot open fasta file')\n sys.exit()\n else:\n try:\n seq_handle = open(input_file, 'r')\n except IOError:\n print('Cannot open fasta file')\n sys.exit()\n\n seq_dict = SeqIO.to_dict(SeqIO.parse(seq_handle, format))\n return seq_dict", "title": "" }, { "docid": "a07a0f8e996ec9f8b5b4c5ee5e7fbcfb", "score": "0.5822034", "text": "def parse_calib_file(file_name: str) -> dict:\n with open(file_name, encoding='UTF-8') as file:\n lines = file.readlines()\n\n lines = [ln.split('=')[1].rstrip('\\n') for ln in lines]\n\n return {\n 'intrinsic_0': _parse_intrinsic(lines[0]),\n 'intrinsic_1': _parse_intrinsic(lines[1]),\n 'cx_diff': float(lines[2]),\n 'baseline': float(lines[3]),\n 'shape': (int(lines[5]), int(lines[4])),\n 'disp_n': int(lines[6]),\n 'is_int': True if int(lines[7]) else False,\n 'disp_min': int(lines[8]),\n 'disp_max': int(lines[9]),\n 'disp_y_avg': float(lines[10]),\n 'disp_y_max': int(lines[11]),\n }", "title": "" }, { "docid": "283d0c23a962a61135bd8b2be01122f7", "score": "0.5820655", "text": "def file_parser(file_path: str, key_col: int, key_type: Any, val_col: int, val_type: Any,\n delimiter: 
str=',', rows_to_skip: int=0) -> Dict[Any, Any]:\n assert os.path.isfile(file_path), '{} not found'.format(file_path)\n data = dict()\n with open(file_path) as f:\n i = 0\n for line in f:\n if i < rows_to_skip:\n i += 1\n continue\n split_line = line.rstrip().split(sep=delimiter)\n data[key_type(split_line[key_col])] = val_type(split_line[val_col])\n return data", "title": "" }, { "docid": "e92d64f5675d6c06cafe09d478cd93a2", "score": "0.58146775", "text": "def readParameters ():\n\n params = {}\n\n for line in fileinput.input ('./regions.txt'):\n \n if line.startswith ('#') or not line.strip ():\n continue\n\n fields = line.split ('=')\n name, size = fields[0].strip (), float (fields[1])\n params.update ({name: size})\n\n return params", "title": "" }, { "docid": "9664c81fa955bf9604cf62f8c1ef710c", "score": "0.5812527", "text": "def read_data_from_file(filename):\n out_dict = {}\n return out_dict", "title": "" }, { "docid": "0624aacf5cb60d0a6bd9bf4c7f8063e0", "score": "0.58062273", "text": "def _read(self, file_path: str) -> Dict[str, str]:\n data = dict()\n with open(file_path, 'r') as fasta:\n sequences = filter(None, fasta.read().split('>'))\n for seq in sequences:\n rna_id, rna = self._get_rna(seq)\n data[rna_id] = rna\n return data", "title": "" }, { "docid": "193fb22019f52c95cea55ba7a9c45c25", "score": "0.5805819", "text": "def __readfile(self) -> dict:\n gene_lists = dict()\n with open(self.filename, \"r\") as f:\n for record in f:\n fields = record.strip().split(\"\\t\")\n if self.read_descriptor:\n gene_lists[fields[0]] = {}\n gene_lists[fields[0]][\"genes\"] = fields[2:]\n gene_lists[fields[0]][\"descriptor\"] = fields[1]\n else:\n gene_lists[fields[0]] = fields[2:]\n return gene_lists", "title": "" }, { "docid": "f8545f3bf0cdfeeb5973d25b89b30d8d", "score": "0.580378", "text": "def segment_assignments_to_dict(rep_str):\n rep_str = rep_str.strip(\" \\n\")\n rep_lines = rep_str.split(\"\\n\")\n reps = collections.OrderedDict()\n for line in rep_lines: \n if not \"->\" in line: \n #print(\"skipping line\", line) \n continue \n k,v = line.split(\"->\")\n k = k.strip().upper()\n v = v.strip()\n \n v1,v2 = v.split(\":\")\n v1 = v1.strip()\n v2 = v2.strip()\n reps[k] = [v1,v2]\n return reps", "title": "" }, { "docid": "a6074a1cdd0ed751bca46cd5095be2ed", "score": "0.5793482", "text": "def parse_file(expected_events_file):\n print(\"\\n- Parsing expected events file: %s\" % expected_events_file)\n with open(expected_events_file, \"r\") as fd:\n lines = fd.readlines()\n families = []\n event_definitions = {}\n for line in lines:\n line = line.strip()\n if line == \"\":\n # empty line: ignore\n pass\n elif line.startswith(COMMENT_LABEL):\n # It is a comment: ignore\n pass\n elif line.startswith(FAMILY_LABEL):\n # It is a family entry\n f_elements = line.split()\n f_type = int(f_elements[1])\n f_name = f_elements[2]\n families.append((f_type, f_name))\n else:\n # It is an event/range definition\n d_elements = line.split()\n d_type = int(d_elements[0])\n d_mode = d_elements[1]\n if d_mode == EVENT_LABEL:\n d_event = int(d_elements[2])\n d_appearances = d_elements[3]\n if d_appearances == \"undefined\":\n d_appearances = -1\n elif \",\" in d_appearances:\n d_appearances_values = d_appearances.split(\",\")\n d_appearances = [int(d_app) for d_app in d_appearances_values]\n else:\n d_appearances = int(d_appearances)\n if d_appearances > 0 or d_appearances == -1:\n if d_type not in list(event_definitions.keys()):\n # create new type\n event_definitions[d_type] = {}\n if EVENT_LABEL not in 
list(event_definitions[d_type].keys()):\n # create event label\n event_definitions[d_type][EVENT_LABEL] = {}\n if d_event in list(event_definitions[d_type][EVENT_LABEL].keys()):\n # redefined event\n raise Exception(\n \"ERROR: Event defined twice: %s %s\" % (d_type, d_event)\n )\n else:\n # include event\n event_definitions[d_type][EVENT_LABEL][d_event] = d_appearances\n if d_mode == RANGE_LABEL:\n d_min_event = int(d_elements[2])\n d_max_event = int(d_elements[3])\n d_appearances = d_elements[4]\n if d_appearances == \"undefined\":\n d_appearances = -1\n elif \",\" in d_appearances:\n d_appearances_values = d_appearances.split(\",\")\n d_appearances = [int(d_app) for d_app in d_appearances_values]\n else:\n d_appearances = int(d_appearances)\n if (\n isinstance(d_appearances, int)\n and (d_appearances > 0 or d_appearances == -1)\n ) or (isinstance(d_appearances, list)):\n if d_type not in list(event_definitions.keys()):\n # create new type\n event_definitions[d_type] = {}\n if RANGE_LABEL not in list(event_definitions[d_type].keys()):\n # create range label\n event_definitions[d_type][RANGE_LABEL] = []\n else:\n # redefined erange\n raise Exception(\"ERROR: Event range defined twice: %s\" % d_type)\n # include event\n event_definitions[d_type][RANGE_LABEL] = (\n d_min_event,\n d_max_event,\n d_appearances,\n )\n if d_mode != EVENT_LABEL and d_mode != RANGE_LABEL:\n raise Exception(\n \"Unsupported event mode: %s (supported are: event or range)\"\n % d_mode\n )\n print(\"\\t- Rules:\")\n print(event_definitions)\n return families, event_definitions", "title": "" }, { "docid": "3035f36ab00a7dc017dff58e5ce9b062", "score": "0.5793257", "text": "def parse_calibration(self):\n\n if not os.path.exists(self.cache_file):\n return\n\n with open(self.cache_file, \"rb\") as f:\n lines = f.read().decode('ascii').splitlines()\n\n calibration_dict = {}\n np127 = np.float32(127.0)\n for line in lines:\n split = line.split(':')\n if len(split) != 2:\n continue\n tensor = split[0]\n dynamic_range = np.uint32(int(split[1], 16)).view(np.dtype('float32')).item() * np127\n calibration_dict[tensor] = dynamic_range\n\n return calibration_dict", "title": "" }, { "docid": "79cb57b7f898012c48ce7ff31e25be51", "score": "0.57899207", "text": "def parse_file(self, f):\n final_output = {}\n for line in f:\n output = self.line(line)\n self.merge_output(final_output, output)\n return final_output", "title": "" }, { "docid": "ccb83933a0a60d4a1a1f2e09c10c111a", "score": "0.57880765", "text": "def genome_dic(g_file):\n\tgdict = {}\n\twith open(g_file) as ifile:\n\t\tfor i in ifile:\n\t\t\ti = i.split()\n\t\t\tgdict[i[0]] = int(i[1])\n\treturn gdict", "title": "" }, { "docid": "e3dd1fd8135ebcef7a0252e60b747931", "score": "0.5721721", "text": "def marker_position_parser(marker_position_file):\n thedic = {}\n with open(marker_position_file, \"r\") as thefile:\n next(thefile) #skip the header\n for line in thefile:\n elements = line.split(\"\\t\")\n key, chr, position = elements[0], elements[1], elements[2]\n thedic[key] = [chr, position]\n return thedic", "title": "" }, { "docid": "c1ad8342b635218b443130030fe8442a", "score": "0.57080233", "text": "def read_file(filename: str) -> dict:\n\n # Initializes file as a blank string\n file = \"\"\n\n # Attempts to open the user-given file, if the file is not found, an exception is\n # thrown, a standard error is displayed, and the program is terminated\n try:\n file = open(\"../\" + filename)\n\n except FileNotFoundError:\n sys.stderr.write(\"Error: \" + filename + \" does not 
exist!\\n\")\n exit(1)\n\n # Initializes Usage (for better readability) and occurrences (the dictionary that will be returned)\n Usage = collections.namedtuple(\"Usage\", (\"year\", \"occurrence\"))\n occurrences = {}\n\n # Iterates through each line in file\n for line in file:\n\n # Splits line into its 3 components (parses by commas) and strips any excess whitespace\n components = line.strip().split(',')\n\n # Creates a Usage tuple, entry, with the 2 components as parameters\n # components[0] = word, components[1] = year, components[2] = number of occurrences\n entry = Usage(components[1].strip(), components[2].strip())\n word = components[0].strip().lower()\n\n # If the word is already a key, append its year and usage to its value list as a tuple,\n # otherwise, simply add a new entry to occurrences\n if word in occurrences:\n occurrences[word].append(entry)\n else:\n occurrences[word] = [entry]\n\n return occurrences", "title": "" }, { "docid": "a334e8f0e48b9ca5411779af19331f9c", "score": "0.56775296", "text": "def get_hpdict(infilename):\n hp_dict = defaultdict(list)\n with open(infilename) as file:\n for line in file:\n if line.startswith('#'):\n continue\n pix = int(line.split()[-1])\n hp_dict[pix].append(line)\n return hp_dict", "title": "" }, { "docid": "3eb6f8f2e01bea7c93bc65ef696e2e90", "score": "0.5661182", "text": "def ark2dict(arkfile):", "title": "" }, { "docid": "00ad9f10ebc3096937c7e029b86e7123", "score": "0.5638942", "text": "def fasta_parser(fasta_path):\n g_dict={}\n with open(fasta_path, 'r') as f0:\n lines = f0.readlines()\n seq=''\n for line in lines:\n if line.startswith('>'):\n try: # assign the previous sequence to the previous key\n g_dict[key]=seq\n except: # handle the first key, not assigned error\n pass\n key=line.strip()[1:]\n seq=''\n else:\n seq+=line.strip()\n g_dict[key]=seq ## assign the last sequence to the key\n return g_dict", "title": "" }, { "docid": "9a00e1b85a591a6a272572cc1e40dccf", "score": "0.56304854", "text": "def parse_locs_file(locs_fn):\n raw_locs = defaultdict(set)\n with open(locs_fn) as locs_fp:\n for line in locs_fp:\n try:\n chrm, pos, _, _, _, strand = line.split()[:6]\n # bed specs indicate 0-based start so don't shift here\n pos = int(pos)\n raw_locs[(chrm, strand)].add(pos)\n except:\n continue\n\n return dict((cs, np.array(sorted(cs_poss)))\n for cs, cs_poss in raw_locs.items())", "title": "" }, { "docid": "b9f43d53f10a986ab197590db924f1d3", "score": "0.56301385", "text": "def read_in_file(f):\n rain_data = {}\n while True:\n line = f.readline()\n if line:\n line = line.strip()\n rain_data[line] = tuple(f.readline().split())\n else:\n break\n return rain_data", "title": "" }, { "docid": "62cf8d8076daf39d9fb3a20d8da69491", "score": "0.56228703", "text": "def parse_fences_from_file():\n model_fences = {}\n\n fencefile = path.join(path.dirname(path.realpath(__file__)), 'public/data/dates_and_spatial_range.txt')\n\n with open(fencefile, mode='r') as f:\n f.readline() # skip column headings line\n for line in f.readlines():\n if not (line == '' or 'Model name' in line): # end condition\n line = line.strip()\n linevals = line.split('|')\n start_date = (datetime.strptime(linevals[1].split(' ')[0], '%m/%d/%Y') + timedelta(days=1)) \\\n .strftime('%m/%d/%Y')\n # begin_time = linevals[1].split(' ')[1]\n end_date = (datetime.strptime(linevals[2].split(' ')[0], '%m/%d/%Y') - timedelta(days=1)) \\\n .strftime('%m/%d/%Y')\n # end_time = linevals[2].split(' ')[1]\n nbound = linevals[3].split(', ')[0]\n ebound = linevals[3].split(', ')[1]\n 
sbound = linevals[3].split(', ')[2]\n wbound = linevals[3].split(', ')[3]\n model_fences[linevals[0]] = {\n 'start_date': start_date,\n 'end_date': end_date,\n 'extents': {\n 'maxY': nbound,\n 'maxX': ebound,\n 'minY': sbound,\n 'minX': wbound\n }\n }\n\n return model_fences", "title": "" }, { "docid": "0ed0ef2e4d48b72ae0dde5e92f814d0f", "score": "0.56207144", "text": "def keyval_to_dict(file_path):\n my_dict = {}\n with open(file_path) as f:\n for line in f:\n name, val = line.partition(\":\")[::2]\n my_dict[name.strip()] = val.strip()\n return my_dict", "title": "" }, { "docid": "8020a7cccd81f6f33afda29218952000", "score": "0.561767", "text": "def read_filter(filter_file):\n #init\n gap_num = 0\n d = {}\n l=[]\n #open file\n try:\n f, file_enc = open_file(filter_file, 'r')\n except IOError:\n print _('Error: filter file: [%s] does not exist' % (filter_file))\n return {}\n #get type\n try:\n line = f.next()\n line = line.strip()\n d[_sections[0]] = line\n f.next()\n except StopIteration:\n #filter file too short\n print _('Error: filter file: [%s] invalid (too short!)' % (filter_file))\n #print _('\n return {}\n #for each line in file\n for line in f:\n line = line.strip()\n if line == '':\n #end of section, add current list (as a dictionary) to filter\n gap_num += 1\n d[_sections[gap_num]] = dict(l)\n l = []\n else:\n #read name & true / false setting\n try:\n next_line = f.next().strip()\n except StopIterator:\n next_line = 'False'\n filt_item = [line, next_line.lower() == 'true']\n l.append(filt_item)\n #done, close file & return dictionary\n f.close()\n return d", "title": "" }, { "docid": "c2cf6003bc22ddeabe9b6b30759bdd9f", "score": "0.56165034", "text": "def getChromSizes(infile):\n chromDict = {}\n with open(infile, 'r') as f:\n for line in f:\n line = line.strip()\n if line == '':\n continue\n data = line.split()\n chromDict[data[0]] = int(data[1])\n return chromDict", "title": "" }, { "docid": "c84372129fdf80b9bc977b00cd2bc9e3", "score": "0.5614116", "text": "def get_mapping(infile):\n with open(infile) as map:\n my_map = eval(map.read().strip('\\n'))\n return my_map", "title": "" }, { "docid": "212af0d8d887d27ac1bdd99e874d3aab", "score": "0.56105006", "text": "def file_to_dict(filename):\r\n\tdictionary = {}\r\n\tFile1 = open(filename, \"r\")\r\n\tprint(\"The items available are : \")\r\n\tfor i in (File1.read()).split(\"\\n\"):\r\n\t\tif len(i) > 2:\r\n\t\t\ttemp_lis = i.split(\", \")\r\n\t\t\tprint(temp_lis[2], temp_lis[0], \" costing : \", temp_lis[1])\r\n\t\t\tdictionary[temp_lis[0]] = ([int(temp_lis[1]), int(temp_lis[2])])\r\n\tFile1.close()\r\n\treturn dictionary", "title": "" }, { "docid": "f7d8c6316d5a30d72ea36095d7888fdf", "score": "0.56094015", "text": "def process_file(filename):\n out_dict = {}\n with open(filename) as data_file:\n for line in data_file.readlines():\n # Split the data into an array\n data = [float(d) for d in line.split()]\n\n # Initial size of the system\n current_system_size = 4\n\n # Split the loop lengths from data into out_dict\n for loop_length in data:\n if current_system_size not in out_dict:\n out_dict[current_system_size] = []\n\n # Add the data to the appropriate system_size\n out_dict[current_system_size].append(loop_length)\n current_system_size *= 2\n\n return out_dict", "title": "" }, { "docid": "06e8f2452598b84dffdbf0c78bb156a5", "score": "0.56034845", "text": "def file_to_dictionary(file):\n\t\n\tnew_string = _string_from_file(file)\n\tnew_list = _extract_data_from_list(new_string)\n\tnew_dict = 
_create_dictionary_from_list(new_list)\n\n\treturn new_dict", "title": "" }, { "docid": "004b18e08e066afaa5e7d3238b8a3f57", "score": "0.5602653", "text": "def make_seq_dict(file):\n\n seq_dict = {}\n\n for line in file.read().rstrip().split('\\n'):\n if line.startswith('>'):\n header = line\n if header not in seq_dict:\n seq_dict[header] = {}\n seq_dict[header]['sequence'] = ''\n seq_dict[header]['gc'] = 0\n else:\n seq = line\n test_nucleotides(seq)\n seq_dict[header]['sequence'] += seq\n gc = calculate_gc(seq_dict[header]['sequence'])\n seq_dict[header]['gc'] = gc\n\n test_n_seqs(seq_dict)\n\n test_len(seq_dict)\n\n # print(seq_dict)\n\n return seq_dict", "title": "" }, { "docid": "b9fd6a5280ad09e3e4b981c3e56964d0", "score": "0.5593648", "text": "def get_forwards_dict(forwards_file):\n fwd_dict = {}\n fi=open(forwards_file)\n header = fi.readline()\n for lin in fi:\n lii = lin.split()\n fwd_dict[int(lii[0])]=lii[1]\n return fwd_dict", "title": "" }, { "docid": "4a0cac902fc8fdfc726bebbab84cacc7", "score": "0.55903184", "text": "def _parse(self):\n\n if self._file_path is None and self._file_handler is None:\n return\n\n # Create dictionary from a file\n self._from_file()", "title": "" }, { "docid": "524344155db73a4ae33fef6675423055", "score": "0.5581703", "text": "def readFromFile(self, infile, ignore_strand=False):\n\n self.mForwardRegions = {}\n self.mReverseRegions = {}\n self.mRegions = []\n self.mIgnoreStrand = ignore_strand\n n = 0\n for line in infile:\n if line[0] == \"#\":\n continue\n\n token, sbjct_token, sbjct_strand, sbjct_from, sbjct_to = line[\n :-1].split(\"\\t\")[:5]\n\n if ignore_strand:\n key = sbjct_token\n else:\n key = \"%s-%s\" % (sbjct_token, sbjct_strand)\n\n if key not in self.mForwardRegions:\n self.mForwardRegions[key] = []\n self.mReverseRegions[key] = []\n\n self.mForwardRegions[key].append((int(sbjct_from), n))\n self.mReverseRegions[key].append((int(sbjct_to), n))\n self.mRegions.append((token, sbjct_from, sbjct_to))\n n += 1\n\n for k, v in self.mForwardRegions.items():\n v.sort()\n self.mForwardRegions[k] = (map(lambda x: x[0], v),\n map(lambda x: x[1], v))\n\n for k, v in self.mReverseRegions.items():\n v.sort()\n self.mReverseRegions[k] = (map(lambda x: x[0], v),\n map(lambda x: x[1], v))", "title": "" }, { "docid": "8c9e67c9e1bedbd345a80b7360855b6c", "score": "0.55686647", "text": "def read_to_dict(inputfile):\n\n opened = False\n if not hasattr(inputfile, 'read'):\n opened = True\n inputfile = open(inputfile, 'r')\n\n dict = {}\n for line in inputfile:\n line = line.strip().lower()\n dict[line] = True\n\n if opened:\n inputfile.close()\n\n return dict", "title": "" }, { "docid": "057d19cf111182c3b290b2f9c14bf1ee", "score": "0.554986", "text": "def protocol_parse(filename, protocol):\n \n f = file(filename, 'r')\n varnames = f.readline().strip().split(\",\")\n targetline = [l.strip().split(\",\") for l in f if l.startswith(protocol)][0]\n f.close()\n return dict( zip(varnames,targetline) )", "title": "" }, { "docid": "f9720d6a8862676fabb16cea2634aced", "score": "0.55435276", "text": "def parse_file(infile, gene_to_prot, prot_start_stop):\n with open(infile, \"r\") as fh:\n for line in fh:\n line = split_line(line)\n if not test_line(line):\n continue\n if line.startswith(\"Protein\"): # 1 st line\n pass\n Protein_Group, Protein_ID, Protein_Accession, Peptide, Unique,\\\n ten_10lgP, Mass, Length, ppm, m_z, z, RT, \\\n Area_Sample_1, Fraction, Scan, Source_File, \\\n Feature, Feature_Sample_1, \\\n Start, End, PTM, AScore, \\\n Found_By = 
line.split(\",\")\n gene_to_prot[Protein_Accession].add(Peptide)\n gene_plus_prot = \"%s_%s\" % (Protein_Accession, Peptide)\n start_stop = \"%s_%s\" % (Start, End)\n prot_start_stop[gene_plus_prot] = start_stop\n return gene_to_prot, prot_start_stop", "title": "" }, { "docid": "975b1ccec3ee3adf5c85d0d1c6f85dee", "score": "0.5542247", "text": "def from_gff3_line_to_dict( line ):\n fields = line.strip().split( \"\\t\" )\n assert len( fields ) == 9 # sanity check\n\n def getAttribute( entry, regexp ):\n m = re.search( regexp, entry )\n return None if m is None else m.group(1)\n def removeAttribute( entry, regexp ):\n return re.sub( regexp, \"\", entry )\n\n attributes = fields[8]\n result = {\n \"ID\": getAttribute( attributes, _regular_expressions[ \"ID\" ] ),\n \"Parent\": getAttribute( attributes, _regular_expressions[ \"Parent\" ] ),\n \"Name\": getAttribute( attributes, _regular_expressions[ \"Name\" ] ),\n \"biotype\": getAttribute( attributes, _regular_expressions[ \"biotype\" ] ),\n \"seqid\": fields[0],\n \"source\": fields[1],\n \"type\": fields[2],\n \"start\": None if fields[3] == \".\" else int( fields[3] ),\n \"end\": None if fields[4] == \".\" else int( fields[4] ),\n \"score\": None if fields[5] == \".\" else float(fields[5]),\n \"strand\": None if fields[6] == \".\" else fields[6],\n \"phase\": None if fields[7] == \".\" else fields[7],\n \"attributes\": None\n }\n for attribute in _regular_expressions.keys():\n result[attribute] = getAttribute( attributes, _regular_expressions[attribute] )\n attributes = removeAttribute( attributes, _regular_expressions[attribute] )\n result['attributes'] = attributes\n return result", "title": "" }, { "docid": "4efdbd9ba05d8ce659aaa37ba5104840", "score": "0.5537716", "text": "def read_file_into_memory(filename):\n \n input_data = open(filename, 'r')\n \n input_array = []\n for line in input_data:\n input_array.append(line.strip('\\n'))\n \n input_data.close()\n operation_array = []\n output_array = []\n \n for item in input_array:\n operation, output = item.split(' -> ')\n operation_array.append(operation)\n output_array.append(output)\n \n input_dict = dict( zip(output_array, operation_array) )\n \n return input_dict", "title": "" }, { "docid": "1272a088f0c6d91c4154a13bab7b76af", "score": "0.5532868", "text": "def parser_geneID_file(geneID_file):\n with open(geneID_file, \"r\") as thefile:\n dic = {}\n for line in thefile:\n elements = line.split()\n if len(elements) != 1:\n dic[elements[1]] = elements[0]\n return dic", "title": "" }, { "docid": "722486a64752ec3ddbb3d2c6ecb4d2b8", "score": "0.5525743", "text": "def make_dict(self, filename):\n for line in read_file(filename):\n if not line.startswith('#'):\n error, correct, pos = line.split('\\t')\n self.fixdict[error + '+' + pos] = correct", "title": "" }, { "docid": "65a68a9dcb2885f71b10654e82b6e7cd", "score": "0.5524135", "text": "def load_dataset(filepath: str) -> (Dict[str, Tuple[Tuple[int, int], Tuple[int, int]]], List[int], List[List[int]]):\n step = 0\n rules = {}\n own = []\n tickets = []\n with open(filepath) as file:\n for line in file.readlines():\n if line == \"\\n\":\n step += 1\n else:\n line.replace(\"\\n\", \"\")\n if \"ticket\" in line:\n continue\n if step == 0:\n rule, bound_string = line.split(\":\")\n l, r = bound_string.strip().split(\" or \")\n l1, l2 = l.split(\"-\")\n r1, r2 = r.split(\"-\")\n l_int = (int(l1), int(l2))\n r_int = (int(r1), int(r2))\n rules[str(rule)] = (l_int, r_int)\n elif step == 1:\n own = [int(num) for num in line.split(\",\")]\n elif step == 
2:\n tickets.append([int(num) for num in line.split(\",\")])\n\n return rules, own, tickets", "title": "" }, { "docid": "53bd7a4f1c49ecbeebf797228090b996", "score": "0.55226004", "text": "def _internal_mfileparser(self, infile):\n is_reverse = {}\n genes = []\n gene_tracker = []\n comments = []\n sequences = ddict(str) # map each gene name to a sequence\n gpos = ddict(tuple)\n master_dict = {}\n speciesname = ''\n species_gc = 1\n\n with open(infile, 'r') as handle:\n line = handle.readline()\n while line and not line.startswith('>'):\n # Try to scan for the list of potential genes\n # if line.startswith(';;'):\n # line = line.strip()\n # nevermind, not useful\n line = handle.readline()\n # skip to first line with '>'\n # Set the required specname and gc code for the genome\n if line:\n # skip to genomic seq\n speciesname = line[1:].rstrip()\n species_gc = speciesname.split()[-1] # last item\n if species_gc and species_gc != speciesname:\n species_gc = species_gc.split('=')[-1].strip()\n\n line = handle.readline()\n # Storing genes and if they should be reversed.\n while line:\n line = line.strip()\n if line.startswith(';;'):\n pass\n elif line.startswith(';'):\n # Necessary informations are parsed\n\n line = line.strip('; ')\n if ';;' in line:\n comments.append(line.rsplit(';;')[-1])\n else:\n comments.append('')\n line = line.split(';;')[0].strip('; ')\n try:\n genename, updown, startend = line.split()[0:3]\n startend = startend.split()[0]\n is_starting = False\n\n # We should store the gene in genes with these conditions:\n # 1- If gene name has ==> and start in it\n # 2- If gene name has <== and end in it, then reverse it.\n # We will be removing introns and exons from gene names.\n if not (\"-I\" in genename or '-E' in genename):\n genes.append(genename)\n if updown == \"==>\" and startend == \"start\":\n is_reverse[genename] = False\n is_starting = True\n if updown == \"<==\" and startend == \"end\":\n is_reverse[genename] = True\n is_starting = True\n if genename not in gene_tracker and is_starting:\n gene_tracker.append(genename)\n else:\n gene_tracker = [\n gn for gn in gene_tracker if gn != genename]\n\n except ValueError:\n pass\n # this is one of the gene like rnl that we don't need anyway\n\n else:\n # If they are lowercase, this means they belong\n # to an intron which should not be taken into the sequence.\n pos, seq = line.split()\n self.full_genome += seq # saving whole genome\n if not seq.islower(): # sequence is exon\n for g in gene_tracker: # if the gene is not removed already, it's its sequence\n sequences[g] += seq\n cur_pos = gpos.get(g)\n if not cur_pos:\n gpos[g] = (int(pos), int(pos)+len(seq))\n else:\n gpos[g] = (cur_pos[0], cur_pos[1]+len(seq))\n line = handle.readline()\n\n # \"\"\" Now we should reverse 5'->3' strands to 3'->5' strands. 
\"\"\"\n for g, seq in sequences.items():\n if is_reverse.get(g):\n sequences[g] = reverse_sequence(seq)\n\n master_dict = {'species_name': speciesname, 'species_gc': species_gc,\n 'genes_list': genes, 'sequences': sequences, 'comments': comments, 'gpos': gpos}\n\n return master_dict", "title": "" }, { "docid": "14a92fa550aa5a21347230e25b7cf9a7", "score": "0.5517426", "text": "def parse_summary_file_into_dict(filename):\n done = False\n # results = {}\n results = collections.OrderedDict()\n with open(filename, 'r') as result_file:\n while not done:\n try:\n line = next(result_file)\n tokens = line.split('\\t')\n tag = tokens[0].strip()\n value = tokens[1].strip()\n if tag != ' ' or tag != '':\n results[tag] = value\n else:\n break\n except StopIteration:\n break\n return results", "title": "" }, { "docid": "6d29506dddd6b7b706a298cf908001e9", "score": "0.5509084", "text": "def parse_config_file():", "title": "" }, { "docid": "be524b8856e84b42df42145911aa0828", "score": "0.55048764", "text": "def _parse_filename(self, filename):\n ret = {}\n ret[\"X\"] = filename[0]\n ret[\"obsidentif\"] = filename[1:11]\n ret[\"I\"] = filename[11:13]\n ret[\"U\"] = filename[13]\n ret[\"E\"] = filename[14:17]\n ret[\"T\"] = filename[17:23]\n ret[\"S\"] = filename[23]\n ret[\"X-\"] = filename[24:27]\n ret[\"Z\"] = filename[28:]\n return ret", "title": "" }, { "docid": "89fd1988ad3562e0480dabeefadc7b13", "score": "0.5501389", "text": "def normalized_file_read(filename,min_depth):\n\tf = open(filename,'r')\n\toutdict = {}\n\tfor str_x in f:\n\t\tstr_x = str_x.strip(\"\\n\")\n\t\tlist_x = str_x.split(\"\\t\")\n\t\tif list_x[0] == \"sgRNA\":\n\t\t\t#print(\"\\t\".join(list_x+[\"low_lfc\",\"medium_lfc\",\"high_lfc\",\"minus_count_cutoff\",\"plus_count_cutoff\"]))\n\t\t\tcontinue\n\t\tsgRNA_name = list_x[0]\n\t\tCXCR4minus_Rep1 = float(list_x[2])\n\t\tCXCR4minus_Rep2 = float(list_x[3])\n\t\tCXCR4minus_Rep3 = float(list_x[4])\n\t\tCXCR4plus_Rep1 = float(list_x[5])\n\t\tCXCR4plus_Rep2 = float(list_x[6])\n\t\tCXCR4plus_Rep3 = float(list_x[7])\n\t\tminus_list = [CXCR4minus_Rep1,CXCR4minus_Rep2,CXCR4minus_Rep3]\n\t\tplus_list = [CXCR4plus_Rep1,CXCR4plus_Rep2,CXCR4plus_Rep3]\n\t\tLFC_list = lfc_count(minus_list,plus_list)\n\t\tLFC_list = [str(x) for x in LFC_list]\n\t\tmedium_LFC = LFC_list[1]\n\t\tminus_cutoff = mean_count_cutoff(minus_list,cutoff_value = min_depth)\n\t\tplus_cutoff = mean_count_cutoff(plus_list,cutoff_value = min_depth)\n\t\toutdict[sgRNA_name] = \"\\t\".join([medium_LFC,minus_cutoff,plus_cutoff])\n\treturn outdict\n\t\t#print(\"\\t\".join(list_x+LFC_list) + \"\\t\" + minus_cutoff + \"\\t\" + plus_cutoff)", "title": "" }, { "docid": "2a8338083f5e6d26f86fc7565fec298b", "score": "0.5498422", "text": "def parse_positions_hh(line):\n dictionary = dict()\n dictionary[\"RECTYPE\"] = line[0:1]\n dictionary[\"YEAR\"] = line[1:5]\n dictionary[\"DATANUM\"] = line[5:7]\n dictionary[\"SERIAL\"] = line[7:15]\n dictionary[\"NUMPREC\"] = line[15:17]\n dictionary[\"SUBSAMP\"] = line[17:19]\n dictionary[\"HHWT\"] = line[19:29]\n dictionary[\"NUMPERHH\"] = line[29:33]\n dictionary[\"HHTYPE\"] = line[33:34]\n dictionary[\"DWELLING\"] = line[34:42]\n dictionary[\"SLPERNUM\"] = line[42:44]\n dictionary[\"CPI99\"] = line[44:49]\n dictionary[\"REGION\"] = line[49:51]\n dictionary[\"STATEICP\"] = line[51:53]\n dictionary[\"STATEFIP\"] = line[53:55]\n dictionary[\"COUNTY\"] = line[55:59]\n dictionary[\"URBAN\"] = line[59:60]\n dictionary[\"METRO\"] = line[60:61]\n dictionary[\"METAREA\"] = line[61:64]\n 
dictionary[\"METAREAD\"] = line[64:68]\n dictionary[\"CITY\"] = line[68:72]\n dictionary[\"CITYPOP\"] = line[72:77]\n dictionary[\"SIZEPL\"] = line[77:79]\n dictionary[\"URBPOP\"] = line[79:84]\n dictionary[\"SEA\"] = line[84:87]\n dictionary[\"WARD\"] = line[87:90]\n dictionary[\"CNTRY\"] = line[90:93]\n dictionary[\"GQ\"] = line[93:94]\n dictionary[\"GQTYPE\"] = line[94:95]\n dictionary[\"GQTYPED\"] = line[95:98]\n dictionary[\"GQFUNDS\"] = line[98:100]\n dictionary[\"FARM\"] = line[100:101]\n dictionary[\"OWNERSHP\"] = line[101:102]\n dictionary[\"OWNERSHPD\"] = line[102:104]\n dictionary[\"RENT\"] = line[104:108]\n dictionary[\"VALUEH\"] = line[108:115]\n dictionary[\"NFAMS\"] = line[115:117]\n dictionary[\"NSUBFAM\"] = line[117:118]\n dictionary[\"NCOUPLES\"] = line[118:119]\n dictionary[\"NMOTHERS\"] = line[119:120]\n dictionary[\"NFATHERS\"] = line[120:121]\n dictionary[\"MULTGEN\"] = line[121:122]\n dictionary[\"MULTGEND\"] = line[122:124]\n dictionary[\"ENUMDIST\"] = line[124:128]\n dictionary[\"SUPDIST\"] = line[128:131]\n dictionary[\"RESPOND\"] = line[131:132]\n dictionary[\"SPLIT\"] = line[132:133]\n dictionary[\"SPLITHID\"] = line[133:141]\n dictionary[\"SPLITNUM\"] = line[141:145]\n dictionary[\"SPLIT40\"] = line[145:146]\n dictionary[\"SERIAL40\"] = line[146:154]\n dictionary[\"NUMPREC40\"] = line[154:158]\n dictionary[\"EDMISS\"] = line[158:159]\n\n return dictionary", "title": "" }, { "docid": "9644521c3f82ddc5ddc8574260793d93", "score": "0.5497287", "text": "def read_snp_file(infile,both=False):\n if (both):\n snp_dict=defaultdict(defaultdict) #dictionary of chroms with positions and pVals of snps\n else:\n snp_dict=defaultdict(set) #dictionary of chroms with positions and pVals of snps\n inf = open(infile,\"r\")\n #load dictionary\n for line in inf:\n if re.match(\"\\#\",line):\n continue\n line=line.rstrip()\n fields=line.split()\n if (len(fields) < 3): continue\n if both:\n snp_dict[fields[0]][fields[1]]=\"\\t\".join(fields[2:])\n else:\n snp_dict[fields[0]].add(int(fields[1])) \n inf.close()\n return snp_dict", "title": "" }, { "docid": "ca29e8c04ea46162f38ad5c6bcc526bf", "score": "0.5487817", "text": "def get_template_rect(rect_filename):\n with open(rect_filename, 'r') as f:\n values = [float(v) for v in f.read().split()]\n return dict(zip(['x', 'y', 'w', 'h'], values[0:4]))", "title": "" }, { "docid": "3f859ca65b749783f63901f7cf9382db", "score": "0.5482765", "text": "def parse_lines(input_file_name, lines): # returns a dictionary\n d = {}\n chips = []\n for line in lines:\n if line.find(':') == -1:\n _die(\"reading file %s. 
\" + \n \"Missing colon in line: %s\" % \n (input_file_name, line))\n (chip, extensions) = line.split(':')\n chip = chip.strip()\n chips.append(chip)\n extensions = extensions.split()\n if chip in d:\n _die(\"Duplicate definition of %s in %s\" % \n (chip, input_file_name))\n if chip == 'ALL':\n _die(\"Cannot define a chip named 'ALL'.\" +\n \" That name is reserved.\")\n d[chip] = extensions\n return (chips,d)", "title": "" }, { "docid": "88d63607f0f0d2b5c3664469bb0c72e7", "score": "0.54636097", "text": "def load(f):\n m=0\n res = {}\n for lines in f:\n l = lines.split()\n if len(l)!=11: continue\n if l[1]=='Row': continue\n e = l[3]+l[5]\n res[e] = ( float(l[7]) , float(l[9]) )\n return res", "title": "" }, { "docid": "584187d0466e8dec76005c2ab75b6f78", "score": "0.54585725", "text": "def _parse_range_string(self):\n # Split the range string into a list of individual ranges\n range_strings = self.range_string.split(',')\n\n # Parse each range string and store the resulting range in the list\n start = end = None\n for range_string in range_strings:\n if \"-\" in range_string:\n start_, end_ = range_string.split('-')\n else:\n start_ = end_ = range_string\n\n if start is None:\n start = int(start_)\n if end is None:\n end = int(end_)\n elif int(start_) == end + 1:\n if start_ == end_:\n end = int(start_)\n else:\n end = int(end_)\n else:\n self.ranges.append(range(start, end + 1)) # Range is exclusive, so we need to add 1 to the end value\n start = int(start_)\n end = int(end_)\n self.ranges.append(range(start, end + 1))", "title": "" }, { "docid": "45978d0f658963da5857850abd1034f3", "score": "0.54562396", "text": "def readfile(filename):\n\n\tfile = open(filename, \"rt\")\n\n\tretval = {}\n\n\tT = int(file.readline().strip())\n\tretval['T'] = T\n\n\ttests = []\n\n\tfor i in xrange(T):\n\t\tline = file.readline().strip()\n\n\t\tparts = line.split(\" \")\n\n\t\tA = int(parts[0])\n\t\tB = int(parts[1])\n\n\t\ttest = {'A' : A, 'B' : B}\n\n\t\ttests = tests + [test, ]\n\n\tretval['tests'] = tests\n\n\treturn retval", "title": "" }, { "docid": "d136f9960d326e4106533d5e430885e6", "score": "0.54495823", "text": "def read(file_):\n check_presence_init(file_)\n\n dict_ = {'varnames': []}\n for line in open(file_).readlines():\n\n list_ = shlex.split(line)\n\n is_empty = (list_ == [])\n\n if not is_empty:\n is_keyword = list_[0].isupper()\n else:\n continue\n\n if is_keyword:\n keyword = list_[0]\n dict_[keyword] = {}\n continue\n\n process(list_, dict_, keyword)\n\n dict_ = auxiliary(dict_)\n\n return dict_", "title": "" }, { "docid": "c7c2427526d87cecb74e855da2517167", "score": "0.5443984", "text": "def read_geom_to_dict(input_geom) -> dict:\n\n important_keys = [\"fs\", \"ss\", \"corner_x\", \"corner_y\", \"coffset\", \"clen\", \"res\"]\n float_keys = [\"corner_x\", \"corner_y\", \"coffset\", \"clen\", \"res\"]\n ret = {}\n for key in important_keys:\n ret[key] = 0.0\n\n with open(input_geom, \"r\") as fin:\n for line in fin:\n for key in important_keys:\n if key == \"clen\":\n if line.replace(\" \", \"\").split(\"=\")[0] == key:\n ret[key] = np.float(line.replace(\" \", \"\").split(\"=\")[1])\n elif line[0] != \";\" and line.replace(\" \", \"\").split(\"=\")[0].endswith(\n f\"/{key}\"\n ):\n _, value = line[:-1].replace(\" \", \"\").split(\"=\")\n if key in float_keys:\n value = np.float(value)\n ret[key] = value\n\n M = angles2matrix(ret[\"ss\"], ret[\"fs\"])\n ret[\"M\"] = M\n\n return ret", "title": "" }, { "docid": "ab9575d2585dc366991995508c3ccc1a", "score": "0.54439276", "text": "def 
_parse_line(line: str) -> dict:\n d_str, all_changes = line.split(' | ')\n date = parse(re.sub(r'Date:[ ]+', '', d_str)).date()\n\n # add insertions and deletions\n insertions = re.findall(r'([0-9]+) insertions', all_changes)\n deletions = re.findall(r'([0-9]+) deletions', all_changes)\n changes = int(insertions[0]) if insertions else 0\n changes += int(deletions[0]) if deletions else 0\n return {'date': date, 'changes': changes}", "title": "" }, { "docid": "2b56c3f021e318b3d2bd4911f0c259b0", "score": "0.54393244", "text": "def dict_from_pbtxt_file(fname):\n lines = [line.rstrip('\\n').strip() for line in open(fname)]\n label_map = {}\n curr_label = ''\n curr_id = 0\n\n for l in lines:\n \n if l.startswith( 'display_name: '):\n curr_label = l.split(' ')[1]\n\n if l.startswith( 'id: '):\n curr_id = int(l.split(' ')[1])\n\n if l.startswith( '}'):\n # print(curr_id, curr_label)\n label_map[curr_id] = curr_label.replace(\"\\\"\", \"\")\n\n return label_map", "title": "" }, { "docid": "93b4aaf5c3cb951054d0456d49d4a902", "score": "0.54380137", "text": "def conf_to_dict(fname):\n name_match = re.compile(\"<name>(.*?)</name>\")\n val_match = re.compile(\"<value>(.*?)</value>\")\n conf = {}\n for line in open(fname):\n name = name_match.search(line)\n if name:\n key = name.groups()[0]\n val = val_match.search(line)\n if val:\n val = val.groups()[0]\n conf[key] = val\n return conf", "title": "" }, { "docid": "32fbe2ec4e122446f576ac2c18491cea", "score": "0.54354304", "text": "def _read_section(self, content):\n dictionary = OrderedDict()\n for line in content.split(\"\\n\"):\n # Remove comments and skip empty lines\n line = re.sub(\"#.*\", \"\", line)\n if re.match(r\"^\\s*$\", line):\n continue\n # Parse key and value\n matched = re.search(\"([^=]+)=(.*)\", line)\n if not matched:\n raise StructuredFieldError(\n \"Invalid key/value line: {0}\".format(line))\n key = matched.groups()[0].strip()\n value = matched.groups()[1].strip()\n # Handle multiple values if enabled\n if key in dictionary and self._multi:\n if isinstance(dictionary[key], list):\n dictionary[key].append(value)\n else:\n dictionary[key] = [dictionary[key], value]\n else:\n dictionary[key] = value\n return dictionary", "title": "" }, { "docid": "f9e16c3efb66eebd2a4b957b62066f75", "score": "0.54335237", "text": "def __load(self, ifile):\n # load map entries from file\n output = {}\n optmatch = None\n finput = AltFileInput(ifile, encoding = self.encd)\n for line in finput:\n if line:\n optmatch = RE_OPTIONS.match(line)\n if optmatch:\n if self.flags:\n raise RuleFormatError( \\\n msg = \"Multiple flag lines are not supported\", \\\n efile = finput)\n else:\n self.flags = optmatch.group(1)\n self.ignorecase = RegExp(self.flags, \"\").re.flags & re.IGNORECASE\n continue\n # find map entries\n line = skip_comments(line)\n m = MAP_DELIMITER.search(line)\n if m:\n src, trg = self.__normalize_quotes(line[0:m.start()], \\\n line[m.end():])\n if not (src and trg):\n print src.encode('utf-8')\n print trg.encode('utf-8')\n raise RuleFormatError(efile = finput)\n src = re.escape(src)\n if self.ignorecase:\n output[src.lower()] = trg\n else:\n output[src] = trg\n elif line:\n raise RuleFormatError(efile = finput)\n return output", "title": "" }, { "docid": "935d35d82f9f5d098fd8c7c594d803de", "score": "0.54278433", "text": "def builddict(fname,\n ignorestrings=['#'],\n dictdelim='=',\n startblock = None, \n endblock =None):\n f = open(fname, \"r\")\n line = f.readline()\n i = 0\n \n #print ignorestrings\n paramdict={}\n readin = False\n while line 
!= '': \n if startblock: \n if (readin ==False):\n if line.find(startblock) !=-1:\n readin = True\n else:\n readin =True\n if readin == False:\n line = f.readline()\n continue \n #while line != '':\n tmp = _tokenizeline(line, ignorestrings = ignorestrings , \n delimstrings = dictdelim)\n \n #print line , tmp\n tmp = tmp[0]\n if len(tmp) >1:\n key = tmp[0].strip()\n #print key, tmp\n val = tmp[1].strip()\n paramdict[str(key)] = str(val) \n line=f.readline()\n if endblock and line.find(endblock) !=-1:\n readin = False\n #print \"FOUND ENDBLOCK\"\n continue\n \n f.close()\n return paramdict", "title": "" }, { "docid": "8e4129356812ba710b3c8c38eecaad89", "score": "0.54276705", "text": "def value_ranges(cls) -> Dict[str, Union[Tuple[float, float], List[str]]]:\n return {}", "title": "" }, { "docid": "d6c2cf3c17e741dc6d9365ee5aa27b5e", "score": "0.5427396", "text": "def parse_sequence_file(file_path):\n\n in_buff = open(file_path)\n in_content = in_buff.readlines()\n\n counter = 0\n seq_dict = {}\n\n for line in in_content:\n\n # remove tailing new line\n line = line.strip()\n\n if counter % 3 == 0:\n\n name = line\n\n elif counter % 3 == 1:\n\n sequence = line\n\n elif counter % 3 == 2:\n\n ss_pred = line\n\n if 'U' not in ss_pred:\n seq_dict[name] = (sequence, ss_pred)\n\n counter += 1\n\n return seq_dict", "title": "" }, { "docid": "0d3f11f5bfa46237bee28bdf72852cc6", "score": "0.5420881", "text": "def get_file_to_dict(self):\n self.check_file_format()\n\n if self.file_format == 'YAML':\n return self.is_yaml_format()\n elif self.file_format == 'JSON':\n return self.get_json_to_dict()\n elif self.file_format == 'TERRAFORM':\n pass\n # Extension idea, although parsing HCL to JSON/usable format seems incredibly time consuming. Something\n # to look into with extra time.", "title": "" }, { "docid": "6c3444ccfaf0845b1a18f74520041fa8", "score": "0.54182297", "text": "def _parse_vcf(self) -> Dict[str, Union[str, int, float]]:\n for record in self.reader:\n\n data: Dict[str, Union[str, int, float]] = {\n 'chrom': record.CHROM,\n 'pos': record.POS,\n 'ref': record.REF,\n 'alt': record.ALT,\n 'qual': record.QUAL,\n 'id': record.ID,\n 'filter': record.FILTER,\n }\n\n info: Dict[str, Union[str, int, float]] = record.INFO\n\n for k, v in info.items():\n if isinstance(v, list):\n v: str = ';'.join(map(str, v))\n data[k] = v\n \n assert len(record.samples)==2\n \n sample_type: str = \"normal\"\n for sample in record.samples:\n sample_name: str = sample.sample\n sample_data: Dict[str, Union[str, int, float]] = sample.data\n for k, v in sample_data._asdict().items():\n if isinstance(v, list):\n v: str = ';'.join([str(val) for val in v])\n k: str = '{}_{}'.format(sample_type, k)\n data[k] = v\n sample_type: str = \"tumor\"\n yield data", "title": "" }, { "docid": "0801de21696b2d15d84acb3ca9ce0e84", "score": "0.54155296", "text": "def parse_output(data_output_file):\n # parse the file\n with open(data_output_file, 'r') as fil:\n dict_ = {}\n # go through all the lines in the file\n for line in fil:\n ll = line.strip()\n if ll[0].isalpha():\n # if the first character in a line is a letter\n # it's the beginning of a new variable\n key = ll\n # make a new dict_ entry if it doesn't exist and\n # initialise as empty list\n if not dict_.has_key(key):\n dict_[key] = []\n else:\n # do nothing and continue to fill the list\n pass\n else:\n # otherwise read the data\n ll = ll.split() # split at white space\n num = [float(el) for el in ll] # convert to float\n dict_[key].append(num) # append to the list\n\n # convert to 
numpy arrays\n for key in dict_:\n dict_[key] = N.array(dict_[key]).squeeze()\n return dict_", "title": "" }, { "docid": "0b48469a55bd8853ead29ff3aab6be61", "score": "0.54101497", "text": "def parse_pref(file):\n dict = {}\n with open(file) as f:\n raw_content = f.read()\n lines = raw_content.splitlines(True)[1:]\n for line in lines:\n student_id = int(line.split('\\t')[0])\n pref_list_line = line.split('\\t')[1]\n pref_list = [int(x) for x in pref_list_line.split()]\n dict[student_id] = pref_list\n return dict", "title": "" }, { "docid": "0b4ef894db2b569d47a5e14008dd05e6", "score": "0.5404602", "text": "def read_data_to_list(self, *delimiters) -> dict:", "title": "" }, { "docid": "a00d2b437b8bd9726ef4f7535b4a2f26", "score": "0.54021627", "text": "def make_dictionary(line):", "title": "" }, { "docid": "43cf8ef825551a0fe09fc96ee45c150f", "score": "0.5401121", "text": "def setup_data(input_filename: str) -> InputData:\n data: Dict[str, Any] = {\"signal\": {}, \"background\": {}}\n with uproot.open(input_filename) as f:\n for region in [\"signal\", \"background\"]:\n for rp in [\"inclusive\", \"in_plane\", \"mid_plane\", \"out_of_plane\"]:\n data[region][rp] = f[f\"{region}Dominated_{rp}\"]\n\n return data", "title": "" }, { "docid": "472b3ab63ba213dde3834d796852d3eb", "score": "0.5398357", "text": "def parse_ttd_file(self, file: str) -> dict:\n parsed_data = collections.defaultdict(dict) # type: ignore\n\n # wish they'd make this file easier to parse\n seen_dashed_lines = 0\n dashed_line_re = re.compile(r'^-+\\n')\n blank_line_re = re.compile(r'^\\s*$')\n\n with open(file, 'r') as fh:\n for line in fh:\n if dashed_line_re.match(line):\n seen_dashed_lines = seen_dashed_lines + 1\n continue\n\n if seen_dashed_lines < 2 or blank_line_re.match(line):\n continue\n\n (target_id, abbrev, data_list) = self.parse_line(line)\n\n if target_id not in parsed_data:\n parsed_data[target_id] = dict()\n\n if abbrev not in parsed_data[target_id]:\n parsed_data[target_id][abbrev] = []\n\n parsed_data[target_id][abbrev].append(data_list)\n\n return parsed_data", "title": "" }, { "docid": "f56504515cae2994f921ccb6f542856b", "score": "0.53923154", "text": "def process_stadium_map_file(p):\n\n\n f = open(p)\n \n nm = []\n \n for line in f:\n if line.strip() and not line.startswith(\"*\"):\n fields = [e.strip() for e in line.split(',')]\n fields = [e for e in fields if e]\n if len(fields) == 3:\n team, stadium, start = fields\n\n # Process date at the correct time so we don't have to do this?\n #end = unicode(datetime.date.today().year)\n end = str(datetime.date.today().year)\n elif len(fields) == 4:\n team, stadium, start, end = fields\n else:\n print(\"Incorrect fields for stadium map: %s\" % fields)\n continue\n\n\n start = correct_date(start, start=True)\n end = correct_date(end, start=False)\n\n nm.append({\n 'team': team,\n 'stadium': stadium.strip(),\n 'start': start,\n 'end': end,\n })\n return nm", "title": "" }, { "docid": "d784c63604f8f6cb4a008fb0d531d510", "score": "0.5390056", "text": "def _read_raw_calib_file(self, filepath):\n # From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py\n\n data = {}\n with open(filepath, 'r') as f:\n for line in f.readlines():\n key, value = line.split(':', 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n return data", "title": "" }, { "docid": "d1320700e1464e9d5d6be636c53c39a1", "score": "0.5388462", "text": "def 
read_excel(filename: str) -> dict:\n with open(filename, 'r') as file:\n usr_inputs = {}\n\n for line in file.readlines()[1:]: # ignore the cloumn titles\n info = line.split()\n\n attr, *data = info # unpack each line\n data = list(map(float, data)) # convert str to float\n usr_inputs[attr] = data\n\n return usr_inputs", "title": "" }, { "docid": "9d67502823d7a42e737e813fc595c3e3", "score": "0.53877926", "text": "def read_precursor_results(inputfile):\n\n opened = False\n if not hasattr(inputfile, 'read'):\n opened = True\n inputfile = open(inputfile, 'r')\n\n results = []\n i = 0\n for line in inputfile.readlines():\n line = line.strip()\n if line[0] == \"#\":\n continue\n if i%4 == 0:\n tmpName = line[1:].lower()\n if i%4 == 1:\n tmpSeq = line.split(\"\\t\")\n if i%4 == 2:\n tmpStart = line.split(\"\\t\")\n if i%4 == 3:\n tmpEnd = line.split(\"\\t\")\n results.append({\"seq\": tmpSeq, \"start\": tmpStart, \"end\": tmpEnd, \"name\": tmpName})\n i += 1\n\n if opened:\n inputfile.close()\n\n return results", "title": "" }, { "docid": "aab303217853b6fdd0118dd13ade35d5", "score": "0.53833747", "text": "def read_mapping_file(mapping_file):\n mapping_dict = {}\n if not mapping_file:\n return mapping_dict\n if not os.path.isfile(mapping_file):\n return mapping_dict\n\n with open(mapping_file, \"r\") as f:\n for line in f:\n if len(line) <= 2:\n continue\n line = line.split()\n if len(line) < 2:\n continue\n try:\n map_name, map_value = line[0], np.float32(line[1:])\n mapping_dict[map_name] = tuple(map_value)\n except ValueError:\n tf.logging.fatal(\n \"unknown input format: {}\".format(mapping_file))\n raise\n return mapping_dict", "title": "" }, { "docid": "ccf76cd17d6a40b880019ee23de8c153", "score": "0.5379336", "text": "def phonebook_load(filename):\n f = open(filename)\n {name: number for name, number in\n [line.rstrip(\"\\n\").split() for line in f]}\n f.close()", "title": "" }, { "docid": "b5d4617a3433112155c05e95d02218ef", "score": "0.5375964", "text": "def parse_conll(conll_file):\n f=open(conll_file,'r').read().split('\\n')\n cdict=defaultdict(list)\n clist=[]\n for line in f:\n if line.startswith('#') or line=='':\n continue\n l=line.split('\\t')\n clist.append(l)\n for line in clist:\n sent_id=line[0]\n #print sent_id\n cdict[int(sent_id)].append(line)\n return cdict", "title": "" }, { "docid": "975189b331eff346b8a299f79da237e2", "score": "0.53754264", "text": "def read_calib_file(filepath):\n data = {}\n\n with open(filepath, \"r\") as f:\n for line in f.readlines():\n key, value = line.split(\":\", 1)\n # The only non-float values in these files are dates, which\n # we don't care about anyway\n try:\n data[key] = np.array([float(x) for x in value.split()])\n except ValueError:\n pass\n\n return data", "title": "" }, { "docid": "b717db0107b108c20b627317c6101336", "score": "0.53729755", "text": "def parse_listing(file) :\n \n dictSamples = {}\n count = 0\n list_groups = []\n \n with open(file) as lines:\n for line in lines:\n \n if(count==0):\n count+=1\n \n list_groups = line.strip().split(\"\\t\")\n #print(list_groups)\n continue\n \n elements = line.strip().split(\"\\t\")\n #bcr_patient_barcode Call\n #TCGA-A8-A08F LumB\n #TCGA-A8-A09K LumA\n \n #Subject_ID Clinical.Molecular.Subtype\n #EX181420 Triple Negative\n #EX181336 Lum B\n #EX181261 Lum Unk\n \n if elements[0] not in dictSamples :\n dictSamples[elements[0]] = { }\n \n for name_group in list_groups :\n #print(name_group)\n #print(list_groups.index(name_group))\n #print(elements)\n dictSamples[elements[0]][name_group] = 
elements[list_groups.index(name_group)]\n \n lines.close()\n\n return dictSamples,list_groups", "title": "" } ]
b258891e1e264a29c11ff6d0fcfa079b
Create HDFS paths to look at from the provided main HDFS dir and the provided time range list.
[ { "docid": "9b30c587ff7b852baa427ddc269af5c9", "score": "0.7487591", "text": "def make_hdfs_path(hdir, trange):\n return ['%s/%s' % (hdir, d) for d in range_dates(trange)]", "title": "" } ]
[ { "docid": "75d211320736d6cda3ae25d0d0e34509", "score": "0.6589645", "text": "def test_hdfs_paths ():\n \"\"\"\n hdfs = PyWebHdfsClient(host='localhost',port='50070', user_name='hdfs')\n hdfs.make_dir(\"inbound/bloomberg\") \n hdfs.make_dir(\"inbound/bloomberg/ohlcv\")\n hdfs.make_dir(\"inbound/bloomberg/instrument\")\n hdfs.make_dir(\"inbound/reuters\")\n hdfs.make_dir(\"inbound/reuters/tick\")\n hdfs.make_dir(\"inbound/reuters/ohlcv\")\n hdfs.make_dir(\"inbound/reuters/depth\")\n hdfs.make_dir(\"data/store\")\n \"\"\"\n pass", "title": "" }, { "docid": "242e1da6abe4b0e77bd3c60779b6c298", "score": "0.54640007", "text": "def create_subset_folders (absolute_consistency_path):\n # Creating mapping of subset of videos\n task_mapping = {}; task_no = 0\n f = open(\"subset\",'r')\n for line in f:\n task_id = int(line.strip())\n task_mapping[task_id] = task_no\n task_no += 1\n\n video_path = os.path.join(absolute_consistency_path, 'data/coin/videos')\n subset_path = os.path.join(absolute_consistency_path, 'data/coin/subset')\n\n frames_path = os.path.join(absolute_consistency_path, 'data/coin/raw_frames')\n subset_frames_path = os.path.join(absolute_consistency_path, 'data/coin/subset_frames')\n\n # Creating symbolic links for the respective videos\n if os.path.exists(subset_path):\n shutil.rmtree(subset_path) # Remove the existing soft links\n os.mkdir(subset_path)\n\n for task_id in task_mapping:\n src = os.path.join(video_path, str(task_id))\n dst = os.path.join(subset_path, str(task_mapping[task_id]))\n os.symlink(src, dst)\n\n # Creating symbolic links for respective extracted frames\n if os.path.exists(subset_frames_path):\n shutil.rmtree(subset_frames_path) # Remove the existing soft links\n os.mkdir(subset_frames_path)\n\n task_folders = glob.glob(os.path.join(frames_path, '*'))\n for tf in task_folders:\n task_id = int(tf.split('/')[-1])\n if task_id in task_mapping:\n frame_folders = glob.glob(os.path.join(tf, '*'))\n for f in frame_folders:\n f_id = f.split('/')[-1]\n src = f\n dst = os.path.join(subset_frames_path, f_id)\n os.symlink(src, dst)\n\n return task_mapping", "title": "" }, { "docid": "c8129c5c16d816a8b66cd5784ba13671", "score": "0.5457388", "text": "def mkloc_arc(self):\n#make local subdirectories\n for i in self.dlev1: \n self.mkdirs(i)\n#create the day subdirectories\n days = calendar.monthrange(int(i[:4]),int(i[4:]))[1]\n for j in range(1,days+1): \n ddir = '{0}/{0}{1:2d}'.format(i,j).replace(' ','0')\n self.mkdirs(ddir)", "title": "" }, { "docid": "3beee9eed9f80edc6dbd042dbcc04314", "score": "0.54529214", "text": "def set_data_paths(subscription_dirname, base_dir='.'):\n # Get day in YYYY-MM-DD format\n\n day = datetime.datetime.now().strftime('%Y-%m-%d')\n\n if base_dir.startswith('/'):\n base_dir = os.path.abspath(base_dir)\n elif base_dir.startswith('~'):\n base_dir = os.path.expanduser(base_dir)\n scan_data_dir = os.path.join(base_dir, subscription_dirname, day)\n print(\"scan_data_dir\", scan_data_dir)\n raw_data_dir = scan_data_dir + '/raw'\n print(\"raw_data_dir\", raw_data_dir)\n if not os.path.exists(raw_data_dir):\n os.makedirs(raw_data_dir)\n filtered_data_dir = scan_data_dir + '/filtered'\n print(\"filtered_data_dir\", filtered_data_dir)\n if not os.path.exists(filtered_data_dir):\n os.makedirs(filtered_data_dir)\n return scan_data_dir, raw_data_dir, filtered_data_dir", "title": "" }, { "docid": "6322c9e454b04b35a925e361514d6115", "score": "0.5419313", "text": "def GetPaths():\n getent = runtime.GetEnts()\n masterd_log = 
constants.DAEMONS_LOGFILES[constants.MASTERD]\n noded_log = constants.DAEMONS_LOGFILES[constants.NODED]\n confd_log = constants.DAEMONS_LOGFILES[constants.CONFD]\n wconfd_log = constants.DAEMONS_LOGFILES[constants.WCONFD]\n luxid_log = constants.DAEMONS_LOGFILES[constants.LUXID]\n rapi_log = constants.DAEMONS_LOGFILES[constants.RAPI]\n mond_log = constants.DAEMONS_LOGFILES[constants.MOND]\n metad_log = constants.DAEMONS_LOGFILES[constants.METAD]\n\n mond_extra_log = constants.DAEMONS_EXTRA_LOGFILES[constants.MOND]\n metad_extra_log = constants.DAEMONS_EXTRA_LOGFILES[constants.METAD]\n\n jobs_log = pathutils.GetLogFilename(\"jobs\")\n\n rapi_dir = os.path.join(pathutils.DATA_DIR, \"rapi\")\n cleaner_log_dir = os.path.join(pathutils.LOG_DIR, \"cleaner\")\n master_cleaner_log_dir = os.path.join(pathutils.LOG_DIR, \"master-cleaner\")\n\n # A note on the ordering: The parent directory (type C{DIR}) must always be\n # listed before files (type C{FILE}) in that directory. Once the directory is\n # set, only files directly in that directory can be listed.\n paths = [\n (pathutils.DATA_DIR, DIR, 0o755, getent.masterd_uid, getent.masterd_gid),\n (pathutils.CLUSTER_DOMAIN_SECRET_FILE, FILE, 0o640,\n getent.masterd_uid, getent.masterd_gid, False),\n (pathutils.CLUSTER_CONF_FILE, FILE, 0o640,\n getent.masterd_uid, getent.confd_gid, False),\n (pathutils.LOCK_STATUS_FILE, FILE, 0o640,\n getent.masterd_uid, getent.confd_gid, False),\n (pathutils.TEMP_RES_STATUS_FILE, FILE, 0o640,\n getent.masterd_uid, getent.confd_gid, False),\n (pathutils.CONFD_HMAC_KEY, FILE, 0o440,\n getent.confd_uid, getent.masterd_gid, False),\n (pathutils.SSH_KNOWN_HOSTS_FILE, FILE, 0o644,\n getent.masterd_uid, getent.masterd_gid, False),\n (pathutils.RAPI_CERT_FILE, FILE, 0o440,\n getent.rapi_uid, getent.masterd_gid, False),\n (pathutils.SPICE_CERT_FILE, FILE, 0o440,\n getent.noded_uid, getent.masterd_gid, False),\n (pathutils.SPICE_CACERT_FILE, FILE, 0o440,\n getent.noded_uid, getent.masterd_gid, False),\n (pathutils.NODED_CERT_FILE, FILE, pathutils.NODED_CERT_MODE,\n getent.masterd_uid, getent.masterd_gid, False),\n (pathutils.NODED_CLIENT_CERT_FILE, FILE, pathutils.NODED_CERT_MODE,\n getent.masterd_uid, getent.masterd_gid, False),\n (pathutils.WATCHER_PAUSEFILE, FILE, 0o644,\n getent.masterd_uid, getent.masterd_gid, False),\n ]\n\n ss = ssconf.SimpleStore()\n for ss_path in ss.GetFileList():\n paths.append((ss_path, FILE, constants.SS_FILE_PERMS,\n getent.noded_uid, getent.noded_gid, False))\n\n paths.extend([\n (pathutils.QUEUE_DIR, DIR, 0o750, getent.masterd_uid, getent.daemons_gid),\n (pathutils.QUEUE_DIR, QUEUE_DIR, constants.JOB_QUEUE_FILES_PERMS,\n getent.masterd_uid, getent.daemons_gid),\n (pathutils.JOB_QUEUE_DRAIN_FILE, FILE, 0o644,\n getent.masterd_uid, getent.daemons_gid, False),\n (pathutils.JOB_QUEUE_LOCK_FILE, FILE, constants.JOB_QUEUE_FILES_PERMS,\n getent.masterd_uid, getent.daemons_gid, False),\n (pathutils.JOB_QUEUE_SERIAL_FILE, FILE, constants.JOB_QUEUE_FILES_PERMS,\n getent.masterd_uid, getent.daemons_gid, False),\n (pathutils.JOB_QUEUE_VERSION_FILE, FILE, constants.JOB_QUEUE_FILES_PERMS,\n getent.masterd_uid, getent.daemons_gid, False),\n (pathutils.JOB_QUEUE_ARCHIVE_DIR, DIR, 0o750,\n getent.masterd_uid, getent.daemons_gid),\n (rapi_dir, DIR, 0o750, getent.rapi_uid, getent.masterd_gid),\n (pathutils.RAPI_USERS_FILE, FILE, 0o640,\n getent.rapi_uid, getent.masterd_gid, False),\n (pathutils.RUN_DIR, DIR, 0o775, getent.masterd_uid, getent.daemons_gid),\n (pathutils.SOCKET_DIR, DIR, 0o770, getent.masterd_uid, 
getent.daemons_gid),\n (pathutils.MASTER_SOCKET, FILE, 0o660,\n getent.masterd_uid, getent.daemons_gid, False),\n (pathutils.QUERY_SOCKET, FILE, 0o660,\n getent.luxid_uid, getent.daemons_gid, False),\n (pathutils.BDEV_CACHE_DIR, DIR, 0o755,\n getent.noded_uid, getent.masterd_gid),\n (pathutils.UIDPOOL_LOCKDIR, DIR, 0o750,\n getent.noded_uid, getent.masterd_gid),\n (pathutils.DISK_LINKS_DIR, DIR, 0o755,\n getent.noded_uid, getent.masterd_gid),\n (pathutils.CRYPTO_KEYS_DIR, DIR, 0o700,\n getent.noded_uid, getent.masterd_gid),\n (pathutils.IMPORT_EXPORT_DIR, DIR, 0o755,\n getent.noded_uid, getent.masterd_gid),\n (pathutils.LOG_DIR, DIR, 0o770, getent.masterd_uid, getent.daemons_gid),\n (masterd_log, FILE, 0o600, getent.masterd_uid, getent.masterd_gid, False),\n (confd_log, FILE, 0o600, getent.confd_uid, getent.masterd_gid, False),\n (wconfd_log, FILE, 0o600, getent.wconfd_uid, getent.masterd_gid, False),\n (luxid_log, FILE, 0o600, getent.luxid_uid, getent.masterd_gid, False),\n (noded_log, FILE, 0o600, getent.noded_uid, getent.masterd_gid, False),\n (rapi_log, FILE, 0o600, getent.rapi_uid, getent.masterd_gid, False),\n (mond_log, FILE, 0o600, getent.mond_uid, getent.masterd_gid, False),\n (mond_extra_log[\"access\"], FILE, 0o600,\n getent.mond_uid, getent.masterd_gid, False),\n (mond_extra_log[\"error\"], FILE, 0o600,\n getent.mond_uid, getent.masterd_gid, False),\n (metad_log, FILE, 0o600, getent.metad_uid, getent.metad_gid, False),\n (metad_extra_log[\"access\"], FILE, 0o600,\n getent.metad_uid, getent.metad_gid, False),\n (metad_extra_log[\"error\"], FILE, 0o600,\n getent.metad_uid, getent.metad_gid, False),\n (jobs_log, FILE, 0o600, getent.luxid_uid, getent.luxid_gid, False),\n (pathutils.LOG_OS_DIR, DIR, 0o750, getent.noded_uid, getent.daemons_gid),\n (pathutils.LOG_XEN_DIR, DIR, 0o750, getent.noded_uid, getent.daemons_gid),\n (pathutils.LOG_KVM_DIR, DIR, 0o750, getent.noded_uid, getent.daemons_gid),\n (cleaner_log_dir, DIR, 0o750, getent.noded_uid, getent.noded_gid),\n (master_cleaner_log_dir, DIR, 0o750,\n getent.masterd_uid, getent.masterd_gid),\n (pathutils.INSTANCE_REASON_DIR, DIR, 0o755, getent.noded_uid,\n getent.noded_gid),\n (pathutils.LIVELOCK_DIR, DIR, 0o750,\n getent.masterd_uid, getent.daemons_gid),\n (pathutils.LUXID_MESSAGE_DIR, DIR, 0o750, getent.masterd_uid,\n getent.daemons_gid),\n ])\n\n return paths", "title": "" }, { "docid": "2f90d76938c580db71d68da0ca14c9d5", "score": "0.5409928", "text": "def _make_data_folder(Dataset : _BaseDataset, pair_path_list : List[List[str]]) -> int:\n data_gt_dir_path = Path(\"./data/gt/dataset_train\")\n data_tracker_dir_path = Path(\"./data/trackers/dataset_train/data\")\n\n if Dataset.get_name() == \"DAVIS\":\n data_gt_dir_path = data_gt_dir_path.joinpath(\"class\")\n data_tracker_dir_path = data_tracker_dir_path.joinpath(\"class\")\n\n elif Dataset.get_name() in [\"Kitti2DBox\", \"KittiMOTS\"]:\n data_gt_dir_path = data_gt_dir_path.joinpath(\"label_02\")\n\n elif Dataset.get_name() in [\"MOTSChallenge\", \"MotChallenge2DBox\"]: \n data_gt_dir_path = data_gt_dir_path.parent\n data_tracker_dir_path = data_tracker_dir_path.parent.joinpath(\"data\")\n seqmap_dir_path = data_gt_dir_path.joinpath(\"seqmaps\")\n\n elif Dataset.get_name() == \"YouTubeVIS\":\n data_gt_dir_path = Path(\"./data/gt/youtube_vis_train/\")\n data_tracker_dir_path = Path(\"./data/trackers/youtube_vis_train/youtube_vis_train/data/\")\n\n # Directories for any dataset\n makedirs(data_gt_dir_path) \n makedirs(data_tracker_dir_path)\n \n # Directories for MOT seqmap\n if 
Dataset.get_name() in [\"MotChallenge2DBox\", \"MOTSChallenge\"]:\n makedirs(seqmap_dir_path)\n if Dataset.get_name() == \"MotChallenge2DBox\":\n seqmap_full_path = seqmap_dir_path.joinpath(\"dataset-train.txt\")\n else: # MOTS\n seqmap_full_path = seqmap_dir_path.joinpath(\"MOTS-train.txt\")\n with open(seqmap_full_path, \"a+\") as file:\n file.write(\"name\\n\")\n\n for i,pair_path in enumerate(pair_path_list):\n # Name local variables\n gt_full_path = Path(pair_path[0])\n tracker_full_path = Path(pair_path[1])\n gt_file_name = gt_full_path.name\n tracker_file_name = tracker_full_path.name\n \n # Check files existence\n if not( gt_full_path.is_file() and tracker_full_path.is_file() ):\n print(\"FILES DON'T EXIST\")\n print(\"gt file path: {}\".format(gt_full_path))\n print(\"tracker file path: {}\".format(tracker_full_path))\n return 1\n \n # Kitti seqmap append\n if Dataset.get_name() in [\"Kitti2DBox\", \"KittiMOTS\"] :\n # Get seqmap info \n with open(gt_full_path, 'r') as file:\n first_frame_number = file.readline().split(\" \")[0]\n for line in file:\n pass\n # Last frame number must be incremented\n last_frame_number = str(int(line.split(\" \")[0]) + 1)\n # Set kitti seqmap\n kitti_dir_path = data_gt_dir_path.parent\n if Dataset.get_name() == \"Kitti2DBox\":\n seqmap_file_name = \"evaluate_tracking.seqmap.train\"\n else: # Kitti MOTS\n seqmap_file_name = \"evaluate_mots.seqmap.train\" \n # Write in file\n with open(kitti_dir_path.joinpath(seqmap_file_name), 'a+') as file:\n file.write(gt_full_path.stem + \" empty \" + first_frame_number +\n \" \" + last_frame_number + '\\n')\n \n # MOT(S) Challenge seqmap\n if Dataset.get_name() in [\"MotChallenge2DBox\", \"MOTSChallenge\"]:\n # Get seqLength\n seqLength : str = \"\"\n with open(gt_full_path, 'r') as file:\n for line in file:\n pass\n if Dataset.get_name() == \"MotChallenge2DBox\":\n seqLength = line.split(\",\")[0] \n else: # MOTS \n seqLength = line.split(\" \")[0]\n\n # Create seqinfo.ini\n seq_name : str = \"seq_{}\".format(i+1)\n data_seq_dir_path : Path = data_gt_dir_path.joinpath(seq_name)\n data_seq_gt_dir_path : Path = data_seq_dir_path.joinpath(\"gt\")\n makedirs(data_seq_gt_dir_path)\n with open(data_seq_dir_path.joinpath(\"seqinfo.ini\"), \"w+\") as file:\n file.write(\n dedent(\n \"\"\"\\\n [Sequence]\n name={}\n seqLength={}\n \"\"\".format(\n seq_name, seqLength\n )\n )\n )\n \n # Append seqmaps file\n with open(seqmap_full_path, 'a+') as file:\n file.write(seq_name + \"\\n\")\n \n # Copy files\n if not ( Dataset.get_name() in [\"MotChallenge2DBox\", \"MOTSChallenge\"] ):\n copyfile(gt_full_path, data_gt_dir_path.joinpath(gt_file_name))\n copyfile(tracker_full_path, data_tracker_dir_path.joinpath(tracker_file_name))\n else: # MOT(S) has a special hierarchy\n copyfile(gt_full_path, data_seq_gt_dir_path.joinpath(\"gt.txt\"))\n copyfile(tracker_full_path, data_tracker_dir_path.joinpath(seq_name + \".txt\"))\n \n return 0", "title": "" }, { "docid": "4f4af64268311780cc8c6da3f3fb35e8", "score": "0.53454536", "text": "def push_GSC_onto_HDFS(program,URI,start_date,end_date):\n create_hdfs_dir = \"hadoop fs -mkdir /csv_web\"\n copyTohadoop = \"hadoop fs -copyFromLocal ./%s /web_data\" %(dir_path)# fix this <-- add user input\n\n #Pull GSC data into local file\n subprocess.run([program,URI,start_date,end_date])\n # check if filepath exits\n ## if filepath does not exist create the file PATH\n subprocess.run(create_hdfs_dir)\n subprocess.run(copyTohadoop)", "title": "" }, { "docid": "4a1f4b30c4e31aa761df6056cd48235e", "score": 
"0.5291028", "text": "def generate_currents_hdf5(timestart, timeend, path, outpath, compression_level = 1):\n \n # generate list of dates from daterange given\n daterange = [parse(t) for t in [timestart, timeend]]\n U_files = []\n V_files = []\n T_files = []\n\n # append all filename strings within daterange to lists\n for day in range(np.diff(daterange)[0].days):\n datestamp = daterange[0] + timedelta(days=day)\n datestr1 = datestamp.strftime('%d%b%y').lower()\n datestr2 = datestamp.strftime('%Y%m%d')\n \n # check if file exists. exit if it does not. add path to list if it does.\n # U files\n U_path = f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_grid_U.nc'\n if not os.path.exists(U_path):\n print(f'File {U_path} not found. Check Directory and/or Date Range.')\n return\n U_files.append(U_path)\n # V files\n V_path = f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_grid_V.nc'\n if not os.path.exists(V_path):\n print(f'File {V_path} not found. Check Directory and/or Date Range.')\n return\n V_files.append(V_path)\n # T files\n T_path = f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_grid_T.nc'\n if not os.path.exists(T_path):\n print(f'File {T_path} not found. Check Directory and/or Date Range.')\n return\n T_files.append(T_path)\n \n print('\\nAll source files found')\n # string: output folder name with date ranges used. end date will be lower by a day than timeend because datasets only go until midnight\n folder = str(datetime(parse(timestart).year, parse(timestart).month, parse(timestart).day).strftime('%d%b%y').lower()) + '-' + str(datetime(parse(timeend).year, parse(timeend).month, parse(timeend).day-1).strftime('%d%b%y').lower())\n # create output directory\n dirname = f'{outpath}hdf5/{folder}/'\n if not os.path.exists(os.path.dirname(dirname)):\n try:\n os.makedirs(os.path.dirname(dirname))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n print(f'\\nOutput directory {dirname} created\\n')\n # create hdf5 file and create tree structure\n\n return U_files, V_files, T_files, dirname, compression_level", "title": "" }, { "docid": "744c2677008f037be95acc16d490bc58", "score": "0.5240763", "text": "def get_paths():\n paths = []\n for source in sources:\n for year in years:\n year = str(year)\n for month in range(1, 13):\n month = '%02d' % month # Pad zero for single digit months\n path = basepath + \"/\" + source + \"/\" + year + \"/\" + month\n paths.append(path)\n return paths", "title": "" }, { "docid": "b0f8734d753856778a3646532871f6c5", "score": "0.5187343", "text": "def get_file_list(datadescriptor, starttime, endtime, cfg, scan=None):\n ndays = int(np.ceil(((endtime-starttime).total_seconds())/(3600.*24.)))\n radarnr, datagroup, datatype, dataset, product = get_datatype_fields(\n datadescriptor)\n ind_rad = int(radarnr[5:8])-1\n\n if (datatype == 'Nh') or (datatype == 'Nv'):\n datatype = 'dBZ'\n\n t_filelist = []\n for i in range(ndays):\n if datagroup == 'RAINBOW':\n if scan is None:\n warn('Unknown scan name')\n return None\n daydir = (\n starttime+datetime.timedelta(days=i)).strftime('%Y-%m-%d')\n dayinfo = (starttime+datetime.timedelta(days=i)).strftime('%Y%m%d')\n datapath = cfg['datapath'][ind_rad] + scan + daydir + '/'\n if (not os.path.isdir(datapath)):\n warn(\"WARNING: Unknown datapath '%s'\" % datapath)\n continue\n dayfilelist = glob.glob(datapath+dayinfo+'*'+datatype+'.*')\n for filename in dayfilelist:\n t_filelist.append(filename)\n elif datagroup == 'RAD4ALP':\n if scan is None:\n warn('Unknown scan name')\n return None\n dayinfo = 
(starttime+datetime.timedelta(days=i)).strftime('%y%j')\n basename = ('P'+cfg['RadarRes'][ind_rad] +\n cfg['RadarName'][ind_rad]+dayinfo)\n if cfg['path_convention'] == 'LTE':\n yy = dayinfo[0:2]\n dy = dayinfo[2:]\n subf = ('P' + cfg['RadarRes'][ind_rad] +\n cfg['RadarName'][ind_rad] + yy + 'hdf' + dy)\n datapath = cfg['datapath'][ind_rad] + subf + '/'\n else:\n datapath = cfg['datapath'][ind_rad] + dayinfo + '/' + basename + '/'\n if (not os.path.isdir(datapath)):\n warn(\"WARNING: Unknown datapath '%s'\" % datapath)\n continue\n dayfilelist = glob.glob(datapath+basename+'*.'+scan+'*')\n for filename in dayfilelist:\n t_filelist.append(filename)\n elif datagroup == 'CFRADIAL':\n daydir = (\n starttime+datetime.timedelta(days=i)).strftime('%Y-%m-%d')\n dayinfo = (starttime+datetime.timedelta(days=i)).strftime('%Y%m%d')\n datapath = (\n cfg['loadbasepath'][ind_rad]+cfg['loadname'][ind_rad]+'/' +\n daydir+'/'+dataset+'/'+product+'/')\n if (not os.path.isdir(datapath)):\n warn(\"WARNING: Unknown datapath '%s'\" % datapath)\n continue\n dayfilelist = glob.glob(datapath+dayinfo+'*'+datatype+'.nc')\n for filename in dayfilelist:\n t_filelist.append(filename)\n elif datagroup == 'MXPOL':\n if scan is None:\n warn('Unknown scan name')\n return None\n if cfg['path_convention'] == 'LTE':\n sub1 = str(starttime.year)\n sub2 = starttime.strftime('%m')\n sub3 = starttime.strftime('%d')\n datapath = cfg['datapath'][ind_rad] + '/' + sub1 + '/' + sub2 + '/' + sub3 + '/'\n basename = 'MXPol-polar-' + starttime.strftime('%Y%m%d') + '-*-' + scan + '*'\n dayfilelist = glob.glob(datapath+basename)\n else:\n warn(\"insert MCH convention here\")\n return None\n for filename in dayfilelist:\n t_filelist.append(filename)\n filelist = []\n for filename in t_filelist:\n filenamestr = str(filename)\n fdatetime = get_datetime(filenamestr, datadescriptor)\n if (fdatetime >= starttime) and (fdatetime <= endtime):\n filelist.append(filenamestr)\n\n return sorted(filelist)", "title": "" }, { "docid": "eec3d421abe644999392078337d276c0", "score": "0.5173419", "text": "def generate_filelist(start, end=None, station='ghb', coord_format='XYZ'):\n end = start if end is None else end\n if (type(start) is dt.datetime) and (type(end) is dt.datetime) and (start <= end):\n searchlist = pd.date_range(start=start, end=end).to_pydatetime().tolist()\n filelist = []\n for date in searchlist:\n year = date.year\n month = date.month\n day = date.day\n for root, dirs, files in os.walk('{0}/{1}/{2:02}/'.format(datapath_local, year, month)):\n filelist.extend([root + file for file in files if ((os.stat(root + file).st_size > 0) and\n (station.upper() in file) and\n ('{}{:02}{:02}'.format(year,month,day) in file) and\n (coord_format.upper() in file))])\n\n return sorted(filelist)", "title": "" }, { "docid": "5099502986c0225d8ab9e2ae7c75d4b4", "score": "0.51085", "text": "def _create_paths(self, basedir, name=None):\n if name:\n datapath = os.path.join(basedir, name)\n else:\n datapath = basedir\n\n dbpath = os.path.join(datapath, 'db')\n if not os.path.exists(dbpath):\n os.makedirs(dbpath)\n if self.args['verbose']:\n print 'creating directory: %s'%dbpath\n\n return datapath", "title": "" }, { "docid": "979570bb3efa711bac5eac1db5f0acd8", "score": "0.5064477", "text": "def make_dirs(dtype, raw_dir, download, dload_date, anneal_name, pid_name):\n\n paths = {}\n\n # If no download date is provided, assume the data is being downloaded and set date to today\n if dload_date==None: \n # Setting today's/download date for directory namess\n now = 
Time.now()\n dload_date = now.strftime('%Y%b%d')\n print('Download date: ', dload_date)\n \n # Setting the names of the raw, anneal, raw copied data, CTE corrected, and download directories to paths\n paths['RAW_DIR'] = raw_dir\n if dtype=='dark':\n paths['ANN_DIR'] = os.path.join(paths['RAW_DIR'], anneal_name + 'anneal_rawdarks_aquery' + dload_date)\n paths['RWD_DIR'] = os.path.join(paths['ANN_DIR'], 'raw_darks')\n paths['CTE_CORR_DIR'] = os.path.join(paths['ANN_DIR'], 'ctecorr_darks')\n paths['DLD_DIR'] = os.path.join(paths['ANN_DIR'], 'mastDownload', 'HST') #Naming convention from MAST, don't need to make DLD_DIR\n if dtype=='science':\n paths['PID_DIR'] = os.path.join(paths['RAW_DIR'], pid_name + '_rawdata_aquery' + dload_date)\n paths['RWD_DIR'] = os.path.join(paths['PID_DIR'], 'raw_sci')\n paths['CTE_CORR_DIR'] = os.path.join(paths['PID_DIR'], 'ctecorr_sci')\n paths['DLD_DIR'] = os.path.join(paths['PID_DIR'], 'mastDownload', 'HST') #Naming convention from MAST, don't need to make DLD_DIR\n paths['CW3_DIR'] = os.path.join(paths['PID_DIR'], 'calwf3_sci')\n \n # Making RAW_DIR if it doesn't exist\n if not os.path.exists(paths['RAW_DIR']): \n os.makedirs(paths['RAW_DIR'], 0o774)\n print('Created raw data directory: ', paths['RAW_DIR'])\n \n if dtype=='dark': \n # Making the subdirectories if they don't exist\n if os.path.exists(paths['ANN_DIR']): \n print('Anneal directory already exists!!! Not creating sub-directories.')\n else:\n os.mkdir(paths['ANN_DIR'], 0o774)\n print('Created anneal cycle directory: {}'.format(paths['ANN_DIR']))\n\n os.mkdir(paths['RWD_DIR'], 0o774)\n print('Created raw darks directory for copied raw files: {}'.format(paths['RWD_DIR']))\n\n os.mkdir(paths['CTE_CORR_DIR'], 0o774)\n print('Created CTE corrected darks directory: {}'.format(paths['CTE_CORR_DIR']))\n\n if dtype=='science':\n # Making the subdirectories if they don't exist\n if os.path.exists(paths['PID_DIR']): \n print('Proposal ID directory already exists!!! 
Not creating sub-directories.')\n else:\n os.mkdir(paths['PID_DIR'], 0o774)\n print('Created proposal ID directory: {}'.format(paths['PID_DIR']))\n\n os.mkdir(paths['RWD_DIR'], 0o774)\n print('Created raw science data directory for copied raw files: {}'.format(paths['RWD_DIR']))\n\n os.mkdir(paths['CTE_CORR_DIR'], 0o774)\n print('Created CTE corrected science data directory: {}'.format(paths['CTE_CORR_DIR']))\n\n os.mkdir(paths['CW3_DIR'], 0o774)\n print('Created directory for processing science data with calwf3.e software: {}'.format(paths['CW3_DIR']))\n\n return paths", "title": "" }, { "docid": "46984292edff86892048358312005e52", "score": "0.5049378", "text": "def get_local_files(sdate,edate,twin,product,\n dict_for_sub=None,path_local=None):\n filelst = []\n pathlst = []\n tmpdate = sdate-timedelta(minutes=twin)\n if path_local is None:\n print('path_local is None -> checking config file')\n while (tmpdate <= edate + relativedelta(months=+1)):\n try:\n # create local path for each time\n path_template = \\\n satellite_dict[product]['dst'].get(\n 'path_template')\n strsublst = \\\n satellite_dict[product]['dst'].get('strsub')\n subdict = \\\n make_subdict(strsublst,\n class_object_dict=dict_for_sub)\n path_local = make_pathtofile(path_template,\\\n strsublst,subdict)\n path_local = (\n os.path.join(\n path_local,\n tmpdate.strftime('%Y'),\n tmpdate.strftime('%m'))\n )\n print(path_local)\n if os.path.isdir(path_local):\n tmplst = np.sort(os.listdir(path_local))\n filelst.append(tmplst)\n pathlst.append([os.path.join(path_local,e)\n for e in tmplst])\n tmpdate = tmpdate + relativedelta(months=+1)\n path_local = None\n except Exception as e:\n print(e)\n tmpdate = tmpdate + relativedelta(months=+1)\n filelst = np.sort(flatten(filelst))\n pathlst = np.sort(flatten(pathlst))\n else:\n filelst = np.sort(os.listdir(path_local))\n pathlst = [os.path.join(path_local,e) for e in filelst]\n idx_start,tmp = check_date(filelst, sdate - timedelta(minutes=twin))\n tmp,idx_end = check_date(filelst, edate + timedelta(minutes=twin))\n if idx_end == 0:\n idx_end = len(pathlst)-1\n del tmp\n pathlst = np.unique(pathlst[idx_start:idx_end+1])\n filelst = np.unique(filelst[idx_start:idx_end+1])\n print (str(int(len(pathlst))) + \" valid files found\")\n return pathlst, filelst", "title": "" }, { "docid": "4e1badb25628c1d6ce670ff8d2828230", "score": "0.50186074", "text": "def _prepare_paths(base_dir, has_offset, previous_model=None, intercept_only=False):\n if intercept_only:\n feature_dir = None\n feature_file = None\n else:\n feature_dir = os.path.join(base_dir, \"featureList\")\n feature_file = os.path.join(feature_dir, \"global\")\n metadata_dir = os.path.join(base_dir, \"metadata\")\n all_paths = AllPaths(\n training_data_dir=os.path.join(base_dir, \"trainingData\"),\n validation_data_dir=os.path.join(base_dir, \"validationData\"),\n metadata_file=os.path.join(metadata_dir, \"tensor_metadata.json\"),\n feature_file=feature_file,\n training_score_dir=os.path.join(base_dir, \"trainingScore\"),\n validation_score_dir=os.path.join(base_dir, \"validationScore\"),\n output_model_dir=os.path.join(base_dir, \"modelOutput\"))\n if feature_dir:\n tf.io.gfile.mkdir(feature_dir)\n tf.io.gfile.mkdir(metadata_dir)\n tf.io.gfile.mkdir(all_paths.training_data_dir)\n tf.io.gfile.mkdir(all_paths.validation_data_dir)\n tf.io.gfile.mkdir(all_paths.output_model_dir)\n tf.io.gfile.mkdir(all_paths.training_score_dir)\n tf.io.gfile.mkdir(all_paths.validation_score_dir)\n if feature_file:\n 
_create_feature_file(all_paths.feature_file)\n _create_metadata_file(all_paths.metadata_file, has_offset)\n if previous_model is not None:\n _write_model(previous_model, all_paths.feature_file, all_paths.output_model_dir, intercept_only)\n return all_paths", "title": "" }, { "docid": "76e8076bd624ef1efa305c9557aff7b2", "score": "0.5000719", "text": "def __get_history_paths(self, time_val, utc_now):\n\n files = []\n if len(time_val) > 20 or time_val.startswith(\"now-\"):\n if time_val.startswith(\"now-\"):\n start = utc_now\n finish = self.__utc_format(time_val[4:],\n utc_now)\n else:\n # our ranges are 19 chars of timestamp, a '-',\n # and another timestamp\n start = self.__utc_format(time_val[:19],\n utc_now)\n finish = self.__utc_format(time_val[20:],\n utc_now)\n if start > finish:\n raise apx.HistoryRequestException(_(\"Start \"\n \"time must be older than finish time: \"\n \"{0}\").format(time_val))\n files = self.__get_history_range(start, finish)\n else:\n # there can be multiple event files per timestamp\n prefix = self.__utc_format(time_val, utc_now)\n files = glob.glob(os.path.join(self._img.history.path,\n \"{0}*\".format(prefix)))\n if not files:\n raise apx.HistoryRequestException(_(\"No history \"\n \"entries found for {0}\").format(time_val))\n return files", "title": "" }, { "docid": "a32bdfc4cfdc89631a41e2158d477ab1", "score": "0.499292", "text": "def create_final_locations(self):\t\n\n\t\t# for organizing projects by date:\n\t\ttoday = date.today()\n\t\tyear = today.year\n\t\tmonth = today.month\n\n\t\t# double-check that the config file had a valid path to a directory\n\t\tif os.path.isdir(self.config_params_dict.get('destination_path')):\n\n\t\t\t# try to create the directory-- it may already exist, in which case we catch the exception and move on.\n\t\t\t# Any other errors encountered in creating the directory will cause pipeline to exit\n\t\t\ttry:\n\t\t\t\tyear_dir = os.path.join(self.config_params_dict.get('destination_path'), str(year))\n\t\t\t\tif not os.path.isdir(year_dir):\n\t\t\t\t\tos.mkdir(year_dir)\n\t\t\t\t\tcorrect_permissions(year_dir)\n\t\t\t\t\tlogging.info('Creating new year-level directory at %s' % year_dir)\n\t\t\t\tmonth_dir = os.path.join(year_dir, str(month))\n\t\t\t\tif not os.path.isdir(month_dir):\n\t\t\t\t\tos.mkdir(month_dir)\n\t\t\t\t\tcorrect_permissions(month_dir)\n\t\t\t\t\tlogging.info('Creating new month-level directory at %s' % month_dir)\t\t\t\t\t\n\n\t\t\texcept OSError as ex:\n\t\t\t\tlogging.error('Exception occured:')\n\t\t\t\tlogging.error(ex.strerror)\n\t\t\t\tsys.exit(1)\n\n\t\t\t# check that we do have a destination directory to go to.\n\t\t\tif os.path.isdir(month_dir):\n\t\t\t\tself.target_dir = month_dir\n\t\t\t\tself.fc_index_map = {}\n\t\t\t\tfor project_id in self.project_id_list:\n\t\t\t\t\tPipeline.create_project_structure(self, project_id)\n\t\t\telse:\n\t\t\t\tlogging.error('Target directory %s does not exist for some reason. Maybe permissions?' 
% target_dir)\n\t\t\t\tsys.exit(1)\n\t\telse:\n\t\t\tlogging.error('The path supplied as the final destination base directory is not, in fact, a directory')\n\t\t\tsys.exit(1)", "title": "" }, { "docid": "fd5f4e75158563ffcdc4bc2fffafccc0", "score": "0.49891597", "text": "def create_logfiles(logfile_fullpaths:list):\n for logfile_fullpath in logfile_fullpaths:\n if not os.path.isfile(logfile_fullpath):\n touch(logfile_fullpath)", "title": "" }, { "docid": "3ebf22ff23608583efc39a0ad84dd1a0", "score": "0.49848986", "text": "def _find_io_files_for_renaming(\n top_input_dir_name, first_date_unix_sec, last_date_unix_sec,\n top_output_dir_name):\n\n dates_unix_sec = time_periods.range_and_interval_to_list(\n start_time_unix_sec=first_date_unix_sec,\n end_time_unix_sec=last_date_unix_sec, time_interval_sec=DAYS_TO_SECONDS,\n include_endpoint=True)\n\n date_strings = [\n time_conversion.unix_sec_to_string(t, DATE_FORMAT)\n for t in dates_unix_sec\n ]\n\n num_dates = len(date_strings)\n input_file_names_by_date = [numpy.array([], dtype=object)] * num_dates\n output_file_names_by_date = [numpy.array([], dtype=object)] * num_dates\n valid_times_by_date_unix_sec = [numpy.array([], dtype=int)] * num_dates\n\n for i in range(num_dates):\n print('Finding input files for date {0:s}...'.format(date_strings[i]))\n\n these_input_file_names = tracking_io.find_files_one_spc_date(\n spc_date_string=date_strings[i],\n source_name=tracking_utils.PROBSEVERE_NAME,\n top_tracking_dir_name=top_input_dir_name,\n tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,\n raise_error_if_missing=True\n )[0]\n\n these_input_file_names.sort()\n these_valid_times_unix_sec = numpy.array([\n tracking_io.file_name_to_time(f) for f in these_input_file_names\n ], dtype=int)\n\n these_output_file_names = []\n for t in these_valid_times_unix_sec:\n these_output_file_names.append(tracking_io.find_file(\n valid_time_unix_sec=t,\n source_name=tracking_utils.PROBSEVERE_NAME,\n top_tracking_dir_name=top_output_dir_name,\n tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,\n raise_error_if_missing=False\n ))\n\n input_file_names_by_date[i] = numpy.array(\n these_input_file_names, dtype=object)\n output_file_names_by_date[i] = numpy.array(\n these_output_file_names, dtype=object)\n valid_times_by_date_unix_sec[i] = these_valid_times_unix_sec\n\n print(SEPARATOR_STRING)\n\n return (input_file_names_by_date, output_file_names_by_date,\n valid_times_by_date_unix_sec)", "title": "" }, { "docid": "441963f91a98f866ebacbd32ce416e13", "score": "0.4938589", "text": "def _setFilePaths(self, start=None, end=None):\n if not start:\n start = self.start_block\n if not end:\n start = self.end_block\n\n self.f_pickle = \"{}/pickles/{}_{}.p\".format(DATADIR, start, end)\n self.f_graph = \"{}/graphs/{}_{}.gt\".format(DATADIR, start, end)\n self.f_snapshot = \"{}/snapshots/{}_{}.png\".format(DATADIR, start, end)", "title": "" }, { "docid": "6516675cd73d03fe0bbed6dd0d0cb6fb", "score": "0.49346396", "text": "def download_directories(self, pathlist, destdir, osl):\n raise NotImplementedError", "title": "" }, { "docid": "8bbee81a9d2f2926cd9d43ff712e768b", "score": "0.49263975", "text": "def search_path(path, time_range, lon_range=None, lat_range=None):\n\n time_range = [pd.Timestamp(t) for t in time_range]\n\n # First, go the day-of-year folder.\n doy = time_range[0].dayofyear\n\n search_path = Path(path).joinpath('{:03}'.format(doy))\n\n all_files = np.array(sorted(search_path.rglob('OR_GLM*')))\n\n times = np.array(filename2date(all_files))\n\n # To make sure 
we don't miss any files, buffer the times a bit.\n # Since GLM files are 20 seconds, this should be sufficient.\n # We'll make it a little bigger just to be sure...\n BUFFER = pd.Timedelta(30, 's')\n\n idx = (times >= (time_range[0]-BUFFER)) & (times <= (time_range[1]+BUFFER))\n\n if not np.count_nonzero(idx):\n print('No files found')\n return None\n\n # These are the files that should contain our range.\n files = all_files[idx]\n\n all_g = GLM(files)\n\n fl_t = all_g.flashes.time\n\n # Sigh, can't do datetime64[ns] and Timestamp.\n # And, we don't have an quick way to convert an array of datetime64 to Timestamp.\n good_idx = (fl_t >= np.datetime64(time_range[0])) & (fl_t <= np.datetime64(time_range[1]))\n\n if lat_range is not None:\n fl_lat = all_g.flashes.lat\n lat_idx = (fl_lat >= lat_range[0]) & (fl_lat <= lat_range[1])\n\n good_idx = good_idx & lat_idx\n\n if lon_range is not None:\n fl_lon = all_g.flashes.lon\n lon_idx = (fl_lon >= lon_range[0]) & (fl_lon <= lon_range[1])\n\n good_idx = good_idx & lon_idx\n # Make sure we have _something_\n if not np.count_nonzero(good_idx):\n print('No data found in provided range')\n return None\n else:\n fl = all_g.flashes[good_idx]\n\n ev, grp = all_g.get_groups(fl.id, combine=True, events=True)\n\n g = GLM()\n g.flashes = Ltg(fl)\n g.groups = Ltg(grp)\n g.events = Ltg(ev)\n\n return g", "title": "" }, { "docid": "7ac6fd52c197d422e9be9327d6355be9", "score": "0.49068555", "text": "def _CreateTestData(self, testdata):\n pathspecs = []\n files = []\n for filepath, localfile in testdata:\n files.append(open(localfile, \"rb\"))\n\n p = rdf_paths.PathSpec(path=filepath)\n pathspecs.append(p)\n\n return pathspecs, files", "title": "" }, { "docid": "29c335111da0b48c24cfcb7e52e0cced", "score": "0.48979846", "text": "def _add_tmp(self, path_list, test_path):\n return [os.path.join(test_path, p) for p in path_list]", "title": "" }, { "docid": "7d251a7e6657056b88ead981cee42030", "score": "0.48933828", "text": "def timeseries_path(cfg):\n output_folder = os.path.join(cfg.output_root, 'check_output', 'timeseries')\n tools.create_dir(output_folder, 'timeseries')\n output_path = os.path.join(cfg.output_root, 'check_output', 'timeseries')\n \n return output_path", "title": "" }, { "docid": "4c77782c041e348d241f468bcdeef803", "score": "0.48810452", "text": "def test_date_dir_names(self):\n sep = \"__\"\n n = 5\n date_format = '%m-%d:%H-%M-%S'\n path = os.path.join(self.base_path, \"date\")\n\n epc = ExperimentsPathController(folder_name_type='date', sep=sep,\n date_format=date_format)\n\n for _ in range(n):\n epc(path)\n exp_dir_name = datetime.now().strftime(date_format)\n dirs = os.listdir(path)\n self.assertTrue(exp_dir_name in dirs)\n time.sleep(1)", "title": "" }, { "docid": "63c23c4a261c8026906c7e3f61f7011b", "score": "0.48587406", "text": "def locate_directories():\r\n # Get the current working directory --should be the StoveOpt one\r\n current_working_dir = os.getcwd() # absolute path of current working direcrtory\r\n print(\"here is your current WD:\" + current_working_dir)\r\n \r\n # Steps from the StoveOpt parent folder to the counterFlowFlame2D folder\r\n dir_steps = \"/foamfiles/counterFlowFlame2D/\"\r\n \r\n # Extra steps to the various cases\r\n step_25 = \"case_25/\"\r\n step_50 = \"case_50/\"\r\n step_100 = \"case_100/\"\r\n step_125 = \"case_125/\"\r\n step_150 = \"case_150/\"\r\n \r\n # Full filepaths for the various cases\r\n path_25 = current_working_dir + dir_steps + step_25\r\n path_50 = current_working_dir + dir_steps + step_50\r\n 
path_125 = current_working_dir + dir_steps + step_125\r\n path_150 = current_working_dir + dir_steps + step_150\r\n path_100 = current_working_dir + dir_steps + step_100\r\n \r\n # return the \r\n return path_100, path_25, path_50, path_125, path_150", "title": "" }, { "docid": "93293b85424002bff058e292069a37dd", "score": "0.48488086", "text": "def gen_eval_dirs():\r\n date_time = strftime(\"%Y%m%d-%H%M%S\", localtime())\r\n # Generate run dir\r\n run_dir = os.path.join('evals',date_time)\r\n if not os.path.exists(run_dir):\r\n os.makedirs(run_dir)\r\n return run_dir", "title": "" }, { "docid": "28c44579094038db42027c0c12c23a66", "score": "0.48288724", "text": "def buildDirectories(self, PFCnames, timesteps, dataPath, clobberFlag=True, chmod=0o774, UID=-1, GID=-1):\n for t in timesteps:\n #Build timestep directory\n if dataPath[-1]!='/':\n timeDir = dataPath + '/{:06d}/'.format(t)\n else:\n timeDir = dataPath + '{:06d}/'.format(t)\n\n #don't overwrite time directories, just PFC directories\n self.makeDir(timeDir, clobberFlag=False, mode=chmod, UID=UID, GID=GID)\n\n #build directory for each PFC partname\n for name in PFCnames:\n pfcDir = timeDir + name\n #overwrite PFC directories\n self.makeDir(pfcDir, clobberFlag=True, mode=chmod, UID=UID, GID=GID)\n #set tree permissions\n self.recursivePermissions(timeDir, UID, GID, chmod)\n return", "title": "" }, { "docid": "63286c73a1b00a7d9dd49d8bd2ab8afd", "score": "0.4818065", "text": "def create_paths(args: argparse.Namespace) -> argparse.Namespace:\n time_stamp = \"{:%Y%b%d-%H%M%S}\".format(datetime.now())\n print(time_stamp)\n if not hasattr(args, \"out_dir\") or args.out_dir is None:\n if not os.path.isdir(\"./results\"):\n os.mkdir(\"./results\")\n out_dir = f\"./results/{time_stamp}_{args.experiment:s}\"\n os.mkdir(out_dir)\n args.out_dir = out_dir\n elif not os.path.isdir(args.out_dir):\n raise Exception(f\"Directory {args.out_dir} does not exist.\")\n\n if not hasattr(args, \"run_id\"):\n args.run_id = 0\n\n return args", "title": "" }, { "docid": "77b3f5ce28ff44d85f6f7a9fccae0a16", "score": "0.48166665", "text": "def create_file_paths(self):\n\n if self.job_dir is None:\n self.job_dir = os.path.basename(self.input_file).split('.')[0]\n\n if self.job_id is None:\n self.job_id = self.job_dir\n\n self.job_dir = os.path.join(self.simulations_dir, self.job_dir)\n\n # Create Processes directories\n self.processes_dir = [os.path.join(self.job_dir, self.preprocessing_dir),\n os.path.join(self.job_dir, self.simulation_dir),\n os.path.join(self.job_dir, self.postprocessing_dir)]\n\n # Redundancy\n self.preprocessing_dir = self.processes_dir[0]\n self.simulation_dir = self.processes_dir[1]\n self.postprocessing_dir = self.processes_dir[2]\n\n # Redirect to the correct process folder\n if self.process == 'preprocessing':\n indx = 0\n else:\n # Note that Postprocessing needs the link to simulation's folder\n # because that is where I look for energy files and pickle files\n indx = 1\n\n # Equilibration directory and sub_dir\n self.equilibration_dir = os.path.join(self.processes_dir[indx], self.equilibration_dir)\n self.eq_dump_dir = os.path.join(self.equilibration_dir, 'dumps')\n # Production dir and sub_dir\n self.production_dir = os.path.join(self.processes_dir[indx], self.production_dir)\n self.prod_dump_dir = os.path.join(self.production_dir, \"dumps\")\n\n # Production phase filenames\n self.prod_energy_filename = os.path.join(self.production_dir, \"ProductionEnergy_\" + self.job_id + '.csv')\n self.prod_ptcls_filename = 
os.path.join(self.prod_dump_dir, \"checkpoint_\")\n\n # Equilibration phase filenames\n self.eq_energy_filename = os.path.join(self.equilibration_dir, \"EquilibrationEnergy_\" + self.job_id + '.csv')\n self.eq_ptcls_filename = os.path.join(self.eq_dump_dir, \"checkpoint_\")\n\n # Magnetic dir\n if self.electrostatic_equilibration:\n self.magnetization_dir = os.path.join(self.processes_dir[indx], self.magnetization_dir)\n self.mag_dump_dir = os.path.join(self.magnetization_dir, \"dumps\")\n # Magnetization phase filenames\n self.mag_energy_filename = os.path.join(self.magnetization_dir,\n \"MagnetizationEnergy_\" + self.job_id + '.csv')\n self.mag_ptcls_filename = os.path.join(self.mag_dump_dir, \"checkpoint_\")\n\n if self.process == 'postprocessing':\n indx = 2 # Redirect to the correct folder\n\n # Log File\n if self.log_file is None:\n self.log_file = os.path.join(self.processes_dir[indx], \"log_\" + self.job_id + \".out\")\n else:\n self.log_file = os.path.join(self.processes_dir[indx], self.log_file)", "title": "" }, { "docid": "2377696c15caed65bf948a3c3c09c055", "score": "0.48017627", "text": "def get_existing_paths(self, run_federated_query_benchmark):\n\n def _path_exists(path_details):\n \"\"\"Adds a path to the path_set if it exists.\n\n Constructs a path based off of the parameters in the path_details\n tuple. Checks that the constructed path exists in the bucket\n defined in the outer function. If so, the path is added to path_set.\n\n Args:\n path_details (tuple): of\n (file_type,\n num_column,\n column_type,\n num_file,\n table_size)\n \"\"\"\n file_type, \\\n num_column, \\\n column_type, \\\n num_file, \\\n table_size = path_details\n for compression_type in compression_types[file_type]:\n if compression_type == 'none':\n extension = file_type\n else:\n extension = compression_extensions[compression_type]\n\n path = path_string.format(\n file_type,\n compression_type,\n num_column,\n column_type,\n num_file,\n table_size,\n extension,\n )\n exists = storage.Blob(\n bucket=bucket,\n name=path,\n ).exists(gcs_client)\n total_table_size = int(num_file) * \\\n int(table_size.split('MB')[0])\n if exists:\n if run_federated_query_benchmark and \\\n (compression_type == 'snappy' or\n total_table_size > MB_IN_TB):\n continue\n path_set.add(path)\n\n logging.info('Discovering files from parameters list that exist'\n ' in bucket {0:s}.'.format(self.bucket_name))\n if run_federated_query_benchmark:\n logging.info(\n 'External queries on snappy compressed files are not '\n 'supported. 
Snappy files will not be added to set of existing '\n 'paths.')\n logging.info(\n 'Only paths that will result in table less than 1 TB will be '\n 'added to the set of existing paths to ensure limit query cost.'\n )\n file_types = self.file_params['fileType']\n compression_types = self.file_params['fileCompressionTypes']\n num_columns = self.file_params['numColumns']\n column_types = self.file_params['columnTypes']\n num_files = self.file_params['numFiles']\n table_sizes = self.file_params['stagingDataSizes']\n compression_extensions = (\n file_constants.FILE_CONSTANTS['compressionExtensions'])\n path_set = set()\n path_string = ('fileType={0:s}/compression={1:s}/numColumns={2:d}/'\n 'columnTypes={3:s}/numFiles={4:d}/tableSize={5:s}/'\n 'file1.{6:s}')\n\n gcs_client = storage.Client(project=self.project_id)\n bucket = gcs_client.get_bucket(self.bucket_name)\n\n with ThreadPoolExecutor() as p:\n p.map(\n _path_exists,\n itertools.product(\n file_types,\n num_columns,\n column_types,\n num_files,\n table_sizes,\n ))\n\n logging.info('Done discovering {0:d} existing files.'.format(\n len(path_set)))\n return path_set", "title": "" }, { "docid": "82651f2c9678e0419559f43e0dc8136d", "score": "0.4800954", "text": "def get_ranged(base_path):\n return [base_path.replace('<range>/', ''),\n base_path.replace('<range>', '<datetime:start_time>/to/<datetime:end_time>')]", "title": "" }, { "docid": "a2a794ecdabb8f1d023c38aaf36337c4", "score": "0.47957456", "text": "def performArnoldPathmapping( startFrame, endFrame, tempLocation=None ):\n if tempLocation:\n performArnoldPathmapping.tempLocation = tempLocation\n else:\n if not performArnoldPathmapping.tempLocation:\n raise ValueError( \"The first call made to performArnoldPathmapping must provided a tempLocation\" )\n \n #a simple regex for finding frame numbers\n frameRE = re.compile( r'#+' )\n \n # Define a function that will be used when looping to replace padding with a 0 padded string.\n def __replaceHashesWithZeroPaddedFrame( frameNum, origFileName ):\n return frameRE.sub( lambda matchObj: str( frameNum ).zfill( len(matchObj.group(0)) ), origFileName )\n\n standInObjects = maya.cmds.ls( type=\"aiStandIn\" )\n for standIn in standInObjects:\n try:\n # If we have already seen this node before then grab the settings that we need\n origDir, origFileName = performArnoldPathmapping.originalProperties[ standIn ]\n except KeyError:\n # If we have not seen this node before then store it's original path and update the path in the node to where we will be pathmapping the file.\n standinFile = maya.cmds.getAttr( standIn + \".dso\" )\n\n if not standinFile or os.path.splitext( standinFile )[ 1 ].lower() != \".ass\":\n # If the standinFile isn't set or isn't .ass file then we cannot pathmap it.\n continue\n\n origDir, origFileName = os.path.split( standinFile )\n standinTempLocation = os.path.join( performArnoldPathmapping.tempLocation, standIn )\n\n maya.cmds.setAttr( \"%s.dso\" % standIn, os.path.join( standinTempLocation, origFileName ), type=\"string\" )\n #Create the Temp directory the first time we see a new standin\n if not os.path.isdir( standinTempLocation ):\n os.makedirs( standinTempLocation )\n\n performArnoldPathmapping.originalProperties[ standIn ] = (origDir, origFileName)\n\n for frame in range( startFrame, endFrame + 1 ):\n # evaluate the frame that the node is using (Normally it will be the same as the scene but it can be different)\n evalFrame = maya.cmds.getAttr( \"%s.frameNumber\" % standIn, time=frame )\n fileNameWithFrame = 
__replaceHashesWithZeroPaddedFrame( evalFrame, origFileName )\n\n # If we have already mapped this file then continue.\n if not ( standIn, fileNameWithFrame ) in performArnoldPathmapping.mappedFiles:\n #Perform pathmapping\n runPathmappingOnFile(\n os.path.join( origDir, fileNameWithFrame ),\n os.path.join( performArnoldPathmapping.tempLocation, standIn, fileNameWithFrame )\n )\n performArnoldPathmapping.mappedFiles.add( ( standIn, fileNameWithFrame ) )", "title": "" }, { "docid": "2179469c279c86bf402f21ad933d9ed9", "score": "0.47901127", "text": "def gen_train_dirs():\r\n date_time = strftime(\"%Y%m%d-%H%M%S\", localtime())\r\n # Define paths\r\n run_dir = os.path.join('runs',date_time)\r\n logs_dir = os.path.join(run_dir,'logs')\r\n settings_path = os.path.join(run_dir,'settings.txt')\r\n weight_dir = os.path.join(run_dir,'weights')\r\n img_dir = os.path.join(run_dir,'images')\r\n # Create dirs\r\n if not os.path.exists(run_dir):\r\n os.makedirs(run_dir)\r\n os.makedirs(weight_dir)\r\n os.makedirs(img_dir)\r\n os.makedirs(logs_dir)\r\n # Set pwd to run dir\r\n os.chdir(run_dir)", "title": "" }, { "docid": "d33f86428f923a5919a954f71c1d363e", "score": "0.47871578", "text": "def init_config_paths():\n\n time_prefix = get_rollover_time_str(time.time())\n\n config['mode_dir'] = 'backtest/' if config['enable_backtest'] else 'monitor/'\n config['mode_path'] = config['user_path'] + config['node_dir'] + config['mode_dir']\n config['logs_path'] = config['mode_path'] + defaults.LOGS_DIR + time_prefix + '/'\n config['charts_path'] = config['mode_path'] + defaults.CHARTS_DIR + time_prefix + '/'\n config['snapshot_path'] = config['mode_path'] + defaults.SNAPSHOT_DIR + time_prefix + '/'\n config['state_path'] = config['mode_path'] + defaults.STATE_DIR\n config['alert_log'] = config['mode_path'] + defaults.LOGS_DIR + defaults.ALERT_LOG\n config['output_log'] = config['logs_path'] + defaults.OUTPUT_LOG\n config['debug_log'] = config['logs_path'] + defaults.DEBUG_LOG\n config['error_log'] = config['logs_path'] + defaults.ERROR_LOG", "title": "" }, { "docid": "b3078a330b6412104c79099910f7fb3b", "score": "0.47729492", "text": "def bfsfilepaths(lane, starttimestr, band, bf_data_dir, port0, stnid):\n port = port0 + lane\n pre_bf_dir, pst_bf_dir = bf_data_dir.split('?')\n outdumpdir = pre_bf_dir + str(lane) + pst_bf_dir\n outfilepre = \"udp_\" + stnid\n rcumode = ilisa.observations.modeparms.band2rcumode(band)\n outarg = os.path.join(outdumpdir, outfilepre)\n dumplogname = os.path.join(outdumpdir, '{}_lane{}_rcu{}.log'.format(dumpername,\n lane,\n rcumode))\n datafileguess = outarg + '_' + str(port) + '.start.' 
+ starttimestr + '.000'\n return outdumpdir, outarg, datafileguess, dumplogname", "title": "" }, { "docid": "2bfd10e435a65c4e8c45e85da354068d", "score": "0.4768646", "text": "def dummy_filelist(tmp_path):\n root_file = tmp_path / \"root_file.txt\"\n root_file.touch()\n\n first_dir = tmp_path / \"first\"\n first_dir.mkdir()\n\n second_dir = first_dir / \"second\"\n second_dir.mkdir()\n\n third_dir = second_dir / \"third\"\n third_dir.mkdir()\n\n for i in range(10):\n f = first_dir / f\"first_{i:02d}.txt\"\n f.touch()\n f = second_dir / f\"second_{i:02d}.txt\"\n f.touch()\n f = third_dir / f\"third_{i:02d}.txt\"\n f.touch()\n\n return tmp_path", "title": "" }, { "docid": "4af8781b60259a120a63443545bfb907", "score": "0.47487435", "text": "def _prepare_dirs_for_loss(self, target_dirs: List[Tensor]):\n raise NotImplementedError", "title": "" }, { "docid": "86a15a05f8114b13005f360d50f18b6b", "score": "0.47358495", "text": "def make_training_set(path_hdfs, pos_disp_dir, neg_disp_dir,\n pos_dir, neg_dir, pos_file, neg_file, max_count):\n key_to_path = {}\n for (d1, d2) in [(pos_disp_dir, pos_dir), (neg_disp_dir, neg_dir)]:\n if not os.path.exists(d2):\n os.makedirs(d2)\n key_to_path.update([(os.path.splitext(\n os.path.basename(f))[0], d2) for f in glob.glob('%s/*' % d1)])\n\n pos_fp = open(pos_file, 'w')\n neg_fp = open(neg_file, 'w')\n\n count = 0\n for k, (i, bs) in hadoopy.readtb(path_hdfs):\n try:\n path = key_to_path[k]\n # save the original image\n filename = '%s/%s.jpg' % (path, k)\n print(filename)\n with open(filename, 'wb') as f:\n f.write(i)\n # update the positive/negative training lists\n if path == pos_dir:\n pos_fp.write('%s %i' % (filename, len(bs)))\n for b in bs:\n pos_fp.write(' %i %i %i %i' % (\n b[0], b[1], b[2] - b[0] + 1, b[3] - b[1] + 1))\n pos_fp.write('\\n')\n else:\n neg_fp.write('%s\\n' % filename)\n except KeyError:\n pass\n # update count and break loop if necessary\n # TODO(Vlad): can we slice notation on a list of generators?\n count += 1\n if count > max_count:\n break\n\n pos_fp.close()\n neg_fp.close()", "title": "" }, { "docid": "576ae7e4aba18c529c7c175264faed48", "score": "0.47349834", "text": "def create_dirs(timestamp, config):\n\tpaths = {}\n\n\tpath_experiment = os.path.join(config[\"experiment\"][\"experiments_path\"], config[\"experiment\"][\"name\"])\n\t\n\tpaths[\"experiment\"] = path_experiment\n\t\n\t# init\n\tif not os.path.exists(path_experiment):\n\t\tos.makedirs(paths[\"experiment\"])\n\n\t# dynamically add sudirectories directories\n\tfor key in [\"checkpoints\", \"configs\", \"tensorboardX\"]:\n\t\tpath = os.path.join(paths[\"experiment\"], key)\n\t\tpaths[key] = path\n\n\n\t\tif (not (key == \"tensorboardX\")) or ((key == \"tensorboardX\") and config[\"visualisation\"][\"use_tensorboardX\"]):\n\t\t\tif not os.path.exists(path):\n\t\t\t\tos.makedirs(paths[key])\n\n\treturn paths", "title": "" }, { "docid": "50c8f1974475611ddb52a291f75333f1", "score": "0.47319135", "text": "def createPaths(self, prefix = './'):\n\n dt = datetime.datetime.now()\n self.path = \"{0}_{1}_{2:04d}{3:02d}{4:02d}_{5:02d}{6:02d}{7:02d}\".format(\n prefix, self.name, dt.year, dt.month, dt.day, dt.hour,\n dt.minute, dt.second)\n #self.path = \"%s_%s_%04d%02d%02d_%02d%02d%02d\" % \\\n #(prefix, self.name,\n #dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)\n\n try:\n self.path = os.path.abspath(self.path) #convert to proper absolute path\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n except OSError as ex:\n console.terse(\"Error: creating server 
log directory '{0}'\\n\".format(ex))\n return False\n\n console.concise(\" Created Server {0} Log Directory = '{1}'\\n\".format(self.name, self.path))\n\n self.logPath = os.path.join(self.path, \"{0}.txt\".format('log'))\n self.logPath = os.path.abspath(self.logPath) #convert to proper absolute path\n\n return True", "title": "" }, { "docid": "ecaf93341c75aee3ef0079f67aa91d96", "score": "0.47236225", "text": "def evsam_monit_etf(startTIS, limitTIS):\n # ############################################################## #\n # fill evsam_glbl_etf with SAM ETF probe results of CMS services #\n # ############################################################## #\n global evsam_glbl_etf\n PATH_HDFS_PREFIX = \"/project/monitoring/archive/sam3/raw/metric/\"\n\n # prepare service hostname list:\n # ==============================\n hostnames = []\n for cmssites in evsam_glbl_topology:\n for service in evsam_glbl_topology[ cmssites ]:\n if service['host'] not in hostnames:\n hostnames.append( service['host'] )\n\n # prepare HDFS subdirectory list:\n # ===============================\n logging.info(\"Retrieving SAM ETF probe result docs from MonIT HDFS\")\n logging.log(15, \" starting %s, limit %s\" %\n (time.strftime(\"%Y-%b-%d %H:%M\", time.gmtime(startTIS)),\n time.strftime(\"%Y-%b-%d %H:%M\", time.gmtime(limitTIS))))\n #\n tisDay = 24*60*60\n ts = time.gmtime( startTIS )\n startMidnight = calendar.timegm( ts[:3] + (0, 0, 0) + ts[6:] )\n now = int( time.time() )\n startTmpArea = max( calendar.timegm( time.gmtime( now - (6 * tisDay) ) ),\n startTIS - tisDay)\n limitLocalTmpArea = calendar.timegm( time.localtime( now ) ) + tisDay\n #\n dirList = []\n for dirDay in range(startMidnight, limitTIS, tisDay):\n dirList.append( time.strftime(\"%Y/%m/%d\", time.gmtime( dirDay )) )\n for dirDay in range(startTmpArea, limitLocalTmpArea, tisDay):\n dirList.append( time.strftime(\"%Y/%m/%d.tmp\", time.gmtime( dirDay )) )\n del(dirDay)\n\n versions = {}\n try:\n with pydoop.hdfs.hdfs() as myHDFS:\n fileHndl = None\n fileObj = None\n fileName = None\n fileNames = None\n for subDir in dirList:\n logging.debug(\" checking HDFS directory %s\" % subDir)\n if not myHDFS.exists( PATH_HDFS_PREFIX + subDir ):\n continue\n # get list of files in directory:\n myList = myHDFS.list_directory( PATH_HDFS_PREFIX + subDir )\n fileNames = [ d['name'] for d in myList\n if (( d['kind'] == \"file\" ) and ( d['size'] != 0 )) ]\n del(myList)\n for fileName in fileNames:\n logging.debug(\" file %s\" % os.path.basename(fileName))\n fileHndl = None\n fileObj = None\n try:\n if ( os.path.splitext(fileName)[-1] == \".gz\" ):\n fileHndl = myHDFS.open_file(fileName)\n fileObj = gzip.GzipFile(fileobj=fileHndl)\n else:\n fileObj = myHDFS.open_file(fileName)\n # read documents and add relevant records to list:\n for myLine in fileObj:\n myJson = json.loads(myLine.decode('utf-8'))\n if (( 'metadata' not in myJson ) or\n ( 'data' not in myJson )):\n continue\n if (( 'topic' not in myJson['metadata'] ) or\n ( 'kafka_timestamp' not in myJson['metadata'] ) or\n ( 'timestamp' not in myJson['data'] ) or\n ( 'dst_hostname' not in myJson['data'] ) or\n ( 'service_flavour' not in myJson['data'] ) or\n ( 'metric_name' not in myJson['data'] ) or\n ( 'status' not in myJson['data'] ) or\n ( 'vo' not in myJson['data'] )):\n continue\n if ( myJson['metadata']['topic'] != \"sam3_raw_metric\" ):\n continue\n #\n if ( myJson['data']['vo'] != \"cms\" ):\n continue\n #\n probeTIS = int(myJson['data']['timestamp']/1000)\n if ( probeTIS < startTIS ):\n continue\n if ( 
probeTIS >= limitTIS ):\n continue\n #\n if myJson['data']['dst_hostname'] not in hostnames:\n continue\n #\n key = ( probeTIS,\n myJson['data']['dst_hostname'],\n myJson['data']['service_flavour'],\n myJson['data']['metric_name'] )\n version = myJson['metadata']['kafka_timestamp']\n if key in versions:\n if ( version <= versions[key] ):\n continue\n #\n versions[key] = version\n logging.log(9, (\" adding %s result of %s / \" +\n \"%s\") %\n (key[3].split(\"-/cms/Role=\",1)[0],\n key[1], key[2]))\n evsam_glbl_etf[key] = myJson['data']['status']\n\n except json.decoder.JSONDecodeError as excptn:\n logging.error(\"JSON decoding failure, file %s: %s\" %\n (fileName, str(excptn)))\n except FileNotFoundError as excptn:\n logging.error(\"HDFS file not found, %s: %s\" %\n (fileName, str(excptn)))\n except IOError as excptn:\n logging.error(\"HDFS access failure, file %s: %s\" %\n (fileName, str(excptn)))\n finally:\n if fileObj is not None:\n fileObj.close()\n if fileHndl is not None:\n fileHndl.close()\n del(fileHndl)\n del(fileObj)\n del(fileName)\n del(fileNames)\n except IOError:\n logging.error(\"Failed to fetch SAM ETF probe results from MonIT HDFS\")\n\n logging.info(\" found %d relevant SAM ETF probe results in MonIT\" %\n len(evsam_glbl_etf))\n return", "title": "" }, { "docid": "e98826d9fba52d0fc86d1e10cb1cdf9c", "score": "0.47212765", "text": "def list_data_files(upath, stream='all', start_t=None, end_t=None):\n\n ## Walk the user path and get all .csv files\n flist = []\n for path, subdirs, files in os.walk(upath):\n for name in files:\n if name.endswith('.csv'):\n flist.append(os.path.join(path, name))\n \n ## Subset out stream if necessary\n ## Note nested surveys require indexing at -3, the rest at -2.\n if stream != 'all':\n flist = [f for f in flist if (f.split('/')[-2].startswith(stream)) or \n (f.split('/')[-3].startswith(stream))]\n \n ## Filter out empty files\n ## NOTE: This should **not** be necessary with new processing code\n flist = [f for f in flist if row_count(f) > 0]\n \n ## Subset by time if end or start time specified\n if (start_t is not None) or (end_t is not None):\n times = [f.split('/')[-1].split('.')[-2] for f in flist]\n times = [datetime.datetime.strptime(t, '%Y-%m-%d %H_%M_%S') \n for t in times]\n \n ## Note the one hour buffer\n if start_t is None:\n start_t = min(times) - datetime.timedelta(hours=1)\n if end_t is None:\n end_t = max(times) + datetime.timedelta(hours=1)\n \n bools = np.array([t >= start_t and t <= end_t for t in times])\n flist = list(np.array(flist)[bools])\n \n return flist", "title": "" }, { "docid": "99b8c105e8588c2cc4eb2069c6bc8e85", "score": "0.4696756", "text": "def get_htdocs_dirs(self):\n return [('timingandestimation', resource_filename(__name__, 'htdocs'))]", "title": "" }, { "docid": "53b66453565588fd2165e850f1d518e6", "score": "0.46899518", "text": "def _create_sub_dirs(self, dir_names):\n for dname in dir_names:\n dir_name = os.path.join(self.testrundir, dname)\n self._log.debug('Create directory %s', dir_name)\n os.mkdir(dir_name)", "title": "" }, { "docid": "efc2c183c6e799674b7396b2bbbcee06", "score": "0.4673381", "text": "def test_directory_origin_add_missing_dirs(sdc_builder, sdc_executor, total_time, num_of_dirs):\n\n temp_dir = sdc_executor.execute_shell(f'mktemp -d').stdout.rstrip()\n\n mkdir_builder = sdc_builder.get_pipeline_builder()\n mkdir_source = mkdir_builder.add_stage('Jython Scripting')\n mkdir_script = \"\"\"\n try:\n sdc.importLock()\n import random\n import string\n import os\n import shutil\n finally:\n 
sdc.importUnlock()\n from timeit import default_timer as timer\n\n\n def randomword(length):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length))\n\n\n PREFIX = \"%s\"\n TOTAL_TIME = %d\n NUM_OF_DIRS = %d\n start = timer()\n while timer() - start < TOTAL_TIME:\n dirs = []\n for i in range(NUM_OF_DIRS):\n dir_name = PREFIX + \"/\" + randomword(10)\n os.mkdir(dir_name)\n dirs.append( dir_name )\n for dir_name in dirs:\n shutil.rmtree(dir_name)\n \"\"\" % (temp_dir, total_time, num_of_dirs)\n # textwrap.dedent helps to strip leading whitespaces for valid Python indentation\n mkdir_source.set_attributes(user_script=textwrap.dedent(mkdir_script))\n mkdir_wiretap = mkdir_builder.add_wiretap()\n mkdir_source >> mkdir_wiretap.destination\n mkdir_pipeline = mkdir_builder.build()\n\n rddir_builder = sdc_builder.get_pipeline_builder()\n rddir_source = rddir_builder.add_stage('Directory')\n rddir_source.set_attributes(files_directory=temp_dir,\n file_name_pattern=\"*\",\n read_order='TIMESTAMP',\n process_subdirectories=True,\n data_format=\"DELIMITED\")\n rddir_wiretap = rddir_builder.add_wiretap()\n rddir_source >> rddir_wiretap.destination\n rddir_pipeline = rddir_builder.build()\n\n sdc_executor.add_pipeline(mkdir_pipeline)\n sdc_executor.add_pipeline(rddir_pipeline)\n sdc_executor.start_pipeline(rddir_pipeline)\n sdc_executor.start_pipeline(mkdir_pipeline).wait_for_finished()\n sdc_executor.stop_pipeline(rddir_pipeline, force=True)\n sdc_executor.execute_shell(f'rm -rf {temp_dir}')\n\n # look for an IOException related to temp_dir in the pipeline logs\n logs = sdc_executor.get_logs(pipeline=rddir_pipeline)\n pattern = \".*IOException.+\" + temp_dir\n regex = re.compile(pattern)\n matches = re.findall(regex, str(logs))\n if len(matches) > 0:\n for match in matches:\n logger.debug(\"Try to access a deleted directory: '%s'\", match)\n assert False, 'Should not reach here. 
Tried to read some directory that have been deleted.'", "title": "" }, { "docid": "c71a05ee8c8c198612d959a426156d44", "score": "0.46731243", "text": "def analyze_gtfs_date(date: datetime.date,\n local_full_paths: Dict[str, str],\n output_folder: str = configuration.files.full_paths.output,\n output_file_type: str = configuration.files.output_file_type) -> List[str]:\n\n date_str = date.strftime('%Y-%m-%d')\n trip_stats_output_path = join(output_folder, f'trip_stats_{date_str}.{output_file_type}')\n route_stats_output_path = join(output_folder, f'route_stats_{date_str}.{output_file_type}')\n\n feed = prepare_partridge_feed(date, local_full_paths[GTFS_FILE_NAME])\n\n tariff_path_to_use = local_full_paths[TARIFF_FILE_NAME]\n logging.info(f'Creating zones DF from {tariff_path_to_use}')\n zones = get_zones_df(tariff_path_to_use)\n\n gtfs_file_base_name = basename(local_full_paths[GTFS_FILE_NAME])\n\n ts = compute_trip_stats(feed, zones, date, gtfs_file_base_name)\n save_trip_stats(ts, trip_stats_output_path)\n log_trip_stats(ts)\n\n rs = compute_route_stats(ts, date, gtfs_file_base_name)\n save_route_stats(rs, route_stats_output_path)\n log_route_stats(rs)\n\n return [route_stats_output_path, trip_stats_output_path]", "title": "" }, { "docid": "266b3fc3b254ded672ce7192478a207f", "score": "0.46391493", "text": "def create_log_files(list_to_benchmark):\n logger.info(\"*\"*80)\n logger.info(\"Creating empty log files for all the DMS\")\n for each in list_to_benchmark:\n os.system('touch %s' % ('/var/log/' + directory_maps[each] + '/load_logs.log'))\n os.system('touch %s' % ('/var/log/' + directory_maps[each] + '/query_cold_logs.log'))\n os.system('touch %s' % ('/var/log/' + directory_maps[each] + '/query_hot_logs.log'))\n os.system('touch %s' % ('/var/log/' + directory_maps[each] + '/index_logs.log'))\n\n logger.info(\"Created empty log files for all the DMS\")\n logger.info(\"*\"*80)", "title": "" }, { "docid": "b07dcd52720290f306f5514a752cf74b", "score": "0.46348083", "text": "def _compute_daily_path(self, delta=datetime.timedelta(minutes=20)):\n data = []\n for interval in range(24 * 3 + 1):\n now = self._day + delta * interval\n azi, alt = self._sky.compute_position(self._body, now)\n data.append((azi, alt))\n self.path = list(zip(*data))", "title": "" }, { "docid": "57a47be5769ff3d3ccf01453225610fc", "score": "0.4608841", "text": "def storePaths(wavelength):\n\tnow = datetime.utcnow()\n\tstart = datetime(2010,5,1,0,0,0)\n\thalfyear = timedelta(days=182)\n\tlst = []\n\twhile (start + halfyear < now):\n\t\tlst.extend(fetch(start, start + halfyear, wavelength, td=86400))\n\t\tstart += halfyear\n\tlst.extend(fetch(start, now, wavelength, td=86400))\n\n\tqualCheck(lst, wavelength)\n\n\tf = open(wavelength + 'file_locations.txt','a')\n\tfor fle in lst:\n\t\tf.write(fle + '\\n')\n\tf.close()", "title": "" }, { "docid": "d8fa4f3f7a245fdd5e177e8f0b81ca18", "score": "0.46031603", "text": "def paths_lists(self):\n \n pass", "title": "" }, { "docid": "45b63a8e6f8595ec70235d85659a7599", "score": "0.460297", "text": "def test_date_dir_names_duplicates(self):\n sep = \"__\"\n n = 5\n date_format = '%m-%d:%H-%M-%S'\n path = os.path.join(self.base_path, \"date\")\n\n epc = ExperimentsPathController(folder_name_type='date', sep=sep,\n date_format=date_format)\n\n for _ in range(n):\n epc(path)\n\n exp_dir_name = datetime.now().strftime(date_format)\n exp_dirs = [sep.join([exp_dir_name, str(i)]) for i in range(1, n)] + \\\n [exp_dir_name]\n act_dirs = os.listdir(path)\n\n self.assertTrue(set(act_dirs) == 
set(exp_dirs))", "title": "" }, { "docid": "e0f9c5a000085abd1ffa25def6dd7e23", "score": "0.46016333", "text": "def setPathDate(rlist):\n # for ii in range(0, len(new_list)):\n # n = new_list[ii]\n # if n.startswith('Path:'):\n # new_list[ii] = 'Path: ' + getFilepath() + '\\n'\n # if n.startswith('Datum'):\n # new_list[ii] = 'Datum: ' + str(time.time()) + '\\n'\n\n return rlist", "title": "" }, { "docid": "efce7511f8eba928092ffc14fda4e7f4", "score": "0.46014518", "text": "def registerFilesOfInputDir(self, inputDir=None, input_path_list=None,\n input_site_handler=None, \\\n pegasusFolderName='', inputSuffixSet=None, indexFileSuffixSet=None,\n **keywords):\n if input_path_list is None:\n input_path_list = []\n if inputDir and os.path.isdir(inputDir):\n fnameLs = os.listdir(inputDir)\n for fname in fnameLs:\n input_path = os.path.realpath(os.path.join(inputDir, fname))\n input_path_list.append(input_path)\n\n if inputSuffixSet is None:\n inputSuffixSet = getattr(self, 'inputSuffixSet', None)\n print(f\"Registering {len(input_path_list)} input files with suffix in \"\n f\" {inputSuffixSet} ... \", flush=True, end='')\n returnData = PassingData(jobDataLs = [])\n counter = 0\n for input_path in input_path_list:\n counter += 1\n suffix = getRealPrefixSuffix(input_path)[1]\n #default fakeSuffixSet includes .gz\n if inputSuffixSet is not None and len(inputSuffixSet)>0 and suffix \\\n not in inputSuffixSet:\n #skip input whose suffix is not in inputSuffixSet \\\n # if inputSuffixSet is a non-empty set.\n continue\n if indexFileSuffixSet is not None and len(indexFileSuffixSet)>0 \\\n and suffix in indexFileSuffixSet:\n #skip index files, they are affiliates of real input data files.\n continue\n\n # Add the file to the replica catalog\n lfn = os.path.join(pegasusFolderName, os.path.basename(input_path))\n input_file = File(lfn)\n input_file.name = lfn\n self.replica_catalog.add_replica(input_site_handler, lfn=input_file, pfn=input_path)\n input_file.abspath = input_path\n jobData = PassingData(output=input_file, job=None, jobLs=[],\n file=input_file, fileLs=[input_file], indexFileLs=[])\n # Find all index files if indexFileSuffixSet is given.\n if indexFileSuffixSet:\n for indexFileSuffix in indexFileSuffixSet:\n indexFilename = '%s%s'%(input_path, indexFileSuffix)\n if os.path.isfile(indexFilename):\n indexFile = self.registerOneInputFile(\n input_path=indexFilename,\n input_site_handler=input_site_handler,\n folderName=pegasusFolderName, \\\n useAbsolutePathAsPegasusFileName=False,\n checkFileExistence=True)\n jobData.fileLs.append(indexFile)\n jobData.indexFileLs.append(indexFile)\n returnData.jobDataLs.append(jobData)\n print(f\"{len(returnData.jobDataLs)} out of {len(input_path_list)} \"\n f\"possible files registered. Done.\", flush=True)\n return returnData", "title": "" }, { "docid": "e37fa7643361450a23790b8266e392e4", "score": "0.4598848", "text": "def create_index_list(cf, d, date):\n #import datetime\n climfreq = d[\"Climatology\"]\n # firstly what climatology is requested\n if climfreq == 'Single':\n list_StDate = []\n list_EnDate = []\n if 'StartDate' in cf['Options'].keys():\n xlStDate = cf['Options']['StartDate']\n list_StDate.append(GetDateIndex(date,xlStDate,ts=d[\"flux_period\"],default=0,match='exact'))\n else:\n logger.error(\"No StartDate given. 
Define which time for footprint calculation in StartDate (DD/MM/YYYY hh:mm)\")\n\n list_EnDate.append(list_StDate[0]+1)\n\n elif climfreq == 'Special':\n list_StDate = []\n list_EnDate = []\n if 'StartDate' in cf['Options'].keys():\n xlStDate = cf['Options']['StartDate']\n print xlStDate\n list_StDate.append(GetDateIndex(date,xlStDate,ts=d[\"flux_period\"],default=0,match='exact'))\n else:\n list_StDate.append(0) # start from begin of file\n if 'EndDate' in cf['Options'].keys():\n xlEnDate = cf['Options']['EndDate']\n list_EnDate.append(GetDateIndex(date,xlEnDate,ts=d[\"flux_period\"],default=0,match='exact'))\n else:\n list_EnDate.append(len(date)-1) # run to end of file\n\n elif climfreq == 'Hourly':\n # if file is half hourly every single data is used\n if 'StartDate' in cf['Options'].keys():\n xlStDate = cf['Options']['StartDate']\n firstIdx = GetDateIndex(date,xlStDate,ts=d[\"flux_period\"],default=0,match='exact')\n else:\n firstIdx = 0 # start from begin of file\n if 'EndDate' in cf['Options'].keys():\n xlEnDate = cf['Options']['EndDate']\n lastIdx = GetDateIndex(date,xlEnDate,ts=d[\"flux_period\"],default=0,match='exact')\n else:\n lastIdx = len(date)-2 # run to end of file\n list_StDate = range(firstIdx,lastIdx)\n list_EnDate = range(firstIdx+1,lastIdx+1)\n print 'Start to End = ',list_StDate, list_EnDate\n\n elif climfreq == 'Daily':\n StDate = date[0]\n EnDate = date[-1]\n sd = pandas.date_range(start=StDate, end=EnDate, freq='D', normalize=True) # frequency daily\n ndays = len(sd)\n list_StDate = []\n list_EnDate = []\n list_StDate.append(GetDateIndex(date,sd[0],ts=d[\"flux_period\"],default=0,match='exact'))\n list_EnDate.append(GetDateIndex(date,sd[1],ts=d[\"flux_period\"],default=-1,match='exact'))\n for i in range(1,ndays-1):\n list_StDate.append(GetDateIndex(date,sd[i],ts=d[\"flux_period\"],default=0,match='exact') +1)\n list_EnDate.append(GetDateIndex(date,sd[i+1],ts=d[\"flux_period\"],default=-1,match='exact'))\n test_i = GetDateIndex(date,sd[-1],ts=d[\"flux_period\"],default=0,match='exact')\n if test_i < len(date)-2: # at least one value for the next day, so only midnight not allowed\n list_StDate.append(test_i+1)\n list_EnDate.append(len(date)-1)\n\n elif climfreq == 'Monthly':\n StDate = date[0]\n EnDate = date[-1]\n sm = pandas.date_range(start=StDate, end=EnDate, freq='MS', normalize=True) # frequency monthly\n num_int = len(sm)\n list_StDate = []\n list_EnDate = []\n test_i = GetDateIndex(date,sm[0],ts=d[\"flux_period\"],default=0,match='exact')\n if test_i > 0:\n list_StDate.append(0)\n list_EnDate.append(test_i)\n list_StDate.append(GetDateIndex(date,sm[0],ts=d[\"flux_period\"],default=0,match='exact')+1)\n list_EnDate.append(GetDateIndex(date,sm[1],ts=d[\"flux_period\"],default=-1,match='exact'))\n else:\n list_StDate.append(GetDateIndex(date,sm[0],ts=d[\"flux_period\"],default=0,match='exact'))\n list_EnDate.append(GetDateIndex(date,sm[1],ts=d[\"flux_period\"],default=-1,match='exact'))\n for i in range(1,num_int-1):\n list_StDate.append(GetDateIndex(date,sm[i],ts=d[\"flux_period\"],default=0,match='exact')+1)\n list_EnDate.append(GetDateIndex(date,sm[i+1],ts=d[\"flux_period\"],default=-1,match='exact'))\n test_i = GetDateIndex(date,sm[-1],ts=d[\"flux_period\"],default=0,match='exact')\n if test_i < len(date)-2: # at least one value for the next day, so only midnight not allowed\n list_StDate.append(test_i+1)\n list_EnDate.append(len(date)-1)\n\n elif climfreq == 'Annual':\n # Find number of years in df\n StDate = date[0]\n EnDate = date[-1]\n years_index = 
[]\n #date.apply(lambda x: x.year)\n #for i in range(min(year),max(year)+1):\n for i in range(StDate.year,EnDate.year+1):\n years_index.append(i)\n num = len(years_index)\n years_index.append(max(years_index)+1)\n #print num,years_index\n list_StDate = []\n list_EnDate = []\n st = datetime.datetime(years_index[0],1,1,0,0)\n en = datetime.datetime(years_index[1],1,1,0,0)\n list_StDate.append(GetDateIndex(date,st,ts=d[\"flux_period\"],default=0,match='exact'))\n list_EnDate.append(GetDateIndex(date,en,ts=d[\"flux_period\"],default=-1,match='exact'))\n if num > 1:\n if num > 2:\n for i in range(1,num-1):\n st = datetime.datetime(years_index[i],1,1,0,0)\n en = datetime.datetime(years_index[i+1],1,1,0,0)\n list_StDate.append(GetDateIndex(date,st,ts=d[\"flux_period\"],default=0,match='exact')+1)\n list_EnDate.append(GetDateIndex(date,en,ts=d[\"flux_period\"],default=-1,match='exact'))\n st = datetime.datetime(years_index[num-1],1,1,0,0)\n en = datetime.datetime(years_index[num],1,1,0,0)\n test_is = GetDateIndex(date,st,ts=d[\"flux_period\"],default=-1,match='exact')\n test_ie = GetDateIndex(date,en,ts=d[\"flux_period\"],default=-1,match='exact')\n if test_ie - test_is > 2:\n list_StDate.append(test_is+1)\n list_EnDate.append(test_ie)\n return list_StDate,list_EnDate", "title": "" }, { "docid": "f21c63e5031047ec5e62ba60ae289e20", "score": "0.45858327", "text": "def evsam_monit_fetch(t15bins, t1bins, t6bins, t24bins):\n # ##################################################################### #\n # fill evsam_glbl_monitdocs with site/service status of CMS sites/hosts #\n # ##################################################################### #\n global evsam_glbl_monitdocs\n PATH_HDFS_PREFIX = \"/project/monitoring/archive/cmssst/raw/ssbmetric/\"\n\n # prepare HDFS subdirectory list:\n # ===============================\n logging.info(\"Retrieving CMS SAM site/service status docs from MonIT HDFS\")\n #\n tisDay = 24*60*60\n now = int( time.time() )\n startTmpArea = calendar.timegm( time.gmtime( now - (6 * tisDay) ) )\n limitLocalTmpArea = calendar.timegm( time.localtime( now ) ) + tisDay\n #\n dirList = []\n #\n if ( len(t15bins) > 0 ):\n logging.log(15, \" 15 min time bins %d (%s), ..., %d (%s)\" %\n (t15bins[0], time.strftime(\"%Y-%b-%d %H:%M:%S\",\n time.gmtime(t15bins[0]*900)),\n t15bins[-1], time.strftime(\"%Y-%b-%d %H:%M:%S\",\n time.gmtime((t15bins[-1]*900)+899))))\n lowestTbin = now\n for tbin in t15bins:\n if ( tbin < lowestTbin ):\n lowestTbin = tbin\n dirString = time.strftime(\"sam15min/%Y/%m/%d\",\n time.gmtime( tbin * 900 ))\n if dirString not in dirList:\n dirList.append( dirString )\n for dirDay in range(max( startTmpArea, lowestTbin - tisDay ),\n limitLocalTmpArea, tisDay):\n dirList.append( time.strftime(\"sam15min/%Y/%m/%d.tmp\",\n time.gmtime( dirDay )) )\n #\n if ( len(t1bins) > 0 ):\n logging.log(15, \" 1 hour time bins %d (%s), ..., %d (%s)\" %\n (t1bins[0], time.strftime(\"%Y-%b-%d %H:%M\",\n time.gmtime(t1bins[0]*3600)),\n t1bins[-1], time.strftime(\"%Y-%b-%d %H:%M\",\n time.gmtime((t1bins[-1]*3600)+3599))))\n lowestTbin = now\n for tbin in t1bins:\n if ( tbin < lowestTbin ):\n lowestTbin = tbin\n dirString = time.strftime(\"sam1hour/%Y/%m/%d\",\n time.gmtime( tbin * 3600 ))\n if dirString not in dirList:\n dirList.append( dirString )\n for dirDay in range(max( startTmpArea, lowestTbin - tisDay ),\n limitLocalTmpArea, tisDay):\n dirList.append( time.strftime(\"sam1hour/%Y/%m/%d.tmp\",\n time.gmtime( dirDay )) )\n if ( len(t6bins) > 0 ):\n logging.log(15, \" 6 hour time bins 
%d (%s), ..., %d (%s)\" %\n (t6bins[0], time.strftime(\"%Y-%b-%d %H:%M\",\n time.gmtime(t6bins[0]*21600)),\n t6bins[-1], time.strftime(\"%Y-%b-%d %H:%M\",\n time.gmtime((t6bins[-1]*21600)+21599))))\n lowestTbin = now\n for tbin in t6bins:\n if ( tbin < lowestTbin ):\n lowestTbin = tbin\n dirString = time.strftime(\"sam6hour/%Y/%m/%d\",\n time.gmtime( tbin * 21600 ))\n if dirString not in dirList:\n dirList.append( dirString )\n for dirDay in range(max( startTmpArea, lowestTbin - tisDay ),\n limitLocalTmpArea, tisDay):\n dirList.append( time.strftime(\"sam6hour/%Y/%m/%d.tmp\",\n time.gmtime( dirDay )) )\n if ( len(t24bins) > 0 ):\n logging.log(15, \" 1 day time bins %d (%s), ..., %d (%s)\" %\n (t24bins[0], time.strftime(\"%Y-%b-%d %H:%M\",\n time.gmtime(t24bins[0]*86400)),\n t24bins[-1], time.strftime(\"%Y-%b-%d %H:%M\",\n time.gmtime((t24bins[-1]*86400)+86399))))\n lowestTbin = now\n for tbin in t24bins:\n if ( tbin < lowestTbin ):\n lowestTbin = tbin\n dirString = time.strftime(\"sam1day/%Y/%m/%d\",\n time.gmtime( tbin * 86400 ))\n if dirString not in dirList:\n dirList.append( dirString )\n for dirDay in range(max( startTmpArea, lowestTbin - tisDay ),\n limitLocalTmpArea, tisDay):\n dirList.append( time.strftime(\"sam1day/%Y/%m/%d.tmp\",\n time.gmtime( dirDay )) )\n if ( len(dirList) == 0 ):\n return\n del(dirDay)\n\n tmpDict = {}\n try:\n with pydoop.hdfs.hdfs() as myHDFS:\n fileHndl = None\n fileObj = None\n fileName = None\n fileNames = None\n for subDir in dirList:\n logging.debug(\" checking HDFS directory %s\" % subDir)\n if not myHDFS.exists( PATH_HDFS_PREFIX + subDir ):\n continue\n # get list of files in directory:\n myList = myHDFS.list_directory( PATH_HDFS_PREFIX + subDir )\n fileNames = [ d['name'] for d in myList\n if (( d['kind'] == \"file\" ) and ( d['size'] != 0 )) ]\n del(myList)\n for fileName in fileNames:\n logging.debug(\" file %s\" % os.path.basename(fileName))\n fileHndl = None\n fileObj = None\n try:\n if ( os.path.splitext(fileName)[-1] == \".gz\" ):\n fileHndl = myHDFS.open_file(fileName)\n fileObj = gzip.GzipFile(fileobj=fileHndl)\n else:\n fileObj = myHDFS.open_file(fileName)\n # read documents and add relevant records to global:\n for myLine in fileObj:\n myJson = json.loads(myLine.decode('utf-8'))\n if (( 'metadata' not in myJson ) or\n ( 'data' not in myJson )):\n continue\n if ( \"monit_hdfs_path\" not in myJson['metadata'] ):\n if ( \"path\" in myJson['metadata'] ):\n myJson['metadata']['monit_hdfs_path'] = \\\n myJson['metadata']['path']\n else:\n continue\n if (( 'timestamp' not in myJson['metadata'] ) or\n ( 'kafka_timestamp' not in myJson['metadata'] ) or\n ( 'name' not in myJson['data'] ) or\n ( 'type' not in myJson['data'] ) or\n ( 'status' not in myJson['data'] )):\n continue\n #\n tis = int(myJson['metadata']['timestamp']/1000)\n if ( myJson['metadata']['monit_hdfs_path'] ==\n \"sam15min\" ):\n tbin = int( tis / 900 )\n if tbin not in t15bins:\n continue\n elif ( myJson['metadata']['monit_hdfs_path'] ==\n \"sam1hour\" ):\n tbin = int( tis / 3600 )\n if tbin not in t1bins:\n continue\n elif ( myJson['metadata']['monit_hdfs_path'] ==\n \"sam6hour\" ):\n tbin = int( tis / 21600 )\n if tbin not in t6bins:\n continue\n elif ( myJson['metadata']['monit_hdfs_path'] ==\n \"sam1day\" ):\n tbin = int( tis / 86400 )\n if tbin not in t24bins:\n continue\n else:\n continue\n #\n if 'availability' not in myJson['data']:\n myJson['data']['availability'] = None\n if 'reliability' not in myJson['data']:\n myJson['data']['reliability'] = None\n if 'detail' not in 
myJson['data']:\n myJson['data']['detail'] = None\n #\n version = myJson['metadata']['kafka_timestamp']\n #\n key = ( myJson['metadata']['monit_hdfs_path'],\n tbin,\n myJson['data']['name'],\n myJson['data']['type'] )\n val = { 'v': version,\n 'd': myJson['data'] }\n if key in tmpDict:\n if ( version <= tmpDict[key]['v'] ):\n continue\n #\n tmpDict[key] = val\n\n except json.decoder.JSONDecodeError as excptn:\n logging.error(\"JSON decoding failure, file %s: %s\" %\n (fileName, str(excptn)))\n except FileNotFoundError as excptn:\n logging.error(\"HDFS file not found, %s: %s\" %\n (fileName, str(excptn)))\n except IOError as excptn:\n logging.error(\"HDFS access failure, file %s: %s\" %\n (fileName, str(excptn)))\n finally:\n if fileObj is not None:\n fileObj.close()\n if fileHndl is not None:\n fileHndl.close()\n del(fileHndl)\n del(fileObj)\n del(fileName)\n del(fileNames)\n except:\n logging.error(\"Failed to fetch CMS SAM metric docs from MonIT HDFS\")\n\n # convert temporary dictionary into global dictionary of arrays:\n for longKey in tmpDict:\n shortKey = ( longKey[0], longKey[1] )\n if shortKey not in evsam_glbl_monitdocs:\n evsam_glbl_monitdocs[shortKey] = []\n evsam_glbl_monitdocs[shortKey].append( tmpDict[longKey]['d'] )\n logging.log(9, \" adding %s (%d) of %s / %s\" %\n (longKey[0], longKey[1], longKey[2], longKey[3]))\n #\n logging.info(\" found %d relevant CMS SAM metric docs in MonIT\" %\n len(tmpDict))\n del(tmpDict)\n #\n return", "title": "" }, { "docid": "f2b2261d6a0e057030b0f391b6d38fd1", "score": "0.4584066", "text": "def upload_directories(self, dirs, path, osl):\n raise NotImplementedError", "title": "" }, { "docid": "3924e112b2ed94ccc53ea96d63efb947", "score": "0.45811415", "text": "def evsam_monit_downtime(startTIS, limitTIS):\n # ################################################################## #\n # fill evsam_glbl_down with CMS downtime information from MonIT/HDFS #\n # ################################################################## #\n global evsam_glbl_downtimes\n PATH_HDFS_PREFIX = \"/project/monitoring/archive/cmssst/raw/ssbmetric/\"\n\n # prepare HDFS subdirectory list:\n # ===============================\n logging.info(\"Retrieving CMS downtime docs from MonIT HDFS\")\n logging.log(15, \" starting %s, limit %s\" %\n (time.strftime(\"%Y-%b-%d %H:%M\", time.gmtime(startTIS)),\n time.strftime(\"%Y-%b-%d %H:%M\", time.gmtime(limitTIS))))\n #\n tisDay = 24*60*60\n ts = time.gmtime( startTIS )\n startTISmidnight = calendar.timegm( ts[:3] + (0, 0, 0) + ts[6:] )\n now = int( time.time() )\n startTmpArea = max( calendar.timegm( time.gmtime( now - (6 * tisDay) ) ),\n startTIS - tisDay)\n limitLocalTmpArea = calendar.timegm( time.localtime( now ) ) + tisDay\n #\n dirList = []\n for dirDay in range(startTISmidnight, limitTIS, tisDay):\n dirList.append( time.strftime(\"down15min/%Y/%m/%d\",\n time.gmtime( dirDay )) )\n for dirDay in range(startTmpArea, limitLocalTmpArea, tisDay):\n dirList.append( time.strftime(\"down15min/%Y/%m/%d.tmp\",\n time.gmtime( dirDay )) )\n del(dirDay)\n\n tmpDict = {}\n try:\n with pydoop.hdfs.hdfs() as myHDFS:\n fileHndl = None\n fileObj = None\n fileName = None\n fileNames = None\n for subDir in dirList:\n logging.debug(\" checking HDFS directory %s\" % subDir)\n if not myHDFS.exists( PATH_HDFS_PREFIX + subDir ):\n continue\n # get list of files in directory:\n myList = myHDFS.list_directory( PATH_HDFS_PREFIX + subDir )\n fileNames = [ d['name'] for d in myList\n if (( d['kind'] == \"file\" ) and ( d['size'] != 0 )) ]\n del(myList)\n 
for fileName in fileNames:\n logging.debug(\" file %s\" % os.path.basename(fileName))\n try:\n if ( os.path.splitext(fileName)[-1] == \".gz\" ):\n fileHndl = myHDFS.open_file(fileName)\n fileObj = gzip.GzipFile(fileobj=fileHndl)\n else:\n fileHndl = None\n fileObj = myHDFS.open_file(fileName)\n # read documents and add relevant records to list:\n for myLine in fileObj:\n myJson = json.loads(myLine.decode('utf-8'))\n if (( 'metadata' not in myJson ) or\n ( 'data' not in myJson )):\n continue\n if ( \"monit_hdfs_path\" not in myJson['metadata'] ):\n if ( \"path\" in myJson['metadata'] ):\n myJson['metadata']['monit_hdfs_path'] = \\\n myJson['metadata']['path']\n else:\n continue\n if (( 'timestamp' not in myJson['metadata'] ) or\n ( 'kafka_timestamp' not in myJson['metadata'] ) or\n ( 'name' not in myJson['data'] ) or\n ( 'type' not in myJson['data'] ) or\n ( 'status' not in myJson['data'] ) or\n ( 'duration' not in myJson['data'] )):\n continue\n #\n if ( myJson['metadata']['monit_hdfs_path'] !=\n \"down15min\" ):\n continue\n #\n tis = int(myJson['metadata']['timestamp']/1000)\n if ( tis < startTISmidnight ):\n continue\n if ( tis >= limitTIS ):\n continue\n #\n t15bin = int( tis / 900 )\n if t15bin not in tmpDict:\n tmpDict[ t15bin ] = []\n tmpDict[ t15bin ].append(\n { 'v': myJson['metadata']['kafka_timestamp'],\n 'd': myJson['data'] } )\n\n except json.decoder.JSONDecodeError as excptn:\n logging.error(\"JSON decoding failure, file %s: %s\" %\n (fileName, str(excptn)))\n except FileNotFoundError as excptn:\n logging.error(\"HDFS file not found, %s: %s\" %\n (fileName, str(excptn)))\n except IOError as excptn:\n logging.error(\"HDFS access failure, file %s: %s\" %\n (fileName, str(excptn)))\n finally:\n if fileObj is not None:\n fileObj.close()\n fileObj = None\n if fileHndl is not None:\n fileHndl.close()\n fileHndl = None\n del(fileHndl)\n del(fileObj)\n del(fileName)\n del(fileNames)\n except:\n logging.error(\"Failed to fetch CMS downtime docs from MonIT HDFS\")\n\n\n # convert temp into global dictionary and filter out superseded versions:\n cnt = 0\n evsam_glbl_downtimes = {}\n for t15bin in tmpDict:\n # find latest MonIT document version:\n newest = 0\n for entry in tmpDict[ t15bin ]:\n if ( entry['v'] > newest ):\n newest = entry['v']\n #\n evsam_glbl_downtimes[ t15bin ] = []\n #\n # fill downtime entries of latest MonIT document version:\n for entry in tmpDict[ t15bin ]:\n # allow 5 min for MonIT importer processing\n if ( (newest - entry['v']) <= 300000 ):\n # add category in case of known service:\n if entry['d']['type'] in evsam_glbl_types:\n entry['d']['ctgry'] = evsam_glbl_types[ entry['d']['type'] ]\n else:\n entry['d']['ctgry'] = \"\"\n evsam_glbl_downtimes[ t15bin ].append( entry['d'] )\n cnt += 1\n logging.log(9, \" adding %d %s of %s / %s\" %\n (t15bin, entry['d']['status'],\n entry['d']['name'], entry['d']['ctgry']))\n del(tmpDict)\n #\n logging.info(\" found %d CMS site downtimes in %d timebins in MonIT\" %\n (cnt, len(evsam_glbl_downtimes)))\n #\n return", "title": "" }, { "docid": "3d8ecc5fd0a621ec243bf7e1091d5fbc", "score": "0.45780843", "text": "def create_parallel_dirs(root, number, prefix, *subdirs):\n for n in range(number):\n indexed_dir = os.path.join(root, f\"{prefix}{n}\")\n if not os.path.exists(indexed_dir):\n os.makedirs(indexed_dir)\n for name in subdirs:\n subdir_path = os.path.join(root, f\"{prefix}{n}\", name)\n if not os.path.exists(subdir_path):\n os.makedirs(subdir_path)", "title": "" }, { "docid": "6b25e21cf80d6460b0724c5ebb17a2f3", "score": 
"0.45734447", "text": "def checkHistoryFiles(tseries, dout_s_root, case, rstart_year, rstop_year, comp, suffix, filep, subdir):\n if tseries.upper() in ['T','TRUE'] :\n htype = 'series'\n else :\n htype = 'slice'\n\n # make sure subdir does not include a trailing \"/\"\n if subdir.endswith('/'):\n subdir = subdir[:-1]\n in_dir = '{0}/{1}/{2}'.format(dout_s_root, comp, subdir)\n\n # check the in_dir directory exists \n if not os.path.isdir(in_dir):\n err_msg = 'ERROR: diagUtilsLib.checkHistoryFiles {0} directory is not available.'.format(in_dir)\n raise OSError(err_msg)\n\n # get the file paths and formats - TO DO may need to get this from namelist var or env_archive\n files = '{0}.{1}'.format(case, suffix)\n fformat = '{0}/{1}*'.format(in_dir, files)\n \n if htype == 'slice':\n # get the first and last years from the first and last monthly history files\n allHfiles = sorted(glob.glob(fformat))\n if len(allHfiles) > 0:\n pattern = re.compile(filep)\n hfiles = filter_pick(allHfiles, pattern)\n\n if hfiles:\n # the first element of the hfiles list has the start year\n tlist = hfiles[0].split('.')\n slist = tlist[-2].split('-')\n hfstart_year = slist[0]\n hfstart_month = slist[1]\n\n # the last element of the hfiles list has the stop year\n tlist = hfiles[-1].split('.')\n slist = tlist[-2].split('-')\n hfstop_year = slist[0]\n hfstop_month = slist[1]\n else:\n print('ERROR diagUtilsLib.checkHistoryFiles: No history time slice files found matching pattern = {0}'.format(pattern))\n sys.exit(1)\n else:\n print('ERROR diagUtilsLib.checkHistoryFiles: No history time slice files found matching format {0}'.format(fformat))\n sys.exit(1)\n\n elif htype == 'series':\n hfiles = sorted(glob.glob(fformat))\n # the first variable time series file has the stop and start years\n if len(hfiles) > 0:\n tlist = hfiles[0].split('.')\n slist = tlist[-2].split('-')\n hfstart_year = slist[0][:4]\n hfstart_month = slist[0][4:6]\n hfstop_year = slist[1][:4]\n hfstop_month = slist[1][4:6]\n\n if not check_series_years(hfstart_year, hfstart_month, hfstop_year, hfstop_month, hfiles[0]):\n print('ERROR: diagUtilsLib.checkHistoryFiles Time series filename does not match file time slice count.')\n sys.exit(1)\n else:\n print('ERROR diagUtilsLib.checkHistoryFiles: No history time series files found matching format {0}'.format(fformat))\n sys.exit(1)\n\n # check if the XML YEAR0 and YEAR1 are within the actual start_year and stop_year bounds \n # defined by the actual history files\n start_year, stop_year = checkXMLyears(hfstart_year, hfstop_year, rstart_year, rstop_year)\n\n return (start_year, stop_year, in_dir, htype, hfiles[0])", "title": "" }, { "docid": "38e2892b8ff075b663121d43411fdc7c", "score": "0.45717627", "text": "def ecmwf_get_valid_forecast_folder_list(main_watershed_forecast_folder, file_extension): \n directories = sorted([d for d in os.listdir(main_watershed_forecast_folder) \\\n if os.path.isdir(os.path.join(main_watershed_forecast_folder, d))],\n reverse=True)\n output_directories = []\n directory_count = 0\n for directory in directories:\n date = datetime.datetime.strptime(directory.split(\".\")[0],\"%Y%m%d\")\n hour = int(directory.split(\".\")[-1])/100\n path_to_files = os.path.join(main_watershed_forecast_folder, directory)\n if os.path.exists(path_to_files):\n basin_files = glob(os.path.join(path_to_files,\"*{0}\".format(file_extension)))\n #only add directory to the list if valid \n if len(basin_files) >0:\n output_directories.append({\n 'id' : directory, \n 'text' : str(date + 
datetime.timedelta(hours=int(hour)))\n })\n directory_count += 1\n #limit number of directories\n if(directory_count>64):\n break \n return output_directories", "title": "" }, { "docid": "7040f4ac66eede3585f13bed0196b5ce", "score": "0.456156", "text": "def make_paths(dirpath):\n if not os.path.exists(dirpath):\n os.makedirs(dirpath, exist_ok=True)\n\n return", "title": "" }, { "docid": "293cc049a1b4d2c92b1802845a2334b1", "score": "0.45533603", "text": "def _assign_sub_dirs(self, sub_dirs=tuple('all')):\n # Get the list of raw sub dir names\n if sub_dirs[0] == 'all':\n self.raw_sub_dir_names = os.listdir(self.parent_dir)\n else:\n self.raw_sub_dir_names = sub_dirs\n # Make label to map raw sub dir names to numeric values\n self.label_map = self._make_label_map()\n\n # Get the full path of the raw sub dirs\n filtered_sub_dir = filter(self._accepted_dir_name, self.raw_sub_dir_names)\n self.sub_dirs = map(self._path_relative_to_parent, filtered_sub_dir)", "title": "" }, { "docid": "bc5a23dc1817192fa48ba36631cffecb", "score": "0.4540649", "text": "def prepare_local_paths(path_list):\n fails = 0\n for path in path_list:\n if not create_local_path(path):\n fails += 1\n return fails", "title": "" }, { "docid": "ee12967287ac87df4aec1b8beb0608a9", "score": "0.4535878", "text": "def get_htdocs_dirs(self):\r\n return [('worktime', resource_filename(__name__, 'htdocs'))]", "title": "" }, { "docid": "f866f9d2ff7af5ec69e0691fac8d6609", "score": "0.45345265", "text": "def check_log_files(self, dt_from, dt_to):\n if self.mobile_proxy:\n result = []\n dt_time = dt_from\n while dt_time <= dt_to:\n file_path = self.check_log_file(dt_time)\n dt_time = dt_time + timedelta(hours=1)\n if file_path:\n result.append(file_path)\n return result\n else:\n params = []\n dt_temp = dt_from\n while dt_temp <= dt_to:\n params.append(dt_temp)\n dt_temp = dt_temp + timedelta(hours=1)\n result = utils.parallel_map(self.check_log_file, params=params)\n log_files = []\n for file_path in result:\n if file_path:\n log_files.append(file_path)\n log_files.sort()\n return log_files", "title": "" }, { "docid": "b4c68cfaf87f57f9a251bb975e31509e", "score": "0.45330453", "text": "def make_sub_directories(self):\n\n dbDir = self.getDbFilePath()\n if not os.path.exists(dbDir):\n os.makedirs(dbDir)\n imgDir = self.getImageFilePath()\n if not os.path.exists(imgDir):\n os.makedirs(imgDir)\n svgDir = self.getSvgFilePath()\n if not os.path.exists(svgDir):\n os.makedirs(svgDir)\n outputDir = self.getOutputPath()\n if not os.path.exists(outputDir):\n os.makedirs(outputDir)\n tempDir = self.getTempPath()\n if not os.path.exists(tempDir):\n os.makedirs(tempDir)\n drawingPath = self.getDrawingFilePath()\n if not os.path.exists(drawingPath):\n os.makedirs(drawingPath)\n trainingPath = self.getTrainingFilePath()\n if not os.path.exists(trainingPath):\n os.makedirs(trainingPath)\n trainingSymbolPath = self.getTrainingSymbolFilePath()\n if not os.path.exists(trainingSymbolPath):\n os.makedirs(trainingSymbolPath)\n\n path = os.path.join(tempDir, 'Tile')\n if not os.path.exists(path):\n os.makedirs(path)\n path = os.path.join(drawingPath, 'Native')\n if not os.path.exists(path):\n os.makedirs(path)\n\n # create folder and copy data sheet files\n path = self.get_data_sheet_path()\n if not os.path.exists(path):\n os.makedirs(path)\n\n source_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Datasheets')\n data_sheet_files = [f for f in os.listdir(source_path) if os.path.isfile(os.path.join(source_path, f))\n and 
(os.path.splitext(f)[1].upper() == '.XLSX')]\n for data_sheet in data_sheet_files:\n copyfile(os.path.join(source_path, data_sheet), os.path.join(path, data_sheet))", "title": "" }, { "docid": "38640ac9d732b81af1915847cd6119e1", "score": "0.45307568", "text": "def paths_to_use(self):\n \n pass", "title": "" }, { "docid": "50449d6f2bacdcc68b4ca0d145017402", "score": "0.45224035", "text": "def rangeOfSeries(requestContext, *seriesLists):\r\n (seriesList,start,end,step) = normalize(seriesLists)\r\n name = \"rangeOfSeries(%s)\" % ','.join(set([s.pathExpression for s in seriesList]))\r\n values = ( safeSubtract(max(row), min(row)) for row in izip(*seriesList) )\r\n series = TimeSeries(name,start,end,step,values)\r\n series.pathExpression = name\r\n return [series]", "title": "" }, { "docid": "a163e5bc48a96b3f2ab53205de07690b", "score": "0.45101568", "text": "def get_gtfs_dir(self, gtfs_period):\n\n gtfs_df = pd.read_csv('data/gtfs_lookup.csv')\n gtfs_series = gtfs_df.iloc[gtfs_period]\n\n self.gtfs_directory = gtfs_series['directory']", "title": "" }, { "docid": "a163e5bc48a96b3f2ab53205de07690b", "score": "0.45101568", "text": "def get_gtfs_dir(self, gtfs_period):\n\n gtfs_df = pd.read_csv('data/gtfs_lookup.csv')\n gtfs_series = gtfs_df.iloc[gtfs_period]\n\n self.gtfs_directory = gtfs_series['directory']", "title": "" }, { "docid": "d08b60e31b27bef6742f09d004e5748a", "score": "0.44769013", "text": "def get_files(path):\n ignore_dates = []\n existings = base_service.BaseService.get_existing_ingests(\"Clickstream\")\n for existing in existings:\n if existing[2] == 'file':\n pathvars = existing[3].split('/')\n ignore_dates.append(pathvars[len(pathvars)-2] + \"/\" + pathvars[len(pathvars)-1])\n\n required_files = []\n main_path = os.path.realpath(os.path.join(path, 'clickstream_logs', 'latest'))\n\n # Changed for new clickstream format\n #for subdir in os.listdir(main_path):\n # if os.path.isdir(os.path.join(main_path, subdir)):\n for filename in os.listdir(main_path):\n extension = os.path.splitext(filename)[1]\n if extension == '.log':\n pathvars = os.path.join(main_path, filename).split('/')\n ignore_check = pathvars[len(pathvars)-2] + \"/\" + pathvars[len(pathvars)-1]\n if ignore_check not in ignore_dates:\n required_files.append(os.path.join(main_path, filename))\n else:\n pass\n #print \"IGNORING \"+ignore_check\n maxdates = {}\n for required_file in required_files:\n dirname = os.path.dirname(required_file)\n filename = os.path.basename(required_file)\n filetime = filenametodate(filename)\n if dirname not in maxdates:\n maxdates[dirname] = filetime\n if filetime > maxdates[dirname]:\n maxdates[dirname] = filetime\n for i in reversed(xrange(len(required_files))):\n dirname = os.path.dirname(required_files[i])\n filename = os.path.basename(required_files[i])\n filetime = filenametodate(filename)\n if maxdates[dirname] == filetime:\n del required_files[i]\n pass\n return required_files", "title": "" }, { "docid": "1ccf94f8dddfd0897baf3f2433fcec54", "score": "0.44765204", "text": "def _init_dirs(self):\n self._conf_dir = os.path.join(self._root_dir, 'conf')\n self._data_dir = os.path.join(self._root_dir, 'data')\n self._log_dir = os.path.join(self._root_dir, 'logs')\n self._zk_dir = os.path.join(self._root_dir, 'zk')\n os.makedirs(self._conf_dir)\n os.makedirs(self._data_dir)\n os.makedirs(self._log_dir)\n os.makedirs(self._zk_dir)\n # Needs to exist so that testinstances doesn't break\n open(self.logfile, 'w').close()", "title": "" }, { "docid": "158876887cefdb8ed7edd745784f206e", "score": 
"0.44746765", "text": "def get_existing_paths(path_pos, path_neither, path_neg):\n key_to_path = {}\n for d in [path_pos, path_neither, path_neg]:\n if not os.path.exists(d):\n os.makedirs(d)\n key_to_path.update([(os.path.splitext(\n os.path.basename(f))[0], d) for f in glob.glob('%s/*' % d)])\n return key_to_path", "title": "" }, { "docid": "e14e4f6b61e7512e707c71c77072f88e", "score": "0.44727835", "text": "def data_folder(query_list,gbk_list,time_var):\n\tmy_current_directory = os.getcwd()\n\t# Moving files to Data\n\tfor gbk in gbk_list:\n\t\tgbk_base=basename(normpath(gbk))\n\t\tshutil.copyfile(str(gbk), my_current_directory+\"/Data_\"+time_var+\"/\"+gbk_base)\n\tfor n_query in range(len(query_list)):\n\t\tquery_base=basename(normpath(query_list[0]))\n\t\tshutil.copyfile(str(query_list[0]), my_current_directory+\"/Data_\"+time_var+\"/\"+query_base)\n\t# Moving gbk_multi.fasta to Results as it is required for all the queries\n\tshutil.move(my_current_directory+\"/gbk_multi.fasta\", my_current_directory+\"/Results_\"+time_var+\"/gbk_multi.fasta\")\n\treturn", "title": "" }, { "docid": "9f4d37df5a5d4a986f01db0ba4d82bbb", "score": "0.44727424", "text": "def harvester(config, output_path, grids_to_use=[]):\n\n # =====================================================\n # Read harvester_config.yaml and setup variables\n # =====================================================\n dataset_name = config['ds_name']\n start_time = config['start']\n end_time = config['end']\n data_time_scale = config['data_time_scale']\n host = config['host']\n ddir = config['ddir']\n\n if end_time == 'NOW':\n end_time = datetime.utcnow().strftime(\"%Y%m%dT%H:%M:%SZ\")\n\n target_dir = f'{output_path}/{dataset_name}/harvested_granules/'\n\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n time_format = \"%Y-%m-%dT%H:%M:%SZ\"\n entries_for_solr = []\n last_success_item = {}\n granule_dates = []\n chk_time = datetime.utcnow().strftime(time_format)\n now = datetime.utcnow()\n updating = False\n\n solr_utils.clean_solr(config, grids_to_use)\n print(f'Downloading {dataset_name} files to {target_dir}\\n')\n\n # =====================================================\n # Pull existing entries from Solr\n # =====================================================\n docs = {}\n descendants_docs = {}\n\n # Query for existing harvested docs\n fq = ['type_s:granule', f'dataset_s:{dataset_name}']\n query_docs = solr_utils.solr_query(fq)\n\n if len(query_docs) > 0:\n for doc in query_docs:\n docs[doc['filename_s']] = doc\n\n fq = ['type_s:dataset', f'dataset_s:{dataset_name}']\n query_docs = solr_utils.solr_query(fq)\n\n # Query for existing descendants docs\n fq = ['type_s:descendants', f'dataset_s:{dataset_name}']\n existing_descendants_docs = solr_utils.solr_query(fq)\n\n if len(existing_descendants_docs) > 0:\n for doc in existing_descendants_docs:\n if doc['hemisphere_s']:\n key = (doc['date_s'], doc['hemisphere_s'])\n else:\n key = doc['date_s']\n descendants_docs[key] = doc\n\n # =====================================================\n # Setup NSIDC loop variables\n # =====================================================\n try:\n ftp = FTP(host)\n ftp.login(config['user'])\n except Exception as e:\n log.exception(f'Harvesting failed. Unable to connect to FTP. {e}')\n return 'Harvesting failed. 
Unable to connect to FTP.'\n\n start_time_dt = datetime.strptime(start_time, \"%Y%m%dT%H:%M:%SZ\")\n end_time_dt = datetime.strptime(end_time, \"%Y%m%dT%H:%M:%SZ\")\n\n start_year = start_time[:4]\n end_year = end_time[:4]\n years = np.arange(int(start_year), int(end_year) + 1)\n\n # =====================================================\n # NSIDC loop\n # =====================================================\n # Iterate through hemispheres given in config\n for region in config['regions']:\n hemi = 'nh' if region == 'north' else 'sh'\n\n for year in years:\n\n ftp_dir = f'{ddir}{region}/{data_time_scale}/{year}/'\n\n try:\n files = []\n ftp.dir(ftp_dir, files.append)\n\n # Ignore first two FTP entries as they aren't related to data\n files = files[2:]\n # Extract file names from FTP directory\n files = [e.split()[-1] for e in files]\n\n if not files:\n print(f'No granules found for region {region} in {year}.')\n except:\n log.exception(\n f'Error finding files at {ftp_dir}. Check harvester config.')\n print(\n f'Error finding files at {ftp_dir}. Check harvester config.')\n\n for newfile in files:\n try:\n if not any(extension in newfile for extension in ['.nc', '.bz2', '.gz']):\n continue\n\n url = f'{ftp_dir}{newfile}'\n\n # Extract the date from the filename\n date = getdate(config['regex'], newfile)\n date_time = datetime.strptime(date, \"%Y%m%d\")\n new_date_format = f'{date[:4]}-{date[4:6]}-{date[6:]}T00:00:00Z'\n\n # Ignore granules with start time less than wanted start time\n # List of files contains ALL files on FTP dir from that year\n if (start_time_dt > date_time) or (end_time_dt < date_time):\n continue\n\n granule_dates.append(datetime.strptime(\n new_date_format, config['date_regex']))\n\n # Granule metadata used for Solr harvested entries\n item = {}\n item['type_s'] = 'granule'\n item['date_s'] = new_date_format\n item['dataset_s'] = dataset_name\n item['hemisphere_s'] = hemi\n item['filename_s'] = newfile\n item['source_s'] = f'ftp://{host}/{url}'\n\n # Granule metadata used for initializing Solr descendants entries\n descendants_item = {}\n descendants_item['type_s'] = 'descendants'\n descendants_item['date_s'] = item[\"date_s\"]\n descendants_item['dataset_s'] = item['dataset_s']\n descendants_item['hemisphere_s'] = hemi\n descendants_item['filename_s'] = newfile\n descendants_item['source_s'] = item['source_s']\n\n updating = False\n\n # Attempt to get last modified time of file\n try:\n mod_time = ftp.voidcmd(\"MDTM \"+url)[4:]\n mod_date_time = parser.parse(mod_time)\n mod_time = mod_date_time.strftime(time_format)\n item['modified_time_dt'] = mod_time\n except:\n mod_date_time = now\n\n # If granule doesn't exist or previously failed or has been updated since last harvest\n updating = (not newfile in docs.keys()) or \\\n (not docs[newfile]['harvest_success_b']) or \\\n (datetime.strptime(\n docs[newfile]['download_time_dt'], time_format) <= mod_date_time)\n\n # If updating, download file if necessary\n if updating:\n year = date[:4]\n local_fp = f'{target_dir}{year}/{newfile}'\n\n if not os.path.exists(f'{target_dir}{year}/'):\n os.makedirs(f'{target_dir}{year}/')\n\n # If file doesn't exist locally, download it\n if not os.path.exists(local_fp):\n print(f' - Downloading {newfile} to {local_fp}')\n try:\n with open(local_fp, 'wb') as f:\n ftp.retrbinary('RETR '+url, f.write)\n except:\n os.unlink(local_fp)\n\n # If file exists, but is out of date, download it\n elif datetime.fromtimestamp(os.path.getmtime(local_fp)) <= mod_date_time:\n print(\n f' - Updating 
{newfile} and downloading to {local_fp}')\n try:\n with open(local_fp, 'wb') as f:\n ftp.retrbinary('RETR '+url, f.write)\n except:\n os.unlink(local_fp)\n\n else:\n print(\n f' - {newfile} already downloaded and up to date')\n\n if newfile in docs.keys():\n item['id'] = docs[newfile]['id']\n\n # Create checksum for file\n item['checksum_s'] = file_utils.md5(local_fp)\n item['pre_transformation_file_path_s'] = local_fp\n item['harvest_success_b'] = True\n item['file_size_l'] = os.path.getsize(local_fp)\n\n else:\n print(\n f' - {newfile} already downloaded and up to date')\n\n except Exception as e:\n log.exception(e)\n print(f' - {e}')\n if updating:\n print(f' - {newfile} failed to download')\n\n item['harvest_success_b'] = False\n item['pre_transformation_file_path_s'] = ''\n item['file_size_l'] = 0\n\n if updating:\n item['download_time_dt'] = chk_time\n\n # Update Solr entry using id if it exists\n if hemi:\n key = (descendants_item['date_s'], hemi)\n else:\n key = descendants_item['date_s']\n\n if key in descendants_docs.keys():\n descendants_item['id'] = descendants_docs[key]['id']\n\n descendants_item['harvest_success_b'] = item['harvest_success_b']\n descendants_item['pre_transformation_file_path_s'] = item['pre_transformation_file_path_s']\n\n entries_for_solr.append(item)\n entries_for_solr.append(descendants_item)\n\n if item['harvest_success_b']:\n last_success_item = item\n\n print(f'\\nDownloading {dataset_name} complete\\n')\n\n ftp.quit()\n\n # Only update Solr harvested entries if there are fresh downloads\n if entries_for_solr:\n # Update Solr with downloaded granule metadata entries\n r = solr_utils.solr_update(entries_for_solr, r=True)\n\n if r.status_code == 200:\n print('Successfully created or updated Solr harvested documents')\n else:\n print('Failed to create Solr harvested documents')\n\n # Query for Solr failed harvest documents\n fq = ['type_s:granule',\n f'dataset_s:{dataset_name}', f'harvest_success_b:false']\n failed_harvesting = solr_utils.solr_query(fq)\n\n # Query for Solr successful harvest documents\n fq = ['type_s:granule',\n f'dataset_s:{dataset_name}', f'harvest_success_b:true']\n successful_harvesting = solr_utils.solr_query(fq)\n\n harvest_status = f'All granules successfully harvested'\n\n if not successful_harvesting:\n harvest_status = f'No usable granules harvested (either all failed or no data collected)'\n elif failed_harvesting:\n harvest_status = f'{len(failed_harvesting)} harvested granules failed'\n\n overall_start = min(granule_dates) if granule_dates else None\n overall_end = max(granule_dates) if granule_dates else None\n\n # Query for Solr Dataset-level Document\n fq = ['type_s:dataset', f'dataset_s:{dataset_name}']\n dataset_query = solr_utils.solr_query(fq)\n\n # If dataset entry exists on Solr\n update = (len(dataset_query) == 1)\n\n # =====================================================\n # Solr dataset entry\n # =====================================================\n if not update:\n # -----------------------------------------------------\n # Create Solr dataset entry\n # -----------------------------------------------------\n ds_meta = {}\n ds_meta['type_s'] = 'dataset'\n ds_meta['dataset_s'] = dataset_name\n ds_meta['short_name_s'] = config['original_dataset_short_name']\n ds_meta['source_s'] = f'ftp://{host}/{ddir}'\n ds_meta['data_time_scale_s'] = data_time_scale\n ds_meta['date_format_s'] = config['date_format']\n ds_meta['last_checked_dt'] = chk_time\n ds_meta['original_dataset_title_s'] = 
config['original_dataset_title']\n ds_meta['original_dataset_short_name_s'] = config['original_dataset_short_name']\n ds_meta['original_dataset_url_s'] = config['original_dataset_url']\n ds_meta['original_dataset_reference_s'] = config['original_dataset_reference']\n ds_meta['original_dataset_doi_s'] = config['original_dataset_doi']\n\n # Only include start_date and end_date if there was at least one successful download\n if overall_start != None:\n ds_meta['start_date_dt'] = overall_start.strftime(\n time_format)\n ds_meta['end_date_dt'] = overall_end.strftime(time_format)\n\n # Only include last_download_dt if there was at least one successful download\n if last_success_item:\n ds_meta['last_download_dt'] = last_success_item['download_time_dt']\n\n ds_meta['harvest_status_s'] = harvest_status\n\n # Update Solr with dataset metadata\n r = solr_utils.solr_update([ds_meta], r=True)\n\n if r.status_code == 200:\n print('Successfully created Solr dataset document')\n else:\n print('Failed to create Solr dataset document')\n\n # If the dataset entry needs to be created, so do the field entries\n\n # -----------------------------------------------------\n # Create Solr dataset field entries\n # -----------------------------------------------------\n\n # Query for Solr field documents\n fq = ['type_s:field', f'dataset_s:{dataset_name}']\n field_query = solr_utils.solr_query(fq)\n\n body = []\n for field in config['fields']:\n field_obj = {}\n field_obj['type_s'] = {'set': 'field'}\n field_obj['dataset_s'] = {'set': dataset_name}\n field_obj['name_s'] = {'set': field['name']}\n field_obj['long_name_s'] = {'set': field['long_name']}\n field_obj['standard_name_s'] = {'set': field['standard_name']}\n field_obj['units_s'] = {'set': field['units']}\n\n for solr_field in field_query:\n if field['name'] == solr_field['name_s']:\n field_obj['id'] = {'set': solr_field['id']}\n\n body.append(field_obj)\n\n # Update Solr with dataset fields metadata\n r = solr_utils.solr_update(body, r=True)\n\n if r.status_code == 200:\n print('Successfully created Solr field documents')\n else:\n print('Failed to create Solr field documents')\n\n # if dataset entry exists, update download time, converage start date, coverage end date\n else:\n # -----------------------------------------------------\n # Update Solr dataset entry\n # -----------------------------------------------------\n dataset_metadata = dataset_query[0]\n\n # Query for dates of all harvested docs\n fq = [f'dataset_s:{dataset_name}',\n 'type_s:granule', 'harvest_success_b:true']\n dates_query = solr_utils.solr_query(fq, fl='date_s')\n dates = [x['date_s'] for x in dates_query]\n\n # Build update document body\n update_doc = {}\n update_doc['id'] = dataset_metadata['id']\n update_doc['last_checked_dt'] = {\"set\": chk_time}\n if dates:\n update_doc['start_date_dt'] = {\"set\": min(dates)}\n update_doc['end_date_dt'] = {\"set\": max(dates)}\n\n if entries_for_solr:\n update_doc['harvest_status_s'] = {\"set\": harvest_status}\n\n if 'download_time_dt' in last_success_item.keys():\n update_doc['last_download_dt'] = {\n \"set\": last_success_item['download_time_dt']}\n\n # Update Solr with modified dataset entry\n r = solr_utils.solr_update([update_doc], r=True)\n\n if r.status_code == 200:\n print('Successfully updated Solr dataset document\\n')\n else:\n print('Failed to update Solr dataset document\\n')\n return harvest_status", "title": "" }, { "docid": "2cfb72a41961af88791ee57912e4ceaf", "score": "0.4463141", "text": "def populate(self, files):\n for 
file_parts in files:\n dir_name = os.path.join(self.tmpdir, *file_parts[:-1])\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, file_parts[-1]), 'w') as f:\n f.write('content')\n return [os.path.join(*parts) for parts in files]", "title": "" }, { "docid": "673d4eca6ffe0470848433f3613edf1e", "score": "0.44600013", "text": "def get_data(root,\n area_start=0, area_end=None, area_step=1,\n room_start=0, room_end=None, room_step=1):\n print(\"Loading Data...\")\n area_dfs = []\n areas = subfolders(root)\n if area_end is None:\n area_end = len(areas)\n for area in areas[area_start:area_end:area_step]:\n print(area)\n room_dfs = []\n rooms = subfolders(root + '/' + area)\n if room_end is None:\n room_end = len(rooms)\n for room in rooms[room_start:room_end:room_step]:\n print(room)\n this_room_dir = root + \"/\" + area + '/' + room\n annotations_dir, subdirs, annotated_files = next(os.walk(this_room_dir + \"/Annotations\"))\n annotated_files = [f for f in annotated_files if f[-4:] == \".txt\"]\n annots_df = []\n for file in annotated_files:\n df = pc_to_df(annotations_dir + \"/\" + file)\n df['annotation'] = re.sub(\"_.*\", \"\", file)\n annots_df.append(df)\n room_df = pd.concat(annots_df)\n room_df['room'] = room\n room_dfs.append(room_df)\n print(\"Joining Area...\")\n for room_df in room_dfs:\n room_df['area'] = area\n area_dfs.append(room_dfs)\n print(\"Done getting data!\")\n return area_dfs", "title": "" }, { "docid": "9c37919538416d17c57b33e490a11d2e", "score": "0.44500446", "text": "def _delete_and_create_dirs(cls, list_of_dir_names):\n for dir_name in list_of_dir_names:\n if os.path.exists(dir_name):\n shutil.rmtree(dir_name)\n os.makedirs(dir_name)", "title": "" }, { "docid": "a06ff1c3f4054a79d1c142b82d6b6c55", "score": "0.44499525", "text": "def create_paths(paths):\n for path in paths:\n try:\n os.mkdir(path)\n except OSError as err:\n if err.errno == errno.EEXIST: # file already exists\n logging.basicConfig(\n format='%(levelname)s: %(message)s', level=logging.INFO\n )\n logging.info(f'{path} already exists.')\n else:\n raise", "title": "" }, { "docid": "bc2769e74ae3b8694eb744cf3b7a496d", "score": "0.44476068", "text": "def locate_file_list(start_path: Text, name_end: Text) -> List:\n if os.path.isdir(start_path):\n start_dir_path = start_path\n else:\n raise FileNotFoundError(f\"invalid path: {start_path}\")\n\n # This will return a list of all file names\n file_list = []\n for fn in os.listdir(start_dir_path):\n if fn.endswith(name_end):\n file_list.append(os.path.join(start_path, fn))\n\n return file_list", "title": "" }, { "docid": "d91b97f254fa8ea46a7aac0173f38fe3", "score": "0.4446214", "text": "def create_directories_from_list(dir_list, path=\"\", with_text=False):\n _create_directories(dir_list, path, with_text)", "title": "" }, { "docid": "cce9fc494504d1cd6ebd934c4ae7ed95", "score": "0.4441981", "text": "def extract_fs_long_dir(id,timepoint):\n\n search = glob.glob(op.join(fs_dir,\"*\"+str(id)+\"*\"+str(timepoint+1)+\"*\"+\".long.*\"))\n\n try:\n return op.basename(search[0])\n except:\n return np.nan", "title": "" }, { "docid": "b120c0b165503df5a480e237f1f52ab3", "score": "0.44413844", "text": "def _create_directories_if_needed(self):\n\t\tpass", "title": "" }, { "docid": "4d786b621364cd21c0770fc63303d43b", "score": "0.44402686", "text": "def datasets_path(*paths):\n return resources_path(DATASETSFOLDER, *paths)", "title": "" }, { "docid": "e35eda59e797e57fadfc0cae39ed5318", "score": "0.4437746", "text": "def 
get_filespace_mappings(master_filespace_map, options, array):\n \n standby_filespace_map = []\n tmp_validator = lambda str, default, ignore1: str if str and str != '' else default\n \n first = True\n \n standby_on_same_host = (options.standby_host == array.master.getSegmentHostName())\n if (standby_on_same_host):\n logger.info('Standby is requested to be initialized on the same host as the master.')\n logger.info('Hence, use of default (or primary master) filespace location is not applicable.')\n \n fs_loc = None\n for item in master_filespace_map:\n # for pg_system on remote host, we use the same value without\n # asking users\n if (item[0] == 'pg_system' and (not standby_on_same_host)):\n fs_loc = item[1]\n else:\n if first:\n print \"\"\"\nThe filespace locations on the master must be mapped to\nlocations on the standby. These locations must be empty on the\nstandby master host. The default provided is the location of\nthe filespace on the master (except if the master and the\nstandby are hosted on the same node or host). In most cases the\ndefaults can be used.\n\"\"\"\n first = False\n\n while(True):\n fs_loc = ask_input(None,\n 'Enter standby filespace location for filespace %s (default: %s)'\n % (item[0], \"NA\" if (standby_on_same_host) else item[1]),\n '', item[1], tmp_validator, None)\n\n if ((os.path.abspath(fs_loc) == item[1]) and standby_on_same_host):\n print('ERROR - Use of default (or primary master) filespace location is not applicable.\\n')\n else:\n break\n \n fs_loc = os.path.abspath(fs_loc)\n standby_filespace_map.append([item[0], fs_loc])\n\n return standby_filespace_map", "title": "" }, { "docid": "b38e6196eee27ebbc4262c7f21c447e2", "score": "0.44355324", "text": "def get_filenames(setup):\n ###################################################\n # DEFAULT #\n ###################################################\n for key in _setup:\n if key not in setup.keys():\n setup[key] = _setup[key]\n\n ###################################################\n # GET CANDIDATES #\n ###################################################\n # ========== raw ================================ #\n lev_in = setup['lev_in']\n if lev_in == 'raw':\n io_sensor = init.get_io_sensor(setup)\n filenames = io_sensor.paths.get_filenames(setup)\n \n\n\n # ========== level >= 0 =============================== #\n else:\n time_beg = setup['time_beg']\n time_end = setup['time_end']\n \n # input check\n if time_beg is not None:\n assert isinstance(time_beg, dt.datetime)\n if time_end is not None:\n assert isinstance(time_end, dt.datetime)\n\n # path check\n print(lev_in)\n path = setup['path_base_in'] + '/%s' % lev_in # problem for calibration --> you need a lev_\n print(lev_in)\n if not os.path.isdir(path):\n raise IOError('Directory does not exist: %s' % path)\n\n # filename candidates\n dig = '[0-9]'\n pattern = path + '/%s/%s/%s/*.nc' % (4 * dig, 2 * dig, 2 * dig)\n filenames = sorted(glob.glob(pattern))\n\n ###################################################\n # FILTER CANDIDATES #\n ###################################################\n filenames = filter_files_for_time_beg(filenames, setup)\n filenames = filter_files_for_time_end(filenames, setup)\n\n return filenames", "title": "" }, { "docid": "7edec1d54399bd3ee77b3ead76cb8db9", "score": "0.4433967", "text": "def _create_dsets(self, dsets, locid, path):\n for d in dsets:\n # Record the full HDF5 path to the dataset...\n self._dset_path[d['id']] = pp.join(path, d['title'])\n # Generate dataset code...\n self._create_dset(d['id'], d['title'],\n 
self._d['datasets'][d['id']], locid)", "title": "" }, { "docid": "34dafd9ce87e7345c86ec6ea54c3a00a", "score": "0.44324467", "text": "def get_tasks(path_to_subdir):\n sweep_dir = 'lnrho_sweep'\n if os.path.exists(sweep_dir):\n # print('Removing old dir')\n # shutil.rmtree(sweep_dir)\n pass\n else:\n print('making dir')\n os.mkdir(sweep_dir)\n print('dir made.')\n\n tasks = []\n skipped = 0\n for sigma in [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4]:\n #for lnrho in [-9, -8, -7, -6, -5, -4, -3, -2]:\n for lnrho in [-7, -6, -5.5, -5, -4.75, -4.5, -4.25, -4, -3.5]:\n output_name = os.path.join(sweep_dir, 'cost_{}_{}.h5'.format(lnrho, sigma))\n if os.path.exists(output_name):\n # print('{} exists. Do not include in worklist.'.format(output_name))\n skipped += 1\n continue\n \n else:\n tasks += [(path_to_subdir, lnrho, sigma, output_name)]\n print('{} files found and skipped.'.format(skipped))\n return tasks", "title": "" }, { "docid": "324d11e4cfdafa5ce7259586fe480ea3", "score": "0.443131", "text": "def _create_directories(sub_dir_list, path=\"\", with_text=False):\n for sub_dir in sub_dir_list:\n dir = _create_path_from_param(sub_dir, path)\n try:\n if not os.path.exists(dir):\n os.makedirs(dir)\n if with_text:\n _create_text_file(sub_dir, dir)\n else:\n print(f\"Directory: {dir} already exists!\")\n except TypeError as e:\n print(e)\n raise", "title": "" }, { "docid": "7bf1337b25d8b050fbfc4079d948d4f7", "score": "0.44301504", "text": "def collect_standby_filespace_dir(array):\n\n dirty_paths = list()\n fs_dirs = array.standbyMaster.getSegmentFilespaces().values()\n for fs_dir in fs_dirs:\n if unix.FileDirExists.remote(name='check if dirty path left over',\n remote_host=array.standbyMaster.getSegmentHostName(),\n directory=fs_dir):\n dirty_paths.append(fs_dir)\n\n if len(dirty_paths) == 0:\n return\n\n save_dir = '/tmp'\n fs_file = os.path.join(save_dir, 'standby_filespaces_only.txt')\n\n logger.info('Saving standby filespace locations')\n\n with open(fs_file, 'w') as fw:\n for dirty_path in dirty_paths:\n fw.write(dirty_path+'\\n')\n\n cpCmd = unix.Scp('scp file to standby host',\n fs_file,\n fs_file,\n dstHost=array.standbyMaster.getSegmentHostName())\n cpCmd.run(validateAfter=True)\n\n if (array.standbyMaster.getSegmentHostName() not in\n [array.master.getSegmentHostName(), 'localhost', socket.gethostname()]):\n try:\n os.remove(fs_file)\n except:\n pass\n\n logger.info('List of standby filespace directories saved to %s on standby host' %\n fs_file)\n logger.warn('Please make cleanup for standby host only!')", "title": "" }, { "docid": "4261fc38f60210acf88be01d93dafd29", "score": "0.44280046", "text": "def main():\n START_DT = pd.datetime.now() - datetime.timedelta(hours=1)\n END_DT= pd.datetime.now() - datetime.timedelta(hours=1)\n\n service = SERVICE + \"data=all&tz=Etc/UTC&format=comma&latlon=yes&\"\n\n service += START_DT.strftime(\"year1=%Y&month1=%m&day1=%d&hour1=%H&\")\n service += END_DT.strftime(\"year2=%Y&month2=%m&day2=%d&hour2=%H&\")\n\n\n stations = get_stations_from_filelist(airports)\n \n \n for station in stations:\n uri = \"%s&station=%s\" % (service, station)\n data = download_data(uri)\n dbutils.fs.put(\"dbfs:/_METAR_13_/{}_{}_{}.csv\".format(station, START_DT.strftime(\"%Y_%m_%d_%H_%M_%S\"), END_DT.strftime(\"%Y_%m_%d_%H_%M_%S\")), data)", "title": "" }, { "docid": "de5b8d15fb7e3b5d093e12ae1a713ef6", "score": "0.44269443", "text": "def load_data_paths(dataset, \r\n pattern='/cam1/event[0-9]_tirf/*PreNbin*.tif',\r\n file_mnt=\"mnt/plabNAS/\"):\r\n\r\n files = []\r\n\r\n with 
open('data/curated_data.txt', 'rt') as f:\r\n for line in f:\r\n if line.lower().startswith(\"*{}\".format(dataset.lower())):\r\n for line in f:\r\n # the dataest is finished\r\n if line.lower().startswith('*'):\r\n break\r\n # empty line\r\n if not line.strip():\r\n continue\r\n else:\r\n files.append(re.sub(r'\\\\', r'/', '/' +file_mnt +line.strip()))\r\n \r\n files = [glob.glob(file+ pattern) for file in files]\r\n files = [item for sublist in files for item in sublist]\r\n\r\n return files", "title": "" }, { "docid": "4b4da1a9d6d12bbd3f38b3d5cbe8066a", "score": "0.44258708", "text": "def create_temp_paths(self, lock_dir):\n command = [\"/bin/mkdir {}\".format(self.source_tmp_dir)]\n result = self.run_command(self.source_host, command)\n self.parallel_checksum_source_path = os.path.join(self.source_tmp_dir, 'transferrer_source.md5sum')\n self.parallel_checksum_target_path = os.path.join(lock_dir, 'transferrer_target.md5sum')\n if result.returncode != 0:\n raise Exception('Creation of temporary directory failed at source {}:{}'.\n format(self.source_host, self.source_tmp_dir))", "title": "" }, { "docid": "8d32b8c389574e843e6e49139ef16f17", "score": "0.44239548", "text": "def create_edges(dirs):\n\n for subdir in dirs:\n source = subdir[-2:].upper()\n for file in os.listdir(subdir):\n with open(os.path.join(subdir, file)) as datafile:\n data = json.load(datafile)\n process_entities(data, source)", "title": "" }, { "docid": "083b95120d025b59d54eb450fcbe3eaa", "score": "0.44207847", "text": "def find_training_data(start_date,train_period,train_data_path):\n\n TF_file = []\n now_time = parse_date(start_date)\n delta_time = timedelta(train_period)\n\n for f_name in os.listdir(train_data_path):\n #if ('h5' in f_name or 'tfrecords' in f_name) and 'patch' not in f_name:\n # f_time_str = f_name.replace('train_data', '').replace('.h5', '').replace('.tfrecords', '')\n try:\n if ('h5' in f_name or 'tf' in f_name) and 'patch' not in f_name:\n f_time_str = f_name.replace('train_data_', '').replace('.h5', '').replace('.tf', '') \n f_time = datetime.strptime(f_time_str, '%Y-%m-%dT%H:%M:%S.%f')\n if now_time > f_time > now_time + delta_time:\n TF_file.append(train_data_path+f_name)\n except:\n logging.info('{0} file name not matching the format'.format(f_name))\n return TF_file", "title": "" }, { "docid": "9904a9ed4ec293123bb982409586d494", "score": "0.4420089", "text": "def get_dates(case: Optional[str] = None,\n path: Optional[str] = None) -> List[Tuple[float, float]]:\n if path is not None:\n pass\n elif case == 'minidata1':\n path = '/scratch2/spirou/drs-data/common/minidata1'\n else:\n path = '/scratch2/spirou/drs-data/common/minidata2'\n # get a list of directories\n directories = os.listdir(path)\n # storage for dates\n dates = []\n # loop around directories\n for directory in directories:\n # if we are not dealing with a directory continue\n if not os.path.isdir(directory):\n continue\n # print progress\n print(f'Processing {directory}')\n # get file list in sub-directory\n files = glob.glob(os.path.join(path, directory, '*.fits'))\n # start the min and max times as extremely high/low respectively\n mindate = np.inf\n maxdate = -np.inf\n # loop around files\n for filename in tqdm(files):\n # get the header of this file\n hdr = fits.getheader(filename)\n # if we have a minimum date change mindate\n if hdr['MJDATE'] < mindate:\n mindate = float(hdr['MJDATE']) - 1 / 24.0\n # if we have a maximum date change maxdate\n if hdr['MJDATE'] > maxdate:\n maxdate = float(hdr['MJDATE']) + 1 / 24.0\n # append 
the best found min and max time to dates\n dates.append((mindate, maxdate))\n # return dates for use in get_data()\n return dates", "title": "" }, { "docid": "745f52483ee2da14103c077c115b7031", "score": "0.4416476", "text": "def tstamps_for_daterange(self, startdate, enddate):\n file_list = []\n delta_all = enddate - startdate\n timestamps = []\n\n for i in range(delta_all.days + 1):\n timestamp = startdate + timedelta(days=i)\n\n files = self._search_files(\n timestamp, custom_templ=self.day_search_str)\n\n file_list.extend(sorted(files))\n\n for filename in file_list:\n timestamps.append(self._get_orbit_start_date(filename))\n\n timestamps = [dt for dt in timestamps if dt >= startdate and dt <= enddate]\n return timestamps", "title": "" }, { "docid": "51612028c62b76d117fb65f343667e0a", "score": "0.44163185", "text": "def create_paths(methods_list, base_path = '/home/wangyzh/'):\n paths = {}\n for method in methods_list:\n paths[method] = base_path + method\n return paths", "title": "" } ]
2fdd7fa013ed09db82187224e9f0dfd7
Cleans the string from problematic chars
[ { "docid": "70329abdd6d6f66c0d1ffdf75eb2ffab", "score": "0.0", "text": "def replace_char(text):\n\n for ch in ['/', '`', '*', '{', '}', '[', ']', '(', ')', '#', '+', '-', '.', '!', '\\$', ':', '|']:\n text = text.replace(ch, \"_\")\n return text", "title": "" } ]
[ { "docid": "8c6de6119fd3932290e8db0f3495cf46", "score": "0.7372472", "text": "def Clean(s):\n for c in BAD_CHARACTERS:\n s = s.replace(c, '_')\n return s", "title": "" }, { "docid": "a71c91b0a137d047cb307a2063bea342", "score": "0.7360314", "text": "def clean_string(s):\n c = re.sub(r'\\s+', ' ', re.sub(r'[^A-Za-z0-9 .:]', '', s))\n return c", "title": "" }, { "docid": "d125475ed0bb208e1a9d98c9c6a8e67e", "score": "0.73280174", "text": "def string_cleanup(s, garbage=\":,-()&\"):\n s_new = ''\n for x in s:\n if x not in garbage:\n s_new += x\n\n return s_new", "title": "" }, { "docid": "939a209480a816dba82ab17add7e3f20", "score": "0.7316219", "text": "def clean_str(self,string):\r\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\r\n string = re.sub(r\"\\'s\", \" \\'s\", string)\r\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\r\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\r\n string = re.sub(r\"\\'re\", \" \\'re\", string)\r\n string = re.sub(r\"\\'d\", \" \\'d\", string)\r\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\r\n string = re.sub(r\",\", \" , \", string)\r\n string = re.sub(r\"!\", \" ! \", string)\r\n string = re.sub(r\"\\(\", \" \\( \", string)\r\n string = re.sub(r\"\\)\", \" \\) \", string)\r\n string = re.sub(r\"\\?\", \" \\? \", string)\r\n string = re.sub(r\"\\s{2,}\", \" \", string)\r\n return string.strip().lower()", "title": "" }, { "docid": "39da874d93ae4b428bea0fb15ba5d6b2", "score": "0.7290518", "text": "def clean(str):\n str = str.replace(u\"“\",u\"``\")\n str = str.replace(u\"”\",u\"''\")\n str = str.replace(u' \"',u\" ``\")\n str = str.replace(u'\"',u\"''\")\n str = str.replace(u'fi',u\"fi\")\n str = str.replace(u'fl',u\"fl\")\n str = str.replace(u'’',u\"'\")\n str = str.replace(u'–',u\"---\")\n str = str.replace(u'&',u\"\\\\&\")\n str = str.replace(u'#',u\"\\\\#\")\n str = str.replace(u'_',u\"\\\\_\")\n \n return str", "title": "" }, { "docid": "4b4ccf22366a698ba34f3f1b987dbc5d", "score": "0.72840136", "text": "def clean_string(value):\n\treturn re.sub(r'[^a-zA-Z0-9_.]', '', str(value))", "title": "" }, { "docid": "05ad8bb7496dbb68cbd2ba65b637b628", "score": "0.7243641", "text": "def clean_str(string):\r\n\t\t\tstring = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\r\n\t\t\tstring = re.sub(r\"\\'s\", \" \\'s\", string)\r\n\t\t\tstring = re.sub(r\"\\'ve\", \" \\'ve\", string)\r\n\t\t\tstring = re.sub(r\"n\\'t\", \" n\\'t\", string)\r\n\t\t\tstring = re.sub(r\"\\'re\", \" \\'re\", string)\r\n\t\t\tstring = re.sub(r\"\\'d\", \" \\'d\", string)\r\n\t\t\tstring = re.sub(r\"\\'ll\", \" \\'ll\", string)\r\n\t\t\tstring = re.sub(r\",\", \" , \", string)\r\n\t\t\tstring = re.sub(r\"!\", \" ! \", string)\r\n\t\t\tstring = re.sub(r\"\\(\", \" \\( \", string)\r\n\t\t\tstring = re.sub(r\"\\)\", \" \\) \", string)\r\n\t\t\tstring = re.sub(r\"\\?\", \" \\? \", string)\r\n\t\t\tstring = re.sub(r\"\\s{2,}\", \" \", string)\r\n\t\t\treturn string.strip()", "title": "" }, { "docid": "4d2f9c1d5556cea992ef3d170cfb42cd", "score": "0.72356755", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! 
\", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip()", "title": "" }, { "docid": "5b5654dd16616eb90755e044f69c4c5e", "score": "0.72106624", "text": "def clean_str(string):\n\tstring = re.sub(r\"\\\"+\", \" \", string)\n\tstring = re.sub(r\"\\'+\", \" \", string)\n\tstring = re.sub(r\",+\", \",\", string)\n\tstring = re.sub(r\"\\.+\", \".\", string)\n\tstring = re.sub(r\"\\?+\", \"?\", string)\n\tstring = re.sub(r\"!+\", \"!\", string)\n\tstring = re.sub(r\",\", \" , \", string)\n\tstring = re.sub(r\"!\", \" ! \", string)\n\tstring = re.sub(r\"\\(\", \" ( \", string)\n\tstring = re.sub(r\"\\)\", \" ) \", string)\n\tstring = re.sub(r\"\\?\", \" ? \", string)\n\tstring = re.sub(r\"\\s{2,}\", \" \", string)\n\treturn string.strip().lower()", "title": "" }, { "docid": "71120b104e5f2eb6d697ff48337af967", "score": "0.7182881", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "fc148840d79006adc289c7346b7bf9a2", "score": "0.71789575", "text": "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "title": "" }, { "docid": "90388b3cb891ef929da995658e1c0cf9", "score": "0.71368796", "text": "def clean_str(data, remove=''):\n return data.translate(None, remove)", "title": "" }, { "docid": "cfa14bf6feb28e2eb7b303ce64c2498c", "score": "0.7128668", "text": "def basic_cleaning2(string):\n\n string = string.lower()\n string = re.sub('[0-9\\(\\)\\!\\^\\%\\$\\'\\\"\\.;,-\\?\\{\\}\\[\\]\\\\/]', ' ', string)\n string = re.sub(' +', ' ', string)\n return string", "title": "" }, { "docid": "d1ba7436d5d8721eef7bec1d97eafb73", "score": "0.71280456", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"[.,#!$%&;:{}=_`~()/\\\\]\", \"\", string)\n string = re.sub(r\"http\\S+\", \" \" ,string)\n string = re.sub(\"[^a-zA-Z]\", \" \",string)\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string)\n re.sub(r'(.)\\1+', r'\\1\\1', string) \n \n return string.strip().lower()", "title": "" }, { "docid": "c317af63cb1f281fbcde8f8c088802e7", "score": "0.71126366", "text": "def clean_str(string):\n #just return string if already cleaned\n return string", "title": "" }, { "docid": "f1ad805c30eb105093212ce268019c4f", "score": "0.708663", "text": "def clean_str(string):\n\t#string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n\tstring = re.sub(r\"\\'s\", \" \\'s\", string)\n\tstring = re.sub(r\"\\'ve\", \" \\'ve\", string)\n\tstring = re.sub(r\"n\\'t\", \" n\\'t\", string)\n\tstring = re.sub(r\"\\'re\", \" \\'re\", string)\n\tstring = re.sub(r\"\\'d\", \" \\'d\", string)\n\tstring = re.sub(r\"\\'ll\", \" \\'ll\", string)\n\tstring = re.sub(r\",\", \" , \", string)\n\tstring = re.sub(r\"!\", \" ! \", string)\n\tstring = re.sub(r\"\\(\", \" \\( \", string)\n\tstring = re.sub(r\"\\)\", \" \\) \", string)\n\tstring = re.sub(r\"\\?\", \" \\? \", string)\n\tstring = re.sub(r\"\\s{2,}\", \" \", string)\n\tstring = re.sub(r\"\\s+\", \" \", string)\n\t#return string.strip().split()\n\treturn string", "title": "" }, { "docid": "2f91017acce8370e49ca13e0e468fcc0", "score": "0.70707", "text": "def clean_str(string):\n\tstring = re.sub(r\"[^A-Za-z0-9()<>/,!?\\'\\`]\", \" \", string)\n\tstring = re.sub(r\"\\'s\", \" \\'s\", string)\n\tstring = re.sub(r\"\\'ve\", \" \\'ve\", string)\n\tstring = re.sub(r\"n\\'t\", \" n\\'t\", string)\n\tstring = re.sub(r\"\\'re\", \" \\'re\", string)\n\tstring = re.sub(r\"\\'d\", \" \\'d\", string)\n\tstring = re.sub(r\"\\'ll\", \" \\'ll\", string)\n\tstring = re.sub(r\",\", \" , \", string)\n\tstring = re.sub(r\"!\", \" ! \", string)\n\tstring = re.sub(r\"\\(\", \" \\( \", string)\n\tstring = re.sub(r\"\\)\", \" \\) \", string)\n\tstring = re.sub(r\"\\?\", \" \\? \", string)\n\tstring = re.sub(r\"\\s{2,}\", \" \", string)\n\treturn string.strip().lower()", "title": "" }, { "docid": "a0c99b1f7dd103c6b93e594bf302ff97", "score": "0.7065066", "text": "def clean_xml_string(s):\n return VALID_XML_CHARS_REGEX.sub(\"\", s)", "title": "" }, { "docid": "4c1f72d624a2444e1814a702ea0cf7e8", "score": "0.70633143", "text": "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "c36e7bcb734ca9a0ce323fc6a2ca7f62", "score": "0.7058493", "text": "def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "adbd309b7e463a8f7a3a4b2770b9db20", "score": "0.7057789", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! 
\", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "51e3d056de4ad2ae1c8640079471c0a4", "score": "0.7050733", "text": "def cleanup_input(data):\n data = re.sub(r'[^0-9A-Za-z ()_,.-:]', '', data)\n return data", "title": "" }, { "docid": "ebe7c7ce83f598a1a3554ddfe04f5140", "score": "0.70454866", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"@\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "ebe7c7ce83f598a1a3554ddfe04f5140", "score": "0.70454866", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"@\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "d9006957d7f6e6be427e1285c487362f", "score": "0.70454746", "text": "def clean_unicode(text):\n clean_text = text.encode(\"ascii\", errors=\"replace\").strip().decode(\"ascii\")\n clean_text = clean_text.replace(\"?\", ' ')\n return clean_text", "title": "" }, { "docid": "e0931977a2506fc66f9f132724b500b7", "score": "0.70336586", "text": "def clean_string(str):\n if not isinstance(str, basestring):\n str = remove_unicode(str)\n str = replace_non_break_space(str)\n str = new_line_to_whitespace(str)\n str = remove_links(str)\n return str", "title": "" }, { "docid": "2566b0472ff4dd1f9853f0f8db0323a6", "score": "0.70327944", "text": "def CLEAN(text):\n return _control_char_re.sub('', text)", "title": "" }, { "docid": "e710d2b7dd6bea065a81a73c2a00b0e9", "score": "0.7013353", "text": "def clean_txt(txt):\n r = txt.encode(\"utf-8\", errors=\"backslashreplace\").decode('utf-8').replace(\"\\\\u0144\", \"\")\n return r", "title": "" }, { "docid": "acb898541c339412b7b2bf87dc0df0fa", "score": "0.70114446", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"@\", \"\", string)\n return string.lower()", "title": "" }, { "docid": "7cec58c6817ba6c85bb5e9e4e268e3ae", "score": "0.7010156", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n # replace more than 2 whitespace with 1 whitespace\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().split()", "title": "" }, { "docid": "cbceffc9eea3e39fc31ec08a7729986a", "score": "0.69948083", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" 's\", string)\n string = re.sub(r\"\\'ve\", \" 've\", string)\n string = re.sub(r\"n\\'t\", \" n't\", string)\n string = re.sub(r\"\\'re\", \" 're\", string)\n string = re.sub(r\"\\'d\", \" 'd\", string)\n string = re.sub(r\"\\'ll\", \" 'll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "26d2c4db745b7f7705d651d855f875a4", "score": "0.6992143", "text": "def clean_str(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "597561cfc6068e810c7b14fe945ce139", "score": "0.69870543", "text": "def clean_str(string):\n\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" ( \", string)\n string = re.sub(r\"\\)\", \" ) \", string)\n string = re.sub(r\"\\?\", \" ? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n\n return string.strip().lower()", "title": "" }, { "docid": "a99fa7d059140d9499aa89fba99dcd33", "score": "0.69859827", "text": "def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n string = string.strip(\"\\n\")\n return string", "title": "" }, { "docid": "4a1e8af3f9d0ed67cab2db06132e9f11", "score": "0.69809616", "text": "def clean_str(string):\n #string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n #string = re.sub(r\"\\?\", \" \\? 
\", string)\n #string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "70e961bcab2d9e4f2d389d40a1029fe4", "score": "0.6969637", "text": "def clean_string(self, input_str):\n punctuation = [i for i in string.punctuation] + ['„', '“', '”', '–']\n\n for char in punctuation:\n input_str = input_str.replace(char, \" \")\n\n return input_str", "title": "" }, { "docid": "22b1f67db475606dd37244a3c7227734", "score": "0.69659454", "text": "def sanitize(instring):\r\n return instring.encode('ascii','replace')", "title": "" }, { "docid": "4b6b7a80be3b5c9bdc3e6edae55d2696", "score": "0.696103", "text": "def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s", "title": "" }, { "docid": "4b6b7a80be3b5c9bdc3e6edae55d2696", "score": "0.696103", "text": "def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s", "title": "" }, { "docid": "bd57ef566ec925fa4904d53c03fa7a2c", "score": "0.69582975", "text": "def clean_str(string):\n # Remove punctuation\n string = re.sub(r\"[^\\u4e00-\\u9fff]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip()", "title": "" }, { "docid": "3db25cc81b2382a09905025a9e32a35c", "score": "0.6954925", "text": "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "title": "" }, { "docid": "ad9f774a4ea932368eb2df627de08ecb", "score": "0.6949244", "text": "def clean_str(string):\n string = unidecode(string)\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"\\\"\", \"\", string)\n return string.strip().lower()", "title": "" }, { "docid": "22c0980397e31e70f3a4aa3117e979df", "score": "0.6942852", "text": "def clean_up(sentence):\n\treturn unicode(sentence.strip().replace(\"\\n\", \"\"), errors='ignore').strip().replace(\"\\x0c\", \"\")", "title": "" }, { "docid": "49ffda584d5207790a06124faa853d61", "score": "0.69426024", "text": "def clean(sent):\n p1 = re.compile('\\W')\n p2 = re.compile('\\s+')\n sent = re.sub(r\"http\\S+\", \"\", sent)\n sent = ReplaceThreeOrMore(sent)\n sent = remove_unicode_diac(sent)\n sent = sent.replace('_', ' ')\n sent = re.sub(r'[A-Za-z0-9]', r'', sent)\n sent = re.sub(p1, ' ', sent)\n sent = re.sub(p2, ' ', sent)\n return sent", "title": "" }, { "docid": "03277db968e62980cb2b5aa15f8a7990", "score": "0.6913672", "text": "def str_clean(string):\n\timport re\n\treturn re.sub(ur'[\\W_]+', u'', string, flags=re.UNICODE).lower()", "title": "" }, { "docid": "19c4ae99cb23300cb0f066eacb0f2161", "score": "0.69071877", "text": "def clean(string):\r\n if string is None or not string: return ''\r\n string = html.unescape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n string = unescape(string)\r\n string = html.escape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n return string", "title": "" }, { "docid": "f3f6013bafa5d6364acb14e14afe48ff", "score": "0.68873024", "text": "def clean_string(raw_string):\n if raw_string == None:\n return\n\n clean_string = raw_string.strip('\\n')\n clean_string = ' '.join(clean_string.split())\n return clean_string", "title": "" }, { "docid": "7cc7529de16f7f17324ee949d5fb71bb", "score": "0.68775374", "text": "def sanitise(s, max_len=MAX_STRING_LENGTH):\n result = ''\n if len(s) > max_len:\n s = s[0: max_len // 2] + \"\\n*** <snip> ***\\n\" + s[-max_len // 2:]\n lines = s.rstrip().splitlines()\n for line in lines:\n for c in line.rstrip() + '\\n':\n if c < ' ' and c != '\\n':\n if c == '\\t':\n c = r'\\t'\n elif c == '\\r':\n c = r'\\r'\n else:\n c = r'\\{:03o}'.format(ord(c))\n result += c\n return result.rstrip()", "title": "" }, { "docid": "9e0dcdf8d7cb485b43e5f4bddd702945", "score": "0.6870073", "text": "def cleaning(string):\n\n if type(string) == float or type(string) == int:\n return string\n res = ''\n if string != string:\n return string\n string = string.replace(\"\\\\r\", \"\")\n string = string.replace(\"\\\\n\", \"\")\n string = string.replace(\"\\\\b\", \"\")\n string = string.replace(\"\\\\t\", \"\")\n for i in string:\n if i.isalpha():\n res = res + i\n return res.lower()", "title": "" }, { "docid": "e05bddf69bdbfa3cd543086510a400e4", "score": "0.6858739", "text": "def _cleanstr(string):\n cstr = _rmspace.sub(' ', string.strip(\" \\\"'\"))\n return _unescape(cstr)", "title": "" }, { "docid": "ac6c2478cb02871cc38b1b0ac21838b8", "score": "0.6846437", "text": "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "title": "" }, { "docid": "35356385f5075ec2410a8b09f4aa5216", "score": "0.6845431", "text": "def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n return string", "title": "" }, { "docid": "bdd0c3f42705b9c5c56029e5e219d6a2", "score": "0.6843067", "text": "def sanitize_string(unclean_string: str) -> str:\n return unidecode(unclean_string)", "title": "" }, { 
"docid": "6df4d559580691df547e591bb86b50f0", "score": "0.6839993", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "19f0f704585be95931f4163939bd9482", "score": "0.6839809", "text": "def clean_str(cleaned_tweet):\n # string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", cleaned_tweet)\n string = re.sub(r'(.)\\1+', r'\\1\\1', cleaned_tweet)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\"“”¨«»®´·º½¾¿¡§£₤‘’\", \"\", string)\n return string.strip().lower()", "title": "" }, { "docid": "e31cb8bf0fffa9977ab1394925911fe7", "score": "0.6821067", "text": "def cleanstr(s):\n try:\n s = str(s)\n except Exception:\n s = utf(s).encode('utf-8')\n s = s.replace('/', '.')\n return s", "title": "" }, { "docid": "d3be5f0830715f47e7fbd479ec06b447", "score": "0.6821053", "text": "def replace_bad_characters(self, str):\n\n str = unicode(BeautifulStoneSoup(str,\n convertEntities=BeautifulStoneSoup.HTML_ENTITIES))\n str = unicodedata.normalize('NFKD', str).encode('ascii', 'ignore')\n str = unicode(re.sub('[^\\w\\s-]', '', str).strip().lower())\n str = unicode(str.replace(' ', '-'))\n return str", "title": "" }, { "docid": "0a6e5512a71b3fcc49bcb81cf4b136e5", "score": "0.682041", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()", "title": "" }, { "docid": "739935d5ae58e7d8df2aca09849d969a", "score": "0.6776708", "text": "def clean_text(text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xfffd or is_control(char):\n continue\n if is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)", "title": "" }, { "docid": "2602dec771a83cc029cf17a5d62cb961", "score": "0.67746466", "text": "def clean_str(string):\n #print file_list[count]\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "5bb7d0210a14c549003786724c36c9c1", "score": "0.67731494", "text": "def clean_string(i_string):\r\n unCharSet = {'\\\\': ' ', \"'\": '', '(': ' ', ')': ' ',\r\n '.': ' ', ',': ' ', '&': ' and '}\r\n\r\n if i_string is None:\r\n return None\r\n\r\n for key, value in unCharSet.items():\r\n i_string = i_string.replace(key, value)\r\n\r\n o_string = i_string.split()\r\n\r\n o_string = \" \".join(o_string)\r\n return o_string", "title": "" }, { "docid": "2e9fc24644b1ed4ee87ff012b165493c", "score": "0.6771594", "text": "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "d7208b30898398ebc9a16a746dabff7b", "score": "0.67635465", "text": "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "title": "" }, { "docid": "d7208b30898398ebc9a16a746dabff7b", "score": "0.67635465", "text": "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "title": "" }, { "docid": "a348acfa00d8efb80ed604c3cd7af170", "score": "0.6711991", "text": "def clean_str(string):\n string = re.sub(r\"[0-9]\", \"\", string)\n string = re.sub(r\"年\", \"\", string)\n string = re.sub(r\"月\", \"\", string)\n string = re.sub(r\"日\", \"\", string)\n # string = re.sub(r\"\\'s\", \" \\'s\", string)\n # string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n # string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n # string = re.sub(r\"\\'re\", \" \\'re\", string)\n # string = re.sub(r\"\\'d\", \" \\'d\", string)\n # string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\"“\", \"\", string)\n string = re.sub(r\" ”\", \"\", string)\n string = re.sub(r\";\", \"\", string)\n string = re.sub(r\",\", \"\", string)\n string = re.sub(r\";\", \"\", string)\n string = re.sub(r\",\", \"\", string)\n string = re.sub(r\"&\", \"\", string)\n string = re.sub(r\"!\", \"\", string)\n string = re.sub(r\"。\", \"\", string)\n string = re.sub(r\"-\", \"\", string)\n string = re.sub(r\"(\", \"\", string)\n string = re.sub(r\")\", \"\", string)\n string = re.sub(r\"?\", \"\", string)\n string = re.sub(r\"、\", \"\", string)\n string = re.sub(r\"x\", \"某\", string)\n string = re.sub(r\"X\", \"某\", string)\n string = re.sub(r\"×\", \"某\", string)\n\n\n # string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r\" \", \" \", string)\n return string.strip().lower()", "title": "" }, { "docid": "4992de71d817f9f8b62acf338e4a0f05", "score": "0.6693854", "text": "def sanitize_str(s):\n # throw away unkown characters\n return [c for c in s if c in letters]", "title": "" }, { "docid": "65bda86da1fca9d19ad51dc943eb0324", "score": "0.6681667", "text": "def to_clean_str(s: str) -> str:\n return re.sub(\"[^a-zA-Z0-9]\", \"\", s).lower()", "title": "" }, { "docid": "4d08429166fa9d0d7b7b9bd07ffe8b30", "score": "0.66768634", "text": "def _clean(self, string):\n return re.sub('\\s+', ' ', string).strip()", "title": "" }, { "docid": "acbb6dcd1cba7840fc2ec7afb441caed", "score": "0.66763705", 
"text": "def _cleanup_string(self, bytes):\n try:\n b = bytes.index(b'\\x00')\n except ValueError:\n return bytes.decode('latin-1').strip()\n else:\n return bytes[:b].decode('latin-1').strip()", "title": "" }, { "docid": "2fb69955b5cb4a476a4969b4df1b4522", "score": "0.66762406", "text": "def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")", "title": "" }, { "docid": "794dbf61a34ac97c2d274a3522038218", "score": "0.66730124", "text": "def removeSpecialChars(self) -> None:\n self.text = re.sub('[^a-zA-z0-9\\n\\.\\s]', '', self.text)", "title": "" }, { "docid": "61cf02beb6dad17c3ac93ace951355b0", "score": "0.66639763", "text": "def clean_string(text: str, ascii_only=False) -> str:\n done = False\n while text and not done:\n done = True\n if ((text[0] == '\"' and text[-1] == '\"') or\n (text[0] == '[' and text[-1] == ']')):\n text = text[1:-1]\n done = False\n if text[:2] == \"u'\" and text[-1] == \"'\":\n text = text[2:-1]\n done = False\n if ascii_only:\n try:\n # Python v3.7\n if text.isascii(): # type: ignore\n return text\n except AttributeError:\n # Python less than v3.7\n pass\n return ''.join(filter(lambda c: ord(c) >= 32 and ord(c) < 0x7F,\n list(text)))\n return text", "title": "" }, { "docid": "046e5f01a5b112d86b7b5afcd76071e8", "score": "0.6663362", "text": "def super_clean_str(string):\n return ''.join(x for x in string if x.isalnum()).lower()", "title": "" }, { "docid": "2b606f60f1f5aa428703e7bdea07a440", "score": "0.6662041", "text": "def clean(self, str_dirty):\n i = 0\n result = \"\"\n\n # See if there are any substrings that need to be cleaned.\n #\n result = clean_re.sub(self.clean_substr, str_dirty)\n result = trim_re.sub(self.trim_substr, result)\n\n # The regular expression only matches cases where there was a\n # non-emptry string to be cleaned and trimmed, so we do\n # another pass that just removes any \"!!!CLEAN!!!!!!CLEAN!!!\"\n # from the string.\n #\n result = result.replace(\"!!!CLEAN!!!!!!CLEAN!!!\", \"\")\n result = result.replace(\"!!!TRIM!!!!!!TRIM!!!\", \"\")\n\n return result", "title": "" }, { "docid": "ecfd6dee5a778a5b5b51ba584f554a64", "score": "0.6657382", "text": "def cleanString(self, s):\r\n s = s.lower()\r\n for x in s: \r\n if x in punctuation:\r\n s = s.replace(x, '')\r\n return s", "title": "" }, { "docid": "d7087c8962214131126574b530227a81", "score": "0.6624621", "text": "def clean_string(in_str):\n # Remove extra whitespaces\n in_str = ' '.join(in_str.split())\n # Remove whitespaces before punctuation\n in_str = re.sub(r'\\s([?.!\"](?:\\s|$))', r'\\1', in_str)\n\n return in_str", "title": "" }, { "docid": "0e0f53b736950f093683a90ae99950e6", "score": "0.6620758", "text": "def clean(name):\n name = remove_extra(name)\n name = unidecode.unidecode(name) # Remove diacritics\n name = \"\".join(\n list(filter(lambda c: c in (string.ascii_letters + string.digits + \" \"), name))\n )\n name = name.lower().strip()\n return name", "title": "" }, { "docid": "47fb0fc62b969f6ce25b87cdbe30daf7", "score": "0.6611032", "text": "def get_clean_text(messy_text: str) -> str:\n new_text = \"\"\n replace = {\n \"*\": \"\\\"\",\n \"!\": \"?\",\n \"/\": ',',\n \"?\": \"!\"\n }\n remove = \"1234567890&@#$%^()_+|><~\"\n pls_do_upper = False\n for l in messy_text:\n if l in replace:\n new_text += replace[l]\n elif l not in remove:\n if pls_do_upper:\n new_text += l.upper()\n else:\n new_text += l\n return new_text", "title": "" }, { "docid": "746aa54e6b39cea9fc03d3ebbdb31a2b", "score": "0.659407", "text": "def clean(line):\n 
line = line.strip('\\n').strip()\n line = line.replace('\\xe2\\x80\\x93', '-')\n line = line.replace('\\xe2\\x80\\x99', '\\'')\n\n return line", "title": "" }, { "docid": "ebc19037613a7efd86b7dacd1a2c976a", "score": "0.6592187", "text": "def remove_string_special_characters(s):\n stripped = re.sub('[^\\w\\s]', '', s)\n stripped = re.sub('_', '', stripped)\n stripped = re.sub('\\s+', ' ', stripped)\n stripped = stripped.strip()\n\n return stripped", "title": "" }, { "docid": "7a1892abc715a18e6c5b95c2fc712dc4", "score": "0.6579374", "text": "def cleanUpString(text):\r\n if text is None or text == '':\r\n return text\r\n try:\r\n text = text.encode(\"utf-8\")\r\n except:\r\n newText = \"\"\r\n t = text.decode(\"utf-8\")\r\n for c in t:\r\n newC = c\r\n if ord(c)>127:\r\n newC = \"&#%s;\" % ord(c)\r\n if ord(c)==8211:\r\n #change to this otherwise the toc has &#8211; value instead of endash\r\n newC = chr(45)\r\n if ord(c)==160:\r\n #&nbsp;\r\n newC = \" \"\r\n newText += newC\r\n text = newText\r\n text = str(text)\r\n return text", "title": "" }, { "docid": "2ab356d9462aceca4c7adec069ecb09c", "score": "0.6573891", "text": "def clean_str(string):\n # seperate puctuations from words\n string = string.decode()\n string = re.sub(\"(www\\S*)\\s\", \" \", string)\n string = re.sub(r\"[^A-Za-z(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\'s\", \" \\'s\", string)\n string = re.sub(r\"\\'ve\", \" \\'ve\", string)\n string = re.sub(r\"n\\'t\", \" n\\'t\", string)\n string = re.sub(r\"\\'re\", \" \\'re\", string)\n string = re.sub(r\"\\'d\", \" \\'d\", string)\n string = re.sub(r\"\\'ll\", \" \\'ll\", string)\n string = re.sub(r\",\", \" , \", string)\n string = re.sub(r\"!\", \" ! \", string)\n string = re.sub(r\"\\(\", \" \\( \", string)\n string = re.sub(r\"\\)\", \" \\) \", string)\n string = re.sub(r\"\\?\", \" \\? 
\", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n string = re.sub(r'\"', ' ', string)\n string = string.replace(\"\\'ve\", \"have\").replace(\"\\'d\",\"had\").replace(\"\\'s\",\"is\")\n string = string.replace(\"n\\'t\",\"not\").replace(\"\\'re\",\"are\").replace(\"\\'ll\",\"will\")\n string = string.replace(\"\\n\",\" \").replace(\"\\(\",\" \").replace(\"\\)\",\" \").replace(\"\\?\",\" \")\n return string.strip().lower()", "title": "" }, { "docid": "733aebfa92aeab15e628f46b8ef567f3", "score": "0.65696675", "text": "def clean_string(string, var = False):\n\t\n\t\"\"\"removes bullet points\"\"\"\n\tstring = re.sub(\"\\\\u2022\", \"\", string) \n\tstring = re.sub(\"\\\\u00b7\", \"\", string)\n\t\n\t\"\"\"removes numbers\"\"\"\n\tstring = re.sub(r\"[0-9]\",\"\", string)\n\t\n\t\"\"\"removes punctuation and other symbols\"\"\"\n\tfilters='!\"\\#$%&()*+-/:;,<=>?@[\\\\]^`{|}~\\t\\n'\n\ttranslate_dict = dict((c, \" \") for c in filters)\n\ttranslate_map = str.maketrans(translate_dict)\n\tfilters2='.'\n\ttranslate_dict2 = dict((c, \"_\") for c in filters2)\n\ttranslate_map2 = str.maketrans(translate_dict2)\n\tstring = string.translate(translate_map)\n\tstring = string.translate(translate_map2)\n\t\n\t\"\"\"removes unnecessary spaces\"\"\"\n\tstring = re.sub('\\s+', ' ', string).strip()\n\treturn string.strip() if var else string.strip().lower()", "title": "" }, { "docid": "ea64f8790b400eb1ebeee0df8962e005", "score": "0.65464705", "text": "def scrub(input_string):\n return ''.join(k for k in input_string if k.isalnum())", "title": "" }, { "docid": "51e1044266a276a31949c9862e9cb341", "score": "0.6529159", "text": "def clean_str(s):\n s = re.sub(r\"[^\\\\p{L}\\\\s]\", \" \", s) # This removes accents, which we want.\n s = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", s) #This removes accents, which we want.\n s = re.sub(r\"\\'s\", \"\", s)\n s = re.sub(r\"\\'ve\", \"have\", s)\n s = re.sub(r\"n\\'t\", \" not\", s)\n s = re.sub(r\"\\'re\", \" are\", s)\n s = re.sub(r\"\\'d\", \" would\", s)\n s = re.sub(r\"\\'ll\", \" will\", s)\n s = re.sub(r\",\", \"\", s) #s = re.sub(r\",\", \" ,\", s)\n s = re.sub(r\"!\", \"\", s)\n # s = re.sub(r\"\\(\", \"\\(\", s)\n # s = re.sub(r\"\\)\", \"\\) \", s)\n s = re.sub(r\"\\?\", \"\", s)\n s = re.sub(r\"\\s{2,}\", \" \", s)\n s = re.sub(r\" \", \" \", s)\n return s.strip().lower()", "title": "" }, { "docid": "4fa9d3230de1b3618dab8d27d9da79b2", "score": "0.6525442", "text": "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? 
\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "title": "" }, { "docid": "34a6d7fc266bd49635e45a9b827113b4", "score": "0.65193295", "text": "def quote_bad_chars(s):\n bad_chars = [\"(\", \")\"]\n for char in bad_chars:\n s = s.replace(char, quotestring(char))\n return s", "title": "" }, { "docid": "fa696b8ef0f86377fc97a61c53be4a2a", "score": "0.65169996", "text": "def clean_word(word):\n return \"\".join([c for c in word.lower() if ord(c) < 128])", "title": "" }, { "docid": "6855c44ec67b1c4012e6a701bfbfa8cd", "score": "0.6516056", "text": "def clean_str(string, TREC=False):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\'s\", \" \\'s\", string) \n string = re.sub(r\"\\'ve\", \" \\'ve\", string) \n string = re.sub(r\"n\\'t\", \" n\\'t\", string) \n string = re.sub(r\"\\'re\", \" \\'re\", string) \n string = re.sub(r\"\\'d\", \" \\'d\", string) \n string = re.sub(r\"\\'ll\", \" \\'ll\", string) \n string = re.sub(r\",\", \" , \", string) \n string = re.sub(r\"!\", \" ! \", string) \n string = re.sub(r\"\\(\", \" \\( \", string) \n string = re.sub(r\"\\)\", \" \\) \", string) \n string = re.sub(r\"\\?\", \" \\? \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip() if TREC else string.strip().lower()", "title": "" }, { "docid": "6855c44ec67b1c4012e6a701bfbfa8cd", "score": "0.6516056", "text": "def clean_str(string, TREC=False):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\'s\", \" \\'s\", string) \n string = re.sub(r\"\\'ve\", \" \\'ve\", string) \n string = re.sub(r\"n\\'t\", \" n\\'t\", string) \n string = re.sub(r\"\\'re\", \" \\'re\", string) \n string = re.sub(r\"\\'d\", \" \\'d\", string) \n string = re.sub(r\"\\'ll\", \" \\'ll\", string) \n string = re.sub(r\",\", \" , \", string) \n string = re.sub(r\"!\", \" ! \", string) \n string = re.sub(r\"\\(\", \" \\( \", string) \n string = re.sub(r\"\\)\", \" \\) \", string) \n string = re.sub(r\"\\?\", \" \\? \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip() if TREC else string.strip().lower()", "title": "" }, { "docid": "6855c44ec67b1c4012e6a701bfbfa8cd", "score": "0.6516056", "text": "def clean_str(string, TREC=False):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\'s\", \" \\'s\", string) \n string = re.sub(r\"\\'ve\", \" \\'ve\", string) \n string = re.sub(r\"n\\'t\", \" n\\'t\", string) \n string = re.sub(r\"\\'re\", \" \\'re\", string) \n string = re.sub(r\"\\'d\", \" \\'d\", string) \n string = re.sub(r\"\\'ll\", \" \\'ll\", string) \n string = re.sub(r\",\", \" , \", string) \n string = re.sub(r\"!\", \" ! \", string) \n string = re.sub(r\"\\(\", \" \\( \", string) \n string = re.sub(r\"\\)\", \" \\) \", string) \n string = re.sub(r\"\\?\", \" \\? \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip() if TREC else string.strip().lower()", "title": "" }, { "docid": "88cb7a61939f17cc222276c67192027a", "score": "0.6514265", "text": "def strip_other_charcter():\n pass", "title": "" } ]
2f40207c805b23dcf4c5f28e87c60024
Return this object's __task_name
[ { "docid": "c734aab3ea8671bfaac122fc4477f21f", "score": "0.9185114", "text": "def task_name(self) -> str:\n return self.__task_name", "title": "" } ]
[ { "docid": "429f8ece8d00bee28363560ebd509627", "score": "0.93508923", "text": "def task_name(self):\n return self._task_name", "title": "" }, { "docid": "633258a65ced3a28d4a6a4094dcccf55", "score": "0.91086996", "text": "def task_name(self):\n return function_or_class_name(self.task)", "title": "" }, { "docid": "567ea23a7690cb6272c7585b0954b07c", "score": "0.9035103", "text": "def get_task_name(self) -> str:\n return self._task_name", "title": "" }, { "docid": "d1fa12dcde70398fc16369e7fae0546a", "score": "0.90049607", "text": "def get_task_name(cls):\n return cls.__name__", "title": "" }, { "docid": "3ada0e086c422faf01fe38cfcb5d7775", "score": "0.87400883", "text": "def taskName(self):\n return self.taskAttrs['Name']", "title": "" }, { "docid": "8985220a1c26624cd829c9412892dbc0", "score": "0.84543914", "text": "def taskname():\n raise NotImplementedError()", "title": "" }, { "docid": "3b1c18cca17717cddcee2a2a209e0e29", "score": "0.80703586", "text": "def getName(self, *args, **kw):\n raise NotImplementedError(\"class TaskPars is not to be used directly\")", "title": "" }, { "docid": "748f2cd3d6293ca95641f2cbee24d5a4", "score": "0.77365124", "text": "def task_run_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"task_run_name\")", "title": "" }, { "docid": "3f1247e5139a0ab2178c359ce143932d", "score": "0.77184147", "text": "def get_task(self):\n return self.task", "title": "" }, { "docid": "3f1247e5139a0ab2178c359ce143932d", "score": "0.77184147", "text": "def get_task(self):\n return self.task", "title": "" }, { "docid": "4e9559ffc999f6bd5ac32c8083b663d0", "score": "0.76786333", "text": "def get_task_name_with_subtasks(self) -> str:\n return self._task_name_with_subtasks", "title": "" }, { "docid": "1e7e81f3395a7c86437b95b99e64550a", "score": "0.76100236", "text": "def task(self):\n return tasks[self.name]", "title": "" }, { "docid": "a1a770544d6be8016d85b4f78a727ddf", "score": "0.76029885", "text": "def task(self):\n return self._data.get('task')", "title": "" }, { "docid": "3ae077f9377ea40d64c60573b503ccf4", "score": "0.75634164", "text": "def __str__(self):\n return (self.task)", "title": "" }, { "docid": "fe04cd9e073f03b603bc9c5669ba6a28", "score": "0.7551182", "text": "def _GetTaskName(log_entry):\n resource_labels = {} if not log_entry.resource else _ToDict(\n log_entry.resource.labels)\n return 'unknown_task' if not resource_labels.get(\n 'task_name') else resource_labels['task_name']", "title": "" }, { "docid": "bf5a58121efb220aa173a0fb8c5d6d92", "score": "0.7356992", "text": "def name(self) -> str:\n return 'timed task'", "title": "" }, { "docid": "ca33f61b0ba63df89c65417fa756e95f", "score": "0.7332869", "text": "def task(self):\n return self._task", "title": "" }, { "docid": "fd1ebeab7691d36c93b4c34f9eaaa003", "score": "0.73244995", "text": "def name(state):\n return TaskState.TASK_STATES[state]", "title": "" }, { "docid": "c19acb1cfb2ff2fa3a2b9b321939b632", "score": "0.7276842", "text": "def task_id(self) -> str:\n return self._task_id", "title": "" }, { "docid": "a0925e73455645366e76d323ff99bf1b", "score": "0.7217091", "text": "def __str__(self):\n return \"Task: '{0.name}' in {0.start_time}-{0.end_time}\".format(self)", "title": "" }, { "docid": "b202b9697d0337e807baf5b3c6d12af7", "score": "0.72047216", "text": "def task_name(self, sha, **opts):\n return sha + '_' + datetime.datetime.now().isoformat()", "title": "" }, { "docid": "3eeb20dd15cb0f13620b1508ef18e218", "score": "0.71483606", "text": "def get_name(self):\n return self.process_name", "title": "" 
}, { "docid": "851d5290b4c7765e49f27f534446864e", "score": "0.7111676", "text": "def _get_name(self):\n return self.__class__.__name__", "title": "" }, { "docid": "91a274d854587877faa9b00807b0e038", "score": "0.70213807", "text": "def _get_id(self):\n if ':' in self.task:\n path = self.get_path()\n t_path, task = (path + '.' + self.task\n if path else self.task).split(':')\n\n # Build the task id by assembling the package name and the class\n # name\n return t_path.split('.', 1)[0] + '.' + task\n\n else:\n return self.task", "title": "" }, { "docid": "1bedfeb81c8a58ac51ccb2cc1976a3c6", "score": "0.7007999", "text": "def getPkgname(self, *args, **kw):\n raise NotImplementedError(\"class TaskPars is not to be used directly\")", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.6966836", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.6966836", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.6966836", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.6966836", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.6966836", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.6966836", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", 
"score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "d825ab0e10d3547e9006ea33606826ba", "score": "0.69652706", "text": "def _get_name(self):\n return self.__name", "title": "" }, { "docid": "50ba29032912aad3b0c7068338ef71ae", "score": "0.6952709", "text": "def task_id(self):\n return self._task_id", "title": "" }, { "docid": "50ba29032912aad3b0c7068338ef71ae", "score": "0.6952709", "text": "def task_id(self):\n return self._task_id", "title": "" }, { "docid": "9fe1f8c573d666f689c033747bef1f23", "score": "0.6926564", "text": "def get_job_name(self):\r\n return self._job_name", "title": "" }, { "docid": "0ee875bd1d84d5edfd84fecd05708a7a", "score": "0.69189084", "text": "def get_name(self):\n return self.__class__.__name__", "title": "" }, { "docid": "c50db2fc7245a4a4899627ed7f7ef15a", "score": "0.6901261", "text": "def get_name(self):\n\n\t\treturn self.__name", "title": "" }, { "docid": "c50db2fc7245a4a4899627ed7f7ef15a", "score": "0.6901261", "text": "def get_name(self):\n\n\t\treturn self.__name", "title": "" }, { "docid": "0433fe19c92408ee95b8c60a1a21706a", "score": "0.68902", "text": "def _get_name___(self):\n return self.__name___", "title": "" }, { "docid": "a8b4f71adad3f566bf7a2166101dcd4b", "score": "0.6881254", "text": "def get_name(self):\n return self.__name", "title": "" }, { "docid": "a8b4f71adad3f566bf7a2166101dcd4b", "score": "0.6881254", "text": "def get_name(self):\n return self.__name", "title": "" }, { "docid": "a8b4f71adad3f566bf7a2166101dcd4b", "score": "0.6881254", "text": "def get_name(self):\n return self.__name", "title": "" }, { "docid": "a8b4f71adad3f566bf7a2166101dcd4b", "score": "0.6881254", "text": "def get_name(self):\n return self.__name", "title": "" }, { "docid": "a8b4f71adad3f566bf7a2166101dcd4b", "score": "0.6881254", "text": "def get_name(self):\n return self.__name", "title": "" }, { "docid": "0c6bc548e7a89ac5de038e382f06f5e7", "score": "0.6875251", "text": "def _get_name(self):\n return self._name", "title": "" }, { "docid": "0c6bc548e7a89ac5de038e382f06f5e7", "score": "0.6875251", "text": "def _get_name(self):\n return self._name", "title": "" }, { "docid": "fe53e85f1e61b875eb17748e09ec38fe", "score": "0.68610036", "text": "def get_name(self):\n return self.__name", "title": "" }, { "docid": "89fbf6c0013eb2e04803a7fc361f412b", "score": "0.68533885", "text": "def get_name(self) -> str:\n return self.__name", "title": "" }, { "docid": "5b34c084b9d60d1798f5fc4e445d95e8", "score": "0.6797522", "text": "def get_name(self):\n pass", "title": "" }, { "docid": "8f660a4609a120427ad900de78ee0fc8", "score": "0.67859423", "text": "def model_task(self):\n return self._task", "title": "" }, { "docid": "4474437ce97c57451932c9753257a40d", "score": "0.67745167", "text": "def name(self):\n if self._name is None:\n return self.cmd\n return self._name", "title": "" }, { "docid": "9558446527bf007c65e38fba3a6dd579", "score": "0.67650396", "text": "def execution_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_name\")", "title": "" }, { "docid": "0d869b8058c30ea0753ef15b17acafe3", "score": "0.67626315", "text": "def name ( self ) :\n return self.__name", "title": "" }, { "docid": "89f51b0286d6d6eaa9ad914c2e1c494d", "score": "0.6762259", "text": "def task_unit(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"task_unit\")", "title": "" }, { "docid": "7cf1d6c716debc9fc1923b589759508c", "score": "0.67495024", "text": "def name(self):\n return self.__name__", "title": "" }, { "docid": 
"7cf1d6c716debc9fc1923b589759508c", "score": "0.67495024", "text": "def name(self):\n return self.__name__", "title": "" }, { "docid": "fe6fdd84ccc83f5a793221e87c0e89e8", "score": "0.6728776", "text": "def name(self):\n\t\treturn self.__class__.__name__", "title": "" }, { "docid": "5512fffe71569edfada9144c917af67e", "score": "0.6724038", "text": "def get_name(self) -> str:\n pass", "title": "" }, { "docid": "5512fffe71569edfada9144c917af67e", "score": "0.6724038", "text": "def get_name(self) -> str:\n pass", "title": "" }, { "docid": "971bf79664bfd79eccba661be12d1c01", "score": "0.671014", "text": "def name( self ) :\n\n return( self.__name )", "title": "" }, { "docid": "9c58d38dc379935c577155c4b43ebe8a", "score": "0.6708219", "text": "def task_name(self, task_name):\n self._task_name = task_name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def 
name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" }, { "docid": "8a81611711eaf11b735e82080fa71d30", "score": "0.6705117", "text": "def name(self):\n return self.__name", "title": "" } ]
7f034481e85837eda6d8b7bd7bd789bb
r""" Resets the various data arrays on the object back to their original state. This is useful for repeating a simulation at different inlet conditions, or invasion points for instance.
[ { "docid": "2193e11289f06efaf8f3a5957103b8c2", "score": "0.6870243", "text": "def reset(self):\n self[\"pore.invasion_pressure\"] = np.inf\n self[\"throat.invasion_pressure\"] = np.inf\n self[\"pore.invasion_sequence\"] = -1\n self[\"throat.invasion_sequence\"] = -1\n self[\"pore.invasion_saturation\"] = -1\n self[\"throat.invasion_saturation\"] = -1\n self[\"pore.cluster\"] = -1\n self[\"throat.cluster\"] = -1\n self[\"pore.trapped\"] = np.inf\n self[\"throat.trapped\"] = np.inf\n self[\"pore.inlets\"] = False\n self[\"pore.outlets\"] = False\n self[\"pore.residual\"] = False\n self[\"throat.residual\"] = False\n for elem in [\"pore\", \"throat\"]:\n for prop in [\"occupancy\"]:\n try:\n del self[elem + \".\" + prop]\n except KeyError:\n pass\n\n # Masks for tracking pores and throats at the interface\n # Basically a quick way of getting to all the elements in the queues\n self._interface_Ts = np.zeros(self.Nt, dtype=bool)\n self._interface_Ps = np.zeros(self.Np, dtype=bool)\n if hasattr(self, \"invasion_running\"):\n del self.invasion_running", "title": "" } ]
[ { "docid": "f0eddd2f77ea202d0c6aaaef3004d993", "score": "0.7440329", "text": "def reset(self):\n self.set_t(0.0)\n self.m = 0\n self.n = 0\n self.o = 0\n self.resize_arrays()\n self.clear_ijv()\n self.clear_ts()", "title": "" }, { "docid": "6342d546e35b7f58eb6c45c1143a8a0a", "score": "0.7396124", "text": "def reset(self):\n self.w = np.zeros(self.n)\n self.z = np.zeros(self.n)", "title": "" }, { "docid": "75c22ae1369db0e86f17a5ee0c67f0c0", "score": "0.7380458", "text": "def __reset(self):\n # self.__xarr = np.zeros(self.__arr.shape)\n # self.__yarr = np.zeros(self.__arr.shape)\n self.__pos = []\n self.__x = False\n self.__y = False\n self.__dlvo_xarray = False\n self.__dlvo_yarray = False", "title": "" }, { "docid": "0527bc3142e66683fd38f11eabaca892", "score": "0.73782796", "text": "def reset(self):\n self.stimuli = zeros(self.dimensions)\n self.present = zeros(self.dimensions)\n self.activity = zeros(self.N)\n\n self.phase = random(self.N) * 2*pi\n self.intrinsic = random(self.N) * self.order + self.base\n self.spikes = zeros(self.N)\n self.phases = self.phase", "title": "" }, { "docid": "9d2a20010169c71cb7e283e34622ba70", "score": "0.72274435", "text": "def reset(self):\r\n self.t = 0.\r\n self._estimates = np.zeros((self._number_of_arms,))\r\n self._counts = np.zeros((self._number_of_arms,))", "title": "" }, { "docid": "00e2cf409cd64b34d4ff7984ccc62c4f", "score": "0.716084", "text": "def resetData(self):\n\t\tself.adj_data = io.imread(self.data_dir)", "title": "" }, { "docid": "b81d2c7e398b9fad4805ae9d7e61e0b7", "score": "0.7138188", "text": "def reset(self):\n self.U, self.R, self.T, self.counter = self.empty_memory_structure()\n self.prev_s = None\n self.prev_a = None", "title": "" }, { "docid": "c25218e71f642e55dece13f617715dea", "score": "0.7132106", "text": "def Reset(self):\n\tif self.impulseRecording:\n\t self.impulses=[]\n\t for d in range(dim):\n\t\tself.impulses.append([[],[]])", "title": "" }, { "docid": "15aba88fb7fe55c70b6383fcb680e4a5", "score": "0.70966005", "text": "def reset_deformation(self):\n self.array_mu_x.fill(0.0)\n self.array_mu_y.fill(0.0)\n self.array_mu_z.fill(0.0)", "title": "" }, { "docid": "9182c5bbd71a9ffc0d9fe6a803baa5ba", "score": "0.7092724", "text": "def reset(self):\n\n self.states_buffer = []\n self.actions_buffer = []\n self.rewards_buffer = []\n self.episode_number = []\n\n self.states_buffer = []\n self.actions_buffer = []\n self.rewards_buffer = []\n self.done = False\n self.action = []\n self.episode_number = 0\n self.episodic_reward = 0\n self.episodic_reward_buffer = []\n self.rolling_reward = []\n self.episodic_loss = 0\n self.rolling_loss = []\n self.record = float('-inf')\n self.number_of_steps = 0\n self.episodic_record = []", "title": "" }, { "docid": "2b64bb648a6c9bb544f5e593135f5be1", "score": "0.70752597", "text": "def __reset(self):\n self.skytmphist = []\n self.ambtmphist = []\n self.timearray = []", "title": "" }, { "docid": "636ab78f444b5374a6d2be0e67d201d2", "score": "0.70632267", "text": "def reset(self):\n\n self._data = []", "title": "" }, { "docid": "bf1e8726494e779151e2b880f985d63b", "score": "0.7034991", "text": "def reset(self):\n\n self.caustic = False\n self.converge = np.zeros(self.converge.shape)\n self.currentItr = 0\n self.zer4UpNm = np.zeros(self.zer4UpNm.shape)\n self.wcomp = np.zeros(self.wcomp.shape)\n self.West = np.zeros(self.West.shape)\n self.zcomp = np.zeros(self.zcomp.shape)\n self.zc = np.zeros(self.zc.shape)\n self.pMask = None\n self.cMask = None\n self.pMaskPad = None\n self.cMaskPad = None", "title": 
"" }, { "docid": "d364ba5de6320b8014a258079b38c02d", "score": "0.7026247", "text": "def reset(self):\n\t\tn_sensors = self.n_sensors\n\t\tself.arr[:,0] = rnd(n_sensors)\n\t\tself.arr[:,1] = rnd(n_sensors)\n\t\tself.source_pos[:] = rnd(2)\n\t\tself.set_sensors()", "title": "" }, { "docid": "ef5818820f84e968be23076d2e278516", "score": "0.70139503", "text": "def reset_volatile_data(self):\n self.glue_until_normal_mode = False\n self.view.run_command('unmark_undo_groups_for_gluing')\n self.processing_notation = False\n self.non_interactive = False\n self.reset_during_init = True", "title": "" }, { "docid": "89ceca3adf232f9f0870066b3bc0641c", "score": "0.7003352", "text": "def reset(self):\r\n self._data = []", "title": "" }, { "docid": "b80924b00a00272fe72a3e60a1e30861", "score": "0.70008993", "text": "def reset(self):\r\n self.__data = None\r\n self.__blocks = {}", "title": "" }, { "docid": "80f0e42be6d288bf04471b9b19423640", "score": "0.6989219", "text": "def reset(self):\n self.values = None\n self.keys = None\n self.mask = None", "title": "" }, { "docid": "a6cb98c2cb3d1e906c57dc849084f1ed", "score": "0.69687486", "text": "def reset(self):\n self._world = np.zeros(self._shape)", "title": "" }, { "docid": "9d61ff869dcbf774682c2d77febeac95", "score": "0.6934378", "text": "def reset(self):\n for i in range(3):\n self._axis[i].reset()", "title": "" }, { "docid": "1d4ce2063e1ef221b8f405ddad42d893", "score": "0.6931701", "text": "def reset(self):\n for r in self.robots:\n r.reset()\n p.stepSimulation()", "title": "" }, { "docid": "3c23877c49a17e12a86c90b7c416a866", "score": "0.6926605", "text": "def reset(self):\n self.state = copy.copy(self.mu)\n self.mu = self.mu_init * np.ones(self.size)", "title": "" }, { "docid": "99f3935bfb571b87e08d99432b75baf3", "score": "0.69247574", "text": "def reset_state(self):\n\n self.h.data.zero()\n self.c.data.zero()", "title": "" }, { "docid": "ee0c3c473a6baff2604c9bca61ee5957", "score": "0.69190323", "text": "def reset(self):\n self.position_history = []\n self.drone_position = [0,0,0]\n self.drone_motors_speed = [0,0,0,0]", "title": "" }, { "docid": "8df76d58fb70712710f7503591a9330a", "score": "0.6918727", "text": "def reset():\n state.strings[:] = []\n state.integers[:] = []\n state.externals[:] = []\n state.booleans[:] = []\n state.routines[:] = []\n state.groupings[:] = []\n state.stringescapes[:] = []\n state.stringdefs.clear()", "title": "" }, { "docid": "9ec2c959e5b73380fc8ee2acfd356065", "score": "0.69184834", "text": "def reset(self):\n self.samples = 0\n self._counts = np.zeros(self.bins)\n self._velocity = np.zeros(self.bins)", "title": "" }, { "docid": "59a46e6783cdd77630dfa1c1e84208ad", "score": "0.6916687", "text": "def reset(self):\n self.control_counter = 0 # Store the last roll, pitch, and yaw.\n self.last_rpy = np.zeros(3) # Initialized PID control variables.\n self.last_pos_e = np.zeros(3)\n self.integral_pos_e = np.zeros(3)\n self.last_rpy_e = np.zeros(3)\n self.integral_rpy_e = np.zeros(3)", "title": "" }, { "docid": "545aedaad08ca63250145c7c14ddd2ce", "score": "0.69158876", "text": "def reset(self):\n self.state = np.zeros((3,3,3), dtype=float)\n self.legal_next_states = self.get_legal_next_states(self.state)\n self.done = False\n self.outcome = None\n self.move_count = 0\n self.history = []", "title": "" }, { "docid": "45cc6294209fc5076b0ab55b59139c83", "score": "0.6905427", "text": "def reset_recalc(self):\n self.reset_mask(outliers=True)\n\n if self.psdfits:\n self._arrays = OrderedDict()\n self._dissens = OrderedDict()\n 
self._trap_stiffness = OrderedDict()\n self._drag = OrderedDict()\n self._red_chi2 = OrderedDict()\n self._offset = OrderedDict()\n self._gen_height_data()\n self._construct_arrays()", "title": "" }, { "docid": "77ace1e0716f572dee08efb153419af8", "score": "0.6880386", "text": "def reset(self):\n self.state_vector = np.zeros_like(self.state_vector)\n self.t_seconds = 0.0", "title": "" }, { "docid": "c575d0600c7e626768a783d16a31c09f", "score": "0.6863954", "text": "def reset(self):\n\n self.W.set_value(np.zeros((self.n_in, self.n_out), \n dtype=theano.config.floatX), \n borrow=True)\n self.b.set_value(np.zeros((self.n_out,), \n dtype=theano.config.floatX), \n borrow=True)", "title": "" }, { "docid": "2d186e0b961a9596eb2113d4cc61675a", "score": "0.6851096", "text": "def _reset(self):\n self.data_segments = []\n self.interpolated_loudness_buffer = []\n self.interpolated_pitch_buffer = []\n self.is_playing = True\n self.playback_pos = 0\n self.song_ended = False\n self.track = None\n self.track_duration = None\n self.track_id = None\n self.visualizer.reset()", "title": "" }, { "docid": "39ea2cad700742c407865ae3fa5e5f67", "score": "0.6828733", "text": "def reset(self, **kwargs):\n reset_ctrlpts = kwargs.get('ctrlpts', False)\n reset_evalpts = kwargs.get('evalpts', False)\n\n if reset_ctrlpts:\n self._control_points = self._init_var(self._array_type)\n self._control_points2D = self._init_var(self._array_type)\n self._control_points_size_u = 0\n self._control_points_size_v = 0\n self._bounding_box = self._init_var(self._array_type)\n\n if reset_evalpts:\n self._surface_points = self._init_var(self._array_type)\n\n # Reset vertices and triangles\n self._tsl_component.reset()", "title": "" }, { "docid": "3594825cbde20dfa5b3eed438d26e6e6", "score": "0.6820475", "text": "def clear_arrays(self):\n self.clear_fg()\n self.clear_xy()\n self.clear_z()", "title": "" }, { "docid": "2947d0c7b24a02767eb5d03564b0f651", "score": "0.6801706", "text": "def reset_all(self):\n\n self.sht = False\n self.uppersht = 0.2\n self.upperlng = 1\n self._factors = None\n self.alpha = 0.01\n self.kindbench = True\n self.benchindex = None\n self._benchweights = None\n self.allowTO = False\n self.turnover = 0.05\n self.allowTE = False\n self.TE = 0.05\n\n self.reset_risk_constraints()\n self.reset_linear_constraints()\n self.reset_inputs()", "title": "" }, { "docid": "82da412726c8c4d49d8463347d17495a", "score": "0.67915714", "text": "def reset(self):\n self._coco_gt = COCO()\n self._detections = np.empty(shape=(0, 7))\n self._dataset = {\n 'images': [],\n 'annotations': [],\n 'categories': []\n }\n self._images = set()\n self._annotation_id = 1\n self._category_ids = set()", "title": "" }, { "docid": "65d0638d80f09ab8308421a4bd0d3932", "score": "0.67882186", "text": "def reset(self):\n self._arguments = None\n self._acceleration = None\n self._angle = None\n self._clockwise = None\n self._configuration = None\n self._debug = None\n self._deceleration = None\n self._pdb = None\n self._pudb = None\n self._section = None\n self._set = None\n self._silent = None\n self._timeout = None\n self._velocity = None\n return", "title": "" }, { "docid": "a576a695ad69d7857ca309e6161e849d", "score": "0.6779723", "text": "def reinit(self):\n\n # Reset all the cells in the map\n for p in self.map.values():\n p.reset()\n\n self.clear_source()\n self.clear_target()", "title": "" }, { "docid": "2539668b1f6e2987fba1ebb73c12f0a8", "score": "0.6775998", "text": "def reset(self):\n self.episode_num += 1\n self.num_steps = 0\n self.world.reset()\n 
self.robot_interface.reset()\n if (self.world.is_sim):\n \"\"\"\n Insert special code for resetting in simulation:\n Examples:\n Randomizing object placement\n Randomizing camera parameters.\n \"\"\"\n pass\n else:\n \"\"\"\n Insert code for reseting in real world.\n \"\"\"\n pass\n\n observation = self.get_observation()\n\n #self._reset_objects()\n self._random_reset_objects()\n\n self.robot_interface.set_gripper_to_value(0.0)\n self.curr_state = self.state_list[0]\n\n return observation #np.zeros((FEATURE_LEN), np.float32)", "title": "" }, { "docid": "4ba7c5c40a316808e18c64ed8c41d049", "score": "0.6775315", "text": "def reset(self) -> None:\n self.targets = []\n self.targets_engaged = 0\n self.players = []\n self.boxes = []\n self.platforms = []\n self.thinkingbox = None\n self.player_on_ground = False\n self.player_in_thinkingbox = False", "title": "" }, { "docid": "b7ca9ac067204640f2d79fe8e6e65bbf", "score": "0.6770156", "text": "def reset_state(self):\n super(Reader, self).reset_state()\n self.data1d = []\n self.data2d = []\n self.raw_data = None\n self.multi_frame = False\n self.data_frames = []\n self.data_uncertainty_frames = []\n self.errors = []\n self.logging = []\n self.q_names = []\n self.mask_name = u''\n self.i_name = u''\n self.i_node = u''\n self.i_uncertainties_name = u''\n self.q_uncertainty_names = []\n self.q_resolution_names = []\n self.parent_class = u''\n self.detector = Detector()\n self.collimation = Collimation()\n self.aperture = Aperture()\n self.process = Process()\n self.trans_spectrum = TransmissionSpectrum()", "title": "" }, { "docid": "654b5c1f404aac4292608d8c05b02209", "score": "0.67695856", "text": "def reset(self):\n self.mu = np.zeros((self.plan_hor,2))\n self.sigma = np.ones((self.plan_hor,2))", "title": "" }, { "docid": "605edbdb1bcf00c62f304587c274b63c", "score": "0.67686427", "text": "def clear_data(self):\r\n self.oat_values = []\r\n self.rat_values = []\r\n self.mat_values = []\r\n self.timestamp = []\r\n return", "title": "" }, { "docid": "72eab9efa26b7b1b25ba3e402b29e402", "score": "0.6761589", "text": "def reset_data(self):\n\n q, Iq, dIq, dq = self.get_raw_data()\n\n self.q = q\n self.Iq = Iq\n self.dIq = dIq\n self.dq = dq\n\n self.__incoh_bkgd = 0\n self.__smear_tags = np.array([])\n self.__trim_tags = np.array([])\n self._update_remove_tags()", "title": "" }, { "docid": "5e00ae2dbd96cb683a9baf72cba3dc25", "score": "0.67602587", "text": "def reset(self):\n self.observation = {}\n self.state.reset()\n self.policy.reset()\n self.turn_id = 1", "title": "" }, { "docid": "28107002bbff586779bf0084f1d2b9cf", "score": "0.6752454", "text": "def reset(self):\n ddict = self._reqrep(cmd=\"reset\")\n self.rgb_array = ddict.pop(\"rgb_array\", None)\n return ddict.pop(\"obs\"), ddict", "title": "" }, { "docid": "31952788a116bc2cf1003b14d2e4aa20", "score": "0.67475665", "text": "def reset(self):\n self._mapMaker.reset()\n self._resetVariables()", "title": "" }, { "docid": "3dd9e0ac9b41def447d391ba2dad2613", "score": "0.674446", "text": "def reset(self) -> None:\n self.t = torch.zeros(self.num_envs, dtype=float, device=self.device)", "title": "" }, { "docid": "3f75adae89afc0450b4e510c73040d45", "score": "0.67414546", "text": "def _reset(self):\n self.model = None\n self.parameters = None\n self.mcmc = None\n self._point_estimates = None\n self._n_abs = None", "title": "" }, { "docid": "9a41f677d11bca3e9f6f1b5a2d0949bf", "score": "0.67370063", "text": "def reset(self):\n self.memory.reset_multisteps(-1)", "title": "" }, { "docid": 
"b1222f76d0adde6b56b4b3d0b108dadd", "score": "0.6733449", "text": "def reset_simulation(self):\n self.__state = DRAW_ONCE\n self.__reset_world()", "title": "" }, { "docid": "a2bb83a8aed660eecc7a29013a69ba9a", "score": "0.6720943", "text": "def reset(self):\n self._i = [-1, [0]]\n self._store = {}", "title": "" }, { "docid": "b63c06bf634caef53ad01b978837ca4c", "score": "0.6719965", "text": "def resetValues(self):\n self.state = []\n self.cost = 0", "title": "" }, { "docid": "0edbcf890e19217b518b7820daac89a9", "score": "0.6715775", "text": "def reset(self):\n self.memory.reset()", "title": "" }, { "docid": "0edbcf890e19217b518b7820daac89a9", "score": "0.6715775", "text": "def reset(self):\n self.memory.reset()", "title": "" }, { "docid": "0edbcf890e19217b518b7820daac89a9", "score": "0.6715775", "text": "def reset(self):\n self.memory.reset()", "title": "" }, { "docid": "837d53766b891a3802c8d56870cdad15", "score": "0.6715199", "text": "def reset(self) -> np.ndarray:\n self.obs = self.env.reset()\n return np.array(self.obs['board']).reshape((self.rows, self.columns, 1))", "title": "" }, { "docid": "d5f0e72f3bca2436b434bfb7cdeb0c2c", "score": "0.6714819", "text": "def reset(self):\n ob = self.env.reset()\n state = np.concatenate([ob] * self.action_repeat)\n return state", "title": "" }, { "docid": "6eeb82c2d94a11f5c59ed7bf5817e8b3", "score": "0.6708776", "text": "def reset_state(self):\n self.state = np.zeros((self.state_size, 1), dtype=self.typefloat)", "title": "" }, { "docid": "02dd7bfede24ebdc9f88e8c030414752", "score": "0.6703852", "text": "def reset(self):\n self.values.clear()\n orig_pos = self._get_orig_position()\n self.body.position = pymunk.Vec2d(orig_pos.x * self.scale, orig_pos.y * self.scale)\n self.body.angle = math.radians(self._get_orig_orientation())\n self.body.velocity = (0, 0)\n self.body.angular_velocity = 0\n for obj in self.side_bar_sprites:\n obj.reset()", "title": "" }, { "docid": "8477273c298fdb612fa0edd6eaced5f1", "score": "0.66990304", "text": "def reset(self):\n\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "3dc4288ef4ab303e059954ed7545d537", "score": "0.6694735", "text": "def _reset( self ):\n vs = self._variable_names\n ic = self.initial_conditions\n # delete what's there\n for v in vs:\n if hasattr( self, 'posterior_' + v ):\n delattr( self, 'posterior_' + v )\n # reset hyperparameters\n for v in vs:\n getattr( self, '_reset_theta_' + v )()\n # reset latent variables\n for v in vs:\n getattr( self, '_reset_%s_vec' % v )()\n # set up posteriors\n for v in vs:\n kws = {}\n kws[ 'theta_' + v ] = getattr( self, 'theta_' + v )\n kws[ v + '_vec' ] = getattr( self, v + '_vec' )\n p = self._create_posterior( **kws )\n p.is_point_estimate = True\n setattr( self, 'posterior_' + v, p )\n for vi in vs:\n pi = getattr( self, 'posterior_' + vi )\n for vj in vs:\n pj = getattr( self, 'posterior_' + vj )\n if not (vi == vj):\n setattr( pi, 'posterior_'+vj, pj )", "title": "" }, { "docid": "24673326a2a8eb554fc2ee9d8e5683ca", "score": "0.6692918", "text": "def reset(self):\n self._samples = None", "title": "" }, { "docid": "6bb703673eeee7ed39141013a8f8ccaf", "score": "0.66881394", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp = np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = 
self.a[creating_index_matrix_without_ind(self.ind_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "6bb703673eeee7ed39141013a8f8ccaf", "score": "0.66881394", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp = np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = self.a[creating_index_matrix_without_ind(self.ind_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "6bb703673eeee7ed39141013a8f8ccaf", "score": "0.66881394", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp = np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = self.a[creating_index_matrix_without_ind(self.ind_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "6bb703673eeee7ed39141013a8f8ccaf", "score": "0.66881394", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp = np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = self.a[creating_index_matrix_without_ind(self.ind_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "6bb703673eeee7ed39141013a8f8ccaf", "score": "0.66881394", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp = np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = self.a[creating_index_matrix_without_ind(self.ind_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "6bb703673eeee7ed39141013a8f8ccaf", "score": "0.66881394", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp 
= np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = self.a[creating_index_matrix_without_ind(self.ind_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "f4d6493ed17b150ceb614bf8177185d3", "score": "0.6681414", "text": "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, \"scale_\"):\n del self.scale_\n del self.min_\n # del self.n_samples_seen_\n del self.data_min_\n del self.data_max_\n del self.data_range_", "title": "" }, { "docid": "9e735333bd2c7b34bd2c579ae637a615", "score": "0.6672569", "text": "def reset(self):\n if self.macro_average:\n self.num_inst = 0\n self.sum_metric = 0.0\n else:\n class_count = self.num_classes - 1 if self.ignore_bg else self.num_classes\n self.area_inter = np.zeros((class_count,), np.uint64)\n self.area_union = np.zeros((class_count,), np.uint64)", "title": "" }, { "docid": "cf3c5905b9e6cb8ffc9fa62ac8b08218", "score": "0.667141", "text": "def reset(self):\n self.p_value = 0\n self.window = np.array([])\n self.change_detected = False", "title": "" }, { "docid": "82bced7df2bf4221c67c3823f202afca", "score": "0.66674346", "text": "def _reset(self):\n # reset theta-specific weights\n self.alignment.weights = None\n\n # also reset frequencies since these\n # were based on the weights (and the\n # given pseudo-count)\n self.alignment._frequencies = None\n self.alignment._pair_frequencies = None\n self.regularized_frequencies = None\n self.regularized_pair_frequencies = None\n\n # reset covariance matrix and its inverse\n self.covariance_matrix = None\n self.covariance_matrix_inv = None", "title": "" }, { "docid": "987829ca61b700b0937d098793ceb3a3", "score": "0.66646814", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp = np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = self.a[creating_index_matrix_without_ind(self.a_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "987829ca61b700b0937d098793ceb3a3", "score": "0.66646814", "text": "def _reset(self):\n\n if self.test_only == False:\n tmp = np.random.randint(self.data_index_range)\n tmp2 = self.data_index_range / self.batch_size\n self.start_line = np.arange(tmp, tmp+tmp2*self.batch_size, tmp2)\n else:\n tmp = 0\n self.start_line = np.arange(self.batch_size)\n self.start_line = np.mod(self.start_line,self.data_index_range)\n \n data_all = self.a[creating_index_matrix_without_ind(self.a_hash,self.memory_frame)]\n data_all = np.reshape(data_all,[-1,self.Da * self.memory_frame])\n \n self.kde_generator = KernelDensity(kernel='tophat', 
bandwidth=0.002).fit(data_all)\n self.kde_estimator = KernelDensity(kernel='tophat', bandwidth=0.05).fit(data_all)", "title": "" }, { "docid": "1ca3ccdf89b486c10cac4a84e11067bd", "score": "0.6661412", "text": "def reset_vars(self):\n self.rgb_img = None\n self.gray_img = None\n self.segments = []", "title": "" }, { "docid": "00ef631e9cfa3de8fe2d6e1777e62ab3", "score": "0.6659344", "text": "def _reset(self):\n # Grid with two initial values\n self._state = np.zeros(shape=(4,4), dtype=np.int64)\n a, b = random.sample([(x,y) for x in range(4) for y in range(4)], 2)\n self._state[a[0]][a[1]] = 2\n self._state[b[0]][b[1]] = 2\n\n self._episode_ended = False\n\n # Returns \"restart\" TimeStep with the state of the game\n return ts.restart(self._state)", "title": "" }, { "docid": "e4d69397c03a9d3974ce5d8da65ccae0", "score": "0.665904", "text": "def reset(self):\n self._index = self._size = 0\n self.indice = []", "title": "" }, { "docid": "77e6cd25628a4591ffc5f6f6c801df92", "score": "0.6654214", "text": "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "title": "" }, { "docid": "77e6cd25628a4591ffc5f6f6c801df92", "score": "0.6654214", "text": "def reset(self):\n self.sim.reset()\n state = np.concatenate([self.sim.pose] * self.action_repeat) \n return state", "title": "" }, { "docid": "f2b5e77b4a1d449b40f700b574e93c89", "score": "0.6652277", "text": "def reinit(self):\n \n self.source = set()\n self.target = set()\n \n # Reset all the cells in the map\n for p in self.map.values():\n p.reset()", "title": "" }, { "docid": "991e188beb35efb0ebf7ba304d2a6683", "score": "0.665136", "text": "def reset(self, **kwargs):\n reset_ctrlpts = kwargs.get('ctrlpts', False)\n reset_evalpts = kwargs.get('evalpts', False)\n\n if reset_ctrlpts:\n self._control_points = self._init_var(self._array_type)\n self._bounding_box = self._init_var(self._array_type)\n\n if reset_evalpts:\n self._curve_points = self._init_var(self._array_type)", "title": "" }, { "docid": "f42101fa21baa1003d329720d1946066", "score": "0.6648822", "text": "def _reset_all(self):\n for i in range(self.len):\n self._reset_i(i)", "title": "" }, { "docid": "2db3f5f77bdb5699be84f8acba9feb3f", "score": "0.6646749", "text": "def reset(self):\n self.u = np.zeros(self.d, dtype=np.int32)\n self.q = np.zeros(self.d, dtype=np.double) + self.fudge", "title": "" }, { "docid": "42053d3396d80c2d0ca8b766f21e1d2b", "score": "0.6639694", "text": "def reset(self) -> None:\n self._data = {}", "title": "" }, { "docid": "647cf6a2e46cc4c1be99994d846b2de2", "score": "0.6637828", "text": "def reset(self):\n self.V = np.zeros((self.n * self.n,))\n self.Vc = np.zeros((self.n * self.n, ))", "title": "" }, { "docid": "f772218ae3bbbc81910876027bc39872", "score": "0.6633487", "text": "def reset(self):\n # Reset the history of tracked variables\n self.history = {var_name: [] for var_name in self.track_vars}\n\n # Set the sow variables to their initial levels\n for var_name in self.sow_state:\n self.sow_state[var_name] = self.sow_init[var_name]\n\n # Reset each AgentType in the market\n for this_type in self.agents:\n this_type.reset()", "title": "" }, { "docid": "0f88b4892434e91728506def15362e06", "score": "0.6632283", "text": "def reset(self):\n self.inSettings = None\n self.outSettings = None", "title": "" }, { "docid": "3d87eef017e49d3310e28545d1134673", "score": "0.6630578", "text": "def reset(self):\n self.framecoord = []", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", 
"text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" }, { "docid": "8739b336d0fd8f95b8d86dcd4860f5a9", "score": "0.662839", "text": "def reset(self):\n self.state = copy.copy(self.mu)", "title": "" } ]
1f541f95fbd8bd940dea6d66b7fb9ccc
Retrieve a list of all categories.
[ { "docid": "e3e9bd171ad02482f1c9fb8c3af4bed6", "score": "0.0", "text": "def get_category() -> jsonify:\n\tcategories = []\n\tcategory_results = db.session.query(IncidentCategory).all()\n\tfor category in category_results:\n\t\tnew_category = {}\n\t\tnew_category['id'] = category.id\n\t\tnew_category['name'] = category.name\n\t\tcategories.append(new_category)\n\treturn jsonify({\"Data\":categories})", "title": "" } ]
[ { "docid": "07b4db07f5ded899901b6122913b1154", "score": "0.8321011", "text": "def get_all_categories():\n categories_dict = get_categories_dict_db()\n return jsonify(categories_dict)", "title": "" }, { "docid": "d1e978a0dd31186c3637d6d570a03874", "score": "0.82590973", "text": "def list_categories(\n db: Session, \n ):\n return db.query(models.Category).all()", "title": "" }, { "docid": "27b25b2717d9a4b2861042a9c7a89cfb", "score": "0.81627494", "text": "def get(self):\n return CategoryRepository.get_all()", "title": "" }, { "docid": "85bc65db7843ff5ccfbf44053a31ce62", "score": "0.80402356", "text": "def get_categories():\n categories = session.query(Category).all()\n return categories", "title": "" }, { "docid": "21799d85a19cce03bb5aa8f886a0d50b", "score": "0.7926472", "text": "def getCategories(self): \n self.send(\"getCategories\")\n return self.readResponseAsArrayConvertSpecial()", "title": "" }, { "docid": "a0dfd177d0f27da369f0fdc0a0134d7d", "score": "0.78496766", "text": "def api_all_categories():\n\n categories = session.query(Categories).all()\n\n return jsonify(categories=[i.serialize for i in categories])", "title": "" }, { "docid": "c0d87e13f30ff33712accc9ee28dce9b", "score": "0.77827084", "text": "def get_categories():\n return Category.objects.all().order_by('name')", "title": "" }, { "docid": "8306171d3b09fabf19cc97e087b77221", "score": "0.77070165", "text": "def get_categories():\n url = EVENTBRITE_URL + 'categories/?token=' + os.environ['EVENTBRITE_TOKEN']\n r = requests.get(url)\n categories = r.json()\n categories_list = {'categories':categories['categories']}\n return categories_list", "title": "" }, { "docid": "d95a54975968e4528ea72ce71ad337d1", "score": "0.77058005", "text": "def get_categories():\n categories = get_all_categories()\n status_code = HTTPStatus.OK\n if categories:\n RESPONSE_BODY[\"message\"] = \"OK. Categories List\"\n RESPONSE_BODY[\"data\"] = categories\n else:\n RESPONSE_BODY[\"message\"] = \"OK. 
No categories found\"\n RESPONSE_BODY[\"data\"] = categories\n status_code = HTTPStatus.NOT_FOUND\n\n my_info = {\"categories\": categories, \"status_code\": status_code}\n\n return render_template('categories.html', my_info=my_info)", "title": "" }, { "docid": "ca459d567dd0670f5dd16046c23e4d86", "score": "0.7702868", "text": "def get_categories(self):\n\t\tt = self.table_category\n\t\tresults = select([t.c.category], t.c.property==self.id).execute().fetchall()\n\t\treturn [x['category'] for x in results]", "title": "" }, { "docid": "9cdae219e05ffe8f6ae730e0d3c2590a", "score": "0.7685155", "text": "def all_categories():\n return reuters.categories()", "title": "" }, { "docid": "138d2b588d7187739ee4e8e504a8053d", "score": "0.7598455", "text": "def retrieve_categories():\n try:\n categories = Category.query.all()\n if not categories:\n abort(404)\n return jsonify({\n 'success': True,\n 'categories': [category.type for category in categories]\n }), 200\n except Exception as error:\n raise error\n finally:\n db.session.close()", "title": "" }, { "docid": "ef7a6c8551094a7c8e503bab3eae0fd1", "score": "0.753383", "text": "def categories(self):\n return self._categories.values()", "title": "" }, { "docid": "1c591c831c0ad7f42ce31654a59072b6", "score": "0.7520114", "text": "def getCategories(self):\n return self.categories", "title": "" }, { "docid": "c1e404b89e8cd4e4cfd687cb65311491", "score": "0.7496491", "text": "def get_all_categroies(self):\n return Izettle.product_url.format('categories')", "title": "" }, { "docid": "7552b8c4a3d51c935f89ed7ac6994a6e", "score": "0.74691856", "text": "def categories(self):\n return self._data[\"categories\"]", "title": "" }, { "docid": "5d8e23049e4f7db313764fd57c60c392", "score": "0.7398001", "text": "def GetCategories(self):\n return self.get_iface().GetCategories()", "title": "" }, { "docid": "f0de6094227a0865ce5da26e9935caa0", "score": "0.7378209", "text": "def categories(self, page=None, query=None):\n if page is None:\n page = self.current_page\n return self._get(self.CATEGORIES, query, page)", "title": "" }, { "docid": "ecc0b599122c6b02a4b119fee1e4bbe7", "score": "0.7354935", "text": "def categories(self, limit='max', hidden=0, **evil):\n #typecheck\n titles = '|'.join(p.title for p in self._things)\n params = {\n 'action': 'query',\n 'titles': titles,\n 'prop': 'categories',\n 'clprop': 'sortkey|timestamp|hidden',\n 'clshow': ('hidden'\n if hidden == 1\n else ('!hidden'\n if hidden == -1\n else None)),\n 'cllimit': int(limit) if limit != 'max' else limit\n }\n params.update(evil)\n return self._mklist(params, 'categories', Page, Page)", "title": "" }, { "docid": "01606e3ee65c8a203496ddf6b22b3d95", "score": "0.7327609", "text": "def categories(self):\n return self._categories", "title": "" }, { "docid": "01606e3ee65c8a203496ddf6b22b3d95", "score": "0.7327609", "text": "def categories(self):\n return self._categories", "title": "" }, { "docid": "eaf861f25ef19ba93bf41d7e23847295", "score": "0.7325791", "text": "def get_shop_category_list(self, **kwargs):\n return self.client.execute(\"shop_categorys/get_shop_category_list\", \"GET\", kwargs)", "title": "" }, { "docid": "f61bc81cd1d834cf2e1d895d62408fe7", "score": "0.72881585", "text": "def get_categories(self) -> CategoriesResponse:\n resp = self.__client.request(\n self.__api.op['get_universe_categories']())\n return CategoriesResponse(resp)", "title": "" }, { "docid": "a80b3783605c08954c6d0e4e17af80ef", "score": "0.72818637", "text": "def buildAllCategories(cls):\r\n\r\n # Don't build if there are any 
categories in the datastore already\r\n if cls.query().get():\r\n return\r\n root_category = categories.ctree\r\n cls.buildCategory(root_category, None)", "title": "" }, { "docid": "7f7b6f4781f0ae753dd97c1b9de78f94", "score": "0.7271131", "text": "def categories(self):\n return list(self.category_tables.keys())", "title": "" }, { "docid": "c63b756dc5b2998944c8822a465b8b2b", "score": "0.72474355", "text": "def getCategories(self):\n return self.server.metaWeblog.getCategories(1, self.username, self.password)", "title": "" }, { "docid": "68dfa13465511139b8624dad91107a7d", "score": "0.7198814", "text": "def Categories(self):\n return self._categories", "title": "" }, { "docid": "253e688810d921ce8113280f9d269fce", "score": "0.7188421", "text": "def getCatalog():\n categories = session.query(Category).options(\n joinedload(Category.items)).all()\n return categories", "title": "" }, { "docid": "8b51486c2e6bd89e2b545310e31ef637", "score": "0.7176542", "text": "def _get_all_categories(self):\n test_path = os.path.join(os.getcwd(), self._error_file)\n win_path = Path(test_path)\n ns = self._sh.NameSpace(str(win_path.parent))\n item = ns.ParseName(str(win_path.name))\n\n for category_num in range(350):\n category_name = str(ns.GetDetailsOf(None, category_num))\n category_value = str(ns.GetDetailsOf(item, category_num))\n\n if category_name != '' or category_value != '':\n self._categories.append(category_name)", "title": "" }, { "docid": "620d83d547fa9de71c4c82a07e98926d", "score": "0.71573055", "text": "def List_Categories(self):\n xml_code = self.Get_Categories()\n tree = xml.etree.ElementTree.fromstring(xml_code)\n for node in tree.findall('payload/category'):\n print(node.find('id').text, node.find('name').text)\n return", "title": "" }, { "docid": "dcceaf1f74bc8c595424619ed0ca793e", "score": "0.714214", "text": "def categories(self):\n return self.__categories", "title": "" }, { "docid": "2bb5d0532b462985a18abca1c7f00020", "score": "0.71405625", "text": "def get_categories():\n categories = []\n\n all = CatalogCategoryModel.objects.select_related(\"description_page\").all()\n for category in all:\n if not category.parent:\n categories.append(category)\n return categories, all", "title": "" }, { "docid": "aebeabb0f5a652d2a99e268127968137", "score": "0.7114384", "text": "def get_account_categories(self):\n return self.get(\"/account-categories\")", "title": "" }, { "docid": "b6948f416fc0803842947ba19fd91137", "score": "0.70829785", "text": "def get_cats(self):\n\t\treturn tuple(self.store.find(categories.Category))", "title": "" }, { "docid": "d0a87a9506765a34e223b48ed620a42a", "score": "0.7071841", "text": "def list(self, request):\n \n categories = Category.objects.all()\n\n serializer = CategorySerializer(\n categories, many=True, context={'request': request})\n return Response(serializer.data)", "title": "" }, { "docid": "5243b8edee4980e486d2b6cadbe477fd", "score": "0.7064833", "text": "def get_categories():\n all_categories = mongo.db.categories.find()\n all_plant_types = mongo.db.plant_types.find()\n all_shade_tolerance = mongo.db.shade_tolerance.find()\n return render_template(\"categories.html\",\n categories=all_categories,\n plant_types=all_plant_types,\n shade_tolerance=all_shade_tolerance)", "title": "" }, { "docid": "ea019b0871941b58810ecf773179d3fd", "score": "0.7037482", "text": "def categories(self, count: int = 1, offset: int = 0) -> list:\n\n @dataclass\n class Category:\n id: int\n title: str\n clues_count: int\n\n response = self.session.get(\n 
f\"{self.baseURL}/api/categories\", params={\"count\": count, \"offset\": offset}\n )\n response.raise_for_status()\n array = response.json()\n cats = []\n for category in array:\n cats.append(Category(**category))\n return cats", "title": "" }, { "docid": "5d694fbb9c2ff0f5ccc8650088f9b958", "score": "0.7036701", "text": "def context_get_categories():\n categories = Category.query.all()\n return dict(categories=categories)", "title": "" }, { "docid": "086eb0ed695bb52d26f2c1c6816dc7c2", "score": "0.7035111", "text": "def fetch_categories(self):\n response = (\n self.youtube.videoCategories()\n .list(part=\"snippet\", regionCode=\"US\")\n .execute()\n )\n return youtube_api.parse_categories(response)", "title": "" }, { "docid": "865aab8e2f645abf1103fe03d7e1109f", "score": "0.7034185", "text": "def get_queryset(self):\n return Category.objects.all()", "title": "" }, { "docid": "1c76b87e7eb83951cc1c13b0647be2d5", "score": "0.7023658", "text": "def categories(self) -> Sequence[str]:\n return pulumi.get(self, \"categories\")", "title": "" }, { "docid": "1c76b87e7eb83951cc1c13b0647be2d5", "score": "0.7023658", "text": "def categories(self) -> Sequence[str]:\n return pulumi.get(self, \"categories\")", "title": "" }, { "docid": "259637ed7522e827d36013f7b8e319a0", "score": "0.6983266", "text": "def category_list(data):\n categories = []\n for category in data.payload.browseResponse.category:\n categories.append(Category(category))\n return categories", "title": "" }, { "docid": "73f967690b333364a1e22532ebd3c1ca", "score": "0.69661725", "text": "def _get_categories(distillery):\n if distillery:\n return distillery.categories.all()", "title": "" }, { "docid": "48ddf874d17523d57f77f2e31f35d29d", "score": "0.6952562", "text": "def get_category_list(self, limit=100):\n\n # list 2 return\n categories_list = list()\n\n cursor = self.cnx.cursor()\n\n # 1rst call we must determine string comparison\n comp_req = \"SELECT * from category LIMIT \" + str(limit)\n\n cursor.execute(comp_req)\n\n for a_row in cursor:\n map_row = dict(zip(cursor.column_names, a_row))\n categories_list.append(Category.buildfrommysql(**map_row))\n\n cursor.close()\n return categories_list", "title": "" }, { "docid": "cf443a0d1db43817a3233c2e280e5efb", "score": "0.6944055", "text": "def test_all_categories(self):\n mysite = self.get_site()\n ac = list(mysite.allcategories(total=10))\n self.assertLessEqual(len(ac), 10)\n for cat in ac:\n self.assertIsInstance(cat, pywikibot.Category)\n\n for cat in mysite.allcategories(total=5, start='Abc'):\n self.assertIsInstance(cat, pywikibot.Category)\n self.assertGreaterEqual(cat.title(with_ns=False), 'Abc')\n for cat in mysite.allcategories(total=5, prefix='Def'):\n self.assertIsInstance(cat, pywikibot.Category)\n self.assertTrue(cat.title(with_ns=False).startswith('Def'))\n # Bug T17985 - reverse and start combined; fixed in v 1.14\n for cat in mysite.allcategories(total=5, start='Hij', reverse=True):\n self.assertIsInstance(cat, pywikibot.Category)\n self.assertLessEqual(cat.title(with_ns=False), 'Hij')", "title": "" }, { "docid": "a02975f6a344d40ac986fe0f7b9b7ef8", "score": "0.6934553", "text": "def listStoreCategories(self):\n\n\t\tprint \"listStoreCategoriesk() <>\"\n\t\tresult=['Fermentables','Hops','Yeast','Consumables','Other']\n\t\treturn {'operation':'listStoreCategories','status' :1,'json' : json.dumps( {\"result\": result})}", "title": "" }, { "docid": "6f1ad997288611383a14550cd58ee7b6", "score": "0.6934504", "text": "def get_categories(self, active_only: bool):\n return 
self.connection.Categories.get(active_only=active_only)['data']", "title": "" }, { "docid": "fda6b3758e20318b0ff9b4bcfc1f6afd", "score": "0.69155836", "text": "def api_categories():\n categories = session.query(Category).all()\n return jsonify(categories=[category.serialize for category in categories])", "title": "" }, { "docid": "5273a5674aec5181c71542555c8e1e46", "score": "0.68959963", "text": "def request_category_list(self):\n try:\n c_response = requests.get(\"https://opentdb.com/api_category.php\")\n categories = c_response.json()\n category_list = {}\n for item in categories[\"trivia_categories\"]:\n category_list[item[\"name\"]] = str(item[\"id\"])\n return category_list\n except requests.exceptions.ConnectionError:\n self.online = False\n return None", "title": "" }, { "docid": "08862cb0a423ce001dcf97c98e0bcf89", "score": "0.688859", "text": "async def categories_list(message: types.Message):\n categories = C.get_all_categories()\n answer_message = \"Categories:\\n\\n\" +\\\n ('\\n'.join([c.name+\"(\"+\", \".join(c.aliases)+\")\" for c in categories]))\n await message.answer(answer_message)", "title": "" }, { "docid": "0551e8a8f7a1111721e161656468ced0", "score": "0.68885297", "text": "def all_categoriesJSON():\n categories = db_session.query(Category).all()\n return jsonify(Categories=[c.serialize for c in categories])", "title": "" }, { "docid": "edf2869965720cd7f366ae58fcef0b1f", "score": "0.6868465", "text": "def list(self, request):\n categories = Category.objects.all().order_by('label')\n\n category = self.request.query_params.get('categoryId', None)\n if category is not None:\n categories = categories.filter(category__id=category)\n\n serializer = CategorySerializer(\n categories, many=True, context={'request': request})\n return Response(serializer.data)", "title": "" }, { "docid": "7f0a2c790b09c956361ac07204aeac5a", "score": "0.6862198", "text": "def get_categories(user, query=None):\n if query is None:\n # Get all\n trans=get_transactions(user)\n else:\n trans=query\n return [i['category'] for i in trans]", "title": "" }, { "docid": "f8e0f74870532a586ea0244dba266dbb", "score": "0.68494135", "text": "def get_categories(self):\n\n if self._categories is None:\n filename = os.path.join(self._cache_dir, \"categories.csv\")\n\n with lockfile.FileLock(self._download_lock_filename):\n # Download categories file if it is out of date.\n if self._is_file_expired(filename):\n self._download_categories_csv(filename)\n\n # Read categories file.\n self._categories = list(csv.DictReader(open(filename)))\n\n # Cleanup suspect data.\n if self._clean:\n self._clean_categories()\n\n return self._categories", "title": "" }, { "docid": "804f9f86341e2baa18a226c4575653d7", "score": "0.68328756", "text": "def get_categories(self, obj):\n return [self.create_category(*cat)\n for cat in getattr(obj, \"categories\", [])]", "title": "" }, { "docid": "fabb760a1476702e4cd6bd596aeb1711", "score": "0.68269396", "text": "def fetch_categories(self, title):\n try:\n categories = self.wiki_local.categories(title)\n return [category[0] for category in categories]\n except:\n print(\"Error while getting category members! 
Returning empty list.\")\n return []", "title": "" }, { "docid": "f3a16664daaa9949b22607887e3752e7", "score": "0.6788669", "text": "def get_categories(request):\n query_dict = get_query_from_json(request)\n categories = models.Category.objects.filter(type='cultural')\n source_categories = []\n if 'source' in query_dict:\n source = models.Source.objects.filter(id=query_dict['source'])\n variables = models.Variable.objects.filter(source=source)\n for c in categories:\n if variables.filter(index_categories=c.id):\n source_categories.append(c)\n return Response(\n serializers.CategorySerializer(source_categories, many=True).data)\n return Response(serializers.CategorySerializer(categories, many=True).data)", "title": "" }, { "docid": "df1efffd626064fb00aeb29d38a7b888", "score": "0.6769771", "text": "def test_get_all_categories(self):\n response = self.client.post(\n '/v2/categories',\n content_type=\"application/json\"\n )\n self.assertEqual(response.status_code, 201)", "title": "" }, { "docid": "ae0953351241c91c54bda36579d641c9", "score": "0.6755753", "text": "def categories(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"categories\")", "title": "" }, { "docid": "ae0953351241c91c54bda36579d641c9", "score": "0.6755753", "text": "def categories(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"categories\")", "title": "" }, { "docid": "566b7fde2522e8586a3971ed86c6b9d5", "score": "0.6747793", "text": "def getProjectCategories(self):\n text = self.generateRequest('/v2.1/ProjectCategories', 'GET','')\n return self.dictToList(json.loads(text))", "title": "" }, { "docid": "0be64dea6df01da9607ab39884a6c4f5", "score": "0.6743968", "text": "def categories_api():\n categories = Catalog.query.all()\n category_list = [c.serialize() for c in categories]\n return jsonify(category_list)", "title": "" }, { "docid": "2e521533a5ace661c59e9868c2024faa", "score": "0.67155653", "text": "def list_cats():\n if \"user\" in session:\n return render_template(\"all_cats.html\",\n cats_list=all_cats)\n else:\n flash(\"Please login first\")\n return redirect(\"/login\")", "title": "" }, { "docid": "18128ff119e0bc4412c746a5a3e23d8b", "score": "0.6707836", "text": "def list_opportunity_categories(self):\n\n top = Config[\"OpportunityCategories\"][\"Endpoints\"][\"GetAll\"][\"DefaultQueryParameters\"][\"Top\"]\n skip = 0\n\n page = self.get_json(Config[\"OpportunityCategories\"][\"Endpoints\"][\"GetAll\"][\"Url\"].format(skip=skip, top=top),\n http_method=Config[\"OpportunityCategories\"][\"Endpoints\"][\"GetAll\"][\"Method\"])\n json_obj = [] + page\n while len(page) > 0:\n skip += top\n page = self.get_json(Config[\"OpportunityCategories\"][\"Endpoints\"][\"GetAll\"][\"Url\"].format(skip=skip, top=top),\n http_method=Config[\"OpportunityCategories\"][\"Endpoints\"][\"GetAll\"][\"Method\"])\n json_obj += page\n\n return [OpportunityCategory.from_json(json_obj=obj) for obj in json_obj]", "title": "" }, { "docid": "2ea7b4ffafb938bb786443469c11fb6e", "score": "0.67040986", "text": "def categories(self):\r\n return Category.objects.filter(\r\n elements__environments__profile=self).distinct().order_by(\"name\")", "title": "" }, { "docid": "964ff580980df1704309eba71c4bfa32", "score": "0.66829693", "text": "def getCategories(self,filter = None, startIndex = None, pageSize = None, sortBy = None, responseFields = None):\r\n\r\n\t\turl = MozuUrl(\"/api/commerce/catalog/storefront/categories/?filter={filter}&startIndex={startIndex}&pageSize={pageSize}&sortBy={sortBy}&responseFields={responseFields}\", 
\"GET\", UrlLocation.TenantPod, False);\r\n\t\turl.formatUrl(\"filter\", filter);\r\n\t\turl.formatUrl(\"pageSize\", pageSize);\r\n\t\turl.formatUrl(\"responseFields\", responseFields);\r\n\t\turl.formatUrl(\"sortBy\", sortBy);\r\n\t\turl.formatUrl(\"startIndex\", startIndex);\r\n\t\tself.client.withResourceUrl(url).execute();\r\n\t\treturn self.client.result();", "title": "" }, { "docid": "f08c77d51ecdec55ff665ac9f7297be1", "score": "0.66598", "text": "def get_categories(self) -> list[str]:\n\n ul = self.driver.find_element_by_xpath(\n '//*[@id=\"react-root\"]/div[2]/div/div/ul[1]')\n ul_li = ul.find_elements_by_tag_name('li')\n ul_li.pop(len(ul_li)-1)\n categories = []\n\n for li in ul_li:\n category = li.find_elements_by_tag_name('a')[-1].text\n categories.append(self.normalize_string(category))\n\n return categories", "title": "" }, { "docid": "fed83a8c39d11238d8eb3649e4211763", "score": "0.66547024", "text": "def get(self, request, format=None):\n serializer = CategoriesSerializer(Category.objects.all(), many=True)\n return JsonResponse(serializer.data, status=200, safe=False)", "title": "" }, { "docid": "5d242fc3d7868793d2cbcd82e5a2456b", "score": "0.6619072", "text": "def list_categories(self) -> List[Tuple[str, str]]:\n category_list = [(name, path) for name, path in self.category_map.items()]\n # Fix the order of category list.\n category_list.sort(key=lambda category: category[0])\n return category_list", "title": "" }, { "docid": "3792a1c778eb95f3dd0b64740cf86645", "score": "0.6591694", "text": "def retrieve_category():\n\n selection = Category.query.order_by(Category.id).all()\n\n if len(selection) == 0:\n abort(404)\n\n return jsonify({\n 'success': True,\n 'categories': [cat.type for cat in selection],\n 'total_categories': len(selection)\n })", "title": "" }, { "docid": "79c50a7675117df6517e83bee990b5f7", "score": "0.653209", "text": "async def categories(self, ctx):\n\n await ctx.send(f\"Hangman categories include: `{', '.join(HangmanGame.get_categories())}`\")", "title": "" }, { "docid": "d2eecd13b573e02aaadc6d082399749b", "score": "0.6527657", "text": "def retrieve_cats(limit: int = 2, cursor: str = None):\n logger.info('Retrieving all cats')\n with client.context():\n query = models.CatNDB.query()\n res, cursor, _ = query.fetch_page(\n limit=limit, start_cursor=ndb.Cursor(urlsafe=cursor))\n\n return ([_ndb_to_cat(c) for c in res],\n cursor.urlsafe() if cursor is not None else '')", "title": "" }, { "docid": "feecd4322f3296360bf83cf922ad5a51", "score": "0.65242034", "text": "def getFileCategories(self):\n text = self.generateRequest('/v2.1/FileCategories', 'GET', '')\n return self.dictToList(json.loads(text))", "title": "" }, { "docid": "27843ceae4847f9f6f78de7e02a9d3bd", "score": "0.6493865", "text": "def get_categories():\n return load_config_file(\"categories\")", "title": "" }, { "docid": "393477e5552a3d2682154f324419a6cf", "score": "0.64914846", "text": "def get_paginated_categories():\n categories_limit = request.args.get('limit', QUESTIONS_PER_PAGE, type=int)\n selected_page = request.args.get('page', 1, type=int)\n start_index = (selected_page - 1) * categories_limit\n \n current_categories = \\\n Category.query.order_by(Category.id).limit(\n categories_limit\n ).offset(start_index).all()\n\n if len(current_categories) == 0:\n abort(404)\n\n formatted_categories = format_items(current_categories)\n categories_dict = {}\n for category in formatted_categories:\n key = category['id']\n value = category['type']\n categories_dict[key] = value\n\n return jsonify({\n 
'success': True,\n 'categories': categories_dict\n })", "title": "" }, { "docid": "5b62926ebfd739a374910027089e9004", "score": "0.6476686", "text": "def display_categories(self):\n self.cursor.execute(\"SELECT * FROM Category ORDER BY id\")\n for i in self.cursor.fetchall():\n print(i[0], \"-\", i[1])", "title": "" }, { "docid": "c86172b9e90145657b07989b8039819f", "score": "0.64765674", "text": "def getCategoryList(self):\n\n return self._labels", "title": "" }, { "docid": "ba353d2fbcf5407c4f3b161abf6c36c6", "score": "0.6454787", "text": "def get_categories(self, file_name):\r\n json_file = self._read_json_file(file_name)\r\n category_list = []\r\n json_categories = json_file.get('category')\r\n for json_category in json_categories:\r\n name = json_category.get('name')\r\n category_list.append(Category(name))\r\n return category_list", "title": "" }, { "docid": "ed82127ebf479457a81f611a17ef344b", "score": "0.64487803", "text": "def categories_list(self):\n\n article_tags_ids = self._get_article_tags_ids(self.article_id)\n categories_rows = self._get_categories_rows()\n article_categories_rows = []\n for category_row in categories_rows:\n category_id = category_row[0]\n category_tags_ids = self._get_category_tags_ids(category_id)\n if set(category_tags_ids) <= set(article_tags_ids):\n article_categories_rows.append(category_row)\n return [dict(zip(CATEGORIES_KEYS, row)) for row in article_categories_rows]", "title": "" }, { "docid": "8e79f39732e056d12b674249b4782048", "score": "0.6435391", "text": "def table_categories(self):\n return self._table_categories", "title": "" }, { "docid": "a59075018450c48b79bafdbbab02e103", "score": "0.64095443", "text": "def showCategories():\n\n # Retrieve all the categories from the database\n categories = database_session.query(Category).order_by(asc(Category.name))\n\n # Render the homepage template containing all the categories\n return render_template('index.html',\n categories = categories,\n email = user_session.get('email')\n )", "title": "" }, { "docid": "e297e2cbf09eb4f3e03479de5ab383ea", "score": "0.6406981", "text": "def get_category_titles(self):\n\n return self.catbrowser.get_category_titles()", "title": "" }, { "docid": "da18732d3a6a769b457cfbc543147d22", "score": "0.640316", "text": "def getCategoriesVocab(self):\n return self.getAvailableCategoriesAsDisplayList()", "title": "" }, { "docid": "de68b892a5e9410ebeb38531ff943bd5", "score": "0.64016384", "text": "def getListOfCategories(catMap: Dict[str, str] = defineKeyToCategory()):\n return list(catMap.values())", "title": "" }, { "docid": "66dea977b65519d92658378a5e2c39df", "score": "0.64002883", "text": "def get_category(request):\n if request.method == 'GET':\n queryset = Category.objects.all()\n serializer = serializers.CategorySerializer(queryset, many=True)\n return Response({\"data\": serializer.data}, status=status.HTTP_200_OK)", "title": "" }, { "docid": "13376538d2bcf87e30480621d11641aa", "score": "0.6368498", "text": "def get_categories(self):\n self.categories = [None for e in range(MAX_CATEGORIES)]\n\n r = requester('https://fr.openfoodfacts.org/categories.json')\n\n for category in r['tags']:\n for i in range(len(self.categories)):\n if self.categories[i] is None or \\\n self.categories[i][\"products\"] < category[\"products\"]:\n\n self.categories.insert(i, category)\n self.categories.pop(len(self.categories) - 1)\n break", "title": "" }, { "docid": "f06e4e5912b13ef63c77451dafecdbaa", "score": "0.63654906", "text": "def all_categories(request):\n sub_cat_parent = 
SubCategoryParent.objects.all()\n sliders = AllCategorySlider.objects.active()\n variations = Variation.objects.all()\n context = {'products': variations, \"sub_cat_parent\": sub_cat_parent, \"sliders\": sliders}\n template = 'products/all_category.html'\n return render(request, template, context)", "title": "" }, { "docid": "80aa01e4ce8fb0a59b746d97cd973024", "score": "0.6348918", "text": "def read_categories(d: int = 0, f: int = 100, db: Session = Depends(get_db)):\n #get categories\n categories = crud.get_categories(db, skip=d, limit=f)\n return categories", "title": "" }, { "docid": "66d3f84bcf099378288cc11f126a28ec", "score": "0.63478035", "text": "def Get_Categories(self):\n url = 'http://www.blip.tv/?section=categories&cmd=view&skin=api'\n xml_code = urllib.request.urlopen(url).read()\n return xml_code", "title": "" }, { "docid": "92fe47d4c952c7bb612aed41520a3f3f", "score": "0.6333613", "text": "def showCategoriesJSON():\n categories = db.getAllCategories()\n return jsonify(Categories=[r.serialize for r in categories])", "title": "" }, { "docid": "05e2740e3268064c6a15ff9370432637", "score": "0.633259", "text": "def getCatalog():\n catalog = []\n categories = session.query(Category).order_by(Category.name).all()\n for c in categories:\n cat = c.serialize\n cat['items'] = []\n items = session.query(Item).filter_by(\n category_id=c.id).order_by(Item.name)\n for i in items:\n cat['items'].append(i.serialize)\n catalog.append(cat)\n return catalog", "title": "" }, { "docid": "b0203b39f1a188bfe7e24dafa5f22a55", "score": "0.63292956", "text": "def collect_all_categories():\n files = os.listdir()\n markdown_pattern = re.compile(r\".*\\.md\")\n cates = list(set([read_markdown_head(file) for file in filter(markdown_pattern.match, files)]))\n return [cat.strip() for cat in cates]", "title": "" }, { "docid": "847639e88ad4dc84074a238d858508df", "score": "0.632302", "text": "def categories_action(self, args):\n categories = self.get_categories()\n categories.sort(key=lambda c: c['label'].lower())\n for category in categories:\n print(\"Label:\", category['label'])\n print(\"Id:\", category['id'])\n print(\"Description:\", category.get('description'))\n print(\"\")\n return 0", "title": "" }, { "docid": "f052d0906b02b011a5687a393b3617dd", "score": "0.63184845", "text": "def get_categories(url: str) -> bs4.element.ResultSet:\n try:\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0\"}\n r = requests.get(url, headers=headers)\n except requests.exceptions.MissingSchema:\n print(\"The supplied URL is invalid. 
Please update and run again.\")\n raise Exception(\"InvalidURL\")\n soup = BeautifulSoup(r.text, 'html.parser')\n return soup.find_all(attrs={\"class\": \"category\"})", "title": "" }, { "docid": "017c5ad900ddd9b8c9889394814c2a2b", "score": "0.63020325", "text": "def get_categories():\n\n global conn\n\n cur = conn.cursor()\n query = \"select id, name from category order by 1\"\n try:\n cur.execute(query)\n data = cur.fetchall()\n categories = {k: v for k, v in data}\n return categories\n\n except Exception as e:\n print(\"couldn't fetch data from category: \", e)\n conn.close()\n print(\"database connection closed\")", "title": "" }, { "docid": "fe0cd20b77fd69e002b9bbbeea09095e", "score": "0.6292625", "text": "def test_get_all_categories(self):\n res = self.client().get('/api/v1/categories')\n data = json.loads(res.data)\n # status code should be 200\n self.assertEqual(res.status_code, 200)\n self.assertIsInstance(data['categories'], dict)", "title": "" }, { "docid": "cb695948ba65351fcd4482264317b269", "score": "0.6290295", "text": "def _get_ordered_categories():\n return list(\n Category.objects.all()\n .order_by('position')\n .values_list('name', flat=True)\n )", "title": "" }, { "docid": "1085b262c32eda981523eb857ff52c91", "score": "0.6272454", "text": "def showCategories():\n categories = session.query(Category).order_by(asc(Category.name))\n items = session.query(Item).order_by(desc(Item.id))\n if 'username' not in login_session:\n return render_template(\n 'public_categories.html', categories=categories,\n items=items, is_login=isLogin(), STATE=loginToken())\n else:\n return render_template(\n 'categories.html', categories=categories,\n items=items, is_login=isLogin(), STATE=loginToken())", "title": "" }, { "docid": "0a84b3bcbd4d81850f77730223e2822b", "score": "0.62649494", "text": "def getOpportunityCategories(self):\n text = self.generateRequest('/v2.1/OpportunityCategories', 'GET', '')\n return self.dictToList(json.loads(text))", "title": "" }, { "docid": "b435bf3c59eb012239bf98fb4ef8aa54", "score": "0.6244131", "text": "def get_category_usage(self):\n category_data = []\n for category_object in self.category_list:\n category_data.append(category_object.get_category_data())\n return category_data", "title": "" } ]
45ce4a07d98b6c6a05f38ced74b4eb0c
The number of successful requests the account has performed.
[ { "docid": "6b0c1d7f270bf6b56f0725f0ccc3c186", "score": "0.62084264", "text": "def successful_api_calls(self):\n return self._successful_api_calls", "title": "" } ]
[ { "docid": "496d049346c0727358a4441acc8cb687", "score": "0.7959406", "text": "def successfulCount(self) -> int:", "title": "" }, { "docid": "2521faaba0023070a9f908979978721e", "score": "0.69510853", "text": "def nreturned(self):\n if not self._counters_calculated:\n self._counters_calculated = True\n self._extract_counters()\n\n return self._nreturned", "title": "" }, { "docid": "9a6a875b26a83a40b841b849e2690cdd", "score": "0.6880506", "text": "def pendingCount(self) -> int:", "title": "" }, { "docid": "81e01583f739adcbedce110756eaa30d", "score": "0.68050814", "text": "def api_reqcount():\n uid = g.uid\n if (uid == 0):\n return '0'\n return str(getReqCount(uid))", "title": "" }, { "docid": "53a0da81baa59c6cca79f2f26c8e967e", "score": "0.67430335", "text": "def attempts(self):\n return len(self)", "title": "" }, { "docid": "ce80039ab7cb6456117427ca3394143b", "score": "0.66511095", "text": "def GetCount(self) -> int:\n ...", "title": "" }, { "docid": "ce80039ab7cb6456117427ca3394143b", "score": "0.66511095", "text": "def GetCount(self) -> int:\n ...", "title": "" }, { "docid": "ce80039ab7cb6456117427ca3394143b", "score": "0.66511095", "text": "def GetCount(self) -> int:\n ...", "title": "" }, { "docid": "6e32f140d5241e5e8007c6d00ee210fa", "score": "0.664684", "text": "def count(self) -> int:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "6e32f140d5241e5e8007c6d00ee210fa", "score": "0.664684", "text": "def count(self) -> int:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "6e32f140d5241e5e8007c6d00ee210fa", "score": "0.664684", "text": "def count(self) -> int:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "7288fef832b167d079db8f3c7d4c6185", "score": "0.6633359", "text": "def count_results(self):\n return self.results.count()", "title": "" }, { "docid": "878401efb00ac31649008ebf8ad881f3", "score": "0.6624129", "text": "def get_num_failed(self):\n return self.num_failed", "title": "" }, { "docid": "5f764b108d1bce16ba096a08b2ee9f72", "score": "0.6598421", "text": "def success(self):\n return self.smsrecipient_set.filter(status=\"SENT\").count() + \\\n self.smsrecipient_set.filter(status=\"SUCCESS\").count()", "title": "" }, { "docid": "2bc98ba9497afed24c3314dc2a658a45", "score": "0.6571201", "text": "def state(self):\n total = 0\n for job_id, job_responders in self.ws.responders.items():\n if self.response in job_responders:\n total = total + len(job_responders[self.response])\n return total", "title": "" }, { "docid": "951c9de742e25fbb686d7f7a4c2209f8", "score": "0.6562037", "text": "def get_amount_of_results(self):\n return len(self.results)", "title": "" }, { "docid": "406ec142762a61896b599a0c5c2737d6", "score": "0.6547481", "text": "def count(self):\n return 0", "title": "" }, { "docid": "406ec142762a61896b599a0c5c2737d6", "score": "0.6547481", "text": "def count(self):\n return 0", "title": "" }, { "docid": "f995bf6f5e7ba485d4f7a8b03d7a7fa2", "score": "0.6540085", "text": "def retry_count(self):\n return self._retry_count", "title": "" }, { "docid": "7a84c942c873e187c2fb44b7085c0dcc", "score": "0.6518998", "text": "def results_count(self):\n return len(self.get_results)", "title": "" }, { "docid": "78a38cd7aa0cd2e9057f9c471afe48b8", "score": "0.6483766", "text": "def get_count(self):\r\n return self.count", "title": "" }, { "docid": "c8c1e4fde185e94f872d5dc73fb83c28", "score": "0.64813715", "text": "def count(self) -> int:\n\t\treturn self.count", "title": "" }, { "docid": "c8c1e4fde185e94f872d5dc73fb83c28", "score": "0.64813715", 
"text": "def count(self) -> int:\n\t\treturn self.count", "title": "" }, { "docid": "b4a1159b82d4cb9ee518a7e759d6cfbb", "score": "0.64808977", "text": "def get_nb_successor (self) :\n return len (self.successor)", "title": "" }, { "docid": "cadc59e9d5be6f8178e2ac482a3ef1de", "score": "0.6457461", "text": "def get_progress(self):\n return len(self._search_context.get_issued_queries()) / float(len(self._search_context.get_all_queries()))", "title": "" }, { "docid": "17529eb6f5c5d9dda9dbd60908c4e5c2", "score": "0.6440445", "text": "def requests_outstanding(self):\n return self._requests_outstanding", "title": "" }, { "docid": "d2290b252c811740c3ed8c644a8cedb6", "score": "0.6419765", "text": "def count_error(self) -> int:\n return self._count_error", "title": "" }, { "docid": "2f121e41e593260b43a86908dbb314be", "score": "0.64157856", "text": "def count(self):\n\n return self.total", "title": "" }, { "docid": "870a86fb2ab0393ba5820b029d8b2fd8", "score": "0.63862854", "text": "def Count():\n return CheckForError(lib.Relays_Get_Count())", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.6380628", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.6380628", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.6380628", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "a6789a03ab90a1823345bd38777929d2", "score": "0.63487566", "text": "def pending_imported_devices_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"pending_imported_devices_count\")", "title": "" }, { "docid": "d61fbcb10dbed475408920f30f4908de", "score": "0.63098496", "text": "def processedCount(self) -> int:", "title": "" }, { "docid": "12d05464e78a4c93cba7d3d04ac1efbe", "score": "0.63028216", "text": "def count(self):\n return len(self.__errors)", "title": "" }, { "docid": "dbaffb9ec18789ef16824409a0ad7dfe", "score": "0.6301764", "text": "def count(self) -> int:\n return self._count", "title": "" }, { "docid": "348348195927848874961bbc21824d6a", "score": "0.6296127", "text": "def total_results(self) -> int:\n return self.__total_results", "title": "" }, { "docid": "8f8ed6b52aa3c19eaabd43a2ce48fe52", "score": "0.62920064", "text": "def getTotalUserCount(self):\n result = self.maxUserCount + 1\n with self.usersLock:\n waitingUserCount = self.waitingUsersSortedSet.size() \n activeUserCount = self.activeUsersSortedSet.size()\n result = waitingUserCount + activeUserCount\n #\n return result", "title": "" }, { "docid": "5f395410c8be889e008f7db4034c269e", "score": "0.6287061", "text": "def get_total_successful_builds(self):\n query = \"\"\"\n SELECT\n COUNT(*)\n FROM\n builds\n WHERE\n status\n \"\"\"\n self.cursor.execute(query)\n result = self.cursor.fetchone()\n return result[0]", "title": "" }, { "docid": "f87910646d8d72354375a211bccc1c55", "score": "0.62794197", "text": "def rate_limit_count(self):\n return self._rate_limit_count", "title": "" }, { "docid": "6e5ceb5f72aebe3d4cf4e28ff1f89eda", "score": "0.6271769", "text": "def number_of_errors(self):\n return self._number_of_errors", "title": "" }, { "docid": "fb29076d39d482c9c3be027396d45816", "score": "0.6268665", "text": "def get_total_actions(self):\n return self.n_actions", "title": "" }, { "docid": "242c2a5d2a2c63e0b50a6797c37b459c", "score": "0.62657315", "text": "def getCount(self):\n return self.cnt", "title": "" }, { "docid": "b946ac0ec88e6f451f06eef360857c28", "score": 
"0.62578076", "text": "def no_of_recruitment(self):\n if not self.expected_employees:\n return 1\n \n return self.expected_employees - self.no_of_hired_employee", "title": "" }, { "docid": "53f2ff2fb4d5c3b4a26529b6408cf81f", "score": "0.62492853", "text": "def count(self):\n\n return self._count", "title": "" }, { "docid": "85805fac3efdf0759ebf369aec797b64", "score": "0.62426805", "text": "def count(self) -> int:\n return self.__count", "title": "" }, { "docid": "419cf521e096e8fb40e20964ff0d0e4f", "score": "0.6241535", "text": "def tries(self):\n return self._tries", "title": "" }, { "docid": "7702815a55b6bece4c2470115d73502d", "score": "0.6203003", "text": "def total_count(self) -> int:\n return self.__total_count", "title": "" }, { "docid": "fc909b72fd00138653921d70d0db7472", "score": "0.6188708", "text": "def _increment_counters(self):\n self.request_count += 1\n self.request_total += 1", "title": "" }, { "docid": "461894010b922fe5b421ab45f28135be", "score": "0.61860484", "text": "def get_attempts(self):\n return self.attempts", "title": "" }, { "docid": "1d3c63ad37cde55161daee6a7ca2f0c8", "score": "0.61844367", "text": "def num_required_trial(self):\n return self._num_required_trial", "title": "" }, { "docid": "57797c101b425b49253733a1c9176e7d", "score": "0.61586857", "text": "def get_n_credits(self):\n return self.count_credits", "title": "" }, { "docid": "117106bbfb349a002c1c152c2f509c7a", "score": "0.6151725", "text": "def complete(self) -> int:\n return self.response.get(b'complete', 0)", "title": "" }, { "docid": "70308b5c9967f29dfeadf72b4dbcbdae", "score": "0.61474997", "text": "def calls(self):\n return self.engine.call_count", "title": "" }, { "docid": "c94dc24ebd029e1226da06f06763a41c", "score": "0.6140524", "text": "def count(self):\n return self.es_client.count()", "title": "" }, { "docid": "891a8c9d714addd829424568ed1d6fa8", "score": "0.61377424", "text": "def total_count(self) -> int:\n return self._total_count", "title": "" }, { "docid": "1e4de85734e19ba96599f173362cbae3", "score": "0.61372393", "text": "def number_of_on_calls(self) -> Optional[int]:\n return pulumi.get(self, \"number_of_on_calls\")", "title": "" }, { "docid": "1cf7cdfcb5484a1f4480c2ed48c971bf", "score": "0.6136882", "text": "def GetCount(self):\n r = _comm(f'COMM _ok GetCount'.encode(), self._host)\n return int.from_bytes(r, 'big')", "title": "" }, { "docid": "7d53d461e5d5d150b095916fcaed54ab", "score": "0.61302596", "text": "def __len__(self):\n return len(self._exceptions) + len(self._responses)", "title": "" }, { "docid": "42f2515662e3dc99b7928ad6ba5e7599", "score": "0.6126105", "text": "def getErrorCount(self):\n self.errorCountSemaphore.acquire()\n count = self.errorCount\n self.errorCountSemaphore.release()\n return count", "title": "" }, { "docid": "7d4b68fbe9e6f9813bf8e78ae5d386fe", "score": "0.6124681", "text": "def total_iterations(self):\n return self._total_iterations", "title": "" }, { "docid": "fc08dbc6c45fb40484988bac80ffe294", "score": "0.61143005", "text": "def get_remaining_calls(self) -> int:\n body = {'data': {'key': self.token}}\n response = self.request(url=\"https://europe-west1-fmpdev-1d3ca.cloudfunctions.net/getRemainingCalls\",\n method=\"POST\", body=body)\n\n return int(json.loads(response)['result'])", "title": "" }, { "docid": "a488aa4f8f1a0600a89d8ed1c57b2242", "score": "0.6105512", "text": "def count_result(self) -> t.Optional[int]:\n return self._count_result", "title": "" }, { "docid": "0cead2beda6288b2929db2bafc4fb196", "score": "0.61044395", "text": "def Count(self) -> 
int:", "title": "" }, { "docid": "d8d2334fad39c5052fb827ec91cd79d1", "score": "0.6096347", "text": "def success_threshold(self) -> int:\n return pulumi.get(self, \"success_threshold\")", "title": "" }, { "docid": "99b762b54a55acba2fbb1c2c2ddc469d", "score": "0.60923517", "text": "def n_results(self) -> int:\n return sum(len(entries) for entries in self.entries.values())", "title": "" }, { "docid": "9b7dbc31c0d67a03239d00867cd2a507", "score": "0.608442", "text": "def on_request_success(request_type, name, response_time, response_length):\r\n stats[\"content-length\"] += response_length", "title": "" }, { "docid": "cda14a564def11585ade5d2983acf6e7", "score": "0.60827404", "text": "def __len__(self):\n try:\n return self.res_cnt\n except AttributeError:\n return 0", "title": "" }, { "docid": "9642de721029b2a329f78c9ed8fd131b", "score": "0.6069372", "text": "def count(self) -> str:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "f201ddd597a759f71011eb554634686b", "score": "0.6068552", "text": "def count(self):\n return int(self._do_search(count=True)[0])", "title": "" }, { "docid": "92f50f1597cf4ea58c5b87cf6cfd9ed1", "score": "0.60642076", "text": "def credits(self):\n return int(self.response.get('credits', '0'))", "title": "" }, { "docid": "be3d13b70016e5549e9de69aa7a98050", "score": "0.6058926", "text": "def get_total_count(self) -> int:\n return self.total_count", "title": "" }, { "docid": "5a642470d62f4591cc3a0f493d88bf16", "score": "0.6058615", "text": "def returning_users_count(self):\n return self.returning_users.count()", "title": "" }, { "docid": "2295c9cd8c603c5c3cac6c075ee94891", "score": "0.60565484", "text": "def failed_imported_devices_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"failed_imported_devices_count\")", "title": "" }, { "docid": "dbbd74f007e5b631ead4edf29e6f5919", "score": "0.6056476", "text": "def count(self):\n pass", "title": "" }, { "docid": "dbbd74f007e5b631ead4edf29e6f5919", "score": "0.6056476", "text": "def count(self):\n pass", "title": "" }, { "docid": "3c448c36d8db8e69fa0cc0860a42b7d3", "score": "0.605622", "text": "def usercount(self):\n return len(self.registered_clients)", "title": "" }, { "docid": "09ddb48edec1992f68f93b3bd58d501d", "score": "0.6044687", "text": "def count(self):\n return self.intcnt", "title": "" }, { "docid": "0495c25c9efea6f3f93758efb59c3df4", "score": "0.6034245", "text": "def n_done(self) -> int:\n if self._db is None:\n return 0\n return self._db.count(lambda e: e.is_done)", "title": "" }, { "docid": "e319979f87f7fa67f826cc8b40d97647", "score": "0.60314834", "text": "def count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "87ff2ad350a1cb73e5f93bd4c00c1099", "score": "0.60273075", "text": "def images_count (self):\n r = self.api_request('account/images_count.json')\n result = simplejson.loads(r)\n self.count = result['images_count']['count']\n return self.count", "title": "" }, { "docid": "1887a77c7557009238f45ff8988b32ff", "score": "0.6027215", "text": "def count(self):\n certs = [x for x in self.certificates.all() if x.is_valid]\n return len(certs)", "title": "" }, { "docid": "8322f0f70271e92f7558657fd792add7", "score": "0.60221994", "text": "def entry_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"entry_count\")", "title": "" }, { "docid": "ccbe4910e2baf725b1e3a46733d42a20", "score": "0.6015739", "text": "def getcount(self):\n return self.processcount", "title": "" }, { "docid": "ae0c58594e40a4bfaa25f1f5763bc84d", 
"score": "0.6014239", "text": "def getUserCount(self):\n return len(self.users)", "title": "" }, { "docid": "46cbbd481524d6dde8ca4847c0985611", "score": "0.6012699", "text": "def account_block_count(account):\n\tdata = {\"action\": \"account_block_count\", \"account\" : account}\n\treturn data", "title": "" }, { "docid": "3a66b7e358802adce8615368414d26cf", "score": "0.6007238", "text": "def get_num_files(self):\n return self.ex('SELECT Count(*) FROM cache').fetchone()[0]", "title": "" }, { "docid": "eac0e2e1c5a53e2bac81b7d6f2f9d1d0", "score": "0.5998533", "text": "def count_completed_trials(self, experiment: Experiment) -> int:\n raise NotImplementedError()", "title": "" }, { "docid": "8498bd9f6074bb12ec74ac02f83728e4", "score": "0.5994058", "text": "def status_code(self):\n\n if RL_LIMITED_REQUESTS > RL_REQUEST_COUNT:\n return 200\n\n return 429", "title": "" }, { "docid": "f4ba1d185b66a8eba6830c594e20815e", "score": "0.5993527", "text": "def total_content_length():\r\n return \"Total content-length recieved: %i\" % stats[\"content-length\"]", "title": "" }, { "docid": "92bb80c117efad5dee4f7cd5a2bd0d10", "score": "0.59928024", "text": "def Count(self):\r\n\t\treturn self._get_attribute('count')", "title": "" }, { "docid": "92bb80c117efad5dee4f7cd5a2bd0d10", "score": "0.59928024", "text": "def Count(self):\r\n\t\treturn self._get_attribute('count')", "title": "" }, { "docid": "8f29e63fad1c39df42f788d57fd65335", "score": "0.5990894", "text": "def _num_completed(self) -> int:\n step_trials = self.trial_indices[self.index]\n by_status = self.experiment.trial_indices_by_status\n\n return len(\n step_trials.intersection(\n by_status[TrialStatus.COMPLETED].union(\n by_status[TrialStatus.EARLY_STOPPED]\n )\n )\n )", "title": "" }, { "docid": "aac20ce772ba67857cd75707db0fce83", "score": "0.5987645", "text": "def get_n_actions(self):\n return self.count_actions", "title": "" }, { "docid": "c36dce873d4115999b5382a010d51057", "score": "0.59865", "text": "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "title": "" }, { "docid": "873dcef28b9137aef9c06e5ff0f5fb2d", "score": "0.5983534", "text": "def n_waiting(self):\n # We don't need synchronization here since this is an ephemeral result\n # anyway. It returns the correct value in the steady state.\n if self._state == 0:\n return self._count\n return 0", "title": "" }, { "docid": "46939a98483911898dd2ab2693970628", "score": "0.59788305", "text": "def num_creditos(self) -> int:\n return self._num_creditos", "title": "" }, { "docid": "267bf4c38074b98f26803f595407d076", "score": "0.59735256", "text": "async def get_count(self, **kwargs) -> int:\n self.count = await self.model.count(**kwargs)\n return self.count", "title": "" }, { "docid": "cf0cb722fba91daffb72c9cd9f6e4ce5", "score": "0.59698045", "text": "def error_count(self) -> int:\n return len(self.errors)", "title": "" }, { "docid": "1c0a6d46752672a6fe74adbbedf1e20f", "score": "0.5968539", "text": "def total_activities_count(self):\n try:\n return self.activities_collection.count()\n except:\n self.log_error(MongoDatabase.total_activities_count.__name__ + \": Exception\")\n return 0", "title": "" } ]
8f03b306abc58c21ce7eb710ad1a723f
Define observation groups for a given table of bins. Define one group for each possible combination of the observation group axis bins, defined as rows in the input table.
[ { "docid": "58d54a7800624b4b26151f929e05d855", "score": "0.7689213", "text": "def define_groups(self, table):\n if len(self.obs_groups_table.columns) is not 0:\n raise RuntimeError(\n \"Catched attempt to overwrite existing obs groups table.\")\n\n # define number of groups\n n_groups = 1\n # loop over observation axes\n for i_axis in np.arange(len(self.obs_group_axes)):\n n_groups *= self.obs_group_axes[i_axis].n_bins\n\n if len(table) is not n_groups:\n raise ValueError(\"Invalid table length. Got {0}, expected {1}\".format(\n len(table), n_groups))\n\n # fill table, with first the obs group IDs, then the axis columns\n self.obs_groups_table = table\n self.obs_groups_table.add_column(Column(name='GROUP_ID',\n data=np.arange(n_groups)),\n index=0)", "title": "" } ]
[ { "docid": "479a74bfcb57297f06609ec038a52285", "score": "0.73450506", "text": "def group_observation_table(self, obs_table):\n if 'GROUP_ID' in obs_table.colnames:\n raise KeyError(\n \"Catched attempt to overwrite existing grouping in the table.\")\n\n # read the obs groups table row by row (i.e. 1 group at\n # a time) and lookup the range/value for each parameter\n n_axes = len(self.obs_group_axes)\n list_obs_table_grouped = []\n for i_row in np.arange(self.n_groups):\n i_group = self.obs_groups_table['GROUP_ID'][i_row]\n # loop over obs group axes to find out the names and formats\n # of the parameters to define the selection criteria\n obs_table_selected = obs_table\n for i_axis in np.arange(n_axes):\n name = self.obs_group_axes[i_axis].name\n format = self.obs_group_axes[i_axis].format\n\n if format == 'bin_edges':\n min_value = recover_units(self.obs_groups_table[name + '_MIN'][i_row],\n self.obs_groups_table[name + '_MIN'])\n max_value = recover_units(self.obs_groups_table[name + '_MAX'][i_row],\n self.obs_groups_table[name + '_MAX'])\n elif format == 'bin_values':\n min_value = recover_units(self.obs_groups_table[name][i_row],\n self.obs_groups_table[name])\n max_value = min_value\n # apply selection to the table\n selection = dict(type='par_box', variable=name,\n value_range=(min_value, max_value))\n obs_table_selected = obs_table_selected.select_observations(selection)\n # define group and fill in list of grouped observation tables\n group_id_data = i_group * np.ones(len(obs_table_selected), dtype=np.int)\n obs_table_selected.add_column(Column(name='GROUP_ID', data=group_id_data),\n index=0)\n list_obs_table_grouped.append(obs_table_selected)\n\n # stack all groups\n obs_table_grouped = vstack(list_obs_table_grouped)\n\n return obs_table_grouped", "title": "" }, { "docid": "75456f8eae4613947e2e021d81953e45", "score": "0.63996917", "text": "def table_to_axes(table):\n # subset table: remove obs groups column\n if table.colnames[0] == 'GROUP_ID':\n table = table[table.colnames[1:]]\n\n axes = []\n for i_col, col_name in enumerate(table.columns):\n data = np.unique(table[col_name].data)\n # recover units\n data = recover_units(data, table[col_name])\n axes.append(ObservationGroupAxis(col_name, data,\n 'bin_values'))\n # format will be reviewed in a further step\n\n # detect range variables and eventually merge columns\n for i_col in np.arange(len(axes)):\n try:\n split_name_min = axes[i_col].name.rsplit(\"_\", 1)\n split_name_max = axes[i_col + 1].name.rsplit(\"_\", 1)\n if (split_name_min[-1] == 'MIN'\n and split_name_max[-1] == 'MAX'\n and split_name_min[0] == split_name_max[0]):\n min_values = axes[i_col].bins\n max_values = axes[i_col + 1].bins\n edges = np.unique(np.append(min_values, max_values))\n # recover units\n edges = recover_units(edges, min_values)\n\n axes[i_col] = ObservationGroupAxis(split_name_min[0], edges,\n 'bin_edges')\n axes.pop(i_col + 1) # remove next entry on the list\n except:\n pass\n\n return axes", "title": "" }, { "docid": "f56236737a23930cf2cf42336c8f5907", "score": "0.6125227", "text": "def group_observations(self):\n\n obs_table = self.define_obs_table()\n\n # Define observation groups\n axes = [ObservationGroupAxis('ZEN_PNT', [0, 49, 90], fmt='edges')]\n obs_groups = ObservationGroups(axes)\n log.info(obs_groups.info)\n\n # Apply observation grouping\n obs_table = obs_groups.apply(obs_table)\n\n # Store the results\n filename = self.obs_table_grouped_filename\n log.info('Writing {}'.format(filename))\n obs_table.write(str(filename), 
format='ascii.ecsv')\n self.obs_table = obs_table\n\n filename = self.group_table_filename\n log.info('Writing {}'.format(filename))\n obs_groups.obs_groups_table.write(str(filename), format='ascii.ecsv')\n self.ntot_group = obs_groups.n_groups", "title": "" }, { "docid": "f4b54bb539430867363d36ad3acf6d29", "score": "0.59165764", "text": "def bin_grp(a, num_bins):\n\n a = list(a)\n a.sort()\n\n left = min(a)\n right = max(a)\n\n bin_size = (right - left) / num_bins\n bins = []\n labels = []\n pt = 0\n\n for i in range(num_bins):\n b = []\n labels.append((left + i * bin_size, left + (i + 1) * bin_size))\n\n while pt < len(a) and a[pt] <= left + (i + 1) * bin_size:\n b.append(a[pt])\n pt += 1\n\n bins.append(b)\n\n return labels, bins", "title": "" }, { "docid": "6ddbb13660016aabae0d74a7d406ff5f", "score": "0.5741548", "text": "def getEnergyGroups(energies_df, num_bins = 5):\n energies = energies_df['Integrals (m/s^2)'].values\n energy_bins = []\n i = 0\n incrmt = np.ceil(max(energies)/num_bins/1000)*1000\n while i < max(energies):\n energy_bins.append((i,i+incrmt))\n i += incrmt\n \n energy_groups = []\n for i in range(len(energies)):\n for j in range(len(energy_bins)):\n if energies[i] <= energy_bins[j][1] and energies[i] >= energy_bins[j][0]:\n energy_groups.append(j+1)\n \n energies_df['Group'] = energy_groups\n \n return(energies_df, energy_bins)", "title": "" }, { "docid": "ae0536245e3ae59fe46ba065c8befb94", "score": "0.56132823", "text": "def _make_table(x, y, num_bins):\n\n assert len(x.shape) == 1\n assert len(y.shape) == 1\n\n # The only fast way to do this is by reindexing the table as an index array\n reindex = x * num_bins + y\n # Then piling everything up with bincount and reshaping it back into the table\n return np.bincount(reindex, minlength=num_bins ** 2).reshape(num_bins, num_bins).astype(np.dtype(float))", "title": "" }, { "docid": "388f5b8976a4f227f56f99d6fd645b4d", "score": "0.5554124", "text": "def create_bins(gases, num_bins, mof_list, element_pmf_results):\n\n # Save list of dictionaries for first MOF to use as a list of all gas mole fractions\n # Might be good to make this it's own function early on for easier access to list of comps\n comp_set_dict = [row for row in element_pmf_results if row['MOF'] == mof_list[0]]\n # comps_array = []\n # for row in comp_set_dict:\n # comps_array.append([float(row[gas]) for gas in gases])\n comps_array = np.array([[float(row[gas]) for gas in gases] for row in comp_set_dict])\n # Figure out what is different between commented approach and current one!\n\n # Determine the set of points used to create bins\n bin_points = []\n for i in range(len(gases)):\n lower_comp = min(comps_array[:,i])\n upper_comp = max(comps_array[:,i])\n # Brian Bins\n lower_lim = lower_comp - 0.5*(upper_comp-lower_comp)/(num_bins-1)\n upper_lim = upper_comp + 0.5*(upper_comp-lower_comp)/(num_bins-1)\n # Jenna Bins\n # lower_lim = lower_comp\n # upper_lim = upper_comp + (upper_comp-lower_comp)/(num_bins)\n bin_points.append(np.linspace(lower_lim, upper_lim, num=num_bins+1, endpoint=True))\n bin_points = np.transpose(np.vstack(bin_points))\n\n # Reformat bin_points\n bins = []\n for row in bin_points:\n bins.append({gases[i] : row[i] for i in range(len(gases))})\n\n return bins", "title": "" }, { "docid": "f7421c1cb7bb7847890dae3aee5ba43f", "score": "0.55137163", "text": "def create_groupings(inputs):\n for n in range(1, len(inputs) + 1):\n for split_indices in itertools.combinations(range(1, len(inputs)), n - 1):\n grouping = []\n prev_split_index = None\n for 
split_index in itertools.chain(split_indices, [None]):\n group = set(inputs[prev_split_index:split_index])\n grouping.append(group)\n prev_split_index = split_index\n yield grouping", "title": "" }, { "docid": "3f38cc81e82922a022aabe1f1d9dc58b", "score": "0.54292905", "text": "def get_group_of_observations(self, obs_table, group,\n inverted=False, apply_grouping=False):\n if apply_grouping:\n obs_table = self.group_observation_table(obs_table)\n\n selection = dict(type='par_box', variable='GROUP_ID',\n value_range=(group, group), inverted=inverted)\n return obs_table.select_observations(selection)", "title": "" }, { "docid": "700ee143ea1d0aac4edeba4c17d9bab7", "score": "0.5423195", "text": "def explode_bins(ht, cols_bins, verbose):\n # check types\n if not all([ht[x].dtype == hl.dtype('int32') for x in cols_bins]):\n raise TypeError('All fields in cols_bins must have type int32.')\n # ensure sequential 1-indexed integers\n vals_map = {}\n for ele in cols_bins:\n n_count = ht.aggregate(hl.agg.counter(ht[ele]))\n if verbose:\n print('Bin distribution across variants for ' + ele + ':')\n print(n_count)\n vals_vec = [i for i in list(n_count.keys()) if i is not None]\n vals_vec.sort()\n tf = vals_vec == [i for i in range(1,len(vals_vec)+1)]\n if not tf:\n raise ValueError('Field ' + ele + ' is not consecutive 1-indexed. ' + \\\n 'Values found were: ' + str(vals_vec))\n vals_map.update({ele: vals_vec})\n \n # transform to wide format\n holder = {}\n for ele in cols_bins:\n if len(holder) == 0:\n holder = {ele + '_' + str(x): [x] for x in vals_map[ele]}\n else:\n holder = {item + '_' + ele + '_' + str(x): arr + [x] \n for item, arr in holder.items() for x in vals_map[ele]}\n \n # create new columns\n col_holder = {}\n for k,v in holder.items():\n conditions = [ht[ele] == val for ele, val in zip(cols_bins,holder[k])]\n final_cond = conditions[0]\n for i in range(1, len(conditions)):\n final_cond = final_cond & conditions[i]\n col_holder.update({k: hl.if_else(final_cond, 1, 0)})\n ht = ht.annotate(**col_holder)\n \n # remove original columns\n ht = ht.drop(*cols_bins)\n \n # return\n return ht", "title": "" }, { "docid": "a85236d30f004eb223edcd4fdd244b75", "score": "0.5360634", "text": "def binGen(array, binsNumber=200):\n array_his = np.histogram(array,binsNumber)\n array_his = [list(i) for i in array_his]\n\n def getDatainBins(ticks):\n \"\"\"\n ticks: [(lowerbound, upperbound),..]\n return a list of lists which contains data index for\n each bins\n \"\"\"\n binData = []\n for k, bounds in enumerate(ticks):\n lower, upper= bounds\n if k == len(ticks)-1:\n #last bar, should inclue the last number\n dataIndex = array.ix[(array >= float(lower))& (array <= float(upper))].index.values\n else:\n dataIndex = array.ix[(array >= float(lower)) & (array < float(upper))].index.values\n binData.append(list(dataIndex))\n return binData\n\n bins = array_his[0]\n ticksOrigin = array_his[1]\n ticksOrigin = zip(ticksOrigin[:-1], ticksOrigin[1:])\n\n binData = getDatainBins(ticksOrigin)\n ticks = ['%.2f'%i for i in array_his[1]]\n ticks = zip(ticks[:-1], ticks[1:])\n\n\n assert type(binsNumber)==int\n jetcolor = genJetColormap(binsNumber)\n feature_his = []\n for k, v in enumerate(zip(bins, ticks)):\n dic = {'bins':v[0], 'ticks':v[1], 'binData':binData[k], 'color':jetcolor[k]}\n feature_his.append(dic)\n\n return (feature_his, ticksOrigin)", "title": "" }, { "docid": "16aa5d5d7c5acb1339554e1a0dde7510", "score": "0.5309108", "text": "def multi_histogram(queryset, column, bins, slice_on, choices):\n queryset = 
_get_queryset(queryset)\n\n field_values = get_column_values(queryset, slice_on, choices)\n\n bins = [force_text(bin) for bin in bins]\n\n whens = tuple(\n between_include_start(column, bins[k], bins[k+1], Value(force_text(bins[k])))\n for k in range(len(bins) - 1)\n ) + (\n When(Q(**{column + '__gte': bins[-1]}), Value(force_text(bins[-1]))),\n )\n\n ordering_whens = tuple(\n between_include_start(column, bins[k], bins[k + 1], Value(k))\n for k in range(len(bins) - 1)\n ) + (\n When(Q(**{column + '__gte': bins[-1]}), Value(len(bins) - 1)),\n )\n\n bin_annotation = {\n 'bin': Case(*whens, output_field=CharField()),\n 'order': Case(*ordering_whens, output_field=IntegerField())\n }\n\n histogram_annotation = {\n display_value: Count(Case(When(Q(**{slice_on: field_value}), then=1), output_field=IntegerField()))\n for field_value, display_value in field_values\n }\n\n qs = queryset.annotate(**bin_annotation).order_by('order').values('bin').filter(bin__isnull=False).annotate(**histogram_annotation)\n\n return _zero_fill(qs, bins, field_values)", "title": "" }, { "docid": "43b2639080d88112a5f491da11946f02", "score": "0.5280175", "text": "def bin_data(df, bin_width, max_bin, min_bin=0, column_to_bin='',scale_up_category=1):\n\n\tbins = np.arange(min_bin, (max_bin+bin_width), bin_width)\n\n\t# create bin names\n\tnummerical_names=np.arange(min_bin*scale_up_category, max_bin*scale_up_category, bin_width*scale_up_category)\n\t#group_names= [\"%.2f\" % x for x in nummerical_names]\n\n\t#string_names=[str(i) for i in nummerical_names]\n\t#string_names_PLUS=[str(i) for i in nummerical_names+bin_width]\n\t#group_names = [\"%s - %s\" %(x for x in string_names, y for y in string_names_PLUS)]\n\t\n\tif scale_up_category == 1:\n\t\tgroup_names= [\"%.2f\" % x for x in nummerical_names]\n\telif scale_up_category != 1:\n\t\t#group_names= [\"%.2f x %i\" % x for x in nummerical_names] # where second value is the scale up category\n\t\tgroup_names= [\"%.2f\" % x for x in nummerical_names]\n\n\t# categories specific data according to bins (applies across the row so everything is kept together)\n\tdf['categories_str'] = pd.cut(df[column_to_bin], bins, labels=group_names)\n\tdf['categories_int'] = pd.cut(df[column_to_bin], bins, labels=nummerical_names)\n\n\treturn df, (group_names, nummerical_names)", "title": "" }, { "docid": "21dcffecf7d16d097aa11d2bca8296b0", "score": "0.52496016", "text": "def labelByBins(cls, values, bins):\n\n if all([isinstance(x, (list, tuple, numpy.ndarray)) for x in bins]):\n \n # values in multi-parameter \n if values.shape[0] < len(bins):\n raise ValueError(\"Number of bins has to be equal or smaller \"\n + \"than the \")\n\n else:\n\n # values in 1-parameter\n values_data = numpy.expand_dims(values, axis=0)\n if numpy.ma.isMaskedArray(values):\n values = numpy.ma.array(values_data, mask=values.mask)\n else:\n values = values_data\n labels, bin_ids = cls.labelByBins(values, [bins])\n bin_ids = dict([(key, value[0]) for key, value in list(bin_ids.items())])\n return labels, bin_ids\n\n # make explicit bins and max values for each binning\n real_bins = [[[one_bins[ind], one_bins[ind+1]] \n for ind in range(len(one_bins)-1)]\n for one_bins in bins]\n bin_max = [one_bins[-1] for one_bins in bins]\n\n # initialize variable for the loop\n new_shape = list(values.shape)[1:]\n n_dim = values.shape[0]\n id_ = 0\n labels = numpy.zeros(shape=new_shape, dtype=int)\n bin_ids = {}\n\n # assign labels from lowest bins up\n for combination in itertools.product(*real_bins):\n\n # label for the current 
combination\n tmp_labels = numpy.ones(shape=new_shape, dtype=bool)\n for one_bin, index in zip(combination, list(range(n_dim))):\n tmp_labels = tmp_labels & (values[index] >= one_bin[0])\n\n # update\n id_ += 1\n labels[tmp_labels] = id_\n bin_ids[id_] = combination\n\n # limit lables for highest bins\n for one_max, index in zip(bin_max, list(range(n_dim))):\n labels[values[index] > one_max] = 0\n\n # make sure masked elements are 0\n if numpy.ma.isMaskedArray(values):\n labels[values.mask[0]] = 0\n\n return labels, bin_ids", "title": "" }, { "docid": "f451563c8c1a4e02901a9330043f8180", "score": "0.5228682", "text": "def rebin_histogram(histogram, nbins_max, verbose=False):\n\n xaxis = histogram.GetXaxis()\n nbins = xaxis.GetNbins()\n \n rebin_found = False\n ngroup = 2\n while not rebin_found:\n if nbins%ngroup == 0 and nbins/ngroup < nbins_max:\n rebin_found = True\n else:\n ngroup += 1\n\n nbins = int(nbins/ngroup)\n if verbose:\n print(\"Rebin histogram with %d groups. New numbers of bins = %d\" %(ngroup, nbins))\n histogram.Rebin(ngroup)\n\n return nbins", "title": "" }, { "docid": "04daa05cdace23c0a2d8d81bfe765368", "score": "0.5138955", "text": "def hist_Mhi_vs_Mstar(groups_dict):\n \n\n \n \n row_count = 0\n for size in trange(1, 9, 1): #I'm planning to have 1-9 sized groups\n \n #if size % 3 == 0:\n # row_count += 1\n #this_ax = ax[row_count, size%3]\n \n group_size = size+1 # Select number of galaxies per group --- adding +1 because it is counting from 0.\n \n central_gals = groups_dict[group_size][\"Centrals\"]\n sat_gals = groups_dict[group_size][\"Satellites\"]\n single_gals = groups_dict[1]['Centrals']\n \n # Do plotting of groups of N-length; it will be placed on each figure\n # Centrals and Satellites; x & y axis histogram\n # Using function two_sided_histogram to plot the histogram output so here I call the function and give (x, y), (x, y)\n two_sided_histogram_groups(np.log10(G['StellarMass'][central_gals]*1e10/h),\n np.log10(np.sum(G['DiscHI'],axis=1)[central_gals]*1e10/h+1), \n np.log10(G['StellarMass'][sat_gals]*1e10/h),\n np.log10(np.sum(G['DiscHI'],axis=1)[sat_gals]*1e10/h+1))", "title": "" }, { "docid": "abebe0b9e89549015c3cd18233504a0c", "score": "0.5127316", "text": "def axis_bins ( bins ) :\n #\n bins = set ( bins )\n bins = [ i for i in bins ]\n bins.sort()\n #\n if 2 > len ( bins ) :\n raise AttributeError(\"axis_bins: insufficient length of bins: %s\" % bins )\n #\n return ROOT.TAxis ( len ( bins ) - 1 , array.array ( 'd' , bins ) )\n #", "title": "" }, { "docid": "f0939d5096ff431f5180316217ffd78a", "score": "0.510974", "text": "def hist_Mhi_vs_Mstar_with_singles(groups_dict):\n \n row_count = 0\n for size in trange(1, 9, 1): #I'm planning to have 1-9 sized groups\n \n #if size % 3 == 0:\n # row_count += 1\n #this_ax = ax[row_count, size%3]\n \n group_size = size+1 # Select number of galaxies per group --- adding +1 because it is counting from 0.\n \n central_gals = groups_dict[group_size][\"Centrals\"]\n sat_gals = groups_dict[group_size][\"Satellites\"]\n single_gals = groups_dict[1]['Centrals']\n \n # Do plotting of groups of N-length; it will be placed on each figure\n # Centrals and Satellites; x & y axis histogram\n # Using function two_sided_histogram to plot the histogram output so here I call the function and give (x, y), (x, y)\n two_sided_histogram_group_and_single(np.log10(G['StellarMass'][central_gals]*1e10/h),\n np.log10(np.sum(G['DiscHI'],axis=1)[central_gals]*1e10/h+1), \n np.log10(G['StellarMass'][sat_gals]*1e10/h),\n 
np.log10(np.sum(G['DiscHI'],axis=1)[sat_gals]*1e10/h+1),\n np.log10(G['StellarMass'][single_gals]*1e10/h),\n np.log10(np.sum(G['DiscHI'],axis=1)[single_gals]*1e10/h+1) )", "title": "" }, { "docid": "1afe863b620c1d6d93da86d97d4be3fd", "score": "0.5067311", "text": "def fillTablesWithGroupNodes(self):\n self.groupNodes = GroupNode.getGroupNodes(finder)\n i = 0\n while i < len(self.groupNodes):\n gn = self.groupNodes.get(i)\n onEntry = getNextExtendedTableEntry(self.extendedTableIndex)\n onEntry.addEntry(gn.index1, gn.index2, gn.index3, Chain.GROUP_NODE, gn.cand, True, 0, 0, 0, 0, 0, 0)\n self.extendedTableMap.put(onEntry.entries[0], self.extendedTableIndex)\n self.extendedTableIndex += 1\n offEntry = getNextExtendedTableEntry(self.extendedTableIndex)\n offEntry.addEntry(gn.index1, gn.index2, gn.index3, Chain.GROUP_NODE, gn.cand, False, 0, 0, 0, 0, 0, 0)\n self.extendedTableMap.put(offEntry.entries[0], self.extendedTableIndex)\n self.extendedTableIndex += 1\n self.tmpSet.set(finder.getCandidates()[gn.cand])\n self.tmpSet.and_(gn.buddies)\n if not self.tmpSet.isEmpty():\n j = 0\n while j < len(self.tmpSet):\n index = self.tmpSet.get(j)\n onEntry.addEntry(index, gn.cand, False)\n tmp = self.onTable[index * 10 + gn.cand]\n tmp.addEntry(gn.index1, gn.index2, gn.index3, Chain.GROUP_NODE, gn.cand, False, 0, 0, 0, 0, 0, 0)\n j += 1\n self.tmpSet1.set(self.tmpSet)\n self.tmpSet1.and_(Sudoku2.BLOCK_TEMPLATES[gn.block])\n if not self.tmpSet1.isEmpty() and len(self.tmpSet1) == 1:\n offEntry.addEntry(self.tmpSet1.get(0), gn.cand, True)\n tmp = self.offTable[self.tmpSet1.get(0) * 10 + gn.cand]\n tmp.addEntry(gn.index1, gn.index2, gn.index3, Chain.GROUP_NODE, gn.cand, True, 0, 0, 0, 0, 0, 0)\n self.tmpSet1.set(self.tmpSet)\n if gn.line != -1:\n self.tmpSet1.and_(Sudoku2.LINE_TEMPLATES[gn.line])\n else:\n self.tmpSet1.and_(Sudoku2.COL_TEMPLATES[gn.col])\n if not self.tmpSet1.isEmpty() and len(self.tmpSet1) == 1:\n offEntry.addEntry(self.tmpSet1.get(0), gn.cand, True)\n tmp = self.offTable[self.tmpSet1.get(0) * 10 + gn.cand]\n tmp.addEntry(gn.index1, gn.index2, gn.index3, Chain.GROUP_NODE, gn.cand, True, 0, 0, 0, 0, 0, 0)\n lineAnz = 0\n line1Index = -1\n colAnz = 0\n col1Index = -1\n blockAnz = 0\n block1Index = -1\n gn2 = None\n j = 0\n while j < len(self.groupNodes):\n gn2 = self.groupNodes.get(j)\n if j == i:\n j += 1\n continue \n if gn.cand != gn2.cand:\n j += 1\n continue \n self.tmpSet2.set(gn.indices)\n if not self.tmpSet2.andEmpty(gn2.indices):\n j += 1\n continue \n if gn.line != -1 and gn.line == gn2.line:\n lineAnz += 1\n if lineAnz == 1:\n line1Index = j\n onEntry.addEntry(gn2.index1, gn2.index2, gn2.index3, Chain.GROUP_NODE, gn.cand, False, 0, 0, 0, 0, 0, 0)\n if gn.col != -1 and gn.col == gn2.col:\n colAnz += 1\n if colAnz == 1:\n col1Index = j\n onEntry.addEntry(gn2.index1, gn2.index2, gn2.index3, Chain.GROUP_NODE, gn.cand, False, 0, 0, 0, 0, 0, 0)\n if gn.block == gn2.block:\n blockAnz += 1\n if blockAnz == 1:\n block1Index = j\n onEntry.addEntry(gn2.index1, gn2.index2, gn2.index3, Chain.GROUP_NODE, gn.cand, False, 0, 0, 0, 0, 0, 0)\n j += 1\n if lineAnz == 1:\n gn2 = self.groupNodes.get(line1Index)\n self.tmpSet.set(Sudoku2.LINE_TEMPLATES[gn.line])\n self.tmpSet.and_(finder.getCandidates()[gn.cand])\n self.tmpSet.andNot(gn.indices)\n self.tmpSet.andNot(gn2.indices)\n if self.tmpSet.isEmpty():\n offEntry.addEntry(gn2.index1, gn2.index2, gn2.index3, Chain.GROUP_NODE, gn.cand, True, 0, 0, 0, 0, 0, 0)\n if colAnz == 1:\n gn2 = self.groupNodes.get(col1Index)\n 
self.tmpSet.set(Sudoku2.COL_TEMPLATES[gn.col])\n self.tmpSet.and_(finder.getCandidates()[gn.cand])\n self.tmpSet.andNot(gn.indices)\n self.tmpSet.andNot(gn2.indices)\n if self.tmpSet.isEmpty():\n offEntry.addEntry(gn2.index1, gn2.index2, gn2.index3, Chain.GROUP_NODE, gn.cand, True, 0, 0, 0, 0, 0, 0)\n if blockAnz == 1:\n gn2 = self.groupNodes.get(block1Index)\n self.tmpSet.set(Sudoku2.BLOCK_TEMPLATES[gn.block])\n self.tmpSet.and_(finder.getCandidates()[gn.cand])\n self.tmpSet.andNot(gn.indices)\n self.tmpSet.andNot(gn2.indices)\n if self.tmpSet.isEmpty():\n offEntry.addEntry(gn2.index1, gn2.index2, gn2.index3, Chain.GROUP_NODE, gn.cand, True, 0, 0, 0, 0, 0, 0)\n i += 1", "title": "" }, { "docid": "df7f7be4cba9009b7f8e54560f0f3fca", "score": "0.50603664", "text": "def test_groups_are_created(groups):\n grouper = FeatureGrouper(group=groups)\n X = grouper.transform(X_bin)\n assert \"mean(group_1)\" in X.columns\n assert X.columns[0] != X_bin.columns[0]", "title": "" }, { "docid": "033d4951bfae47465aa2df6968db6826", "score": "0.50447226", "text": "def hist_group_sum_Mhi_vs_Mstar(groups_dict, colors=['lightgrey','#feebe2','#fcc5c0','#fa9fb5','#f768a1','#dd3497','#ae017e','#7a0177'],\n legends=['2', '3', '4', '5', '6', '7', '8', '9'], markersizes=[90,150,210,270,330,400,470,540,610]):\n \n\n for size in trange(1, 9, 1): #I'm planning to have 1-9 sized groups\n\n group_size = size+1 # Select number of galaxies per group --- adding +1 because it is counting from 0.\n\n halo_groups = updated_dict[group_size][\"All_groups\"]\n \n x_values = []\n y_values = []\n\n for each_group in halo_groups:\n #print(each_group)\n x_sum = np.log10( np.sum((G[\"StellarMass\"][each_group]*1e10/h)) ) #gives array of the sum of the stellar masses \n y_sum = np.log10( np.sum((np.sum(G['DiscHI'],axis=1)[each_group]*1e10/h+1)) ) #gives array of the sum of the gas masses\n x_values.append(x_sum)\n y_values.append(y_sum)\n \n # Do plotting of groups of N-length; it will be placed on each figure\n # Centrals and Satellites; x & y axis histogram\n # Using function two_sided_histogram to plot the histogram output so here I call the function and give (x, y), (x, y)\n two_sided_histogram(x_values, y_values, colors[size-1], legends[size-1], markersizes[size-1]) # Call the function with specified colors to have different colors for each group.", "title": "" }, { "docid": "c239bcf44ce72ecf71b139903650be29", "score": "0.50391066", "text": "def group_summarize(input):\n num_groups = max([len(input[key]) for key in input.keys()]) / 2 + 1\n groupGC = [ [ ] for _ in range(num_groups) ]\n group_len = [ [ ] for _ in range(num_groups) ]\n group_ratio = [ [ ] for _ in range(num_groups) ]\n for key in input.keys():\n for i in range(num_groups):\n # start by grabbing the paired bins\n # all of which will be equally sized per group\n if len(input[key])/2 > i:\n tmpGC = input[key][i][0] + input[key][:i+1:-1][0][0]\n tmp_len = input[key][i][1] + input[key][:i+1:-1][0][1]\n groupGC[i].append(tmpGC)\n group_len[i].append(tmp_len)\n group_ratio[i].append(float(tmpGC) / float(tmp_len))\n # if central bin, just grab bin and add into groupGC and group_len\n # for group_ratio, separate slightly from other values in group\n elif len(input[key])/2 == i:\n tmpGC = input[key][i][0]\n tmp_len = input[key][i][1]\n groupGC[i].append(tmpGC)\n group_len[i].append(tmp_len)\n group_ratio[i].append([float(tmpGC) / float(tmp_len)])\n outRatio = [ [ ] for _ in range(num_groups) ]\n for i in range(num_groups):\n out_ratio[i] = float(sum(groupGC[i])) / 
float(sum(group_len[i]))\n return(out_ratio, group_ratio)", "title": "" }, { "docid": "cd438ead34a65b329361549f5d31cff7", "score": "0.5020962", "text": "def equally_spaced_bins(inner_value=1, outer_value=2, nbins=100):\n if inner_value >= outer_value:\n raise ValueError(\"The inner value must be strictly less than the outer value.\")\n if nbins <= 0:\n raise ValueError(\"The number of bins must be strictly greater than 0.\")\n bin_edges = np.zeros((2, nbins))\n bin_edges[0, :] = np.arange(0, nbins)\n bin_edges[1, :] = np.arange(1, nbins + 1)\n return inner_value + bin_edges * (outer_value - inner_value) / nbins", "title": "" }, { "docid": "90f8aba558ec34f849a84432c6f42b25", "score": "0.5008161", "text": "def calculate_ul_for_bins(df, nbins=10, col='uplift', groups_flag=False):\n df = df.sort_values(col, ascending=True) # sort values in score column\n df.reset_index(inplace=True, drop=True)\n chunks = np.array_split(df.index, nbins)\n df_chunks = pd.DataFrame(np.zeros((nbins, 1)), columns=['chunk_size'])\n for idx in df_chunks.index:\n df_tmp = df.loc[chunks[idx]]\n df_chunks.loc[idx, 'chunk_size'] = df_tmp.shape[0] # N samples in chunk\n df_chunks.loc[idx, 'min_'] = df_tmp[col].min() # min value of score in chunk\n df_chunks.loc[idx, 'max_'] = df_tmp[col].max() # max value of score in chunk\n df_chunks.loc[idx, 'purch_ratio'] = df_tmp.loc[:, 'target'].mean() # mean target value in chunk\n if groups_flag:\n df_chunks.loc[idx, 'purch_ratio_control'] = df_tmp.loc[(df_tmp['is_control_group']==1), 'target'].mean() # mean target in control group\n df_chunks.loc[idx, 'purch_ratio_treatment'] = df_tmp.loc[df_tmp['is_control_group']==0, 'target'].mean() # mean target in treatment group\n return df_chunks", "title": "" }, { "docid": "5bd50d75477a85bf697983f4b1938254", "score": "0.49710095", "text": "def setgroups(p_list): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "a2e99805b4cbff8d8d2dee80ee83bfae", "score": "0.49471572", "text": "def bins(cols, rowss):\n out = []\n for col in cols:\n ranges = {}\n for y, rows in rowss.items():\n for row in rows:\n x = row.cells[col.at]\n if x != \"?\":\n k = int(bin(col, x))\n range = RANGE(col.at, col.txt, x)\n ranges[k] = ranges[k] if k in ranges else range.get()\n range.extend(ranges[k], x, y)\n map(itself, ranges)\n ranges = list(dict(sorted(ranges.items())).values())\n out.append(ranges if isinstance(col, SYM) else merge_any(ranges))\n return out", "title": "" }, { "docid": "07ef86eb15c3006a627ec28de5342948", "score": "0.4938962", "text": "def genlabels(y, nbins = 100) :\n\tbins = np.linspace(y.min(),y.max(),nbins)\n\tyd = np.digitize(y,bins,right=True)\n\tynew = []\n\tfor row in np.arange(0,len(yd)) :\n\t\tynew.append(bins[yd[row]])\n\n\treturn bins, ynew", "title": "" }, { "docid": "59f742a09442c81045b84eaece1c6460", "score": "0.49249032", "text": "def array_to_bins_z(data_set, zStartIn, zEndIn, zIntIn):\n\n bin_size = zIntIn\n num_bins = int(np.ceil((zEndIn - zStartIn) / zIntIn))\n binned_grid = np.zeros(shape=(num_bins, 3)) ## last index is grid count\n\n\n for i in range(num_bins): ## set centres of bins\n binned_grid[i,0] = (i + 0.5)*bin_size + zStartIn\n\n\n for k in range(len(data_set)):\n x_val = data_set[k, 0] - zStartIn ## shift to the positive values\n y_val = data_set[k, 1]\n x_bin = int(x_val//bin_size) ## determine bin number to drop value in\n # print(x_bin, x_val)\n if x_bin >= 0 and x_bin < num_bins: ## bin only data in range, to avoid stray lipids\n binned_grid[x_bin, 1] += y_val\n binned_grid[x_bin,2] 
+= 1\n\n binned_grid_normed = np.copy(binned_grid)\n binned_grid_normed[:, 1] = np.divide(binned_grid[:, 1], binned_grid[:, 2], out=np.zeros_like(binned_grid[:, 1]), where=binned_grid[:, 2] != 0) ## mean\n binned_grid_normed[:, 2] /= bin_size*len(data_set) ## normalise to 1\n\n\n return binned_grid_normed", "title": "" }, { "docid": "73b8877e8c43851cd2a52981a7174ff5", "score": "0.49213034", "text": "def make_bins(df, col, cat, bin_names=[]):\n df.loc[df.loc[:,col] > (round(df.loc[:,col]\n .quantile(.8),2)),\n cat]=bin_names[0]\n df.loc[(df.loc[:,col] > (round(df.loc[:,col]\n .quantile(.6),2))) \n &(df.loc[:,col]<=(round(df.loc[:,col]\n .quantile(.8),2))),\n cat]= bin_names[1]\n df.loc[(df.loc[:,col] > round(df.loc[:,col]\n .quantile(.4),2)) \n &(df.loc[:,col]<=round(df.loc[:,col]\n .quantile(.6),2)), \n cat]= bin_names[2]\n df.loc[(df.loc[:,col] > round(df.loc[:,col]\n .quantile(.2),2)) \n &(df.loc[:,col]<=round(df.loc[:,col]\n .quantile(.4),2)), \n cat]= bin_names[3]\n df.loc[df.loc[:,col] < round(df.loc[:,col]\n .quantile(.2),2),\n cat]=bin_names[4]", "title": "" }, { "docid": "0d57a6f8f39e87a3ee64fa9dcdf40246", "score": "0.49202722", "text": "def bins(cols, rowss):\n\n out = []\n\n for col in cols:\n ranges = {}\n\n for y, rows in rowss.items():\n for row in rows:\n x = row[col[\"at\"]]\n\n if x != \"?\":\n k = bin(col, x)\n\n if k not in ranges:\n ranges[k] = creation.RANGE(col[\"at\"], col[\"txt\"], x)\n\n update.extend(ranges[k], x, y)\n\n ranges = sorted(ranges.values(), key=lambda x: x[\"lo\"])\n out.append(ranges if \"isSym\" in col else mergeAny(ranges))\n\n return out", "title": "" }, { "docid": "421508fdf8a6647eecd7c922c2df73ab", "score": "0.49171045", "text": "def make_bins(self, binsize_start_end, column, ranges):\n\n bin_column = []\n \n if ranges is None:\n bin_size = int(binsize_start_end[0])\n start = int(binsize_start_end[1])\n end = int(binsize_start_end[2])\n ranges = [i for i in range(start, end+bin_size, bin_size)]\n \n\n\n #global outside_bin\n self.outside_bin = '<' + str(ranges[-1])\n \n i = 0\n\n for row in column:\n while i < len(ranges):\n if row <= ranges[i]:\n bin_column.append(ranges[i])\n break\n i = i+1\n \n if i == len(ranges):\n bin_column.append(self.outside_bin)\n break\n i = 0\n \n #global bin_column_len\n #thought of using this to specify len of bins\n #make use of it in formatting excel\n self.bin_column_len = len(bin_column)\n self.bin_values = ranges\n\n return bin_column", "title": "" }, { "docid": "a04961727346838aebc335b88e78903f", "score": "0.49160478", "text": "def add_bins(\n dframe,\n income_measure,\n num_bins,\n wt=\"s006\",\n decile_details=False,\n weight_by_income_measure=False,\n):\n assert isinstance(dframe, pd.DataFrame)\n assert income_measure in dframe\n if decile_details and num_bins != 10:\n msg = \"decile_details is True when num_quantiles is {}\"\n raise ValueError(msg.format(num_bins))\n dframe.sort_values(by=income_measure, inplace=True)\n if weight_by_income_measure:\n dframe[\"cumsum_temp\"] = np.cumsum(\n np.multiply(dframe[income_measure].values, dframe[wt].values)\n )\n min_cumsum = dframe[\"cumsum_temp\"].values[0]\n else:\n dframe[\"cumsum_temp\"] = np.cumsum(dframe[wt].values)\n min_cumsum = 0.0 # because s006 values are non-negative\n max_cumsum = dframe[\"cumsum_temp\"].values[-1]\n cumsum_range = max_cumsum - min_cumsum\n bin_width = cumsum_range / float(num_bins)\n bin_edges = list(min_cumsum + np.arange(0, (num_bins + 1)) * bin_width)\n bin_edges[-1] = 9e99 # raise top of last bin to include all observations\n 
bin_edges[0] = -9e99 # lower bottom of 1st bin to include all observation00s\n if decile_details:\n assert bin_edges[1] > 1e-9 # bin_edges[1] is top of bottom decile\n bin_edges.insert(1, 1e-9) # top of zeros\n bin_edges.insert(1, -1e-9) # top of negatives\n bin_edges.insert(-1, bin_edges[-2] + 0.5 * bin_width) # top of 90-95\n bin_edges.insert(-1, bin_edges[-2] + 0.4 * bin_width) # top of 95-99\n num_bins += 4\n labels = range(1, (num_bins + 1))\n dframe[\"bins\"] = pd.cut(\n dframe[\"cumsum_temp\"], bin_edges, right=False, labels=labels\n )\n dframe.drop(\"cumsum_temp\", axis=1, inplace=True)\n return dframe", "title": "" }, { "docid": "2292b007c86806a2b5ca2860e6417330", "score": "0.48865372", "text": "def generate_lifetime_group_table(groupset, table, compare_table, mergedSet=None, exist=False):\n if mergedSet:\n set_to_add = mergedSet\n else:\n set_to_add = groupset\n for egoid in set_to_add:\n if egoid in compare_table and set_to_add in compare_table[egoid]:\n value = compare_table[egoid][set_to_add]\n if mergedSet and egoid in groupset:\n value += 1\n else:\n if exist:\n raise KeyError(\"This Key is Required to Exist {} - {}\".format(egoid, set_to_add))\n value = 1\n\n if not egoid in table:\n table[egoid] = {set_to_add: value}\n else:\n table[egoid][set_to_add] = value\n\n return table", "title": "" }, { "docid": "a45572b714fe3796b4f8f734ffea9643", "score": "0.4870244", "text": "def range_hist(items, bins):\n big_hist = ut.dict_hist(items)\n hist = ut.odict([(b, 0) for b in bins])\n\n for k, v in big_hist.items():\n for b in bins:\n if isinstance(b, (list, tuple)):\n if k >= b[0] and k < b[1]:\n hist[b] += v\n elif k == b:\n hist[b] += v\n return hist", "title": "" }, { "docid": "5c10512addb216c13227cba036f82e4e", "score": "0.4851924", "text": "def _expand_table(self, bin_num):\n if 0 <= bin_num < len(self.table):\n return\n\n if bin_num < 0:\n diff_sec = bin_num * self.bin_size\n self.start = self.start + timedelta(seconds=diff_sec)\n for _ in range(abs(bin_num)):\n new_bin = {\n 'count': 0,\n 'sum': 0\n }\n self.table.insert(0, new_bin)\n else:\n final_sum = self.table[len(self.table) - 1]['sum']\n for _ in range(bin_num - len(self.table) + 1):\n new_bin = {\n 'count': 0,\n 'sum': final_sum\n }\n self.table.append(new_bin)", "title": "" }, { "docid": "41b71be795a6521c891ef13551d58527", "score": "0.48406515", "text": "def make_histogram(x, bins):\n x1 = min(x)\n x2 = max(x)\n dx = (x2-x1)/bins\n starts = array([arange(x1, x2, dx)]).T\n ends = starts + dx\n m = starts.shape[0]\n \n # This gets us the in each interval starts < a < ends\n xprime = tile(x, (m, 1))\n\n a = xprime >= broadcast_to(starts, xprime.shape)\n b = xprime < broadcast_to(ends, xprime.shape)\n\n frequencies = (a*b).sum(1)\n retbins = ones(len(starts)+1)\n retbins[:len(starts)]*=starts.T[0]\n retbins[-1] = ends[-1]\n return frequencies, retbins", "title": "" }, { "docid": "04dbbe6fc08d431ce9c890cd0f074623", "score": "0.4838093", "text": "def show_group_histos(input_map,lons,lats,kgroup_draws,\n xLons=[-0.5,-0.5,0.5, 0.5],\n xLats=[-0.5, 0.5,0.5,-0.5],\n alreadyTrimmed=True,\n lonsTrimmed=False,\n input_map_units='Mean Group',\n saveName=None,figsize=None):\n \n \n londim = input_map.shape[1]\n \n fig, ax = p.subplots(figsize=figsize)\n if alreadyTrimmed == True:\n map_day = input_map\n else:\n map_day = input_map[:,londim//4:-londim//4]\n \n if lonsTrimmed == True:\n useLons = lons\n else:\n useLons = lons[:,londim//4:-londim//4]\n \n plotData = ax.imshow(map_day, extent=[-90,90,-90,90])\n cbar = 
fig.colorbar(plotData,ax=ax)\n cbar.set_label(input_map_units)\n ax.set_ylabel('Latitude')\n ax.set_xlabel('Longitude')\n \n if figsize is None:\n windowLocationsX = [-0.16,-0.16, 1.0, 1.0]\n windowLocationsY = [ 0.1, 0.6 , 0.6, 0.1]\n else:\n windowLocationsX = [-0.26,-0.26, 1.1, 1.1]\n windowLocationsY = [ 0.1, 0.7 , 0.7, 0.1]\n \n windowLabels = ['A','B','C','D']\n for ind in np.arange(len(xLons)):\n xLon, xLat = xLons[ind], xLats[ind]\n left, bottom, width, height = [windowLocationsX[ind], windowLocationsY[ind], 0.2, 0.2]\n ax2 = fig.add_axes([left, bottom, width, height])\n iLon, iLat = np.argmin(np.abs(useLons[0,:] - xLon)), np.argmin(np.abs(lats[:,0] - xLat))\n ax.text(useLons[0,iLon]* 180./np.pi,lats[iLat,0]* 180./np.pi,windowLabels[ind],\n color='red')\n \n ax2.set_title(windowLabels[ind])\n ax2.set_xlabel('Group')\n \n ax2.hist(kgroup_draws[:,iLat,iLon])\n ax2.set_xlim(-0.5,np.max(kgroup_draws) + 0.5)\n ax2.set_xticks(np.arange(np.max(kgroup_draws) + 1))\n \n if saveName is not None:\n fig.savefig(saveName,bbox_inches='tight')\n \n #fig.suptitle('Retrieved group map, n={}, {:.2f}$\\mu$m'.format(degree,waves[waveInd]))", "title": "" }, { "docid": "5e73678b122c52c8549bc04322ddbffe", "score": "0.48370987", "text": "def _create_log_bins(self, df):\n\n df['confirmed_bins'] = df['confirmed'].apply(self._assign_bins)\n df['deaths_bins'] = df['deaths'].apply(self._assign_bins)\n\n return df", "title": "" }, { "docid": "cc615302471cb52245f389859811feca", "score": "0.48223096", "text": "def buckets_grouping(*buckets: float) -> \\\n Callable[[float], Tuple[Optional[int], Optional[str]]]:\n\n def _app(x, e=None) -> Tuple[Optional[int], Optional[str]]:\n if x is None:\n return None, e\n for (lower, upper, index) in intervals:\n if lower is None and x <= upper:\n return index, e\n elif upper is None and lower < x:\n return index, e\n elif lower is not None and upper is not None and lower < x <= upper:\n return index, e\n return None, \"bucket not found for {}\".format(x)\n\n size = len(buckets)\n if size <= 0 or any([x is None for x in buckets]):\n return fixed_input(None, \"buckets not provided\")\n intervals = list(zip([None] + list(buckets), list(buckets) + [None], range(1, len(buckets) + 2)))\n return _app", "title": "" }, { "docid": "92799bf0bbb7ffa94186779050c27adc", "score": "0.48220006", "text": "def reference_group_to_cells(self):\n for cell in self.cells:\n cell.box = self", "title": "" }, { "docid": "fad2cc42ea9d7199c39f03dcc4976afe", "score": "0.4814041", "text": "def add_hourgroups(weather_hist):\n weather_hist['WEATHER_YEAR'] = weather_hist.index.year\n weather_hist['WEATHER_MONTH'] = weather_hist.index.month\n weather_hist['WEATHER_DAY'] = weather_hist.index.day\n weather_hist['WEATHER_HOUR'] = weather_hist.index.hour\n weather_hist['WEATHER_DAYOFWEEK'] = \\\n weather_hist.index.dayofweek\n weather_hist['WEATHER_HOURGROUP'] = weather_hist['WEATHER_HOUR'].map(\n lambda x: 4 * (int(x) // 4)\n )", "title": "" }, { "docid": "8fc5a02f1d57ca3f2ced1a2ef44fd9aa", "score": "0.48092005", "text": "def bin_definition(n_bins_gammaness, n_bins_theta2):\n max_gam = 1\n max_th2 = 0.05 * u.deg * u.deg\n min_th2 = 0.005 * u.deg * u.deg\n\n gammaness_bins = np.linspace(0, max_gam, n_bins_gammaness)\n theta2_bins = np.linspace(min_th2, max_th2, n_bins_theta2)\n\n return gammaness_bins, theta2_bins", "title": "" }, { "docid": "0de73a812f0e3c0211fa0648814216ca", "score": "0.4798482", "text": "def table_to_grid(prefixes, fields, table):\n grid = {}\n\n headers = []\n for key in table[0].keys():\n if 
not isinstance(key, str):\n raise Exception(f\"Bad key '{key}' in table '{table[0]}'\")\n if not (key.endswith(\"_label\") and names.label_key_to_id_key(key) in table[0]):\n label = key\n if key in fields:\n label = fields[key][\"label\"]\n headers.append({\"label\": label, \"value\": key})\n grid[\"headers\"] = [headers]\n\n rows = []\n for row in table:\n newrow = []\n for key, value in row.items():\n cell = None\n if key == \"ab_id\":\n iri = names.id_to_iri(prefixes, value)\n if value in config.labels:\n label = config.labels[value]\n else:\n label = value.replace(\":\", \"-\")\n cell = {\"iri\": iri, \"label\": label, \"value\": value}\n elif key.endswith(\"_id\"):\n iri = \"\"\n if value:\n iri = names.id_to_iri(prefixes, value)\n label = value\n label_key = names.id_key_to_label_key(key)\n if label_key in row and row[label_key] and row[label_key].strip() != \"\":\n label = row[label_key]\n cell = {\"iri\": iri, \"label\": label, \"value\": value}\n elif key.endswith(\"_label\") and names.label_key_to_id_key(key) in row:\n pass\n else:\n cell = {\"label\": value, \"value\": value}\n if cell:\n newrow.append(cell)\n rows.append(newrow)\n grid[\"rows\"] = rows\n\n return grid", "title": "" }, { "docid": "d754bf08fe10bdb3f14866693c737978", "score": "0.47983995", "text": "def bin_data(data = None, x = None, axis = 1, num_of_bins = 300, dtype = 'float'):\n from numpy import zeros, nan,arange, nanmax, nanmin, random,nanmean, mean\n import math\n\n length = data.shape[0]\n width = data.shape[1]\n\n if length <= num_of_bins:\n y_max = data\n y_min = data\n y_mean = data\n x_out = x\n else:\n y_min = zeros(shape = (width,num_of_bins), dtype = dtype)\n y_max = zeros(shape = (width,num_of_bins), dtype = dtype)\n y_mean = zeros(shape = (width,num_of_bins), dtype = dtype)\n x_out = zeros(shape = (num_of_bins,), dtype = dtype)\n\n for j in range(width):\n idx = 0\n for i in range(num_of_bins):\n step = int(math.ceil(1.0*(length - idx)/(num_of_bins-i)))\n\n start = idx\n end = idx + step\n if 'int' in dtype:\n y_max[j,i] = int(nanmax(data[start:end,j]))\n y_mean[j,i] = int(nanmean(data[start:end,j]))\n y_min[j,i] = int(nanmin(data[start:end,j]))\n else:\n y_max[j,i] = nanmax(data[start:end,j])\n y_mean[j,i] = nanmean(data[start:end,j])\n y_min[j,i] = nanmin(data[start:end,j])\n x_out[i] = mean(x[start:end])\n idx += step\n dic = {}\n dic['x'] = x_out\n dic['y_min'] = y_min\n dic['y_max'] = y_max\n dic['y_mean'] = y_mean\n return dic", "title": "" }, { "docid": "ad6ba2e4082c366878508491dbe0dbb9", "score": "0.47899234", "text": "def makeHistogram(values, numBins, xLabel, yLabel, title=None):", "title": "" }, { "docid": "7537561535328a064b6fe2a3a833a562", "score": "0.47813517", "text": "def make_hist(param, bins=30):\n d = dict()\n d['strike'] = 0\n d['length'] = 1\n d['width'] = 2\n d['depth'] = 3\n d['slip'] = 4\n d['rake'] = 5\n d['dip'] = 6\n d['longitude'] = 7\n d['latitude'] = 8\n\n if param == 'all':\n for key in d.keys():\n make_hist(key, bins)\n elif param not in d.keys():\n print(\"Invalid parameter value.\")\n else:\n column = d[param]\n A = np.load('samples.npy') # Must be in the same directory\n freq = A[1:, -1]\n values = A[1:, column]\n\n L = []\n for i in range(len(freq)):\n L += ([values[i]]*int(freq[i]))\n\n # Can add other things to the plot if desired, like x and y axis\n # labels, etc.\n plt.hist(L, bins)\n plt.title(param)\n plt.show()", "title": "" }, { "docid": "2667cc1c4208ca8240416db89f74bc1f", "score": "0.4780208", "text": "def make_axis ( nbins , *bins ) :\n \n if 
isinstance ( nbins , ROOT.TAxis ) : return nbins \n\n if isinstance ( nbins , integer_types ) and 0 < nbins :\n \n if 1 == len ( bins ) and isinstance ( bins [ 0 ] , sequence_types ) :\n abins = [ float ( e ) for e in bins [ 0 ] ]\n if nbins +1 <= len ( abins ) :\n abins = abins [ : nbins + 1 ] \n if is_sorted ( abins ) : \n return ROOT.TAxis ( nbins , array.array ( 'd' , abins ) )\n \n elif 2 == len ( bins ) and \\\n isinstance ( bins [ 0 ] , num_types ) and \\\n isinstance ( bins [ 1 ] , num_types ) and bins [ 0 ] < bins [ 1 ] :\n return ROOT.TAxis ( nbins , bins [ 0 ] , bins [ 1 ] )\n \n elif nbins + 1 <= len ( bins ) : \n abins = [ float ( e ) for e in bins [ : nbins + 1 ] ]\n if is_sorted ( abins ) : \n return ROOT.TAxis ( nbins , array.array ( 'd' , abins ) ) \n\n elif isinstance ( nbins , sequence_types ) and not bins :\n \n abins = [ float ( e ) for e in nbins ]\n if 2 <= len ( abins ) and is_sorted ( abins ) : \n return ROOT.TAxis ( nbins , array.array ( 'd' , abins ) )\n \n if isinstance ( nbins , num_types ) and bins : \n abins = [ float ( nbins ) ] + [ float ( e ) for e in bins ]\n if is_sorted ( abins ) : \n return ROOT.TAxis ( nbins , array.array ( 'd' , abins ) ) \n\n raise ArgumentError('make_axis: invalid arguments %s' % str ( ( nbins , ) + bins ) )", "title": "" }, { "docid": "a71b3f1374d6f320782c7b254c68487d", "score": "0.47799486", "text": "def group_values(seq):\n ...", "title": "" }, { "docid": "6d904afcfd707176d69d3b8cfbe6918e", "score": "0.47713318", "text": "def bin_data(Xtest,X,step,ys,aggregation):\n bintotals = np.zeros(Xtest.shape[0])\n bincounts = np.zeros(Xtest.shape[0])\n if aggregation=='median':\n binagg = [list([]) for _ in xrange(Xtest.shape[0])]\n\n for i,tile in enumerate(Xtest): #loop through the tiles\n for x,y in zip(X,ys): #loop through the data\n intile = True\n for tiled,xd,s in zip(tile,x,step): #loop through the dimensions of the current tile, data and step\n if (xd<tiled) or (xd>tiled+s):\n intile = False\n break\n if intile:\n bintotals[i]+=y\n bincounts[i]+=1\n if aggregation=='median':\n binagg[i].append(y)\n if aggregation=='mean': \n binaverages = bintotals/bincounts\n if aggregation=='median':\n binaverages = np.zeros(Xtest.shape[0])\n for i, b in enumerate(binagg):\n binaverages[i] = np.median(b)\n return bincounts, bintotals, binaverages", "title": "" }, { "docid": "46dc4b71e62c407d2dd8156c9fcd30d8", "score": "0.47712073", "text": "def bin_df( df, binned_feature, binning_options=binning_options, plotting_options=plotting_options, scales=scales, range=None, bins=None, lumi_scale=1, density=False, weights=True, weights_arr_=None):\n binned_results = {}\n defaults = [\"pt\", \"met\", \"eta\", \"phi\", \"numb\", \"dphi\"]\n bins_default = binning_options[ binning_options.feature == \"default\"].binning.values[0]\n range_default = make_tuple(binning_options[ binning_options.feature == \"default\"].range.values[0])\n y_label_default = binning_options[ binning_options.feature == \"default\"].y_label.values[0]\n title_default = \"\".join(binned_feature.split(\"_\"))\n\n count_defaults = 0\n for default in defaults:\n if default in binned_feature.lower():\n count_defaults += 1\n range_default = make_tuple(binning_options[ binning_options.feature == \"default_\"+default ].range.values[0])\n bins_default = binning_options[ binning_options.feature == \"default_\"+default ].binning.values[0]\n y_label_default = binning_options[ binning_options.feature == \"default_\"+default ].y_label.values[0]\n title_default = binning_options[ 
binning_options.feature == \"default_\"+default ].title.values[0]\n if count_defaults > 1:\n if \"_\" in title_default:\n title_default = \" \".join(binned_feature.split(\"_\"))\n y_label_default = \"Entries\"\n\n\n\n if binned_feature in binning_options.feature.values:\n bins_ = binning_options[ binning_options.feature == binned_feature ].binning.values[0]\n range_ = make_tuple( binning_options[ binning_options.feature == binned_feature ].range.values[0] )\n y_label = binning_options[ binning_options.feature == binned_feature ].y_label.values[0]\n title = binning_options[ binning_options.feature == binned_feature].title.values[0]\n\n else:\n bins_ = bins_default \n range_ = range_default \n y_label = y_label_default \n title = title_default\n\n if bins == None:\n bins = bins_\n if range == None:\n range = range_\n\n if \"???\" in title:\n title = string.replace(title, \"???\", \" \".join(binned_feature.split(\"_\"))) \n if \"_\" in title:\n title = \" \".join(title.split(\"_\"))\n\n binned_results[\"plotting\"] = {\"y_label\": y_label, \"title\": title}\n\n\n unique_df_processes = df.process_decay.unique()\n\n for process in plotting_options.process_decay.unique():\n if process in unique_df_processes:\n df_process = df[df.process_decay == process]\n #print process, scales[process], range, bins\n if weights == True:\n if type(weights_arr_) == type(None):\n weights_arr = df_process.weight.values\n else:\n #print (df.process_decay == process).shape, weights_arr.shape\n weights_arr = weights_arr_[(df.process_decay == process).values]\n sq_weights_arr = weights_arr**2\n else:\n weights_arr = None\n sq_weights_arr = None\n binned_results[process] = list( np.histogram( df_process[binned_feature], bins=bins, range=range, weights=weights_arr, density=density ) )\n binned_results[process][0] = binned_results[process][0] * lumi_scale * scales[process]\n binned_results[process].append( (binned_results[process][1][1:] - binned_results[process][1][:-1]) / 2. + binned_results[process][1][:-1] )\n binned_results[process].append( np.histogram( df_process[binned_feature], bins=bins, range=range, weights=sq_weights_arr, density=density )[0] )\n binned_results[process][3] = binned_results[process][3] * lumi_scale**2. 
* scales[process]**2.\n\n return binned_results", "title": "" }, { "docid": "006536c843e60efd959c51233099248c", "score": "0.47581744", "text": "def __getEGrid(self, nbins, macroBins, microBins):\n if macroBins is not None and nbins == macroBins:\n return self.groups, True\n if microBins is not None and nbins == microBins:\n return self.microGroups, True\n return arange(0.5, nbins + 0.5), False", "title": "" }, { "docid": "35973dac5c1ad29f27f6f5a2fea0c20d", "score": "0.47527558", "text": "def grouping(var):\r\n g1,g2,g3,g4,g5 = {},{},{},{},{}\r\n for country, num in var.items():\r\n if num < 10000:\r\n g1[country] = num\r\n elif num < 100000:\r\n g2[country] = num\r\n elif num < 300000:\r\n g3[country] = num\r\n elif num < 500000:\r\n g4[country] = num\r\n else:\r\n g5[country] = num\r\n group_list = [g1,g2,g3,g4,g5]\r\n return group_list", "title": "" }, { "docid": "6642890c955325b1c82a4e9807152b85", "score": "0.47439545", "text": "def group_table(groups):\n\n group_dict = {}\n for group in groups:\n _count_apps(group, group_dict)\n\n fields = OrderedDict([\n ('ID', lambda g: g[0]['id']),\n ('APPS', lambda g: g[1]),\n ])\n\n tb = table(fields, group_dict.values(), sortby=\"ID\")\n tb.align['ID'] = 'l'\n\n return tb", "title": "" }, { "docid": "34cd85fce7089abf16c2d90000a95659", "score": "0.474264", "text": "def select_groups(adata, groups_order_subset='all', key='groups'):\n groups_order = adata.obs[key].cat.categories\n if key + '_masks' in adata.uns:\n groups_masks = adata.uns[key + '_masks']\n else:\n groups_masks = np.zeros(\n (len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool\n )\n for iname, name in enumerate(adata.obs[key].cat.categories):\n # if the name is not found, fallback to index retrieval\n if adata.obs[key].cat.categories[iname] in adata.obs[key].values:\n mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values\n else:\n mask = str(iname) == adata.obs[key].values\n groups_masks[iname] = mask\n groups_ids = list(range(len(groups_order)))\n if groups_order_subset != 'all':\n groups_ids = []\n for name in groups_order_subset:\n groups_ids.append(\n np.where(adata.obs[key].cat.categories.values == name)[0][0]\n )\n if len(groups_ids) == 0:\n # fallback to index retrieval\n groups_ids = np.where(\n np.in1d(\n np.arange(len(adata.obs[key].cat.categories)).astype(str),\n np.array(groups_order_subset),\n )\n )[0]\n if len(groups_ids) == 0:\n logg.debug(\n f'{np.array(groups_order_subset)} invalid! 
specify valid '\n f'groups_order (or indices) from {adata.obs[key].cat.categories}',\n )\n from sys import exit\n\n exit(0)\n groups_masks = groups_masks[groups_ids]\n groups_order_subset = adata.obs[key].cat.categories[groups_ids].values\n else:\n groups_order_subset = groups_order.values\n return groups_order_subset, groups_masks", "title": "" }, { "docid": "3c6b52e957f3248327ce72b75a76b9b5", "score": "0.474025", "text": "def setGrouping(self):\n\t\n\t\tgrouped = self.parent().grouped_checkBox.isChecked()\n\t\t\n\t\tif grouped:\n\t\t\tfor item in self.itemList:\n\t\t\t\tself.group.addToGroup(item)\n\t\telse:\n\t\t\tfor item in self.itemList:\n\t\t\t\tself.group.removeFromGroup(item)", "title": "" }, { "docid": "b8092dbd7165c3e419220ed06cbe85a4", "score": "0.47395885", "text": "def get_col_bins(col_nms: List[str]) -> List[tuple]:\n # Make a lists of starting and finishing indexes\n cols_start = col_nms[0::5]\n cols_fin = col_nms[4::5]\n # Generating a list of tuples which will be the age groupings\n col_bins = [(s, f) for s, f in zip(cols_start, cols_fin)]\n # Again adding \"90+\", doubling it so it's doubled, like the other tuples\n col_bins.append((cols_start[-1:]*2))\n # TODO: make this more intelligent. Only if there is one col name left over it should be doubled. \n return col_bins", "title": "" }, { "docid": "6ff8ef52389e53774659222c952121b1", "score": "0.47374737", "text": "def put_gender_age_datapoints_into_bins(data, binsize=5):\n\n # by default, we'll do 20 bins.\n num_bins = 100 // binsize\n\n # each bin is 5 years of age, by default. we'll toss any ages > 100. these will represent\n # counts of ages for that bin.\n male_bins = [0] * num_bins\n female_bins = [0] * num_bins\n\n for datum in data:\n age = int(datum['age'])\n gender = datum['gender']\n\n if 0 < age <= 100:\n bin_number = age // binsize\n if gender == 'Male':\n male_bins[bin_number] += 1\n elif gender == 'Female':\n female_bins[bin_number] += 1\n\n return (male_bins, female_bins)", "title": "" }, { "docid": "a74cccc3f8a197c05ffd14f7a2ec3668", "score": "0.47360235", "text": "def boxplot_set_vs_set(\n df, df_bins, x_cols, y_cols, legend_label=None, overlay=False,\n show=True, color=None, line_width=None):\n # Dimensions of subplots\n width = len(x_cols)\n height = len(y_cols)\n \n if (not overlay):\n plt.figure(figsize=(13, 7))\n \n for i, y in enumerate(y_cols):\n for j, x in enumerate(x_cols):\n if x == y:\n plt.subplot(height, width, (i * width) + (j + 1))\n plt.axis('off')\n break\n subplot_data = []\n bin_names = ['']\n unique_values = df_bins[x].unique()\n unique_values.sort()\n \n # If there are <= 10 x-values, use them as bins for each box plot\n if len(unique_values) <= 10:\n # Get the data for each bin's boxplot\n for unique_value in unique_values:\n subplot_data.append(df[df[x]==unique_value][y])\n bin_names.append(unique_value)\n \n # Else separate x-values into bins\n else:\n bin_size = (max(unique_values) - min(unique_values)) / 10\n bin_start = min(unique_values)\n bin_end = bin_start + bin_size\n \n # Get the data for each bin's boxplot\n for count in range(10):\n if count == 0:\n subplot_data.append(\n df[(bin_start<=df[x]) & (df[x]<=bin_end)][y])\n bin_names.append(\n \"{:02.2f} - {:02.2f}\".format(bin_start, bin_end))\n #bin_names.append(\n # \"{} <= x <= {}\".format(bin_start, bin_end))\n else:\n subplot_data.append(\n df[(bin_start<df[x]) & (df[x]<=bin_end)][y])\n bin_names.append(\n \"{:02.2f} - {:02.2f}\".format(bin_start, bin_end))\n #bin_names.append(\n # \"{} < x <= {}\".format(bin_start, 
bin_end))\n bin_start = bin_end\n bin_end += bin_size\n # Draw the boxplots with descriptors\n plt.subplot(height, width, (i * width) + (j + 1))\n bp = plt.boxplot(\n subplot_data, labels=([legend_label] * (len(bin_names) - 1)))\n if line_width:\n for part in bp.keys():\n plt.setp(bp[part], linewidth=line_width)\n if color:\n for part in bp.keys():\n plt.setp(bp[part], color=color)\n plt.xlabel(x)\n plt.ylabel(y)\n plt.xticks(\n range(len(bin_names)), bin_names, rotation='vertical')\n plt.tight_layout()\n plt.title('{}\\nvs. {}'.format(x, y))\n if show:\n plt.show()", "title": "" }, { "docid": "eea8101ab81907547301e9e2f5df52c7", "score": "0.4724654", "text": "def groupdivision(WordLists, GroupMap):\n # pack the Chunk data in to ChunkMap(because this is fast)\n for i in range(len(GroupMap)):\n for j in range(len(GroupMap[i])):\n GroupMap[i][j] = WordLists[GroupMap[i][j]]\n return GroupMap", "title": "" }, { "docid": "11ca752210324572225be256238603f1", "score": "0.47218835", "text": "def reference_group_to_cells(self):\n for cell in self.cells:\n cell.row = self", "title": "" }, { "docid": "a52a2f127fb1a141ee28545426007444", "score": "0.4719996", "text": "def cutouts_on_umap_grid(tbl:pandas.DataFrame, nxy:int, \n umap_keys:tuple, min_pts:int=1):\n\n # Grid\n umap_grid = grid_umap(\n tbl[umap_keys[0]].values,\n tbl[umap_keys[1]].values, \n nxy=nxy)\n\n # Unpack\n xmin, xmax = umap_grid['xmin'], umap_grid['xmax']\n ymin, ymax = umap_grid['ymin'], umap_grid['ymax']\n dxv = umap_grid['dxv']\n dyv = umap_grid['dyv']\n\n # Cut\n good = (tbl[umap_keys[0]] > xmin) & (\n tbl[umap_keys[0]] < xmax) & (\n tbl[umap_keys[1]] > ymin) & (\n tbl[umap_keys[1]] < ymax) & np.isfinite(tbl.LL)\n\n tbl = tbl.loc[good].copy()\n num_samples = len(tbl)\n print(f\"We have {num_samples} making the cuts.\")\n\n # Grid\n xval = umap_grid['xval']\n yval = umap_grid['yval']\n\n # Grab cutouts\n cutouts = []\n for x in xval[:-1]:\n for y in yval[:-1]:\n pts = np.where((tbl[umap_keys[0]] >= x) & (\n tbl[umap_keys[0]] < x+dxv) & (\n tbl[umap_keys[1]] >= y) & (tbl[umap_keys[1]] < y+dxv)\n & np.isfinite(tbl.LL))[0]\n if len(pts) < min_pts:\n cutouts.append(None)\n continue\n\n # Pick a random one\n ichoice = np.random.choice(len(pts), size=1)\n idx = int(pts[ichoice])\n cutout = tbl.iloc[idx]\n # Save\n cutouts.append(cutout)\n\n # Return\n return tbl, cutouts, umap_grid", "title": "" }, { "docid": "9abc22389aa26b26160b1a16ddba85c2", "score": "0.47139427", "text": "def histogram(self, column, table_or_query, nbins, range=None):\n if ' ' in table_or_query:\n table_clause = \"(%s) as foo\"%(table_or_query,)\n else:\n table_clause = table_or_query\n\n if range is None:\n data = self.execute(\"select min(%s), max(%s) from %s\" %\n (column, column, table_clause))\n min = data[0][0]\n max = data[0][1]\n else:\n min, max = range\n\n clause = (\"round(%d * (%s - (%f)) / (%f - (%f)))\" %\n (nbins, column, min, max, min))\n h = np.zeros(nbins)\n res = self.execute(\"select %s as bin, count(*) from %s \"\n \"where %s <= %d \"\n \"group by %s order by bin\" % (clause, table_clause,\n clause, nbins, clause))\n for bin, count in res:\n if bin == nbins:\n bin -= 1\n h[bin] = count\n return h, np.linspace(min, max, nbins + 1)", "title": "" }, { "docid": "88fd0b673cf64bca61787231b0786981", "score": "0.47076344", "text": "def create_bins(lower_bound, width, quantity):\n \n\n bins = []\n for low in range(lower_bound, \n lower_bound + quantity*width + 1, width):\n bins.append((low,low+width))", "title": "" }, { "docid": 
"80dde088ad6231c23eff6fbf6507d614", "score": "0.46972874", "text": "def _label_as_bins(self,valueslist,nbins):\r\n binnedvalues,binlabels=self._get_bin_values(valueslist,nbins)\r\n return [binlabels[bv] for bv in binnedvalues]", "title": "" }, { "docid": "49169f16ecc6ba6936aa9e0ce8c4b349", "score": "0.469355", "text": "def make_bins(chrominfo, binsize, by_strand):\n result = {}\n for name, l in chrominfo.items():\n n_bins = l // binsize #reads in last bin are discarded\n if not by_strand:\n result[name] = numpy.zeros(n_bins, dtype = numpy.int32)\n else:\n result[name] = [numpy.zeros(n_bins, dtype = numpy.int32),\n numpy.zeros(n_bins, dtype = numpy.int32)]\n return result", "title": "" }, { "docid": "a6216ce56417fc283df5fb114c35e7a2", "score": "0.46904555", "text": "def group_obs(ungrouped):\n ind = []\n grouped = []\n for i in range(ungrouped.shape[0]):\n if ungrouped[i][4] < 2.0 and ungrouped[i][2] < 2.0 and ungrouped[i][3] > -2.0:\n ind.append(i)\n ws_ungrouped = ungrouped[ind]\n mins_previous = []\n for line in ws_ungrouped:\n mins = []\n for i in range(ws_ungrouped.shape[0]):\n min = np.linalg.norm(line[2:5] - ws_ungrouped[i][2:5], ord=1)\n if min < 3.0:\n mins.append(i)\n if mins not in mins_previous:\n grouped.append(ws_ungrouped[mins])\n mins_previous = [mins]\n return grouped", "title": "" }, { "docid": "54901a39123ea49d2865b76f733205c1", "score": "0.4687115", "text": "def split_data(data, ncut, flag=1, nan_policy='omit', return_bins=False):\n tf_ind_nan = np.isnan(data)\n nan_in_rawdata = any(tf_ind_nan)\n if nan_in_rawdata:\n if nan_policy == 'omit':\n data = data[~tf_ind_nan].copy()\n elif nan_policy == 'raise':\n raise ValueError('The input contains nan values')\n else:\n return np.nan\n\n if flag == 1:\n slices = np.linspace(data.min(), data.max(), ncut + 1)\n elif flag == 2:\n segs = np.linspace(0, 100, int(ncut + 1))\n slices = np.percentile(data, segs)\n elif flag == 3:\n slices = np.percentile(data, ncut)\n else:\n raise ValueError('flag should be 1, 2 or 3')\n\n slices[0] = slices[0] - 1\n slices[-1] = slices[-1] + 1\n try:\n labels = np.digitize(data, slices) - 1\n except ValueError as e:\n idx_dup = np.hstack([False, np.isclose(np.diff(slices), 1e-15)])\n fixed_sp = slices.copy()\n val = fixed_sp[idx_dup] + np.arange(1, np.sum(idx_dup) + 1) * 1e-15\n np.place(fixed_sp, idx_dup, val)\n labels = np.digitize(data, fixed_sp) - 1\n\n if nan_in_rawdata and nan_policy == 'omit':\n idx_loc = np.where(tf_ind_nan)[0] - np.arange(sum(tf_ind_nan))\n labels = np.insert(labels.astype(float), idx_loc, np.nan)\n\n if return_bins:\n slices[0] = slices[0] + 1\n slices[-1] = slices[-1] - 1\n return np.array(labels), list(zip(slices[:-1], slices[1:]))\n else:\n return np.array(labels)", "title": "" }, { "docid": "1eb7d8fcfa9f7d7b3eb7c631f4a9caf9", "score": "0.46846956", "text": "def assign_node_groups(self):\r\n self.nodes = self.G.nodes()\r\n self.groups = range(0,self.k)\r\n print self.nodes\r\n self._nodes_of_group = {g : set() for g in self.groups}\r\n self._groups_of_node = {v : set() for v in self.nodes}\r\n \r\n # for every group place atmost gamma vertices in it \r\n for g in self.groups:\r\n nodes_sample = random.sample(self.nodes,random.randint(1,self.bnd))\r\n \r\n # update groups of v\r\n for v in nodes_sample: self._groups_of_node[v].add(g)\r\n self._nodes_of_group[g] = nodes_sample\r\n \r\n \r\n self.bnd = max(map(len,self._nodes_of_group.values()))\r\n print self.bnd\r\n print self._nodes_of_group", "title": "" }, { "docid": "f3860d1705034f737e444e352a7633c8", "score": 
"0.46796855", "text": "def get2d_histogram(x, y,\n value_range,\n nbins=100,\n dtype=tf.dtypes.int32):\n x_range = value_range[0]\n y_range = value_range[1]\n\n histy_bins = tf.histogram_fixed_width_bins(y, y_range, nbins=nbins, dtype=dtype)\n\n H = tf.map_fn(lambda i: tf.histogram_fixed_width(x[histy_bins == i], x_range, nbins=nbins),\n tf.range(nbins))\n return H # Matrix!", "title": "" }, { "docid": "a6ad5edb14ccc250c08be286666d6b45", "score": "0.46758324", "text": "def reference_group_to_cells(self):\n for cell in self.cells:\n cell.column = self", "title": "" }, { "docid": "bd8a6cda9d8e8e1b0f4b79e00f0dd46d", "score": "0.46574536", "text": "def _get_bin_edges(self, bin_type, bin_n):\n s_times = sorted([t for time in self.times.values() for t in time])\n if bin_type == 'linear':\n self.edges = np.linspace(s_times[0] - 1, s_times[-1], bin_n + 1)\n elif bin_type == 'distribution':\n quantiles = np.linspace(0, 100, bin_n + 1)\n self.edges = np.percentile(s_times, quantiles)\n self.edges[0] =- 1 # Edge correction\n elif bin_type == 'year':\n bin_size = 365.25 * bin_n\n self.edges = np.arange(int(s_times[0]-1), int(s_times[-1]) + bin_size, bin_size)\n else:\n raise TypeError(\"'bin_type' must be either 'linear', 'distribution' or 'year'\")", "title": "" }, { "docid": "dde4390e657d22fcff5d446714e5dbc7", "score": "0.46521094", "text": "def group_by_lines(units):\n groups = {i.line: [] for i in units}\n for i in units: groups[i.line].append(i)\n return groups", "title": "" }, { "docid": "3e6119f8845add9154f66750783231c4", "score": "0.46508846", "text": "def get_bin_mapping(y):\n classes_ = np.unique(y)\n original_bins = sorted(classes_)\n n_bins = len(original_bins)\n bins = np.arange(n_bins)\n get_old_bin = dict(zip(bins, original_bins))\n return get_old_bin, n_bins", "title": "" }, { "docid": "c459515ca5e62eab75ddaeeb5e40b655", "score": "0.46501943", "text": "def add_groups(records):\n groups = []\n for row in records['id']:\n photo_index = records.loc[row, 'photo_index']\n if pd.isnull(photo_index):\n groups.append(None)\n elif photo_index <= 76:\n groups.append('female')\n elif photo_index <= 195:\n groups.append('juvenile')\n else:\n groups.append('male')\n records['group'] = pd.Series(groups, index=records['id'])", "title": "" }, { "docid": "46d2de12e893038d695e5760834f0e52", "score": "0.46451113", "text": "def make_bins(array):\n bin_array = (array[:-1] + array[1:]) / 2\n spacing = bin_array[1] - bin_array[0]\n bin_array -= spacing\n bin_array = np.append(bin_array, bin_array[-1] + spacing)\n bin_array = np.append(bin_array, bin_array[-1] + spacing)\n return bin_array", "title": "" }, { "docid": "dabbfe4276440222fd785e9196991473", "score": "0.463957", "text": "def binned_stats(self, in_fname, nbins, split=False, **args):\n \n statistic = args.get(\"statistic\", \"mean\")\n in_track = SimpleBed(in_fname)\n \n if statistic in [\"min\", \"max\", \"std\"]:\n statistic = eval(statistic)\n\n for r in in_track: \n profile = np.zeros(r.end - r.start)\n for f in self.tabix_track.fetch(r.chrom, r.start, r.end):\n f = f.split()\n start = int(f[1])\n end = int(f[2])\n if start < r.start:\n start = r.start\n if end > r.end:\n end = r.end\n profile[start - r.start: end - r.end] = float(f[3])\n h,_,_ = binned_statistic(\n np.arange(r.end - r.start), \n profile, \n bins=nbins, \n statistic=statistic)\n yield [f[0], r.start, r.end] + list(h)", "title": "" }, { "docid": "f553d4c99751a06e66023f3b1ceab1c4", "score": "0.46355852", "text": "def plot(\n input: List[str],\n data: List[int],\n output: 
Optional[str] = None,\n bins: int = 50,\n kde: bool = False,\n left: Optional[float] = None,\n right: Optional[float] = None,\n title: Optional[str] = None,\n xlabel: Optional[str] = None,\n ylabel: Optional[str] = None,\n labels: Optional[List[str]] = None,\n groups: Optional[List[int]] = None,\n) -> None:\n\n if len(data) != len(input):\n raise ValueError(\"Inconsistent number of input files and data columns.\")\n\n # Check number of groups\n g_max = len(input) # One input per group\n groups_default = [0] * g_max # All inputs in the same group by default\n if groups is not None:\n if len(groups) != len(input):\n raise ValueError(\"Inconsistent number of input files and groups.\")\n\n # Check group index bounds\n for g in groups:\n if g < 0 or g >= g_max:\n raise ValueError(f\"Group index {g} is out of bounds [{0},{g_max})\")\n\n # Check that group indices are consecutive\n m = max(groups)\n for idx in range(m):\n if idx not in groups:\n raise ValueError(f\"Group indices are not consecutive.\")\n\n groups = groups_default if groups is None else groups\n\n n_plots = max(groups) + 1\n fig, axes = plt.subplots(1, n_plots, figsize=(5 * n_plots, 4))\n\n # Make axes iterable for a single plot\n if n_plots == 1:\n axes = [axes]\n\n # Check number of labels\n if labels is not None:\n if len(labels) != len(data):\n raise ValueError()\n\n # Set title and labels\n if title is not None:\n fig.suptitle(title)\n if xlabel is not None:\n for ax in axes:\n ax.set_xlabel(xlabel)\n\n # Get colormap\n cm = get_colormap()\n\n for i, idx in enumerate(data):\n\n # Load data\n d = np.loadtxt(input[i])\n\n # Get label (if exists)\n hist_kws, kde_kws = None, None\n try:\n # Try to use options.labels as a list\n label = labels[i]\n\n if kde:\n kde_kws = {\"label\": label}\n else:\n hist_kws = {\"label\": label}\n\n except TypeError: # If labels is not a list, a TypeError occurs\n # Do nothing (hist_kws=None, kde_kws=None)\n pass\n\n sns.distplot(\n d[:, idx],\n bins=bins,\n kde=kde,\n hist_kws=hist_kws,\n kde_kws=kde_kws,\n color=cm[i],\n ax=axes[groups[i]],\n )\n\n for ax in axes:\n ax.set_xlim(left=left, right=right)\n\n if labels is not None:\n plt.legend()\n\n if output is not None:\n plt.savefig(output)\n else:\n plt.show()", "title": "" }, { "docid": "e14103e97c0950aa4629dd52690f32bd", "score": "0.46344987", "text": "def regroup_PkBins(target_nbins, k3D, Pk3D, Nmodes3D):\n N = k3D.shape[0]\n n = int(N/target_nbins)\n \n k3D_new = np.zeros((target_nbins,), dtype = float)\n Pk3D_new = np.zeros((target_nbins,), dtype = float)\n Nmodes_new = np.zeros((target_nbins,), dtype = float)\n \n for p in np.arange(0, target_nbins):\n \n # Central kbin value\n k3D_new[p] = 0.5*(k3D[p * n] + k3D[(p + 1) * n - 1])\n \n # Could be more elegant, but does the job\n weightedPk = Nmodes3D[p * n : (p + 1) * n] * Pk3D[p * n : (p + 1) * n ]\n sumModes = np.sum(Nmodes3D[p * n : (p + 1) * n])\n Pk3D_new[p] = 1/(sumModes)*np.sum(weightedPk)\n Nmodes_new[p] = sumModes", "title": "" }, { "docid": "84bb30b4e3fc3b1c85f0cd0cdd243cc8", "score": "0.46344796", "text": "def __init__(self, bins):\n self.bins = bins", "title": "" }, { "docid": "8bc16727dfc77b2301052c57a5010380", "score": "0.4633216", "text": "def generate_histogram(arr, bins):\n exploded_l = []\n # loop through list duplicating the distance for every individual\n # in the geographical area\n for pair in arr:\n exploded = [[pair[0]] * pair[1]]\n exploded_l.extend(exploded)\n # flatten the list of lists\n exploded_l = [item for sublist in exploded_l for item in sublist]\n # 
convert to histogram\n binned = np.histogram(exploded_l, bins) \n return(binned)", "title": "" }, { "docid": "04fb5191f1c8a3e11e773cf64462ec9f", "score": "0.4632937", "text": "def binning(self, bin_bounds: List, samples: np.ndarray, *values: Iterable, nan: float = 0.0) -> Tuple:\n\n # determine number of samples in histogram bins\n num_samples_hist, _ = np.histogramdd(samples, bins=bin_bounds)\n binning_schemes = []\n binning_result = None\n\n # iterate over passed value arrays\n for val in values:\n binning_result = binned_statistic_dd(samples, val, statistic='mean', bins=bin_bounds, binned_statistic_result=binning_result)\n hist, _, _ = binning_result\n\n # blank out each bin that has less samples than a certain sample threshold in order\n # to improve robustness of the miscalibration scores\n # convert NaN entries to float\n hist[num_samples_hist < self.sample_threshold] = np.nan\n hist = np.nan_to_num(hist, nan=nan)\n\n binning_schemes.append(hist)\n\n _, edges, idx = binning_result\n\n # first step: expand bin numbers\n # correct bin number afterwards as this variable has offset of 1\n idx = np.asarray(np.unravel_index(idx, [len(bounds)+1 for bounds in bin_bounds]))\n idx -= 1\n\n # convert to tuple as this can be used for array indexing\n idx = tuple([dim for dim in idx])\n\n return tuple(binning_schemes), num_samples_hist, edges, idx", "title": "" }, { "docid": "eb09532ecb9ddaf89054e883c81b34bd", "score": "0.46183985", "text": "def value_group():", "title": "" }, { "docid": "864d67ce9dd7791b2c2102e07c46e8c8", "score": "0.461838", "text": "def create_binning_matrix2(Nbins,lint, lmax):\n### CREATION DE LA MATRICE DE BINNING ###\n \n \n N_tot=Nbins+ lint-1\n B=nm.zeros((Nbins+ lint-1,lmax+1 ))\n \n \n bin1=(nm.floor(nm.linspace(lint+1, lmax+1, Nbins+1)))\n\n bord_droit_tab=nm.zeros((N_tot), dtype=nm.int32)\n index=0\n index2=0\n for i in range(0, N_tot+3):\n if i >= 3 and i<=lint+1:\n bord_droit_tab[index]=i\n index+=1\n if i>lint+1:\n bord_droit_tab[index]=bin1[index2+1]\n index+=1\n index2+=1\n\n bord_gauche_tab=nm.zeros((N_tot), dtype=nm.int32)\n index=0\n index2=0\n for i in range(0, N_tot+2):\n if i >= 2 and i<lint+1:\n bord_gauche_tab[index]=i\n index+=1\n if i>lint:\n bord_gauche_tab[index]=bin1[index2]\n index+=1\n index2+=1\n \n\n tab2=nm.floor((bord_droit_tab+bord_gauche_tab)/2.)\n print nm.sum(bord_droit_tab-bord_gauche_tab)\n \n\n for i in range(N_tot):\n B[i,bord_gauche_tab[i]:bord_droit_tab[i]]=1.0/(bord_droit_tab-bord_gauche_tab)[i]\n \n\n\n return B, bin1, tab2, N_tot", "title": "" }, { "docid": "ead7b2202813615bd9b13985b71dd75f", "score": "0.46166295", "text": "def loop_bins(self, norands=False, match=True):\n\n # loop over parameter bins\n for ind, bound in enumerate(self.lenses[\"bounds\"]):\n self.ind = ind\n self.setbin(bound)\n\n self.save_clust()\n self.save_clust_jk()\n\n if not norands:\n self.randsel(match=match)\n self.save_rands()\n self.save_rands_jk()\n\n self.savelists(norands=norands)", "title": "" }, { "docid": "74c476e0cf8acd358e62e9e31a6233b2", "score": "0.46139327", "text": "def _group_tiles_by_cells(tile_index_list, cell_index_list):\n key_map = defaultdict(list)\n for x, y, t in tile_index_list:\n if (x, y) in cell_index_list:\n key_map[(x, y)].append((x, y, t))\n return key_map", "title": "" }, { "docid": "f848a4d27ebe54cff543611c3931339a", "score": "0.4602185", "text": "def hist_by_group(x: pd.Series, g: pd.Series, *args, **kwargs):\n fig, axs = plt.subplots(2, 2) if g.nunique() > 2 else plt.subplots(1, 2)\n fig.suptitle(f\"Distribution of 
{x.name} by {g.name}\")\n g = g.top_n(3)\n g.index = x.index\n\n for ax, v in zip(axs.ravel(), g.unique()):\n x_g = x[g == v]\n\n ax.hist(x_g, color=\"pink\", *args, **kwargs)\n ax.set_title(v)\n\n # mean + ci\n xbar = x_g.mean()\n z = 1.96 # 95% ci\n ci = z * (x_g.std() / math.sqrt(x_g.shape[0]))\n ub, lb = xbar + ci, xbar - ci\n\n ymin, ymax = ax.get_ylim()\n ax.vlines(xbar, ymin, ymax, ls=\"--\", color=\"gray\")\n ax.vlines([lb, ub], ymin, ymax, ls=\":\", color=\"gray\")\n\n return fig, axs", "title": "" }, { "docid": "1423f1f7488fe06b94a0decca8930a12", "score": "0.45991495", "text": "def makeTabularGroupQuery(array_dims, groupings=None, add_over_margins=(), name=\"\"):\n myarray_dims = tuple(array_dims)\n for x in myarray_dims:\n assert isinstance(x, int), \"array_dims {} must be ints\".format(myarray_dims)\n if add_over_margins in ([], None):\n myadd_over_margins = ()\n else:\n for x in add_over_margins:\n # check that x is int and in range\n assert x in range(len(array_dims)), \\\n f\"{x} in add_over_margins {add_over_margins} is not within histogram array dimensions number: from 0 to {len(array_dims)}\"\n # check that we don't get any duplicate dims\n assert len(add_over_margins) == len(set(add_over_margins)), (\n \"add_over_margins {} has duplicate dims\").format(add_over_margins)\n # passed checks, set myadd_over_margins\n myadd_over_margins = tuple(add_over_margins)\n if groupings in (None, (), [], False, {}):\n query = SumOverGroupedQuery(array_dims=myarray_dims,\n add_over_margins=myadd_over_margins, name=name)\n else:\n assert isinstance(groupings, dict), \"groupings must be a dictionary\"\n keys = list(groupings.keys())\n for x in keys:\n assert isinstance(x, int), \"groupings keys must be integers\"\n # check that the intersection is empty\n assert not set(keys).intersection(set(myadd_over_margins)), (\n \"add_over_margins {} and groupings {} overlap\").format(myadd_over_margins, keys)\n # if so set groupings\n mygroupings = groupings\n query = SumOverGroupedQuery(array_dims=myarray_dims, groupings=mygroupings,\n add_over_margins=myadd_over_margins, name=name)\n return query", "title": "" }, { "docid": "22ca5d87857bc71c546615e7d620cc27", "score": "0.45989573", "text": "def __init__(\n self,\n bins: Union[int, Iterable[int]] = 10,\n equal_intervals: bool = True,\n detection: bool = False,\n sample_threshold: int = 1\n ):\n\n self.bins = bins\n self.detection = detection\n self.sample_threshold = sample_threshold\n self.equal_intervals = equal_intervals", "title": "" }, { "docid": "01432081ea39c61d21b4b52f5a04e0af", "score": "0.45969623", "text": "def __init__(self, name, title, expr, cut, nbins, range_binning):\n self.name = name\n self.title = title\n self.expr = expr\n self.cut = cut\n self.nbins = nbins\n if self.nbins < 0:\n self.userBinning = True\n self.nbins = len(range_binning) - 1\n self.binning = array('d',range_binning)\n else:\n self.userBinning = False\n if len(range_binning) != 2:\n raise RuntimeError('histo : range_binning malformed: expect exactly two values')\n self.xmin = float(range_binning[0])\n self.xmax = float(range_binning[1])\n\n if self.userBinning:\n self.histo = ROOT.TH1D(self.name, self.title, self.nbins, self.binning)\n else:\n self.histo = ROOT.TH1D(self.name, self.title, self.nbins, self.xmin, self.xmax)\n \n self.histo.SetDirectory(0)", "title": "" }, { "docid": "f0fbb3add60a9d863154004466b103b4", "score": "0.4596736", "text": "def create_bins(ser, bins, labels=None):\n min_ser = np.min(ser) - 1\n max_ser = np.max(ser)\n return pd.cut(ser, 
np.linspace(min_ser, max_ser, bins), labels=None, retbins=True)", "title": "" }, { "docid": "8c58f69ed83cc76d15d2a6485630bb74", "score": "0.4595517", "text": "def plot_subgrp_histograms(data, fig_size = [12,12], nbins = 100,\n alpha_level = 0.7, gridline_width = 0.5):\n\n # create legend labels\n label2use = []\n for i in range(1,data.shape[1]+1):\n label2use.append(\"ASD%d\" % i)\n\n # make figure specific size\n plt.figure(figsize = fig_size)\n\n # plot histograms\n plt.hist(data, nbins, alpha = alpha_level, label = label2use)\n\n # add legend\n plt.legend()\n\n # add grid lines\n plt.grid(linewidth = gridline_width)\n\n # add x and y-axis labels\n plt.xlabel(\"DV\")\n plt.ylabel(\"Count\")\n\n # show plot\n plt.show()", "title": "" }, { "docid": "0045643effa881b7f6ee2ed3307a60df", "score": "0.45938587", "text": "def _h1_split2_ ( h1 , n ) :\n assert isinstance ( n , integer_types ) and 0 < n ,\\\n 'Invalid number of bins %s' % n\n\n histos = []\n N = len ( h1 ) + 1 \n from ostap.utils.utils import chunked as _chunked\n bins = _chunked ( range ( 1 , N ) , n )\n for item in bins :\n lst = list ( item )\n if not lst : return\n h = h1[ lst [ 0 ] : lst [ -1 ] +1 ]\n histos.append ( h )\n return tuple ( histos )", "title": "" }, { "docid": "772043ffa5dc720651465d20e2ea2d24", "score": "0.45933583", "text": "def group_edges(self):\n\n\t\tprint(\"\\nGrouping edges\")\n\t\tself.grouped_edges = dict()\n\t\tself.domains = dict()\n\t\tself.ranges = dict()\n\t\tfor edge in self.edges:\n\t\t\tif edge[0] not in self.grouped_edges:\n\t\t\t\tself.grouped_edges[edge[0]] = set()\n\t\t\tif edge[0] not in self.domains:\n\t\t\t\tself.domains[edge[0]] = set()\n\t\t\tif edge[0] not in self.ranges:\n\t\t\t\tself.ranges[edge[0]] = set()\n\t\t\tself.grouped_edges[edge[0]].add((edge[1], edge[2]))\n\t\t\tself.domains[edge[0]].add(edge[1])\n\t\t\tself.ranges[edge[0]].add(edge[2])", "title": "" }, { "docid": "83a8b29ba4a5275e15f58a849fee7454", "score": "0.45910466", "text": "def _prepare_bins_regression(self, var: np.ndarray, n_dims: int, range_: Union[List[Tuple[float, float]], None]):\n\n # extract range parameter if given\n if range_ is not None:\n assert len(range_) == n_dims, \"Parameter \\'range_\\' must have the same length as number of dimensions.\"\n assert all([isinstance(x, (tuple, list)) for x in range_]), \"Binning range_ must be passed as tuple/list of length 2.\"\n\n min_ = [x[0] for x in range_]\n max_ = [x[1] for x in range_]\n\n # if not explicitly given, use min_ and max_ scores in variance\n else:\n min_ = np.min(var, axis=0) # (d,)\n max_ = np.max(var, axis=0) # (d,)\n\n # use the _prepare_bins function to initialize the binning scheme\n # the function is designed to handle a batch of data - thus, we only need to access the first element\n bin_bounds = self._prepare_bins([var], num_features=n_dims, min_=min_, max_=max_)[0]\n\n return bin_bounds", "title": "" }, { "docid": "1351d47a484cf47c6a7f9cc1e276c142", "score": "0.45873898", "text": "def list_bins(l, bins):\n n = float(len(l)) / bins\n return [l[int(n * i):int(n * (i + 1))] for i in range(bins)]", "title": "" }, { "docid": "971e1e910884db6fe00ff8ca2cdddd62", "score": "0.45840928", "text": "def test_split_into_chunks(self):\n check_list =np.arange(10)\n bins = split_into_chunks(check_list, 2)\n print (bins[0] == np.arange(50))\n assert len(bins)==2\n assert np.all(bins[0] == np.arange(5))\n assert np.all(bins[1] == np.arange(5,10))", "title": "" }, { "docid": "fd9f3cfde027498ad23cd652eff724d7", "score": "0.45836836", "text": "def 
_prepare_bins(\n self,\n X: List[np.ndarray],\n num_features: int,\n min_: Union[float, List[float]] = 0.0,\n max_: Union[float, List[float]] = 1.0\n ) -> List[List[np.ndarray]]:\n\n # check bins parameter\n # is int? distribute to all dimensions\n if isinstance(self.bins, int):\n bins = [self.bins, ] * num_features\n\n # is iterable? check for compatibility with all properties found\n elif isinstance(self.bins, (tuple, list)):\n if len(self.bins) != num_features:\n raise AttributeError(\"Length of \\'bins\\' parameter must match number of features.\")\n else:\n bins = self.bins\n else:\n raise AttributeError(\"Unknown type of parameter \\'bins\\'.\")\n\n # distribute min_ to all dims\n if isinstance(min_, (int, float, np.floating, np.integer)):\n min_ = [min_, ] * num_features\n\n # distribute min_ to all dims\n if isinstance(max_, (int, float, np.floating, np.integer)):\n max_ = [max_, ] * num_features\n\n # create an own set of bin boundaries for each batch in X\n bin_bounds = [[np.linspace(min_[dim], max_[dim], b + 1) for dim, b in enumerate(bins)] for _ in X]\n\n # on equal_intervals=True, simply use linspace\n # if the goal is to equalize the amount of samples in each bin, use np.quantile\n if not self.equal_intervals:\n for i, (batch_X, bounds) in enumerate(zip(X, bin_bounds)):\n for dim, b in enumerate(bounds):\n quantile = np.quantile(batch_X[:, dim], q=b, axis=0)\n\n # set lower and upper bounds to confidence limits\n quantile[0] = 0.\n quantile[-1] = 1.\n bin_bounds[i][dim] = quantile\n\n return bin_bounds", "title": "" }, { "docid": "acb597369e76e2c456232ee8ce2d6d74", "score": "0.45787334", "text": "def define_bins(self, data):\n\n indexless_data = data[:, 1:]\n bounds = (np.min(indexless_data, axis=0),\n np.max(indexless_data, axis=0))\n\n # We chop up the min-max column ranges into 'nr_cubes' parts\n self.chunk_dist = (bounds[1] - bounds[0]) / self.nr_cubes\n\n # We calculate the overlapping windows distance\n self.overlap_dist = self.overlap_perc * self.chunk_dist\n\n # We find our starting point\n self.d = bounds[0]\n\n # Use a dimension index array on the projected X\n # (For now this uses the entire dimensionality, but we keep for experimentation)\n self.di = np.array(range(1, data.shape[1]))\n self.nr_dimensions = len(self.di)\n\n if type(self.nr_cubes) is not list:\n cubes = [self.nr_cubes] * self.nr_dimensions\n else:\n assert len(self.nr_cubes) == self.nr_dimensions, \"There are {} ({}) dimensions specified but {} dimensions needing specification. If you supply specific number of cubes for each dimension, please supply the correct number.\".format(\n len(self.nr_cubes), self.nr_cubes, self.nr_dimensions)\n cubes = self.nr_cubes\n\n coordinates = map(np.asarray, itertools.product(\n *(range(i) for i in cubes)))\n\n return coordinates", "title": "" }, { "docid": "3c843f2e9a75519f855f64471150a638", "score": "0.45745933", "text": "def _generate_bin_pairs(self, hist, bins, length, seed):\n self._prng = RandomState()\n self._prng.seed(seed)\n\n bin_pairs = np.empty((0, 2), dtype=np.int)\n ub = []\n\n for i in range(0, length):\n a_i, b_i = self._gen_pair(hist, bins, ub)\n bin_pairs = np.append(bin_pairs, [[a_i, b_i]], axis=0)\n\n return bin_pairs", "title": "" }, { "docid": "584d565892866a5985db4dc22b91b422", "score": "0.45726806", "text": "def create_alien_group(self, x, y):\n self.alien_group = AlienGroup(x, y)\n for alien_column in self.alien_group:\n self.add(alien_column)", "title": "" } ]
5200f887684da296076c157cf535b32d
create a new game play for the player
[ { "docid": "468cfde9f3d6990ec2b206b070a01e69", "score": "0.70035684", "text": "def make_a_play(self, new_play):\n self.plays.append(new_play)", "title": "" } ]
[ { "docid": "cee521afb114678b3670112c326212d2", "score": "0.77359706", "text": "def newGame():\r\n # click on Play\r\n pyautogui.click(PLAY_COORDS, duration=0.25)\r\n logging.debug('New game...')", "title": "" }, { "docid": "73e3ac0ca67deb029ff26e2e9573bb94", "score": "0.7456077", "text": "def create_game(self) -> UUID:\n\n new_game = Game()\n self.gamePlayers[new_game.id] = new_game.start_game()\n return new_game.id", "title": "" }, { "docid": "f3682a85e8e7827c51b80db6322dbdba", "score": "0.73384815", "text": "def new_play(self, play):\n teams = play.teams\n\n if len(teams.keys()) > 1:\n self.__compute_elo_team(play, teams)\n else:\n self.__compute_elo_player(play)", "title": "" }, { "docid": "a4883d0919f418fa115b3a8bf6afc405", "score": "0.7243869", "text": "def new_game(self):\r\n while self.playing:\r\n self.player_event()\r\n self.play_again()", "title": "" }, { "docid": "63e73a6d12186a1e3cb41acebd3ef086", "score": "0.70798516", "text": "def new_game():\n # create board\n board = BoardJSON()\n board.create_board()\n turn = 'player1'\n return board, turn", "title": "" }, { "docid": "7492e9e0c10a8cf231df552383a2b012", "score": "0.7007747", "text": "def create_game(self, request):\n # Get player by username\n player = get_player(request.user_name)\n\n # Create the game\n game = Game.create_game(player=player.key, title=request.title)\n\n # Return confirmation of game creation\n return game.to_form()", "title": "" }, { "docid": "841c096a05d2f781a67ec938d04d56e6", "score": "0.69603014", "text": "def play_game(self):\n self.get_welcome_screen()\n self.create_players()\n self.start_game()\n self.exit_screen()", "title": "" }, { "docid": "5821705e71cf419dda2bf89968f3cbca", "score": "0.69581664", "text": "def _create_game(self):\n raise NotImplementedError()", "title": "" }, { "docid": "a594cfae998317eb02919e057f574225", "score": "0.6948562", "text": "def create_game():\n game = Game()\n Player(game, name='Red', strategy=noisy_tft)\n Player(game, name='Blue', strategy=noisy_tft)\n Payoff.matrix(game, PAYOFF_MATRIX)\n return game", "title": "" }, { "docid": "bc4568b8d570a021321a0ea315b71475", "score": "0.6942767", "text": "def start_new_game(player1, player2):\n game = {}\n game[\"player1\"] = \"X\"\n game[\"player2\"] = \"O\"\n empty_board_row = [\"-\", \"-\", \"-\"]\n empty_board = [empty_board_row[:], empty_board_row[:], empty_board_row[:]]\n game[\"board\"] = empty_board\n game[\"next_turn\"] = \"X\"\n game[\"winner\"]= None\n return game", "title": "" }, { "docid": "5c4b6cf8590538a0fe460f1b403aeae8", "score": "0.69158405", "text": "def create_game(self, player_name, hero, map):\n return", "title": "" }, { "docid": "e2c871dc7a0c903b4f88dd09c5376b5b", "score": "0.6913446", "text": "def new_game(request):\n game = Game.objects.create()\n game.players.add(request.player, request.computer)\n request.session['game_pk'] = game.pk\n messages.info(request, 'Good luck, {0} :)'.format(request.player.name))\n return redirect('play_game')", "title": "" }, { "docid": "d871f710e0b810a73392eb94add8b224", "score": "0.6908391", "text": "def create_game(self):\n for name in self.player_names:\n self.players.append(Player(name, Hand()))\n\n self.distribute_cards()\n\n for player in self.players:\n player.show_hand()", "title": "" }, { "docid": "442c6ef22a6a0fbbaa7f7cf00df2112b", "score": "0.6901601", "text": "def new_game(self):\n self.display.blit(NEW_GAME, NEW_GAME_BUTTON)", "title": "" }, { "docid": "c93ae1ed3c76d454a9ef3fa8ed93f87b", "score": "0.68904215", "text": "def create(name):\r\n if name in 
Game.GAMES:\r\n game = Game.GAMES[name]\r\n else:\r\n Game.GAMES[name] = game = Game(name)\r\n return game", "title": "" }, { "docid": "f7df989f01649f21e37184b454a72de7", "score": "0.68555206", "text": "def _create_new_game(self):\n GameManager.new_game()\n self.game = GameManager.instance()\n self.selected = None\n self.redraw.emit()\n if self.players_select[UICommons.BLACK_STR].isChecked():\n self._computers_move()", "title": "" }, { "docid": "46720dcff277c540b4797762bbb4559b", "score": "0.68413395", "text": "def start_new_game():\n response = create_game()\n return vars(response)", "title": "" }, { "docid": "579aae32ded250fd8e2c34238968c043", "score": "0.68261606", "text": "def newPlayer(self, name=None):\n pass", "title": "" }, { "docid": "02c4db24af8550b843bf2d56182b9e56", "score": "0.68231463", "text": "async def play_new_game(self, closed_mode=True):\n # self.phase_handler.advance_to_phase(Phase.stopping) # Stop the current game since we start a new one\n # Start a new game with the same people\n if len(self.participants) == 0:\n await self.message_sender.send_message(embed=output.warn_participant_list_empty(), reaction=False,\n group=Group.warn)\n return\n guesser = self.participants.pop(0)\n self.participants.append(self.guesser)\n game = Game(self.channel, guesser=guesser, bot=self.bot,\n word_pool_distribution=self.wordpool,\n participants=self.participants if closed_mode else [],\n repeation=closed_mode,\n quick_delete=self.quick_delete, expected_tips_per_person=self.expected_tips_per_person,\n )\n games.append(game)\n game.play()", "title": "" }, { "docid": "65770e5242e16241d35d4107496af6f7", "score": "0.68160075", "text": "def __get_play(self, game='test_game',\n date='31/12/16',\n wintype='max', players='p1=12,p2=0',\n winners=None):\n new_play = Play()\n new_play.game = game\n new_play.wintype = wintype\n new_play.date = date\n new_play['_id'] = uuid.uuid4()\n if players is None:\n new_play.players.append(Play.create_player(login='p1', score=12))\n new_play.players.append(Play.create_player(login='p1', score=12))\n elif players != []:\n for name, score in [player.split('=')\n for player in players.split(',')]:\n new_play.players.append(Play.create_player(login=name, score=int(score)))\n if winners:\n new_play.winners = winners\n return new_play", "title": "" }, { "docid": "dee65533955dee94e0c7bb7f5145477e", "score": "0.6780792", "text": "def new_game(board_size: int, player_x: str, player_o: str):\n game = Game(board_size, player_x, player_o)\n db.session.add(game)\n db.session.commit()\n if game is None:\n return False\n return game.game_id", "title": "" }, { "docid": "35343423bcafa6758f1c3a82a8011f3c", "score": "0.67668986", "text": "def btnNewGame(self):\n statevars.variables = {}\n statevars.save(\"saves/save_1.json\")\n statemgr.switch(\"play\")", "title": "" }, { "docid": "5037bb4262bfa1a8235f8fe1b1886f6a", "score": "0.6737901", "text": "def start_new_game(player1, player2):\n return {\n 'player1': player1,\n 'player2': player2,\n 'board': [\n [\"-\", \"-\", \"-\"],\n [\"-\", \"-\", \"-\"],\n [\"-\", \"-\", \"-\"],\n ],\n 'next_turn': player1,\n 'winner': None\n }", "title": "" }, { "docid": "d1dcd8201b93d8650e2771aaea890194", "score": "0.6734131", "text": "async def gamecreate(self, ctx: commands.Context,game:str):\n\t\t\n\t\tcurrentNames = await self.config.GAMES()\n\t\tif game.lower() in currentNames:\n\t\t\tawait ctx.send(\"This game does already exist\")\n\t\telse:\n\t\t\tcurrentNames[game.lower()] = {}\n\t\t\tawait 
self.config.GAMES.set(currentNames)\n\t\t\tawait ctx.send(\"Game created\")", "title": "" }, { "docid": "405da1ccd25960146a29d8cc0e1cd4ae", "score": "0.67284954", "text": "def new_game(self):\n self.ai.stop() # If AI is currently looking for a move, termininate it\n\n self.board.reset()\n self.active.set(True)\n\n self.player_turn = self.player_starting\n self.player_starting = not self.player_starting\n self.cross_turn = True\n\n if not self.multiplayer and not self.player_turn:\n ai_thread = Thread(target=self._ai_turn, daemon=True)\n ai_thread.start()", "title": "" }, { "docid": "fb281fa04be4dcbcb2e72fbefa59238d", "score": "0.6719805", "text": "def add_player(self, player):\n if len(self.players) >= PLAYERS:\n return Response(response=json.dumps({\"error\": \"game already started\"}),\n status=400, mimetype=\"application/json\")\n self.players.append(player)\n if len(self.players) == PLAYERS:\n self.current_game = Game(Dealer(), self.deck, self.players, self.players[0])\n self.current_game.start()\n self.save()", "title": "" }, { "docid": "fb8eb4247c3ec629345e54babda03a36", "score": "0.67195755", "text": "def new_game(self, map_name: str = \"training0\"):\n if map_name:\n game_options = {\"mapName\": map_name}\n else:\n game_options = \"\"\n\n self.game_state = GameState(api.new_game(self.api_key, game_options))", "title": "" }, { "docid": "d3607ceb942df0e84a3e98f801e1ce47", "score": "0.6715166", "text": "def create_new_game():\n new_obj = XiangqiGame()\n new_obj.set_helper_mode(True)\n new_obj.set_debug_mode(False)\n \n return new_obj", "title": "" }, { "docid": "7bb2d0e1fce228b0cf3f897b364570cd", "score": "0.670059", "text": "def new_game(cls, playerTwo, playerOne, rounds=1):\n game = Game(playerOne=playerOne,\n playerTwo=playerTwo,\n rounds=rounds\n )\n game.put()\n return game", "title": "" }, { "docid": "bec2921a2b808b2a63a53ed80b4eb108", "score": "0.6664477", "text": "def new_play():\r\n session = Session() \r\n fecha = request.json.get('fecha', '')\r\n fijos = request.json.get('fijos', '')\r\n corridos = request.json.get('corridos', '')\r\n parles = request.json.get('parles', '')\r\n candados = request.json.get('candados', '')\r\n nota = request.json.get('nota', '')\r\n \r\n play = Play(\r\n fecha = fecha,\r\n fijos = fijos,\r\n corridos = corridos,\r\n parles = parles,\r\n candados = candados,\r\n\t\t\t nota = nota,\r\n user = g.user)\r\n session.add(play)\r\n session.commit()\r\n return jsonify({ 'play': play.id })#, 201 # 201 mean resource created\r", "title": "" }, { "docid": "d61af28c47440432797d1f269e7d8494", "score": "0.6660552", "text": "def newgame(self):\n #self.connect_clients()\n time.sleep(1) #wait one second\n\n self.engine = zzs.Engine(True)\n for uid in self.names:\n if uid in self.clients:\n self.engine.add_player(uid, 0)\n else:\n self.engine.add_player(uid, 1)\n\n self.ingame = ''.join(chr(uid) for uid in self.names.keys())\n\n print(f\"Server: sending start\")\n for uid, fd in self.clients.items():\n sock = self.connections[fd]\n msg = chr(uid) * 10\n try: # send long and personalised message to client\n sock.send(msg.encode())\n except ConnectionError:\n print(\"broken connection with \" + str(sock.getsockname()))\n\n #the game is about to start\n\n cdown = count.down(3)\n while True:\n try:\n tt, dt = next(cdown)\n except StopIteration:\n break\n else:\n if tt is not None:\n print(tt)", "title": "" }, { "docid": "9ed6c060e9b9eca35ce500258aa6399c", "score": "0.6653178", "text": "def new_player():\n return Score(player_name=\"Rico\", score=80, date=\"Sun Nov 
29 15:59:51 2020\")", "title": "" }, { "docid": "432692e44e25bc54866c8880c55e1c1b", "score": "0.6649561", "text": "def play_game(name, players, run_id, current_cycle, config):\n game = Game(name=name, players=players, run_id=run_id, current_cycle=current_cycle, config=config)\n game.deal()\n return game.play()", "title": "" }, { "docid": "79f947429d68ae25164786c226e2bafc", "score": "0.66446394", "text": "def _manually_create_game():\n game_type = pyspiel.GameType(\n \"matching_pennies\",\n \"Matching Pennies\",\n pyspiel.GameType.Dynamics.SIMULTANEOUS,\n pyspiel.GameType.ChanceMode.DETERMINISTIC,\n pyspiel.GameType.Information.ONE_SHOT,\n pyspiel.GameType.Utility.ZERO_SUM,\n pyspiel.GameType.RewardModel.TERMINAL,\n 2, # max num players\n 2, # min_num_players\n True, # provides_information_state\n True, # provides_information_state_tensor\n False, # provides_observation\n False, # provides_observation_tensor\n dict() # parameter_specification\n )\n game = pyspiel.MatrixGame(\n game_type,\n {}, # game_parameters\n [\"Heads\", \"Tails\"], # row_action_names\n [\"Heads\", \"Tails\"], # col_action_names\n [[-1, 1], [1, -1]], # row player utilities\n [[1, -1], [-1, 1]] # col player utilities\n )\n return game", "title": "" }, { "docid": "2232d57097ea1da85e726077c43d1e71", "score": "0.66381407", "text": "def start_game(self):\n self.__player_turn = Game.PLAYER1", "title": "" }, { "docid": "e702f90cff66983279730035e36bdbba", "score": "0.66344476", "text": "def game_creation(self):\n try:\n self.manager.client.create_game(\n name=self.game_name.text or self.DEFAULT_NAME,\n capacity=self.capacity.text or self.DEFAULT_CAPACITY,\n map_width=self.map_width.text or self.DEFAULT_MAP_WIDTH,\n map_height=self.map_height.text or self.DEFAULT_MAP_HEIGHT,\n min_radius=self.min_radius.text or self.DEFAULT_MIN_RADIUS,\n win_size=self.win_size.text or self.DEFAULT_WIN_SIZE,\n max_speed=self.max_speed.text or self.DEFAULT_MAX_SPEED,\n eat_ratio=self.eat_ratio.text or self.DEFAULT_EAT_RATIO,\n food_production_rate=self.food_production_rate.text or self.DEFAULT_FOOD_PRODUCTION_RATE,\n max_hit_count=self.max_hit_count.text or self.DEFAULT_MAX_HIT_COUNT\n )\n except CreationFailedException as e:\n self.manager.warn(str(e), title=\"Error\")\n except ConnectionError:\n self.manager.warn(\"Could not connect to the server\", title=\"Error\")\n else:\n self.manager.main_screen()", "title": "" }, { "docid": "a96dd5662e69214fbb35625994d21621", "score": "0.6631085", "text": "def play_game(self, game_id=\"pong\"):\n pass", "title": "" }, { "docid": "74c659355ae8be06bfb5072eb8d2ac15", "score": "0.6627248", "text": "def new_game_clicked(self):\n self._create_new_game()", "title": "" }, { "docid": "898ee01626bf061c8f17107902287e67", "score": "0.66256905", "text": "def new_game(self):\n board = np.full((self.num_rows, self.num_cols), -1)\n self.board = board\n self.player_board = copy.deepcopy(self.board)\n self.first_move = False\n self.status = 0 # 0 means still playing, -1 is game over, 1 is a win\n self.reward = 0 # Reward for the agent", "title": "" }, { "docid": "4bdeaf669a2c3315defe662ddc22d525", "score": "0.6611497", "text": "def make_game():\n return ascii_art.ascii_art_to_game(\n GAME_ART, what_lies_beneath='.',\n sprites={'P': PlayerSprite})", "title": "" }, { "docid": "ad4b6f5adb5ea116cf1e6d9acfdf706d", "score": "0.66006124", "text": "def play(self, player, game):\n return None", "title": "" }, { "docid": "ad4b6f5adb5ea116cf1e6d9acfdf706d", "score": "0.66006124", "text": "def play(self, player, game):\n return None", 
"title": "" }, { "docid": "a6438b9f6dfafb70fe633ea27b58bc0c", "score": "0.65899575", "text": "def newGame(playerName):\n\tplayer1 = player(playerName,True)\n\tplayer2 = player('AI',False)\n\tg = cgame(player1,player2)\n\n\t# determine dealer\n\tdealer = random.choice([g.player1,g.player2])\n\tif dealer == g.player1:\n\t\tg.setDeal(True)\n\t\tg.message = 'The dealer is: %s' % dealer.name\n\treturn g", "title": "" }, { "docid": "bffa68446f1285a8894a6fa52d5573cf", "score": "0.65877694", "text": "def create_player(self, player_name):\n _player = Player(player_name)\n self.players[_player.get_id()] = _player\n return _player.get_id()", "title": "" }, { "docid": "2b8ec3d9c0235d0f56b4227e6b1fe6ad", "score": "0.65859705", "text": "def start_new_game(p: Player, scr) -> None:\n # Reset turn counter\n session_objects.s_turn_counter = 0\n \n # Add starter cards into deck\n p.deck = [\"copper coin\"] * 7\n p.deck += [\"land\"] * 3\n \n # Copy deck into drawpile\n p.drawpile = p.deck.copy()\n\n # shuffle draw pile\n random.shuffle(p.drawpile)\n\n # Draw 5 cards\n p.draw_cards(5, main_screen, verbose=False)", "title": "" }, { "docid": "227d3b75ae403a2656285f3a2a4a6e52", "score": "0.65820765", "text": "def new_game(screen):\n encounter_loop()", "title": "" }, { "docid": "a5cad91f309308c17b6901aea8d6fe2c", "score": "0.6576672", "text": "def CreateGame(self, game_type, players):\n if game_type not in GAMES:\n raise ValueError(\"Game '{}' not found or not supported\".format(game))\n\n objs = [Player(name) for name in players]\n game = GAMES[game_type][0](objs)\n\n self.games[game.id] = game\n self.players.update((p.id, p) for p in objs)\n self.in_room.update((p.id, game) for p in objs)\n return (game.id, objs)", "title": "" }, { "docid": "771a8dd89fd3165490704639302a0109", "score": "0.65726715", "text": "def newGame(self, players, blind, ante):\r\n self.players = players\r\n self.blind = blind\r\n self.ante = ante\r\n for i in range(len(players)):\r\n if players[i] == 'h': \r\n players[i] = PokerHuman(10000, \"player%s\" % i, i)\r\n elif players[i] == 'b': \r\n players[i] = PokerBot(10000, \"player%s\" % i, i, self)\r\n elif players[i] == None: pass\r\n else: raise \"illegal player\"\r\n self.gui.setUpPlayers(players)\r\n self.dealer = -1\r\n self.deal()", "title": "" }, { "docid": "63684639c6c6eedf5f8a8aefa47bdca8", "score": "0.656406", "text": "def startNewGame(self):\n while not self.env.in_game:\n self.env.startNewGame()\n self.epoch += 1", "title": "" }, { "docid": "64df585171b2986c5bb04a1f50e9d8b5", "score": "0.65626293", "text": "async def new(self, ctx, *, title: str = None):\n if title is None or title == \"\":\n title = \"The Hunger Games\"\n else:\n title = strip_mentions(ctx.message, title)\n title = sanitize_here_everyone(title)\n title = sanitize_special_chars(title)\n owner = ctx.author\n ret = hg.new_game(ctx.channel.id, owner.id, owner.name, title)\n if not await self.__check_errors(ctx, ret):\n return\n await ctx.send(\"{0} has started {1}! 
Use `{2}add [-m|-f] <name>` to add a player or `{2}join [-m|-f]` to enter the \"\n \"game yourself!\".format(owner.mention, title, prefix))", "title": "" }, { "docid": "491d295bb4e30717224f5d09db6e8d42", "score": "0.6550431", "text": "def play(self, player, game):\r\n super().play(player, game)\r\n game.next_player()", "title": "" }, { "docid": "e3fcefc6d267b2fac11d180d3e47bb82", "score": "0.6544139", "text": "def play_game(self):\n pass", "title": "" }, { "docid": "96d8096b7faea54b6312756b763aad02", "score": "0.65197605", "text": "def create_game(game_name):\n mdb.create_data(olib.Game(game_name).__dict__, \"games\")\n return 0", "title": "" }, { "docid": "8b90983909d751da9a61733bb3a96e0b", "score": "0.6511116", "text": "def start_game(self):\n\n self.task_generator = TaskGenerator(len(self.players))\n self.adjust_challenge()\n self.assign_tasks()", "title": "" }, { "docid": "e324bdf47e9e1693d3a2a3a81939838e", "score": "0.6495053", "text": "def create_player(player_name):\n mdb.create_data(olib.Player(player_name).__dict__, \"people\")\n return 0", "title": "" }, { "docid": "48eed3746c85de4e43a297b918ac2557", "score": "0.64866984", "text": "def create_game(self, game_name):\n self.games[game_name] = classes.Game(game_name)\n return self.games[game_name]", "title": "" }, { "docid": "43853b0d541103640c31c5b820124dfc", "score": "0.64826196", "text": "def newGame(self):\n\n\n self.allSprites = pygame.sprite.Group()\n self.platforms = pygame.sprite.Group()\n self.player = Player(self)\n self.spawner = Spawner(self, self.player)\n self.enemyList = pygame.sprite.Group()\n self.bulletList = pygame.sprite.Group()\n self.allSprites.add(self.player)", "title": "" }, { "docid": "1b7603bd51193cdd52b10200df909a92", "score": "0.6468652", "text": "def start_game():\n GameManager(constants.players_number, constants.tanks_number).run()", "title": "" }, { "docid": "da1a0dd06c1050aa44200d11eb903e87", "score": "0.64542717", "text": "def _start_game(self, open_game, final_ogp):\r\n situation = open_game.situation\r\n all_ogps = open_game.ogps + [final_ogp]\r\n running_game = tables.RunningGame()\r\n running_game.next_hh = 0\r\n # Maintain game ids from open games, essentially hijacking the\r\n # uniqueness of the gameid sequence in open game.\r\n running_game.gameid = open_game.gameid\r\n running_game.situation = situation\r\n # We have to calculate current userid in advance so we can flush.\r\n running_game.current_userid = \\\r\n all_ogps[situation.current_player_num].userid\r\n running_game.board_raw = situation.board_raw\r\n running_game.current_round = situation.current_round\r\n running_game.pot_pre = situation.pot_pre\r\n running_game.increment = situation.increment\r\n running_game.bet_count = situation.bet_count\r\n running_game.current_factor = 1.0\r\n running_game.last_action_time = datetime.datetime.utcnow()\r\n situation_players = situation.ordered_players()\r\n self.session.add(running_game)\r\n self.session.flush() # get gameid from database\r\n map_to_range = {p: p.range for p in situation.players}\r\n player_to_dealt = deal_from_ranges(map_to_range, running_game.board)\r\n for order, (ogp, s_p) in enumerate(zip(all_ogps, situation_players)):\r\n # create rgps in the order they will act in future rounds\r\n rgp = tables.RunningGameParticipant()\r\n rgp.gameid = running_game.gameid\r\n rgp.userid = ogp.userid # haven't loaded users, so just copy userid\r\n rgp.order = order\r\n rgp.stack = s_p.stack\r\n rgp.contributed = s_p.contributed\r\n rgp.range_raw = s_p.range_raw\r\n rgp.left_to_act = 
s_p.left_to_act\r\n rgp.folded = False\r\n rgp.cards_dealt = player_to_dealt[s_p]\r\n if situation.current_player_num == order:\r\n assert running_game.current_userid == ogp.userid\r\n self.session.add(rgp)\r\n self.session.flush() # populate game\r\n # Note that we do NOT create a range history item for them,\r\n # it is implied.\r\n # TODO: REVISIT: check that this cascades to ogps\r\n self.session.delete(open_game)\r\n self._deal_to_board(running_game) # also changes ranges\r\n notify_first_player(running_game, starter_id=final_ogp.userid)\r\n logging.debug(\"Started game %d\", open_game.gameid)\r\n return running_game", "title": "" }, { "docid": "9749c3c77b1a4621853bd589a71df5ef", "score": "0.64522636", "text": "def set_new_player(self) -> None:\n self.player_controller.set_new_player()\n self.start_program()", "title": "" }, { "docid": "907a8153befc165ab75f77a5ac45f176", "score": "0.6450045", "text": "def new_game():\n game = m.Game(word_gen.gen_word_list(), word_gen.gen_team_list())\n db.session.add(game)\n db.session.commit()\n return redirect(url_for('display_game', game_id=game.id))", "title": "" }, { "docid": "c5d237c5d0bbc38fa709cd848e7ff6ca", "score": "0.644758", "text": "def set_new_game(self):\r\n self._board = Board()\r\n self._cur_player = Game.PLAYER_ONE", "title": "" }, { "docid": "272bd13788678190b42f710e0335aa8d", "score": "0.6443037", "text": "def new_game():\n # Must initialize the log before we do anything that might emit a message.\n log.init()\n quest.display_welcome()\n\n player = Object(None, '@', 'player', libtcod.white, blocks=True,\n fighter=Fighter(\n hp=36,\n death_function=player_death,\n skills={'bow':70, 'first aid':24, 'grappling':40}))\n player.inventory = []\n player.level = 1\n player.game_state = 'playing'\n player.skill_points = 0\n player.turn_count = 0\n # True if there's a (hostile) fighter in FOV\n player.endangered = False\n\n _new_equipment(player,\n Object(None, '(', 'silk undertunic', libtcod.dark_sky,\n item=Item(description='A thick under-tunic of raw silk; prevents 2 bleeding.'),\n equipment=Equipment(slot='underclothes', bleeding_defense=2)))\n\n _new_equipment(player,\n Object(None, '(', 'quilt kaftan', libtcod.dark_sky,\n item=Item(description='A heavy quilted kaftan; keeps you warm and prevents 2 wound.'),\n equipment=Equipment(slot='robes', defense_bonus=2)))\n\n _new_equipment(player,\n Object(None, '(', 'felt cap', libtcod.dark_sky,\n item=Item(description='A Phrygian felt cap with a loose veil to keep the sun off.'),\n equipment=Equipment(slot='head')))\n\n _new_equipment(player, miscellany.horn_bow())\n _new_equipment(player, miscellany.arrow(12))\n _new_equipment(player, miscellany.dagger())\n\n _new_item(player, miscellany.kumiss(4))\n _new_item(player, miscellany.bandage(4))\n\n mountain_cartographer.make_map(player, 1)\n renderer.update_camera(player)\n\n renderer.finish_welcome()\n\n log.message('At last you have reached the foot of the mountain. She waits above.', libtcod.red)\n log.message('Press ? 
or F1 for help.')\n\n # _start_near_quarry(player)\n # _start_near_grotto(player)\n # _start_near_peak(player)\n # _start_near_end(player)\n\n # TEST\n # actions.add_to_map(player.current_map, player.pos, miscellany.sword())\n # actions.add_to_map(player.current_map, player.pos, miscellany.roundshield())\n\n libtcod.map_compute_fov(\n player.current_map.fov_map, player.x,\n player.y, config.TORCH_RADIUS, config.FOV_LIGHT_WALLS, config.FOV_ALGO)\n\n return player", "title": "" }, { "docid": "6361243bfb2fb486b3f7bd012633e8d7", "score": "0.6438719", "text": "def create_game(board):\n game_id = board[\"game_id\"]\n user_list = board[\"player_ids\"]\n returned_data = client.InitialDB.Active_Games.find_one({\"game_id\": game_id})\n \n if returned_data is None:\n\n #Remove the players from the lobby\n for curr_id in user_list:\n client.InitialDB.Lobby.find_one_and_delete({\"user_id\": curr_id})\n\n client.InitialDB.Active_Games.insert_one(board)\n return game_id\n\n return 'nah bro idk about it'", "title": "" }, { "docid": "00c0795071ba3b634d70520438c91d94", "score": "0.64366215", "text": "def create_default_player(self, path):\n pass", "title": "" }, { "docid": "ab28d4fcfdeae8d2c2eb7da18d67afb0", "score": "0.64359045", "text": "def start_playing(player_id):\n # Create a new game associated with the user\n add_game(player_id)\n\n # Gets the id of the game\n game_id = get_game_id(player_id)\n\n # Add the songs to the database\n songs = select_songs()\n add_songs_to_the_game(game_id, songs)", "title": "" }, { "docid": "a0cb15784d722a98e68bbfac9aa0cacc", "score": "0.6404718", "text": "def build_player(self):\n\t\t\n\t\tclear_screen()\n\n\t\ta = input('What is the name of your character? ')\n\t\tb = input('What is the Race of your character? ')\n\n\t\tself.info['Name'] = a.title()\n\t\tself.info['Race'] = b.title()\n\n\t\tclear_screen()\n\n\t\tprint('You have successfully created {} the {}.'.format(a.title(), b.title()))\n\t\tprint('You will begin with {} Hit Points and {} Gold Pieces.'.format(self.stats['HP'], \n\t\t\tself.stats['GOLD']))\n\t\tprint('\\nYou are now ready to start the game!')\n\n\t\tpress_enter()", "title": "" }, { "docid": "4c524d188fe455e110910b4ea8230ed1", "score": "0.6401394", "text": "def new_game(self):\n self.in_game = False\n self.move = 0\n self.who_move = 0\n self.who_will_move = 0\n self.used = []\n self.not_used = []\n self.players = []\n self.winners = []\n self.want_new_game = []\n self.chosen_suit = \"\"\n for user_id in self.users.keys():\n self.users[user_id].hand = []\n self.users[user_id].move_card_count = 0\n self.users[user_id].take_card_count = 0\n self.users[user_id].want_fold = False\n self.delete_bot_messages()", "title": "" }, { "docid": "b18dfd21f456cc3cbe9ffec3ee21fa75", "score": "0.6396059", "text": "def new_othello_game(self, request):\n # check players exist\n if request.player_two:\n playerNames = [ request.player_one, request.player_two ]\n else:\n playerNames = [ request.player_one ]\n print playerNames\n\n for player_name in playerNames:\n user_key = ndb.Key(User, player_name)\n user = user_key.get()\n if not user:\n raise endpoints.\\\n NotFoundException('Invalid key'\n ', user not found')\n else:\n othello_player = ndb.Key(\n OthelloPlayer, player_name, parent=user_key).get()\n if not othello_player:\n raise endpoints.NotFoundException(\n 'Invalid key, othello '\n 'player not found')\n\n print \"Players ok, creating game.\"\n game_id = self._newOthelloGame(request)\n\n return SimpleMessage(\n message='New game started. 
Game id is {}'.format(game_id))", "title": "" }, { "docid": "f07ed5f9ede12410309202a78d58e5e7", "score": "0.6391604", "text": "def create_a_player(username):\n db.session.add(Player(username))\n db.session.commit()", "title": "" }, { "docid": "5ba20a0a6936b11026ac5cb6912845c4", "score": "0.6387381", "text": "def create_player(self):\n if self.role == settings.ROLE_LIGHTER:\n self.player = Player.Lighter(self)\n elif self.role == settings.ROLE_SHOOTER:\n self.player = Player.Shooter(self)\n else:\n raise Exception(\"Role is not a valid player role: %s.\" % self.role)\n \n log(\"Initialized a game with role: %s\" % self.role)", "title": "" }, { "docid": "e41cb78bbeaf9910a86b373c96a82720", "score": "0.6378888", "text": "def playGame(self):\n\n game = Game() # Initializes Game object\n game.playerList(game.nPlayers()) # Populates the playerList and nPlayers in game object\n game.setGuesses() # Sets nGuesses\n\n # this creates a list of player board instances\n wom.player_board = [Board(each_player, game.n_guesses) for each_player in game.player_list]\n\n # gets each player to set a code for the next\n game.setCodes()\n\n # set play_order and begin guessing.\n game.roundRobin()\n\n print(\"\\nThe game is now finished.\")\n\n wom.tallyScore(game.n_guesses)", "title": "" }, { "docid": "d1f218d136689bb955a80aaf9fb0cc8f", "score": "0.63782465", "text": "def __new_game(self, remaining):\n\t\tself.game.new_game(remaining)\n\t\tself.game.start()\n\t\tself.canvas.delete(\"victory\")\n\t\tself.__draw_board()", "title": "" }, { "docid": "a16e352d3001481c88084c0aaf445bc1", "score": "0.637563", "text": "def new_game(self, request):\n user_one = User.query(User.name == request.user_one).get()\n if not user_one:\n raise endpoints.NotFoundException(\n 'A User with the name {} does not exist!'.format(request.user_one))\n user_two = User.query(User.name == request.user_two).get()\n if not user_two:\n raise endpoints.NotFoundException(\n 'A User with the name {} does not exist!'.format(request.user_two))\n if not request.total_rounds:\n game = Game.new_game(user_one.key, user_two.key)\n else:\n game = Game.new_game(user_one.key, user_two.key, request.total_rounds)\n # Use a task queue to update the user wins.\n # This is just a demonstration of memcache\n taskqueue.add(url='/tasks/cache_user_stats')\n return game.to_form(\"Game Successfully Created!\")", "title": "" }, { "docid": "12d9ed231f4e7371864a342ba9230e0c", "score": "0.63512146", "text": "def add_game(player_id):\n db.session.add(Game_with_Players(player_id, 0))\n db.session.commit()", "title": "" }, { "docid": "5476bf70bcc2704f09011533cba89a2c", "score": "0.6340113", "text": "def start_game(player_name):\r\n init_game_files(player_name)\r\n\r\n current_room = load_object(\"dungeon_entrance\")\r\n\r\n player = load_object(\"player\")\r\n\r\n current_room.set_player(player)\r\n\r\n return current_room", "title": "" }, { "docid": "a6bc52ad18ca1dda9f91dbd589227177", "score": "0.63368565", "text": "def make_player() -> dict:\n player = {\"name\": color_string_green(player_name_generator()),\n \"class\": \"\",\n \"hp\": PLAYER_BASE_HP(),\n \"position\": PLAYER_STARTING_POSITION(),\n \"level\": 1,\n \"experience\": 0,\n \"category\": \"player\",\n \"class_dictionary\": \"\"}\n player_class_generator(player)\n player_class_dictionary(player)\n return player", "title": "" }, { "docid": "179e63b946a198bb5eefdbf20f51e96f", "score": "0.6327967", "text": "def create_player(session, callback, player=None):\n if not player:\n player = Player()\n player._savable = 
False\n if not player.name:\n def _set_name(_session, new_name):\n player.name = new_name\n create_player(_session, callback, player)\n session.request(RequestNewPlayerName, _set_name)\n else:\n player.account = session.account\n player._savable = True\n callback(session, player)", "title": "" }, { "docid": "8da4ff4ea50f3eee6d6608ff51703227", "score": "0.6324517", "text": "def start_game(self, amount_of_players):\n self.game_running = True\n self.dicerino = dice.Dice()\n self.dicerino_hand = dice_hand.DiceHand()\n self.create_players(amount_of_players)\n self.player1, self.player2 = \\\n self.randomize_player(self.player1, self.player2)\n self.current_player = self.player1\n if self.current_player.get_id() == 666:\n self.auto_play()", "title": "" }, { "docid": "6792772ed9b0e1624249d56f627e972a", "score": "0.6322524", "text": "def start_game(self, plr_arg = [\"Test0\", \"Test1\", \"Test2\", \"Test3\"]):\n # Might as well error check this here. All games must have 4 players.\n if len(plr_arg) != NUM_PLAYERS:\n log.exception(\"Tried to start a game with <{0} players.\"\n \"\".format(NUM_PLAYERS))\n\n self.players = [Player(x, plr_arg[x]) for x in range(NUM_PLAYERS)]\n self.gs = GameState(self.generate_id())\n self.deal_hand()\n self.gs.active_player = self.gs.next_player(self.gs.dealer)\n self.gs.game_mode = GAME_MODE.BID\n self.gs.trump = None\n\n return self.publish('sog', None, None)", "title": "" }, { "docid": "279943f4118842ce6f540a64dd89dccd", "score": "0.6313202", "text": "async def game_start(self, channel):\n\n # Check if this is a game channel\n channel_rec = self.channel_get(channel)\n if not channel_rec:\n raise Exception(\"Not a game channel\")\n\n # Check to see if the game is in session\n if channel_rec.game_running:\n raise Exception(f\"Game is already running in {channel_rec.name}\")\n\n # Launch a new game by creating a new instance in Game\n game = Games.create(\n timestamp = datetime.datetime.now(),\n channel = channel_rec,\n )\n channel_rec.current_game = game\n channel_rec.game_running = True\n channel_rec.save()\n\n return game", "title": "" }, { "docid": "cad14da66cc1b1475baa7b8ee9edd98c", "score": "0.6310462", "text": "def new_game(self):\n\n self.model.games += 1\n self.update_labels()\n\n self.enable_buttons()\n self.randomize_buttons()", "title": "" }, { "docid": "3f1bf884c0f71d24d1ecfbc6910400b6", "score": "0.63048047", "text": "def new_game(console, command):\n if command.split(\"/\")[-1] == \"new_game.sh\":\n console[\"on_new_game\"] = True\n return \"@prompt:Enter a username: \"\n\n elif console[\"on_new_game\"]:\n console[\"on_new_game\"] = False\n for gamesave, _ in console.get_saves():\n if gamesave.get_username() == command:\n return f\"{gamesave.get_username()} already exists!\"\n try:\n console.set_save(command)\n console.in_play()\n return f\"Created gamesave {console.get_save().get_username()} ...\"\n except InvalidNameError as e:\n return str(e)", "title": "" }, { "docid": "00fbedd2b7412b32eeef668e5bed3d62", "score": "0.62940073", "text": "def new_game():\n new_chess_game = Chess()\n return new_chess_game", "title": "" }, { "docid": "756f230ffadf7783efb714b31ceffe1b", "score": "0.6291912", "text": "def createGame(self):\n self.round = 1\n self.finished = False\n self.winner = None\n\n # x always goes first\n self.turn = self.players[0]\n\n self.board = []\n for i in range(6):\n self.board.append([])\n for j in range(7):\n self.board[i].append(' ')", "title": "" }, { "docid": "859e178f95c783e4f90825b49f5935b4", "score": "0.6281063", "text": "def 
play(self):\r\n self.w = Game()\r\n self.w.show()\r\n self.hide()", "title": "" }, { "docid": "2ef1a3f02437e77fa98695f789642898", "score": "0.6280325", "text": "def start_new_game(self):\n self.interrupt_players()\n\n self.game = Game.Game()\n\n if self.new_player1.get() == \"Human\":\n self.player1 = Player.Human(self)\n elif self.new_player1.get() == \"Computer\":\n self.player1 = Computer.Computer(self, Minimax.Minimax(3))\n\n if self.new_player2.get() == \"Human\":\n self.player2 = Player.Human(self)\n elif self.new_player2.get() == \"Computer\":\n self.player2 = Computer.Computer(self, Minimax.Minimax(3))\n\n # show the main game window\n self.new_game_display.grid_remove()\n self.new_game_caption.grid_remove()\n self.new_game_button_player1_human.grid_remove()\n self.new_game_button_player1_computer.grid_remove()\n self.new_game_button_player2_human.grid_remove()\n self.new_game_button_player2_computer.grid_remove()\n self.new_game_button.grid_remove()\n self.game_window.grid(row=0)\n self.feedback_caption.grid(row=12)\n\n # setting the text on player 1 buttons\n self.player1_button1_text.set(\"{}\\n {}/{}\".format(self.game.player1.attack1.name,\n self.game.player1.attack1.damage,\n self.game.player1.attack1.hit_chance))\n\n self.player1_button2_text.set(\"{}\\n {}/{}\".format(self.game.player1.attack2.name,\n self.game.player1.attack2.damage,\n self.game.player1.attack2.hit_chance))\n\n self.player1_button3_text.set(\"{}\\n {}/{}\".format(self.game.player1.attack3.name,\n self.game.player1.attack3.damage,\n self.game.player1.attack3.hit_chance))\n\n self.player1_button4_text.set(\"{}\\n {}/{}\".format(self.game.player1.attack4.name,\n self.game.player1.attack4.damage,\n self.game.player1.attack4.hit_chance))\n\n # setting the text on player 2 buttons\n self.player2_button1_text.set(\"{}\\n {}/{}\".format(self.game.player2.attack1.name,\n self.game.player2.attack1.damage,\n self.game.player2.attack1.hit_chance))\n\n self.player2_button2_text.set(\"{}\\n {}/{}\".format(self.game.player2.attack2.name,\n self.game.player2.attack2.damage,\n self.game.player2.attack2.hit_chance))\n\n self.player2_button3_text.set(\"{}\\n {}/{}\".format(self.game.player2.attack3.name,\n self.game.player2.attack3.damage,\n self.game.player2.attack3.hit_chance))\n\n self.player2_button4_text.set(\"{}\\n {}/{}\".format(self.game.player2.attack4.name,\n self.game.player2.attack4.damage,\n self.game.player2.attack4.hit_chance))\n\n # resetting the variable captions\n self.feedback_modifiers_player1_text.set(\"No modifiers affecting {}\".format(self.game.player1.name))\n self.feedback_modifiers_player2_text.set(\"No modifiers affecting {}\".format(self.game.player2.name))\n self.feedback_caption_text.set(\"Welcome!\")\n\n # showing modifier status bars\n self.feedback_modifiers_player1.grid(row=7, rowspan=2, column=0, columnspan=4)\n self.feedback_modifiers_player2.grid(row=2, rowspan=2, column=0, columnspan=4)\n\n\n # setting and showing healthbars\n self.player1_health_bar = ttk.Progressbar(self.game_window,\n orient=\"horizontal\",\n length=WINDOW_WIDTH,\n maximum=self.game.player1.hp,\n value=self.game.player1.hp)\n self.player1_health_bar.grid(row=9, column=1, columnspan=2)\n self.player1_shown_health = self.game.player1.hp\n\n self.player2_health_bar = ttk.Progressbar(self.game_window,\n orient=\"horizontal\",\n length=WINDOW_WIDTH,\n maximum=self.game.player2.hp,\n value=self.game.player2.hp)\n self.player2_health_bar.grid(row=4, column=1, columnspan=2)\n self.player2_shown_health = 
self.game.player2.hp\n\n\n # loading player sprites\n self.player1_sprite_image = tkinter.PhotoImage(file='{}.png'.format(self.game.player1.name))\n self.player1_sprite = self.game_picture.create_image(WINDOW_WIDTH,\n WINDOW_HEIGHT * 3 // 4,\n image=self.player1_sprite_image)\n\n self.player2_sprite_image=tkinter.PhotoImage(file='{}.png'.format(self.game.player2.name))\n self.player2_sprite = self.game_picture.create_image(WINDOW_WIDTH,\n WINDOW_HEIGHT // 4,\n image=self.player2_sprite_image)\n\n # start with player 1\n self.player1.play()", "title": "" }, { "docid": "f88e8981d43b31237f10b7b3005470bc", "score": "0.62775564", "text": "def create_new_game(items):\n\n for item in items:\n players = item[\"players\"]\n item[\"players\"] = {}\n for player in players:\n item[\"players\"][str(player)] = {\"frames\": [], \"score\": None}", "title": "" }, { "docid": "60a3ce595c6b62e5f06dbecd505f1776", "score": "0.6277423", "text": "def create_player():\n bot_directory = os.path.join(WORKING_DIR, 'data', 'bot')\n bot_model = os.path.join(bot_directory, 'latest_model.zip')\n shutil.rmtree(bot_directory, ignore_errors=True)\n os.makedirs(bot_directory, exist_ok=True)\n\n # Downloading model\n download_file(MODEL_URL, bot_model, force=True)\n\n # Unzipping file\n zip_ref = zipfile.ZipFile(bot_model, 'r')\n zip_ref.extractall(bot_directory)\n zip_ref.close()\n\n # Detecting model type\n if os.path.exists(os.path.join(bot_directory, 'order_based.txt')):\n LOGGER.info('Creating order-based player.')\n player = create_model_based_player(OrderPolicyAdapter, OrderBaseDatasetBuilder)\n\n elif os.path.exists(os.path.join(bot_directory, 'token_based.txt')):\n LOGGER.info('Creating token-based player.')\n player = create_model_based_player(TokenPolicyAdapter, TokenBaseDatasetBuilder)\n\n else:\n LOGGER.info('Creating rule-based player')\n player = RuleBasedPlayer(ruleset=easy_ruleset)\n\n # Returning\n return player", "title": "" }, { "docid": "b0e038489bfd91f8f245e9616d78e46d", "score": "0.62771976", "text": "def game_example():\r\n return Game('France', 'Peru')", "title": "" }, { "docid": "5d2da168eb30c2274d5a9a75988a61c5", "score": "0.627577", "text": "def new_game(cls, user, attempts):\n ship_location = randint(1, 25)\n\n game = Game(user=user,\n ship_location=ship_location,\n attempts_allowed=attempts,\n attempts_remaining=attempts,\n game_over=False)\n game.put()\n return game", "title": "" }, { "docid": "e47bb27bd735228ab762cc6fa0ab8f22", "score": "0.6269966", "text": "def new_game(self, request):\r\n user = User.query(User.name == request.user_name).get()\r\n if not user:\r\n raise endpoints.NotFoundException(\r\n 'A user with that name does not exist!')\r\n try:\r\n game = Game.new_game(user.key,9)\r\n except ValueError:\r\n raise endpoints.BadRequestException('error message')\r\n\r\n # Use a task queue to update the average attempts remaining.\r\n # This operation is not needed to complete the creation of a new game\r\n # so it is performed out of sequence.\r\n taskqueue.add(url='/tasks/cache_average_attempts')\r\n game.letters_right_position = ''\r\n for i in range(len(game.word_to_guess)):\r\n game.letters_right_position += '_ '\r\n return game.to_game_form('Good luck playing Hangman!'+game.letters_right_position)", "title": "" }, { "docid": "2af37c54846db1e8117257a520594102", "score": "0.62694985", "text": "def create_match(self, request):\n # Get game and player from datastore\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n player = get_player(request.user_name)\n\n # Check if game has 
questions. A game with no questions is not playable\n if game.questions == []:\n raise endpoints.BadRequestException(\n 'The game has no questions yet and cannot be played.')\n\n # Check if game has been changed from editing to play mode\n if not game.play_mode:\n # Only game creators can put a game into play mode\n if player.key != game.creator:\n raise endpoints.BadRequestException(\n 'Only the game creator can put it into play mode.')\n else:\n # Check if creator has explicitly requested to start the game\n if not request.start_game:\n raise endpoints.BadRequestException(\n 'Play mode enabling was not requested.')\n else:\n # Put game into play mode and save the change\n game.play_mode = True\n game.put()\n\n # Create the match\n match = Match.create_match(player=player.key, game=game)\n\n # Return a confirmation of the match creation\n return match.to_form()", "title": "" }, { "docid": "71cf8817161c85b978a9018be3fd7323", "score": "0.6268821", "text": "def start_new_game(self):\n PyPokiBot.start_new_game(self)\n if self.me:\n me = self.me\n try:\n win_pct = me.flops_seen / float(me.wins)\n except ZeroDivisionError:\n win_pct = 0\n #is the bb always 2*sb?\n bb_per_100 = me.money_won / float(self.bet_size * 2)\n self.log(\"\"\"I have $%d and have won %d%% of the hands I've played\nthis session (%d%% bb/100)\"\"\" % (me.money_won, win_pct, bb_per_100))\n else:\n print \"I'm not in right now\"", "title": "" }, { "docid": "9a67bee03f36951166ea244eb6f4e9e4", "score": "0.62636065", "text": "def new_game(self):\n self._level = simpledialog.askstring(\"Input\", \"What level would you like to play?\",\n parent=self._master)\n if self._level is not None:\n self.reset_game()", "title": "" }, { "docid": "6538493eb5e674e7c4671e866936b22e", "score": "0.62616795", "text": "def new_game():\n #name,players = None,None\n create_game_form = NewGameForm()\n if create_game_form.validate_on_submit():\n #split the string of player names\n #TODO: error handling if this is not formatted correctly\n players = create_game_form.players.data.split(',')\n #Instantiate class\n game = Game(name=create_game_form.name.data)\n #add players\n game.add_players(player_names=players,num_players=len(players))\n #add to database\n game.archive(db)\n\n return render_template('new_game.html',form=create_game_form)", "title": "" }, { "docid": "0648bf67c7b7c6f82a26df6491926600", "score": "0.6261457", "text": "def add_player(self):\n pass", "title": "" }, { "docid": "275eba149c38a40602c300c0ecfe807b", "score": "0.6260065", "text": "def new_game(cls, user):\r\n words = [\"movie\", \"sand\", \"ketchup\", \"mobile\", \"computer\", \"broke\", \"pen\", \"pencil\", \"monk\", \"cinema\",\r\n \"kidnap\", \"war\",\r\n \"mango\", \"nap\"]\r\n target = str(random.choice(words))\r\n correct = []\r\n for x in range(len(target)):\r\n correct.append(\"*\")\r\n attempts = len(target)*2\r\n game = Game(user=user,target=target,\r\n attempts_allowed=attempts,attempts_remaining=attempts,\r\n correct=correct,incorrect=[],\r\n all_guesses=[],score=0,\r\n game_over=False)\r\n game.put()\r\n return game", "title": "" }, { "docid": "80b34ba8cc74f527eda069263ee2602a", "score": "0.62489617", "text": "def create_player(self, request):\n # Check username and email address for conflicts\n if Player.query(Player.email_address == request.email_address).get():\n raise endpoints.ConflictException(\n 'A Player with that email address already exists.')\n if Player.query(Player.user_name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A Player with 
that name already exists.')\n\n # Create player\n player = Player(user_name=request.user_name,\n email_address=request.email_address)\n player.put()\n\n # Return confirmation of player creation\n return StringMessage(message='Player successfully created.')", "title": "" }, { "docid": "a41ed44b3ab71b2cbc5591097b719520", "score": "0.62467754", "text": "def init_player(self, player: Player):\n if player.is_new:\n self.game.new_player(player)", "title": "" } ]
ae80bc40a3c04877f91ff122bd1b76ea
Returns a dictionary of cluster groups, containing the users in each group.
[ { "docid": "21e6d049f8a77f5287b79678d4323ba1", "score": "0.6638795", "text": "def get_cluster_groups(\n ocm_api: OCMBaseClient, cluster_id: str\n) -> dict[OCMClusterGroupId, OCMClusterGroup]:\n cluster_groups: dict[OCMClusterGroupId, OCMClusterGroup] = {}\n for group_dict in ocm_api.get_paginated(\n build_cluster_groups_url(cluster_id), max_page_size=10\n ):\n group = OCMClusterGroup(**group_dict)\n cluster_groups[group.id] = group\n return cluster_groups", "title": "" } ]
[ { "docid": "239f4d1489db15274ec8efcb1994e027", "score": "0.7212028", "text": "def list_groups(self):\n return self.session.query(schema.ClusterGroup).all()", "title": "" }, { "docid": "c8d4adec92e4fc81ecacf195a047e5eb", "score": "0.69516546", "text": "def get_cluster_groups(group_names, paths, metadata_path):\n cluster_groups = {}\n\n for i, path in enumerate(paths):\n group_name = group_names[i]\n clusters, cells = get_clusters_from_file(path)\n cluster_groups[group_name] = {\n 'cells': cells,\n 'cluster_file': clusters\n }\n \n metadata_clusters, cells = get_clusters_from_file(metadata_path)\n\n for group_name in cluster_groups:\n cluster_groups[group_name]['metadata_file'] = metadata_clusters\n\n # Print number of cells in each cluster label, for each cluster\n # (annotation), scope, and group\n for group_name in cluster_groups:\n print('Cluster group: ' + group_name)\n metadata_clusters = cluster_groups[group_name]['metadata_file']\n print('From metadata file:')\n for cluster_name in metadata_clusters:\n for label in metadata_clusters[cluster_name]:\n num_cells = str(len(metadata_clusters[cluster_name][label]))\n print(' Cells in ' + cluster_name + '/' + label + ': ' + num_cells)\n clusters = cluster_groups[group_name]['cluster_file']\n print('From cluster file:')\n for cluster_name in clusters:\n for label in clusters[cluster_name]:\n num_cells = str(len(clusters[cluster_name][label]))\n print(' Cells in ' + cluster_name + '/' + label + ': ' + num_cells)\n\n return cluster_groups", "title": "" }, { "docid": "8e5e048eb7ebd0a70a4aadcb210c947b", "score": "0.65408456", "text": "def get_node_groups(self, cluster_id):\n return self.sahara.clusters.get(cluster_id=cluster_id).node_groups", "title": "" }, { "docid": "5bba2e782718eaaf574bb5a642c18e95", "score": "0.652113", "text": "def groups(self):\n return {group: [\n host for host in hosts\n ] for group, hosts in self.inventory_manager.get_groups_dict().items()}", "title": "" }, { "docid": "677720bc77baedf610e98b50597e1386", "score": "0.6402704", "text": "def get_groups(self):\n return self.retrieve_api_results(\"/groups\")", "title": "" }, { "docid": "2a34e0026ceaf2ca8f287c1c3f78b814", "score": "0.63440156", "text": "def user_groups(self):\n return self.user.groups.all().values_list('name', flat=True)", "title": "" }, { "docid": "2b674f29b3e007cbf06b5b6f95d66284", "score": "0.6319084", "text": "def node_groups(self):\n response = self._get(\"/api/v2/node_groups.json\", paginate=True)\n return [NodeGroup(client=self, json=obj) for obj in response]", "title": "" }, { "docid": "4daca978c19a69bcfeda15c494f547a6", "score": "0.63098776", "text": "def getGroups(self):\n return self.usergroups", "title": "" }, { "docid": "afcb919f937301fd41f1646659cc45eb", "score": "0.6192657", "text": "def list_groups(ctx):\n url = ctx.obj['pdserver_url'] + \"/api/groups\"\n result = pdserver_request('GET', url)\n groups = result.json()\n\n for group in groups:\n print(\"{} {}\".format(group['_id'], group['name']))\n\n print(\" Users:\")\n for user_id in group['users']:\n print(\" {}\".format(user_id))\n\n print(\" Routers:\")\n for router_id in group['routers']:\n print(\" {}\".format(router_id))", "title": "" }, { "docid": "e04ef5bb85cb7a82fb9302e73d578a24", "score": "0.6183615", "text": "def get_groups(self):\n user_groups = self.user.groups.all()\n groups = []\n for group in user_groups:\n groups.append(group.name.lower())\n return groups", "title": "" }, { "docid": "60a31ae6d31088460d55b2480f6008c2", "score": "0.6166442", "text": "def user_groups(self):\n 
return self._user_groups", "title": "" }, { "docid": "36b4c13f5167cb17c1a692e98979196e", "score": "0.6154124", "text": "def group_member_dict():\n \n getent_group = subprocess.Popen([\"getent\", \"group\"],\n stdout=subprocess.PIPE).communicate()[0].split('\\n')\n\n member_dict = {}\n for group in getent_group:\n item = group.split(':')\n if len(item) > 3:\n member_dict[item[0]] = {'id': item[2], 'members':item[3].split(',')}\n \n return member_dict", "title": "" }, { "docid": "3aa4406e99a7685e355d7a1054f7517a", "score": "0.61540264", "text": "def groups(self):\n return self[\"groups\"]", "title": "" }, { "docid": "1caf948cdcfeea8c30eda085b7df3dd6", "score": "0.61340564", "text": "def get_groups(*args, **kwargs):\n try:\n if len(args) == 1:\n if type(args[0]) is list:\n usernames = args[0]\n else:\n usernames = args\n elif len(args) > 1:\n usernames = args\n else:\n usernames = [get_user()]\n\n groups = {}\n for user in usernames:\n try:\n groupinfo = subprocess.Popen([\"groups\",user],\n stdout=subprocess.PIPE).communicate()[0]\n groupstr = groupinfo.split(':')[1]\n groups[user] = groupstr.split()\n except:\n groups[user] = [] \n return groups\n except:\n print 'No groups found'\n return None", "title": "" }, { "docid": "a0535b0e210c24590d061ad0c251cf08", "score": "0.61171633", "text": "def get_groups_for_user(user):\n return [m.group for m in Member.objects.filter(user=user)]", "title": "" }, { "docid": "64805cd2cf32b5b97a467960fdc9480a", "score": "0.6105599", "text": "def _get_groups_and_members(self):\n # could be in one of two places, depending if the \n # account-manager is installed or not\n group_file_name = self._group_filename()\n groups_dict = dict()\n group_file = file(group_file_name)\n try:\n for group_line in group_file:\n # Ignore blank lines and lines starting with #\n group_line = group_line.strip()\n if group_line and not group_line.startswith('#'):\n group_name = group_line.split(':', 1)[0]\n group_members = group_line.split(':', 2)[1].split(' ')\n groups_dict[group_name] = [ x for x in [member.strip() for member in group_members] if x ]\n finally:\n group_file.close()\n if len(groups_dict):\n return groups_dict\n else:\n return None", "title": "" }, { "docid": "369ef1b3847068d4d8600ac58d34bd42", "score": "0.60975546", "text": "def get_groups(self) -> pandas.DataFrame:\n return self._client.get_groups().copy(deep=True)", "title": "" }, { "docid": "820dba558c41b75e3b79472670aab9c1", "score": "0.6094224", "text": "def __get_users_per_cluster__(self,workflow_id,subject_id,task_id,shape):\n postgres_cursor = self.postgres_session.cursor()\n # TODO - generalize for panoptes\n stmt = \"select aggregation from aggregations where workflow_id = \" + str(workflow_id) + \" and subject_id = '\" + str(subject_id) + \"'\"\n # stmt = \"select aggregation from aggregations where subject_id = '\" + str(subject_id) + \"'\"\n postgres_cursor.execute(stmt)\n\n # TODO - this should be a dict but doesn't seem to be - hmmmm :/\n agg = postgres_cursor.fetchone()\n\n if agg is None:\n print(\"returning none\")\n return {}\n\n if isinstance(agg[0],str):\n aggregations = json.loads(agg[0])\n else:\n aggregations = agg[0]\n\n assert isinstance(aggregations,dict)\n\n users = {}\n for cluster in aggregations[str(task_id)][shape + \" clusters\"].values():\n if cluster == \"cluster_index\":\n continue\n\n center = tuple(cluster[\"center\"])\n print(cluster)\n users[center] = cluster[\"users\"]\n # # TODO - should be only one way - check why both are necessary\n # if 
isinstance(cluster['existence'][0],dict):\n # probabilities[center] = cluster['existence'][0]['1']\n # else:\n # probabilities[center] = cluster['existence'][0][1]\n\n return users", "title": "" }, { "docid": "d8746fbcf2563ba7c51f5996b6d20985", "score": "0.6088506", "text": "def list_groups(self):\n return self._list_groups()", "title": "" }, { "docid": "f14a065b255ed8ab529f07d53ae5029f", "score": "0.6058897", "text": "def get_cluster_member_dict(initial_df: pd.DataFrame):\n data = initial_df.values\n cluster_member_dict = {d[0]: list() for d in data}\n for d in data:\n cluster_member_dict[d[0]].append(d[1])\n return cluster_member_dict", "title": "" }, { "docid": "43de722c0a94ec3570237feadbb18845", "score": "0.60407716", "text": "def list_groups(cm_id, caller_id):\n user = User.get(caller_id)\n groups = []\n for g in user.group_set.all():\n d = g.dict\n d['status'] = group_states['ok']\n groups.append(d)\n\n return groups", "title": "" }, { "docid": "bf5a5eb76183666b69515bf710369418", "score": "0.60283554", "text": "def get_groups(self):\n return self._groups", "title": "" }, { "docid": "7c153dc8a38282a0742702935335dd31", "score": "0.6025045", "text": "def get_user_groupings(cls):\n from proyecto_boneo.apps.administracion.planes.models import Division\n possible_destinatarios = []\n possible_destinatarios.extend({'id': cls.build_user_grouping_id(cls.TYPE_USER, user.id),\n 'text': user.get_full_name(),\n 'subtext': user.username}\n for user in UsuarioBoneo.objects.all())\n possible_destinatarios.extend({'id': cls.build_user_grouping_id(cls.TYPE_DIVISION, division.id),\n 'text': str(division),\n 'subtext': 'División'}\n for division in Division.objects.filter(activa=True).all())\n possible_destinatarios.extend({'id': cls.build_user_grouping_id(cls.TYPE_YEAR, anio),\n 'text': '{}º'.format(anio),\n 'subtext': 'Año de cursado'}\n for anio in Division.objects.años_plan())\n possible_destinatarios.extend({\n 'id': cls.build_user_grouping_id(cls.TYPE_USER_GROUP, group[1]),\n 'text': group[0],\n 'subtext': 'Grupo de usuarios'\n } for group in cls.USER_GROUPS)\n return possible_destinatarios", "title": "" }, { "docid": "2719ea47fe88eb8963febad5a3d6079a", "score": "0.60244846", "text": "def group(self):\n if config._OPERATING_SYSTEM == \"windows\":\n raise NotImplementedError(\n \"Directory.group is not supported on Windows\"\n ) \n \n # Get the group's details from the group database\n grp_group = grp.getgrnam(pathlib.Path(self.path).group())\n \n group = {}\n group[\"name\"] = grp_group.gr_name\n group[\"id\"] = grp_group.gr_gid\n group[\"members\"] = grp_group.gr_mem\n \n return group", "title": "" }, { "docid": "1d9e6143c8f9274a09e3d517bb52c25b", "score": "0.60115886", "text": "def clusters(self) -> Dict[str, dict]:\n if self._clusters is None:\n clusters = {}\n for elem in self._doc.get(\"clusters\", []):\n clusters[elem[\"name\"]] = c = elem[\"cluster\"]\n c.setdefault(\"server\", \"http://localhost\")\n _set_optional_field_as_bof(c, \"certificate-authority\")\n self._clusters = clusters\n return self._clusters", "title": "" }, { "docid": "573c2e46ea236dda5018dd3c8dd74ba8", "score": "0.6010602", "text": "def get_user_groups(cms_action):\n cms_action.send_cmd('id', expected_value=None)\n\n # text content: '<group_tyoe>', gids=\n text = get_text_after_value(cms_action.cli, 'groups=')\n return [k for k in text.split(',') if 'cns-' in k]", "title": "" }, { "docid": "18565ff482dda26dd4dfd461a95ce33d", "score": "0.6001054", "text": "def filter_group_dict(self):\n d = {}\n for n, m in 
self.filter_managers.iteritems():\n d[n] = m.list_group_names()\n return d", "title": "" }, { "docid": "7f54cce6f18b4b78d93a72760fbe0667", "score": "0.59946805", "text": "def groups(self):\n content_payload = {'UserId': self.id,\n 'MaxResults': 100}\n target = 'com.amazonaws.swbup.service.SWBUPService.ListGroupsForUser'\n for group in self._sso._get_paginated_results(content_payload=content_payload, # pylint: disable=protected-access\n path='userpool',\n target='ListGroupsForUser',\n amz_target=target,\n object_group='Groups',\n url=self.url):\n yield self._sso.get_group_by_id(group.get('GroupId'))", "title": "" }, { "docid": "b3f9eab7127c14799053c6720e2f60d1", "score": "0.5979201", "text": "def group_clusters(clusters, learner):\n # find hierarchy relationships\n child2parent = get_hierarchies(clusters)\n validf = lambda c: valid_number(c.c_range[0]) and valid_number(c.c_range[1])\n\n ignore = set()\n non_children = set()\n for c in clusters:\n if c in ignore:\n continue\n\n if len(child2parent[c]):\n for p in child2parent[c]:\n if contributing(c, p) > 0.6:\n non_children.add(c)\n ignore.add(p)\n _logger.debug(\"groupclust\\trm child cluster\\t%s\", c)\n elif not validf(c):\n _logger.debug(\"groupclust\\tc_range invalid \\t%s\", c)\n else:\n non_children.add(c)\n\n groups = []\n for key, group in group_by_inf_state(non_children, learner).iteritems():\n subgroups = group_by_tuple_ids(group)\n subgroup = filter(bool, map(merge_clauses, subgroups.values()))\n groups.append(subgroup)\n\n rules = filter(bool, map(group_to_rule, groups))\n rules = sort_rules(rules, learner)\n return rules", "title": "" }, { "docid": "3e8343f3348606a24e606500836fb3b9", "score": "0.59707075", "text": "def user_groups_in_realm_serialized(realm: Realm) -> List[UserGroupDict]:\n realm_groups = UserGroup.objects.filter(realm=realm)\n group_dicts: Dict[int, UserGroupDict] = {}\n for user_group in realm_groups:\n group_dicts[user_group.id] = dict(\n id=user_group.id,\n name=user_group.name,\n description=user_group.description,\n members=[],\n direct_subgroup_ids=[],\n is_system_group=user_group.is_system_group,\n can_mention_group=user_group.can_mention_group_id,\n )\n\n membership = UserGroupMembership.objects.filter(user_group__realm=realm).values_list(\n \"user_group_id\", \"user_profile_id\"\n )\n for user_group_id, user_profile_id in membership:\n group_dicts[user_group_id][\"members\"].append(user_profile_id)\n\n group_membership = GroupGroupMembership.objects.filter(subgroup__realm=realm).values_list(\n \"subgroup_id\", \"supergroup_id\"\n )\n for subgroup_id, supergroup_id in group_membership:\n group_dicts[supergroup_id][\"direct_subgroup_ids\"].append(subgroup_id)\n\n for group_dict in group_dicts.values():\n group_dict[\"members\"] = sorted(group_dict[\"members\"])\n group_dict[\"direct_subgroup_ids\"] = sorted(group_dict[\"direct_subgroup_ids\"])\n\n return sorted(group_dicts.values(), key=lambda group_dict: group_dict[\"id\"])", "title": "" }, { "docid": "259d5748b2a3806f7b4b43dcdc012732", "score": "0.5967752", "text": "def get_groups(self):\r\n return [g.name for g in self.root_group if g.id in self.groups]", "title": "" }, { "docid": "8245a4014bb11b402574c93b31463776", "score": "0.5965045", "text": "def get_groups(self):\n return [Group(raw) for raw in self._get('groups')['group']]", "title": "" }, { "docid": "2108ba03f4574e80dd4dd11b790de337", "score": "0.59598106", "text": "def instance_groups_dict(self):\n return self._instance_groups_dict", "title": "" }, { "docid": 
"0a78f0739b28ebc0b09808b216f6723d", "score": "0.59541893", "text": "def groups(self):\n return {ep.group for ep in self}", "title": "" }, { "docid": "823726bfbc2e937fb35c0783d07a8b36", "score": "0.59401596", "text": "def groups(self):\n return self._groups", "title": "" }, { "docid": "823726bfbc2e937fb35c0783d07a8b36", "score": "0.59401596", "text": "def groups(self):\n return self._groups", "title": "" }, { "docid": "37e88cfb5a46571e8f5f16bd2bae2863", "score": "0.5913613", "text": "def getGroupDict(self):\r\n activeGroupDict = {}\r\n for eachGroup in self.buildGroupDict.keys():\r\n self.buildGroupDict[eachGroup].map = mc.getAttr(self.surface.shape + '.' + self.buildGroupDict[eachGroup].attrName)\r\n if(self.buildGroupDict[eachGroup].group != None):\r\n mc.select(self.buildGroupDict[eachGroup].group, hi=True)\r\n self.buildGroupDict[eachGroup].members = mc.ls(sl=True)[1::2]\r\n\r\n #print(self.buildGroupDict[eachGroup].members)\r\n activeGroupDict[eachGroup] = self.buildGroupDict[eachGroup]\r\n else:\r\n mc.warning(str(eachGroup)+\" couldn't be use because it has no group assigned.\")\r\n return activeGroupDict", "title": "" }, { "docid": "1d0fc62b1f797139e9b8ac315013f57a", "score": "0.59119046", "text": "def get_group_members(user):\n id = UserData.objects.get(user_id=user).org_id\n if id == None:\n return {\n 'success': True,\n 'group_id': 'Not in a group',\n 'office': 'Unknown office',\n 'group_members': []\n }\n # user = UserData.objects.get(app_interal_id=id)\n\n client = mifosx_api('clients/{}'.format(id), user=user)\n if not client['success'] and not client['response']['groups']:\n return \"Failed. Could not find a group for client with id {}\".format(id)\n groups = client['response']['groups']\n\n # TODO:: fix dis shiz (assuming that clients can only be in one group....)\n if len(groups) == 0:\n return {\n 'success': True,\n 'group_id': 'Not in a group',\n 'office': 'Not a group member',\n 'group_members': []\n }\n\n group = groups[0]\n res = mifosx_api('groups/{}'.format(group['id']), params='associations=activeClientMembers&clientId={}'.format(id), user=user)\n if res['success']:\n group = []\n acm = res['response']['activeClientMembers']\n for person in acm:\n group.append({\n \"first_name\": person['firstname'],\n \"middle_name\": person.get('middlename', ''),\n \"last_name\": person['lastname'],\n \"phone\": person.get('mobileNo', \"\"),\n \"mifos_id\": person['id']\n })\n\n return {\n 'success': True,\n 'group_id': res['response']['id'],\n 'office': res['response']['officeName'],\n 'group_members': group\n }", "title": "" }, { "docid": "5049c32631bdde10df47de4aaedc4523", "score": "0.5903227", "text": "def group_list(self, **kwargs):\n\n return self.request('GET', '/groups', params=kwargs)", "title": "" }, { "docid": "5fa7679a5d80e011bd9175aab225059e", "score": "0.5891121", "text": "def getGroups(self):\n return self.groups", "title": "" }, { "docid": "5fa7679a5d80e011bd9175aab225059e", "score": "0.5891121", "text": "def getGroups(self):\n return self.groups", "title": "" }, { "docid": "11996db75b37e634d79b4cc10ca16659", "score": "0.5887983", "text": "def get_groups(self):\n if self.api_url is None:\n self.api_url = self.base_uri()\n\n self.logger.info('getting Sign user groups')\n groups = self._paginate_get(f\"{self.api_url}groups\", 'groupInfoList', GroupsInfo.from_dict, self.GROUP_PAGE_SIZE)\n return groups", "title": "" }, { "docid": "2416441e63f7fdf780a23d4b2e4e15be", "score": "0.5879893", "text": "def get_replication_groups(self):\n log.info(\"Fetching list of 
replication groups\")\n return self.conn.get(url='vdc/data-service/vpools')", "title": "" }, { "docid": "caaecfe85c53e67f164e9dde985accdb", "score": "0.5878", "text": "def groups(self):\n group_names = []\n for principal in self.group_detail_list.principals:\n group_names.append(principal.name)\n return group_names", "title": "" }, { "docid": "8c22fabab74ee46fbfde89d288b16d3a", "score": "0.58691126", "text": "def list_of_groups(self):\n return self.obs_groups_table['GROUP_ID'].data", "title": "" }, { "docid": "6468730c919d88301414793b0db72d8f", "score": "0.5861169", "text": "def groupByClassCluster(self):\n try:\n cursorClassCluster = managedb.groupByClassClusterDB()\n return cursorClassCluster\n except Exception as e:\n print(\"exception error DB\", str(e))", "title": "" }, { "docid": "575a072d0c732c6ef99f5a80f518b6de", "score": "0.5842818", "text": "def group_list(self):\n return self._groups()", "title": "" }, { "docid": "f42e6feaeb8e3bd4280cdb6d1d123271", "score": "0.58408743", "text": "def groups(self):\n return self.__groups", "title": "" }, { "docid": "bc030411c47a8541394470a5b476c7b7", "score": "0.5828462", "text": "def group_members(*args, **kwargs):\n if len(args) == 1:\n if type(args[0]) is list:\n groups = args[0]\n else:\n groups = args\n elif len(args) > 1:\n groups = args\n else:\n groups = [get_groups()]\n\n member_dict = group_member_dict()\n members = {}\n for group in groups:\n item = member_dict.get(group)\n if item:\n members[group] = item.get('members')\n else:\n members[group] = []\n\n return members", "title": "" }, { "docid": "ca60a33da0565c1d6f0435e3fff8c8ab", "score": "0.582101", "text": "def groups(self) :\n\t\ttry :\n\t\t\treturn self._groups\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "c4528c28fd143eec1391e09dbabb53f6", "score": "0.58124983", "text": "def getGroups(self):\n return self.__groups", "title": "" }, { "docid": "a85d286a4b50ba86e98ef25f05207ba2", "score": "0.58095926", "text": "def RetrieveAllGroups(self):\r\n uri = self._ServiceUrl('group', True, '', '', '')\r\n return self._GetPropertiesList(uri)", "title": "" }, { "docid": "0d3fa54f19d473b65551671e9dd49746", "score": "0.58036554", "text": "def get_groups():\n groups = load_yaml_file(GROUPS_FILE_PATH)\n return list(groups.keys())", "title": "" }, { "docid": "7011631144922813eb677e56ffdf1305", "score": "0.5802135", "text": "def ldap_list_groups(self):\n self.logger.debug(\"LDAP list-groups starting\")\n self.logger.debug(\"Displaying LDAP groups\")\n groups = []\n for user in self.ldap_users:\n for group in self.ldap_users[user]:\n if group not in groups:\n groups.append(group)\n self.logger.info(str(sorted(groups)))", "title": "" }, { "docid": "5ed100db809cc92236edfe022826f761", "score": "0.58010966", "text": "def findAllUserGroups(cls):\r\n return cls.query.all()", "title": "" }, { "docid": "4ca5982f69e401e2c8d72494aaa1352f", "score": "0.5789276", "text": "def get_instance_groups_dict(self):\n instance_groups_dict = {}\n instance_groups_config = self._resource_config.get(\"instance_groups\", [])\n for group in instance_groups_config:\n instance_groups_dict[group[\"instance_group_name\"]] = group\n return instance_groups_dict", "title": "" }, { "docid": "5ec1d84f0ee4e80fb168428899cd2b0a", "score": "0.5787909", "text": "def customer_groups(self):\n return self._customer_groups", "title": "" }, { "docid": "79a40fc49afae498a85b6de8413d95b4", "score": "0.5777756", "text": "def get_groups(self, session):\n cloudlet_group_url = 'https://' + self.access_hostname + 
'/cloudlets/api/v2/group-info'\n\n cloudlet_group_response = session.get(self.form_url(cloudlet_group_url))\n return cloudlet_group_response", "title": "" }, { "docid": "d1ab0d6d311fd45ed933ed69c2c7cc3c", "score": "0.57726383", "text": "def _get_similarity_groups(similarity_pairs_list):\n graph = nx.Graph()\n graph.add_edges_from(similarity_pairs_list.keys())\n cliques = nx.find_cliques(graph)\n groups = list(cliques)\n return groups", "title": "" }, { "docid": "261b45b2d59ea35eca22220f9ca8bc25", "score": "0.57716703", "text": "def groups(self) -> typing.Set[str]:\n flask.g.user_dependent = True\n return get_groups(self._identity, False)", "title": "" }, { "docid": "80871bb0f39cca3a538de2ca6c34916c", "score": "0.5749293", "text": "def users_group():\n pass", "title": "" }, { "docid": "d8442558eba9b9f87a3cc96d0ef08a01", "score": "0.57470345", "text": "def list_clusters(self):\n from apache_beam.runners.interactive import interactive_environment as ie\n\n clusters = ie.current_env().clusters\n all_cluster_data = {}\n for meta, dcm in clusters.dataproc_cluster_managers.items():\n all_cluster_data[obfuscate(meta)] = {\n 'cluster_name': meta.cluster_name,\n 'project': meta.project_id,\n 'region': meta.region,\n 'master_url': meta.master_url,\n 'dashboard': meta.dashboard,\n 'pipelines': [str(id(p)) for p in dcm.pipelines]\n }\n self._clusters = all_cluster_data\n return all_cluster_data", "title": "" }, { "docid": "3099991a965e3b9ee38f27ff0d68241b", "score": "0.5745152", "text": "def group_train_data(training_data):\n groups = collections.defaultdict(list)\n for p1, p2 in training_data:\n l = groups[tuple(p1)]\n l.append(p2)\n return list(groups.items())", "title": "" }, { "docid": "b86b91b8666cebfef01e7b59c86f261f", "score": "0.5741698", "text": "def existing_group_list(gi):\n gc = group_client(gi)\n return gc.get_groups()", "title": "" }, { "docid": "f4ffde0c768f499bf894b0b77df471f3", "score": "0.5727602", "text": "def groups(self, grouped_mci=True):\n collection = self._df\n\n # creates dataframe for each group\n group_names = collection[\"Group\"].unique()\n groups = {}\n for group in group_names:\n group_df = collection[collection[\"Group\"] == group]\n groups[group] = group_df\n\n # groups MCIs\n if grouped_mci is True:\n mci = collection[collection[\"Group\"].isin([\"MCI\", \"LMCI\", \"EMCI\"])]\n if not mci.empty:\n groups[\"MCI\"] = mci\n if \"LMCI\" in group_names:\n del groups[\"LMCI\"]\n if \"EMCI\" in group_names:\n del groups[\"EMCI\"]\n\n return groups", "title": "" }, { "docid": "4c7abb92738a235ed1af3611fc37222c", "score": "0.5724124", "text": "async def get_clusters(self):\n return {\n IN: self._in_clusters,\n OUT: self._out_clusters\n }", "title": "" }, { "docid": "293a1af139be150d4de828ea421a99cd", "score": "0.5720949", "text": "def groups(self):\n from office365.sharepoint.principal.groups.collection import GroupCollection\n return self.properties.get('Groups',\n GroupCollection(self.context, ResourcePath(\"Groups\", self.resource_path)))", "title": "" }, { "docid": "8a407743d82dfb75ff3a21736b5ef34d", "score": "0.57131076", "text": "def groupsForUser(userName):\n result = []\n factory = DBConnect.getConnection()\n getUserId = factory(classname = \"Requestor.ID\")\n userId = getUserId.execute(userName)\n getGroups = factory(classname = \"Requestor.GetAssociationNames\")\n groups = getGroups.execute(userId)\n return groups;", "title": "" }, { "docid": "13d01ee80e7cb2bb09d1ecf1473becbb", "score": "0.5703441", "text": "def get_group_if_exists(self, cluster, group_id):\n 
cluster_id = self.cluster_ids.get(cluster)\n if not cluster_id:\n return None\n api = f'/api/clusters_mgmt/v1/clusters/{cluster_id}/groups'\n groups = self._get_json(api)['items']\n if group_id not in [g['id'] for g in groups]:\n return None\n\n api = f'/api/clusters_mgmt/v1/clusters/{cluster_id}/' + \\\n f'groups/{group_id}/users'\n users = self._get_json(api)['items']\n return {'users': [u['id'] for u in users]}", "title": "" }, { "docid": "e2d19b88b66cd061a647fea7bfdc9fda", "score": "0.56971556", "text": "def _aggregate_clusters(clusters):\n return [\n {\n 'cluster': c,\n 'start': c[0][0],\n 'end': c[-1][0],\n 'total': sum(x[1] for x in c),\n 'count': len(c),\n 'max': max(x[1] for x in c),\n 'min': min(x[1] for x in c),\n }\n for c in clusters\n ]", "title": "" }, { "docid": "29c56c4b1ff2f2c2a85ba82653ef3025", "score": "0.56958586", "text": "def _GetResourceGroupsFromMemberships(\n project, name, namespace, repo_cluster, membership):\n resource_groups = []\n try:\n memberships = utils.ListMemberships(project)\n except exceptions.ConfigSyncError as err:\n raise err\n for member in memberships:\n if membership and not utils.MembershipMatched(member, membership):\n continue\n if repo_cluster and repo_cluster != member:\n continue\n try:\n utils.KubeconfigForMembership(project, member)\n member_rg = _GetResourceGroups(member, name, namespace)\n if member_rg:\n resource_groups.extend(member_rg)\n except exceptions.ConfigSyncError as err:\n log.error(err)\n return resource_groups", "title": "" }, { "docid": "5adc0c1eb60b1099a329245b5414ff57", "score": "0.5694443", "text": "def host_groups(self):\n return self._host_groups", "title": "" }, { "docid": "8b5d475d25197519d334c012ec940022", "score": "0.56914496", "text": "def get_groups(self):\n return self.get(self.groups_url)", "title": "" }, { "docid": "95d8e2b72e6145127e8cfa64826fa605", "score": "0.569098", "text": "def get_host_groups(host, inherited=True):\n if inherited:\n groups = host.get_groups()\n else:\n groups = host.groups\n return [gr.name for gr in groups if gr.name not in ['ungrouped', 'all']]", "title": "" }, { "docid": "219e9d9eb514a53e053c2145acc556a9", "score": "0.56847537", "text": "def list_clusters(self, groupname):\n group = self.session.query(schema.ClusterGroup).filter_by(name=groupname).first()\n \n if not group or not group.clusters:\n return None\n \n return group.clusters", "title": "" }, { "docid": "56002982c667117fbfa1d22a39d8e12a", "score": "0.56718814", "text": "def listGroups (self, cloudusergroupguid, jobguid = \"\", executionparams = {}):\n params =dict()\n params['cloudusergroupguid'] = cloudusergroupguid\n executionparams['rootobjectguid'] = cloudusergroupguid\n executionparams['rootobjecttype'] = 'cloudusergroup'\n\n \n return q.workflowengine.actionmanager.startRootobjectActionSynchronous('cloudusergroup', 'listGroups', params, jobguid=jobguid, executionparams=executionparams)", "title": "" }, { "docid": "5b77f634fbb689088b89170f03a119b5", "score": "0.5645638", "text": "def groups(self, **extra):\n result = self.raw(groups=True, **extra)\n\n return result['groups']", "title": "" }, { "docid": "ccf8160f5346d2de3216f6b11c6691b7", "score": "0.563631", "text": "def auth_groups(self) -> typing.Set[str]:\n flask.g.user_dependent = True\n return get_groups(self._identity, True)", "title": "" }, { "docid": "5c6e75c6e5b71e4c17a468b7b24d2286", "score": "0.5631117", "text": "def get_groups(genius_user):\n group = genius_user.groups.values('name')\n return group[0]['name']", "title": "" }, { "docid": 
"666fd15fa0eca9b379f7fc606e91aabf", "score": "0.56300044", "text": "def get_current_user_groups():\n return CurrentUserMiddleware.get_current_user().groups.all()", "title": "" }, { "docid": "b58a6a925d8bab7858e96fcbba0302eb", "score": "0.5623459", "text": "def _list_groups(self):\n pass", "title": "" }, { "docid": "8d73644f389df3f5302c64ba6737dd59", "score": "0.56187725", "text": "def _list(gvar):\n\n # Check for missing arguments or help required.\n _check_keys(gvar, [], [], ['-ok'])\n\n # Retrieve data (possibly after changing the group).\n response = __request(gvar, '/group/list/')\n\n # Filter response as requested (or not).\n group_list = __filter_by_cloud_name(gvar, json.loads(response['group_list']))\n\n # Print report\n print('Active User: %s, Active Group: %s, User\\'s Groups: %s' % (response['active_user'], response['active_group'], response['user_groups']))\n if gvar['command_args']['only-keys']:\n _show_table(\n gvar,\n group_list,\n [\n 'group_name/Group',\n ],\n )\n else:\n _show_table(\n gvar,\n group_list,\n [\n 'group_name/Group',\n 'condor_central_manager/Central Manager',\n 'yaml_name/YAML Name',\n ],\n )", "title": "" }, { "docid": "c43ffa6ed2e002138287e74d158e84c7", "score": "0.56161535", "text": "def make_username_dict(group=''):\n\n users = User.objects.all()\n\n udict = {}\n for ent in users:\n username = ent.username\n if group != '':\n if is_user_in_the_group(username, group = group) :\n first = ent.first_name\n last = ent.last_name\n email = ent.email\n tlist = [first, last, email]\n udict[username] = tlist\n else:\n continue\n else:\n continue\n\n return udict", "title": "" }, { "docid": "0ddae0bb5ae544caa92f594bb40ae368", "score": "0.5602817", "text": "def get_groups(token, db = False):\n if db:\n users_request = session.get(\n 'https://api.mongolab.com/api/1/databases/groupmehack/collections/%sgroups' % token,\n params = {'apiKey': mongo_lab_settings['api'], 's': json.dumps({'name': 1})}\n )\n return users_request.json()\n # find file that contains users-> then parse the csv and return json lis\n groups_filename = os.path.join('data_files', 'groups', '%s.csv' % token)\n if os.path.exists(groups_filename):\n with open(groups_filename, 'rb') as groups_file:\n csv_reader = csv.DictReader(groups_file, fieldnames = ['id', 'name', 'message_count'], delimiter = ',')\n # don't include header in list of groups (therefore exclude first element in array)\n return list(csv_reader)[1:]\n # if no file-> then return None as indicator\n return None", "title": "" }, { "docid": "ea96604161883d6cb980ddac53c2549f", "score": "0.5585142", "text": "def get(self):\n groups = group_service.all()\n groups.sort(key=lambda x: len(x.users))\n results = [{'name': group.name, 'id': group.id, 'user_count': len(group.users)}\n for group in groups]\n return {'success': True, 'groups': results}, 200", "title": "" }, { "docid": "be87372b36f0a224afe01710a201efa1", "score": "0.5572536", "text": "def list_group_nodes():\r\n return [node.name() for node in nuke.allNodes('Group')]", "title": "" }, { "docid": "f8c5a20b4e1f12e2d93fe6f6f029ce24", "score": "0.5570784", "text": "def __cluster__(self,markings,user_ids,tools,reduced_markings,image_dimensions,subject_id):\n assert len(markings) == len(user_ids)\n assert len(markings) == len(reduced_markings)\n\n if isinstance(user_ids,tuple):\n user_ids = list(user_ids)\n assert isinstance(user_ids,list)\n start = time.time()\n\n if len(user_ids) == len(set(user_ids)):\n # all of the markings are from different users => so only one cluster\n result = 
{\"users\":user_ids,\"cluster members\":markings,\"tools\":tools,\"num users\":len(user_ids)}\n result[\"center\"] = [self.__cluster_center__(axis) for axis in zip(*markings)]\n return [result],0\n\n # cluster based on the reduced markings, but list the clusters based on their original values\n dendrogram = self.__agglomerative__(reduced_markings)\n\n results = self.__tree_traverse__(dendrogram,markings,user_ids,tools)\n\n # todo - this is just for debugging\n for j in results:\n assert \"num users\" in j\n\n end = time.time()\n return results,end-start", "title": "" }, { "docid": "5b339ab52eb5237c165e2612a8311725", "score": "0.55657804", "text": "def groupList(self,l):\r\n\t\tgroups={}\r\n\t\tfor k in l:\r\n\t\t\tif not (groups.has_key(k[0])):\r\n\t\t\t\tgroups[k[0]]=[]\r\n\t\t\t\tgroups[k[0]].append(k[1])\r\n\t\t\telse:\r\n\t\t\t\tgroups[k[0]].append(k[1])\r\n\t\treturn groups", "title": "" }, { "docid": "21eb496345160bc429081ca81d2ec60b", "score": "0.5563073", "text": "def create_group(self, users):\n group = Group()\n if self.User_ID not in users:\n users.append(self.User_ID)\n group.create_group(users)\n\n for user_id in users:\n groups = json.loads(self.Redis_Server.hget(user_id, \"groups\").decode(\"utf-8\"))\n groups.append(group.Group_ID)\n self.Redis_Server.hmset(user_id, {\"groups\": json.dumps(groups)})", "title": "" }, { "docid": "c7fdd3a8d2ee16c703fd01a49e9e0c63", "score": "0.55571413", "text": "def _create_user_groups(cls, index_name, groups, social_data, social_network, app_data, now_es):\n # group\n permissions = {\n u'admin': u'can-admin',\n }\n groups_data = []\n groups_data_logical = {}\n for group in groups:\n group_data = {\n u'group__name__v1': group,\n u'group__slug__v1': slugify(group),\n u'group__tags__v1': None,\n u'group__created_on__v1': now_es,\n }\n if group in permissions:\n group_data[u'group__permissions__v1'] = [\n {\n u'group__permissions__name__v1': permissions[group],\n u'group__permissions__created_on__v1': now_es\n }\n ]\n es_response_raw = requests.post(\n '{}/{}/group'.format(settings.ELASTIC_SEARCH_HOST, index_name),\n data=json.dumps(group_data))\n if es_response_raw.status_code not in [200, 201]:\n raise XimpiaAPIException(_(u'Could not write group \"{}\" :: {}'.format(\n group, es_response_raw.content)))\n es_response = es_response_raw.json()\n logger.info(u'SetupSite :: created group {} id: {}'.format(\n group,\n es_response.get('_id', u'')\n ))\n # group_ids[group] = es_response.get('_id', '')\n group_data_logical = to_logical_doc('group', group_data)\n group_data_logical['id'] = es_response.get('_id', '')\n groups_data_logical[group_data_logical['id']] = group_data_logical\n groups_data.append(group_data_logical)\n logger.debug(u'groups_data : {}'.format(groups_data))\n # user\n seconds_two_months = str(int((datetime.now() + timedelta(days=60) -\n datetime(1970, 1, 1)).total_seconds()))\n user_data = {\n u'user__username__v1': \" \",\n u'user__alias__v1': \"\",\n u'user__email__v1': social_data.get('email', None),\n u'user__password__v1': None,\n u'user__avatar__v1': social_data.get('profile_picture', None),\n u'user__user_name__v1': social_data.get('name', None),\n u'user__first_name__v1': social_data.get('first_name', ''),\n u'user__last_name__v1': social_data.get('last_name', ''),\n u'user__social_networks__v1': [\n {\n u'user__social_networks__network__v1': social_network,\n u'user__social_networks__user_id__v1': social_data.get('user_id', None),\n u'user__social_networks__access_token__v1': social_data.get('access_token', None),\n 
u'user__social_networks__state__v1': None,\n u'user__social_networks__scopes__v1': social_data.get('scopes', None),\n u'user__social_networks__has_auth__v1': True,\n u'user__social_networks__link__v1': social_data.get('link', None),\n u'user__social_networks__expires_at__v1': social_data.get('expires_at', None),\n }\n ],\n u'user__permissions__v1': None,\n u'groups__v1': map(lambda x: {\n u'group__id': x['id'],\n u'group__name__v1': x['name']\n }, groups_data),\n u'user__is_active__v1': True,\n u'user__token__v1': None,\n u'user__expires_at__v1': time.strftime(\n '%Y-%m-%dT%H:%M:%S',\n time.gmtime(float(social_data.get('expires_at', seconds_two_months)))),\n u'user__session_id__v1': None,\n u'app__v1': {\n u'app__id': app_data['id'],\n u'app__slug__v1': app_data['slug'],\n u'app__name__v1': app_data['name'],\n u'site__v1': {\n u'site__id': app_data['site']['id'],\n u'site__slug__v1': app_data['site']['slug'],\n u'site__name__v1': app_data['site']['name'],\n }\n },\n u'user__created_on__v1': now_es,\n }\n es_response_raw = requests.post(\n '{}/{}/user'.format(settings.ELASTIC_SEARCH_HOST, index_name),\n data=json.dumps(user_data))\n if es_response_raw.status_code not in [200, 201]:\n raise XimpiaAPIException(_(u'Could not write user \"{}.{}\" :: {}'.format(\n social_network,\n social_data.get('user_id', None),\n es_response_raw.content)))\n es_response = es_response_raw.json()\n logger.info(u'SetupSite :: created user id: {}'.format(\n es_response.get('_id', '')\n ))\n user_data_logical = to_logical_doc('user', user_data)\n user_data_logical['id'] = es_response.get('_id', '')\n user_data['id'] = es_response.get('_id', '')\n # users groups\n for group_data in groups_data:\n es_response_raw = requests.post(\n '{}/{}/user-group'.format(settings.ELASTIC_SEARCH_HOST, index_name),\n data=json.dumps({\n u'user__v1': {\n u'user__id': user_data_logical[u'id'],\n u'user__username__v1': user_data_logical[u'username'],\n u'user__email__v1': user_data_logical[u'email'],\n u'user__avatar__v1': user_data_logical[u'avatar'],\n u'user__user_name__v1': user_data_logical[u'user_name'],\n u'user__social_networks__v1': user_data_logical[u'social_networks'],\n u'user__permissions__v1': user_data_logical[u'permissions'],\n u'user__created_on__v1': user_data_logical[u'created_on'],\n },\n u'group__v1': {\n u'group__id': group_data[u'id'],\n u'group__name__v1': group_data[u'name'],\n u'group__slug__v1': group_data[u'slug'],\n u'group__tags__v1': group_data[u'tags'],\n u'group__created_on__v1': group_data[u'created_on']\n },\n u'user-group__created_on__v1': now_es,\n }))\n if es_response_raw.status_code not in [200, 201]:\n raise XimpiaAPIException(_(u'Could not write user group :: {}'.format(\n es_response_raw.content\n )))\n es_response = es_response_raw.json()\n es_response['id'] = es_response.get('_id', '')\n logger.info(u'SetupSite :: created user group id: {}'.format(\n es_response.get('_id', '')\n ))\n return user_data_logical, groups_data", "title": "" }, { "docid": "920dc54ba77055f84bd79b1e803125d9", "score": "0.5551793", "text": "def get_group_lists(filename):\n # map groups to set of emails\n groups_of_students = {}\n input_file = open(filename, 'r')\n csv_reader = csv.reader(input_file)\n # get header row in lowercase\n header = [column.lower() for column in next(csv_reader, None)]\n # print(header)\n column_email = header.index('email')\n column_group = header.index('team')\n # column_first_name = header.index('first')\n # column_last_name = header.index('last')\n # print(\"Email: {}, Team: {}, Name: {} 
{}\".format(column_email, column_group, column_first_name, column_last_name))\n\n for row in csv_reader:\n # print(row)\n group = row[column_group]\n # skip blank groups (for unfinished data file)\n if not group:\n continue\n email = row[column_email]\n # name = \"{} {}\".format(row[column_first_name], row[column_last_name])\n # print(group, name, email)\n\n # update existing group's list,\n # or add student to new entry in dictionary if not already there\n try:\n groups_of_students[group].append(email)\n except KeyError:\n groups_of_students[group] = [email]\n\n # add staff members to groups\n if STAFF_TO_ADD:\n for group in groups_of_students:\n groups_of_students[group] += STAFF_TO_ADD\n input_file.close()\n return groups_of_students", "title": "" }, { "docid": "44bc7e4c3978412f6d43b8c0b7332fe8", "score": "0.55438834", "text": "def create_group_cluster_maps(gsm_file,clusters_G_file,roi_mask_file):\n \n import numpy as np\n from CPAC.basc.basc import ndarray_to_vol\n import CPAC.basc.utils as utils\n \n group_stability_mat = np.asarray([np.load(gsm_file)])\n group_stability_set = group_stability_mat[0]\n #nSubjects = indiv_stability_set.shape[0]\n nVoxels = group_stability_set.shape[1]\n clusters_G=np.load(clusters_G_file)\n group_cluster_stability=[]\n cluster_ids = np.unique(clusters_G)\n nClusters = cluster_ids.shape[0]\n print(\"igcm debug 2\")\n\n# cluster_voxel_scores = np.zeros((nSubjects,nClusters, nVoxels))\n# k_mask=np.zeros((nSubjects,nVoxels, nVoxels))\n group_cluster_voxel_scores = np.zeros((nClusters, nVoxels))\n k_mask=np.zeros((nVoxels, nVoxels))\n #for i in range(nSubjects):\n \n group_cluster_voxel_scores[:,:], k_mask[:,:] = utils.cluster_matrix_average(group_stability_set, clusters_G) \n\n\n for i in cluster_ids:\n group_cluster_stability.append(group_cluster_voxel_scores[(i-1),clusters_G==i].mean())\n \n for k in cluster_ids:\n print('k equals \\n\\n', k, '\\n\\n') #Loops through every row of cluster_voxel_scores and creates nifti files\n print('clustervoxelscores equals \\n\\n', group_cluster_voxel_scores[k-1,:], '\\n\\n')\n A, B = ndarray_to_vol(group_cluster_voxel_scores[k-1,:], roi_mask_file, roi_mask_file, 'group_level_cluster%i_stability.nii.gz' % k)\n print('Output A equals', A, '\\n\\n')\n return", "title": "" }, { "docid": "c70cdad49963cfb5dc92e176df3f245b", "score": "0.5540502", "text": "def collect_groups(self):\n groups = defaultdict(list)\n for frame in self.frames:\n groups[frame.group_id].append(frame)\n return groups", "title": "" }, { "docid": "8833fa5ecbfc8be029afd82f53c8006c", "score": "0.55381197", "text": "def get_groups(self) -> List['Group']:\n return self._create_from_nested_json('groups', Group)", "title": "" }, { "docid": "9a1f3cc7c9d1def7d6c24ed4e1d393bd", "score": "0.55361694", "text": "def _list_groups(self):\n groups = []\n group_path_len = len(self._bucket_path.split(\"/\")) - 1\n for obj in self._list_objects():\n rel_obj_path_spl = obj.key.split(\"/\")[group_path_len:]\n if len(rel_obj_path_spl) > 1:\n if rel_obj_path_spl[0] not in groups:\n groups.append(rel_obj_path_spl[0])\n return groups", "title": "" }, { "docid": "6158725ada24a83b27ed10a424967e96", "score": "0.5532379", "text": "def group_by_crange(self, clusters):\n clusters = sorted(clusters, key=lambda c: c.c_range[0])\n\n # list of: (group, union_c_range)\n groups, ranges = [], []\n for c in clusters:\n found = False\n for idx, (group, c_range) in enumerate(zip(groups, ranges)):\n if r_vol(r_intersect(c_range, c.c_range)):\n group.append(c)\n ranges[idx] = r_union(c_range, 
c.c_range)\n found = True\n break\n if not found:\n groups.append([c])\n ranges.append(list(c.c_range))\n return groups", "title": "" }, { "docid": "a40a8c41cc4073cab614338c4d18bf52", "score": "0.55314386", "text": "def get_all_locales_groups(self):\n all_locales_groups = {}\n for locale in self.get_locales():\n all_locales_groups.update(self.get_locale_groups(locale.locale_id))\n return all_locales_groups", "title": "" }, { "docid": "60f5125fb404c466f93cea915d68d78b", "score": "0.55300903", "text": "def get_customer_groups(self, params: dict = None) -> dict:\n\n return self.__client.call(\n \"GET\",\n Helper.create_request_url_from_params(self.__endpoint, params),\n self.__client.get_basic_headers_for_json()\n )", "title": "" }, { "docid": "70ba2b5ad980359159fda19e153bd461", "score": "0.5523514", "text": "def connection_manager_groups(self):\n response = self._get(\"/api/v2/connection_manager_groups.json\", paginate=False)\n return [ConnectionManagerGroup(client=self, json=obj) for obj in response]", "title": "" }, { "docid": "c707acb3ba23041e8ed5586e765b1a0d", "score": "0.5519813", "text": "def get_groups(self, **kwargs):\n return PaginatedList(\n Group, self._requester, \"GET\", \"group_categories/{}/groups\".format(self.id)\n )", "title": "" } ]
14ce7b744e56af6e0a63445334f0e67e
Internal method to get values of a time series in spring. Part of year aggregator function for gvg method.
[ { "docid": "792bab097e456deb76234d4b9407e6d3", "score": "0.0", "text": "def _get_spring(series: Series, min_n_meas: int) -> float:\n inspring = _in_spring(series)\n if inspring.sum() < min_n_meas:\n return Series(nan)\n else:\n return series.loc[inspring]", "title": "" } ]
[ { "docid": "78faf76c1525b9844ca51e96fbc8b9ac", "score": "0.59957784", "text": "def feb29(ts, dim='doy'):\n #return (ts.where(ts.doy.isin([59,60,61]),drop=True).mean(dim=dim).values)\n return (ts.where(ts.doy.isin([59,61]),drop=True).mean(dim=dim).values)", "title": "" }, { "docid": "eaed26bad8b3ff48fc23ec1756fd5475", "score": "0.5951848", "text": "def getYearlyDataByLatitude(self): \n return self.olrYearAvgsByLatitude", "title": "" }, { "docid": "177baf19b935f603e154be1a729ab6de", "score": "0.5934201", "text": "def get_year(data):", "title": "" }, { "docid": "838ce18132ae156b1bf8f5c012bdcf59", "score": "0.59174585", "text": "def q_gvg(\n series: Series,\n tmin: Optional[TimestampType] = None,\n tmax: Optional[TimestampType] = None,\n by_year: bool = True,\n) -> Series:\n if tmin is not None:\n series = series.loc[tmin:]\n if tmax is not None:\n series = series.loc[:tmax]\n series = series.resample(\"d\").median()\n inspring = _in_spring(series)\n if any(inspring):\n if by_year:\n return series.loc[inspring].resample(\"a\").median().mean()\n else:\n return series.loc[inspring].median()\n else:\n return nan", "title": "" }, { "docid": "0b8a22616e8c8d4d4155ac88e673cbda", "score": "0.5901529", "text": "def calculate_years(self):\n if self.sub_type == 'stock and energy':\n stock_min_year = min(self.stock.raw_values.index.levels[util.position_in_index(self.stock.raw_values, 'year')])\n sales_share_vintages = util.csv_read_table('DemandSales', 'vintage', return_iterable=True, subsector=self.name)\n if len(sales_share_vintages):\n sales_share_min_year = min(sales_share_vintages)\n else:\n sales_share_min_year = 9999\n energy_min_year = min(self.energy_demand.raw_values.index.levels[util.position_in_index(self.energy_demand.raw_values, 'year')])\n min_year = min(cfg.getParamAsInt('current_year'), stock_min_year, sales_share_min_year, energy_min_year)\n elif self.sub_type == 'stock and service':\n stock_min_year = min(self.stock.raw_values.index.levels[util.position_in_index(self.stock.raw_values, 'year')])\n sales_share_vintages = util.csv_read_table('DemandSales', 'vintage', return_iterable=True, subsector=self.name)\n if len(sales_share_vintages):\n sales_share_min_year = min(sales_share_vintages)\n else:\n sales_share_min_year = 9999\n service_min_year = min(self.service_demand.raw_values.index.levels[util.position_in_index(self.service_demand.raw_values, 'year')])\n min_year = min(cfg.getParamAsInt('current_year'), stock_min_year, sales_share_min_year, service_min_year)\n elif self.sub_type == 'service and efficiency':\n service_min_year = min(self.service_demand.raw_values.index.levels[\n util.position_in_index(self.service_demand.raw_values, 'year')])\n service_efficiency_min_year = min(self.service_demand.raw_values.index.levels[\n util.position_in_index(self.service_demand.raw_values, 'year')])\n min_year = min(cfg.getParamAsInt('current_year'), service_min_year, service_efficiency_min_year)\n elif self.sub_type == 'service and energy':\n service_min_year = min(self.service_demand.raw_values.index.levels[\n util.position_in_index(self.service_demand.raw_values, 'year')])\n energy_min_year = min(self.energy_demand.raw_values.index.levels[\n util.position_in_index(self.energy_demand.raw_values, 'year')])\n min_year = min(cfg.getParamAsInt('current_year'), service_min_year,\n energy_min_year)\n elif self.sub_type == 'energy':\n energy_min_year = min(self.energy_demand.raw_values.index.levels[\n util.position_in_index(self.energy_demand.raw_values, 'year')])\n min_year = 
min(cfg.getParamAsInt('current_year'), energy_min_year)\n\n elif self.sub_type == 'link':\n stock_min_year = min(self.stock.raw_values.index.levels[util.position_in_index(self.stock.raw_values, 'year')])\n sales_share_vintages = util.csv_read_table('DemandSales', 'vintage', return_iterable=True, subsector=self.name)\n if len(sales_share_vintages):\n sales_share_min_year = min(sales_share_vintages)\n else:\n sales_share_min_year = 9999\n min_year = min(cfg.getParamAsInt('current_year'), stock_min_year, sales_share_min_year)\n min_year = max(min_year, cfg.getParamAsInt('demand_start_year'))\n min_year = int(cfg.getParamAsInt('year_step') * round(float(min_year)/cfg.getParamAsInt('year_step')))\n self.years = range(min_year, cfg.getParamAsInt('end_year') + 1, cfg.getParamAsInt('year_step'))\n self.vintages = self.years", "title": "" }, { "docid": "7eb64b8cedaa1dd40a420306c4c4f8c4", "score": "0.58005154", "text": "def setupYearlyData(self):\n numYears = len(self.yearsToMonths.keys())\n \n # create olrByYear as a 3 dimensional array with dimensions of\n # latitude, longitude, and year\n current_times, current_lats, current_longs = self.values.shape\n self.olrByYear = np.zeros((numYears, current_lats, current_longs), np.float32)\n\n for year in self.yearsToMonths.keys():\n year_dict = self.yearsToMonths[year]\n year_index = year_dict[self.yi]\n monthList = year_dict[self.ml]\n monthIndex = year_dict[self.mi]\n print(\"Getting Year Averages for Year = %d\" % year)\n self.getYearAvgs(self.olrByYear[year_index,:], monthList, monthIndex)", "title": "" }, { "docid": "cc4a2120b3b4c9d2a01a67a47165ceb5", "score": "0.57660824", "text": "def getYearlyData(self):\n return self.olrByYear", "title": "" }, { "docid": "76439c8717d24b06c89ecdec829877b4", "score": "0.5679675", "text": "def get_timeseries(self, param_values, shape=None):\n raise NotImplementedError()", "title": "" }, { "docid": "b9428973f186387bbbf924614f996c5d", "score": "0.5647304", "text": "def _gxg(\n series: Series,\n year_agg: Function,\n tmin: Optional[TimestampType],\n tmax: Optional[TimestampType],\n fill_method: str,\n limit: Union[int, None],\n output: str,\n min_n_meas: int,\n min_n_years: int,\n year_offset: str,\n) -> Union[Series, float]:\n # handle tmin and tmax\n if tmin is not None:\n series = series.loc[tmin:]\n if tmax is not None:\n series = series.loc[:tmax]\n if series.empty:\n if output.startswith(\"year\"):\n return Series()\n elif output == \"mean\":\n return nan\n else:\n ValueError(\"{output:} is not a valid output option\".format(output=output))\n\n # resample the series to values at the 14th and 28th of every month\n # first generate a daily series by averaging multiple measurements during the day\n series = series.resample(\"d\").mean()\n select14or28 = True\n if fill_method is None:\n series = series.dropna()\n elif fill_method == \"ffill\":\n series = series.ffill(limit=limit)\n elif fill_method == \"bfill\":\n series = series.bfill(limit=limit)\n elif fill_method == \"nearest\":\n if limit == 0:\n # limit=0 is a trick to only use each measurement once\n # only keep days with measurements\n series = series.dropna()\n # generate an index at the 14th and 28th of every month\n buf = Timedelta(8, \"d\")\n ref_index = date_range(series.index.min() - buf, series.index.max() + buf)\n mask = [(x.day == 14) or (x.day == 28) for x in ref_index]\n ref_index = ref_index[mask]\n # only keep the days that are closest to series.index\n ref_index = get_sample(ref_index, series.index)\n # and set the index of series to this 
index\n # (and remove rows in series that are not in ref_index)\n series = series.reindex(ref_index, method=fill_method)\n select14or28 = False\n else:\n # with a large limit (larger than 6) it is possible that one measurement\n # is used more than once\n series = series.dropna().reindex(\n series.index, method=fill_method, limit=limit\n )\n else:\n series = series.interpolate(\n method=fill_method, limit=limit, limit_direction=\"both\"\n )\n\n # and select the 14th and 28th of each month (if needed still)\n if select14or28:\n mask = [(x.day == 14) or (x.day == 28) for x in series.index]\n series = series.loc[mask]\n\n # remove NaNs that may have formed in the process above\n series.dropna(inplace=True)\n\n # resample the series to yearly values\n if output == \"semimonthly\":\n return series\n elif output in [\"yearly\", \"mean\"]:\n yearly = series.resample(year_offset).apply(year_agg, min_n_meas=min_n_meas)\n elif output == \"g3\":\n yearly = series.resample(year_offset)\n collect = {}\n for yr, group in yearly:\n s = year_agg(group, min_n_meas=min_n_meas)\n if isinstance(s, Series):\n s = s.sort_index()\n collect[yr] = s\n yearly = concat(collect)\n\n # return statements\n if output.startswith(\"year\"):\n return yearly\n elif output == \"g3\":\n return yearly\n elif output == \"mean\":\n if yearly.notna().sum() < min_n_years:\n return nan\n else:\n return yearly.mean()\n else:\n msg = \"{} is not a valid output option\".format(output)\n raise (ValueError(msg))", "title": "" }, { "docid": "a94aa4ec93d604b6aca44ba52cdf81e5", "score": "0.5645225", "text": "def getLatitudeYearlyAvgs(self):\n if self.olrByYear is None:\n self.setupYearlyData()\n\n # setup the new olrYearAvgsByLatitude array\n numYears = len(self.yearsToMonths.keys())\n numLats = self.latDim.shape[0]\n self.olrYearAvgsByLatitude = np.zeros((numLats, numYears), dtype=np.float64)\n for lat in range(0, numLats, 1):\n print(\"Getting Averages for Latitude = %d\" % self.latDim[lat])\n for yr in range(0, numYears):\n self.olrYearAvgsByLatitude[lat,yr] = self.getLatAvg(lat, yr)", "title": "" }, { "docid": "ded32e993d32c29958afc3f82a716cbd", "score": "0.55802983", "text": "def processYearRange(self, input):\n return input['month'], input['season']", "title": "" }, { "docid": "3d4b535e423e0f3373646a6787817d6b", "score": "0.5530988", "text": "def annual_vols(self):\r\n gby = self.df.groupby('year')\r\n lst = [dfs.ret.std() * np.sqrt(dfs.shape[0])\r\n for key, dfs in gby]\r\n ks = [key for key, dfs in gby]\r\n dfv = pd.DataFrame(lst, columns=['volatility'], index=ks)\r\n dfv.drop(dfv.index[-1], inplace=True)\r\n return dfv", "title": "" }, { "docid": "e2a9317c77d0781f6b606c5a5061176e", "score": "0.54997903", "text": "def TD_calc(station):\n df=dd.ReadfromProcessed(station,'Monthly',sensor='LT')\n df=df.set_index('Datum')\n TD_ls = []\n for i in range(df.index[0].year,df.index[len(df.index)-1].year):\n df_year=df.loc[df.index.year == i]\n TD_year = df_year.LT.max() - df_year.LT.min()\n if pd.isna(TD_year)!= True:\n TD_ls.append(TD_year)\n\n TD = sum(TD_ls) / len(TD_ls)\n return TD", "title": "" }, { "docid": "db67533d755e91d19e0a6a4cf69acece", "score": "0.5486341", "text": "def _get_values(self) -> (coo_matrix, Dict[str, int], pd.Series):\n self.df = build_df('articles', self.start_date, self.end_date)\n self.ground_truth = build_df('ground_truth')\n self.df = clean_data(self.df)\n self.ground_truth = clean_data(self.ground_truth)\n res = transform_data(self.df, self.ground_truth,\n tfidf=self.tfidf, author=self.author,\n 
tags=self.tags, title=self.title,\n ngram=self.ngram,\n domain_endings=self.domain_endings,\n word_count=self.word_count,\n misspellings=self.misspellings,\n lshash=self.lshash,\n source_count=self.source_count)\n return res", "title": "" }, { "docid": "f66cc099481976031e42c6bff20e0be3", "score": "0.5477213", "text": "def compute_values(self, obs):\n _, values = self.model(obs)\n return values", "title": "" }, { "docid": "766da01f07a0a67aa3ffc49df298a43f", "score": "0.54194635", "text": "def gmv_annual_values(self) -> Tuple[float, float]:\n returns = Rebalance.return_ts(self.gmv_annual_weights, self.assets_ror, period=self.reb_period)\n return (\n Float.annualize_risk(returns.std(), returns.mean()),\n (returns + 1.0).prod() ** (_MONTHS_PER_YEAR / returns.shape[0]) - 1.0,\n )", "title": "" }, { "docid": "439e90431def4db5104d479dd1437e6d", "score": "0.5405197", "text": "def getDataForTimeSeriesAggregation(self):\n raise NotImplementedError", "title": "" }, { "docid": "64767ef0a35b87b6b5e99ddf7458c83b", "score": "0.5384996", "text": "def ttm_years():\n return (2013, 2015, 2018)", "title": "" }, { "docid": "6e1de29adb6297d911b700b677636f37", "score": "0.5349475", "text": "def read_data():\n print(\"NETCDF files being read\")\n whole_dataset = r\"C:\\Users\\krish\\Desktop\\Courses\\Thesis\\data\\downloadtrials\\snow\\complete.nc\"\n dataset = xr.open_dataset(whole_dataset, chunks = {'time':10}) #, 'row':10, 'col':10})\n \n # Narrowing the dataset to 1979-2018 spring-summer values(March-August)\n \n print(\"Raw data being processed for analysis\")\n springs = []\n for year in np.arange(1979,2019):\n beginning = \"%d-03-01\" %(year)\n end = \"%d-08-30\" %(year)\n dataset_spring = dataset.sel(time=slice(beginning,end))\n \n # Dataset snowcover values resampled from Weekly data to Monthly data here\n springs.append(dataset_spring.resample(time='M').mean())\n \n\n\n all_springs = xr.concat(springs, 'time', data_vars = 'minimal')\n \n \n return all_springs", "title": "" }, { "docid": "5c33cabf22d51741cde3c52ce7173b31", "score": "0.534927", "text": "def ts_vals(self):\n ts_vals = np.ndarray((self._nx))\n for i in range(self._nx):\n ts_vals[i] = self._loglikes[i].TS()\n\n return ts_vals", "title": "" }, { "docid": "099ac8996f9f00eadd872ed3c864ed0e", "score": "0.53252923", "text": "def p_value_maps():\n # creating the comparison datasets\n \n # july_c1_dominated:\n year = np.multiply(np.subtract(july_c1_dominated,1979),6)\n indx_may = np.add(2, year)\n indx_june = np.add(3, year)\n indx_july = np.add(4, year)\n indx_august = np.add(5, year)\n May_jc1 = all_springs.snow_cover[indx_may,:,:].values \n June_jc1 = all_springs.snow_cover[indx_june,:,:].values\n July_jc1 = all_springs.snow_cover[indx_july,:,:].values \n August_jc1 = all_springs.snow_cover[indx_august,:,:].values\n \n # july_c3_dominated:\n year = np.multiply(np.subtract(july_c3_dominated,1979),6)\n indx_may = np.add(2, year)\n indx_june = np.add(3, year)\n indx_july = np.add(4, year)\n indx_august = np.add(5, year)\n May_jc3 = all_springs.snow_cover[indx_may,:,:].values \n June_jc3 = all_springs.snow_cover[indx_june,:,:].values\n July_jc3 = all_springs.snow_cover[indx_july,:,:].values \n August_jc3 = all_springs.snow_cover[indx_august,:,:].values\n \n \n # july_neither_dominated:\n year = np.multiply(np.subtract(july_neither_dominated,1979),6)\n indx_may = np.add(2, year)\n indx_june = np.add(3, year)\n indx_july = np.add(4, year)\n indx_august = np.add(5, year)\n May_jn = all_springs.snow_cover[indx_may,:,:].values \n June_jn = 
all_springs.snow_cover[indx_june,:,:].values\n July_jn = all_springs.snow_cover[indx_july,:,:].values \n August_jn = all_springs.snow_cover[indx_august,:,:].values\n \n # august_c1_dominated:\n year = np.multiply(np.subtract(august_c1_dominated,1979),6)\n indx_may = np.add(2, year)\n indx_june = np.add(3, year)\n indx_july = np.add(4, year)\n indx_august = np.add(5, year)\n May_ac1 = all_springs.snow_cover[indx_may,:,:].values \n June_ac1 = all_springs.snow_cover[indx_june,:,:].values\n July_ac1 = all_springs.snow_cover[indx_july,:,:].values \n August_ac1 = all_springs.snow_cover[indx_august,:,:].values\n \n # august_c3_dominated:\n year = np.multiply(np.subtract(august_c3_dominated,1979),6)\n indx_may = np.add(2, year)\n indx_june = np.add(3, year)\n indx_july = np.add(4, year)\n indx_august = np.add(5, year)\n May_ac3 = all_springs.snow_cover[indx_may,:,:].values \n June_ac3 = all_springs.snow_cover[indx_june,:,:].values\n July_ac3 = all_springs.snow_cover[indx_july,:,:].values \n August_ac3 = all_springs.snow_cover[indx_august,:,:].values\n \n # august_neither_dominated:\n year = np.multiply(np.subtract(august_neither_dominated,1979),6)\n indx_may = np.add(2, year)\n indx_june = np.add(3, year)\n indx_july = np.add(4, year)\n indx_august = np.add(5, year)\n May_an = all_springs.snow_cover[indx_may,:,:].values \n June_an = all_springs.snow_cover[indx_june,:,:].values\n July_an = all_springs.snow_cover[indx_july,:,:].values \n August_an = all_springs.snow_cover[indx_august,:,:].values\n \n # DJ-J vs SJ-J\n May_jc1c3 = run_studenttest1(May_jc1, May_jc3)\n June_jc1c3 = run_studenttest1(June_jc1, June_jc3)\n \n July_jc1c3 = run_studenttest1(July_jc1, July_jc3)\n August_jc1c3 = run_studenttest1(August_jc1, August_jc3)\n \n # SJ-J vs NJ-J\n May_jc3cn = run_studenttest1(May_jc3, May_jn)\n June_jc3cn = run_studenttest1(June_jc3, June_jn)\n \n July_jc3cn = run_studenttest1(July_jc3, July_jn)\n August_jc3cn = run_studenttest1(August_jc3, August_jn)\n \n # NJ-J vs DJ-J \n May_jcnc1 = run_studenttest1(May_jn, May_jc1)\n June_jcnc1 = run_studenttest1(June_jn, June_jc1)\n \n July_jcnc1 = run_studenttest1(July_jn, July_jc1)\n August_jcnc1 = run_studenttest1(August_jn, August_jc1)\n \n \n # DJ-A vs SJ-A\n May_ac1c3 = run_studenttest1(May_ac1, May_ac3)\n June_ac1c3 = run_studenttest1(June_ac1, June_ac3)\n \n July_ac1c3 = run_studenttest1(July_ac1, July_ac3)\n August_ac1c3 = run_studenttest1(August_ac1, August_ac3)\n \n # SJ-A vs NJ-A\n May_ac3cn = run_studenttest1(May_ac3, May_an)\n June_ac3cn = run_studenttest1(June_ac3, June_an)\n \n July_ac3cn = run_studenttest1(July_ac3, July_an)\n August_ac3cn = run_studenttest1(August_ac3, August_an)\n \n # NJ-A vs DJ-A \n May_acnc1 = run_studenttest1(May_an, May_ac1)\n June_acnc1 = run_studenttest1(June_an, June_ac1)\n \n July_acnc1 = run_studenttest1(July_an, July_ac1)\n August_acnc1 = run_studenttest1(August_an, August_ac1)\n \n \n # Plotting parameters\n p_max, p_min, delta = 1, 0, 0.005\n p_n = int(1/0.005)\n colorbar_title = \" \"\n color = 'seismic'\n rnd = 2\n # more Plotting parameters\n title_DJSJ_J = \"P value map from Student t-test (Years with domination of double jet in July vs years with domination of single jet in July)\"\n title_SJNJ_J = \"P value map from Student t-test (Years with domination of single jet in July vs years with domination of neither jet in July)\"\n title_NJDJ_J = \"P value map from Student t-test (Years with domination of neither jet in July vs years with domination of double jet in July)\"\n title_DJSJ_A = \"P value map from 
Student t-test (Years with domination of double jet in August vs years with domination of single jet in August)\"\n title_SJNJ_A = \"P value map from Student t-test (Years with domination of single jet in August vs years with domination of neither jet in August)\"\n title_NJDJ_A = \"P value map from Student t-test (Years with domination of neither jet in August vs years with domination of double jet in August)\"\n \n subtitles = [\"May\", \"June\", \"July\", \"August\"]\n \n #Plotting\n \n #Comparison of years in July-Domination\n makeCompositeplots([May_jc1c3, June_jc1c3, July_jc1c3, August_jc1c3],title_DJSJ_J,subtitles,colorbar_title,p_min,p_max,p_n,rnd,color,res=8)#,color=\"Blues\")\n makeCompositeplots([May_jc3cn, June_jc3cn, July_jc3cn, August_jc3cn],title_SJNJ_J,subtitles,colorbar_title,p_min,p_max,p_n,rnd,color,res=8)\n makeCompositeplots([May_jcnc1, June_jcnc1, July_jcnc1, August_jcnc1],title_NJDJ_J,subtitles,colorbar_title,p_min,p_max,p_n,rnd,color,res=8)\n \n #Comparison of years in August-Domination\n makeCompositeplots([May_ac1c3, June_ac1c3, July_ac1c3, August_ac1c3],title_DJSJ_A,subtitles,colorbar_title,p_min,p_max,p_n,rnd,color,res=8)\n makeCompositeplots([May_ac3cn, June_ac3cn, July_ac3cn, August_ac3cn],title_SJNJ_A,subtitles,colorbar_title,p_min,p_max,p_n,rnd,color,res=8)\n makeCompositeplots([May_acnc1, June_acnc1, July_acnc1, August_acnc1],title_NJDJ_A,subtitles,colorbar_title,p_min,p_max,p_n,rnd,color,res=8)", "title": "" }, { "docid": "1a73410d6368f565edca6086403eca88", "score": "0.5277202", "text": "def annual_mean(self):\n if self.data.squeeze().ndim != 1:\n raise ValueError(('currently can only calculate annual mean '\n 'for a scalar time series.'))\n df = pd.DataFrame({self.varname: self.data.squeeze()})\n # I'm not using np.nanmean because I don't think CLM should\n # produce any Nans, so I want it to throw an error if it\n # encounters one\n #\n # extract a year, month, day from a numpy datetime64 by\n # (dt64.astype(object).year)\n yr = [t.astype(object).year for t in self.time]\n am = df.groupby(yr).aggregate(np.mean)\n return am", "title": "" }, { "docid": "018243bd5457dd220ea4d81bc5522328", "score": "0.52577007", "text": "def determine_tseries_aggvalue(cru_data, f_month2year, f_summary):\n # we need to summarise the 100+ year information to an aggregate map\n data = cru_data[:]\n # this just determines the average of days NOT YEARS\n n_years = data.shape[0]/12\n # split the array into a list by the number of years to reduce on\n data_list = np.split(data, n_years)\n # reduce the monthly values to an aggregate annual value using F2(X)\n data_years = [f_month2year(rl, axis=0) for rl in data_list]\n # reduce annual values to an aggregated value for the entire time-seires using F1(X)\n data_ts = f_summary(np.ma.dstack(data_years), axis=2)\n\n # return final reduce map to user\n return data_ts", "title": "" }, { "docid": "89cc95eb25d3127171336647286ecd1a", "score": "0.5255538", "text": "def projected_values(slope: float, y_int: float, years: int) -> List[float]:\r\n final_values = []\r\n for num in range(2021, years + 1):\r\n projection = slope * num + y_int\r\n final_values.append(projection)\r\n return final_values", "title": "" }, { "docid": "02e42988d3563120597e59108685f923", "score": "0.5252831", "text": "def get_year_avg (month_avgs, obs_count):\n\tnum_months = month_avgs.shape[2]\n\tyear_avgs = np.zeros(obs_count[:,:,0].shape)\n\n\tfor i in xrange(num_months):\n\t\tyear_avgs += month_avgs[:, :, i] * obs_count[:, :, i]\n\n\tnum_observations = 
np.sum(obs_count, axis=2)\n\n\t# Prevent divide by zero error\n\tzero_obs = np.zeros_like(num_observations)\n\tzero_obs[num_observations == 0] = 1\n\tyear_avgs /= (num_observations + zero_obs)\n\tyear_avgs[num_observations == 0] = -1000\n\treturn year_avgs, num_observations", "title": "" }, { "docid": "f7d1364624d2479ae9fe06b0e2b01fae", "score": "0.52240115", "text": "def _interpolate_and_sg(self, data_series):\n datestr = list(zip(*data_series))[0]\n valuestr = list(zip(*data_series))[1]\n valuestr = pd.Series(valuestr)\n value_interp = valuestr.interpolate(method=\"linear\", limit_direction=\"both\")\n # MARK: pay attention to limit_direction\n sg_result = savgol_filter(\n value_interp, window_length=self.sg_window, polyorder=self.sg_polyorder\n )\n result = list(zip(datestr, sg_result))\n return result", "title": "" }, { "docid": "9afd9cebd84f25973644d424b51de53a", "score": "0.52228737", "text": "def mean_value(self, tb_start, tb_end, flux_series):\n\n # replace date_series with the resampled version\n date = flux_series.index\n background = flux_series.loc[(date >= tb_start) & (date < tb_end)]\n mean_value = np.nanmean(background)\n sigma = np.nanstd(background)\n\n return [mean_value, sigma]", "title": "" }, { "docid": "1f5f34365bbc32111fae9180b2f89363", "score": "0.52124655", "text": "def estimate_year_data(self, years, frequency):\n data_year = self.price.index.year.unique()\n no_data_year = {pd.Period(year) for year in years} - {pd.Period(year) for year in data_year} # which years do we not have data for\n\n if len(no_data_year) > 0:\n for yr in no_data_year:\n source_year = pd.Period(max(data_year))\n\n source_data = self.price[self.price.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.growth, source_year, yr, frequency)\n self.price = pd.concat([self.price, new_data], sort=True) # add to existing", "title": "" }, { "docid": "a6eb6dd363d35c018b5906bad21117a3", "score": "0.5196948", "text": "def get_data():\n pgconn = get_dbconn(\"coop\", user=\"nobody\")\n df = read_sql(\n \"\"\"\n select year, week_ending, num_value, state_alpha from nass_quickstats\n where commodity_desc = 'CORN' and statisticcat_desc = 'PROGRESS'\n and unit_desc = 'PCT SILKING' and\n util_practice_desc = 'ALL UTILIZATION PRACTICES'\n and num_value is not null\n ORDER by state_alpha, week_ending\n \"\"\",\n pgconn,\n index_col=None,\n )\n df[\"week_ending\"] = pd.to_datetime(df[\"week_ending\"])\n data = {}\n for state, gdf in df.groupby(\"state_alpha\"):\n sdf = gdf.copy()\n sdf.set_index(\"week_ending\", inplace=True)\n newdf = sdf.resample(\"D\").interpolate(method=\"linear\")\n y10 = newdf[newdf[\"year\"] > 2007]\n doyavgs = y10.groupby(y10.index.strftime(\"%m%d\")).mean()\n lastdate = pd.Timestamp(newdf.index.values[-1]).to_pydatetime()\n data[state] = {\n \"date\": lastdate,\n \"avg\": doyavgs.at[lastdate.strftime(\"%m%d\"), \"num_value\"],\n \"d2017\": newdf.at[lastdate, \"num_value\"],\n }\n print(\"%s %s\" % (state, data[state]))\n return data", "title": "" }, { "docid": "4f50b05b32561b0c67e03181b633bd35", "score": "0.51891536", "text": "def TD_calc_specific(station,datum):\n datum=datetime.datetime.strptime(datum,'%Y%m%d')\n df=dd.ReadfromProcessed(station,'Monthly',sensor='LT')\n df=df.set_index('Datum')\n df_year=df.loc[df.index.year == datum.year-1]\n TD = df_year.LT.max() - df_year.LT.min()\n return TD", "title": "" }, { "docid": "fe562ce4d91ba546f557ec2b200ba479", "score": "0.5188522", "text": "def annual_sum(self):\n if 
self.data.squeeze().ndim != 1:\n raise ValueError(('currently can only calculate annual sum '\n 'for a scalar time series.'))\n df = pd.DataFrame({self.varname: self.data.squeeze()})\n # I'm not using np.nanmean because I don't think CLM should\n # produce any Nans, so I want it to throw an error if it\n # encounters one\n #\n # extract a year, month, day from a numpy datetime64 by\n # (dt64.astype(object).year)\n yr = [t.astype(object).year for t in self.time]\n am = df.groupby(yr).aggregate(np.sum)\n return am", "title": "" }, { "docid": "a09f7f3b8584e88b37d98830c5dd0b10", "score": "0.5170459", "text": "def getIncomeAnalysis(ticker):\n\tdfGrossProfitEbit = getFinancialData(ticker, ['GrossProfit', 'EBIT'])\n\tdfNetIncomeRevenue = getFinancialData(ticker, ['TotalRevenue', 'NetIncome'])\n\tif dfGrossProfitEbit is not None and dfNetIncomeRevenue is not None:\n\t\tdf = pd.merge(dfGrossProfitEbit, dfNetIncomeRevenue, 'inner', on='asOfDate')\n\t\tdf['Year'] = pd.DatetimeIndex(df['asOfDate']).year\n\t\tdf = df.drop(['asOfDate'], axis=1)\n\t\treturn df\n\telse:\n\t\treturn None", "title": "" }, { "docid": "953ed3dadd8216acc878dcae6729ca76", "score": "0.5167796", "text": "def daily_avg(soil_moisture_dataframe, year):\n \n sm_daily_avg = soil_moisture_dataframe.set_index('doy')\n sm_daily_avg_year = sm_daily_avg[sm_daily_avg[\"year\"] == year]\n \n return sm_daily_avg_year", "title": "" }, { "docid": "10985dd9d7368dfbca932407a61ce627", "score": "0.5165065", "text": "def annualized_volatility(self) -> Tuple[Tuple[Union[datetime.date, float], ...], ...]:\n return self.__annualized_volatility", "title": "" }, { "docid": "49003caaec3c31fdc01427a02d452eba", "score": "0.516484", "text": "def temporal_currency(gis, df_current, output_features, grid_filter, geom, in_fields):\n try:\n\n out_fl = FeatureLayer(gis=gis,url=output_features)\n out_sdf = out_fl.query(geometry_filter=grid_filter,return_geometry=True,\n return_all_records=True).df\n\n ##---cut stuff above-----\n sq = df_current['SHAPE'].disjoint(geom) == False\n df_current = df_current[sq].copy()\n if len(df_current) > 0:\n dates = df_current[in_fields].tolist()\n count = len(dates)\n date_list_strings = [d for d in dates]\n date_list = [get_datetime(d) for d in dates]\n year_list = [int(x.year) for x in date_list]\n dom_year, dom_year_count = Counter(year_list).most_common()[0]\n dom_date, dom_date_count = Counter(get_datetime_string(date_list)).most_common()[0]\n count_picket_fences = sum(non_std == datetime.datetime(1902,1,1,0,0) for non_std in date_list)\n count_non_std_dates = sum(non_std == datetime.datetime(1901,1,1,0,0) for non_std in date_list) + count_picket_fences\n date_list_minus = [x for x in date_list if (x != datetime.datetime(1901,1,1,0,0) and x != datetime.datetime(1902,1,1,0,0))]\n if len(date_list_minus)>0:\n if dom_date == '1902-1-1' or dom_date == '1902-01-01':\n dom_date = non_std_date\n dom_year = non_std_year\n sccore = 6\n oldest = min(get_datetime_string(date_list_minus))\n newest = max(get_datetime_string(date_list_minus))\n change_list = [diff_date(dd) for dd in date_list_minus]\n count_2year = sum(x <= 2 for x in change_list)\n count_5year = sum((x <= 5 and x > 2) for x in change_list)\n count_10year = sum((x <= 10 and x > 5) for x in change_list)\n count_15year = sum((x <= 15 and x > 10) for x in change_list)\n count_15year_plus = sum(x >= 15 for x in change_list)\n elif dom_date == '1901-1-1' or dom_date == '1901-01-01':\n dom_date = 'NoInformation'\n dom_year = 0\n score = 6\n oldest = 
min(get_datetime_string(date_list_minus))\n newest = max(get_datetime_string(date_list_minus))\n change_list = [diff_date(dd) for dd in date_list_minus]\n count_2year = sum(x <= 2 for x in change_list)\n count_5year = sum((x <= 5 and x > 2) for x in change_list)\n count_10year = sum((x <= 10 and x > 5) for x in change_list)\n count_15year = sum((x <= 15 and x > 10) for x in change_list)\n count_15year_plus = sum(x >= 15 for x in change_list)\n else:\n dom_date = dom_date\n dom_year = dom_year\n oldest = min(get_datetime_string(date_list_minus))\n newest = max(get_datetime_string(date_list_minus))\n change_list = [diff_date(dd) for dd in date_list_minus]\n count_2year = sum(x <= 2 for x in change_list)\n count_5year = sum((x <= 5 and x > 2) for x in change_list)\n count_10year = sum((x <= 10 and x > 5) for x in change_list)\n count_15year = sum((x <= 15 and x > 10) for x in change_list)\n count_15year_plus = sum(x >= 15 for x in change_list)\n score = get_currency_score(dom_year)\n else:\n if dom_date == '1902-01-01':\n dom_date = non_std_date\n dom_year = non_std_year\n oldest = non_std_date\n newest = non_std_date\n change_list = 0\n count_2year = 0\n count_5year = 0\n count_10year = 0\n count_15year = 0\n count_15year_plus = 0\n score = 6\n else:\n dom_date = 'NoInformation'\n dom_year = 0\n oldest = 'NoInformation'\n newest = 'NoInformation'\n change_list = 0\n count_2year = 0\n count_5year = 0\n count_10year = 0\n count_15year = 0\n count_15year_plus = 0\n score = 6\n\n out_sdf[FIELDS[0]][0]=dom_date\n out_sdf[FIELDS[1]][0]=dom_date_count\n out_sdf[FIELDS[2]][0]=round(dom_date_count * 100.0 / count,1)\n out_sdf[FIELDS[3]][0]=dom_year\n out_sdf[FIELDS[4]][0]=dom_year_count\n out_sdf[FIELDS[5]][0]=round(dom_year_count * 100.0 / count,1)\n out_sdf[FIELDS[6]][0]=oldest\n out_sdf[FIELDS[7]][0]=newest\n out_sdf[FIELDS[8]][0]=count_non_std_dates\n out_sdf[FIELDS[9]][0]=round(float(count_non_std_dates) * 100.0 / count,1)\n out_sdf[FIELDS[10]][0]=round(float(count_2year) * 100.0 / count,1)\n out_sdf[FIELDS[11]][0]=round(float(count_5year) * 100.0 / count,1)\n out_sdf[FIELDS[12]][0]=round(float(count_10year) * 100.0 / count,1)\n out_sdf[FIELDS[13]][0]=round(float(count_15year) * 100.0 / count,1)\n out_sdf[FIELDS[14]][0]=round(float(count_15year_plus) * 100.0 / count,1)\n out_sdf[FIELDS[15]][0]=int(count)\n out_sdf[FIELDS[16]][0]=int(score)\n\n else:\n out_sdf[FIELDS[0]][0]=\"None\"\n out_sdf[FIELDS[1]][0]=0\n out_sdf[FIELDS[2]][0]=0\n out_sdf[FIELDS[3]][0]=0\n out_sdf[FIELDS[4]][0]=0\n out_sdf[FIELDS[5]][0]=0\n out_sdf[FIELDS[6]][0]=\"None\"\n out_sdf[FIELDS[7]][0]=\"None\"\n out_sdf[FIELDS[8]][0]=0\n out_sdf[FIELDS[9]][0]=0\n out_sdf[FIELDS[10]][0]=0\n out_sdf[FIELDS[11]][0]=0\n out_sdf[FIELDS[12]][0]=0\n out_sdf[FIELDS[13]][0]=0\n out_sdf[FIELDS[14]][0]=0\n out_sdf[FIELDS[15]][0]=0\n out_sdf[FIELDS[16]][0]=0\n\n return out_sdf, out_fl\n\n## out_sdf_as_featureset = out_sdf.to_featureset()\n## print(out_sdf_as_featureset)\n## out_fl.edit_features(updates=out_sdf_as_featureset)\n##\n## del df_current\n## del ext\n## del geom\n\n except FunctionError as f_e:\n messages = f_e.args[0]\n## arcpy.AddError(\"error in function: %s\" % messages[\"function\"])\n## arcpy.AddError(\"error on line: %s\" % messages[\"line\"])\n## arcpy.AddError(\"error in file name: %s\" % messages[\"filename\"])\n## arcpy.AddError(\"with error message: %s\" % messages[\"synerror\"])\n## arcpy.AddError(\"ArcPy Error Message: %s\" % messages[\"arc\"])\n except:\n line, filename, synerror = trace()", "title": "" }, { "docid": 
"3e6679e480327cefe7fcdb89407b22a8", "score": "0.5163169", "text": "def get_years(root, source=web_scraper, session=None):\n for x in source([root], \"year\", session):\n yield x", "title": "" }, { "docid": "892f92bce77af3d7fa6422e7c6ad9fb4", "score": "0.5151816", "text": "def get_shares(country, year, square=2, name='s_', store=yearly):\n\n year1 = 'y' + str(year) + '_'\n year0 = 'y' + str(year - 1) + '_'\n iyear1 = int(str(year) + '52')\n iyear0 = int(str(year - 1) + '52')\n\n df1 = yearly[year1 + country]['VALUE_1000ECU'][yearly[year1 + country]['STAT_REGIME'] == 4].ix[1]\n df0 = yearly[year0 + country]['VALUE_1000ECU'][yearly[year0 + country]['STAT_REGIME'] == 4].ix[1]\n\n gr1 = df1.groupby(axis=0, level='PRODUCT_NC')\n gr0 = df0.groupby(axis=0, level='PRODUCT_NC')\n\n l1 = []\n drop1 = []\n for product in gr1.groups.keys():\n try:\n l1.append((iyear1, product, ref_dict[product]))\n except KeyError:\n drop1.append(product)\n\n l0 = []\n drop0 = []\n for product in gr0.groups.keys():\n try:\n l0.append((iyear0, product, ref_dict[product]))\n except KeyError:\n drop0.append(product)\n\n # Check if return is actually what you want to do.\n return pd.DataFrame(\n np.log(df1 / gr1.sum().reindex(df1.index, level='PRODUCT_NC')).ix[iyear1] - (\n np.log(df0 / gr0.sum().reindex(df0.index, level='PRODUCT_NC')).ix[iyear0]) - (\n np.log(df1.ix[l1].ix[iyear1].reset_index(level='PARTNER')['VALUE_1000ECU'].reindex(df1.index, level='PRODUCT_NC').ix[iyear1] / gr1.sum().reindex(df1.index, level='PRODUCT_NC').ix[iyear1]) - (\n np.log(df0.ix[l0].ix[iyear0].reset_index(level='PARTNER')['VALUE_1000ECU'].reindex(df0.index, level='PRODUCT_NC').ix[iyear0] / gr0.sum().reindex(df0.index, level='PRODUCT_NC').ix[iyear0])\n )\n ), columns=[name + str(year)]\n ) ** square", "title": "" }, { "docid": "db9a4143f7c42922c1bf726efc3fb618", "score": "0.5141822", "text": "def test_aggregate_fortnightly_gross_pay(self):\n _, funct = MWSSTransformer.ops()[\"50\"]\n return_value = funct(\"50\", {\"50f\": \"1600\"}, 0)\n self.assertEqual(800, return_value)\n return_value = funct(\"50\", {\"50\": \"19200\", \"50f\": \"1600\"}, 0)\n self.assertEqual(20000, return_value)\n return_value = funct(\"50\", {\"50\": \"19200.5\"}, 0)\n self.assertEqual(19201, return_value)\n return_value = funct(\"50\", {\"50\": \"19200.5\", \"50f\": \"1600.5\"}, 0)\n self.assertEqual(20001, return_value)\n return_value = funct(\"50\", {\"50\": \"19200.49\"}, 0)\n self.assertEqual(19200, return_value)\n return_value = funct(\"50\", {\"50\": \"19200.02\", \"50f\": \"1600.02\"}, 0)\n self.assertEqual(20000, return_value)", "title": "" }, { "docid": "0e6ad241c27c2ba958782429fc0acce2", "score": "0.513551", "text": "def x(self, dt, models):\n data = {}\n db = dbio.connect(models.dbname)\n cur = db.cursor()\n for s in self.statevar:\n sql = \"select ensemble,st_x(geom),st_y(geom),val from (select ensemble,(ST_PixelAsCentroids(rast)).* from {0}.{1} where fdate=date '{2}-{3}-{4}') foo group by ensemble,geom order by ensemble\".format(\n models.name, s, dt.year, dt.month, dt.day)\n cur.execute(sql)\n e, lon, lat, vals = zip(*cur.fetchall())\n gid = [models[0].lgid[(l[0], l[1])] for l in zip(lat, lon)]\n nens = max(e)\n data[s] = np.array(vals).reshape((len(vals) / nens, nens))\n lat = np.array(lat).reshape((len(lat) / nens, nens))\n lon = np.array(lon).reshape((len(lon) / nens, nens))\n gid = np.array(gid).reshape((len(gid) / nens, nens))\n cur.close()\n db.close()\n return data, lat, lon, gid", "title": "" }, { "docid": "5bc4a1edb502df985a86a768a02733c7", 
"score": "0.5131472", "text": "def gen(self) :\n # stdv with dof accounting\n stdn = partial(np.std, ddof=1)\n\n # daily vol: \n p = self.param['daily_vol']\n dv = self.d.daily_vol(daily_open=p['open_hour'], daily_close=p['close_hour'])\n dv5 = strat_utils.rolling_window(dv, p['ma_days1'], np.mean)\n dvz = (dv5 - strat_utils.rolling_window(dv, p['ma_days2'], np.mean))/strat_utils.rolling_window(dv,p['ma_days2'], stdn)*2\n\n # daily ci:\n p = self.param['daily_ci']\n ci = self.d.daily_ci(p['roll_days'], daily_open=p['open_hour'], daily_close=p['close_hour'], settle=p['settle_hour'])\n ciz = (strat_utils.rolling_window(ci, p['ma_days1'],np.mean) - 50)/strat_utils.rolling_window(ci,p['ma_days2'],stdn)\n\n # signal_tf\n signal_tf = (-np.sign(np.clip(dvz,-1,0))*np.sign(np.clip(ciz,0,1))).astype(int)\n import scipy.stats\n pos_wt = scipy.stats.norm.cdf(np.abs(dvz)+np.abs(ciz)-2) * signal_tf\n\n # daily high/low/close/vol\n d_h, d_l, d_c = self.d.daily_hlc(hl_open='liquid_open', hl_close='liquid_close',close='daily_close')\n day = self.d.get_latest_day()\n\n # debug\n #if day == '20220404' :\n # import pdb\n # pdb.set_trace()\n\n return day, dvz, ciz, signal_tf, pos_wt, d_h, d_l, d_c, dv5, dv, ci", "title": "" }, { "docid": "c9b427f6f00d91d833a82156c470fc0d", "score": "0.51266074", "text": "def annualized_vol(returns,periods_per_year):\n return returns.std()*(periods_per_year**0.5)", "title": "" }, { "docid": "0740be13d9d17f848f5f8fe316cbf445", "score": "0.5125132", "text": "def get_nsigma_y(fp_data_H,start_date, end_date, sites, \n nmeasure, sigma_values, bl_period=10, bl_split=False,\n levels=None): \n nsites = len(sites) \n d0=pandas.to_datetime(start_date)\n d1=pandas.to_datetime(end_date)\n delta = d1 - d0\n ndays = delta.days\n \n y_bl=np.zeros((nmeasure))\n \n nsigma=0\n nsigma_max = np.int(np.ceil(ndays/np.float(bl_period)))\n ntime_stn=np.zeros((nsites))\n if levels is not None:\n ngroups=len(levels)-1\n \n ydim1=0\n sigma_models=[]\n \n for si in range(nsites):\n fp_data_H3 = fp_data_H[sites[si]].dropna(\"time\", how=\"all\") \n nsigma_stn=0\n \n mf_time_temp=fp_data_H3.time.values\n mf_time_temp2=pandas.to_datetime(mf_time_temp)\n pblh_temp=fp_data_H3.PBLH.values\n #mf_mod_temp=fp_data_H3.mf_mod.values\n ntime_stn[si]=len(mf_time_temp)\n \n bl_start=d0\n \n if bl_split is True:\n mf_mod_temp=fp_data_H3.mf_mod.values\n for ti in range(ngroups):\n \n wh = np.where(np.logical_and(pblh_temp>=levels[ti],\n pblh_temp<levels[ti+1]))\n \n if len(wh[0]) > 0:\n y_bl[wh+np.sum(ntime_stn[:si],dtype=np.uint16)]=nsigma_stn+nsigma\n \n #sigma_models.append(sigma_values[ti])\n if levels[ti]<499:\n #if levels[ti]>499:\n sigma_models.append(sigma_values)\n else:\n if len(wh[0]) > 1:\n sigma_models.append(np.std(mf_mod_temp[wh[0]]))\n else: \n sigma_models.append(20.)\n nsigma_stn+=1\n \n n_obs = len(wh[0])\n if n_obs > ydim1:\n ydim1 = n_obs*1\n \n \n nsigma+=nsigma_stn\n \n else:\n for ti in range(nsigma_max):\n bl_end=bl_start+dt.timedelta(days=bl_period)\n \n wh=np.where(np.logical_and(mf_time_temp2>=bl_start,\n mf_time_temp2<bl_end))\n # \n if len(wh[0]) > 0:\n y_bl[wh+np.sum(ntime_stn[:si],dtype=np.uint16)]=nsigma_stn+nsigma\n sigma_models.append(sigma_values)\n nsigma_stn+=1\n \n bl_start=bl_start+dt.timedelta(days=bl_period)\n n_obs = len(wh[0])\n if n_obs > ydim1:\n ydim1 = n_obs*1\n \n \n nsigma+=nsigma_stn\n \n # INDEX R\n R_indices = np.zeros((ydim1,nsigma), dtype=np.uint16)\n for ii in range(nsigma): \n wh_bl=np.where(y_bl == ii)\n nwh=len(wh_bl[0])\n R_indices[:nwh,ii]=wh_bl[0]+1\n if nwh < 
ydim1:\n R_indices[nwh:,ii]=np.max(wh_bl)+1\n \n ydim2=nsigma*1\n \n return R_indices, ydim1, ydim2, np.asarray(sigma_models)", "title": "" }, { "docid": "0dbeb111795472737082445e80aad442", "score": "0.51233625", "text": "def calc_emissions(ds, y0=2001, y1=2020):\n years = xr.DataArray(np.arange(y0, y1 + 1), dims=(\"year\",), name=\"year\")\n tree_loss = xr.concat(\n [xr.where((ds[\"lossyear\"] == year), 1.0, 0.0) for year in years], dim=years\n )\n\n return ds[\"agb\"] * tree_loss * TC_PER_TBM * TC02_PER_TC", "title": "" }, { "docid": "8409f276a77d76f5c103cd25c8a81636", "score": "0.51180017", "text": "def g_values(self):\n return np.array( [ self.genes[i].get_g_value() for i in range(0, self.size()) ] )", "title": "" }, { "docid": "14bd969def4e1e6621cfa6f3a98d749c", "score": "0.51074475", "text": "def getPlotSpectralsByYear(year, lng, lat):\r\n values = {}\r\n try:\r\n # timeseries = getTsTimeSeriesForPoint((float(lng), float(lat)))\r\n timeseries = getTsTimeSeriesForPointByYear(\r\n (float(lng), float(lat)), int(year))\r\n values = {\r\n 'timeseries': timeseries\r\n }\r\n return jsonify(values), 200\r\n except GEEException as e:\r\n logger.error(str(e))\r\n values = {\r\n 'errMsg': str(e)\r\n }\r\n return jsonify(values), 500", "title": "" }, { "docid": "aac19902ffab97f3cc8ea6acd42e41e0", "score": "0.5098381", "text": "def evaluate(self, ts):\n ts = np.asarray(ts)\n phases = PI2 * self.freq * ts + self.offset\n ys = self.amp * self.func(phases)\n return ys", "title": "" }, { "docid": "185368c9be581c46b2ab6af739790882", "score": "0.5089585", "text": "def evaluate(self, ts):\n ts = np.asarray(ts)\n phases = PI2 * self.freq * ts + self.offset\n ys = self.amp * np.exp(1j * phases)\n return ys", "title": "" }, { "docid": "b3adee1a489600990fc80db38aa22b36", "score": "0.5085703", "text": "def get_SE(frst_last, years=None):\n\n # separate the first and last observations and turn into numpy arrays\n\n frstObs, lastObs = zip(* frst_last )\n frstObs = np.array(frstObs)\n lastObs = np.array(lastObs)\n\n # sort out the range of years for which we create the timeseries\n\n if years is None: # use every year in the range\n\n y0 = min(frstObs); yf = max(lastObs);\n assert(y0 <= yf) # check we received a sensible input\n years = list(range(y0, yf+1)) # y0, y0+1, ..., yf inclusive\n\n # calculate S and E for each year in timeseries\n\n S = np.array( [ sum( (frstObs <= t) & (lastObs >= t) ) for t in years ] )\n E = np.array( [ sum( lastObs < t ) for t in years ] )\n\n return years, S, E", "title": "" }, { "docid": "16427f9b85b6f03d577fe94b8c9d42e8", "score": "0.50853354", "text": "def calc_period_hyperparameters(series, set_global = False):\n \n \n def calc_period(omega):\n return 2*np.pi/omega\n \n \n def calc_stability(y_act, y_beta):\n y_act = argrelextrema(y_act, np.greater)[0]\n y_beta = argrelextrema(y_beta, np.greater)[0]\n displacements = [min([abs(a-x) for a in y_act ]) for x in y_beta]\n return displacements\n \n \n series = series.fillna(method='ffill').fillna(0)\n x = self.augmented_frame.index.values\n alpha, alpha_props = fit_sin(series)\n y = normalize_series(series, alpha)\n beta, beta_props = fit_sin(y)\n\n y_beta = np.array([2 * beta(a) for a in x])\n y_ = savgol_filter(y, 21,3)\n if set_global:\n self.raw_walk_data = y_\n self.idealised_walk_data = y_beta\n \n dis = calc_stability(y.values, np.array(y_beta))\n per = calc_period(beta_props[1])\n # plt.plot(x,y)\n # plt.plot(x, y_beta)\n # plt.show()\n # plt.hist(dis)\n return {\"Stability\": dis, \"period\": per}", "title": "" }, { 
"docid": "5209b92995f3f325c19650b685dd6f1a", "score": "0.5080335", "text": "def get_timeseries(sww_filename,gauges):\n\n gauge_locations = gauges.values()\n gauge_names = gauges.keys()\n\n #tempfile = 'xyz1234tempfile.sww' # Has to end with sww\n\n #os.system('cp %s %s' % (sww_filename, tempfile))\n\n f = file_function(sww_filename, \n quantities='stage',\n interpolation_points=gauge_locations,\n use_cache=True,\n verbose=True)\n\n timevector = f.get_time()\n\n timeseries = {}\n for k, name in enumerate(gauge_names):\n model = timeseries[name] = []\n \n for t in timevector:\n model.append(f(t, point_id=k)[0])\n\n\n return num.array(timevector), timeseries", "title": "" }, { "docid": "d3289c5f5cfb0fea3d18c569a4729fdb", "score": "0.5077995", "text": "def getYearAvgs(self, oBY, month_list, monthIndex):\n lats = self.getLats()\n longs = self.getLongs()\n for lat in range(0, oBY.shape[0]):\n for long in range(0, oBY.shape[1], 1):\n #\n # monthData is a 1 dimensional slice of the array containing\n # OLR averages for each month for the given (lat,long)\n # coordinate.\n #\n monthData = self.values[:,lat,long]\n oBY[lat,long] = np.average(monthData[monthIndex:(monthIndex + len(month_list))])", "title": "" }, { "docid": "579f080f065809ac0741415184dea835", "score": "0.5077188", "text": "def average(self,field,year):\n\t\tary = self.generate_array_stats(field,year)\n\t\treturn np.average(ary)", "title": "" }, { "docid": "bcecf892a87b0a7f7b14cf1962f3daf5", "score": "0.5070637", "text": "def get_means_over_months_for_each_year(times, streamflow, months = range(1,13)):\n start_year = times[0].year\n end_year = times[-1].year\n\n\n assert len(times) == streamflow.shape[0]\n\n\n result = {}\n select_vector = None\n for the_year in xrange(start_year, end_year + 1):\n select_vector = map( lambda t: (t.month in months) and (t.year == the_year), times )\n indices = np.where(select_vector)[0]\n result[the_year] = np.mean(streamflow[indices, :], axis=0)\n print \"select_vector length = \", sum(map(int, select_vector))\n return result", "title": "" }, { "docid": "5818536828a1302ef7bfc016b9670f33", "score": "0.5066902", "text": "def multiyear_averages(\n cls, ds: xr.Dataset, month_periods: List[Tuple[int, int]], time_var: str = \"time\", time_dim: str = \"t_dim\"\n ) -> xr.Dataset:\n\n time_dim_da = ds[f\"{time_dim}\"]\n time_var_da = ds[f\"{time_var}\"]\n\n new_ds = ds\n # If time dimension isn't np.datetime64 but time variable is, then swap time variable to be the dimension.\n # There should be a 1 to 1 mapping between time dimension values and time variable values.\n # A datetime type is required for slicing dates over a dimension using xarray's sel() method.\n if not np.issubdtype(time_dim_da.dtype, np.datetime64) and np.issubdtype(time_var_da.dtype, np.datetime64):\n warn(\"Time dimension is not np.datatime64 but time variable is. Swapping time dimension for data variable.\")\n # Swap time_var with time_dim.\n new_ds = ds.swap_dims({f\"{time_dim}\": f\"{time_var}\"})\n time_dim = time_var\n elif not np.issubdtype(time_dim_da.dtype, np.datetime64) and not np.issubdtype(\n time_var_da.dtype, np.datetime64\n ):\n # Slicing will most likely fail for non np.datetime64 datatypes.\n warn(\"Neither time dimension or time variable data are np.datetime64. 
Time slicing may fail.\")\n\n # Get years of data.\n data_years = list(new_ds[f\"{time_dim}.year\"].data)\n # Append first year - 1, to account for possible WINTER month data of year prior to data beginning.\n data_years.insert(0, data_years[0] - 1)\n\n # Generate date ranges from years and given month periods.\n date_ranges = Climatology._get_date_ranges(data_years, month_periods)\n\n # Extract data from dataset between these date ranges and index each range with a common multi-index.\n datasets = []\n year_index = []\n month_index = []\n for date_range in date_ranges:\n sel_args = {f\"{time_dim}\": slice(date_range[0], date_range[1])}\n filtered = new_ds.sel(**sel_args)\n datasets.append(filtered)\n year_index = year_index + ([date_range[0].year] * filtered.sizes[time_dim])\n month_label = f\"{calendar.month_abbr[date_range[0].month]}-{calendar.month_abbr[date_range[1].month]}\"\n month_index = month_index + ([month_label] * filtered.sizes[time_dim])\n\n # New dataset built from extracted data between date ranges.\n filtered = xr.concat(datasets, dim=time_dim)\n # Data from same date range use common year-period multi-index so they can be grouped together.\n period_idx = pd.MultiIndex.from_arrays([year_index, month_index], names=(\"year\", \"period\"))\n filtered.coords[\"year_period\"] = (f\"{time_dim}\", period_idx)\n\n # For each data variable, group on year-period multi-index and find the mean.\n # New dataset containing means across date ranges is returned.\n ds_mean = xr.Dataset()\n for var_name, da in filtered.data_vars.items():\n try:\n # Apply .mean() to grouped data.\n # skipna flag used to ignore NaN values.\n da_mean = da.groupby(\"year_period\").mean(dim=time_dim, skipna=True)\n ds_mean[f\"{var_name}\"] = da_mean\n except ArithmeticError:\n warn(f\"Skipped mean calculation for {var_name} due to error: {traceback.format_exc()}\")\n return ds_mean", "title": "" }, { "docid": "1eae007a42810ed685a86b2521ea2290", "score": "0.5065865", "text": "def journey_data_years(all_y = False):\n \n if not (all_y):\n return (2012, 2014, 2015, 2016)\n else:\n return (2007, 2009, 2010, 2012, 2014, 2015, 2016)", "title": "" }, { "docid": "ce54e08d33dfda5fb05f85b68ca3b2ed", "score": "0.50636846", "text": "def daily_return(self,tick=None):\r\n if tick == None:\r\n total = self.total\r\n else:\r\n total = self.equities.loc[(tick,slice(None)),'close'].droplevel(0)\r\n daily_rtn = total/total.shift(1)-1\r\n daily_rtn[0] = 0\r\n return np.array(daily_rtn)", "title": "" }, { "docid": "93d6a0c1e9af70ec9adacf0d505f8c51", "score": "0.5060957", "text": "def gen_dates(baseyear=1999):\n mjd1=date_fun.date2mjd(baseyear,1,1,12,0,0)\n if is_leap(baseyear):\n mjds=np.arange(mjd1,mjd1+366)\n else:\n mjds=np.arange(mjd1,mjd1+365)\n dates=date_fun.mjd2date(mjds)\n return dates", "title": "" }, { "docid": "477c1eb2b284c516aa6d613b82c67712", "score": "0.50450563", "text": "def _get_yearly_average_maps(self, divisions_per_year=12):\r\n year_maps = []\r\n for i in range(0, len(self.map), divisions_per_year):\r\n months = self.map[i:i+divisions_per_year]\r\n year_maps.append(np.mean(months, axis=0))\r\n return np.asarray(year_maps)", "title": "" }, { "docid": "4f8070b01916ac6765e334338a04e947", "score": "0.50409573", "text": "def daily_avg_all_years(soil_moisture_dataframe):\n \n sm_daily_avg_all_years = soil_moisture_dataframe.set_index('doy')\n\n sm_year_daily_all_years = sm_daily_avg_all_years.groupby(\n [\"doy\"])[[\"sm_5cm\", \"sm_10cm\", \"sm_20cm\", \"sm_50cm\", \"sm_100cm\"]].mean()\n \n return 
sm_year_daily_all_years", "title": "" }, { "docid": "c1af9aca983815cd2b76d074af7eca6d", "score": "0.504026", "text": "def _compute_daily_averages(source_data: pd.DataFrame) -> pd.DataFrame():\n cols = [\n 'date',\n 'totalPrecipMM', \n 'minTemp', \n 'maxTemp', \n 'meanTemp', \n 'totalSnowCM'\n ]\n df = source_data[cols].set_index('date').astype(float).resample('1D').mean()\n return df", "title": "" }, { "docid": "2976e65b78ffae0d3a9f38db6ae39840", "score": "0.5034246", "text": "def get_monthly_avgs(weather):\n log('get_monthly_avgs')\n\n month_avgs = AccumDict()\n\n weather.sort(key=get_station_year)\n groups = itertools.groupby(weather, get_station_year)\n for ((station, wban, year), obs_iter) in groups:\n key = (station, wban)\n avg = mean([o.max_temp for o in obs_iter])\n month_avgs[key].append((year, avg))\n\n dump('01-get-monthly-avgs', ('station', 'wban', 'year', 'avgs'),\n iter_monthly_avgs(month_avgs))\n return month_avgs", "title": "" }, { "docid": "f70f2b2dd3541deb74a1323d2fb8f138", "score": "0.5032061", "text": "def mean(data,nyears=9,minval=-999,maxval=1e5):\n return(data.sum(axis=0)/nyears)", "title": "" }, { "docid": "754ff131897f65dc07c6b21f047a93c1", "score": "0.5027298", "text": "def get_year_anomaly_map_smooth(self, year):\r\n if not self._initialized:\r\n print(\"Class not initialized! Initialize class by calling\",\r\n \"initialize() or initialize_SST_from_RCP()!\")\r\n return self.smooth_map[year] - self.time_mean", "title": "" }, { "docid": "5fe07a4fa50409506e8fa85894bc51a9", "score": "0.5023009", "text": "def ts_transform(series: TimeSeries) -> TimeSeries:\n pass", "title": "" }, { "docid": "4cc9f256921d09e629bac9667d594f62", "score": "0.50153476", "text": "def get_value(self):\n return self.obs_val", "title": "" }, { "docid": "681f8bbb069a3802efbfd7b26ac7b290", "score": "0.501091", "text": "def getExtremes(timeSeries, percentile):\n years = timeSeries['year'].unique()\n extremes = pd.DataFrame(columns = ['year','meanPercObs', 'stdPercObs', \n 'meanPercRecon', 'stdPercRecon'])\n for ii in range(0, len(years)):\n currentYear = timeSeries[timeSeries['year'] == years[ii]]\n currentExtremes = currentYear[currentYear['surge'] >= currentYear['surge'].quantile(0.01*percentile)]\n currentData = pd.DataFrame([years[ii], currentExtremes['surge'].mean(),\n currentExtremes['surge'].std(),\n currentExtremes['surge_reconsturcted'].mean(),\n currentExtremes['surge_reconsturcted'].std()]).T\n currentData.columns = ['year','meanPercObs', 'stdPercObs', \n 'meanPercRecon', 'stdPercRecon']\n extremes = pd.concat([extremes, currentData], axis = 0)\n return extremes", "title": "" }, { "docid": "7692320a358748f911b1f95023968e04", "score": "0.5004184", "text": "def calc_annual_electric_savings (self):\n raise NotImplementedError, \"should be implemented by child class to\" +\\\n \" create self.annual_electric_savings as an np.array, length\" +\\\n \" self.project_life, of dollar values(numbers)\"", "title": "" }, { "docid": "7e3b92f8490a560e67818a250bce6e40", "score": "0.5002976", "text": "def get_combined_flow():\r\n\r\n dfs = []\r\n for year in range(2016, 2021):\r\n dfs.append(get_yearly_flow(year=year))\r\n flow_df = pd.concat(dfs)\r\n flow_df = flow_df.asfreq('H').interpolate()\r\n return flow_df", "title": "" }, { "docid": "94ed01ca863aee4ef258b5152e9b54f3", "score": "0.49991763", "text": "def get_solar_gains_per_year(window_area):\n return sum(\n external_shading\n * frame_area_fraction\n * non_perpendicular\n * 0.25\n * window_area\n * solar_global_radiation\n )", "title": "" }, { 
"docid": "7e19f49938a201ecc3c6692eb09908d9", "score": "0.49982917", "text": "def yearly_avg_sm(soil_moisture_dataframe):\n \n empty_dataframe = pd.DataFrame()\n\n years_list = years_list_all\n \n column_names = ['sm_5cm', 'sm_10cm', 'sm_20cm', 'sm_50cm', 'sm_100cm']\n\n for year in years_list:\n\n sm_year = soil_moisture_dataframe[soil_moisture_dataframe[\"year\"] == year]\n\n # Ensure that there is enough data for each year before mean is calculated\n if sm_year['day'].size > 275:\n \n # Missing/NaN data cleaning\n yearly_nan_analysis(sm_year)\n \n sm_year_mean = sm_year.groupby(\n [\"year\"])[[\"sm_5cm\", \"sm_10cm\", \n \"sm_20cm\", \"sm_50cm\", \n \"sm_100cm\"]].mean()\n\n empty_dataframe = empty_dataframe.append(sm_year_mean)\n\n else:\n data = {'sm_5cm':[np.nan], 'sm_10cm':[np.nan], 'sm_20cm':[np.nan], 'sm_50cm':[np.nan], 'sm_100cm':[np.nan]}\n filler_nan_df = pd.DataFrame(data, index=[year])\n empty_dataframe = empty_dataframe.append(filler_nan_df)\n print(year,': This year did not contain enough data and was set as NaN')\n \n return empty_dataframe", "title": "" }, { "docid": "f193ba94847b82851dbfc332a2bdcd80", "score": "0.49848348", "text": "def get_year(h5,songidx=0):\n return h5.root.musicbrainz.songs.cols.year[songidx]", "title": "" }, { "docid": "a26fed07a54a0852ff09c1abd98e5e07", "score": "0.4983865", "text": "def years(df):\n return df['dates'].apply(lambda x: (x.timetuple()).tm_year).unique()", "title": "" }, { "docid": "107d22dda2b89468267f994c150d1518", "score": "0.49784207", "text": "def annual_only_function(decyear, fit_params):\n model_def = [];\n w = 2 * np.pi / 1.0;\n for t in decyear:\n model_def.append((fit_params[0] * np.cos(w * t)) + (fit_params[1] * np.sin(w * t)));\n return model_def;", "title": "" }, { "docid": "eb1507da52ac7c5e977231764e4cd735", "score": "0.49732816", "text": "def annualized_returns(series, years):\n\n # Number of days to shift data. All years have 365 days\n # except leap-years which have 366 and occur every 4th year.\n # So on average a year has 365.25 days.\n days = int(years * 365.25)\n\n # Calculate annualized returns for all periods of this length.\n # Note: It is important we have daily (interpolated) data,\n # otherwise the series.shift(365) would shift much more than\n # a year, if the data only contains e.g. 
250 days per year.\n ann_return = (series.shift(-days) / series) ** (1 / years) - 1.0\n\n return ann_return", "title": "" }, { "docid": "d17333811afd1eef390d38e9a97eb3be", "score": "0.4971324", "text": "def possible_grad_years(self):\n today = datetime.datetime.now()\n\n if self.current_semester() == 'SP': # second semester\n grad_range = range(today.year, today.year + 4)\n else: # first semester\n grad_range = range(today.year + 1, today.year + 5)\n\n return grad_range", "title": "" }, { "docid": "290ea081306f2dfe9ca15c7be6b0726b", "score": "0.4968136", "text": "def test_linear_trend_timewise_years(self):\n # Try with different days\n x = pd.Series(\n [\n 0,\n 365 * 24,\n 365 * 48,\n 365 * 72 + 24,\n ], # Add 24 to the last one since it's a leap year\n index=pd.DatetimeIndex(\n [\n \"2018-01-01 04:00:00\",\n \"2019-01-01 04:00:00\",\n \"2020-01-01 04:00:00\",\n \"2021-01-01 04:00:00\",\n ]\n ),\n )\n\n param = [\n {\"attr\": \"pvalue\"},\n {\"attr\": \"rvalue\"},\n {\"attr\": \"intercept\"},\n {\"attr\": \"slope\"},\n {\"attr\": \"stderr\"},\n ]\n res = linear_trend_timewise(x, param)\n\n res = pd.Series(dict(res))\n\n self.assertAlmostEqual(res['attr_\"pvalue\"'], 0, places=3)\n self.assertAlmostEqual(res['attr_\"stderr\"'], 0, places=3)\n self.assertAlmostEqual(res['attr_\"intercept\"'], 0, places=3)\n self.assertAlmostEqual(res['attr_\"slope\"'], 1.0, places=3)", "title": "" }, { "docid": "b73bc16cac39f47f31c97ebad7dfb345", "score": "0.49678615", "text": "def processing(data, vars):\n data = data[vars]\n data_resampled = resample(data, 5)\n data_filled = fill_na(data_resampled)\n data_iterpolated = data_interpolation(data_filled, method = \"polynomial\", order = 1, limit = 4)\n data_feature_added = feature_eng(data_iterpolated, mealzone = True)\n data_manipulated = additional_manipulation(data_feature_added)\n \n data_sampled = create_samples_V2(data_manipulated.loc[:,('basis_gsr','glucose','datetime')],number_lags = 0,colonne_da_laggare=[],colonna_Y='glucose',pred_horizon=0)\n data_sampled.drop('glucose_t', axis = 1, inplace = True)\n data_sampled.dropna(inplace = True)\n\n x, y = extract_data(data_sampled, 0)\n x = final_x_manipulation(x)\n \n\n return x, y", "title": "" }, { "docid": "fca7e27a467af91ffd1d51d30b43bd04", "score": "0.49618623", "text": "def test_all_years(self):\n extents_doy_59 = np.array([100., 500., 400., 300., 900., 200., 600., 700., 800., 1000.])\n extents_doy_227 = np.array([200., 500., 300., 400., 700., 100., 600., 800., 900., 1000])\n\n q = np.array([.25, .75])\n\n quantiles_doy_59 = np.percentile(extents_doy_59, q*100, interpolation='linear')\n quantiles_doy_227 = np.percentile(extents_doy_227, q*100, interpolation='linear')\n expected_values_25, expected_values_75 = zip(quantiles_doy_59, quantiles_doy_227)\n\n expected_columns = pd.Float64Index([.25, .75])\n\n expected_index = pd.Int64Index([59, 227], name='day of year')\n\n actual = warp.quantiles(self.df['total_extent_km2'], [1979, 2015], q)\n\n assert_index_equal(actual.index, expected_index)\n assert_index_equal(actual.columns, expected_columns)\n npt.assert_array_equal(actual[0.25].values, expected_values_25)\n npt.assert_array_equal(actual[0.75].values, expected_values_75)", "title": "" }, { "docid": "eea12d41124c98cc33264267a029f7f1", "score": "0.49610695", "text": "def get_yearly_totals(year):\n # todo: get year to work, pull from functions parameter\n print(\"year:\", year)\n # Distance\n distance = Ride.objects.filter(start_time__range=[\"2021-01-01\", \"2021-12-31\"]).aggregate(Sum('distance'))\n 
distance = round(distance[\"distance__sum\"], 2)\n\n\n # Number of Rides\n num_rides = Ride.objects.filter(start_time__range=[\"2021-01-01\", \"2021-12-31\"]).count\n\n # Numbers of Days\n # num_days = Ride.objects.filter(start_time__range=[\"2021-01-01\", \"2021-12-31\"]).distinct('start_time')\n\n\n yearly_totals = {\n 'distance': distance,\n 'num_rides': num_rides,\n # 'num_days': num_days,\n }\n\n return yearly_totals", "title": "" }, { "docid": "a1ee5b778a1896a1673ff8240e8c669c", "score": "0.4958591", "text": "def derive_yearly_amounts(df, start_business_year, extraction_date, contract_start_date_column_name, contract_end_date='actual_contract_end_date', row_per_each_contract_year=True, add_one_day=False, written_premium_column_name='asif_written_premium_excl_taxes', number_paid_premium_column_name='written_multiplier'):\r\n\r\n df_copy = deepcopy(df)\r\n extraction_year = extraction_date.year\r\n \r\n if row_per_each_contract_year == True:\r\n for year in range(start_business_year, extraction_year + 1):\r\n df_copy['exposure_in_{}'.format(year)] = df_copy.apply(lambda x: derive_annual_exposure(x, year, extraction_date, contract_start_date_column_name, contract_end_date, add_one_day), axis=1)\r\n df_copy['asif_earned_premium_in_{}'.format(year)] = df_copy['exposure_in_{}'.format(year)] * df_copy[written_premium_column_name]\r\n\r\n else:\r\n for year in range(start_business_year, extraction_year + 1):\r\n df_copy['exposure_in_{}'.format(year)] = df_copy.apply(lambda x: derive_annual_exposure(x, year, extraction_date, contract_start_date_column_name, contract_end_date, add_one_day), axis=1)\r\n df_copy['asif_written_premium_in_{}'.format(year)] = df_copy.apply(lambda x: derive_yearly_amount(x, year, extraction_date, contract_start_date_column_name, written_premium_column_name, contract_end_date, number_paid_premium_column_name), axis=1)\r\n df_copy['asif_earned_premium_in_{}'.format(year)] = df_copy['exposure_in_{}'.format(year)] * df_copy[written_premium_column_name] / df_copy[number_paid_premium_column_name]\r\n\r\n return df_copy", "title": "" }, { "docid": "802607ff3350e97ec45fd223fafeffb3", "score": "0.49560437", "text": "def compute_values(self):\n self._x_values = np.linspace(*(self.xRange), self.n_points)\n self._y_values = self.funcion(self._x_values, **self.dF_args)\n \n return self._x_values, self._y_values", "title": "" }, { "docid": "302f59fb84fffa55f6471d4b98e88a8f", "score": "0.4953331", "text": "def get_part_of_ts(self, data, elmt):\n res_ts = data[elmt[\"capteur\"]].copy()\n res_ts = res_ts.set_index(\"Date\")\n if elmt[\"week\"] and not elmt[\"month\"]:\n res_ts = res_ts[str(elmt[\"year\"])]\n res_ts = res_ts.groupby(pd.Grouper(freq='W'))\n for i in res_ts:\n if i[0].week == elmt[\"week\"]:\n res_ts = i[1]\n elif elmt[\"week\"] and elmt[\"month\"]:\n res_ts = res_ts[str(elmt[\"year\"]) +\"-\"+ str(elmt[\"month\"])]\n res_ts = res_ts.groupby(pd.Grouper(freq='W'))\n for i in res_ts:\n if i[0].week == elmt[\"week\"]:\n res_ts = i[1]\n elif elmt[\"month\"]:\n res_ts = res_ts[str(elmt[\"year\"]) +\"-\"+ str(elmt[\"month\"])]\n else:\n res_ts = res_ts[str(elmt[\"month\"])]\n res_ts = res_ts.reset_index()\n res_ts = self.ss.normalize(res_ts)\n return res_ts", "title": "" }, { "docid": "acae821d5e9e6c2de8a3ec7d6dfa6dfa", "score": "0.4952349", "text": "def function(t):\n return bc_values[0]", "title": "" }, { "docid": "edb886365735e8f45dd0b091d6f6accd", "score": "0.49504218", "text": "def year(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"year\")", "title": "" }, { 
"docid": "edb886365735e8f45dd0b091d6f6accd", "score": "0.49504218", "text": "def year(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"year\")", "title": "" }, { "docid": "5db99c26799cf758e3ce4fb04dc734ea", "score": "0.49495012", "text": "def interannual(data,yearstarts,fun=None):\n if fun==None:\n fun=np.mean\n annshape=list(data.shape)\n annshape[0]=len(yearstarts)\n annual_vals=np.zeros(annshape)\n for i in range(annshape[0]):\n startpt=yearstarts[i]\n if i==annshape[0]-1:\n endpt=None\n else:\n endpt=yearstarts[i+1]\n annual_vals[i,...]=fun(data[startpt:endpt,...],axis=0)\n\n annual_vals=np.ma.array(annual_vals,mask=~np.isfinite(annual_vals))\n \n return(np.std(annual_vals,axis=0))", "title": "" }, { "docid": "dd0720ca187253a75ab33d18da3ef8af", "score": "0.49423555", "text": "def bokeh_plot_gdd_years(city, startYear, endYear, tbase, tupper):\n\n for year in range(startYear, endYear+1):\n data = download_data(city, year)\n minT = data['Min Temp (°C)']; maxT = data['Max Temp (°C)']\n tmp = calc_gdd(list(minT),list(maxT),tbase,tupper)\n\n if tmp is None:\n print('Error in data for '+city+' in year '+str(year))\n\n else:\n n=len(data['Day']); Index = [None]*n\n\n for i in range(n):\n Index[i]=str(data['Month'][i])+\"_\"+str(data['Day'][i])\n \n gdd_day = list(tmp[0])\n\n if year == startYear:\n df = pd.DataFrame(gdd_day, index=Index, columns=[year])\n\n else:\n df[year] = pd.DataFrame(gdd_day, index=Index)\n\n data1 = df.transpose()\n Mean = [None]*366\n percentile_5 = [None]*366; percentile_95 = [None]*366; percentile_25 = [None]*366; percentile_75 = [None]*366\n total_years = len(list(data1['1_1']))\n percentile5 = round((5/100) * total_years); percentile95 = round((95/100) * total_years)\n percentile25 = round((25/100) * total_years); percentile75 = round((75/100) * total_years)\n\n i=0\n for day in df.index:\n sorteddata = list(data1[day])\n sorteddata.sort()\n Mean[int(i)]=data1[day].mean()\n percentile_5[int(i)] = sorteddata[int(percentile5)]; percentile_95[int(i)] = sorteddata[int(percentile95)]\n percentile_25[int(i)] = sorteddata[int(percentile25)]; percentile_75[int(i)] = sorteddata[int(percentile75)]\n i+=1\n\n source1=ColumnDataSource(dict(left=np.arange(0.5,366.5),top= percentile_95,right=np.arange(1.5,367.5),bottom=percentile_5))\n source2=ColumnDataSource(dict(left=np.arange(0.5,366.5),top= percentile_75,right=np.arange(1.5,367.5),bottom=percentile_25))\n\n plot = figure(plot_width=600, tools=\"\", toolbar_location=None, title=\"Daily GDD Statistics for {} from {} to {}\".format(city,startYear,endYear))\n plot.quad(top='top', bottom='bottom', left='left',right='right',source=source1,color=\"#000000\", legend=\"Percentile 5-95\")\n plot.quad(top='top', bottom='bottom',left='left',right='right', source=source2,color=\"#66ccff\",legend=\"percentile 25-75\")\n plot.line(np.arange(0,366),Mean,line_color='Red', line_width=0.5, legend='AverageTemp')\n plot.border_fill_color = \"whitesmoke\"\n plot.xaxis.axis_label = \"Days\"\n plot.yaxis.axis_label = \"Daily GDD Accumulation\"\n plot.axis.major_label_text_font_size = \"10pt\"\n plot.axis.axis_label_text_font_size = \"12pt\"\n plot.axis.axis_label_text_font_style = \"bold\"\n plot.x_range = DataRange1d(range_padding=0.0, bounds=None)\n plot.grid.grid_line_alpha = 0.3\n new_fname = os.path.dirname(os.path.realpath(__file__)) + \"/../Output/\" + \"OptionalTask1GDDPlot.html\"\n output_file(new_fname, title=\"OptionalTask1GDDPlot\")\n save(plot)", "title": "" }, { "docid": "d0fc20d0271fb6ed76e911293ce0d8fb", "score": 
"0.49388674", "text": "def get_revenue_data(self, year=None):\n\n revenue = []\n if year:\n year = int(year)\n else:\n year = current_year\n month_names = [datetime.date(year, i, 1).strftime('%B') for i in range(1, current_month + 1)]\n month_count = [\"0\" + str(datetime.date(year, i, 1).month)\n if datetime.date(year, i, 1).month == 1\n else str(datetime.date(year, i, 1).month)\n for i in range(1, current_month + 1)]\n\n for i in month_count:\n total = 0\n quote_list = []\n if self.request.user.groups.filter(name='store admin').exists():\n for customer in self.get_customers(self.request.user):\n if CustomerPremiumDetail.objects.filter(customer=customer, effectiveDate__year=year,\n effectiveDate__month=i):\n quote_list.append(CustomerPremiumDetail.objects.\n filter(customer=customer, effectiveDate__year=year,\n effectiveDate__month=i))\n quotes = list(itertools.chain(*quote_list))\n else:\n quotes = CustomerPremiumDetail.objects.filter(effectiveDate__year=year, effectiveDate__month=i)\n if quotes:\n for quote in quotes:\n total += int(Decimal(quote.jewel_price))\n revenue.append(total)\n else:\n revenue.append(0)\n return month_names, revenue", "title": "" }, { "docid": "6bb9f578a2af33deb23e7260dbe1344d", "score": "0.49371192", "text": "def calculate_sample_veg_monthly_avg(fprefix, y, m):\n\n iy = int(y)\n im = int(m)\n md = month_day[im-1]\n\n for d_1 in range(md):\n # Current day\n d = d_1 + 1\n\n # File name\n fname = '{prefix}-{year}-veg_{year}{month}{day:02d}_00p06.nc'.format( \\\n prefix=fprefix, year=y, month=m, day=d)\n # print('Reading veg file {0}'.format(fname))\n\n # Get the daily mean data\n fid = Dataset(fname, 'r')\n cvl_dayavg = np.mean(fid.variables['cvl'][:, :, :], axis=0)\n cvh_dayavg = np.mean(fid.variables['cvh'][:, :, :], axis=0)\n\n # Add up to the monthly data\n if d == 1: # initiate on the first day\n lon = fid.variables['lon'][:]\n lat = fid.variables['lat'][:]\n nlon = len(lon)\n nlat = len(lat)\n\n cvl_monavg = np.zeros( (nlat, nlon) )\n cvh_monavg = np.zeros( (nlat, nlon) )\n\n cvl_monavg += cvl_dayavg\n cvh_monavg += cvh_dayavg\n\n # print(m, np.mean(cvl_dayavg), np.mean(cvh_dayavg))\n\n # Close the file\n fid.close\n\n # Divided by the month day\n cvl_monavg /= md\n cvh_monavg /= md\n\n # print(m, np.mean(cvl_monavg), np.mean(cvh_monavg))\n\n return (cvl_monavg, cvh_monavg, lon, lat)", "title": "" }, { "docid": "4cd9e470aecf46c7aa51edd8e7cdc876", "score": "0.49362195", "text": "def _get_gmv_monthly(self) -> Tuple[float, float]:\n return (\n Rebalance.return_ts(\n self.gmv_monthly_weights, self.assets_ror, period=self.reb_period\n ).std(),\n Rebalance.return_ts(\n self.gmv_monthly_weights, self.assets_ror, period=self.reb_period\n ).mean(),\n )", "title": "" }, { "docid": "e43141803024a3a1da647cfde6b20568", "score": "0.49342582", "text": "def get_dates(self) -> np.ndarray:\n return self.dates", "title": "" }, { "docid": "f97d2d622adc1ebe20ee911f154f3d1b", "score": "0.49321002", "text": "def gbm(n_years=10, n_scenarios=1000,mu=0.07, sigma=0.15,steps_per_year=12,s_0=100.0,prices=True):\n \n dt=1/steps_per_year\n n_steps=int(n_years*steps_per_year)\n rets_plus_one=np.random.normal(loc=1+mu*dt,scale=sigma*np.sqrt(dt),size=(n_steps+1,n_scenarios))\n rets_plus_one[0]=1\n ret_val=s_0*pd.DataFrame(rets_plus_one).cumprod() if prices else rets_plus_one-1\n return ret_val", "title": "" }, { "docid": "b981036df2613b46da4787d922aaa650", "score": "0.49320692", "text": "def SWE_calc_period(station,station_n,startdate='',enddate=''):\n\n TD=TD_calc(station)\n PPTWT = 
PPTWT_calc(station_n)\n #TD=TD_calc_specific(station,datum)\n #PPTWT = PPTWT_calc_specific(station,datum)\n print('TD: ',TD)\n print('PPTWT: ',PPTWT)\n\n df = dd.ReadfromProcessed(station,'Daily',sensor='HS,LT',startdate=startdate,enddate=enddate)\n print(len(df))\n #df=df.dropna()\n print(len(df))\n df=df.set_index('Datum')\n SWE_ls=[]\n for i in range(0,len(df.index)):\n DOY = DOY_calc(datetime.datetime.strftime(df.index[i],\"%Y%m%d\"))\n H= df.HS[i] * 10\n a = [0.0533,0.948,0.1701,-0.1314,0.2922] #accumulation phase\n b = [0.0481,1.0395,0.1699,-0.0461,0.1804] #ablation phase\n SWE = a[0]*H**a[1]*PPTWT**a[2]*TD**a[3]*DOY**a[4]* \\\n (-np.tanh(.01*(DOY-180))+1)/2 + b[0]*H**b[1]* \\\n PPTWT**b[2]*TD**b[3]*DOY**b[4] * (np.tanh(.01*(DOY-180))+1)/2\n SWE_ls.append(SWE)\n df['SWE']=SWE_ls\n return df", "title": "" }, { "docid": "9eaab8a70b8bd59c3e89986b283ddeac", "score": "0.4931265", "text": "def util_granulate_time_series(time_series, scale):\n n = len(time_series)\n b = int(np.fix(n / scale))\n cts = [0] * b\n for i in range(b):\n cts[i] = np.mean(time_series[i * scale: (i + 1) * scale])\n return cts", "title": "" }, { "docid": "e6864ec1124b557725012800984b12c7", "score": "0.49278992", "text": "def get_values(ser: pd.Series) -> np.ndarray:\r\n return ser.values", "title": "" }, { "docid": "764c1f59288bfc253e975aa4eabf9fe0", "score": "0.4923953", "text": "def priceBasedMetrics(self, row):\n fmt = TMF1000Analyser.fmt\n quarterData = row\n startDateFmt = moment.Moment(quarterData['date_min']).format('MMM D, YYYY')\n endDateFmt = moment.Moment(quarterData['date_max']).format('MMM D, YYYY')\n # quarterDateFmt = moment.Moment(quarterData['quarterEnd']).format('MMM D, YYYY')\n quarterData['marketCap_max'] = quarterData['sharesbas'] * quarterData['high_max']\n quarterData['marketCap_min'] = quarterData['sharesbas'] * quarterData['low_min']\n quarterData['marketCap_last'] = quarterData['sharesbas'] * quarterData['close_last']\n quarterData['ev_min'] = quarterData['marketCap_min'] + quarterData['debt'] - quarterData['cashnequsd']\n quarterData['ev_max'] = quarterData['marketCap_max'] + quarterData['debt'] - quarterData['cashnequsd']\n quarterData['ev_last'] = quarterData['marketCap_last'] + quarterData['debt'] - quarterData['cashnequsd']\n\n quarterData['ttmPE_min'] = quarterData['low_min'] / quarterData['eps_ttm']\n quarterData['ttmPE_max'] = quarterData['high_max'] / quarterData['eps_ttm']\n quarterData['ttmPE_last'] = quarterData['close_last'] / quarterData['eps_ttm']\n # The 0.0000001 is a hack to get around DivisionByZero errors\n quarterData['1YPEG_min'] = pd.np.where(quarterData['eps_ttm_growth'] > 0, quarterData['ttmPE_min'] / (quarterData['eps_ttm_growth'] + 0.0000001 * 100), np.NAN)\n quarterData['1YPEG_max'] = pd.np.where(quarterData['eps_ttm_growth'] > 0, quarterData['ttmPE_max'] / (quarterData['eps_ttm_growth'] + 0.0000001 * 100), np.NAN)\n quarterData['1YPEG_last'] = pd.np.where(quarterData['eps_ttm_growth'] > 0, quarterData['ttmPE_last'] / (quarterData['eps_ttm_growth'] + 0.0000001 * 100), np.NAN)\n # quarterData['1YPEG_max'] = quarterData['ttmPE_max'] / (quarterData['eps_ttm_growth'] + 0.0000001 * 100)\n # quarterData['1YPEG_last'] = quarterData['ttmPE_last'] / (quarterData['eps_ttm_growth'] + 0.0000001 * 100)\n\n print('\\n#### Trading data (%s - %s)' % (startDateFmt, endDateFmt))\n print(\"* Trading range was %s to %s [%s]\" % (quarterData['low_min'], quarterData['high_max'], quarterData['close_last']))\n print(\"* Market cap was %s to %s [%s]\" % (\n 
fmt.number_formatter(quarterData['marketCap_min']), fmt.number_formatter(quarterData['marketCap_max']),\n fmt.number_formatter(quarterData['marketCap_last'])))\n\n lastTtmEps = quarterData['eps_ttm']\n rps = quarterData['rps_ttm']\n lastFreeCashFlow = quarterData['fcf_ttm']\n ttmRevenue = quarterData['revenue_ttm']\n\n if lastTtmEps < 0:\n print(\"* PE range not applicable (earnings < 0)\")\n else:\n print(\"* PE range was %s to %s [%s]\" % (\n fmt.number_formatter(quarterData['low_min'] / lastTtmEps), fmt.number_formatter(quarterData['low_max'] / lastTtmEps),\n fmt.number_formatter(quarterData['close_last'] / lastTtmEps)))\n\n print(\"* PS ratio range was %s to %s [%s]\" % (\n fmt.number_formatter((quarterData['low_min']) / rps), fmt.number_formatter((quarterData['high_max'] / rps)),\n fmt.number_formatter(quarterData['close_last'] / rps)))\n\n print(\"* Free cash flow (TTM) yield range was %s to %s [%s]\" % (\n fmt.number_formatter(lastFreeCashFlow * 100 / quarterData['marketCap_max']),\n fmt.number_formatter(lastFreeCashFlow * 100 / quarterData['marketCap_min']),\n fmt.number_formatter(lastFreeCashFlow * 100 / quarterData['marketCap_last'])))\n print(\"* EV/Sales was %s to %s [%s]\" % (\n fmt.number_formatter(quarterData['ev_min'] / ttmRevenue), fmt.number_formatter(quarterData['ev_max'] / ttmRevenue),\n fmt.number_formatter(quarterData['ev_last'] / ttmRevenue)))\n print(\"* TTM EPS growth was %s [EPS: %s versus %s]\" % (\n fmt.percent_formatter(quarterData['eps_ttm_growth']), fmt.number_formatter(quarterData['eps_ttm']),\n fmt.number_formatter(quarterData['eps_ttm_same'])))\n print(\"* 1YPEG (under 1.0 desirable) was %s to %s [%s]\" % (\n fmt.number_formatter(quarterData['1YPEG_min']), fmt.number_formatter(quarterData['1YPEG_max']),\n fmt.number_formatter(quarterData['1YPEG_last'])))\n pass", "title": "" }, { "docid": "a0247b9fe1cd3e4bab093774585a3c7f", "score": "0.49163705", "text": "def compute_time_stats(x, ts, delta=25, typ='amp', jrk=1):\n #jrk = 0\n #diffs_acc, diffs_jrk = extract_time_features(x,ts,delta)\n if typ == 'amp':\n diffs_acc = calculate_ts_amp_diffs(x, delta=delta)\n if jrk:\n diffs_jrk = calculate_ts_amp_diffs(np.diff(x), delta=delta*.75)\n else:\n diffs_acc = calculate_ts_diffs(x, ts, delta=delta)\n if jrk:\n diffs_jrk = calculate_ts_diffs(np.diff(x), ts[1:], delta=delta*.75)\n\n diffs_acc = np.array(diffs_acc)\n if jrk:\n diffs_jrk = np.array(diffs_jrk)\n \n\n means_acc = diffs_acc.mean(axis=0)\n stds_acc = diffs_acc.std(axis=0)\n if jrk:\n means_jrk = diffs_jrk.mean(axis=0)\n stds_jrk = diffs_jrk.std(axis=0)\n #print means_acc, stds_acc, means_jrk, stds_jrk\n #return np.mean(diffs_acc), np.std(diffs_acc), np.mean(diffs_jrk), np.std(diffs_jrk)\n \n if jrk:\n #print means_acc.shape, stds_acc.shape, means_jrk.shape, stds_jrk.shape\n rslt = np.concatenate((means_acc, stds_acc, means_jrk, stds_jrk))\n else:\n #print means_acc.shape, stds_acc.shape\n rslt = np.concatenate((means_acc, stds_acc))\n\n\n return rslt", "title": "" }, { "docid": "a03b7c2e195db3431da624826245ac13", "score": "0.490312", "text": "def _compute_log_value(self):", "title": "" }, { "docid": "85db3244e2d469ab4517569e5eac4cc8", "score": "0.49026433", "text": "def get_geojson(yr):\n gdf_year = gdf_merged[gdf_merged['year'] == yr]\n return gdf_year.to_json()", "title": "" }, { "docid": "2c33fffc081a2a5b2f8fb965528113e2", "score": "0.4894078", "text": "def avg_by_year(df):\n #calculate the average ratings by year\n avg_by_year_df = df.groupby('years')['ratings'].mean()\n \n #calculate the std ratings by 
year\n std_by_year_df = df.groupby('years')['ratings'].std()\n #convert Series to pd.dataframe\n avg_by_year_df = pd.DataFrame({'year': avg_by_year_df.index.tolist(),\n 'avg_ratings': avg_by_year_df,\n 'std': std_by_year_df})\n \n #reset index\n avg_by_year_df.reset_index(drop=True, inplace=True)\n \n return avg_by_year_df", "title": "" }, { "docid": "bf58d5dbfab5bd17f1c3e3a3e67ff549", "score": "0.4891904", "text": "def get_ga_agg():\n tables = pd.read_html(url_ga)\n [table] = [t for t in tables if \"Lab\" in t]\n\n gen_date = get_generated_date(url_ga)\n\n table = table.assign(date_gen=gen_date).assign(\n date_gen=lambda x: pd.to_datetime(x.date_gen)\n )\n return table", "title": "" } ]
597931493f291bce8f94e74758017c86
Apply rank-based thresholding on a given matrix. In RCut (also known as `k-per-doc`), only the `rank` best topics are assigned to each document.
[ { "docid": "1f5e53d86b0f5f3524cee7273be71aeb", "score": "0.5806677", "text": "def r_cut(y, rank=3):\n y = np.array(y)\n y_pred = np.zeros(y.shape, dtype=bool)\n for i, row in enumerate(y):\n max_js = row.argsort()[-rank:][::-1]\n for j in max_js:\n y_pred[i, j] = True\n return y_pred", "title": "" } ]
[ { "docid": "e91e6f4e70eb1b78646730d67fa0b6ab", "score": "0.61783934", "text": "def rankify(mat, size=11):\n return generic_filter(mat, rankkern, size=(\n size, size), mode='constant', cval=-1)", "title": "" }, { "docid": "0fad1c1f3d96675b4d2e7da640495382", "score": "0.5966574", "text": "def get_score_rankings(weights,feature_matrix,qptr,max_documents=None, inverted=False):\n # minus to reverse ranking\n predictions = -np.squeeze(np.dot(weights.T,feature_matrix))\n return rank_queries(predictions,qptr,max_documents=max_documents,inverted=inverted)", "title": "" }, { "docid": "bc55da9f018c927a565b9a1cad75435a", "score": "0.5844435", "text": "def get_tensor_min_rank_filter(rank):\n return lambda arg_value: arg_value.is_tensor and len(arg_value.shape) >= rank", "title": "" }, { "docid": "61659716a0ad07b7ba22bef94194cd53", "score": "0.55821514", "text": "def rank_acc(score_matrix, tot_sample, candidate_len):\n assert score_matrix.shape[1] == 4\n # rank by ids\n score_matrix = np.take(score_matrix, np.argsort(score_matrix[:, 1]), axis=0)\n score_matrix = np.reshape(score_matrix, [tot_sample, candidate_len, 4])\n acc = 0\n for i in range(tot_sample):\n score_mat = score_matrix[i]\n top_idx = np.argsort(score_mat[:, 0])[::-1][0]\n acc = acc + int(score_mat[top_idx][2] == score_mat[top_idx][3])\n ret = OrderedDict()\n ret['rank_acc'] = round(float(acc/tot_sample), 4)\n ret['key_eval'] = \"rank_acc\"\n return ret", "title": "" }, { "docid": "55675ba675e65e18ede23a2a145d30de", "score": "0.55626476", "text": "def _rank(lam, ranking_top_m_list):\n \n ranking = rankdata(-lam, method='min')\n ranking_top = np.zeros((lam.shape[0], len(ranking_top_m_list)))\n for j, m in enumerate(ranking_top_m_list):\n ranking_top[:,j] = ranking <= m\n return ranking, ranking_top", "title": "" }, { "docid": "7aca77e7204d6dbe3342d56403c52e31", "score": "0.54659456", "text": "def pivot_columns(a, rank=None, threshold=None):\n assert (threshold is None) != (rank is None), \"Exactly one of threshold and rank should be specified.\"\n if threshold is not None:\n rank = approx_rank(a, threshold)\n return qr(a, pivoting=True)[2][:rank]", "title": "" }, { "docid": "7c51e53730e40f5d5ddcf34cdcc591fa", "score": "0.546399", "text": "def on_rank(self, ranks: List[int]):", "title": "" }, { "docid": "059b1e3e99237f6bf6d304dc6a942ff2", "score": "0.5460979", "text": "def page_rank(matrix, epsilon=0.001, limit=50):\n\n matrix_values = matrix.values\n rows, columns = matrix_values.shape\n\n ranks = np.zeros((rows, columns))\n\n for s in range(rows):\n for t in range(columns):\n t_n_links = matrix_values[t, :].sum()\n # If t is connected to s:\n if matrix_values[t, s] == 1:\n ranks[s, t] = epsilon / float(rows)\n assert not np.isclose(t_n_links, 0)\n ranks[s, t] += (1.0 - epsilon) / t_n_links\n elif np.isclose(t_n_links, 0):\n ranks[s, t] = 1.0 / float(rows)\n else:\n ranks[s, t] = epsilon / float(rows)\n\n relevance = np.ones((rows, 1)) * (1.0 / float(rows))\n old_relevance = relevance.copy()\n\n final_ranks = np.zeros((rows, 1))\n\n for i in range(limit):\n for x in range(rows):\n row_count = np.sum(ranks[x, :] * old_relevance.ravel())\n relevance[x] = row_count\n if i == limit - 1:\n final_ranks[x] = relevance[x]\n old_relevance = relevance.copy()\n\n df_out = pd.DataFrame(final_ranks, index=matrix.index, columns=['Rank'])\n\n return df_out", "title": "" }, { "docid": "5df0f3d2b2c623cd3f66347b7464222b", "score": "0.544007", "text": "def rankOfOrder(self, matrix, order):\n\n numMons = Polalg.numVariablesUpDegree(order, self.n)\n return 
Linalg.rank(matrix[0:numMons, 0:numMons], self.rankDecayThreshold, self.rankZeroThreshold)", "title": "" }, { "docid": "d19d5167aa9ed42a86db45604319e16e", "score": "0.5350771", "text": "def approx_rank(a, threshold=0.03):\n s = np.linalg.svd(a, compute_uv=False)\n rank = s[s >= threshold * s[0]]\n return len(rank)", "title": "" }, { "docid": "58aadb1a9f117bf529c4da578884febd", "score": "0.5341221", "text": "def select_rank_rmt_threshold(X, thresh_method='dg',\n noise_est='mp',\n noise_est_kwargs={},\n UDV=None):\n\n if UDV is None:\n UDV = svd_wrapper(X)\n\n if not isinstance(noise_est, Number):\n noise_est = estimate_noise(X, method=noise_est,\n UDV=UDV, **noise_est_kwargs)\n\n assert thresh_method in ['mpe', 'dg']\n\n if thresh_method == 'dg':\n thresh = donoho_gavish_threshold(shape=X.shape,\n sigma=noise_est)\n\n elif thresh_method == 'mpe':\n thresh = marcenko_pastur_edge_threshold(shape=X.shape,\n sigma=noise_est)\n\n svals = UDV[1]\n rank_est = sum(np.array(svals) > thresh)\n\n return rank_est, {'svals': svals,\n 'shape': X.shape,\n 'thresh': thresh,\n 'noise_estimate': noise_est}", "title": "" }, { "docid": "c2a24717e8419d8cb750b12724f372b3", "score": "0.52447975", "text": "def ranksort(self, scale=0):\n self.update_fitness_vals()\n if len(self.masks)>1:\n ranks = np.argsort(self.fitness_vals)\n self.fitness_vals = np.array(self.fitness_vals)[ranks].tolist()\n self.masks = np.array(self.masks)[ranks].tolist()\n self.output_fields = np.array(self.output_fields,dtype=np.int)[ranks].tolist()", "title": "" }, { "docid": "00f7a693df52b6607af4598f753d4331", "score": "0.5233521", "text": "def rank2score(rank):\n Nclass = len(rank)\n scores = [0 for i in range(Nclass)]\n for pos in range(Nclass):\n scores[rank[pos]] = Nclass - pos\n return scores", "title": "" }, { "docid": "4e8b677ea54da68ae2a9a085fac97643", "score": "0.5211106", "text": "def top_k(scores: np.ndarray, k: int = 1):\n return np.argsort(-scores)[:k]", "title": "" }, { "docid": "bbf8be38829f1eb0b3e6545f17beef11", "score": "0.52110565", "text": "def _scores_by_top_k(top_k: int, scores: np.ndarray) -> np.ndarray:\n if scores.shape[-1] < top_k:\n raise ValueError(\n 'not enough attributions were provided to perform the requested '\n 'calcuations for top k. 
The requested value for k is {}, but the '\n 'values are {}\\n\\nThis may be caused by a metric configuration error '\n 'or an error in the pipeline.'.format(top_k, scores))\n\n indices = np.argpartition(scores, -top_k)[-top_k:]\n indices = indices[np.argsort(-scores[indices])]\n return scores[indices]", "title": "" }, { "docid": "45e287868f566b12f498d58b57c53281", "score": "0.52073705", "text": "def rank(self, res, words):\r\n## for item in res:\r\n## self.itemScore_[item] = self.bm25_relevance(words,item)\r\n## for nrank in heapq.nlargest(10, self.itemScore_.items(), key=lambda(k,v):(v,k)):\r\n## print self.docs_[int(nrank[0])][0] + ' ' + str(nrank[1])\r\n###\r\n for nrank in heapq.nlargest(10, res, key = lambda x: self.bm25_relevance(words,x)):\r\n print self.docs_[int(nrank)][0] + ' ' + str(self.bm25_relevance(words,nrank))", "title": "" }, { "docid": "39089ac56f3eafd1dc2cdc9ef2e73b0c", "score": "0.5181577", "text": "def feature_ranking(score):\n idx = np.argsort(score, 0)\n return idx[::-1]", "title": "" }, { "docid": "a14150e3b2fe187da1e28cebfa1929c2", "score": "0.51528996", "text": "def ranking_fn(query_tokens, doc_id, inverted_index, term_frequency_map):\n return 1", "title": "" }, { "docid": "e7e6c6e757cbdf29f6ee6365f8a246fe", "score": "0.51509976", "text": "def rank(chrom,score):\n order = _sortrows(scipy.concatenate((score,chrom),1))\n\n ranksc = scipy.zeros((chrom.shape[0],1),'d')\n for x in range(1,len(score),1):\n ranksc[x] = 2*(float(x)/(chrom.shape[0]-1)) \n ranksc = _flip(ranksc)\n\n chrom = scipy.array(order[:,1:order.shape[1]])\n scores = scipy.reshape(order[:,0],(order.shape[0],1))\n\n return ranksc,chrom,scores", "title": "" }, { "docid": "3a7b7b05a6d485c67ce4143d5f24ad90", "score": "0.5140605", "text": "def predict_proba(self,list_images,threshold=0,mode='rank'):\n if mode=='rank':\n preds_number,_ = self.predict(list_images,self.sift_cluster,self.dict_vectorizer,self.model_number)\n preds_number.argmax(axis=1)\n \n if mode=='suit':\n preds_number, pred = self.predict(list_images,self.suit_sift_cluster,self.suit_dict_vectorizer,self.model_suit)\n return pred\n #preds_number.argmax(axis=1)\n \n return self.model_number.classes_[((preds_number > threshold) * preds_number).nonzero()[1]],preds_number[((preds_number > threshold) * preds_number).nonzero()]", "title": "" }, { "docid": "5d2bc3babffdaf326e98de261e3a5b51", "score": "0.5122717", "text": "def hits_k(rankings: np.ndarray, k: int) -> float:\n # N = rankings.shape[0]\n # max along columns to not double count for candidates expansion\n # return (rankings[:,:k] == reference).max(-1).sum() / N\n return rankings[:,:k].max(axis=1).mean()", "title": "" }, { "docid": "de3913d9115d8bfa1d481f3e95c9736b", "score": "0.5099935", "text": "def rank(self,rankby='size'):\n vals = []\n if rankby == 'size':\n for i in range(self.n):\n vals.append(int(self.clusters[i].volume))\n elif rankby == 'peak':\n for i in range(self.n):\n vals.append(int(self.clusters[i].peak))\n else:\n raise ValueError, \"rankby must be 'size' or 'peak' in clustmap.rank()\"\n ranks = N.argsort(vals) # biggest=n, smallest=1\n a = N.zeros(self.baseshape,N.Int)\n for i in range(self.n):\n# print (self.n-i), self.clusters[ranks[i]].volume\n this = N.not_equal(self.clusters[ranks[i]].toarray(),0)\n a = a + this*(self.n-i) # multiplier makes biggest=1, etc\n return a", "title": "" }, { "docid": "c662b61c991502a7db81dbb20d17b0f0", "score": "0.508207", "text": "def nms(\n boxes,\n scores,\n score_threshold,\n nms_threshold,\n top_k=200,\n normalized=True,\n 
eta=1.0,\n):\n index = -1\n for i in range(boxes.shape[0]):\n if (\n index > -1\n and iou(boxes[i], boxes[index], normalized) > nms_threshold\n ):\n weight_merge(boxes[i], boxes[index], scores[i], scores[index])\n scores[index] += scores[i]\n scores[i] = score_threshold - 1.0\n else:\n index = i\n\n all_scores = copy.deepcopy(scores)\n all_scores = all_scores.flatten()\n\n selected_indices = np.argwhere(all_scores > score_threshold)\n selected_indices = selected_indices.flatten()\n all_scores = all_scores[selected_indices]\n\n sorted_indices = np.argsort(-all_scores, axis=0, kind='mergesort')\n sorted_scores = all_scores[sorted_indices]\n sorted_indices = selected_indices[sorted_indices]\n\n if top_k > -1 and top_k < sorted_indices.shape[0]:\n sorted_indices = sorted_indices[:top_k]\n sorted_scores = sorted_scores[:top_k]\n\n selected_indices = []\n adaptive_threshold = nms_threshold\n for i in range(sorted_scores.shape[0]):\n idx = sorted_indices[i]\n keep = True\n for k in range(len(selected_indices)):\n if keep:\n kept_idx = selected_indices[k]\n overlap = iou(boxes[idx], boxes[kept_idx], normalized)\n keep = True if overlap <= adaptive_threshold else False\n else:\n break\n if keep:\n selected_indices.append(idx)\n if keep and eta < 1 and adaptive_threshold > 0.5:\n adaptive_threshold *= eta\n return selected_indices", "title": "" }, { "docid": "f3d596651772d778bc58b213b9498aa4", "score": "0.5078518", "text": "def rank_relevant_docs(relevant_docs, corpus_size, k=None):\n total_doc_scores = {}\n\n # CALCULATE EACH DOC SCORE ACCORDING TO WEIGHTS ON ALL THE SIM FUNCTION: INNER, COSINE, BM25\n for doc in relevant_docs:\n # cosine_score = Ranker.cosine_doc_score(relevant_docs[doc])\n BM25_score = Ranker.BM25_doc_score(relevant_docs[doc], corpus_size)\n inner_product_score = Ranker.inner_product(doc)\n total_doc_scores[doc] = 0.8 * BM25_score + 0.2 * inner_product_score # + 0 * cosine_score\n if total_doc_scores[doc] > Ranker.max_tfidf_score:\n Ranker.max_tfidf_score = total_doc_scores[doc]\n ################################################################################################\n number_of_relevant_docs_found = len(total_doc_scores)\n # trial and error - retrieve top % of the docs\n if k is None:\n k = number_of_relevant_docs_found\n\n # If the query is composed of words from the model try finding the closest doc in the embedding space\n # Else just sort the docs according the tf-idf\n if Ranker.query_vector.any():\n top_sorted_relevant_docs = Ranker.find_closest_documents(relevant_docs, total_doc_scores)\n\n else:\n top_sorted_relevant_docs = sorted(total_doc_scores.items(), key=lambda item: item[1], reverse=True)\n\n return Ranker.retrieve_top_k(list(top_sorted_relevant_docs.keys()), k)", "title": "" }, { "docid": "9902ba110d77df1dab1e7ce25e58b6ca", "score": "0.5065392", "text": "def test_rank(rank_list, word, n):\n _, base_check = zip(*sorted(rank_list[word]))\n # just subtract the mean of the dataset, to make the mean 0\n std_base_check = [data - np.mean(base_check) for data in base_check]\n \n words = []\n for key, values in rank_list.items():\n if key == word:\n continue\n\n _, test_check = zip(*sorted(values))\n # move the data to a mean of 0\n std_test_check = [data - np.mean(test_check) for data in test_check]\n score = brendan_whitney_test(std_base_check, std_test_check)\n \n words.append((score, key))\n \n words = sorted(words)\n return words[:n]", "title": "" }, { "docid": "83df6dab61a64c58bf9698d9be95a89b", "score": "0.50521034", "text": "def _tiebreak_sort(unranked, 
n_results):\n\n n_axis = len(unranked.shape)\n assert (n_axis == 1 or n_axis == 2)\n\n tiebreakers = np.random.random(unranked.shape)\n complex_predictions = np.empty(unranked.shape, dtype=np.complex)\n complex_predictions.real = unranked #score\n complex_predictions.imag = tiebreakers #random numbers to break ties\n\n max_n_docs = unranked.shape[-1]\n max_part = np.minimum(n_results, max_n_docs)\n if max_part == max_n_docs:\n return np.argsort(complex_predictions, axis=-1)\n\n part = np.argpartition(complex_predictions, max_part-1, axis=-1)\n slice_ind = (slice(None),) * (len(unranked.shape)-1)\n slice_ind += (slice(0,max_part),)\n\n if n_axis == 1:\n part_pred = complex_predictions[part[slice_ind]]\n front_sort = np.argsort(part_pred, axis=-1)\n part[slice_ind] = part[slice_ind][front_sort]\n else:\n extra_ind = np.arange(unranked.shape[0])[:,None]\n part_sliced = part[slice_ind]\n extra_ind = np.empty(part_sliced.shape, dtype=np.int32)\n extra_ind[:,:] = np.arange(unranked.shape[0])[:,None]\n part_pred = complex_predictions[extra_ind, part[slice_ind]]\n front_sort = np.argsort(part_pred, axis=-1) #index array from lowest prediction score to highest\n part_sliced[:, :] = part_sliced[extra_ind, front_sort]\n\n return part", "title": "" }, { "docid": "47002efc649dfd56166e24f10d2e66e4", "score": "0.50420797", "text": "def get_mean_ranks(self, level_dict):\n \n X_cond = self.get_levels(level_dict) #Get the condition of the levels \n \n cond_samples = self.__multiply_X_beta(X_cond) \n \n return rk.rank(cond_samples.mean(0), axis = -1)", "title": "" }, { "docid": "d5c5779fc0612a7583e33b6811b0662b", "score": "0.5034726", "text": "def rank_prune_rows(fraction_to_prune, param, param_name, zeros_mask_dict, model=None):\n\n assert param.dim() == 2, \"This thresholding is only supported for 2D weights\"\n ROWS_DIM = 0\n THRESHOLD_DIM = 'Cols'\n rows_mags = param.abs().mean(dim=ROWS_DIM)\n num_rows_to_prune = int(fraction_to_prune * rows_mags.size(0))\n if num_rows_to_prune == 0:\n msglogger.info(\"Too few filters - can't prune %.1f%% rows\", 100*fraction_to_prune)\n return\n bottomk_rows, _ = torch.topk(rows_mags, num_rows_to_prune, largest=False, sorted=True)\n threshold = bottomk_rows[-1]\n zeros_mask_dict[param_name].mask = distiller.group_threshold_mask(param, THRESHOLD_DIM, threshold, 'Mean_Abs')\n msglogger.info(\"L1RankedStructureParameterPruner - param: %s pruned=%.3f goal=%.3f (%d/%d)\", param_name,\n distiller.sparsity(zeros_mask_dict[param_name].mask),\n fraction_to_prune, num_rows_to_prune, rows_mags.size(0))", "title": "" }, { "docid": "02afeade138fbd9e30422d67a4308352", "score": "0.5019727", "text": "def rank_comparison_mode(data: pd.DataFrame, out_dir: Path, rank: bool = True):\n prop_table = build_results_table(data, \"failauc\", original_mode=False)\n if rank:\n prop_table = _add_rank_columns(prop_table, False)\n\n prop_table.columns = pd.MultiIndex.from_tuples(\n map(lambda t: t + (\"P\",), prop_table.columns)\n )\n\n orig_table = build_results_table(data, \"failauc\", original_mode=True)\n cmap = \"Oranges_r\"\n if rank:\n orig_table = _add_rank_columns(orig_table, False)\n cmap = \"Oranges\"\n orig_table.columns = pd.MultiIndex.from_tuples(\n map(lambda t: t + (\"O\",), orig_table.columns)\n )\n\n results_table = pd.concat((prop_table, orig_table), axis=1)\n results_table = results_table[\n list(filter(lambda t: \"ncs\" in t[1], results_table.columns))\n ]\n results_table = _reorder_studies(results_table, add_level=[\"P\", \"O\"])\n\n if rank:\n _formatter = lambda x: 
f\"{int(x):>3d}\"\n else:\n _formatter = (\n lambda x: f\"{x:>3.2f}\"[:4] if \".\" in f\"{x:>3.2f}\"[:3] else f\"{x:>3.2f}\"[:3]\n )\n\n results_table = results_table.rename(\n columns=_dataset_to_display_name,\n level=0,\n )\n\n # Render table\n results_table = results_table.astype(float).applymap(\n lambda val: round(val, 2) if val < 10 else round(val, 1)\n )\n\n gmap_vit = _compute_gmap(\n results_table.loc[\n results_table.index[\n results_table.index.get_level_values(1).str.contains(\"ViT\")\n ],\n results_table.columns,\n ],\n True,\n )\n gmap_cnn = _compute_gmap(\n results_table.loc[\n results_table.index[\n ~results_table.index.get_level_values(1).str.contains(\"ViT\")\n ],\n results_table.columns,\n ],\n True,\n )\n\n ltex = (\n results_table.style.background_gradient(\n cmap,\n axis=None,\n subset=(\n results_table.index[\n results_table.index.get_level_values(1).str.contains(\"ViT\")\n ],\n results_table.columns,\n ),\n gmap=gmap_vit,\n )\n .background_gradient(\n cmap,\n axis=None,\n subset=(\n results_table.index[\n ~results_table.index.get_level_values(1).str.contains(\"ViT\")\n ],\n results_table.columns,\n ),\n gmap=gmap_cnn,\n )\n .highlight_null(props=\"background-color: white;color: black\")\n .format(\n _formatter,\n na_rep=\"*\",\n )\n )\n\n ltex.data.columns = ltex.data.columns.set_names(\n [\"\\\\multicolumn{1}{c}{}\", \"study\", \"ncs-data set\", \"ood protocol\"]\n )\n print(len(results_table.columns))\n ltex = ltex.to_latex(\n convert_css=True,\n hrules=True,\n multicol_align=\"c?\",\n column_format=(\n \"ll?\"\n + 1 * \"*{2}{r}h\"\n + 3 * \"*{2}{r}h\"\n + 3 * \"*{2}{r}h\"\n + 3 * \"*{2}{r}h\"\n + \"*{2}{r}\"\n ),\n )\n\n # Remove toprule\n ltex = list(filter(lambda line: line != r\"\\toprule\", ltex.splitlines()))\n\n # No separators in first header row\n\n # Remove last separator in second header row\n # (this is just `replace(\"?\", \"\", 1)`, but from the right)\n ltex[1] = ltex[1][: ltex[1].rfind(\"?\")] + ltex[1][ltex[1].rfind(\"?\") + 1 :]\n ltex[2] = ltex[2][: ltex[2].rfind(\"?\")] + ltex[2][ltex[2].rfind(\"?\") + 1 :]\n ltex[3] = ltex[3][: ltex[3].rfind(\"?\")] + ltex[3][ltex[3].rfind(\"?\") + 1 :]\n\n # Insert empty row before ViT part\n i = ltex.index(next((x for x in ltex if \"ViT\" in x)))\n ltex.insert(i, \"\\\\midrule \\\\\\\\\")\n\n ltex = \"\\n\".join(ltex)\n\n with open(out_dir / f\"{'rank_' if rank else ''}mode_comparison.tex\", \"w\") as f:\n f.write(ltex)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpdir = Path(tmpdir)\n shutil.copy2(\n out_dir / f\"{'rank_' if rank else ''}mode_comparison.tex\",\n tmpdir / f\"{'rank_' if rank else ''}mode_comparison.tex\",\n )\n with open(tmpdir / \"render.tex\", \"w\") as f:\n f.write(\n LATEX_TABLE_TEMPLATE.replace(\n \"{input_file}\", f\"{'rank_' if rank else ''}mode_comparison.tex\"\n ).replace(\"{metric}\", \"\")\n )\n\n subprocess.run(f\"lualatex render.tex\", shell=True, check=True, cwd=tmpdir)\n shutil.copy2(\n tmpdir / \"render.pdf\",\n out_dir / f\"{'rank_' if rank else ''}mode_comparison.pdf\",\n )", "title": "" }, { "docid": "6098f101e8854546e853d624579c62ef", "score": "0.5018434", "text": "def top_k_accuracy(scores, labels, topk=(1,)):\n res = []\n labels = np.array(labels)[:, np.newaxis]\n for k in topk:\n max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1]\n match_array = np.logical_or.reduce(max_k_preds == labels, axis=1)\n topk_acc_score = match_array.sum() / match_array.shape[0]\n res.append(topk_acc_score)\n return res", "title": "" }, { "docid": 
"7caef37aa463b4a80d7c162cb2ccaf13", "score": "0.5010524", "text": "def rankkern(x):\n n = x.size\n mid = n // 2\n better = ((x >= 0) & (x < x[mid])).sum()\n return better / ((x >= 0).sum() - 1.0)", "title": "" }, { "docid": "8f0d1b666d58ee7407912bcae7008289", "score": "0.50079876", "text": "def threshold(mat, rule, **kwargs):\n try:\n if rule == 'degree':\n return threshold_on_degree(mat, **kwargs)\n elif rule == 'range':\n return threshold_in_range(mat, **kwargs)\n elif rule == 'quantile':\n return threshold_on_quantile(mat, **kwargs)\n elif rule == 'custom':\n return kwargs['custom_thresholder'](mat)\n except KeyError:\n raise ValueError(\"missing threshold parameter\")", "title": "" }, { "docid": "c5806f1242c65904c2d9639b9f7946ec", "score": "0.49923837", "text": "def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):\n scores = box_scores[:, -1]\n boxes = box_scores[:, :-1]\n picked = []\n indexes = np.argsort(scores)\n indexes = indexes[-candidate_size:]\n while len(indexes) > 0:\n current = indexes[-1]\n picked.append(current)\n if 0 < top_k == len(picked) or len(indexes) == 1:\n break\n current_box = boxes[current, :]\n indexes = indexes[:-1]\n rest_boxes = boxes[indexes, :]\n iou = iou_of(\n rest_boxes,\n np.expand_dims(current_box, axis=0),\n )\n indexes = indexes[iou <= iou_threshold]\n\n return box_scores[picked, :]", "title": "" }, { "docid": "6d8269e6ec8c87c21d03d01b1d9798f6", "score": "0.49848667", "text": "def compute_r_measure(pred: np.ndarray, true, max_rank=None) -> float:\n if max_rank is None:\n max_rank = len(true)\n elif max_rank > len(pred):\n max_rank = len(pred)\n return compute_precision(pred[:max_rank], true)", "title": "" }, { "docid": "10bbc150a7ac183b5e109542ae959108", "score": "0.49683604", "text": "def mention_rank(markables,i,feats,weights):\n maxScore = float('-inf')\n maxPos = 0\n for pos in range(i + 1):\n features = feats(markables, pos, i)\n score = sum([features[key] * weights[key] for key in features])\n if score > maxScore:\n maxScore = score\n maxPos = pos\n return maxPos", "title": "" }, { "docid": "d3748d23a6a5db0db72da904ff10e8a6", "score": "0.49607274", "text": "def rank_sentences(doc, doc_matrix, feature_names, top_n=3):\n sents = nltk.sent_tokenize(doc)\n sentences = [nltk.word_tokenize(sent) for sent in sents]\n \"\"\"\n tfidf_sent = [[doc_matrix[feature_names.index(w.lower())]\n for w in sent if w.lower() in feature_names]\n for sent in sentences]\n\n # Calculate Sentence Values\n doc_val = sum(doc_matrix)\n sent_values = [sum(sent) / doc_val for sent in tfidf_sent]\n # Apply Position Weights\n ranked_sents = [pair for pair in zip(range(len(sent_values)), sent_values)]\n print(ranked_sents)\n ranked_sents = sorted(ranked_sents, key=lambda x: x[1] * -1)\n selected_sents = ranked_sents[:top_n]\n sentence_indexes = [i[0] for i in selected_sents]\n \"\"\"\n sentence_indexes = [0, len(sentences) - 1]\n set_sentences = [set(i) for i in sentences]\n index_sentence_set = set()\n for index in sentence_indexes:\n index_sentence_set.update(set_sentences[index])\n for i in range(len(set_sentences)):\n if i in sentence_indexes:\n continue\n sentence = set_sentences[i]\n combined_set = sentence.intersection(index_sentence_set)\n if len(combined_set) < (len(sentence) * 1 // 4): # at least 1/4 of the words are novel\n sentence_indexes.append(i)\n index_sentence_set.update(sentence)\n return sorted(sentence_indexes)", "title": "" }, { "docid": "be299e014d21f34d69c71390d6ef60c1", "score": "0.49555093", "text": "def sort_by_rank(hand):\n 
hand.sort(key = lambda x: x.rank_val)\n return None", "title": "" }, { "docid": "c91cca1321f4ffc247ca6c760bac7920", "score": "0.4949495", "text": "def ranking_precision_score(y_true, y_score, k=10):\n unique_y = np.unique(y_true)\n\n if len(unique_y) > 2:\n raise ValueError(\"Only supported for two relevance levels.\")\n\n pos_label = unique_y[1]\n n_pos = np.sum(y_true == pos_label)\n\n order = np.argsort(y_score)[::-1]\n y_true = np.take(y_true, order[:k])\n n_relevant = np.sum(y_true == pos_label)\n\n # Divide by min(n_pos, k) such that the best achievable score is always 1.0.\n return float(n_relevant) / min(n_pos, k)", "title": "" }, { "docid": "b7936bc320706fb43911f27e959817ac", "score": "0.4945151", "text": "def threshold_metrics(\n frame : pd.DataFrame,\n guess : np.ndarray,\n thresh : float,\n order : List[str],\n ) -> np.ndarray:\n # Remove the padding column, if necessary.\n out = ext.extract_output(frame, order)\n target = utils.remove_padding(frame, out, order)\n guess = utils.remove_padding(frame, guess, order)\n n_hits = number_of_hits(frame)\n matrix = threshold(guess, thresh)\n stack = np.dstack((target, matrix)).transpose((0, 2, 1))\n rights = np.sum([pair[1, np.argmax(pair[0])] == 1 for pair in stack])\n wrongs = np.sum((stack[:, 0] - stack[:, 1] < 0).any(axis=1))\n multi = np.sum(np.sum(matrix, axis=1) > 1) # Hits assigned to multiple.\n no_tks = np.sum(np.sum(matrix, axis=1) < 1) # Hits unassigned to any.\n return np.array([rights, wrongs, multi, no_tks]) / n_hits", "title": "" }, { "docid": "46b96737b69310d125dfb9eeb6a7065c", "score": "0.49445158", "text": "def run_ranker():", "title": "" }, { "docid": "0b1f6cbb62f869c1e5bd4f1be9c9b3d2", "score": "0.49379915", "text": "def clean_and_sort(ranks, data_fold_split, topk):\n argsorted = []\n new_doclist_range = [0]\n\n for qid in range(data_fold_split.doclist_ranges.shape[0] - 1):\n irank = np.argsort(\n ranks[data_fold_split.doclist_ranges[qid]:data_fold_split.doclist_ranges[qid+1]])\n shown_len = min(irank.shape[0], topk)\n argsorted.append(\n data_fold_split.doclist_ranges[qid] + irank[:shown_len])\n new_doclist_range.append(shown_len)\n\n _argsorted = np.concatenate(argsorted, axis=0)\n _doclist_range = np.cumsum(np.array(new_doclist_range), axis=0)\n\n return _argsorted, _doclist_range", "title": "" }, { "docid": "7f38e610cfd60171dce380b9b67e5048", "score": "0.4918152", "text": "def select_top_k_scores(scores_in, pre_nms_num_detections=5000):\n scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])\n\n top_k_scores, top_k_indices = tf.nn.top_k(\n scores_trans, k=pre_nms_num_detections, sorted=False)\n\n return tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose(\n top_k_indices, [0, 2, 1])", "title": "" }, { "docid": "ed2cf427a380f866eda92926ab03a1a2", "score": "0.48990035", "text": "def tiecorrect(rankvals):\n arr = np.sort(rankvals)\n idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]\n cnt = np.diff(idx).astype(np.float64)\n\n size = np.float64(arr.size)\n return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)", "title": "" }, { "docid": "e562fc9bc30f5e4762b387f90f629552", "score": "0.4889505", "text": "def _select_top_k_scores(scores_in, pre_nms_num_detections):\n batch_size, num_anchors, num_class = scores_in.get_shape().as_list()\n scores_trans = tf.transpose(scores_in, perm=[0, 2, 1])\n scores_trans = tf.reshape(scores_trans, [-1, num_anchors])\n\n top_k_scores, top_k_indices = tf.nn.top_k(\n scores_trans, k=pre_nms_num_detections, sorted=True)\n\n top_k_scores = 
tf.reshape(top_k_scores,\n [batch_size, num_class, pre_nms_num_detections])\n top_k_indices = tf.reshape(top_k_indices,\n [batch_size, num_class, pre_nms_num_detections])\n\n return tf.transpose(top_k_scores,\n [0, 2, 1]), tf.transpose(top_k_indices, [0, 2, 1])", "title": "" }, { "docid": "c71ed553f3c7585530423f4e11939170", "score": "0.48893535", "text": "def get_output(interpreter, top_k, score_threshold):\n scores = common.output_tensor(interpreter, 0)\n categories = [\n Category(i, scores[i])\n for i in np.argpartition(scores, -top_k)[-top_k:]\n if scores[i] >= score_threshold\n ]\n return sorted(categories, key=operator.itemgetter(1), reverse=True)", "title": "" }, { "docid": "308db1893f0988a889cc818dd7772d8e", "score": "0.48871407", "text": "def rank_centers(centers_filename = None, centers_df = None, rank_column = 'chi2joint', min_nsubject = 2, min_nr = 1):\n\timport pandas as pd\n\timport ast\n\tfrom tcrdist.summarize import filter_gt, filter_is, test_for_subsets, test_for_almost_subsets\n\t\n\tif centers_filename is not None and centers_df is not None:\n\t\traise ValueError(\"rank centers can use <centers_filename> or <centers_df> but not both\")\n\tif centers_df is None:\n\t\tdf = pd.read_csv(centers_filename)\n\telse: \n\t\tdf = centers_df.copy()\n\n\t# VERY IMPORTANT NOTE, pandas reads lists as strings '[1,2]'; so we use ast.literal_eval to convert back t a list \n\tif not isinstance(df['target_neighbors'][0], list):\n\t\tdf['target_neighbors'] = df['target_neighbors'].apply(lambda s: list(ast.literal_eval(s)))\n\tdf = df.sort_values(rank_column, ascending = False)\n\tdf['novel'] = test_for_almost_subsets(df['target_neighbors'], min_nr)\n\tdf = filter_gt(df, 'nsubject', min_nsubject).copy()\n\tdf = filter_is(df, 'novel', 1).copy()\n\treturn df", "title": "" }, { "docid": "02675889489825585754a58e12e66d9f", "score": "0.48733878", "text": "def create_rank(scores):\n scores = abs(scores)\n n, d = scores.shape\n ranks = []\n for i, score in enumerate(scores):\n # Random permutation to avoid bias due to equal weights.\n idx = np.random.permutation(d)\n permutated_weights = score[idx]\n permutated_rank=(-permutated_weights).argsort().argsort()+1\n rank = permutated_rank[np.argsort(idx)]\n\n ranks.append(rank)\n\n return np.array(ranks)", "title": "" }, { "docid": "a6021205af826d0240c6d3b08d7f6b5a", "score": "0.4870397", "text": "def _preprocess_logits(\n self,\n scores,\n top_k=None,\n top_p=None,\n min_tokens_to_keep=1,\n filter_value=-float(\"Inf\"),\n ):\n if top_k is not None and top_k != 0:\n top_k = min(max(top_k, min_tokens_to_keep), scores.size(-1)) # Safety check\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]\n scores = scores.masked_fill(indices_to_remove, filter_value)\n if top_p is not None and top_p < 1.0:\n sorted_logits, sorted_indices = torch.sort(scores, descending=True)\n cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)\n\n # Remove tokens with cumulative top_p above the threshold (token with 0 are kept)\n sorted_indices_to_remove = cumulative_probs > top_p\n if min_tokens_to_keep > 1:\n # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)\n sorted_indices_to_remove[..., : min_tokens_to_keep - 1] = 0\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[\n ..., :-1\n ].clone()\n sorted_indices_to_remove[..., 
0] = 0\n\n # scatter sorted tensors to original indexing\n indices_to_remove = sorted_indices_to_remove.scatter(\n 1, sorted_indices, sorted_indices_to_remove\n )\n scores = scores.masked_fill(indices_to_remove, filter_value)\n\n return scores", "title": "" }, { "docid": "c2fc6b0dc649d88cacff2d0ecaba2916", "score": "0.48663607", "text": "def textrank(document): \n if isinstance(document, basestring): # if the document is not tokenized\n sentences = sent_tokenize_func(document)\n else: # if the sentences are already tokenized\n sentences = document\n\n # vectorizer\n vectorizer = CountVectorizer()\n\n # vectorize individual sentences counting up the word occurences\n matrix = vectorizer.fit_transform(sentences)\n\n # normalize counts\n normalized = normalize_func(matrix)\n\n # creates a similarity graph matrix\n similarity_graph = similarity_func(normalized)\n\n # converts matrix to a networkx' graph struct\n nx_graph = nx.to_networkx_graph(similarity_graph)\n\n # use PageRank to score vertices in the graph\n scores = nx.pagerank(nx_graph)\n\n # return vertices ordered by score\n return sorted((TextRank(score=scores[i], sentence=sentence, document=i)\n for i, sentence in enumerate(sentences)),\n reverse=True)", "title": "" }, { "docid": "d45efa0adc4e3abc91333e6c8a6eaded", "score": "0.4864663", "text": "def _re_rank(self, ranking, adjust):\n protected = []\n non_protected = []\n for item in ranking:\n if item.is_protected:\n protected.append(item)\n else:\n non_protected.append(item)\n\n return re_ranker.fair_top_k(self.k, protected, non_protected,\n self.create_adjusted_mtable() if adjust else self.create_unadjusted_mtable())", "title": "" }, { "docid": "10d274eae60eef594cb282d356e0db9c", "score": "0.4860588", "text": "def optimize_in_single_dimension(pvalues, a_max, image_to_node, score_function):\n\n alpha_thresholds = ScanningOps.create_alpha_thresholds(pvalues, a_max, thresholds_mode = 'set_number')\n # print(alpha_thresholds)\n \n# alpha_thresholds = np.unique(pvalues[:, :, 1])\n\n# #alpha_thresholds = alpha_thresholds[0::5] #take every 5th for speed purposes\n# # where does a_max fall in check\n# last_alpha_index = np.searchsorted(alpha_thresholds, a_max)\n# # resize check for only ones smaller than a_max\n# alpha_thresholds = alpha_thresholds[0:last_alpha_index]\n\n# step_for_50 = len(alpha_thresholds) / 50\n# alpha_thresholds = alpha_thresholds[0::int(step_for_50)+1]\n# # add on the max value to check as well as it may not have been part of unique\n# alpha_thresholds = np.append(alpha_thresholds, a_max)\n\n# #alpha_thresholds = np.arange(a_max/50, a_max, a_max/50)\n\n# unsort_priority = np.zeros(pvalues.shape[1])\n\n unsort_priority = None\n\n if image_to_node:\n number_of_elements = pvalues.shape[1] # searching over j columns\n size_of_given = pvalues.shape[0] # for fixed this many images\n unsort_priority = np.zeros(\n (pvalues.shape[1], alpha_thresholds.shape[0])) # number of columns\n else:\n number_of_elements = pvalues.shape[0] # searching over i rows\n size_of_given = pvalues.shape[1] # for this many fixed nodes\n unsort_priority = np.zeros(\n (pvalues.shape[0], alpha_thresholds.shape[0])) # number of rows\n\n for elem_indx in range(0, number_of_elements):\n #sort all the range maxes\n if image_to_node:\n # collect ranges over images(rows)\n arg_sort_max = np.argsort(pvalues[:, elem_indx, 1])\n #arg_sort_min = np.argsort(pvalues[:,e,0]) #collect ranges over images(rows)\n completely_included = np.searchsorted(\n pvalues[:, elem_indx, 1][arg_sort_max], alpha_thresholds, 
side='right')\n else:\n # collect ranges over nodes(columns)\n arg_sort_max = np.argsort(pvalues[elem_indx, :, 1])\n #arg_sort_min = np.argsort(pvalues[elem_indx,:,0])\n\n completely_included = np.searchsorted(\n pvalues[elem_indx, :, 1][arg_sort_max], alpha_thresholds, side='right')\n\n #print('complete included shape', completely_included.shape)\n # should be num elements by num thresh\n unsort_priority[elem_indx, :] = completely_included\n\n # print(\"unsort priority\", unsort_priority)\n # want to sort for a fixed thresh (across?)\n arg_sort_priority = np.argsort(-unsort_priority, axis=0)\n # print(\"arg_sort_priority\", arg_sort_priority)\n\n best_score_so_far = -10000\n best_alpha = -2\n\n alpha_count = 0\n for alpha_threshold in alpha_thresholds:\n\n # score each threshold by itself, cumulating priority,\n # cumulating count, alpha stays same.\n alpha_v = np.ones(number_of_elements)*alpha_threshold\n\n # may need to reverse this?\n n_alpha_v = np.cumsum(\n unsort_priority[:, alpha_count][arg_sort_priority][:, alpha_count])\n count_increments_this = np.ones(number_of_elements)*size_of_given\n n_v = np.cumsum(count_increments_this)\n\n vector_of_scores = score_function(n_alpha_v, n_v, alpha_v)\n\n best_score_for_this_alpha_idx = np.argmax(vector_of_scores)\n best_score_for_this_alpha = vector_of_scores[best_score_for_this_alpha_idx]\n\n if best_score_for_this_alpha > best_score_so_far:\n best_score_so_far = best_score_for_this_alpha\n best_size = best_score_for_this_alpha_idx + 1 # not sure 1 is needed?\n best_alpha = alpha_threshold\n best_alpha_count = alpha_count\n alpha_count = alpha_count + 1\n\n # after the alpha for loop we now have best score, best alpha, size of best subset,\n # and alpha counter use these with the priority argsort to reconstruct the best subset\n\n unsort = arg_sort_priority[:, best_alpha_count]\n\n subset = np.zeros(best_size).astype(int)\n for loc in range(0, best_size):\n subset[loc] = unsort[loc]\n \n return(best_score_so_far, subset, best_alpha)", "title": "" }, { "docid": "3113f643f943f1f8c8002433f8d4920a", "score": "0.48513332", "text": "def _streaming_confusion_matrix_at_thresholds(\n predictions, labels, thresholds, weights=None, includes=None):\n all_includes = ('tp', 'fn', 'tn', 'fp')\n if includes is None:\n includes = all_includes\n else:\n for include in includes:\n if include not in all_includes:\n raise ValueError('Invaild key: %s.' 
% include)\n\n predictions, labels, weights = _remove_squeezable_dimensions(\n predictions, labels, weights)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n num_thresholds = len(thresholds)\n\n # Reshape predictions and labels.\n predictions_2d = array_ops.reshape(predictions, [-1, 1])\n labels_2d = array_ops.reshape(\n math_ops.cast(labels, dtype=dtypes.bool), [1, -1])\n\n # Use static shape if known.\n num_predictions = predictions_2d.get_shape().as_list()[0]\n\n # Otherwise use dynamic shape.\n if num_predictions is None:\n num_predictions = array_ops.shape(predictions_2d)[0]\n thresh_tiled = array_ops.tile(\n array_ops.expand_dims(array_ops.constant(thresholds), [1]),\n array_ops.stack([1, num_predictions]))\n\n # Tile the predictions after thresholding them across different thresholds.\n pred_is_pos = math_ops.greater(\n array_ops.tile(array_ops.transpose(predictions_2d), [num_thresholds, 1]),\n thresh_tiled)\n if ('fn' in includes) or ('tn' in includes):\n pred_is_neg = math_ops.logical_not(pred_is_pos)\n\n # Tile labels by number of thresholds\n label_is_pos = array_ops.tile(labels_2d, [num_thresholds, 1])\n if ('fp' in includes) or ('tn' in includes):\n label_is_neg = math_ops.logical_not(label_is_pos)\n\n if weights is not None:\n broadcast_weights = _broadcast_weights(\n math_ops.to_float(weights), predictions)\n weights_tiled = array_ops.tile(array_ops.reshape(\n broadcast_weights, [1, -1]), [num_thresholds, 1])\n thresh_tiled.get_shape().assert_is_compatible_with(\n weights_tiled.get_shape())\n else:\n weights_tiled = None\n\n values = {}\n update_ops = {}\n\n if 'tp' in includes:\n true_positives = _create_local('true_positives', shape=[num_thresholds])\n is_true_positive = math_ops.to_float(\n math_ops.logical_and(label_is_pos, pred_is_pos))\n if weights_tiled is not None:\n is_true_positive *= weights_tiled\n update_ops['tp'] = state_ops.assign_add(\n true_positives, math_ops.reduce_sum(is_true_positive, 1))\n values['tp'] = true_positives\n\n if 'fn' in includes:\n false_negatives = _create_local('false_negatives', shape=[num_thresholds])\n is_false_negative = math_ops.to_float(\n math_ops.logical_and(label_is_pos, pred_is_neg))\n if weights_tiled is not None:\n is_false_negative *= weights_tiled\n update_ops['fn'] = state_ops.assign_add(\n false_negatives, math_ops.reduce_sum(is_false_negative, 1))\n values['fn'] = false_negatives\n\n if 'tn' in includes:\n true_negatives = _create_local('true_negatives', shape=[num_thresholds])\n is_true_negative = math_ops.to_float(\n math_ops.logical_and(label_is_neg, pred_is_neg))\n if weights_tiled is not None:\n is_true_negative *= weights_tiled\n update_ops['tn'] = state_ops.assign_add(\n true_negatives, math_ops.reduce_sum(is_true_negative, 1))\n values['tn'] = true_negatives\n\n if 'fp' in includes:\n false_positives = _create_local('false_positives', shape=[num_thresholds])\n is_false_positive = math_ops.to_float(\n math_ops.logical_and(label_is_neg, pred_is_pos))\n if weights_tiled is not None:\n is_false_positive *= weights_tiled\n update_ops['fp'] = state_ops.assign_add(\n false_positives, math_ops.reduce_sum(is_false_positive, 1))\n values['fp'] = false_positives\n\n return values, update_ops", "title": "" }, { "docid": "d8e9a84cfa044438148d4a1b75ad2ff6", "score": "0.48247913", "text": "def assert_rank(tensor, expected_rank, name=None):\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n 
expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n raise ValueError(\n \"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not \"\n \"equal to the expected tensor rank `%s`\" %\n (name, actual_rank, str(tensor.shape), str(expected_rank)))", "title": "" }, { "docid": "476ce4c4dfc6a329c9c084c95774d2b0", "score": "0.48206306", "text": "def score_model(\n y_true_binned,\n prediction_score,\n prediction_threshold=0.5,\n scoring_functions=SCORING_FUNCTIONS,\n):\n results = {\"prediction_threshold\": prediction_threshold}\n prediction = 1 * (np.array(prediction_score) >= prediction_threshold)\n\n for metric, function in scoring_functions.items():\n if function[TYPE] == SCORE:\n args = (y_true_binned, prediction_score)\n elif function[TYPE] == BIN:\n args = (y_true_binned, prediction)\n elif function[TYPE] == BOTH:\n args = (y_true_binned, prediction, prediction_score)\n else:\n raise ValueError(f\"TYPE = {function[TYPE]} not supported\")\n try:\n results[metric] = function[FUNCTION](*args, **function.get(KWARGS, {}))\n except Exception as e:\n print(e)\n results[metric] = 1\n if can_compute_enrichment_factor(results):\n results[ENRICHMENT] = results[\"precision\"] / results[\"proportion_active\"]\n return results", "title": "" }, { "docid": "1b253663ee7f51223c24e77cf41b27e8", "score": "0.48086765", "text": "def match(threshold, truths, priors):\n # jaccard index\n overlaps = jaccard(\n truths,\n priors\n )\n # (Bipartite Matching)\n # [1,num_objects] best prior for each ground truth\n best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)\n # [1,num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)\n best_truth_idx.squeeze_(0)\n best_truth_overlap.squeeze_(0)\n best_prior_idx.squeeze_(1)\n best_prior_overlap.squeeze_(1)\n best_truth_overlap.index_fill_(0, best_prior_idx, 2) # ensure best prior\n\n # TODO refactor: index best_prior_idx with long tensor\n # ensure every gt matches with its prior of max overlap\n for j in range(best_prior_idx.size(0)):\n best_truth_idx[best_prior_idx[j]] = j\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n # conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n # conf[best_truth_overlap < threshold] = 0 # label as background\n # loc = encode(matches, priors, variances)\n # loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n # conf_t[idx] = conf # [num_priors] top class label for each prior\n print(f\"overlaps: {overlaps}\")\n return matches", "title": "" }, { "docid": "b50b95e7cfd475e19a3b86b5311976e0", "score": "0.4800826", "text": "def transform_subtile_rank(\n transform_func: Callable[[np.ndarray], np.ndarray],\n rank: int,\n layout: Tuple[int, int],\n):\n total_ranks = layout[0] * layout[1]\n rank_array = np.arange(total_ranks).reshape(layout)\n transformed_rank_array = transform_func(rank_array)\n return rank_array[np.where(transformed_rank_array == rank)][0]", "title": "" }, { "docid": "71eb472cb3c6c8dc15cfeabee2246397", "score": "0.47967908", "text": "def compute_ranking(self, scores):\n # sort k=range(...) 
in decreasing order of the netflows[k]\n ranking = sorted(range(len(scores)), key=lambda k: scores[k],\n reverse=True)\n return ranking", "title": "" }, { "docid": "bf93ab181e66320c0b5e8aaca84dc385", "score": "0.4793026", "text": "def calculate_ranking(experiment):\n optimal_stats_wide = calculate_optimal_stats_wide(experiment, append_std=False)\n ranking = optimal_stats_wide.apply(lambda row: _return_row_ranking(row[3:], SCORERS[row[2]]._sign), axis=1)\n return pd.concat([optimal_stats_wide.iloc[:, :3], ranking], axis=1)", "title": "" }, { "docid": "20ea58217880b00f580a767d2b68191a", "score": "0.4789104", "text": "def ranking_eval(\n model,\n metrics,\n train_set,\n test_set,\n val_set=None,\n rating_threshold=1.0,\n exclude_unknowns=True,\n verbose=False,\n props=None,\n):\n\n if len(metrics) == 0:\n return [], []\n\n avg_results = []\n user_results = [{} for _ in enumerate(metrics)]\n\n gt_mat = test_set.csr_matrix\n train_mat = train_set.csr_matrix\n val_mat = None if val_set is None else val_set.csr_matrix\n\n def pos_items(csr_row):\n return [\n item_idx\n for (item_idx, rating) in zip(csr_row.indices, csr_row.data)\n if rating >= rating_threshold\n ]\n\n for user_idx in tqdm.tqdm(test_set.user_indices, disable=not verbose, miniters=100):\n test_pos_items = pos_items(gt_mat.getrow(user_idx))\n if len(test_pos_items) == 0:\n continue\n\n u_gt_pos = np.zeros(test_set.num_items, dtype='float')\n u_gt_pos[test_pos_items] = 1\n\n val_pos_items = [] if val_mat is None else pos_items(val_mat.getrow(user_idx))\n train_pos_items = (\n []\n if train_set.is_unk_user(user_idx)\n else pos_items(train_mat.getrow(user_idx))\n )\n\n u_gt_neg = np.ones(test_set.num_items, dtype='int')\n u_gt_neg[test_pos_items + val_pos_items + train_pos_items] = 0\n\n item_indices = None if exclude_unknowns else np.arange(test_set.num_items)\n item_rank, item_scores = model.rank(user_idx, item_indices)\n\n total_pi = 0.0\n if props is not None:\n for idx, e in enumerate(u_gt_pos):\n if e > 0 and props[str(idx)] > 0:\n u_gt_pos[idx] /= props[str(idx)]\n total_pi += 1 / props[str(idx)]\n\n for i, mt in enumerate(metrics):\n mt_score = mt.compute(\n gt_pos=u_gt_pos,\n gt_neg=u_gt_neg,\n pd_rank=item_rank,\n pd_scores=item_scores,\n )\n\n user_results[i][user_idx] = mt_score\n\n # avg results of ranking metrics\n for i, mt in enumerate(metrics):\n avg_results.append(sum(user_results[i].values()) / len(user_results[i]))\n\n return avg_results, user_results", "title": "" }, { "docid": "9fb2833206736e027598531cbcc8271b", "score": "0.47856644", "text": "def rank(self, X, Y):\n def my_tokenizer(text):\n initial_tokens = list(tokenize(text, lowercase=True, deacc=True))\n filtered_tokens = [t for t in initial_tokens if len(t)>2]\n return filtered_tokens\n\n tokenized_corpus = [my_tokenizer(y) for y in Y]\n tokenized_query = my_tokenizer(X[0][0])\n# dictionary = Dictionary(documents=tokenized_corpus)\n# corpus = [dictionary.doc2bow(y) for y in tokenized_corpus]\n# query = dictionary.doc2bow(tokenized_query)\n\n bm = BM25(tokenized_corpus)\n average_idf = sum(map(lambda k: float(bm.idf[k]), bm.idf.keys())) / len(bm.idf.keys())\n scores = bm.get_scores(tokenized_query, average_idf)\n return scores", "title": "" }, { "docid": "2019407953eb771ed1ef78589e0ff08c", "score": "0.47831088", "text": "def mean_percentile_rank(dataset_users, model):\r\n users_k=np.zeros(len(dataset_users))\r\n for user_id in dataset_users:\r\n predictions = model.get_user_predictions(user_id)\r\n # last index is the movie name with the highest rating\r\n 
indexes = np.argsort(predictions)\r\n percentiles = np.zeros(len(indexes))\r\n # going over movies from last to first (highest rating to lowest)\r\n for position, movie_id in enumerate(indexes[::-1]):\r\n # the percentile is the place of that movie divided by the len of all movies\r\n percentile = 1 - (position / len(indexes))\r\n # the array percentiles indexes are the movies, the values are the percentile of the movie.\r\n percentiles[movie_id] = percentile\r\n\r\n ground_truth = create_ground_truth(dataset_users[user_id])\r\n\r\n # for each movie count in ground truth (size k) add that movie rank\r\n sum_percentiles = 0\r\n for index, item in enumerate(ground_truth):\r\n sum_percentiles += percentiles[item-1]\r\n\r\n # divide by number of movies in ground truth (k in our case)\r\n sum_percentiles = sum_percentiles / len(ground_truth)\r\n\r\n users_k[user_id-1] = sum_percentiles\r\n\r\n return np.mean(users_k)", "title": "" }, { "docid": "694ecb399297e6521ad7bd5d344acdb1", "score": "0.47766444", "text": "def interpretability_performance(scores, x_model, threshold=0.01):\n\n pr_score = []\n roc_score = []\n for j, score in enumerate(scores):\n\n # calculate information of ground truth\n gt_info = np.log2(4) + np.sum(x_model[j] * np.log2(x_model[j] + 1e-10), axis=1)\n\n # set label if information is greater than 0\n label = np.zeros(gt_info.shape)\n label[gt_info > threshold] = 1\n\n # (don't evaluate over low info content motif positions)\n index = np.where((gt_info > threshold) | (gt_info == np.min(gt_info)))[0]\n\n # precision recall metric\n precision, recall, thresholds = precision_recall_curve(\n label[index], score[index]\n )\n pr_score.append(auc(recall, precision))\n\n # roc curve\n fpr, tpr, thresholds = roc_curve(label[index], score[index])\n roc_score.append(auc(fpr, tpr))\n\n roc_score = np.array(roc_score)\n pr_score = np.array(pr_score)\n\n return roc_score, pr_score", "title": "" }, { "docid": "f14d453c9d665715534c0b0e0a86fc91", "score": "0.47750986", "text": "def make_rank_func(weights):\n def rank(matchinfo):\n \"\"\"\n `matchinfo` is defined as returning 32-bit unsigned integers in\n machine byte order (see http://www.sqlite.org/fts3.html#matchinfo)\n and `struct` defaults to machine byte order.\n \"\"\"\n bufsize = len(matchinfo) # Length in bytes.\n matchinfo = [struct.unpack(b'I', matchinfo[i:i + 4])[0]\n for i in range(0, bufsize, 4)]\n it = iter(matchinfo[2:])\n return sum(x[0] * w / x[1]\n for x, w in zip(zip(it, it, it), weights)\n if x[1])\n return rank", "title": "" }, { "docid": "0256654cfbd465d6595cdd4707004bbf", "score": "0.47725248", "text": "def apply_topk(topk: float, weight: torch.Tensor, return_scale_factors=False):\n # Retain only the topk weights, multiplying the rest by 0.\n frac_to_zero = 1 - topk\n with torch.no_grad():\n flat_weight = weight.flatten()\n # Want to convert it away from a special tensor, hence the float() call.\n _, idx = flat_weight.float().abs().sort()\n # @idx is a @special_tensors._SpecialTensor, but we need to convert it\n # to a normal tensor for indexing to work properly.\n idx = torch.tensor(idx, requires_grad=False)\n f = int(frac_to_zero * weight.numel())\n scale_factors = torch.ones_like(flat_weight, requires_grad=False)\n scale_factors[idx[:f]] = 0\n scale_factors = scale_factors.view_as(weight)\n\n ret = weight * scale_factors\n\n if return_scale_factors:\n return ret, scale_factors\n\n return ret", "title": "" }, { "docid": "aa2c7e8c54873065460c66289b8c3a4e", "score": "0.47604322", "text": "def L1_pruning(args, weight, 
prune_ratio):\n percent = prune_ratio * 100\n weight = weight.cpu().detach().numpy() # convert cpu tensor to numpy\n shape = weight.shape\n weight2d = weight.reshape(shape[0], -1)\n shape2d = weight2d.shape\n row_l1_norm = LA.norm(weight2d, 1, axis=1)\n percentile = np.percentile(row_l1_norm, percent)\n under_threshold = row_l1_norm < percentile\n above_threshold = row_l1_norm > percentile\n weight2d[under_threshold, :] = 0\n above_threshold = above_threshold.astype(np.float32)\n expand_above_threshold = np.zeros(shape2d, dtype=np.float32)\n for i in range(shape2d[0]):\n expand_above_threshold[i, :] = above_threshold[i]\n weight = weight.reshape(shape)\n expand_above_threshold = expand_above_threshold.reshape(shape)\n return torch.from_numpy(expand_above_threshold).cuda(), torch.from_numpy(weight).cuda()", "title": "" }, { "docid": "593dead35288d15f7bbaccee1adb986b", "score": "0.47601005", "text": "def threshold_predictions(predictions, thr=0.999):\n if thr is None:\n return predictions[:]\n\n thresholded_preds = predictions[:]\n low_values_indices = thresholded_preds <= thr\n thresholded_preds[low_values_indices] = 0\n low_values_indices = thresholded_preds > thr\n thresholded_preds[low_values_indices] = 1\n return thresholded_preds", "title": "" }, { "docid": "bf90b5b2d8c9997d0c0fcfaf848de3d2", "score": "0.47554255", "text": "def topk_accuracy(scores, labels, ks, selected_class=None):\n if selected_class is not None:\n idx = labels == selected_class\n scores = scores[idx]\n labels = labels[idx]\n rankings = scores.argsort()[:, ::-1]\n # trim to max k to avoid extra computation\n maxk = np.max(ks)\n\n # compute true positives in the top-maxk predictions\n tp = rankings[:, :maxk] == labels.reshape(-1, 1)\n\n # trim to selected ks and compute accuracies\n return [tp[:, :k].max(1).mean() for k in ks]", "title": "" }, { "docid": "9d262b361b4f9c7b802b756fa3efc52f", "score": "0.47515237", "text": "def textrank(\n string: str,\n model=None,\n vectorizer=None,\n top_k: int = 5,\n atleast: int = 1,\n stopwords=get_stopwords,\n **kwargs,\n):\n stopwords = validator.validate_stopwords(stopwords)\n\n if not hasattr(model, 'fit_transform') and not hasattr(model, 'vectorize'):\n raise ValueError(\n 'model must have `fit_transform` or `vectorize` method'\n )\n\n if top_k < 1:\n raise ValueError('top_k must bigger than 0')\n if atleast < 1:\n raise ValueError('atleast must bigger than 0')\n if not vectorizer:\n auto_ngram = True\n else:\n auto_ngram = False\n if not hasattr(vectorizer, 'fit'):\n raise ValueError('vectorizer must have `fit` method')\n if auto_ngram and not len(stopwords):\n raise ValueError('insert stopwords if auto_ngram')\n\n if auto_ngram:\n vocab = _auto_ngram(string, stopwords)\n else:\n vocab = _base(string, vectorizer=vectorizer, **kwargs)\n\n if hasattr(model, 'fit_transform'):\n vectors = model.fit_transform(list(vocab.keys()))\n if hasattr(model, 'vectorize'):\n vectors = model.vectorize(list(vocab.keys()))\n similar = cosine_similarity(vectors, vectors)\n similar[similar >= 0.99999] = 0\n scores = pagerank(similar, **kwargs)\n total = sum(scores)\n ranked_sentences = sorted(\n [\n (scores[i] / total, s)\n for i, s in enumerate(vocab.keys())\n if vocab[s] >= atleast\n ],\n reverse=True,\n )\n\n return ranked_sentences[:top_k]", "title": "" }, { "docid": "12ea60618ef70cf6a512774d06bff70a", "score": "0.4747656", "text": "def threshold_cost(\n threshold: float,\n actuals: pd.Series,\n preds: pd.Series,\n cost_matrix: np.ndarray,\n) -> float:\n cost_tn, cost_fp, cost_fn, cost_tp = 
cost_matrix.ravel()\n bin_preds = np.where(preds >= threshold, 1, 0)\n matrix = metrics.confusion_matrix(actuals, bin_preds)\n tn, fp, fn, tp = matrix.ravel()\n\n # Punish the scores.\n cost = (tp * cost_tp) + (tn * cost_tn) + (fn * cost_fn) + (fp * cost_fp)\n\n return cost", "title": "" }, { "docid": "1316385332f564c23efbb3590d3d8165", "score": "0.47407508", "text": "def patch_scoring(M, threshold=0.):\r\n # Cloning important\r\n A = M.clone()\r\n\r\n # Zero diagonal\r\n A.fill_diagonal_(0)\r\n\r\n # Make sure symmetric and non nul\r\n A[A < 0] = 0\r\n C = A + A.t()\r\n\r\n # Sort pixels by inverse degree\r\n cent = -torch.sum(A > threshold, dim=1).type(torch.float32)\r\n sel = torch.argsort(cent, descending=True)\r\n\r\n return sel, cent", "title": "" }, { "docid": "293186c7dddcea683351ee4008536047", "score": "0.47376472", "text": "def predict(user_movie_pair,rating_matrix):\n learning_matrix = create_learning_matrices2(rating_matrix,user_movie_pair)\n nb_rank = np.sum(learning_matrix,axis=1)\n for i in range(5):\n learning_matrix[:,i] *= (i+1)\n learning_matrix[:,i+4] *= (i+1)\n total = np.sum(learning_matrix,axis=1)\n return total/nb_rank", "title": "" }, { "docid": "8585e0cef6891d4345271cf3e99ae985", "score": "0.4732902", "text": "def threshold(\n matrix : np.ndarray,\n thresh : float\n ) -> np.ndarray:\n threshold_matrix = np.copy(matrix)\n threshold_matrix[thresh > matrix] = 0\n threshold_matrix[thresh <= matrix] = 1\n return threshold_matrix", "title": "" }, { "docid": "08aed28c47742b90f76cf36584f28bc5", "score": "0.4730629", "text": "def feature_ranking(self, W):\n mcfs_score = W.max(axis=1)\n ranking = np.argsort(mcfs_score)[::-1]\n return ranking[:self.d]", "title": "" }, { "docid": "ece073f8cb2d14f0a37b0df391d6f2bf", "score": "0.472248", "text": "def rank_keyphrases(keyphrases, top_N_scores, M):\n \n scores = {keyphrase: sum([top_N_scores[token] / len(keyphrase) for token in keyphrase])\n for keyphrase in keyphrases}\n \n top_M_scores = {k_p: score for k_p, score in sorted(scores.items(),\n key = lambda item: item[1],\n reverse = True)[:M]}\n \n return top_M_scores", "title": "" }, { "docid": "e9928e6699a83316645f37f151566dcf", "score": "0.4718976", "text": "def low_rank_matrix_factorization(ratings, mask=None, num_features=15, regularization_amount=0.01):\r\n num_users, num_products = ratings.shape\r\n\r\n # If no mask is provided, consider all 'NaN' elements as missing and create a mask.\r\n if mask is None:\r\n mask = np.invert(np.isnan(ratings))\r\n\r\n # Replace NaN values with zero\r\n ratings = np.nan_to_num(ratings)\r\n\r\n # Create P and Q and fill with random numbers to start\r\n np.random.seed(0)\r\n P = np.random.randn(num_users, num_features)\r\n Q = np.random.randn(num_products, num_features)\r\n\r\n # Roll up P and Q into a contiguous array as fmin_cg expects\r\n initial = np.append(P.ravel(), Q.ravel())\r\n\r\n # Create an args array as fmin_cg expects\r\n args = (num_users, num_products, num_features, ratings, mask, regularization_amount)\r\n\r\n # Call fmin_cg to minimize the cost function and this find the best values for P and Q\r\n X = fmin_cg(cost, initial, fprime=gradient, args=args, maxiter=3000)\r\n\r\n # Unroll the new P and new Q arrays out of the contiguous array returned by fmin_cg\r\n nP = X[0:(num_users * num_features)].reshape(num_users, num_features)\r\n nQ = X[(num_users * num_features):].reshape(num_products, num_features)\r\n\r\n return nP, nQ.T", "title": "" }, { "docid": "6f03bdb196226dca17f502163afad7a6", "score": "0.4714063", "text": "def 
determineTopKDocuments(self,scores,k):\n candidateCounter = 0;\n for i in sorted(scores, key=scores.get, reverse=True):\n candidateCounter += 1;\n print (\"Document : \" + self.docIDToFnameMap[i]);\n if (candidateCounter == int(k)):\n break;\n print (\"candidateCounter\",candidateCounter);", "title": "" }, { "docid": "10dc43ba36382038e8f4b4806ed898ee", "score": "0.470868", "text": "def rank(ranked_list):\n return sorted(ranked_list, key=lambda x: x[0])", "title": "" }, { "docid": "f202c325d77e33e7b5f043d1b8cf14d2", "score": "0.4706679", "text": "def map_rank(traingnd, testgnd, hamming_rank):\n numtrain, numtest = hamming_rank.shape\n apall = np.zeros((numtrain, numtest))\n aa = np.array([i+1 for i in range(numtrain)])\n for i in range(numtest):\n y = hamming_rank[:, i]\n new_label = np.array([0 for j in range(numtrain)])\n relevant_indices = (np.matmul(traingnd, testgnd[i, :].reshape((-1, 1))) > 0).reshape(-1)\n new_label[relevant_indices] = 1\n xx = np.cumsum(new_label[y])\n x = xx * new_label[y]\n p = x / aa\n p = np.cumsum(p)\n mask = (p != 0)\n p[mask] = p[mask]/xx[mask]\n apall[:, i] = p.copy()\n mAP = np.mean(apall, axis=1)\n return mAP", "title": "" }, { "docid": "4fc0d2dee1abc4b8af5d8fdf5077869b", "score": "0.46988523", "text": "def get_interesting_ranks(test_results):\n rank_mat = test_results.get_rank_mat()\n # Find rows which scored differently over the various configs FIXME: duplicated\n isdiff_flags = [not np.all(row == row[0]) for row in rank_mat]\n #diff_aids = ut.compress(test_results.qaids, isdiff_flags)\n diff_rank = rank_mat.compress(isdiff_flags, axis=0)\n diff_qxs = np.where(isdiff_flags)[0]\n if False:\n rankcategory = np.log(diff_rank + 1)\n else:\n rankcategory = diff_rank.copy()\n rankcategory[diff_rank == 0] = 0\n rankcategory[diff_rank > 0] = 1\n rankcategory[diff_rank > 2] = 2\n rankcategory[diff_rank > 5] = 3\n rankcategory[diff_rank > 50] = 4\n rankcategory[diff_rank > 100] = 5\n row_rankcategory_std = np.std(rankcategory, axis=1)\n row_rankcategory_mean = np.mean(rankcategory, axis=1)\n import vtool as vt\n row_sortx = vt.argsort_multiarray(\n [row_rankcategory_std, row_rankcategory_mean], reverse=True)\n interesting_qx_list = diff_qxs.take(row_sortx).tolist()\n #print(\"INTERSETING MEASURE\")\n #print(interesting_qx_list)\n #print(row_rankcategory_std)\n #print(ut.take(qaids, row_sortx))\n #print(diff_rank.take(row_sortx, axis=0))\n return interesting_qx_list", "title": "" }, { "docid": "af6954c236c6a337050b1393d2111a91", "score": "0.46978393", "text": "def gen_matrix_rank(size: MatSize, rank: int, max_denom: int = 1, max_val: int = 3):\n A, piv = zeros(size[0], size[1]), nr.choice(rank, size[1])\n for i in range(rank):\n A[i,piv[i]] = 1\n for j in range(piv[i]+1, size[1]):\n A[i,j] = nr.choice([a for a in range(-1*max_val + 1, max_val)])\n for i in range(size[0]):\n j = nr.choice(size[0], min(size[0],3), replace=False)\n for k in j:\n if k!=i:\n A = A.elementary_row_op(op=\"n->n+km\", row=i, row2=k, k=nr.choice(max_val)*nr.choice([-1,1]))\n return A", "title": "" }, { "docid": "344949ffc881a63b896ab62936ffbdd9", "score": "0.4694353", "text": "def threshold(col, thresh):\n return 1.0 if col > thresh else 0.0", "title": "" }, { "docid": "6e8806cefd19fdc9aef8ddb37dd0860a", "score": "0.46933538", "text": "def test_position_rank (doc: Doc):\n # given\n position_rank = PositionRankFactory()\n base_text_rank = BaseTextRankFactory()\n\n # when\n processed_doc = position_rank(doc)\n phrases = processed_doc._.phrases\n comparison_doc = base_text_rank(doc)\n 
comparison_phrases = comparison_doc._.phrases\n\n # then\n assert set(p.rank for p in phrases) != set(p.rank for p in comparison_phrases)\n\n # The test article mentions `Chelsea` at the beginning of the\n # article while it mentions `Shanghai Shenhua` anecdotally later\n # in the article. However, with normal TextRank, `Shanghai\n # Shenhua` is part of top 10 phrases and `Chelsea` isn't. With\n # PositionRank, the situation is the opposite, which is the\n # desired outcome when parsing a news article.\n\n assert \"Chelsea\" in [p.text for p in phrases[:10]]\n assert \"Chelsea\" not in [p.text for p in comparison_phrases[:10]]\n assert \"Shanghai Shenhua\" not in \";\".join(p.text for p in phrases[:10])\n assert \"Shanghai Shenhua\" in \";\".join(p.text for p in comparison_phrases[:10])", "title": "" }, { "docid": "112f04f2474eedf7fac41e47ceb36166", "score": "0.4692125", "text": "def recall_curve(trial_ranks, top=None):\n if not isinstance(trial_ranks, np.ndarray):\n trial_ranks = np.array(trial_ranks)\n\n ret = np.zeros(len(trial_ranks))\n if top is None:\n for i in range(len(trial_ranks)):\n ret[i] = np.sum(trial_ranks[:i] <= i) / (i+1)\n else:\n for i in range(len(trial_ranks)):\n ret[i] = 1.0 * np.sum(trial_ranks[:i] < top) / top\n return ret", "title": "" }, { "docid": "6248b43d7da03cff3cfecf7c6602c0ca", "score": "0.46913636", "text": "def rank(self, c, i):\n\n # go down the tree\n node = self.root\n\n rank = i\n while (node is not None) & (rank != 0):\n\n # rank in the bitstring for this node\n rank_prev = rank\n rank = node[1].rank(rank_prev)\n\n # check if c is less then pivot\n if c < node[0]:\n # we need the count of zeros\n rank = rank_prev - rank\n log.debug('Rank in node (left) (%c) = %d', node[0], rank)\n node = node[2]\n\n # c is greater or equal to pivot\n else:\n log.debug('Rank in node (right) (%c) = %d', node[0], rank)\n node = node[3]\n\n return rank", "title": "" }, { "docid": "06191884195bd8f9f29961ac5d6a3d23", "score": "0.46898106", "text": "def score(true_ranks: list, computed_ranks: list):\n number_of_policies = len(true_ranks)\n number_of_pairs = number_of_policies * (number_of_policies - 1) / 2\n number_of_swaps = swap_count(true_ranks, computed_ranks)\n return 1 - number_of_swaps / number_of_pairs", "title": "" }, { "docid": "d17c53746d98a7b23b819b4914ebcdc7", "score": "0.4684393", "text": "def accuracy(scores, true_label, topk=(1,)):\r\n with torch.no_grad():\r\n maxk = max(topk)\r\n batch_size = true_label.size(0)\r\n\r\n _, pred_label = scores.topk(maxk, 1, True, True)\r\n pred_label = pred_label.t()\r\n correct = pred_label.eq(true_label.view(1, -1).expand_as(pred_label))\r\n\r\n result = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\r\n result.append(correct_k.mul_(100.0 / batch_size))\r\n return result", "title": "" }, { "docid": "abc12c89829476e827eb089f6e0998ca", "score": "0.46758547", "text": "def rank_queries(predictions, qptr, max_documents=None, inverted=False):\n\n max_value = np.max(predictions)\n # vector with lenght of each doclist\n n = qptr[1:]-qptr[:-1]\n if not max_documents:\n max_documents = np.max(n)\n\n # the vector of documents is reshaped into a matrix\n # with a document list on every row\n ind = qptr[:-1,None] + np.arange(0,max_documents)[None,:]\n ind = np.minimum(ind,qptr[1:,None]-1)\n # warped is now a matrix of size n_queries x max_documents\n warped = predictions[ind]\n # every document that appears in a row but not in the query list\n # (due to n_query_list < max_documents) gets the 
worst score in off all documents\n # this makes sure they do not appear in the final ranking\n warped[np.arange(max_documents)[None,:] >= n[:,None]] = max_value + 1\n\n # tiebreak sort uses numpy to rank every row in the matrix\n # this is faster than ranking them by seperate calls\n rankings = tiebreak_sort(warped)\n if inverted:\n inverted = invert_rankings(rankings,dtype=np.int32)\n return inverted[np.arange(max_documents)[None,:] < n[:,None]]\n\n else:\n return rankings[np.arange(max_documents)[None,:] < n[:,None]]", "title": "" }, { "docid": "62f2c6e56352d2686b468696cb36d2e9", "score": "0.46756983", "text": "def _rank_samples(fdatagrid):\n ranks = np.zeros(fdatagrid.shape)\n ncols_dim_image = np.asarray([range(fdatagrid.shape[i]) for i in range(len(fdatagrid.shape) - 1, 0, -1)])\n tuples = list(itertools.product(*ncols_dim_image))\n for t in tuples:\n ranks.T[t] = rankdata(fdatagrid.data_matrix.T[t], method='max')\n return ranks", "title": "" }, { "docid": "9332575e32756035e18c8f6e404521d3", "score": "0.467171", "text": "def process(img, upper, lower):\n \n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n \n detects = []\n \n for index in range(len(ranks)):\n \n try: detects.append(classifiers[index].detectMultiScale(gray, scaleFactor=1.05, minNeighbors=4, maxSize=upper, minSize=lower))\n except: detects.append([])\n \n circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 2.5, 25, minRadius=20, maxRadius=30)\n \n widths = []\n heights = []\n \n out = [[], 0]\n \n index = 0\n windows = []\n \n for found in detects[:-2]:\n \n out[0] += [ranks[index]] * len(detects[index])\n index += 1\n \n for (x, y, w, h) in found: \n \n widths.append(w)\n heights.append(h)\n windows.append((x, y, w, h))\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 5) \n \n for found in detects[-2:]: \n \n for (x, y, w, h) in found:\n \n if any([x + w / 2.0 >= xx \n and x + w / 2.0 <= xx + ww \n and y + h / 2.0 >= yy \n and y + h / 2.0 <= yy + hh \n for (xx, yy, ww, hh) in windows]): \n \n continue\n\n out[0] += [1]\n widths.append(w)\n heights.append(h)\n\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 5)\n \n if circles is not None: \n \n circles = np.round(circles[0, :]).astype(\"int\")\n \n for (x, y, r) in circles: \n\n if any([x >= xx \n and x <= xx + ww \n and y >= yy \n and y <= yy + hh \n for (xx, yy, ww, hh) in windows]): \n\n continue\n\n cv2.circle(image, (x, y), r, (255, 0, 255), 4)\n out[1] += 1\n \n if widths and heights: \n \n upper = (max(widths) + 10, max(heights) + 6)\n lower = (min(widths) - 10, min(heights) - 6)\n \n plot.imshow(image)\n plot.show()\n print(\"CURRENT:\", out[0], out[1])\n \n return out[0], out[1], upper, lower", "title": "" }, { "docid": "25eef21ba00edce620e04ea68181ec4e", "score": "0.46676886", "text": "def rank_n_accuracy(y_true, y_prob_mat, n=0.33):\n n_classes = y_prob_mat.shape[1]\n if n < 1:\n # round to nearest int before casting\n n = int(round(n_classes * n))\n\n # sort the rankings in descending order, then take the top n\n rankings = np.argsort(-y_prob_mat)\n rankings = rankings[:, :n]\n\n num_samples = len(y_true)\n correct_sample_count = 0.0 # force floating point math\n\n for i in range(num_samples):\n if y_true[i] in rankings[i, :]:\n correct_sample_count += 1\n\n return old_div(correct_sample_count, num_samples)", "title": "" }, { "docid": "32ff296f7d713e1aeac55ebdf1a4b83f", "score": "0.46652246", "text": "def find_rank(words, popularity_metric):\n words_in_pop_list = [word for word in words 
if word in popularity_metric]\n words_not_in_pop_list = [word for word in words if word not in popularity_metric]\n df_in_list = pd.DataFrame(\n [popularity_metric[word] for word in words_in_pop_list],\n index=words_in_pop_list,\n columns=['count']\n )\n\n df_not_in_list = pd.DataFrame(\n [0]*len(words_not_in_pop_list),\n index=words_not_in_pop_list,\n columns=['count']\n )\n\n df = pd.concat([df_in_list, df_not_in_list])\n df['rank'] = df['count'].rank(ascending=False)\n return df", "title": "" }, { "docid": "90782e301555af3b06546d53acc758bd", "score": "0.46630207", "text": "def rankingMatrix(probabilities):\n ranking = np.zeros(shape=[probabilities.shape[0], probabilities.shape[1]])\n probCopy = np.copy(probabilities)\n for i in range(probabilities.shape[0]):\n indexMost = 0\n iteration = 1\n while(sum(probCopy[i,:]) != 0):\n for j in range(probabilities.shape[1]):\n if probCopy[i,j] > probCopy[i,indexMost]:\n indexMost = j\n ranking[i, indexMost] = iteration\n probCopy[i, indexMost] = 0\n iteration += 1\n \n return ranking", "title": "" }, { "docid": "d07cee72cf18b638b70063f9df9852be", "score": "0.46618044", "text": "def knn_threshold(data, column, threshold=15, k=3):\n from sklearn import neighbors\n \n def ids_centers_sizes(data):\n dat = np.array([(i, data.latitude[data[column]==i].mean(), \n data.longitude[data[column]==i].mean(),\n (data[column]==i).sum()) \n for i in set(list(data[column]))])\n return dat[:,0], dat[:,1:-1].astype(float), dat[:,-1].astype(int)\n\n knn = neighbors.NearestNeighbors(n_neighbors=k)\n while True:\n ids, centers, sizes = ids_centers_sizes(data)\n asrt = np.argsort(sizes)\n \n if sizes[asrt[0]] >= threshold:\n break\n \n cids = np.copy(ids)\n knn.fit(centers)\n \n for i in asrt:\n if sizes[i] < threshold:\n nearest = knn.kneighbors(centers[i])[1].flatten()\n nearest = nearest[nearest != i]\n sel = nearest[np.argmin(sizes[nearest])]\n total_size = sizes[sel] + sizes[i]\n data[column][data[column]==cids[i]] = cids[sel]\n cids[cids==i] = cids[sel]\n sizes[i] = total_size\n sizes[sel] = total_size\n \n return data", "title": "" }, { "docid": "0b80eb7d63db8b4bea10396cbaede314", "score": "0.46612287", "text": "def score_func(scores, predictions, inaction_score, perfect_score, thresh=0):\n # Apply the threshold\n predictions = (predictions > thresh).astype(int)\n\n # Get the actual score\n actual_score = scores[:, 1][predictions == 1].sum() + scores[:, 0][predictions == 0].sum()\n\n # Get the normalized score\n normalized_score = (actual_score - inaction_score) / (perfect_score - inaction_score)\n\n return normalized_score", "title": "" }, { "docid": "5c3fd430802a352cb578ce50fcc691ea", "score": "0.46593824", "text": "def __call__(self, smiles, scores): \n\n fronts = self.getParetoFronts(scores)\n\n rank = []\n # sort all fronts by crowding distance\n for t, front in enumerate(fronts):\n distance = np.zeros(len(front))\n for i in range(scores.shape[1]):\n # sort front small to large for value objective i\n cpu_tensor = scores[front.cpu(), i]\n order = cpu_tensor.argsort()\n front = front[order]\n # set distance value smallest and largest value objective i to large value\n distance[order[0]] = 10 ** 4\n distance[order[-1]] = 10 ** 4\n # get all values of objective i in current front\n m_values = [scores[j, i] for j in front]\n # scale for crowding distance by difference between max and min of objective i in front\n scale = max(m_values) - min(m_values)\n if scale == 0:\n scale = 1\n # calculate crowding distance\n for j in range(1, len(front) - 1):\n 
distance[order[j]] += (scores[front[j + 1], i] - scores[front[j - 1], i]) / scale\n # replace front by front sorted according to crowding distance\n fronts[t] = front[np.argsort(distance)]\n rank.extend(fronts[t].tolist())\n return rank", "title": "" }, { "docid": "59ade0cb8dab59f5cce831efc5f36702", "score": "0.46563745", "text": "def tile_root_rank(self, rank: int) -> int:\n return self.tile.total_ranks * (rank // self.tile.total_ranks)", "title": "" }, { "docid": "ee44ef462e8b20480e81b1827371ded3", "score": "0.46551478", "text": "def binarize(ratings, threshold=2.5):\n ########################################################################\n # Binarize the supplied ratings matrix. #\n # #\n # WARNING: Do not use self.ratings directly in this function. #\n ########################################################################\n\n # The starter code returns a new matrix shaped like ratings but full of\n # zeros.\n binarized_ratings = np.copy(ratings)\n for i in range(len(ratings)):\n vec = ratings[i]\n for j in range(len(vec)):\n if vec[j] == 0:\n continue\n elif vec[j] > threshold:\n binarized_ratings[i][j] = 1\n elif vec[j] <= threshold:\n binarized_ratings[i][j] = -1\n\n return binarized_ratings\n\n ########################################################################\n # END OF YOUR CODE #\n ########################################################################\n return binarized_ratings", "title": "" }, { "docid": "c4e9aef86c967898507ffeca39d4cb01", "score": "0.46529415", "text": "def check_ranking(ranking, mtable):\n count_protected = 0\n\n # if the mtable has a different number elements than there are in the top docs return false\n if len(ranking) != len(mtable):\n raise ValueError(\"Number of documents in ranking and mtable length must be equal!\")\n\n # check number of protected element at each rank\n for i, element in enumerate(ranking):\n count_protected += 1 if element.is_protected else 0\n if count_protected < mtable[i]:\n return False\n return True", "title": "" }, { "docid": "c409604e50c6a668c2f39c3ed7f9ad4a", "score": "0.4642553", "text": "def sgrank(\n doc,\n *,\n normalize=\"lemma\",\n ngrams=(1, 2, 3, 4, 5, 6),\n include_pos=(\"NOUN\", \"PROPN\", \"ADJ\"),\n window_size=1500,\n topn=10,\n idf=None,\n):\n # validate / transform args\n ngrams = utils.to_collection(ngrams, int, tuple)\n include_pos = utils.to_collection(include_pos, str, set)\n if window_size < 2:\n raise ValueError(\"`window_size` must be >= 2\")\n if isinstance(topn, float):\n if not 0.0 < topn <= 1.0:\n raise ValueError(\n \"`topn` must be an int, or a float between 0.0 and 1.0\"\n )\n\n n_toks = len(doc)\n window_size = min(n_toks, window_size)\n # bail out on (nearly) empty docs\n if n_toks < 2:\n return []\n\n candidates, candidate_counts = _get_candidates(doc, normalize, ngrams, include_pos)\n # scale float topn based on total number of initial candidates\n if isinstance(topn, float):\n topn = int(round(len(candidate_counts) * topn))\n candidates, unique_candidates = _prefilter_candidates(\n candidates, candidate_counts, topn, idf)\n\n term_weights = _compute_term_weights(\n candidates, candidate_counts, unique_candidates, n_toks, idf)\n # filter terms to only those with positive weights\n candidates = [cand for cand in candidates if term_weights[cand[0]] > 0]\n edge_weights = _compute_edge_weights(candidates, term_weights, window_size, n_toks)\n\n # build the weighted directed graph from edges, rank nodes by pagerank\n graph = nx.DiGraph()\n graph.add_edges_from(edge_weights)\n term_ranks = 
nx.pagerank_scipy(graph, alpha=0.85, weight=\"weight\")\n sorted_term_ranks = sorted(\n term_ranks.items(), key=operator.itemgetter(1, 0), reverse=True)\n\n return ke_utils.get_filtered_topn_terms(\n sorted_term_ranks, topn, match_threshold=0.8)", "title": "" }, { "docid": "cb5a828abc63cdfeb24e4f804d07fe6b", "score": "0.46411014", "text": "def estimate_tucker_ranks(self, layer, compression_factor=0):\n weights = layer.weight.data.numpy()\n unfold_0 = tl.base.unfold(weights, 0)\n unfold_1 = tl.base.unfold(weights, 1)\n _, diag_0, _, _ = VBMF.EVBMF(unfold_0)\n _, diag_1, _, _ = VBMF.EVBMF(unfold_1)\n ranks = [diag_0.shape[0], diag_1.shape[1]]\n\n if compression_factor:\n # Check if the VBMF ranks are small enough\n ranks = choose_compression(\n layer, ranks, compression_factor=compression_factor, flag='Tucker2')\n\n return ranks", "title": "" } ]
52af6ff21b973aa2cd9c9089aed1f599
Save a dataset in a way that it's readable by load_dataset.
[ { "docid": "ae1e677c9c29db4870dce2133ad9eba1", "score": "0.0", "text": "def save_dataset(filename, theta_vector, amp_dataset, ph_dataset, x_grid):\n\tto_save = np.concatenate((theta_vector, amp_dataset, ph_dataset), axis = 1)\n\ttemp_x_grid = np.zeros((1,to_save.shape[1]))\n\tK = int((to_save.shape[1]-3)/2)\n\ttemp_x_grid[0,3:3+k] = x_grid\n\tto_save = np.concatenate((temp_x_grid,to_save), axis = 0)\n\tq_max = np.max(theta_vector[:,0])\n\tspin_mag_max = np.max(np.abs(theta_vector[:,1:2]))\n\tx_step = x_grid[1]-x_grid[0]\n\tnp.savetxt(filename, to_save, header = \"# row: theta 3 | amp \"+str(amp_dataset.shape[1])+\"| ph \"+str(ph_dataset.shape[1])+\"\\n# N_grid = \"+str(x_grid.shape[0])+\" | f_step =\"+str(x_step)+\" | q_max = \"+str(q_max)+\" | spin_mag_max = \"+str(spin_mag_max), newline = '\\n')\n\treturn", "title": "" } ]
[ { "docid": "e208299ef2e05512fa69fd21543ef0cf", "score": "0.7637095", "text": "def _save_dataset(self):\n dataset = datastore.Resource.new(\n self.dataset_name,\n self._dataset_files,\n self._meta_data)\n repo = self.data_repo()\n repo.save(dataset, overwrite=True)", "title": "" }, { "docid": "4cc779f78e0762ebf1ce68fa53bcab22", "score": "0.74357975", "text": "def save_dataset(dataset):\n logging.info(f\"Saving dataset {dataset}...\")\n try:\n dataset = json.loads(dataset)\n except json.decoder.JSONDecodeError:\n return\n\n dataset = dataset.rsplit(\"/\")[-1]\n dataset_path = f\"/tmp/data/{dataset}\"\n\n try:\n df = pd.read_csv(dataset_path, sep=None, engine=\"python\")\n platiagro.save_dataset(name=dataset, df=df)\n except (pd.errors.EmptyDataError, pd.errors.ParserError, UnicodeDecodeError, ValueError):\n content = open(dataset_path, \"rb\")\n platiagro.save_dataset(name=dataset, data=content)", "title": "" }, { "docid": "e5c88b6e6d05add6aba8c963116248c4", "score": "0.74094224", "text": "def save_dataset(dataset: TablutDataset, path: Optional[str] = './'):\n with open(os.path.join(path, f'{dataset.name}.pkl'), 'wb') as f:\n pickle.dump(dataset, f)", "title": "" }, { "docid": "ffea81906700acc0d3b2d1c28915fc07", "score": "0.7367737", "text": "def saveDataset(self, filename, datasets):\n with open(os.path.join(filename), 'wb') as handle:\n data = { # Warning: If adding something here, also modifying loadDataset\n 'word2index': self.word2index,\n 'index2word': self.index2word,\n 'index2vector': self.index2vector,\n 'datasets': datasets\n }\n pickle.dump(data, handle, -1) # Using the highest protocol available", "title": "" }, { "docid": "f86e156671f9f9513cc31c6ec97160ce", "score": "0.72513616", "text": "def save_dataset(dataset):\n self.dataset_counter += 1\n self.saved_datasets.append(dataset)\n dataset_dir = self.datasets_directory_name + \"/training_dataset_{}.npy\".format(self.dataset_counter)\n dataset_path = Path(self.datasets_directory_name)\n try:\n dataset_path.rmdir()\n except OSError as e:\n print(f\"Error: {dataset_path} : {e.strerror}\")\n dataset_path.mkdir(exist_ok=True, parents=True)\n if os.path.exists(dataset_path) and self.dataset_counter == 1:\n shutil.rmtree(dataset_path)\n os.makedirs(dataset_path)\n with open(dataset_dir, 'wb') as f:\n np.save(f, dataset)\n print(\"[INFO]: Dataset was saved in the directory: \", dataset_path)", "title": "" }, { "docid": "cee52fb6bab2b27167ecf9285269061a", "score": "0.7239169", "text": "def save(self):\n\n if self.path is None:\n raise ValueError('No path given to save the dataset.')\n\n self.save_at(self.path)", "title": "" }, { "docid": "7bedc009f4111974b7ec2d1cf4cf7839", "score": "0.71408814", "text": "def SaveDataset(self, savepath_pickle=None):\n if savepath_pickle is None:\n savepath_pickle = self.GetDatasetFilePath()\n with open(savepath_pickle, \"wb\") as file:\n pickle.dump((self.sample_dict, self.path_list, self.set_dict, self.set_index), file)", "title": "" }, { "docid": "f66c809f36781a6320b2b07ee4ff9afd", "score": "0.7136119", "text": "def save_encoded_dataset(dataset, filename):\n\n pickle.dump(dataset, open(filename, \"wb\"))", "title": "" }, { "docid": "f4b1beb628e80d41a903616dc106428e", "score": "0.7117572", "text": "def save_dataset(self, path=None, add=True):\n self._assert_loaded(self._data, \"dataset\")\n # if not path, do DataLoader data_dir\n if not path:\n path = self.annotation_path\n\n os.makedirs(path, exist_ok=True)\n\n # save as json\n self._save_json_list(self._data, os.path.join(path, \"data.json\"), 
add=add, sub_annots=True)", "title": "" }, { "docid": "48fd40f572f564a02c92aa0d550a52fd", "score": "0.71117485", "text": "def save(self, save_path): # todo: need interactionsDataset when saving?\n dump(self, save_path)", "title": "" }, { "docid": "2006bb901a77089084c55cf5f8ea26b9", "score": "0.7082088", "text": "def saveDataset(self, filename):\n\n with open(os.path.join(filename), 'wb') as handle:\n data = {\n 'word2id': self.word2id,\n 'id2word': self.id2word,\n 'idCount': self.idCount,\n 'intent2id': self.intent2id,\n 'id2intent': self.id2intent,\n 'trainingSamples': self.trainingSamples,\n 'validationSamples': self.validationSamples,\n 'testSamples': self.testSamples,\n 'entities': self.entities_property,\n }\n pickle.dump(data, handle, -1)\n with open(self.data_path + \"/train.csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(self.txtTrainingSamples)\n with open(self.data_path + \"/valid.csv\", \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(self.txtValidationSamples)", "title": "" }, { "docid": "6d4bdb422e81b3afbc74e2b92d976ca8", "score": "0.6969148", "text": "def save(self) -> None:\n create_dir(dataset_data_path)\n\n file_name = self.get_filename(self.name, self.dt)\n with open(file_name, 'wb') as f:\n pickle.dump(self, f)", "title": "" }, { "docid": "8dca2543708c7155aae8ecbc4e26409e", "score": "0.6887473", "text": "def save(cls):\n cls._remove_old_datapoints()\n with open(cls._location, 'w') as fh:\n data = yaml.dump(cls._data, default_flow_style=False)\n fh.write(data)", "title": "" }, { "docid": "fafaaa3ec321134fea0d4c06b272853e", "score": "0.6887001", "text": "def save_dataset(filename, dataset):\n\n with open(filename, mode='w', encoding='utf-8')as f:\n np.savetxt(filename, dataset)\n f.close()\n print(f\"{filename} saved successfully\")", "title": "" }, { "docid": "0b24b760947b9412cfd32223f0946e59", "score": "0.687435", "text": "def save_dataset(self, save_filename, type='shuffled_data_array' ):\n\n if type == 'shuffled_data_array':\n\n np.save(save_filename,self.shuffled_data_array)\n\n np.save(('{}_labels'.format(save_filename)),self.shuffled_label_array)\n\n\n\n elif type == 'data_array':\n\n np.save(save_filename,self.data_array)\n\n np.save(('{}_labels'.format(save_filename)),self.label_array)\n\n\n\n elif type == 'xor_array' or type == 'and_array' or type == 'add_array':\n\n fn = 'self.{}_array'.format(type[0:2])\n\n fn_labels = 'self.{}_labels'.format(type[0:2])\n\n np.save(save_filename, fn)\n\n np.save(('{}_labels'.format(save_filename)),fn_labels)\n\n return", "title": "" }, { "docid": "02f03dd376c7af2ba1a071f9e560fd9c", "score": "0.6841852", "text": "def save_dataset(self):\n\n import os\n from datetime import datetime\n\n timestamp = datetime.now()\n yearkey = \"yearkey={}\".format(timestamp.strftime(\"%Y\"))\n monthkey = \"monthkey={}\".format(timestamp.strftime(\"%m\"))\n daykey = \"daykey={}\".format(timestamp.strftime(\"%d\"))\n directory_to_save_processed_data_to = os.path.join(\n self.data_directory, \"processed\", yearkey, monthkey, daykey\n )\n if not os.path.exists(directory_to_save_processed_data_to):\n os.makedirs(directory_to_save_processed_data_to)\n\n self.data.to_parquet(\n os.path.join(directory_to_save_processed_data_to, \"data.parquet.gzip\"),\n compression=\"gzip\",\n )\n self.next(self.end)", "title": "" }, { "docid": "35890761e1dbea961678b20ef230b45c", "score": "0.6798494", "text": "def save_dataset(self, filepath: str):\n np.save(filepath, 
self.all_sos_numpy)", "title": "" }, { "docid": "b1433fc0f0714e8bb9f9fc9bd2d10ab0", "score": "0.67621285", "text": "def saveDataset(X_train, Y_train, X_test, Y_test, path):\n np.save(os.path.join(path, \"X_train.npy\"), X_train)\n np.save(os.path.join(path, \"Y_train.npy\"), Y_train)\n np.save(os.path.join(path, \"X_test.npy\"), X_test)\n np.save(os.path.join(path, \"Y_test.npy\"), Y_test)", "title": "" }, { "docid": "2b0cbb864ec04c5c31603ffbffb6cd93", "score": "0.6760361", "text": "def save_to_pickle(dataset, fileloc):\n filename = fileloc + \".pickle\"\n with open(filename, 'wb') as f:\n pickle.dump(dataset, f)\n logger.info(\"Saved to {}\".format(filename))", "title": "" }, { "docid": "f4c179adfce2e97fc750a981f00740d2", "score": "0.6758912", "text": "def save_dataset(self, dataset_file=None, force_overwrite=False):\n if dataset_file is not None:\n self.dataset_file = dataset_file\n\n # Initialize dict\n json_dataset = OrderedDict()\n\n # Save dataset info\n json_dataset['root_dir'] = self.root_dir\n json_dataset['dataset_type'] = self.dataset_type\n json_dataset['classes'] = list(self.name_to_class_info.values())\n json_dataset['images'] = list(self.image_infos.values())\n\n # Save dataset into json file\n if (not os.path.isfile(self.dataset_file)) or force_overwrite:\n print('Saving dataset as an annotation file, this can take a while')\n with open(self.dataset_file, 'w') as f:\n json.dump(json_dataset, f)\n print('Dataset saved')\n else:\n raise FileExistsError('Dataset not saved as it already exists, consider overwriting')", "title": "" }, { "docid": "3bb9babcb5e0347ea0e529e663c70bde", "score": "0.673526", "text": "def write_dataset(self) -> list[str]:", "title": "" }, { "docid": "7ae0a79cdc41b48bab4d50d026227349", "score": "0.6731773", "text": "def save_dataset(self, dataset_file=None, force_overwrite=False):\n if dataset_file is not None:\n self.dataset_file = dataset_file\n\n assert self.dataset_file is not None\n\n # Initialize dict\n json_dataset = OrderedDict()\n\n # Save dataset info\n json_dataset['root_dir'] = self.root_dir\n json_dataset['classes'] = list(self.name_to_class_info.keys())\n json_dataset['images'] = list(self.image_infos.values())\n json_dataset['annotations'] = list(self.ann_infos.values())\n\n # Save dataset into json file\n if (not os.path.isfile(self.dataset_file)) or force_overwrite:\n print('Saving dataset as an annotation file, this can take a while')\n with open(self.dataset_file, 'w') as f:\n json.dump(json_dataset, f)\n print('Dataset saved')\n else:\n raise FileExistsError('Dataset not saved as it already exists, consider overwriting')", "title": "" }, { "docid": "539658809dd9ab621f6380de089c0177", "score": "0.6696084", "text": "def save_dataset(dataset, outfile):\n import h5py\n f = h5py.File(outfile, 'w')\n for key in dataset.keys():\n f.create_dataset(key, data=dataset[key])\n f.close()", "title": "" }, { "docid": "2dfa68e4cbf5cc9c8e9f977c03040d61", "score": "0.66912514", "text": "def persist_data(self):\n self.workdir.mkdir(parents=True, exist_ok=True)\n ds2 = []\n for i, (ds, cand_f, ds_attrs) in enumerate(self._flat_datasets()):\n train, test = ds\n if isinstance(train, pd.DataFrame):\n fn = self.workdir / 'ds{}-train.parquet'.format(i+1)\n _logger.info('serializing to %s', fn)\n train.to_parquet(fn)\n train = fn\n if isinstance(test, pd.DataFrame):\n fn = self.workdir / 'ds{}-test.parquet'.format(i+1)\n _logger.info('serializing to %s', fn)\n test.to_parquet(fn)\n test = fn\n ds2.append(((train, test), cand_f, ds_attrs))\n self.datasets = 
ds2\n self._is_flat = True", "title": "" }, { "docid": "b83b59004434bd8f3e17f25b19b5838c", "score": "0.6667793", "text": "def data_save(self, filename):\n\t\tfd = open(filename, \"wb\")\n\t\tfd.write(self.data)\n\t\tfd.close()", "title": "" }, { "docid": "018add1572d08a029b1a7158919d9131", "score": "0.666314", "text": "def save(self, dataset: 'Dataset', dataset_uri: str, *, compute_digest: ComputeDigest = ComputeDigest.ALWAYS, preserve_metadata: bool = True) -> None:", "title": "" }, { "docid": "8ecfb2d7c0c4c3ebb7d18dec8bd18944", "score": "0.6654124", "text": "def save_dataset(dataset, save_dir):\n # Create directory if it doesn't exist\n print(\"Saving in {}...\".format(save_dir))\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # Export the dataset\n with open(os.path.join(save_dir, 'sentences.txt'), 'w') as file_sentences:\n with open(os.path.join(save_dir, 'labels.txt'), 'w') as file_labels:\n for words, tags in dataset:\n file_sentences.write(\"{}\\n\".format(\" \".join(words)))\n file_labels.write(\"{}\\n\".format(\" \".join(tags)))\n print(\"- done.\")", "title": "" }, { "docid": "8ecfb2d7c0c4c3ebb7d18dec8bd18944", "score": "0.6654124", "text": "def save_dataset(dataset, save_dir):\n # Create directory if it doesn't exist\n print(\"Saving in {}...\".format(save_dir))\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n # Export the dataset\n with open(os.path.join(save_dir, 'sentences.txt'), 'w') as file_sentences:\n with open(os.path.join(save_dir, 'labels.txt'), 'w') as file_labels:\n for words, tags in dataset:\n file_sentences.write(\"{}\\n\".format(\" \".join(words)))\n file_labels.write(\"{}\\n\".format(\" \".join(tags)))\n print(\"- done.\")", "title": "" }, { "docid": "cd12a4b52682693a939d5b6ce6c6e2b1", "score": "0.6646873", "text": "def save_datasets(datasets, file): \n with open(file, 'wb') as f:\n save(datasets, f)", "title": "" }, { "docid": "7d56fa784544d6a2ad7597d5c01fe26a", "score": "0.6637412", "text": "def save(self):\n with open(self.filename, 'wb') as fp:\n pickle.dump(self.data, fp, pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "9c1a70695432e6f9ebdb70c31caf4f90", "score": "0.66245043", "text": "def save_dataset(self, samples, labels, path):\n\n # Create the list structure of a dataset\n dataset = [samples, labels]\n\n # Save the dataset file in a pickle file\n with open(path + '.pckl', 'wb') as dataset_file:\n pickle.dump(dataset, dataset_file)", "title": "" }, { "docid": "9c296c878726ffcb4eb8c2a390b8712d", "score": "0.66019934", "text": "def test_saveable_dataset():\n # build HCIDataset\n cube = np.zeros((5, 10, 10))\n angles = np.linspace(1, 2, 5)\n fwhm = 4 # test non-numpy type saving/loading\n\n ds = Dataset(cube=cube, angles=angles, fwhm=fwhm)\n\n # save\n fd, fn = tempfile.mkstemp(prefix=\"vip_\")\n ds.save(fn)\n\n # restore\n ds2 = Dataset.load(fn)\n\n # compare\n aarc(ds2.cube, cube)\n aarc(ds2.angles, angles)\n assert ds2.fwhm == fwhm\n\n # cleanup\n os.remove(fn)", "title": "" }, { "docid": "d329dbc26a8ec3e8190f7538ea19a10a", "score": "0.65863574", "text": "def write_dataset(dataset, save_path):\n if not dataset:\n logging.info('No dataset to write.')\n return\n logging.info('Writing dataset to %s', save_path)\n for split_name, list_of_input_output_pairs in dataset.items():\n folder_name = os.path.join(save_path, split_name)\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n encode_name = os.path.join(folder_name, '%s_encode.txt' % split_name)\n decode_name = os.path.join(folder_name, '%s_decode.txt' % 
split_name)\n with gfile.GFile(encode_name,\n 'w') as encode_f, gfile.GFile(decode_name,\n 'w') as decode_f:\n for pair in list_of_input_output_pairs:\n encode_f.write(pair[0] + '\\n')\n decode_f.write(pair[1] + '\\n')\n logging.info('Dataset written to %s', save_path)", "title": "" }, { "docid": "d6f45f2f66946d1801a34db8776cf96b", "score": "0.6560496", "text": "def save(self, filename=None, verbose=False):\n if not filename:\n if self.name:\n filename = self.name\n else:\n print(\" [ERROR] - Must specify dataset.name [dataset.set_name('name')]\")\n print(\" or give filename [dataset.save(filename='filename')]\")\n hdf5_save.save_hdf_dataset(self, filename, verbose=verbose)", "title": "" }, { "docid": "e054944a62bbf296902d61c5a41157ac", "score": "0.6556137", "text": "def save_data(self, data):", "title": "" }, { "docid": "4afdb01e15302f79a6cc517d8f51e1f0", "score": "0.6549905", "text": "def save_data(self):\n np.savetxt(self.save_to, self.predicted, delimiter=\",\")", "title": "" }, { "docid": "5f47511b87c05e21d215f7cab0656240", "score": "0.65480536", "text": "def save_dataset(self,mode=\"append new\"):\n # Create processed dataset folder if not existent\n if not os.path.exists(self.processed_dataset_folder):\n os.mkdir(self.processed_dataset_folder)\n\n # if dataset has not been saved before save\n dataset_full_path = f\"{self.processed_dataset_folder}{os.sep}{self.dataset_base_name}\"\n if not os.path.exists(dataset_full_path):\n pickle.dump(self.dataset, open(dataset_full_path,\"wb\"))\n return True\n\n if mode == \"override\": # Replace file\n pickle.dump(self.dataset, open(dataset_full_path,\"wb\"))\n return True\n elif mode == \"append new\": # Check if file contents in there and if not then append\n saved_dataset = pickle.load( open(dataset_full_path,\"rb\"))\n saved_dataset_processed_videos_set = set([row[\"Meta\"][\"VideoPath\"] for row in saved_dataset])\n current_dataset_processed_videos_set = set([row[\"Meta\"][\"VideoPath\"] for row in self.dataset])\n videos_processed_but_not_in_saved_dataset = current_dataset_processed_videos_set - saved_dataset_processed_videos_set\n\n if len(videos_processed_but_not_in_saved_dataset) > 0:\n additional_rows_to_save = [row for row in self.dataset if row[\"Meta\"][\"VideoPath\"] in videos_processed_but_not_in_saved_dataset]\n saved_dataset.extend(additional_rows_to_save)\n pickle.dump(saved_dataset, open(dataset_full_path,\"wb\"))\n return True\n return False", "title": "" }, { "docid": "d53c5637e8f1c7854f8bf47e16545dd0", "score": "0.65381706", "text": "def write_dataset(dataset, save_path):\n if not dataset:\n logging.info('No dataset to write.')\n return\n logging.info(f'Writing dataset to {save_path}')\n for split_name, list_of_input_output_pairs in dataset.items():\n folder_name = os.path.join(save_path, split_name)\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n encode_name = os.path.join(folder_name, f'{split_name}_encode.txt')\n decode_name = os.path.join(folder_name, f'{split_name}_decode.txt')\n skeleton_name = os.path.join(folder_name, f'{split_name}_skeleton.txt')\n with open(\n encode_name, 'w', encoding='utf8') as encode_f, open(\n decode_name, 'w', encoding='utf8') as decode_f, open(\n skeleton_name, 'w', encoding='utf8') as skeleton_f:\n for pair in list_of_input_output_pairs:\n encode_f.write(pair[0] + '\\n')\n decode_f.write(pair[1] + '\\n')\n skeleton_f.write(pair[2] + '\\n')\n logging.info(f'Dataset written to {save_path}')", "title": "" }, { "docid": "0ddde7a8b0bb63b2b5d0216bb8cbad85", "score": 
"0.6508642", "text": "def save(self, filename):\n with open(filename, 'w') as fp:\n json.dump({\"image_size\": self.image_size,\n \"labels\": self.label_info,\n \"images\": self.image_info}, fp)\n utils.logger.info(\"The dataset has been saved into {}\".format(filename))", "title": "" }, { "docid": "8f54c44b22efc7aaf40e75920e94b7ec", "score": "0.6501132", "text": "def saveData(self, data):", "title": "" }, { "docid": "7e469438df6b586c8abd771b75782542", "score": "0.6488153", "text": "def save_mfcc_training_dataset(self):\n\n # Save the dataset as pickle\n with pathlib.Path('dataset.pkl').open('wb') as pfile:\n pickle.dump([(r.diffs, r.result_type) for r in self.res], pfile,\n protocol=pickle.HIGHEST_PROTOCOL)\n logging.getLogger(__name__).info(\"dataset.pkl has generated.\")", "title": "" }, { "docid": "df87947f4111990f712114b009c36077", "score": "0.647041", "text": "def save_dataset(dataset, save_dir):\n\n print(f'Saving in {save_dir}...')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n with open(os.path.join(save_dir, 'sentences.txt'), 'w', encoding='utf-8') as file_sentences:\n with open(os.path.join(save_dir, 'indices.txt'), 'w', encoding='utf-8') as file_indices:\n for sentence, st_index, end_index in dataset:\n file_sentences.write(f'{sentence}\\n')\n file_indices.write(f'{str(st_index)} {str(end_index)}\\n')\n\n print(\"- done.\")", "title": "" }, { "docid": "ec3b0a4722948edace9c276daf8f6b8b", "score": "0.64498734", "text": "def _save_dataset(filename, name, value, dtype=None):\n\n try:\n filename.create_dataset(name, data=value, maxshape=(None,), dtype=dtype)\n except RuntimeError:\n del filename[name]\n filename.create_dataset(name, data=value, maxshape=(None,), dtype=dtype)", "title": "" }, { "docid": "c596aa6936a6d641fbed5e7e93d3ae31", "score": "0.6447846", "text": "def save_data(self):\r\n json.dump(self.data, open(self.filename, 'w'), indent=2)", "title": "" }, { "docid": "87b6635f12c916bdf4cbc32fa7624d14", "score": "0.6415625", "text": "def save(self, filename='data.json'):\n with open(filename,'w') as f: \n json.dump(self.datadict,f)", "title": "" }, { "docid": "9c44250c5ac1e877058182310833168d", "score": "0.6409766", "text": "def write(self, fname):\n numpy.save(fname, self.dataset)", "title": "" }, { "docid": "4ab38983f55f551d5cb09a29ab5fe040", "score": "0.63770056", "text": "def savechanges(self):\n self.construct_name(force = True)\n if hasattr(self, 'clusterarray'):\n dataset = xr.Dataset({'clustidfield':self.clusterarray,self.array.name:self.array})\n else:\n dataset = self.array.to_dataset()\n # invoke the computation (if loading was lazy) and writing\n particular_encoding = {key : obs_netcdf_encoding[key] for key in dataset.keys()}\n dataset.to_netcdf(self.filepath, encoding = particular_encoding)\n #delattr(self, 'array')", "title": "" }, { "docid": "7663110da61f79c8fbc9be557fb0bb4f", "score": "0.6367028", "text": "def save(\n filepath: str, dataset: Union[List[Frame], Dataset], nprocs: int = 0\n) -> None:\n if isinstance(dataset, Dataset):\n frames, config = dataset.frames, dataset.config\n else:\n frames, config = dataset, None\n\n if nprocs > 1:\n labels = pmap(dump, frames, nprocs)\n else:\n labels = list(map(dump, frames))\n\n with open(filepath, \"w\") as fp:\n json.dump(Dataset(frames=labels, config=config).dict(), fp, indent=2)", "title": "" }, { "docid": "3f6d5d2f909984ab1e7d5e1d8af42b95", "score": "0.63477445", "text": "def save(self):\n entities = pandas.DataFrame(self.data)\n entities.to_csv(self.output_path, index_label=\"id\")\n 
self.logger.info(\"Initial custom data saved: {}\".format(self.output_path))", "title": "" }, { "docid": "35aac372eeba883b5bb4e98eabd64216", "score": "0.63380545", "text": "def savePickle(path, dataset, protocol=2):\n f = open(path, 'wb')\n pickle.dump(dataset, f, protocol=2)\n f.close()\n return", "title": "" }, { "docid": "00bc112f6e435929aceaff1795147ba2", "score": "0.6337372", "text": "def save_dataset(filename, train_labels=None, train_features=None, test_features=None, ids=None, feature_names=None):\n np.savez(filename, train_labels=train_labels, train_features=train_features, test_features=test_features, ids=ids, feature_names=feature_names)", "title": "" }, { "docid": "35e9d7fe6b66f2689670791d56a25c7d", "score": "0.63304573", "text": "def save_data(data, filename):\n pass", "title": "" }, { "docid": "87a808f54bbd868bf1ef675cef177bf3", "score": "0.6328018", "text": "def save_dataset(self, path, sep='\\t'):\n\n self._dataset.to_csv(path_or_buf=path, sep=sep, index=False)\n\n self.verboseprint(\"Dataset successfully saved at {}.\".format(path))\n logging.debug(\"Dataset saved at {}..\".format(path))", "title": "" }, { "docid": "d0e8d2334736eb0118f7f589975ce2d7", "score": "0.63111484", "text": "def __save_in_database(self, dataset):\n\n if isinstance(dataset, list):\n for data in dataset:\n self.__save_in_database(data)\n else:\n self.save_into_database(dataset, None)", "title": "" }, { "docid": "dbd0cba115ea4095c132b873f90dd688", "score": "0.6308548", "text": "def save_data_to_disk():\n global labelled_data\n with open(os.path.join(data_dir, filename), \"wb\") as f:\n np.savetxt(f, labelled_data, delimiter=\",\")", "title": "" }, { "docid": "40baa2ec7ee0c74525f24dd3818c5817", "score": "0.6305703", "text": "def save_data(self, data):\n with open(self.fname, \"wb\") as file:\n pickle.dump(data, file)", "title": "" }, { "docid": "ea3c9de5a047124f8ed555f9dd30aafa", "score": "0.63037914", "text": "def _write(self, featurized_dataset, dataset_name):\n dump_location = feature_set_location(dataset_name, self.__class__.__name__)\n joblib.dump(featurized_dataset, dump_location)", "title": "" }, { "docid": "0d9ab93314ba8c1d872bfc2c5d50afe2", "score": "0.63032645", "text": "def save(cls, filepath, numpy_tuple_dataset):\n if not isinstance(numpy_tuple_dataset, NumpyTupleDataset):\n raise TypeError('numpy_tuple_dataset is not instance of '\n 'NumpyTupleDataset, got {}'\n .format(type(numpy_tuple_dataset)))\n np.savez(filepath, *numpy_tuple_dataset._datasets)\n print('Save {} done.'.format(filepath))", "title": "" }, { "docid": "7fc946c9f352535a02ff70cd56b81922", "score": "0.630007", "text": "def save(self, save_dir):\n\n # Save params\n full_path = os.path.join(save_dir, 'dataset_params.pickle')\n with open(full_path, 'wb') as x:\n pickle.dump(self.params, x)\n\n # Each partition is persisted saved as a dataset index and color name pair\n # Dataset index can be used to re-create the partitons\n color_idx_lookup = {name: idx for idx, (_, _, name) in enumerate(self)}\n for partition, partition_path in [\n (self.train_set, 'train_partition.txt'),\n (self.cv_set, 'cv_partition.txt'),\n (self.test_set, 'test_partition.txt'),\n ]:\n full_path = os.path.join(save_dir, partition_path)\n color_names = ['{},{}'.format(color_idx_lookup[name], name) for _, _, name in partition]\n with open(full_path, 'w') as x:\n x.write('\\n'.join(color_names))", "title": "" }, { "docid": "314f3f98dbd54460c07fd6df833cb058", "score": "0.62917274", "text": "def save(self, filename, **kwargs):\n\n save(self.ds, filename, 
**kwargs)", "title": "" }, { "docid": "1f213dd16185287228cf0e16ff63b54e", "score": "0.6291383", "text": "def saveData(self, filename=None):\n if filename is None:\n filename = self.name + \".dat\"\n with open(filename, 'wb') as data:\n pickle.dump(self.items, data)", "title": "" }, { "docid": "b8b32e843796f31568b45fb3a47b8f29", "score": "0.62893254", "text": "def save(self, filename, data):\n raise NotImplementedError()", "title": "" }, { "docid": "a8aec74a063bd41a5f89b09050bed535", "score": "0.6275846", "text": "def save_data(self):\n libFile.write_json(self.datapath, {\"DeformerInfo\": self.data})", "title": "" }, { "docid": "37799e9f3139ba6b2e3eb38fbc4c7e00", "score": "0.6275192", "text": "def save(self, filepath):\n f = h5py.File(filepath, \"w\")\n f.create_dataset(\"trial_type\", data=\"RankObservations\")\n f.create_dataset(\"stimulus_set\", data=self.stimulus_set)\n f.create_dataset(\"n_select\", data=self.n_select)\n f.create_dataset(\"is_ranked\", data=self.is_ranked)\n f.create_dataset(\"group_id\", data=self.group_id)\n f.create_dataset(\"agent_id\", data=self.agent_id)\n f.create_dataset(\"session_id\", data=self.session_id)\n f.create_dataset(\"weight\", data=self.weight)\n f.create_dataset(\"rt_ms\", data=self.rt_ms)\n f.close()", "title": "" }, { "docid": "8567f474fb1db89a5b709a7d174c91d3", "score": "0.6255518", "text": "def save_data(self, filepath=None):\n if filepath is None:\n filepath = os.path.join(self.data_directory, 'df_classification_data.pickle')\n \n filetype = filepath.split('.')[-1]\n to_save = self.all_data[self.all_data[self.label_col].notna()]\n if filetype == 'pickle':\n to_save.to_pickle(filepath)\n elif filetype == 'csv':\n to_save.to_csv(filepath)\n elif filetype == 'json':\n to_save.to_json(filepath, orient='records', lines=True)", "title": "" }, { "docid": "3c6c0683f720673113858a7505019ac7", "score": "0.62554234", "text": "def save(self, filepath):\n f = h5py.File(filepath, \"w\")\n f.create_dataset(\"trial_type\", data=\"RankDocket\")\n f.create_dataset(\"stimulus_set\", data=self.stimulus_set)\n f.create_dataset(\"n_select\", data=self.n_select)\n f.create_dataset(\"is_ranked\", data=self.is_ranked)\n f.close()", "title": "" }, { "docid": "ec312bc200885ea10fb10d053cb27e5b", "score": "0.62543446", "text": "def save_training_data(self, data):\n pkl.dump(data, open('train_data.pkl', 'wb'))", "title": "" }, { "docid": "76a758cff64f3817fe2c8751fb739178", "score": "0.62477815", "text": "def save_data(directory,dataset_name,dataset):\n \n if not os.path.exists(directory):\n os.makedirs(directory)\n \n print('Saving data...')\n np.save(os.path.join(directory,dataset_name),dataset)\n print(dataset_name,' saved at: ',os.path.join(directory,dataset_name))", "title": "" }, { "docid": "a57c306aa40164d3240468f15e5cc226", "score": "0.62472814", "text": "def save(self):\n self._validate_path()\n self.data.save(self.filename)\n self.stored = True", "title": "" }, { "docid": "6a929ba3f42a6c5c1412513d5f900c83", "score": "0.62351054", "text": "def save(self):\r\n os.system(\"mkdir DataSets\")\r\n numpy.save(\"DataSets\\\\\"+self.name+\"Frames\",self.FrameSet)\r\n numpy.save(\"DataSets\\\\\"+self.name+\"Inputs\",self.InputSet)", "title": "" }, { "docid": "17d14c9b6dc8c77a0371d3e4533f4a69", "score": "0.62292564", "text": "def _save_data_on_disk(self):\n # create the training, validation and test arrays\n train_data = np.c_[self.X_train, self.y_train]\n valid_data = np.c_[self.X_valid, self.y_valid]\n test_data = np.c_[self.X_test, self.y_test]\n header_cols = 
self.housing.feature_names + [\"MedianHouseValue\"]\n header = \",\".join(header_cols)\n\n self.train_filepaths = self._save_to_multiple_csv_files(train_data, \"train\", header, n_parts=20)\n self.valid_filepaths = self._save_to_multiple_csv_files(valid_data, \"valid\", header, n_parts=10)\n self.test_filepaths = self._save_to_multiple_csv_files(test_data, \"test\", header, n_parts=10)", "title": "" }, { "docid": "e317e867e6ae2df418d35a49c2145e5a", "score": "0.6198814", "text": "def save(self, filename=None):\n if filename is None:\n filename = self.filename\n with open(filename, 'wb') as f:\n f.write(self.data)", "title": "" }, { "docid": "e6248170040d204c41971f5b4d0f27e4", "score": "0.61897075", "text": "def save_data(path, filename, data):\n train_data, val_data, test_data = data\n save_name = path / filename.split('.')[0]\n train_data.to_csv(f'{save_name}_train_set.csv')\n val_data.to_csv(f'{save_name}_val_set.csv')\n test_data.to_csv(f'{save_name}_test_set.csv')", "title": "" }, { "docid": "c31933c85f6f34ec34a72ffe1048c12b", "score": "0.61836815", "text": "def save_data(self):\n data = AgentData(gridworld=self)\n pickle.dump(data, open(data.title + '.pkl', 'wb'))", "title": "" }, { "docid": "2516dc600214f2d5ccf3d0529ee4ad80", "score": "0.61756873", "text": "def save_data(self, network):\n\n # save the current network\n self.network_path = \"{}/network_gen_{}.pt\".format(network_dir, self.cycle)\n torch.save(network, self.network_path)\n\n # dump the storage object to file\n with open(storage_path, 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "27d4a0566dc40bb177652c047c64228a", "score": "0.61646855", "text": "def save(self, ds):\n \n fspec = self.dataset.dsmap[ds]\n if fspec is None and self.directory is None: # file is not saved\n return\n if fspec is not None:\n fdir, base = os.path.dirname(fspec), os.path.basename(fspec)\n if self.suffix: # create file name with suffix preceding the extension\n parts = os.path.splitext(base)\n base = parts[0] + self.suffix + parts[1] # if no extension, parts[1] wil be empty string\n if self.directory: # substitute new location for original\n fdir = self.directory\n target = os.path.join(fdir, base)\n else:\n base = ds + os.path.extsep + \"sav\"\n target = os.path.join(self.directory, base)\n if not self.overwrite and os.path.exists(target):\n print(_(\"\"\"File %s already exists and will not be overwritten.\"\"\") % target)\n else:\n # this is already the active file\n spss.Submit(\"\"\"SAVE OUTFILE=\"%(target)s\".\"\"\" % locals())\n # file might already be gone\n try:\n if self.close and not ds == self.dataset.activefile:\n spss.Submit(\"DATASET CLOSE \" + ds) # file could remain open as active file\n except:\n pass", "title": "" }, { "docid": "8eea44bb15fa0dd5cf2acee00ea300ce", "score": "0.61642736", "text": "def to_export(self):\n if self.data_file is not None:\n pickle.dump(self.data, open(self.data_file, \"wb\"))", "title": "" }, { "docid": "265fa45dc293b07b267720c424aecd09", "score": "0.615056", "text": "def save_data_to_fiile(self):\n logger.warning(\"Only Use it for debug purposes!!!\")\n self.internationalstudies.to_csv(\"internationalstudies.csv\")\n self.drug_vocab_df.to_csv(\"drug_vocab.csv\")\n with open('all_US_studies_by_keyword.json', 'w', encoding='utf-8') as f:\n json.dump(self.all_US_studies_by_keyword, f, ensure_ascii=False, indent=4)", "title": "" }, { "docid": "ec00575b039e2838d4402ecab3be9ac5", "score": "0.6147158", "text": "def _data_to_save(self):\n return self.data", 
"title": "" }, { "docid": "1d63df6f56af1e7542b87438ae88a83f", "score": "0.61453146", "text": "def to_disk(self, ds_disk_path):\n self.ds.save_to_disk(ds_disk_path)", "title": "" }, { "docid": "471b02133ccacfd3d852ae31ab9b0f1f", "score": "0.614503", "text": "def save(self, storePath, prefix, verbose=0):\n if self.datasets == []:\n print('no datasets to save. Generate datasets with gen_datasets() first.')\n \n print(f\"\\nSave {len(self.datasets)} datasettings into '{storePath}'\")\n if not os.path.isdir(storePath):\n os.makedirs(storePath)\n\n for i,d in enumerate(self.datasets):\n fname = prefix + str(i+1) + \".npy\"\n d.saveSets(storePath, fname)\n if verbose: print(fname)\n \n with open(os.path.join(storePath,'readme.md'), 'w') as f:\n f.write(f\"These datasets were generated from '{self.fileName}'\\n\")\n f.write(str(datetime.now()) + '\\n')", "title": "" }, { "docid": "c2c6276be4ce74dd3d607e572ba4559d", "score": "0.6137075", "text": "def dump_and_load_dataset(self):\n self._init_qlib()\n self._prepare_calender_cache()\n dataset = init_instance_by_config(self.task[\"dataset\"])\n dataset_backtest = init_instance_by_config(self.task[\"dataset_backtest\"])\n\n ##=============dump dataset=============\n dataset.to_pickle(path=\"dataset.pkl\")\n dataset_backtest.to_pickle(path=\"dataset_backtest.pkl\")\n\n del dataset, dataset_backtest\n ##=============reload dataset=============\n with open(\"dataset.pkl\", \"rb\") as file_dataset:\n dataset = pickle.load(file_dataset)\n\n with open(\"dataset_backtest.pkl\", \"rb\") as file_dataset_backtest:\n dataset_backtest = pickle.load(file_dataset_backtest)\n\n self._prepare_calender_cache()\n ##=============reinit dataset=============\n dataset.config(\n handler_kwargs={\n \"start_time\": \"2021-01-19 00:00:00\",\n \"end_time\": \"2021-01-25 16:00:00\",\n },\n segments={\n \"test\": (\n \"2021-01-19 00:00:00\",\n \"2021-01-25 16:00:00\",\n ),\n },\n )\n dataset.setup_data(\n handler_kwargs={\n \"init_type\": DataHandlerLP.IT_LS,\n },\n )\n dataset_backtest.config(\n handler_kwargs={\n \"start_time\": \"2021-01-19 00:00:00\",\n \"end_time\": \"2021-01-25 16:00:00\",\n },\n segments={\n \"test\": (\n \"2021-01-19 00:00:00\",\n \"2021-01-25 16:00:00\",\n ),\n },\n )\n dataset_backtest.setup_data(handler_kwargs={})\n\n ##=============get data=============\n xtest = dataset.prepare(\"test\")\n backtest_test = dataset_backtest.prepare(\"test\")\n\n print(xtest, backtest_test)\n return", "title": "" }, { "docid": "302084093b0c988ab6764987ec9a5a91", "score": "0.6134747", "text": "def write_dat(self):\n data = self.get_data()\n data.savetxt('{}.dat'.format(self.get_name()))", "title": "" }, { "docid": "3d24882cc0abe7ed3f3a46fe27fb18f7", "score": "0.6134005", "text": "def save(self, path=None):\n if not path:\n path = './LMI-BACKUP'\n if not os.path.isdir(path):\n os.mkdir(path)\n today = datetime.datetime.today().strftime('%Y-%m-%d@%Hh-%Mm')\n path += f'/{today}'\n if not os.path.isdir(path):\n os.mkdir(path)\n else:\n if not os.path.isdir(path):\n os.mkdir(path)\n print(f'Saving to path: {path}')\n saved = []\n failed = []\n if server_uses_widgets(self.server):\n url_args = \"vocabulary,metadata,layer,widget\"\n else:\n url_args = \"metadata,layer\"\n for item in tqdm(self):\n if item['id'] not in saved:\n entity_type = item.get('type')\n if entity_type in ['Dataset', 'Table']:\n ds_id = item['id']\n else:\n ds_id = item['attributes']['dataset']\n try:\n url = f'{self.server}/v1/dataset/{ds_id}?includes={url_args}'\n r = requests.get(url)\n dataset_config = 
r.json()['data']\n except:\n failed.append(item)\n\n save_json = {\n \"id\": ds_id,\n \"type\": \"dataset\",\n \"server\": self.server,\n \"attributes\": dataset_config['attributes']\n }\n with open(f\"{path}/{ds_id}.json\", 'w') as fp:\n json.dump(save_json, fp)\n\n if len(failed) > 0:\n print(f'Some entities failed to save: {failed}')\n return failed\n print('Save complete!')", "title": "" }, { "docid": "2e2d638bf83a24fcc914036690f8c760", "score": "0.61270213", "text": "def _dent_asset_save(self, datastore):\n data_buffer = io.BytesIO()\n np.save(data_buffer, self.data)\n data_header = tarfile.TarInfo(\"data\")\n data_header.size = data_buffer.getbuffer().nbytes\n data_buffer.seek(0)\n datastore.addfile(data_header, data_buffer)\n\n config_buffer = io.BytesIO()\n config_buffer.write(\n yaml.dump(\n {\n \"name\": self.name,\n \"directory\": self.directory,\n \"transform\": self._transform.tolist(),\n \"offset\": self.offset.tolist(),\n \"indices\": self.indices.tolist(),\n \"material_name\": self.material_name,\n }\n ).encode(\"ascii\")\n )\n config_buffer.flush()\n config_header = tarfile.TarInfo(\"config\")\n config_header.size = config_buffer.getbuffer().nbytes\n config_buffer.seek(0)\n datastore.addfile(config_header, config_buffer)", "title": "" }, { "docid": "648da152253f95afa602bbbc4787408a", "score": "0.61234426", "text": "def write_dataset(self, path, data):\n self._h5file.create_dataset(path, data=data)", "title": "" }, { "docid": "a58b6f4a21e3ff8379ccd13994cc7681", "score": "0.6121262", "text": "def _serialize_data(self, samples, losses):\n with open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)\n with open('train_loss.pkl', 'wb') as f:\n pkl.dump(losses, f)", "title": "" }, { "docid": "4f8c5dcbda67b470276bedf9f5309ad2", "score": "0.61147726", "text": "def save(self):\n log.info(\"Writing to \", self.output_filepath)\n eKonf.save_data(self.df, self.output_filepath)", "title": "" }, { "docid": "92c836b3d1dc2020cf0defcf67eef268", "score": "0.61024535", "text": "def fetch_and_save_data(self):\n self._fetch_ca_datset()\n if not os.path.exists(self.dataset_directory):\n self._save_data_on_disk()\n else:\n self._create_filepaths()", "title": "" }, { "docid": "8097d66fe85273930d1355af9540a88d", "score": "0.61008865", "text": "def save_data(self, data, title=\"\"):\n\n\t\ttitle = title.replace(r\"\\\\\", \" -- \").replace(r\"/\", \" -- \").replace(\"\\\\\", \" -- \")[\n\t\t\t\t:-4] # Title tells both the set and the name of the file. 
Also crop out the extension .xml with the [:-4] command.\n\t\tnp.save(os.path.join(DataSources.forceplate_data, \"processed\", title),\n\t\t\t\tdata) # save as title without .xml, and no spaces on the end", "title": "" }, { "docid": "b70e7be5544f5a63c5586cc851a7285c", "score": "0.6100087", "text": "def save_data(self, filepath: str) -> None:\n np.save(filepath, self._population_history)", "title": "" }, { "docid": "756a0ad4616d5a5f74050e310650dd75", "score": "0.60908103", "text": "def save(self, filename: str):\n pd.to_pickle(self, filename)\n LOG.info(f\"Data successfully saved as {filename}!\")", "title": "" }, { "docid": "ae71be0747280048ac911c47b088dce0", "score": "0.608736", "text": "def saveToDisk(self):\n out.info('Saving to disk (%s)\\n' % (self.filename))\n\n # Make sure they want to save\n if(not self.attrSaveable()):\n return\n\n # Get whatever the data is\n pyld = self.exportAttr(self.getAttr())\n\n # Write the file to disk, truncate if it exists\n try:\n with open(self.filename, 'wb') as output:\n pickle.dump(pyld, output)\n os.fsync(output.fileno())\n except Exception as e:\n out.err('Error writing to disk %s\\n' % (str(e)))\n\n try:\n with open(self.filename + \".yaml\", \"w\") as output:\n yaml.dump(pyld, output)\n except Exception as error:\n out.err(\"Error writing yaml file: {}\".format(error))", "title": "" }, { "docid": "986edbb7909e73dce85e2ecac7093a48", "score": "0.6077244", "text": "def pickle_and_save_datasets(datasets, filename='plankton_data'):\n data_string = pickle.dumps(datasets)\n with gzip.open('data/{}.pkl.gz'.format(filename), 'wb') as f:\n f.write(data_string)", "title": "" }, { "docid": "3f153b9cb844eaa20cd7a1000c395d23", "score": "0.60747665", "text": "def save_attributes(self, paths: af.DirectoryPaths):\r\n dataset_path = paths._files_path / \"dataset\"\r\n\r\n self.dataset.output_to_fits(\r\n data_path=dataset_path / \"data.fits\",\r\n noise_map_path=dataset_path / \"noise_map.fits\",\r\n pre_cti_data_path=dataset_path / \"pre_cti_data.fits\",\r\n overwrite=True,\r\n )\r\n self.dataset.layout.output_to_json(\r\n file_path=dataset_path / \"layout.json\",\r\n )\r\n self.dataset.mask.output_to_fits(\r\n file_path=dataset_path / \"mask.fits\", overwrite=True\r\n )\r\n\r\n if self.dataset_full is not None:\r\n dataset_path = paths._files_path / \"dataset_full\"\r\n\r\n self.dataset_full.output_to_fits(\r\n data_path=dataset_path / \"data.fits\",\r\n noise_map_path=dataset_path / \"noise_map.fits\",\r\n pre_cti_data_path=dataset_path / \"pre_cti_data.fits\",\r\n overwrite=True,\r\n )\r\n self.dataset_full.layout.output_to_json(\r\n file_path=dataset_path / \"layout.json\",\r\n )\r\n self.dataset_full.mask.output_to_fits(\r\n file_path=dataset_path / \"mask.fits\", overwrite=True\r\n )\r\n\r\n self.clocker.output_to_json(file_path=paths._files_path / \"clocker.json\")\r\n self.settings_cti.output_to_json(\r\n file_path=paths._files_path / \"settings_cti.json\"\r\n )", "title": "" }, { "docid": "80144945f551fad8198d747e3e75d09a", "score": "0.6074664", "text": "def save(data, labels, dataset, metric='raw', overwrite=False):\n\n #Dataset filename\n dataFile = dataset.lower()+\"_\"+metric.lower()+\".npz\"\n labelsFile = dataset.lower()+\"_labels.npz\"\n\n #Full path to file\n dataFile_path = os.path.join(data_dir, dataFile)\n labelsFile_path = os.path.join(data_dir, labelsFile)\n\n #Check if Data directory exists\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n\n #Save dataset and labels\n if os.path.isfile(dataFile_path) and not overwrite:\n 
print('Data file '+dataFile_path+' already exists. Not saving.')\n else:\n np.savez_compressed(dataFile_path,data=data)\n np.savez_compressed(labelsFile_path,labels=labels)", "title": "" }, { "docid": "e4fe509157b6348d5e8ad50517a7e8d3", "score": "0.607132", "text": "def save(self, save_path=\"\"):\n save_data(self, save_path=save_path)", "title": "" }, { "docid": "d752fe7436946cbb254e592fa0d57aec", "score": "0.6066392", "text": "def save(self, basename):\r\n if self.data_type == \"sesans\":\r\n np.savetxt(basename+\".dat\", np.array([self._data.x, self.theory()]).T)\r\n pass", "title": "" }, { "docid": "78ed3c6adac78bed8172b3254cc83dcf", "score": "0.6053453", "text": "def save_data(self, data=None):\n self.data = data\n \n f = open(self.filename, 'w')\n \n for label, values in zip(data['labels'], data['values']):\n f.write(str(label))\n for i, value in enumerate(values):\n f.write(' ')\n f.write(str(i))\n f.write(':')\n f.write(str(value))\n f.write('\\n')\n \n f.close()\n \n return self", "title": "" }, { "docid": "96fbe9601f48056eafc57bddf7a431e2", "score": "0.60517126", "text": "def _save_data(save_to_file: str, save_mode: str, metadata: GlobalMetadata) -> None:\n\n with open(save_to_file, mode=save_mode) as out:\n out.write(str(metadata.datamart_id))\n out.write(\"\\n\")\n out.write(json.dumps(metadata.value))\n out.write(\"\\n\")", "title": "" }, { "docid": "b0c42c55074807fed6ab6bdcfb7a1b3f", "score": "0.6049688", "text": "def save(self):\n with open(self.file_path, 'w') as f:\n json.dump(self.data, f)", "title": "" } ]
00c417c15b524e968fdfa3a8835f51cd
Updates the config with data from the cache
[ { "docid": "513e60e2aad3bfe65b8444e2c411f1ae", "score": "0.7814324", "text": "async def update(self, data=None):\n if data is not None:\n self.cache.update(data)\n await self.api.update_config(self.cache)", "title": "" } ]
[ { "docid": "a75a38ce910e27ffeae5c022e27e18c4", "score": "0.786449", "text": "def update_cache(self):\n pass", "title": "" }, { "docid": "d29532092cd1e14fedd9af2eb790a4c7", "score": "0.7683715", "text": "async def update(self):\n await self.bot.api.update_config(self.filter_default(self._cache))", "title": "" }, { "docid": "c0e77216d25b736637d9269c8470c4c6", "score": "0.7678912", "text": "def update_cache(self):\n raise NotImplementedError", "title": "" }, { "docid": "99fae66914559e99990685c4a833c1e9", "score": "0.764164", "text": "def _update_cache(self):\n pass", "title": "" }, { "docid": "33cf0eff6b3c1b0328aa1f8f213b48bf", "score": "0.7552436", "text": "async def refresh(self):\n data = await self.api.get_config()\n self.cache.update(data)\n self.ready_event.set()", "title": "" }, { "docid": "666547b788caaa7e49f18d9d2b122169", "score": "0.7223693", "text": "def update_cache(self):\n self._cache_updated = True", "title": "" }, { "docid": "de5c902220ec1f758ae1e643adf03633", "score": "0.71002644", "text": "def update_cache(self):\n if not self.use_cache:\n with open(self.cache_name, \"w\") as file_cache:\n json.dump(self.cache, file_cache)", "title": "" }, { "docid": "e3ccd88e4fccb03c579b559f6cdac4cc", "score": "0.6993365", "text": "async def update_config(self, ctx, data: dict):\n await self.db.find_one_and_update(\n {\"_id\": ctx.guild.id}, {\"$set\": data}, upsert=True\n )\n config = self._config_cache[ctx.guild.id]\n for key, value in data.items():\n config[key] = value\n\n self._config_cache[ctx.guild.id] = config", "title": "" }, { "docid": "6a085e52b48fbe54f0869b119632702a", "score": "0.6980284", "text": "async def refresh(self) -> dict:\n for k, v in (await self.bot.api.get_config()).items():\n k = k.lower()\n if k in self.all_keys:\n self._cache[k] = v\n if not self.ready_event.is_set():\n self.ready_event.set()\n logger.info(\"Successfully fetched configurations from database.\")\n return self._cache", "title": "" }, { "docid": "70e839d8cc555c935d0dc4386554d51b", "score": "0.684675", "text": "def _cache_server_config(self, data: dict) -> None:\n # Delete keys if they exist\n client = self._get_redis_client()\n client.delete(self.SERVER_CONFIG_CACHE_KEY)\n\n # Set data in cache. 
Any conversions here need to be reversed in the get_server_configuration()\n data['lfs_enabled'] = 'true' if data['lfs_enabled'] is True else 'false'\n client.hmset(self.SERVER_CONFIG_CACHE_KEY, data)", "title": "" }, { "docid": "710ad8436d56822a628d102bc81f146a", "score": "0.66896397", "text": "def _cache_auth_config(self, data: dict) -> None:\n # Delete keys if they exist\n client = self._get_redis_client()\n client.delete(self.AUTH_CONFIG_CACHE_KEY)\n\n # Set data in cache\n client.hmset(self.AUTH_CONFIG_CACHE_KEY, data)", "title": "" }, { "docid": "e75affc291afa5ec7017ee679556f2b7", "score": "0.66419554", "text": "async def update_cache(self) -> \"cache\":\n self._cache = await self.get(\"\") or dict()\n self.guild_minecraft_roles = {\n g: self._cache[\"guilds\"][g][\"role\"]\n for g in self._cache[\"guilds\"]\n if self._cache[\"guilds\"][g].get(\"role\")\n } if self._cache.get(\"guilds\") else dict()\n self.guild_server_ips = {\n g: self._cache[\"guilds\"][g][\"minecraft\"]\n for g in self._cache[\"guilds\"]\n if self._cache[\"guilds\"][g].get(\"minecraft\")\n } if self._cache.get(\"guilds\") else dict()\n\n self._requested = True\n return self._cache", "title": "" }, { "docid": "5211e4c30dfbfba744fa4338be299af8", "score": "0.65247273", "text": "def update_cache(self):\n with open(self.cache_file, 'w') as cache:\n cache_values = list(self.cache.values())\n if len(cache_values) > self.cache_size:\n json.dump(cache_values[-self.cache_size:], cache, default=GeoInfo.to_json)\n else:\n json.dump(cache_values, cache, default=GeoInfo.to_json)", "title": "" }, { "docid": "a4359d3691b5f64b60de6371539cfbcf", "score": "0.64972913", "text": "def update_from_cache(interface):\n\tcached = basedir.load_first_config(config_site, config_prog,\n\t\t\t\t\t 'interfaces', escape(interface.uri))\n\tif not cached:\n\t\treturn False\n\n\tinterface.reset()\n\tupdate(interface, cached)\n\tupdate_user_overrides(interface)\n\n\treturn True", "title": "" }, { "docid": "6918ade82b059d7e395fc96fb5a6c9cb", "score": "0.6476746", "text": "def __cache_request (self, data):\n log.debug(\"Cache generated 'edit-config' request...\")\n # self.__last_request = data.full_copy()\n # Copy reference instead of full_copy to avoid overhead\n self.__last_request = data", "title": "" }, { "docid": "a5f07b100ae3e62cefae1c81cb58fbdb", "score": "0.64663553", "text": "async def refresh(self):\n for ds in ['activedirectory', 'ldap', 'nis']:\n ds_state = await self.middleware.call(f'{ds}.get_state')\n if ds_state == 'HEALTHY':\n await self.middleware.call(f'{ds}.fill_cache', True)\n elif ds_state != 'DISABLED':\n self.logger.debug('Unable to refresh [%s] cache, state is: %s' % (ds, ds_state))\n await self.middleware.call('dscache.backup')", "title": "" }, { "docid": "308a827d12b6130d7b365d9410e265a2", "score": "0.64105165", "text": "def update_config(self, config):\n return config", "title": "" }, { "docid": "2a555dd408aa8e02194b6d45f2951fe1", "score": "0.63837177", "text": "def update(self, **kwargs):\n self.config.update(**kwargs)\n\n with open(self.path, \"w\") as buff:\n buff.write(json.dumps(self.config, sort_keys=True, indent=4))\n return self.config", "title": "" }, { "docid": "d2e7aaae68359bf53aad6c3bbcdfb147", "score": "0.6330594", "text": "def _reloadConfig():\n\tbase.caches.clearCaches()\n\n\troot.loadUserVanity(root.ArchiveService)\n\tconfig.makeFallbackMeta(reload=True)\n\tconfig.loadConfig()\n\n\tbase.ui.notifyInfo(\"Cleared caches on SIGHUP\")", "title": "" }, { "docid": "4b0b343ab1128c8455f0bc8e44a2c64b", "score": 
"0.63123107", "text": "def forcerefresh(self):\n #logging.info( \"loading settings from datastore to local cache\")\n qry=SettingStore.query()\n sets=qry.fetch(1000)# Return up to 1000 records\n self._lastloaded=datetime.utcnow()\n newsettings={}\n for set in sets:\n if set.enttype==\"int\":\n val=int(set.value)\n elif set.enttype==\"float\":\n val=float(set.value)\n elif set.enttype==\"boolean\":\n val=(set.value==True)\n elif set.enttype==\"string\":\n val=set.value\n else:\n val=json.loads(set.value)\n newsettings[set.keyname]=val\n self._settings=newsettings # replace the old settings\n #logging.info(\"Loaded new data into settings\")", "title": "" }, { "docid": "fc1e7e157b80e807ab5440efe0a33cce", "score": "0.6299515", "text": "def update_config(self):\n raise NotImplemented(\"This function is not implemented in this subclass\")", "title": "" }, { "docid": "0cd1d90c5068e7a8f452a2c61b690e79", "score": "0.62661153", "text": "def reload_cache():\n global _packages\n _packages = {}\n _load_package_config(True)", "title": "" }, { "docid": "83506ebc4e60b9e4b7af4a8776a12ebe", "score": "0.6240196", "text": "def update_config(self):\n with open(cwd + '/config/configuration.pickle', 'wb') as f:\n pickle.dump(self.config, f, pickle.HIGHEST_PROTOCOL)\n print('config updated')\n return self.config", "title": "" }, { "docid": "41846c41d24632bcf9a3e22ad92892c1", "score": "0.6196341", "text": "def update_config(self):\n \n self.config_handler.save_config()\n return True", "title": "" }, { "docid": "637b4bf61754e8bb3041b36faed58721", "score": "0.617664", "text": "def populate_cache(cls):\n pass", "title": "" }, { "docid": "703a6160adbb5542480b9f1b8b0d8946", "score": "0.6171471", "text": "def update(self, key, value):\n key = key.replace(\"\\n\", \"\")\n if key not in self.cache and len(self.cache) >= self.max_cache_size:\n self.remove_oldest()\n # print value\n self.cache[key] = {'date_accessed': datetime.datetime.now(),\n 'value': value}", "title": "" }, { "docid": "c01effc6cdbb43dcc662c3129463d64d", "score": "0.61697114", "text": "async def update():\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(source_url) as response:\n global cache\n cache = await parse(await response.text())\n except ut.GetErrors as e:\n ut.note('Failed to GET: ' + source_url)", "title": "" }, { "docid": "054980b11fe48b00e01ed7c70d78175e", "score": "0.6148622", "text": "def reload_cache_config(self):\r\n self._rest_inbound_socket.log.debug(\"RESTAPI ReloadCacheConfig with %s\" \\\r\n % str(request.form.items()))\r\n msg = \"ReloadCacheConfig Failed\"\r\n result = False\r\n\r\n try:\r\n cache_api_url = self.cache['url']\r\n except KeyError:\r\n msg = \"ReloadCacheConfig Failed -- CACHE_URL not found\"\r\n result = False\r\n return self.send_response(Success=result, Message=msg)\r\n\r\n try:\r\n req = HTTPRequest(auth_id=self.key, auth_token=self.secret)\r\n data = req.fetch_response(cache_api_url + '/ReloadConfig/', params={}, method='POST')\r\n res = json.loads(data)\r\n try:\r\n success = res['Success']\r\n msg = res['Message']\r\n except:\r\n success = False\r\n msg = \"unknown\"\r\n if success:\r\n msg = \"Plivo Cache Server config reloaded\"\r\n result = True\r\n self._rest_inbound_socket.log.info(\"ReloadCacheConfig Done\")\r\n else:\r\n raise Exception(msg)\r\n\r\n except Exception, e:\r\n msg = \"Plivo Cache Server config reload failed\"\r\n self._rest_inbound_socket.log.error(\"ReloadCacheConfig Failed -- %s\" % str(e))\r\n result = False\r\n\r\n return self.send_response(Success=result, 
Message=msg)", "title": "" }, { "docid": "74adc18d40b3c29efa05b3bb511a1504", "score": "0.6138389", "text": "def create_new_cache(self):\n self.cache = {\"data\": {}, \"last_update\": None}", "title": "" }, { "docid": "8af07cddf1b435f11048cb1b96a173e9", "score": "0.61253875", "text": "def reload(self):\n self._cache = dict(super().items())", "title": "" }, { "docid": "e3c8f5a76c7b2c54b7333355f54a1456", "score": "0.60885566", "text": "async def do_update(self, data):\n old = await self.config()\n\n new = old.copy()\n new.update(data)\n\n verrors = ValidationErrors()\n\n servers = data.get('isns_servers') or []\n for server in servers:\n reg = RE_IP_PORT.search(server)\n if reg:\n ip = reg.group(1)\n if ip and ip[0] == '[' and ip[-1] == ']':\n ip = ip[1:-1]\n try:\n ipaddress.ip_address(ip)\n continue\n except ValueError:\n pass\n verrors.add('iscsiglobal_update.isns_servers', f'Server \"{server}\" is not a valid IP(:PORT)? tuple.')\n\n if verrors:\n raise verrors\n\n new['isns_servers'] = '\\n'.join(servers)\n\n await self._update_service(old, new)\n\n if old['alua'] != new['alua']:\n await self.middleware.call('service.start', 'ix-loader')\n\n return await self.config()", "title": "" }, { "docid": "a6a14c621bdd34d04591ce73219cfc6c", "score": "0.6077641", "text": "def reload_config(self):\n\n pass", "title": "" }, { "docid": "49b46056a2ec4e84ace98ea2e71674fa", "score": "0.60755986", "text": "def updated_config(self, **kwargs) -> None:\n state = {}\n for key, value in kwargs.items():\n state[key] = getattr(self, key)\n setattr(self, key, value)\n try:\n yield\n finally:\n for key, value in state.items():\n setattr(self, key, value)", "title": "" }, { "docid": "c3bbddfd218ff6081edfd2cec3694903", "score": "0.60317516", "text": "def update_config():\n data_dir = os.path.dirname(os.path.realpath(__file__))\n data_dir = os.path.join(data_dir, '../data')\n cfg_file = os.path.join(data_dir, 'lydian.conf')\n get_configs().write_config_file(file_name=cfg_file)", "title": "" }, { "docid": "15abea622d1a7c07eb4af14412800d21", "score": "0.60176027", "text": "def save_to_cache(self, data: dict) -> None:\n # Save the configuration, only if it doesn't already exist, to deal with possibility of multiple start up\n # processes all loading the configuration for the first time.\n did_set = self._get_redis_client().setnx(self.CLIENT_CONFIG_CACHE_KEY, json.dumps(data))\n\n if did_set:\n logger.info(\"Saved Client configuration to cache.\")\n else:\n logger.info(\"Skipping saving Client configuration to cache due to configuration that is already set.\")", "title": "" }, { "docid": "ace5558df316c96a47fb7e54e7648cc8", "score": "0.60073966", "text": "def update_cfg(self,cfg):\n\n self._cfg = cfg", "title": "" }, { "docid": "33c19ef205ccb96a315d78e1777b52b0", "score": "0.5991477", "text": "def __cache_topology (self, data):\n log.debug(\"Cache received 'get-config' response...\")\n # self.__last_virtualizer = data.full_copy()\n # Copy reference instead of full_copy to avoid overhead\n self.__last_virtualizer = data", "title": "" }, { "docid": "e12d665c124a9dd233bc795c2626987e", "score": "0.59874004", "text": "def _read_cache(self):", "title": "" }, { "docid": "715add55a7d5fbf229266baecda374a7", "score": "0.5987221", "text": "def load(self):\n try:\n self.update(json.loads(self.config.read())) # B\n except py.error.ENOENT:\n pass", "title": "" }, { "docid": "85b4d75932032968429b0d836b842421", "score": "0.59772277", "text": "def refetch_auth_config(self):\n server_data = self._load_current_configuration()\n\n url = 
server_data['server'].get(\"auth_config_url\")\n if not url:\n # The auth_config_url wasn't persisted, so we assume gigantum hub format\n url = server_data['server']['base_url'] + \".well-known/auth.json\"\n\n # re-fetch\n try:\n response = requests.get(url)\n\n if response.status_code != 200:\n logger.error(f\"Failed to re-fetch auth configuration. No change applied: {response.status_code}\")\n return\n except Exception as err:\n logger.error(f\"Failed to re-fetch auth configuration. No change applied. Error: {err}\")\n return\n\n # All good, Parse and persist\n data = response.json()\n auth_config = dict_to_auth_config(data)\n\n server_data_file = self.get_server_config_file(server_data['server']['id'])\n server_data['auth'] = auth_config.to_dict()\n with open(server_data_file, 'wt') as f:\n json.dump(server_data, f, indent=2)\n\n # Reload the cache\n self._get_redis_client().delete(self.AUTH_CONFIG_CACHE_KEY)\n self.get_auth_configuration()", "title": "" }, { "docid": "b64535720d15fe8abb88c384c96caa79", "score": "0.5974665", "text": "def copy_cache_to_data(self, cache):\n self.data.setdefault(cache[\"key\"], {}).update(cache[\"values\"])", "title": "" }, { "docid": "63c77dc3dc116cede1907c06c1c5ed32", "score": "0.5964161", "text": "def __init__(self, config):\r\n self.cache = CacheManager(**parse_cache_config_options(config))", "title": "" }, { "docid": "16e8cbf9f12275c2d4eb6e8140766b5f", "score": "0.59638906", "text": "def Set(self, cache_key, value):", "title": "" }, { "docid": "5692b548ddeedb747d15846435196c22", "score": "0.594958", "text": "def reload_conf():\n global conf_cache\n conf_cache = None\n return get_config()", "title": "" }, { "docid": "f97b28842fe19ec0f36a6da0f853b67f", "score": "0.59491694", "text": "def update(self):\n\t\ttry:\n\t\t\tfor key, val in self.data.items():\n\t\t\t\tif key == \"port\":\n\t\t\t\t\tself.SERVER_PORT = val\n\t\t\t\telif key == \"key\":\n\t\t\t\t\tself.KEY = val\n\t\t\t\telif key == \"max_conn\":\n\t\t\t\t\tself.MAX_CLIENTS = int(val)\n\t\t\t\telif key == \"close_conn\":\n\t\t\t\t\tself.DISCONNECT_CLIENT_TIME = int(val)\n\t\t\t\telif key == \"close_port\":\n\t\t\t\t\tself.DISCONNECT_SESSION_TIME = int(val)\n\t\t\t\telif key == \"pcap\":\n\t\t\t\t\tself.PCAP_TRAFFIC = to_bool(val)\n\t\t\t\telif key == \"unsecure\":\n\t\t\t\t\tself.ALLOW_UNSECURE_CONNECTION = to_bool(val)\n\t\t\t\telif key == \"max_log\":\n\t\t\t\t\tself.MAX_LOG = int(val)\n\t\t\t\telif key == \"username\":\n\t\t\t\t\tself.USERNAME = val\n\t\t\t\telif key == \"password\":\n\t\t\t\t\tself.PASSWORD = val\n\t\texcept:\n\t\t\tdebug.debug(\"[Error] Bad config file format.\", level=debug.ERROR)", "title": "" }, { "docid": "667d5c5f69e44535558d7ec9bac41c9b", "score": "0.5942972", "text": "def refresh(self):\n self._data_cache = None", "title": "" }, { "docid": "115857a06b22c5bf55719bda4b51729f", "score": "0.59419745", "text": "def _update(self):\n mod_time = os.stat(self._cnf_filename)[stat.ST_MTIME]\n if os.path.exists(self._cnf_filename) and self.last_updated != mod_time:\n self._conf_handle = ConfigParser()\n self._conf_handle.read(self._cnf_filename)\n self.last_updated = mod_time", "title": "" }, { "docid": "9790f4001b411509e0e8a52e1ca4413a", "score": "0.5940468", "text": "def _write_cache(self):", "title": "" }, { "docid": "1f327aa8589c2790b80bf9beb0844c3c", "score": "0.5934752", "text": "def _rebuild_cache(self):\n self.cache = {}\n self._build_cache_from_corpus()", "title": "" }, { "docid": "1ba708f4e6ea218226a5d4c3db7f94f7", "score": "0.5931864", "text": "def 
load(self):\n\t\ttry:\n\t\t\tself.update(json.loads(self.config.read())) # B\n\t\texcept py.error.ENOENT:\n\t\t\tpass", "title": "" }, { "docid": "51f70772142cd1c802baca9f6d8b6202", "score": "0.5931201", "text": "def update_config_cache(conf: Dict, changes: List[Change]) -> None:\n for c in changes:\n if isinstance(c, ChangeCreated):\n libyang.xpath_set(conf, c.xpath, c.value, after=c.after)\n elif isinstance(c, ChangeModified):\n libyang.xpath_set(conf, c.xpath, c.value)\n elif isinstance(c, ChangeMoved):\n libyang.xpath_move(conf, c.xpath, c.after)\n elif isinstance(c, ChangeDeleted):\n libyang.xpath_del(conf, c.xpath)", "title": "" }, { "docid": "1535af025f4317767a8b72874d3b99f4", "score": "0.5923526", "text": "def update(self, key):\n if key not in self.cache and len(self.cache) >= self.max_cache_size:\n self.remove_oldest()\n \n self.cache[key] = {'date_stored': datetime.datetime.now()}", "title": "" }, { "docid": "5d45b249bda43c63c92783e450924113", "score": "0.5901761", "text": "def load(self):\n with open(self.CACHE_DIR + self.file_name, 'r') as data_file: \n self.cache = json.load(data_file)", "title": "" }, { "docid": "ffba25bee92c049178635ce3fa697e1e", "score": "0.5889064", "text": "def reload_configs(self):\n\n self.logger.debug('reloading configurations')\n\n alterations = {}\n\n string_keys = {\n 'name': 'name',\n 'admin_email': 'admin_email',\n 'currency': 'currency'\n }\n\n bool_keys = {\n 'login_suport': 'login_suport',\n 'ticket_suport': 'ticket_suport',\n 'default_welcome_msg': 'default_welcome_msg',\n 'close_apps': 'close_apps',\n 'use_logo': 'logo',\n 'use_background': 'background',\n 'inf_login': 'inf_login'\n }\n\n int_keys = {\n 'finish_action': 'finish_action',\n 'finish_action_time': 'finish_action_time',\n }\n\n for name in string_keys.keys():\n value = self.conf_client.get_string(string_keys[name])\n\n if (value != None and ((not(name in self.information)) or (name in self.information) and (self.information[name] != value))):\n self.information[name] = value\n alterations[name] = value\n\n for name in bool_keys.keys():\n value = self.conf_client.get_bool(bool_keys[name])\n\n if self.information[name] != value:\n self.information[name] = value\n alterations[name] = value\n\n for name in int_keys.keys():\n value = self.conf_client.get_int(int_keys[name])\n if (self.information[name] != value) and (value != None):\n self.information[name] = value\n alterations[name] = value\n\n value = self.conf_client.get_float('price_per_hour')\n if self.information['price.hour'] != value:\n self.information['price.hour'] = value\n alterations['price.hour'] = value\n\n if not self.information['default_welcome_msg']:\n\n value = self.conf_client.get_string('welcome_msg')\n\n if self.information.has_key('welcome_msg'):\n if self.information['welcome_msg'] != value:\n self.information['welcome_msg'] = value\n alterations['welcome_msg'] = value\n else:\n self.information['welcome_msg'] = value\n alterations['welcome_msg'] = value\n\n # close apps\n value = self.conf_client.get_string_list(\"close_apps_list\")\n if self.information['close_apps_list'] != value:\n self.information['close_apps_list'] = value\n alterations['close_apps_list'] = value\n\n value = self.conf_client.get_string('background_path')\n if self.common_background != value:\n self.common_background = value\n self.common_background_md5 = md5_cripto(open(self.common_background).read())\n self.instmachine_manager.update_common_backgroud(self.common_background_md5)\n\n value = self.conf_client.get_string('logo_path')\n if 
self.common_logo != value:\n self.common_logo = value\n self.common_logo_md5 = md5_cripto(open(self.common_logo).read())\n self.instmachine_manager.update_common_logo(self.common_logo_md5)\n\n if alterations:\n self.instmachine_manager.update_information(alterations)", "title": "" }, { "docid": "4b59732357a7165895dc002303d0efde", "score": "0.5880726", "text": "def reset_cache(self):", "title": "" }, { "docid": "3db346d7a816aa64fec11089a1dfd608", "score": "0.5878871", "text": "def cache(conan_api: ConanAPI, parser, *args):\n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.58723795", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.58723795", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.58723795", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.58723795", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.58723795", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.58723795", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.58723795", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.5869458", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.5869458", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.5869458", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.5869458", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "95358def39e8da5696324e980f3a0fcf", "score": "0.5869458", "text": "def setCaching(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "42c896ec7df359ff715f801abed825b0", "score": "0.5857698", "text": "def reload_config(self):\n #self._client.reload_config()\n pass", "title": "" }, { "docid": "ab94f30207484a1ac69e00f22d251fbf", "score": "0.5842765", "text": "def update(self, config_dict):\n self._update(config_dict, allow_new_keys=True)", "title": "" }, { "docid": "d78b065a5dbc6323f0c1138178f7d63b", "score": "0.5834923", "text": "def load(self):\n filepath = Path(self.config).resolve()\n if filepath.exists():\n with open(self.config, 'r') as f:\n self.update(json.load(f))", "title": "" }, { "docid": "1d5c6fcd88f2069fa90ed30b732f6162", "score": "0.58169216", "text": "def load_cache(self):\n try:\n with open(self.cache_name, \"r\") as file_cache:\n cache_content = file_cache.read()\n except FileNotFoundError:\n with open(self.cache_name, \"w\") as file_cache:\n cache_content = \"\"\n\n # transform cache to json object, handle cache case\n try:\n self.cache = json.loads(cache_content)\n except ValueError:\n self.cache = {\n \"last_mod\": \"1900-01-01 00:00:00.0\",\n \"current_weather\": {},\n \"forecast_weather\": {},\n }\n\n # validate last modification < 1 hour\n delta = datetime.now() - 
parse(self.cache[\"last_mod\"])\n delta_to_hour = (delta.days * 24) + (delta.seconds / 3600)\n self.use_cache = delta_to_hour < 1", "title": "" }, { "docid": "23b53223472038f0d066ae95a02fd87f", "score": "0.5808317", "text": "def flush_cache(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e4ae86fc9257b79572f2b99782a6801e", "score": "0.5798737", "text": "def cache(self):\n\n pargs = ['cache']\n return self.hc.run_command(self, pargs)", "title": "" }, { "docid": "71ef7e646f53120f1ea5c5e21c50edbf", "score": "0.5788758", "text": "def update_cache(self, query: QueryType, result: Iterator[BaseInterfaceModel]):", "title": "" }, { "docid": "b902233a4e7fab0239edc34cafc3acfe", "score": "0.57723135", "text": "def update_cache(self, cat, img_path):\r\n try:\r\n shutil.copy(img_path, os.path.join(os.path.dirname(__file__), '..', '_cache_'))\r\n except (IOError, OSError) as ex:\r\n print(ex)\r\n print('ERROR in update_cache() ' +\r\n 'Cannot copy to _cache_ directory, make sure there ' +\r\n 'is enough space on disk')\r\n sys.exit(1)\r\n\r\n cache_img_path = os.path.join(os.path.dirname(__file__), '..', '_cache_',\r\n os.path.basename(img_path))\r\n\r\n zope.event.notify(CacheAddEvent(cat, cache_img_path))\r\n self.cache.append((cat, cache_img_path))", "title": "" }, { "docid": "3ebf1b449eeb5f0f8a803211345e7c4d", "score": "0.57661843", "text": "def setCache(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "3ebf1b449eeb5f0f8a803211345e7c4d", "score": "0.57660276", "text": "def setCache(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "c6c98126bce2e0e1780f2d8fc5927fb6", "score": "0.5758702", "text": "def update(self, config, merge=False):\n\n self.config = {**self.config, **config} if merge else config\n self.dataset = dataset_model(self.config)\n return self", "title": "" }, { "docid": "5da7b702af7fe7d33ec50105f9fe89e2", "score": "0.5742303", "text": "def update_config(self, update_dict, reload=False):\n self.config.update(update_dict)\n # set config temp_folder to wksp for brevity\n if \"temp_folder\" in update_dict.keys():\n self.config[\"wksp\"] = update_dict[\"temp_folder\"]\n self.validate_config()\n if reload:\n self.data = util.load_tables(self.config)", "title": "" }, { "docid": "556966817bde09e6fad0a7a6a56f2e92", "score": "0.5722837", "text": "def _build_cache(self):\n print('Building cache...')\n assert self.cache == {}\n if self.data:\n print('loading data from json')\n self.load_saved_cache()\n else:\n print('from states')\n self._build_cache_from_corpus()\n self._save_cache()", "title": "" }, { "docid": "9ac2e1ef6186cf332e2fc92f3cbcc8a5", "score": "0.571785", "text": "def update_configs():\n global blacklist_dict\n global whitelist_dict\n blacklist_dict = update_config_dict('blacklist.conf', blacklist_dict)\n whitelist_dict = update_config_dict('whitelist.conf', whitelist_dict)", "title": "" }, { "docid": "6791a9925d2545cf9e16e5b43e235627", "score": "0.57126206", "text": "def use_cache(self,**kwargs):\n\t\tself.set_meta_file_name( kwargs.get(\"meta_file_name\",\"\"))\n\t\toverwrite_meta_by_centos = kwargs.get(\"overwrite_meta_by_centos\",False)\n\t\tself.store_allKeys = kwargs.get(\"store_allKeys\",False)\n\t\tself.ignore_invalid_mem = kwargs.get(\"ignore_invalid_mem\",False)\n\t\toverwrite_memory = kwargs.get(\"overwrite_memory\",False)\n\t\tset_to_be_valid = kwargs.get(\"set_to_be_valid\",False)\n\t\tignore_lock = kwargs.get(\"ignore_lock\",False)\n\t\tcheck_pref_is_short = kwargs.get(\"check_pref_is_short\",True)\n\t\tif not 
self.overwrite_prefix:\n\t\t\tself.overwrite_prefix = kwargs.get(\"overwrite_prefix\",False)\n\t\tloading_msg = kwargs.get(\"loading_msg\",\"\")\n\t\tparams = kwargs.get(\"params\",{}) # if you want to change params\n\t\tif \"params\" in kwargs:\n\t\t\tprint(\"[CM] params update these:\",params)\n\n\t\tif not os.path.exists(self.meta_file_abs_path):\n\t\t\tif self.my_platform!=\"centos\":\n\t\t\t\tr = requests.get(url=FILE_SERVER_GET+\"cache_meta\", params={\"meta_file_name\":self.meta_file_name})\n\t\t\t\tmeta= r.json() \n\t\t\t\t# TODO not using json, changed syncdir/file-server/app.py: get_file()\n\t\t\t\tif \"not-found\" in meta['status']:\n\t\t\t\t\traise Exception(\"Not exists? \"+self.meta_file_name)\n\t\t\t\tmeta.pop('status')\n\t\t\t\ttmpdir = os.sep.join(self.meta_file_abs_path.split(os.sep)[0:-1])\n\t\t\t\tif not os.path.exists(tmpdir): \n\t\t\t\t\tos.makedirs(tmpdir)\n\t\t\t\twith open(self.meta_file_abs_path, 'wb') as f: # TODO not using json\n\t\t\t\t\tjson.dump(meta, codecs.getwriter('utf-8')(f), ensure_ascii=False)\n\t\t\t\tif iprint: print(\"Copied from centos: \"+self.meta_file_abs_path)\n\t\t\telse:\n\t\t\t\traise Exception(\"Not exists? \"+self.meta_file_name)\n\t\telse: # already exists locally:\n\t\t\tif self.my_platform!=\"centos\" and overwrite_meta_by_centos:\n\t\t\t\tr = requests.get(url=FILE_SERVER_GET+\"cache_meta\", params={\"meta_file_name\":self.meta_file_name})\n\t\t\t\tmeta= r.json()\n\t\t\t\tif not \"not-found\" in meta['status']:\n\t\t\t\t\tmeta.pop('status')\n\t\t\t\t\twith open(self.meta_file_abs_path, 'wb') as f:# TODO not using json\n\t\t\t\t\t\tjson.dump(meta, codecs.getwriter('utf-8')(f), ensure_ascii=False)\n\t\t\t\t\tif iprint: print(\"Copied from centos: \"+self.meta_file_abs_path)\n\t\ttry: \n\t\t\tmeta = pickle.load(open(self.meta_file_abs_path, 'rb'))\n\t\texcept: # TODO not using json\n\t\t\tmeta = json.load(open(self.meta_file_abs_path, 'rb'))\n\t\tmeta[\"params\"]=pickle.loads(str(meta[\"params\"]))\n\t\tmeta[\"params\"].update(params)\n\n\t\tif self.overwrite_prefix:\n\t\t\tmeta[\"params\"]['overwrite_prefix'] = True\n\t\t\n\t\tmeta[\"params\"][\"overwrite_memory\"]=overwrite_memory\n\t\tmeta[\"params\"][\"ignore_lock\"]=ignore_lock\n\t\tif self.rt_servers: \n\t\t\tmeta[\"params\"][\"rt_servers\"]=True\n\t\tif self.overwrite_redis_servers:\n\t\t\tmeta[\"params\"][\"overwrite_servers\"]=True\n\n\t\tif set_to_be_valid:\n\t\t\tprint(\"force set to be valid !\")\n\t\t\tself.set_mem_valid( meta[\"params\"] )\n\t\t\treturn \n\t\tret = self.activate_mem( meta[\"params\"] )\n\t\tif (ret==\"invalid\" and not self.ignore_invalid_mem) or overwrite_memory:\n\t\t\tyield_func = dill.loads(meta[\"yield_func\"]) if \"yield_func\" in meta else None\n\t\t\tyield_args = meta[\"yield_args\"] if \"yield_args\" in meta else None\n\t\t\tif yield_func is None or yield_args is None:\n\t\t\t\tif iprint: print(\"Redo load_cache_file_into_mem() ...\")\n\t\t\t\tself.load_cache_file_into_mem( meta[\"cache_file_abs_path\"], meta[\"params\"],self.store_allKeys, msg = loading_msg)\n\t\t\telse:\n\t\t\t\tif iprint: print(\"RUN yield_func() \"+yield_args)\n\t\t\t\tkv_action_type = meta[\"kv_action_type\"] if \"kv_action_type\" in meta else None\n\t\t\t\tself.yield_file_into_mem( yield_func, yield_args, meta[\"params\"], kv_action_type=kv_action_type, msg = loading_msg )\n\t\t\tprint( self.activate_mem( meta[\"params\"] ) )\n\t\tself.meta = meta\n\t\tif check_pref_is_short:\n\t\t\tif len(self.mm.prefix)>4: # support more prefixes if > 
more.\n\t\t\t\tprint(self.mm.prefix)\n\t\t\t\tprint(\" :prefix too long, not overwrite_prefix?\")\n\t\t\t\tsys.exit(0) # You Edit", "title": "" }, { "docid": "b49d7855cbe5a82a4960a4fdbccc18ee", "score": "0.5708811", "text": "def _flush_cache(self):\n self.__last_update = datetime.datetime(1970, 1, 1)", "title": "" }, { "docid": "83092e50f0f37cae51f6928a20796ea1", "score": "0.570268", "text": "def update(self, settings):\n pass", "title": "" }, { "docid": "31582ef7cf51573e1640da901ba3d5a6", "score": "0.5700119", "text": "def UpdateCache(self, prefix, results, partial_results):\n self.prefix = prefix\n self.results = results\n self.partial_results = partial_results\n self.timestamp = time.time()", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56960064", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56960064", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56960064", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56960064", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56960064", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56960064", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56950074", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56950074", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56950074", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56950074", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56950074", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "d76f803b96f5aec84e1c9f1f4b3b11a2", "score": "0.56950074", "text": "def getCaching(self, **kwargs):\n \n pass", "title": "" }, { "docid": "4f9adade55c313a79b25bd2c29bb7ae8", "score": "0.56866056", "text": "def load(self):\n\t\ttry:\n\t\t\twith open(self.config, 'r') as f:\n\t\t\t\tself.update(json.loads(f.read()))\n\t\texcept errno.ENOENT:\n\t\t\tprint \"ENOENT error\"\n\t\t\tpass\n\t\texcept Exception as e:\n\t\t\tprint e", "title": "" }, { "docid": "e68610b32df21910832c10c253c55f77", "score": "0.5683001", "text": "def update_cachegroups(self, cache_group_id=None, data=None):", "title": "" } ]
95d80d6dfef902c68f1348978cb4d026
Read stock data (adjusted close) for given symbols from CSV files.
[ { "docid": "85cde913e57d414b2527d791f792b1de", "score": "0.6046349", "text": "def get_data(symbols, dates):\r\n df = pd.DataFrame(index=dates)\r\n for symbol in symbols:\r\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',\r\n parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])\r\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\r\n df = df.join(df_temp)\r\n df = df.dropna(subset=[\"IBM\"])\r\n return df", "title": "" } ]
[ { "docid": "a8d8645fcc925ecee2412ed9a3db1768", "score": "0.7471986", "text": "def _open_csv(self):\n combined_index = None\n for symbol in self.symbol_list:\n path = self.csv_path + '/{}.csv'.format(symbol)\n self.symbol_data[symbol] = pd.read_csv(\n path, header=0, index_col=0, parse_dates=True,\n names = [\n 'datetime', 'high', 'low', 'open', 'close', 'volume', 'adj_close'\n ]\n )\n\n if combined_index is None:\n combined_index = self.symbol_data[symbol].index\n else:\n combined_index.union(self.symbol_data[symbol].index)\n\n self.latest_symbol_data[symbol] = []\n\n # todo time index combine\n for symbol in self.symbol_list:\n self.symbol_data[symbol] = self.symbol_data[symbol].reindex(\n index=combined_index, method='pad').iterrows()", "title": "" }, { "docid": "cebdfe3e967af44cda092bcba193893a", "score": "0.7000566", "text": "def load(self, path):\n try:\n with open(join(path, self.name + \"_data.csv\"), 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n csvHeader = next(reader)\n for row in reader:\n try:\n currOpen = float(row[1])\n currClose = float(row[4])\n day = datetime.strptime(row[0], \"%Y-%m-%d\").strftime(\"%d.%m.%Y\")\n self.data[day] = (int(np.sign(currClose-currOpen)), currOpen)\n except ValueError:\n logging.warn(\"Could not interpret day \" + row[0])\n except Exception as ex:\n logging.error(\"Could not get the stock data. \" + ex)", "title": "" }, { "docid": "aa81dd27973ddfc80be8cfc573767fd6", "score": "0.69686425", "text": "def _data_conversion_from_csv_files(self):\r\n\r\n combined_index = None\r\n for symbol in self.symbol_list:\r\n # Load the CSV file with no header information, indexed on date\r\n self.symbol_data[symbol] = pd.io.parsers.read_csv(\r\n os.path.join(self.csv_dir, \"%s.csv\" % symbol),\r\n header=0, index_col=0,\r\n names=[\"datetime\", \"open\", \"high\", \"low\", \"close\", \"adj_close\", \"volume\"]\r\n )\r\n\r\n # Combine the index to pad forward values\r\n if combined_index is None:\r\n combined_index = self.symbol_data[symbol].index\r\n else:\r\n combined_index.union(self.symbol_data[symbol].index)\r\n\r\n # Set the latest symbol_data to None\r\n self.latest_symbol_data[symbol] = []\r\n\r\n # Reindex the dataframes\r\n for symbol in self.symbol_list:\r\n self.symbol_data[symbol] = self.symbol_data[symbol].reindex(index=combined_index, method=\"pad\").iterrows()", "title": "" }, { "docid": "99c87d792d6b9ffce8d7fe22020e8902", "score": "0.6741852", "text": "def read_csv(filename):\r\n # frame = pd.read_csv('C:/Users/tzagk/Downloads/Stocks/' + filename, dtype=size_dict, usecols=use_cols, header=0,\r\n # index_col=\"Date\", parse_dates=True)\r\n frame = pd.read_csv('C:/Users/tzagk/Downloads/Stocks/' + filename, dtype=size_dict, header=0,\r\n index_col=\"Date\", parse_dates=True)\r\n all_days = pd.date_range(frame.index.min(), frame.index.max(), freq='D')\r\n frame2 = frame.reindex(index = all_days, method = 'nearest')['Close'] \r\n return [filename, frame, frame.index[0]], [filename,frame2]", "title": "" }, { "docid": "021e2d5e737135db4df84798ac8f8d14", "score": "0.66182214", "text": "def read_csv_file(self):\n try:\n with open('Lesson6_Data_Stocks.csv', 'r') as stock_file:\n reader = csv.reader(stock_file)\n next(reader)\n for row in reader:\n self.stocks[row[0]] = {'NO_SHARES': row[1]}\n except FileNotFoundError as e:\n print('File not found at the given location, Check the Path!')\n return self.stocks", "title": "" }, { "docid": "e1d1eef665aa474c74f4f10780e078a7", "score": "0.6386013", "text": "def 
get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n df_temp = pd.read_csv(\"data/{}.csv\".format(symbol), index_col='Date',\n parse_dates=True, usecols=['Date', 'Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns={'Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': # drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n\n return df", "title": "" }, { "docid": "c363f4c9a982b697c3635760e9e94e72", "score": "0.6263173", "text": "def get_weather_data(filename, dates, highs, lows, date_index, high_index, low_index):\n\n with open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n\n # Get, dates, highs, lows from this file\n for row in reader:\n current_date = datetime.strptime(row[date_index], '%Y-%m-%d')\n try:\n high = int(row[high_index])\n low = int(row[low_index])\n except ValueError:\n print(f\"Missing data from {current_date}\")\n else:\n dates.append(current_date)\n highs.append(high)\n lows.append(low)", "title": "" }, { "docid": "980c25a602a3c534b071696909b18188", "score": "0.6246628", "text": "def load_from_csv(self, symbol, path=''):\n if path == '':\n path = '../data/raw/{}.csv'.format(symbol)\n else:\n path = os.path.join(path, '{}.csv'.format(symbol))\n return pd.read_csv(path)", "title": "" }, { "docid": "c06f0d9fda7b08cdb07c76cd25c46866", "score": "0.6215058", "text": "def load_base_stocks():\n\n stocks = {}\n\n with open(STOCKS_FILE) as d:\n data = csv.reader(d)\n\n for row in data:\n\n values = dict(zip(KEYS, row))\n _cast_values(values)\n stock = Stock(**values)\n\n stocks[stock.symbol] = stock\n\n return stocks", "title": "" }, { "docid": "de52ce87548ca4e0e1d04367a9162c02", "score": "0.61609524", "text": "def read(csv='./tech.csv', metadata='./metadata.json'):\n\n metadata = pd.read_json(metadata)\n columns = metadata.datatable['columns']\n headings = [heading['name'] for heading in columns]\n stocks = pd.read_csv(\n filepath_or_buffer=csv, header=None, names=headings)\n\n return stocks", "title": "" }, { "docid": "98359bce9097c6cd08a384ff2bc65d81", "score": "0.6145918", "text": "def _load_data_from_Yahoo_finance(self):\r\n\r\n combined_index = None\r\n for symbol in self.symbol_list:\r\n\r\n # download data from yfinance for symbol. 
This could be improved as yfinance can download several\r\n # symbols at the same time\r\n self.symbol_data[symbol] = yf.download(tickers=[symbol], start=self.start_date,\r\n end=self.end_date, interval=self.interval)\r\n\r\n # rename columns for consistency\r\n self.symbol_data[symbol].rename(columns={'Open': 'open',\r\n 'High': 'high',\r\n 'Low': 'low',\r\n 'Close': 'close',\r\n 'Adj Close': 'adj_close',\r\n 'Volume': 'volume'}, inplace=True)\r\n\r\n # rename index as well from 'Date' to 'datetime'\r\n self.symbol_data[symbol].index.name = 'datetime'\r\n\r\n # create returns column (used for some strategies)\r\n self.symbol_data[symbol]['returns'] = self.symbol_data[symbol][\"adj_close\"].pct_change() * 100.0\r\n\r\n # Combine the index to pad forward values\r\n if combined_index is None:\r\n combined_index = self.symbol_data[symbol].index\r\n else:\r\n combined_index.union(self.symbol_data[symbol].index)\r\n\r\n # Set the latest symbol_data to None\r\n self.latest_symbol_data[symbol] = []\r\n\r\n # Reindex the dataframes\r\n for symbol in self.symbol_list:\r\n self.symbol_data[symbol] = self.symbol_data[symbol].reindex(index=combined_index, method=\"pad\").iterrows()", "title": "" }, { "docid": "b5a4c999b8d4992a563f69c8900a5ca7", "score": "0.61391765", "text": "def get_stock_data(stock_symbol):\n\n api_url = API_URL.replace('stock_symbol', stock_symbol)\n response = requests.get(api_url)\n if response.status_code == 200:\n decoded_content = response.content.decode('utf-8')\n csv_list = list(csv.reader(\n decoded_content.splitlines(), delimiter=','))\n close = csv_list[ROW][CLOSE_COLUMN]\n symbol = csv_list[ROW][SYMBOL_COLUMN]\n try:\n float(close)\n return f'{symbol} quote is ${close} per share'\n except:\n return None\n return None", "title": "" }, { "docid": "ffdec9a7276578a41f5ffdd9163da0fa", "score": "0.61227685", "text": "def run(csv='./tech.csv', metadata='./metadata.json'):\n stocks = read(csv, metadata)\n print(\"STOCKS READ********************************************\")\n stocks = adjust(stocks)\n print(\"DATAFRAME COLUMNS ADDED********************************\")\n stocks = set_index(stocks)\n print(\"DATAFRAME INDEX SET************************************\")\n stocks = past(stocks)\n print(\"DATAFRAME FILLED OUT***********************************\\a\\a\")\n return stocks", "title": "" }, { "docid": "2cce38414e94d76e0b9e9b7f3447d5a6", "score": "0.60740167", "text": "def get_data(symbols, dates):\n df = pd.DataFrame(index = dates)\n\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n\n dfTemp = pd.read_csv(symbol_to_path(symbol),\n index_col=\"Date\",\n parse_dates=True,\n usecols=['Date', 'Adj Close'],\n na_values=['nan'])\n\n dfTemp = dfTemp.rename(columns={'Adj Close' : symbol})\n df = df.join(dfTemp)\n\n if symbol == 'SPY': # drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n\n return df", "title": "" }, { "docid": "be86e0c17e8087ab94ea6f54dc9aa3e2", "score": "0.6033021", "text": "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n \n for symbol in symbols:\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',\n parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': # drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n return df", "title": "" }, { 
"docid": "cd2d7064f5fd92db9bdedd81d293c7ab", "score": "0.6030075", "text": "def read_csv(filepath):\n symbols = []\n with open(filepath, 'rb') as csvfile:\n spamreader = csv.DictReader(csvfile, delimiter=',', quotechar='\"')\n for row in spamreader:\n symbols.append(row)\n return symbols", "title": "" }, { "docid": "d00913eb61f40235634a5e6510f8db8a", "score": "0.6020719", "text": "def LoadDataFromCSV(csvFile):\r\n\tdata = pd.read_csv(csvFile)\r\n\tx = data.iloc[:, 0:315] # select columns 1 through end\r\n\ty = data.iloc[:, 315] # select column 0, the stock price\r\n\r\n\treturn x,y", "title": "" }, { "docid": "92167a65083c44eb19259e448f09bb50", "score": "0.6019517", "text": "def get_yahoo_finance_data(symbol, start_date=None, end_date=None, remove_zero_volume=True):\n data_list = [('s', symbol)]\n if hasattr(start_date, 'split'):\n start_date = str2datetime(start_date)\n\n if hasattr(end_date, 'split'):\n end_date = str2datetime(end_date)\n\n if start_date:\n data_list.append(('a', start_date.month - 1))\n data_list.append(('b', start_date.day))\n data_list.append(('c', start_date.year))\n if end_date:\n data_list.append(('d', end_date.month - 1))\n data_list.append(('e', end_date.day))\n data_list.append(('f', end_date.year))\n data_list.append(('g', 'd'))\n data_list.append(('ignore', '.csv'))\n\n url = \"http://chart.finance.yahoo.com/table.csv\"\n stock_info = get(url=url, data_list=data_list)\n stock_data = StringIO(stock_info)\n stock_df = pd.read_csv(stock_data)\n stock_df['Date'] = stock_df['Date'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d'))\n stock_df = stock_df.set_index('Date').sort_index()\n\n if remove_zero_volume:\n return stock_df[stock_df['Volume'] > 0]\n else:\n return stock_df", "title": "" }, { "docid": "2bc6e86f7f63e952000799e3c8fa1ef5", "score": "0.60183865", "text": "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',\n parse_dates=True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': # drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n\n return df", "title": "" }, { "docid": "af11bb80d467de1a339224ba1cfa7972", "score": "0.5999453", "text": "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # Add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n df_temp = pd.read_csv(symbol_to_path(symbol),index_col='Date',\n parse_dates=True, usecols = [\"Date\", \"Adj Close\"], na_values = ['nan'])\n # Rename to prevent clash\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\n df = df.join(df_temp) # use default how = 'left'\n\n if symbol == 'SPY': # drop dates on which SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n\n return df", "title": "" }, { "docid": "c14378685d8bafdf8db2268e85fdaabc", "score": "0.59816164", "text": "def _parse_ticker_symbols(self):\n for files in os.listdir(path=self.directory):\n with open(self.directory+\"\\\\\"+files) as ticker_list:\n next(ticker_list) #skip header row\n for line in ticker_list:\n line = line[0:len(line)-1] #omit newline charachter\n pair = line.split(sep=\"\\t\")\n if re.search(\"^([A-z]{1,6}$)\",pair[0]) is None: #removes assets which are not common stocks from push to db.\n continue\n 
self.data[pair[0]]={\"name\":pair[1],\"lastUpdated\":date.today().isoformat(),\"sicCode\":None}", "title": "" }, { "docid": "847db80f98a606660a931b2ebbac74b1", "score": "0.59059405", "text": "def load(csv_file):", "title": "" }, { "docid": "de28705422e5cb800134800f859e40d4", "score": "0.5905777", "text": "def fetch_data_by_symbol():\n symbols = [\"crude oil\", \"xau\"]\n dataframes = []\n for symbol in symbols:\n search_result = investpy.search_quotes(text=symbol, n_results=1)\n dataframe = search_result.retrieve_historical_data(from_date='03/06/2012', to_date='15/07/2021')\n dataframe = dataframe.drop([\"Open\", \"High\", \"Low\", \"Volume\", \"Change Pct\"], axis=1)\n dataframe = dataframe.dropna()\n index_date = pd.date_range(\"2012-06-03\", \"2021-07-15\")\n dataframe = dataframe.reindex(index_date, fill_value=None)\n dataframe = dataframe.fillna(method='ffill')\n dataframe = dataframe.fillna(method='bfill')\n dataframe = dataframe.rename(columns={'Close': f'Close-{symbol}'})\n dataframes.append(dataframe)\n return pd.concat(dataframes, axis=1)", "title": "" }, { "docid": "08c693a90d82d70c782a762e51ec3f25", "score": "0.58816314", "text": "def fetch_stock(ticker):\n fname = '%s.csv'%ticker\n url = 'http://ichart.finance.yahoo.com/table.csv?' +\\\n 's=%s&d=9&e=20&f=2007&g=d&a=0&b=29&c=1993&ignore=.csv'%ticker\n\n # the os.path module contains function for checking whether a file\n # exists\n if not os.path.exists(fname):\n urllib.urlretrieve(url, fname)\n r = mlab.csv2rec(fname)\n\n # note that the CSV file is sorted most recent date first, so you\n # will probably want to sort the record array so most recent date\n # is last\n r.sort()\n return r", "title": "" }, { "docid": "a0d0984c796b330c2b1d385407fe4c45", "score": "0.5836562", "text": "def read_file(ticker):\r\n data = []\r\n file = open(ticker.lower()+'.csv')\r\n lines = file.readlines()\r\n print(lines)\r\n last_date = get_last_date(lines)\r\n curr_date = dt.datetime.now().date()\r\n if last_date < curr_date:\r\n print(\"it forkin ran!!!\")\r\n update_file()\r\n \r\n \"\"\"\r\n for line in file:\r\n line.strip('/n').split(',')\r\n data.append(line)\r\n \"\"\"", "title": "" }, { "docid": "62f6c7e230fbd1ad6e01547d3c86b56e", "score": "0.5776138", "text": "def load_symbols():\n if DATA_SET:\n symbol_manifest = pd.read_csv(SYMBOL_MANIFEST, index_col=0, parse_dates=[7, 8, 9])\n symbol_manifest['MarketCap'] = symbol_manifest['MarketCap'].fillna(0.0)\n symbol_manifest.loc[symbol_manifest['Sector'].isnull(), 'Sector'] = 'n/a'\n symbol_manifest = symbol_manifest.rename(columns={'industry': 'Industry'})\n return symbol_manifest\n else:\n print NO_DATA_SET", "title": "" }, { "docid": "8683a191f1c5ce39233d897e15316187", "score": "0.57702637", "text": "def getPrices(start_date,end_date,freq):\n\n\tquery = \"SELECT symbol FROM companyStaging\"\n\tsymbols =getQuery(query, '')\n\ti=0\n\n\tfor symbol in symbols:\n\t\ti= i + 1\n\t\ttotalNum = len(symbols)\n\t\tticker= ' '.join(symbol)\n\t\tlocalFilePath= globals['dataFilesPath'] + ticker + \".AX.csv\"\n\t\tyfURL = constructYFURL(ticker,start_date,end_date,freq)\n\t\tprint (str(i) + \" of \" + str(totalNum) + \" - \" + yfURL)\n\t\ttry:\n\t\t\tdownload(localFilePath,yfURL)\n\t\t\t#print(yfURL)\n\t\t# Add the column in csv file with header\n\t\t\tadd_column_in_csv(localFilePath, globals['dataFilesPath']+\"/1\" + ticker + \".csv\", lambda row, line_num: row.append('Symbol') if line_num == 1 else row.append(ticker))\n\t\t\tos.remove(localFilePath)\n\t\texcept:\n\t\t\tprint('download failed')", "title": 
"" }, { "docid": "b7775e36f6f7695a287ab41c2b5ab655", "score": "0.5693667", "text": "def get_stock_data(ticker):\n path = os.path.join('data', 'stocks', ticker + '.csv')\n df_stock = pd.read_csv(path)\n df_stock = df_stock.assign(DifferenceRelative = df_stock['Close'] / df_stock['Open'] - 1) \n df_stock = df_stock.assign(Difference = df_stock['Close'] - df_stock['Open']) \n df_stock['Target'] = np.where(df_stock['Difference'] > 0, 1, 0)\n df_stock['Date'] = pd.to_datetime(df_stock['Date']).dt.date\n \n return df_stock", "title": "" }, { "docid": "747a03e31e11107412370fc70e02384a", "score": "0.5640498", "text": "def read_data(start, end):\n df = fix_yahoo_finance.download(\"GLD\", start=\"2008-01-01\", end=\"2017-12-31\")\n df = df[[\"Close\"]].dropna()\n return df", "title": "" }, { "docid": "eb178755f8b29a3b398ee86c879bc665", "score": "0.56381935", "text": "def __init__(self, events, csv_dir, symbol_list):\r\n\r\n self.events = events\r\n self.csv_dir = csv_dir\r\n self.symbol_list = symbol_list\r\n\r\n self.symbol_data = {}\r\n self.latest_symbol_data = {}\r\n self.continue_backtest = True\r\n self._data_conversion_from_csv_files()", "title": "" }, { "docid": "372fbac03fa2ba2f91335cd6ddc18607", "score": "0.563276", "text": "def check_file():\n try:\n with open(\"stock.csv\", \"r\") as file:\n print(\"Items loaded from stock.csv\\n\")\n reader = csv.reader(file)\n for row in reader:\n ST.item_list.append(SIA.StockItem(row[0], row[1], row[2]))\n except IOError:\n print(\"Stock file not found! A new file will be created at end of session...\\n\")", "title": "" }, { "docid": "30ba3337ca3c67efba4bfd0f9c1c1da7", "score": "0.5616497", "text": "def load_dataset(tweets_paths, prices_path, add_intercept=False):\n prices = pd.read_csv(prices_path, encoding = \"ISO-8859-1\", parse_dates = ['date'])\n final = pd.DataFrame()\n\n for tweets_path in tweets_paths:\n symbol = tweets_path[:tweets_path.find('/')].upper()\n tweets_path = \"initial_data/\" + tweets_path\n\n # Load Tweets\n tweets = pd.read_excel(tweets_path, sheet_name='Stream', encoding = \"ISO-8859-1\", parse_dates = ['Date'])\n tweets = tweets.drop(columns=['Hour', 'Tweet Id', 'User Name', 'Nickname', 'Bio'])\n aggregation_functions = {'Tweet content': 'sum', 'Date': 'first'}\n tweets = tweets.groupby(tweets['Date']).aggregate(aggregation_functions)\n tweets['date'] = tweets['Date']\n tweets = tweets.drop(columns=['Date'])\n\n # Load Prices\n specific_prices = prices.loc[prices['symbol'] == symbol]\n specific_prices = specific_prices.drop(columns=['open', 'low', 'high', 'volume'])\n next_day_prices = specific_prices.copy()\n next_day_prices = next_day_prices.drop(columns=['symbol'])\n next_day_prices['date'] -= pd.DateOffset(days=1)\n\n new_prices = specific_prices.merge(next_day_prices, how = 'inner', on = ['date'])\n new_prices['increase'] = np.where((new_prices['close_y'] - new_prices['close_x']) > 0, 1, 0) # For a certain date, increase represents whether or not the stock price increased between the current date's close and the next day's close\n specific_prices = new_prices.drop(columns=['close_y', 'close_x'])\n\n # Merge tweets and prices\n merged = tweets.merge(specific_prices, how = 'inner', on = ['date'])\n\n merged.to_csv(\"final_data/\" + symbol + \".csv\")\n\n final = final.append(merged)\n\n\n final.to_csv(\"final_data/compiled_data.csv\")\n\n return final", "title": "" }, { "docid": "1bcaa14884acd27f2f27789cfbf0bb36", "score": "0.5600141", "text": "def import_stock_data(nr_days_of_history, nr_days_to_trade, start_date_str):\n 
\n # pick out the desired start and end dates\n dates = calculateDates(nr_days_of_history = nr_days_of_history, nr_days_to_trade = nr_days_to_trade, start_date_str = start_date_str)\n\n # list the files to be read\n #path = \"./data/full_history/*.csv\"\n path = \"../stock_data/data/full_history/*.csv\"\n all_files = np.array(glob.glob(path))\n\n # read in the data frames from the files\n li = read_in_as_list_of_dfs(all_files, dates['start_date'], dates['end_date'])\n\n # combine into a single data frame\n df = pd.concat(li, join='outer', axis=1)\n\n # clean the combined data frame\n df = clean_combined_df(df)\n\n return(df)", "title": "" }, { "docid": "b42c41785a34c541e3863435cc4810d4", "score": "0.55824095", "text": "def _read_current_prices(self):\n for theItem in self.items:\n f = open(\"%s.csv\" % theItem.name)\n if theItem.has_previous_history():\n f.readline() #skip header\n line = f.readline()\n price = self._get_price_from_line(line)\n volume = self._get_volume_from_line(line)\n theItem.price_history = [price] + theItem.price_history\n theItem.operations_volume_history = [volume] + theItem.operations_volume_history\n else:\n lines = f.readlines()\n lines = lines[1:]\n prices = map( self._get_price_from_line, lines)\n volumes = map(self._get_volume_from_line, lines)\n theItem.price_history = prices\n theItem.operations_volume_history = volumes\n f.close()\n theItem.current_price = theItem.price_history[0]\n\n self.items_sorted_by_price = sorted(self.items, key = lambda x: x.current_price)", "title": "" }, { "docid": "4c92a314f1a48db8fe73232200cda455", "score": "0.5581322", "text": "def import_stock(symbol, metric):\n\n df = pd.read_csv(symbol_to_path(symbol),\n index_col='Date', parse_dates=True,\n usecols=['Date', metric], na_values=['nan'])\n # Rename 'Metric' column to ticker symbol to prevent clash\n df_renamed = df.rename(columns={metric:symbol})\n\n return df_renamed", "title": "" }, { "docid": "4c92a314f1a48db8fe73232200cda455", "score": "0.5581322", "text": "def import_stock(symbol, metric):\n\n df = pd.read_csv(symbol_to_path(symbol),\n index_col='Date', parse_dates=True,\n usecols=['Date', metric], na_values=['nan'])\n # Rename 'Metric' column to ticker symbol to prevent clash\n df_renamed = df.rename(columns={metric:symbol})\n\n return df_renamed", "title": "" }, { "docid": "865c3c6977aa6701e3f609f67aa6c836", "score": "0.55802244", "text": "def parse_candles(self, candles, symbol) -> TickerCoin:\n import pandas as pd\n columns = ['time_open', 'open', 'high', 'low', 'close', 'volume', 'time_close', 'volume_quote_asset', 'n_trades', 'buy_base_asset_vol', 'buy_quote_asset_vol', 'ignore']\n df = pd.DataFrame(candles, columns=columns)\n\n\n # Convert dtypes\n df['time_open'] = pd.to_datetime(df['time_open'], unit='ms')\n df['time_close'] = pd.to_datetime(df['time_close'], unit='ms')\n cols_float = ['open', 'high', 'low', 'close', 'volume', 'volume_quote_asset', 'buy_base_asset_vol', 'buy_quote_asset_vol']\n df[cols_float] = df[cols_float].astype(float)\n\n # Chose what to save/drop\n drop_cols = ['time_close', 'volume_quote_asset', 'buy_base_asset_vol', 'buy_quote_asset_vol', 'ignore']\n rename_cols = {'time_open': 'time'}\n df = df.drop(columns=drop_cols)\n df = df.rename(columns=rename_cols)\n\n # df['symbol'] = symbol\n df.insert(loc=0, column='symbol', value=symbol)\n\n dfticker = TickerCoin(df)\n\n return dfticker", "title": "" }, { "docid": "8793f0a11d40ef7982810b3d734a5bb3", "score": "0.5579975", "text": "def reading_data(path):\n out_1 = []\n out_2 = []\n try:\n with 
open(path, 'r') as csv_fd:\n file = csv.DictReader(csv_fd)\n for row in file:\n out_1.append(float(row['km']))\n out_2.append(float(row['price']))\n except:\n print('Fatal error when trying reading csv!')\n exit(1)\n return out_1, out_2", "title": "" }, { "docid": "ecd6466378510c53adf3f16017289e99", "score": "0.55788565", "text": "def test_LoadCSV(self):\n EXPECTED_STOCKS = ['1AG', 'BNR', 'XNJ']\n loaded_stocks = self.all_stocks._all_stocks\n\n # Ensure only 3 stocks are loaded\n self.assertEqual(len(loaded_stocks.keys()), 3, \n \"StockCollection does not have 3 stocks in it.\")\n\n for stock_code in loaded_stocks.keys():\n self.assertIn(stock_code, EXPECTED_STOCKS, \n \"Loaded stock codes not correct. Got: \" + stock_code)\n\n # For each stock check it has 3 TradingData\n stock_obj = self.all_stocks.get_stock(stock_code)\n num_td = len(stock_obj._trading_data.keys())\n exp_num = 3\n self.assertEqual(num_td, exp_num, \n \"Stock: {} has {} TadingData, should have {}.\".format(\n stock_code, num_td, exp_num))", "title": "" }, { "docid": "4052855c340696633eb3eaf588c655c8", "score": "0.5577596", "text": "def _parse_data_from_file(self, file_name):\n file_path = os.path.join(self.path, file_name)\n data_file = xlrd.open_workbook(file_path)\n worksheet = data_file.sheet_by_index(0)\n fin_data = []\n for row in xrange(self.offset, worksheet.nrows):\n row_data = worksheet.row(row)\n fin_data.append({\n 'stock_date': xlrd.xldate.xldate_as_datetime(row_data[0].value, data_file.datemode).utcnow(),\n 'stock_open_price': row_data[1].value,\n 'stock_high_price': row_data[2].value,\n 'stock_low_price': row_data[3].value,\n 'stock_close_price': row_data[4].value,\n 'stock_volume': row_data[5].value,\n 'stock_adj_close': row_data[6].value,\n 'stock_id': int(row_data[7].value)\n })\n return fin_data", "title": "" }, { "docid": "121303a20b6ddf0058ec98198e024f22", "score": "0.55765146", "text": "def load_csv_file(self):\n f_stream = open(self.m_name, \"r\")\n f_text = f_stream.read()\n f_stream.close()\n rows = f_text.replace(\"\\r\",\"\").split(\"\\n\")\n\n\n\n for idx in range(self.m_numcols):\n self.m_datacols.append([])\n\n\n\n if len(rows) == self.m_headlen:\n return\n\n\n for item in rows[self.m_headlen:]:\n if item == \"\":\n continue\n cells = item.split(\",\")\n for col in range(len(cells)):\n if col == 0:\n self.m_datacols[col].\\\n append(csvd.string_to_datetime(cells[col]))\n else:\n try:\n self.m_datacols[col].append(float(cells[col]))\n except ( ValueError ):\n self.m_datacols[col].append(str(cells[col]))\n #store last date in the file\n try:\n self.m_last_init_date = self.m_datacols[0][-1]\n except ( IndexError ):\n pass", "title": "" }, { "docid": "9762b772769bada8c58b223595c4f88f", "score": "0.5559485", "text": "def csvExtract(filename, country):\n \n rawdata = []\n filepath = 'data/stock/manual/' + filename\n file2 = open(filepath, 'rU')\n reader2 = csv.reader(file2)\n for row in reader2:\n rawdata.append(row)\n\n\tcurrentYear = rawdata[1][0][-2:]\n\tprices = []\n\tdata = []\n\tfor i in range(1, len(rawdata)):\n\t\tif rawdata[i][0][-2:] == currentYear:\n\t\t\tprices.append(float(rawdata[i][4]))\n\t\telse:\n\t\t\tdata.append([currentYear, float(sum(prices))/len(prices)])\n\t\t\tprices = []\n\t\t\tcurrentYear = rawdata[i][0][-2:]\n\t\t\tprices.append(float(rawdata[i][4]))\n\n\tcount = 0\n\tfor row in data:\n\t\tif count <= 12:\n\t\t\trow[0] = '20' + row[0]\n\t\t\tcount += 1\n\t\telif count >= 12:\n\t\t\trow[0] = '19' + row[0]\n\tdata = sorted(data)\n\n\twith open('data/stock/done/' + country + 
'.csv', 'w') as file1:\n\t\tfile1write = csv.writer(file1, delimiter = ',')\n\t\tfile1write.writerows(data)\n\n\tfile1.close()", "title": "" }, { "docid": "51da6d67964ba889e02ff09336d999ee", "score": "0.55501455", "text": "def pd_parse_data(self, date_obj):\n url = self.get_url(date_obj)\n stocks_df = pd.read_csv(url, sep=r'\\s*,\\s*', engine='python')\n stocks_df = stocks_df[stocks_df['SERIES'] == 'EQ']\n stocks_df = stocks_df[['SYMBOL', p1, p2]]\n for name in self.stocks_dict.keys(): # get data for the stocks\n df = stocks_df[stocks_df['SYMBOL'] == name]\n if not df.empty:\n v1 = df[p1].values[0]\n v2 = df[p2].values[0]\n self.stocks_dict[name].append(v1 * v2)\n # pprint(self.stocks_dict)", "title": "" }, { "docid": "3e713b53007d8c02fc0e8172c816c119", "score": "0.5546367", "text": "def read_values(csv_filename):\n start = time.process_time()\n data = pd.read_csv(csv_filename, parse_dates=True,\n date_parser=date_parser,\n index_col=[0])\n log.debug('csv import took %.2f seconds', time.process_time() - start)\n log.debug(dataframe_info(data))\n\n cols_to_keep = ['Open', 'Volume_(BTC)']\n log.info('keeping only the following column(s): ' + ','.join(cols_to_keep))\n for col_name in data.columns:\n if col_name not in cols_to_keep:\n data.drop(col_name, axis=1, inplace=True)\n log.debug(dataframe_info(data))\n\n log.info('removing NaN rows')\n data.dropna(inplace=True)\n log.debug(dataframe_info(data))\n\n return data", "title": "" }, { "docid": "a5c49f0fd97851d9467bcbab03a5f867", "score": "0.55251783", "text": "def test_LoadCSV(self):\n EXPECTED_STOCKS = ['1AD', 'BNR', 'MIL']\n loaded_stocks = self.all_stocks._all_stocks\n\n # Ensure only 3 stocks are loaded\n self.assertEqual(len(loaded_stocks.keys()), 3, \n \"StockCollection does not have 3 stocks in it.\")\n\n for stock_code in loaded_stocks.keys():\n self.assertIn(stock_code, EXPECTED_STOCKS, \n \"Loaded stock codes not correct. 
Got: \" + stock_code)\n\n # For each stock check it has 5 TradingData\n stock_obj = self.all_stocks.get_stock(stock_code)\n num_td = len(stock_obj._trading_data.keys())\n exp_num = 5\n self.assertEqual(num_td, exp_num, \n \"Stock: {} has {} TadingData, should have {}.\".format(\n stock_code, num_td, exp_num))", "title": "" }, { "docid": "5f80b4872901e2430271853b03c93052", "score": "0.55162156", "text": "def load_csv(self, name):\n loc = os.path.join(self.file_loc, name)\n with open(loc, 'r') as f:\n lines = []\n reader = csv.reader(f)\n for line in reader:\n # l = list(map(lambda x: float(x), line))\n lines.append(line)\n return lines", "title": "" }, { "docid": "df4cf0e9e02d2424633ccc354f0d959e", "score": "0.5502979", "text": "def load_weather(self):\r\n self.w_df = pd.read_csv(\"682880.csv\",\r\n low_memory=False, \r\n usecols=[2,3,5,6,7])", "title": "" }, { "docid": "6f206c845a75de69b9715e353c40d3cd", "score": "0.5492445", "text": "def addBarsFromCSV(self, instrument, path, timezone=None):\r\n\r\n if timezone is None:\r\n timezone = self.__timezone\r\n\r\n rowParser = GenericRowParser(self.__columnNames, self.__dateTimeFormat, self.getDailyBarTime(), self.getFrequency(), timezone)\r\n BarFeed.addBarsFromCSV(self, instrument, path, rowParser)\r\n\r\n if rowParser.barsHaveAdjClose():\r\n self.__haveAdjClose = True\r\n elif self.__haveAdjClose:\r\n raise Exception(\"Previous bars had adjusted close and these ones don't have.\")", "title": "" }, { "docid": "9413c379ad40e55faa23dcef73ed5ab9", "score": "0.5477521", "text": "def readcsv(filename, **kargs):\r\n _kargs = {\r\n 'headers': True,\r\n 'transposed' : False,\r\n 'delimiter' : ';',\r\n 'quotechar' : '`'}\r\n _kargs.update(kargs)\r\n #TODO finish read from a csv file\r", "title": "" }, { "docid": "57062c382ce2f8e1f20154cd6743941e", "score": "0.54696983", "text": "def get_max_close(symbol):\n df = pd.read_csv(\"data/{}.csv\".format(symbol)) #Read in data\n return df['Close'].max() #Compute and return max", "title": "" }, { "docid": "f0dbff7da68ffa11671ba88c4f00215a", "score": "0.54663837", "text": "def read_data(symbol):\n \n with open(alphavantage_data_dir + symbol + '.json', 'r') as infile:\n data = json.load(infile)\n \n return data", "title": "" }, { "docid": "dbfbbf201e38029f3659dd40bb50ebfd", "score": "0.546602", "text": "def parse_csv(filename):\n\n rows = []\n with codecs.open(filename, \"r\", \"utf-8\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n\n date_columns = range(8, 11)\n for row in reader:\n if not row or row[0].startswith(\"ZAVSIF\"):\n continue\n\n reformat_unit(row)\n reformat_dates(date_columns, row)\n rows.append(row)\n\n # sort by reporting date (the last date column)\n rows = sorted(rows, key=itemgetter(date_columns[-1]))\n return rows", "title": "" }, { "docid": "dce17b8f6acc7b149811dec5cd576b36", "score": "0.5464675", "text": "def load_data_yahoo(num, return_file_path=False):\n \n paths = []\n for root, dirs, files in os.walk('data/Yahoo', topdown=False):\n for name in files:\n paths.append(os.path.join(root, name))\n\n file_path = paths[num]\n print(file_path)\n\n df = pd.read_csv(file_path)\n\n if 'is_anomaly' == df.columns[-1]:\n df['is_anomaly'].replace(to_replace=0, value=-1, inplace=True)\n else:\n df['anomaly'].replace(to_replace=0, value=-1, inplace=True)\n df.rename(columns={\"timestamps\": \"timestamp\"}, inplace=True)\n df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')\n\n if return_file_path:\n return df, file_path\n else: \n return df", "title": "" }, { "docid": 
"78910a6e73a41cceb0534a3156ae8d3b", "score": "0.5459131", "text": "def loadTradeData(self):\n\n signalMap = {'BUY': 1, 'HOLD': 0, 'SELL': -1, 0: 0, 1: 1, -1: -1}\n self.tradeDataList = pd.read_csv(self.filedir_trades)\n\n self.tradeDataList['signal'] = self.tradeDataList['signal'].map(signalMap)\n self.tradeDataList = self.tradeDataList.values.tolist()", "title": "" }, { "docid": "04ee0367c8b538f5f05fff9ddb520cb0", "score": "0.54565716", "text": "def init_stock_data(self):\r\n stock_data = {}\r\n stock_data['stock_code'] = self._stock_code\r\n stock_symbol_file_name = self._save_flie_path + '%s.txt' % (str(self._stock_code))\r\n try:\r\n for line in open(stock_symbol_file_name):\r\n line = line.strip(\"\\n\")\r\n line = line.split(',')\r\n quary_data = self.assembly_quary_data(line)\r\n stock_data[str(line[1])] = quary_data\r\n finally:\r\n return stock_data", "title": "" }, { "docid": "54dadb06e6048902d910eaa8633f90bd", "score": "0.5449732", "text": "def read_catalog_csv(self):\n\n data_types = {\n \"id\": str,\n \"name\": str,\n \"brand\": str,\n \"added_at\": str,\n \"is_active\": bool,\n \"category\": str,\n \"is_adult\": bool,\n \"is_shippable\": bool\n }\n\n df = pd.read_csv(CATALOG_FILE_PATH, dtype=data_types)\n # print(df.head(5))\n # print(df.dtypes)\n\n df['price'] = None\n\n df = df[(df.is_active == True) & (df.is_adult == False)]\n # print(df.head(5))\n\n self.data = df", "title": "" }, { "docid": "82bc929650d8c2decc0e0107224af973", "score": "0.54389256", "text": "def get_quote_yahoo(symbols):\r\n if isinstance(symbols, compat.string_types):\r\n sym_list = symbols\r\n else:\r\n sym_list = '+'.join(symbols)\r\n\r\n # for codes see: http://www.gummy-stuff.org/Yahoo-data.htm\r\n request = ''.join(compat.itervalues(_yahoo_codes)) # code request string\r\n header = list(_yahoo_codes.keys())\r\n\r\n data = defaultdict(list)\r\n\r\n url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)\r\n\r\n with urlopen(url_str) as url:\r\n lines = url.readlines()\r\n\r\n for line in lines:\r\n fields = line.decode('utf-8').strip().split(',')\r\n for i, field in enumerate(fields):\r\n if field[-2:] == '%\"':\r\n v = float(field.strip('\"%'))\r\n elif field[0] == '\"':\r\n v = field.strip('\"')\r\n else:\r\n try:\r\n v = float(field)\r\n except ValueError:\r\n v = np.nan\r\n data[header[i]].append(v)\r\n\r\n idx = data.pop('symbol')\r\n return DataFrame(data, index=idx)", "title": "" }, { "docid": "a424a8dea6179991743ff28ee59dcda4", "score": "0.54257447", "text": "def get_price_from_yahoo(yahoo_fx_tickers, date):\n try:\n assert (isinstance(yahoo_fx_tickers, list) and isinstance(date, datetime.date))\n std_index = HistoricalIntradayPrices.common_intraday_tools.get_standardized_intraday_fx_dtindex(date)\n price_dat = pd.concat(map(Utilities.yahoo_import.get_intraday_price_data_of_single_ticker, yahoo_fx_tickers),\n ignore_index=True)\n price_dat = price_dat.applymap(float)\n price_dat.rename(columns={'Timestamp': 'Time'}, inplace=True)\n\n price_dat['Time'] = list(map(lambda t: pytz.utc.localize(datetime.datetime.utcfromtimestamp(t)), price_dat['Time']))\n price_dat['Time'] = list(map(Utilities.datetime_tools.truncate_to_next_minute, price_dat['Time']))\n\n price_dat = price_dat[['Close', 'High', 'Low', 'Open', 'Volume']+['Time']]\n price_dat = price_dat.groupby('Time').agg({\n 'Open': lambda l: l.iloc[0], 'Close': lambda l: l.iloc[-1], 'Low': min, 'High': max, 'Volume': sum})\n\n price_dat = price_dat[['Close', 'High', 'Low', 'Open', 'Volume']]\n price_dat = 
price_dat.reindex(index=std_index)\n price_dat.loc[:, 'Volume'] = price_dat['Volume'].fillna(0)\n price_dat.loc[:, 'Open'] = price_dat['Open'].fillna(0)\n price_dat.loc[:, 'Close'] = price_dat['Close'].fillna(method='ffill')\n\n def propagate_on_zero_open(t, field):\n if t['Open'] == 0:\n return [t[field]]*(len(t)-1)+[0]\n else:\n return t.values\n\n price_dat = price_dat.apply(lambda t: propagate_on_zero_open(t, 'Close'), axis=1)\n price_dat = price_dat.fillna(0).reset_index()\n\n assert (isinstance(price_dat, pd.DataFrame) and tuple(sorted(price_dat.columns)) == tuple(\n sorted(['Close', 'High', 'Low', 'Open', 'Time', 'Volume'])))\n\n logging.info('Yahoo price import and pandas enrich successful for: %s' % yahoo_fx_tickers)\n if price_dat['Close'].sum() == 0:\n return pd.DataFrame(None)\n return price_dat\n\n except AssertionError:\n logging.warning('Calling get_price_from_yahoo with wrong argument types')\n except Exception as err:\n logging.warning('Yahoo price import and pandas enrich failed for: %s with message %s' %\n (yahoo_fx_tickers, err))\n return pd.DataFrame(None)", "title": "" }, { "docid": "979cf04209a86e117c71aafe5ee1ca61", "score": "0.54220134", "text": "def monthlyDataDownload(self):\n self.multiStockTrain = []\n self.multiStockTest = []\n with open('./resources/SnP500_his.csv', 'rb') as f:\n reader = csv.reader(f)\n ticker_list = list(reader)\n for tickers in ticker_list:\n tickerstring = tickers[0]\n s1 = singleStock(tickerstring, self.trainMonth1, 1, self.trainYear1,\n self.trainMonth2, 28, self.trainYear2, 'm')\n s1.loading()\n s1.Aclose.reverse()\n s1.Close.reverse()\n s1.Date.reverse()\n s1.Open.reverse()\n s1.High.reverse()\n s1.Low.reverse()\n self.multiStockTrain.append(s1)\n self.stockNum = len(self.multiStockTrain)\n\n\n return self.multiStockTrain[0]", "title": "" }, { "docid": "182fa4490ca28ba32baf212feababa17", "score": "0.54196775", "text": "def read_csv(file_path, file_name):\n \n csvpath = os.path.join(file_path, file_name)\n profit_data = []\n total_profit = 0\n with open(csvpath, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n next(csvfile)\n\n for row in csvreader:\n profit = float(row[1])\n profit_data.append([row[0], profit])\n total_profit += profit\n\n total_profit = round(total_profit, 2) \n \n return profit_data, total_profit", "title": "" }, { "docid": "12dd0ed32a29dc72a53452d2c24f9c6b", "score": "0.5404796", "text": "def data_loader(self):\n\n # -- IMPORTS\n import csv\n from conversions import twh_conversion\n # -- DATA LOADING\n\n # Earthquake Data\n with open(self.earthFile) as f:\n reader = csv.reader(f)\n eq_header = next(reader)\n\n # Columns to be loaded, (1, 5, 14)\n\n # Oil Data\n with open(self.oilFile) as f:\n reader = csv.reader(f)\n\n oil_prod_amt, oil_prod_date = [], []\n for line in reader:\n if line[1] == 'USA':\n oil_prod_date.append(line[2])\n\n oil_prod = line[4]\n oil_prod_amt.append(twh_conversion(float(oil_prod)))\n\n # TEST\n # print(oil_prod_date)\n # print(oil_prod_amt)", "title": "" }, { "docid": "50203404a9ebbab5259dba70a62da3a3", "score": "0.5403184", "text": "def read_market_file(path,input_file):\n _file = open(\"%s\\input\\%s\" % (path,input_file), \"r\")\n content_list = csv.reader(_file)\n return content_list,_file", "title": "" }, { "docid": "d2abbd4f705f554826b93b7de71598c6", "score": "0.53969216", "text": "def import_csv(self, options):\n stock = self.env['stock.standard'].search([('id','=',options['active_id'])])\n csv_data = self.file\n file_type = self.file_name.split(\".\")\n 
if(self.location_id != False and self.picking_type_id != False and self.warehouse != False):\n if(file_type[1] == 'csv'):\n self.typee_format= 'csv'\n elif(file_type[1] == 'xlsx' or file_type[1] == 'xls'):\n self.typee_format= 'xls'\n # csv\n if(file_type[1] == 'csv' and self.typee_format== 'csv'):\n \n csv_data = base64.decodestring(self.file).decode(encoding='utf-8')\n self._addfromfile(csv_data.split('\\n'),stock)\n # xls\n \n elif((file_type[1] == 'xlsx' or file_type[1] == 'xls') and self.typee_format== 'xls'):\n \n file_data = self.file.decode('base64')\n wb = open_workbook(file_contents=file_data)\n newar = []\n data = []\n data_rows = []\n \n dataloaded=[]\n \n for s in wb.sheets():\n for row in range(s.nrows):\n dd = []\n for col in range(s.ncols):\n value = (s.cell(row, col).value)\n dd.append(value)\n data_rows.append(dd)\n \n for data_row in data_rows:\n if(isinstance(data_row[0], int) or isinstance(data_row[0], float)):\n ln_of_code = str(int(data_row[0]))\n # ln_of_code = len(str(int(data_row[0])))\n code = ln_of_code.zfill(8)\n \n data = self.env['product.product'].search([('name','=',data_row[1]),('code','=',code)])\n \n if(data):\n newar.append(data)\n for n in newar:\n pass\n for i in n:\n if(i.code != False and i.id != False):\n val = [0,0,{'product_id_code':i.code,\n 'product_id':i.id\n }]\n \n dataloaded.append(val)\n \n stock.write({'stock_standard_line':dataloaded})\n else:\n raise exceptions.Warning(_(\"Invalid Extension of File. Select Either .Xls or .CSV File\"))\n else:\n raise exceptions.Warning(_(\"Fill Picking Id, Location, WareHouse first\"))", "title": "" }, { "docid": "6c8853f916bc3cf848bbe38e375ad442", "score": "0.5394753", "text": "def load_csv_file_opti(self):\n for idx in range(self.m_numcols):\n self.m_datacols.append([])\n\n f_stream = open(self.m_name, \"rb\")\n f_stream.seek(-2,2)\n while f_stream.read(1).decode(\"utf-8\") != \"\\n\":\n f_stream.seek(-2,1)\n line = f_stream.readline()\n if line.strip() == \"\":\n return\n str_line = line.decode(\"utf-8\")\n if str_line.find( ',' ) == self.m_header[-1][0] :\n return\n #print(len(line), len(str_line), str_line)\n cells = str_line.split(\",\")\n for col in range(len(cells)):\n if col == 0:\n self.m_datacols[col].append(csvd.string_to_datetime(cells[col]))\n else:\n self.m_datacols[col].append(float(cells[col]))\n f_stream.close()\n #store last date in the file\n try:\n self.m_last_init_date = self.m_datacols[0][-1]\n except ( IndexError ):\n pass", "title": "" }, { "docid": "79032319f64b6c36bd023a206ff72054", "score": "0.53896415", "text": "async def ask_stock(session, asset):\n\n url = 'https://stooq.com/q/l/?s=%s&f=sd2t2ohlcv&h&e=csv' % (urllib.parse.quote(asset.lower()),)\n try:\n async with session.get(url) as response:\n if response.status != 200:\n print(\">>> finbot: WARNING unexpected HTTP code %s for symbol %s\" % (response.status_code, asset))\n return None, None\n else:\n reader = csv.DictReader((await response.text()).split('\\n'))\n try:\n row = next(reader)\n if row['Close'] == 'N/D':\n print(\">>> finbot: WARNING bad or unavailable stock symbol %s\" % asset)\n return None, None\n return row['Symbol'], row['Close']\n except StopIteration:\n print(\">>> finbot: WARNING empty data for symbol %s\" % asset)\n return None, None\n except Exception as e:\n print(\">>> finbot: WARNING exception while trying to get data for symbol %s: %s, %s\" % (asset, type(e).__name__, e.args))\n return None, None", "title": "" }, { "docid": "2a697b28e6806408ca387220305ae1e4", "score": "0.5383794", "text": "def 
getIndexPrices():\n \n filename = 'data/stock/country_indexes.csv'\n \n indexes = {}\n \n csvfile = open(filename, 'rU')\n reader = csv.reader(csvfile)\n \n for row in reader:\n indexes[row[0]] = row[1:]\n countryCodes = {}\n csvfile2 = open('data/stock/country_codes.txt', 'rU')\n reader2 = csv.reader(csvfile2)\n for row in reader2:\n countryCodes[row[1].lower()] = row[0]\n \n csvfile2.close()\n \n for country in indexes.keys():\n countryCode = countryCodes[country.lower()]\n if indexes[country][1] == 'manual':\n csvExtract(indexes[country][2], countryCode)\n else:\n yahooExtract(indexes[country][1], countryCode)\n csvfile.close()", "title": "" }, { "docid": "acc48366547752b1647ae80c6e8ab207", "score": "0.5380555", "text": "def fetch_btc_data(self):\n btc_abs_path = self.__get_absolut_path_to_data(self.__btc_csv_path)\n btc_data = pd.read_csv(btc_abs_path)\n btc_data = btc_data.dropna()\n btc_data['Position'] = btc_data['close'].diff().fillna(0.0)\n btc_data.loc[btc_data['Position'] > 0.0, 'Position'] = True\n btc_data.loc[btc_data['Position'] <= 0.0, 'Position'] = False\n btc_data['time'] = pd.to_datetime(btc_data['time'], unit='s')\n btc_data.insert(0, 'date', btc_data[\"time\"])\n btc_data = btc_data.set_index('time')\n return btc_data", "title": "" }, { "docid": "6f31baf32c2896b2792a62fa6554fb6b", "score": "0.53802884", "text": "def download_histo_data_from_yahoo(self, date_from = datetime.date(datetime.datetime.now().year-1, datetime.datetime.now().month, datetime.datetime.now().day), date_to = datetime.date(datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)):\n\n \n if self.is_valid:\n r = requests.get(\"http://ichart.finance.yahoo.com/table.csv\", params={'s' : self.symbol, 'a' : str(date_from.month-1), 'b' : str(date_from.day), 'c': str(date_from.year), 'd' : str(date_to.month-1), 'e' : str(date_to.day), 'f' : str(date_to.year), 'g' : 'd', 'ignore' : '.csv'})\n \n csv_data = r.content\n csv_data = csv_data.split('\\n')\n data_histo = []\n \n if (len(csv_data)>0): #check data are available\n for line in csv_data:\n if (len(line)>0): #sometimes, last line is empty\n data_histo.append(line.split(','))\n \n data_histo = [data_histo[0]] + data_histo[:1:-1] #headers + reverse timeserie\n \n #cast\n for i in range(1, len(data_histo)): #saute premiere ligne car header\n for j in range(1, len(data_histo[i])): #saute premiere colonne car date\n if utility.is_number(data_histo[i][j]):\n data_histo[i][j] = float(data_histo[i][j])\n \n self.histo_data = data_histo\n \n self.logs.append([datetime.datetime.now(), \"found histo datas on yahoo finance \" + str(len(csv_data)-1) + \" entries\"])", "title": "" }, { "docid": "c8c77cfa2738a3bc32a6f3e08557303d", "score": "0.53768027", "text": "def readCSV(self):\n csv_files = [x for x in os.listdir(self.csv_dir)]\n csv_data = []\n for x in tqdm(csv_files):\n tmp = pd.read_csv(self.csv_dir + x, skiprows=[0])\n frame_numbers = tmp['Frame Number (Start)'].tolist()\n start_times = tmp['Start Time (seconds)'].tolist()\n length = tmp['Length (seconds)'].tolist()\n csv_data.append([x, frame_numbers, start_times, length])\n self.csv_data = csv_data", "title": "" }, { "docid": "2e0e896ddb3bf1d413e8f70c3aef4e3e", "score": "0.53737354", "text": "def yahooExtract(ticker, country):\n \n rawdata = ystockquote.get_historical_prices(ticker, '1995-01-01', '2011-12-31')\n \n prices = {}\n for date in rawdata.keys():\n if date[0:4] in prices.keys():\n prices[date[0:4]].append(float(rawdata[date]['Close']))\n 
else:\n\t\t\tprices[date[0:4]] = []\n\t\t\tprices[date[0:4]].append(float(rawdata[date]['Close']))\n\n\tdata = []\n\tfor year in sorted(prices.keys()):\n\t\tdata.append([year, float(sum(prices[year]))/len(prices[year])])\n\n\twith open('data/stock/done/' + country + '.csv', 'w') as file1:\n\t\tfile1write = csv.writer(file1, delimiter = ',')\n\t\tfile1write.writerows(data)\n\n\tfile1.close()", "title": "" }, { "docid": "1093cea5e516d639b2e32343105c6391", "score": "0.53728276", "text": "def __init__(self, filename=\"sp500.csv\",\n start=1950, end=2020,\n date_field=\"Date\",\n price_field=\"SP500\", # \"Real Price\" is inflation adjusted\n date_format=\"y-m-d\", ):\n input_file = filename\n source = open(filename, \"r\")\n\n # figure out which columns we want\n headers = source.readline()\n date_col = self.column(headers, date_field)\n price_col = self.column(headers, price_field)\n\n # figure out the date format\n delimiter = date_format[1]\n fields = date_format.split(delimiter)\n year_col = fields.index('y')\n if 'm' in fields:\n month_col = fields.index('m')\n if 'd' in fields:\n day_col = fields.index('d')\n\n # process the entire file\n prev = -1\n points = 0\n for line in source:\n fields = line.split(',')\n\n # make sure we have all of the expected data\n if (fields[date_col] == \"\" or fields[price_col] == \"\"):\n continue\n\n # extract the interesting fields\n price = float(fields[price_col])\n date = fields[date_col]\n date_fields = date.split(delimiter)\n year = int(date_fields[year_col])\n month = int(date_fields[month_col])\n if prev < 0:\n prev = price\n\n # see if this is within the requested range\n if year >= start and year <= end:\n appreciation = (price - prev)/prev\n tupple = (year, month, price)\n self.prices.append(tupple)\n prev = price\n points += 1\n\n source.close()", "title": "" }, { "docid": "94a190a5942d9186c98aedaccfdb8a62", "score": "0.53678477", "text": "def get_historical_data(\n symbols,\n update = False\n):\n \n datas = []\n \n for symbol in symbols:\n if already_exists(symbol):\n if update:\n datas.append(\n download_data(symbol)\n )\n else:\n datas.append(\n read_data(symbol)\n )\n else:\n datas.append(\n download_data(symbol)\n )\n return datas", "title": "" }, { "docid": "a7ad1e89b5c0baa49d9f3b264b6cd15f", "score": "0.5358469", "text": "def reader(self):\r\n\r\n # Read input data\r\n try:\r\n # catch errors with input arguments\r\n\r\n # Read file name given via standard input\r\n file_name = input()\r\n # Read number of bags\r\n self.bags = int(input())\r\n\r\n # For testing...\r\n # file_name = \"input.csv\"\r\n # self.bags = 0\r\n\r\n file = open(file_name, encoding=\"utf-8\")\r\n\r\n except FileNotFoundError as ex:\r\n print(ex)\r\n\r\n except ValueError as ex:\r\n print(\"Number of bags - wrong input parameter\")\r\n print(ex)\r\n\r\n else:\r\n reader = csv.reader(file)\r\n\r\n header = next(reader)\r\n for row in reader:\r\n try:\r\n # catch errors with wrong data format\r\n\r\n # row = [From, To, Departure date/time, Arrival date/time, Flight no, Price, Bags allowed,\r\n # Bag price]\r\n dep_airport = row[0]\r\n arr_airport = row[1]\r\n # USM,HKT,2019-05-11T06:25:00,2019-05-11T07:25:00,PV404,24,1,9\r\n dep_date_time = datetime.strptime(row[2], \"%Y-%m-%dT%H:%M:%S\")\r\n arr_date_time = datetime.strptime(row[3], \"%Y-%m-%dT%H:%M:%S\")\r\n flight_no = row[4]\r\n flight_price = int(row[5])\r\n bag_allowed = int(row[6])\r\n bag_price = int(row[7])\r\n\r\n except ValueError as ex:\r\n print(\"Wrong input file format - row skipped\")\r\n 
print(row)\r\n print(ex)\r\n print()\r\n\r\n except IndexError as ex:\r\n print(\"Wrong input file format - row skipped\")\r\n print(row)\r\n print(ex)\r\n print()\r\n\r\n else:\r\n self.data.append(\r\n [dep_airport, arr_airport, dep_date_time, arr_date_time, flight_no, flight_price, bag_allowed,\r\n bag_price])\r\n\r\n # Close file\r\n file.close()", "title": "" }, { "docid": "222357b71cf6d09fd702c54172ca6709", "score": "0.5351112", "text": "def read_and_clean_data(self, file_name):\n\n assert file_name.endswith(\".csv\"), \"Please provide a csv file\"\n\n converters_dict = {\n \"Price\" : clean_string,\n \"Open\" : clean_string,\n \"High\" : clean_string,\n \"Low\" : clean_string,\n \"Vol.\" : clean_vol,\n \"Change %\": clean_change,\n }\n\n # read the csv file\n df = pd.read_csv(file_name,\n parse_dates=[\"Date\"],\n converters=converters_dict)\n\n df.sort_values(by=\"Date\", inplace=True)\n\n return df.reset_index(drop=True)", "title": "" }, { "docid": "9e5ac6b8cdc48d3d6e63ecb88242b166", "score": "0.5346863", "text": "def get_closingPrice(fileloc, ticker):\n # Indices\n time_i = 2\n price_i = 3\n\n # Get the date\n fsplit = fileloc.split('_')\n date = fsplit[len(fsplit) - 1][:-4]\n formatted_date = '-'.join([date[:4], date[4:6], date[6:]])\n \n # 4:00 PM in unixtimestamp\n close_time = \"16:00\"\n pivot = time2unix(date, close_time)\n \n with open(fileloc, 'r') as finput:\n # Check if ticker is in file\n try:\n go_to_ticker(finput, ticker)\n except SystemExit:\n print(\"Ticker Does not exist\")\n return [formatted_date, None]\n \n price = -1\n\n for line in finput:\n if (line.startswith(\"H\")):\n if (price == -1):\n print(\"No trades found, alg did not past 4pm, \\\n and reached end of data\")\n break\n else:\n # Return the most recent price if we reach end of\n # data before 4pm\n return [formatted_date, price]\n elif (line.startswith(\"T\")):\n sline = line.split('|')\n if (float(sline[time_i]) > pivot):\n # If we reach 4pm, but there were no trades\n if (price == -1):\n print(\"Reached 4pm, but no trades occured\")\n break\n return [formatted_date, price]\n # If we are before 4pm, record price\n else:\n price = sline[price_i]\n # If quote line\n else:\n continue\n\n if (price == -1):\n return [formatted_date, None]\n # else:\n # f = open(\"helperfunc.txt\", \"a+\")\n # f.write(\"You fucked up on ticker \" + ticker + \"\\n\")\n # f.close()", "title": "" }, { "docid": "205d2b9a47ed4acd1181af19425b8ff5", "score": "0.534588", "text": "def test_simple_case(self):\n loadCsv = sa.LoadCSV(TEST_FILES['march1.csv'], self.all_stocks)\n\n stocks_loaded = self.all_stocks._all_stocks.keys()\n self.assertEqual(len(stocks_loaded), 1910, \n 'LoadCSV should load correct number of stocks')", "title": "" }, { "docid": "b53a5fa4da1d39d251c3029140e1ea1a", "score": "0.53446454", "text": "def readFrom_csv(self):\n try:\n with open('housing.csv', 'r') as data_csv:\n reader = data_csv.readlines()\n except FileNotFoundError or FileExistsError as e:\n print(str(e))\n return reader", "title": "" }, { "docid": "ee2f42ed78e87d76cb333651d01c5b84", "score": "0.5336711", "text": "def _parse_csv(self):\n try:\n df = pd.read_csv(self.file_name)\n self.data = self._massage_and_clean(df)\n except IOError:\n print(\"Error reading file\")", "title": "" }, { "docid": "0469afb415e8f720a02591fc05a7fbc4", "score": "0.53349894", "text": "def loop_csv_file(source_csv):\n import csv\n\n file_data = []\n with open(source_csv, \"rb\") as csvfile:\n file_reader = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\n for row in 
file_reader:\n file_data.append(row)\n return file_data", "title": "" }, { "docid": "d5503b25f1671d0219918faae2aa1705", "score": "0.5328874", "text": "def csv_reader(file_obj):\n reader = csv.reader(file_obj)\n i = 0\n prices = []\n time = []\n data = []\n for row in reader:\n if i >= 1:\n prices.append(float(row[-1]))\n time.append(row[-2][:4])\n data.append(row[-3])\n i += 1\n l = len(prices)\n i = 0\n max_dif = 0\n s_max_dif = 0\n index_max = 0\n index_by = 0\n list_index_by = []\n index_sell = 0\n list_index_sell = []\n for i in range(l-1):\n if i == index_max:\n s_max_dif += max_dif\n list_index_by.append(index_by)\n list_index_sell.append(index_sell)\n index_max = l - 1 - prices[::-1].index(max(prices[i+1:]))\n max_dif = 0\n index_by = 0\n index_sell = 0\n if max_dif < prices[index_max] - prices[i]:\n max_dif = prices[index_max] - prices[i]\n index_by = i\n index_sell = index_max\n else:\n if max_dif < prices[index_max] - prices[i]:\n max_dif = prices[index_max] - prices[i]\n index_by = i\n index_sell = index_max\n for j in range(len(list_index_by)):\n print(f\"by: {data[list_index_by[j]]} at {time[list_index_by[j]]}; sell: {data[list_index_sell[j]]} at {time[list_index_sell[j]]}\")\n print(f\"максмальный доход: {s_max_dif}\")", "title": "" }, { "docid": "8d9aabae15a50d652145fd7f66605593", "score": "0.5323603", "text": "def _import_monthly_prices(exch, monthly_prices):\n stock = None\n prices_by_year = {}\n\n for monthly_price_raw in monthly_prices:\n close_price_raw = monthly_price_raw['close_price']\n close_price = Decimal(close_price_raw)\n\n if close_price != Decimal('0'):\n ticker_symbol = monthly_price_raw['ticker_symbol']\n\n if stock is not None and stock.ticker_symbol != ticker_symbol:\n create_stock_yearly_prices(stock, prices_by_year)\n prices_by_year = {}\n\n if stock is None or stock.ticker_symbol != ticker_symbol:\n stock = (Stock.query\n .filter_by(\n exchange=exch,\n ticker_symbol=ticker_symbol)\n .first())\n\n if not stock:\n raise click.BadParameter(\n 'No stock found for ticker symbol \"{0}\"'.format(\n ticker_symbol))\n\n close_at_raw = monthly_price_raw['close_at']\n close_at_year = int(close_at_raw.split('-')[0].lstrip('0'))\n\n if close_at_year not in prices_by_year:\n prices_by_year[close_at_year] = []\n\n prices_by_year[close_at_year].append(close_price)\n\n create_stock_yearly_prices(stock, prices_by_year)", "title": "" }, { "docid": "15079b1569f7776ddbce80e238585353", "score": "0.5318534", "text": "def compile_data():\n with open('sp500tickers.pickle','rb') as f:\n tickers = pickle.load(f)\n\n main_df = pd.DataFrame()\n\n for count, ticker in enumerate(tickers):\n df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))\n df.set_index('date', inplace=True)\n\n df.rename(columns={'adj_close': ticker}, inplace=True)\n df.drop(['open','high','low','close','volume'], axis=1, inplace=True)\n\n if main_df.empty:\n main_df = df\n else:\n main_df = main_df.join(df, how='outer')\n\n if count % 10 == 0:\n print(count)\n\n print(main_df.head())\n main_df.to_csv('sp500_joined_closes.csv')", "title": "" }, { "docid": "82ad5e8e01fab8248c5283909c5e8170", "score": "0.53020716", "text": "def read(self, filename, fields=None, **read_csv):\n read_csv = {\n \"delimiter\": \"\\t\",\n # This should be the column where the date time string is,\n # in this case 0:\n \"parse_dates\": {\"time\": [0]},\n \"index_col\": 0,\n # \"header\": 1,\n }\n\n # Call the reading routine from the base class\n data = super(ShipMSM, self).read(filename, **read_csv)\n\n # Probably, these field names will 
change for each ship. So, look at\n # one CSV file and try to find those fields to rename them:\n data.rename({\n \"Weatherstation.PDWDA.Air_pressure\": \"air_pressure\",\n \"Weatherstation.PDWDA.Air_temperature\": \"air_temperature\",\n \"Weatherstation.PDWDA.Humidity\": \"humidity\",\n \"Weatherstation.PDWDA.Water_temperature\": \"water_temperature\",\n }, inplace=True)\n\n # Filter out error values. The error values might be different for each\n # ship. Adjust these lines then:\n data = data.isel(\n time=(data.air_temperature < 99) & (data.air_pressure > 500))\n\n if fields is not None:\n data = data[fields]\n\n return data.sortby(\"time\")", "title": "" }, { "docid": "50c1f7be400ca91dfa6469ad73daa651", "score": "0.528501", "text": "def fromTickData(self, quote_file_name, trade_file_name, trading_day_YYYYMMDD, time_zone, pxmul, out_csv_file=None, extended_fields=False, overwrite_repo = False):\n\n if not overwrite_repo and out_csv_file is not None :\n # skip this if it exists\n fa = glob.glob(out_csv_file+'*')\n if len(fa) == 1:\n try:\n if os.stat(fa[0]).st_size > 1000:\n if extended_fields:\n # check number of columns to be more than BASE_COLS\n import subprocess\n BASE_COLS = 9 # utc,ohlc,vol,lpx,ltm,vbs\n gz = (fa[0][-3:]=='.gz')\n cmd_str = '%s %s | head -n 1'%('zcat' if gz else 'cat', fa[0])\n l = subprocess.check_output(cmd_str, shell=True).decode().strip().replace('\\n','')\n if len(l.split(',')) > BASE_COLS:\n print ('found ', fa[0], ' not writting')\n return []\n except Exception as e:\n print('problem checking the ' + fa[0] + ' overwriting it. error: ' + str(e))\n\n print ('getting quotes from %s'%quote_file_name)\n quote = td_parser.get_quote_tickdata(quote_file_name, time_zone=time_zone, px_multiplier=pxmul)\n print ('getting trades from %s'%trade_file_name)\n trade = td_parser.get_trade_tickdata(trade_file_name, time_zone=time_zone, px_multiplier=pxmul)\n start_utc, end_utc = self._get_utc(trading_day_YYYYMMDD)\n print ('generating bars on %s'%trading_day_YYYYMMDD)\n bar, colname = td_parser.daily_mts_bar(trade, quote, 1, start_utc, end_utc-start_utc, extended_fields = extended_fields)\n if self.barsec != 1 :\n bar = td_parser.mergeBar(bar, self.barsec)\n\n if out_csv_file is not None:\n print ('writting to %s'%out_csv_file)\n td_parser.saveCSV(bar, out_csv_file)\n return bar", "title": "" }, { "docid": "c9a7f35f6a797f4eef43561f0358e717", "score": "0.52802885", "text": "def _read_csv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\",\", quotechar=quotechar))", "title": "" }, { "docid": "5aa498bbca8f4f0dc6f9d51dcc4649d4", "score": "0.5269067", "text": "def read_data_from_csv(filename: str) -> List[Climate]:\n res = list()\n with open(filename, newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter='\\t')\n for row in reader:\n climate = Climate(name=row[\"name\"], year=int(row[\"year\"]), value=float(row[\"value\"]))\n res.append(climate)\n\n return res", "title": "" }, { "docid": "b8b338957d63e36f7ff43bcd328b4613", "score": "0.5263532", "text": "def read_data_file(sp_context, delimiter, file_path):\n\n raw_data = sp_context.textFile(file_path).cache()\n csv_data = raw_data.map(lambda line: line.split(delimiter)).cache()\n header = csv_data.first() # extract header\n raw_data = csv_data.filter(lambda x: x != header)\n print(raw_data.take(5))\n row_data = raw_data.map(lambda p: Row(\n number_of_orders=p[0],\n total_order_value=p[1],\n weeks_ago=p[2]\n ))\n\n return row_data", 
"title": "" }, { "docid": "7cfc80a15ee44b614de7c5defe31f7e1", "score": "0.52631974", "text": "def read_csv(file_name: str) -> pd.DataFrame:\n df = pd.read_csv(file_name, encoding='Shift-JIS')\n df.columns = df.iloc[0]\n df.drop(df.index[[0, 1]], inplace=True)\n\n date_lst = df.values.tolist()\n date_lst = df.reset_index().values.tolist()\n columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'AdjClose']\n\n new_df = pd.DataFrame(date_lst, columns=columns)\n new_df = new_df.set_index('Date')\n new_df.index = pd.to_datetime(new_df.index, format='%Y-%m-%d')\n\n new_df[['High', 'Low', 'Open', 'Close', 'Volume', 'AdjClose']]\n return new_df", "title": "" }, { "docid": "7305f659b8b6211dbdc0fa4f028f1578", "score": "0.5259883", "text": "def read_data(name):\r\n inputs=[]\r\n with open(name) as csvfile:\r\n readCSV = csv.reader(csvfile, delimiter=',')\r\n for row in readCSV:\r\n \r\n row[0] = float(row[0])\r\n row[1] = float(row[1])\r\n row[2] = int(row[2])\r\n \r\n inputs.append(row)\r\n \r\n return inputs", "title": "" }, { "docid": "4fa1e27bf0f9415337a033e224c9a94f", "score": "0.5259643", "text": "def read_csv(csvfile):\n if not os.path.isfile(csvfile):\n raise RuntimeError(\"Invalid CSV file {0}\".format(csvfile))\n\n csvraw = list(csv.reader(open(csvfile, 'r')))\n\n if len(csvraw) == 0:\n raise RuntimeError(\"Empty CSV file {0}\".format(csvfile))\n\n for head_idx in range(len(csvraw)):\n # find first line which is not empty and not a comment\n # the comment line start with #\n if \"\" != \"\".join(csvraw[head_idx]).strip() and not csvraw[head_idx][0].strip().startswith(\"#\"):\n break\n\n if len(csvraw) == head_idx:\n raise RuntimeError(\"No data in CSV file {0}\".format(csvfile))\n\n if csvraw[head_idx][0].strip() in ['pv', 'PV']:\n csv_data = __read_csv_1(csvraw[head_idx:])\n else:\n csv_data = __read_csv_2(csvraw[head_idx:])\n\n return csv_data", "title": "" }, { "docid": "cbe3fad7becaa8f91d21fb3b7f63ff6f", "score": "0.52418756", "text": "def read_cryptocurrencies_from_csv(filename=CSV_DEFAULT_FILENAME):\n try:\n crypto_df = pd.read_csv(filename)\n except FileNotFoundError:\n crypto_df = pd.DataFrame([], columns=CSV_COLUMNS)\n\n # set the index for faster access\n crypto_df.set_index(CSV_INDEX_COLUMN, inplace=True)\n\n return crypto_df", "title": "" }, { "docid": "d1d22a9be2f8a2b235e58774bdeaed83", "score": "0.523783", "text": "def open_csv_file(csv_file_path):\n with open(csv_file_path, 'r', encoding='utf-8') as csv_file:\n reader = csv.reader(csv_file)\n next(reader)\n\n data = list()\n for row in reader:\n data.append(row)\n\n return data", "title": "" }, { "docid": "00b1340730449738a181f4d565693709", "score": "0.5234545", "text": "def open_file(self):\n self.__data = []\n with open(self.__REL_PATH + '.' 
+ self.__file_type) as data_csv: # open the file\n csv_reader_obj = csv.reader(data_csv, delimiter=',') # create the csv reader object\n for row in csv_reader_obj:\n self.__data.append(row)", "title": "" }, { "docid": "845f7e938fe31fb7cef979898a6d5954", "score": "0.5227001", "text": "def read(self, session, analysisSession, path =None, compressed =False):\n\n if path is not None:\n self._path = path\n if self._path is None:\n return False\n\n model = Tracks_Track.MASTER\n for existingTrack in session.query(model).all():\n self.remainingTracks[existingTrack.uid] = existingTrack.fingerprint\n\n try:\n data = pd.read_csv(self._path)\n except Exception as err:\n self._writeError({\n 'message':'ERROR: Unable to read CSV file \"%s\"' % self._path,\n 'error':err })\n return\n\n if data is None:\n self._writeError({\n 'message':'ERROR: Failed to create CSV reader for file \"%s\"' % self._path })\n return\n\n for index, row in data.iterrows():\n # Skip any rows that don't start with the proper numeric index value, which\n # includes the header row (if it exists) with the column names\n try:\n index = int(row[0])\n except Exception:\n continue\n\n rowDict = dict()\n for column in Reflection.getReflectionList(TrackCsvColumnEnum):\n value = row[column.index]\n\n if value and StringUtils.isStringType(value) and not StringUtils.isTextType(value):\n # Try to decode the value into a unicode string using common codecs\n for codec in ['utf8', 'MacRoman', 'utf16']:\n try:\n decodedValue = value.decode(codec)\n if decodedValue:\n value = decodedValue\n break\n except Exception:\n continue\n\n try:\n # Check to see if the value is NaN, and if it is replace it with an empty\n # string to be ignored during import\n value = '' if np.isnan(value) else value\n except Exception:\n pass\n\n if value != '' or value is None:\n rowDict[column.name] = value\n\n self.fromSpreadsheetEntry(rowDict, session)\n\n for uid, fingerprint in DictUtils.iter(self.remainingTracks):\n # Iterate through the list of remaining tracks, which are tracks not found by the\n # importer. If the track is marked as custom (meaning it is not managed by the importer)\n # it is ignored. 
Otherwise, the track is deleted from the database as a track that no\n # longer exists.\n\n track = Tracks_Track.MASTER.getByUid(uid, session)\n if track.custom:\n continue\n\n Tracks_Track.removeTrack(track, analysisSession)\n self._logger.write('[REMOVED]: No longer exists \"%s\" (%s)' % (\n track.fingerprint, track.uid))\n\n session.flush()\n\n for track in self.created:\n self._logger.write('[CREATED]: \"%s\" (%s)' % (track.fingerprint, track.uid))\n\n return True", "title": "" }, { "docid": "8ad58945f35324d51ea4c99b691d0b7a", "score": "0.5226173", "text": "def load_csv(self):\n raise NotImplementedError", "title": "" }, { "docid": "3e1ae8623360dbabac91b69b06697b7b", "score": "0.52219754", "text": "def _ReadCsv(path):\n ret = []\n with open(path) as f:\n for line in f:\n parts = line.rstrip().split(',')\n if len(parts) == 2 and parts[0] != 'revision':\n ret.append((int(parts[0]), int(float(parts[1]))))\n return ret", "title": "" }, { "docid": "340e882960eebb976fc5b122597499ad", "score": "0.5220239", "text": "def parse(indicator):\n\n # normalize if needed\n if indicator.endswith('.csv'):\n indicator = indicator[:4]\n\n # confirm or find real symbol\n symbol = parse_ticker(indicator)['symbol']\n\n # set path of data\n filename = symbol.upper() + '.csv'\n filepath = os.path.join(CACHE_DIR, filename)\n\n if not os.path.exists(filepath):\n raise CacheFileNotFound(\"Cache file doesn't exists: \", symbol)\n\n return read_data(filepath)", "title": "" }, { "docid": "4db245c41bb51a0a441d52cb74399768", "score": "0.52168983", "text": "def load_stockpiles_df() -> pd.DataFrame:\r\n stockpile_df = pd.read_csv(\"Stockpiles.csv\").set_index('ISO Code')\r\n return stockpile_df", "title": "" }, { "docid": "1edd68406375200280de81f8083b9096", "score": "0.52142787", "text": "def load_data_csv(self):\n data = []\n\n with open(self.csv_path, 'r') as rf:\n for line in rf.readlines()[1:]:\n audio_id, duration, wav, start, stop, spk_id = line.strip(\n ).split(',')\n data.append(\n meta_info(audio_id,\n float(duration), wav,\n int(start), int(stop), spk_id))\n if self.n_train_snts > 0:\n sample_num = min(self.n_train_snts, len(data))\n data = data[0:sample_num]\n\n return data", "title": "" }, { "docid": "9943909591a5f949cb25671bb7a2e5a8", "score": "0.52073073", "text": "def loadDataFromFile(self, fileName):\n\t\t# dateList, openList, highList, lowList, closeList, volumeList, adjCloseList \n\t\t# unpack=True\n\t\treturn np.genfromtxt(fileName, dtype=None, delimiter=',', skiprows=1, usecols=(0,1,2,3,4,5,6))", "title": "" } ]
a5717931205c99dbc7e5b99b793f36a7
This function is a generalization of the operator_kanbaza function. It gives the matrix representation of the linear operator f in the pair of bases (bazaD2,bazaK2) when it is given in the pair of bases (bazaD1,bazaK1). If bazaK1 is not explicitly given, it is assumed that bazaK1=bazaD1. If bazaK2 is not explicitly given, it is assumed that bazaK2=bazaD2.
[ { "docid": "11a9ac321f1d945453fabe1b8393c2b6", "score": "0.7151839", "text": "def operator_baza(f,bazaD1,bazaD2,bazaK1=None,bazaK2=None,klasa='sympy'): \n if isfunction(f):\n #operator f je zadan formulom\n M = operator_kan(f,klasa)\n else:\n #operator f je zadan matricom u paru baza (bazaD1,bazaK1)\n M = f\n if bazaK1 == None: bazaK1 = bazaD1\n if bazaK2 == None: bazaK2 = bazaD2\n S = matrica_prijelaza(bazaD1,bazaD2,klasa)\n T = matrica_prijelaza(bazaK2,bazaK1,klasa)\n if klasa == 'sympy':\n return T*M*S\n elif klasa == 'numpy':\n return np.matmul(np.matmul(T,M),S)", "title": "" } ]
[ { "docid": "af7549cc5d18b9cf9d56a081cd3520b8", "score": "0.7329589", "text": "def operator_kanbaza(f,baza1,baza2=None,klasa='sympy'): \n if isfunction(f):\n #operator f je zadan formulom\n M = operator_kan(f,klasa)\n else:\n #operator f je zadan matricom u paru kanonskih baza\n M = f\n if klasa == 'sympy':\n S = sp.Matrix(baza1).T\n if S.det() == 0: return \"Error: prvi skup nije baza\"\n if baza2 == None:\n T = sp.Matrix(baza1).T\n else:\n T = sp.Matrix(baza2).T\n if T.det() == 0: return \"Error: drugi skup nije baza\"\n return T.inv()*M*S\n elif klasa == 'numpy':\n S = np.transpose(np.array(baza1))\n if np.abs(np.linalg.det(S)) < 1e-15: return \"Error: prvi skup nije baza\"\n if baza2 == None:\n T = np.transpose(np.array(baza1))\n else:\n T = np.transpose(np.array(baza2))\n if np.abs(np.linalg.det(T)) < 1e-15: return \"Error: drugi skup nije baza\"\n return np.matmul( np.matmul(np.linalg.inv(T), M), S)", "title": "" }, { "docid": "18b1510b5cbc4819c5289f364d9616ea", "score": "0.5638643", "text": "def operator(self):", "title": "" }, { "docid": "659288c0775c338c4c2a80237a24c876", "score": "0.54506", "text": "def AdministrarOperacio(operacio, a, b):\n if(mainLib.checkIfCanOperate(a, b)):\n if(operacio == \"+\"):\n op = \"Suma\"\n x = mainLib.sumaMatrius(a, b)\n elif(operacio == \"-\"):\n op = \"Resta\"\n x = mainLib.restaMatrius(a, b)\n else:\n op = \"Multiplicacio\"\n x = mainLib.multMatrius(a, b)\n\n print op + \" de A i B: \"\n print\n mainLib.mostraMatriu(a)\n print \" \" + operacio\n mainLib.mostraMatriu(b)\n print \" ||\"\n mainLib.mostraMatriu(x)\n\n else:\n print mainLib.whichMatrixIsEmpty(a, b)", "title": "" }, { "docid": "e7a604825403906fea3ee86bcf2596b4", "score": "0.5405991", "text": "def operator(op,L_max,m,s):\n \n def ds(op):\n \"\"\"get s increment from the operator string\"\"\"\n if len(op)==1: return 0\n return int(op[1]+'1')\n\n root_half = np.sqrt(0.5)\n\n N = L_max-L_min(m,s)\n\n # identity\n if op == 'I':\n a,b = a_and_b(m,s)\n return jacobi.operator('I',N,a,b)\n\n # cosine multiplication\n if op == 'C':\n a,b = a_and_b(m,s)\n return jacobi.operator('J',N,a,b)\n\n # derivatives\n if (op == 'k+') or (op=='k-'):\n a,b,da,db = a_and_b(m,s,ds=ds(op))\n if (da== 1) and (db==-1): return jacobi.operator('C+',N ,a,b,rescale=-root_half)\n if (da==-1) and (db== 1): return jacobi.operator('C-',N ,a,b,rescale= root_half)\n if (da== 1) and (db== 1): return jacobi.operator('D+',N ,a,b,rescale=-root_half)[:-1,:]\n if (da==-1) and (db==-1): return jacobi.operator('D-',N+1,a,b,rescale= root_half)[:,:-1]\n\n # sine multiplication\n if (op == 'S+') or (op=='S-'):\n a,b,da,db = a_and_b(m,s,ds=ds(op))\n if (da== 1) and (db==-1):\n A = jacobi.operator('A+',N+1,a, b)\n B = jacobi.operator('B-',N+1,a+1,b)\n return B.dot(A)[:-1,:-1]\n if (da==-1) and (db== 1):\n A = jacobi.operator('A-',N+1,a, b)\n B = jacobi.operator('B+',N+1,a-1,b)\n return B.dot(A)[:-1,:-1]\n if (da== 1) and (db== 1):\n A = jacobi.operator('A+',N+1,a, b)\n B = jacobi.operator('B+',N+1,a+1,b)\n return (B.dot(A))[:-2,:-1]\n if (da==-1) and (db==-1):\n A = jacobi.operator('A-',N+2,a, b)\n B = jacobi.operator('B-',N+2,a-1,b)\n return (B.dot(A))[:-1,:-2]", "title": "" }, { "docid": "f8416086a294220da01f163c7a47cb00", "score": "0.5333984", "text": "def AdministrarOperacioUnaMatriu(operacio, a, b):\n if(operacio == \"d\"):\n op = \"Determinant\"\n else:\n op = \"Trasposada\"\n\n aOrB = mainLib.askNumberOption(op +\" de A (1) o de B (2): \", 2)\n if (aOrB == 1):\n OperacioUnaMatriu(operacio, a, \"A\")\n else:\n 
OperacioUnaMatriu(operacio, b, \"B\")", "title": "" }, { "docid": "bda4555c3c59dfc5e8b19de1fb6609df", "score": "0.52813727", "text": "def getOperator(mpo):\n return 0", "title": "" }, { "docid": "4cb6f20aa8f7a8c2acd33a163bf105dc", "score": "0.52739495", "text": "def klik(self,w,k,PPM):\r\n if self._aktualizacja == True or self._grazakonczona == True:\r\n return\r\n self._aktualizacja = True\r\n if PPM == True:\r\n w = self._plansza.polePPM(w,k) \r\n else:\r\n w = self._plansza.poleLPM(w,k)\r\n if w == 1: #wygrana\r\n self._grazakonczona = True\r\n self._gui.wygrana()\r\n elif w == 2: #przegrana\r\n self._grazakonczona = True\r\n self._gui.przegrana()\r\n self._aktualizacja = False\r\n pass", "title": "" }, { "docid": "83f7c715bb565bc65bee8bda0ae362ab", "score": "0.520008", "text": "def poleLPM(self,w,k,nie_klikaj_pytajnika=False):\r\n #pole w pozycji startowej\r\n if self._pola[w][k][0] % 7 == 0:\r\n if self._pola[w][k][1] == True: #bomba\r\n self._pola[w][k][0] = 6\r\n self._gui.ustaw_pole(w, k, '', self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)\r\n return self.przegrana()\r\n else: \r\n self._pola[w][k][0] = 4 #ustawianie stanu\r\n ile = str(self.przegladnij_sasiadow(w,k))\r\n #uaktualnianie przycisku\r\n if ile == '0':\r\n ile = ''\r\n self.przeklikaj_sasiadow(w,k)\r\n self._wolnepola -= 1\r\n self._gui.ustaw_pole(w, k, ile, self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)\r\n if self._wolnepola == 0 == self._pytajniki:\r\n return self.wygrana() \r\n #pole z flaga\r\n elif self._pola[w][k][0] % 7 == 1:\r\n #przyc lewy zablokowany\r\n pass\r\n \r\n \r\n #pole z pytajnikiem\r\n elif self._pola[w][k][0] % 7 == 2:\r\n #przy automatycznym odkrywaniu nie sprawdza flagi\r\n if nie_klikaj_pytajnika == False: \r\n self._pytajniki -= 1\r\n if self._pola[w][k][1] == True: #bomba\r\n self._pytajniki -= 1\r\n self._pola[w][k][0] = 6\r\n self._gui.ustaw_pole(w, k, '', self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)\r\n return self.przegrana()\r\n \r\n #klik PPM - z autoodkrywaniem\r\n else:\r\n self._pola[w][k][0] = 4\r\n ile = str(self.przegladnij_sasiadow(w,k))\r\n #uaktualnianie przycisku\r\n if ile == '0':\r\n ile = ''\r\n self.przeklikaj_sasiadow(w,k)\r\n self._wolnepola -= 1\r\n self._gui.ustaw_pole(w, k, ile, self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)\r\n if self._wolnepola == 0 == self._pytajniki:\r\n return self.wygrana() \r\n return 0 #status \"gramy dalej\"\r", "title": "" }, { "docid": "b7f392b6eb2f2d20ca5a3967dec70241", "score": "0.5185078", "text": "def polePPM(self,w,k):\r\n #pole w pozycji startowej\r\n if self._pola[w][k][0] % 7 == 0:\r\n #ustaw stan z uwzglednieniem kodu xyzzy jesli to jest pole z bomba\r\n self._pola[w][k][0] = 1 + 7*self._uzytykod*self._pola[w][k][1]\r\n #aktualizacja zliczaczy\r\n self._ileflag += 1\r\n if self._pola[w][k][1] == True:\r\n self._flagzbomb += 1\r\n #ustawianie pola w interfejsie\r\n self._gui.ustaw_pole(w, k, \"\", self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)\r\n #sprawdzanie czy nastapila wygrana\r\n if self._ilebomb == self._flagzbomb == self._ileflag and 0 == self._pytajniki:\r\n return self.wygrana()\r\n\r\n \r\n #pole z flaga\r\n elif self._pola[w][k][0] % 7 == 1:\r\n self._pola[w][k][0] = 2 + 7*self._uzytykod*self._pola[w][k][1]\r\n self._pytajniki += 1\r\n self._ileflag -= 1\r\n if self._pola[w][k][1] == 
True:\r\n self._flagzbomb -= 1\r\n self._gui.ustaw_pole(w, k, \"\", self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)\r\n \r\n \r\n #pole z pytajnikiem\r\n elif self._pola[w][k][0] % 7 == 2:\r\n self._pytajniki -= 1\r\n self._pola[w][k][0] = 0+ 7*self._uzytykod*self._pola[w][k][1]\r\n self._gui.ustaw_pole(w, k, \"\", self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)\r\n \r\n #wszystkie bomby wczesniej poprawnie oflagowane, brak zbednych flag, znika ostatni pytajnik\r\n if self._ilebomb == self._flagzbomb == self._ileflag and 0 == self._pytajniki:\r\n return self.wygrana()\r\n \r\n #wszystkie bez min byly klikniete LPM, ale byly rowniez pytajniki\r\n if self._wolnepola == 0 == self._pytajniki:\r\n return self.wygrana()\r\n \r\n return 0 #status \"gramy dalej\"\r", "title": "" }, { "docid": "b334d51b06c52c97ede3a6c06e14d3b3", "score": "0.5171027", "text": "def same_quantum(self, a, b):\r\n return a.same_quantum(b)", "title": "" }, { "docid": "b334d51b06c52c97ede3a6c06e14d3b3", "score": "0.5171027", "text": "def same_quantum(self, a, b):\r\n return a.same_quantum(b)", "title": "" }, { "docid": "82d4a5ad8db163476da9cbd00602b275", "score": "0.51175624", "text": "def operator(self, values, idx_choosen):\n pass", "title": "" }, { "docid": "246948c3d4b36ecf7941431d8a70a53a", "score": "0.5031777", "text": "def przegrana(self):\r\n for w in range(self._w):\r\n for k in range(self._k):\r\n if self._pola[w][k][0] %7 in [0]:\r\n #nieklikniete pole z bomba\r\n if self._pola[w][k][1] == True: #bomba\r\n self._pola[w][k][0] = 3 + self._uzytykod * 7 #ustawianie stanu\r\n self._gui.ustaw_pole(w, k, '', self._pola[w][k][0],\r\n self._ilebomb - self._flagzbomb, self._pytajniki,\r\n self._wolnepola)\r\n elif self._pola[w][k][0] %7 in [1]:\r\n #flaga postawiona w zlym miejscu\r\n if self._pola[w][k][1] == False: #bomba\r\n self._gui.ustaw_pole(w, k, '', 5,\r\n self._ilebomb - self._flagzbomb, self._pytajniki,\r\n self._wolnepola)\r\n elif self._pola[w][k][0] %7 in [2]:\r\n #odkrywanie pytajnika jezeli jest pod nim bomba\r\n if self._pola[w][k][1] == True: #bomba\r\n self._pola[w][k][0] = 3 + self._uzytykod * 7 #ustawianie stanu\r\n self._gui.ustaw_pole(w, k, '', self._pola[w][k][0],\r\n self._ilebomb - self._flagzbomb, self._pytajniki,\r\n self._wolnepola) \r\n return 2 #status przegranej\r", "title": "" }, { "docid": "c40ba6cd6b941d5da25cb6f7d5fb2187", "score": "0.4993907", "text": "def PauliOperator(label):\n pauli = {\n 'I': np.matrix([[1, 0], [0, 1]]),\n 'Z': np.matrix([[1, 0], [0, -1]]),\n 'X': np.matrix([[0, 1], [1, 0]]),\n 'Y': np.matrix([[0, -1j], [1j, 0]])\n }\n\n operator = pauli[label[0]]\n for letter in label[1:]:\n operator = np.kron(operator, pauli[letter])\n\n return operator", "title": "" }, { "docid": "be284642f1d7cbf8fd54ea8c4221d8b0", "score": "0.4970203", "text": "def test_operator_mapping(operand_a, operand_b, operator, expected_result):\n\n operator_func = OperatorUtils.get_operator(operator)\n\n assert operator_func(operand_a, operand_b) == expected_result", "title": "" }, { "docid": "63fe42a54889ff40147928264b35afe3", "score": "0.49599716", "text": "def __or__(self, other: \"QuOperator\") -> \"QuOperator\":\n return self.tensor_product(other)", "title": "" }, { "docid": "74a0c700148ca3b9363d11be29adc8f0", "score": "0.49543437", "text": "def test2_12_1_2(self):\n GA, e_1, e_2, e_3 = Ga.build('e*1|2|3')\n B = e_1 ^ (e_2 - e_3)\n self.assertEqual(e_1 ^ B, 0)\n self.assertNotEqual((e_1 + e_2) ^ B, 0)\n 
self.assertNotEqual((e_1 + e_2 + e_3) ^ B, 0)\n self.assertEqual((2 * e_1 - e_2 + e_3) ^ B, 0)", "title": "" }, { "docid": "277e49f908efec3b393c80884e746cba", "score": "0.49329183", "text": "def test_two_qubits_merge_gate_subset(self):\n\n def qfunc():\n qml.CRX(0.1, wires=[0, 1])\n qml.CRX(0.2, wires=[0, 1])\n qml.RY(0.3, wires=[\"a\"])\n qml.RY(0.5, wires=[\"a\"])\n qml.RX(-0.5, wires=[2])\n qml.RX(0.2, wires=[2])\n qml.RZ(0.2, wires=[2])\n\n transformed_qfunc = merge_rotations(include_gates=[\"RX\", \"CRX\"])(qfunc)\n\n ops = qml.tape.make_qscript(transformed_qfunc)().operations\n\n names_expected = [\"CRX\", \"RY\", \"RY\", \"RX\", \"RZ\"]\n wires_expected = [Wires([0, 1]), Wires(\"a\"), Wires(\"a\"), Wires(2), Wires(2)]\n compare_operation_lists(ops, names_expected, wires_expected)\n\n assert qml.math.isclose(ops[0].parameters[0], 0.3)\n assert qml.math.isclose(ops[1].parameters[0], 0.3)\n assert qml.math.isclose(ops[2].parameters[0], 0.5)\n assert qml.math.isclose(ops[3].parameters[0], -0.3)", "title": "" }, { "docid": "56bb84a5628b2b52976f4bd39950e155", "score": "0.49108955", "text": "def __init__(self, booleanos=[], operadores=[]):\r\n self.booleanos = (\"verdadero\", \"falso\")\r\n self.operadores = (\"-\", \"+\", \"*\", \"/\", \"&&\", \"||\", \">\", \"<\", \">=\", \"<=\", \"!=\", \"==\", \"=\")\r\n global debug\r\n debug = False", "title": "" }, { "docid": "032a64e19220d5d1754793fa943a96d3", "score": "0.4896869", "text": "def binary_operator(operator: str, lhs: Formula, rhs: Formula) -> Formula:\r\n assert is_binary(operator)\r\n return Formula(operator, lhs, rhs)", "title": "" }, { "docid": "27159cf346c0b0f6ab9c45aa0f05a8be", "score": "0.48949242", "text": "def operator_kan(f,klasa='sympy'): \n n = len(signature(f).parameters)\n if klasa == 'sympy':\n identiteta = sp.eye(n)\n kan = [list(identiteta.col(i)) for i in range(identiteta.cols)]\n slika = list(map(lambda t: f(*t), kan))\n matrica = sp.Matrix(slika).T\n elif klasa == 'numpy':\n kan = np.eye(n)\n slika = np.apply_along_axis(lambda t: f(*t),1,kan)\n matrica = np.transpose(slika)\n return matrica", "title": "" }, { "docid": "2677d33b16434e816b08d2646d7e901b", "score": "0.48808172", "text": "def wygrana(self): \r\n #odkrywanie pozostałych pól \r\n for w in range(self._w):\r\n for k in range(self._k):\r\n if self._pola[w][k][0] %7 in [0]: \r\n if self._pola[w][k][1] == True: #bomba\r\n self._pola[w][k][0] = 3 + self._uzytykod * 7\r\n ile = ''\r\n else:\r\n self._pola[w][k][0] = 4 #ustawianie stanu\r\n ile = str(self.przegladnij_sasiadow(w,k))\r\n #uaktualnianie przycisku\r\n if ile == '0':\r\n ile = ''\r\n self._gui.ustaw_pole(w, k, ile, self._pola[w][k][0],0,0,0)\r\n \r\n return 1 #status wygranej\r", "title": "" }, { "docid": "792faa135b8bcfb3b03a45810f7b6a05", "score": "0.48751622", "text": "def apply_operator_3(\n statevector1,\n statevector2,\n operator,\n A,\n B,\n C,\n ):\n\n N = (statevector1.shape[0]&-statevector1.shape[0]).bit_length()-1\n if A >= N: raise RuntimeError('A >= N')\n if B >= N: raise RuntimeError('B >= N')\n if C >= N: raise RuntimeError('C >= N')\n if A == B: raise RuntimeError('A == B')\n if A == C: raise RuntimeError('A == C')\n if B == C: raise RuntimeError('B == C')\n if operator.shape != (8,8): raise RuntimeError('3-body gate must be (8,8)')\n if statevector1.shape != (2**N,): raise RuntimeError('statevector1 should be (%d,) shape, is %r shape' % (2**N, statevector1.shape))\n if statevector2.shape != (2**N,): raise RuntimeError('statevector2 should be (%d,) shape, is %r shape' % (2**N, 
statevector2.shape))\n\n A2, B2, C2 = sorted((A, B, C))\n\n operator2 = np.reshape(operator, (2,2,2,2,2,2))\n\n bra_indices = 'ijk'\n ket_indices = 'lmn'\n bra_indices2 = ''.join([bra_indices[(A, B, C).index(_)] for _ in (A2, B2, C2)])\n ket_indices2 = ''.join([ket_indices[(A, B, C).index(_)] for _ in (A2, B2, C2)])\n \n operator2 = np.einsum('%s%s->%s%s' % (bra_indices, ket_indices, bra_indices2, ket_indices2), operator2)\n\n L = 2**(A2) # Left hangover\n M = 2**(B2-A2-1) # Middle1 hangover\n P = 2**(C2-B2-1) # Middle2 hangover\n R = 2**(N-C2-1) # Right hangover\n statevector1v = statevector1.view() \n statevector2v = statevector2.view()\n statevector1v.shape = (L,2,M,2,P,2,R)\n statevector2v.shape = (L,2,M,2,P,2,R)\n np.einsum('LlMmPnR,ijklmn->LiMjPkR', statevector1v, operator2, out=statevector2v)\n\n return statevector2, statevector1", "title": "" }, { "docid": "b0cb36ed2e7d81d5733c74b5dbe8d64d", "score": "0.48561287", "text": "def apply_operator_2(\n statevector1,\n statevector2,\n operator,\n A,\n B,\n ):\n\n N = (statevector1.shape[0]&-statevector1.shape[0]).bit_length()-1\n if A >= N: raise RuntimeError('A >= N')\n if B >= N: raise RuntimeError('B >= N')\n if A == B: raise RuntimeError('A == B')\n if operator.shape != (4,4): raise RuntimeError('2-body gate must be (4,4)')\n if statevector1.shape != (2**N,): raise RuntimeError('statevector1 should be (%d,) shape, is %r shape' % (2**N, statevector1.shape))\n if statevector2.shape != (2**N,): raise RuntimeError('statevector2 should be (%d,) shape, is %r shape' % (2**N, statevector2.shape))\n\n operator2 = np.reshape(operator, (2,2,2,2))\n if A > B:\n A2, B2 = B, A\n operator2 = np.einsum('ijkl->jilk', operator2)\n else:\n A2, B2 = A, B\n\n L = 2**(A2) # Left hangover\n M = 2**(B2-A2-1) # Middle hangover\n R = 2**(N-B2-1) # Right hangover\n statevector1v = statevector1.view() \n statevector2v = statevector2.view()\n statevector1v.shape = (L,2,M,2,R)\n statevector2v.shape = (L,2,M,2,R)\n np.einsum('LkMlR,ijkl->LiMjR', statevector1v, operator2, out=statevector2v)\n\n return statevector2, statevector1", "title": "" }, { "docid": "c18071e778eeab67d9130ad3a22199ac", "score": "0.48549315", "text": "def custo_par_aresta(distancias, aresta1, aresta2):\n return distancias[aresta1[0]][aresta1[1]] + distancias[aresta2[0]][aresta2[1]]", "title": "" }, { "docid": "4fa2b9b5a6e78095a2bfd1954358b9b8", "score": "0.48369998", "text": "def get_operator_map(self):\n return self.operator_map", "title": "" }, { "docid": "f73693990da3337c2205a0b9d4aff94e", "score": "0.48365572", "text": "def checkOperatorPrecedence(a,b):\n\tcheck={}\n\tcheck['(']=1\n\tcheck['*']=2\n\tcheck['/']=2\n\tcheck['-']=3\n\tcheck['+']=3\n\tif check[a] <= check[b]:\n\t\treturn 1\n\telse:\n\t\treturn 0", "title": "" }, { "docid": "0985ce4fc97bd09f5153e8941618208a", "score": "0.48317426", "text": "def test2_12_1_3(self):\n GA, e_1, e_2, e_3 = Ga.build('e*1|2|3')\n a = e_1 + 2 * e_2\n b = -e_1 - e_2\n B = a ^ b\n self.assertEqual(B, 1 * (e_1 ^ e_2))", "title": "" }, { "docid": "9fcbd6afeb60248618bc74bc53c79d27", "score": "0.48085868", "text": "def test_comparison_on_different_key_blocks(self):\n self.skipTest(\n \"The KeyBlock structure does not support the comparison operators.\"\n )", "title": "" }, { "docid": "5856d33f0d0f0964d910bccbcec481b8", "score": "0.47952715", "text": "def eval(cls, a,b):\n if not (a and b): return S.Zero\n if a == b: return S.Zero\n if a.is_commutative or b.is_commutative:\n return S.Zero\n\n #\n # [A+B,C] -> [A,C] + [B,C]\n #\n a = a.expand()\n if 
isinstance(a,Add):\n return Add(*[cls(term,b) for term in a.args])\n b = b.expand()\n if isinstance(b,Add):\n return Add(*[cls(a,term) for term in b.args])\n\n #\n # [xA,yB] -> xy*[A,B]\n #\n c_part = []\n nc_part = []\n nc_part2 = []\n if isinstance(a,Mul):\n c_part,nc_part = split_commutative_parts(a)\n if isinstance(b,Mul):\n c_part2,nc_part2 = split_commutative_parts(b)\n c_part.extend(c_part2)\n if c_part:\n a = nc_part or [a]\n b = nc_part2 or [b]\n return Mul(*c_part)*cls(Mul(*a),Mul(*b))\n\n\n #\n # single second quantization operators\n #\n if isinstance(a, BosonicOperator) and isinstance(b, BosonicOperator):\n if isinstance(b,CreateBoson) and isinstance(a,AnnihilateBoson):\n return KroneckerDelta(a.state,b.state)\n if isinstance(a,CreateBoson) and isinstance(b,AnnihilateBoson):\n return S.NegativeOne*KroneckerDelta(a.state,b.state)\n else:\n return S.Zero\n if isinstance(a, FermionicOperator) and isinstance(b, FermionicOperator):\n return wicks(a*b)- wicks(b*a)\n\n #\n # Canonical ordering of arguments\n #\n if cmp(a, b) > 0:\n return S.NegativeOne*cls(b, a)", "title": "" }, { "docid": "41851fb60fc066ff04c7c2e2f5872cd6", "score": "0.47927785", "text": "def compare(self, obj1, obj2):\n # Key o1 = (Key) obj1;\n # Key o2 = (Key) obj2;\n # if (o1.getNote() == KeyLetterEnum.BS && o1.getPosition() == KeyPositionEnum.getLastPosition(o2.getPosition())\n # && o2.getNote() == KeyLetterEnum.C) {\n # return 0;\n # }\n if (obj1.getNote() == kle.BS) and (obj1.getPosition() == kpe.getLastPosition(obj2.getPosition())) and (\n obj2.getNote() == kle.C):\n return 0\n\n # if (o1.getNote() == KeyLetterEnum.C\n # && o2.getPosition() == KeyPositionEnum.getNextPosition(o1.getPosition())\n # && o2.getNote() == KeyLetterEnum.B) {\n # return 0;\n # }\n if (obj1.getNote() == kle.C) and (obj2.getPosition() == kpe.getNextPosition(obj1.getPosition())) and (\n obj2.getNote() == kle.B):\n return 0\n\n # if (o1.getPosition().getPosition() > o2.getPosition().getPosition()) {\n # return 1;\n # }\n # if (o1.getPosition().getPosition() < o2.getPosition().getPosition()) {\n # return -1;\n # } else {\n # if (o1.getNote().getKeyNumber() == o2.getNote().getKeyNumber()) {\n # return 0;\n # } else if (o1.getNote().getKeyIdentity() < o2.getNote().getKeyIdentity()) {\n # return -1;\n # } else {\n # return 1;\n # }\n # }\n if (obj1.getPosition().getPosition() > obj2.getPosition().getPosition()):\n return 1\n elif obj2.getPosition().getPosition() < obj2.getPosition().getPosition():\n return -1\n else:\n if obj1.getNote().getKeyNumber() == obj2.getNote().getKeyNumber():\n return 0\n elif obj1.getNote().getKeyIdentity() < obj2.getNote().getKeyIdentity():\n return -1\n else:\n return 1", "title": "" }, { "docid": "6e56a3ae04f61ff2c0f1d654ce269dc7", "score": "0.4767549", "text": "def makeBs2KstKstSameCharge(name,\n Kstsel,\n BMassWin,\n BVCHI2,\n BDOCA,\n BIPCHI2): \n \n _motherCuts = \"(VFASPF(VCHI2/VDOF) < %(BVCHI2)s) & (MIPCHI2DV(PRIMARY)< %(BIPCHI2)s)\"% locals()\n _combinationCut = \"(ADAMASS('B_s0') < %(BMassWin)s *MeV) & (AMAXDOCA('')< %(BDOCA)s *mm)\" % locals() \n\n _Bs = CombineParticles('_'+name)\n _Bs.DecayDescriptor = \"[B_s0 -> K*(892)0 K*(892)0]cc\"\n _Bs.CombinationCut = _combinationCut\n _Bs.MotherCut = _motherCuts\n\n _Bs.ReFitPVs = True\n\n _Bs.addTool( OfflineVertexFitter )\n# _Bs.VertexFitters.update( { \"\" : \"OfflineVertexFitter\"} )\n _Bs.ParticleCombiners.update( { \"\" : \"OfflineVertexFitter\"} ) # Fix for DaVinci v32r0 by A.Poluektov\n _Bs.OfflineVertexFitter.useResonanceVertex = False\n\n\n return 
Selection ( name,\n Algorithm = _Bs,\n RequiredSelections = [Kstsel])", "title": "" }, { "docid": "24e8b039c74e0712addf16e9bf81d1ea", "score": "0.47494054", "text": "def set_k2D(Na,Nb,deltachia,deltachib): # notice Na as index should be Na-1\n k1D_a=set_k1D(Na,deltachia)\n k1D_b=set_k1D(Nb,deltachib)\n ka=np.zeros((Na,Nb), float)\n for i in range(Nb):\n ka[:,i]=k1D_a[:,0]\n kb=np.zeros((Na,Nb), float)\n for i in range(Na):\n kb[i,:]=k1D_b.T\n return np.asarray([ka, kb])\n\n #### -> for vk_indexset() -> for set_k2D()", "title": "" }, { "docid": "917a7ff6c02ef586eb2dfdfc7481172c", "score": "0.4740766", "text": "def test_parens_for_sandwiched_single_operand_same_op_as_neighbors(self):\n b = self._bexpr_from_postfix_tokens(\n ['A', 'B', 'and',\n 'C',\n 'D', 'E', 'and',\n 'and', 'and'])\n self.assertEqual(\n b.symbols,\n ['A', 'B', 'C', 'D', 'E'])\n self.assertEqual(\n b.tokens,\n ['A', 'and', 'B', 'and', 'C', 'and', 'D', 'and', 'E'])\n self.assertEqual(\n b.postfix_tokens,\n ['A', 'B', 'and',\n 'C',\n 'D', 'E', 'and',\n 'and', 'and'])\n self.assertEqual(\n b.raw_expr,\n 'A and B and C and D and E')\n self.assertTrue(b.tree is not None)", "title": "" }, { "docid": "f953a234e776f18b15f04b9076789d1a", "score": "0.47368342", "text": "def operator_eq(self, *args):\n return _vnl_matrix_fixedPython.vnl_matrix_fixedD_3_12_operator_eq(self, *args)", "title": "" }, { "docid": "65003198493b8f2e2493e5ec05df4f50", "score": "0.47259355", "text": "def __init__(self,wier,kol,bomb,logika,gui):\r\n #referencje\r\n self._logika = logika\r\n self._gui = gui\r\n #rozmiar planszy\r\n self._w = wier #numeracja 1-15\r\n self._k = kol\r\n \r\n #ilosc bomb\r\n self._ilebomb = bomb\r\n # liczniki\r\n self._wolnepola = wier * kol - bomb\r\n self._ileflag = 0\r\n self._flagzbomb = 0\r\n self._pytajniki = 0\r\n #czy zostal uzyty kod - potrzebne przy aktualizacji statusow pol\r\n self._uzytykod = False\r\n \r\n '''Tworzenie macierzy pol [wiersz][kolumna][nr stanu, czy jest bomba].\r\n Lista stanow pol:\r\n 0- puste, 1- flaga, 2 - pytajnik, 3 - bomba // po xyzzy stan+=7\r\n 4 - klikniete bez bomby // tylko bez xyzzy\r\n 5 - zle postawiona flaga, 6 - wybuch // tylko bez xyzzy\r\n w sumie 7 + 4 = 11 stanow\r\n '''\r\n self._pola = [[[0,False] for j in range(kol)] for i in range(wier)]\r\n \r\n #losowanie pol\r\n licznik = self._ilebomb\r\n while 1:\r\n x = random.randint(0, self._w-1)\r\n y = random.randint(0, self._k-1)\r\n if self._pola[x][y][1] == False:\r\n self._pola[x][y][1] = True\r\n licznik -= 1\r\n if licznik == 0:\r\n break", "title": "" }, { "docid": "a8c3ccaf3d610b1a6e3dc7a92bcb0c64", "score": "0.47246957", "text": "def test_parens_for_negated_leading_chained_operator_clause(self):\n b = self._bexpr_from_postfix_tokens(\n ['A', 'B', 'iff', '!',\n '0', '1', 'and',\n 'and'])\n self.assertEqual(\n b.symbols,\n ['A', 'B'])\n self.assertEqual(\n b.tokens,\n ['!', '(', 'A', 'iff', 'B', ')', 'and', '0', 'and', '1'])\n self.assertEqual(\n b.postfix_tokens,\n ['A', 'B', 'iff', '!',\n '0', '1', 'and',\n 'and'])\n self.assertEqual(b.raw_expr, '!(A iff B) and 0 and 1')\n self.assertTrue(b.tree is not None)", "title": "" }, { "docid": "a8fc52adfa5fd764d9077ce87e938d3e", "score": "0.47204044", "text": "def _get_eq_op_ ( klass1 , klass2 ) :\n t = klass1 , klass2\n ops = _eq_ops_.get( t , None )\n if ops : return ops ## RETURN \n\n ## try to load the operators \n try :\n ops = Ostap.Math.EqualityOp ( klass1 , klass2 )\n _eq_ops_ [ t ] = ops\n return ops ## RETURN \n except TypeError:\n return None ## RETURN\n \n return None ## 
RETURN", "title": "" }, { "docid": "d9046b95bd3ce0b86e2da0560c1a4b7c", "score": "0.47179422", "text": "def operator(self):\n return self._operator", "title": "" }, { "docid": "1f74fdc5a8ae9f8d8d0852c1389e603f", "score": "0.47171295", "text": "def test_parens_for_leading_chained_operator_clause(self):\n b = self._bexpr_from_postfix_tokens(\n ['A', 'B', 'C', 'D', 'and', 'and', 'and',\n 'E',\n 'or'])\n self.assertEqual(\n b.symbols,\n ['A', 'B', 'C', 'D', 'E'])\n self.assertEqual(\n b.tokens,\n ['(', 'A', 'and', 'B', 'and', 'C', 'and', 'D', ')', 'or', 'E'])\n self.assertEqual(\n b.postfix_tokens,\n ['A', 'B', 'C', 'D', 'and', 'and', 'and',\n 'E',\n 'or'])\n self.assertEqual(\n b.raw_expr,\n '(A and B and C and D) or E')\n self.assertTrue(b.tree is not None)", "title": "" }, { "docid": "d45dd47e8c274ff9de2aef1c303a60f7", "score": "0.47143483", "text": "def test2_12_2_9(self):\n for GA in [Ga('e*1|2'), Ga('e*1|2|3'), Ga('e*1|2|3|4')]: # , Ga('e*1|2|3|4|5')]:\n for k, l in product(range(GA.n + 1), range(GA.n + 1)):\n Ak = GA.mv('A', 'blade', k)\n Bl = GA.mv('B', 'blade', l)\n self.assertEqual(Ak ^ Bl, (-1) ** (k * l) * (Bl ^ Ak))", "title": "" }, { "docid": "c284ce68e33e75bd58148d07b3736d0d", "score": "0.47029236", "text": "def operator_eq(self, *args):\n return _vnl_matrix_fixedPython.vnl_matrix_fixedD_2_6_operator_eq(self, *args)", "title": "" }, { "docid": "3b0d4cd99bb9b9ba2e6650625ccde524", "score": "0.46820453", "text": "def demonstrate_logical_operators():", "title": "" }, { "docid": "a6f652766248768b1a936e006e145696", "score": "0.4679117", "text": "def wyrownaj_liczbe_danych(dane, etykiety,slownik_etykiet_danych):\n lista_aktywnosci = list( slownik_etykiet_danych.keys() )\n lista_etykiet = list( etykiety )\n ksztalt_danych = dane.shape\n wystapienia = {}\n for aktywnosc in lista_aktywnosci:\n wystapienia[aktywnosc] = lista_etykiet.count(slownik_etykiet_danych[aktywnosc])\n liczby_wystapien_aktywnosci = list(wystapienia.values())\n minimalna_liczba_wystapien = min( liczby_wystapien_aktywnosci )\n\n print(\"AKTYWNOSCI W DANYCH WEJSCIOWYCH\")\n print(wystapienia)\n \n return_dane = np.zeros( ( len(slownik_etykiet_danych) * minimalna_liczba_wystapien,ksztalt_danych[1],ksztalt_danych[2] ) )\n return_etykiety = np.zeros( (len(slownik_etykiet_danych) * minimalna_liczba_wystapien,) )\n\n wektor_wystapien = np.zeros( ( len(slownik_etykiet_danych) , ) ) # tu bedziemy zliczac ile razy dana etykieta juz wystapila\n return_dane_licznik = 0\n \n for i in range(len(dane)):\n if wektor_wystapien[int(etykiety[i])] < minimalna_liczba_wystapien :\n wektor_wystapien[int(etykiety[i])] += 1\n return_dane[return_dane_licznik] = dane[i]\n return_etykiety[return_dane_licznik] = etykiety[i]\n return_dane_licznik += 1\n \n lista_etykiet = list( return_etykiety )\n wystapienia = {}\n for aktywnosc in lista_aktywnosci:\n wystapienia[aktywnosc] = lista_etykiet.count(slownik_etykiet_danych[aktywnosc])\n print(\"AKTYWNOSCI PO WYROWNANIU:\")\n print(wystapienia)\n\n return (return_dane,return_etykiety)", "title": "" }, { "docid": "11f72a6dc60d5fd530943833b583dced", "score": "0.46677864", "text": "def operator_eq(self, *args):\n return _vnl_matrix_fixedPython.vnl_matrix_fixedD_2_2_operator_eq(self, *args)", "title": "" }, { "docid": "70c2e0a9de1c08018645ac4b36237ee5", "score": "0.4663572", "text": "def test_multiple_dig_nop(self):\n game = self.test_mines.new_game(3,7,[[1,5],[2,4],(0,1)])\n game2 = self.test_mines.new_game(3,7,[[1,5],[2,4],(0,1)])\n\n game[\"mask\"][0][0]=True\n game2[\"mask\"][0][0]=True\n\n 
dig(game,0,0)\n\n for key in game.keys():\n self.assertEqual(game2[key], game[key])", "title": "" }, { "docid": "6691420c5c49478cae619f6513340979", "score": "0.46592966", "text": "def test_parens_for_sandwiched_chained_operator_clause(self):\n b = self._bexpr_from_postfix_tokens(\n ['A',\n 'B', 'C', 'D', '&', '&',\n 'E',\n '->', '->'])\n self.assertEqual(\n b.symbols,\n ['A', 'B', 'C', 'D', 'E'])\n self.assertEqual(\n b.tokens,\n ['A', '->', '(', 'B', '&', 'C', '&', 'D', ')', '->', 'E'])\n self.assertEqual(\n b.postfix_tokens,\n ['A',\n 'B', 'C', 'D', '&', '&',\n 'E',\n '->', '->'])\n self.assertEqual(b.raw_expr, 'A -> (B & C & D) -> E')\n self.assertTrue(b.tree is not None)", "title": "" }, { "docid": "4cf6fb357288802cd05cac976b50cb12", "score": "0.46501648", "text": "def contraction(a,b):\n if isinstance(b,FermionicOperator) and isinstance(a,FermionicOperator):\n if isinstance(a,AnnihilateFermion) and isinstance(b,CreateFermion):\n if b.state.assumptions0.get(\"below_fermi\"):\n return S.Zero\n if a.state.assumptions0.get(\"below_fermi\"):\n return S.Zero\n if b.state.assumptions0.get(\"above_fermi\"):\n return KroneckerDelta(a.state,b.state)\n if a.state.assumptions0.get(\"above_fermi\"):\n return KroneckerDelta(a.state,b.state)\n\n return (KroneckerDelta(a.state,b.state)*\n KroneckerDelta(b.state,Symbol('a',dummy=True,above_fermi=True)))\n if isinstance(b,AnnihilateFermion) and isinstance(a,CreateFermion):\n if b.state.assumptions0.get(\"above_fermi\"):\n return S.Zero\n if a.state.assumptions0.get(\"above_fermi\"):\n return S.Zero\n if b.state.assumptions0.get(\"below_fermi\"):\n return KroneckerDelta(a.state,b.state)\n if a.state.assumptions0.get(\"below_fermi\"):\n return KroneckerDelta(a.state,b.state)\n\n return (KroneckerDelta(a.state,b.state)*\n KroneckerDelta(b.state,Symbol('i',dummy=True,below_fermi=True)))\n\n # vanish if 2xAnnihilator or 2xCreator\n return S.Zero\n\n else:\n #not fermion operators\n t = ( isinstance(i,FermionicOperator) for i in (a,b) )\n raise ContractionAppliesOnlyToFermions(*t)", "title": "" }, { "docid": "4e92e8fb408ba1f6109293c7042c9814", "score": "0.46470928", "text": "def operate(\n cls,\n value_1: CalculationValue,\n value_2: CalculationValue,\n value_1_calc: bool,\n value_2_calc: bool\n ) -> CalculationValue:\n l1, r1, l2, r2 = BinaryOperator.get_parens(value_1_calc, value_2_calc)\n\n if type(value_1) in (int, float):\n if type(value_2) in (int, float):\n return cls.operator(value_1, value_2)\n elif isinstance(value_2, Series):\n result = cls.operator(value_1, value_2)\n result.name = (\n f'{l1}{value_1}{r1} '\n f'{cls.symbol} '\n f'{l2}{value_2.name}{r2}'\n )\n return result\n elif isinstance(value_2, DataFrame):\n result = cls.operator(value_1, value_2)\n result.columns = [\n f'{l1}{value_1}{r1} {cls.symbol} {l2}{name_2}{r2}'\n for name_2 in value_2.columns\n ]\n return result\n else:\n raise TypeError(\n 'value_2 must be int, float, Series or DataFrame'\n )\n elif isinstance(value_1, Series):\n if type(value_2) in (int, float):\n result = cls.operator(value_1, value_2)\n result.name = (\n f'{l1}{value_1.name}{r1} '\n f'{cls.symbol} '\n f'{l2}{value_2}{r2}')\n return result\n elif isinstance(value_2, Series):\n result = cls.operator(value_1, value_2)\n result.name = (\n f'{l1}{value_1.name}{r1} '\n f'{cls.symbol} '\n f'{l2}{value_2.name}{r2}')\n return result\n elif isinstance(value_2, DataFrame):\n result = getattr(\n value_2, cls.pandas_op_name\n )(value_1, axis=0)\n result.columns = [\n f'{l1}{value_1.name}{r1} {cls.symbol} {l2}{column}{r2}'\n 
for column in value_2.columns\n ]\n return result\n else:\n raise TypeError(\n 'value_2 must be int, float, Series or DataFrame'\n )\n elif isinstance(value_1, DataFrame):\n if type(value_2) in (int, float):\n result = cls.operator(value_1, value_2)\n result.columns = [\n f'{l1}{column}{r1} {cls.symbol} {l2}{value_2}{r2}'\n for column in value_1.columns\n ]\n return result\n elif isinstance(value_2, Series):\n result = getattr(\n value_1, cls.pandas_op_name\n )(value_2, axis=0)\n result.columns = [\n f'{l1}{column}{r1} {cls.symbol} {l2}{value_2.name}{r2}'\n for column in value_1.columns\n ]\n return result\n elif isinstance(value_2, DataFrame):\n if value_1.shape[1] != value_2.shape[1]:\n raise ValueError(\n 'Can only operate 2 dataframes together with same '\n 'number of columns'\n )\n result = DataFrame.from_dict({\n f'{l1}{col_1}{r1} {cls.symbol} {l2}{col_2}{r2}':\n cls.operator(value_1[col_1], value_2[col_2])\n for col_1, col_2 in zip(value_1.columns, value_2.columns)\n })\n return result\n else:\n raise TypeError(\n 'value_2 must be int, float, Series or DataFrame'\n )\n else:\n raise TypeError(\n 'value_1 must be int, float, Series or DataFrame'\n )", "title": "" }, { "docid": "fe37f36503495c794f3c851f8aaec2eb", "score": "0.46409702", "text": "def basic_op(operator, value1, value2):\n return eval(str(value1)+operator+str(value2))", "title": "" }, { "docid": "b07abc88787af07e58b3ac7efa69fcb0", "score": "0.4639607", "text": "def is_left_associative(operator):\n\n return operator in \"+-*/\"", "title": "" }, { "docid": "c14ac52a65303c405a1226cd4ba768e6", "score": "0.46368247", "text": "def setOverloadedOperator(self, isOverloadedOperator: bool) -> None:\n ...", "title": "" }, { "docid": "8097a256ca8d5ed6199daa1103417b09", "score": "0.46303564", "text": "def xyzzy(self):\r\n self._uzytykod = True\r\n for w in range(self._w):\r\n for k in range(self._k):\r\n if self._pola[w][k][0] in [0,1,2,3]:\r\n if self._pola[w][k][1] == True: #bomba\r\n self._pola[w][k][0] += 7\r\n self._gui.ustaw_pole(w, k, '', self._pola[w][k][0],\r\n self._ilebomb - self._ileflag, self._pytajniki,\r\n self._wolnepola)", "title": "" }, { "docid": "fa0cab431de23f571d24cda5b019b995", "score": "0.4625291", "text": "def b_operator(self, P):\r\n A, B, Q, R, beta = self.A, self.B, self.Q, self.R, self.beta\r\n S1 = Q + beta * dot(B.T, dot(P, B)) \r\n S2 = beta * dot(B.T, dot(P, A))\r\n S3 = beta * dot(A.T, dot(P, A))\r\n F = solve(S1, S2) \r\n new_P = R - dot(S2.T, solve(S1, S2)) + S3 \r\n return F, new_P", "title": "" }, { "docid": "91ba95def0bd10fc1427b2dba35336e7", "score": "0.46137527", "text": "def operate (self, oper1, oper2):\n\n return None", "title": "" }, { "docid": "abd9c27da68861c1280789ad126f6d03", "score": "0.46124133", "text": "def operationXOR(a, b):\n if(a==\"0\" and b==\"0\"):\n return \"0\"\n if(a==\"1\" and b==\"0\"):\n return \"1\"\n if(a==\"0\" and b==\"1\"):\n return \"1\"\n if(a==\"1\" and b==\"1\"):\n return \"0\"", "title": "" }, { "docid": "6c68588d4ddec7371464dd0e5d6f136c", "score": "0.4587624", "text": "def optAlign( sel1, sel2 ):\n\tcmd.reset()\n\n\t# make the lists for holding coordinates\n\t# partial lists\n\tstored.sel1 = []\n\tstored.sel2 = []\n\t# full lists\n\tstored.mol1 = []\n\tstored.mol2 = []\n\n\t# now put the coordinates into a list\n\t# partials\n\n\t# -- REMOVE ALPHA CARBONS\n\tsel1 = sel1 + \" and N. CA\"\n\tsel2 = sel2 + \" and N. 
CA\"\n\t# -- REMOVE ALPHA CARBONS\n\n\tcmd.iterate_state(1, selector.process(sel1), \"stored.sel1.append([x,y,z])\")\n\tcmd.iterate_state(1, selector.process(sel2), \"stored.sel2.append([x,y,z])\")\n\t# full molecule\n\tmol1 = cmd.identify(sel1,1)[0][0]\n\tmol2 = cmd.identify(sel2,1)[0][0]\n\tcmd.iterate_state(1, mol1, \"stored.mol1.append([x,y,z])\")\n\tcmd.iterate_state(1, mol2, \"stored.mol2.append([x,y,z])\")\n\n\tK = kabsch()\n\tU, T1, T2, RMSD, c1, c2 = K.align(stored.sel1, stored.sel2, [])\n\n\tstored.mol2 = map(lambda v:[T2[0]+((v[0]*U[0][0])+(v[1]*U[1][0])+(v[2]*U[2][0])),T2[1]+((v[0]*U[0][1])+(v[1]*U[1][1])+(v[2]*U[2][1])),T2[2]+((v[0]*U[0][2])+(v[1]*U[1][2])+(v[2]*U[2][2]))],stored.mol2)\n\t#stored.mol1 = map(lambda v:[ v[0]+T1[0], v[1]+T1[1], v[2]+T1[2] ], stored.mol1)\n\tstored.mol1 = map(lambda v:[ v[0]+T1[0], v[1]+T1[1], v[2]+T1[2] ], stored.mol1)\n\n\tcmd.alter_state(1,mol1,\"(x,y,z)=stored.mol1.pop(0)\")\n\tcmd.alter_state(1,mol2,\"(x,y,z)=stored.mol2.pop(0)\")\n\tcmd.alter( 'all',\"segi=''\")\n\tcmd.alter('all', \"chain=''\")\n\tprint \"RMSD=%f\" % cmd.rms_cur(sel1, sel2)\n\tprint \"MY RMSD=%f\" % RMSD\n\tcmd.hide('everything')\n\tcmd.show('ribbon', sel1 + ' or ' + sel2)\n\tcmd.color('gray70', mol1 )\n\tcmd.color('paleyellow', mol2 )\n\tcmd.color('red', 'visible')\n\tcmd.show('ribbon', 'not visible')\n\tcmd.center('visible')\n\tcmd.orient()\n\tcmd.zoom('visible')", "title": "" }, { "docid": "4f9f8c3aeb6ba4ce216a6f1cdf416c4b", "score": "0.45873356", "text": "def test_parens_for_sandwiched_negated_chained_operator_clause(self):\n b = self._bexpr_from_postfix_tokens(\n ['A',\n 'B', 'C', 'D', 'xor', 'xor', '~',\n 'E',\n '->', '->'])\n self.assertEqual(\n b.symbols,\n ['A', 'B', 'C', 'D', 'E'])\n self.assertEqual(\n b.tokens,\n ['A', '->', '~', '(', 'B', 'xor', 'C', 'xor', 'D', ')', '->', 'E'])\n self.assertEqual(\n b.postfix_tokens,\n ['A',\n 'B', 'C', 'D', 'xor', 'xor', '~',\n 'E',\n '->', '->'])\n self.assertEqual(b.raw_expr, 'A -> ~(B xor C xor D) -> E')\n self.assertTrue(b.tree is not None)", "title": "" }, { "docid": "b7750b521ada02c0e8bd40b073c518be", "score": "0.4569324", "text": "def test_heat_rod1d_modes_BC1(self):\n # BC1\n solver = Rod1D(kappa=self.kappa, TL=self.T0, TR=self.T1, L=self.L,\n Nsum=self.Nsum, alpha1=1.0, beta1=0.0, alpha2=1.0,\n beta2=0.0)\n solver.modes_BC1()\n kn0 = [0, 1.57079633, 3.14159265]\n An0 = [0, 0, 0]\n Bn0 = [0, 4.45633841, -0.31830989]\n np.testing.assert_allclose(solver.kn, kn0)\n np.testing.assert_allclose(solver.An, An0)\n np.testing.assert_allclose(solver.Bn, Bn0)", "title": "" }, { "docid": "2cb30dabc894c32284a46c310ffe90c0", "score": "0.45673597", "text": "def __or__(self, other):\n return self.binary(other, \"|\", is_operator=True, dimres=1, dimcheck=\"same\")", "title": "" }, { "docid": "434998dc144619afedf3623209de5391", "score": "0.45664072", "text": "def opcions():\n print \"[1] Llegir matriu A\"\n print \"[2] Llegir matriu B\"\n print \"[3] Calcular A + B\"\n print \"[4] Calcular A - B\"\n print \"[5] Calcular A * B\"\n print \"[6] Calcular determinant\"\n print \"[7] Calcular trasposada\"\n print \"[8] Sortir\"", "title": "" }, { "docid": "dbcdc1873d231e16de3e0384045e9280", "score": "0.45625427", "text": "def __eq__(self, operator):\n return int(self) != operator", "title": "" }, { "docid": "78ebf2ef9af0d0c81b44035f5d057255", "score": "0.45478916", "text": "def kmers_binary_op(self, op, kmer_db1, kmer_db2, kmer_db_out,\n result_min_occs=1, result_max_occs=util.misc.MAX_INT32,\n result_counter_cap=DEFAULT_COUNTER_CAP,\n 
threads=None):\n kmer_db1, kmer_db2, kmer_db_out = map(self._kmer_db_name, (kmer_db1, kmer_db2, kmer_db_out))\n # db1_min_occs, db1_max_occs, db2_min_occs, db2_max_occs, db_out_min_occs, db_out_max_occs,\n self.execute(['simple', kmer_db1, kmer_db2, op, kmer_db_out,\n '-ci{}'.format(result_min_occs),\n '-cx{}'.format(result_max_occs),\n '-cs{}'.format(result_counter_cap)], threads=threads)\n _chk(self.is_kmer_db(kmer_db_out), 'kmer_binary_op: output not created')", "title": "" }, { "docid": "9eafaa819934c3cf8f28434a14c0b219", "score": "0.45444795", "text": "def test_operator_shift(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1 >> Label(\"Label 1\") >> op2\n op3 << Label(\"Label 2\") << op2 >> op4\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op2.task_id, op3.task_id) == {\"label\": \"Label 2\"}\n assert dag.get_edge_info(op2.task_id, op4.task_id) == {}", "title": "" }, { "docid": "d9224101c23261a7ce9bd21437c52246", "score": "0.45411834", "text": "def operator_eq(self, *args):\n return _vnl_matrix_fixedPython.vnl_matrix_fixedD_2_3_operator_eq(self, *args)", "title": "" }, { "docid": "1eda1cc91ce214d8a75ff0478cb1a6ad", "score": "0.45113316", "text": "def sıraKımde(self):\n\n if self.hamle % 2 == 0:\n return self.oyuncuDegis1()\n else:\n return self.oyuncuDegis2()", "title": "" }, { "docid": "3c6ff994c7c2dabbe9cb291e786b5ecd", "score": "0.45097646", "text": "def main_8():\n\n n = 8\n stabs = \"ZIZZZIII IZZZIZII ZIIIZIZZ XXXIIIXI XXIXIIIX IIXIXXXI\"\n logops = \"IIXXIIII XIIIXIII ZZIIIIII IIZIIIZI\"\n code = Code(n, stabs, logops)\n code.check()\n\n #vs = list(code.get_encoded())\n v0 = code.get_encoded()\n v1 = code.logops[0] * v0\n v2 = code.logops[1] * v0\n v3 = code.logops[1] * v1\n\n for stab in code.stabs:\n assert stab*v0 == v0\n assert stab*v1 == v1\n assert stab*v2 == v2\n assert stab*v3 == v3\n\n assert v0 != v1\n\n # these are n-qubit operators:\n CZ = lambda i,j : Z.control(i-1, j-1, rank=n)\n CX = lambda i,j : X.control(i-1, j-1, rank=n)\n SWAP = lambda i,j : CX(i,j)*CX(j,i)\n\n #A = CZ(1,3)*CZ(4,5)*CZ(6,8)*CZ(2,7) # not a weak duality !\n\n for A in [\n #(I @ I @ I @ I @ I @ I @ I @ I)\n (I @ S @ I @ ~S @ S @ I @ ~S @ I)*CZ(1,6)*CZ(3,8), # logical S @ ~S\n #(S @ I @ ~S @ I @ I @ S @ I @ ~S)*CZ(2,5)*CZ(4,7), # logical S @ ~S\n #(I @ ~S @ I @ S @ ~S @ I @ S @ I)*CZ(1,6)*CZ(3,8), # logical ~S @ S\n #(S @ I @ I @ ~S @ I @ S @ ~S @ I)*CZ(2,5)*CZ(3,8), # logical S @ ~S\n #(H @ H @ H @ H @ H @ H @ H @ H) * SWAP(1,6) * SWAP(3,8), # fail....\n ]:\n\n #print(opstr(A))\n \n assert A*code.P == code.P*A\n #P1 = A*code.P*~A\n #assert(P1 == code.P)\n \n vs = [v0, v1, v2, v3]\n op = []\n for u in vs:\n u = A*u\n row = []\n for v in vs:\n r = u.dag() * v\n row.append(r)\n print(\"%.2f+%.2fj\"%(r.real, r.imag), end=\" \")\n print()\n op.append(row)\n \n op = numpy.array(op)\n #op.shape = (2,2,2,2)\n #print(op)\n\n print()", "title": "" }, { "docid": "f68c803d96cc414fba054174dbb0db24", "score": "0.45079932", "text": "def cross(self, a1, a2):\n\n if len(a1) != len(self.alloy):\n raise ValueError('Wrong size of structure to optimize')\n if len(a1) != len(a2):\n raise ValueError('The two structures do not have the same length')\n\n N = len(self.alloy.numbers)\n cell = self.alloy.get_cell()\n pbc = self.alloy.get_pbc()\n atom_numbers = self.alloy.numbers\n pos = self.alloy.positions\n new_pos = 
np.zeros((N,3))\n\n # Doing order 1 crossover using ordering list\n order1 = list(a1.info['ordering'])\n order2 = list(a2.info['ordering'])\n # built-in list() function was used to avoid call-by-reference of \n # a2.info['ordering']\n new_order = list(np.zeros(N))\n\n rpi = -choice(range(N)) # initial point of random swath\n rpf = rpi + N/2 # final point of random swath\n\n # Possibilities of making swath from pos1\n if rpi < 0 and rpf < 0:\n # case 1\n new_order[rpi:rpf] = order1[rpi:rpf]\n elif rpi < 0 and rpf ==0:\n # case 2\n new_order[rpi:] = order1[rpi:]\n elif rpi < 0 and rpf > 0:\n # case 3\n new_order[rpi:] = order1[rpi:]\n new_order[:rpf] = order1[:rpf]\n elif rpi == 0 and rpf > 0:\n # case 4\n new_order[:rpf] = order1[:rpf]\n else:\n print \"else case ocurred!\"\n\n # Masking coordinates already selected from a1\n # 'del' is not compatible with array format\n for i in range(rpi,rpf):\n imask = np.where(np.array(order2) == new_order[i])[0][0]\n del order2[imask]\n\n Np = len(order2)\n\n # Fill in the rest of new_order list by with the rest of order2\n if rpi < 0 and rpf < 0:\n # case 1\n new_order[N+rpf:] = order2[Np+rpf:]\n new_order[:N+rpi] = order2[:Np+rpi]\n elif rpi < 0 and rpf ==0:\n # case 2\n new_order[:rpi] = order2\n elif rpi < 0 and rpf > 0:\n # case 3\n new_order[rpf:N+rpi] = order2\n elif rpi == 0 and rpf > 0:\n # case 4\n new_order[rpf:] = order2\n else:\n print \"else case ocurred!\"\n\n # Fill in new_pos array following the new_order list\n for i in range(N):\n new_pos[new_order[i]] = pos[i]\n\n alloy_offspring = Atoms(numbers=atom_numbers, positions=new_pos,\n pbc=pbc, cell=cell)\n alloy_offspring. info['ordering'] = new_order\n\n return alloy_offspring", "title": "" }, { "docid": "bc187c7f7160f00a4f23c4582319f90d", "score": "0.45015937", "text": "def addOperators(self):\n return", "title": "" }, { "docid": "b3702bb532fd22582e0ebe78442e99d1", "score": "0.4493724", "text": "def _check2_bitop(a0, a1):\n s0, s1 = len(a0), len(a1)\n sr = max(s0, s1)\n if (s0 != sr and s0 != 1) or (s1 != sr and s1 != 1):\n raise Exception(\"Incompatible argument sizes: %i and %i\" % (s0, s1))\n elif type(a0) is not type(a1) and type(a1) is not a0.MaskType: # noqa\n raise Exception(\"Type mismatch!\")\n ar = a0.empty_(sr if a0.Size == Dynamic else 0)\n return (ar, sr)", "title": "" }, { "docid": "702d36dddcd8b44135719036ce52fdf6", "score": "0.4484038", "text": "def utworz_plansze(self,w,k,b):\r\n if self._aktualizacja == True:\r\n return False\r\n self._aktualizacja = True\r\n self._grazakonczona = False\r\n if hasattr(self, '_plansza') == True:\r\n del self._plansza\r\n self._plansza = Plansza(w,k,b,self,self._gui) #tworzenie planszy\r\n self._aktualizacja = False\r\n return True", "title": "" }, { "docid": "00570d1f28d900b41bb2530c80196d86", "score": "0.44770122", "text": "def pan(a, b):\n ab = a * b\n t = str(a) + str(b) + str(ab)\n if '0' in t:\n return 0\n if len(t) == 9 == len(set(t)):\n return ab\n return 0", "title": "" }, { "docid": "3245b05367263e98250455896d1bb37c", "score": "0.44703317", "text": "def test_operator_set(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1.set_downstream(op2, Label(\"Label 1\"))\n op3.set_upstream(op2, Label(\"Label 2\"))\n op4.set_upstream(op2)\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op2.task_id, op3.task_id) == {\"label\": \"Label 2\"}\n assert 
dag.get_edge_info(op2.task_id, op4.task_id) == {}", "title": "" }, { "docid": "c8b280a4717b8ae3d3a2dd08bdbeb9f0", "score": "0.44652066", "text": "def apply_operator_1(\n statevector1,\n statevector2,\n operator,\n A,\n ):\n\n N = (statevector1.shape[0]&-statevector1.shape[0]).bit_length()-1\n if A >= N: raise RuntimeError('A >= N')\n if operator.shape != (2,2): raise RuntimeError('1-body gate must be (2,2)')\n if statevector1.shape != (2**N,): raise RuntimeError('statevector1 should be (%d,) shape, is %r shape' % (2**N, statevector1.shape))\n if statevector2.shape != (2**N,): raise RuntimeError('statevector2 should be (%d,) shape, is %r shape' % (2**N, statevector2.shape))\n\n L = 2**(A) # Left hangover\n R = 2**(N-A-1) # Right hangover\n statevector1v = statevector1.view() \n statevector2v = statevector2.view()\n statevector1v.shape = (L,2,R)\n statevector2v.shape = (L,2,R)\n np.einsum('LjR,ij->LiR', statevector1v, operator, out=statevector2v)\n\n return statevector2, statevector1", "title": "" }, { "docid": "05724ad7ea70f6c7782a73c43464c0fc", "score": "0.446426", "text": "def ketbra(self,state):\n result = Operator()\n for hv1 in self.hv:\n for hv2 in state.hv:\n result.add(hv1.ketbra(hv2))\n return result", "title": "" }, { "docid": "75631e85afa28ff2ebccc0032409a930", "score": "0.44622192", "text": "def comprovaPUK(c,d,m):\n return c<5 and d!=m", "title": "" }, { "docid": "319533272fd4abbf2443ab4a5e2d5d14", "score": "0.4459272", "text": "def bravyi_kitaev(operator, n_qubits=None):\n # Compute the number of qubits.\n from fermilib.utils import count_qubits\n if n_qubits is None:\n n_qubits = count_qubits(operator)\n if n_qubits < count_qubits(operator):\n raise ValueError('Invalid number of qubits specified.')\n\n # Compute transformed operator.\n transformed_operator = QubitOperator()\n for term in operator.terms:\n\n # Initialize identity matrix.\n coefficient = operator.terms[term]\n transformed_term = QubitOperator((), coefficient)\n\n # Build the Fenwick Tree\n fenwick_tree = FenwickTree(n_qubits)\n\n # Build the Bravyi-Kitaev transformed operators.\n for ladder_operator in term:\n index = ladder_operator[0]\n\n # Parity set. Set of nodes to apply Z to.\n parity_set = [node.index for node in\n fenwick_tree.get_parity_set(index)]\n\n # Update set. 
Set of ancestors to apply X to.\n ancestors = [node.index for node in\n fenwick_tree.get_update_set(index)]\n\n # The C(j) set.\n ancestor_children = [node.index for node in\n fenwick_tree.get_remainder_set(index)]\n\n # Switch between lowering/raising operators.\n d_coefficient = .5j\n if ladder_operator[1]:\n d_coefficient *= -1.\n\n # The fermion lowering operator is given by\n # a = (c+id)/2 where c, d are the majoranas.\n d_majorana_component = QubitOperator(\n (((ladder_operator[0], 'Y'),) +\n tuple((index, 'Z') for index in ancestor_children) +\n tuple((index, 'X') for index in ancestors)),\n d_coefficient)\n\n c_majorana_component = QubitOperator(\n (((ladder_operator[0], 'X'),) +\n tuple((index, 'Z') for index in parity_set) +\n tuple((index, 'X') for index in ancestors)),\n 0.5)\n\n # Update term.\n transformed_term *= c_majorana_component + d_majorana_component\n transformed_operator += transformed_term\n return transformed_operator", "title": "" }, { "docid": "52b2f9c3087a06898a15ffbb58a7e255", "score": "0.4454894", "text": "def kartik_test(a, b):\n\n p_a = binomial_test(a.values, b.values)\n p_b = binomial_test(b.values, a.values)\n direction = np.zeros_like(p_a)\n direction[p_a < p_b] = 1\n direction[p_a > p_b] = -1\n p = np.minimum(p_a, p_b)\n q = multipletests(p, method='fdr_bh')[1]\n return pd.DataFrame(\n {'direction': direction,\n 'p': p,\n 'q': q}, index=a.columns)", "title": "" }, { "docid": "30157b993d6ecdb14def0344942d06cf", "score": "0.44480485", "text": "def operator_eq(self, *args):\n return _vnl_matrix_fixedPython.vnl_matrix_fixedD_3_4_operator_eq(self, *args)", "title": "" }, { "docid": "b737584aafcaf26c3f860de31b787a2b", "score": "0.44441146", "text": "def _expand_operators(self):\n return NO(self._remove_brackets)", "title": "" }, { "docid": "252326e3e8e8cd047d00df8f60345e42", "score": "0.44404167", "text": "def is_operator(char):\n return char in OPERATORS", "title": "" }, { "docid": "b1df92ab4b4322c1d3e6f748d7db43d3", "score": "0.44399634", "text": "def _setOperator(self, operator):\n index_list = self._getSelectedRowsIndex()\n if index_list:\n for index in index_list:\n oCombo = self.table.cellWidget(index.row(), 4)\n oCombo.setCurrentIndex(operator)\n else:\n for i in reversed(range(self.table.rowCount())):\n oCombo = self.table.cellWidget(i, 4)\n oCombo.setCurrentIndex(operator)", "title": "" }, { "docid": "25ed6a845197256956de9d3100370e46", "score": "0.4437378", "text": "def matrica_prijelaza(baza1,baza2,klasa='sympy'):\n if klasa == 'sympy':\n m1 = sp.Matrix(baza1).T\n if m1.det() == 0: return \"Error: prvi skup nije baza\"\n m2 = sp.Matrix(baza2).T\n if m2.det() == 0: return \"Error: drugi skup nije baza\"\n if m1.shape != m2.shape: return \"Error: baze nisu iz istog vektorskog prostora\"\n return m1.inv()*m2\n elif klasa == 'numpy':\n m1 = np.transpose(np.array(baza1))\n if np.abs(np.linalg.det(m1)) < 1e-15: return \"Error: prvi skup nije baza\"\n m2 = np.transpose(np.array(baza2))\n if np.abs(np.linalg.det(m2)) < 1e-15: return \"Error: drugi skup nije baza\"\n if m1.shape != m2.shape: return \"Error: baze nisu iz istog vektorskog prostora\"\n return np.matmul(np.linalg.inv(m1), m2)", "title": "" }, { "docid": "608729f5776723ef3a83b0fa00907447", "score": "0.44357574", "text": "def shortest_logicals(self, start_weight=None, delta=0, verbose=True, css=False):\n\n # if not self.logical_xs_reference and not self.logical_zs_reference:\n # self.eval(verbose=False)\n\n if start_weight is None:\n if self.dist is not None:\n start_weight = self.dist\n else:\n 
start_weight = 1\n\n end_weight = start_weight + delta\n\n if self.circuit is None:\n raise Exception('Must compile circuits first!')\n\n qudit_set = self.data_qubits\n\n if end_weight > len(qudit_set):\n end_weight = len(qudit_set)\n\n state = self.state\n found = self._dist_mode_smallest(state, qudit_set, css=css, verbose=False, start_len=start_weight,\n end_len=end_weight, list_ops=True)\n\n xs_labels = sorted(self.logical_xs_reference.keys())\n zs_labels = sorted(self.logical_zs_reference.keys())\n\n oplist = []\n\n if found:\n for paulis in found:\n # weight = len(paulis['X'] | paulis['Z'])\n\n op_product = []\n for xi, op_label in enumerate(xs_labels):\n if self.op_anticommute(paulis, self.logical_xs_reference[op_label]):\n op_product.append(zs_labels[xi])\n\n for zi, op_label in enumerate(zs_labels):\n if self.op_anticommute(paulis, self.logical_zs_reference[op_label]):\n op_product.append(xs_labels[zi])\n\n op_product = sorted(op_product)\n\n oplist.append({'X': paulis['X'], 'Z': paulis['Z'], 'equiv_ops': tuple(op_product)})\n\n if verbose:\n\n print('Reference Logical Operators:')\n print('\\nLogical Xs:')\n for op_label in xs_labels:\n op = self.logical_xs_reference[op_label]\n print(op_label, op)\n print('\\nLogical Zs:')\n for op_label in zs_labels:\n op = self.logical_zs_reference[op_label]\n print(op_label, op)\n\n print('\\nLogical Ops Found:\\n')\n for foundop in oplist:\n print('X - %s Z - %s Equiv Ops - %s' % (foundop['X'], foundop['Z'], foundop['equiv_ops']))\n\n return oplist, self.logical_xs_reference, self.logical_zs_reference", "title": "" }, { "docid": "686ede47e5b5b04957fb41f12a8d12c8", "score": "0.44339296", "text": "def ds(op):\n if len(op)==1: return 0\n return int(op[1]+'1')", "title": "" }, { "docid": "21c3ef04b214b1e888a7ceb63ce786a7", "score": "0.44323573", "text": "def g_function_cobra(self):\n if self.num_1 > self.num_2:\n return self.num_1 + self.num_2\n return self.num_1 + self.num_2", "title": "" }, { "docid": "c3835c9c8a49248bdbc64faf0ac889cf", "score": "0.44291458", "text": "def _op_8_nest(self):\r\n self._optable_8[self._N]()", "title": "" }, { "docid": "e2c8128d91095fe579cad4e2b68497fd", "score": "0.4428731", "text": "def Conditional__mul__(self, other):\n try:\n a = self.inf\n b = self.sup\n c = other.inf\n d = other.sup\n \n if a >= 0. and c >= 0.:\n return ia(a*c,b*d)\n elif a >= 0. and c<0.<d:\n return ia(b*c,b*d)\n elif a >= 0. and d <= 0.:\n return ia(b*c,a*d)\n elif a<0.<b and c>=0.:\n return ia(a*d,b*d) #corrected\n elif a<0.<b and d<=0.:\n return ia(b*c,a*c) # I say ad hansen says ac for sup #corrected# ia(b*d,a*d)- I agree with Eldon\n elif b<=0. and c>=0.:\n return ia(a*d,b*c)\n elif b<=0. and c<0.<d:\n return ia(a*d,a*c)\n elif b<=0. and d<=0.:\n return ia(b*d,a*c)\n elif a<0.<b and c<0.<d:\n return ia(min(b*c,a*d,a*c,b*d),max(b*c,a*d,a*c,b*d))\n #return ia(min(b*c,a*d),max(a*c,b*d))\n else:\n print 'error in multiplication'\n c1 = a*c\n c2 = a*d\n c3 = b*c\n c4 = b*d\n return ia(min(c1,c2,c3,c4),max(c1,c2,c3,c4))\n \n \n except:\n a = self.inf\n b = self.sup\n c = other\n d = other\n \n if a >= 0. and c >= 0.:\n return ia(a*c,b*d)\n elif a >= 0. and c<0.<d:\n return ia(b*c,b*d)\n elif a >= 0. and d <= 0.:\n return ia(b*c,a*d)\n elif a<0.<b and c>=0.:\n return ia(a*d,b*d) #corrected\n elif a<0.<b and d<=0.:\n return ia(b*c,a*c) # I say ad hansen says ac for sup #corrected# ia(b*d,a*d)- I agree with Eldon\n elif b<=0. and c>=0.:\n return ia(a*d,b*c)\n elif b<=0. and c<0.<d:\n return ia(a*d,a*c)\n elif b<=0. 
and d<=0.:\n return ia(b*d,a*c)\n elif a<0.<b and c<0.<d:\n return ia(min(b*c,a*d,a*c,b*d),max(b*c,a*d,a*c,b*d))\n #return ia(min(b*c,a*d),max(a*c,b*d))\n else:\n print 'error in multiplication'\n c1 = a*c\n c2 = a*d\n c3 = b*c\n c4 = b*d\n return ia(min(c1,c2,c3,c4),max(c1,c2,c3,c4))", "title": "" }, { "docid": "09d18bceb93260af3312c09a0fd1ad0a", "score": "0.44228566", "text": "def diamond_bracket_operator(self, d):\n d = int(d) % self.__M.level()\n try:\n return self.__diamond_operator[d]\n except AttributeError:\n self.__diamond_operator = {}\n except KeyError:\n pass\n D = self.__M._diamond_operator_class()(self, d)\n self.__diamond_operator[d] = D\n return D", "title": "" }, { "docid": "a2fc8be77084ef22852e1b79adecabef", "score": "0.44221944", "text": "def _q_multiply_shift_hexagon(op):\n x = op.args[0]\n y = op.args[1]\n fractional_bits = op.args[2]\n shift = op.args[3]\n\n # Don't use this intrinsic if we don't have a int32x32 vector\n # or if we are not multiplying q31 numbers\n if x.dtype != \"int32x32\" or fractional_bits.value != 31:\n return op\n\n # Case 1, shift is negative\n mul_e_1 = tvm.tir.call_llvm_intrin(\n op.dtype, \"llvm.hexagon.V6.vmpyewuh.128B\", tvm.tir.const(2, \"uint32\"), x, y\n )\n mul_o_1 = tvm.tir.call_llvm_intrin(\n op.dtype, \"llvm.hexagon.V6.vmpyowh.sacc.128B\", tvm.tir.const(3, \"uint32\"), mul_e_1, x, y\n )\n fixup = 1 << (-shift - 1)\n round_mul = mul_o_1 + fixup\n out_negative_shift = tvm.tir.call_llvm_intrin(\n op.dtype, \"llvm.hexagon.V6.vaslwv.128B\", tvm.tir.const(2, \"uint32\"), round_mul, shift\n )\n\n # Case 2, shift is positive\n x = x * (1 << (shift))\n mul_e_2 = tvm.tir.call_llvm_intrin(\n op.dtype, \"llvm.hexagon.V6.vmpyewuh.128B\", tvm.tir.const(2, \"uint32\"), x, y\n )\n mul_o_2 = tvm.tir.call_llvm_intrin(\n op.dtype, \"llvm.hexagon.V6.vmpyowh.rnd.sacc.128B\", tvm.tir.const(3, \"uint32\"), mul_e_2, x, y\n )\n\n # Select depending on the shift\n return tvm.tir.Select(shift < 0, out_negative_shift, mul_o_2)", "title": "" }, { "docid": "283f5e4fd4fa97896569b204e88c5d9c", "score": "0.44218627", "text": "def print_ans(M1, M2):\n if M1 == M2:\n print(\"0\")\n else:\n print(\"1\")", "title": "" }, { "docid": "c8d154c67ffddec1de0f7c32f4c340ce", "score": "0.44174978", "text": "def test2_12_1_1(self):\n GA, e_1, e_2, e_3 = Ga.build('e*1|2|3')\n self.assertEqual((e_1 + e_2) ^ (e_1 + e_3), (-e_1 ^ e_2) + (e_1 ^ e_3) + (e_2 ^ e_3))\n self.assertEqual((e_1 + e_2 + e_3) ^ (2 * e_1), -2 * (e_1 ^ e_2) - 2 * (e_1 ^ e_3))\n self.assertEqual((e_1 - e_2) ^ (e_1 - e_3), (e_1 ^ e_2) - (e_1 ^ e_3) + (e_2 ^ e_3))\n self.assertEqual(\n (e_1 + e_2) ^ (0.5 * e_1 + 2 * e_2 + 3 * e_3), 1.5 * (e_1 ^ e_2) + 3 * (e_1 ^ e_3) + 3 * (e_2 ^ e_3))\n self.assertEqual((e_1 ^ e_2) ^ (e_1 + e_3), (e_1 ^ e_2 ^ e_3))\n self.assertEqual((e_1 + e_2) ^ ((e_1 ^ e_2) + (e_2 ^ e_3)), (e_1 ^ e_2 ^ e_3))", "title": "" }, { "docid": "45f179a393572ee5b40eec51cef01314", "score": "0.4416586", "text": "def verify_type_match(self, left, right, operator):\r\n \r\n # codigo para depurar\r\n global debug\r\n if debug :\r\n print('antes')\r\n print('l: ', left)\r\n print('r: ', right)\r\n print('op: ', operator)\r\n \r\n # se obtienen los codigos que representan a cada tipo\r\n l = self.get_val(left)\r\n r = self.get_val(right)\r\n op = self.get_val(operator)\r\n\r\n # codigo para depurar\r\n if debug :\r\n print('despues')\r\n print('l: ', l)\r\n print('r: ', r)\r\n print('op: ', op)\r\n\r\n # Se asigna un codigo a cada tipo\r\n ERROR, ENT, FLOAT, CADENA, CHAR, BOOL = 0, 1, 2, 3, 4, 5\r\n\r\n # Se 
inicializa el cubo semantico\r\n cube = (\r\n ((ENT, ENT, ENT, FLOAT, ERROR, ERROR, BOOL, BOOL, BOOL, BOOL, BOOL, BOOL, ENT),\r\n (FLOAT, FLOAT, FLOAT, FLOAT, ERROR, ERROR, BOOL, BOOL, BOOL, BOOL, BOOL, BOOL, ENT),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n ),\r\n ((FLOAT, FLOAT, FLOAT, FLOAT, ERROR, ERROR, BOOL, BOOL, BOOL, BOOL, BOOL, BOOL, FLOAT),\r\n (FLOAT, FLOAT, FLOAT, FLOAT, ERROR, ERROR, BOOL, BOOL, BOOL, BOOL, BOOL, BOOL, FLOAT),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n ),\r\n ((ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, BOOL, BOOL, CADENA),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR)\r\n ),\r\n ((ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, BOOL, BOOL, CHAR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n ),\r\n ((ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR, ERROR),\r\n (ERROR, ERROR, ERROR, ERROR, BOOL, BOOL, ERROR, ERROR, ERROR, ERROR, BOOL, BOOL, BOOL),\r\n ),\r\n )\r\n\r\n # Se guarda el resultado producido por el cubo\r\n result = cube[l][r][op]\r\n\r\n # Dependiendo del reusltado se regresa una cadena con el tipo resultante\r\n # o un -1 en caso de error de tipos.\r\n if result is 0:\r\n return -1 \r\n elif result is 1:\r\n return \"entero\"\r\n elif result is 2:\r\n return \"flotante\"\r\n elif result is 3:\r\n return \"cadena\"\r\n elif result is 4:\r\n return \"caracter\"\r\n elif result is 5:\r\n return \"bool\"", "title": "" }, { "docid": "c6bb680ad0182c32d8ebb4e4d89e156f", "score": "0.44119498", "text": "def boolean(\n A: Union[ComponentOrReference, Tuple[ComponentOrReference, ...]],\n B: Union[ComponentOrReference, Tuple[ComponentOrReference, ...]],\n operation: str,\n precision: float = 1e-4,\n num_divisions: Union[int, Int2] = (1, 1),\n max_points: int = 4000,\n layer: LayerSpec = (1, 0),\n) -> Component:\n D = Component()\n A_polys = []\n B_polys = []\n A = list(A) if isinstance(A, (list, tuple)) else [A]\n B = list(B) if isinstance(B, (list, tuple)) else [B]\n\n for X, polys in ((A, A_polys), (B, B_polys)):\n for e 
in X:\n if isinstance(e, (Component, ComponentReference)):\n polys.extend(e.get_polygons())\n elif isinstance(e, Polygon):\n polys.extend(e.polygons)\n\n layer = gf.pdk.get_layer(layer)\n gds_layer, gds_datatype = _parse_layer(layer)\n\n operation = operation.lower().replace(\" \", \"\")\n if operation == \"a-b\":\n operation = \"not\"\n elif operation == \"b-a\":\n operation = \"not\"\n A_polys, B_polys = B_polys, A_polys\n elif operation == \"a+b\":\n operation = \"or\"\n elif operation not in [\"not\", \"and\", \"or\", \"xor\", \"a-b\", \"b-a\", \"a+b\"]:\n raise ValueError(\n \"gdsfactory.geometry.boolean() `operation` \"\n \"parameter not recognized, must be one of the \"\n \"following: 'not', 'and', 'or', 'xor', 'A-B', \"\n \"'B-A', 'A+B'\"\n )\n\n # Check for trivial solutions\n if (not A_polys or not B_polys) and operation != \"or\":\n if (\n operation != \"not\"\n and operation != \"and\"\n and operation == \"xor\"\n and not A_polys\n and not B_polys\n or operation != \"not\"\n and operation == \"and\"\n ):\n p = None\n elif operation != \"not\" and operation == \"xor\" and not A_polys:\n p = B_polys\n elif operation != \"not\" and operation == \"xor\":\n p = A_polys\n elif operation == \"not\":\n p = A_polys or None\n elif not A_polys and not B_polys:\n p = None\n elif all(np.array(num_divisions) == np.array([1, 1])):\n p = gdspy.boolean(\n operand1=A_polys,\n operand2=B_polys,\n operation=operation,\n precision=precision,\n max_points=max_points,\n layer=gds_layer,\n datatype=gds_datatype,\n )\n else:\n p = _boolean_polygons_parallel(\n polygons_A=A_polys,\n polygons_B=B_polys,\n num_divisions=num_divisions,\n operation=operation,\n precision=precision,\n )\n\n if p is not None:\n polygons = D.add_polygon(p, layer=layer)\n [\n polygon.fracture(max_points=max_points, precision=precision)\n for polygon in polygons\n ]\n return D", "title": "" }, { "docid": "1f02dca0ab8c1db5ea5e0d874415c867", "score": "0.44119266", "text": "def crtaj_modul ( orMj , dX ):\n \"\"\"\n od = rel2abs ( origin , poc , smjer )\n do = rel2abs ( origin , kraj , smjer )\n mc.setBlocks ( od , do , blok_id , blok_dv )\n \"\"\"\n crtaj_kvadar ( orMj , [ 3 + dX , 0, 0 ] , [ 3 + dX , -2 , 2 ] , orSm , materijal , 2 ) # blok\n crtaj_kvadar ( orMj , [ 3 + dX , -2, 2 ] , [ 3 + dX , -2 , 2 ] , orSm , 0 , 0 ) # zrak\n crtaj_kvadar ( orMj , [ 3 + dX , -1, 1 ] , [ 3 + dX , -1 , 1 ] , orSm , 0 , 0 ) # zrak\n crtaj_kvadar ( orMj , [ 3 + dX , 0, 0 ] , [ 3 + dX , 0 , 0 ] , orSm , 0 , 0 ) # zrak\n crtaj_kvadar ( orMj , [ 3 + dX , -2, 0 ] , [ 3 + dX , -2 , 0 ] , orSm , 0 , 0 ) # zrak\n crtaj_redstonedust ( orMj , [ 3 + dX , -2, 2 ] , [ 3 + dX , -2 , 2 ] , orSm )\n crtaj_redstonedust ( orMj , [ 3 + dX , -1, 3 ] , [ 3 + dX , -1 , 3 ] , orSm )\n crtaj_redstonetorch ( orMj , [ 3 + dX , 1, 1 ] , orSm , \"desno\" ) \n crtaj_comparator ( orMj , [ 3 + dX , 0, 3 ] , [ 3 + dX , 0 , 3 ] , orSm , rel_smjer = \"lijevo\" )\n crtaj_repeater ( orMj , [ 3 + dX , -1, 1 ] , [ 3 + dX , -1 , 1 ] , orSm , rel_smjer = \"desno\" )\n \n crtaj_hopper ( orMj , [ 3 + dX , 1, 2 ] , [ 3 + dX , 1 , 3 ] , orSm , \"desno\" ) # dva doljnja\n crtaj_hopper ( orMj , [ 3 + dX , 1, 4 ] , [ 3 + dX , 1 , 4 ] , orSm , \"odmene\" ) # gornji\n \n sto = '{TransferCooldown:0,Items:[0:{Slot:0b,id:\"%s\",Count:4b,Damage:%ss,},1:{Slot:1b,id:\"%s\",Count:5b,Damage:%ss,},2:{Slot:2b,id:\"%s\",Count:5b,Damage:%ss,},3:{Slot:3b,id:\"%s\",Count:5b,Damage:%ss,},4:{Slot:4b,id:\"%s\",Count:5b,Damage:%ss,},],id:\"Hopper\",Lock:\"\",}' % ( popis [ dX ] [ 0 ] , popis [ dX ] [ 1 ] , popis 
[ dX ] [ 0 ] , popis [ dX ] [ 1 ] , popis [ dX ] [ 0 ] , popis [ dX ] [ 1 ] , popis [ dX ] [ 0 ] , popis [ dX ] [ 1 ] , popis [ dX ] [ 0 ] , popis [ dX ] [ 1 ] )\n bla = rel2abs ( orMj , ( 3 + dX , 1 , 3 ) , orSm )\n mc.postToChat(\"orginal: %s %s \" % ( dX , bla ) )\n time.sleep ( 1 )\n mc.setBlockWithNBT(bla,154,smjer_hoppera ( orSm , \"desno\") , sto ) #hopper gleda na istok \n bla = rel2abs ( orMj , ( 3 + dX , 1 , 2 ) , orSm )\n mc.postToChat(\"orginal: %s %s \" % ( dX , bla ) )\n time.sleep ( 1 )\n mc.setBlockWithNBT(bla,154,smjer_hoppera ( orSm , \"desno\") , sto ) #hopper gleda na istok \n #bla = rel2abs ( orMj , ( 3 + dX , 5 , 2 ) , orSm )\n #mc.setBlock(bla,53,4 ) #oak wood stairs naopako, gledaju na istok\n crtaj_stepenice ( orMj , ( 3 + dX , 5 , 2 ) , ( 3 + dX , 5 , 2 ) , orSm , blok_id = 53 , rel_smjer = \"desno\" , gore_dolje = \"da\" )\n\n \n kutija = 54\n tkutija = 146\n if ( int ( dX ) % 2 == 1 ) :\n kmat = kutija\n else :\n kmat = tkutija\n crtaj_kutiju ( orMj , [ 3 + dX , 2, 2 ] , [ 3 + dX , 3 , 2 ] , orSm , rel_smjer = \"meni\" , blok_id = kmat )\n crtaj_hopper ( orMj , [ 3 + dX , 3, 1 ] , [ 3 + dX , 3 , 0 ] , orSm , \"desno\" ) # hopper ispod kutije\n crtaj_kutiju ( orMj , [ 3 + dX , 4, 1 ] , [ 3 + dX , 5 , 0 ] , orSm , rel_smjer = \"meni\" , blok_id = kmat ) # dodatne kutije", "title": "" }, { "docid": "833e060bbc82e213f266619f193cac80", "score": "0.44113728", "text": "def test_weight_for_valid_operator(self):\n self.assertTrue(type(self.instance.weight('*')) is IntType)", "title": "" }, { "docid": "86e72879b685d99a34842bc065804d16", "score": "0.4410812", "text": "def test_batting_flags(batting):\n flag_cols = [\n 'g',\n 'g_dh',\n 'g_ph',\n 'g_pr'\n ]\n\n assert batting[flag_cols].min().min() == 0\n assert batting[flag_cols].max().max() == 1", "title": "" }, { "docid": "e5515de56ece5c5f81b5757893cc31f6", "score": "0.44100785", "text": "def shift_operator(self,dd_):\n \n N_ = self.N\n aa = self.anihilation_operator()\n ad = self.creation_operator()\n \n # construct the Shift Operator\n Dd_large = numpy.zeros((N_,N_),dtype=numpy.float)\n Dd_large = dd_*(ad-aa)/numpy.sqrt(2.0)\n\n # Diagonalize and obtain transformation matrix\n A,S = numpy.linalg.eig(Dd_large)\n S1 = numpy.linalg.inv(S)\n \n # Exponentiate\n Dd_large = numpy.diag(numpy.exp(A))\n \n # Transform back and reduce to the lower number of states\n return numpy.real(numpy.dot(S,numpy.dot(Dd_large,S1)))", "title": "" }, { "docid": "592e263302081fd9d99d999078f2dc78", "score": "0.44087765", "text": "def operator_eq(self, *args):\n return _vnl_matrix_fixedPython.vnl_matrix_fixedD_4_3_operator_eq(self, *args)", "title": "" }, { "docid": "a971986501ed11ceaba23764587bbaae", "score": "0.4408561", "text": "def operar():\n a=[]\n b = []\n exit =False\n while not exit:\n print\n opcions()\n op = mainLib.askNumberOption(\"Choose an option: \", 8)\n print\n if(op == 1):\n a = AdministrarCrearMatriu(\"a\")\n elif(op == 2):\n b = AdministrarCrearMatriu(\"b\")\n elif (op == 3):\n AdministrarOperacio(\"+\",a,b)\n elif(op == 4):\n AdministrarOperacio(\"-\", a, b)\n elif (op == 5):\n AdministrarOperacio(\"*\", a, b)\n elif(op == 6):\n AdministrarOperacioUnaMatriu(\"d\",a,b)\n elif(op == 7):\n AdministrarOperacioUnaMatriu(\"t\",a,b)\n else:\n exit = True", "title": "" } ]
a2ba58efdc9e91c122fb0243afe6f507
Extract the course descriptions from the document. Write them to the appropriate Course records in the database.
[ { "docid": "ba84b03c2eca5d2d3c535e6f92301885", "score": "0.75976646", "text": "def ExtractCourseDescriptions(document: Document, courses: List[Course]) -> None:\n\n courseDescriptionOn = False\n allparagraphs = []\n pertinentParagraphs = []\n number = 0\n pHeader = \"\"\n hIsCourseTitle = False\n partialTitle = []\n\n global courseNameStyles\n\n for p in document.paragraphs:\n\n if p.text.strip() != \"\":\n\n pText = p.text.strip()\n lastPos = len(pText)\n\n if pText[lastPos-1] == \":\":\n partialTitle.append(pText)\n else:\n if len(partialTitle) > 0:\n partialTitle.append(p.text.strip())\n\n number = number + 1\n paragraph = Paragraph()\n paragraph.setNumber(number)\n paragraph.setStyle(p.style.name)\n paragraph.setText(p.text.strip().rstrip(string.digits))\n allparagraphs.append(paragraph)\n\n for i, item in enumerate(allparagraphs):\n if item.getStyle() == constant.STYLE_BODY_TEXT:\n pPrev = allparagraphs[i-1]\n \n pHeader = \"\"\n if pPrev.getStyle() in courseNameStyles:\n number = number + 1\n p2 = Paragraph()\n p2.setNumber(number)\n p2.setStyle(pPrev.getStyle())\n p2.setText(pPrev.getText())\n paragraphs2.append(p2)\n pHeader = pPrev.getText().strip()\n if pHeader != \"\":\n hIsCourseTitle = HeaderIsCourseTitle(pHeader)\n\n if hIsCourseTitle:\n number = number + 1\n pc = Paragraph()\n pc.setNumber(number)\n pc.setStyle(item.getStyle())\n pc.setHeader(pHeader)\n pc.setText(item.getText())\n pertinentParagraphs.append(pc)\n\n associatedCourse = Course()\n currentCourseDescription = \"\"\n courseTitlePosition = -1\n\n for i, p in enumerate(pertinentParagraphs):\n #if p.getHeader().strip() != \"\":\n #print(\"paragraph header: {}\".format(p.getHeader().strip()))\n \n if p.getHeader().strip() != \"\" and courseDescriptionOn and i > courseTitlePosition:\n associatedCourse.setDescription(currentCourseDescription)\n courseDescriptionOn = False\n currentCourseDescription = \"\"\n courseTitlePosition = -1\n if p.getHeader().strip() != \"\" and not courseDescriptionOn:\n courseDescriptionOn = True\n associatedCourse = GetAssociatedCourse(courses, p.getHeader().replace(\"'\",\"\").strip())\n courseTitlePosition = i\n currentCourseDescription += p.getText().strip()\n if courseDescriptionOn and i > courseTitlePosition:\n currentCourseDescription += p.getText().strip()", "title": "" } ]
[ { "docid": "3c0601b75ff7eab4168a97065371a8bf", "score": "0.7158249", "text": "def ExtractCourseAndDescription(firebase: firebase, document: Document, knowledgeAreas: List[KnowledgeArea], courses: List[Course], catalogId: str) -> None:\n\n global currentKnowledgeArea\n\n knowledgeAreaId = \"\"\n knowledgeAreaTitle = \"\"\n candidateId = \"\"\n partialTitle = []\n fulltitle = \"\"\n\n global courseNameStyles\n\n for p in document.paragraphs:\n if p.style.name == constant.STYLE_NORMAL:\n currentKnowledgeArea = getMatchingKnowledgeArea(p.text.strip())\n candidateId = currentKnowledgeArea.getId()\n\n if candidateId != \"\":\n knowledgeAreaId = str(candidateId)\n knowledgeAreaTitle = currentKnowledgeArea.getText()\n else:\n knowledgeAreaId = \"\"\n\n if knowledgeAreaId != \"\" and p.text.strip() != knowledgeAreaTitle and p.text.strip() != \"\":\n pText = p.text.strip()\n lastPos = len(pText)\n\n if not pText[lastPos-1].isdigit():\n partialTitle.append(p.text.strip())\n else:\n if len(partialTitle) > 0:\n partialTitle.append(p.text.strip())\n fulltitle = ' '.join(partialTitle)\n else:\n fulltitle = pText\n\n fulltitle = fulltitle.replace(\"*\", \"\")\n\n course = Course()\n course.setKnowledgeAreaId(knowledgeAreaId)\n course.setKnowledgeArea(currentKnowledgeArea.getText())\n course.setTOCEntry(p.text.strip())\n course.setTitle(fulltitle.rstrip(string.digits).strip())\n courses.append(course)\n partialTitle = []\n\n ExtractCourseDescriptions(document, courses)\n\n for course in courses:\n if course.getDescription().strip() == \"\":\n courseTitle = course.getTitle().strip()\n courseTitle = courseTitle.replace(\"New!\", \"\").replace(\"*\", \"\").replace(\"'\",\"\").strip()\n\n pNumber = 0\n takeDescription = 0\n descriptionElements = []\n\n for p in document.paragraphs:\n pNumber += 1\n \n if p.style.name in courseNameStyles and p.text != \"\":\n pText = p.text.replace(\"New! 
\", \"\").replace(\"*\", \"\").replace(\"'\",\"\").strip()\n if pText.__contains__(courseTitle) or courseTitle.__contains__(pText):\n takeDescription = 1\n else:\n takeDescription = 0\n\n if p.style.name == constant.STYLE_BODY_TEXT and takeDescription == 1:\n #print(\"taking {} as course description for course: {}\".format(p.text.strip(), course.getTitle()))\n descriptionElements.append(p.text.strip())\n\n fullDescription = course.getDescription()\n\n if len(descriptionElements) > 0:\n fullDescription = ' '.join(descriptionElements)\n\n course.setDescription(fullDescription)\n\n for course in courses:\n if course.getDescription().strip() == \"\":\n print(\"course: {} has no description\".format(course.getTitle()))\n\n if course.getKnowledgeArea() == course.getTitle():\n continue\n\n newCourse = {\n 'catalogid': catalogId,\n 'knowledgeareaid': course.getKnowledgeAreaId(),\n 'name': course.getTitle(),\n 'description': course.getDescription()\n }\n result = firebase.post('course', newCourse)\n course.setId(result)", "title": "" }, { "docid": "a5602e3e4c7ec47b5d9ced790a9d9a27", "score": "0.5897001", "text": "def copy_course_description(self, courseevent_pk):\n courseevent = get_object_or_404(CourseEvent, pk=courseevent_pk)\n course = get_object_or_404(Course, pk=courseevent.course_id)\n\n courseevent = get_object_or_404(CourseEvent, pk=courseevent_pk)\n courseevent.target_group = course.target_group\n courseevent.excerpt = course.excerpt\n courseevent.text = course.text\n courseevent.prerequisites = course.prerequisites\n courseevent.project = course.project\n courseevent.save()\n\n logger.info(\"\"\"[%s] [course %s %s]:\n Beschreibung aus der Kursvorlage in den Kurs kopiert\"\"\"\n % (courseevent, course.id, course))\n return(HttpResponseRedirect(courseevent.get_absolute_url()))", "title": "" }, { "docid": "cd0a830fd8fa1091526b29b84b6f6d9e", "score": "0.5801686", "text": "def get_description(self, course):\n if hasattr(course, 'programcourse') and course.programcourse.description:\n return course.programcourse.description\n return course.description", "title": "" }, { "docid": "01b8a91b546cb01d2747b5365a103984", "score": "0.57233155", "text": "def get_courses(self) -> List[Dict[str, str]]:\n\n # Check if the catalog exists in the cache\n catalog_cache_path = (self._cache_dir / 'department_catalogs').with_suffix('.json')\n if catalog_cache_path.exists():\n with open(catalog_cache_path, 'r') as file:\n departments = json.load(file)\n else:\n departments = self._download_department_catalogs()\n with open(catalog_cache_path, 'w') as file:\n json.dump(departments, file)\n\n # Gather catalog HTML pages\n html_catalogs = [d['catalog_html'] for d in departments]\n\n department_map = {d['code']: d['name'] for d in departments}\n\n # First pass: Parse basic course information\n courses = []\n for catalog_html in html_catalogs:\n\n # Clean content\n catalog_html = catalog_html.replace('&#160;', ' ') # Non-breaking space\n\n soup = BeautifulSoup(catalog_html, 'lxml')\n courses_div = soup.find('div', attrs={'class': 'courses'})\n course_blocks = courses_div.find_all('div', attrs={'class': 'courseblock'})\n\n for block in course_blocks:\n paragraphs = block.find_all('p')\n\n # Parse department code, course number, and course title\n title_paragraph_text = paragraphs[0].text\n match = re.match(re.compile(r'^(.+?) (\\S+?)\\. (.+?)\\. 
(.+)?$'), title_paragraph_text)\n if not match:\n logging.warning(f'Failed to parse course title paragraph: {title_paragraph_text}')\n continue\n department_code = match.group(1)\n course_number = match.group(2)\n course_title = match.group(3)\n units_string = match.group(4)\n\n course = {\n 'department_code': department_code,\n 'department_name': department_map[department_code],\n 'number': course_number,\n 'title': course_title\n }\n\n # Parse units\n if units_string is not None:\n self._parse_units_string(course, units_string)\n else:\n logging.warning(f'No units found in title paragraph: \"{title_paragraph_text}\"')\n\n # Description\n course['description'] = paragraphs[1].text\n\n # Save remaining paragraphs (they will be parsed on the second pass)\n p = []\n for paragraph in paragraphs[2:]:\n lines = paragraph.text.splitlines()\n p.extend(lines)\n course['_paragraphs'] = p\n\n courses.append(course)\n\n valid_courses = {d['code']: [c['number'] for c in courses if c['department_code'] == d['code']] for d in departments}\n\n # Second pass: Parse extra paragraphs (prerequisites, restrictions, etc.)\n for course in courses:\n for paragraph in course['_paragraphs']:\n\n # Prerequisites\n match = re.match(r'^Prerequisite:\\s*(.+)$', paragraph)\n if match:\n self._parse_prerequisite_string(course, match.group(1), valid_courses=valid_courses)\n continue\n\n # Corequisite\n match = re.match(r'^Corequisite: (.+)$', paragraph)\n if match:\n self._parse_corequisite_string(course, match.group(1), valid_courses=valid_courses)\n continue\n\n # Prerequisite OR Corequisite\n match = re.match(r'^Prerequisite or corequisite: (.+)$', paragraph)\n if match:\n self._parse_prerequisite_or_corequisite_string(course, match.group(1), valid_courses=valid_courses)\n continue\n\n # Restrictions\n match = re.match(r'^Restriction:\\s*(.+)$', paragraph) # TODO: Parse?\n if match:\n course['restriction'] = match.group(1)\n continue\n\n # Same\n match = re.match(r'^Same as (.+)\\.$', paragraph)\n if match:\n self._parse_same_as_string(course, match.group(1)) # TODO: Validate course codes?\n continue\n\n # Concurrent\n match = re.match(r'^Concurrent with (.+)\\.$', paragraph)\n if match:\n course['concurrent'] = match.group(1) # TODO: Validate course codes and parse multiple\n continue\n\n # Repeatability\n match = re.match(r'^Repeatability:\\s*(.+)$', paragraph) # TODO: parse number?\n if match:\n course['repeatability'] = match.group(1)\n continue\n\n # Overlaps\n match = re.match(r'^Overlaps with (.+)\\.$', paragraph) # TODO: validate course codes and parse\n if match:\n course['overlap'] = match.group(1)\n continue\n\n # Grading Option\n match = re.match(r'^Grading Option: (.+)$', paragraph) # TODO: Parse?\n if match:\n course['grading_option'] = match.group(1)\n continue\n\n # Design units\n match = re.match(r'^\\(Design units: ((?:\\d+\\.\\d+|\\.\\d+|\\d+)(?:\\s*-\\s*(?:\\d+\\.\\d+|\\.\\d+|\\d+))?)\\)', paragraph)\n if match:\n course['design_units'] = match.group(1)\n continue\n\n # GE Category\n match = re.match(r'^\\((.+)\\)\\.?$', paragraph)\n if match:\n try:\n self._parse_ge_category_string(course, match.group(1))\n continue\n except Exception as e:\n logger.warning(f'Failed to parse potential GE category for {course[\"department_code\"]} {course[\"number\"]}: {e}')\n\n logger.warning(f'Unrecognized paragraph for course {course[\"department_code\"]} {course[\"number\"]}: \"{paragraph}\"')\n del course['_paragraphs']\n\n # names = [course['department_code'] + ' ' + course['number'] for course in courses]\n 
# print(names)\n #\n # # Extras\n # for course in courses:\n # if course['number'].upper().endswith('L'):\n # non = course['department_code'] + ' ' + course['number'][:-1]\n # if non in names:\n # print('POTENTIAL LAB:', course['department_code'] + ' ' + course['number'])\n # else:\n # print('BAMBOOZLE:', course['department_code'] + ' ' + course['number'], 'NON:', non)\n\n return courses", "title": "" }, { "docid": "f2b2405b211045f5c0734f4b9e40dced", "score": "0.559974", "text": "def context_processor():\n sections = []\n for doc in DOCS:\n content = get_doc(doc)\n lines = content.split(\"\\n\")\n doc = {\n 'id': doc,\n 'title': lines[0].replace(\"#\", \"\").strip(),\n 'description': \"\\n\".join(lines[1:3]),\n 'body': content,\n }\n sections.append(doc)\n return {'sections': sections}", "title": "" }, { "docid": "652b78f9baa59767606df70e08f274e5", "score": "0.5530076", "text": "def get_db_description_text(self):\n \n # sets description text in different languages\n # default value is English\n # sets header of devotion\n # sets description of type of lesson\n #name_text = bible_codes.db_info_description[\"en\"]\n try:\n name_text = bible_codes.db_info_description_title\n except Exception:\n print(\"unable to get internat.db_info_description for \" + self.lang_code)\n if (self.lesson_type == 'ad' or self.lesson_type == ''):\n version_text = bible_codes.db_info_description_version_adult\n else:\n if (self.lesson_type == 'ay'):\n version_text = version_text = bible_codes.db_info_description_version_youth\n else:\n version_text = \"\"\n description_text = \"{0} {1} {2}\".format(name_text, version_text, self.SS_year_inst.quarters_list_year[-1].get('id'))\n return description_text", "title": "" }, { "docid": "ea7e9bbdfb864f925b5c8713c5b6e244", "score": "0.55218595", "text": "def preprocDescrip():\n # Load lists of descriptions\n concentrations = pickle.load(open(\"nlp/concentrations.p\", \"rb\"))\n descr = pickle.load(open(\"nlp/concentrationDescr.p\", \"rb\"))\n\n orgInfo = pickle.load(open(\"nlp/orgInfo.p\", \"rb\"))\n orgNames = pickle.load(open(\"nlp/orgNames.p\", \"rb\"))\n\n # Add org names to descriptions; remove the word 'Harvard'\n for i, org in enumerate(orgInfo):\n org = org.replace(\"harvard\", \"\")\n orgInfo[i] = str(orgNames[i]).lower() + \" \" + str(org).lower()\n\n # Calculate pairwise similarities\n orgInfoLists = tc.getSimilarityLists(orgInfo)\n concentrationLists = tc.getSimilarityLists(descr)\n\n # Store results for future retrieval\n pickle.dump(orgInfoLists, open(\"nlp/orgInfoLists.p\", \"wb\"))\n pickle.dump(concentrationLists, open(\"nlp/concentrationLists.p\", \"wb\"))", "title": "" }, { "docid": "f09c5d3f360908b3e5e6869d150456c2", "score": "0.5521627", "text": "def course_attrs(self):\n\n TITLE_CLASS = \"PALEVEL0SECONDARY\"\n INFO_TABLE_CLASS = \"SSSGROUPBOXLTBLUEWBO\"\n INFO_BOX_CLASS = \"PSGROUPBOXNBO\"\n INFO_BOX_HEADER_CLASS = \"SSSGROUPBOXLTBLUE\"\n DESCRIPTION_CLASS = \"PSLONGEDITBOX\"\n\n EDITBOX_LABEL_CLASS = \"PSEDITBOXLABEL\"\n EDITBOX_DATA_CLASS = \"PSEDITBOX_DISPONLY\"\n DROPDOWN_LABEL_CLASS = \"PSDROPDOWNLABEL\"\n DROPDOWN_DATA_CLASS = \"PSDROPDOWNLIST_DISPONLY\"\n\n DESCRIPTION = \"Description\"\n COURSE_DETAIL = \"Course Detail\"\n COURSE_COMPS = \"Course Components\"\n ENROLL_INFO = \"Enrollment Information\"\n CEAB = \"CEAB Units\"\n\n KEYMAP = {\n \"Career\": \"career\",\n \"Typically Offered\": \"typically_offered\",\n \"Units\": \"units\",\n \"Grading Basis\": \"grading_basis\",\n \"Add Consent\": \"add_consent\",\n \"Drop Consent\": 
\"drop_consent\",\n \"Course Components\": \"course_components\",\n \"Enrollment Requirement\": \"enrollment_requirement\",\n }\n\n ret = {\n 'extra':{\n 'CEAB':{}\n }\n }\n\n # Get the title and number\n title = self.soup.find(\"span\", {\"class\": TITLE_CLASS})\n if not title:\n raise Exception(\"Could not find the course title to parse\")\n\n temp = self._clean_html(title.string)\n m = re.search('^([\\S]+)\\s+([\\S]+)\\s+-\\s+(.*)$', temp)\n if not m:\n raise Exception(\"Title found ({0}) didn't match regular expression\".format(temp))\n\n ret['basic'] = {\n 'title' : m.group(3),\n 'number' : m.group(2),\n 'description' : \"\"\n }\n\n # Blue table with info, enrollment, and description\n info_table = self.soup.find(\"table\", {\"class\": INFO_TABLE_CLASS})\n\n # Look through inner tables\n info_boxes = self.soup.find_all(\"table\", {\"class\": INFO_BOX_CLASS})\n for table in info_boxes:\n\n # Get the table type\n temp = table.find(\"td\", {\"class\": INFO_BOX_HEADER_CLASS})\n if not temp or not temp.string:\n # Nothing there\n continue\n\n box_title = temp.string\n\n # Process the description box\n if box_title == DESCRIPTION:\n desc_list = table.find(\"span\", {\"class\": DESCRIPTION_CLASS}).contents\n if desc_list:\n # If not x.string, it means it's a <br/> Tag\n ret['basic']['description'] = \"\\n\".join([x for x in desc_list if x.string])\n\n # Process the course details and enrollment info\n elif box_title in (COURSE_DETAIL, ENROLL_INFO):\n\n # Labels and values for \"Add/Drop Consent\" (enroll), \"Career\" (course), and \"Grading Basis\" (course)\n labels = table.find_all(\"label\", {\"class\": DROPDOWN_LABEL_CLASS})\n data = table.find_all(\"span\", {\"class\": DROPDOWN_DATA_CLASS})\n\n if box_title == ENROLL_INFO:\n # Labels and values for \"Typically Offered\", \"Enrollment Requirement\",\n labels += table.find_all(\"label\", {\"class\": EDITBOX_LABEL_CLASS})\n data += table.find_all(\"span\", {\"class\": EDITBOX_DATA_CLASS})\n\n # Add all the type -> value mappings to the ret dict\n for x in range(0, len(labels)):\n if labels[x].string in KEYMAP:\n ret['extra'][KEYMAP[labels[x].string]] = data[x].get_text()\n\n # Special case for course detail, \"Units\" and \"Course Components\"\n if box_title == COURSE_DETAIL:\n # Units and course components\n labels = table.find_all(\"label\", {\"class\": EDITBOX_LABEL_CLASS})\n data = table.find_all(\"span\", {\"class\": EDITBOX_DATA_CLASS})\n for x in range(0, len(labels)):\n if labels[x].string == COURSE_COMPS:\n # Last datafield, has multiple type -> value mappings\n comp_map = {}\n for i in range(x, len(data), 2):\n comp_map[data[i].string] = data[i+1].get_text()\n\n ret['extra'][KEYMAP[labels[x].string]] = comp_map\n break\n elif labels[x].string in KEYMAP:\n ret['extra'][KEYMAP[labels[x].string]] = data[x].get_text()\n\n # Process the CEAB information\n elif box_title == CEAB:\n\n labels = table.find_all(\"label\", {\"class\": EDITBOX_LABEL_CLASS})\n data = table.find_all(\"span\", {\"class\": EDITBOX_DATA_CLASS})\n\n for x in range(0, len(labels)):\n try:\n # Clean up the data\n temp = int(self._clean_html(data[x].string))\n except (TypeError, ValueError) as e:\n temp = 0\n\n # Add the data to the dict if it exists\n if labels[x].string:\n # Remove the last character of the label to remove the \":\"\n ret['extra']['CEAB'][labels[x].string[:-1]] = temp\n\n else:\n raise Exception('Encountered unexpected info_box with title: \"{0}\"'.format(box_title))\n\n return ret", "title": "" }, { "docid": "7d5993b1c9d30fe4c4e6bdaecaea155d", 
"score": "0.55084336", "text": "def getCourses(self,course,link,abbrev):\n \t#navigates to course page\n \tcourse_map = {}\n\n \tpage = requests.get(BASE_URL + link,verify=False)\n \tsoup = BeautifulSoup(page.content)\n\n \t#finds all courses in major and puts them in a hashmap\n \tcoursePageLink = soup.find_all(\"li\")\n \tif coursePageLink:\n \t\tlink = str(coursePageLink[len(coursePageLink)-1])[54:-90]\n\n \tpage = requests.get(BASE_URL + link,verify=False)\n \tsoup = BeautifulSoup(page.content)\n \tregex = re.compile(r'<p><strong>(\\w+)')\n \tuniqueTup = []\n \tnormalTup = []\n \tcourseList = []\n\n \t#\n \tfor link in soup.find_all(\"p\"):\n\n \t\t#special cases\n \t\tif '<br/><br/>' in str(link):\n \t\t\t#tuple {major:[course_number,name]}\n \t\t\ttup = self.uniqueCases(link)\n \t\t\tif tup:\n \t\t\t\tuniqueTup = tup\n \t\t#standard case\n \t\telif re.match(regex,str(link)):\n \t\t\t#tuple {major:[course_number,name]}\n \t\t\ttup = normalCase = self.normalCase(link)\n \t\t\tif tup:\n \t\t\t\tnormalTup = tup\n \t\t#puts same major tuples under 1 major\n \t\tcourseTuple = uniqueTup + normalTup\n \t\tif courseTuple:\n \t\t\tcourseTuple[0] = courseTuple[0] + (\"Department: \" + abbrev,)\n \t\t\tcourseList.append(courseTuple)\n \treturn courseList", "title": "" }, { "docid": "d22b234b0517aee1ffb2781dd5324987", "score": "0.5499037", "text": "def create_course_dict():\n wb = load_workbook('newcatalog.xlsx')\n catalog = wb.get_sheet_by_name('catalog')\n Course = namedtuple('Course', 'program, designation')\n CourseInfo = namedtuple('CourseInfo', 'credits, terms, prereqs')\n course_dict = {}\n for row in range(1, catalog.max_row + 1):\n key = Course(get_val(catalog, 'A', row), get_val(catalog, 'B', row))\n prereqs = tuple(tuple(get_split_course(prereq) for prereq in prereqs.split())\n for prereqs in none_split(get_val(catalog, 'E', row)))\n val = CourseInfo(get_val(catalog, 'C', row), tuple(get_val(catalog, 'D', row).split()), prereqs)\n course_dict[key] = val\n return course_dict", "title": "" }, { "docid": "e32262f7ee3a30dd920a64891366d2b9", "score": "0.5486802", "text": "def load_descriptions(doc):\n mapping = dict()\n # process lines\n for line in doc.split('\\n'):\n # split line by white space\n tokens = line.split()\n if len(line) < 2:\n continue\n # take the first token as the image id, the rest as the description\n image_id, image_desc = tokens[0], tokens[1:]\n # remove filename from image id\n image_id = image_id.split('.')[0]\n # convert description tokens back to string\n image_desc = ' '.join(image_desc)\n # create the list if needed\n if image_id not in mapping:\n mapping[image_id] = list()\n # store description\n mapping[image_id].append(image_desc)\n return mapping # parse descriptions", "title": "" }, { "docid": "9c0ac73637e8f0ed167be1abddecaac8", "score": "0.54675364", "text": "def __process_descriptions(self):\n while True:\n try:\n mp = self.catalog_description_queue.get()\n\n with self.mt_lock:\n self.catalog_description_progress.next()\n\n catalog_entry = mp.best_match\n catalog_desc = catalog_entry.description\n\n try:\n source_desc_obj = mp.program.descriptions.get(\n description_type=self.description_type_source_desc\n )\n source_desc = source_desc_obj.description if source_desc_obj.description else ''\n except ProgramDescription.DoesNotExist:\n source_desc = ''\n\n if self.force_desc_updates or not source_desc or source_desc != catalog_desc:\n # Sanitize/process the incoming catalog description if\n # we don't already have an existing original catalog\n # description 
to compare against, or if we do and it\n # changed since the last time it was imported\n # (or if we're forcing description updates)\n if catalog_entry.program_description_clean is None:\n catalog_entry.program_description_clean = self.__sanitize_description(\n description_str=catalog_desc,\n strip_tables=True\n )\n\n # Pass along to the curriculum queue next\n with self.mt_lock:\n self.catalog_curriculum_queue.put(mp)\n\n except Exception as e:\n logging.log(logging.ERROR, e)\n finally:\n self.catalog_description_queue.task_done()", "title": "" }, { "docid": "cc30d81c48dcd7ffa4fd5eaf07a82637", "score": "0.5457821", "text": "def collect_doc(ori_doc):\n print('collecting original documents from DB list.')\n print(len(ori_doc), ' total documents are collected.')\n\n doc_name2txt = dict()\n for (doc_id, doc_body) in ori_doc:\n # repeated documentation is not allowed\n assert doc_id not in doc_name2txt\n doc_name2txt[doc_id] = doc_body\n return doc_name2txt", "title": "" }, { "docid": "139d4b89e1c6197ed2ef9a1048e35c4c", "score": "0.5442083", "text": "def crawl_course(metadata: CourseInfo) -> CourseInfo:\n # Need to re-decode with utf-8, original is encoded with ISO-8859-1\n text: str = requests.get(\n \"http://class-qry.acad.ncku.edu.tw/syllabus/online_display.php\",\n params={\n \"syear\": metadata.year.zfill(4),\n \"sem\": metadata.semester,\n \"co_no\": metadata.course_id,\n \"class_code\": metadata.class_code if metadata.class_code != \"0\" else None\n }\n ).text.encode(\"ISO-8859-1\").decode(\"utf-8\")\n html: etree._Element = etree.HTML(text)\n\n title: etree._Element = html.xpath('//*[@id=\"header\"]/h1/div/span')[1]\n filtered = list(filter(lambda child: child.tag == \"br\", title.getchildren()))\n course_names = list(map(lambda child: child.tail.strip(), filtered))\n\n sidebar: etree._Element = html.xpath('//*[@id=\"sidebar\"]/div')[0]\n filtered = list(filter(lambda child: child.tag == \"span\", sidebar.getchildren()))\n information = list(map(lambda child: child.tail.strip(), filtered))\n\n return CourseInfo(**{\n \"course_name\": f\"{course_names[0]} {course_names[1]}\",\n \"department\": information[0],\n \"instructor\": information[1],\n \"year\": metadata.year,\n \"semester\": metadata.semester,\n \"serial_number\": information[4],\n \"attribute_code\": information[5],\n \"course_id\": metadata.course_id,\n \"class_code\": metadata.class_code,\n \"credit\": information[8],\n \"language\": information[9],\n \"files\": metadata.files,\n })", "title": "" }, { "docid": "1eee7163c20f08148d4792abc8d8382b", "score": "0.5434418", "text": "def __process_course_tag(self, course_tag):\n course = Course()\n\n course.link = course_tag.find('a')['href']\n\n # Detect course ID\n expr = re.search('index_mobile\\.php\\?id=(\\d+)&code_departement=.*&mydate=([0-9/]+)', course.link)\n course.id = expr.group(1)\n course.date = datetime.datetime.strptime(expr.group(2), '%d/%m/%Y').date()\n\n for span_tag in course_tag.find_all('span'):\n\n assert(len(span_tag['class']) == 1)\n type = span_tag['class'][0]\n\n if type == 'image':\n # First field : time of beginning / end of the courses\n # <br><b>08:30<br>11:45</br>\n expr = re.search('<br/?><b>([0-9]{2}:[0-9]{2})<br>([0-9]{2}:[0-9]{2})</b', str(span_tag))\n if not expr:\n print(\"Unable to find hours in %s !\" % span_tag)\n continue\n\n time_format = '%H:%M'\n course.time_begin = datetime.datetime.strptime(expr.group(1), time_format).time()\n course.time_end = datetime.datetime.strptime(expr.group(2), time_format).time()\n elif type == 'comment':\n # 
Second field : department and place\n expr = re.search('(.*) : (.*)', span_tag.text)\n course.department = expr.group(1)\n course.place = expr.group(2)\n elif type == 'name':\n # Third field : name\n course.name = span_tag.text\n elif type == 'starcomment':\n # Fourth field : comment\n course.comment = span_tag.text\n elif type == 'arrow':\n # Nothing to do\n pass\n else:\n print(\"Unrecognized type : %s !\" % type)\n\n return course", "title": "" }, { "docid": "2e8122fee62f4208b54a325a57548bc1", "score": "0.53735864", "text": "def insertDocument(self, details):\n details[\"Accreditation\"]=\"Not Evaluated\" \n details[\"Type\"]=\"Unknown\" \n\n if details[\"Category\"].lower().startswith(\"conference\"):\n details[\"ScanPath\"]=\"conferences/\"+details[\"ConferenceTitle\"].replace(' ', '_')+\"/publications/\"\n details[\"ScanFileName\"]=details[\"Title\"].replace(' ', '_')+\".pdf\"\n details[\"TableOfContentsPath\"]=\"conferences/\"+details[\"ConferenceTitle\"].replace(' ', '_')+\"/TOCs/\"\n details[\"PeerReviewPath\"]=\"conferences/\"+details[\"ConferenceTitle\"].replace(' ', '_')+\"/peerReviews/\"\n result=insertConferencePaper(self,details)\n \n elif details[\"Category\"].lower().startswith(\"journal\"):\n if \"Volume\" not in details:\n details[\"Volume\"]=None\n if \"Issue\" not in details:\n details[\"Issue\"]=None\n details[\"HIndex\"]=None\n details[\"ScanPath\"]=\"journals/\"+details[\"JournalTitle\"].replace(' ', '_')+\"/publications/\"\n details[\"ScanFileName\"]=details[\"Title\"].replace(' ', '_')+\".pdf\"\n details[\"TableOfContentsPath\"]=\"journals/\"+details[\"JournalTitle\"].replace(' ', '_')+\"/TOCs/\"\n details[\"PeerReviewPath\"]=\"journals/\"+details[\"JournalTitle\"].replace(' ', '_')+\"/peerReviews/\"\n result=insertJournalPaper(self,details)\n \n elif details[\"Category\"].lower().startswith(\"book\"):\n \n details[\"ScanPath\"]=\"books/\"+details[\"BookTitle\"].replace(' ', '_')+\"/publications/\"\n details[\"ScanFileName\"]=details[\"Title\"].replace(' ', '_')+\".pdf\"\n details[\"TableOfContentsPath\"]=\"books/\"+details[\"BookTitle\"].replace(' ', '_')+\"/TOCs/\"\n details[\"PeerReviewPath\"]=\"books/\"+details[\"BookTitle\"].replace(' ', '_')+\"/peerReviews/\"\n result=insertBookSection(self,details)\n\n return result", "title": "" }, { "docid": "a34bbcb12c76ef2ac796e093dd76091d", "score": "0.53501743", "text": "def find_description(docs):\n summary = None\n filename = docs.get('meta.summary', 'SUMMARY').strip()\n if filename and _os.path.isfile(filename):\n fp = open(filename)\n try:\n try:\n summary = fp.read().strip().splitlines()[0].rstrip()\n except IndexError:\n summary = ''\n finally:\n fp.close()\n\n description = None\n filename = docs.get('meta.description', 'DESCRIPTION').strip()\n if filename and _os.path.isfile(filename):\n fp = open(filename)\n try:\n description = fp.read().rstrip()\n finally:\n fp.close()\n\n if summary is None and description:\n from docutils import core\n summary = core.publish_parts(\n source=description,\n source_path=filename,\n writer_name='html',\n )['title'].encode('utf-8')\n\n return summary, description", "title": "" }, { "docid": "5628300a054c43e6ea3d5b036a1997f0", "score": "0.5328472", "text": "def fetch_description(self, given_description, given_words, left_margin, right_margin, num_of_results):\n # https://simply-python.com/2014/03/14/saving-output-of-nltk-text-concordance/\n # Instead of using nltk.word_tokenize which\n # hangs for a long time sometimes like when using the text from\n # 
\"url\":\"https://www.reddit.com/user/AutoModerator\"\n # The hanging comes from nltk/tokenize/__init__\n # line 94 : return tokenizer.tokenize(text)\n # I am avoiding this prt and I am only using the TreebankWordTokenizer\n tokens = self.treebank.tokenize(given_description)\n\n text = nltk.Text(tokens)\n c = nltk.ConcordanceIndex(tokens, key=lambda s: s.lower())\n concordance_txt = ([[text.tokens[list(map(lambda x: x-left_margin if (x-left_margin) > 0 else left_margin-x,\n [offset]))[0]:offset+right_margin+1]\n for offset in c.offsets(given_word)][:num_of_results]\n for given_word in given_words.split()])\n concordance_txt = itertools.chain(*concordance_txt)\n return '\\n'.join([''.join([x+' ' for x in con_sub]) for con_sub in concordance_txt])", "title": "" }, { "docid": "30e4f9f1d8f34b0353c7fd7888b22f40", "score": "0.52897835", "text": "def test_export_description():\n desc = cds_list[1].export_description()\n assert desc == \"# origin_seq: scaffold_0 # strand: reverse # start: 1581 # end: 2864\"", "title": "" }, { "docid": "84bdd069a9b1937cff12261a4d3b943a", "score": "0.5288692", "text": "def get_courses():\n return DB[COURSE_COLLECTION].find()", "title": "" }, { "docid": "714bbc21287abecfeab9881af37d7a0c", "score": "0.52808845", "text": "def _commentsToDescription(self, parseResults):\n \n commentString = ''\n if self._KEY_COMMENT in parseResults.keys():\n commentString = parseResults[self._KEY_COMMENT]+\". \"\n # Remove the _KEY_COMMENTS from list of keys.\n parseResults.pop(self._KEY_COMMENT)\n \n if self._VALUE_COMMENT in parseResults.keys():\n commentString = commentString + parseResults[self._VALUE_COMMENT]\n parseResults.pop(self._VALUE_COMMENT)\n\n if len(commentString) == 0:\n return \n \n # Clean up the comment string, remove the // and extra spaces.\n commentString = re.sub(r'//', '', commentString)\n commentString = re.sub('\\s+', ' ',commentString)\n parseResults[self._DESCRIPTION] = commentString", "title": "" }, { "docid": "379c97cf4f0c0597fb8cbe6965c27b5c", "score": "0.5277338", "text": "def extract(self):\n # undergraduate and comp3900 are parameters\n url = \"https://www.handbook.unsw.edu.au/{}/courses/2019/{}/\".format(self.study_level, self.course)\n url = requests.get(url)\n htmltext = url.text\n\n #read the html\n soup = BeautifulSoup(htmltext, 'html.parser')\n\n if soup.title.string:\n self.details[\"Title\"] = soup.title.string\n else:\n self.details[\"Title\"] = \"\"\n if soup.find(id=\"readMoreIntro\").div.p:\n self.details[\"Description\"] = soup.find(id=\"readMoreIntro\").div.p.string\n else:\n self.details[\"Description\"] = \"\"\n if soup.find_all('strong')[1]:\n self.details[\"Credit\"] = soup.find_all('strong')[1].string\n else:\n self.details[\"Credit\"] = \"\"\n if soup.find(id=\"readMoreSubjectConditions\"):\n self.details[\"Prerequisite\"] = soup.find(id=\"readMoreSubjectConditions\").div.div.string\n else:\n self.details[\"Prerequisite\"] = \"\"\n if soup.find(id=\"subject-outline\"):\n self.details[\"Course Outline\"] = soup.find(id=\"subject-outline\").div.a.attrs['href']\n else:\n self.details[\"Course Outline\"] = \"\"\n\n if soup.select('.o-attributes-table-item ')[0].a:\n self.details[\"Faculty\"] = soup.select('.o-attributes-table-item ')[0].a.string\n else:\n self.details[\"Faculty\"] = \"\"\n\n if soup.select('.o-attributes-table-item ')[1].a:\n self.details[\"School\"] = soup.select('.o-attributes-table-item ')[1].a.string\n else:\n self.details[\"School\"] = \"\"\n\n if soup.select('.o-attributes-table-item ')[3].p:\n 
self.details[\"Offering Terms\"] = soup.select('.o-attributes-table-item ')[3].p.string\n else:\n self.details[\"Offering Terms\"] = \"\"\n if soup.select('.o-attributes-table-item ')[4].p:\n self.details[\"Campus\"] = soup.select('.o-attributes-table-item ')[4].p.string.replace(\" \", \"\").strip()\n else:\n self.details[\"Campus\"] = \"\"\n\n if soup.select('.p-all-1')[0]:\n for value in soup.select('.p-all-1')[0].children:\n if (soup.select('.p-all-1')[0].index(value) == 3):\n self.details[\"PDF\"] = value.a.attrs['href']\n else:\n self.details[\"PDF\"] = \"\"\n\n if soup.select('.o-attributes-table-item ')[5].p:\n self.details[\"Indicative contact hours\"] = soup.select('.o-attributes-table-item ')[5].p.string\n else:\n self.details[\"Indicative contact hours\"] = \"\"\n if soup.select('.a-column-sm-12')[8].p:\n self.details[\"Commonwealth Supported Student\"] = soup.select('.a-column-sm-12')[8].p.string.strip()\n else:\n self.details[\"Commonwealth Supported Student\"] = \"\"\n if soup.select('.a-column-sm-12')[10].p:\n self.details[\"Domestic Student\"] = soup.select('.a-column-sm-12')[10].p.string.strip()\n else:\n self.details[\"Domestic Student\"] = \"\"\n if soup.select('.a-column-sm-12')[12].p:\n self.details[\"International Student\"] = soup.select('.a-column-sm-12')[12].p.string.strip()\n else:\n self.details[\"International Student\"] = \"\"", "title": "" }, { "docid": "f3c2d89a31af1fdce89977df9b5cc353", "score": "0.5242611", "text": "def load_clean_descriptions(filename, dataset):\n\n doc = load_doc(filename)\n descriptions = {}\n for line in doc.split('\\n'):\n # Split line by white space.\n tokens = line.split()\n # Split id from description.\n image_id, image_desc = tokens[0], tokens[1:]\n # skip images not in the set.\n if image_id in dataset:\n # create list.\n if image_id not in descriptions:\n descriptions[image_id] = []\n # Wrap description in tokens.\n desc = 'startseq ' + ' '.join(image_desc) + ' endseq'\n descriptions[image_id].append(desc)\n return descriptions", "title": "" }, { "docid": "24e7c4d31071574250e8b9b9e809c3a1", "score": "0.5194507", "text": "def _get_description(self):\n if self._generated:\n return self._title\n else:\n text = read_file(self.path.src).splitlines()\n if len(text) >= 4:\n desc_line = text[3]\n # Description begins with an AsciiDoc comment\n if desc_line[0:2] == '//':\n return desc_line[2:].strip()", "title": "" }, { "docid": "7cc3979664f410b5639ae836e6d5a5f5", "score": "0.51822454", "text": "def set_description_and_comment(abstract_bibdocfiles, description, comment):\n for bibdocfile in abstract_bibdocfiles:\n bibdocfile.description = description\n bibdocfile.comment = comment", "title": "" }, { "docid": "e53f1e2ca92345ae3c0bf7364ff9d87d", "score": "0.5170063", "text": "def get_interest_descriptions(db):\n failures = []\n\n for rec in db.records:\n if rec.is_interest() and not rec.has_field(\"DESC\"):\n failures.append(\"Missing description on {}\".format(rec))\n return failures", "title": "" }, { "docid": "0d91de2921245258b44a7a814d787511", "score": "0.5154081", "text": "def get_courses(self):\n sitemap = 'http://www.golfadvisor.com/sitemap_courses-#.xml'\n pages = array(\n [sitemap.replace('#', str(index)) for index in range(1, 35)]\n )\n page_lists = array_split(pages, POOL_SIZE)\n with ProcessPoolExecutor() as extr:\n results = extr.map(self._get_course_pages, enumerate(page_lists))\n courses = array([link for links in results for link in links])\n self.courses = courses", "title": "" }, { "docid": 
"4fe0f16a5d1795f42c79fa39249381cb", "score": "0.51433355", "text": "def _save_courses(self, category, page, courses):\n now = self._get_datetime_now()\n if not os.path.exists('files_{}'.format(now)):\n os.makedirs('files_{}'.format(now))\n out_file_name = 'files_{}/courses_{}_{}.csv'.format(\n now,\n category,\n page\n )\n csv_headers = list(filter(\n lambda property: not property.startswith('_'),\n dir(courses[0])))\n with open(out_file_name, mode='w+', encoding='utf-8', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(csv_headers)\n for course in courses:\n row = [str(getattr(course, prop)) for prop in csv_headers]\n writer.writerow(row)", "title": "" }, { "docid": "1489dc2b1301e4d7a62e4f4c927fa988", "score": "0.5138065", "text": "def load_courses(self):\n\n print('>> Loading Courses...')\n\n Course.objects.all().delete()\n\n # Load Course AIM Labels\n aim_label = {}\n\n with open(os.getcwd() + '/coursefinderapp/data/KISAIM.csv') as f:\n next(f) # Skip the header\n reader = csv.reader(f, skipinitialspace=True)\n aim_label = dict(reader)\n\n course_locs = self.load_course_locations()\n\n def handler(line):\n \"\"\"\n Translate each line of the csv in a Course object and associate with the\n its respective location.\n :param line: csv line\n :return: Course object\n \"\"\"\n\n c = Course(\n pubukprn=line[0],\n ukprn=line[1],\n kiscourseid=line[14],\n title=line[27].replace('\"', \"\"),\n url=line[4],\n distance=line[6],\n mode=line[15],\n aim=aim_label[line[32]],\n )\n\n loc_id = course_locs.get((c.pubukprn, c.ukprn, c.kiscourseid, c.mode), None)\n\n if loc_id is not None:\n c.location = Location.objects.get(ukprn=c.ukprn, locid=loc_id)\n\n return c\n\n courses = self._read_csv('/coursefinderapp/data/KISCOURSE.csv', handler=handler)\n Course.objects.bulk_create(courses)", "title": "" }, { "docid": "bfb93adaa5a30d6a179d17b19bd7976a", "score": "0.5123131", "text": "def parse_course_overview(self, response):\n hxs = HtmlXPathSelector(response)\n xpath = '//table[contains(@class, \"courseBrochure\")]'\n loader = ItemLoader(CourseOverviewItem(), selector=hxs.select(xpath))\n loader.item['course'] = response.request.meta['course']\n loader.add_xpath('extent', 'tr[1]/td[2]', re=r'(\\d+(?:-\\d+)?)')\n loader.add_xpath('teaching_period', 'tr[2]/td[2]')\n loader.add_xpath('learning_outcomes', 'tr[3]/td[2]')\n loader.add_xpath('content', 'tr[4]/td[2]')\n loader.add_xpath('prerequisites', 'tr[5]/td[2]')\n loader.add_xpath('study_materials', 'tr[11]/td[2]')\n return loader.load_item()", "title": "" }, { "docid": "a5775e30eea41cc62bad2d5c0bcff2bd", "score": "0.50984627", "text": "def description(doc=__doc__):\n print(doc)\n for line in doc.splitlines():\n return line.strip()", "title": "" }, { "docid": "82dd849ccacca9e82eac364ede1530be", "score": "0.5095845", "text": "def fill_out_courses(results, new_keywords = None, student_major = None, student_interests = None):\n\n global conn\n\n cur = conn.cursor()\n courses = []\n\n for result in results:\n # Build Course object from current result\n result_course = Course.Course()\n result_course.department = result[17]\n result_course.course_num = result[2]\n result_course.id = result[13]\n result_course.name = result[16]\n result_course.term = result[19]\n result_course.credits = result[11]\n\n # Parse classroom/meeting times/description/prerequisites if defined\n if result[24] != None:\n res = parse_time_room(result[24])\n if res != None:\n result_course.classroom = res[0]\n result_course.time = res[1]\n if result[29] != None:\n 
result_course.description = result[29]\n if result[30] != None:\n result_course.prereqs = result[30]\n call_debug_print(result_course.prereqs)\n\n # Adding professor information based on id found in courses\n if result[21] != None:\n result_course.faculty_id = result[21]\n if '|' in result_course.faculty_id:\n prof_ids = result_course.faculty_id.split('|')\n query_str = \"SELECT * FROM professors WHERE id = \\'\"\n for id_num in prof_ids:\n query_str = query_str + str(int(id_num)) + \"' OR id = '\"\n query_str = query_str[:-10]\n cur.execute(query_str)\n names = cur.fetchall()\n result_course.faculty_id = \"\"\n for result in names:\n result_course.faculty_id = result_course.faculty_id + result[0] + \", \"\n result_course.faculty_name = result_course.faculty_name + result[1] + \", \"\n if result_course.faculty_id != \"\" and result_course.faculty_id != \"\":\n result_course.faculty_name = result_course.faculty_name[:-2]\n result_course.faculty_id = result_course.faculty_id[:-2]\n else:\n query_str = \"SELECT name FROM professors WHERE id = \\'\" \\\n + str(int(result_course.faculty_id)) + \"'\"\n cur.execute(query_str)\n name = cur.fetchall()\n if len(name) > 0 and len(name[0]) > 0:\n result_course.faculty_name = name[0][0]\n\n # Only runs in the case where this is called by query_by_keywords\n # Uses the new_keywords list in the query_by_keywords function\n if new_keywords is not None:\n relevance_list, weighted_relevance = calculate_course_relevance(result_course, new_keywords,\n student_major, student_interests)\n result_course.relevance = relevance_list\n result_course.weighted_score = weighted_relevance\n courses.append(result_course)\n\n return courses", "title": "" }, { "docid": "691a638d42184e73fec23877a27a7f5b", "score": "0.5080336", "text": "def get_descriptions(programs, program_cache=None, nocattrans=0, quiet=0, slowdays=0):\n\n # This regexp tries to find details such as Genre, Acteurs, Jaar van Premiere etc.\n detail = re.compile('<li>[^<]*<strong>([\\w\\- ]+):</strong>(.*?)</li>', re.DOTALL)\n\n # These regexps find the main description area and lines of descriptive text in this area\n description = re.compile('<div id=\"prog-content\">(.*?)</div>',re.DOTALL)\n descrline = re.compile('<p>(.*?)</p>',re.DOTALL)\n\n # These regexps try to find the subgenre of the program, e.g. 
Fantasy-familiefilm, Comedyserie, \n # Woonprogramma, Culinair Programma etc.\n # descrtype searches for the subgenre in the description area, e.g.:\n # <strong>Woonprogramma</strong><p>Nance, Tooske, Ellemieke, Marlayne en Viktor helpen mensen...\n #\n # addprogtype searches for the subgenre in the special \"mijn TV agenda\" link, e.g.:\n # <a href=\"#perstvgids\" title=\"Plaats dit programma in mijn TV Agenda\" \n # onclick=\"addProg('10281755','Informatief','Woonprogramma.',event);return false;\">\n\n descrtype = re.compile('<strong>([^<]*)</strong>',re.DOTALL)\n addprogtype = re.compile(\"addProg\\(.*?,.*?,'(.*?)',.*?\\)\",re.DOTALL)\n\n # randomize detail requests\n nprograms = len(programs)\n fetch_order = list(range(0,nprograms))\n random.shuffle(fetch_order)\n\n counter = 0\n for i in fetch_order:\n counter += 1\n if programs[i]['offset'] >= slowdays:\n continue\n \n log('\\n(%3.0f%%) %s: %s ' % (100*float(counter)/float(nprograms), i, programs[i]['name']), quiet)\n\n # check the cache for this program's ID\n cached_program = program_cache.query(programs[i]['ID'])\n \n if (cached_program != None):\n log(' [cached]', quiet)\n # copy the cached information, except the start/end times, rating and clumping, \n # these may have changed.\n tstart = programs[i]['start-time']\n tstop = programs[i]['stop-time']\n try:\n clump = programs[i]['clumpidx']\n except LookupError:\n clump = False\n programs[i] = cached_program\n programs[i]['start-time'] = tstart\n programs[i]['stop-time'] = tstop\n if clump:\n programs[i]['clumpidx'] = clump\n continue\n\n # be nice to tvgids.nl\n time.sleep(random.randint(nice_time[0], nice_time[1]))\n\n # get the details page, and get all the detail nodes\n descriptions = ()\n details = ()\n try:\n log(' [normal fetch]', quiet)\n total = get_page(programs[i]['url'])\n details = detail.finditer(total)\n \n descrspan = description.search(total)\n if descrspan != None:\n descriptions = descrline.finditer(descrspan.group(1))\n else:\n log('Can not find program details on page\\n', quiet)\n descriptions = []\n \n except Exception as e:\n # if we cannot find the description page, \n # go to next in the loop\n log(' [fetch failed or timed out]', quiet)\n continue\n # define containers\n programs[i]['credits'] = {}\n programs[i]['video'] = {}\n\n # now parse the details\n programs[i]['details'] = []\n # First, we try to find the program type in the special \"mijn TV Agenda\" link, if not found there we\n # search for a type in the description section.\n # Note that this type is not the same as the generic genres (these are searched later on), \n # but a more descriptive one like \"Culinair programma\" \n # \n def add_details(program, details):\n details = filter_line(details)\n if len(details) == 0:\n return\n if len(program['details']) > 0 and program['details'][-1].lower() == details.lower():\n return\n program['details'].append(details)\n \n if 'subgenre' in programs[i]:\n add_details(programs[i], programs[i]['subgenre'])\n \n m = addprogtype.search(total)\n if m:\n add_details(programs[i], m.group(1).capitalize())\n \n m = descrtype.search(descrspan.group(1))\n if m:\n add_details(programs[i], m.group(1).capitalize())\n\n # Secondly, we add one or more lines of the program description that are present.\n for descript in descriptions:\n # descript is a re.Match object\n descr_html = descript.group(1)\n \n # Remove sponsored link from description if present.\n sponsor_pos = descr_html.rfind('<i>Gesponsorde link:</i>')\n if sponsor_pos > 0:\n descr_html = 
descr_html[0:sponsor_pos]\n if re.search('[Gg]een detailgegevens be(?:kend|schikbaar)', descr_html):\n descr_html = ''\n \n add_details(programs[i], descr_html)\n \n if len(programs[i]['details']) == 0:\n programs[i]['detail1'] = ''\n else:\n programs[i]['detail1'] = programs[i]['details'][0]\n \n # Finally, we check out all program details. These are generically denoted as:\n #\n # <li><strong>(TYPE):</strong>(CONTENT)</li> \n #\n # Some examples:\n #\n # <li><strong>Datum:</strong>16 oktober 2008</li>\n # <li><strong>Genre:</strong>Amusement</li>\n \n for d in details:\n ctype = d.group(1).strip().lower()\n content_asis = filter_line(d.group(2))\n content = filter_line(content_asis)\n \n if content == '':\n continue\n\n elif ctype == 'genre':\n\n # Fix detection of movies based on description as tvgids.nl sometimes \n # categorises a movie as e.g. \"Komedie\", \"Misdaadkomedie\", \"Detectivefilm\". \n genre = filter_line(content.title()) # Titlecase\n if nocattrans:\n programs[i]['genre'] = genre\n elif (programs[i]['detail1'].lower().find('film') != -1 \\\n or programs[i]['detail1'].lower().find('komedie') != -1)\\\n and programs[i]['detail1'].lower().find('tekenfilm') == -1 \\\n and programs[i]['detail1'].lower().find('animatiekomedie') == -1 \\\n and programs[i]['detail1'].lower().find('filmpje') == -1:\n programs[i]['genre'] = 'Film'\n else:\n try:\n programs[i]['genre'] = cattrans[genre.lower()].title()\n except LookupError:\n programs[i]['genre'] = genre\n\n # Parse persons and their roles for credit info\n elif ctype in roletrans:\n programs[i]['credits'][roletrans[ctype]] = []\n\n persons = content_asis.split(',');\n\n for name in persons:\n if name.find(':') != -1:\n name = name.split(':')[1]\n if name.find('-') != -1:\n name = name.split('-')[0]\n if name.find('e.a') != -1:\n name = name.split('e.a')[0]\n programs[i]['credits'][roletrans[ctype]].append(filter_line(name))\n\n elif ctype == 'bijzonderheden':\n if content.find('Breedbeeld') != -1:\n programs[i]['video']['breedbeeld'] = 1\n if content.find('Zwart') != -1: \n programs[i]['video']['blackwhite'] = 1\n if content.find('Teletekst') != -1: \n programs[i]['teletekst'] = 1\n if content.find('Stereo') != -1: \n programs[i]['stereo'] = 1\n elif ctype == 'url':\n programs[i]['infourl'] = filter_line(content)\n elif ctype not in programs[i]:\n # In unmatched cases, we still add the parsed type and content to the program details.\n # Some of these will lead to xmltv output during the xmlefy_programs step\n programs[i][filter_line(ctype)] = filter_line(content)\n\n # do not cache programming that is unknown at the time\n # of fetching.\n \n if programs[i]['name'].lower() != 'onbekend':\n program_cache.add(programs[i])\n\n log('\\ndone...\\n\\n', quiet)\n \n # done", "title": "" }, { "docid": "46c6041fcc932cbe89678330c2def05a", "score": "0.5073233", "text": "def parse_description_page(self, response):\r\n house_ad = response.meta['house_ad']\r\n house_ad['description'] = \" \".join(response.xpath('//div[@class=\"mar_t_20\"]/p/text()').extract())\r\n yield house_ad", "title": "" }, { "docid": "913d57ef889ca5d9fafd4753b200e6c1", "score": "0.50694275", "text": "def find_desc(self, cols):\n # go through the columns one at a time\n for i in range(len(cols)):\n # don't reuse columns already discovered\n if i == self.date or i == self.amt:\n continue\n\n # heuristic: descriptions are usually quoted\n s = cols[i]\n if s.startswith('\"') or s.startswith(\"'\"):\n if \"Reference\" in s: # known exception\n continue\n self.desc = i\n return 
True\n return True", "title": "" }, { "docid": "e45b9bff5c0fa2a9c9e59f5dcff853bf", "score": "0.5067044", "text": "def scrape(args):\n\n\n import logging\n log = logging.getLogger(\"root\")\n log.info( \"Scraping RPI CS data.\" )\n\n\n # Generate a BeautifulSoup object.\n catalog_index_url = \"http://catalog.rpi.edu/content.php?filter[27]=CSCI&cur_cat_oid=13&navoid=313\"\n catalog_index = requests.get( catalog_index_url )\n soup = BeautifulSoup( catalog_index.text )\n\n # Identify the list of courses.\n course_list = soup.find_all(\n name=\"a\",\n onclick=re.compile(\"showCourse.*\"))\n\n # Select only the catoid and coid.\n catoid_re = re.compile(\"(?<=catoid=)\\d+\")\n coid_re = re.compile(\"(?<=coid=)\\d+\")\n\n # Piecewise course page url.\n course_url = \"http://catalog.rpi.edu/preview_course_nopop.php?catoid=%s&coid=%s\"\n\n # Fetch existing metadata objects from database.\n university = META.get(\"school\").get(\"name\")\n university = session.query(University)\\\n .filter(University.name==university)\\\n .first()\n departments = {department.abbreviation.lower() : department\n for department in session.query(Department)\\\n .filter(Department.university==university)\\\n .all()}\n\n # Identify relevant information for each course.\n prereqs = {}\n for course in course_list:\n\n # Generate metadata\n log.debug(course.text)\n full_title = re.compile(\"\\s+\").split(course.text)\n prefix = full_title[0]\n cnum = full_title[1]\n title = ' '.join(full_title[3:])\n title = title.replace(\"'\", \"\")\n\n # Identify coid to get description.\n href = course['href']\n catoid = catoid_re.search(href).group(0)\n coid = coid_re.search(href).group(0)\n\n # Generate a BeautifulSoup object of the course description.\n course_page = requests.get( course_url % (catoid, coid) )\n course_soup = BeautifulSoup( course_page.text )\n content = course_soup.find(class_=\"block_content\").hr.text\n\n # Clean up the description.\n description = content\n try:\n description = description[:description.index(\"Credit Hours\")]\n description = description[:description.index(\"When Offered\")]\n except:\n pass\n\n # Identify prerequisites\n # TODO: Match these up with their database entries.\n prereq_index = description.find(\"Prerequisit\")\n if prereq_index > -1:\n prereq_string = description[prereq_index:]\n description = description[:prereq_index]\n\n prereq_re = re.compile(\"\\w{2,4}\\s\\d{3}\")\n matches = re.findall(prereq_re, prereq_string)\n if len(matches) > 0:\n prereqs[\"%s %s\" % (prefix, cnum)] = matches\n\n # Clean the description string\n description_raw = description\n description = clean(description)\n if description is None:\n continue\n\n # Generate the appropriate course object.\n departments[prefix.lower()].courses.append(Course(\n number=cnum,\n title=title,\n description_raw=description_raw,\n description=description))\n\n log.info( \"Completed scraping.\" )", "title": "" }, { "docid": "276bca30cabbd727e3ea49fdc300777e", "score": "0.5064829", "text": "def download_courses(courses):\n for course in courses:\n _pluradl(course)", "title": "" }, { "docid": "060d37c628c4851e08e1b39aab32b2f9", "score": "0.5061009", "text": "def extract_document_text():\n all_document_pages = OfficialDocumentPage.objects.all()\n print(f'Beginning extraction...')\n\n for page in all_document_pages:\n # commenting out for now - print(f'Page id {page.id}')\n # Check if page body already has content, if so skip\n if len(page.body) > 0:\n continue\n if page.document and page.document.url:\n filename = 
page.document.url.split('documents')[1]\n print(f'reading {filename}')\n extracted_text = extract_text_from_url(page.document.url)\n # TODO: if the extracted_text is \"\", replace with message per content/OPO\n page.body = extracted_text\n page.save()\n else:\n print(f'Official Document Page with id {page.id} does not have a document')", "title": "" }, { "docid": "af5f189a852872ea47102d52f5781ab9", "score": "0.5059057", "text": "def convert_to_osis(text: str, bookid: str = \"TEST\") -> Tuple[str, ...]:\n # ---------------------------------------------------------------------- #\n\n description: List[str] = []\n\n # ---------------------------------------------------------------------- #\n\n # preprocessing and special spacing\n text = c2o_preprocess(text)\n\n # split text into lines for processing\n lines = text.split(\"\\n\")\n\n # mark introduction endings...\n lines = markintroend(lines)\n\n for i in enumerate(lines):\n # identification\n lines[i[0]], description = c2o_identification(lines[i[0]], description)\n\n # character style formatting\n lines[i[0]] = c2o_noterefmarkers(lines[i[0]])\n lines[i[0]] = c2o_specialtext(lines[i[0]])\n\n # special features if present, and stray \\xt tags that were missed.\n lines[i[0]] = c2o_specialfeatures(lines[i[0]])\n\n # z tags if present\n lines[i[0]] = c2o_ztags(lines[i[0]])\n\n # paragraph style formatting.\n lines[i[0]] = c2o_titlepar(lines[i[0]], bookid)\n\n # process words of Jesus\n if r\"\\wj\" in text:\n lines = c2o_processwj2(lines)\n\n # postprocessing of poetry, lists, tables, and sections\n # to add missing tags and div's.\n lines = c2o_fixgroupings(lines)\n\n # process chapter/verse markers\n lines = c2o_chapverse(lines, bookid)\n\n # postprocessing to fix some issues that may be present\n lines = c2o_postprocess(lines)\n\n descriptiontext = \"\\n\".join(description)\n\n # rejoin lines after processing\n return \"\\n\".join([_ for _ in lines if _ != \"\"]), descriptiontext", "title": "" }, { "docid": "e75dd4e4d0eb5f987ade021a06fff632", "score": "0.50526416", "text": "def updateCoursesCSV():\n with open(os.path.join(os.path.dirname(__file__), 'courses.csv'), 'w', newline='', encoding=\"UTF-8\") as file:\n n = 300\n writer = csv.writer(file, delimiter=',')\n i = 1\n courses = oisCourses.getNCourses(n, i)\n while len(courses) != 0:\n for c in courses:\n if 'title' in c:\n if 'et' in c['title']:\n t = Text(c['title']['et'].lower())\n t.tag_layer(['morph_analysis'])\n writer.writerow([\" \".join([x[0] for x in t.morph_analysis.lemma]), c['code']])\n elif 'en' in c['title']:\n t = Text(c['title']['en'].lower())\n t.tag_layer(['morph_analysis'])\n writer.writerow([\" \".join([x[0] for x in t.morph_analysis.lemma]), c['code']])\n i += n\n courses = oisCourses.getNCourses(n, i)", "title": "" }, { "docid": "7780c8b2cd65375745f271f63826712a", "score": "0.50322837", "text": "def get_description_and_comment(bibdocfiles):\n description = None\n comment = None\n all_descriptions = [bibdocfile.get_description() for bibdocfile \\\n in bibdocfiles\n if bibdocfile.get_description() not in ['', None]]\n if len(all_descriptions) > 0:\n description = all_descriptions[0]\n\n all_comments = [bibdocfile.get_comment() for bibdocfile \\\n in bibdocfiles\n if bibdocfile.get_comment() not in ['', None]]\n if len(all_comments) > 0:\n comment = all_comments[0]\n\n return (description, comment)", "title": "" }, { "docid": "8b0bc43156b2e55bd8b0b9b1c90c4672", "score": "0.50264835", "text": "def get_questionnaire_description(self, questionnaire_data, keys):\n keywords = 
self.get_description_keywords(keys)\n excerpt_data = collections.defaultdict(str)\n\n for keyword in keywords:\n for x in questionnaire_data.get(keyword.questiongroup, []):\n if x.get(keyword.question):\n for language, text in x[keyword.question].items():\n excerpt_data[language] += '{} '.format(text)\n return excerpt_data", "title": "" }, { "docid": "1f22d2dc088fb1e01979c1937cae1dfe", "score": "0.50097275", "text": "def _parse_description(self, response):\n description = response.xpath('//div[@class=\"content\"]/div[contains(@class, \"field-name-field-event-\")]/descendant-or-self::*')\n descrip_str = ''\n for d in description:\n descrip_str = ' '.join([descrip_str, ' '.join(d.xpath('text()').extract())])\n descrip_str = ' '.join([descrip_str, ' '.join(d.xpath('a/@href').extract())])\n return descrip_str.strip()", "title": "" }, { "docid": "65c0c0a15726a958dfd9962be03ea655", "score": "0.5003737", "text": "def record_story_chapters(self) -> None:\n\n id_regex = re.compile('chapter')\n new_line_regex = re.compile('\\n')\n chapters = self._soup.find_all('div', {'class': 'chapter', 'id': id_regex})\n\n for chapter in chapters:\n # todo: need to add author notes in as well\n chapter_object = Chapter()\n\n chap_name_container = chapter.find_all(True, {'class': 'chapter preface group'})[0]\n chap_name = chap_name_container.find_all('h3')[0].text\n chapter_object.name = new_line_regex.sub('', chap_name).strip()\n\n chap_text = chapter.find_all(True, {'class': 'userstuff'})[0]\n chapter_object.raw_body = chap_text.prettify()\n\n # remove the invisible heading\n heading = chapter.find_all('h3', {'class': ['landmark', 'heading']})\n for element in heading:\n element.decompose()\n chapter_object.processed_body = chap_text.prettify()\n chapter_object.word_count = len(chap_text.text.split())\n\n self._fanfic.add_chapter(chapter_object)", "title": "" }, { "docid": "3a0479265dc892a8744d38e96c5ab930", "score": "0.4978152", "text": "def _parse_description(self, response):\n category_field = response.xpath(\n \"//div[contains(., 'Category:') and contains(@class, 'field-label')]\"\n )\n field_items = category_field.xpath(\"./following::div[contains(@class, 'field-items')]\")\n return ' '.join(\n field_items.xpath('.//p/text()').extract() +\n field_items.xpath('.//strong/text()').extract()\n ).strip()", "title": "" }, { "docid": "af3583bf5c8d16410772762aff738e46", "score": "0.49771658", "text": "def read_docs() -> Generator[Doc, None, None]:\n yield from all_para(DOCS.normal)\n yield from wq_para(DOCS.csv)", "title": "" }, { "docid": "87571a626bb2e6e0dc87f0dcc06400d8", "score": "0.4975917", "text": "def get_meta_data(self):\n # results = db.documents.find({\"url\": self.url})\n #\n # if len(results) > 0:\n # results = results[0]\n # document_id = results['_id']\n\n\n\n document_cursor = db.documents.find_one({\"url\": self.url})\n if document_cursor:\n document_id = document_cursor[\"_id\"]\n else:\n document_id = None\n if document_id:\n self.db_doc = db.documents.find_one({\"_id\": document_id})\n\n self.title = self.db_doc[\"title\"]\n self.authors = self.db_doc[\"authors\"]\n self.text = self.db_doc[\"text\"]\n self.published = self.db_doc[\"publish-date\"]\n # try:\n # self.images = self.db_doc[\"images\"]\n # except:\n # print(\"images for article \" + self.url + \" not collected from database\")\n\n try:\n self.summary = self.db_doc[\"summary\"]\n except:\n print(\"summary for article \" + self.url + \" not collected from database\")\n\n try:\n self.keywords = self.db_doc[\"keywords\"]\n except:\n 
print(\"keywords for article \" + self.url + \" not collected from database\")", "title": "" }, { "docid": "803c4c0d28f19685353711f81fe448e7", "score": "0.49539548", "text": "def get_wiki_description(browser, city, wiki_collection):\n url = 'https://en.wikipedia.org/wiki/' + city.replace(' ', '_')\n try:\n browser.get(url)\n except:\n print(f'{city} NOT found on Wikipedia')\n summary = ''\n for i in range(1, 100):\n paragraphs = '//*[@id=\"mw-content-text\"]/div/p[' + str(i) + ']'\n try:\n summary += browser.find_element_by_xpath(paragraphs).text\n summary += '\\n'\n except:\n None\n wiki_dict = {'city': city, 'text': summary}\n wiki_collection.insert_one(wiki_dict)", "title": "" }, { "docid": "c46a365b413d6fca939ba14b38ce122f", "score": "0.4953722", "text": "def __get_description(self, catalog_entry):\n desc = ''\n\n if catalog_entry.program_description_clean is not None:\n desc = catalog_entry.program_description_clean\n\n return desc", "title": "" }, { "docid": "a16a095ba66b434600cbc9f382ea6198", "score": "0.49533728", "text": "def set_course_info(self):\n host = \"courses.uscden.net\"\n course_info_url = \"/d2l/le/manageCourses/widget/myCourses/\" + self.location_id + \"/ContentPartial?defaultLeftRightPixelLength=10&defaultTopBottomPixelLength=7\"\n\n # prepare post headers\n course_info_post_headers = {\n #'Host': ' courses.uscden.net',\n #'Connection': ' keep-alive',\n # 'Content-Length': ' 229',\n # 'Pragma': ' no-cache',\n # 'Cache-Control': ' no-cache',\n #'Origin': ' https://courses.uscden.net',\n #'User-Agent': ' Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4)',\n 'Content-type': ' application/x-www-form-urlencoded',\n #'Accept': '*/*',\n #'Referer': 'https://courses.uscden.net/d2l/home/'+self.location_id,\n # 'Accept-Encoding': 'gzip, deflate, br',\n # 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'Cookie': self.d2lSessionVal + '; ' + self.d2lSecureSessionVal\n }\n # prepare post data\n course_info_post_dict = {\n 'widgetId': '2',\n #'_d2l_prc$headingLevel': '3',\n #'_d2l_prc$scope': \"\",\n #'_d2l_prc$childScopeCounters': 'filtersData:0',\n #'_d2l_prc$hasActiveForm': 'false',\n #'filtersData$semesterId': 'All',\n #'isXhr': 'true',\n #'requestId': '2',\n 'd2l_referrer': self.xsrf\n }\n course_info_post_data = urllib.urlencode(course_info_post_dict)\n conn = httplib.HTTPSConnection(host, 443)\n conn.request(\"POST\", course_info_url, course_info_post_data, course_info_post_headers)\n resp = conn.getresponse()\n course_data = resp.read()\n self.term_list = Student.find_term_list(course_data)\n term_num = len(self.term_list)\n self.course_ou_list = [[]]*term_num\n self.course_list = [[]]*term_num\n if term_num == 0:\n return\n elif term_num == 1:\n stop_index = len(course_data)\n start_index = 0\n else:\n start_index = 0\n stop_index = re.compile(self.term_list[1]).search(course_data).start()\n\n for i in range(term_num):\n course_list_raw = re.compile(r'vui-link d2l-link \\S+ href\\=\\\\\\\"/d2l/lp/ouHome/home\\.d2l\\?ou=\\d+\\\\\\\" title\\=\\\\\\\"[^\"^\\\\]+').findall(course_data[start_index:stop_index])\n ou_list = []\n c_list = []\n for c in course_list_raw:\n ou_list.append(re.compile(r'ou=\\d+').search(c).group()[3:])\n c_list.append(re.compile(r'title\\=\\\\\\\"[^\"]+').search(c).group()[14:].replace('&amp;', '&'))\n\n self.course_ou_list[i] = ou_list\n self.course_list[i] = c_list\n start_index = stop_index\n if i+2 < term_num:\n stop_index = re.compile(self.term_list[i+2]).search(course_data).start()\n else:\n stop_index = len(course_data)", "title": "" }, { "docid": 
"a79924e4b513672156a4fcd0eb7459dc", "score": "0.4949158", "text": "def crawl_page(page, dept_iter, term_iter):\n tree = fromstring(page.content)\n num_courses = get_number_offered_for_term(tree)\n\n # iterates through a department during a specific term\n for i in range(1, num_courses + 1):\n course = Course(collect_dept(dept_iter), collect_nums(tree, i), collect_name(tree, i), collect_terms(term_iter),\n collect_reqs(tree, i), collect_period(tree, i), collect_professor(tree, i), collect_desc(tree, i))\n master_list.append(course)", "title": "" }, { "docid": "8f7b36347f98feb6a17bcdab4d454fa3", "score": "0.49249998", "text": "def _generate_description(self):\n generations = []\n\n for j, templ in enumerate(self.template):\n result = []\n\n for key in templ:\n # get the text form from template object.\n item = key.generate_description(arg_index=0, templ_index=j)\n\n if not item:\n continue\n\n # Flatten if nested dict.\n if type(item) in [OrderedDict, dict]:\n val_list = list(values_of_nested_dict(item))\n result.extend(val_list)\n else:\n result.append(item)\n generations.append(\" \".join(result))\n\n return generations", "title": "" }, { "docid": "5351e0026bc35ac92d8cc554caa89815", "score": "0.49216616", "text": "def _read_description(self, geocache_tree):\n\n description_short = \"\"\n if self.source == \"downloader\":\n description_short = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}short_description\").text\n elif self.source == \"geocaching.com\":\n description_short = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}short_description\").text\n if description_short:\n description_short = ownfunctions.replace_signs(description_short)\n else:\n description_short = \"\"\n\n description_long = \"\"\n if self.source == \"downloader\":\n description_long = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}long_description\").text\n elif self.source == \"geocaching.com\":\n description_long = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}long_description\").text\n if description_long:\n description_long = ownfunctions.replace_signs(description_long)\n else:\n description_long = \"\"\n\n return description_short + \"\\n\\n\" + description_long", "title": "" }, { "docid": "2c6612107331ba2d7b8313053b20069f", "score": "0.49178073", "text": "def generate_documentation(self):\n information_translator = bkrdoc.analysis.GetInformation()\n for information in self.documentation_units:\n if information:\n self.phase_documentation_information.append(information_translator.get_information_from_facts(information))", "title": "" }, { "docid": "8fd6f34f94b1688dbb1ce77ceb8b3796", "score": "0.4910362", "text": "def truncate_descriptions_task():\n articles = Article.objects.all()\n\n for article in articles:\n logger.info(\"Truncating Description for \" + article.title)\n\n article.description_truncated = truncatewords_html(article.description, 150)\n\n article.save()", "title": "" }, { "docid": "7df42da4fb1268986e059e1fca811b52", "score": "0.49091297", "text": "def parse_course_data(filename):\r\n created_courses = {}\r\n with open(filename, 'r') as open_file:\r\n\r\n for line in open_file:\r\n line = line.strip()\r\n courses = line.split()\r\n if line != \"\" and len(courses) == 2:\r\n # If the course does not exist, create a new course\r\n if courses[1] not in created_courses:\r\n # Create Course object and store in dictionary\r\n created_courses[courses[1]] = Course(courses[1])\r\n if courses[0] not in created_courses:\r\n 
created_courses[courses[0]] = Course(courses[0])\r\n\r\n # If courses[0] is not a prereq of courses[1], add courses[0]\r\n # as a prereq to courses[1]\r\n if not created_courses[courses[0]].\\\r\n is_prereq(created_courses[courses[1]]):\r\n created_courses[courses[1]].\\\r\n add_prereq(created_courses[courses[0]])\r\n\r\n # Return the root course\r\n return get_root_course(created_courses)", "title": "" }, { "docid": "180d088e1442715c629c6b13cd931b89", "score": "0.49062607", "text": "def cal_required_elective_courses_forstudent(self):\n path1:str=os.path.join(self._path, \"students.txt\")\n try:\n open_file1 = file_reader(path1, fields=3, sep=\"\\t\",header=True)\n for cwid,name,major in open_file1:\n self._students[cwid].add_courses(self.mainmajor[major].result_return_major())\n\n except ValueError as e:\n print(e)\n except FileNotFoundError as fe:\n print(fe)", "title": "" }, { "docid": "6fabfd6de1117c0d02c7868ac6c94201", "score": "0.49056607", "text": "def __get_description_types(self):\n self.description_type_desc, created_desc = ProgramDescriptionType.objects.get_or_create(\n name='Catalog Description'\n )\n\n if created_desc:\n self.stdout.write(\n self.style.NOTICE(\n \"Created \\\"Catalog Description\\\" description type\"\n )\n )\n\n self.description_type_desc_full, created_desc_full = ProgramDescriptionType.objects.get_or_create(\n name='Full Catalog Description'\n )\n\n if created_desc_full:\n self.stdout.write(\n self.style.NOTICE(\n \"Created \\\"Full Catalog Description\\\" description type\"\n )\n )\n\n self.description_type_source_desc, created_source_desc = ProgramDescriptionType.objects.get_or_create(\n name='Source Catalog Description'\n )\n\n if created_source_desc:\n self.stdout.write(\n self.style.NOTICE(\n \"Created \\\"Source Catalog Description\\\" description type\"\n )\n )\n\n self.description_type_source_curriculum, created_source_curriculum = ProgramDescriptionType.objects.get_or_create(\n name='Source Catalog Curriculum'\n )\n\n if created_source_curriculum:\n self.stdout.write(\n self.style.NOTICE(\n \"Created \\\"Source Catalog Curriculum\\\" description type\"\n )\n )", "title": "" }, { "docid": "bbe0e92e7e90c7f1f29fbbc2db863778", "score": "0.4905648", "text": "def get_courses(self, browser):\n logger.info(\"Checking pages on category {}\".format(self.name))\n for i in range(1, self.maxpages + 1):\n tries = 0\n done = False\n clean_courses = []\n while not done:\n if not os.path.exists(\"files_{}/courses_{}_{}.csv\".format(\n self._get_datetime_now(),\n self.name,\n i)\n ):\n try:\n browser.get(\"{}?p={}\".format(self.url, i))\n courses = browser.find_elements_by_class_name(\n \"browse-course-card--link--3KIkQ\"\n )\n for course in courses:\n url = course.get_attribute(\"href\")\n detail = course.find_element_by_class_name(\n \"course-card--large--1BVxY\"\n )\n name_course = detail.find_element_by_class_name(\n \"udlite-focus-visible-target\"\n ).text\n prize = detail.find_element_by_class_name(\n \"price-text--price-part--Tu6MH\"\n ).text.split(\"\\n\")[1]\n rank = detail.find_element_by_class_name(\n \"course-card--row--1OMjg\"\n ).find_element_by_class_name(\n \"star-rating--rating-number--3lVe8\"\n ).text\n c = Course(\n self.name,\n name_course,\n prize,\n url,\n rank\n )\n clean_courses.append(c)\n self._save_courses(self.name, i, clean_courses)\n done = True\n except NoSuchElementException:\n tries += 1\n logger.warning(\n 'Page {} on {} tried {}/5'.format(\n i,\n self.name,\n tries\n )\n )\n if tries >= 5:\n done = True\n logger.error('Page {} on {} 
failed.'.format(\n i,\n self.name\n )\n )\n else:\n done = True\n self.ready += 1\n browser.close()", "title": "" }, { "docid": "7624b2c2800da4b643921f869f5a9bae", "score": "0.48943287", "text": "def update_description(self):\n exec_string = \"DELETE FROM info WHERE name='description'\" \n if DEBUG_LEVEL > 0:\n print (\"execute db : {0}\".format(exec_string))\n self.db_cursor.execute(exec_string)\n exec_string = \"\"\"INSERT INTO info VALUES ( 'description', '{0}' )\"\"\".format(self.get_db_description_text())\n if DEBUG_LEVEL > 0:\n print (\"execute db : {0}\".format(exec_string))\n self.db_cursor.execute(exec_string)", "title": "" }, { "docid": "e538237b64b327b0b7cf9bd31c4e47e3", "score": "0.48829094", "text": "def read_descriptions(self):\r\n if not os.path.exists(self.descriptions_path):\r\n logger.warning(f'descriptions path does not exist.')\r\n self.descriptions = pd.DataFrame()\r\n else:\r\n self.descriptions = pd.read_excel(self.descriptions_path)\r\n self.descriptions = self.descriptions.set_index('Unnamed: 0', drop=True)", "title": "" }, { "docid": "6fc41182ef098a8f8ab1a022467ffec0", "score": "0.48779327", "text": "def parse_courses(self):\n with open(self.data_dir + 'courses.csv', newline='') as csvfile:\n course_reader = csv.DictReader(csvfile)\n course_dict = dict()\n for course in course_reader:\n course_dict[course['id']] = {'name': course['name'], 'teacher': course['teacher']}\n return course_dict", "title": "" }, { "docid": "65c59949da1dab84e9cc920e06970308", "score": "0.48749024", "text": "def test_get_course_for_section(self):\n pass", "title": "" }, { "docid": "69a51e2ab27acd97e7657575ab7943a0", "score": "0.48741475", "text": "def description(self):\n lines = []\n for line in self.__doc__.split('\\n')[2:]:\n line = line.strip()\n if line:\n lines.append(line)\n return ' '.join(lines)", "title": "" }, { "docid": "282ae5120449f2beb94d20354ae04318", "score": "0.48621464", "text": "def add_courses_to_calendar(self):\n for course in self.target_parser.courses:\n for meeting in course.meeting_times:\n self.ics_calendar.add_component(self.create_course(course, meeting))", "title": "" }, { "docid": "dcb19fa1a96f80fe0b60bcc98b51e3fc", "score": "0.48576903", "text": "def coursetofile(self, filename):\n print('\\nWriting course to the file ' + filename)\n f = open(filename, \"w+\")\n f.write(self.title_full + \"\\n\")\n for section in self.sections:\n f.write(section.title + \"\\n\")\n for session in section.sessions:\n f.write(session.title + \"\\n\")\n f.write(session.content + \"\\n\")\n f.close()", "title": "" }, { "docid": "d0375c53be35232c39beacffbe1aaa42", "score": "0.48576567", "text": "def get_description():\n desc = {}\n desc[\"data\"] = False\n desc[\n \"description\"\n ] = \"\"\"This application attempts to assess the\n effectiveness of a calendar day's rainfall based on where the rain fell\n in relation to a previous period of days departure from climatology. So\n for a given date and state, the areal coverage of daily precipitation\n at some given threshold is compared against the departure from climatology\n over some given number of days. The intention is to answer a question like\n how much of the rain on a given day fell on an area that needed it! 
The\n areal coverage percentages are relative to the given state.\n \"\"\"\n today = datetime.datetime.today() - datetime.timedelta(days=1)\n desc[\"arguments\"] = [\n dict(\n type=\"csector\", name=\"sector\", default=\"IA\", label=\"Select Sector:\"\n ),\n dict(\n type=\"date\",\n name=\"date\",\n default=today.strftime(\"%Y/%m/%d\"),\n label=\"Date:\",\n min=\"2011/01/01\",\n ),\n dict(\n type=\"int\",\n name=\"trailing\",\n default=31,\n label=\"Over how many trailing days to compute departures?\",\n ),\n dict(\n type=\"float\",\n name=\"threshold\",\n default=0.1,\n label=\"Date Precipitation Threshold (inch)\",\n ),\n ]\n return desc", "title": "" }, { "docid": "77baa2d5123a53ce4c045045a4e59aeb", "score": "0.48502633", "text": "def get_course_summary(self, course_dict: Dict[str, str]) -> Tuple[Set[str], Set[str], Set[str]]:\n courses = set(\n [course for course, grade in course_dict.items() if grade in self.passing_grades])\n\n if courses.intersection(self._electives) == set():\n return courses, self._required.difference(courses), self._electives\n else:\n return courses, self._required.difference(courses), {}", "title": "" }, { "docid": "0e12c9c486cb917772206e1de036a367", "score": "0.4848594", "text": "def collect_description_data(self, language):\n return [\n [tmf('label_task_title', u'Task title'), self.context.title],\n [tmf('label_deadline', u'Deadline'),\n api.portal.get_localized_time(str(self.context.deadline))],\n [tmf('label_text', u'Text'), self.context.text]\n ]", "title": "" }, { "docid": "4f97ebbc0527ae2088b6037e83a87ba4", "score": "0.4839508", "text": "def get_description(self):", "title": "" }, { "docid": "a0645d9a91825a5fdc3fc552cd14492d", "score": "0.48346865", "text": "def test_templates_organization_detail_meta_description_description(self):\n organization = OrganizationFactory()\n page = organization.extended_object\n\n # Add a description to the organization\n placeholder = organization.extended_object.placeholders.get(slot=\"description\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"CKEditorPlugin\",\n body=\" A <b>longer</b> organization description \",\n )\n page.publish(\"en\")\n\n url = organization.extended_object.get_absolute_url()\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A longer organization description\" />',\n )\n\n # Add an excerpt to the organization\n placeholder = organization.extended_object.placeholders.get(slot=\"excerpt\")\n add_plugin(\n language=\"en\",\n placeholder=placeholder,\n plugin_type=\"PlainTextPlugin\",\n body=\" A longer organization excerpt \",\n )\n page.publish(\"en\")\n\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n self.assertContains(\n response,\n '<meta name=\"description\" content=\"A longer organization excerpt\" />',\n )", "title": "" }, { "docid": "382cf1d548ca1c77825ae9f942029bf3", "score": "0.48342228", "text": "def __make_description_with_segments(self):\n\n html = []\n desc_result = self.resource.select.read(self.resource.type_name + \"_Description\",\n where=[[\"Description_ID\",\n \"=\",\n self.resource.description_id]]\n )\n values = desc_result.fetchone()\n station_1_id = values['Station_1_ID']\n station_2_id = values['Station_2_ID']\n connection_id = values['Connection_ID']\n\n\n html.append(\"<h2>Station A</h2>\")\n html.append(self.__make_generic_module(feature=None,\n table_name=self.resource.type_name + 
\"_Station_Description\",\n _id=(\"Station_ID\", station_1_id), dual=1))\n\n html.append(\"<h2>\" + str(self.resource.type_name) + \"</h2>\")\n html.append(self.__make_generic_module(feature=None,\n table_name=self.resource.type_name + \"_Connection_Description\",\n _id=(\"Connection_ID\", connection_id)))\n html.append(\"<h2>Station B</h2>\")\n html.append(self.__make_generic_module(feature=None,\n table_name=self.resource.type_name + \"_Station_Description\",\n _id=(\"Station_ID\", station_2_id), dual=2))\n\n return \"\".join(html)", "title": "" }, { "docid": "be2a17665c9cac8ea9103c912052d232", "score": "0.4829322", "text": "def _extract_info(self, content, capturing_group):\n return re.match(self.configuration[\"COURSE_REGEX\"], content).group(\n capturing_group\n )", "title": "" }, { "docid": "c810949def2b395b520142c4ff952bf8", "score": "0.48208857", "text": "def sort_document_properties():\n config = Config()\n\n LOGGER.info('Parsing documents')\n for collection in config.collections():\n LOGGER.info('Collection: %s', collection.name)\n for path, document in collection.documents():\n LOGGER.info(\"Writing fixed file %s\", path)\n with codecs.open(path, encoding='utf-8', mode='w') as f:\n f.write(object2jekyll(document, 'description'))\n LOGGER.warning('Done')", "title": "" }, { "docid": "19cbc4f208feff641088407bad65f831", "score": "0.4816295", "text": "def descriptions(self, descriptions):\n\n self._descriptions = descriptions", "title": "" }, { "docid": "19cbc4f208feff641088407bad65f831", "score": "0.4816295", "text": "def descriptions(self, descriptions):\n\n self._descriptions = descriptions", "title": "" }, { "docid": "2d62dc8b188de63a7d6478d9bea414c2", "score": "0.48134148", "text": "def getEditions(TOChtml,docDic,language,date):\n sectionNames = [\"ANDERE BESLUITEN\",\"WETTEN, DECRETEN, ORDONNANTIES EN VERORDENINGEN\",\"OFFICIELE BERICHTEN\",\"AGENDA'S\",\"AUTRES ARRETES\",\"LOIS, DECRETS, ORDONNANCES ET REGLEMENTS\",\"AVIS OFFICIELS\",\"ORDRES DU JOUR\"]\n regionNames = ['VLAAMSE GEMEENSCHAP','DUITSTALIGE GEMEENSCHAP','WAALS GEWEST','BRUSSELS HOOFDSTEDELIJK GEWEST','FRANSE GEMEENSCHAP','VLAAMS GEWEST','REGION WALLONNE','REGION DE BRUXELLES-CAPITALE','COMMUNAUTE FLAMANDE',u'COMMUNAUTE FRAN\\xc7AISE','COMMUNAUTE GERMANOPHONE','REGION FLAMANDE',\"DEUTSCHSPRACHIGE GEMEINSCHAFT\",\"WALLONISCHE REGION\",\"DEUTSCHPRACHIGE GEMEINSCHAFT\"]\n levelNames = ['GEMEENSCHAPS- EN GEWESTREGERINGEN','GOUVERNEMENTS DE COMMUNAUTE ET DE REGION','GEMEINSCHAFTS- UND REGIONALREGIERUNGEN']\n editions = TOChtml.split(\"<a name=EDITION\")\n for edition in editions:\n if re.match(\"\\d>EDITI\",edition):\n curEdition = edition[0]\n else:\n curEdition = '0'\n regex = re.compile(\"<A name=(\\d+)></a>(.+?)<input type=submit name=numac value=(\\d+)\",re.DOTALL)\n articles = regex.findall(edition)\n curSection = 'NA'\n curRegion = 'NA'\n curSource = 'NA'\n curLevel = 'NA'\n curTitle = 'NA'\n url = 'NA'\n for article in articles:\n if (len(article) == 3):\n if article[0] == article[2]:\n numac = article[0]\n lines = stripHTMLTags(article[1])\n lines = lines.split('\\n')\n title = []\n section = []\n region = []\n level = []\n source = []\n for line in lines:\n line = line.strip()\n if (len(line) > 0) and (line[0] != '<'):\n if float(len(re.findall('[A-Z]',line)))/float(len(line))> 0.5: # if it is a Section title or source\n if line in sectionNames:\n section.append(line)\n elif line in regionNames:\n region.append(line)\n elif line in levelNames:\n level.append(line)\n else:\n source.append(line)\n else:\n 
title.append(line)\n if len(section) > 0:\n curSection = ' '.join(section)\n curRegion = 'NA'\n curSource = 'NA'\n if len(level) > 0:\n curLevel = ' '.join(level)\n curRegion = 'NA'\n curSource = 'NA'\n if len(region) > 0:\n curRegion = ' '.join(region)\n curSource = 'NA'\n if len(source) > 0:\n curSource = ' '.join(source)\n if len(title) > 0:\n curTitle = ' '.join(title)\n artID = date+'-'+str(numac)\n if not docDic.has_key(artID):\n docDic[artID] = {'nl':{},'fr':{},'du':{}}\n docDic[artID][language]['edition'] = curEdition\n docDic[artID][language]['section'] = curSection\n docDic[artID][language]['region'] = curRegion\n docDic[artID][language]['source'] = curSource\n docDic[artID][language]['title'] = curTitle\n docDic[artID][language]['article'] = getArticle(date, numac,language)\n url = str(\"http://www.ejustice.just.fgov.be/cgi/article_body.pl?\" +\"language=\"+language+\"&caller=summary&pub_date=\" + date + \"&numac=\" + numac)\n docDic[artID][language]['url'] = url\n curTitle = 'NA'\n url = 'NA'", "title": "" }, { "docid": "729f420148a4616ccc25da8701325143", "score": "0.48089322", "text": "def scrape_course(self, course):\n print(course)\n\n department = course[:course.index(\" \")]\n course_code = course[course.index(\" \") + 1:]\n\n self.driver.get(\"https://thecriticalreview.org/search/\" + department + \"/\" + course_code)\n soup = BeautifulSoup(self.driver.page_source, 'lxml')\n\n course_review_info = {}\n course_review_info[\"courseCode\"] = course\n\n try:\n # no reviews for the course\n no_reviews = self.driver.find_element_by_css_selector(\"#unreturned_banner_message\")\n self.courses.append(course_review_info)\n except NoSuchElementException:\n course_rating = soup.find('div', class_=\"c_rating\")[\"data-num\"]\n course_review_info[\"courseRating\"] = course_rating\n\n course_data = soup.find('div', class_=\"review_data\")\n reviews = course_data[\"data-test-value\"]\n\n # demographic data\n course_review_info[\"freshmen\"] = course_data[\"data-frosh\"]\n course_review_info[\"sophomores\"] = course_data[\"data-soph\"]\n course_review_info[\"juniors\"] = course_data[\"data-jun\"]\n course_review_info[\"seniors\"] = course_data[\"data-sen\"]\n course_review_info[\"gradStudents\"] = course_data[\"data-grad\"]\n\n if reviews == \"\":\n self.courses.append(course_review_info)\n return\n\n reviews = json.loads(reviews)\n\n # hours spent on course\n if \"minhours\" in reviews:\n course_review_info[\"avgHrs\"] = self.avg(reviews[\"minhours\"])\n if \"maxhours\" in reviews:\n course_review_info[\"maxHrs\"] = self.avg(reviews[\"maxhours\"])\n\n # course feedback statistics\n if \"readings\" in reviews:\n course_review_info[\"readingsWorthwhile\"] = self.avg(reviews[\"readings\"])\n if \"class-materials\" in reviews:\n course_review_info[\"materialsUseful\"] = self.avg(reviews[\"class-materials\"])\n if \"difficult\" in reviews:\n course_review_info[\"difficult\"] = self.avg(reviews[\"difficult\"])\n if \"learned\" in reviews:\n course_review_info[\"learnedALot\"] = self.avg(reviews[\"learned\"])\n if \"loved\" in reviews:\n course_review_info[\"enjoyedCourse\"] = self.avg(reviews[\"loved\"])\n if \"grading-speed\" in reviews:\n course_review_info[\"timelyGrading\"] = self.avg(reviews[\"grading-speed\"])\n if \"grading-fairness\" in reviews:\n course_review_info[\"fairGrading\"] = self.avg(reviews[\"grading-fairness\"])\n\n # concentrator demographics\n if \"conc\" in reviews:\n course_review_info[\"concentratorYes\"] = self.count_elem(reviews[\"conc\"], \"C\")\n 
course_review_info[\"concentratorNo\"] = self.count_elem(reviews[\"conc\"], \"N\")\n course_review_info[\"concentratorMaybe\"] = self.count_elem(reviews[\"conc\"], \"D\")\n\n # taken as requirement?\n if \"requirement\" in reviews:\n course_review_info[\"requirementYes\"] = self.count_elem(reviews[\"requirement\"], \"Y\")\n course_review_info[\"requirementNo\"] = self.count_elem(reviews[\"requirement\"], \"N\")\n\n # expected grades\n if \"grade\" in reviews:\n course_review_info[\"expectedA\"] = self.count_elem(reviews[\"grade\"], \"A\")\n course_review_info[\"expectedB\"] = self.count_elem(reviews[\"grade\"], \"B\")\n course_review_info[\"expectedC\"] = self.count_elem(reviews[\"grade\"], \"C\")\n course_review_info[\"expectedS\"] = self.count_elem(reviews[\"grade\"], \"S\")\n course_review_info[\"expectedNC\"] = self.count_elem(reviews[\"grade\"], \"NC\")\n\n self.courses.append(course_review_info)", "title": "" }, { "docid": "72c1fc14666f2a7ed3fc3717646458c8", "score": "0.48037595", "text": "def fetch_desc_costco(self):\n description = ''\n try:\n \t#Extract description from URL for costco\n link = self.product_link.replace(\"https://www.costco.com/\",\"\")\n for ch in link:\n if(ch!=\".\"):\n description += ch\n else:\n break\n description = description.replace('-',' ')\n except:\n description = ''\n return description", "title": "" }, { "docid": "cf59ed3dad0a1903e1a6183aed5ff7c3", "score": "0.4800775", "text": "def getDescription(self, metadata):\n description = '== {{int:filedesc}} ==\\n'\n description += '{{Artwork|wikidata=%(item)s}}\\n' % metadata\n description += '\\n=={{int:license-header}}==\\n'\n description += '{{Yale Digital Dura-Europos Archive}}\\n'\n description += '{{Cc-zero}}\\n'\n description += '[[Category:Objects from Dura Europos in the Yale University Art Gallery]]\\n'\n return description", "title": "" }, { "docid": "298155518858ea76b8613941f44817e3", "score": "0.4797285", "text": "def load_description(path_dir=PATH_ROOT, filename='DOCS.md'):\n with open(os.path.join(path_dir, filename)) as f:\n long_description = f.read()\n return long_description", "title": "" }, { "docid": "97922e8004d07b7a9eb322a546e613b0", "score": "0.4788354", "text": "def _write_grid_description(self):\n logging.info(\"\\t\\t Writing echam6 grid description to generic file atmosphere.griddes...\")\n logging.info(\"\\t\\t * generatic griddes\")\n griddes = self.CDO.griddes(input=self.files[\"couple\"][\"atmosphere_file_for_ice\"].src)\n ofile = open(self.couple_dir+\"/griddes_file\", \"w\")\n ofile.write(\"\\n\".join(griddes))\n ofile.flush()\n self.files[\"couple\"][\"atmosphere_grid_description\"] = ComponentFile(src=ofile.name,\n dest=self.couple_dir+\"/atmosphere.griddes\")\n self._cleanup_list.append(ofile.name)\n logging.info(\"\\t\\t ...done!\")", "title": "" }, { "docid": "96868f6eb0ee5481f56674a64aa1c4b0", "score": "0.4762315", "text": "def save(self, *args, **kwargs):\n if self.gen_description:\n self.description = strip_tags(self.description_from_content())\n super(MetaData, self).save(*args, **kwargs)", "title": "" }, { "docid": "d679d338d0bf8a5bc2c3c56ad148d5fc", "score": "0.47541976", "text": "def get_college_dict(text_rows, url):\n college = {}\n for row in text_rows:\n i = 0\n key = \"\"\n info = \"\"\n for col in row:\n if i == 0:\n key = col\n else:\n info += col\n i += 1\n info = info.rstrip(' ')\n if key != \"\":\n college[key] = info\n try:\n college['Name'] = url.replace('_', ' ').split('/')[-1:][0]\n except:\n print \"Could not get Name of the College from URL\"\n 
sys.exit(2)\n\n return college", "title": "" }, { "docid": "9c31d165665e5108f2e27da8af87ddc0", "score": "0.47517684", "text": "def test016b_description(self):\r\n w = self.doc['WR-P-E-J-0000000001.p.1.s.1.w.7']\r\n self.assertRaises( folia.NoDescription, w.description)", "title": "" }, { "docid": "5e59463a2c0b7b7023b50e2e761b937e", "score": "0.4748506", "text": "def parse_docstr(docstr):\n\n # Indentation level ......................................................\n\n def indentation(line):\n \"\"\"\n Returns the index of the first non-space character\n\n Parameters\n ----------\n line : str\n String to find the indentation level from\n\n Returns\n -------\n Index of the first non-space character or None if there is no char\n\n \"\"\"\n\n strip = len(line.lstrip())\n\n if strip:\n return (len(line) - strip) // 4\n else:\n return None\n\n # Case with empty string .................................................\n\n if docstr is None or docstr.strip() == '':\n return \"No documentation found for this section\"\n\n html = ''\n\n # Split Lines ............................................................\n\n all_lines = docstr.split('\\n')\n\n # Finding minimum indent level ...........................................\n\n min_ = -1\n\n for line in all_lines:\n if line.strip() != '':\n i = indentation(line)\n if min_ == -1:\n min_ = i\n elif i < min_:\n min_ = i\n\n all_lines = [x[4 * min_:] for x in all_lines]\n\n # Parsing sections .......................................................\n\n all_sections = [] # List containing sections. Each section is a dict\n # with a name fiels and a content field\n\n i_l = len(all_lines) - 1\n last_content = i_l\n for line in all_lines[::-1]:\n if i_l:\n if all(x == '-' for x in line.strip()) and len(line.strip()) > 0:\n if indentation(line) == indentation(all_lines[i_l - 1]):\n if len(line.strip()) <= len(all_lines[i_l - 1].strip()):\n content = '\\n'.join(all_lines[i_l + 1:last_content])\n section = {'name': all_lines[i_l - 1].strip(),\n 'content': content}\n last_content = i_l - 1\n all_sections.append(section)\n i_l -= 1\n\n # If Description section is not explicit\n if last_content > 1 and \\\n ''.join([x.strip() for x in all_lines[:last_content]]) != '':\n content = '\\n'.join(all_lines[:last_content])\n section = {'name': 'Description',\n 'content': content}\n all_sections.append(section)\n\n # Parsing to HTML ........................................................\n\n for section in all_sections[::-1]:\n html += \"<h3>\" + section['name'] + \"</h3>\"\n\n # Parsing section content . . . . . . . . . . . . . . . . . . . . . 
.\n\n sect_lines = section['content'].split('\\n')\n last_indent = 0\n code = False # True if it's a code division \">>> foo(bar)\"\n list_ = False # True if there is a list in the docstring\n code_indent0 = 0\n list_indent0 = 0\n last_line_was_empty = False\n open_div = 0\n\n for i_l, line in enumerate(sect_lines):\n if line.strip() != '':\n # Finding indent evolution\n if indentation(line) > last_indent and not code:\n delta = indentation(line) - last_indent\n while delta > 0:\n html += '<div class=\"indent\">'\n open_div += 1\n delta -= 1\n\n elif indentation(line) < last_indent and not code and \\\n not list_ or code and \\\n indentation(\n line) < code_indent0 or list_ and not code:\n if code:\n html += \"</div>\"\n code = False\n if list_:\n if indentation(line) < list_indent0:\n html += \"</li></ul>\"\n list_indent0 = 0\n list_ = False\n for _ in range(last_indent - indentation(line)):\n html += '</div>'\n open_div -= 1\n\n content = line\n content = content.replace('&', '&amp;')\n\n last_indent = indentation(line)\n\n if code:\n html += \"<br>\"\n\n # Code section\n if content.strip()[:3] == '>>>' and not code:\n code = True\n code_indent0 = indentation(line)\n if not last_line_was_empty:\n html += \"<br>\"\n html += '<div class=\"code\">'\n\n if code:\n content = content[4 * code_indent0:]\n content = content.replace(' ', '&nbsp;')\n else:\n content = line.strip()\n content = content.replace('<', '&lt;').replace('>', '&gt;')\n content = content.replace(\"'\", '&apos;')\n content = content.replace('\"', \"&quot;\")\n\n # finding URLs\n urls = set(re.findall(\n r\"\"\"(?i)\\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\\s()<>{}\\[\\]]+|\\([^\\s()]*?\\([^\\s()]+\\)[^\\s()]*?\\)|\\([^\\s]+?\\))+(?:\\([^\\s()]*?\\([^\\s()]+\\)[^\\s()]*?\\)|\\([^\\s]+?\\)|[^\\s`!()\\[\\]{};:'\".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|u
s|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\\b/?(?!@)))\"\"\",\n content))\n for url in urls:\n rep = '<a href=\"' + url + '\">' + url + \"</a>\"\n content = content.replace(url, rep)\n\n # List section\n if content[:2] in ['- ', '* '] and not code:\n content = content[2:]\n if not list_:\n html += '<ul>'\n list_ = True\n list_indent0 = indentation(line)\n else:\n html += \"</li>\"\n html += '<li>'\n\n # Bold before ':'\n if not code and \"</a>\" not in content:\n sp = content.split(':')\n if len(sp) > 1:\n content = \"<b>\" + sp[0] + \"</b>\" + ':' + \\\n ':'.join(sp[1:])\n\n # Adding content\n html += ' ' + content\n last_line_was_empty = False\n\n # Empty line\n else:\n if code:\n html += \"</div>\"\n code = False\n if list_:\n for _ in range(last_indent - list_indent0):\n html += \"</div>\"\n open_div -= 1\n last_indent = list_indent0\n html += \"</li></ul>\"\n list_ = False\n if i_l != 0:\n html += \"<br>\"\n last_line_was_empty = True\n\n # Closing section\n if code:\n html += \"</div>\"\n code = False\n if list_:\n html += \"</li></ul>\"\n list_ = False\n\n while open_div > 0:\n html += \"</div>\"\n open_div -= 1\n return html", "title": "" }, { "docid": "147dbc4b8b90da3c14b40958a8178de9", "score": "0.4743189", "text": "def get_course_list(self, step_index, courses):\n response = self.browser.get(\"https://student.osiris.hro.nl:9021/osiris_student/OnderwijsZoekCursus.do\", params={\n \"event\": \"goto\",\n \"source\": \"OnderwijsZoekCursus\",\n \"value\": step_index + 1,\n \"size\": \"30\",\n \"partialTargets\": \"OnderwijsZoekCursus _uixState\",\n \"partial\": \"true\",\n \"requestToken\": self.get_request_token()\n })\n\n for index, row in enumerate(BeautifulSoup(response.text, \"lxml\").find(\"table\", {\"class\": \"OraTableContent\"}).find_all(\"tr\")):\n if not index == 0:\n course = self.get_course_info(course_row=row)\n courses.append(course)\n\n self.lock.acquire()\n print(\"[+] Scraped course metadata: %s (%s)\" % (course[\"korteNaamCursus\"], course[\"cursuscode\"]))\n self.lock.release()", "title": "" }, { "docid": "d3ca67b182b130873ed6374312ff938e", "score": "0.47429794", "text": "def add_course(code, name, misc, descript):\n c = list(db.courses.find({\"code\": code}))\n if not c:\n course = {\"code\": code,\n \"name\": name,\n \"misc\": misc,\n \"description\": descript}\n db.courses.insert(course)\n return True\n print 'db add course'\n return False", "title": "" }, { "docid": "e321840cf94f40f831c256d0270643b0", "score": "0.47382024", "text": "def test_course(self):\n course = CourseInstanceFactory()\n [res_course] = list(self.wrapper.courses())\n\n self.assertEqual(res_course.id, course.id)\n self.assertEqual(res_course.name, course.name)\n self.assertEqual(res_course.short_name, course.short_name)\n self.assertEqual(res_course.points, course.points)\n self.assertEqual(res_course.has_exam, course.has_exam)\n self.assertEqual(res_course.description, course.description)\n self.assertEqual(res_course.language, course.language)\n self.assertEqual(res_course.semester, course.semester.id)\n self.assertEqual(res_course.course_type, course.course_type.short_name)\n self.assertEqual(res_course.usos_kod, course.usos_kod)", "title": "" }, { "docid": "981da650518f33b2becae61599f76b1b", "score": "0.4735454", "text": "def prepare(doc):\n doc.headers = [0, 0, 0, 0, 0, 0]\n doc.aliases = [\"\", \"\", \"\", \"\", \"\", \"\"]\n doc.information = {}\n doc.defined = {}\n\n if \"pandoc-numbering\" in doc.metadata.content and isinstance(\n 
doc.metadata.content[\"pandoc-numbering\"], MetaMap\n ):\n for category, definition in doc.metadata.content[\n \"pandoc-numbering\"\n ].content.items():\n if isinstance(definition, MetaMap):\n add_definition(category, definition, doc)\n\n doc.count = {}\n doc.collections = {}", "title": "" }, { "docid": "cb5bdaccc9010bfcf914fcd7ed48068c", "score": "0.47305542", "text": "def describe(description):\n if \"experiment\" not in _eh:\n raise ValueError(\"Call flexp.setup first! flexp.describe after that\")\n with open(_eh[\"experiment\"].get_file_path(\"description.txt\"), \"wt\") as fx:\n print(description, file=fx)", "title": "" }, { "docid": "f5b0dd6ae027a9534cf369473672a01f", "score": "0.47261122", "text": "def description_from_content(self):\n description = \"\"\n # Fall back to the title if description couldn't be determined.\n if not description:\n description = str(strip_tags(self.content))\n # Strip everything after the first block or sentence.\n ends = (\"</p>\", \"<br />\", \"<br/>\", \"<br>\", \"</ul>\",\n \"\\n\", \". \", \"! \", \"? \")\n for end in ends:\n pos = description.lower().find(end)\n if pos > -1:\n description = TagCloser(description[:pos]).html\n break\n else:\n description = truncatewords_html(description, 150)\n try:\n description = unicode(description)\n except NameError:\n pass # Python 3.\n return description", "title": "" }, { "docid": "07725fea8405f335c47b19c80df9bb7f", "score": "0.47248343", "text": "def write_descr(self, f, design):\n bdt = time.localtime(design.design_attributes.metadata.updated_timestamp)\n datestr = time.strftime('%d %b %Y', bdt).lower()\n f.write('''\\\n$Descr A4 11700 8267\nencoding utf-8\nSheet 1 1\nTitle \"\"\nDate \"%s\"\nRev \"\"\nComp \"\"\nComment1 \"\"\nComment2 \"\"\nComment3 \"\"\nComment4 \"\"\n$EndDescr\n''' % (datestr,))", "title": "" }, { "docid": "5bf2919b3ffb7af69aa3222e23efc597", "score": "0.4722489", "text": "def description(self):\n return self._book_dict['description']", "title": "" }, { "docid": "166fbea4e4c9e947404c9201612cb082", "score": "0.4722284", "text": "def test_catalog(self):\n with Transaction():\n factory = self.root.manage_addProduct['silva.app.document']\n factory.manage_addDocument('document', 'Test Document')\n\n version = self.root.document.get_editable()\n save_editor_text(version.body, HTML_CATALOG, content=version)\n\n # Test appear in the title.\n self.assertItemsEqual(\n search(fulltext='Test'),\n ['/root/document/0'])\n\n # Catalog appear in the body text.\n self.assertItemsEqual(\n search(fulltext='catalog'),\n ['/root/document/0'])", "title": "" }, { "docid": "3d44e826a28e6c8519b2eacf1fb2d0fb", "score": "0.47211483", "text": "def record_story_chapters(self) -> None:\n # get the chapters\n for chapter in self.chapter_list:\n time.sleep(self._chapter_sleep_time)\n chapter_object = Chapter()\n self.log_debug(\"Downloading chapter: \" + chapter['link'])\n self._update_soup(url=self._url[0:-1]+chapter['link'])\n chapter_text = \"\"\n chapter_count = 0\n story_tag = self._soup.find(id=\"storytextp\")\n for content in story_tag.find_all(['p', 'hr']):\n chapter_text += content.prettify()\n chapter_count += len(content.text.split())\n chapter_object.processed_body = chapter_text\n chapter_object.raw_body = self._soup.prettify()\n chapter_object.word_count = chapter_count\n chapter_object.name = chapter['name']\n self._fanfic.add_chapter(chapter_object)", "title": "" }, { "docid": "d22cdbb3e976cf825990f8e15039e62d", "score": "0.47167173", "text": "def _getCourses(self):\n courses = defaultdict(list)\n with 
open(os.path.join(os.path.dirname(__file__), 'courses.csv'), encoding=\"UTF-8\") as file:\n reader = csv.reader(file)\n for line in reader:\n courses[line[0].strip()].append(line[1].strip())\n return courses", "title": "" }, { "docid": "c89e022c52616bcbaecd97ee734ab935", "score": "0.47144622", "text": "def get_course_list(cls):\n\n cls.setup()\n\n course_list = []\n\n tree = html.fromstring(cls.__main_page)\n # Current semester courses\n courses_nodes = tree.get_element_by_id('navbar').find('li/ul').findall('li/a[@title]')\n # Previous courses\n courses_nodes += tree.get_element_by_id('navbar').find('.//ul[@class=\"dropdown-menu\"]')\\\n .findall('li/a[@title]')\n\n for course_node in courses_nodes:\n course = DataClasses.CourseInfo()\n course.name = course_node.text_content().split('(')[0].strip()\n course.semester = course_node.text_content().split(course.name)[1].strip()\n course.link = course_node.attrib['href']\n course_list.append(course)\n\n return course_list", "title": "" }, { "docid": "a1f9d4bb0ef0c2d7cc46cc11e4697f2d", "score": "0.4708553", "text": "def saveContent(self):\n if not os.path.exists(self.contentFile):\n with open(self.translation.book_file, 'r') as f:\n lines = [line.strip() for line in f.readlines()]\n ch_cont = ' '.join(lines[self.firstLine-1:self.lastLine+1])\n start = 0\n new_lines = []\n for _, _, end in self.beads:\n new_lines.append(ch_cont[start:end])\n start = end\n ch_cont = '\\n\\n'.join(new_lines)\n with open(self.contentFile, 'w') as chf:\n chf.write(ch_cont)", "title": "" }, { "docid": "e6eddfd62dee774785381eec12050614", "score": "0.47085264", "text": "def _generate_description(self):\n generations = []\n\n for j, templ in enumerate(self.template):\n result = []\n for i, key in enumerate(templ):\n key_type = type(key)\n arg_index = 0\n # arg_index 1 for StopCondition\n if (key_type in CONDIITON_TEMPLATES) and (len(self.args) > 1):\n arg_index = 1\n\n # get the text from template object\n item = key.generate_description(arg_index=arg_index, index=i, templ_index=j)\n\n if not item:\n continue\n # flatten nested dict\n if type(item) in [OrderedDict, dict]:\n val_list = list(values_of_nested_dict(item))\n result.extend(val_list)\n else:\n result.append(item)\n generations.append(\" \".join(result))\n\n return generations", "title": "" } ]
10be4055dc44747101cc3d0847c103b0
This is the main method of the tool.
[ { "docid": "5987afff833f5856b1bc716afc9b2e8a", "score": "0.0", "text": "def main(self):\n if not os.path.isfile(os.path.join(self.project_directory, \"make_project.json\")):\n print(f\"Generating CProject in {self.project_directory}...\")\n self.normami(\"ami-makeproject\", [\"--rawfiletypes\", \"html,pdf,xml\", \"--omit\", \"template.xml\"])\n\n raw_project_contents = [os.path.join(self.project_directory, x) for x in os.listdir(self.project_directory)]\n project_contents = [x for x in raw_project_contents if os.path.isdir(x)]\n\n\n self.normami(\"ami-pdf\")\n\n self.normami(\"ami-filter\", [\"--small\", \"small\", \"--duplicate\", \"duplicate\", \"--monochrome\", \"monochrome\"])\n\n\n papers = []\n for ctree in project_contents:\n paper = Paper(ctree)\n papers.append(paper)\n\n pdf_images_dir = os.path.join(ctree, \"pdfimages\")\n try:\n imagedirs = [os.path.join(pdf_images_dir, x) for x in os.listdir(pdf_images_dir) if x.startswith(\"image.\")]\n except FileNotFoundError:\n # Most likely we've hit other dirs in the corpus, like .git\n continue\n for imagedir in imagedirs:\n skeleton = Skeleton(imagedir)\n plot = None\n if skeleton.likely_spss():\n os.rename(os.path.join(imagedir, \"lines.png\"),\n os.path.join(imagedir, \"spss.png\"))\n plot = SPSSForestPlot(imagedir, skeleton)\n elif skeleton.likely_stata():\n os.rename(os.path.join(imagedir, \"lines.png\"),\n os.path.join(imagedir, \"stata.png\"))\n plot = StataForestPlot(imagedir, skeleton)\n\n if not plot:\n continue\n\n try:\n plot.break_up_image()\n plot.process()\n except InvalidForestPlot:\n pass\n else:\n paper.plots.append(plot)\n plot.save()\n\n self.save_results(papers)", "title": "" } ]
[ { "docid": "ec021328057f10f8af523fff413bbe9a", "score": "0.8747809", "text": "def main ():", "title": "" }, { "docid": "ec021328057f10f8af523fff413bbe9a", "score": "0.8747809", "text": "def main ():", "title": "" }, { "docid": "1d4484b0529dbe0541b241834cb12a3e", "score": "0.86891615", "text": "def main():\n\t\tpass", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.86536205", "text": "def main():", "title": "" }, { "docid": "82c54a749d7508e492693a3059faed30", "score": "0.85027075", "text": "def main():\n return", "title": "" }, { "docid": "82c54a749d7508e492693a3059faed30", "score": "0.85027075", "text": "def main():\n return", "title": "" }, { "docid": "07e76a2dea489b69c96fdd82461f8592", "score": "0.84794545", "text": "def main(self):\n pass", "title": "" }, { "docid": "07e76a2dea489b69c96fdd82461f8592", "score": "0.84794545", "text": "def main(self):\n pass", "title": "" }, { "docid": "cfc083a503ff24be9d8e1d2ad2fd1393", "score": "0.844006", "text": "def main(self):\n return", "title": "" }, { "docid": 
"6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.8404597", "text": "def main() -> None:", "title": "" }, { "docid": "6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.8404597", "text": "def main() -> None:", "title": "" }, { "docid": "4301ce0ec56b79f3f0e020a93f26bdee", "score": "0.8325753", "text": "def main():\n \n pass", "title": "" }, { "docid": "c8e238f9ea21384e3be28d52a1ee7939", "score": "0.8275896", "text": "def main():\n\tpass", "title": "" }, { "docid": "59a11114512ac46a2a2d976a77d1c6d6", "score": "0.8274263", "text": "def main(self, *args):\n pass", "title": "" }, { "docid": "59a11114512ac46a2a2d976a77d1c6d6", "score": "0.8274263", "text": "def main(self, *args):\n pass", "title": "" }, { "docid": "d01bb886890ed36b0f4c3f9b6f39bbe6", "score": "0.81955576", "text": "def main():\r\n pass", "title": "" }, { "docid": "d01bb886890ed36b0f4c3f9b6f39bbe6", "score": "0.81955576", "text": "def main():\r\n pass", "title": "" }, { "docid": "1e8b2ebbe42aca7bf8565023e7af8326", "score": "0.81616354", "text": "def main(args=None):", "title": "" }, { "docid": "a4873d5cb2b9c8e72f57155f6e9b8d43", "score": "0.7982208", "text": "def main(args):\n\treturn 0", "title": "" }, { "docid": "f64ecdad4bada7fe6ab976b65fd314c4", "score": "0.79688203", "text": "def main():\n cli()", "title": "" }, { "docid": "737edbcdda02609a89cb40f455a292fb", "score": "0.7957253", "text": "def main(args):\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", 
"text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.78789955", "text": "def main():\n pass", "title": "" }, { "docid": "566036b3a07ddd1af043005be3b82644", "score": "0.78345406", "text": "def main(args=None):\r\n pass", "title": "" }, { "docid": "93e2db98c51fbce39b84a61c48c918ca", "score": "0.78276795", "text": "def main(args):\n return 0", "title": "" }, { "docid": "a8550ef16b860a6cc3bad917dfb04f42", "score": "0.78050315", "text": "def main():\n ...", "title": "" }, { "docid": "e302ae8a2be1a50834705897601e1c4f", "score": "0.7797365", "text": "def _main():\n\tpass", "title": "" }, { "docid": "64a1a4261cea121e609f61fd60d0b36f", "score": "0.7668843", "text": "def main():\n return None", "title": "" }, { "docid": "535c6e7d32028af3cc3d681a0804815d", "score": "0.76315814", "text": "def main(args=None):\n return 0", "title": "" }, { "docid": "7d8c44a31b3fd929034986d163e95ff4", "score": "0.75593626", "text": "def main():\n sys.exit(CommandGenerator().run())", "title": "" }, { "docid": "b816c59d61979d6c3de629bfe657acc3", "score": "0.7548137", "text": "def main():\n CLI().run(argv[1:])", "title": "" }, { "docid": "04688cd98d9fcf835947139440a6440f", "score": "0.7527326", "text": "def main():\n\n pass\n\n return None", "title": "" }, { "docid": "05b99850a4683aae56ee599491955a1b", "score": "0.748265", "text": "def _main() -> None:\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.74573714", "text": "def cli():\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.74573714", "text": "def cli():\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.74573714", "text": "def cli():\n pass", "title": "" }, { "docid": "68322a9750831c83f6f96d00feec8bfd", "score": "0.74573714", "text": "def cli():\n pass", "title": "" }, { "docid": "5e4cec8ea848b3d798bca3b0fced58b1", "score": "0.7456379", "text": "def main(args):\n Run()", "title": "" }, { "docid": "d3bc8d0ffe7ca1f67f6ed25dd23c3256", "score": "0.74484986", "text": "def main():\n return cli(obj={})", "title": "" }, { "docid": "416ebef25c2ec8d1e62f66e4f8143a45", "score": "0.7446385", "text": "def cli():\n\tpass", "title": "" }, { "docid": "f705cd79ef3e5c4359eff1300ac38c5b", "score": "0.7443812", "text": "def cli(ctx):", "title": "" }, { "docid": "f705cd79ef3e5c4359eff1300ac38c5b", "score": "0.7443812", "text": "def cli(ctx):", "title": "" }, { "docid": "f705cd79ef3e5c4359eff1300ac38c5b", "score": 
"0.7443812", "text": "def cli(ctx):", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" }, { "docid": "c9cb5e099e45669e7b63c3dbf25cf6a9", "score": "0.74294496", "text": "def cli():", "title": "" } ]
c7cafb5f4313d1b5fa9cdfd206771bee
Binary must wrap a string type
[ { "docid": "9ed9bd995a217fb464f9ddab65e1e4cd", "score": "0.7420099", "text": "def test_binary_force_string(self):\n with self.assertRaises(TypeError):\n Binary(2)", "title": "" } ]
[ { "docid": "eea0267dc26b875303145a2447b0192b", "score": "0.72017926", "text": "def isbinary(s, params, ui, **kwargs):\n return s", "title": "" }, { "docid": "5242fe89c082c090012e14c26675050c", "score": "0.7061668", "text": "def test_binary_converts_unicode(self):\n b = Binary('a')\n self.assertTrue(isinstance(b.value, six.binary_type))", "title": "" }, { "docid": "cf85bb221bb0b88201946dcebef8ab1e", "score": "0.68776315", "text": "def test_binary_field(self):\n # Arrange\n byte_string = b'abcde\\x00\\x0f\\x00\\x0f\\x0a\\xcf\\xff'\n\n # System under test\n field = BinaryField(\"text\")\n\n # Act\n field_bytes = field.read(BytesIO(byte_string), context=None)\n\n # Assert\n self.assertEqual(field_bytes, byte_string)", "title": "" }, { "docid": "69f05852d98b4288141c42438253c17e", "score": "0.6781392", "text": "def test_binary_bytes(self):\n self.make_table()\n data = {'a': 1, 'b': 2}\n self.dynamo.put_item('foobar', {'id': 'a',\n 'data': Binary(dumps(data))})\n item = list(self.dynamo.scan('foobar'))[0]\n self.assertEqual(loads(item['data'].value), data)", "title": "" }, { "docid": "d2b69de01af9a9550d48ea5b3d6cf501", "score": "0.6743652", "text": "def is_byte_string(val: Any) -> bool:\n return not hasattr(val, 'encode')", "title": "" }, { "docid": "ba7026c983f52df9243e6045ea29a148", "score": "0.67091286", "text": "def _binary(val):\n if isinstance(val, int) or isinstance(val, float):\n s = int(math.ceil(val.bit_length() / 8))\n return val.to_bytes(s, 'big')\n if isinstance(val, str):\n try:\n return decode(val)\n except Exception:\n return val.encode(\"utf-8\")\n if isinstance(val, bytes):\n return val\n raise TypeError(\"Byte serialization not supported\")", "title": "" }, { "docid": "a66b79e59945e0eeeb5c9deeaf40570a", "score": "0.6658232", "text": "def Binary(value):\n if PY2:\n return Bytea(value)\n else:\n return value", "title": "" }, { "docid": "5150f6352495f4c05ee073f09b48e8cb", "score": "0.661736", "text": "def Binary(x):\n if PY2:\n return bytearray(x)\n else:\n return bytes(x)", "title": "" }, { "docid": "5f06d221d12f160cafc82db216db5970", "score": "0.6566614", "text": "def maybe_binstr(val):\n if not isinstance(val, six.string_types):\n return val\n return binstr(val)", "title": "" }, { "docid": "55e272d3686314ea3a6062fec74ac12c", "score": "0.65555143", "text": "def test_str_param(self):\n request = DummyRequest()\n request.params = {'field': 'myfield'}\n field = param(request, 'field', type=bytes)\n self.assertEquals(field, b'myfield')\n self.assertTrue(isinstance(field, six.binary_type))", "title": "" }, { "docid": "c126cabcc20eae54a7bf1faede064662", "score": "0.6540289", "text": "def parseByteString(string):\n if string is None : return None\n raise ApiError(api_error.API_ERROR_DATA_TYPE_NOT_SUPPORTED, db.ByteStringProperty)", "title": "" }, { "docid": "e34aa966df4b4bb5c7061df0b1f43c42", "score": "0.65340626", "text": "def BINARY(self):", "title": "" }, { "docid": "2992bdc51d62b409d6e0531d0f5a3a1b", "score": "0.6477947", "text": "def __call__(self, value):\r\n \r\n if not isinstance(value, unicode):\r\n try:\r\n unicode(value, self._encoding)\r\n except UnicodeError:\r\n errorMessage = \"The given binary string cannot be converted to unicode using the default encoding.\" + \\\r\n \"Please convert the string to unicode before.\"\r\n raise ValueError(errorMessage)\r\n except TypeError:\r\n raise ValueError(\"The value '%s' is no binary string.\" % str(value))", "title": "" }, { "docid": "2a06a73e611b53c08ce9368ae146e98c", "score": "0.64649063", "text": "def 
test_string(self):\r\n\r\n\t\tself.n = bencode.ben_type(\"4:test\")\r\n\t\tself.assertEqual(self.n, str)", "title": "" }, { "docid": "080b768bcd95c11eb552e64443b056ce", "score": "0.6450479", "text": "def is_binary_string(inp):\n # From https://stackoverflow.com/a/7392391\n return bool(inp.translate(None, TEXTCHARS))", "title": "" }, { "docid": "e5687db72bf9ae6f4875bf5e60b34e93", "score": "0.6426588", "text": "def binstr(val):\n if isinstance(val, six.binary_type):\n return val\n if sys.version_info < (3, 0):\n return bytes(val)\n else:\n return bytes(val, 'utf8')", "title": "" }, { "docid": "e10c14c3c5c5b0d1fa76a4efe73ac632", "score": "0.64137053", "text": "def ensure_bytes(v):\n\t\treturn v.encode(\"utf8\") if isinstance(v,str) else v", "title": "" }, { "docid": "79558b70e015b5aaa0959b968c95d152", "score": "0.6409564", "text": "def _isBinaryString(bytes):\n\tnontext = bytes.translate(None, TEXTCHARS)\n\treturn bool(nontext)", "title": "" }, { "docid": "6a470e9ec1584516c1fab0ee985cb641", "score": "0.63404655", "text": "async def binify(self, ctx: commands.Context, *message: str):\n if not message:\n response = \"Please include string to convert.\"\n elif set(\"\".join(message)).issubset([\"0\", \"1\", \" \"]) and len(\"\".join(message)) > 2:\n string = \"\".join(message)\n if len(string) % 8 != 0:\n response = \"Binary string contains partial byte.\"\n else:\n response = \"\"\n for i in range(0, len(string), 8):\n n = int(string[i:i+8], 2)\n if n >= 128:\n response = \"Character out of ascii range (0-127)\"\n break\n response += chr(n)\n else:\n response = \"\"\n for c in \" \".join(message).replace(\"&amp;\", \"&\").replace(\"&lt;\", \"<\").replace(\"&gt;\", \">\"):\n n = ord(c)\n if n >= 128:\n response = \"Character out of ascii range (0-127)\"\n break\n response += f\"{n:08b}\"\n\n await ctx.send(response)", "title": "" }, { "docid": "82a2de68aa550f21eaa4dfbc4cb62007", "score": "0.6296766", "text": "def test_binary(self):\n self.assertEqual(self.sid_null_bin, sid.sid(self.sid_null).binary())\n self.assertEqual(self.sid_sample_bin,\n sid.sid(self.sid_sample).binary())", "title": "" }, { "docid": "f5eda366edab8d7f22c57943d084c53c", "score": "0.62784666", "text": "def test_binary(self):\n self.make_table()\n self.dynamo.put_item('foobar', {'id': 'a', 'data': Binary('abc')})\n item = list(self.dynamo.scan('foobar'))[0]\n self.assertEqual(item['data'].value, b'abc')", "title": "" }, { "docid": "9e585a359fd97e7e0b72cf493c51820f", "score": "0.62733793", "text": "def ensure_bytes( v ):\n\t\treturn v.encode(\"utf8\") if isinstance(v,unicode) else v", "title": "" }, { "docid": "4c0c1c57f8099d79919a64c8a515dce3", "score": "0.62726736", "text": "def PlatformBytes(inputStr):\n\t\treturn PlatformString(inputStr)", "title": "" }, { "docid": "8801a19226c43d112b157c52734b15ec", "score": "0.62707", "text": "def __init__(self, value: str):\n super(BuilderOp.BitString, self).__init__(type='BitString')\n self.value = value", "title": "" }, { "docid": "958a81b333b9b00e14cdd83c3b292593", "score": "0.62530494", "text": "def test_bytes_param(self):\n request = DummyRequest()\n request.params = {'field': 'myfield'}\n field = param(request, 'field', type=bytes)\n self.assertEquals(field, b'myfield')\n self.assertTrue(isinstance(field, six.binary_type))", "title": "" }, { "docid": "b9be128fa767f3b304b06909c4e48797", "score": "0.6236167", "text": "def unserialize(self, bytes, isBinary):", "title": "" }, { "docid": "3dc6f4d722c62cc3ffaf9c21b2f4ff07", "score": "0.61659217", "text": "def ensure_string_type(obj):\n if 
isinstance(obj, bytes):\n return obj.decode()\n else:\n return str(obj)", "title": "" }, { "docid": "42e0e7c2a2c58db621fbcf297efadd63", "score": "0.6132326", "text": "def test_binary(self):\n m = fd('joe', 2, '\\x00\\x01\\xff')\n self.assertEqual(m, Message('joe', 2, {\n 'line': '\\x00\\x01\\xff'.encode('base64'),\n 'encoding': 'base64',\n }))", "title": "" }, { "docid": "8f15860be57991b9ae20c93b7de1846d", "score": "0.61235255", "text": "def test_init_binary(self):\n self.assertEqual(self.sid_null, str(\n sid.sid(self.sid_null_bin, sid.SID_BINARY)))\n self.assertEqual(self.sid_sample, str(\n sid.sid(self.sid_sample_bin, sid.SID_BINARY)))", "title": "" }, { "docid": "d4c3cc8bf49239fd4299b10a04c8f9fd", "score": "0.61196184", "text": "def binary_encoding(string, encoding = 'utf-8'):\n try:\n return bytes(string, encoding)\n except TypeError: # We are in Python 2\n return str(string)", "title": "" }, { "docid": "275fb4f95a7935f9e90b7ff959a59dc9", "score": "0.6077308", "text": "def test_bytes_preservation(self):\n data = {\n b'bytes_key': b'this is a byte array',\n 'unicode_key': 'unicode string!',\n }\n serializer = MsgpackSerializer()\n output = serializer.blob_to_dict(serializer.dict_to_blob(data))\n assert b'bytes_key' in output\n assert 'unicode_key' in output\n assert output[b'bytes_key'] == b'this is a byte array'\n assert output['unicode_key'] == 'unicode string!'", "title": "" }, { "docid": "ae1486c1f4564bfdad974269914691a6", "score": "0.6071919", "text": "def IsBinaryBytes(self, bytes_str):\n return False\n trantab = str.maketrans(FileTypeChecker.ALLBYTES,\n FileTypeChecker.ALLBYTES,FileTypeChecker.TXTCHARS)\n print (trantab)\n nontext = bytes_str.translate(trantab.keys())\n return bool(nontext)", "title": "" }, { "docid": "8a63d7b09c532f301200325eb40c29e8", "score": "0.6019618", "text": "def b(raw):\n return raw.encode(\"ascii\")", "title": "" }, { "docid": "ed668d335e9a336876ed5e275079c54e", "score": "0.60115725", "text": "def testStringb(self):\n d = pdb.pdbdata('char[5]', 'abcde')\n self.assertEqual('abcde', str(d))\n o = pdb.unpack(d)\n self.assertEqual(o, 'abcde')", "title": "" }, { "docid": "dde3888951a55ee35eb4ccaf4e35a2cc", "score": "0.6000859", "text": "def convert_to_bytes(_string: str) -> bytes:\n # print('input string: ', _string)\n # print(\"hold: \", _string.replace(b' ', b'').replace(b':', b''))\n # print('new string: ', binascii.unhexlify(_string.replace(b' ', b'').replace(b':', b'')))\n if type(_string) is str:\n return binascii.unhexlify(_string.replace(' ', '').replace(':', ''))\n elif type(_string) is bytes:\n return binascii.unhexlify(_string.replace(b' ', b'').replace(b':', b''))\n else:\n raise Exception(\"Use string or byte string\")", "title": "" }, { "docid": "71e017b0fb0c6cf5dcae9262007147ad", "score": "0.5997392", "text": "def a2b_base64(data: str | bytes, /) -> bytes:", "title": "" }, { "docid": "2d81c1717536d252676bc765a50c3b8d", "score": "0.59918606", "text": "def make_byte_string(s):\n if IS_PYTHON3 and isinstance(s, string_types):\n s = s.encode()\n return s", "title": "" }, { "docid": "eaf8d3466829aa87e3588dd58a07cc00", "score": "0.59788567", "text": "def ensure_byte_strings():\n def convert_result_from_bytes(result):\n if isinstance(result, bytes):\n return result.decode('utf-8')\n if isinstance(result, list):\n return list(map(convert_result_from_bytes, result))\n if isinstance(result, tuple):\n return tuple(map(convert_result_from_bytes, result))\n return result\n def decorator(fn):\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n newargs 
= []\n newkwargs = {}\n for arg in args:\n if isinstance(arg, str):\n newargs.append(bytes(arg, 'utf-8'))\n else:\n newargs.append(arg)\n for k, v in kwargs.items():\n if isinstance(v, str):\n newkwargs[k] = bytes(v, 'utf-8')\n else:\n newkwargs[k] = v\n newargs = tuple(newargs)\n return fn(*newargs, **newkwargs)\n return wrapper\n return decorator", "title": "" }, { "docid": "16d7ec6b65a2cceada53744242cc6a5a", "score": "0.5973994", "text": "def testEncode(self):\n\n stream = BytesIO(b'')\n self.schema.encode(stream, {0x52A1:50})\n stream.seek(0)\n self.assertEqual(stream.getvalue(), b'\\x52\\xA1\\x81\\x32')", "title": "" }, { "docid": "70d161b282f447bcdbc8ab59ce6648d1", "score": "0.5971439", "text": "def bytes_schema(self, schema: core_schema.BytesSchema) -> JsonSchemaValue:\n json_schema = {'type': 'string', 'format': 'binary'}\n self.update_with_validations(json_schema, schema, self.ValidationsMapping.bytes)\n return json_schema", "title": "" }, { "docid": "e1caa98c4b8c78884a4ebbf7d584de84", "score": "0.5967122", "text": "def to_binary(value, encoding='utf-8'):\n if not value:\n return b''\n if isinstance(value, six.binary_type):\n return value\n if isinstance(value, six.text_type):\n return value.encode(encoding)\n return to_text(value).encode(encoding)", "title": "" }, { "docid": "d12c10b75f029a146b2a7ed8a2a6c82e", "score": "0.59585375", "text": "def _deserialize_b(self, value):\n return base64.b64encode(value).decode('utf-8')", "title": "" }, { "docid": "66fc8aaddb99f700c93b17691408c11c", "score": "0.5942805", "text": "def test_binary_equal(self):\n self.assertEqual(Binary('a'), Binary('a'))\n self.assertEqual(Binary('a'), b'a')\n self.assertFalse(Binary('a') != Binary('a'))", "title": "" }, { "docid": "115d7fd398f1a630c00e0300d529271a", "score": "0.59389806", "text": "def test_str_json_body(self):\n request = DummyRequest()\n request.params = {}\n request.json_body = {'field': 'myfield'}\n request.headers = {'Content-Type': 'application/json'}\n field = param(request, 'field', type=bytes)\n self.assertEquals(field, b'myfield')\n self.assertTrue(isinstance(field, six.binary_type))", "title": "" }, { "docid": "985cf41057490a2b3f4053a070bebfa6", "score": "0.5937785", "text": "def fix_test_case(block):\n if type(block) is str:\n block = bytes(block, encoding='utf-8')\n return block", "title": "" }, { "docid": "e7fcdbe68d41c2638409bb09ea67e099", "score": "0.5903928", "text": "def test_binary_repr(self):\n self.assertEqual(repr(Binary('a')), 'Binary(%s)' % b'a')", "title": "" }, { "docid": "492d1ed9bbb2445cdedcc5ced88e936f", "score": "0.5893895", "text": "def unhexlify(data: str | bytes, /) -> bytes:", "title": "" }, { "docid": "74546d214f8797cc6cf2dee92bfc29da", "score": "0.58908445", "text": "def test_hash_tag_bytes():\n assert hash_tag_bytes(\"Value\", b\"123\") == \"25aa6da917ca4e568e859df039fb1ca184e65d57\"", "title": "" }, { "docid": "a7f4dc5c9d003781233157c8cccdaef6", "score": "0.58838505", "text": "def test_get_prep_value_with_bytes(self):\n obj = Base64TestModel()\n value = obj._meta.get_field('field').get_prep_value(\n b'VGhpcyBpcyBhIHTDqXN0\\n')\n\n self.assertIs(type(value), str)\n self.assertEqual(value, 'VGhpcyBpcyBhIHTDqXN0\\n')", "title": "" }, { "docid": "c83a7f26f6735c1d468f5af59b963453", "score": "0.5876491", "text": "def is_binary(data):\n if not data or not isinstance(data, (six.string_types, six.binary_type)):\n return False\n\n if isinstance(data, six.binary_type):\n if b'\\0' in data:\n return True\n elif str('\\0') in data:\n return True\n\n text_characters = 
''.join([chr(x) for x in range(32, 127)] + list('\\n\\r\\t\\b'))\n\n # Get the non-text characters (map each character to itself then use the\n # 'remove' option to get rid of the text characters.)\n if six.PY3:\n if isinstance(data, six.binary_type):\n import sugar.utils.data\n nontext = data.translate(None, sugar.utils.data.encode(text_characters))\n else:\n trans = ''.maketrans('', '', text_characters)\n nontext = data.translate(trans)\n else:\n if isinstance(data, six.text_type):\n trans_args = ({ord(x): None for x in text_characters},)\n else:\n trans_args = (None, str(text_characters)) # future lint: blacklisted-function\n nontext = data.translate(*trans_args)\n\n # If more than 30% non-text characters, then\n # this is considered binary data\n if float(len(nontext)) / len(data) > 0.30:\n return True\n return False", "title": "" }, { "docid": "65c8727e7914ffc91978695c338eb1dd", "score": "0.5856668", "text": "def test_string(self):\r\n\r\n\t\tself.n = bencode.encode_str(\"test\")\r\n\t\tself.assertEqual(self.n, \"4:test\")", "title": "" }, { "docid": "d3dfce0ee5b6e47a6a227c1ac0109777", "score": "0.58527464", "text": "def test_convert_type_str(self):\n schema = {\n 'key': 'string'\n }\n record = {\n 'key': 100\n }\n assert_equal(ParserBase._convert_type(record, schema), True)\n assert_equal(record, {'key': '100'})", "title": "" }, { "docid": "2ef402fb28cad8934454290732151ec7", "score": "0.5852582", "text": "def test_input_too_short(typ, spec, bs):\n\n protocol = BinaryProtocol()\n\n with pytest.raises(EndOfInputError) as exc_info:\n s = bytes(bytearray(bs))\n protocol.deserialize_value(typ, s)\n\n assert 'bytes but got' in str(exc_info)\n\n with pytest.raises(EndOfInputError) as exc_info:\n reader = protocol.reader(ReadBuffer(s))\n spec.read_from(reader)\n\n assert 'bytes but got' in str(exc_info)", "title": "" }, { "docid": "4a4a167a3c25556304ee33cd6463955c", "score": "0.584788", "text": "def test_to_python_with_bytes(self):\n obj = Base64TestModel()\n value = obj._meta.get_field('field').to_python(\n b'VGhpcyBpcyBhIHRlc3Q=\\n')\n\n self.assertIs(type(value), Base64DecodedValue)\n self.assertEqual(value, b'This is a test')", "title": "" }, { "docid": "16c103233ff4ba94680b7a685d79f37c", "score": "0.5819751", "text": "def serialize(self, buff):\n try:\n pass\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "title": "" }, { "docid": "f1fceeb677a41ec8a321e67d59fdf085", "score": "0.5818131", "text": "def is_bytes(obj):\n\treturn isinstance(obj, (bytes, bytearray))", "title": "" }, { "docid": "0ea8913a529b803fee4ebdec7f1613fd", "score": "0.5809857", "text": "def test_object_with_id_binary_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = ObjectWithID(id=uid)\n\n blob = (\n b\"\\n$syft.core.common.object.ObjectWithID\\x12\\x14\\n\\x12\\n\\x10\\xfb\\x1b\\xb0\"\n + b\"g[\\xb7LI\\xbe\\xce\\xe7\\x00\\xab\\n\\x15\\x14\"\n )\n\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob\n assert sy.serialize(obj, to_bytes=True) == blob", "title": "" }, { "docid": "cbf784e3a264f0d6697e2511936f1595", "score": "0.58067", "text": "def ensure_str(v):\n\t\treturn v.decode(\"utf8\") if isinstance(v,bytes) else v", "title": "" }, { "docid": "6109fc19bc6624598fba982c13ef9c6f", "score": "0.5803272", "text": "def IsBinaryDataType(*args):\n return _DigitalMicrograph.IsBinaryDataType(*args)", "title": "" }, { "docid": "50bde40eedc1a4f771a9b94e7c36f89e", "score": 
"0.57741356", "text": "def register_bitstring_types(connection):\r\n with closing(connection.cursor()) as cur:\r\n cur.execute(\"SELECT NULL::BIT\")\r\n bit_oid = cur.description[0].type_code\r\n cur.execute(\"SELECT NULL::VARBIT\")\r\n varbit_oid = cur.description[0].type_code\r\n bit_caster = ext.new_type((bit_oid, varbit_oid), 'BIT', cast_bits)\r\n ext.register_type(bit_caster, connection)", "title": "" }, { "docid": "f75e16a5ff724d7c2989b391572b5a7a", "score": "0.5751746", "text": "def visit_Bytes(self, node):\n self.attr(node, 'content', [self.tokens.str], deps=('s',), default=node.s)", "title": "" }, { "docid": "ffdd3e5916aa065c669f4c966299c3b6", "score": "0.57433456", "text": "def fix_binary_string(bin_str, num_bits):\n assert num_bits >= len(bin_str), \"num_bits is greater than the length of bin_str\"\n return '0'*(num_bits-len(bin_str))+bin_str", "title": "" }, { "docid": "08044b0fb3960a94a059b111ab7b8cc1", "score": "0.57409024", "text": "def _bstr(s):\n if isinstance(s, unicode):\n s = s.encode('utf-8')\n elif not isinstance(s, str):\n s = str(s)\n return s", "title": "" }, { "docid": "0bccb71e8079a9336e5ed3a2f608d0fd", "score": "0.57385075", "text": "def test_strings(self):\r\n\r\n\t\tself.n = bencode.encode(\"test\")\r\n\t\tself.assertEqual(self.n, \"4:test\")", "title": "" }, { "docid": "1bd356b54ba78e4e8df8d3c9939d4471", "score": "0.57316196", "text": "def __init__(self, text, value, *args, **kwargs):\n str_value = (value and YES) or NO\n super(Binary, self).__init__(text, str_value, *args, **kwargs)", "title": "" }, { "docid": "d5912ab4ea095add386a67b6072f64fb", "score": "0.57289237", "text": "def test_base64_str_representation(self):\n try:\n correct = '$base64_string = \"Test __str()__ call on YaraString w/ Base64 modifier.\" ' \\\n 'base64(!@#$%^&*(){}[].,|ABCDEFGHIJ\tLMNOPQRSTUVWXYZabcdefghijklmnopqrstu)'\n\n ys = YaraString(\"base64_string\", \"Test __str()__ call on YaraString w/ Base64 modifier.\",\n string_type=TEXT_TYPE,\n modifiers=[\n {\n \"keyword\": BASE64,\n \"data\": \"!@#$%^&*(){}[].,|ABCDEFGHIJ\\x09LMNOPQRSTUVWXYZabcdefghijklmnopqrstu\"\n }])\n\n self.assertEqual(str(ys), correct)\n except Exception as exc:\n self.fail(\"{}\".format(exc))", "title": "" }, { "docid": "e404774965362d6638c95172c74b9fbb", "score": "0.5723626", "text": "def bsFromString(self, str):\n\n\t\tx = self.length - len(str)\n\t\tx = x // 2\n\t\tfor i in range(len(str)):\n\t\t\tif(str[i] == '1'):\n\t\t\t\tself.bits[x + i] = 1\n\t\t\telse:\n\t\t\t\tself.bits[x + i] = 0", "title": "" }, { "docid": "a948d3a3a8d0560725eab7f79b7cffba", "score": "0.5721702", "text": "def test_query_must_be_bytes(self):\n with self.assertRaises(TypeError):\n utils.build_query_params('')", "title": "" }, { "docid": "f5e14d67a5c947dbe460924b9967cad7", "score": "0.5715057", "text": "def is_string(data):\n return isinstance(data, str)", "title": "" }, { "docid": "c39e4e39b2e46159771de4e002824b4d", "score": "0.57147294", "text": "def is_binary(self):\n return self._is_binary", "title": "" }, { "docid": "9440473343ba740f8627af2378a3e31a", "score": "0.5713475", "text": "def PlatformBytes(inputStr):\n\t\tif isinstance(inputStr, bytes):\n\t\t\treturn inputStr\n\t\treturn inputStr.encode(\"UTF-8\")", "title": "" }, { "docid": "3a73600246ed9e24f3dea532e4f7348b", "score": "0.5713276", "text": "def test__cast_to_bytes(self):\n self.assertEqual(\n bytes(RequestInsert(charset, errors, 0, 1, 0, 1, 2000, 30000)),\n binascii.unhexlify(\"0d0000001b00000000000000010000000000000003000000040100000004d00700000430750000\")\n )\n\n 
self.assertEqual(\n bytes(RequestInsert(charset, errors, 0, 1, 0, b\"AAA\", b\"BBBB\", b\"CCCCCC\")),\n binascii.unhexlify(\"0d0000001c0000000000000001000000000000000300000003414141044242424206434343434343\")\n )", "title": "" }, { "docid": "4d2d86bf8b38baa14d95fecb617e3ff0", "score": "0.570846", "text": "def safe_encode(s: Optional[AnyStr]) -> Optional[bytes]:\n if isinstance(s, str):\n return s.encode(defenc)\n elif isinstance(s, bytes):\n return s\n elif s is None:\n return None\n else:\n raise TypeError(\"Expected bytes or text, but got %r\" % (s,))", "title": "" }, { "docid": "acc05dee057031595529dcbab2745421", "score": "0.5699245", "text": "def binary_str(self):\n return f'{self.number:b}'", "title": "" }, { "docid": "47c5f3caf979c66720a9dd84b48fa888", "score": "0.56983876", "text": "def __init__(self, value: bytes):\n self.value = value", "title": "" }, { "docid": "2b5fb854a8dd9835760a27aa3f94c080", "score": "0.56838495", "text": "def s2b(s):\n if _py3:\n return s.encode('utf-8')\n else:\n return s", "title": "" }, { "docid": "54bcbb0f0a90737d71a9ab0759eb4520", "score": "0.5682309", "text": "def is_binary(s): # DONT EDIT\n return s == '&' or s == '|' or s == '->'", "title": "" }, { "docid": "3cc5557ff8cbd92978ee405269a2ca49", "score": "0.5681747", "text": "def is_binary(s: str) -> bool:\n return s == '&' or s == '|' or s == '->'", "title": "" }, { "docid": "3cc5557ff8cbd92978ee405269a2ca49", "score": "0.5681747", "text": "def is_binary(s: str) -> bool:\n return s == '&' or s == '|' or s == '->'", "title": "" }, { "docid": "28b0811a5806feba189e4f9ae6f40587", "score": "0.56809175", "text": "def _pack_value(self, x: Any) -> bytes:\n if type(x) is int:\n return self.TYPE_INT_PACKED + self._pack_int(x)\n elif type(x) is float:\n return self.TYPE_FLOAT_PACKED + self._pack_float(x)\n elif type(x) is str:\n return self._pack_text(x)\n elif type(x) is bool:\n return self.TYPE_BOOL_PACKED + self._pack_bool(x)\n else:\n raise Exception(\"Can't pack type\" + str(type(x)))", "title": "" }, { "docid": "1cef023efb3de295d6818e60e433ff56", "score": "0.5680797", "text": "def test_typestring_conversion(self):\n conversion_tests = (\n (12, \"i|12\"),\n (12.4, \"f|12.4\"),\n (\"cowabunga\", \"s|cowabunga\"),\n )\n\n # Test conversion to typestring\n for _input, expected in conversion_tests:\n self.assertEqual(self.cog.redis._value_to_typestring(_input), expected)\n\n # Test conversion from typestrings\n for _input, expected in conversion_tests:\n self.assertEqual(self.cog.redis._value_from_typestring(expected), _input)\n\n # Test that exceptions are raised on invalid input\n with self.assertRaises(TypeError):\n self.cog.redis._value_to_typestring([\"internet\"])\n self.cog.redis._value_from_typestring(\"o|firedog\")", "title": "" }, { "docid": "de416e6c2abb1fc810eba6f775f37f1a", "score": "0.5674973", "text": "def pack_string(string):\n data = string.encode('UTF-8') if isinstance(string, unicode) else str(string)\n return struct.pack('b', len(data)) + data", "title": "" }, { "docid": "b2981a69cc0cef9a5efc254e0ac6ea0e", "score": "0.56714255", "text": "def _pack_string(instr):\n val = tools.ToBytes(instr)\n pad_len = align_int(len(val) + 1, FILENAME_ALIGN)\n return val + tools.GetBytes(0, pad_len - len(val))", "title": "" }, { "docid": "652a50d51adaf1c2e187b68ef5890d7a", "score": "0.5670707", "text": "def test_query_accepts_bytes(self):\n utils.build_query_params(b'')", "title": "" }, { "docid": "b25dc8fbce631a5a04554693082bd50d", "score": "0.5668083", "text": "def is_binary(s: str) -> bool:\n # return s 
== '&' or s == '|' or s == '->'\n # For Chapter 3:\n return s in {'&', '|', '->', '+', '<->', '-&', '-|'}", "title": "" }, { "docid": "1463b68e4abf6c71c9615c91e7dea018", "score": "0.5655611", "text": "def key_bytes(key):\n from normality import stringify\n if isinstance(key, bytes):\n return key\n key = stringify(key) or \"\"\n return key.encode(\"utf-8\")", "title": "" }, { "docid": "33e73f01505bebf572be6dc00deb4a68", "score": "0.56469035", "text": "def _is_string_array(restype):\n if isinstance(restype, WeldVec):\n if isinstance(restype.elem_type, WeldVec):\n if isinstance(restype.elem_type.elem_type, I8):\n return True\n return False", "title": "" }, { "docid": "7de7b4890dc875d51ad5ff022618b7b6", "score": "0.56468403", "text": "def ispure(s):\n if s is None:\n return True\n if isinstance(s, (six.text_type, newstr)):\n return True\n if type(s) == bytes:\n try:\n s.decode('ascii')\n except UnicodeDecodeError:\n return False\n return True\n return False", "title": "" }, { "docid": "26e45d2ee3395dc1934d426611e44c34", "score": "0.56407607", "text": "def test_types(self):\r\n fstr = _FakeString(\"fake string\")\r\n self.assertEqual(str(fstr), \"fake string\")\r\n self.assertEqual(bytes(fstr), b\"fake string\")\r\n if py3k:\r\n self.assertEqual(repr(fstr), \"'fake string'\")\r\n else:\r\n self.assertEqual(repr(fstr), b\"u'fake string'\")\r\n\r\n self.assertIsInstance(str(fstr), str)\r\n self.assertIsInstance(bytes(fstr), bytes)\r\n if py3k:\r\n self.assertIsInstance(repr(fstr), str)\r\n else:\r\n self.assertIsInstance(repr(fstr), bytes)", "title": "" }, { "docid": "b67bd5a0e7cd62dae0b7d27534ba8d94", "score": "0.56313056", "text": "def test_convert_type_unicode_str(self):\n schema = {\n 'key': 'string'\n }\n record = {\n 'key': '\\ue82a'\n }\n assert_equal(ParserBase._convert_type(record, schema), True)\n assert_equal(record, {'key': '\\ue82a'})", "title": "" }, { "docid": "5ce86445586eb42dd500d3cd00a820c4", "score": "0.5625722", "text": "def _a_encode_bytes(value, mapping):\n assert isinstance(value, (bytes, str)), \"VALUE has invalid type: %s\" % type(value)\n return str(len(value)).encode(\"UTF-8\"), b\"b\", cast_to_bin(value)", "title": "" }, { "docid": "d8aec4bfc14b01d8ad0305c0592a8ddf", "score": "0.56215763", "text": "def __bytes__(self):\n return bytes([self.type * 2, 4]) + self.value.to_bytes(4, 'big')", "title": "" }, { "docid": "4fdbe6cc9735c58a1fe63f750ebcc855", "score": "0.56169206", "text": "def transmit_encoded(plaintext: str) -> str:\n pass # <- implement your function", "title": "" }, { "docid": "8fdff0b683448d84d291ef0b36fe9d2c", "score": "0.5613103", "text": "def unserialize(self, bytes):", "title": "" }, { "docid": "8fdff0b683448d84d291ef0b36fe9d2c", "score": "0.5613103", "text": "def unserialize(self, bytes):", "title": "" }, { "docid": "9acba63f90dec199819a7843af87ccdf", "score": "0.5597181", "text": "def _bytes(x): # pragma: no branch\n if sys.version > \"3\":\n return bytes(x, \"utf8\")\n else: # pragma: no cover\n return x.__bytes__()", "title": "" }, { "docid": "aa346597e5f4d34a7c7eb42888d3b8a5", "score": "0.55970603", "text": "def test_solution() -> None:\n a = \"49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d\"\n b = b\"SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t\"\n assert encoding.hex_to_base64(a) == b", "title": "" }, { "docid": "5f60d91a74345c7631600dfb384bd501", "score": "0.5596351", "text": "def _ops_binary_placeholder_sql(self, value):\n if value is not None:\n return '_binary %s'\n else:\n 
return '%s'", "title": "" }, { "docid": "a810bdcba61e2e46707bdbdc77106af6", "score": "0.55940217", "text": "def test_string_length(self):\r\n\r\n\t\tself.n = bencode.decode_str(\"1:abc\")\r\n\t\tself.assertEqual(self.n, \"a\")", "title": "" }, { "docid": "1c7a89e37f243904df7b374810aabe1c", "score": "0.5593397", "text": "def force_bytes(value):\n if IS_PY3:\n if isinstance(value, str):\n value = value.encode('utf-8', 'backslashreplace')\n else:\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n\n return value", "title": "" } ]
1b26a7e2470731048736735d41e820ca
centers the window to the screen
[ { "docid": "6622fe436845f7b3986af42688cfc35c", "score": "0.8327689", "text": "def _center_window(self):\r\n screen = QtGui.QDesktopWidget().screenGeometry()\r\n size = self.geometry()\r\n self.move(\r\n (screen.width() - size.width()) * 0.5,\r\n (screen.height() - size.height()) * 0.5\r\n )", "title": "" } ]
[ { "docid": "0470869de0fe4c2dc345e28a653fcd8f", "score": "0.8163197", "text": "def center(self,win):\n win.update_idletasks()\n width = win.winfo_width()\n frm_width = win.winfo_rootx() - win.winfo_x()\n win_width = width + 2 * frm_width\n height = win.winfo_height()\n titlebar_height = win.winfo_rooty() - win.winfo_y()\n win_height = height + titlebar_height + frm_width\n x = win.winfo_screenwidth() // 2 - win_width // 2\n y = win.winfo_screenheight() // 2 - win_height // 2\n win.geometry('{}x{}+{}+{}'.format(width, height, x, y))\n win.deiconify()", "title": "" }, { "docid": "7989c9a736b1a7bac945846d900d0030", "score": "0.81152254", "text": "def center(self):\n self.top.update_idletasks()\n w = self.top.winfo_screenwidth()\n h = self.top.winfo_screenheight()\n size = tuple(int(_) for _ in self.top.geometry().split('+')[0].split('x'))\n x = w/2 - size[0]/2\n y = h/2 - size[1]/2\n self.top.geometry(\"%dx%d+%d+%d\" % (size + (x, y)))\n return", "title": "" }, { "docid": "269f4daf6f57d93af44737520be52128", "score": "0.8040823", "text": "def center(win):\n win.update_idletasks()\n width = win.winfo_width()\n frm_width = win.winfo_rootx() - win.winfo_x()\n win_width = width + 2 * frm_width\n height = win.winfo_height()\n titlebar_height = win.winfo_rooty() - win.winfo_y()\n win_height = height + titlebar_height + frm_width\n x = win.winfo_screenwidth() // 2 - win_width // 2\n y = win.winfo_screenheight() // 2 - win_height // 2\n win.geometry('{}x{}+{}+{}'.format(width, height, x, y))\n win.deiconify()", "title": "" }, { "docid": "02e1510bda2656e2c42c0c0f8a7ee699", "score": "0.7986522", "text": "def makeWindowCenter(self):\n qtRectangle = self.frameGeometry()\n centerPoint = QDesktopWidget().availableGeometry().center()\n qtRectangle.moveCenter(centerPoint)\n self.move(qtRectangle.topLeft())", "title": "" }, { "docid": "12a42b95583045c87ae3eb756f77d8e7", "score": "0.7965381", "text": "def center(win):\n window_height = 700\n window_width = 350\n\n screen_width = win.winfo_screenwidth()\n screen_height = win.winfo_screenheight()\n\n x_cordinate = int((screen_width/2) - (window_width/2))\n y_cordinate = int((screen_height/2) - (window_height/2))\n\n win.geometry(\"{}x{}+{}+{}\".format(window_width, window_height, x_cordinate, y_cordinate))", "title": "" }, { "docid": "7bb3ba56ac54a8bb31e45fdd4e2a1c79", "score": "0.794357", "text": "def center_window_to_screen(self):\n desktop = QtWidgets.QApplication.desktop()\n cursor_pos = QtGui.QCursor.pos()\n desktop_number = desktop.screenNumber(cursor_pos)\n desktop_rect = desktop.screenGeometry(desktop_number)\n\n size = self.geometry()\n\n self.move(\n (desktop_rect.width() - size.width()) * 0.5 + desktop_rect.left(),\n (desktop_rect.height() - size.height()) * 0.5 + desktop_rect.top(),\n )", "title": "" }, { "docid": "8186ea631fa7d764689461e8aae1f91e", "score": "0.79214674", "text": "def center_window(self):\n\n # if there is no main application then fall back to centering to the\n # screen that the mouse pointer is in\n parent = self.parent()\n if not parent:\n self.center_window_to_screen()\n return\n\n # get the main application size\n # center to that dimension\n parent_size = parent.geometry()\n size = self.geometry()\n\n left = (parent_size.width() - size.width()) * 0.5 + parent_size.left()\n top = (parent_size.height() - size.height()) * 0.5 + parent_size.top()\n\n left = max(0, left)\n top = max(0, top)\n\n self.move(left, top)", "title": "" }, { "docid": "147eb2078b785d6620846c0627eb45c8", "score": "0.7807315", "text": "def center_widget(self) 
-> None:\n self.withdraw()\n self.update_idletasks()\n\n x = (self.winfo_screenwidth() - self.winfo_reqwidth())\n x /= 2\n y = (self.winfo_screenheight() - self.winfo_reqheight())\n y /= 2\n g = (\"+%d+%d\" % (x, y))\n\n self.geometry(g)\n self.deiconify()\n return", "title": "" }, { "docid": "42200a08a775d4d4809d3c31e74889bb", "score": "0.7756313", "text": "def center(self):\n # geometry of the main window\n qr = self.frameGeometry()\n # center point of screen\n cp = QDesktopWidget().availableGeometry().center()\n # move rectangle's center point to screen's center point\n qr.moveCenter(cp)\n # top left of rectangle becomes top left of window centering it\n self.move(qr.topLeft())", "title": "" }, { "docid": "87d50577c36ff2cfa23e7edca8c1711d", "score": "0.75481683", "text": "def _center(self):\n _frame = self.frameGeometry()\n # noinspection PyArgumentList\n _root = QApplication.desktop()\n _active_screen = _root.screenNumber(_root.cursor().pos())\n _center = _root.screenGeometry(_active_screen).center()\n _frame.moveCenter(_center)\n self.move(_frame.topLeft())", "title": "" }, { "docid": "371012b5ceeecf752331c6cee8cd51f2", "score": "0.7445819", "text": "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center() # get screen resolution and center point\n qr.moveCenter(cp) # \n self.move(qr.topLeft())", "title": "" }, { "docid": "d02f3164df06f575e3e2f489bca8f86f", "score": "0.74301237", "text": "def align_window(self):\n self.parent.update()\n\n # get screen info\n screen_width = self.parent.winfo_screenwidth()\n screen_height = self.parent.winfo_screenheight()\n\n # get window info\n window_width = self.parent.winfo_width()\n window_height = self.parent.winfo_height()\n\n # determine position of the window\n x = screen_width - window_width/2 - 180\n y = screen_height/2 - window_height/2\n\n # move the window to determined position\n self.parent.geometry('+%d+%d' % (x, y))", "title": "" }, { "docid": "e76197a9315d2cd00c67fb1f45031e48", "score": "0.73379296", "text": "def center(handle, w=500, h=450):\n sw = handle.winfo_screenwidth()\n sh = handle.winfo_screenheight()\n\n x = (sw - w) / 2\n y = (sh - h) / 2\n handle.geometry('%dx%d+%d+%d' % (w, h, x, y))\n handle.focus()", "title": "" }, { "docid": "df6c6ae39fbb8dbb5665791cc37d7a2b", "score": "0.7308243", "text": "def center(widget,relx,rely):\n widget.withdraw() # Remain invisible while we fig+ure out the geometry \n widget.update_idletasks() # Actualize geometry information \n m_width = widget.winfo_screenwidth()\n m_height = widget.winfo_screenheight() \n w_width = widget.winfo_reqwidth() \n w_height = widget.winfo_reqheight()\n x = (m_width - w_width) * relx\n y = (m_height - w_height) * rely\n widget.geometry(\"+%d+%d\" % (x,y)) \n widget.deiconify()", "title": "" }, { "docid": "70dde9aed48e875377bcfcad47acba10", "score": "0.7297512", "text": "def _center(self):\n qr = self.frameGeometry()\n cp = QtWidgets.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "title": "" }, { "docid": "369c8b9530cc74f4002443cbb300e489", "score": "0.72566235", "text": "def center(self):\r\n self.showNormal()\r\n self.resize(QDesktopWidget().screenGeometry().width() // 1.25,\r\n QDesktopWidget().screenGeometry().height() // 1.25)\r\n qr = self.frameGeometry()\r\n qr.moveCenter(QDesktopWidget().availableGeometry().center())\r\n return self.move(qr.topLeft())", "title": "" }, { "docid": "7fe6dcbdd46a00e26df77a92e7bae584", "score": "0.72289217", "text": "def 
center_window(window, resize=True):\n screen = QGuiApplication.screenAt(QCursor.pos())\n\n if resize:\n screen_size = screen.size()\n max_width = screen_size.width()\n max_height = screen_size.height()\n\n window.resize(max_width * 0.8, max_height * 0.8)\n\n window.move(screen.geometry().center() - window.frameGeometry().center())", "title": "" }, { "docid": "25bb7977fec7d11fa007471e56a56a7e", "score": "0.7194875", "text": "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "title": "" }, { "docid": "25bb7977fec7d11fa007471e56a56a7e", "score": "0.7194875", "text": "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "title": "" }, { "docid": "25bb7977fec7d11fa007471e56a56a7e", "score": "0.7194875", "text": "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "title": "" }, { "docid": "25bb7977fec7d11fa007471e56a56a7e", "score": "0.7194875", "text": "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "title": "" }, { "docid": "fa855cec61d9050a6bc2654736866e2b", "score": "0.7147751", "text": "def center(self):\n self.translate(-1*self.com)", "title": "" }, { "docid": "e9412a5089ccc42c202043ed03d01868", "score": "0.71378535", "text": "def center(self):\n\t\tqRect = self.frameGeometry()\n\t\tcenterPoint = QDesktopWidget().availableGeometry().center()\n\t\tqRect.moveCenter(centerPoint)\n\t\tself.move(qRect.topLeft())", "title": "" }, { "docid": "9d5b14fb40a9bbf381dcac2ba60cfd93", "score": "0.7021262", "text": "def center_ball(self):\r\n\t\tself.centerx=self.screen_rect.centerx\r\n\t\tself.centery=self.screen_rect.centery", "title": "" }, { "docid": "776829129fbd676c74b5fb205f077300", "score": "0.6984075", "text": "def setCenter(self, value):\n self._viewCenter = b2Vec2(*value)\n self._viewOffset = self._viewCenter - self.screenSize / 2\n self.window.graphicsView.centerOn(*self._viewCenter)", "title": "" }, { "docid": "eef45dc7689335e3b622ef7a4bb49573", "score": "0.6918894", "text": "def center(master, win, width, height):\n # get screen width and height\n ws = master.winfo_screenwidth() # width of the screen\n hs = master.winfo_screenheight() # height of the screen\n\n # calculate x and y coordinates for the Tk root window\n x = (ws / 2) - (width / 2)\n y = (hs / 2) - (height / 2)\n\n return width, height, x, y", "title": "" }, { "docid": "b95e1abcb1053ecb6644b7f1e93aff4f", "score": "0.68476415", "text": "def center_on_screen(widget):\r\n desktop = QtGui.QApplication.instance().desktop()\r\n rect = desktop.screenGeometry(QtGui.QCursor().pos())\r\n cy = rect.height()//2\r\n cx = rect.width()//2\r\n widget.move(cx - widget.width()//2, cy - widget.height()//2)", "title": "" }, { "docid": "8da09214a9f5090dfa9e74e9f7d57656", "score": "0.68216664", "text": "def center_ship(self):\n\t\tself.center= self.screen_rect.centerx\n\t\tself.bottom= self.screen_rect.bottom", "title": "" }, { "docid": "cb75846baa05e651057b7cf7fd49bc0a", "score": "0.67556983", "text": "def bringFrontCenter(self):\n\t\tif self.window:\n\t\t\tNSLog(\"window is present\")\n\t\t\tself.window.becomeMainWindow()\n\t\t\tself.window.center()\n\t\t\t# needed so the window can show over the 
loginwindow\n\t\t\tself.window.setCanBecomeVisibleWithoutLogin_(True)\n\t\t\tself.errorWindow.setCanBecomeVisibleWithoutLogin_(True)\n\t\t\tself.authSheetWindow.setCanBecomeVisibleWithoutLogin_(True)\n\t\t\tself.window.setLevel_(NSScreenSaverWindowLevel - 1)\n\t\t\tself.window.orderFrontRegardless()", "title": "" }, { "docid": "89eb863d66f02897861eeac32e418d9d", "score": "0.6749471", "text": "def set_window_start(root):\n\tws = root.winfo_screenwidth() # gets width of screen\n\thw = root.winfo_screenheight() # gets height of screen\n\t\n\t# I want it to open in the bottom left\n\tx = 3.0*ws/4\n\ty = ws/3.0\n\t\n\t# set (width, height, x, y)\n\troot.geometry( '%dx%d+%d+%d' % (300, 300, x, y) )", "title": "" }, { "docid": "819df5b06fefc33b272d262940fcc9c6", "score": "0.6737455", "text": "def sc_center(Window_size: tuple, Inverse: bool = False, \\\r\n center: tuple = ini_center, win: object = Window):\r\n if Inverse == False:\r\n win.size = Window_size\r\n variation_x = win.center[0] - center[0]\r\n variation_y = win.center[1] - center[1]\r\n win.left -= variation_x\r\n win.top -= variation_y\r\n \r\n if Inverse == True:\r\n variation_x = win.center[0] - center[0]\r\n variation_y = win.center[1] - center[1]\r\n win.left += variation_x\r\n win.top += variation_y\r\n win.size = Window_size", "title": "" }, { "docid": "017fe70074d867c3461c416ea5ecc7ff", "score": "0.6720759", "text": "def center_ship(self):\r\n self.center = self.screen_rect.centerx", "title": "" }, { "docid": "06febd4f073a0d937834f78f834e06f5", "score": "0.6693505", "text": "def position ( self, ui ):\n view = ui.view\n window = ui.control\n\n # Set up the default position of the window:\n parent = window.GetParent()\n if parent is None:\n window.Centre( wx.BOTH )\n else:\n position_near( parent, window, offset_y = -30 )\n \n # Calculate the correct width and height for the window:\n cur_width = window.winfo_width()\n cur_height = window.winfo_height()\n width = view.width\n height = view.height\n \n if width < 0.0:\n width = cur_width\n elif width <= 1.0:\n width = int( width * screen_dx )\n else:\n width = int( width )\n \n if height < 0.0:\n height = cur_height\n elif height <= 1.0:\n height = int( height * screen_dy )\n else:\n height = int( height )\n \n # Calculate the correct position for the window:\n x = view.x\n y = view.y\n \n if x < -99999.0:\n x = (screen_dx - width) / 2\n elif x <= -1.0:\n x = screen_dx - width + int( x ) + 1\n elif x < 0.0:\n x = screen_dx - width + int( x * screen_dx )\n elif x <= 1.0:\n x = int( x * screen_dx )\n else:\n x = int( x )\n \n if y < -99999.0:\n y = (screen_dy - height) / 2\n elif y <= -1.0:\n y = screen_dy - height + int( y ) + 1\n elif x < 0.0:\n y = screen_dy - height + int( y * screen_dy )\n elif y <= 1.0:\n y = int( y * screen_dy )\n else:\n y = int( y )\n \n # Position and size the window as requested:\n window.geometry( '%dx%d+%d+%d' % ( width, height, x, y ) )", "title": "" }, { "docid": "7224f6c3a09e9cfcb151f31e30e2548a", "score": "0.6666444", "text": "def center_ship(self):\n\t\tself.center = self.screen_rect.centerx", "title": "" }, { "docid": "c51981ade551ce3d92a2ad4ea44bd909", "score": "0.6658568", "text": "def update_window(self):\n screen_area = HOST_APP.proc_screen_workarea\n scale_factor = 1.0 / HOST_APP.proc_screen_scalefactor\n top = left = width = height = 0\n\n window_rect = revit.get_window_rectangle()\n\n # set width and height\n width = window_rect.Right - window_rect.Left\n height = self.user_height\n\n top = window_rect.Top\n # in maximized window, the top 
might be off the active screen\n # due to windows thicker window frames\n # lets cut the height and re-adjust the top\n top_diff = abs(screen_area.Top - top)\n if 10 > top_diff > 0 and top_diff < height:\n height -= top_diff\n top = screen_area.Top\n\n left = window_rect.Left\n # in maximized window, Left also might be off the active screen\n # due to windows thicker window frames\n # let's fix the width to accomodate the extra pixels as well\n left_diff = abs(screen_area.Left - left)\n if 10 > left_diff > 0 and left_diff < width:\n # deduct two times the left negative offset since this extra\n # offset happens on both left and right side\n width -= left_diff * 2\n left = screen_area.Left\n\n self.Top = top * scale_factor\n self.Left = left * scale_factor\n self.Width = width * scale_factor\n self.Height = height", "title": "" }, { "docid": "293b7085262b8b8fc20c76b8ecec3298", "score": "0.66558135", "text": "def center_ship(self):\n self.center = self.screen_rect.centerx", "title": "" }, { "docid": "293b7085262b8b8fc20c76b8ecec3298", "score": "0.66558135", "text": "def center_ship(self):\n self.center = self.screen_rect.centerx", "title": "" }, { "docid": "293b7085262b8b8fc20c76b8ecec3298", "score": "0.66558135", "text": "def center_ship(self):\n self.center = self.screen_rect.centerx", "title": "" }, { "docid": "fbc5e67a3d9f1aff8048198f786b8001", "score": "0.66218907", "text": "def place_window(self, width=400, height=300):\n w = width\n h = height\n\n # get screen width and height\n ws = self.winfo_screenwidth() # width of the screen\n hs = self.winfo_screenheight() # height of the screen\n\n x = (ws / 2) - (w / 2)\n y = (hs / 2) - (h / 2)\n\n # set the dimensions of the screen\n # and where it is placed\n self.geometry(\"%dx%d+%d+%d\" % (w, h, x, y))", "title": "" }, { "docid": "d784b48449effe9dea7801dacd70c5cc", "score": "0.6614368", "text": "def update_center(self):\r\n \r\n pass", "title": "" }, { "docid": "0a2036c3eca30ab636fdb2dcd0755eab", "score": "0.6613791", "text": "def updateCenter(self):\r\n self.viewOffset = self.viewCenter - self.screenSize/2\r\n\r\n self.debugDraw._setValues(self.viewZoom, self.viewCenter, self.viewOffset, self.screenSize.x, self.screenSize.y)", "title": "" }, { "docid": "cf2f49fa2982edb87c8797ce929a6897", "score": "0.65797716", "text": "def center_mouse() -> None:\n rect = ui.active_window().rect\n center = (rect.x + round(rect.width / 2), rect.y + round(rect.height / 2))\n actions.mouse_move(*center)", "title": "" }, { "docid": "5042901b8abfe0af229e4e9e761ca985", "score": "0.65168935", "text": "def Fullscreen(self):\n\t\tself.width = self.display_resolution.current_w\n\t\tself.height = self.display_resolution.current_h\n\n\t\tself.item1 = menuItem(\"minimize\",self.width, self.height)\n\t\tself.item2 = menuItem(\"start\",self.width, self.height - 2*self.item1.height)\n\t\tself.item3 = menuItem(\"quit\",self.width, self.height + 2*self.item1.height)\n\n\t\tself.boxy = Boxy()\n\t\tself.boxy = Boxy((255,255,255), self.item1.x - 2*self.boxy.width, centerHeight(self.item2.height, self.boxy.height) + self.item2.y)", "title": "" }, { "docid": "142892e31fa0c29898c41d3abf3ba335", "score": "0.650492", "text": "def centrar(self, x, y):\n self.rect.center = (x,y)", "title": "" }, { "docid": "132f432af86bbc508e85a84dabe708cd", "score": "0.64783156", "text": "def update_center(self):\n self._update(\"update_center\")", "title": "" }, { "docid": "035faf8d51f20406e4bbad45aed154f2", "score": "0.647459", "text": "def move_window(self):", "title": "" }, { "docid": 
"7cf2121bc8ba963d16ed8e1e3b72a7fc", "score": "0.64682335", "text": "def center_ship(self):\r\n\t\tself.center = self.screen_rect.centerx\r\n\t\t# ~ self.rect.midbottom = self.screen_rect.midbottom\r\n\t\t# ~ self.x = float(self.rect.x)\r", "title": "" }, { "docid": "cd71756f72ff5c3ea8aba62ec99e5266", "score": "0.6440379", "text": "def update_dimensions(self):\n # get the window size\n window_rect = win32gui.GetWindowRect(self.hwnd)\n isFullScreen = win32gui.GetWindowRect(self.hwnd) == full_screen_rect\n self.isFullScreen = isFullScreen\n self.isTrueFullscreen = is_full_screen()\n self.w = window_rect[2] - window_rect[0]\n self.h = window_rect[3] - window_rect[1]\n\n if not isFullScreen:\n # account for the window border and titlebar and cut them off\n border_pixels = 8\n titlebar_pixels = 30\n self.w = self.w - (border_pixels * 2)\n self.h = self.h - titlebar_pixels - border_pixels\n self.cropped_x = border_pixels\n self.cropped_y = titlebar_pixels\n\n # images into actual screen positions\n self.offset_x = window_rect[0] + self.cropped_x\n self.offset_y = window_rect[1] + self.cropped_y\n\n else:\n self.offset_x, self.offset_y = 0, 0\n self.cropped_x, self.cropped_y = 0, 0", "title": "" }, { "docid": "20043502f14d6ff55300211293dee7ff", "score": "0.6421751", "text": "def center_ship(self):\n\t\tself.rect.midbottom=self.screen_rect.midbottom\n\t\tself.x=float(self.rect.x)", "title": "" }, { "docid": "7637cd8dd310dc4ad7a545c75bb8b0d1", "score": "0.64083266", "text": "def mwindow(self) -> None:\n self.setFixedSize(898, 422)\n self.setWindowTitle(\"Diabetes Prediction\")\n self.show()", "title": "" }, { "docid": "f4ee51820918c900d7089d99163dfeb8", "score": "0.6343348", "text": "def center_ship(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)", "title": "" }, { "docid": "a22138d20e921b2b28a9fddc986ed1aa", "score": "0.6323974", "text": "def center_ship(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)", "title": "" }, { "docid": "a22138d20e921b2b28a9fddc986ed1aa", "score": "0.6323974", "text": "def center_ship(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)", "title": "" }, { "docid": "a22138d20e921b2b28a9fddc986ed1aa", "score": "0.6323974", "text": "def center_ship(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)", "title": "" }, { "docid": "a22138d20e921b2b28a9fddc986ed1aa", "score": "0.6323974", "text": "def center_ship(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)", "title": "" }, { "docid": "a22138d20e921b2b28a9fddc986ed1aa", "score": "0.6323974", "text": "def center_ship(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)", "title": "" }, { "docid": "37254e96a0ca94800ed64bbff3ec3e35", "score": "0.6309343", "text": "def center_view_on_player(self):\n self.view_left = self.player.center_x - self.screen_width / 2\n self.view_bottom = self.player.center_y - self.screen_height / 2\n self.update_viewport()\n self.scroll_viewport()", "title": "" }, { "docid": "c7b8a991e51fd1bf757df595755e2e0f", "score": "0.62983125", "text": "def enlargeWindow(self):\n\t\t#\t\tNSLog(\"Main window size: {}\".format(self.window.frame().size))\n\t\twinRect = self.window.frame()\n\t\twinRect.size.height = 480\n\t\twinRect.origin.y = winRect.origin.y - 256\n\t\t#\t\tNSLog(\"Setting new main window size: {}\".format(winRect))\n\t\tself.window.setFrame_display_(winRect, True)", 
"title": "" }, { "docid": "894e87f79182b72e4973662c3f11a7c2", "score": "0.62930137", "text": "def setupWindow(self):\n self.win = visual.Window(self.window_dims, allowGUI=False, fullscr=self.fullscreen, \n monitor='testMonitor', units='deg') \n self.win.setColor('black')\n self.win.flip()\n self.win.flip()", "title": "" }, { "docid": "b7993ec81b773415fbff160fc0ba295a", "score": "0.6288775", "text": "def centerPage(self):\n top, bottom = self.yview()\n size = bottom - top\n if size==0: size=0.4\n middle = size * 0.4\n self.yview('moveto', middle)\n #print top, bottom\n #print middle\n return", "title": "" }, { "docid": "3f02eae09c11c06ead9c05588945de66", "score": "0.62675095", "text": "def setCenter(self, newCenter):\n self.center = newCenter", "title": "" }, { "docid": "22f098cc337aefbe2a7cbd4e9152196b", "score": "0.6261226", "text": "def center(self):\r\n\t\treturn self", "title": "" }, { "docid": "ef770eb683b1d7eb7652da86c1ba61e9", "score": "0.62596244", "text": "def center(self, location: ghidra.program.util.ProgramLocation) -> None:\n ...", "title": "" }, { "docid": "7f8ff8c99913c72f1bcb42ed6fe6cfab", "score": "0.6228856", "text": "def position_window(window):\n pos = QtGui.QCursor.pos()\n window.move(pos.x(), pos.y())", "title": "" }, { "docid": "c8b74d323bb622f5ed3772eab740b5fa", "score": "0.62265974", "text": "def center(self):\n if self._mode == \"vertical\":\n margin = self._margins[0]\n gap = self._gaps[0]\n y = None\n if self._align == \"center\":\n x=\"auto\"\n elif self._align == \"left\":\n x=self._element.get_storer_rect().left + self._margins[0]\n elif self._align == \"right\":\n x=self._element.get_storer_rect().right - self._margins[0]\n elif self._mode == \"horizontal\":\n margin = self._margins[1]\n gap = self._gaps[1]\n x = None\n if self._align == \"center\":\n y=\"auto\"\n elif self._align == \"top\":\n y=self._element.get_storer_rect().top + self._margins[0]\n elif self._align == \"bottom\":\n y=self._element.get_storer_rect().bottom - self._margins[0]\n size=store(frame=self._element,\n elements=self._elements,\n mode=self._mode,\n x=x,\n y=y,\n margin=margin,\n gap=gap,\n align=self._align)", "title": "" }, { "docid": "1054bb296adbfe9ace7edb2581942013", "score": "0.6208886", "text": "def draw(self, win, center):", "title": "" }, { "docid": "24c9a2e106618fdc58d32be576711bcf", "score": "0.62028426", "text": "def display_win(self) -> None:\n\n self.top_panel.win_label.grid(row=0, columnspan=10)", "title": "" }, { "docid": "b1f08957226628c87db15418d89f5643", "score": "0.6200582", "text": "def centre_on_screen(widget: QWidget):\n\n # Get the widget's frame size.\n widget_frame = widget.frameGeometry()\n\n # Get the centre of the screen.\n screen_centre = QDesktopWidget().availableGeometry().center()\n\n # Calculate where the frame should be placed.\n widget_frame.moveCenter(screen_centre)\n\n # Move the widget to the correct place.\n widget.move(widget_frame.topLeft())", "title": "" }, { "docid": "10f666f06ecf333b9697225ec60897a7", "score": "0.615979", "text": "def wrap(self, screen_width, screen_height):\r\n #check if it is off the left side\r\n if self.center.x < 0:\r\n self.center.x = screen_width\r\n #check if it is off the right side\r\n if self.center.x > screen_width:\r\n self.center.x = 0\r\n #check if it is off the top\r\n if self.center.y > screen_height:\r\n self.center.y = 0\r\n #check if it is off the bottom\r\n if self.center.y < 0:\r\n self.center.y = screen_height", "title": "" }, { "docid": "6bcbfe8e5921dfcd41a444af65208063", "score": "0.6152299", 
"text": "def center(self, surface, centerx=True, centery=True, xajout=0, yajout=0):\n\t\tif centerx and centery:\n\t\t\treturn surface.get_rect(centerx=self.screen.get_width() / 2 + xajout, centery=self.screen.get_height() / 2 + yajout)\n\t\tif centerx:\n\t\t\treturn surface.get_rect(centerx=self.screen.get_width() / 2 + xajout)\n\n\t\tif centery:\n\t\t\treturn surface.get_rect(centery=self.screen.get_width() / 2 + xajout)", "title": "" }, { "docid": "4576a0ed37a7698571ffefa8fd99cd94", "score": "0.6127658", "text": "def LayoutDialog(self, centreFlags=wx.BOTH):", "title": "" }, { "docid": "4d3671a52f03abf30be6a3927df9eb20", "score": "0.61192656", "text": "def collapseWindow(self):\n\t\t#\t\tNSLog(\"Main window size: {}\".format(self.window.frame().size))\n\t\twinRect = self.window.frame()\n\t\twinRect.size.height = 180\n\t\twinRect.origin.y = winRect.origin.y + (\n\t\t\t\tself.window.frame().size.height - 180 )\n\t\t\t\t#\t\tNSLog(\"Setting new main window size: {}\".format(winRect))\n\t\tself.window.setFrame_display_(winRect, True)", "title": "" }, { "docid": "97926c64ee824f63a31c044d6e4126e3", "score": "0.6070533", "text": "def geometry(self):\n self.root.geometry(\n f\"{self.playwidth*self.boxsize}x{(self.playheight + 2)*self.boxsize}+400+150\")\n self.root.resizable(False, False)", "title": "" }, { "docid": "f6725c71597f68ea5892ea28ad0f116a", "score": "0.60663486", "text": "def setWindowSize(self, x_size=800, y_size=600):\n \n self.window.resize(x_size, y_size)", "title": "" }, { "docid": "30a44841bd6ca1fb736bb2e580c0ef44", "score": "0.60618573", "text": "def main(cls):\n\n tk.NoDefaultRoot()\n root = tk.Tk()\n app = cls(root)\n # auto frame resize\n app.grid(sticky=\"NSEW\")\n root.grid_columnconfigure(0, weight=1)\n root.grid_rowconfigure(0, weight=1)\n root.grid_columnconfigure(1, weight=1)\n root.grid_rowconfigure(1, weight=1)\n root.grid_columnconfigure(2, weight=1)\n root.grid_rowconfigure(2, weight=1)\n # make adjustable in x not y\n root.resizable(True, False)\n # run application until user exit\n root.mainloop()", "title": "" }, { "docid": "90cf5b095735759888150d48960bbe34", "score": "0.60437715", "text": "def center(self, center):\n\n self._center = center", "title": "" }, { "docid": "886ca718b27dba80b4a11dca9d2d35ab", "score": "0.6043047", "text": "def center(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "205e5a85463cd27c6197dc8b834d6bfd", "score": "0.5988926", "text": "def reset_location(self, screen: pygame.Surface) -> None:\r\n self.rect.left = (screen.get_width() - self.rect.width) / 2\r\n self.rect.top = (screen.get_height() - self.rect.height) / 2", "title": "" }, { "docid": "2bb2a30847998b804ba48a52a2b4d626", "score": "0.59451467", "text": "def start(self):\n self._root.grid_rowconfigure(0, weight=1)\n self._root.grid_columnconfigure(0, weight=1)\n\n center_frame = Frame(\n self._root,\n width=self._root_width,\n heigh=self._root_height - self._bottom_height,\n )\n\n bottom_frame = Frame(\n self._root,\n bg=\"black\",\n width=self._root_width,\n height=self._bottom_height,\n )\n\n center_frame.grid(\n row=0,\n column=0,\n sticky=\"nsew\"\n )\n\n bottom_frame.grid(\n row=1,\n column=0,\n sticky=\"nsew\"\n )\n\n bottom_frame.grid_rowconfigure(1)\n bottom_frame.grid_columnconfigure(\n [0, 1, 2, 3, 4, 5, 6, 7], minsize=(self._root_width/8))\n\n bottom_frame_headlines = [\"Intro\", \"Options\", \"Start\", \"Exit\"]\n commands = [lambda: self._show_intro_view(center_frame),\n lambda: self._show_options_view(center_frame),\n lambda: 
self._show_game_view(center_frame),\n self._quit]\n\n for j in range(0, len(bottom_frame_headlines)):\n button = Button(\n bottom_frame,\n text=bottom_frame_headlines[j],\n compound=\"center\",\n width=12,\n height=1,\n background=\"red\",\n activebackground=\"red\",\n font=self.fonts.small_text_font,\n command=commands[j]\n )\n\n button.grid(\n row=0,\n column=j,\n padx=12,\n pady=12\n )\n\n self._show_opening_view(center_frame)", "title": "" }, { "docid": "0392d289092b31ad9c87556a5fe31a54", "score": "0.5944433", "text": "def set_relative_position(self):\n\n # Position window w/ respect to parent window\n mx, my = self.parent.GetTopLevelParent().GetScreenPosition()\n disp_geom = self.get_display(window=self.parent.GetTopLevelParent())\n dw = disp_geom[2]\n dxmin = disp_geom[0]\n dxmax = disp_geom[0] + dw\n dh = disp_geom[3]\n dymin = disp_geom[1]\n dymax = disp_geom[1] + dh\n\n if self.IsFrame():\n adj_x = -50\n adj_y = -50\n elif self.IsDialog():\n adj_x = int(dw * 0.025)\n adj_y = int(dh * 0.025)\n else:\n adj_x = 50\n adj_y = 50\n px = mx + adj_x\n py = my + adj_y\n\n ww, wh = self.GetSize()\n pxmax = px + ww\n pymax = py + wh\n\n # Calculate if window is going out of bounds, and adjust\n if pxmax >= dxmax:\n px = int((dxmin + dw - ww) * 0.9)\n if pymax >= dymax:\n py = int((dymin + dh - wh) * 0.9)\n if px <= dxmin:\n px = 25\n if py <= dymin:\n py = 25\n\n return px, py", "title": "" }, { "docid": "8f4dbb231fc27b89e674e534b86e5d44", "score": "0.59231", "text": "def Minimized(self):\n\t\tself.width = 400\n\t\tself.height = 400\n\n\t\tself.item1 = menuItem(\"fullscreen\",self.width, self.height)\n\t\tself.item2 = menuItem(\"start\",self.width, self.height - 2*self.item1.height)\n\t\tself.item3 = menuItem(\"quit\",self.width, self.height + 2*self.item1.height)\n\n\t\tself.boxy = Boxy()\n\t\tself.boxy = Boxy((255,255,255), self.item1.x - 2*self.boxy.width, centerHeight(self.item2.height, self.boxy.height) + self.item2.y)", "title": "" }, { "docid": "f570a60eae4ab0654eaae8990b6660c6", "score": "0.5922452", "text": "def updateCenter(self, x, y):\n self.Xcenter_old = self.Xcenter\n self.Ycenter_old = self.Ycenter\n self.Xcenter = x\n self.Ycenter = y", "title": "" }, { "docid": "444579c2636bc48639514a8b52148b30", "score": "0.59136033", "text": "def center(self):\n return self.functional.center", "title": "" }, { "docid": "d84a5dc7ce62649553f2f2b82b89b6da", "score": "0.5893992", "text": "def fulltonormal(self):\n if self.controlDock.FullNormSW.isChecked():\n self.MainWindow.showFullScreen()\n self.controlDock.setStretch(*(10,300)) # minimize control dock width\n else:\n self.MainWindow.showNormal()\n self.controlDock.setStretch(*(10,300)) # minimize control dock width", "title": "" }, { "docid": "23ae16b3eaf5317888d150587f42609c", "score": "0.58824813", "text": "def center(self):\n return self._center", "title": "" }, { "docid": "3c800d8734906e8ef484dd96aa4f6854", "score": "0.58794665", "text": "def initUI(self):\n \n self.resize(1024, 512)\n self.center()\n self.setWindowTitle('Py-Paint')\n\n self.createMenuBar() \n self.createToolBar()\n self.show()", "title": "" }, { "docid": "574b5517593c60d987e14d16a271cff9", "score": "0.58590764", "text": "def set_center(self, center_x: int, center_y: int) -> None:\r\n self._x = center_x - self._image.get_width() // 2\r\n self._y = center_y - self._image.get_height() // 2", "title": "" }, { "docid": "15fdbf611dd75250e5fa9274943d454b", "score": "0.58475226", "text": "def update(self):\n super().update()\n if self.center_x < LEFT_LIMIT:\n self.center_x = 
RIGHT_LIMIT\n if self.center_x > RIGHT_LIMIT:\n self.center_x = LEFT_LIMIT\n if self.center_y > TOP_LIMIT:\n self.center_y = BOTTOM_LIMIT\n if self.center_y < BOTTOM_LIMIT:\n self.center_y = TOP_LIMIT", "title": "" }, { "docid": "4393c955b34a610783050d1cc4486909", "score": "0.5829849", "text": "def set_screen(self):\n self.screen = pygame.display.set_mode(self.size, pygame.RESIZABLE)", "title": "" }, { "docid": "90f0740bddf78be51acb1e206ca54a13", "score": "0.58262646", "text": "def set_pos_root_button(self, instance):\n\n if self.anchor == \"right\":\n instance.y = dp(20)\n instance.x = Window.width - (dp(56) + dp(20))", "title": "" }, { "docid": "6cfb7b2ce163c2782236d9577176a640", "score": "0.58252335", "text": "def setCentreCoordinates(self,xcenter,ycenter):\n self.x = xcenter\n self.y = ycenter", "title": "" }, { "docid": "1e9cedc0d7819db57ffaf589ab568876", "score": "0.58119756", "text": "def render_to_window(self):\n window = self.window\n camera = self.camera\n\n y = camera.y\n x = camera.x\n\n x1 = x\n x2 = window.width + x\n y1 = y\n y2 = window.height + y\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(x1, x2, y1, y2)\n\n glClear(GL_COLOR_BUFFER_BIT)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n self.draw_world()\n self.draw_hud()\n\n window.flip()", "title": "" }, { "docid": "3396eb6fae01c0cd781c44167f042ddb", "score": "0.5807685", "text": "def _makeScreen(self):\n\n #Centering the display on screen (taken from pygame FAQ)\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n\n #Starting pygame and setting up the display(game window)\n\n pygame.init()\n gamescreen = pygame.display.set_mode((RES_X, RES_Y))\n pygame.display.set_caption(CAPTION)\n \n return gamescreen", "title": "" }, { "docid": "db386faf10c93eadf27bd745ea1d8b87", "score": "0.5806194", "text": "def main(location, rows, columns, padding, margin):\n\n window = get_active_window()\n width, height = get_new_size(columns, rows, padding)\n\n resize(window, width - margin * 2, height - margin * 2)\n\n left, right, top, bottom = get_border()\n\n r = location[0] * (height + top + bottom) + padding[0] + margin\n c = location[1] * (width + left + right) + padding[3] + margin\n\n move(window, r, c)", "title": "" }, { "docid": "0ebae1f9805d26cbc74050259430e583", "score": "0.57883084", "text": "def center_inky(self):\n self.rect.centery = 250\n self.rect.centerx = 300\n self.centerx = self.rect.x\n self.rect.centerx = self.rect.x\n self.centery = self.rect.y\n self.rect.centery = self.rect.y", "title": "" }, { "docid": "ac04d15cdd170866f11ac84cfa97e1f9", "score": "0.5786315", "text": "def center(self):\n return self.pos", "title": "" }, { "docid": "7b3731be38e07d33f6edbd12c69bec3c", "score": "0.5784496", "text": "def show_main_screen(self):\n self.title_frame.pack(side=TOP)\n self.title_frame.pack_propagate(0)\n self.title.pack()\n self.intro_text.pack(side=TOP)\n self.buttons_frame.pack()\n self.select_button.pack(side=LEFT, padx=50)\n self.multi_button.pack(side=LEFT, padx=50)", "title": "" }, { "docid": "d3024ab4d8e746468694bedfcd272f69", "score": "0.5775383", "text": "def resize(widthWindow, heightWindow):\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity()", "title": "" }, { "docid": "050ae135fd359b2acb55c92d62b45fe4", "score": "0.57698506", "text": "def frame_center(self):\n return self.frame.get_center()", "title": "" }, { "docid": "ac6d306faee15e406690fd3eac54e14a", "score": "0.5763159", "text": "def display(self):\n self.window.clear()\n \n # If the screen is 
big enough print all relevant information\n if self.max_columns > 20 and self.max_rows > 10:\n self.print_buffer()\n self.print_title_bar()\n self.print_message()\n self.print_coords()\n self.print_menu()\n \n self.scroll() # Scroll the screen if needed\n self.set_cursor() # Set the cursor coordinates\n self.window.move(self.row, self.column) # Place the cursor on screen\n \n self.window.noutrefresh()", "title": "" } ]
495cf48c5e18e12b7bfaf87cc50f1428
Wisdom AI engine. This extracts insights from documents and returns key points, abstracts, wordclouds, and a PDF viewer if possible.
[ { "docid": "932e0ca123feee64a87a4865f30a383f", "score": "0.5761071", "text": "def wisdom(search_me, source, pdfurl, userid):\n ### source needs to be name of data source (\"arxiv\", \"google scholar\", \"doaj\")\n search_me = search_me.strip()\n # check if pdfurl has been found before\n pdf = db_summaries.find_one({\"url\": pdfurl})\n if pdf:\n text = pdf.get('text')\n summary = pdf.get('summary')\n topics = pdf.get('topics')\n # update in db if data is 1 days or older\n last_updated = datetime.utcnow() - pdf.get(\"last_updated\")\n last_updated_diff = last_updated.days\n if last_updated_diff > 1:\n search_term = db_search_terms.find_one({\"value\": search_me.lower()})\n search_id = search_term.get(\"_id\")\n data = {\"search_id\": search_id,\n \"url\": pdfurl, \"source\": source, \"text\": text,\n \"summary\": summary, \"topics\": topics, \"last_updated\": datetime.utcnow()}\n db_summaries.update({\"url\": pdfurl}, {\"$set\": data})\n else:\n pass\n else:\n text = wisdomaiengine.pdfdocumentextracter(pdfurl)\n summary = wisdomaiengine.summarisepdfdocument(text)\n topics = wisdomaiengine.wordcloud(search_me, text)\n if topics is None:\n topics = ['No Topics Found']\n # write data to arxiv collection\n search_term = db_search_terms.find_one({\"value\": search_me.lower()})\n search_id = search_term.get(\"_id\")\n data = {\"search_id\": search_id,\n \"url\": pdfurl,\n \"source\": source,\n \"text\": text,\n \"summary\": summary,\n \"topics\": topics,\n \"last_updated\": datetime.utcnow()}\n x = db_summaries.insert(data, check_keys=False)\n # return json\n summaryjson = jsonify(wisdomtopics=topics, wisdomsummary=summary)\n return summaryjson", "title": "" } ]
[ { "docid": "d04f78c317274e362e0222600c00b016", "score": "0.5694965", "text": "def main():\n logging.basicConfig(level=logging.DEBUG)\n custom_embedding = True\n\n # Download embeddings'\n if custom_embedding:\n embedding_path = '../data/custom_embedding.pkl'\n embedding_index_path = '../data/custom_vocab_index.pkl'\n logging.info('Pulling custom embedding from: {}, and custom vocab from: {}'.format(embedding_path, embedding_index_path))\n embedding_matrix = pickle.load(open(embedding_path, 'rb'))\n embedding_index_lookup = pickle.load(open(embedding_index_path, 'rb'))\n\n else:\n logging.warning('Downloading embedding. If downloading for the first time, this make take 5-10 minutes.')\n embedding_url = 'https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz'\n embedding_path = '~/nlp_example/'\n embedding_filename = 'GoogleNews-vectors-negative300.bin.gz'\n lib.download_file(embedding_url, embedding_path, embedding_filename)\n\n # Unpack embedding\n model = gensim.models.KeyedVectors.load_word2vec_format(embedding_path + '/' + embedding_filename, binary=True)\n embedding_matrix = model.syn0\n embedding_index_lookup = dict([(k, v.index) for k, v in model.vocab.items()])\n\n # Create thesaurus\n thesaurus = Thesaurus(embedding_matrix, embedding_index_lookup)\n\n # Find nearest neighbors for examples\n print(thesaurus.synonyms('day'))\n print(thesaurus.synonyms('top'))\n print(thesaurus.synonyms('bottom'))\n print(thesaurus.synonyms('cat'))\n print(thesaurus.synonyms('grown'))\n\n\n pass", "title": "" }, { "docid": "5414d2cceb439a2c817b0dd22dec84e6", "score": "0.5648464", "text": "def analyse_presentation(pres_name:str, verbose=False) -> Dict[str, Any]:\n prs = Presentation(pres_name)\n if verbose:\n debug_dump(prs)\n (layouts_interactive, layouts) = count_layouts(prs)\n interaction_stars = min(layouts_interactive, 5)\n topic_stars = ([1,1,3,5,5,4,3,2,1]+[1]*100)[layouts[\"Section Header\"]]\n\n pres_properties = get_presentation_properties(prs)\n\n word_count = get_word_counts(prs.slides)\n words_per_slide = sum(word_count) / len(word_count)\n # ideal words/slide is 30-40 (5 stars)\n text_stars = calculate_text_stars(word_count)\n # print(\"word counts:\", word_count)\n\n # Create a list of warnings about very text-heavy slides\n heavy_warnings = []\n for slide, words in enumerate(word_count):\n if words > MAX_WORDS_PER_SLIDE:\n heavy_warnings.append(f\"WARNING: slide {slide} has {words} words!\")\n\n\n slides = get_slide_analytics(prs.slides)\n print(slides)\n result = {\n\n \"presentation_rating_stars_interaction\": interaction_stars,\n \"presentation_rating_stars_section\": topic_stars,\n \"presentation_rating_stars_accessibility\": 3, # not implemented yet!\n \"presentation_rating_stars_text\": text_stars,\n \"presentation_count_slide\": len(prs.slides),\n \"presentation_count_layout\": layouts, # dictionary that maps layout name to count\n \"presentation_total_words\": words_per_slide, # a float\n \"presentation_warning_text_heavy\": heavy_warnings, # a list of warning strings\n \"presentation_data_slides\": slides, # a list of slides and analytics\n \"filename\": pres_name, # TODO: strip any Path and just return file name?\n \"name\": \"ICT999\",\n \"description\": \"Introduction to ICT\"\n }\n\n return result", "title": "" }, { "docid": "c0014dd9244d3b08e26a41c8651af12d", "score": "0.5442001", "text": "def run(self):\n self.tf_idf_scores = self.tf_idf()\n self.word_scores = self.create_word_score()\n self.keyword_scores = self.create_keyword_score()\n 
self.text_ranks = self.create_text_rank()\n self.pos_scores = {}\n self.z_area_scores = {}\n self.create_pos_and_area_score()\n self.all_scores = self.weight_distribution()\n self.all_scores_dict = self.get_all_scores_dict()\n return self.get_keywords()", "title": "" }, { "docid": "403a0a54c67e379424a9698052881df0", "score": "0.54035425", "text": "def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")", "title": "" }, { "docid": "d625ed60e5b26a3d637f1ce533c50d0d", "score": "0.53915334", "text": "def engine():\r\n document.add_heading('Engine details', 1)\r\n\r\n engine_metrics = ['customProperties','listenerPorts','autosaveInterval', 'tableFilesDirectory', 'genericUndoBufferMaxSize', 'documentTimeout','documentDirectory',\r\n 'allowDataLineage', 'qrsHttpNotificationPort', 'standardReload',\r\n 'workingSetSizeLoPct', 'workingSetSizeHiPct', 'workingSetSizeMode','cpuThrottlePercentage', 'maxCoreMaskPersisted', 'maxCoreMask',\r\n 'maxCoreMaskHiPersisted', 'maxCoreMaskHi','objectTimeLimitSec', 'exportTimeLimitSec', 'reloadTimeLimitSec',\r\n 'hyperCubeMemoryLimit', 'exportMemoryLimit', 'reloadMemoryLimit', 'createSearchIndexOnReloadEnabled', 'hostname',\r\n 'globalLogMinuteInterval','auditActivityLogVerbosity','auditSecurityLogVerbosity','serviceLogVerbosity','systemLogVerbosity','performanceLogVerbosity',\r\n 'qixPerformanceLogVerbosity','auditLogVerbosity','sessionLogVerbosity','trafficLogVerbosity']\r\n\r\n enginenodes = get_qlik_sense.get_engine()\r\n num_of_engines = len(enginenodes)\r\n num_of_engine_metrics = len(engine_metrics)\r\n table = document.add_table(rows=num_of_engine_metrics+1, cols=num_of_engines+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item in range(0, num_of_engines):\r\n row.cells[item+1].text = enginenodes[item][36]\r\n for item in range(num_of_engine_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(engine_metrics[item])\r\n for enginenode in range(num_of_engines):\r\n row.cells[enginenode+1].text = str(enginenodes[enginenode][item])\r\n\r\n document.add_page_break()", "title": "" }, { "docid": "6cd69e819f3bb959089daf448964771b", "score": "0.53757584", "text": "def analyze(self, content):\n words = self.remove_stop_words(content)\n words = self.select_relevant_pos_words(words)\n concepts = [self.get_wiki_content(word) for word in words]\n self.create_wordcloud(concepts)", "title": "" }, { "docid": "c2a7b6f5798d9a40ee7787daab82aa32", "score": "0.52914554", "text": "def main():\r\n\tlang = get_arguments()\r\n\twiki_analyzer(lang)", "title": "" }, { "docid": "ae108b8342048aef56aaa979ce64d207", "score": "0.524891", "text": "def analyse(self):\n pass", "title": "" }, { "docid": "ed0ae4bc0fa46fb9d01fdf38b5c22a1c", "score": "0.5186837", "text": "def analyse_document(dom, arguments):\n model = dom.getElementsByTagName(\"model\")[0]\n return analyse_model(model, arguments)", "title": "" }, { "docid": "976be3cf0892e1e404a68421e4097787", "score": "0.5137216", "text": "def phonology(request):\n\n perspective_cid = request.params.get('perspective_client_id')\n perspective_oid = request.params.get('perspective_object_id')\n\n # Checking if we have limits on number of computed results.\n\n limit = (None if 'limit' not in request.params else\n 
int(request.params.get('limit')))\n\n limit_exception = (None if 'limit_exception' not in request.params else\n int(request.params.get('limit_exception')))\n\n limit_no_vowel = (None if 'limit_no_vowel' not in request.params else\n int(request.params.get('limit_no_vowel')))\n\n limit_result = (None if 'limit_result' not in request.params else\n int(request.params.get('limit_result')))\n\n # TODO: get perspective's translation and language it belongs to.\n\n # We get lexical entries of this perspective with markup'ed sounds.\n\n Sound = aliased(Entity, name = \"Sound\")\n PublishingSound = aliased(PublishingEntity, name = \"PublishingSound\")\n\n query = DBSession.query(LexicalEntry, Entity, Sound, PublishingEntity, PublishingSound).filter(and_(\n LexicalEntry.parent_client_id == perspective_cid,\n LexicalEntry.parent_object_id == perspective_oid,\n LexicalEntry.marked_for_deletion == False,\n Entity.parent_client_id == LexicalEntry.client_id,\n Entity.parent_object_id == LexicalEntry.object_id,\n Entity.marked_for_deletion == False,\n Entity.additional_metadata.contains({\"data_type\": \"praat markup\"}),\n PublishingEntity.client_id == Entity.client_id,\n PublishingEntity.object_id == Entity.object_id,\n PublishingEntity.published == True,\n PublishingEntity.accepted == True,\n Sound.client_id == Entity.self_client_id,\n Sound.object_id == Entity.self_object_id,\n Sound.marked_for_deletion == False,\n PublishingSound.client_id == Sound.client_id,\n PublishingSound.object_id == Sound.object_id,\n PublishingSound.published == True,\n PublishingSound.accepted == True))\n\n # We process these lexical entries in batches. Just in case, it seems that perspectives rarely have more\n # then several hundred such lexical entries.\n\n exception_counter = 0\n no_vowel_counter = 0\n result_list = list()\n\n for index, row in enumerate(query.yield_per(100)):\n\n markup_url = row.Entity.content\n sound_url = row.Sound.content\n\n cache_key = 'phonology:{0}:{1}:{2}:{3}'.format(\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id)\n\n # Checking if we have cached result for this pair of sound/markup.\n\n cache_result = CACHE.get(cache_key)\n\n if cache_result == 'no_vowel':\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]: no vowels\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we have cached exception, we do the same as with absence of vowels, show its info and\n # continue.\n\n elif isinstance(cache_result, tuple) and cache_result[0] == 'exception':\n exception, traceback_string = cache_result[1:3]\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n '[CACHE {7}]: exception\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n log.debug(traceback_string)\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we actually have the result, we use it and continue.\n\n elif cache_result:\n\n 
result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in cache_result)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]:\\n{8}\\n{9}\\n{10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url, result_string))\n\n result_list.append(cache_result)\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n continue\n\n try:\n # Getting markup, checking for each tier if it needs to be processed.\n\n markup_bytes = urllib.request.urlopen(urllib.parse.quote(markup_url, safe = '/:')).read()\n\n textgrid = pympi.Praat.TextGrid(xmax = 0)\n textgrid.from_file(\n io.BytesIO(markup_bytes),\n codec = chardet.detect(markup_bytes)['encoding'])\n\n tier_data_list = []\n vowel_flag = False\n\n for tier_number, tier_name in textgrid.get_tier_name_num():\n\n raw_interval_list = textgrid.get_tier(tier_number).get_all_intervals()\n raw_interval_seq_list = [[]]\n\n # Splitting interval sequence on empty intervals.\n\n for raw_index, interval in enumerate(raw_interval_list):\n\n if len(interval[2].strip()) <= 0:\n if len(raw_interval_seq_list[-1]) > 0:\n raw_interval_seq_list.append([])\n\n else:\n raw_interval_seq_list[-1].append((raw_index, interval))\n\n if len(raw_interval_seq_list[-1]) <= 0:\n del raw_interval_seq_list[-1]\n\n # Selecting interval sequences for analysis, checking if we have unusual markup.\n \n interval_seq_list = []\n interval_idx_to_raw_idx = dict()\n\n unusual_markup_flag = False\n unusual_markup_list = []\n\n for raw_interval_seq in raw_interval_seq_list:\n\n interval_seq_list.append([])\n interval_idx_to_raw_idx[len(interval_seq_list) - 1] = {}\n\n for partial_raw_index, (raw_index, interval) in enumerate(raw_interval_seq):\n\n interval_text = interval[2].strip()\n\n # Accepting interval if its text contains at least one vowel, and is short enough or\n # is a valid phonetic transcription.\n\n transcription_check = re.fullmatch(transcription_re, interval_text)\n\n if (len(interval_text) > 0 and\n any(character in vowel_set for character in interval_text) and\n (len(interval_text) <= 2 or transcription_check)):\n\n interval_seq_list[-1].append(interval)\n\n sequence_index = len(interval_seq_list) - 1\n interval_index = len(interval_seq_list[-1]) - 1\n\n interval_idx_to_raw_idx[(sequence_index, interval_index)] = raw_index\n interval_idx_to_raw_idx[sequence_index][interval_index] = partial_raw_index\n\n # Noting if the interval contains unusual (i.e. 
non-transcription) markup.\n\n elif not transcription_check:\n\n unusual_markup_flag = True\n unusual_markup_list.append((raw_index, interval))\n\n transcription_list = [text for begin, end, text in raw_interval_list]\n transcription = ''.join(transcription_list)\n\n selected_list = [text\n for interval_list in interval_seq_list\n for begin, end, text in interval_list]\n\n selected = ''.join(selected_list)\n\n # If we have intervals with unusual markup, we report them.\n\n if unusual_markup_flag:\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' has interval(s) with unusual transcription text: '\n '{9} / {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription, dict(unusual_markup_list)))\n\n # If the markup does not have any vowels, we note it and also report it.\n\n if all(character not in vowel_set for character in transcription):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' doesn\\'t have any vowel markup: {9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription_list))\n\n # It is also possible that while full transcription has vowels, intervals selected for\n # analysis do not. In that case we also note it and report it.\n\n elif not any(character in vowel_set for character in selected):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel_selected'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' intervals to be processed don\\'t have any vowel markup: '\n 'markup {9}, selected {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name,\n transcription_list, selected_list))\n\n # Otherwise we store tier data to be used during processing of the sound file.\n\n else:\n tier_data_list.append((tier_number, tier_name,\n (raw_interval_list, raw_interval_seq_list, interval_seq_list,\n interval_idx_to_raw_idx, transcription)))\n\n vowel_flag = True\n\n # If there are no tiers with vowel markup, we skip this sound-markup file altogether.\n\n if not vowel_flag:\n\n CACHE.set(cache_key, 'no_vowel')\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # Otherwise we retrieve the sound file and analyse each vowel-containing markup.\n # Partially inspired by source code at scripts/convert_five_tiers.py:307.\n\n sound = None\n with tempfile.NamedTemporaryFile() as temp_file:\n\n sound_file = urllib.request.urlopen(urllib.parse.quote(sound_url, safe = '/:'))\n temp_file.write(sound_file.read())\n temp_file.flush()\n\n sound = AudioPraatLike(pydub.AudioSegment.from_wav(temp_file.name))\n\n tier_result_list = []\n\n for tier_number, tier_name, tier_data in tier_data_list:\n\n if tier_data == 'no_vowel' or tier_data == 'no_vowel_selected':\n tier_result_list.append((tier_number, tier_name, tier_data))\n continue\n\n # Analyzing vowel sounds of each interval sequence.\n\n (raw_interval_list, 
raw_interval_seq_list, interval_seq_list, interval_idx_to_raw_idx,\n transcription) = tier_data\n\n tier_result_list.append((tier_number, tier_name, []))\n\n for seq_index, (raw_interval_list, interval_list) in enumerate(zip(\n raw_interval_seq_list, interval_seq_list)):\n\n if len(interval_list) <= 0:\n continue\n\n (max_intensity_index, max_intensity, max_length_index, max_length) = \\\n find_max_interval_praat(sound, interval_list)\n\n max_intensity_interval = interval_list[max_intensity_index]\n max_length_interval = interval_list[max_length_index]\n\n max_intensity_f1_f2 = sound.get_interval_formants(*max_intensity_interval[:2])\n max_length_f1_f2 = sound.get_interval_formants(*max_length_interval[:2])\n\n # Compiling results.\n\n max_length_str = '{0} {1:.3f} [{2}]'.format(\n max_length_interval[2], max_length,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_length_index]])))\n\n max_intensity_str = '{0} {1:.3f} [{2}]'.format(\n max_intensity_interval[2],\n max_intensity,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_intensity_index]])))\n\n tier_result_list[-1][2].append([\n ''.join(text for index, (begin, end, text) in raw_interval_list),\n max_length_str,\n '{0:.3f}'.format(max_length_f1_f2[0]),\n '{0:.3f}'.format(max_length_f1_f2[1]),\n max_intensity_str,\n '{0:.3f}'.format(max_intensity_f1_f2[0]),\n '{0:.3f}'.format(max_intensity_f1_f2[1]),\n '+' if max_intensity_index == max_length_index else '-'])\n\n # Saving result.\n\n result_list.append(tier_result_list)\n CACHE.set(cache_key, tier_result_list)\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in tier_result_list)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}):'\n '\\n{7}\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url, result_string))\n\n # Stopping earlier, if required.\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n except Exception as exception:\n\n #\n # NOTE\n #\n # Exceptional situations encountered so far:\n #\n # 1. TextGrid file actually contains sound, and wav file actually contains textgrid markup.\n #\n # Perspective 330/4, LexicalEntry 330/7, sound-Entity 330/2328, markup-Entity 330/6934\n #\n # 2. 
Markup for one of the intervals contains a newline \"\\n\", and pympi fails to parse it.\n # Praat parses such files without problems.\n #\n # Perspective 330/4, LexicalEntry 330/20, sound-Entity 330/6297, markup-Entity 330/6967\n #\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'exception\\n{7}\\n{8}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url))\n\n # if we encountered an exception, we show its info and remember not to try offending\n # sound/markup pair again.\n\n traceback_string = ''.join(traceback.format_exception(\n exception, exception, exception.__traceback__))[:-1]\n\n log.debug(traceback_string)\n\n CACHE.set(cache_key, ('exception', exception,\n traceback_string.replace('Traceback', 'CACHEd traceback')))\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n log.debug('phonology {0}/{1}: {2} result{3}, {4} no vowels, {5} exceptions'.format(\n perspective_cid, perspective_oid,\n len(result_list), '' if len(result_list) == 1 else 's',\n no_vowel_counter, exception_counter))\n\n # If we have no results, we indicate the situation and also show number of failures and number of\n # markups with no vowels.\n\n if not result_list:\n request.response.status = HTTPPreconditionFailed.code\n\n return {\n \"error\": \"no markups for this query\",\n \"exception_counter\": exception_counter,\n \"no_vowel_counter\": no_vowel_counter}\n\n # Otherwise we create and then serve Excel file.\n\n excel_book = xlwt.Workbook(encoding = \"utf-8\")\n sheet = excel_book.add_sheet(\"Sheet 1\")\n\n sheet.write(0, 0, 'Transcription')\n sheet.write(0, 1, 'Longest (seconds) interval')\n sheet.write(0, 2, 'F1 (Hz)')\n sheet.write(0, 3, 'F2 (Hz)')\n sheet.write(0, 4, 'Highest intensity (dB) interval')\n sheet.write(0, 5, 'F1 (Hz)')\n sheet.write(0, 6, 'F2 (Hz)')\n sheet.write(0, 7, 'Coincidence')\n\n row_counter = 1\n\n for tier_result_list in result_list:\n for tier_number, tier_name, tier_result_seq_list in tier_result_list:\n\n if tier_result_seq_list == 'no_vowel':\n continue\n\n for tier_data in tier_result_seq_list:\n for index, tier_data_str in enumerate(tier_data):\n sheet.write(row_counter, index, tier_data_str)\n\n row_counter += 1\n\n # Formatting column widths.\n\n sheet.col(0).width = 24 * 256\n sheet.col(1).width = 24 * 256\n sheet.col(2).width = 12 * 256\n sheet.col(3).width = 12 * 256\n sheet.col(4).width = 24 * 256\n sheet.col(5).width = 12 * 256\n sheet.col(6).width = 12 * 256\n sheet.col(7).width = 12 * 256\n\n excel_stream = io.BytesIO()\n excel_book.save(excel_stream)\n excel_stream.seek(0)\n\n # See http://stackoverflow.com/questions/2937465/what-is-correct-content-type-for-excel-files for Excel\n # content-type.\n\n response = Response(content_type = 'application/vnd.ms-excel')\n\n response.app_iter = FileIter(excel_stream)\n response.headers['Content-Disposition'] = \"attachment; filename=phonology.xls\"\n\n return response", "title": "" }, { "docid": "f6a4cdea62963b32f50ad71f7fa1cf1e", "score": "0.5105812", "text": "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "title": "" }, { "docid": "b5197cf37652e92e9c47a02f9d0cbe04", "score": "0.5100792", "text": "def main(keyword):\n fs_db = initFirestore()\n # keyword = \"whatshappeninginmyanmar\"\n all_posts = []\n\n 
twitterCrawler = TwitterCrawler(fs_db,keyword) \n cleaned_tweets = twitterCrawler.crawl() \n\n redditCrawler = RedditCrawler(fs_db, keyword) \n cleaned_submissions = redditCrawler.crawl() \n\n twitterCrawler.exportToDB(cleaned_tweets)\n redditCrawler.exportToDB(cleaned_submissions)\n\n crawler = Crawler()\n\n crawler.generateSentimentAnalysis(fs_db, cleaned_submissions, cleaned_tweets)\n wordcloud_img = crawler.generateWordCloud()\n\n # Send wordcloud to DB\n doc_ref = fs_db.collection(u'wordcloud').document('first')\n doc_ref.set({\n u'image': wordcloud_img\n })", "title": "" }, { "docid": "166521861a8bb8d2e5ddb5b9a61e037d", "score": "0.5074083", "text": "def _prepare_analysis_input(self, documents):\n subdoc_to_doc_map = {}\n wordtype_to_number = {}\n number_to_wordtype = []\n wordtypes = {}\n \n # prevent duplicating work\n if os.path.exists(self.wordtype_file):\n return\n \n try:\n # First find singletons\n if self.remove_singletons:\n word_type_count_threshold = max(1, int(math.log(documents.count(), 10)) - 2)\n temp_word_type_counts = {}\n for doc_index, doc in enumerate(documents):\n tokens = self.tokenize(doc.get_content())\n for token, token_start in tokens:\n temp_word_type_counts[token] = temp_word_type_counts.setdefault(token, 0) + 1\n for word_type, count in temp_word_type_counts.iteritems(): # add singletons to stopword list\n if count <= word_type_count_threshold:\n self._excluded_words[word_type] = True\n with io.open(self.excluded_words_file, 'w', encoding='utf-8') as ex_f:\n ex_f.write(unicode(json.dumps(self._excluded_words)))\n \n haltwords = dict(self.stopwords)\n haltwords.update(self._excluded_words)\n # Second find bigrams, iterate through documents and train.\n if self.find_bigrams:\n from import_tool.analysis.bigram_finder import BigramFinder\n bigram_finder = BigramFinder(stopwords=haltwords)\n for doc_index, doc in enumerate(documents):\n bigram_finder.train(doc_index, self.tokenize(doc.get_content()))\n bigram_finder.print()\n \n # Third, we're going to stem words\n if self.stem_words:\n from import_tool.analysis.stemmer import Stemmer\n stemmer = Stemmer(self._working_dir, self.base_dir)\n \n # for each document tokenize and map tokens to numbers to avoid regex problems before passing data to Mallet\n with io.open(self.mallet_input_file, 'w', encoding='utf-8') as w:\n with io.open(self.start_index_file, 'w', encoding='utf-8') as w2:\n count = 0\n subcount = 0\n for doc_index, doc in enumerate(documents):\n doc_content = unicode(doc.get_content())\n count += 1\n subdocuments = self.create_subdocuments(doc_index, doc_content)\n token_start_index_offset = 0 # needed to make sure the start index remains correct once the document is re-merged\n for subdoc_name, subdoc_content in subdocuments:\n if subcount > 0:\n w2.write(u'\\n')\n subcount += 1\n subdoc_to_doc_map[subdoc_name] = doc_index\n tokens = self.tokenize(subdoc_content)\n \n if self.find_bigrams:\n tokens = bigram_finder.combine(tokens, subdoc_content)\n \n token_numbers = []\n token_start_indices = []\n only_tokens = []\n tokens_temp = []\n for tok, tok_start in tokens:\n only_tokens.append(tok)\n tokens_temp.append([tok, tok_start + token_start_index_offset])\n tokens = tokens_temp\n tokens_temp = None\n if self.stem_words:\n stemmed_tokens = stemmer.stem(only_tokens)\n else:\n stemmed_tokens = only_tokens\n for tup, tok_stem in zip(tokens, stemmed_tokens):\n tok, tok_start = tup\n wordtypes[tok] = True\n wordtypes[tok_stem] = True\n try:\n tok_num = wordtype_to_number[tok_stem]\n except:\n tok_num = 
len(wordtype_to_number)\n number_to_wordtype.append(tok_stem)\n wordtype_to_number[tok_stem] = tok_num\n token_numbers.append(unicode(tok_num))\n token_start_indices.append([tok, tok_start])\n text = u' '.join(token_numbers)\n w.write(u'{0} all {1}\\n'.format(subdoc_name, text))\n w2.write(unicode(json.dumps(token_start_indices)))\n token_start_index_offset += len(subdoc_content)\n for tok, tok_start in tokens:\n try:\n assert doc_content[tok_start:tok_start+len(tok)].lower() == tok.lower()\n except:\n print(tok_start)\n print(len(tok))\n print('\"'+doc_content[tok_start:tok_start+len(tok)].lower()+'\"')\n print('\"'+tok.lower()+'\"')\n raise\n if not count:\n raise Exception('No files processed.')\n # record which subdocuments belong to which documents\n with io.open(self.subdoc_to_doc_map_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(subdoc_to_doc_map)))\n with io.open(self.wordtype_to_number_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtype_to_number)))\n with io.open(self.number_to_wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(number_to_wordtype)))\n with io.open(self.wordtype_file, 'w', encoding='utf-8') as w:\n w.write(unicode(json.dumps(wordtypes)))\n except: # cleanup\n self._cleanup(self.mallet_input_file)\n self._cleanup(self.subdoc_to_doc_map_file)\n self._cleanup(self.wordtype_to_number_file)\n self._cleanup(self.number_to_wordtype_file)\n self._cleanup(self.wordtype_file)\n self._cleanup(self.excluded_words_file)\n raise", "title": "" }, { "docid": "86fa15b2609b8808c96c9a44223e9b6c", "score": "0.5035346", "text": "def main():\n LESSONS_PATH = os.path.join(LESSON_LOCATOR_DATA, LESSON_SETS[0])\n ORIGINAL_LESSONS_PATH = os.path.join(LESSONS_PATH, \"original\")\n ANNOTATED_LESSONS_PATH = os.path.join(LESSONS_PATH, \"annotated\")\n\n if not os.path.exists(ANNOTATED_LESSONS_PATH):\n os.mkdir(ANNOTATED_LESSONS_PATH)\n\n print(\"Scanning original lessons in %s...\" % ORIGINAL_LESSONS_PATH)\n\n for item in os.listdir(ORIGINAL_LESSONS_PATH):\n if item == \".DS_Store\": continue\n\n print(\" found: %s\" % item)\n\n item_path = os.path.join(ORIGINAL_LESSONS_PATH, item)\n\n lesson_number = None\n lesson_description = None\n mobj = re.search(r'^AY\\s+(\\d+)\\s*-\\s*(.+)\\.txt$', item)\n if mobj:\n lesson_number = mobj.group(1)\n lesson_description = mobj.group(2)\n\n print(\" number: %s\" % lesson_number)\n print(\" description: %s\" % lesson_description)\n\n lesson = dict()\n lesson['number'] = lesson_number\n lesson['description'] = lesson_description\n\n fh = open(item_path)\n lesson_raw_text = fh.read()\n fh.close()\n lesson_text = re.split(r'\\n', lesson_raw_text)\n# lesson_raw_text_reencoded = lesson_raw_text.decode('mac-roman').encode('utf-8')\n# lesson_text = re.split(r'\\n', lesson_raw_text_reencoded)\n\n lesson['text'] = lesson_text\n lesson['parsed'] = parseLesson(lesson_text)\n\n if lesson['parsed']['end_of_lesson'] is None:\n print(\" lesson has no 'end of lesson' marker\")\n\n lesson_json = json.dumps(lesson, indent=4)\n annotated_lesson_path = os.path.join(ANNOTATED_LESSONS_PATH, \"ay_%04d.json\" % int(lesson_number))\n fh = open(annotated_lesson_path, \"w\")\n fh.write(lesson_json)\n fh.close()\n\n else:\n print(\"ERROR: File name not understood: %s\" % item)\n\n return 0", "title": "" }, { "docid": "a8088068221bd855f018d369c5d25ceb", "score": "0.50167453", "text": "def compute_wiki(self):\n\n self.__wiki_counts()\n self.__cross_wiki_counts()\n\n # Step 1: Calculate p(e|m) for wiki.\n print(\"Filtering 
candidates and calculating p(e|m) values for Wikipedia.\")\n for ent_mention in self.wiki_freq:\n if len(ent_mention) < 1:\n continue\n\n ent_wiki_names = sorted(\n self.wiki_freq[ent_mention].items(), key=lambda kv: kv[1], reverse=True\n )\n # Get the sum of at most 100 candidates, but less if less are available.\n total_count = np.sum([v for k, v in ent_wiki_names][:100])\n\n if total_count < 1:\n continue\n\n self.p_e_m[ent_mention] = {}\n\n for ent_name, count in ent_wiki_names:\n self.p_e_m[ent_mention][ent_name] = count / total_count\n\n if len(self.p_e_m[ent_mention]) >= 100:\n break\n\n del self.wiki_freq", "title": "" }, { "docid": "9611488c4353d23bb3e08e0ca838baf8", "score": "0.5009797", "text": "def get_WS(w2v):\n # get set of MAX_NGRAM-grams in text\n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines ]\n ngrams_in_data = set()\n for words in raw:\n for ngram in tweet_to_ngrams(words):\n ngrams_in_data.add(ngram)\n\n # load sentiment features from model\n clf_pipe = pickle.load(open(CLF_FNAME, 'rb')) # model\n\n vect = clf_pipe.best_estimator_.named_steps['vect']\n clf = clf_pipe.best_estimator_.named_steps['clf']\n\n features_to_sent_idx = vect.vocabulary_ # map from model features to sentiment index\n # currently, sentiment = 2 * (count_pos / (count_pos + count_neg)) - 1\n sentiments = clf.feature_count_[1,:] / np.sum(clf.feature_count_, axis=0) # in [0,1]\n sentiments = 2 * sentiments - 1 # rescale to [-1,1]\n\n features_to_sent = {feat: sentiments[idx] for (feat,idx) in features_to_sent_idx.items()}\n\n # build WS and ngram_idx_map for each MAX_NGRAM-gram in the text\n k = len(next(iter(w2v.values()))) # dimension of embedding\n WS = np.zeros(shape=(len(ngrams_in_data) + 1, k + MAX_NGRAM), dtype='float32')\n ngram_idx_map = {}\n\n index = 1 # first row is left 0, for padding in the cnn. This is also neutral sentiment.\n # For Vader Sentiment analysis\n# vader_analyzer = SentimentIntensityAnalyzer()\n\n\n for ngram in ngrams_in_data:\n ngram_idx_map[ngram] = index\n\n # set word embedding, note that unknown words already randomized in load_embedding \n words = ngram.split(' ')\n WS[index,:k] = w2v[words[-1]] # embedding of last word\n\n # set sentiment embedding\n for n in range(MAX_NGRAM): # for 1, 2, ... 
length ngrams\n sub_ngram = ' '.join(words[-1 - n:]) \n\n # Naive Bayes Sentiment feature --------------------------------\n sent = features_to_sent.get(sub_ngram, 0.0) # default to neutral 0\n # --------------------------------------------------------------\n\n# # TextBlob sentiment feature -----------------------------------\n# sent = TextBlob(sub_ngram).sentiment.polarity\n# # --------------------------------------------------------------\n\n# # Vader sentiment feature -------------------------------------\n# sent = vader_analyzer.polarity_scores(sub_ngram)['compound']\n# # -------------------------------------------------------------\n WS[index,k+n] = sent\n\n index += 1\n\n return WS, ngram_idx_map", "title": "" }, { "docid": "094614d3bf2355fdc3c4f7e7041f0655", "score": "0.4999468", "text": "def workbench_scenarios():\n return [\n (\"Oppia Embedding\",\n \"\"\"<vertical_demo>\n <oppia oppiaid=\"0\" src=\"https://www.oppia.org\" width=\"700\" />\n </vertical_demo>\n \"\"\"),\n ]", "title": "" }, { "docid": "4f999b0f59fddb0e1776ce03b69b5a8f", "score": "0.4986965", "text": "def extract_documents():\n client = MongoClient()\n conn = client.data\n coll = conn.germanwings\n\n query = {'text': {'$exists': 1}, 'exc': {'$exists': 0}}\n selection = {'text': 1, 'short_url': 1}\n for i, doc in enumerate(coll.find(query, selection)):\n short_url, text = tuple(doc[x] for x in (\"short_url\", \"text\"))\n print(\"Extracting {0} {1}\".format(i, short_url), file=stderr)\n filename = os.path.join(RAW_DIR, short_url)\n with open(filename, \"w\") as f:\n ascii = text.encode('ascii', 'ignore')\n f.write(ascii)", "title": "" }, { "docid": "24c73e13f253f36b5593640e37a35998", "score": "0.49765155", "text": "def run():\n\n api = api_start()\n stonks = {}\n check_function = load_symbol_list()\n for obj in (\"comments\", \"submissions\"):\n for post in get_text(api, obj):\n if obj == \"comments\":\n full_text = post.body\n else: # obj == \"submissions\"\n full_text = post.title + post.selftext\n try:\n stonks = check_texts(\n full_text, post.author.name, stonks, check_function\n )\n except AttributeError:\n pass\n\n return stonks", "title": "" }, { "docid": "8095ed8a35b084b91054a594ab0aff1a", "score": "0.49734497", "text": "def wps(model, data):\n with msg.loading(f\"Loading model '{model}'...\"):\n nlp = spacy.load(model)\n texts = (eg[\"text\"] for eg in srsly.read_jsonl(data))\n n_docs = 0\n n_words = 0\n start_time = timer()\n for doc in nlp.pipe(texts):\n n_docs += 1\n n_words += len(doc)\n end_time = timer()\n wps = int(n_words / (end_time - start_time))\n result = [\n (\"Docs\", f\"{n_docs:,}\"),\n (\"Words\", f\"{n_words:,}\"),\n (\"Words/s\", f\"{wps:,}\"),\n ]\n msg.table(result, widths=(7, 12), aligns=(\"l\", \"r\"))", "title": "" }, { "docid": "4b0e78481ce7a6fad8056487980dcf05", "score": "0.49670535", "text": "def request_document(self):\n\t\tassert self.image is not None, \"image is not defined.\"\n\t\treturn {\"requests\":[{\n\t\t\t\"image\": {\"content\": self.image.base64},\n\t\t\t\"features\":[{\n\t\t\t\t\"type\": \"LANDMARK_DETECTION\",\n\t\t\t\t\"maxResults\": 1\n\t\t\t}]\n\t\t}]}", "title": "" }, { "docid": "096e51a3b4435acc9ba293acae0c7e27", "score": "0.49649388", "text": "def evaluate():\n global dictionary, wv\n count = 0\n # To save the scores by distance and similarity\n scores = np.zeros(6)\n similar = np.zeros(6)\n itr = len(dictionary)\n logging.info('running evaluation for {0} samples'.format(itr))\n for key in dictionary:\n progress = (count / itr) * 100\n d = 
dictionary[key].split('resource/')\n d = [idx.split()[0].translate(table).lower() for idx in d[1:]]\n try:\n r = np.array(list(map(lambda x: wv.get_vector(x), d)),\n dtype=np.float32)\n except KeyError:\n itr -= 1\n continue\n if np.any(np.isnan(r)):\n itr -= 1\n continue\n else:\n if r.ndim == 2:\n try:\n # Mean of vector containing all word vectors\n # obtained from abstract.\n r = r.mean(axis=0).reshape(1, -1)\n \n # Obtain the vectors for the entity\n mean_vec = mean_encoder(dictionary[key])\n mean_vec = mean_vec.reshape(1, -1) / norm(mean_vec)\n mean_dist_vec = distance_encoder(dictionary[key])\n mean_dist_vec = mean_dist_vec.reshape(1, -1)\n mean_dist_vec = mean_dist_vec / norm(mean_dist_vec)\n title_vec = title_mean(key)\n title_vec = title_vec.reshape(1, -1) / norm(title_vec)\n abstract_vec = abstract_encoder(key)\n abstract_vec = abstract_vec.reshape(1, -1)\n abstract_vec = abstract_vec / norm(abstract_vec)\n random_vec = np.random.randn(100).reshape(1, -1)\n zero_vec = np.zeros(100).reshape(1, -1)\n \n # Score the entity vectors\n scores[0] += norm(r - mean_vec)\n scores[1] += norm(r - mean_dist_vec)\n scores[2] += norm(r - title_vec)\n scores[3] += norm(r - abstract_vec)\n scores[4] += norm(r - random_vec)\n scores[5] += norm(r - zero_vec)\n similar[0] += cosine_similarity(r, mean_vec)\n similar[1] += cosine_similarity(r, mean_dist_vec)\n similar[2] += cosine_similarity(r, title_vec)\n similar[3] += cosine_similarity(r, abstract_vec)\n similar[4] += cosine_similarity(r, random_vec)\n similar[5] += cosine_similarity(r, zero_vec)\n count += 1\n print(count, end='\\r')\n except (ValueError, KeyError) as _:\n itr -= 1\n continue\n else:\n itr -= 1\n continue\n # Normalize the scores to get a better\n # comparison against the baselines.\n scores = scores / norm(scores)\n similar = similar / norm(similar)\n print_summary(scores, similar)", "title": "" }, { "docid": "19133503f4d039654bcf8787d017b0db", "score": "0.49587277", "text": "def visualize(self):\n # TODO\n #pyLDAvis.enable_notebook()\n #vis = pyLDAvis.gensim.prepare(self.lda_model, self.stemmed_corpus)\n return", "title": "" }, { "docid": "21ae6d00632b3616ad16936719a6e07e", "score": "0.49508405", "text": "def run(self):\r\n history = self.extracter.load_user_history()\r\n self.plot_history(history)\r\n \r\n pp_history = self.analyser.preprocess_history(history)\r\n part_worths, attribute_importance, relative_importance = self.analyser.conjoint_analysis(pp_history)\r\n self.plot_analysis(part_worths, relative_importance)\r\n \r\n return history, pp_history, part_worths, relative_importance", "title": "" }, { "docid": "b73cb825455fae6c04f57b8fa704ed31", "score": "0.49432248", "text": "def learn_models(self):\n\n influencers = self.influencers.infGroup\n\n self.complete_model = LanguageModel()\n self.influencer_models = { influencer: LanguageModel() for influencer in influencers }\n\n all_tweets = []\n # for influencer in tqdm(influencers, desc='Learning Models'):\n for influencer in influencers:\n tweets = [tweet for tweet in self.get_saved_tweets(influencer)]\n self.influencer_models[influencer].add_documents(tweets)\n all_tweets += tweets\n\n self.complete_model.add_documents(all_tweets)", "title": "" }, { "docid": "2801359b261e4f2479acf6e4fc95b081", "score": "0.494306", "text": "def inference(self):\n for m, doc in enumerate(self.docs):\n # Be careful followings are views\n # So self.hoge will be change, when changing variant\n zs_j = self.zs_m_j[m]\n zk_j = self.zk_m_j[m]\n n_m_zs = self.n_m_zs[m]\n n_m_zk = self.n_m_zk[m]\n 
for j, t in enumerate(doc):\n # discount for n-th word t with topic z\n zs = zs_j[j]\n zk = zk_j[j]\n n_m_zs[zs] -= 1\n n_m_zk[zs, zk] -= 1\n self.n_zk_t[zk, t] -= 1\n self.n_zk[zk] -= 1\n\n # sampling topic new_z for t\n \"\"\"\n n_s = n_m_zs + self.alphas # mth doc, S vec\n p_s = n_s / np.sum(n_s)\n n_k = n_m_zk + self.alphask # mth doc, SxK matrix\n p_k = n_k / n_s.reshape(len(n_s), 1)\n n_v = self.n_zk_t[:, t] + self.beta\n p_v = n_v / (self.n_zk + self.beta)\n\n p_zsk = p_s.reshape(len(p_s), 1) * p_k * p_v # SxK matrix\n \"\"\"\n\n p_zsk = (n_m_zk + self.alphask) * self.n_zk_t[:, t] \\\n / (np.sum(n_m_zs + self.alphas) * self.n_zk)\n\n p_zs = np.sum(p_zsk, axis=1) / np.sum(p_zsk)\n p_zk = np.sum(p_zsk, axis=0) / np.sum(p_zsk)\n\n new_zs = np.random.multinomial(1, p_zs).argmax()\n new_zk = np.random.multinomial(1, p_zk).argmax()\n\n # print(\"arg\", np.argmax(p_s), np.argmax(p_k, axis=1),\n # np.argmax(p_k, axis=0), np.argmax(p_zk))\n # print('probs', p_s, p_zs)\n # print('probk', p_k, p_zk)\n # print('old', zs, zk)\n # print('new', new_zs, new_zk)\n\n # set z the new topic and increment counters\n zs_j[j] = new_zs\n zk_j[j] = new_zk\n n_m_zs[new_zs] += 1\n n_m_zk[new_zs, new_zk] += 1\n self.n_zk_t[new_zk, t] += 1\n self.n_zk[new_zk] += 1", "title": "" }, { "docid": "4b7ecd53c19c14aa054a0489b52997ff", "score": "0.49372828", "text": "def emo_detect(text_list,language='English',method ='dimensional',output='data_frame',resolution = 'words_in_text', folder='',max_len=500000):\n \n # Dictionary containing file names of affect disctionaries:\n file_dict = {'english':{'dimensional':('english_anew_dict',),'discrete':('english_nstar_liwc_dict','english_star_liwc_dict')},\\\n 'german':{'dimensional':('german_anew_dict',),'discrete':('german_nstar_liwc_dict','german_star_liwc_dict')},\\\n 'chinese':{'discrete':('chinese_nstar_liwc_dict','chinese_star_liwc_dict')}}\n # Dictionary containing column names of data frames:\n colname_dict = {'english':{'dimensional':['PosVal', 'NegVal','Arousal','Dominance', 'PosCount','DetectCount','TokenCount','ValSq'],'discrete':['function', 'pronoun', 'ppron', 'i', 'we', 'you', 'shehe', 'they', 'ipron', 'article', 'prep', 'auxverb', 'adverb', 'conj', 'negate', 'verb', 'adj', 'compare', 'interrog', 'number', 'quant', 'affect', 'posemo', 'negemo', 'anx', 'anger', 'sad', 'social', 'family', 'friend', 'female', 'male', 'cogproc', 'insight', 'cause', 'discrep', 'tentat', 'certain', 'differ', 'percept', 'see', 'hear', 'feel', 'bio', 'body', 'health', 'sexual', 'ingest', 'drives', 'affiliation', 'achieve', 'power', 'reward', 'risk', 'focuspast', 'focuspresent', 'focusfuture', 'relativ', 'motion', 'space', 'time', 'work', 'leisure', 'home', 'money', 'relig', 'death', 'informal', 'swear', 'netspeak', 'assent', 'nonflu', 'filler','DetectCount','TokenCount']},\\\n 'german':{'dimensional':['PosVal', 'NegVal','Arousal','Dominance', 'PosCount','DetectCount','Imagine','Potency', 'DomPot_Count','TokenCount','ValSq'],'discrete':['Pronoun', 'I', 'We', 'Self', 'You', 'Other', 'Negate', 'Assent', 'Article', 'Preps', 'Number', 'Affect', 'Posemo', 'Posfeel', 'Optim', 'Negemo', 'Anx', 'Anger', 'Sad', 'Cogmech', 'Cause', 'Insight', 'Discrep', 'Inhib', 'Tentat', 'Certain', 'Senses', 'See', 'Hear', 'Feel', 'Social', 'Comm', 'Othref', 'Friends', 'Family', 'Humans', 'Time', 'Past', 'Present', 'Future', 'Space', 'Up', 'Down', 'Incl', 'Excl', 'Motion', 'Occup', 'School', 'Job', 'Achieve', 'Leisure', 'Home', 'Sports', 'TV', 'Music', 'Money', 'Metaph', 'Relig', 'Death', 'Physcal', 'Body', 
'Sexual', 'Eating', 'Sleep', 'Groom', 'Swear', 'Nonfl', 'Fillers', 'Swiss', 'Ideo', 'Personalpronomina', 'Indefinitpronomina', 'AuxiliaryVerbs', 'Konjunktionen', 'Adverbien','Bedrohung', 'DetectCount','TokenCount']},\\\n 'chinese':{'discrete':['function', 'pronoun', 'ppron', 'i', 'we', 'you', 'shehe', 'they', 'youpl', 'ipron', 'prep', 'auxverb', 'adverb', 'conj', 'negate', 'quanunit', 'prepend', 'specart', 'tensem', 'focuspast', 'focuspresent', 'focusfuture', 'progm', 'particle', 'modal_pa', 'general_pa', 'compare', 'interrog', 'number', 'quant', 'affect', 'posemo', 'negemo', 'anx', 'anger', 'sad', 'social', 'family', 'friend', 'female', 'male', 'cogproc', 'insight', 'cause', 'discrep', 'tentat', 'certain', 'differ', 'percept', 'see', 'hear', 'feel', 'bio', 'body', 'health', 'sexual', 'ingest', 'drives', 'affiliation', 'achieve', 'power', 'reward', 'risk', 'relativ', 'motion', 'space', 'time', 'work', 'leisure', 'home', 'money', 'relig', 'death', 'informal', 'swear', 'netspeak', 'assent', 'nonflu', 'filler', 'DetectCount','TokenCount']}}\n # Normalize language and method parameters:\n language = language.lower()\n method = method.lower()\n #Initiate empty stemmer:\n stemmer = []\n # Counter:\n c = 0\n # Get the files to load from the file_dict:\n files = file_dict[language][method]\n # Get column names from col_dict:\n colnames = colname_dict[language][method]\n # Load files:\n # One dictionary (dimensional):\n if len(files) == 1:\n with open(folder + files[0],'rb') as f:\n dicts = pickle.load(f)\n # Length of affect vectors in dictionary:\n vec_len = len(list(dicts.values())[0])\n pos_list = 0\n # Two dictionaries (discrete):\n if len(files) == 2:\n with open(folder + files[0],'rb') as f:\n naster_disc_dict = pickle.load(f)\n with open(folder + files[1],'rb') as f:\n aster_disc_dict = pickle.load(f)\n dicts = (naster_disc_dict,aster_disc_dict)\n # Length of affect vectors in dictionary:\n vec_len = len(list(naster_disc_dict.values())[0])\n # Generate stemmer if german dimensional affect detection:\n if method == 'dimensional':\n if language == 'german':\n stemmer = SnowballStemmer('german')\n elif method == 'discrete':\n # List of stem lengths in the word stem dictionary:\n pos_list = list(reversed(list(aster_disc_dict.keys())))\n # Initiate vec_list if output is data_frame, and emo_mat if output is array:\n if output == 'data_frame': \n vec_list = []\n elif output == 'array':\n emo_mat = np.zeros([max_len,max_len,vec_len])\n # for resolution 'sentences_in_text' create an emo_mat with one additional layer for sentence counter\n if resolution == 'sentences_in_text':\n emo_mat = np.zeros([max_len,max_len,vec_len+1])\n\n # Iterate over texts in text_list:\n for c,text in enumerate(text_list):\n # Print counter every 10,000 texts:\n if c % 10000 == 0:\n print(c)\n # Ignore if it's not a text:\n if not isinstance(text,str):\n continue\n # Resolution words in text:\n if resolution == 'words_in_text':\n # create vector or array (depending on 'ouptut'):\n emo_thingy = text_detect(text=text,max_len=max_len,vec_len=vec_len,pos_list=pos_list,dicts=dicts,stemmer=stemmer,method = method,output = output)\n # Skip text if output of 'text_detect' is 0.\n if isinstance(emo_thingy, int):\n continue\n # Append emo_thingy to vec_list or add to emo_mat (depending on 'ouptut'):\n elif output == 'data_frame': \n vec_list.append(emo_thingy)\n elif output == 'array':\n emo_mat[emo_thingy[0],:emo_thingy[0]+1,:] += emo_thingy[1] \n elif resolution == 'words_in_sentence':\n sent_list = 
sentence_tokenize(text,language=language)\n for sent in sent_list:\n # create vector or array (depending on 'ouptut'):\n emo_thingy = text_detect(text=sent,max_len=max_len,vec_len=vec_len,pos_list=pos_list,dicts=dicts,stemmer=stemmer,method = method,output = output)\n # Skip text if output of 'text_detect' is 0.\n if isinstance(emo_thingy, int):\n continue\n # Append emo_thingy to vec_list or add to emo_mat (depending on 'ouptut'):\n elif output == 'data_frame': \n vec_list.append(emo_thingy)\n elif output == 'array':\n emo_mat[emo_thingy[0],:emo_thingy[0]+1,:] += emo_thingy[1] \n elif resolution == 'sentences_in_text':\n sent_list = sentence_tokenize(text,language=language)\n n_sent = len(sent_list)\n if (n_sent > max_len) or (n_sent == 0):\n continue\n n_sent = n_sent - 1\n for sc,sent in enumerate(sent_list):\n emo_thingy = text_detect(text=sent,max_len=10000,vec_len=vec_len,pos_list=pos_list,dicts=dicts,stemmer=stemmer,method = method,output = 'data_frame')\n if isinstance(emo_thingy, int):\n continue\n emo_thingy = np.append(emo_thingy,np.array([1]))\n emo_mat[n_sent,sc,:] += emo_thingy\n # return data frame or array, depending on 'output':\n if output == 'data_frame':\n return(pd.DataFrame(vec_list,columns=colnames))\n elif output == 'array':\n return(emo_mat)", "title": "" }, { "docid": "8cf918980b856171a28d842d572d9096", "score": "0.49365968", "text": "def run(self):\r\n\r\n self.roi_analyzer = ROIAnalyzer(\r\n ini_path=self.config_path,\r\n data_path=self.outlier_corrected_dir,\r\n calculate_distances=True,\r\n settings=self.settings,\r\n )\r\n self.roi_analyzer.files_found = self.files_found\r\n self.all_shape_names = self.roi_analyzer.shape_names\r\n self.roi_analyzer.run()\r\n self.roi_analyzer.compute_framewise_distance_to_roi_centroids()\r\n self.roi_distances_dict = self.roi_analyzer.roi_centroid_distance\r\n self.roi_entries_df = self.roi_analyzer.detailed_df\r\n if self.roi_directing_viable:\r\n self.directing_analyzer.run()\r\n self.roi_direction_df = self.directing_analyzer.results_df\r\n\r\n self.data = {}\r\n for file_cnt, file_path in enumerate(self.features_files):\r\n _, self.video_name, _ = get_fn_ext(file_path)\r\n _, _, self.fps = self.read_video_info(video_name=self.video_name)\r\n data_df = read_df(file_path, self.file_type)\r\n self.out_df = deepcopy(data_df)\r\n self.__process_within_rois()\r\n self.__distance_to_roi_centroids()\r\n if self.roi_directing_viable:\r\n self.__process_directionality()\r\n self.data[self.video_name] = self.out_df", "title": "" }, { "docid": "4ed2627a8180c954b6fecaf0d6fe506c", "score": "0.49345893", "text": "def detect_document(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.document_text_detection(image=image)\n\n for page in response.full_text_annotation.pages:\n for block in page.blocks:\n #print('\\nBlock confidence: {}\\n'.format(block.confidence))\n for paragraph in block.paragraphs:\n for word in paragraph.words:\n word_text = ''.join([symbol.text for symbol in word.symbols])\n text.append(word_text.encode('utf-8'))\n #print(word_text)", "title": "" }, { "docid": "8c9722f57ea37a8097212a4b00336cc0", "score": "0.49288893", "text": "def sentiment_aspects(docs: Iterable[tokens.Doc]) -> List[collections.Counter]:\n sent_dict_list = []\n start_time = time.time()\n\n for doc in docs:\n sent_dict = collections.Counter()\n for token in doc:\n # check if the 
word is an opinion word, then assign sentiment\n if token.text.lower() in _OPINION_WORDS:\n sentiment = 1 if token.text.lower() in _POS_WORDS else -1\n if (token.dep_ == \"advmod\"):\n # if target is an adverb modifier (i.e. pretty, highly, etc.)\n # but happens to be an opinion word, ignore and pass\n continue\n\n elif (token.dep_ == \"amod\"):\n sent_dict[token.head.text.lower()] += sentiment\n\n else:\n for child in token.children:\n # if there's a adj modifier (i.e. very, pretty, etc.) add\n # more weight to sentiment\n # This could be better updated for modifiers that either\n # positively or negatively emphasize\n if _is_opinion_mod(child):\n sentiment *= 1.5\n # check for negation words and flip the sign of sentiment\n if child.dep_ == \"neg\":\n sentiment *= -1\n for child in token.children:\n if (token.pos_ == \"VERB\") & (child.dep_ == \"dobj\"):\n # if verb, check if there's a direct object\n sent_dict[child.text.lower()] += sentiment\n # check for conjugates (a AND b), then add both to dictionary\n subchildren = []\n conj = 0\n for subchild in child.children:\n if subchild.text.lower() == \"and\": conj=1\n if (conj == 1) and (subchild.text.lower() != \"and\"):\n subchildren.append(subchild.text.lower())\n conj = 0\n for subchild in subchildren:\n sent_dict[subchild] += sentiment\n\n # check for negation\n for child in token.head.children:\n noun = \"\"\n if _is_opinion_mod(child):\n sentiment *= 1.5\n if (child.dep_ == \"neg\"):\n # check for negation words and flip the sign of sentiment\n sentiment *= -1\n\n # check for nouns\n for child in token.head.children:\n noun = \"\"\n if (child.pos_ == \"NOUN\") and (child.text not in sent_dict):\n noun = child.text.lower()\n # Check for compound nouns\n for subchild in child.children:\n if subchild.dep_ == \"compound\":\n noun = subchild.text.lower() + \" \" + noun\n sent_dict[noun] += sentiment\n sent_dict_list.append(collections.Counter(sent_dict))\n\n print(\"\\nFound aspects on {} reviews.\".format(len(sent_dict_list)))\n print(time.time() - start_time)\n return sent_dict_list", "title": "" }, { "docid": "bb356c3bf2d1cbf9ef471575c497383c", "score": "0.48967612", "text": "def text_analytics(self):\n\n headers = {\n # Request headers\n 'Content-Type': 'application/json',\n 'Ocp-Apim-Subscription-Key': self.keys['text_analytics'],\n }\n \n sentiment_url = 'https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment'\n \n raw_text = self.article_params['text']\n\n # Build post for sentiment\n try:\n sentences = tokenize.sent_tokenize(str(raw_text))\n content = []\n for i, sentence in enumerate(sentences):\n content.append({'id': str(i), 'language': 'en', 'text': sentence})\n body = json.dumps({\"documents\": content}).encode('utf-8')\n\n request = urllib.request.Request(sentiment_url, body, headers)\n response = urllib.request.urlopen(request)\n json_response = json.loads(response.read().decode('utf-8'))\n \n # A list of dictionaries, with each dictionary containing a sentence\n # sentiment score\n sentiments_list = json_response['documents']\n\n # Calculate the articles average sentiment from all the sentences\n cumulative_sentiment_score = 0\n for sent in sentiments_list:\n cumulative_sentiment_score += sent['score']\n avg_article_sentiment = cumulative_sentiment_score/len(sentiments_list)\n\n # Put article sentiments in bucket from 1 to 5, with 1 being very\n # negative and 5 being very positive\n if avg_article_sentiment < 0.2:\n sentiment = 1\n elif 0.2 <= avg_article_sentiment < 0.4:\n sentiment = 2\n elif 0.4 <= 
avg_article_sentiment < 0.6:\n sentiment = 3\n elif 0.6 <= avg_article_sentiment < 0.8:\n sentiment = 4\n else:\n sentiment = 5\n\n except Exception as e:\n print('Unable to process sentiment for article. Assuming '\n 'sentiment is neutral.')\n sentiment = 3\n\n return sentiment", "title": "" }, { "docid": "35aea9116a20457b06626361b0a8c0a6", "score": "0.4894925", "text": "def main():\n try:\n res = requests.get('http://localhost:9200')\n pprint(json.loads(res.content.decode('utf-8')))\n except requests.exceptions.ConnectionError:\n print(\"ERROR: ELASTICSEARCH Server is not running!\")\n exit(-1)\n\n # scrapeAndSaveNewsArticles()\n # generateNewsDocsCSV() # may need to be modified based on how scrapeAndSave function file output\n if not es_client.indices.exists(index='huffpost_news_index'):\n print(\"PLEASE WAIT... LOADING DOCUMENTS INTO INVERTED INDEX\")\n indexDocsToES('huffpost_news_index')", "title": "" }, { "docid": "9f4a186b81c2d93742075e9f6cf02bac", "score": "0.48722687", "text": "def forward(self, doc):\n out = torch.tensor([]).float().to(self.device)\n\n for i in range(len(doc)):\n sentences_raw = sentencesplit(cleantxt(doc[i]))\n sentences_ready = torch.tensor([]).float().to(self.device)\n for sentence in sentences_raw:\n sentence = sentence.split()\n if sentence == []:\n continue\n lookup_tensor = torch.tensor([]).long().to(self.device)\n for word in sentence:\n if word in self.embedd_dict:\n lookup_tensor = torch.cat((lookup_tensor,\n torch.LongTensor([self.embedd_dict[word]])), 0)\n else:\n lookup_tensor = torch.cat((lookup_tensor, torch.LongTensor([0])), 0)\n # Word embedding\n xw = self.word_embedding(lookup_tensor).view(1, -1, self.embedding_dim).to(self.device)\n # Word GRU\n self.hidden_gru_words = self.init_hidden_words()\n hw, self.hidden_gru_words = self.gru_word(xw, self.hidden_gru_words)\n # Word MLP\n uw = nn.Tanh()(self.MLP_word(hw)).to(self.device)\n # Word attention\n attention_score = torch.matmul(uw, self.attention_word).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(uw.size(0), uw.size(1), 1).to(self.device)\n scored_x = (hw * attention_score).to(self.device)\n s = torch.sum(scored_x, dim=1).to(self.device)\n #collecting sentences\n sentences_ready = torch.cat((sentences_ready, s), 0)\n # Sentence GRU\n if len(sentences_ready) == 0:\n out = torch.cat((out,\n torch.randn(1, self.number_cat).to(self.device)), 0).to(self.device)\n continue\n sentences_ready_gru = sentences_ready.view(1, -1, self.embedding_dim).to(self.device)\n self.hidden_gru_sentences = self.init_hidden_sentences()\n hs, self.hidden_gru_sentences = self.gru_sentence(torch.tensor(sentences_ready_gru), self.hidden_gru_sentences)\n # SENTENCE MLP\n us = nn.Tanh()(self.MLP_sentence(hs)).to(self.device)\n # Sentence attention\n attention_score = torch.matmul(us, self.attention_sentence).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(us.size(0), us.size(1), 1).to(self.device)\n scored_x = (hs * attention_score).to(self.device)\n v = torch.sum(scored_x, dim=1).to(self.device)\n # classification\n p = self.MLP_classification(v).to(self.device)\n out = torch.cat((out, p.float()), 0).float().to(self.device)\n return out", "title": "" }, { "docid": "3f78117a3cf6cb34f721eab0266e6eeb", "score": "0.4870776", "text": "def test(self, filename: str, info_extractor: Optional[InfoExtractor]):\r\n if self.model is None:\r\n raise RatingModel.RatingModelError(\"model is not loaded or trained yet\")\r\n doc, _ = 
loadDocumentIntoSpacy(filename, self.parser, self.nlp)\r\n\r\n print(\"Getting rating...\")\r\n if self._type == \"fixed\":\r\n print(\"working on fixed model\")\r\n if self.keywords is None:\r\n raise RatingModel.RatingModelError(\"Keywords not found\")\r\n\r\n seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)\r\n\r\n # scoring\r\n temp_out = self.__trainKMWM(list(seen_chunks_words), list(all_tokens_chunks), self.keywords)\r\n if temp_out is None:\r\n raise RatingModel.RatingModelError(\r\n \"Either parser cannot detect text or too few words in resume for analysis. Most usually the former.\"\r\n )\r\n km_scores, wm_scores = temp_out\r\n # average of km/wm scores for all keywords\r\n km_score = np.mean(km_scores)\r\n wm_score = np.mean(wm_scores)\r\n final_score = km_score * wm_score\r\n elif self._type == \"lda\":\r\n if self.lda is None or self.dictionary is None or self.top_k_words is None:\r\n raise RatingModel.RatingModelError(\"No LDA found\")\r\n\r\n seen_chunks_words, all_tokens_chunks = getAllTokensAndChunks(doc)\r\n seen_chunks_words, all_tokens_chunks = (\r\n list(seen_chunks_words),\r\n list(all_tokens_chunks),\r\n )\r\n\r\n # scoring\r\n new_seen_chunks_words = self.__keep_top_k_words(seen_chunks_words)\r\n bow = self.dictionary.doc2bow(new_seen_chunks_words)\r\n doc_distribution = np.array(\r\n [tup[1] for tup in self.lda.get_document_topics(bow=bow)]\r\n )\r\n # get keywords and weights\r\n keywords = []\r\n all_pair_scores = []\r\n all_topic_scores = []\r\n all_diff_scores = []\r\n # take top 5 topics\r\n for j in doc_distribution.argsort()[-5:][::-1]:\r\n topic_prob = doc_distribution[j]\r\n # take top 5 words for each topic\r\n st = self.lda.show_topic(topicid=j, topn=5)\r\n sum_st = np.sum(list(map(lambda x: x[1], st)))\r\n pair_scores = []\r\n for pair in st:\r\n keywords.append(pair[0])\r\n pair_scores.append(pair[1])\r\n all_pair_scores.append(np.array(pair_scores))\r\n all_topic_scores.append(np.array(topic_prob))\r\n\r\n all_pair_scores = np.array(all_pair_scores)\r\n norm_all_pair_scores = all_pair_scores.T / np.sum(all_pair_scores, axis=1)\r\n norm_all_topic_scores = all_topic_scores / np.sum(all_topic_scores)\r\n all_diff_scores = (norm_all_pair_scores * norm_all_topic_scores).flatten()\r\n weights = pd.Series(all_diff_scores, index=keywords)\r\n weights.sort_values(ascending=False, inplace=True)\r\n\r\n temp_out = self.__trainKMWM(seen_chunks_words, all_tokens_chunks, keywords)\r\n if temp_out is None:\r\n print(\r\n \"Either parser cannot detect text or too few words in resume for analysis. Most usually the former. 
Skip document.\"\r\n )\r\n km_scores, wm_scores = temp_out\r\n\r\n # average of km/wm scores for all keywords\r\n km_score = np.dot(weights.values, km_scores)\r\n wm_score = np.dot(weights.values, wm_scores)\r\n\r\n final_score = km_score * wm_score\r\n\r\n # max_score = self.model[\"score\"].iloc[0] - np.std(self.model[\"score\"])\r\n # min_score = self.model[\"score\"].iloc[-1]\r\n mean = np.mean(self.model[\"score\"])\r\n sd = np.std(self.model[\"score\"])\r\n\r\n rating = min(10, max(0, round(5 + (final_score-mean)/sd, 2)))\r\n if info_extractor is not None:\r\n print(\"-\" * 20)\r\n\r\n # info_extractor.extractFromFile(filename)\r\n output= info_extractor.extractFromFile(filename)\r\n\r\n print(\"output:----\",output)\r\n print(\"-\" * 20)\r\n print(\"Rating: %.1f\" % rating)\r\n # if info_extractor is not None:\r\n # print(\"info extractor is not working\")\r\n # env = os.environ\r\n # subprocess.call([sys.executable, filename], env=env)\r\n return output", "title": "" }, { "docid": "935e065ed2e8ef1071fd6ea6c9fb8b2e", "score": "0.48706353", "text": "def collect_docs() -> Dict[str, Any]:\n docs = {}\n\n for integration_dir, meta in all_integrations():\n integration_name = integration_dir.name\n markdown = \"\"\n for readme in sorted(integration_dir.glob(\"*.md\")) + sorted(integration_dir.glob(\"README.md.jinja\")):\n markdown += readme.read_text(encoding=\"utf-8\")\n\n if not markdown:\n continue\n\n docs[integration_name] = {\n \"markdown\": process_markdown_for_app(markdown, integration_dir, uses_legacy_build(meta))\n }\n\n return docs", "title": "" }, { "docid": "8cd00602ff5af8d15d5dd68f4ed79855", "score": "0.48668104", "text": "def get_slide_analytics_new(slides) -> List[int]:\n word_count = []\n for slide in slides:\n print(slide)\n words = 0\n for shape in slide.shapes:\n if not shape.has_text_frame:\n continue\n print(shape.name)\n for paragraph in shape.text_frame.paragraphs:\n for run in paragraph.runs:\n print(\" \" + run.text)\n words += len(run.text.split())\n word_count.append(words)\n return word_count", "title": "" }, { "docid": "e4dd7429b51b3115c527e2a80a20b5d7", "score": "0.48623803", "text": "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "title": "" }, { "docid": "604fd95d4d3feaf292951f92b171aa21", "score": "0.48512176", "text": "def extract(self, documents):\n\n # Feature vector to return\n features = np.zeros((len(documents), len(self.idx_to_word)))\n\n # Raise an exception if 'extract' is called before 'preprocess'\n if len(self.word_to_idx) == 0 or len(self.idx_to_word) == 0:\n raise Exception(\"Dictionary not initialised.\")\n\n # Iterate over all documents\n for idx, doc in enumerate(documents):\n # Split the doc into a list of words\n words = extract_words(doc)\n\n # For each word\n for w in words:\n # Calculate it's frequency, however, keep in mind\n # that this word may not have been in the training\n # corpus. 
In that case, ignore the word.\n ''' YOUR CODE HERE '''\n try:\n features[idx][self.word_to_idx[w]] = words.count(w)\n except KeyError:\n pass\n\n ''' END CODE FOR THIS LOOP '''\n\n # Divide the vector by the total number of words in the document to\n # normalize the frequencies.\n ''' YOUR CODE HERE '''\n features[idx] = features[idx]/len(words)\n ''' END CODE FOR THIS LOOP '''\n\n return features", "title": "" }, { "docid": "5830eddcc02787f888755402d0486b85", "score": "0.48499954", "text": "def environmental_science_news():\n\n return general_scraper(['http://mesva.univaq.it/?q=avvisi/cl-clm/52671'])", "title": "" }, { "docid": "381a5db4c0d0ee2186d6415772225d77", "score": "0.48452774", "text": "def analyse() -> 'html':\n storeFileName = ''\n if request.method == 'POST':\n \n if 'file' not in request.files:\n return render_mainpageerror('No file selected')\n \n file = request.files['file']\n if file.filename == '':\n return render_mainpageerror('Empty file name Please select a File to upload')\n \n if file and not allowed_file(file.filename):\n storeFileName = filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return render_mainpageerror('Not allowed file format')\n \n if ' ' in file.filename:\n return render_mainpageerror('Please remove withespace from file name before uploading')\n \n if file and allowed_file(file.filename):\n originalFilename = file.filename\n storeFileName = filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n \n \n if len(storeFileName) == 0:\n return render_template('mainpage.html',\n title='Word association')\n else:\n wordsfromfile = readFile.red_uploaded_file(storeFileName)\n #print(wordsfromfile, 'The ans from pdf')\n if wordsfromfile == 'STOP':\n return render_mainpageerror('Cant find Encoding on PDF document')\n size = get_store()\n if not check_fileName(size):\n \"\"\"Analyse first time building up JSON file\"\"\"\n keyWords = readFile.read_JSON()\n allJson = readFile.load_JSON()\n data = Counter(wordsfromfile)\n printResultsExluds = []\n container = {}\n container['size'] = size\n container['file'] = storeFileName\n toDB = []\n for k, v in sorted(data.items()):\n if k in keyWords:\n string = allJson[k][:3]\n toDB.append({k : v})\n toDB.append(string)\n else:\n value = str(v)\n keystr = k + '(' + value + ')'\n printResultsExluds.append(keystr)\n for w in printResultsExluds:\n toDB.append({w:'was not found in the associations list'})\n\n container['files'] = toDB \n js = json.dumps(container)\n print(js)\n c = get_client()\n c.insert(json.loads(js))\n print('insert')\n \"\"\"If analysed first time\"\"\"\n return pr(size, 'Analysed first time')\n else:\n \"\"\"If already Analysed\"\"\"\n return pr(size,'Already analysed')", "title": "" }, { "docid": "77e666d9bf2f728844369dbba517797b", "score": "0.48416984", "text": "def develop_output():\n output_array = []\n for docs in db.get_collection(\"google_places\").find({}):\n docs[\"city\"] = docs[\"vicinity\"].split(\",\")[-1].strip()\n del docs['_id']\n\n for mg_doc in db.get_collection(\"michelin_guide\").find({}):\n del mg_doc['_id']\n\n if (fuzz.token_set_ratio(docs[\"vicinity\"], mg_doc[\"datasheets\"][0][\"address\"]) > 80 and\n fuzz.token_set_ratio(docs[\"name\"], mg_doc[\"datasheets\"][0][\"name\"]) > 80):\n docs[\"michelin_stars\"] = mg_doc[\"datasheets\"][0][\"michelin_stars\"]\n docs[\"michelin_mention\"] = True\n docs[\"michelin_description\"] = mg_doc[\"datasheets\"][0][\"description\"]\n 
docs[\"michelin_url\"] = mg_doc[\"datasheets\"][0][\"web\"]\n break\n\n else:\n docs[\"michelin_stars\"] = 0\n docs[\"michelin_mention\"] = False\n docs[\"michelin_description\"] = None\n docs[\"michelin_url\"] = None\n\n for yelp_doc in db.get_collection(\"Yelp\").find({}):\n del yelp_doc['_id']\n if (fuzz.token_set_ratio(docs[\"vicinity\"], yelp_doc[\"location\"][\"address1\"]) > 80 and\n fuzz.token_set_ratio(docs[\"name\"], yelp_doc[\"name\"]) > 80):\n docs[\"yelp_stars\"] = yelp_doc[\"rating\"]\n docs[\"yelp_url\"] = yelp_doc[\"url\"]\n break\n\n else:\n docs[\"yelp_stars\"] = None\n docs[\"yelp_url\"] = None\n\n \"\"\" The results in Zomato are nested in one document,\n so this for loop breaks them up so the break logic\n works better.\n \"\"\"\n clean_zomato_list = []\n for zom_doc in db.get_collection(\"zomato\").find({}):\n del zom_doc['_id']\n for restaurant in zom_doc[\"restaurants\"]:\n clean_zomato_list.append(restaurant)\n\n for restaurant in clean_zomato_list:\n if (fuzz.token_set_ratio(docs[\"vicinity\"], restaurant['restaurant'][\"location\"][\"address\"]) > 80 and\n fuzz.token_set_ratio(docs[\"name\"], restaurant['restaurant'][\"name\"]) > 80):\n docs[\"zomato_stars\"] = restaurant['restaurant']['user_rating']['aggregate_rating']\n docs[\"zomato_timings\"] = restaurant['restaurant']['timings']\n docs[\"zomato_avg_for_two\"] = restaurant['restaurant']['average_cost_for_two']\n docs[\"zomato_events\"] = restaurant['restaurant']['events_url']\n break\n\n else:\n docs[\"zomato_stars\"] = None\n docs[\"zomato_timings\"] = None\n docs[\"zomato_avg_for_two\"] = None\n docs[\"zomato_events\"] = None\n\n if docs not in output_array:\n output_array.append(docs)\n\n #db.get_collection(\"outputs\").delete_many({})\n db.get_collection(\"outputs\").insert_many(output_array)", "title": "" }, { "docid": "4ea4663df0aea9df2d26cd986153b720", "score": "0.48416552", "text": "def benchmark(extract_size=800):\n random_file = random_html_file()\n with open(join(DATA_PATH, random_file), 'r') as f:\n html_string = f.read()\n\n # GOOSE\n try:\n g = Goose({'browser_user_agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'enable_image_fetching': False})\n goose_article = g.extract(raw_html=html_string)\n goose_result = goose_article.cleaned_text\n except:\n goose_result = ' Goose error.'\n\n # EATIHT\n try:\n eatiht_result = eatiht.extract(html_string)\n except:\n eatiht_result = ' Eatiht error.'\n\n # DRAGNET\n\n try:\n dragnet_result = dragnet.extract_content(html_string)\n except Exception as e:\n dragnet_result = ' Dragnet error: ' + str(e)\n\n # LIBEXTRACT\n\n try:\n textnodes = list(libextract.api.extract(html_string))\n libextract_result = textnodes[0].text_content()\n except:\n libextract_result = ' Libextract error.'\n\n # BOILERPIPE (CanolaExtractor)\n\n try:\n extractor = Extractor(\n extractor='CanolaExtractor', html=html_string)\n boilerpipe_result = extractor.getText()\n except:\n boilerpipe_result = ' Boilerpipe error.'\n\n # NEWSPAPER\n\n try:\n article = Article('url')\n article.download(input_html=html_string)\n article.parse()\n print('Auteurs:', article.authors)\n print('Date de publication:', article.publish_date)\n newspaper_result = article.text\n except:\n newspaper_result = ' Newspaper error.'\n\n # JUSTEXT\n\n try:\n paragraphs = justext.justext(\n html_string, justext.get_stoplist(\"French\"))\n print('PARAGRAPHS')\n for p in paragraphs:\n if not p.is_boilerplate:\n print(p.text)\n justext_result = '\\n'.join(\n paragraph.text for 
paragraph in paragraphs if not paragraph.is_boilerplate)\n print('JUSTEXT_RESULT', justext_result)\n\n except Exception as e:\n justext_result = ' Justext error: ' + str(e)\n print(justext_result)\n\n # Results\n\n try:\n # finds the url associated with the file in a \"filename-url\" csv\n with open('./data/urls.csv', 'r') as csvfile:\n\n urls = dict((line['id'], line['url'])\n for line in csv.DictReader(csvfile))\n url = urls[random_file[:-5]]\n\n print('\\n\\n >>> URL n.' + random_file[:-5] + ' : ' + url)\n except:\n print('\\n\\n (URL of the html file not found. To print the associated URL, please provide a urls.csv file featuring filename & url in /data)')\n # webbrowser.open(url, autoraise=False)\n path = abspath('temp.html')\n local_url = 'file://' + path\n with open(path, 'w') as f:\n f.write(html_string)\n webbrowser.open(local_url)\n\n # print('\\n\\n /// GOOSE /// \\n')\n # print(goose_result[:extract_size])\n # print('\\n\\n /// EATIHT /// \\n')\n # print(eatiht_result[:extract_size])\n print('\\n ------ [[DRAGNET]] ------',\n len(dragnet_result), 'caractères\\n')\n print(dragnet_result[:extract_size] +\n '\\n...\\n' + dragnet_result[-extract_size:])\n print('\\n ------ [[NEWSPAPER]] ------',\n len(newspaper_result), 'caractères\\n')\n print(newspaper_result[:extract_size] +\n '\\n...\\n' + newspaper_result[-extract_size:])\n print('\\n ------ [[JUSTEXT]] ------',\n len(justext_result), 'caractères\\n')\n print(justext_result[:extract_size] +\n '\\n...\\n' + justext_result[-extract_size:])\n # print('\\n\\n /// LIBEXTRACT /// \\n')\n # print(libextract_result[:extract_size])\n # print('\\n\\n /// BOILERPIPE (CanolaExtractor) /// \\n\\n')\n # print(boilerpipe_result[:extract_size])\n # print('\\n\\n')\n return(url)", "title": "" }, { "docid": "9092fc9ed9f2b354ab962190f27cb6f7", "score": "0.48402354", "text": "def problem3() -> None:\n try:\n print(\"Reading in the text files for each keyword...\")\n kw_df = collect_keyword_dataframe(collect_all=COLLECT_ALL) \n except FileNotFoundError:\n raise Exception(\"ERROR: The 'articles' folder was not found. Please \"\n \"ensure that the functions for problems 1 and 2 have \"\n \"been applied first.\")\n \n word2onehot, w2vp, matrices = word2vec_main(kw_df)\n sem_dist_mat = get_semantic_dist_matrix(\n w2vp.target_words, word2onehot, matrices\n )\n \n try:\n matrix2dataframe(w2vp.keywords, sem_dist_mat)\n except PermissionError:\n print(\"ERROR: Unable to write 'distance.xlsx'. 
Please ensure that \"\n \"this file is not open.\")\n \n # Functions for visualisations:\n reduce_and_plot_word_vectors(\n w2vp.keywords, [word2onehot[w] for w in w2vp.target_words], matrices\n )\n nearest_neighbours( \n w2vp, word2onehot, matrices \n )\n similarity_distributions(\n kw_df, w2vp, word2onehot, matrices\n )", "title": "" }, { "docid": "86a45a0e06cc694a864568bc14c41dea", "score": "0.48376784", "text": "def run():\n logger.info(f\"Process started:\")\n logger.info(f\"Converting Glove file to Word2Vec format\")\n convert_to_word2vec.convert(\n \"./data/source/glove.6B.50d.txt\", \"./data/source/glove.6B.50d.w2vformat.txt\"\n )\n\n logger.info(f\"Extracting Click Stream data\")\n extract_click_stream_data()\n\n logger.info(\"Extracting Wiki articles\")\n extract_wiki_articles()\n\n logger.info(f\"Generating Clickstream dataset\")\n generate_datasets()\n\n logger.info(\"Tokenizing articles\")\n WikiArticlesTokenizer().process()\n\n logger.info(\"Creating dataset with Wiki Articles\")\n create_wiki_articles_dataset()", "title": "" }, { "docid": "205c1a87d884508574f8ff796057d048", "score": "0.48364243", "text": "def main():\n loader = MicrosoftDataloader()\n train,dev,test = loader.getData()\n sentences = []\n\n # Collect all the training sentences\n for i,row in pd.concat((train,test)).iterrows():\n if isinstance(row[\"sentence1\"], basestring) and isinstance(row[\"sentence2\"], basestring):\n sentences.append(row[\"sentence1\"])\n sentences.append(row[\"sentence2\"])\n\n # Get the mapping between sentences and their cotext vectors\n mapped = get_sentence_to_context_map(sentences)\n\n # At this stage we have a map between every sentence and its context vector\n # However the JSON file must contain sentences in the same order as in the MSR data file\n data = []\n for i,sentence in enumerate(sentences):\n embedding = mapped[sentence]\n data.append({'index':i, 'embedding':embedding, 'text':sentence})\n\n # Write the sentences and embeddings to JSON\n # The array index should corrospond to the sentence #\n print \"Saving embedded sentences to: {0}\".format(EMBED_FILE)\n with open(EMBED_FILE,'w') as outfile:\n json.dump(data,outfile,indent=2)", "title": "" }, { "docid": "0c00e716572836ad08706fbaa579d01c", "score": "0.48353478", "text": "def morpho_doc(doc):\n doc_text = doc.stripped\n mystem_analyzer.start()\n # new_morpho = mystem_analyzer.analyze(doc_text)\n new_morpho = mystem_analyzer.analyze(doc_text.replace('\\n',''))\n\n morpho_list = []\n\n for element in new_morpho: # разрезаем\n\n if is_sentence_end(element):\n morpho_list.append(element)\n else:\n\n line = element.get('text', '')\n\n space_len = 0\n\n word_start = -1\n word_len = 0\n\n symbol_number = -1\n for symbol in line:\n\n symbol_number+=1\n\n if symbol == \"'\" or symbol == '\"' or symbol == '»' or symbol == '«':\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n # добавим кавычку\n new_element = {'text': symbol}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n elif symbol == \" \":\n\n if 
word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n word_start = -1\n word_len = 0\n\n space_len += 1\n\n else:\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n space_len = 0\n\n if word_start == -1:\n word_start = symbol_number\n word_len = 1\n else:\n word_len += 1\n\n if space_len > 0: # добавим пробелы\n\n cur_space = ' ' * space_len\n\n new_element = {'text': cur_space}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n\n morpho_list.append(new_element)\n\n elif word_start > -1: # добавим слово\n\n cur_word = line[word_start:(word_start + word_len)]\n\n new_element = {'text': cur_word}\n if 'analysis' in element: new_element['analysis'] = element['analysis']\n morpho_list.append(new_element)\n\n for i in range(len(morpho_list) - 1): # переставляем\n if i > 0:\n if morpho_list[i - 1]['text'] == ' ' and morpho_list[i]['text'] == '\"' and morpho_list[i + 1]['text'] == '\\\\s':\n morpho_list[i], morpho_list[i + 1] = morpho_list[i + 1], morpho_list[i]\n\n sentence_index = 0\n word_index = 0\n start_offset = 0\n\n for element in morpho_list: # нумеруем\n if is_sentence_end(element):\n if word_index != 0:\n sentence_index += 1\n word_index = 0\n else:\n line = element.get('text', '')\n line_len = len(line)\n\n if(line[0]!=' '):\n element['start_offset'] = start_offset\n element['end_offset'] = start_offset + line_len - 1\n element['word_index'] = word_index\n element['sentence_index'] = sentence_index\n\n word_index += 1\n start_offset += line_len\n\n doc.morpho = morpho_list\n mystem_analyzer.close()", "title": "" }, { "docid": "c6d13864f711264db0bb546f4c2723f1", "score": "0.4818834", "text": "def _read_adv_embeddings(identity, target):\n embeddings_file = os.path.join(\n FLAGS.output_directory,\n identity,\n FLAGS.attack_type,\n target\n )\n embeddings_file = os.path.join(FLAGS.image_directory,\n identity,\n 'embeddings.h5')\n with h5py.File(embeddings_file, 'r') as f:\n return f['embeddings'][:].astype(np.float32)", "title": "" }, { "docid": "429e4bfdb400d42f560067a1b0a86078", "score": "0.48179644", "text": "def get_wiki_content(self):\n url = \"https://fr.wikipedia.org/w/api.php?action=query&prop=extracts&exsentences=4&explaintext=&pageids={}&format=json\".format(self.page_id)\n self.page = str(self.page_id)\n self.response = requests.get(url)\n self.data = self.response.json()\n self.wiki_data = (self.data['query']['pages'][self.page]['extract'])\n return (self.wiki_data)", "title": "" }, { "docid": "cad7e92437b5e1b930e423a581c70b19", "score": "0.48148713", "text": "def model_extract_document_embedding(self):\n input_ids = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"ids\")\n attention_mask = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"att\")\n token = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"tok\")\n\n # Embedding :\n if self.method_embedding == 'CamemBERT':\n Camembert_model = transformers.TFCamembertModel.from_pretrained(\"jplu/tf-camembert-base\")\n x = Camembert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'FlauBERT':\n # lr = 0.00001\n Flaubert_model = 
transformers.TFFlaubertModel.from_pretrained(\"jplu/tf-flaubert-base-uncased\")\n x = Flaubert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'XLM-RoBERTa':\n # lr = 0.00001\n XLMRoBERTa_model = transformers.TFXLMRobertaModel.from_pretrained(\"jplu/tf-xlm-roberta-base\")\n x = XLMRoBERTa_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'RoBERTa':\n # Experience Test path weights :\n PATH = '/kaggle/input/tf-roberta/'\n config = transformers.RobertaConfig.from_pretrained(PATH + 'config-roberta-base.json')\n Roberta_model = transformers.TFRobertaModel.from_pretrained(PATH + 'pretrained-roberta-base.h5',\n config=config)\n # Sinon :\n # Roberta_model = transformers.TFRobertaModel.from_pretrained('roberta-base')\n x = Roberta_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'BERT':\n BERT_model = transformers.TFBertModel.from_pretrained('bert-base-uncased')\n x = BERT_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n else:\n logger.critical(\"unknown embedding method name : '{}'\".format(self.method_embedding))\n\n # word vectors shape : (None, maxlen, 768)\n x = x[0]\n cls_token = x[:, 0, :]\n\n model = tf.keras.models.Model(inputs=[input_ids, attention_mask, token], outputs=cls_token)\n return model", "title": "" }, { "docid": "73129f4013ee617835f66bf33515d190", "score": "0.48147637", "text": "def iter_documents(self):\n raise NotImplementedError", "title": "" }, { "docid": "0584aa2acf6c3fbdc1c0d6e5b17ff798", "score": "0.4809549", "text": "def problem4() -> None:\n \n # Obtain the dataframe containing lines from the articles and the keyword\n # with which the article is associated.\n try:\n kw_df = collect_keyword_dataframe(collect_all=COLLECT_ALL) \n except FileNotFoundError:\n raise Exception(\"[ERROR] The 'articles' folder was not found. Please \"\n \"ensure that the functions for problems 1 and 2 have \"\n \"been applied first.\")\n \n keywords, corpus = separate_keywords_corpus(kw_df)\n \n # Complete the preprocessing for the corpus for comparison's sake\n w2vp = W2VPreprocessor(keywords, corpus)\n \n # Display the counts of sentences collected associated with each keyword\n keywords_countplot(kw_df)\n \n # Create displots for the lengths of sentences in the corpus\n displot_all_sentence_lengths(kw_df)\n displot_sentence_lengths_per_keyword(kw_df)\n \n # Visualise the semantic distances\n try:\n distances = read_in_distance_matrix()\n except FileNotFoundError:\n raise Exception(\"[ERROR] The distance matrix excel file was could not \"\n \"be found! 
Please ensure the methods for the \"\n \"preceding problems have been run first.\")\n visualise_distances(distances)\n \n # Visualise word frequency before and after preprocessing\n visualise_initial_most_frequent_words(kw_df)\n visualise_preprocessed_most_frequent_words(w2vp.corpus)", "title": "" }, { "docid": "d1a10c0f30e43fde085d71e556d4c8dd", "score": "0.4807212", "text": "def speaker_spotting_try_system2(current_trial):\n # target model\n # record the model embedding vector \n # and model id\n model = {}\n model_id = current_trial['model_id'] \n model_embedding = models[current_trial['model_id']]\n model['mid'] = model_id\n model['embedding'] = model_embedding\n \n # where to look for this target\n try_with = current_trial['try_with']\n \n # precomputed embedding\n embeddings = precomputed(current_trial)\n \n # annotation of current file\n oracle_diarization = REFERENCE[current_trial['uri']].crop(current_trial['try_with'])\n \n # find index of first and last embedding fully included in 'try_with'\n indices = embeddings.sliding_window.crop(try_with, mode='strict')\n first, last = indices[0], indices[-1]\n onlineOracleClustering = clustering.OnlineOracleClustering(current_trial['uri'])\n start = embeddings.sliding_window[0].start\n data = np.zeros((len(embeddings.data), 1))\n for i, (window, _) in enumerate(embeddings):\n # make sure the current segment is in 'try_with'\n if i < first:\n start = window.end\n continue\n if i > last:\n break\n \n so_far = Segment(start, window.end)\n current_annotation = oracle_diarization.crop(so_far)\n score = 0.\n for segment, _, label in current_annotation.itertracks(label=True):\n example = {}\n example['label'] = label\n example['segment'] = segment\n example['embedding'] = embeddings.crop(segment, mode='center')\n example['indice'] = [i]\n # compute the distance with model\n example['distances'] = {}\n example['distances'][model['mid']] = list(cdist(example['embedding'], \n model['embedding'], \n metric='cosine').flatten())\n # update the online oracle clustering\n onlineOracleClustering.upadateCluster(example)\n if not onlineOracleClustering.empty():\n # compute the current score\n min_dist = min(onlineOracleClustering.modelDistance(model))\n score = max(score, 2-min_dist)\n data[i] = score\n start = window.end\n import pdb\n #pdb.set_trace()\n \n # transform scores to sliding window features\n data = data[first:last+1]\n sliding_window = SlidingWindow(start=embeddings.sliding_window[first].start,\n duration=embeddings.sliding_window.duration,\n step=embeddings.sliding_window.step)\n \n return SlidingWindowFeature(data, sliding_window)", "title": "" }, { "docid": "7fb7839b49621951ed2f1a6da2cd10fc", "score": "0.48055255", "text": "def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict", "title": "" }, { "docid": "f2ec2642418c7cdf0bd5b65ab5d50cf4", "score": "0.4800392", "text": "def main() -> Dict[int, int]:\n # DOCUMENTS DIRECTORY\n # Path of the directory that contains the .txt documents. One .txt document by company. IT NEEDS TO BE \"/data\" when you upload it in data challenge platform. 
For test in local, you can modifiy to match your data path.\n if dev:\n documents_directory = \"../example_dataset/data\"\n else: \n documents_directory = \"/data\"\n\n path_to_files: List[str] = [os.path.join(documents_directory, file) for file in os.listdir(documents_directory)]\n assert len(path_to_files) == 10 # 10 files in documents directory\n path_to_files.sort() # Sort list of path file by alphabetical order to match ground truth annotations order : IT IS ESSENTIAL.\n\n # INITIALIZATION OF YOUR OBJECTS\n data_model = FormDataModel.from_json_file(\n os.path.join(os.path.dirname(__file__), \"resources\", \"data-model.json\")\n )\n country_referential = CountryReferential.from_csv(\n os.path.join(os.path.dirname(__file__), \"resources\", \"countries_code.csv\")\n )\n form_company_filling = FormCompanyFilling([\n BasicExtractor(\n question_ids=NOT_COUNTRY_QUESTIONS_NUMBERS,\n form_data_model=data_model,\n ),\n BasicCountryExtractor(\n question_ids=COUNTRY_QUESTIONS_NUMBERS,\n form_data_model=data_model,\n country_code_referential=country_referential,\n\n )\n ])\n\n # COMPUTE PREDICTION BY FILE (ie company)\n print(\"##################################\")\n print(\"RUNNING PREDICTION\")\n results: Dict[int, int] = {}\n start_time = time.time()\n for i, path in enumerate(path_to_files):\n start = time.time()\n print(f\"File : {path}\")\n with open(path, \"r\") as input_file:\n text = input_file.read()\n print(\"... Encoding ...\")\n start_encoding = time.time()\n embeddings = model.encode(prepare_sentences(text))\n # embeddings = []\n print(\"Successfully encoded\")\n print(\"Encoding time : \", time.time() - start_encoding)\n\n form_company_response = form_company_filling.fill(text, embeddings, model)\n form_company_response.sort_by_question_id() # ESSENTIAL : Sort the response by question number for each company\n for answer in form_company_response.answers:\n question_number = answer.question_id + i * 22 # ESSENTIAL : each company has 22 questions. 
Each question_number in results should be unique\n results[question_number] = answer.answer_id\n # gc.collect()\n print(\"File time :\", time.time()-start, '\\n')\n # CHECK FORMAT RESULTS IS DATACHALLENGE PLATFORM COMPATIBLE\n assert len(results) == len(path_to_files) * (len(COUNTRY_QUESTIONS_NUMBERS) + len(NOT_COUNTRY_QUESTIONS_NUMBERS))\n assert set(list(results.keys())) == {i for i in range(1,221)}\n print(\"Full Time\", time.time()-start_time)\n return results", "title": "" }, { "docid": "3b2cfa7bf46142925deae1bf5c16630a", "score": "0.47919524", "text": "def corpora_stats(output):\n igFiles = []\n for root, directories, filenames in os.walk(output + \"/ig/\"):\n for filename in filenames:\n igFiles.append(os.path.join(root, filename))\n igFiles = filter(lambda x: \".txt\" in x, igFiles)\n words = []\n for file in igFiles:\n fileH = open(file, \"r\")\n words = words + fileH.read().split(\" \")\n print(\"Number of words in IG corpus: {}\".format(len(words)))\n print(\"Vocabulary size of IG corpus: {}\".format(len(set(words))))", "title": "" }, { "docid": "0cf44b52206139aa58bd58914f0c86e8", "score": "0.47894615", "text": "def main():\n # Load the metadata file.\n\n # Compute audio average length.\n # audio_lengths()\n\n # Compute age.\n average_age()\n\n # Number of words and talking time.\n num_words()", "title": "" }, { "docid": "ce416b27270034cd6ebdf65aba001f12", "score": "0.47884974", "text": "def run_module(self):\n info(\"Searching for cross site scripting (reflected)...\")\n\n # load in a list of lfi attach strings\n #self.attack_strings = self.main.db.get_wordlist(\n # self.info['wordlist_name'])\n\n self.attack_strings = ['<script>alert(1)</script>',\n '<img srx=\"x\" onerror=\"alert(1)>\"']\n\n # the search strings will be the attack strings themselves\n # because python will not interpret any javascript\n self.re_search_strings = self.attack_strings\n\n injectable_params = self._get_previous_results('HTMLParser')\n\n with concurrent.futures.ProcessPoolExecutor() as executor:\n results = executor.map(self._run_thread, injectable_params)\n\n final = []\n for r in results:\n final.extend(r)\n\n # save the results\n self._save_scan_results(final)", "title": "" }, { "docid": "3070ac458fc5ec5216e0c4ee491376a4", "score": "0.47828823", "text": "def extract_features(data, stopwords=STOPWORDS):\n tags = set()\n docs = []\n for document in data:\n doc_data = dict()\n doc_data['pmid'] = document['sourceid']\n text = document['text']\n\n # Insert PubTator annotations inside abstract\n denotations = document['denotations']\n sorted_denotations = []\n for denotation in denotations:\n begin = denotation['span']['begin']\n end = denotation['span']['end']\n obj = denotation['obj']\n for c in punctuation:\n obj = obj.replace(c, '')\n tags.add(obj)\n doc_data[obj] = doc_data.get(obj,0)+1\n sorted_denotations.append([begin,end,obj])\n sorted_denotations.sort()\n sorted_denotations.reverse()\n for begin, end, obj in sorted_denotations:\n text = text[:begin] + obj + ' ' + text[end:]\n\n doc_data['text'] = clean_text(text, stopwords)\n docs.append(doc_data)\n\n return docs", "title": "" }, { "docid": "6d2a91608f238f580391282fd94bd746", "score": "0.4773797", "text": "def passion_analyzer(text):\n\n\tlower_text = text.lower()\n\n\thashtag_scaling = 0.3\n\texclamation_scaling = 0.5\n\tuppercase_scaling = 0.2\n\n\n\tpassion_index = 0\n\n\tfor x in range(len(positive_words)):\n\t\tpassion_index += (lower_text.count(positive_words[x]))**2\n\tfor x in range(len(negative_words)):\n\t\tpassion_index -= 
(lower_text.count(negative_words[x]))**2\n\tif '!' in text:\n\t\tpassion_index *= exclamation_scaling * lower_text.count('!') + 1\n\tif '#' in text:\n\t\tpassion_index *= hashtag_scaling * lower_text.count('#') + 1\n\tpassion_index *= uppercase_scaling * sum(1 for c in text if c.isupper())\n\n\n\t\t\n\treturn math.sqrt(passion_index)", "title": "" }, { "docid": "bd2ea745317b67984d2a6bd61347cc77", "score": "0.47733462", "text": "def insights(self):\r\n return insights.Insights(self)", "title": "" }, { "docid": "decadcafbeea5086704d57080a838d03", "score": "0.4761947", "text": "def analysis():\n global prediction\n\n json_path = os.path.join(basedir, 'static', 'data', 'tmp_json')\n # csv_path = os.path.join(basedir, 'static', 'data', 'csv')\n # if not os.path.exists(csv_path):\n # os.mkdir(csv_path)\n\n if os.name == 'nt':\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf.dir'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf.dir'))\n else:\n audio_file = Path(os.path.join(json_path, 'audio_data.shlf'))\n video_file = Path(os.path.join(json_path, 'facial_data.shlf'))\n\n # Files exists\n if audio_file.is_file() and video_file.is_file():\n with shelve.open(os.path.join(json_path, 'facial_data.shlf')) as shelf:\n emotion_data = shelf['emotion_data']\n microexpression_data = shelf['micro_expression_data']\n blink_data = shelf['blink_data']\n\n with shelve.open(os.path.join(json_path, 'audio_data.shlf')) as shelf:\n mean_energy = shelf['mean_energy']\n max_pitch_amp = shelf['max_pitch_amp']\n vowel_duration = shelf['vowel_duration']\n pitch_contour = shelf['pitch_contour']\n\n else:\n emotion_data = None\n microexpression_data = None\n blink_data = None\n mean_energy = None\n max_pitch_amp = None\n vowel_duration = None\n pitch_contour = None\n\n # Training Files (choose one)\n # soc_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_soc.txt')\n # niko_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_niko.txt')\n # vero_file = os.path.join(basedir, 'static', 'data', 'train_files', 'first_vero.txt')\n\n # txt_file = soc_file\n\n # train_data = []\n\n # for cases where one parameter has more elements\n # for i in range(min(len(blink_data), len(microexpression_data), len(mean_energy))):\n # train_data.append(0)\n\n # train_file = open(txt_file)\n\n # for line in train_file:\n # index1 = int((int(line[4]) * 600) + ((int(line[5]) * 60) + (int(line[7]) * 10) + int(line[8])) / 2)\n # index2 = int((int(line[10]) * 600) + ((int(line[11]) * 60) + (int(line[13]) * 10) + int(line[14])) / 2)\n # if line[0] == 'F':\n # train_data[index1] = 1\n # train_data[index2] = 1\n\n # with open(os.path.join(csv_path, 'train.csv'), 'w', newline='') as csv_file:\n # writer = csv.writer(csv_file)\n # writer.writerow(['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency',\n # 'False/True'])\n\n # # for cases where one parameter has more elements than another\n # for index in range(min(len(mean_energy), len(blink_data), len(microexpression_data))):\n # writer.writerow([index, microexpression_data[index], blink_data[index],\n # mean_energy[index], max_pitch_amp[index], vowel_duration[index], pitch_contour[index],\n # train_data[index]])\n\n # finalresults = [['Time Interval', 'Micro-expressions', 'Blinks',\n # 'Mean Energy', 'Max Pitch Amplitude', 'Vowel Duration', 'Fundamental Frequency' ]]\n final_results = []\n\n for index in range((min(len(mean_energy), len(blink_data), 
len(microexpression_data)))):\n final_results.append([microexpression_data[index], blink_data[index],\n mean_energy[index], max_pitch_amp[index], vowel_duration[index],\n pitch_contour[index]])\n\n prediction[0] = predict(final_results)\n\n return render_template('analysis.html', mean_energy=mean_energy, max_pitch_amp=max_pitch_amp,\n vowel_duration=vowel_duration, pitch_contour=pitch_contour, blink_data=blink_data,\n microexpression_data=microexpression_data, emotion_data=emotion_data)", "title": "" }, { "docid": "dc92410a0520036c9ea03316608d2714", "score": "0.4759545", "text": "def sample_analyze_entities(text_content, fb2wiki):\n\n #client = language_v1.LanguageServiceClient()\n\n # text_content = 'California is a state.'\n\n # Available types: PLAIN_TEXT, HTML\n type_ = enums.Document.Type.PLAIN_TEXT\n\n # Optional. If not specified, the language is automatically detected.\n # For list of supported languages:\n # https://cloud.google.com/natural-language/docs/languages\n language = \"en\"\n document = {\"content\": text_content, \"type\": type_, \"language\": language}\n entities = []\n entity_map = {}\n\n # Available values: NONE, UTF8, UTF16, UTF32\n encoding_type = enums.EncodingType.UTF8\n\n response = client.analyze_entities(document, encoding_type=encoding_type)\n #print(text_content)\n # Loop through entitites returned from the API\n for entity in response.entities:\n #print(u\"Representative name for the entity: {}\".format(entity.name))\n # Get entity type, e.g. PERSON, LOCATION, ADDRESS, NUMBER, et al\n # print(u\"Entity type: {}\".format(enums.Entity.Type(entity.type).name))\n # Get the salience score associated with the entity in the [0, 1.0] range\n # print(u\"Salience score: {}\".format(entity.salience))\n # Loop over the metadata associated with entity. For many known entities,\n # the metadata is a Wikipedia URL (wikipedia_url) and Knowledge Graph MID (mid).\n # Some entity types may have additional metadata, e.g. ADDRESS entities\n # may have metadata for the address street_name, postal_code, et al.\n if entity.metadata['mid'] != '':\n mid = entity.metadata['mid'].strip(\"/\").replace('/','.')\n if mid in fb2wiki:\n wikiids = fb2wiki[mid]\n entities.extend(wikiids)\n #print(mid, wikiids)\n # for metadata_name, metadata_value in entity.metadata.items():\n # print(u\"{}: {}\".format(metadata_name, metadata_value))\n\n # Loop over the mentions of this entity in the input document.\n # The API currently supports proper noun mentions.\n for mention in entity.mentions:\n # print(u\"Mention text: {}\".format(mention.text.content))\n # # Get the mention type, e.g. 
PROPER for proper noun\n # print(\n # u\"Mention type: {}\".format(enums.EntityMention.Type(mention.type).name)\n # )\n #print(mention.text)\n mention_text = mention.text.content\n begin = mention.text.begin_offset\n end = mention.text.begin_offset + len(mention_text)\n #print(mention_text, begin, end)\n if begin in entity_map:\n entity_map[begin].extend([(end, wid) for wid in wikiids])\n else:\n entity_map[begin] = [(end, wid) for wid in wikiids]\n\n\n # Get the language of the text, which will be the same as\n # the language specified in the request or, if not specified,\n # the automatically-detected language.\n # print(u\"Language of the text: {}\".format(response.language))\n return entities, entity_map", "title": "" }, { "docid": "a35cdeb822e74d2e45ff1f5ee19711e8", "score": "0.47527993", "text": "def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')", "title": "" }, { "docid": "c1e768c23ac138268528027f134242d6", "score": "0.47511122", "text": "def do_analysis(ckpt, queries_type, entities_type, request):\n global currently_analyzing, results, d, analysis_user\n try:\n print(\"starting analysis!\")\n if entities_type == \"all\":\n print(\"using all entities detected!\")\n elif entities_type == \"uploaded\":\n print(\"using only entities specified in csv file!\")\n \n currently_analyzing = True\n analysis_user = request.user.username\n results = []\n proj_path = os.path.abspath(os.path.dirname(__file__)).split(\"FYP_Web_App\")[0]\n ckpt = proj_path + \"FewRel/checkpoint/\" + ckpt\n if d is None or d.ckpt_path != ckpt:\n d = DetectionFramework(ckpt_path=ckpt)\n if cancel_flag[0]:\n return\n d.clear_support_queries()\n if len([i for i in os.listdir(\"temp/relation_support_datasets\") if 'csv' in i and request.user.username in i]) == 0:\n raise ValueError(\"Please upload relation support dataset!\")\n \n d.load_support_files(\"temp/relation_support_datasets\", request.user.username)\n if queries_type == \"csv_option\":\n if not os.path.exists(\"temp/queries.csv\"):\n raise ValueError(\"Please upload query CSV dataset!\")\n d.load_queries_csv(\"temp/queries.csv\")\n \n elif queries_type == \"url_option\":\n if not os.path.exists(\"temp/url.txt\"):\n raise ValueError(\"Please specify news article url!\")\n with open(\"temp/url.txt\") as f:\n url = f.read()\n d.load_url(url)\n \n elif 
queries_type == \"txt_option\":\n d.load_text_files(os.path.abspath(\"temp/text_files\"))\n \n elif queries_type == \"ind_sentence_option\":\n ind_sentence = request.POST.get('ind_sent')\n d.load_ind_sentence(ind_sentence)\n \n elif queries_type == \"html_option\":\n d.load_html_file_queries(os.path.abspath(\"temp/html_files\"))\n \n if entities_type == \"uploaded\":\n d.trim_queries_based_on_entities_file(os.path.abspath(\"temp/entities_csv_file.csv\"))\n\n if cancel_flag[0]:\n return\n d.detect(rt_results=results, cancel_flag=cancel_flag)\n if cancel_flag[0]:\n return\n src=None\n if queries_type == \"csv_option\":\n src = \"queries_csv\"\n elif queries_type == \"txt_option\":\n src = \"queries_text_file\"\n elif queries_type == \"ind_sentence_option\":\n src = \"ind_sentence\"\n elif queries_type == \"url_option\":\n with open(\"temp/url.txt\") as f:\n src = f.read()\n elif queries_type == \"html_option\":\n src = \"html_files\"\n \n s = Source(source=src, user=request.user)\n s.save()\n for r in results:\n er = ExtractedRelation(sentence=r['sentence'],head=r['head'],tail=r['tail'],pred_relation=r['pred_relation'],sentiment=r['sent'],conf=r['conf'],ckpt=ckpt, source=s)\n er.save()\n except Exception as e:\n print(len(str(e)))\n print(str(e))\n errors.append(str(e))\n tb = traceback.format_exc()\n print(tb)\n finally:\n currently_analyzing = False\n analysis_user = None", "title": "" }, { "docid": "a5fb22dc5cfe9f14d6eb38fe3934c88a", "score": "0.47506815", "text": "def extract_information(preprocessed_sentences):\n parsed = list(map(lambda sentence: nlp(sentence), preprocessed_sentences))\n\n quantities = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'QUANTITY'), parsed))\n dates = list(filter(lambda sentence: eh.sentence_has_type(sentence, 'DATE'), parsed))\n\n hurricane_name = eh.extract_frequent_regex_match(parsed, '[Hh]urricane ([A-Z][a-z]+)').most_common(1)[0][0]\n hurricane_category = eh.extract_frequent_regex_match(parsed, '[Cc]ategory ([0-9]+)').most_common(1)[0][0]\n\n tropical_storm_name = eh.extract_frequent_regex_match(parsed, '[Tt]ropical [Ss]torm ([A-Z][a-z]+)').most_common(1)[0][0]\n formation_date, middle_month = extract_storm_timeline(dates, hurricane_name)\n\n preperation_info = extract_preparation_information(parsed)\n prep_gpes = preperation_info[0].most_common(3)\n\n restore_info = extract_restoration_information(parsed)\n\n landfall_info = extract_landfall_information(parsed)\n\n wind_info = extract_wind_information(quantities)\n rain_info = extract_rain_information(quantities)\n size_info = extract_size_information(parsed)\n\n # formation_info = extract_formation_info(parsed)\n death_info = extract_death_damages_info(parsed)\n\n print(constants.HURRICANE_SENTENCE.format(hurricane_name, middle_month, hurricane_category))\n print(constants.LANDFALL_SENTENCE.format(hurricane_name, landfall_info[2], landfall_info[3], landfall_info[0], landfall_info[1]))\n print(constants.WIND_SENTENCE.format(wind_info[0], wind_info[1], wind_info[2]))\n print(constants.RAIN_SENTENCE.format(hurricane_name, rain_info[1], rain_info[0], rain_info[2]))\n print(constants.FORMATION_SENTENCE.format(formation_date, tropical_storm_name))\n print(constants.PREPARATION_SENTENCE.format(prep_gpes[0][0], prep_gpes[1][0], prep_gpes[2][0], preperation_info[1].\n most_common(1)[0][0]))\n print(constants.SIZE_SENTENCE.format(size_info[0], size_info[1]))", "title": "" }, { "docid": "8b0966fa977dfe08eafd5007f9d197d8", "score": "0.4750112", "text": "def inference(self):\n embeddings = 
self.process_speaker(speaker_speech_path=self.main_configs.SPEAKER_SPEECH_PATH)\n with open(self.main_configs.INPUT_TEXTS_PATH, \"r\") as file:\n texts = file.readlines()\n specs = self.synthesize_spectrograms(texts=texts, embeddings=embeddings)\n specs = specs[0]\n wav = self.generate_waveform(specs)\n return wav", "title": "" }, { "docid": "1109a8c9c043b777d8ffe3c7da5c6437", "score": "0.47493204", "text": "def main(self, words_docs, cleaned_sentences, lang, model_dir, number_of_clusters, embedding_model, model_id):\n\t\ttry:\n\t\t\tif embedding_model == \"tfidf\": text_vector = self.create_tfidf_vectors(cleaned_sentences)\n\t\t\telif embedding_model == \"word2vec\": text_vector = self.create_w2v_vectors(words_docs)\n\t\t\tmodel, pred_dict = self.train_model(cleaned_sentences, text_vector, number_of_clusters, lang, model_id, model_dir)\n\t\t\tdf_dominant_topic = self.evaulate_clusters(pred_dict, model_dir)\n\n\t\texcept Exception as e:\n\t\t\tprint(\"\\n Error in main : \",e)\n\t\t\tprint(\"\\n Error details : \", traceback.format_exc())\n\n\t\treturn df_dominant_topic", "title": "" }, { "docid": "350a25bc36f4975e5f26fd090758162e", "score": "0.4746469", "text": "def feature_extraction(args, what_roi):\n print(\"Start feature extraction...\")\n descr_dict = {}\n file_list = []\n for root, dirs, files in os.walk(args.imdir):\n for n, filename in enumerate(files):\n if n == 0:\n print(\"\\nCurrent directory: %s\" % root)\n filepath = os.path.join(root, filename)\n img = cv2.imread(filepath, cv2.IMREAD_COLOR)\n\n if type(img) != np.ndarray:\n sys.stderr.write(\"--> %d File %s is not an img: will be skipped.\\n\" % (n, filename))\n continue\n else:\n print(\"%d Image %s is being processed...\" % (n, filename))\n file_list.append(filename)\n\n img = apply_roi(args, what_roi, img)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n ret, thresh = cv2.threshold(img_gray, 170, 255, cv2.THRESH_TRUNC)\n des = surf_keypoint_detection(thresh)\n descr_dict[filename] = des\n\n print(\"\\nAll images have been processed.\")\n return descr_dict, file_list", "title": "" }, { "docid": "83f737bac1e245de9cc9cbb67328c603", "score": "0.47404498", "text": "def documents(self):\n from kitsune.wiki.models import Document\n\n return Document.objects.filter(documentimage__image=self)", "title": "" }, { "docid": "36c67e8973d22370cdf488112cda2c18", "score": "0.47379708", "text": "def main():\n logging.basicConfig(level=logging.WARN)\n\n text = extract()\n text, char_indices, indices_char, x, y = transform(text)\n model(text, char_indices, indices_char, x, y)\n\n pass", "title": "" }, { "docid": "3405afec9e0f9e933b1ab9fd89407f6e", "score": "0.473772", "text": "def run_extraction(self):\n self.background_estimator = ReflectedRegionsBackgroundEstimator(\n observations=self.observations, **self.config[\"background\"]\n )\n self.background_estimator.run()\n\n self.extraction = SpectrumExtraction(\n observations=self.observations,\n bkg_estimate=self.background_estimator.result,\n **self.config[\"extraction\"]\n )\n\n self.extraction.run()", "title": "" }, { "docid": "87bc13b67079a664c76ca1d541f857c2", "score": "0.47326425", "text": "def getExcerpts(self, text, DICECodeResults):\n\t\t\"\"\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \"\"\"\n\t\t\n\t\tdef getKernels(indices):\n\t\t\t\"\"\"\n\t\t\t\tgetKernels() is a sub-method that extracts strings from a doc-\n\t\t\t\tument using indices provided by the DICECodeResults data struc-\n\t\t\t\tture passed into this sub-method's parent method, 
getExcerpts().\n\t\t\t\tThis sub-method returns three strings.\n\n\t\t\t\tindices --> tuple containing indices in the document with text to extract.\n\t\t\t\"\"\"\n\n\t\t\ti = indices[0]\n\t\t\tj = indices[1]\n\n\t\t\th = i - self.scope\n\t\t\tk = j + self.scope\n\n\t\t\tif h < 0: h = 0\n\t\t\tif k > len(text): k = len(text)-1\n\n\t\t\treturn text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")\n\t\t\t#return \"|\"+text[h:i].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\", text[i:j].replace(\"\\n\", \"__\").replace(\"\\t\", \" \"), \"|\"+text[j:k].replace(\"\\n\", \"__\").replace(\"\\t\", \" \")+\"|\"\n\n\t\tdef getComboTerms(tuples):\n\t\t\t\"\"\"\n\t\t\t\tgetComboTerms() is a sub-method that combines search terms and \n\t\t\t\ttheir indices provided in the tuple parameter into a string with\n\t\t\t\tthe following structure: [(variant, index)]. This sub-method re-\n\t\t\t\tturns a string of that structure.\n\n\t\t\t\ttuples --> data structure containing the search term and index of the search term in the form of: (term, index)\n\t\t\t\"\"\"\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))\n\n\t\tdef getProximity(tuples):\n\t\t\t\"\"\"\n\t\t\t\tgetProximity() is a sub-method that calculates the distance of the search terms provided in the tuple parameter. \n\t\t\t\tThis sub-method returns an absolute value integer.\n\n\t\t\t\ttuples:\tdata structure containing the search term and index of the search term in the form of: (term, index)\n\t\t\t\"\"\"\n\t\t\tsortedIndices = [indices for indices in tuples]\n\t\t\t#return abs(sortedIndices[0][1] - sortedIndices[-1][0])\n\t\t\treturn sortedIndices[-1][0] - sortedIndices[0][1] \n\n\t\t\"\"\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \"\"\"\n\n\t\texcerptsResults = list()\t\t# NEW list to contain the expanded data structure provided by the DICECodeResults parameter\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tappend = excerptsResults.append\n\t\tformat = str.format\n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\tfor row in DICECodeResults:\n\n\t\t\tDICECode \t\t= row[0]\t# (1) DICE code as specified in C:\\Users\\a5rjqzz\\Desktop\\Python\\files\\Types.gd\n\t\t\tTYPECode \t\t= row[1]\t# (2) Type code as specified in C:\\Users\\a5rjqzz\\Desktop\\Python\\files\\Types.gd\n\t\t\tCombo \t\t= False\t\t# (3) Boolean status of the presence of a combo term\n\t\t\tdocumentIndex \t= 0\t\t\t# (4) Index of this search term in the document\n\t\t\tindices \t\t= row[2]\t# (5) Indices of the search term and combo term if present\n\t\t\tproximity\t\t= 0\t\t\t# (6) Distance between search term and combo terms\n\n\t\t\tif type(row[2][0]) == type(tuple()):\n\t\t\t\tCombo = True\t# If the type of search term is a combo, this is true\n\n\t\t\t\tfor tuples in row[2]:\n\t\t\t\t\tindices \t\t\t\t\t\t= tuples[0]\t\t\t\t# (1) Location(s) of the search term in the tuple\n\t\t\t\t\tdocumentIndex \t\t\t\t\t= indices[0]\t\t\t# (2) Location of the search term in the document\n\t\t\t\t\tcomboTerms \t\t\t\t\t\t= getComboTerms(tuples)\t# (3) Multiple terms assigned to variable comboTerms\n\t\t\t\t\tproximity \t\t\t\t\t\t= getProximity(tuples)\t# (4) Proximity of combo terms if present\n\t\t\t\t\tkernelLeft, kernel, 
kernelRight = getKernels(indices)\t# (5) Left, center, and right kernels or excerpts\n\n\t\t\t\t\tappend([DICECode, TYPECode, Combo, documentIndex, kernelLeft, kernel, kernelRight, comboTerms, proximity])\n\n\t\t\telse:\n\t\t\t\tdocumentIndex \t\t\t\t\t= indices[0]\t\t\t\t\t\t\t\t\t# (1) Location of the search term in the document\n\t\t\t\tcomboTerms \t\t\t\t\t\t= format(\"[{0}]\", text[indices[0]:indices[1]])\t# (2) Single term assigned to variable comboTerms\n\t\t\t\tkernelLeft, kernel, kernelRight = getKernels(indices)\t\t\t\t\t\t\t# (3) Left, center, and right kernels or excerpts\n\n\t\t\t\tappend([DICECode, TYPECode, Combo, documentIndex, kernelLeft, kernel, kernelRight, comboTerms, proximity])\n\n\t\treturn excerptsResults", "title": "" }, { "docid": "44f3996b20f8d1879a7cc17af9b1c6be", "score": "0.47272265", "text": "def _compute_expert_weights(self):\n # Shape = (num_nonzero, 1, num_binary).\n smooth_step_activations = self._smooth_step(self._z_logits)\n # Shape = (num_nonzero, num_experts).\n selector_outputs = tf.math.reduce_prod(\n tf.where(self._binary_codes, smooth_step_activations,\n 1 - smooth_step_activations),\n axis=2)\n # Weights for the single-expert selectors: shape = (num_nonzero, 1).\n selector_weights = tf.nn.softmax(self._w_logits, axis=0)\n expert_weights = tf.math.reduce_sum(\n selector_weights * selector_outputs, axis=0)\n\n return expert_weights, selector_outputs", "title": "" }, { "docid": "6db70f1c5cbc29963fed521d462eda73", "score": "0.47268128", "text": "def display_wmd(google_snippets, my_snippets):\r\n for i in range(len(google_snippets)):\r\n print(i+1, '.', urls[i])\r\n print('Google Snippet:', google_snippets[i])\r\n print('Generated Snippet:', my_snippets[i])\r\n print(\"Word Mover's Distance:\", get_wmd_dist(google_snippets[i], my_snippets[i], model))\r\n print()", "title": "" }, { "docid": "a07eef829c91f5c52fff772340bd9a01", "score": "0.4724361", "text": "def __init__(self):\n # Initialise class attributes (visibility ease)\n self.__corpus__ = None\n self.__pron_det_pos_words__ = None\n self.__triples_corpus__ = None\n self.__entities_in_doc__ = None\n self.__wvmodel__ = None\n \n # For purpose of parsing relation triplets later\n # Load pretrained embedding model\n #plog('Loading pretrained word embeddings. This will take some time to load...')\n #self.__wvmodel__ = api.load('fasttext-wiki-news-subwords-300')\n #plog('Pretrained word embeddings loaded!')", "title": "" }, { "docid": "700e62bf908212d54d738f43d57281b8", "score": "0.47217306", "text": "def extract(self, documents):\n\n # Placeholder for return value.\n features = None\n\n # Create a numpy array of all zeros for storing frequencies.\n tf = np.zeros((len(documents), len(self.idx_to_word)))\n\n # Raise an exception if 'extract' is called before 'preprocess'\n if len(self.word_to_idx) == 0 or len(self.idx_to_word) == 0:\n raise Exception(\"Extractor not initialised.\")\n\n # For each document\n for idx, doc in enumerate(documents):\n # Split strig into a list of words\n words = extract_words(doc)\n\n # Calculate it's frequency, however, keep in mind\n # that this word may not have been in the training\n # corpus. 
In that case, ignore the word.\n for w in words:\n ''' YOUR CODE HERE '''\n try:\n tf[idx][self.word_to_idx[w]] = words.count(w)\n except KeyError:\n pass\n\n ''' END CODE FOR THIS LOOP '''\n\n # Divide the frequencies by the number of words in document.\n ''' YOUR CODE HERE '''\n tf[idx] = tf[idx]/len(words)\n ''' END CODE FOR THIS LOOP '''\n\n # Calculate the Tf-Idf features.\n features = tf * self.idf\n\n return features", "title": "" }, { "docid": "f70ec6a6ce913ebf5cf9f65cfa079af5", "score": "0.47216392", "text": "def ingest_annotations(directory, annotation_type):\n in_golden = annotation_type == \"manual\"\n doc_type = None\n issue_number = None\n bid = None\n annotated_pages = []\n file_prefix = \"page\"\n try:\n bid = directory.split('/')[len(directory.split('/'))-1:][0]\n record = Metadata.objects(bid=bid).first()\n assert record is not None\n except Exception as e:\n bid,issue_number = directory.split('/')[len(directory.split('/'))-2:]\n record = Metadata.objects(bid=bid).first()\n try:\n doc_type = record[\"type_document\"]\n except Exception as e:\n doc_type = None\n logger.warning('The record for %s is not in MongoDB'%bid)\n try:\n page_numbers = {int(os.path.basename(fname).replace(\"page-\",\"\").split('.')[0]):os.path.basename(fname).split('.')[0] \n for fname in glob.glob(\"%s/*.ann\"%directory)}\n except Exception as e:\n page_numbers = {int(os.path.basename(fname).replace(\"image-\",\"\").split('.')[0]):os.path.basename(fname).split('.')[0] \n for fname in glob.glob(\"%s/*.ann\"%directory)}\n file_prefix = \"image\"\n # TODO: handle the exception of document not in the DB\n logger.info(\"Ingesting the annotations from directory \\\"%s\\\"\"%directory)\n if(issue_number != None):\n logger.info(\"Found document %s-%s [type=%s]\"%(bid,issue_number,doc_type))\n else:\n logger.info(\"Found document %s [type=%s]\"%(bid,doc_type))\n try:\n if(doc_type==\"journal\"):\n doc = LBDocument.objects(internal_id=\"%s-%s\"%(bid,issue_number)).first()\n if doc is None:\n doc = LBDocument.objects(internal_id=\"%s-%s\"%(bid, convert_issue_number(issue_number))).first()\n elif(doc_type==\"monograph\"):\n doc = LBDocument.objects(bid=bid).first()\n logger.info(\"%s has %i pages\"%(doc.internal_id, len(doc.pages)))\n for page_n in sorted(page_numbers):\n logger.debug(\"Reading in annotations for page %i from file %s/ %s\"%(page_n,directory,page_numbers[page_n]))\n entities_with_continuations = {}\n entities,relations = read_ann_file(page_numbers[page_n],directory)\n fulltext = codecs.open(\"%s/%s.txt\"%(directory,page_number2image_name(page_n, string=file_prefix)),'r', 'utf-8').read()\n line_breaks = find_newlines(fulltext)\n #logger.info(\"Found %i entities, %i relation in %s\"%(len(entities), len(relations), directory))\n doc_id = \"%s-%s-%s\"%(bid, issue_number, page_numbers[page_n])\n try:\n page = next((page for page in doc.pages if page.single_page_file_number==page_n))\n if(page[\"in_golden\"]==True):\n annotated_pages.append(page.id)\n logger.info(\"found %i entities in %s (p. %i)\"%(len(entities),doc_id,page_n))\n logger.info(\"found %i relations in %s (p. %i)\"%(len(relations.keys()),doc_id,page_n))\n \"\"\"\n Parse the `ContainedIN` relations and identify annotations that should be merged together.\n IDs of candidates for merging are stored in a dict(), e.g. 
{\"T1\":[\"T2\",\"T4\"]}\n \"\"\"\n entities_with_continuations = {}\n if len(relations.keys())>0:\n for relation_key in relations:\n args = relations[relation_key][\"arguments\"]\n if args[0] in entities_with_continuations:\n entities_with_continuations[args[0]].append(args[1])\n else:\n entities_with_continuations[args[0]] = [args[1]]\n logger.debug(\"(%s-%s) entities to be merged: %s\"%(doc_id,page_n,entities_with_continuations))\n \"\"\"\n Create the annotations (`entities` dict). \n Later they will be stored into the MongoDB\n \"\"\"\n for entity in entities:\n entities[entity][\"ingestion_timestamp\"] = datetime.utcnow()\n entities[entity][\"annotation_ingester_version\"] = __version__\n entities[entity][\"entity_type\"] = entities[entity][\"entity_type\"].lower( )\n entities[entity][\"filename\"] = \"%s/%s%s\"%(directory,page_numbers[page_n],\".ann\")\n if(doc_type==\"journal\"):\n entities[entity][\"bid\"] = bid\n entities[entity][\"pageid\"] = doc_id\n elif(doc_type==\"monograph\"):\n entities[entity][\"bid\"] = bid\n entities[entity][\"pageid\"] = \"%s-%s\"%(bid,page_numbers[page_n])\n entities[entity][\"container\"] = entities[entity][\"entity_type\"] in container_annotations\n # ref to page_id (from content_loader) ✓\n for position in entities[entity][\"positions\"]:\n line_number = find_linenumber_for_string(position[\"start\"],position[\"end\"], line_breaks)\n logger.debug(\"%s is found at line %s\"%(entity,line_number))\n position[\"line_n\"] = line_number\n position[\"page_id\"] = page.id\n positions_by_offset = sorted(entities[entity][\"positions\"]\n ,key=lambda position: position['start'])\n entities[entity][\"positions\"] = sorted(positions_by_offset\n , key=lambda position: Page.objects(id=position['page_id']).first().single_page_file_number)\n logger.debug(\"Annotations %s %s\"%(entity,entities[entity]))\n \"\"\"\n Now take the candidates for merging identified above and populate the annotations.\n Still nothing is saved into MongoDB at this stage.\n \"\"\"\n for ann_id in entities_with_continuations:\n try:\n logger.debug(\"Starting to merge SP and SF entities into meta-annotations (%s-%s)\"%(doc_id, page_n))\n logger.debug(\"%s will be merged with %s\"%(ann_id,\"+\".join(entities_with_continuations[ann_id])))\n top_entity_types = \"_\".join([entities[ann_id][\"entity_type\"]]+[entities[annid][\"entity_type\"] \n for annid in entities_with_continuations[ann_id]])\n logger.debug(\"%s\"%top_entity_types)\n new_entity = copy.deepcopy(entities)[ann_id] \n #container = True \n new_entity[\"ann_id\"] = \"%s+%s\"%(ann_id,\"+\".join(entities_with_continuations[ann_id]))\n new_entity[\"entity_type\"] = \"meta-annotation\"\n new_entity[\"top_entity_types\"] = top_entity_types\n new_entity[\"top_entities_ids\"] = [ann_id]\n new_entity[\"top_entities_ids\"] += [id for id in entities_with_continuations[ann_id]]\n fname = new_entity[\"filename\"]\n new_entity[\"filename\"] = [fname]\n for to_merge_id in entities_with_continuations[ann_id]:\n to_merge = dict(entities)[to_merge_id]\n new_entity[\"filename\"]+= [to_merge[\"filename\"]]\n new_entity[\"positions\"] = new_entity[\"positions\"] + to_merge[\"positions\"]\n positions_by_offset = sorted(new_entity[\"positions\"]\n ,key=lambda position: position['start'])\n new_entity[\"positions\"] = sorted(positions_by_offset\n ,key=lambda position: Page.objects(id=position['page_id']).first().single_page_file_number)\n new_entity[\"filename\"] = \", \".join(list(set(new_entity[\"filename\"])))\n surface_start = 
new_entity[\"positions\"][0][\"start\"]\n surface_end = new_entity[\"positions\"][-1][\"end\"]\n new_entity[\"surface\"] = fulltext[surface_start:surface_end]\n entities[new_entity[\"ann_id\"]] = new_entity\n logger.debug(new_entity)\n except Exception as e:\n logger.error(\"The merging of %s in (%s-%s) failed with error\\\"%s\\\"\"%(new_entity[\"ann_id\"],bid,page_n,e))\n \"\"\"\n Now all annotations will be stored into the MongoDB. \n And some specific fields (e.g. `top_entities`) are sorted, and annotations updated \n accordingly in the DB. \n \"\"\"\n try:\n annotations = []\n for entity in entities.values():\n annotation = Annotation(**entity)\n annotation.positions = [PagePosition(**position) for position in entity[\"positions\"]]\n annotation.save()\n annotations.append(annotation)\n page.annotations_ids = [] #TODO\n page.annotations_ids = annotations\n page.is_annotated = True\n page.save()\n logger.debug(\"Following annotations were inserted into MongoDB: %s\"%([annotation.id for annotation in annotations]))\n logger.info(\"%i annotations were inserted into MongoDB\"%len(annotations))\n except Exception as e:\n raise e\n containers = [annotation for annotation in annotations if annotation[\"container\"]] \n contained = [annotation for annotation in annotations if not annotation[\"container\"]]\n meta_annotations = [annotation for annotation in annotations if annotation[\"entity_type\"]==\"meta-annotation\"]\n logger.debug(\"meta annotations: %s\"%meta_annotations)\n \"\"\"\n Resolve the top entities in the meta-annotations: replace entity IDs with \n a reference to the annotation in the MongoDB.\n \"\"\"\n for annotation in meta_annotations:\n top_entities_ids = annotation[\"top_entities_ids\"]\n logger.debug('resolving top_entities')\n top_entities = [Annotation.objects(ann_id=ann_id, pageid=annotation.pageid).first() for ann_id in top_entities_ids]\n #top_entities = list([db_conn.annotations.find_one({\"ann_id\":ann_id,\"pageid\":annotation[\"pageid\"]}) for ann_id in top_entities_ids])\n logger.debug(\"Top entities before sorting %s\"%[ann.id for ann in top_entities])\n annotation[\"top_entities\"] = sort_annotations_by_offset(top_entities)\n logger.debug(\"Top entities after sorting %s\"%[ann.id for ann in top_entities])\n annotation[\"top_entities\"] = top_entities\n annotation.save()\n logger.debug(\"Updating meta-annotation: %s\"%annotation.id)\n \"\"\"\n Transform contains relations between entities into references between annotations \n in the MongoDB.\n \"\"\"\n for annotation in sort_annotations_by_offset(containers):\n if(len(annotation[\"positions\"]) > 1):\n start = annotation[\"positions\"][0][\"start\"]\n end = annotation[\"positions\"][len(annotation[\"positions\"])-1][\"end\"]\n else:\n start = annotation[\"positions\"][0][\"start\"]\n end = annotation[\"positions\"][0][\"end\"]\n annotation[\"contains\"] = []\n for contained_annotation in sort_annotations_by_offset(contained):\n if(len(contained_annotation[\"positions\"])>1):\n if(contained_annotation[\"positions\"][0][\"start\"] >= start\n and contained_annotation[\"positions\"][len(contained_annotation[\"positions\"])-1][\"end\"] <= end):\n annotation[\"contains\"].append(contained_annotation)\n logger.debug(\"[%s] Annotation %s (%s) contains %s (%s)\"%(\n doc_id\n ,annotation[\"ann_id\"]\n ,annotation[\"id\"]\n ,contained_annotation[\"ann_id\"]\n ,contained_annotation[\"id\"]))\n annotation.save()\n else:\n if(contained_annotation[\"positions\"][0][\"start\"] >= start\n and 
contained_annotation[\"positions\"][0][\"end\"] <= end):\n annotation[\"contains\"].append(contained_annotation)\n logger.debug(\"[%s] Annotation %s (%s) contains %s (%s)\"%(\n doc_id\n ,annotation[\"ann_id\"]\n ,annotation[\"id\"]\n ,contained_annotation[\"ann_id\"]\n ,contained_annotation[\"id\"]))\n annotation.save()\n else:\n page.is_annotated = False\n logger.info(\"%s was ignored because it's not in the golden set\"%doc_id)\n except StopIteration as e:\n logger.error(\"The annotations for %s-%s p. %i can't be ingested\"%(bid, issue_number, page_n))\n except Exception as e:\n logger.error(\"The annotations for %s-%s can't be ingested. Got error %s\"%(bid, issue_number, e))\n return annotated_pages", "title": "" }, { "docid": "b1c3d4bd616d2f302eb79715a017de11", "score": "0.4721362", "text": "def main():\n\n\tst.title(\"Sentiment Analysis Emoji App\")\n\n\tactivities = [\"Sentiment\",\"Text Analysis on URL\",\"About\"]\n\tchoice = st.sidebar.selectbox(\"Choice\",activities)\n\n\tif choice == 'Sentiment':\n\t\tst.subheader(\"Sentiment Analysis\")\n\t\tst.write(emoji.emojize('Everyone :red_heart: Streamlit ',use_aliases=True))\n\t\traw_text = st.text_area(\"Enter Your Text\",\"Type Here\")\n\t\tif st.button(\"Analyze\"):\n\t\t\tblob = TextBlob(raw_text)\n\t\t\tresult = blob.sentiment.polarity\n\t\t\tif result > 0.0:\n\t\t\t\tcustom_emoji = ':smile:'\n\t\t\t\tst.write(emoji.emojize(custom_emoji,use_aliases=True))\n\t\t\telif result < 0.0:\n\t\t\t\tcustom_emoji = ':disappointed:'\n\t\t\t\tst.write(emoji.emojize(custom_emoji,use_aliases=True))\n\t\t\telse:\n\t\t\t\tst.write(emoji.emojize(':expressionless:',use_aliases=True))\n\t\t\tst.info(\"Polarity Score is:: {}\".format(result))\n\t\t\t\n\tif choice == 'Text Analysis on URL':\n\t\tst.subheader(\"Analysis on Text From URL\")\n\t\traw_url = st.text_input(\"Enter URL Here\",\"Type here\")\n\t\ttext_preview_length = st.slider(\"Length to Preview\",50,100)\n\t\tif st.button(\"Analyze\"):\n\t\t\tif raw_url != \"Type here\":\n\t\t\t\tresult = get_text(raw_url)\n\t\t\t\tblob = TextBlob(result)\n\t\t\t\tlen_of_full_text = len(result)\n\t\t\t\tlen_of_short_text = round(len(result)/text_preview_length)\n\t\t\t\tst.success(\"Length of Full Text::{}\".format(len_of_full_text))\n\t\t\t\tst.success(\"Length of Short Text::{}\".format(len_of_short_text))\n\t\t\t\tst.info(result[:len_of_short_text])\n\t\t\t\tc_sentences = [ sent for sent in blob.sentences ]\n\t\t\t\tc_sentiment = [sent.sentiment.polarity for sent in blob.sentences]\n\t\t\t\t\n\t\t\t\tnew_df = pd.DataFrame(zip(c_sentences,c_sentiment),columns=['Sentence','Sentiment'])\n\t\t\t\tst.dataframe(new_df)\n\n\tif choice == 'About':\n\t\tst.subheader(\"About:Sentiment Analysis Emoji App\")\n\t\tst.info(\"Built with Streamlit,Textblob and Emoji\")\n\t\tst.text(\"Jesse E.Agbe(JCharis\")\n\t\tst.text(\"Jesus Saves@JCharisTech\")", "title": "" }, { "docid": "c7262f473392b5b603fc98c227ff6c8d", "score": "0.47176358", "text": "def run(self):\n\n self.log.debug('Reading wordlist %s', self.wordlists)\n\n # Test\n data = self.symfony.profiler.open(self.symfony.root)\n if data is None:\n self.log.info('The target does not support file preview')\n return\n\n # Enqueue\n with open(self.wordlists) as file:\n i = 0\n for i, line in enumerate(file, 1):\n url = self.symfony.profiler.url('open')\n params = {'line': 1, 'file': line.strip()}\n self.engine.queue.put(Request(method='GET', url=url, params=params))\n self.log.debug('Enqueued %d entries', i)\n\n self.engine.join()\n found = [response for response in 
self.engine.results if response.status_code != 404]\n files = [resp.request.params['file'] for resp in found]\n\n # Composer lookup\n composer = [file for file in files if file.endswith('composer.lock')]\n if composer:\n self.log.info(\"Found: %s, run 'symfony security:check' or submit it at %s\", composer[0], self.security_url)\n\n if not found:\n self.log.warning('Did not find any file')\n return\n\n # Save results\n for response in found:\n data = self.symfony.profiler.parse_file_preview(response.text)\n self.symfony.files[response.request.params['file']] = data\n\n self.log.warning('Found the following files:')\n for file in files:\n self.log.warning(' %s', file)", "title": "" }, { "docid": "ca61f43be01711612262d6d843e3ae46", "score": "0.47137287", "text": "def biological_science_news():\n\n return general_scraper(['http://mesva.univaq.it/?q=avvisi/cl-clm/52672'])", "title": "" }, { "docid": "f5e51157244b5b9fee98575a0590882d", "score": "0.4712479", "text": "def write():\n #with st.spinner(\"Loading Dashboard ...\"):\n #ast.shared.components.title_awesome(\"\")\n\n st.title('arXiv - Analytics')\n st.text(\"\")\n if st.checkbox('Most similar words in w2v'):\n user_input = st.text_input(\"Topic (please enter up to two keywords)\", 'Machine Learning')\n user_input = user_input.lower().replace(\" \", \"_\")\n st.text(\"\")\n number_of_similar_words = st.slider('Select a modulus', 3, 50)\n plot_similar_words(model, user_input, number_of_similar_words)\n st.pyplot()\n st.text(\"\")\n if st.checkbox('Word Cloud'):\n cluster = st.slider('Select a cluster', 0, 49)\n word_cloud_kmeans(cluster)\n st.pyplot()", "title": "" }, { "docid": "efae1a4ea851905884fc047208f63737", "score": "0.47093627", "text": "def main():\n analyze_perturbations()", "title": "" }, { "docid": "d4f5b9e8f3d1e131796e7ead6df986b6", "score": "0.4708818", "text": "def ALLEN_st_cells_1_movies(self):\n exp_dict = self.template_dataset()\n exp_dict = self.add_globals(exp_dict)\n exp_dict['experiment_name'] = 'ALLEN_st_cells_1_movies'\n exp_dict['only_process_n'] = None # MICHELE\n exp_dict['randomize_selection'] = True\n exp_dict['reference_image_key'] = {'proc_stimuli': 'image'}\n exp_dict['reference_label_key'] = {'neural_trace_trimmed': 'label'}\n exp_dict['rf_query'] = [{\n 'rf_coordinate_range': { # Get all cells\n 'x_min': 20,\n 'x_max': 30,\n 'y_min': 50,\n 'y_max': 60,\n },\n 'cre_line': 'Cux2',\n 'structure': 'VISp'}]\n exp_dict['cross_ref'] = 'rf_coordinate_range_and_stimuli'\n exp_dict['store_means'] = [\n 'image',\n 'label'\n ]\n exp_dict['deconv_method'] = 'OASIS'\n exp_dict['cv_split'] = {\n 'cv_split_single_stim': {\n 'target': 0,\n 'split': 0.95\n }\n }\n # exp_dict['cv_split'] = {\n # 'split_on_stim': 'natural_movie_two' # Specify train set\n # }\n exp_dict['neural_delay'] = [8, 13] # MS delay * 30fps for neural data\n exp_dict['slice_frames'] = 2 # 4 MICHELE\n exp_dict['st_conv'] = len(\n range(\n exp_dict['neural_delay'][0],\n exp_dict['neural_delay'][1]))\n exp_dict['grid_query'] = False # False = evaluate all neurons at once\n exp_dict['cc_repo_vars'] = {\n 'output_size': [1, 1],\n 'model_im_size': [152, 304, 1],\n 'loss_function': 'l2',\n 'score_metric': 'pearson',\n 'preprocess': 'resize'\n }\n exp_dict['weight_sharing'] = True\n return exp_dict", "title": "" }, { "docid": "065651f7f50dca824f573e8e13fd54aa", "score": "0.47043127", "text": "def entity_recognition(text: str) -> spacy:\n nlp = spacy.load('en_core_web_sm')\n document = nlp(text)\n return document", "title": "" }, { "docid": 
"4fddc26dc9e32773a8f058636e527888", "score": "0.47029012", "text": "def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)", "title": "" }, { "docid": "c17e91b2e31287e2d3f226c28cd07bd0", "score": "0.4702484", "text": "def learn_visual_words(annots_df, taids, nWords, use_cache=USE_CACHE_WORDS):\n max_iters = 200\n flann_params = {}\n train_vecs_list = [pdh.ensure_values(vecs) for vecs in annots_df['vecs'][taids].values]\n train_vecs = np.vstack(train_vecs_list)\n print('Training %d word vocabulary with %d annots and %d descriptors' %\n (nWords, len(taids), len(train_vecs)))\n kwds = dict(max_iters=max_iters, use_cache=use_cache, appname='smk',\n flann_params=flann_params)\n _words = clustertool.cached_akmeans(train_vecs, nWords, **kwds)\n if WITH_PANDAS:\n # Pandasify\n wx_series = pdh.RangeIndex(len(_words), name='wx')\n #words = pd.DataFrame(_words, index=wx_series, columns=VEC_COLUMNS)\n words = pd.DataFrame(_words, index=wx_series)\n else:\n words = _words\n return words", "title": "" }, { "docid": "2dd48456bcc504de6d6154321d62cd70", "score": "0.46943823", "text": "def _run_wiki_nogui(self):\n # start wikipedia page download\n self._log_print(msg_WHITE=\"Accessing Wikipedia...\")\n\n # download wikipedia page and track progress\n for message in self._get_preload_progress():\n if \"Searching for\" in message:\n print(f\"Searching for: {GREEN}{self.ALBUM}{RESET} by \"\n f\"{GREEN}{self.ALBUMARTIST}\")\n elif \"Using offline\" in message:\n self._log_print(msg_GREEN=\"Using offline cached page insted \"\n \"of web page\")\n elif \"Found at\" in message:\n self._log_print(msg_GREEN=\"Found at: \", msg_WHITE=self.url)\n else:\n self._log_print(msg_WHITE=message)\n\n # get error messages\n error_msg = self.get_wiki()\n if error_msg:\n self._log_print(msg_GREEN=error_msg)\n return\n\n if not we_are_frozen():\n # basic html textout for debug\n self.basic_out()\n\n # find release date\n self._log_print(msg_GREEN=\"Found release date:\",\n msg_WHITE=self.get_release_date())\n\n # find list of genres\n self._log_print(msg_GREEN=\"Found genre(s)\",\n msg_WHITE=\"\\n\".join(self.get_genres()))\n\n # get and print out page contents\n self._log_print(msg_GREEN=\"Found page contents\",\n msg_WHITE=\"\\n\".join(self.get_contents()))\n\n # extract track list\n self.get_tracks()\n\n # extract personel names\n self._log_print(msg_GREEN=\"Found aditional personel\")\n self.get_personnel()\n if not we_are_frozen():\n print(self.personnel_2_str())\n\n # extract writers, composers\n self._log_print(msg_GREEN=\"Found composers\",\n msg_WHITE=\"\\n\".join(flatten_set(self.get_composers())))\n\n if not we_are_frozen():\n # save to files\n self._log_print(msg_WHITE=\"Writing to disk\")\n self.disk_write()\n\n # print out found tracklist\n self._log_print(msg_GREEN=\"Found Track list(s)\")\n self.print_tracklist()\n\n # select genre\n if not self.GENRE:\n if not self.genres:\n print(CYAN + \"Input genre:\", end=\"\")\n self.genre = input()\n else:\n print(CYAN + \"Specify which genre you want to write: [1.]\")\n for i, gen in enumerate(self.genres, 1):\n print(f\"{i}. 
{gen}\")\n\n print(\"Input number:\", CYAN, end=\"\")\n index = input()\n try:\n index = int(index) - 1\n except ValueError:\n index = 0\n\n self.GENRE = self.genres[index]\n\n # decide what to do with artists\n print(CYAN + \"Do you want to assign artists to composers? ([y]/n)\",\n RESET, end=\" \")\n if to_bool(input()):\n self.merge_artist_composers()\n\n # decide if you want to find lyrics\n print(CYAN + \"\\nDo you want to find and save lyrics? ([y]/n): \" +\n RESET, end=\"\")\n\n # download lyrics\n self.save_lyrics(to_bool(input()))\n\n print(CYAN + \"Write data to ID3 tags? ([y]/n): \" + RESET, end=\"\")\n if to_bool(input()):\n if not self.write_tags():\n self._log_print(\n msg_WHITE=\"Cannot write tags because there are no \"\n \"coresponding files\")\n else:\n self._log_print(msg_GREEN=\"Done\")", "title": "" }, { "docid": "cfc8b0bedfe5d8532ebec1c74a2dd5c2", "score": "0.4692744", "text": "def autoAnalyze(self):\n print(\"Perfoming full automatic analysis...\")\n t1=time.perf_counter()\n self.cleanUp()\n self.figure_rois()\n self.figure_roi_inspect_all()\n self.figure_dGoR_roi(showEach=False,saveAs=self.folderSave+\"/avg.png\")\n self.figure_dGoR_roi(showEach=True,saveAs=self.folderSave+\"/each.png\")\n self.index()\n print(\"analysis completed in %.02f sec\"%(time.perf_counter()-t1))", "title": "" }, { "docid": "1073921c3e3389a456bb9b4b6f965094", "score": "0.4691585", "text": "def interrogator(corpus, \n search, \n query='any',\n show='w',\n exclude=False,\n excludemode='any',\n searchmode='all',\n dep_type='collapsed-ccprocessed-dependencies',\n case_sensitive=False,\n save=False,\n just_speakers=False,\n preserve_case=False,\n lemmatag=False,\n files_as_subcorpora=False,\n only_unique=False,\n random=False,\n only_format_match=False,\n multiprocess=False,\n spelling=False,\n regex_nonword_filter=r'[A-Za-z0-9:_]',\n gramsize=2,\n split_contractions=False,\n conc=False,\n maxconc=9999,\n window=4,\n no_closed=False,\n no_punct=True,\n whitelist=False,\n **kwargs\n ):\n\n # in case old kwarg is used\n conc = kwargs.get('do_concordancing', conc)\n\n # store kwargs and locs\n locs = locals().copy()\n locs.update(kwargs)\n locs.pop('kwargs', None)\n\n if isinstance(search, STRINGTYPE) and len(search) > 3:\n raise ValueError('search argument not recognised.')\n\n import codecs\n import signal\n import os\n from time import localtime, strftime\n from collections import Counter\n\n import corenlp_xml\n import pandas as pd\n from pandas import DataFrame, Series\n\n from corpkit.interrogation import Interrogation, Interrodict\n from corpkit.corpus import Datalist, Corpora, Corpus, File, Subcorpus\n from corpkit.process import (tregex_engine, get_deps, unsplitter, sanitise_dict, \n get_speakername, animator, filtermaker)\n from corpkit.other import as_regex\n from corpkit.dictionaries.word_transforms import wordlist, taglemma\n from corpkit.dictionaries.process_types import Wordlist\n from corpkit.build import check_jdk\n\n import re\n if regex_nonword_filter:\n is_a_word = re.compile(regex_nonword_filter)\n else:\n is_a_word = re.compile(r'.*')\n \n have_java = check_jdk()\n\n def signal_handler(signal, _):\n \"\"\"pause on ctrl+c, rather than just stop loop\"\"\" \n import signal\n import sys\n from time import localtime, strftime\n signal.signal(signal.SIGINT, original_sigint)\n thetime = strftime(\"%H:%M:%S\", localtime())\n INPUTFUNC('\\n\\n%s: Paused. 
Press any key to resume, or ctrl+c to quit.\\n' % thetime)\n time = strftime(\"%H:%M:%S\", localtime())\n print('%s: Interrogation resumed.\\n' % time)\n signal.signal(signal.SIGINT, signal_handler)\n\n def fix_show(show):\n \"\"\"lowercase anything in show and turn into list\"\"\"\n if isinstance(show, list):\n show = [i.lower() for i in show]\n elif isinstance(show, STRINGTYPE):\n show = show.lower()\n show = [show]\n\n # this little 'n' business is a hack: when ngramming,\n # n shows have their n stripped, so nw should be nw \n # so we know we're ngramming and so it's not empty.\n for index, val in enumerate(show):\n if val == 'n' or val == 'nw':\n show[index] = 'nw'\n elif val == 'b' or val == 'bw':\n show[index] = 'bw'\n elif val.endswith('pl'):\n show[index] = val.replace('pl', 'x')\n else:\n if len(val) == 2 and val.endswith('w'):\n show[index] = val[0]\n return show\n\n def fix_search(search):\n \"\"\"if search has nested dicts, remove them\"\"\"\n ends = ['w', 'l', 'i', 'n', 'f', 'p', 'x', 's']\n if not search:\n return\n if isinstance(search, STRINGTYPE):\n return search\n if search.get('t'):\n return search\n newsearch = {}\n for srch, pat in search.items():\n if len(srch) == 1 and srch in ends:\n srch = 'm%s' % srch\n if isinstance(pat, dict):\n for k, v in list(pat.items()):\n if k != 'w':\n newsearch[srch + k] = pat_format(v)\n else:\n newsearch[srch] = pat_format(v)\n else:\n newsearch[srch] = pat_format(pat)\n return newsearch\n\n def pat_format(pat):\n from corpkit.dictionaries.process_types import Wordlist\n import re\n if pat == 'any':\n return re.compile(r'.*')\n if isinstance(pat, Wordlist):\n pat = list(pat)\n if isinstance(pat, list):\n if all(isinstance(x, int) for x in pat):\n pat = [str(x) for x in pat]\n pat = filtermaker(pat, case_sensitive=case_sensitive, root=kwargs.get('root'))\n else:\n if isinstance(pat, int):\n return pat\n if isinstance(pat, re._pattern_type):\n return pat\n if case_sensitive:\n pat = re.compile(pat)\n else:\n pat = re.compile(pat, re.IGNORECASE)\n return pat\n\n def is_multiquery(corpus, search, query, just_speakers):\n \"\"\"determine if multiprocessing is needed\n do some retyping if need be as well\"\"\"\n is_mul = False\n from collections import OrderedDict\n #if hasattr(corpus, '__iter__'):\n # is_mul = True\n # so we can do search = 't', query = ['NP', 'VP']:\n from corpkit.dictionaries.process_types import Wordlist\n if isinstance(query, Wordlist):\n query = list(query)\n if isinstance(query, list):\n if query != list(search.values())[0] or len(list(search.keys())) > 1:\n query = {c.title(): c for c in query}\n if isinstance(query, (dict, OrderedDict)):\n is_mul = True\n if just_speakers:\n if just_speakers == 'each':\n is_mul = True\n just_speakers = ['each']\n if just_speakers == ['each']:\n is_mul = True\n elif isinstance(just_speakers, STRINGTYPE):\n is_mul = False\n just_speakers = [just_speakers]\n #import re\n #if isinstance(just_speakers, re._pattern_type):\n # is_mul = False\n if isinstance(just_speakers, list):\n if len(just_speakers) > 1:\n is_mul = True\n if isinstance(search, dict):\n if all(isinstance(i, dict) for i in list(search.values())):\n is_mul = True\n return is_mul, corpus, search, query, just_speakers\n\n def slow_tregex(sents, **dummy_args):\n \"\"\"do the speaker-specific version of tregex queries\"\"\"\n speakr = dummy_args.get('speaker', '')\n import os\n from corpkit.process import tregex_engine\n # first, put the relevant trees into temp file\n to_open = '\\n'.join(sent.parse_string.strip() for sent 
in sents \\\n if sent.parse_string is not None)\n q = list(search.values())[0]\n ops = ['-o', '-%s' % translated_option]\n concs = []\n res = tregex_engine(query=q, \n options=ops, \n corpus=to_open,\n root=root,\n preserve_case=True\n )\n if not no_conc:\n ops += ['-w', '-f']\n whole_res = tregex_engine(query=q, \n options=ops, \n corpus=to_open,\n root=root,\n preserve_case=True\n )\n for line in whole_res:\n line.insert(1, speakr) \n\n res = format_tregex(res)\n whole_res = format_tregex(whole_res, whole=True)\n concs = make_conc_lines_from_whole_mid(whole_res, res)\n\n if root:\n root.update()\n if countmode:\n if isinstance(res, int):\n return res, False\n else:\n return len(res), False\n else:\n return res, concs\n\n def get_stats(sents, **dummy_args):\n \"\"\"get a bunch of frequencies on interpersonal phenomena\"\"\"\n from collections import Counter\n statsmode_results = Counter() \n # first, put the relevant trees into temp file\n\n for sent in sents:\n statsmode_results['Sentences'] += 1\n deps = get_deps(sent, dep_type)\n numpass = len([x for x in deps.links if x.type.endswith('pass')])\n statsmode_results['Passives'] += numpass\n statsmode_results['Tokens'] += len(sent.tokens)\n words = [w.word for w in sent.tokens if w.word is not None and w.word.isalnum()]\n statsmode_results['Words'] += len(words)\n statsmode_results['Characters'] += len(''.join(words))\n\n to_open = '\\n'.join(s.parse_string.strip() for s in sents)\n\n from corpkit.dictionaries.process_types import processes\n from corpkit.other import as_regex\n tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',\n 'Open interrogative': r'ROOT < SBARQ <<- (/\\?/ !< __)', \n 'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\\?/ !< __))',\n 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',\n 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',\n 'Open class': r'/^(NN|JJ|VB|RB)/ < __',\n 'Closed class': r'__ !< __ !> /^(NN|JJ|VB|RB)/',\n 'Clauses': r'/^S/ < __',\n 'Interrogative': r'ROOT << (/\\?/ !< __)',\n 'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.mental, boundaries='w'),\n 'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.verbal, boundaries='w'),\n 'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % \\\n as_regex(processes.relational, boundaries='w'),\n 'Verbless clause': r'/^S/ !<< /^VB.?/'}\n\n for name, q in sorted(tregex_qs.items()):\n res = tregex_engine(query=q, \n options=['-o', '-C'], \n corpus=to_open, \n root=root\n )\n statsmode_results[name] += int(res)\n if root:\n root.update()\n return statsmode_results, []\n\n def make_conc_lines_from_whole_mid(wholes,\n middle_column_result\n ):\n \"\"\"\n Create concordance line output from tregex output\n \"\"\"\n import re\n import os\n if not wholes and not middle_column_result:\n return []\n\n conc_lines = []\n # remove duplicates from results\n unique_wholes = []\n unique_middle_column_result = []\n duplicates = []\n for (f, sk, whole), mid in zip(wholes, middle_column_result):\n joined = '-join-'.join([f, sk, whole, mid])\n if joined not in duplicates:\n duplicates.append(joined)\n unique_wholes.append([f, sk, whole])\n unique_middle_column_result.append(mid)\n\n # split into start, middle and end, dealing with 
multiple occurrences\n for (f, sk, whole), mid in zip(unique_wholes, unique_middle_column_result):\n reg = re.compile(r'([^a-zA-Z0-9-]|^)(' + re.escape(mid) + r')([^a-zA-Z0-9-]|$)', \\\n re.IGNORECASE | re.UNICODE)\n offsets = [(m.start(), m.end()) for m in re.finditer(reg, whole)]\n for offstart, offend in offsets:\n start, middle, end = whole[0:offstart].strip(), whole[offstart:offend].strip(), \\\n whole[offend:].strip()\n conc_lines.append([os.path.basename(f), sk, start, middle, end])\n return conc_lines\n\n def uniquify(conc_lines):\n \"\"\"get unique concordance lines\"\"\"\n from collections import OrderedDict\n unique_lines = []\n checking = []\n for index, (_, speakr, start, middle, end) in enumerate(conc_lines):\n joined = ' '.join([speakr, start, 'MIDDLEHERE:', middle, ':MIDDLEHERE', end])\n if joined not in checking:\n unique_lines.append(conc_lines[index])\n checking.append(joined)\n return unique_lines\n\n def lemmatiser(list_of_words, tag):\n \"\"\"\n Take a list of unicode words and a tag and return a lemmatised list\n \"\"\"\n output = []\n for word in list_of_words:\n if translated_option.startswith('u'):\n word = taglemma.get(word.lower(), 'Other')\n else:\n word = wordlist.get(word, lmtzr.lemmatize(word, tag))\n if not preserve_case:\n word = word.lower()\n output.append(word)\n return output\n\n def tgrep_searcher(sents, search, show, conc, **kwargs):\n \"\"\"\n Use tgrep for constituency grammar search\n \"\"\"\n f = kwargs.get('filename')\n from corpkit.process import show_tree_as_per_option, tgrep\n out = []\n conc_output = []\n conc_out = []\n for sent in sents:\n sk = get_speakername(sent)\n results = tgrep(sent, search['t'])\n for res in results:\n out.append(show_tree_as_per_option(show, res, sent))\n if conc:\n lin = [f, sk, show_tree_as_per_option(show + ['whole'], res, sent)]\n conc_out.append(lin)\n\n if conc:\n conc_output = make_conc_lines_from_whole_mid(conc_out, out)\n return out, conc_output\n\n def gettag(query, lemmatag=False):\n \"\"\"\n Find tag for WordNet lemmatisation\n \"\"\"\n if lemmatag:\n return lemmatag\n\n tagdict = {'N': 'n',\n 'J': 'a',\n 'V': 'v',\n 'A': 'r',\n 'None': False,\n '': False,\n 'Off': False}\n\n # in case someone compiles the tregex query\n try:\n query = query.pattern\n except AttributeError:\n query = query\n \n\n qr = query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', '')\n firstletter = next((c for c in qr if c.isalpha()), 'n')\n return tagdict.get(firstletter.upper(), 'n')\n\n def format_tregex(results, whole=False):\n \"\"\"format tregex by show list\"\"\"\n import re\n\n if countmode:\n return results\n\n if not results:\n return\n\n done = []\n if whole:\n fnames, snames, results = zip(*results)\n\n if 'l' in show or 'x' in show:\n lemmata = lemmatiser(results, gettag(search.get('t'), lemmatag))\n else:\n lemmata = [None for i in results]\n for word, lemma in zip(results, lemmata):\n bits = []\n if exclude and exclude.get('w'):\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('w'), word):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('l'), lemma):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('p'), word):\n continue\n if len(list(exclude.keys())) == 1 or excludemode == 'any':\n if re.search(exclude.get('x'), lemma):\n continue\n if exclude and excludemode == 'all':\n num_to_cause_exclude = len(list(exclude.keys()))\n current_num = 0\n if exclude.get('w'):\n if 
re.search(exclude.get('w'), word):\n current_num += 1\n if exclude.get('l'):\n if re.search(exclude.get('l'), lemma):\n current_num += 1\n if exclude.get('p'):\n if re.search(exclude.get('p'), word):\n current_num += 1\n if exclude.get('x'):\n if re.search(exclude.get('x'), lemma):\n current_num += 1 \n if current_num == num_to_cause_exclude:\n continue \n\n for i in show:\n if i == 't':\n bits.append(word)\n if i == 'l':\n bits.append(lemma)\n elif i == 'w':\n bits.append(word)\n elif i == 'p':\n bits.append(word)\n elif i == 'x':\n bits.append(lemma)\n joined = '/'.join(bits)\n done.append(joined)\n if whole:\n done = zip(fnames, snames, done)\n return done\n\n def tok_by_list(pattern, list_of_toks, concordancing=False, **kwargs):\n \"\"\"search for regex in plaintext corpora\"\"\"\n import re\n if isinstance(pattern, STRINGTYPE):\n pattern = [pattern]\n if not case_sensitive:\n pattern = [p.lower() for p in pattern]\n if not concordancing:\n if case_sensitive:\n matches = [m for m in list_of_toks if m in pattern]\n else:\n matches = [m for m in list_of_toks if m.lower() in pattern]\n else:\n matches = []\n for index, token in enumerate(list_of_toks):\n if token in pattern:\n if not split_contractions:\n match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]\n else:\n match = [' '.join(t for t in list_of_toks[:index])[-140:]]\n match.append(token)\n if not split_contractions:\n match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])\n else:\n match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])\n\n matches.append(match)\n if countmode:\n return len(matches)\n else:\n return matches\n\n def tok_ngrams(pattern, list_of_toks, concordancing=False, split_contractions=True):\n import re\n result = []\n list_of_toks = [x for x in list_of_toks if re.search(regex_nonword_filter, x)]\n\n if not split_contractions:\n list_of_toks = unsplitter(list_of_toks)\n \n for i in range(len(list_of_toks)):\n try:\n the_gram = [list_of_toks[i+x] for x in range(gramsize)]\n if any(re.search(pattern, x) for x in the_gram):\n result.append(' '.join(the_gram))\n except IndexError:\n pass\n\n if countmode:\n return len(result)\n\n else:\n result = [i for i in result if result.count(i) > 1]\n return result\n\n def compiler(pattern):\n \"\"\"compile regex or fail gracefully\"\"\"\n if hasattr(pattern, 'pattern'):\n return pattern\n import re\n try:\n if case_sensitive:\n comped = re.compile(pattern)\n else:\n comped = re.compile(pattern, re.IGNORECASE)\n return comped\n except:\n import traceback\n import sys\n from time import localtime, strftime\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value, exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print('%s: Query %s' % (thetime, error_message))\n if root:\n return 'Bad query'\n else:\n raise ValueError('%s: Query %s' % (thetime, error_message))\n\n def tok_by_reg(pattern, list_of_toks, concordancing = False, **kwargs):\n \"\"\"search for regex in plaintext corpora\"\"\"\n import re\n comped = compiler(pattern)\n if comped == 'Bad query':\n return 'Bad query'\n if not concordancing:\n matches = [m for m in list_of_toks if re.search(comped, m)]\n else:\n matches = []\n for index, token in enumerate(list_of_toks):\n if re.search(comped, token):\n if not split_contractions:\n match = [' '.join(t for t in unsplitter(list_of_toks[:index]))[-140:]]\n else:\n match = [' '.join(t for t in list_of_toks[:index])[-140:]]\n 
match.append(re.search(comped, token).group(0))\n if not split_contractions:\n match.append(' '.join(t for t in unsplitter(list_of_toks[index + 1:]))[:140])\n else:\n match.append(' '.join(t for t in list_of_toks[index + 1:])[:140])\n matches.append(match)\n if countmode:\n return len(matches)\n else:\n return matches\n\n def determine_search_func(show):\n \"\"\"Figure out what search function we're using\"\"\"\n\n simple_tregex_mode = False\n statsmode = False\n tree_to_text = False\n\n if search.get('t') and not just_speakers and not kwargs.get('tgrep'):\n if have_java:\n simple_tregex_mode = True\n searcher = None\n else:\n searcher = tgrep_searcher\n optiontext = 'Searching parse trees'\n else:\n if datatype == 'plaintext':\n if any(i.endswith('n') for i in search.keys()):\n optiontext = 'n-grams via plaintext'\n raise NotImplementedError('Use a tokenised or parsed corpus for n-gramming.')\n #searcher = plaintext_ngram\n elif any(i.endswith('w') for i in search.keys()):\n if kwargs.get('regex', True):\n searcher = plaintext_regex_search\n else:\n searcher = plaintext_simple_search\n optiontext = 'Searching plaintext'\n else:\n raise ValueError(\"Plaintext search must be 'w' or 'n'.\")\n\n elif datatype == 'tokens':\n if any(i.endswith('n') for i in search.keys()):\n searcher = tok_ngrams\n optiontext = 'n-grams via tokens'\n elif any(i.endswith('w') for i in search.keys()):\n if kwargs.get('regex', True):\n searcher = tok_by_reg\n else:\n searcher = tok_by_list\n if isinstance(search.get('w'), (list, Wordlist)):\n searcher = tok_by_list\n optiontext = 'Searching tokens'\n only_parse = ['r', 'd', 'g', 'dl', 'gl', 'df', 'gf',\n 'dp', 'gp', 'f', 'd2', 'd2f', 'd2p', 'd2l']\n \n\n if datatype != 'parse' and any(i in only_parse for i in list(search.keys())):\n form = ', '.join(i for i in list(search.keys()) if i in only_parse)\n raise ValueError('Need parsed corpus to search with \"%s\" option(s).' 
% form)\n\n elif datatype == 'parse':\n if any(i.endswith('n') for i in search.keys()):\n search['w'] = search.pop('n')\n if not show_ngram:\n show = ['n']\n if any(i.endswith('t') for i in search.keys()):\n if have_java and not kwargs.get('tgrep'):\n searcher = slow_tregex\n else:\n searcher = tgrep_searcher\n optiontext = 'Searching parse trees'\n elif any(i.endswith('s') for i in search.keys()):\n searcher = get_stats\n statsmode = True\n optiontext = 'General statistics'\n elif any(i.endswith('r') for i in search.keys()):\n from corpkit.depsearch import dep_searcher\n searcher = dep_searcher\n optiontext = 'Distance from root'\n else:\n from corpkit.depsearch import dep_searcher\n searcher = dep_searcher\n optiontext = 'Dependency querying'\n \n # ngram mode for parsed data\n if show_ngram:\n optiontext = 'N-grams from parsed data'\n searcher = dep_searcher\n\n return searcher, optiontext, simple_tregex_mode, statsmode, tree_to_text\n\n def get_tregex_values():\n \"\"\"If using Tregex, set appropriate values\n\n - Check for valid query\n - Make 'any' query\n - Make list query\n \"\"\"\n\n translated_option = 't'\n if isinstance(search['t'], Wordlist):\n search['t'] = list(search['t'])\n q = tregex_engine(corpus=False,\n query=search.get('t'),\n options=['-t'],\n check_query=True,\n root=root,\n preserve_case=preserve_case\n )\n if q is False:\n if root:\n return 'Bad query', None\n else:\n return 'Bad query', None\n\n if isinstance(search['t'], list):\n regex = as_regex(search['t'], boundaries='line', case_sensitive=case_sensitive)\n else:\n regex = ''\n\n # listquery, anyquery, translated_option\n treg_dict = {'p': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],\n 'pl': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],\n 'x': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'u'],\n 't': [r'__ < (/%s/ !< __)' % regex, r'__ < (/.?[A-Za-z0-9].?/ !< __)', 'o'],\n 'w': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 't'],\n 'c': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 'C'],\n 'l': [r'/%s/ !< __' % regex, r'/.?[A-Za-z0-9].?/ !< __', 't']\n }\n\n listq, anyq, translated_option = treg_dict.get(show[0].lower())\n if isinstance(search['t'], list):\n search['t'] = listq\n elif search['t'] == 'any': \n search['t'] = anyq\n return search['t'], translated_option\n\n def plaintext_regex_search(pattern, plaintext_data, concordancing=False, **kwargs):\n \"\"\"search for regex in plaintext corpora\n\n it searches over lines, so the user needs to be careful.\n \"\"\"\n import re\n if concordancing:\n pattern = r'(.{,140})\\b(' + pattern + r')\\b(.{,140})'\n compiled_pattern = compiler(pattern)\n if compiled_pattern == 'Bad query':\n return 'Bad query'\n matches = re.findall(compiled_pattern, plaintext_data)\n if concordancing:\n matches = [list(m) for m in matches]\n if not concordancing:\n for index, i in enumerate(matches):\n if isinstance(i, tuple):\n matches[index] = i[0]\n if countmode:\n return len(matches)\n else:\n return matches\n\n def correct_spelling(a_string):\n \"\"\"correct spelling within a string\"\"\"\n if not spelling:\n return a_string\n from corpkit.dictionaries.word_transforms import usa_convert\n if spelling.lower() == 'uk':\n usa_convert = {v: k for k, v in list(usa_convert.items())}\n bits = a_string.split('/')\n for index, i in enumerate(bits):\n converted = usa_convert.get(i.lower(), i)\n if i.islower() or preserve_case is False:\n converted = converted.lower()\n elif i.isupper() and 
preserve_case:\n converted = converted.upper()\n elif i.istitle() and preserve_case:\n converted = converted.title()\n bits[index] = converted\n r = '/'.join(bits)\n return r\n\n def plaintext_simple_search(pattern, plaintext_data, concordancing=False, **kwargs):\n \"\"\"search for tokens in plaintext corpora\"\"\"\n import re\n result = []\n if isinstance(pattern, STRINGTYPE):\n pattern = [pattern]\n for p in pattern:\n if concordancing:\n pat = r'(.{0,140})\\b(' + re.escape(p) + r')\\b(.{0,140})'\n pat = compiler(pat)\n if pat == 'Bad query':\n return 'Bad query'\n matches = re.findall(pat, plaintext_data)\n if concordancing:\n matches = [list(m) for m in matches]\n for i in matches:\n result.append(i)\n else: \n for m in range(len(matches)):\n result.append(p)\n return result\n\n def make_search_iterable(corpus):\n \"\"\"determine how to structure the corpus for interrogation\"\"\"\n \n # skip file definitions if they are not needed\n if simple_tregex_mode:\n if corpus.level in ['s', 'f']:\n return {(corpus.name, corpus.path): False}\n else:\n return {(os.path.basename(i), os.path.join(corpus.path, i)): False\n for i in os.listdir(corpus.path)\n if os.path.isdir(os.path.join(corpus.path, i))}\n\n if isinstance(corpus, Datalist):\n to_iterate_over = {}\n # it could be files or subcorpus objects\n if corpus[0].level == 's':\n if files_as_subcorpora:\n for subc in corpus:\n for f in subc.files:\n to_iterate_over[(f.name, f.path)] = [f]\n else:\n for subc in corpus:\n to_iterate_over[(subc.name, subc.path)] = subc.files\n elif corpus[0].level == 'f':\n for f in corpus:\n to_iterate_over[(f.name, f.path)] = [f]\n elif corpus.singlefile:\n to_iterate_over = {(corpus.name, corpus.path): [corpus]}\n elif not hasattr(corpus, 'subcorpora') or not corpus.subcorpora:\n # just files in a directory\n if files_as_subcorpora:\n to_iterate_over = {}\n for f in corpus.files:\n to_iterate_over[(f.name, f.path)] = [f]\n else:\n to_iterate_over = {(corpus.name, corpus.path): corpus.files}\n else:\n to_iterate_over = {}\n if files_as_subcorpora:\n # don't know if possible: has subcorpora but also .files\n if hasattr(corpus, 'files') and corpus.files is not None:\n for f in corpus.files:\n to_iterate_over[(f.name, f.path)] = [f]\n # has subcorpora with files in those\n elif hasattr(corpus, 'files') and corpus.files is None:\n for subc in corpus.subcorpora:\n for f in subc.files:\n to_iterate_over[(f.name, f.path)] = [f]\n else:\n if corpus[0].level == 's':\n for subcorpus in corpus:\n to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files\n elif corpus[0].level == 'f':\n for f in corpus:\n to_iterate_over[(f.name, f.path)] = [f]\n else:\n for subcorpus in corpus.subcorpora:\n to_iterate_over[(subcorpus.name, subcorpus.path)] = subcorpus.files\n return to_iterate_over\n\n def welcome_printer(return_it=False):\n \"\"\"Print welcome message\"\"\"\n if no_conc:\n message = 'Interrogating'\n else:\n message = 'Interrogating and concordancing'\n if kwargs.get('printstatus', True):\n thetime = strftime(\"%H:%M:%S\", localtime())\n from corpkit.constants import transshow, transobjs\n sformat = '\\n'\n for k, v in search.items():\n if k == 't':\n dratt = ''\n else:\n dratt = transshow.get(k[-1], k[-1])\n drole = transobjs.get(k[0], k[0])\n if k == 't':\n drole = 'Trees'\n vform = getattr(v, 'pattern', v)\n sformat += ' %s %s: %s\\n' % (drole, dratt.lower(), vform)\n if search.get('s'):\n sformat = 'Features'\n welcome = ('\\n%s: %s %s ...\\n %s\\n ' \\\n 'Query: %s\\n %s corpus ... 
\\n' % \\\n (thetime, message, cname, optiontext, sformat, message))\n if return_it:\n return welcome\n else:\n print(welcome)\n\n def goodbye_printer(return_it=False, only_conc=False):\n \"\"\"Say goodbye before exiting\"\"\"\n if not kwargs.get('printstatus', True):\n return\n thetime = strftime(\"%H:%M:%S\", localtime())\n if only_conc:\n \n show_me = (thetime, len(conc_df))\n finalstring = '\\n\\n%s: Concordancing finished! %d results.' % show_me\n else:\n finalstring = '\\n\\n%s: Interrogation finished!' % thetime\n if countmode:\n finalstring += ' %d matches.' % tot\n else:\n dat = (numentries, total_total)\n finalstring += ' %d unique results, %d total occurrences.' % dat\n if return_it:\n return finalstring\n else:\n print(finalstring)\n\n\n def make_conc_obj_from_conclines(conc_results):\n \"\"\"\n Turn conclines into DataFrame\n \"\"\"\n from corpkit.interrogation import Concordance\n all_conc_lines = []\n for sc_name, resu in sorted(conc_results.items()):\n if only_unique:\n unique_results = uniquify(resu)\n else:\n unique_results = resu\n #make into series\n if PYTHON_VERSION == 2:\n pindex = 'c f s l m r'.encode('utf-8').split()\n else:\n pindex = 'c f s l m r'.split()\n for fname, spkr, start, word, end in unique_results:\n #spkr = str(spkr, errors = 'ignore')\n fname = os.path.basename(fname)\n ser = [sc_name, fname, spkr, start, word, end]\n all_conc_lines.append(Series(ser, index=pindex))\n\n if random:\n from random import shuffle\n shuffle(all_conc_lines)\n\n try:\n conc_df = pd.concat(all_conc_lines, axis=1).T\n if all(x == '' for x in list(conc_df['s'].values)):\n conc_df.drop('s', axis=1, inplace=True)\n \n if show_ngram or show_collocates:\n if not language_model:\n counted = Counter(conc_df['m'])\n indices = [l for l in list(conc_df.index) if counted[conc_df.ix[l]['m']] > 1] \n conc_df = conc_df.ix[indices]\n conc_df = conc_df.reset_index(drop=True)\n\n locs['corpus'] = corpus.name\n conc_df = Concordance(conc_df)\n try:\n conc_df.query = locs\n except AttributeError:\n pass\n return conc_df\n\n except ValueError:\n return\n\n def make_progress_bar():\n \"\"\"generate a progress bar\"\"\"\n\n if simple_tregex_mode:\n total_files = len(list(to_iterate_over.keys()))\n else:\n total_files = sum(len(x) for x in list(to_iterate_over.values()))\n\n par_args = {'printstatus': kwargs.get('printstatus', True),\n 'root': root, \n 'note': note,\n 'length': total_files,\n 'startnum': kwargs.get('startnum'),\n 'denom': kwargs.get('denominator', 1)}\n\n term = None\n if kwargs.get('paralleling', None) is not None:\n from blessings import Terminal\n term = Terminal()\n par_args['terminal'] = term\n par_args['linenum'] = kwargs.get('paralleling')\n\n if in_notebook:\n par_args['welcome_message'] = welcome_message\n\n outn = kwargs.get('outname', '')\n if outn:\n outn = outn + ': '\n\n tstr = '%s%d/%d' % (outn, current_iter, total_files)\n p = animator(None, None, init=True, tot_string=tstr, **par_args)\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n return p, outn, total_files, par_args\n\n # find out if using gui\n root = kwargs.get('root')\n note = kwargs.get('note')\n language_model = kwargs.get('language_model')\n\n # set up pause method\n original_sigint = signal.getsignal(signal.SIGINT)\n if kwargs.get('paralleling', None) is None:\n original_sigint = signal.getsignal(signal.SIGINT)\n signal.signal(signal.SIGINT, signal_handler)\n\n # find out about concordancing\n only_conc = False\n no_conc = False\n if conc is 
False:\n no_conc = True\n if isinstance(conc, str) and conc.lower() == 'only':\n only_conc = True\n no_conc = False\n numconc = 0\n\n # wipe non essential class attributes to not bloat query attrib\n if isinstance(corpus, Corpus):\n import copy\n corpus = copy.copy(corpus)\n for k, v in corpus.__dict__.items():\n if isinstance(v, (Interrogation, Interrodict)):\n corpus.__dict__.pop(k, None)\n\n # convert path to corpus object\n if not isinstance(corpus, (Corpus, Corpora, Subcorpus, File, Datalist)):\n if not multiprocess and not kwargs.get('outname'):\n corpus = Corpus(corpus, print_info=False)\n\n # figure out how the user has entered the query and show, and normalise\n from corpkit.process import searchfixer\n search = searchfixer(search, query)\n show = fix_show(show)\n \n show_ngram = any(x.startswith('n') for x in show)\n show_collocates = any(x.startswith('b') for x in show)\n\n # instantiate lemmatiser if need be\n if 'l' in show and isinstance(search, dict) and search.get('t'):\n from nltk.stem.wordnet import WordNetLemmatizer\n lmtzr = WordNetLemmatizer()\n\n # do multiprocessing if need be\n im, corpus, search, query, just_speakers = is_multiquery(corpus, search, query, just_speakers)\n\n # figure out if we can multiprocess the corpus\n if hasattr(corpus, '__iter__') and im:\n corpus = Corpus(corpus)\n if hasattr(corpus, '__iter__') and not im:\n im = True\n if isinstance(corpus, Corpora):\n im = True\n\n # split corpus if the user wants multiprocessing but no other iterable\n if not im and multiprocess:\n im = True\n corpus = corpus[:]\n\n search = fix_search(search)\n exclude = fix_search(exclude)\n\n # if it's already been through pmultiquery, don't do it again\n locs['search'] = search\n locs['query'] = query\n locs['just_speakers'] = just_speakers\n locs['corpus'] = corpus\n locs['multiprocess'] = multiprocess\n locs['print_info'] = kwargs.get('printstatus', True)\n\n # send to multiprocess function\n if im:\n signal.signal(signal.SIGINT, original_sigint)\n from corpkit.multiprocess import pmultiquery\n return pmultiquery(**locs)\n\n # get corpus metadata\n cname = corpus.name\n if isinstance(save, STRINGTYPE):\n savename = corpus.name + '-' + save\n if save is True:\n raise ValueError('save must be str, not bool.')\n\n\n datatype = getattr(corpus, 'datatype', 'parse')\n singlefile = getattr(corpus, 'singlefile', False)\n level = getattr(corpus, 'level', 'c')\n \n # store all results in here\n results = {}\n count_results = {}\n conc_results = {}\n\n # check if just counting, turn off conc if so\n countmode = 'c' in show\n if countmode:\n no_conc = True\n only_conc = False\n # where we are at in interrogation\n current_iter = 0\n\n # multiprocessing progress bar\n denom = kwargs.get('denominator', 1)\n startnum = kwargs.get('startnum', 0)\n\n # Determine the search function to be used #\n searcher, optiontext, simple_tregex_mode, statsmode, tree_to_text = determine_search_func(show)\n \n # no conc for statsmode\n if statsmode:\n no_conc = True\n only_conc = False\n conc = False\n\n # Set some Tregex-related values\n if search.get('t'):\n if show_ngram:\n raise ValueError(\"Can't search trees for n-grams---use a dependency search.\")\n query, translated_option = get_tregex_values()\n if query == 'Bad query' and translated_option is None:\n if root:\n return 'Bad query'\n else:\n return\n # more tregex options\n if tree_to_text:\n treg_q = r'ROOT << __'\n op = ['-o', '-t', '-w']\n elif simple_tregex_mode:\n treg_q = search['t']\n op = ['-o', '-' + translated_option]\n\n # make 
iterable object for corpus interrogation\n to_iterate_over = make_search_iterable(corpus)\n\n from traitlets import TraitError\n try:\n from ipywidgets import IntProgress\n\n _ = IntProgress(min=0, max=10, value=1)\n in_notebook = True\n except TraitError:\n in_notebook = False\n except ImportError:\n in_notebook = False\n\n # print welcome message\n welcome_message = welcome_printer(return_it=in_notebook)\n\n # create a progress bar\n p, outn, total_files, par_args = make_progress_bar()\n\n # Iterate over data, doing interrogations\n for (subcorpus_name, subcorpus_path), files in sorted(to_iterate_over.items()):\n\n # results for subcorpus go here\n conc_results[subcorpus_name] = []\n count_results[subcorpus_name] = []\n results[subcorpus_name] = Counter()\n\n # get either everything (tree_to_text) or the search['t'] query\n if tree_to_text or simple_tregex_mode:\n result = tregex_engine(query=treg_q,\n options=op,\n corpus=subcorpus_path,\n root=root,\n preserve_case=preserve_case\n )\n\n # format search results with slashes etc\n if not countmode and not tree_to_text:\n result = format_tregex(result)\n\n # if concordancing, do the query again with 'whole' sent and fname\n if not no_conc:\n ops = ['-w', '-f'] + op\n whole_result = tregex_engine(query=search['t'],\n options=ops,\n corpus=subcorpus_path,\n root=root,\n preserve_case=preserve_case\n )\n for line in whole_result:\n line.insert(1, '') \n\n # format match too depending on option\n if not only_format_match:\n whole_result = format_tregex(whole_result, whole=True)\n\n # make conc lines from conc results\n conc_result = make_conc_lines_from_whole_mid(whole_result, result)\n for lin in conc_result:\n if numconc < maxconc or not maxconc:\n conc_results[subcorpus_name].append(lin)\n numconc += 1\n\n # add matches to ongoing counts\n if countmode:\n count_results[subcorpus_name] += [result] \n else:\n result = Counter(result)\n results[subcorpus_name] += result\n\n # update progress bar\n current_iter += 1\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n\n # dependencies, plaintext, tokens, slow_tregex and tree_to_text\n if not simple_tregex_mode:\n for f in files:\n slow_treg_speaker_guess = kwargs.get('outname', False)\n if datatype == 'parse' and not tree_to_text:\n # right now, this is not using the File class's read() or document\n # methods. the reason is that there seem to be memory leaks. 
these\n # may have been fixed already though.\n try:\n from corenlp_xml import Document\n except ImportError:\n from corenlp_xml.document import Document\n with codecs.open(f.path, 'rb') as fo:\n data = fo.read()\n corenlp_xml = Document(data)\n #corenlp_xml = f.document\n if just_speakers:\n import re\n if isinstance(just_speakers, re._pattern_type):\n sents = [s for s in corenlp_xml.sentences if \\\n re.search(just_speakers, get_speakername(s))]\n else:\n sents = [s for s in corenlp_xml.sentences if get_speakername(s) in just_speakers]\n if len(just_speakers) == 1:\n slow_treg_speaker_guess = just_speakers[0]\n else:\n sents = corenlp_xml.sentences\n\n # get coreferences\n if kwargs.get('coref') or any(x.startswith('h') for x in show):\n if just_speakers:\n corefs = [i for i in corenlp_xml.coreferences if any(x == i.sentence for x in sents)]\n else:\n corefs = corenlp_xml.coreferences\n else:\n corefs = []\n \n corenlp_xml = None\n\n res, conc_res = searcher(sents, search=search, show=show,\n dep_type=dep_type,\n exclude=exclude,\n excludemode=excludemode,\n searchmode=searchmode,\n case_sensitive=case_sensitive,\n conc=conc,\n only_format_match=only_format_match,\n speaker=slow_treg_speaker_guess,\n gramsize=gramsize,\n no_punct=no_punct,\n no_closed=no_closed,\n whitelist=whitelist,\n split_contractions=split_contractions,\n window=window,\n filename=f.name,\n language_model=language_model,\n corefs=corefs,\n is_a_word=is_a_word,\n **kwargs\n )\n \n if res == 'Bad query':\n return 'Bad query'\n\n if datatype == 'tokens':\n import pickle\n with codecs.open(f.path, \"rb\") as fo:\n data = pickle.load(fo)\n elif datatype == 'plaintext' or tree_to_text:\n if tree_to_text:\n data = '\\n'.join(result)\n if not split_contractions:\n data = unsplitter(data)\n else:\n with codecs.open(f.path, 'rb', encoding='utf-8') as data:\n data = data.read()\n\n if datatype == 'tokens' or datatype == 'plaintext':\n\n query = list(search.values())[0]\n\n if not only_conc:\n res = searcher(query,\n data,\n split_contractions=split_contractions, \n concordancing=False\n )\n if res == 'Bad query':\n if root:\n return 'Bad query'\n if not no_conc:\n conc_res = searcher(query,\n data,\n split_contractions=split_contractions, \n concordancing=True\n )\n if conc_res == 'Bad query':\n if root:\n return 'Bad query'\n for line in conc_res:\n line.insert(0, '')\n\n if countmode:\n count_results[subcorpus_name] += [res]\n\n else:\n # add filename and do lowercasing for conc\n if not no_conc:\n for line in conc_res:\n if searcher != slow_tregex and searcher != tgrep_searcher:\n line.insert(0, f.name)\n else:\n line[0] = f.name\n if not preserve_case:\n line[3:] = [x.lower() for x in line[3:]]\n if spelling:\n line = [correct_spelling(b) for b in line]\n if numconc < maxconc or not maxconc:\n conc_results[subcorpus_name].append(line)\n numconc += 1\n\n # do lowercasing and spelling\n if not only_conc:\n if not preserve_case:\n if not statsmode:\n res = [i.lower() for i in res]\n\n if spelling:\n if not statsmode:\n res = [correct_spelling(r) for r in res]\n #if not statsmode:\n results[subcorpus_name] += Counter(res)\n #else:\n #results[subcorpus_name] += res\n\n # update progress bar\n current_iter += 1\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n\n # Get concordances into DataFrame, return if just conc\n if not no_conc:\n # fail on this line with typeerror if no results?\n conc_df = make_conc_obj_from_conclines(conc_results)\n\n if only_conc:\n locs = 
sanitise_dict(locs)\n try:\n conc_df.query = locs\n except AttributeError:\n return conc_df\n if save and not kwargs.get('outname'):\n print('\\n')\n conc_df.save(savename)\n goodbye_printer(only_conc=True)\n signal.signal(signal.SIGINT, original_sigint) \n return conc_df\n else:\n conc_df = None\n\n # Get interrogation into DataFrame\n if countmode:\n df = Series({k: sum(v) for k, v in sorted(count_results.items())})\n tot = df.sum()\n else:\n the_big_dict = {}\n unique_results = set(item for sublist in list(results.values()) for item in sublist)\n sortres = sorted(results.items(), key=lambda x: x[0])\n for word in unique_results:\n the_big_dict[word] = [subcorp_result[word] for _, subcorp_result in sortres]\n # turn master dict into dataframe, sorted\n df = DataFrame(the_big_dict, index=sorted(results.keys()))\n\n # for ngrams, remove hapaxes\n if show_ngram or show_collocates:\n if not language_model:\n df = df[[i for i in list(df.columns) if df[i].sum() > 1]]\n\n numentries = len(df.columns)\n tot = df.sum(axis=1)\n total_total = df.sum().sum()\n\n # turn df into series if all conditions met\n if not countmode:\n if level == 's' or singlefile:\n if not files_as_subcorpora:\n if not kwargs.get('df1_always_df'):\n df = Series(df.ix[0])\n df.sort_values(ascending=False, inplace=True)\n tot = df.sum()\n numentries = len(df.index)\n total_total = tot\n\n # turn data into DF for GUI if need be\n if isinstance(df, Series) and kwargs.get('df1_always_df'):\n total_total = df.sum()\n df = DataFrame(df)\n tot = Series(total_total, index=['Total'])\n\n # if we're doing files as subcorpora, we can remove the .txt.xml etc\n if isinstance(df, DataFrame) and files_as_subcorpora:\n cname = corpus.name.replace('-stripped', '').replace('-parsed', '')\n edits = [(r'(-[0-9][0-9][0-9])?\\.txt\\.xml', ''),\n (r'-%s(-stripped)?(-parsed)?' 
% cname, '')]\n from corpkit.editor import editor\n df = editor(df, replace_subcorpus_names=edits).results\n tot = df.sum(axis=1)\n total_total = df.sum().sum()\n\n # sort by total\n if isinstance(df, DataFrame):\n if not df.empty: \n df = df[list(df.sum().sort_values(ascending=False).index)]\n\n # make interrogation object\n locs['corpus'] = corpus.path\n locs = sanitise_dict(locs)\n interro = Interrogation(results=df, totals=tot, query=locs, concordance=conc_df)\n\n # save it\n if save and not kwargs.get('outname'):\n print('\\n')\n interro.save(savename)\n \n goodbye = goodbye_printer(return_it=in_notebook)\n if in_notebook:\n try:\n p.children[2].value = goodbye.replace('\\n', '')\n except AttributeError:\n pass\n signal.signal(signal.SIGINT, original_sigint)\n return interro", "title": "" }, { "docid": "ca14bc835498c6cfdc4f79acaa23d9d3", "score": "0.46904004", "text": "def collecte_docs(self, chercheur, overwrite=False): # self,\n init = overwrite # If True, data persistence is lost when references are updated\n docs = hal.find_publications(chercheur[\"halId_s\"], \"authIdHal_s\")\n\n progress_recorder = ProgressRecorder(self)\n progress_recorder.set_progress(0, len(docs), description=\"récupération des données HAL\")\n # Insert documents collection\n for num, doc in enumerate(docs):\n doc[\"country_colaboration\"] = location_docs.generate_countrys_fields(doc)\n doc = doi_enrichissement.docs_enrichissement_doi(doc)\n if \"fr_abstract_s\" in doc.keys():\n if isinstance(doc[\"fr_abstract_s\"], list):\n doc[\"fr_abstract_s\"] = \"/n\".join(doc[\"fr_abstract_s\"])\n if len(doc[\"fr_abstract_s\"]) > 100:\n doc[\"fr_entites\"] = keyword_enrichissement.return_entities(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n doc[\"fr_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"fr_abstract_s\"], \"fr\"\n )\n if \"en_abstract_s\" in doc.keys():\n if isinstance(doc[\"en_abstract_s\"], list):\n doc[\"en_abstract_s\"] = \"/n\".join(doc[\"en_abstract_s\"])\n if len(doc[\"en_abstract_s\"]) > 100:\n doc[\"en_entites\"] = keyword_enrichissement.return_entities(\n doc[\"en_abstract_s\"], \"en\"\n )\n doc[\"en_teeft_keywords\"] = keyword_enrichissement.keyword_from_teeft(\n doc[\"en_abstract_s\"], \"en\"\n )\n\n doc[\"_id\"] = doc[\"docid\"]\n doc[\"validated\"] = True\n\n doc[\"harvested_from\"] = \"researcher\"\n\n doc[\"harvested_from_ids\"] = []\n doc[\"harvested_from_label\"] = []\n\n #\n #\n # print(doc[\"authorship\"], doc ['authLastName_s'])\n\n if len(doc[\"authIdHal_s\"]) != len(doc[\"authLastName_s\"]):\n # print (\"elastichal.py : test d'autorat no good\")\n # test sur le nom complet...\n nom = [\n truc\n for truc in doc[\"authLastName_s\"]\n if chercheur[\"lastName\"].lower() in truc.lower()\n ] # pour les récemment mariés qui auraient un nom composé...\n # Après si 'lun des co-auteur porte le même nom...\n if len(nom) > 0:\n nom = nom[0].title()\n try:\n if doc[\"authLastName_s\"].index(nom) == 0: # premier\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authLastName_s\"].index(nom) == len(doc[\"authLastName_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n except ValueError:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n elif chercheur[\"halId_s\"] in doc[\"authIdHal_s\"]:\n if doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == 0:\n doc[\"authorship\"] = [\n {\"authorship\": \"firstAuthor\", 
\"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n elif (\n doc[\"authIdHal_s\"].index(chercheur[\"halId_s\"]) == len(doc[\"authIdHal_s\"]) - 1\n ): # dernier\n doc[\"authorship\"] = [\n {\"authorship\": \"lastAuthor\", \"authIdHal_s\": chercheur[\"halId_s\"]}\n ]\n else:\n doc[\"authorship\"] = []\n else:\n doc[\"authorship\"] = []\n\n doc[\"harvested_from_ids\"].append(chercheur[\"halId_s\"])\n\n # historique d'appartenance du docId\n # pour attribuer les bons docs aux chercheurs\n # harvet_history.append({'docid': doc['docid'], 'from': row['halId_s']})\n #\n # for h in harvet_history:\n # if h['docid'] == doc['docid']:\n # if h['from'] not in doc[\"harvested_from_ids\"]:\n # doc[\"harvested_from_ids\"].append(h['from'])\n\n doc[\"records\"] = []\n\n doc[\"MDS\"] = utils.calculate_mds(doc)\n\n try:\n should_be_open = utils.should_be_open(doc)\n if should_be_open == 1:\n doc[\"should_be_open\"] = True\n if should_be_open == -1:\n doc[\"should_be_open\"] = False\n\n if should_be_open == 1 or should_be_open == 2:\n doc[\"isOaExtra\"] = True\n elif should_be_open == -1:\n doc[\"isOaExtra\"] = False\n except IndexError:\n print(\"publicationDate_tdate error ?\")\n doc[\"Created\"] = datetime.datetime.now().isoformat()\n\n if not init: # récupération de l'existant pour ne pas écraser\n field = \"_id\"\n doc_param = esActions.scope_p(field, doc[\"_id\"])\n\n if not es.indices.exists(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\"\n ): # -researchers\" + row[\"ldapId\"] + \"-documents\n print(\"exception \", chercheur[\"labHalId\"], chercheur[\"ldapId\"])\n\n res = es.search(\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n body=doc_param,\n ) # -researchers\" + row[\"ldapId\"] + \"-documents\n\n if len(res[\"hits\"][\"hits\"]) > 0:\n doc[\"validated\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"validated\"]\n if \"authorship\" in res[\"hits\"][\"hits\"][0][\"_source\"]:\n doc[\"authorship\"] = res[\"hits\"][\"hits\"][0][\"_source\"][\"authorship\"]\n\n if (\n res[\"hits\"][\"hits\"][0][\"_source\"][\"modifiedDate_tdate\"]\n != doc[\"modifiedDate_tdate\"]\n ):\n doc[\"records\"].append(\n {\n \"beforeModifiedDate_tdate\": doc[\"modifiedDate_tdate\"],\n \"MDS\": res[\"hits\"][\"hits\"][0][\"_source\"][\"MDS\"],\n }\n )\n\n else:\n doc[\"validated\"] = True\n progress_recorder.set_progress(num, len(docs), description=\"(récolte)\")\n progress_recorder.set_progress(num, len(docs), description=\"(indexation)\")\n helpers.bulk(\n es,\n docs,\n index=chercheur[\"structSirene\"]\n + \"-\"\n + chercheur[\"labHalId\"]\n + \"-researchers-\"\n + chercheur[\"ldapId\"]\n + \"-documents\",\n refresh=\"wait_for\",\n )\n\n return chercheur # au cas où", "title": "" }, { "docid": "552d8019a0ca35d82249c0f2c30e02ce", "score": "0.46897706", "text": "def interrogator(path,\n search,\n query = 'any', \n show = 'words',\n exclude = False,\n case_sensitive = False,\n lemmatise = False, \n titlefilter = False, \n lemmatag = False, \n spelling = False, \n phrases = False, \n dep_type = 'collapsed-ccprocessed-dependencies',\n quicksave = False,\n printstatus = True,\n root = False,\n df1_always_df = False,\n just_speakers = False,\n excludemode = 'any',\n searchmode = 'all',\n **kwargs):\n import corpkit\n from corpkit.other import add_corpkit_to_path\n from corpkit.other import tregex_engine\n from corpkit.other import 
add_nltk_data_to_nltk_path\n \n # some non-Python resources need to explicitly be added to path\n add_corpkit_to_path()\n\n import os\n import re\n import signal\n import gc\n\n import collections\n import warnings\n import nltk\n import numpy\n\n import pandas as pd\n from collections import Counter\n from time import localtime, strftime\n from pandas import DataFrame, Series\n\n from corpkit.tests import check_pytex, check_t_kinter\n from corpkit.textprogressbar import TextProgressBar\n\n import dictionaries\n from dictionaries.word_transforms import (wordlist, \n usa_convert, \n taglemma)\n\n # nltk data path for tokeniser/lemmatiser\n if 'nltk_data_path' in kwargs.keys():\n if kwargs['nltk_data_path'] not in nltk.data.path:\n nltk.data.path.append(kwargs['nltk_data_path'])\n locdir = '/Users/daniel/work/corpkit/nltk_data'\n if locdir not in nltk.data.path:\n nltk.data.path.append(locdir)\n\n # prefer ipython to python if the user has it\n try:\n from IPython.display import display, clear_output\n except ImportError:\n pass\n \n # check for gui, pythontex\n tk = check_t_kinter()\n have_python_tex = check_pytex()\n\n # multiprocessing progress bar\n if 'denominator' in kwargs.keys():\n denom = kwargs['denominator']\n else:\n denom = 1\n if 'startnum' in kwargs.keys():\n startnum = kwargs['startnum']\n else:\n startnum = 0\n\n # determine if multiquery\n is_multiquery = False\n if hasattr(path, '__iter__'):\n is_multiquery = True\n if 'postounts' in path[0]:\n spelling = 'UK'\n if type(query) == dict or type(query) == collections.OrderedDict:\n is_multiquery = True\n if just_speakers:\n if just_speakers == 'each':\n is_multiquery = True\n if type(just_speakers) == str:\n if just_speakers != 'each':\n just_speakers = [just_speakers]\n if type(just_speakers) == list:\n if len(just_speakers) > 1:\n is_multiquery = True\n\n # regex type\n retype = type(re.compile('hello, world'))\n\n # just for me: convert spelling automatically for bipolar\n if not is_multiquery:\n if 'postcounts' in path:\n spelling = 'UK'\n\n # don't print so much stdout in the GUI\n if root:\n shouldprint = False\n else:\n shouldprint = True\n\n # run pmultiquery if so\n if is_multiquery:\n from corpkit.multiprocess import pmultiquery\n d = { 'path': path, \n 'search': search,\n 'query': query,\n 'show': show,\n 'lemmatise': lemmatise, \n 'titlefilter': titlefilter, \n 'lemmatag': lemmatag, \n 'print_info': shouldprint, \n 'spelling': spelling, \n 'phrases': phrases, \n 'dep_type': dep_type, \n 'quicksave': quicksave, \n 'df1_always_df': df1_always_df,\n 'just_speakers': just_speakers, \n 'root': root,}\n \n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n d['note'] = kwargs['note']\n\n if 'num_proc' in kwargs.keys():\n d['num_proc'] = kwargs['num_proc']\n\n return pmultiquery(**d)\n\n if 'paralleling' in kwargs.keys():\n paralleling = kwargs['paralleling']\n else:\n paralleling = False\n\n # multiple progress bars when multiprocessing\n par_args = {}\n if not root:\n from blessings import Terminal\n term = Terminal()\n par_args['terminal'] = term\n par_args['linenum'] = paralleling\n\n the_time_started = strftime(\"%Y-%m-%d %H:%M:%S\")\n \n # check if we are in ipython\n try:\n get_ipython().getoutput()\n except TypeError:\n have_ipython = True\n except NameError:\n import subprocess\n have_ipython = False\n\n def unsplitter(lst):\n \"\"\"unsplit contractions and apostophes from tokenised text\"\"\"\n unsplit = []\n for index, t in enumerate(lst):\n if index == 0 or index == len(lst) - 1:\n 
unsplit.append(t)\n continue\n if \"'\" in t and not t.endswith(\"'\"):\n rejoined = ''.join([lst[index - 1], t])\n unsplit.append(rejoined)\n else:\n if not \"'\" in lst[index + 1]:\n unsplit.append(t)\n return unsplit\n\n def animator(progbar, count, tot_string = False, linenum = False, terminal = False, \n init = False, length = False):\n \"\"\"animates progress bar in unique position in terminal\"\"\"\n if init:\n from textprogressbar import TextProgressBar\n return TextProgressBar(length, dirname = tot_string)\n if type(linenum) == int:\n with terminal.location(0, terminal.height - (linenum + 1)):\n if tot_string:\n progbar.animate(count, tot_string)\n else:\n progbar.animate(count)\n else:\n if tot_string:\n progbar.animate(count, tot_string)\n else:\n progbar.animate(count) \n\n def signal_handler(signal, frame):\n \"\"\"pause on ctrl+c, rather than just stop loop\"\"\" \n import signal\n import sys\n from time import localtime, strftime\n time = strftime(\"%H:%M:%S\", localtime())\n sel = raw_input('\\n\\n%s: Paused. Press return to resume, or type exit to quit: \\n' % time)\n if sel.startswith('e') or sel.startswith('E'):\n sys.exit(0)\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation resumed.\\n' % time\n signal.signal(signal.SIGINT, signal_handler)\n \n def gettag(query, lemmatag = False):\n \"\"\"find tag for wordnet lemmatisation\"\"\"\n import re\n if lemmatag is False:\n tag = 'n' # same default as wordnet\n # attempt to find tag from tregex query\n tagfinder = re.compile(r'^[^A-Za-z]*([A-Za-z]*)')\n tagchecker = re.compile(r'^[A-Z]{1,4}$')\n treebank_tag = re.findall(tagfinder, query.replace(r'\\w', '').replace(r'\\s', '').replace(r'\\b', ''))\n if re.match(tagchecker, treebank_tag[0]):\n if treebank_tag[0].startswith('J'):\n tag = 'a'\n elif treebank_tag[0].startswith('V') or treebank_tag[0].startswith('M'):\n tag = 'v'\n elif treebank_tag[0].startswith('N'):\n tag = 'n'\n elif treebank_tag[0].startswith('R'):\n tag = 'r'\n elif lemmatag:\n tag = lemmatag\n tagchecker = re.compile(r'^[avrn]$')\n while not re.match(tagchecker, lemmatag):\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: WordNet POS tag \"%s\" not recognised.\\n It must be:\\n\\n ' \\\n ' a: (adjective)' \\\n ' n: (noun)' \\\n ' r: (adverb)' \\\n ' v: (verb)\\n\\nYour selection: ' % (time, lemmatag))\n lemmatag = selection\n return tag\n \n def processwords(list_of_matches, lemmatag = False):\n \"\"\"normalise matches from interrogations\"\"\"\n list_of_matches = [w.lower() for w in list_of_matches]\n # remove nonwords, strip . to normalise \"dr.\"\n if translated_option != 'o' and translated_option != 'u':\n list_of_matches = [w.lstrip('.').rstrip('.') for w in list_of_matches if re.search(regex_nonword_filter, w)]\n \n list_of_matches.sort()\n \n # tokenise if multiword:\n if phrases and not n_gramming:\n from nltk import word_tokenize as word_tokenize\n list_of_matches = [word_tokenize(i) for i in list_of_matches]\n\n # this is just for plaintext ... 
should convert to unicode on file open\n if datatype == 'plaintext':\n try:\n list_of_matches = [unicode(w, errors = 'ignore') for w in list_of_matches]\n except TypeError:\n pass\n\n if not dependency and exclude and 'w' in exclude.keys():\n list_of_matches = [w for w in list_of_matches if not re.match(exclude['w'], w)]\n\n if lemmatise or 'l' in show:\n if not dependency:\n tag = gettag(query, lemmatag = lemmatag)\n lemmata = lemmatiser(list_of_matches, tag)\n tups = zip(list_of_matches, lemmata)\n res = []\n for w, l in tups:\n single_result = []\n if exclude and 'l' in exclude.keys():\n if re.match(exclude['l'], l):\n continue\n if 'w' in show:\n single_result.append(w)\n if 'l' in show:\n single_result.append(l)\n # bad fix:\n # this currently says, if pos in show, there must only be pos ...\n if 'p' in show:\n if lemmatise:\n single_result.append(l)\n else:\n single_result.append(w)\n\n single_result = '/'.join(single_result)\n res.append(single_result)\n list_of_matches = res\n\n if titlefilter and not dependency:\n list_of_matches = titlefilterer(list_of_matches)\n if spelling:\n list_of_matches = convert_spelling(list_of_matches, spelling = spelling)\n\n # use blacklist option in gui\n if 'blacklist' in kwargs.keys():\n stopwords = False\n if kwargs['blacklist'] is not False:\n if kwargs['blacklist'] is True:\n from dictionaries.stopwords import stopwords as my_stopwords\n stopwords = [i.lower() for i in my_stopwords]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n if type(kwargs['blacklist']) == list:\n stopwords = [i.lower() for i in kwargs['blacklist']]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n regexblacklist = re.compile(kwargs['blacklist'])\n list_of_matches = [w for w in list_of_matches if not re.search(regexblacklist, w)]\n\n #if not split_con:\n # list_of_matches = unsplitter(list_of_matches)\n \n # turn every result into a single string again if need be:\n if phrases:\n output = []\n for res in list_of_matches:\n joined = ' '.join(res)\n output.append(joined)\n return output\n else:\n return list_of_matches\n\n def lemmatiser(list_of_words, tag):\n \"\"\"take a list of unicode words and a tag and return a lemmatised list.\"\"\"\n \n output = []\n for entry in list_of_words:\n if phrases:\n # just get the rightmost word\n word = entry[-1]\n entry.pop()\n else:\n word = entry\n if translated_option.startswith('u'):\n if word in taglemma:\n word = taglemma[word]\n else:\n if word == 'x':\n word = 'Other'\n # only use wordnet lemmatiser when appropriate\n elif not dependency:\n if word in wordlist:\n word = wordlist[word]\n word = lmtzr.lemmatize(word, tag)\n # do the manual_lemmatisation\n else:\n if word in wordlist:\n word = wordlist[word]\n if phrases:\n entry.append(word)\n output.append(entry)\n else:\n output.append(word)\n return output\n\n def titlefilterer(list_of_matches):\n from dictionaries.wordlists import wordlists\n badwords = wordlists.titles + wordlists.closedclass\n output = []\n for result in list_of_matches:\n head = result[-1]\n non_head = len(result) - 1\n title_stripped = [token for token in result[:non_head] if token.rstrip('.') not in badwords]\n title_stripped.append(head)\n output.append(title_stripped)\n return output\n\n def convert_spelling(list_of_matches, spelling = 'US'):\n from dictionaries.word_transforms import usa_convert\n if spelling == 'UK':\n usa_convert = {v: k for k, v in usa_convert.items()}\n output = []\n for result in list_of_matches:\n if not phrases:\n 
result = result.split('/')\n for index, i in enumerate(result):\n try:\n result[index] = usa_convert[i]\n except KeyError:\n pass\n output.append('/'.join(result))\n return output\n\n def distancer(lks, lk):\n \"determine number of jumps to root\" \n c = 0\n # get the gov index, stop when it's zero\n root_found = False\n while not root_found:\n if c == 0:\n try:\n link_to_check = next(i for i in lks if i.dependent.idx == lk.id)\n except StopIteration:\n root_found = True\n break\n #link_to_check = lk\n gov_index = link_to_check.governor.idx\n if gov_index == 0:\n root_found = True\n else:\n if c > 29:\n root_found = True\n break\n link_to_check = [l for l in lks if l.dependent.idx == gov_index]\n if len(link_to_check) > 0:\n link_to_check = link_to_check[0]\n else:\n break\n c += 1\n if c < 30:\n return c\n\n def dep_searcher(sents):\n \"\"\"\n search corenlp dependency parse\n 1. search for 'search' keyword arg\n governor\n dependent\n function\n pos\n lemma\n word\n index\n\n 2. exclude entries if need be\n\n 3. return '/'-sep list of 'show' keyword arg:\n governor\n dependent\n function\n pos\n lemma\n word\n index\n distance\n \n ... or just return int count.\n \"\"\"\n \n result = []\n for s in sents:\n lks = []\n deps = get_deps(s, dep_type)\n tokens = s.tokens\n for opt, pat in search.items():\n pat = filtermaker(pat)\n if opt == 'g':\n for l in deps.links:\n if re.match(pat, l.governor.text):\n lks.append(s.get_token_by_id(l.dependent.idx))\n elif opt == 'd':\n for l in deps.links:\n if re.match(pat, l.dependent.text):\n lks.append(s.get_token_by_id(l.governor.idx))\n elif opt == 'f':\n for l in deps.links:\n if re.match(pat, l.type):\n lks.append(s.get_token_by_id(l.dependent.idx))\n elif opt == 'p':\n for tok in tokens:\n if re.match(pat, tok.pos):\n lks.append(tok)\n elif opt == 'l':\n for tok in tokens:\n if re.match(pat, tok.lemma):\n lks.append(tok)\n elif opt == 'w':\n for tok in tokens:\n if re.match(pat, tok.word):\n lks.append(tok)\n elif opt == 'i':\n for tok in tokens:\n if re.match(pat, str(tok.id)):\n lks.append(tok)\n\n # only return results if all conditions are met\n if searchmode == 'all':\n counted = Counter(lks)\n lks = [k for k, v in counted.items() if v >= len(search.keys())]\n\n lks = list(set([x for x in lks if re.search(regex_nonword_filter, x.word)]))\n\n if exclude is not False:\n to_remove = []\n for op, pat in exclude.items():\n pat = filtermaker(pat)\n for tok in lks:\n if op == 'g':\n for l in deps.links:\n if re.match(pat, l.governor.text):\n to_remove.append(s.get_token_by_id(l.governor.idx))\n elif op == 'd':\n for l in deps.links:\n if re.match(pat, l.dependent.text):\n to_remove.append(s.get_token_by_id(l.dependent.idx))\n elif op == 'f':\n for l in deps.links:\n if re.match(pat, l.type):\n to_remove.append(s.get_token_by_id(l.dependent.idx))\n elif op == 'p':\n for tok in tokens:\n if re.match(pat, tok.pos):\n to_remove.append(tok)\n elif op == 'l':\n for tok in tokens:\n if re.match(pat, tok.lemma):\n to_remove.append(tok)\n elif op == 'w':\n for tok in tokens:\n if re.match(pat, tok.word):\n to_remove.append(tok)\n elif op == 'i':\n for tok in tokens:\n if re.match(pat, str(tok.id)):\n to_remove.append(tok)\n\n if excludemode == 'all':\n counted = Counter(to_remove)\n to_remove = [k for k, v in counted.items() if v >= len(exclude.keys())]\n for i in to_remove:\n try:\n lks.remove(i)\n except ValueError:\n pass\n\n if only_count:\n result.append(len(lks))\n continue\n\n # figure out what to show\n for lk in lks:\n single_result = {}\n node = 
deps.get_node_by_idx(lk.id)\n\n if 'w' in show:\n single_result['w'] = 'none'\n if lemmatise:\n single_result['w'] = lk.lemma\n else:\n single_result['w'] = lk.word\n\n if 'l' in show:\n single_result['l'] = lk.lemma\n\n if 'p' in show:\n single_result['p'] = 'none'\n postag = lk.pos\n if lemmatise:\n if postag.lower() in taglemma.keys():\n single_result['p'] = taglemma[postag.lower()]\n else:\n single_result['p'] = postag.lower()\n else:\n single_result['p'] = postag\n if not single_result['p']:\n single_result['p'] == 'none'\n\n if 'f' in show:\n single_result['f'] = 'none'\n for i in deps.links:\n if i.dependent.idx == lk.id:\n single_result['f'] = i.type\n break\n if single_result['f'] == '':\n single_result['f'] = 'root'\n\n if 'g' in show:\n single_result['g'] = 'none'\n for i in deps.links:\n if i.dependent.idx == lk.id:\n if s.get_token_by_id(i.governor.idx):\n if lemmatise: \n single_result['g'] = s.get_token_by_id(i.governor.idx).lemma\n else:\n single_result['g'] = i.governor.text\n else:\n single_result['g'] = 'root'\n break\n\n if 'd' in show:\n single_result['d'] = 'none'\n for i in deps.links:\n if i.governor.idx == lk.id:\n if s.get_token_by_id(i.dependent.idx): \n if lemmatise:\n single_result['d'] = s.get_token_by_id(i.dependent.idx).lemma\n else:\n single_result['d'] = i.dependent.text\n break\n\n if 'r' in show:\n all_lks = [l for l in deps.links]\n distance = distancer(all_lks, lk)\n if distance:\n single_result['r'] = str(distance)\n else:\n single_result['r'] = '-1'\n\n if 'i' in show:\n single_result['i'] = str(lk.id)\n\n if not only_count:\n \n # add them in order\n out = []\n for i in show:\n out.append(single_result[i])\n\n result.append('/'.join(out))\n \n if 'c' in show:\n result = sum(result)\n\n return result\n\n def tok_by_list(pattern, list_of_toks):\n \"\"\"search for regex in plaintext corpora\"\"\"\n if type(pattern) == str:\n pattern = [pattern]\n result = []\n matches = [m for m in list_of_toks if m in pattern]\n for m in matches:\n result.append(m)\n return result\n\n def tok_ngrams(pattern, list_of_toks, split_contractions = True):\n from collections import Counter\n global gramsize\n import re\n ngrams = Counter()\n result = []\n # if it's not a compiled regex\n list_of_toks = [x for x in list_of_toks if re.search(regex_nonword_filter, x)]\n\n if not split_contractions:\n list_of_toks = unsplitter(list_of_toks)\n \n #list_of_toks = [x for x in list_of_toks if \"'\" not in x]\n for index, w in enumerate(list_of_toks):\n try:\n the_gram = [list_of_toks[index+x] for x in range(gramsize)]\n if not any(re.search(query, x) for x in the_gram):\n continue\n #if query != 'any':\n # if not any(re.search(query, w) is True for w in the_gram):\n # continue\n ngrams[' '.join(the_gram)] += 1\n except IndexError:\n pass\n # turn counter into list of results\n for k, v in ngrams.items():\n if v > 1:\n for i in range(v):\n result.append(k)\n return result\n\n def tok_by_reg(pattern, list_of_toks):\n \"\"\"search for regex in plaintext corpora\"\"\"\n try:\n comped = re.compile(pattern)\n except:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Query %s' % (thetime, error_message)\n return 'Bad query'\n\n matches = [m for m in list_of_toks if re.search(comped, m)]\n\n return matches\n\n def plaintext_regex_search(pattern, plaintext_data):\n \"\"\"search for regex in 
plaintext corpora\"\"\"\n result = []\n #if not pattern.startswith(r'\\b') and not pattern.endswith(r'\\b'):\n #pattern = r'\\b' + pattern + '\\b'\n try:\n compiled_pattern = re.compile(pattern)\n except:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Query %s' % (thetime, error_message)\n return 'Bad query'\n matches = re.findall(compiled_pattern, plaintext_data)\n for index, i in enumerate(matches):\n if type(i) == tuple:\n matches[index] = i[0]\n return matches\n\n def plaintext_simple_search(pattern, plaintext_data):\n \"\"\"search for tokens in plaintext corpora\"\"\"\n if type(pattern) == str:\n pattern = [pattern]\n result = []\n try:\n tmp = re.compile(pattern)\n except:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Query %s' % (thetime, error_message)\n return 'Bad query'\n\n for p in pattern:\n if case_sensitive:\n pat = re.compile(r'\\b' + re.escape(p) + r'\\b')\n else:\n pat = re.compile(r'\\b' + re.escape(p) + r'\\b', re.IGNORECASE)\n if not any_plaintext_word:\n matches = re.findall(pat, plaintext_data)\n for m in range(len(matches)):\n result.append(p)\n else:\n for m in plaintext_data.split():\n result.append(m)\n return result\n\n def get_speaker_names_from_xml_corpus(path):\n import os\n import re\n from bs4 import BeautifulSoup\n names = []\n # parsing html with regular expression! :)\n speakid = re.compile(r'<speakername>[\\s\\n]*?([^\\s\\n]+)[\\s\\n]*?<.speakername>', re.MULTILINE)\n for (root, dirs, fs) in os.walk(path):\n for f in fs:\n with open(os.path.join(root, f), 'r') as fo:\n txt = fo.read()\n res = re.findall(speakid, txt)\n if res:\n res = [i.strip() for i in res]\n for i in res:\n if i not in names:\n names.append(i)\n return list(sorted(set(names)))\n\n def slow_tregex(sents):\n \"\"\"do the speaker-specific version of tregex queries\"\"\"\n import os\n import bs4\n # first, put the relevant trees into temp file\n if 'outname' in kwargs.keys():\n to_open = 'tmp-%s.txt' % kwargs['outname']\n else:\n to_open = 'tmp.txt'\n to_write = '\\n'.join([sent._parse_string.strip() for sent in sents if sent.parse_string is not None]).encode('utf-8', errors = 'ignore')\n with open(to_open, \"w\") as fo:\n fo.write(to_write)\n q = search.values()[0]\n res = tregex_engine(query = q, \n options = ['-o', '-%s' % translated_option], \n corpus = to_open,\n root = root)\n if root:\n root.update()\n os.remove(to_open)\n return res\n\n def get_deps(sentence, dep_type):\n if dep_type == 'basic-dependencies':\n return sentence.basic_dependencies\n if dep_type == 'collapsed-dependencies':\n return sentence.collapsed_dependencies\n if dep_type == 'collapsed-ccprocessed-dependencies':\n return sentence.collapsed_ccprocessed_dependencies\n\n def get_stats(sents):\n \"\"\"get a bunch of frequencies on interpersonal phenomena\"\"\"\n import os\n import re \n # first, put the relevant trees into temp file\n if 'outname' in kwargs.keys():\n to_open = 'tmp-%s.txt' % kwargs['outname']\n else:\n to_open = 'tmp.txt'\n with open(to_open, \"w\") as fo:\n for sent in sents:\n statsmode_results['Sentences'] += 1\n fo.write(sent.parse_string.rstrip().encode('utf-8', errors = 'ignore') + '\\n')\n deps = 
get_deps(sent, dep_type)\n numpass = len([x for x in deps.links if x.type.endswith('pass')])\n statsmode_results['Passives'] += numpass\n statsmode_results['Tokens'] += len(sent.tokens)\n statsmode_results['Words'] += len([w for w in sent.tokens if w.word.isalnum()])\n #statsmode_results['Unique words'] += len(set([w.word.lower() for w in sent.tokens if w.word.isalnum()]))\n #statsmode_results['Unique lemmata'] += len(set([w.lemma.lower() for w in sent.tokens if w.word.isalnum()]))\n\n # count moods via trees (/\\?/ !< __)\n from dictionaries.process_types import processes\n from corpkit.other import as_regex\n tregex_qs = {'Imperative': r'ROOT < (/(S|SBAR)/ < (VP !< VBD !< VBG !$ NP !$ SBAR < NP !$-- S !$-- VP !$ VP)) !<< (/\\?/ !< __) !<<- /-R.B-/ !<<, /(?i)^(-l.b-|hi|hey|hello|oh|wow|thank|thankyou|thanks|welcome)$/',\n #'Open interrogative': r'ROOT < SBARQ <<- (/\\?/ !< __)', \n #'Closed interrogative': r'ROOT ( < (SQ < (NP $+ VP)) << (/\\?/ !< __) | < (/(S|SBAR)/ < (VP $+ NP)) <<- (/\\?/ !< __))',\n 'Unmodalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP !< MD)))',\n 'Modalised declarative': r'ROOT < (S < (/(NP|SBAR|VP)/ $+ (VP < MD)))',\n 'Open class words': r'/^(NN|JJ|VB|RB)/ < __',\n 'Closed class words': r'__ !< __ !> /^(NN|JJ|VB|RB)/',\n 'Clauses': r'/^S/ < __',\n 'Interrogative': r'ROOT << (/\\?/ !< __)',\n 'Mental processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.mental, boundaries = 'w'),\n 'Verbal processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.verbal, boundaries = 'w'),\n 'Relational processes': r'VP > /^(S|ROOT)/ <+(VP) (VP <<# /%s/)' % as_regex(processes.relational, boundaries = 'w')}\n\n for name, q in sorted(tregex_qs.items()):\n res = tregex_engine(query = q, \n options = ['-o', '-C'], \n corpus = to_open, \n root = root)\n statsmode_results[name] += int(res)\n global numdone\n numdone += 1\n if root:\n root.update()\n if not root:\n tot_string = str(numdone + 1) + '/' + str(total_files * len(tregex_qs.keys()))\n if 'outname' in kwargs.keys():\n tot_string = '%s: %s' % (kwargs['outname'], tot_string)\n animator(p, numdone, tot_string, **par_args)\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set((numdone * 100.0 / (total_files * len(tregex_qs.keys())) / denom) + startnum)\n os.remove(to_open)\n\n def tabler(subcorpus_names, list_of_dicts, num_rows):\n \"\"\"make a word table showing num_rows results\"\"\"\n import pandas as pd\n cols = []\n for subcorp, data in zip(subcorpus_names, list_of_dicts):\n col = pd.Series([w for w, v in data.most_common(num_rows)], name = subcorp)\n cols.append(col)\n word_table = pd.concat(cols, axis = 1)\n return word_table\n\n # a few things are off by default:\n only_count = False\n using_tregex = False\n n_gramming = False\n dependency = False\n plaintext = False\n tokens = False\n statsmode = False\n split_con = True\n search_iterable = False\n\n # determine what kind of data the corpus is\n # this currently slows things down with huge corpora, \n # so judge from folder name first \n if type(path) == str and path.endswith('-parsed'):\n datatype = 'parse'\n elif type(path) == str and path.endswith('-tokenised'):\n datatype = 'tokens'\n else:\n from corpkit.other import determine_datatype\n datatype = determine_datatype(path)\n\n # some empty lists we'll need\n dicts = []\n allwords_list = []\n \n regex_nonword_filter = re.compile(\"[A-Za-z0-9:_]\")\n \n # fix up search\n if type(search) == str:\n search = search[0].lower()\n if not 
search.lower().startswith('t') and not search.lower().startswith('n') \\\n and datatype == 'parse':\n search_iterable = True\n if query == 'any':\n query = r'.*'\n search = {search: query}\n\n possb = ['d', 'g', 'i', 'c', 'a', 'p', 'l', 'w', 't', 'f']\n if not any(i in possb for i in search.keys()):\n raise ValueError('search argument \"%s\" unrecognised.' % search.keys())\n if len(search.keys()) > 1 and 't' in search.keys():\n raise ValueError('if \"t\" in search, it must be the only list item')\n\n # fix up exclude naming conventions, convert lists to regex\n fixed_exclude = {}\n if exclude:\n for k, v in exclude.items():\n if type(v) == list:\n from corpkit.other import as_regex\n v = as_regex(v, boundaries = 'l', case_sensitive = case_sensitive)\n if k != k.lower()[0]:\n fixed_exclude[k.lower()[0]] = v\n else:\n fixed_exclude[k] = v\n exclude = fixed_exclude\n\n if not search_iterable:\n query = search.values()[0]\n\n if type(show) == str or type(show) == unicode:\n show = [show.lower()[0]]\n\n for index, t in enumerate(show):\n show[index] = t.lower()[0]\n\n possb = ['d', 'g', 'i', 'c', 'a', 'p', 'l', 'w', 't', 'f']\n only_dep = ['d', 'g', 'i', 'a', 'f']\n if not any(i in possb for i in show):\n raise ValueError('show argument \"%s\" unrecognised.' % show)\n if len(show) > 1 and 'c' in show:\n raise ValueError('if \"c\" in show, it must be the only list item')\n if 't' in search.keys() and any(i in only_dep for i in show):\n raise ValueError('If searching trees, show can not include: %s' % ', '.join(only_dep))\n\n # Tregex option:\n translated_option = False\n from corpkit.other import as_regex\n \n if datatype == 'parse':\n if 't' in search.keys():\n using_tregex = True\n\n if datatype == 'plaintext':\n plaintext = True\n\n elif datatype == 'tokens':\n tokens = True\n\n\n if using_tregex:\n if 'p' in show:\n dep_funct = slow_tregex\n optiontext = 'Part-of-speech tags only.'\n translated_option = 'u'\n if type(query) == list:\n query = r'__ < (/%s/ !< __)' % as_regex(query, boundaries = 'line', case_sensitive = case_sensitive)\n if query == 'any':\n query = r'__ < (/.?[A-Za-z0-9].?/ !< __)'\n elif 't' in show:\n dep_funct = slow_tregex\n optiontext = 'Tags and words.'\n translated_option = 'o'\n if type(query) == list:\n query = r'__ < (/%s/ !< __)' % as_regex(query, boundaries = 'line', case_sensitive = case_sensitive)\n if query == 'any':\n query = r'__ < (/.?[A-Za-z0-9].?/ !< __)'\n elif 'w' in show:\n dep_funct = slow_tregex\n optiontext = 'Words only.'\n translated_option = 't'\n if type(query) == list:\n query = r'/%s/ !< __' % as_regex(query, boundaries = 'line', case_sensitive = case_sensitive)\n if query == 'any':\n query = r'/.?[A-Za-z0-9].?/ !< __'\n elif 'c' in show:\n dep_funct = slow_tregex\n count_results = {}\n only_count = True\n translated_option = 'C'\n optiontext = 'Counts only.'\n if type(query) == list:\n query = r'/%s/ !< __' % as_regex(query, boundaries = 'line', case_sensitive = case_sensitive)\n if query == 'any':\n query = r'/.?[A-Za-z0-9].?/ !< __'\n elif 'l' in show:\n dep_funct = slow_tregex\n translated_option = 't'\n optiontext = 'Words, lemmatised.'\n lemmatise = True\n if type(query) == list:\n query = r'/%s/ !< __' % as_regex(query, boundaries = 'line', case_sensitive = case_sensitive)\n if query == 'any':\n query = r'/.?[A-Za-z0-9].?/ !< __'\n\n elif datatype == 'plaintext':\n optiontext = 'Searching plaintext corpus'\n if 'regex' in kwargs.keys() and kwargs['regex'] is False:\n translated_option = 's'\n if query == 'any':\n any_plaintext_word = 
True\n else:\n any_plaintext_word = False\n else:\n translated_option = 'r'\n if query == 'any':\n query = r'[^\\s]+'\n if type(query) == list:\n query = as_regex(query, boundaries = 'line', case_sensitive = case_sensitive)\n \n elif datatype == 'tokens':\n if 'w' in search.keys():\n tokens = True\n if type(query) == list:\n translated_option = 'e'\n optiontext = 'Tokens via list.'\n dep_funct = tok_by_list\n else:\n translated_option = 'h'\n optiontext = 'Tokens via regular expression.'\n dep_funct = tok_by_reg\n if 'n' in search.keys():\n translated_option = 'j'\n tokens = True\n lemmatise = False\n optiontext = 'Get ngrams from tokens.'\n if query == 'any':\n query = r'.*'\n if type(query) == list:\n query = as_regex(query, boundaries = 'l', case_sensitive = case_sensitive)\n else:\n try:\n if not case_sensitive:\n query = re.compile(query, re.IGNORECASE)\n else:\n query = re.compile(query)\n except:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Query %s' % (thetime, error_message)\n return 'Bad query'\n global gramsize\n if 'gramsize' in kwargs.keys():\n gramsize = kwargs['gramsize']\n else:\n gramsize = 2\n dep_funct = tok_ngrams\n\n elif datatype == 'parse':\n if 'n' not in search.keys() and 't' not in search.keys():\n translated_option = 'y'\n dependency = True\n optiontext = 'Dependency querying...'\n dep_funct = dep_searcher\n if 'c' in show:\n count_results = {}\n only_count = True\n\n if 's' in search.keys():\n translated_option = 'v'\n #using_tregex = True\n statsmode = True\n optiontext = 'Getting general stats.'\n dep_funct = get_stats\n if datatype != 'parse':\n print 'Need parsed corpus for this.'\n return\n\n # initialise nltk lemmatiser only once\n if lemmatise or ('l' in show and not dependency):\n from nltk.stem.wordnet import WordNetLemmatizer\n lmtzr=WordNetLemmatizer()\n\n if 'n' in search.keys():\n if datatype == 'parse':\n translated_option = 'n'\n using_tregex = True\n optiontext = 'n-grams only.'\n n_gramming = True\n if datatype == 'tokens':\n translated_option = 'j'\n using_tregex = False\n \n if type(query) == list:\n query = as_regex(query, boundaries = 'word', case_sensitive = case_sensitive)\n\n if dependency:\n if type(query) == list:\n query = as_regex(query, boundaries = 'line', case_sensitive = case_sensitive)\n #query = r'(?i)^(' + '|'.join(query) + r')$' \n if query == 'any':\n query = r'.*'\n\n # see if fast tregex can be done instead of temp file slow way\n can_do_fast = False\n if using_tregex:\n if just_speakers is False:\n if statsmode is False:\n can_do_fast = True\n\n if plaintext is True:\n try:\n if tregex_engine(corpus = os.path.join(path, os.listdir(path)[-1]), check_for_trees = True, root = root):\n if not root:\n decision = raw_input('\\nIt appears that your corpus contains parse trees. If using a plaintext search option, your counts will likely be inaccurate.\\n\\nHit enter to continue, or type \"exit\" to start again: ')\n if decision.startswith('e'):\n return\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Corpus \"%s\" contains parse trees. Use \"Trees\" option.' 
% (time, os.path.basename(path))\n root.update()\n return False\n except:\n pass\n \n # if query is a special query, convert it:\n if query == 'any':\n if translated_option == 't' or translated_option == 'C':\n query = r'/.?[A-Za-z0-9].?/ !< __'\n if translated_option == 'u' or translated_option == 'o':\n query = r'__ < (/.?[A-Za-z0-9].?/ !< __)'\n if query == 'subjects':\n query = r'__ >># @NP'\n if query == 'processes':\n query = r'/VB.?/ >># ( VP >+(VP) (VP !> VP $ NP))'\n if query == 'modals':\n query = r'MD < __'\n if query == 'participants':\n query = r'/(NN|PRP|JJ).?/ >># (/(NP|ADJP)/ $ VP | > VP)'\n if query == 'entities':\n query = r'NP <# NNP'\n titlefilter = True\n\n # check that there's nothing in the quicksave path\n if quicksave:\n savedir = 'data/saved_interrogations'\n if not quicksave.endswith('.p'):\n quicksave = quicksave + '.p'\n fullpath = os.path.join(savedir, quicksave)\n if os.path.isfile(fullpath):\n # if the file exists, check if the query is pretty much the same\n from corpkit import load_result\n loaded = load_result(quicksave)\n if loaded.query['query'] == query and \\\n loaded.query['path'] == path and \\\n loaded.query['translated_option'] == translated_option and \\\n loaded.query['lemmatise'] == lemmatise and \\\n loaded.query['titlefilter'] == titlefilter and \\\n loaded.query['spelling'] == spelling and \\\n loaded.query['dep_type'] == dep_type and \\\n loaded.query['function'] == 'interrogator':\n dup_non_i = 'Duplicate'\n else:\n dup_non_i = 'Non-identical'\n\n while os.path.isfile(fullpath) and quicksave:\n dict_for_print = ' ' + '\\n '.join(sorted(['%s: %s' % (k, v) for k, v in loaded.query.items()])) + '\\n'\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: %s interrogation found in %s:\\n\\n%s\\n' \\\n ' You have the following options:\\n\\n' \\\n ' a) save with a new name\\n' \\\n ' b) turn off \"quicksave\"\\n' \\\n ' c) return the results from %s\\n' \\\n ' d) delete %s\\n' \\\n ' e) Quickview %s and then decide\\n' \\\n ' f) exit\\n\\nYour selection: ' % (time, dup_non_i, fullpath, dict_for_print, fullpath, fullpath, fullpath))\n if 'a' in selection:\n sel = raw_input('\\nNew save name: ')\n quicksave = sel\n if not quicksave.endswith('.p'):\n quicksave = quicksave + '.p'\n fullpath = os.path.join(savedir, quicksave)\n elif 'b' in selection:\n quicksave = False\n elif 'c' in selection:\n return loaded\n elif 'd' in selection:\n os.remove(fullpath)\n elif 'e' in selection:\n print loaded.query\n print '\\n'\n try:\n print loaded.results\n except:\n print loaded.totals\n print '\\n'\n elif 'f' in selection:\n print ''\n return\n else:\n as_str = str(selection)\n print ' Choice \"%s\" not recognised.' % selection\n\n # titlefiltering only works with phrases, so turn it on\n if titlefilter:\n phrases = True\n\n def filtermaker(the_filter):\n if type(the_filter) == list:\n from other import as_regex\n the_filter = as_regex(the_filter, case_sensitive = case_sensitive)\n try:\n output = re.compile(the_filter)\n is_valid = True\n except:\n is_valid = False\n if root:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Filter %s' % (thetime, error_message)\n return 'Bad query'\n \n while not is_valid:\n if root:\n time = strftime(\"%H:%M:%S\", localtime())\n print the_filter\n print '%s: Invalid the_filter regular expression.' 
% time\n return False\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: filter regular expression \" %s \" contains an error. You can either:\\n\\n' \\\n ' a) rewrite it now\\n' \\\n ' b) exit\\n\\nYour selection: ' % (time, the_filter))\n if 'a' in selection:\n the_filter = raw_input('\\nNew regular expression: ')\n try:\n output = re.compile(r'\\b' + the_filter + r'\\b')\n is_valid = True\n except re.error:\n is_valid = False\n elif 'b' in selection:\n print ''\n return False\n return output\n\n # dependencies:\n # can't be phrases\n # check if regex valid\n # check if dep_type valid\n if dependency:\n if translated_option == 'v':\n names = get_speaker_names_from_xml_corpus(path)\n \n phrases = False\n \n allowed_dep_types = ['basic-dependencies', 'collapsed-dependencies', 'collapsed-ccprocessed-dependencies']\n \n # allow a b and c shorthand\n if dep_type == 'a':\n dep_type = allowed_dep_types[0]\n if dep_type == 'b':\n dep_type = allowed_dep_types[1]\n if dep_type == 'c':\n dep_type = allowed_dep_types[2]\n\n while dep_type not in allowed_dep_types:\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: Dependency type \"%s\" not recognised. Must be one of:\\n\\n' \\\n ' a) basic-dependencies' \\\n ' b) collapsed-dependencies' \\\n ' c) collapsed-ccprocessed-dependencies\\n\\nYour selection: ' % (time, dep_type))\n if 'a' in selection:\n dep_type = allowed_dep_types[0]\n elif 'b' in selection:\n dep_type = allowed_dep_types[1]\n elif 'c' in selection:\n dep_type = allowed_dep_types[2]\n else:\n pass\n\n # get list of subcorpora and sort them ... user input if no corpus found\n got_corpus = False\n while got_corpus is False:\n try:\n sorted_dirs = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path,d))]\n got_corpus = True\n except OSError:\n got_corpus = False\n time = strftime(\"%H:%M:%S\", localtime())\n selection = raw_input('\\n%s: Corpus directory not found: \" %s \". You can either:\\n\\n' \\\n ' a) enter a new corpus path\\n' \\\n ' b) exit\\n\\nYour selection: ' % (time, path))\n if 'a' in selection:\n path = raw_input('\\nNew corpus path: ')\n elif 'b' in selection:\n print ''\n return\n \n # treat as one large corpus if no subdirs found\n one_big_corpus = False\n if len(sorted_dirs) == 0:\n #warnings.warn('\\nNo subcorpora found in %s.\\nUsing %s as corpus dir.' 
% (path, path))\n one_big_corpus = True\n # fails if in wrong dir!\n sorted_dirs = [os.path.basename(path)]\n\n # numerically sort subcorpora if the first can be an int\n # could improve now with is_number, all\n else:\n try:\n check = int(sorted_dirs[0])\n sorted_dirs.sort(key=int)\n except:\n pass\n\n # if doing dependencies, make list of all files, and a progress bar\n if dependency or plaintext or tokens or can_do_fast is False:\n all_files = []\n for d in sorted_dirs:\n if not one_big_corpus:\n subcorpus = os.path.join(path, d)\n else:\n subcorpus = path\n if dependency:\n files = [f for f in os.listdir(subcorpus) if f.endswith('.xml')]\n else:\n files = [f for f in os.listdir(subcorpus) if not f.startswith('.')]\n \n # skip files not containing speakers...\n if just_speakers:\n rem = []\n for f in files:\n fp = os.path.join(subcorpus, f)\n data = open(fp, 'r').read()\n if any('<speakername>' + name in data for name in just_speakers):\n rem.append(f)\n files = rem\n\n all_files.append([d, files])\n total_files = len([item for sublist in all_files for item in sublist[1]])\n sorted_dirs = all_files\n c = 0\n if not root:\n tstr = False\n if 'outname' in kwargs.keys():\n if dependency or plaintext or tokens:\n tstr = '%s: %d/%d' % (kwargs['outname'], 0, total_files)\n else:\n tstr = '%s: %d/%d' % (kwargs['outname'], 0, len(sorted_dirs))\n if translated_option != 'v':\n p = animator(None, None, init = True, tot_string = tstr, length = total_files, **par_args)\n #p = TextProgressBar(total_files)\n else:\n p = animator(None, None, init = True, tot_string = tstr, length = total_files * 10, **par_args)\n #p = TextProgressBar(total_files * 10)\n \n # if tregex, make progress bar for each dir\n else:\n if not root:\n tstr = False\n if 'outname' in kwargs.keys():\n tstr = '%s: %d/%d' % (kwargs['outname'], 0, len(sorted_dirs))\n p = animator(None, None, tot_string = tstr, init = True, length = len(sorted_dirs), **par_args)\n\n # loop through each subcorpus\n subcorpus_names = []\n\n # check for valid query. so ugly.\n if using_tregex:\n if query:\n if not n_gramming:\n q = search.values()[0]\n query = tregex_engine(corpus = False, query = q, options = ['-t'], check_query = True, root = root)\n if query is False:\n if root:\n return 'Bad query'\n else:\n return\n \n else:\n if dependency or translated_option == 'r' or translated_option == 'h':\n is_valid = True\n try:\n if translated_option == 'r':\n if type(query) == str:\n if query.startswith(r'\\b'):\n query = query[2:]\n if query.endswith(r'\\b'):\n query = query[:-2]\n if case_sensitive:\n regex = re.compile(r'\\b' + query + r'\\b')\n else:\n regex = re.compile(r'\\b' + query + r'\\b', re.IGNORECASE)\n else:\n regex = query\n else:\n if case_sensitive:\n regex = re.compile(query)\n else:\n regex = re.compile(query, re.IGNORECASE)\n is_valid = True\n except re.error:\n is_valid = False\n if root:\n import traceback\n import sys\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lst = traceback.format_exception(exc_type, exc_value,\n exc_traceback)\n error_message = lst[-1]\n thetime = strftime(\"%H:%M:%S\", localtime())\n print '%s: Query %s' % (thetime, error_message)\n return \"Bad query\"\n while not is_valid:\n time = strftime(\"%H:%M:%S\", localtime())\n if root:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Regular expression in query contains an error.' % time\n return 'Bad query'\n selection = raw_input('\\n%s: Regular expression \" %s \" contains an error. 
You can either:\\n\\n' \\\n ' a) rewrite it now\\n' \\\n ' b) exit\\n\\nYour selection: ' % (time, query))\n if 'a' in selection:\n query = raw_input('\\nNew regular expression: ')\n try:\n if case_sensitive:\n regex = re.compile(r'\\b' + query + r'\\b')\n else:\n regex = re.compile(r'\\b' + query + r'\\b', re.IGNORECASE)\n is_valid = True\n except re.error:\n is_valid = False\n elif 'b' in selection:\n print ''\n return\n\n #print list nicely\n if type(query) == list:\n qtext = ', '.join(query)\n elif type(query) == str or type(query) == unicode:\n qtext = query\n else:\n qtext = 'regex'\n\n global skipped_sents\n skipped_sents = 0\n\n # begin interrogation\n time = strftime(\"%H:%M:%S\", localtime())\n if printstatus:\n print (\"\\n%s: Beginning corpus interrogation: %s\" \\\n \"\\n Query: '%s'\\n %s\" \\\n \"\\n Interrogating corpus ... \\n\" % (time, os.path.basename(path), qtext, optiontext) )\n if root:\n print '%s: Interrogating corpus ...' % time\n if root and tk:\n root.update()\n\n global numdone\n numdone = 0\n\n for index, d in enumerate(sorted_dirs):\n if using_tregex or n_gramming:\n if can_do_fast or n_gramming:\n subcorpus_name = d\n subcorpus_names.append(subcorpus_name)\n if not root:\n if paralleling is not False:\n tstr = '%s: %d/%d' % (kwargs['outname'], index + 1, len(sorted_dirs))\n else:\n tstr = False\n animator(p, index, tstr, **par_args)\n #animator(p, index, **par_args)\n #p.animate(index)\n if root and tk:\n time = strftime(\"%H:%M:%S\", localtime())\n if not one_big_corpus:\n print '%s: Interrogating subcorpus: %s' % (time, subcorpus_name)\n else:\n print '%s: Interrogating corpus ... ' % time\n root.update()\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set(((index + 1) * 100.0 / len(sorted_dirs) / denom) + startnum)\n # get path to corpus/subcorpus\n if len(sorted_dirs) == 1:\n subcorpus = path\n else:\n subcorpus = os.path.join(path,subcorpus_name)\n \n if n_gramming:\n result = []\n if 'split_contractions' in kwargs.keys():\n if kwargs['split_contractions'] is True:\n split_con = True\n elif kwargs['split_contractions'] is False:\n split_con = False\n from corpkit.keys import ngrams\n if 'blacklist' in kwargs.keys():\n the_blacklist = kwargs['blacklist']\n else:\n the_blacklist = False\n if 'gramsize' in kwargs.keys():\n gramsz = kwargs['gramsize']\n else:\n gramsz = 2\n\n spindle_out = ngrams(subcorpus, reference_corpus = False, \n blacklist = the_blacklist,\n printstatus = False, \n clear = False, \n lemmatise = lemmatise, \n split_contractions = split_con, \n whitelist = query,\n gramsize = gramsz\n )\n for w in list(spindle_out.index):\n if query != 'any':\n if re.search(query, w):\n for _ in range(spindle_out[w]):\n result.append(w)\n else:\n for _ in range(spindle_out[w]):\n result.append(w)\n\n #if tregex, search\n else:\n if not statsmode:\n op = ['-o', '-' + translated_option]\n q = search.values()[0]\n result = tregex_engine(query = q, options = op, \n corpus = subcorpus, root = root)\n if result is False:\n return\n \n # if just counting matches, just \n # add subcorpus name and count...\n if only_count:\n count_results[d] = result\n continue\n\n # for dependencies, d[0] is the subcorpus name \n # and d[1] is its file list ... 
\n\n elif dependency or plaintext or tokens or statsmode or can_do_fast is False:\n #if not root:\n #p.animate(-1, str(0) + '/' + str(total_files))\n from collections import Counter\n statsmode_results = Counter({'Sentences': 0, 'Passives': 0, 'Tokens': 0})\n subcorpus_name = d[0]\n subcorpus_names.append(subcorpus_name)\n fileset = d[1]\n #for f in read_files:\n result = []\n for f in fileset:\n result_from_file = None\n # pass the x/y argument for more updates \n if not root and translated_option != 'v':\n tot_string = str(c + 1) + '/' + str(total_files)\n if 'outname' in kwargs.keys():\n tot_string = '%s: %s' % (kwargs['outname'], tot_string)\n animator(p, c, tot_string, **par_args)\n #p.animate((c), tot_string)\n if root and tk and not statsmode:\n root.update()\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set((((c + 1) * 100.0 / total_files) / denom) + startnum)\n time = strftime(\"%H:%M:%S\", localtime())\n if not one_big_corpus:\n print '%s: Interrogating subcorpus: %s' % (time, subcorpus_name)\n else:\n print '%s: Interrogating corpus ...' % (time)\n c += 1\n if one_big_corpus:\n filepath = os.path.join(path, f)\n else:\n filepath = os.path.join(path, subcorpus_name, f)\n if dependency or can_do_fast is False:\n if not plaintext and not tokens:\n with open(filepath, \"rb\") as text:\n data = text.read()\n from corenlp_xml.document import Document\n try:\n corenlp_xml = Document(data)\n except:\n print 'Could not read file: %s' % filepath\n continue\n #corenlp_xml = Beautifulcorenlp_xml(data, parse_only=justsents) \n if just_speakers: \n sents = [s for s in corenlp_xml.sentences if s.speakername in just_speakers]\n if not sents:\n continue\n #sents = [s for s in corenlp_xml.find_all('sentence') \\\n #if s.speakername.text.strip() in just_speakers]\n else:\n sents = corenlp_xml.sentences\n # run whichever function has been called\n if translated_option == 'y':\n result_from_file = dep_searcher(sents)\n else:\n result_from_file = dep_funct(sents)\n if only_count:\n count_results[subcorpus_name] = result_from_file\n\n # memory problems\n corenlp_xml = None\n data = None\n gc.collect()\n\n if plaintext:\n with open(filepath, \"rb\") as text:\n data = text.read()\n if translated_option == 'r':\n result_from_file = plaintext_regex_search(regex, data)\n if translated_option == 's':\n result_from_file = plaintext_simple_search(query, data)\n if tokens:\n import pickle\n data = pickle.load(open(filepath, \"rb\"))\n #print data\n if translated_option == 'h':\n result_from_file = tok_by_reg(regex, data)\n if translated_option == 'e':\n result_from_file = tok_by_list(query, data)\n if translated_option == 'j':\n split_con = False\n if 'split_contractions' in kwargs.keys():\n if kwargs['split_contractions'] is True:\n split_con = True\n result_from_file = tok_ngrams(query, data, split_contractions = split_con)\n \n if result_from_file:\n if not statsmode and not only_count:\n for entry in result_from_file:\n result.append(entry)\n\n if not statsmode and 'c' not in show:\n result.sort()\n\n # lowercaseing, encoding, lemmatisation, \n # titlewords removal, usa_english, etc.\n if not statsmode:\n processed_result = processwords(result, lemmatag = lemmatag)\n \n if not statsmode:\n allwords_list.append(processed_result)\n dicts.append(Counter(processed_result))\n if statsmode:\n dicts.append(statsmode_results)\n allwords_list.append([w for w in statsmode_results.keys()])\n\n if not plaintext:\n if not root:\n if paralleling is not False:\n if dependency or 
plaintext or tokens or can_do_fast is False:\n tstr = '%s: %d/%d' % (kwargs['outname'], total_files, total_files)\n else:\n tstr = '%s: %d/%d' % (kwargs['outname'], len(sorted_dirs), len(sorted_dirs))\n\n else:\n tstr = False\n animator(p, len(sorted_dirs), tot_string = tstr, **par_args)\n\n #p.animate(len(sorted_dirs))\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set((100 / denom + startnum))\n if root and tk:\n root.update()\n\n else:\n # weird float div by 0 zero error here for plaintext\n try:\n if not root:\n if translated_option != 'v':\n if paralleling is not False:\n animator(p, total_files, kwargs['outname'], **par_args)\n else:\n animator(p, total_files, **par_args)\n #p.animate(total_files)\n\n else:\n if paralleling is not False:\n animator(p, total_files * 10, kwargs['outname'], **par_args)\n else:\n animator(p, total_files * 10, **par_args)\n #p.animate(total_files * 10)\n if 'note' in kwargs.keys() and kwargs['note'] is not False:\n kwargs['note'].progvar.set((100 / denom + startnum))\n except:\n pass\n\n if root and tk:\n root.update()\n\n if not have_ipython and not root and not tk:\n print '\\n'\n \n # if only counting, get total total and finish up:\n if only_count:\n stotals = pd.Series(count_results)\n stotals.name = 'Total' \n outputnames = collections.namedtuple('interrogation', ['query', 'totals'])\n the_time_ended = strftime(\"%Y-%m-%d %H:%M:%S\")\n # add option to named tuple\n the_options = {'path': path,\n 'search': search,\n 'show': show,\n 'function': 'interrogator',\n 'datatype': stotals.dtype,\n 'query': query,\n 'exclude': exclude,\n 'lemmatise': lemmatise,\n 'titlefilter': titlefilter,\n 'lemmatag': lemmatag,\n 'spelling': spelling,\n 'phrases': phrases,\n 'dep_type': dep_type,\n 'quicksave': quicksave,\n 'time_started': the_time_started,\n 'time_ended': the_time_ended}\n\n try:\n the_options['translated_option'] = translated_option\n except:\n the_options['translated_options'] = translated_options\n\n output = outputnames(the_options, stotals)\n if 'outname' in kwargs:\n stotals.name = kwargs['outname']\n return stotals\n if have_ipython:\n clear_output()\n if quicksave:\n if stotals.sum() > 0:\n from other import save_result\n save_result(output, quicksave)\n \n if printstatus:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation finished! %d total occurrences.' 
% (time, stotals.sum())\n if not tk:\n print ''\n\n return output\n\n # flatten and sort master list, in order to make a list of unique words\n allwords = [item for sublist in allwords_list for item in sublist]\n allwords.sort()\n unique_words = set(allwords)\n\n #make master reference_corpus\n the_big_dict = {}\n\n # calculate results\n # for every unique entry, find out how many times it appears per subcorpus\n for word in unique_words:\n the_big_dict[word] = [each_dict[word] for each_dict in dicts]\n \n # turn master dict into dataframe, sorted\n df = DataFrame(the_big_dict, index = subcorpus_names)\n\n if one_big_corpus:\n df = df.T.sort(list(df.T.columns)[0], ascending = False).T\n\n try:\n if not one_big_corpus:\n df.ix['Total'] = df.sum()\n tot = df.ix['Total']\n df = df[tot.argsort()[::-1]]\n df = df.drop('Total', axis = 0)\n except:\n pass\n\n # make totals branch\n stotals = df.sum(axis = 1)\n stotals.name = 'Total'\n\n # make result into series if only one subcorpus\n if one_big_corpus and not df1_always_df:\n try:\n df = df.ix[subcorpus_names[0]]\n except:\n pass\n df.sort(ascending = False)\n\n # if numerical colnames, sort numerically\n if show == ['r'] or show == ['i']:\n intcols = sorted([int(c) for c in list(df.columns)])\n df.columns = [str(c) for c in intcols]\n\n # add sort info for tk\n if tk:\n df = df.T\n df['tkintertable-order'] = pd.Series([index for index, data in enumerate(list(df.index))], index = list(df.index))\n df = df.T\n \n # print skipped sent information for distance_mode\n if printstatus and 'r' in show and skipped_sents > 0:\n print '\\n %d sentences over 99 words skipped.\\n' % skipped_sents\n \n #make results into named tuple\n # add option to named tuple\n the_time_ended = strftime(\"%Y-%m-%d %H:%M:%S\")\n the_options = {'path': path,\n 'search': search,\n 'show': show,\n 'datatype': df.iloc[0].dtype,\n 'query': query,\n 'lemmatise': lemmatise,\n 'titlefilter': titlefilter,\n 'lemmatag': lemmatag,\n 'function': 'interrogator',\n 'spelling': spelling,\n 'exclude': exclude,\n 'phrases': phrases,\n 'dep_type': dep_type,\n 'quicksave': quicksave,\n 'time_started': the_time_started,\n 'time_ended': the_time_ended}\n\n try:\n the_options['translated_option'] = translated_option\n except:\n the_options['translated_options'] = translated_options\n\n outputnames = collections.namedtuple('interrogation', ['query', 'results', 'totals'])\n output = outputnames(the_options, df, stotals)\n\n if type(paralleling) == int:\n return (kwargs['outname'], df, stotals)\n \n if have_ipython:\n clear_output()\n\n # warnings if nothing generated...\n if not one_big_corpus and not df1_always_df:\n num_diff_results = len(list(df.columns))\n elif df1_always_df and not one_big_corpus:\n num_diff_results = len(list(df.columns))\n elif not df1_always_df and one_big_corpus:\n num_diff_results = len(list(df.index))\n elif df1_always_df and one_big_corpus:\n num_diff_results = len(list(df.columns))\n\n if num_diff_results == 0:\n if not root:\n print ''\n warnings.warn('No results produced. Maybe your query needs work.')\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation produced no results, sorry.' % time\n return False\n\n if stotals.sum() == 0:\n if not root:\n print ''\n warnings.warn('No totals produced. Maybe your query needs work.')\n else:\n time = strftime(\"%H:%M:%S\", localtime())\n print '%s: Interrogation produced no results, sorry.' 
% time\n return False\n\n time = strftime(\"%H:%M:%S\", localtime())\n if printstatus:\n print '%s: Interrogation finished! %d unique results, %d total.' % (time, num_diff_results, stotals.sum())\n if not tk:\n print ''\n\n if quicksave:\n if stotals.sum() > 0 and num_diff_results > 0:\n from other import save_result\n save_result(output, quicksave)\n\n return output", "title": "" }, { "docid": "d6c25ddac5290f858c0f4d0bae72b436", "score": "0.4681151", "text": "def find_corpora_visualizations():\n\n # build a quick map of visualizations, so that we can reference by annis label.\n vis_map = dict()\n for vis in HtmlVisualizationFormat.objects.all():\n vis_map[vis.slug] = vis\n\n for corpus in Corpus.objects.all():\n # get the list of all the visualizations already loaded for this corpus.\n already_have = set()\n for one_fmt in corpus.html_visualization_formats.all():\n already_have.add(one_fmt.slug)\n\n url_fmt = \"https://corpling.uis.georgetown.edu/annis-service/annis/query/resolver/{0}/NULL/node\"\n url_to_fetch = url_fmt.format(corpus.annis_corpus_name)\n res = request.urlopen(url_to_fetch)\n root = ET.fromstring(res.read())\n xpath = \"./resolverEntry[visType='htmldoc']/mappings/entry/value\"\n added = False\n for one_node in root.findall(xpath):\n vis_slug = one_node.text\n if vis_slug not in already_have:\n corpus.html_visualization_formats.add(vis_map[vis_slug])\n added = True\n\n # If we added any visualizations, save them now\n if added:\n corpus.save()", "title": "" }, { "docid": "f9b8dbc964c6dab7f81072eb1f1e3fb7", "score": "0.46805918", "text": "def analyze_entities(text_content):\n # Set connection to client as variable\n client = language_v1.LanguageServiceClient()\n\n # Set type_ to read PLAIN_TEXT\n type_ = language_v1.Document.Type.PLAIN_TEXT\n\n # specify language & set document variable\n '''\n https://cloud.google.com/natural-language/docs/languages\n if language is not set, it will be detected.\n '''\n lang = \"en\"\n document = {\"content\": text_content, \"type_\": type_, \"language\": lang}\n\n # Set Encoding type to UTF8\n encoding_type = language_v1.EncodingType.UTF8\n\n # Pass in client request with defined specifications\n response = client.analyze_entities(request = {'document': document, 'encoding_type': encoding_type})\n\n # Loop through entitites returned from the API\n for entity in response.entities:\n print(u\"Representative name for the entity: {}\".format(entity.name))\n\n # Get entity type (PERSON, LOCATION, ADDRESS, NUMBER, etc)\n print(u\"Entity type: {}\".format(language_v1.Entity.Type(entity.type_).name))\n\n # Get salience score in [0, 1.0] range\n print(u\"Salience score: {}\".format(entity.salience))\n\n \n # Loop over the metadata associated with entity. \n '''\n Many known entities have a wiki (wikipedia_url) and Knowledge Graph MID (mid)/\n (Knowledge Graph is used by google to show widgets of condensed info from multiple sources)\n\n Some entity types may also have additional metadata\n e.g. ADDRESS entities may have metadata for the address street_name, postal_code, etc\n ''' \n for metadata_name, metadata_value in entity.metadata.items():\n print(u\"{}: {}\".format(metadata_name, metadata_value))\n \n # Loop over the mentions of entity from input document.\n '''\n API also supports proper noun mentions.\n # of appearances in given text will affect an entity's salience score.\n '''\n for mention in entity.mentions:\n print(u\"Mention text: {}\".format(mention.text.content))\n\n # Get mention type, e.g. 
PROPER for proper noun\n print(\n u\"Mention type: {}\".format(language_v1.EntityMention.Type(mention.type_).name)\n )\n\n # outputs the language, useful if language needs to be automatically detected.\n print(u\"Language of the text: {}\".format(response.language))", "title": "" }, { "docid": "bc98f23142f5966cda19dd9b4b5c74ca", "score": "0.46783927", "text": "def showcase():\n\tfrom PIL import Image\n\tfrom PIL import ImageFont\n\tfrom PIL import ImageDraw\n\n\t# Optional: Varied loading process for showcases, when not done at the end of training\n\t# directory = \"results/dirname\"\n\t# checkpoint_path = directory + \"/50000.pth\"\n\t# checkpoint = torch.load(checkpoint_path)\n\t# epoch = checkpoint['epoch']\n\t\"\"\"\n\tfrom collections import OrderedDict\n\tnew_state_dict = OrderedDict()\n\tfor k, v in checkpoint['state_dict'].items():\n\t\t# name = k[7:] # remove `module.`\n\t\tname = k.replace(\".module\", \"\") # removing ‘.moldule’ from key\n\t\tnew_state_dict[name] = v\n\t# load params\n\tmodel.load_state_dict(new_state_dict)\n\n\toptimizer.load_state_dict(checkpoint['optimizer'])\n\tprint(\"Loaded checkpoint '{}' (epoch {})\".format(checkpoint_path, checkpoint['epoch']))\n\t\"\"\"\n\tos.makedirs(directory + \"/showcase\", exist_ok=True)\n\n\tglobal decoder_nat_loss, decoder_syn_loss, KLD_syn_loss, KLD_nat_loss, regressor_nat, regressor_syn\n\n\tactual_showcase(False, False)\n\treset_loss_sums()\n\tactual_showcase(True, False)\n\treset_loss_sums()\n\tactual_showcase(False, True)\n\treset_loss_sums()\n\tactual_showcase(True, True)", "title": "" }, { "docid": "f8fc840b87112fb50c87543c7bc75ec0", "score": "0.4677465", "text": "def read_document(self):\n words = self.word_runner()\n word = \"press space to start\"\n orp_ind = 13\n try:\n while True:\n time.sleep(60 / self.wpm)\n\n if self.is_reading:\n word = next(words)\n orp_ind = int(self.orp_index(word))\n\n yield (word, orp_ind)\n except StopIteration:\n pass\n finally:\n del words", "title": "" }, { "docid": "1e71b5860b51c1b20ff775ff4753becb", "score": "0.46771982", "text": "def deep_dream_of_extreme_control(FLAGS,model,input_images=[],num_iterations=10,step_size=0.1):\n if len(input_images) == 0:\n # use predefined images\n img_dir='/esat/opal/kkelchte/docker_home/pilot_data/visualization_images'\n input_images=sorted([img_dir+'/'+f for f in os.listdir(img_dir)])\n\n print(\"[tools.py]: extracting deep dream maps of {0} in {1}\".format([os.path.basename(i) for i in input_images], os.path.dirname(input_images[0])))\n \n # experts=np.asarray([[k]*(FLAGS.action_quantity if FLAGS.discrete else 1) for v in sorted(model.factor_offsets.values()) for k in model.factor_offsets.keys() if model.factor_offsets[k]==v]).flatten()\n\n inputs = load_images(input_images, model.input_size[1:])\n \n # collect gradients for output endpoint of evaluation model\n grads={}\n with tf.device('/cpu:0'):\n output_tensor = model.endpoints['eval']['outputs']\n for i in range(output_tensor.shape[1].value):\n layer_loss = output_tensor[:,i]\n gradients = tf.gradients(layer_loss, model.inputs)[0]\n gradients /= (tf.sqrt(tf.reduce_mean(tf.square(gradients))) + 1e-5)\n grads[output_tensor.name+'_'+str(i)]=gradients\n\n\n # apply gradient ascent for all outputs and each input image\n # if number of outputs ==1 apply gradient descent for contrast\n if len(grads.keys())== 1:\n opposite_results={}\n else:\n opposite_results=None\n\n import copy\n results = {}\n for gk in grads.keys(): \n results[gk]=copy.deepcopy(inputs)\n if isinstance(opposite_results,dict): 
opposite_results[gk]=copy.deepcopy(inputs)\n\n for step in range(num_iterations):\n if step%10==0: print \"{0} step: {1}\".format(time.ctime(), step)\n for i,gk in enumerate(sorted(grads.keys())):\n results[gk] += step_size * model.sess.run(grads[gk], {model.inputs: results[gk]})\n if isinstance(opposite_results,dict):\n opposite_results[gk] -= step_size * model.sess.run(grads[gk], {model.inputs: opposite_results[gk]})\n\n # Normalize results within 0:1 range\n clean_results={}\n for gk in results.keys():\n clean_results[gk]=[]\n for i in range(results[gk].shape[0]):\n clean_results[gk].append(deprocess_image(results[gk][i], one_channel=True))\n # results[gk][i]=deprocess_image(results[gk][i], one_channel=True)\n if isinstance(opposite_results,dict):\n opposite_results[gk][i]=deprocess_image(opposite_results[gk][i])\n\n # combine adjust input images in one overview image\n # one column for each input image\n # one row with each extreme control for separate and difference images\n num_rows=1+len(results.keys())\n fig, axes = plt.subplots(num_rows ,min(len(input_images),5),figsize=(23, 4*(len(grads.keys())+1)))\n # fig, axes = plt.subplots(num_rows ,min(len(input_images),5),figsize=(23, 4*(len(grads.keys())+1)))\n # add original images in first row\n for i in range(axes.shape[1]):\n axes[0, i].set_title(os.path.basename(input_images[i]).split('.')[0])\n axes[0, i].imshow(matplotlibprove(inputs[i]), cmap='inferno')\n axes[0, i].axis('off')\n\n # add for each filter the modified input\n row_index=1\n for gk in sorted(results.keys()):\n for i in range(axes.shape[1]):\n # print gk\n # axes[row_index, i].set_title('Grad Asc: '+gk.split('/')[1]+'/'+gk[-1]) \n axes[row_index, i].set_title('Grad Asc: '+gk)\n # axes[row_index, i].set_title(experts[row_index-1])\n\n axes[row_index, i].imshow(np.concatenate((inputs[i],np.expand_dims(clean_results[gk][i],axis=2)), axis=2), cmap='inferno')\n # axes[row_index, i].imshow(matplotlibprove(results[gk][i]), cmap='inferno')\n axes[row_index, i].axis('off')\n row_index+=1\n # In cas of continouos controls: visualize the gradient descent and difference\n # if isinstance(opposite_results,dict):\n # for gk in opposite_results.keys():\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Grad Desc: '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Grad Desc: '+gk) \n # axes[row_index, i].imshow(matplotlibprove(opposite_results[gk][i]), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n \n # # add difference\n # for gk in opposite_results.keys():\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Diff: '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Diff: '+gk) \n # axes[row_index, i].imshow(matplotlibprove(deprocess_image((opposite_results[gk][i]-results[gk][i])**2)), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n # else:\n # # add difference between 2 exteme actions\n # gk_left=sorted(results.keys())[0]\n # gk_right=sorted(results.keys())[-1]\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Diff : '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Diff : '+gk) \n # axes[row_index, i].imshow(matplotlibprove(deprocess_image((results[gk_left][i]-results[gk_right][i])**2)), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n \n \n plt.savefig(FLAGS.summary_dir+FLAGS.log_tag+'/control_dream_maps.jpg',bbox_inches='tight')\n # plt.show()", "title": "" }, { "docid": "a1211ae4f48a93630006ec87da1038d6", "score": "0.4672397", "text": "def 
wiki_analyzer(language: str) -> None:\r\n\trunning = True\r\n\tdbase = database.WikiDatabase(f'{language}wikidb')\r\n\r\n\tsource_buffer = []\r\n\ttarget_buffer = []\r\n\tpaths_buffer = []\r\n\r\n\tlast_time = time.time()\r\n\tpaths_added = 0\r\n\r\n\ttry:\r\n\t\twhile running:\r\n\t\t\r\n\t\t\tsources, targets, all_paths = analyze_path(dbase)\r\n\t\t\tsource_buffer.extend(sources)\r\n\t\t\ttarget_buffer.extend(targets)\r\n\t\t\tpaths_buffer.extend(all_paths)\r\n\t\t\tpaths_added += 2\r\n\r\n\t\t\tif len(source_buffer) >= BUFFER_SIZE:\r\n\t\t\t\tdbase.dump_statistics(source_buffer, target_buffer, paths_buffer)\r\n\t\t\t\tsource_buffer = []\r\n\t\t\t\ttarget_buffer = []\r\n\t\t\t\tpaths_buffer = []\r\n\r\n\t\t\tif select.select([sys.stdin], [], [], 0.0)[0]:\r\n\t\t\t\tusr_input = input()\r\n\t\t\t\tif usr_input.lower() == 'q' or usr_input.lower() == 'quit':\r\n\t\t\t\t\trunning = False\r\n\t\t\t\t\tif source_buffer:\r\n\t\t\t\t\t\tdbase.dump_statistics(source_buffer, target_buffer, paths_buffer)\r\n\r\n\t\t\tif time.time() - last_time > 15:\r\n\t\t\t\td_t = time.time() - last_time\r\n\t\t\t\tpaths_per_min = round(paths_added/d_t * 60)\r\n\t\t\t\tprint(f\"\\r{paths_per_min} paths / min \", end='')\r\n\t\t\t\tpaths_added = 0\r\n\t\t\t\tlast_time = time.time()\r\n\tfinally:\r\n\t\tif running: \r\n\t\t\tdbase.dump_statistics(source_buffer, target_buffer, paths_buffer)", "title": "" }, { "docid": "1d6e331c27062a0f9d40ad67ab7987c2", "score": "0.4666665", "text": "def run_demo():\n while True:\n embeddings = beer_emb.embed_doc(input(\"Test if words are beer-related: \"),\n word_filter=False)\n for word_vec in embeddings:\n print(is_beer_related(word_vec))", "title": "" }, { "docid": "f434f6961f529bac8a8952d09c4673b0", "score": "0.46660876", "text": "def Run_Extraction(self):\n\n # print the summary of the model\n print(self.ww_model.model.summary(), end=\"\\n\\n\", flush=True)\n # open an audio data stream\n self.stream = self.p.open(format=self.format, channels=self.channels,\n rate=self.rate, input=True,\n frames_per_buffer=self.chunk)\n\n act_count = 0\n\n while True:\n\n # reads chunk of audio\n data = self.stream.read(self.chunk)\n\n # appends chunk to frame list\n self.frames.append(data)\n\n # begins making predictions after the first\n # 2.5 seconds of audio is read\n if (len(self.frames) > 19):\n\n prediction = self.Prediction()\n\n # if the predictions is larger than the defined confidence\n if (prediction > self.confidence):\n\n # increment the activation counter\n act_count += 1\n\n # if the number of consecutive activations\n # exceeds the activation value\n if(act_count >= self.activations):\n\n # print out \"nimbus\"\n print(\" << nimbus >> \", end=\" \", flush=True)\n\n # reset activation count\n act_count = 0\n\n self.False_Activation()\n\n self.frames = self.frames[18:]\n\n if (self.false_counts >= self.false_count):\n self.Retrain_Model()\n\n # if prediction falls below the confidence level\n else:\n\n # reset the activation count\n act_count = 0\n\n if not(self.print_pred):\n # output nothing to the stream\n print(\"-\", end=\"\", flush=True)\n\n # window the data stream\n self.frames = self.frames[1:]", "title": "" }, { "docid": "cb03b00deff42713f9827aa01b7eb32e", "score": "0.46658754", "text": "def main(args):\n corpus = read_corpus(args, verbose=True)\n for k in sorted(corpus, key=educe.stac.id_to_path):\n doc = corpus[k]\n print(\"========== %s ============\" % k)\n print()\n if args.edges:\n dialogues = sorted_first_widest(filter(is_dialogue, doc.units))\n if dialogues:\n 
d_first = dialogues[0]\n print(annotate_doc(doc, span=d_first.text_span()))\n if len(dialogues) > 1:\n d_last = dialogues[-1]\n txt = annotate_doc(doc, span=d_last.text_span())\n print(\"...\\n\")\n print(txt.encode('utf-8'))\n else:\n print(annotate_doc(doc).encode('utf-8'))\n print()", "title": "" } ]
8e011d0b414fc36e0eedeb9448e99c0d
returns a dict with some joint info
[ { "docid": "8972d83bad2dffd5c842b188e5eca22d", "score": "0.7336723", "text": "def _get_joint_info(self, body_id, joint_id):\n # todo: make joint_info a class so we don't have to memorise the keys\n info = self._p.getJointInfo(body_id, joint_id)\n joint_info = {\n 'id': info[0],\n 'link_name': info[12].decode(\"utf-8\"),\n 'joint_name': info[1].decode(\"utf-8\"),\n 'type': self.JOINT_TYPES[info[2]],\n 'friction': info[7],\n 'lower_limit': info[8],\n 'upper limit': info[9],\n 'max_force': info[10],\n 'max_velocity': info[11],\n 'joint_axis': info[13],\n 'parent_pos': info[14],\n 'parent_orn': info[15]\n }\n return joint_info", "title": "" } ]
[ { "docid": "d062645d5feee624eb9b8bbb5e39aa28", "score": "0.7694279", "text": "def joint_dict(self):\n self.check_joint_names()\n return dict(zip(self.joint_names, self.joint_values))", "title": "" }, { "docid": "711b5a3870f4e152646742d8714a6112", "score": "0.6767711", "text": "def _get_joint_positions(self) -> dict:\n return self.joint_positions", "title": "" }, { "docid": "9453c6c77ce8e2672c0a22d7ff2958d7", "score": "0.6716907", "text": "def get_joints(id):\n # initialize the dictionaries:\n r={}\n p={}\n real_w = {}\n for key in handler.keys():\n r[key] = skel_cap.get_joint_position(id,handler[key])# -> [str,str,str,float]\n # Convert to projective\n p[key] = convert2projective(r[key])\n # Convert the data in the original dictonary to format\n real_w[key] = [ float(r[key].point[0]),float(r[key].point[1]),\n float(r[key].point[2]),r[key].confidence]\n # confidences:\n return p, real_w", "title": "" }, { "docid": "3617ab53669c02df20184b825c6ea366", "score": "0.6657556", "text": "def joints(self):\n if self.floating_platform is None: return {}\n joints_dict = {}\n for member in self.floating_platform[\"joints\"]:\n if \"cylindrical\" in member and member[\"cylindrical\"]:\n r = member[\"location\"][0]\n theta = member[\"location\"][1]\n x = r * cos(theta)\n y = r * sin(theta)\n z = member[\"location\"][2]\n joints_dict[member[\"name\"]] = np.array([x, y, z])\n else:\n joints_dict[member[\"name\"]] = np.array(member[\"location\"])\n for member in self.floating_platform[\"members\"]:\n joint1 = joints_dict[member[\"joint1\"]]\n joint2 = joints_dict[member[\"joint2\"]]\n direction = joint2 - joint1\n if \"axial_joints\" in member:\n for axial_joint in member[\"axial_joints\"]:\n grid = axial_joint[\"grid\"]\n axial_cartesian = joint1 + direction * grid\n joints_dict[axial_joint[\"name\"]] = axial_cartesian\n return joints_dict", "title": "" }, { "docid": "d162a8e7f986f9e8b56800168169adf9", "score": "0.6567416", "text": "def get_params(self):\n return {'joints_actor': dict()}", "title": "" }, { "docid": "185005d67656e0bf3ccc680b1d239ebe", "score": "0.64702004", "text": "def _collect_information(self):\n for i in range(self.num_joints):\n joint = Joint(\n bc=self.bc,\n body_id=self.body_id,\n joint_index=i,\n max_force=self.max_force,\n max_velocity=self.max_velocity\n )\n self.joint_list.append(joint)\n self.joint_dict[joint.name] = joint\n if joint.controllable and not joint.name.startswith('jointfix') \\\n and not joint.name.startswith('ignore'):\n self.motor_list.append(joint)\n self.motor_dict[joint.name] = joint\n else:\n joint.disable_motor()\n link = Link(\n bc=self.bc,\n name=joint.child_link_name,\n body_id=self.body_id,\n link_index=i\n )\n self.link_list.append(link)\n self.link_dict[link.name] = link\n\n # if link matches the name of the whole robot, then take it as root\n if joint.child_link_name == self.robot_name:\n self.root_link = link\n\n if i == 0 and self.root_link is None:\n link = Link(\n bc=self.bc,\n name=self.robot_name.encode('utf-8'),\n body_id=self.body_id,\n link_index=-1\n )\n print('Root Link:', link.name) if self.debug else None\n self.root_link = link\n self.link_list.append(link)\n self.link_dict[link.name] = link", "title": "" }, { "docid": "e960a7c3528dfff58a462b8eda2881f3", "score": "0.62016517", "text": "def type_dict(self):\n self.check_joint_names()\n return dict(zip(self.joint_names, self.joint_types))", "title": "" }, { "docid": "164a5edc86f6c0c2b9078a545cf8620d", "score": "0.60796785", "text": "def join_info(self):\n return self._join_info", "title": 
"" }, { "docid": "74d76037a88835f1086953621d354a51", "score": "0.59992784", "text": "def get_dict(self):\n _dict = {\n \"linked\": self._linked,\n \"actor_id\": self._actor_id,\n \"link_type\": self._link_type,\n \"linked_activity\": self.linked_activity.get_dict(),\n \"link_weight\": self._link_weight,\n }\n if self._extra is not None:\n _dict[\"extra\"] = self._extra\n return _dict", "title": "" }, { "docid": "6d6fd251251df7a282aca6890bf97377", "score": "0.59644425", "text": "def _joints(self):\n raise NotImplementedError", "title": "" }, { "docid": "7582cfa11ad2b8802650bbc5f32a5954", "score": "0.58877295", "text": "def convert_info(self):\n subject_id= \"subject/{}\".format(self.subject_id)\n subject_name = self.subject_name\n subject_dict ={\n \"id\" :subject_id,\n \"nodeName\": subject_name,\n \"type\": \"subject\"\n }\n\n return subject_dict", "title": "" }, { "docid": "ebf87994ec33aa3784a585bae4c38bc3", "score": "0.58164215", "text": "def __dict__(self):\n return {\n \"join\": self.join, \"spectate\": self.spectate, \"match\": self.match\n }", "title": "" }, { "docid": "c850eb189582904fa959c1c2b566b0ff", "score": "0.58135253", "text": "def joint_values(self):\n return self._joint_values", "title": "" }, { "docid": "aa03363b850c7feda750c450dc817a40", "score": "0.5809639", "text": "def joint_names(self):\n return self._joint_names[self.name]", "title": "" }, { "docid": "8bbe4f0f8379dcbcf38c43167191872d", "score": "0.57585543", "text": "def get_joint_arrays(id):\n # initialize the dictionaries:\n r={}\n p={}\n real_w = {}\n real_list = []\n proj_list = []\n conf_list =[]\n for key in handler.keys():\n r[key] = skel_cap.get_joint_position(id,handler[key])# -> [str,str,str,float]\n # Convert to projective\n p[key] = convert2projective(r[key])\n # Convert the data in the original dictonary to format\n real_w[key] = [ float(r[key].point[0]),float(r[key].point[1]),\n float(r[key].point[2])]#,r[key].confidence]\n #convert to list\n conf_list.append(r[key].confidence)\n proj_list.append(p[key])\n real_list.append(real_w[key])\n # convert to array\n confidences = (np.array(conf_list)).reshape(15,1)\n proj_coords = (np.array(proj_list)).reshape(15,3)\n real_coords = (np.array(real_list)).reshape(15,3)\n return proj_coords, real_coords, confidences", "title": "" }, { "docid": "c364bc4a80fc86465c9f3b39f195e0de", "score": "0.57168806", "text": "def info(self):\n info = None\n\n if self.is_relation:\n pairs = self._property.local_remote_pairs\n if len(pairs):\n for pair in reversed(pairs):\n for col in pair:\n if col.table in self._property.parent.tables and not col.primary_key:\n return getattr(col, 'info', None)\n elif col.table in self._property.mapper.tables:\n if col.primary_key:\n if self._property.direction == MANYTOMANY:\n return getattr(col, 'info', None)\n else:\n parent_info = getattr(col, 'info', {})\n info = {}\n for k, v in parent_info.items():\n if k.startswith('backref_'):\n info[k[8:]] = v\n return info\n else:\n try:\n col = getattr(self.model.__table__.c, self.key)\n except AttributeError:\n return {}\n else:\n return getattr(col, 'info', None)\n return {}", "title": "" }, { "docid": "2284e7e650736db7d2d846565a97849b", "score": "0.5706495", "text": "def determine_joint_names(self):\n self.joint_names = joint_names(self.prefix)", "title": "" }, { "docid": "6603563f3cc8cbf32480f6268ac52d21", "score": "0.56856775", "text": "def get_joint_values(self):\n return self.group.get_current_joint_values()", "title": "" }, { "docid": "74bcd4be1ff7f1eebe1e8e813d4eff60", "score": "0.5661835", 
"text": "def joint_names(self):\n return self.arm.joint_names()", "title": "" }, { "docid": "422da9e97ba4553ecee07573b274664e", "score": "0.56460065", "text": "def Joints(self):\r\n self.link.check_connection()\r\n command = 'G_Thetas'\r\n self.link.send_line(command)\r\n self.link.send_item(self)\r\n joints = self.link.rec_array()\r\n self.link.check_status()\r\n return joints", "title": "" }, { "docid": "555b1d76c90032faf0457132e115a125", "score": "0.5609154", "text": "def joint_names(self):\n return self._joint_names", "title": "" }, { "docid": "555b1d76c90032faf0457132e115a125", "score": "0.5609154", "text": "def joint_names(self):\n return self._joint_names", "title": "" }, { "docid": "9a99b5ba3cc85cf93b2503acfd5dbb8d", "score": "0.5587697", "text": "def get_protein_dict(self):\n protein_dict = {\"Protein_name\": [self.name], \"CAZy_family\": [self.family]}\n\n if len(self.ec) == 0:\n protein_dict[\"EC#\"] = [np.nan]\n elif len(self.ec) == 1:\n protein_dict[\"EC#\"] = self.ec\n else:\n ec_string = \"\\n\".join(self.ec)\n protein_dict[\"EC#\"] = [ec_string]\n\n protein_dict[\"Source_organism\"] = [self.source]\n\n if type(self.links) is dict:\n for database in [\"GenBank\", \"UniProt\", \"PDB/3D\"]:\n try:\n if len(self.links[database]) == 1:\n protein_dict[database] = self.links[database]\n else:\n accession_string = \",\\n\".join(self.links[database])\n protein_dict[database] = [accession_string]\n except KeyError:\n protein_dict[database] = [np.nan]\n else:\n for database in [\"GenBank\", \"UniProt\", \"PDB/3D\"]:\n protein_dict[database] = [np.nan]\n return protein_dict", "title": "" }, { "docid": "79c27084874bf203883c36472a5f414a", "score": "0.55684775", "text": "def relationship_dicts() -> dict:\n parent = {\n 'VariationParent': {\n 'Identifiers': {\n 'MarketplaceASIN': {\n 'ASIN': {\n 'value': 'TickleStick'\n }\n }\n }\n }\n\n }\n\n children = {\n 'VariationChildren': [{\n 'Identifiers': {\n 'MarketplaceASIN': {\n 'ASIN': {\n 'value': 'TickleStick'\n }\n }\n }\n },\n {\n 'Identifiers': {\n 'MarketplaceASIN': {\n 'ASIN': {\n 'value': 'Geni'\n }\n }\n }\n }]\n\n }\n\n return {'parent': parent, 'children': children}", "title": "" }, { "docid": "01166a642a57859af8b80d3a9f9a82ad", "score": "0.5558503", "text": "def joint_names(self) -> List[str]:\n return [jnt.name for jnt in self.urdf.joints]", "title": "" }, { "docid": "1d3397d8b970d055867e3888fffbeaed", "score": "0.5551889", "text": "def get_info(self) -> _InfoDict:", "title": "" }, { "docid": "1d3397d8b970d055867e3888fffbeaed", "score": "0.5551889", "text": "def get_info(self) -> _InfoDict:", "title": "" }, { "docid": "1d3397d8b970d055867e3888fffbeaed", "score": "0.5551889", "text": "def get_info(self) -> _InfoDict:", "title": "" }, { "docid": "23d734b780a86aa41084bf906b315da8", "score": "0.55495477", "text": "def get_attributes(self):\n retdict = {}\n if self.name:\n retdict['name'] = self.name\n if self.rule:\n retdict['rule'] = self.rule\n retdict['id'] = str(self.id)\n retdict['junction'] = str(self.road_type)\n retdict['length'] = str(self.planview.get_total_length())\n return retdict", "title": "" }, { "docid": "d88c62900d967d80d222acf24ac36f2d", "score": "0.5528877", "text": "def get_organism_dictionary():\n\n \"\"\"Assume this means dictionaries within a dictionary\"\"\"", "title": "" }, { "docid": "9b7488a67b7f25c85f651a0e11fd43eb", "score": "0.552584", "text": "def _get_info(self):\n\n return {}", "title": "" }, { "docid": "333ef79d7258a4f4a806ba0b3d9e7b69", "score": "0.5476746", "text": "def update_joints(self):\n\n 
self.primary_joints = {\n 'hip': self.hip,\n 'shoulder': self.shoulder,\n 'right_elbow': (self.shoulder[0] + 85, self.right_elbow_pos),\n 'left_elbow': (self.shoulder[0] - 85, self.left_elbow_pos),\n 'right_knee': (self.hip[0] * 1.25, self.hip[1] * self.right_knee_pos),\n 'left_knee': (self.hip[0] * 0.75, self.hip[1] * self.left_knee_pos),\n }\n\n\n self.secondary_joints = {\n 'right_hand': (self.primary_joints['right_elbow'][0] + 20, self.primary_joints['right_elbow'][1] + self.right_hand_pos),\n 'left_hand': (self.primary_joints['left_elbow'][0] - 20, self.primary_joints['left_elbow'][1] + self.left_hand_pos),\n 'right_foot': (self.primary_joints['right_knee'][0] + 10, self.primary_joints['right_knee'][1] + self.right_foot_pos),\n 'left_foot': (self.primary_joints['left_knee'][0] - 10, self.primary_joints['left_knee'][1] + self.left_foot_pos),\n }", "title": "" }, { "docid": "b96c7bb98c9691b667aaf4f75cbdfd3e", "score": "0.5447122", "text": "def get_details(self):\n detail_dict = {\"platform\": self.platform,\n \"src\": self.src,\n \"channel\": self.channel,\n \"genre\": self.genre,\n \"type\": self.type,\n \"id\": self.id}\n return detail_dict", "title": "" }, { "docid": "0741286de05bf159a051cf6eae83ae59", "score": "0.54389894", "text": "def build_relationshipdict(self, white=True):\n rels = get_initial_queryset(\"Journal\").relationships().written_by(self.obj)\n if white:\n rels = rels.white()\n else:\n rels = rels.black()\n relsdict = {}\n for rel in rels:\n if rel.db_receivers_objects.all():\n name = rel.db_receivers_objects.all()[0].key.lower()\n relslist = relsdict.get(name, [])\n relslist.append(rel)\n relsdict[name] = relslist\n if white:\n self._white_relationships = relsdict\n else:\n self._black_relationships = relsdict\n return relsdict", "title": "" }, { "docid": "3ffd040974ee03fe2393fac1bb079099", "score": "0.5438322", "text": "def _read_joint_data(self, jdata):\r\n for joint in jdata.get_Joint():\r\n jdata = JointData(self.componentsdict[joint.get_AssembledComponentInstanceID()],\r\n [self.componentsdict[constrainedtocomp.get_ComponentInstanceID()] for constrainedtocomp in\r\n joint.get_ConstrainedToComponents().get_ConstrainedToComponent()],\r\n joint.get_Type())\r\n jdata.location = Vec3(\r\n [float(x) for x in joint.get_GlobalCoordinateSystem().get_Location().get_ArrayValue().split(\";\")])\r\n jdata.orientation = Vec3(\r\n [float(x) for x in joint.get_GlobalCoordinateSystem().get_Orientation().get_ArrayValue().split(\";\")])\r\n jdata.id = joint.get_ID()\r\n self.joints.append(jdata)", "title": "" }, { "docid": "69e439fd21fb96a89f6767d31e514dc4", "score": "0.5434439", "text": "def as_linkage(self):\n return {'data': {'type': self.type_, 'id': self.id_}}", "title": "" }, { "docid": "8854b6a2e28dc2239ef6c89215789e28", "score": "0.5419667", "text": "async def get_dict(self) -> dict:\n return {\n \"booster\": self.booster.id,\n \"target\": self.target.id\n }", "title": "" }, { "docid": "dc51fa9c378a7f565542675c4b800afc", "score": "0.54082257", "text": "def joint_community(self):\n return self._joint_community", "title": "" }, { "docid": "e622cd8cf777aee1ac6f8e8b3162b1dc", "score": "0.53909075", "text": "def to_joint_trajectory_msg(self):\n joint_trajectory_msg = trajectory_msg.JointTrajectory()\n\n joint_trajectory_msg.joint_names = [joint.name for joint in self.joints]\n\n timestamps = self.get_unique_timestamps()\n for timestamp in timestamps:\n joint_trajectory_point = trajectory_msg.JointTrajectoryPoint()\n joint_trajectory_point.time_from_start = 
rospy.Duration.from_sec(timestamp)\n\n for joint in self.joints:\n interpolated_setpoint = joint.get_interpolated_setpoint(timestamp)\n if interpolated_setpoint.time != timestamp:\n rospy.logwarn('Time mismatch in joint {jn} at timestamp {ts}, '\n 'got time {ti}'.format(jn=joint.name, ts=timestamp, ti=interpolated_setpoint.time))\n\n joint_trajectory_point.positions.append(interpolated_setpoint.position)\n joint_trajectory_point.velocities.append(interpolated_setpoint.velocity)\n\n joint_trajectory_msg.points.append(joint_trajectory_point)\n\n return joint_trajectory_msg", "title": "" }, { "docid": "66846692a88a4e49bb8b536def65a373", "score": "0.53876406", "text": "def joint(*args, **kwargs):\n pass", "title": "" }, { "docid": "fe870ca61b0dc04e2448539db7fb5039", "score": "0.5379347", "text": "def to_dict(self):\n to_dict = dict()\n to_dict['id'] = self.id\n if self.disqualified:\n to_dict['disqualified'] = self.disqualified\n to_dict['summit'] = self.summit.id\n to_dict['saddle'] = self.saddle.id\n return to_dict", "title": "" }, { "docid": "62756e38bc589f3396ae11139fa3cca0", "score": "0.53661394", "text": "def to_dict(self) -> Dict[str, Any]:\n return {\n \"satisfied\": self.satisfied,\n \"satisfied_at\": self.satisfied_at,\n \"annotation\": self.annotation.to_dict(),\n \"children\": [c.to_dict() for c in self.children],\n }", "title": "" }, { "docid": "80bd23b32f6f567dc218861795c06a1b", "score": "0.5366131", "text": "def serialize(self):\n error, resp = RelationshipStatus().get_most_recent(self.id_relacion)\n return {\n 'id': self.id_relacion,\n 'usuario': self.usuario.correo_electronico,\n 'amigo': self.usuario_amigo.correo_electronico,\n 'estado': resp.estado_solicitud.estado_solicitud\n }", "title": "" }, { "docid": "180ec04ff95986b4aadab99032a23731", "score": "0.536113", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'translation') and self.translation is not None:\n _dict['translation'] = self.translation\n if hasattr(self, 'part_of_speech') and self.part_of_speech is not None:\n _dict['part_of_speech'] = self.part_of_speech\n return _dict", "title": "" }, { "docid": "eeeaa94a3cbfe481786809b7bf9f6971", "score": "0.5354963", "text": "def to_dict(self):\n\n info_dict = {'p1': self.p1, 'p2': self.p2,\n 'surface': self.surface.name,\n 'tournament_name': self.tournament_name,\n 'date': self.date}\n\n # Flatten additional info if it exists\n if self.additional_info is not None:\n info_dict.update(flatten_nested_dict(self.additional_info, ''))\n\n if self.tournament_round is not None:\n info_dict['round'] = self.tournament_round.name\n else:\n info_dict['round'] = None\n\n return info_dict", "title": "" }, { "docid": "7b8b5b1a21b4625d18172b7c7dd58124", "score": "0.53501695", "text": "def get_info(self, observation):\n info = {}\n return info", "title": "" }, { "docid": "2625b54efc021486b16108dcc3a7871d", "score": "0.53472024", "text": "def __getitem__(self, joint_id):\n return self._joints[joint_id]", "title": "" }, { "docid": "bb9e51f481a0c1fdf185d5ba00436f6f", "score": "0.53373337", "text": "def __iter__(self):\n return self._joints.itervalues()", "title": "" }, { "docid": "0be0acf4d25270f2a774ba831066743a", "score": "0.5335678", "text": "def GetJoin(self):", "title": "" }, { "docid": "eed4fe1279a8a109a279efaa60af282b", "score": "0.5335058", "text": "def get_info(self):\n return {}", "title": "" }, { "docid": "c96437a799aeb6b76c44f51a3a245242", "score": "0.5334832", "text": "def serialize(self):\n return {\n 'pk' : self.pk,\n 'tax_id': self.tax_id,\n # This is an example 
how to deal with Many2Many relations\n 'description' : self.description,\n 'Org_name':self.Org_name\n }", "title": "" }, { "docid": "137341abce9bd73f2f5957f971b9cf5f", "score": "0.5334062", "text": "def get_info(self):\n return {\n 'name': self.name,\n 'actions': self.actions,\n 'gamma': self.gamma,\n 'gravity': self.gravity,\n 'masscart': self.masscart,\n 'masspole': self.masspole,\n 'total_mass': self.total_mass,\n 'length': self.length,\n 'polemass_length': self.polemass_length,\n 'force_mag': self.force_mag,\n 'tau': self.tau,\n 'kinematics_integrator': self.kinematics_integrator,\n 'theta_threshold_radians': self.theta_threshold_radians,\n 'x_threshold': self.x_threshold\n }", "title": "" }, { "docid": "30dae037fcc072ed4af41197d5ef0c49", "score": "0.53264934", "text": "def _prepare_info_dict(self, joint_agent_action_info, mdp_infos):\n # Get the agent action info, that could contain info about action probs, or other\n # custom user defined information\n env_info = {\n \"agent_infos\": [\n joint_agent_action_info[agent_idx]\n for agent_idx in range(self.mdp.num_players)\n ]\n }\n # TODO: This can be further simplified by having all the mdp_infos copied over to the env_infos automatically\n env_info[\"sparse_r_by_agent\"] = mdp_infos[\"sparse_reward_by_agent\"]\n env_info[\"shaped_r_by_agent\"] = mdp_infos[\"shaped_reward_by_agent\"]\n env_info[\"phi_s\"] = (\n mdp_infos[\"phi_s\"] if \"phi_s\" in mdp_infos else None\n )\n env_info[\"phi_s_prime\"] = (\n mdp_infos[\"phi_s_prime\"] if \"phi_s_prime\" in mdp_infos else None\n )\n return env_info", "title": "" }, { "docid": "ea1cca6174d417a90e294ca23d456d6d", "score": "0.53198475", "text": "def _create_info_dictionary(self, obs):\n return {}", "title": "" }, { "docid": "cdf56b2892cf33d8bfb071bda2a4c9ff", "score": "0.53118724", "text": "def _dictme(self):\r\n\r\n return {\"name\": self._name,\r\n \"stats\": self._stats,\r\n \"lvl\": self._lvl,\r\n \"race\": str(self._race),\r\n \"char_class\": str(self._chclass),\r\n \"background\": self._background,\r\n \"profs\": list(self._profs.keys())\r\n }", "title": "" }, { "docid": "59e600d4e5c6e37364919daa4b63313b", "score": "0.5302283", "text": "def jointgroup(self):\n return util.buf_to_npy(self._ptr.contents.jointgroup, (6,))", "title": "" }, { "docid": "3383d182b5f63bfa92512bd39f5768b8", "score": "0.52996635", "text": "def getJoinDict(url,referenceSetId=0):\n\tdef defaultList():\n\t\treturn defaultdict(list)\n\tjoinDict=defaultdict(defaultList)\n\turl+='/joins/search'\n\treq={\n \"length\": None, \n \"pageSize\": 100, \n \"pageToken\": '0', \n \"referenceSetId\": str(referenceSetId), \n \"sequenceId\": '', \n \"start\": None, \n \"strand\": None, \n \"variantSetId\": ''\n\t}\n\tnextPageToken=True\n\theader={'Content-Type':'application/json'}\n\twhile nextPageToken:\n\t\tres=requests.post(url,data=json.dumps(req),headers=header)\n\t\tthePage=json.loads(res.text)\n\t\tnextPageToken=thePage['nextPageToken']\n\t\treq['pageToken']=nextPageToken\n\t\tjoins=thePage['joins']\n\t\tfor join in joins:\n\t\t\tside1=join['side1']\n\t\t\tside2=join['side2']\n\t\t\tbase1=side1['base']\n\t\t\tbase2=side2['base']\n\t\t\tseq1=base1['sequenceId']\n\t\t\tseq2=base2['sequenceId']\n\t\t\tpos1=int(base1['position'])\n\t\t\tpos2=int(base2['position'])\n\t\t\tstrand1=side1['strand']\n\t\t\tstrand2=side2['strand']\n\t\t\tjoinDict[seq1][(pos1,strand1)].append((seq2,pos2,strand2))\n\t\t\tjoinDict[seq2][(pos2,strand2)].append((seq1,pos1,strand1))\n\treturn joinDict", "title": "" }, { "docid": 
"11a1ab9901e6f822f89f4ea6d81d182e", "score": "0.52772593", "text": "def get_dictionary(self):\n settlement_dict = {}\n settlement_dict[\"road_id\"] = self.id\n settlement_dict[\"road_row\"] = self.location[0]\n settlement_dict[\"road_column\"] = self.location[1]\n settlement_dict[\"road_color\"] = self.color\n if self.ownedBy is not None:\n settlement_dict[\"road_ownedBy\"] = self.ownedBy.id\n if self.attached_settlement is not None:\n settlement_dict[\"attached_settlement\"] = self.attached_settlement.id\n return settlement_dict", "title": "" }, { "docid": "877c5a7bcef4b374b3b83b6d23344987", "score": "0.52753085", "text": "def final_position(self):\n return {joint.name: joint.setpoints[-1].position for joint in self.joints}", "title": "" }, { "docid": "c4a448f6964950f9f7fe7d4b4d0af700", "score": "0.5273507", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'external_id') and self.external_id is not None:\n _dict['externalId'] = self.external_id\n if hasattr(self, 'document_id') and self.document_id is not None:\n _dict['documentId'] = self.document_id\n if hasattr(self, 'parent_document_id') and self.parent_document_id is not None:\n _dict['parentDocumentId'] = self.parent_document_id\n if hasattr(self, 'publication_name') and self.publication_name is not None:\n _dict['publicationName'] = self.publication_name\n if hasattr(self, 'publication_date') and self.publication_date is not None:\n _dict['publicationDate'] = self.publication_date\n if hasattr(self, 'publication_url') and self.publication_url is not None:\n _dict['publicationURL'] = self.publication_url\n if hasattr(self, 'authors') and self.authors is not None:\n _dict['authors'] = self.authors\n if hasattr(self, 'title') and self.title is not None:\n _dict['title'] = self.title\n if hasattr(self, 'medline_license') and self.medline_license is not None:\n _dict['medlineLicense'] = self.medline_license\n if hasattr(self, 'href_pub_med') and self.href_pub_med is not None:\n _dict['hrefPubMed'] = self.href_pub_med\n if hasattr(self, 'pdf_url') and self.pdf_url is not None:\n _dict['pdfUrl'] = self.pdf_url\n if hasattr(self, 'reference_url') and self.reference_url is not None:\n _dict['referenceUrl'] = self.reference_url\n if hasattr(self, 'highlighted_title') and self.highlighted_title is not None:\n _dict['highlightedTitle'] = self.highlighted_title\n if hasattr(self, 'highlighted_abstract') and self.highlighted_abstract is not None:\n _dict['highlightedAbstract'] = self.highlighted_abstract\n if hasattr(self, 'highlighted_body') and self.highlighted_body is not None:\n _dict['highlightedBody'] = self.highlighted_body\n if hasattr(self, 'highlighted_sections') and self.highlighted_sections is not None:\n _dict['highlightedSections'] = ({k : v._to_dict() for k, v in\n self.highlighted_sections.items()})\n if hasattr(self, 'passages') and self.passages is not None:\n _dict['passages'] = self.passages\n if hasattr(self, 'annotations') and self.annotations is not None:\n _dict['annotations'] = {k : v._to_dict() for k, v in self.annotations.items()}\n return _dict", "title": "" }, { "docid": "465637611f9f736ae0bae5fedbe10ea2", "score": "0.526984", "text": "def get_joint_name(self):\n return self.joint_name", "title": "" }, { "docid": "6188f2df0044d5a4cc24ca8e1a020357", "score": "0.5267064", "text": "def classifier_info(self):\n return {\n 'classname': self.classname,\n 'subject': self.subject,\n 'roc': self.roc,\n 'auc': self.auc,\n 'params': json.dumps(self.params),\n }", "title": "" }, { "docid": 
"17a549efbcdd1c126c61b0a153d3dc9a", "score": "0.5262656", "text": "def json_data(self):\n return {\n \"type\": self.type,\n \"recruiter\": self.recruiter_id,\n \"assignment_id\": self.assignment_id,\n \"hit_id\": self.hit_id,\n \"mode\": self.mode,\n \"end_time\": self.end_time,\n \"base_pay\": self.base_pay,\n \"bonus\": self.bonus,\n \"status\": self.status,\n \"object_type\": \"Participant\",\n \"entry_information\": self.entry_information,\n }", "title": "" }, { "docid": "0a1bcf4f5689b5319c73539510f330f6", "score": "0.52610385", "text": "def _known_persons(self, home_id: str) -> dict[str, dict]:\n return {pid: p for pid, p in self.persons[home_id].items() if \"pseudo\" in p}", "title": "" }, { "docid": "291ec2704b719fa5d2a789e1908d5f14", "score": "0.52527004", "text": "def JointAccount(self):\n return super(FContactRegulatoryInfo, self).JointAccount()", "title": "" }, { "docid": "36e7ac51947e604798085b8121f1b1e5", "score": "0.52463794", "text": "def joint_array_to_dict(vel_torque_array, limb):\n\n return dict(itertools.izip(limb.joint_names(), vel_torque_array))", "title": "" }, { "docid": "4784ff9bee5067a8d2ed7a9a2db52780", "score": "0.5244284", "text": "def serialize(self):\n\t\treturn {\n\t\t\t'notice_id' : self.notice_id,\n\t\t\t'timestamp' : self.timestamp,\n\t\t\t'message' : self.message,\n\t\t\t'c_id' : self.c_id\n\t\t\t# This is an example how to deal with Many2Many relations\n\t\t#\t'many2many' : self.serialize_many2many\n\t\t}", "title": "" }, { "docid": "76632e4065eb6cf729e4027ab411c0a4", "score": "0.524409", "text": "def convert_book_info(self):\n\n book_id = \"book/{}\".format(self.books.book_id)\n book_title = self.books.title\n subjects = self.books.subjects\n print(subjects)\n book_dict ={\n \"id\" :book_id,\n \"nodeName\": book_title,\n \"type\": \"book\",\n \"subjects\": []\n }\n\n return book_dict", "title": "" }, { "docid": "408654f17dd7687231850c185f010b17", "score": "0.5231306", "text": "def qtrobot_joints_callback(self, msg):\n self.qtrobot_joint_dictionary = dict(zip(msg.name, msg.position))", "title": "" }, { "docid": "69ea2d6b02948daa38a8b27d26d2d257", "score": "0.52244484", "text": "def get_additional_info(self) -> dict:\n return {}", "title": "" }, { "docid": "03a1f4a5e018277f9927273afc5b5107", "score": "0.5223652", "text": "def get_info(self):\n info = {\n 'name': self.name,\n 'model': self.model,\n 'syringe_diameter': self.diameter,\n 'rate': self.rate\n }\n return info", "title": "" }, { "docid": "23bf576946779dc34ff7fc6faffeabd4", "score": "0.5217067", "text": "def to_dict(self):\n rep = super(Person, self).to_dict()\n rep.update({\n 'identifier': self.get('identifier', None),\n 'name': self.name,\n 'email': self.email,\n })\n return rep", "title": "" }, { "docid": "f929e67261ad6167ac05e9c7bc1905c4", "score": "0.5217003", "text": "def serialize(self):\n\t\treturn {\n\t\t\t'enrolls_id' : self.enrolls_id,\n\t\t\t'student_id' : self.student_id,\n\t\t\t'course_id' : self.course_id\n\t\t\t# This is an example how to deal with Many2Many relations\n\t\t\t#\t'many2many' : self.serialize_many2many\n\t\t}", "title": "" }, { "docid": "83f39aeb2d2e9ce694ff89d2923ac8c0", "score": "0.5211111", "text": "def _extract_info(self, infos, domain):\n ids = []\n residues = infos[domain]['residues']\n # the residues are necessarily in the same chain, so :\n chain = residues[0].get_parent().get_id()\n for residue in residues:\n number = str(residue.get_id()[1])\n if number not in ids:\n ids.append(number)\n return {'chain': chain, 'ids': ids}", "title": "" }, { "docid": 
"5638ea690f583d18769c933ac7a6787e", "score": "0.5210504", "text": "def joint_type(self):\n return self._joint_type", "title": "" }, { "docid": "77c283cf4889c65cb2ba9e57fb0e4255", "score": "0.5210123", "text": "def getTransformationMatrices(self):\n transformationMatrices = {}\n # TODO modify from here\n # Hint: the output should be a dictionary with joint names as keys and\n # their corresponding homogeneous transformation matrices as values.\n return transformationMatrices", "title": "" }, { "docid": "172f110a538b236726848b9e5910a6fa", "score": "0.52030563", "text": "def info(self):\n if self.name == \"\":\n return {}\n else:\n return {\"name\" : self.name}", "title": "" }, { "docid": "944b0d031eb5b090d84d288a43fd888f", "score": "0.5201102", "text": "def get_info(message):\n try:\n from_info = message.get('From', 'No sender provided')\n to_info = message.get('To', 'No recipient provided')\n date_info = message.get('Date', 'No date provided')\n subj_info = message.get('Subject', 'No subject provided')\n except:\n print('Didnt provide correct email object instance')\n raise\n return {'From':from_info, 'To':to_info ,'Date':date_info, 'Subject':subj_info }", "title": "" }, { "docid": "fd0322522c50de936c9cddec8f3effa3", "score": "0.5191566", "text": "def _to_dict(self):\n _dict = {}\n if hasattr(self, 'qualifier_id') and self.qualifier_id is not None:\n _dict['qualifier_id'] = self.qualifier_id\n if hasattr(self, 'qualifier_name') and self.qualifier_name is not None:\n _dict['qualifier_name'] = self.qualifier_name\n return _dict", "title": "" }, { "docid": "bd0b5d04fffbef9a480349cc3ae6e8b9", "score": "0.51772", "text": "def export_relationship_json(rel: Relationship) -> Dict:\n return {\"node_a\": rel.node_a.node_id, \"relationship_type\": rel.relationship_type, \"node_b\": rel.node_b.node_id,\n \"properties\": rel.properties}", "title": "" }, { "docid": "8cb66cb5ce4edafaac5658a651f776d0", "score": "0.5176936", "text": "def get_entity(self):\n if self.sender_id:\n return {'user_id': self.sender_id}\n return {'name': self.name}", "title": "" }, { "docid": "e13878f3958f0dcdf4e1f9f609f5188d", "score": "0.5172545", "text": "def get_breach_details():", "title": "" }, { "docid": "1528e1cebce18657d064dd15d1102a71", "score": "0.5170092", "text": "def getInfo(self):\n return {}", "title": "" }, { "docid": "038c5cc0433079592e5d407b5d2b02c5", "score": "0.51697385", "text": "def serialize(self):\n return {\n 'id' : self.id,\n 'sender_id' : self.sender_id,\n 'recipient_id' : self.recipient_id,\n 'sender_first_name' : self.sender_first_name,\n 'sender_last_name' : self.sender_last_name,\n 'sender_picture' : self.sender_picture,\n 'recipient_first_name' : self.recipient_first_name,\n 'recipient_last_name' : self.recipient_last_name,\n 'sent_status' : self.sent_status,\n 'message' : self.message,\n 'time_sent' : self.time_sent,\n 'conversation_id' : self.conversation_id\n \n }", "title": "" }, { "docid": "3f8a7c8bceb17dd4e3291df451b9deb2", "score": "0.5168953", "text": "def to_json_dict(self) -> dict:\r\n\r\n return {'id_time' : self.id_tine, 'id_user' : self.id_user,\r\n 'access_mode' : self.access_mode,\r\n 'attributes' : self.attributes.to_json_dict(),\r\n 'attachments' : self.attachments.to_json_dict(),\r\n 'is_finished' : self.is_finished}", "title": "" }, { "docid": "fa6ebf8bae06447ffa6ea3239ba4af26", "score": "0.51628435", "text": "def get_attributes(self):\n retdict = {}\n retdict['entityRef'] = self.target\n retdict['freespace'] = convert_bool(self.freespace)\n retdict['continuous'] = 
convert_bool(self.continuous)\n if self.distance:\n retdict['distance'] = str(self.distance)\n return retdict", "title": "" }, { "docid": "fde6c1f6846b9ca827eb5aeb430fab00", "score": "0.5162007", "text": "def relations(relate_soup):\n lis = []\n for r in relate_soup.find_all('tr'):\n relation = {}\n types = r.find_all('td')\n \n if len(types)==4:\n relation['name'] = types[0].text.strip()\n relation['type'] = types[1].text.strip()\n relation['year'] = types[2].text.strip()\n relation['location'] = types[3].text.strip()\n lis.append(relation)\n return lis", "title": "" }, { "docid": "bc62631ebfda8c39c1b193cab8d05744", "score": "0.5161577", "text": "def joints(self) -> npt.NDArray[np.float64]:\n return self._joints", "title": "" }, { "docid": "4930f6531a8b6972f7d10f204a6613d3", "score": "0.5157041", "text": "def _get_join_desired(self):\n return self.__join_desired", "title": "" }, { "docid": "ed5c2a7fb771688ca0d40e9b350b22c0", "score": "0.51545626", "text": "def get_details(self):\n return {}", "title": "" }, { "docid": "80c3b33460f841b67917bb39f0ac6847", "score": "0.51529557", "text": "def _to_dict(self):\n return {\n \"hookup_type\": self.hookup_type,\n \"corr_index\": self.corr_index,\n \"all_pols\": self.all_pols,\n \"redirect_part_types\": self.redirect_part_types,\n \"single_pol_labeled_parts\": self.single_pol_labeled_parts,\n \"full_connection_path\": self.full_connection_path,\n }", "title": "" }, { "docid": "33f251c444deef95f8c56e70480cdb95", "score": "0.51513135", "text": "def get_joint_index(self):\n return self.joint_index", "title": "" }, { "docid": "61f634d387369d7de08c709432ec9091", "score": "0.51467377", "text": "def get_dict(self):\n para_dict = {\n \"id\": self.id,\n \"news_id\": self.news_id,\n \"type\": self.type,\n \"title\": self.title,\n \"publish_time\": self.publish_time,\n \"content\": self.content,\n \"img_name\": self.img_name,\n \"audio_name\": self.audio_name,\n \"has_img\": self.has_img,\n \"has_audio\": self.has_audio,\n \"dangos\": self.dangos\n }\n\n return para_dict", "title": "" }, { "docid": "7523395f3c565d7ff9dd7b0af96d1f81", "score": "0.5145694", "text": "def get_infos(self):\n infos = dict()\n infos[\"dataset\"] = self.dataset_name\n infos[\"task\"] = \"harmony_sep\"\n infos[\"licenses\"] = [mdb_license]\n return infos", "title": "" }, { "docid": "ec2c0ebe6d69b174f1ad4bc536314b19", "score": "0.5141187", "text": "def dictionary(self):\n return {\n \"name\": self.name, \"agency\": self.agency, \"mission\": self.type_of_mission,\n \"year_launched\": self.year_launched}", "title": "" }, { "docid": "cd24214198d90e08997c285f8bc68664", "score": "0.5140088", "text": "def get_dict():", "title": "" }, { "docid": "aef4509c432c822f7a228247d67af651", "score": "0.51353806", "text": "def as_linkage(self):\n return {'data': None}", "title": "" }, { "docid": "6a31e9f29c1ff0de423dc8f9dc445027", "score": "0.5133731", "text": "def convert_info(self):\n\n author_dict = {\n \"id\" : \"author/{}\".format(self.author_id),\n \"nodeName\": self.name,\n \"type\": \"author\"\n }\n\n return author_dict", "title": "" }, { "docid": "2d7f55b5586cb0728c9abb6a8e63a17a", "score": "0.5127704", "text": "def get_info(self):\n ret = {}\n ret[\"isSequence\"] = self.isSequence\n ret[\"maxSequenceLength\"] = self.maxSequenceLength\n ret[\"avgSequenceLength\"] = self.avgSequenceLength\n ret[\"nAttrs\"] = self.nAttrs\n ret[\"nFeatures\"] = self.nFeatures\n ret[\"nInstances\"] = self.nInstances\n ret[\"targetType\"] = self.targetType\n ret[\"nClasses\"] = self.nClasses\n ret[\"targetClasses\"] 
= self.targetClasses\n ret[\"features\"] = self.features\n ret[\"target\"] = self.target\n return ret", "title": "" }, { "docid": "b79ee125065e6369b6a364f27a592dc2", "score": "0.5127179", "text": "def print_information(self) -> None:\n print('---' * 20)\n print(f' Joint name: \\t \\t{self.name}')\n print(f' Joint type: \\t \\t{self.type}')\n print(f' Joint index: \\t \\t{self.index}')\n print(f' Body index: \\t \\t{self.body_id}')\n print(f' maximum force: \\t \\t{self.max_force}')", "title": "" } ]
5d53dc63cf45f2b3db50f8910018ee32
Initialize with the column names (in a XiboEvent tuple) used in Xibo JSON responses.
[ { "docid": "4b51f78cbba9b28efcdfefb61709a950", "score": "0.58089435", "text": "def __init__(self, column_names):\n self.column_names = column_names", "title": "" } ]
[ { "docid": "b91406418d4f7587da0fc1f8a3998de4", "score": "0.5988165", "text": "def json_to_xibo_event(self, json_event):\n return XiboEvent(\n xibo_id=json_event[self.column_names.xibo_id],\n meetup_id=json_event[self.column_names.meetup_id],\n name=json_event[self.column_names.name],\n location=json_event[self.column_names.location],\n start_time=json_event[self.column_names.start_time],\n end_time=json_event[self.column_names.end_time],\n )", "title": "" }, { "docid": "9fe58cf205025650616deb803c0b3281", "score": "0.5880382", "text": "def event_to_columns(self, event):\n return {\n self.column_ids.meetup_id: event.meetup_id,\n self.column_ids.name: event.name,\n self.column_ids.location: event.location,\n self.column_ids.start_time: event.start_time,\n self.column_ids.end_time: event.end_time\n }", "title": "" }, { "docid": "6fa46088315790b24ff4a2ea42e2ae13", "score": "0.55893785", "text": "def __init__(self, metadata_columns, tag_columns):\n self.data = None\n self.metadata_columns = metadata_columns\n self.tag_columns = tag_columns", "title": "" }, { "docid": "2c93d6b4a297340b664fd4328b8b2608", "score": "0.53798157", "text": "def initColumnHeaders():", "title": "" }, { "docid": "37035d405ff17e55c796d5c2c68b99ce", "score": "0.5357916", "text": "def __init__(self, column_ids):\n self.column_ids = column_ids", "title": "" }, { "docid": "0fd4658c4d564e0ea5d8d9bc2002c9c4", "score": "0.5304528", "text": "def __init__(self, event=None):\n if event is not None:\n for key, val in list(event.__dict__.items()):\n setattr(self, key, val)", "title": "" }, { "docid": "3e001a4881aeda1663693e07ebb37473", "score": "0.52718776", "text": "def _parse_event(self, response):\n data = {\n '_type': 'event',\n 'name': self._parse_name(response),\n 'event_description': self._parse_description(response),\n 'start': self._parse_start(response),\n 'end': self._parse_end(response),\n 'all_day': self._parse_all_day(response),\n 'location': self._parse_location(response),\n 'documents': self._parse_documents(response),\n 'sources': self._parse_sources(response)\n }\n data['id'] = self._generate_id(data)\n data['status'] = self._generate_status(data)\n data['classification'] = self._parse_classification(data['name'])\n return data", "title": "" }, { "docid": "6b4e7e8e177186c328c5c8edd4974448", "score": "0.52701914", "text": "def get_event_columns(self):\n if not self.dwca_event_columns:\n self.dwca_event_columns = [\n 'id', \n 'eventID', \n 'parentEventID', \n 'type', \n 'samplingProtocol', \n 'sampleSizeValue', \n 'sampleSizeUnit', \n 'samplingEffort', \n 'eventDate', \n 'eventTime', \n 'startDayOfYear', \n 'endDayOfYear', \n 'year', \n 'month', \n 'day', \n 'verbatimEventDate', \n 'habitat', \n 'fieldNumber', \n 'fieldNotes', \n 'eventRemarks', \n 'locationID', \n 'country', \n 'countryCode', \n 'county', \n 'municipality', \n 'locality', \n 'verbatimLocality', \n 'waterBody', \n 'verbatimDepth', \n 'minimumDepthInMeters', \n 'maximumDepthInMeters', \n 'decimalLatitude', \n 'decimalLongitude', \n 'geodeticDatum', \n 'license', \n 'rightsHolder', \n 'accessRights', \n 'bibliographicCitation', \n 'references', \n 'institutionID', \n 'datasetID', \n 'institutionCode', \n 'datasetName', \n 'ownerInstitutionCode', \n 'dataGeneralizations', # (ex: aggregerad över storleksklass...)\n 'dynamicProperties', \n ]\n #\n return self.dwca_event_columns", "title": "" }, { "docid": "c52a3d711d4f061f53175be4fac982d3", "score": "0.52676564", "text": "def __create_message__(cls, row):\n message = dict()\n message['eventtype'] = 
cls.EVENT_TYPE\n message['type'] = row.pop('state', None)\n inner_obj = dict()\n for key, value in cls.DB_MAP.items():\n inner_obj[key] = row.pop(value)\n\n message[cls.INNER_OBJ] = cls.__clean_dict__(inner_obj)\n message = cls.__clean_dict__(message)\n return message", "title": "" }, { "docid": "8e2e08613fff835239210831346302ac", "score": "0.52657056", "text": "def _populate_from_database(self, row):\n\n (name, message, button, mode, \n groups, start, end, author, created_at) = row\n\n self['name'] = name\n self['message'] = message\n self['button'] = button\n self['mode'] = mode\n self['groups'] = json.loads(groups) if groups else None\n self['start'] = from_utimestamp(start)\n self['end'] = from_utimestamp(end)\n self['author'] = author\n self['created_at'] = from_utimestamp(created_at)", "title": "" }, { "docid": "3a26b03e217949446268368964e9e1d2", "score": "0.5224331", "text": "def __init__(self):\n self._map1 = {\n \"CIRC\" : self.circ_status_event,\n \"STREAM\" : self.stream_status_event,\n \"ORCONN\" : self.or_conn_status_event,\n \"STREAM_BW\" : self.stream_bw_event,\n \"BW\" : self.bandwidth_event,\n \"DEBUG\" : self.msg_event,\n \"INFO\" : self.msg_event,\n \"NOTICE\" : self.msg_event,\n \"WARN\" : self.msg_event,\n \"ERR\" : self.msg_event,\n \"NEWDESC\" : self.new_desc_event,\n \"ADDRMAP\" : self.address_mapped_event,\n \"NS\" : self.ns_event,\n \"STATUS_GENERAL\" : self.general_status_event,\n \"STATUS_CLIENT\" : self.client_status_event,\n \"STATUS_SERVER\" : self.server_status_event,\n \"ORCIRCUIT\" : self.orcircuit_event,\n \"NEWCONSENSUS\" : self.new_consensus_event,\n \"TOKEN_LEVELS\" : self.token_level_event\n }", "title": "" }, { "docid": "3b6dee8d464df1e0b99e3233fe8f77c0", "score": "0.51967037", "text": "def _init_column_names():\n mapping = {}\n names = []\n\n index = 1\n while index < 256:\n name = _colnum_string(index)\n names.append(name)\n mapping[name] = index\n index += 1\n return names, mapping", "title": "" }, { "docid": "f53e3234f705e656da34736ba81c8c76", "score": "0.5186084", "text": "def _data_events(self, object):\n\t\t# Builds an event list that gives for each event:\n\t\t# - Gramps ID\\n\"\n\t\t# - The event name\n\t\t# - The event date\n\t\t# - The event date in ISO format (sortable)\n\t\t# - The event place index (in table 'P'), -1 if none\n\t\t# - The event description\n\t\t# - The event text and notes (including event reference notes)\n\t\t# - A list of the event media index, in the form:\n\t\t# - media index (in table 'M')\n\t\t# - media thumbnail path\n\t\t# - [x1, y1, x2, y2] of the media reference\n\t\t# - notes of the media reference\n\t\t# - list of the media reference source citations index (in table 'C')\\n\"\n\t\t# - A list of the event source citations index (in table 'C')\n\t\tevent_ref_list = object.get_event_ref_list()\n\t\tif not event_ref_list: return(\"\")\n\t\trows = []\n\t\tfor event_ref in event_ref_list:\n\t\t\tif (event_ref.ref not in self.obj_dict[Event]): continue\n\t\t\tevent = self.database.get_event_from_handle(event_ref.ref)\n\t\t\tif (not event): continue\n\t\t\ttrow = \"\\t[\"\n\t\t\tevt_type = str(event.get_type())\n\t\t\tevent_role = event_ref.get_role()\n\t\t\tif (event_role != EventRoleType.PRIMARY and event_role != EventRoleType.FAMILY):\n\t\t\t\tevt_type += \" (%s)\" % event_role\n\t\t\tplace_index = -1\n\t\t\tplace_handle = event.get_place_handle()\n\t\t\tif (place_handle and (place_handle in self.obj_dict[Place])):\n\t\t\t\tplace_index = self.obj_dict[Place][place_handle][OBJDICT_INDEX]\n\t\t\tevt_desc = 
event.get_description()\n\t\t\ttrow += \"\\\"\" + self.obj_dict[Event][event_ref.ref][OBJDICT_GID] + \"\\\",\"\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_type)) + \"\\\",\"\n\t\t\tevt_date = format_date(event.get_date_object())\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_date)) + \"\\\",\"\n\t\t\tevt_date = format_date(event.get_date_object(), True)\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_date)) + \"\\\",\"\n\t\t\ttrow += str(place_index) + \",\"\n\t\t\tif (evt_desc is None): evt_desc = \"\"\n\t\t\ttrow += \"\\\"\" + script_escape(html_escape(evt_desc)) + \"\\\",\"\n\t\t\t# Get event notes\n\t\t\tnotelist = event.get_note_list()\n\t\t\tnotelist.extend(event_ref.get_note_list())\n\t\t\tattrlist = event.get_attribute_list()\n\t\t\tattrlist.extend(event_ref.get_attribute_list())\n\t\t\ttrow += \"\\\"\" + script_escape(self.get_notes_attributes_text(notelist, attrlist)) + \"\\\",\"\n\t\t\t# Get event media\n\t\t\ttrow += self._data_media_reference_index(event)\n\t\t\ttrow += \",\"\n\t\t\t# Get event sources\n\t\t\tcitationlist = event.get_citation_list()\n\t\t\tcitationlist.extend(event_ref.get_citation_list())\n\t\t\tfor attr in attrlist: citationlist.extend(attr.get_citation_list())\n\t\t\ttrow += self._data_source_citation_index_from_list(citationlist)\n\t\t\t#\n\t\t\ttrow += \"]\"\n\t\t\trows.append(trow)\n\t\treturn(\",\\n\".join(rows))", "title": "" }, { "docid": "3b907638876dee9732352a59351036d8", "score": "0.5179667", "text": "def initialize(self):\n self.__as_columns.clear()\n \n db_map = self.__aggregate.getProofInstance().getDatabaseMap(self.getDBName())\n table_map = db_map.getTable(self.__table_name)\n column_maps = table_map.getColumns()\n for column_map in column_maps:\n column_name = column_map.getFullyQualifiedName()\n self.__as_columns[column_name] = column_name\n\n self.__initialized = 1", "title": "" }, { "docid": "ff383e535b7f2d0abffb5d856f45af65", "score": "0.51698637", "text": "def json_to_column_ids(self, json_columns):\n heading_to_id_map = {\n column[\"heading\"]:\n \"dataSetColumnId_{}\".format(column[\"dataSetColumnId\"])\n for column in json_columns\n }\n heading_to_id_map[self.column_names.xibo_id] = None\n return self.json_to_xibo_event(heading_to_id_map)", "title": "" }, { "docid": "d621bf8463ee4adc4e6db807d0dd35c7", "score": "0.513667", "text": "def __init__(self, events, database, symbol_list):\n self.events = events\n self.database = database\n self.symbol_list = symbol_list\n\n self.symbol_data = {}\n self.latest_symbol_data = {}\n self.continue_backtest = True\n self.bar_index = 0\n self.all_data_dic = {} # access data in list form for testing\n\n self._open_convert_database_data()", "title": "" }, { "docid": "556411b20c977e1a15b99e65e98724f5", "score": "0.5135141", "text": "def __init__(self, source, data_column, label_columns):\n self._source = source\n self._data_column = data_column\n self._label_columns = label_columns", "title": "" }, { "docid": "c392fe47ce9273a24396ab8ec6de90b4", "score": "0.5060727", "text": "def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._label_column_name = None\n self._numeric_columns = None\n self._cat_columns = None", "title": "" }, { "docid": "fb2380ba54b7ce37ccd546b7d61f3043", "score": "0.5059268", "text": "def __init__(self, init_example, **kwargs):\n super().__init__(init_example, **kwargs)\n self.event_mat = init_example['events'][None]", "title": "" }, { "docid": "53d42a3dc49e6b2b68e839d5ae76ad56", "score": "0.5048306", "text": "def __init__(self):\n 
event_data_stream = events.EventDataStream()\n\n super(FieldFormattingHelper, self).__init__()\n self._callback_functions = {}\n self._event_data_stream_field_names = event_data_stream.GetAttributeNames()\n self._event_tag_field_names = []\n\n for field_name, callback_name in self._FIELD_FORMAT_CALLBACKS.items():\n if callback_name == '_FormatTag':\n self._event_tag_field_names.append(field_name)\n else:\n self._callback_functions[field_name] = getattr(\n self, callback_name, None)", "title": "" }, { "docid": "ef855f557aea52947c1211326cf0dbe3", "score": "0.5042969", "text": "def events_to_dataframe(events):\n columns = [\"id\", \"name\", \"source\", \"time\", \"description\"]\n data = {column: list() for column in columns}\n for event in events:\n data[\"id\"].append(event.get_id())\n data[\"name\"].append(event.get_name())\n data[\"source\"].append(event.get_source())\n data[\"time\"].append(event.get_time())\n data[\"description\"].append(event.get_description())\n return pd.DataFrame(data)", "title": "" }, { "docid": "fb3984eba4945e2793a0d918fc8882c8", "score": "0.5035369", "text": "def extract_new_event_data(table_name, response):\n output = {}\n context = {}\n\n output['Status'] = response.get('status', '')\n output['Message'] = response.get('message', '')\n output['Incident key'] = response.get('dedup_key', '')\n\n context['Status'] = output['Status']\n context['Message'] = output['Message']\n context['incident_key'] = output['Incident key']\n\n return {\n 'Type': entryTypes['note'],\n 'Contents': response,\n 'ContentsFormat': formats['json'],\n 'ReadableContentsFormat': formats['markdown'],\n 'HumanReadable': tableToMarkdown(table_name, output),\n 'EntryContext': {\n 'PagerDuty.Event(val.incident_key==obj.dedup_key)': context,\n 'Event.ID(val.ID==obj.dedup_key)': context['incident_key']\n }\n }", "title": "" }, { "docid": "d57800ca714283669f7ae4fc0c162373", "score": "0.5033141", "text": "def __init__(self, eventdata_data):\n self.events = eventdata_data", "title": "" }, { "docid": "4f5a8ae66ba289320e896ab610cb3f26", "score": "0.50146925", "text": "def __init__(self, *args, **kwargs):\n\n args = list(args)\n name, type_ = self._extract_name_and_type(args, kwargs)\n kwargs.update(name=name, type_=self._column_type)\n\n super().__init__(*args, **kwargs)", "title": "" }, { "docid": "db3bc598712d408f6e8d19a47f7b4a61", "score": "0.50042796", "text": "def __init__(self, event_type):\n if (isinstance(event_type, str)):\n self.event_type = self.EVENT_TYPES_REVERSE[event_type]\n else:\n self.event_type = event_type\n self.err_str = None\n self.data = None", "title": "" }, { "docid": "1eac21066ef32d41680a8afe1ee61721", "score": "0.50023246", "text": "def _convert_event(self, event):\n\n doc = {\"h\": event.hostname,\n \"ts\": event.timestamp}\n if event.args:\n doc[\"fields\"] = event.args\n return doc", "title": "" }, { "docid": "3f3ea996e27cdc926f8fa7e5e4a7bb88", "score": "0.49914545", "text": "def __init__(self, columns=None):\n self._columns = {}\n self._reindex = False\n columns = columns or self._table.GetColumns()\n for c in columns:\n if not isinstance(c, schema.IndexTermsColumn):\n self._columns[c.name] = c.NewInstance()", "title": "" }, { "docid": "8d06545e910bbededd45bda6b3b5263f", "score": "0.49783707", "text": "def __init__(self, col_index=default_record_layout, *args, **kwargs):\n self.col_index = col_index", "title": "" }, { "docid": "04055d5624c222fc2ed2e7eb27282a98", "score": "0.49726656", "text": "def __init__(self, data, name):\r\n self.data = data\r\n self.name = name\r\n 
self.descriptions = event_descriptions", "title": "" }, { "docid": "f7a4da247735ed2c1f23b972cddc3209", "score": "0.49467462", "text": "def build_labeled_event_trigger(x):\n event_trigger = {\n 'id': x.trigger['id'],\n 'event_type_probs': x.event_type_probs\n }\n x['event_triggers'] = [event_trigger]\n return x", "title": "" }, { "docid": "f531560851f0ef163a5892cfc8d307cc", "score": "0.49383092", "text": "def __init__(__self__, *,\n columns: Optional[Sequence[str]] = None):\n if columns is not None:\n pulumi.set(__self__, \"columns\", columns)", "title": "" }, { "docid": "f531560851f0ef163a5892cfc8d307cc", "score": "0.49383092", "text": "def __init__(__self__, *,\n columns: Optional[Sequence[str]] = None):\n if columns is not None:\n pulumi.set(__self__, \"columns\", columns)", "title": "" }, { "docid": "c383daf21affd89fdc7dce054daa5884", "score": "0.49186832", "text": "def __init__(self, events):\n self.events = events", "title": "" }, { "docid": "b2d41786980211e451f80abff91be16b", "score": "0.49103194", "text": "def __init__(self, *args, **kargs):\n super(_EventObjectJSONDecoder, self).__init__(\n *args, object_hook=self._ConvertDictToObject, **kargs)", "title": "" }, { "docid": "4f95d2a3a693d006b52a64e1c69128cb", "score": "0.49047938", "text": "def __init__(self, mds, fs, es, headers, name, handler_registry=None,\n handler_override=None):\n from .broker import Broker\n self.fs = fs\n db = Broker(mds, fs)\n events = db.get_events(headers, [name], fill=False)\n\n self._datum_uids = [event.data[name] for event in events\n if name in event.data]\n self._len = len(self._datum_uids)\n first_uid = self._datum_uids[0]\n if handler_override is None:\n self.handler_registry = handler_registry\n else:\n # mock a handler registry\n self.handler_registry = defaultdict(lambda: handler_override)\n with self.fs.handler_context(self.handler_registry) as fs:\n example_frame = fs.get_datum(first_uid)\n # Try to duck-type as a numpy array, but fall back as a general\n # Python object.\n try:\n self._dtype = example_frame.dtype\n except AttributeError:\n self._dtype = type(example_frame)\n try:\n self._shape = example_frame.shape\n except AttributeError:\n self._shape = None # as in, unknown", "title": "" }, { "docid": "5d54b0c36c9091b70858b0b4d825728e", "score": "0.4904374", "text": "def __init__(self, source):\n # init data from known format\n ColumnData.__init__(self, source,\n header=['onsets', 'durations', 'intensities'],\n sep=None, dtype=float)", "title": "" }, { "docid": "70018d8c4b60118ce152068c30bc8baa", "score": "0.48975858", "text": "def _parse_loxone_message(self, message):\n event_dict = {}\n if self._current_message_typ == 0:\n event_dict = message\n elif self._current_message_typ == 1:\n pass\n elif self._current_message_typ == 2:\n length = len(message)\n num = length / 24\n start = 0\n end = 24\n for _ in range(int(num)):\n packet = message[start:end]\n event_uuid = uuid.UUID(bytes_le=packet[0:16])\n fields = event_uuid.urn.replace(\"urn:uuid:\", \"\").split(\"-\")\n uuidstr = f\"{fields[0]}-{fields[1]}-{fields[2]}-{fields[3]}{fields[4]}\"\n value = unpack(\"d\", packet[16:24])[0]\n event_dict[uuidstr] = value\n start += 24\n end += 24\n elif self._current_message_typ == 3:\n start = 0\n\n def get_text(message, start, offset):\n first = start\n second = start + offset\n event_uuid = uuid.UUID(bytes_le=message[first:second])\n first += offset\n second += offset\n\n icon_uuid_fields = event_uuid.urn.replace(\"urn:uuid:\", \"\").split(\"-\")\n uuidstr = \"{}-{}-{}-{}{}\".format(\n 
icon_uuid_fields[0],\n icon_uuid_fields[1],\n icon_uuid_fields[2],\n icon_uuid_fields[3],\n icon_uuid_fields[4],\n )\n\n icon_uuid = uuid.UUID(bytes_le=message[first:second])\n icon_uuid_fields = icon_uuid.urn.replace(\"urn:uuid:\", \"\").split(\"-\")\n\n first = second\n second += 4\n\n text_length = unpack(\"<I\", message[first:second])[0]\n\n first = second\n second = first + text_length\n message_str = unpack(f\"{text_length}s\", message[first:second])[0]\n start += (floor((4 + text_length + 16 + 16 - 1) / 4) + 1) * 4\n event_dict[uuidstr] = message_str.decode(\"utf-8\")\n return start\n\n while start < len(message):\n start = get_text(message, start, 16)\n\n elif self._current_message_typ == 6:\n event_dict[\"keep_alive\"] = \"received\"\n else:\n self._current_message_typ = 7\n return event_dict", "title": "" }, { "docid": "8d8e6f19ea8d4c3df0cdaf56c19accbc", "score": "0.4883175", "text": "def __init__(self, columns: Optional[DatasetSchema] = None):\n self.columns = columns\n self.data = list()\n self.index = list()\n self.dtypes = object", "title": "" }, { "docid": "1b70ea09a4009eefa683b453933c1848", "score": "0.4880899", "text": "def reformat_events(self, columns, one_timestamp):\n temp_data = list()\n log_df = self.log.to_dict('records')\n key = 'end_timestamp' if one_timestamp else 'start_timestamp'\n log_df = sorted(log_df, key=lambda x: (x['caseid'], key))\n for key, group in itertools.groupby(log_df, key=lambda x: x['caseid']):\n trace = list(group)\n temp_dict = dict()\n for x in columns:\n serie = [y[x] for y in trace]\n if x == 'ac_index':\n serie.insert(0, self.ac_index[('start')])\n serie.append(self.ac_index[('end')])\n elif x == 'rl_index':\n serie.insert(0, self.rl_index[('start')])\n serie.append(self.rl_index[('end')])\n else:\n serie.insert(0, 0)\n serie.append(0)\n temp_dict = {**{x: serie}, **temp_dict}\n temp_dict = {**{'caseid': key}, **temp_dict}\n temp_data.append(temp_dict)\n return temp_data", "title": "" }, { "docid": "0ef843eb99825ae53c8bd5fd082c862a", "score": "0.48724988", "text": "def __init__(self, fields):\n self.field_names = (str(field) for field in fields)", "title": "" }, { "docid": "db480c616154a456f611bee81a84a694", "score": "0.48669606", "text": "def get_column_map(self, X):\n\n if not isinstance(X, pd.DataFrame):\n raise TypeError(\"X should be a pd.DataFrame\")\n\n new_columns = [\"column_\" + str(n) for n in range(0, X.shape[1])]\n\n self.column_map = dict(zip(X.columns, new_columns))", "title": "" }, { "docid": "a9ade0ce4a0363d5ed9bd36894faecfb", "score": "0.48584414", "text": "def __init__(self, data=None, annotation_metadata=None, sandbox=None):\n # TODO(ejhumphrey@nyu.edu): We may want to subclass list here to turn\n # 'data' into a special container with convenience methods to more\n # easily unpack sparse events, among other things.\n if data is None:\n data = list()\n if annotation_metadata is None:\n annotation_metadata = AnnotationMetadata()\n if sandbox is None:\n sandbox = JSONType()\n self.data = self.__parse_data__(data)\n self.annotation_metadata = AnnotationMetadata(**annotation_metadata)\n self.sandbox = JSONType(**sandbox)", "title": "" }, { "docid": "1f369e867cd0ab1e29b75590c9768216", "score": "0.4847639", "text": "def __init__(self, event_type=None, data=None):\n self._type = event_type\n self._data = data", "title": "" }, { "docid": "c0be8de13187e01ce2b046bb4273f1bd", "score": "0.4846005", "text": "def __init__(self, *args, **kwds):\n if args or kwds:\n super(InverseResponse, self).__init__(*args, **kwds)\n #message fields 
cannot be None, assign default values for those that are\n if self.event is None:\n self.event = 0\n if self.angles is None:\n self.angles = []\n else:\n self.event = 0\n self.angles = []", "title": "" }, { "docid": "5f33fde51b8bb8dd4380325af9bf169f", "score": "0.48278165", "text": "def create_deft_table_json_mappings():\n mappings = list()\n mappings.append(JsonColumnMapping(columnName=\"rownumber\", jsonPath=\"$.rownumber\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"rowguid\", jsonPath=\"$.rowguid\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdouble\", jsonPath=\"$.xdouble\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xfloat\", jsonPath=\"$.xfloat\", cslDataType=\"real\"))\n mappings.append(JsonColumnMapping(columnName=\"xbool\", jsonPath=\"$.xbool\", cslDataType=\"bool\"))\n mappings.append(JsonColumnMapping(columnName=\"xint16\", jsonPath=\"$.xint16\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint32\", jsonPath=\"$.xint32\", cslDataType=\"int\"))\n mappings.append(JsonColumnMapping(columnName=\"xint64\", jsonPath=\"$.xint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint8\", jsonPath=\"$.xuint8\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint16\", jsonPath=\"$.xuint16\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint32\", jsonPath=\"$.xuint32\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xuint64\", jsonPath=\"$.xuint64\", cslDataType=\"long\"))\n mappings.append(JsonColumnMapping(columnName=\"xdate\", jsonPath=\"$.xdate\", cslDataType=\"datetime\"))\n mappings.append(JsonColumnMapping(columnName=\"xsmalltext\", jsonPath=\"$.xsmalltext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtext\", jsonPath=\"$.xtext\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xnumberAsText\", jsonPath=\"$.xnumberAsText\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xtime\", jsonPath=\"$.xtime\", cslDataType=\"timespan\"))\n mappings.append(JsonColumnMapping(columnName=\"xtextWithNulls\", jsonPath=\"$.xtextWithNulls\", cslDataType=\"string\"))\n mappings.append(JsonColumnMapping(columnName=\"xdynamicWithNulls\", jsonPath=\"$.xdynamicWithNulls\", cslDataType=\"dynamic\"))\n return mappings", "title": "" }, { "docid": "617c29551cdaf5b44f087e0b3fea5e36", "score": "0.48263848", "text": "def get_events(sqlx, sqlx_count):\n ap = db_create_connection()\n ap.sqlx = sqlx_count\n ap.op = 'count'\n try:\n obj_count = {\"count\": -1}\n ap = db_exec_cur(ap)\n for t in ap.result:\n obj_count[\"count\"] = t[0]\n\n ap.sqlx = sqlx\n ap.op = 'get_data'\n db_exec_cur(ap)\n except Exception as e:\n ms1 = \"error reading the database\" + str(e)\n print ms1\n raise \"error reading the database\"\n\n rslt_list = convert_tuple(ap, obj_count)\n db_close_con(ap)\n return rslt_list", "title": "" }, { "docid": "d1c9d28f24d0df568c4a1f4d45236683", "score": "0.48244342", "text": "def sample_metadata(sample):\n select = [\n chart_table.currentWeekPosition,\n chart_table.previousWeekPosition,\n chart_table.peakPosition,\n chart_table.Artist,\n #chart_table.entryDate,\n #chart_table.entryPosition,\n ]\n results = db.session.query(*select).all()\n\n # dic viewing rows of columns\n sample_metadata = {}\n for result in results:\n sample_metadata[\"Current Week Position \"] = result[0]\n sample_metadata[\"Previous 
Week Position \"] = result[1]\n sample_metadata[\"Peak Position \"] = result[2]\n #sample_metadata[\" Artist \"] = result[3]\n #sample_metadata[\"entryDate\"] = result[4]\n #sample_metadata[\"entryPosition\"] = result[5]\n\n print(sample_metadata)\n return jsonify(sample_metadata)", "title": "" }, { "docid": "0a09bbd111337c1ee7acfebcf96a5562", "score": "0.48241213", "text": "def _initialize_column_rows(self):\n data_source = self.data_source_scan.data_source\n self.sql = data_source.sql_get_table_columns(self.table.table_name)\n self.sql = self.data_source_scan.scan.jinja_resolve(self.sql)\n\n self.fetchall()", "title": "" }, { "docid": "7eab99b014c879f3cffda49622fa8fb5", "score": "0.48206058", "text": "def __init__(self, event_type, data=None):\n self._type = event_type\n self._data = data", "title": "" }, { "docid": "33cadac38e158f265be7dbd289354023", "score": "0.48056328", "text": "def apigw_event():\n return [\n {\n \"headers\": {\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"X-Forwarded-For\": \"127.0.0.1, 127.0.0.2\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n },\n \"pathParameters\": {\n \"id\": \"id_01\",\n \"user_id\": \"user_01\",\n },\n \"httpMethod\": \"GET\",\n },\n {\n \"headers\": {\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"X-Forwarded-For\": \"127.0.0.1, 127.0.0.2\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n },\n \"httpMethod\": \"POST\",\n \"body\": \"{\\\"id\\\":\\\"id_02\\\", \\\"user_id\\\":\\\"user_02\\\", \\\"body\\\":\\\"message_02\\\"}\",\n },\n {\n \"headers\": {\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"X-Forwarded-For\": \"127.0.0.1, 127.0.0.2\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\"\n },\n \"pathParameters\": {\n \"id\": \"id_02\",\n \"user_id\": \"user_02\"\n },\n \"httpMethod\": \"DELETE\",\n }\n ]", "title": "" }, { "docid": "2713175732bd705462b25e799a0d162d", "score": "0.47925967", "text": "def __init__(self, **kwargs):\n super().__init__()\n self.column_name = {}\n self.column_name['water_body'] = {'internal': 'MS_CD',\n 'display': 'WATERBODY_NAME'}\n \n self.column_name['type_area'] = {'internal': 'TYPE_AREA_CODE', \n 'display': 'TYPE_AREA_NAME'}\n \n self.column_name['water_district'] = {'internal': 'WATER_DISTRICT_CODE', \n 'display': 'WATER_DISTRICT_NAME'}\n \n \n #TODO Add Parametermapping for water body names\n #TODO Map against .lower() letters \n if kwargs:\n self.load_water_body_match(**kwargs)", "title": "" }, { "docid": "c04d0b55c15c771160f04452cf8a336b", "score": "0.4789773", "text": "def __init__(self, input_json):\n self.id = input_json.get('id')\n self.tstamp = input_json.get('tstamp')\n self.schemaname = input_json.get('schemaname')\n self.operation = input_json.get('operation')\n self.who = input_json.get('who')\n self.new_val = input_json.get('new_val')\n self.old_val = input_json.get('old_val')\n self.tabname = input_json.get('tabname')", "title": "" }, { "docid": "024861791e77fa79c9d107235ff0652f", "score": "0.47877645", "text": "def __init__(__self__, *,\n events: Sequence['outputs.EventResponse'],\n state: str):\n pulumi.set(__self__, \"events\", events)\n pulumi.set(__self__, \"state\", state)", "title": "" }, { "docid": "18c1ec90dc16d1ba149322cfcafd0609", "score": "0.47723952", "text": "def __init__(self, jsondict=None):\n \n super(DiagnosticOrderItemEvent, self).__init__(jsondict)", "title": "" }, { "docid": "c907b14328d564e8a5e5416d9933b135", "score": 
"0.47677287", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def string_casting(data: any):\n \"\"\"\n Closure to cast the data to string\n :param data: any\n Column value\n :return: str\n Parsed column value\n \"\"\"\n try:\n if data:\n return str(data)\n else:\n return data\n except Exception:\n logging.error('\\n')\n logging.error('~' * 100)\n logging.exception(\"String casting exception:\")\n logging.error('~' * 100)\n raise StringParsingException(\"Column value - {} could not be casted into String.\".format(data))\n\n self.add_func(string_casting)", "title": "" }, { "docid": "7485a2f89c9dcc6fd1fc70af9689993a", "score": "0.47654918", "text": "def initEvents():\n global PLAYER, CURRENT_MAP, EVENTS, MAPS, SOUNDS\n for i in EVENTS.e:\n EVENTS[i].data[\"player\"] = PLAYER\n EVENTS[i].data[\"cmap\"] = CURRENT_MAP\n EVENTS[i].data['setter'] = setMap \n EVENTS[i].data['setChar'] = setChar\n EVENTS[i].data['exit'] = Exit\n EVENTS[i].data['sounds'] = SOUNDS\n EVENTS[i].data['globmaps'] = MAPS", "title": "" }, { "docid": "4d362755317471c977468e89767e570e", "score": "0.47465035", "text": "def parse_raw_events(self, events):\n events = json.loads(events) or None\n if events:\n events = (OmegleEvent(self.id,\n ev[0],\n None if len(ev) == 1 else ev[1])\n for ev in events)\n\n return events", "title": "" }, { "docid": "1d61d20bbc0b7199ccf2b7275e298d8a", "score": "0.47428414", "text": "def parse_events(self,response):", "title": "" }, { "docid": "220e9a6d4777dd12e19b68be74ba5a2c", "score": "0.47350973", "text": "def __init__(self, options, columns):\n super(DatadogForeignDataWrapper, self).__init__(options, columns)\n initialize(\n api_key=options['api_key'],\n app_key=options['app_key']\n )\n self.columns = columns", "title": "" }, { "docid": "d14879aea552d7c3b459a45af49d18e8", "score": "0.47228545", "text": "def __init__(self, x, y, independent_columns, dependent_column):\n\n self.x = pd.DataFrame(x)\n self.y = pd.DataFrame(y)\n self.independent_columns = independent_columns\n self.dependent_column = dependent_column", "title": "" }, { "docid": "f9fb537bae44ea54e6491004e23d1322", "score": "0.47167116", "text": "def _init_dict(self, data):\n if not self._columns:\n self._columns = list(data.keys())\n\n # Filter values by defined columns\n columns = (\n to_list(values)\n for column, values in data.items()\n if column in self._columns\n )\n\n # Convert columns to rows\n self._data = [list(row) for row in zip_longest(*columns)]\n\n self._index = self._index or list(range(len(self._data)))", "title": "" }, { "docid": "c2f496d515391bcc209216a9ae71b233", "score": "0.4716067", "text": "def __init__(self,house=None, api_type=None, api_name=None, schema=None, entry_columns=None):\n self.house = house\n self.api_type = api_type\n self.api_name = api_name\n self.schema = schema\n\n\n self.table = ('_').join([self.house, self.api_type, self.api_name])\n self.conn = utils.connect_sqlalchemy()\n\n self.entry_structure = dict.fromkeys(entry_columns)", "title": "" }, { "docid": "2a4903d2e8e7968e3b3ccab23ee5f476", "score": "0.47156122", "text": "def __init__(self, *args, **kwargs):\n super(DnsUniqueResponse, self).__init__(*args, **kwargs)\n\n self._records = list()\n self._process_records()", "title": "" }, { "docid": "e799c8353f805af4b6d00df9b98c1653", "score": "0.47143358", "text": "def make_event(self):\n event_dict = {}\n quote = \"\"\n while quote == \"\":\n aut, quote = self.get_quote()\n event_dict[\"author\"] = aut\n event_dict[\"quote\"] = quote\n return event_dict", 
"title": "" }, { "docid": "73d3cfc2efa359fe8d1ba0dc778692c0", "score": "0.4713355", "text": "def __init__(__self__, *,\n count: int,\n first_timestamp: str,\n last_timestamp: str,\n message: str,\n name: str,\n type: str):\n pulumi.set(__self__, \"count\", count)\n pulumi.set(__self__, \"first_timestamp\", first_timestamp)\n pulumi.set(__self__, \"last_timestamp\", last_timestamp)\n pulumi.set(__self__, \"message\", message)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"type\", type)", "title": "" }, { "docid": "a7a30c7780885ed5d7289dbda009ca2f", "score": "0.47081393", "text": "def __init__(self, name):\n # these items must match the columns above\n # if they're not created above, they won't be stored to the DB\n\n self.name = name", "title": "" }, { "docid": "53715b788f87abe66ffd0a5d4548c3d9", "score": "0.47060105", "text": "def __init__(self, row):\n\n if row == None:\n return\n\n self.id = row['id']\n self.owner = row['owner']\n self.created = row['created']\n self.reply_to_user = row['reply_to_user']\n self.reply_to_tweet = row['reply_to_tweet']\n self.geo = row['geo']\n self.place_name = None\n self.source = None\n self.contents = None\n self.yyyymm = 0 # weirdly set to 0 on init.\n \n if row['place_name'] != None:\n self.place_name = row['place_name']\n\n self.place_box = row['place_box']\n \n if row['source'] != None:\n self.source = row['source']\n\n if row['contents'] != None:\n self.contents = row['contents']\n \n if row['yyyymm'] != None:\n self.yyyymm = row['yyyymm']", "title": "" }, { "docid": "518992cb645a820edae890999a13d75b", "score": "0.47015494", "text": "def __init__(self, row, cols):\n self.field_dict = {}\n for c in range(len(cols)):\n self.field_dict[cols[c]] = row[c]", "title": "" }, { "docid": "edea8576948777c699d403e22eac21e3", "score": "0.46917585", "text": "def get_event(event):\n if isinstance(event, str):\n name = event\n else:\n try:\n name = event.name\n except AttributeError:\n raise ValueError(\"Cannot get event. 
Use a string.\")\n db = Data(DB_BLAZE)\n selection = odo(db.event[db.event.name==name], pd.DataFrame)\n if selection.empty:\n raise ValueError(\"Event does not exist.\")\n else:\n event = dict(selection.loc[0])\n #event = collections.namedtuple('Event', event.keys())(**event)\n event = Event(**event)\n return event", "title": "" }, { "docid": "fa3ab3ff24d24ba7de7ea998dc9c1c0c", "score": "0.46899942", "text": "def __init__(self, req_id, location, time, event):\n self.req_id = req_id\n self.location = location\n self.time = time\n self.event = event", "title": "" }, { "docid": "ee24f77d2bed03f0f90fc0b3569b1567", "score": "0.46879146", "text": "def __create_column_name_mapping__(self, columns):\n mapping = {'names': {}, 'numbers': {},\n 'header': { 'separator': self.separator}}\n\n argn = 0\n for token in columns:\n mapping['names'][token] = argn\n mapping['numbers'][argn] = token\n argn += 1\n\n self._column_names = mapping['names']\n self.column_nums = mapping['numbers']\n\n if not self._out_column_names_set:\n self._out_column_names = self._column_names.keys()\n\n self._mapping = mapping\n return mapping", "title": "" }, { "docid": "37e2a3729fe3843c08be8721bd51400d", "score": "0.4683065", "text": "def add_column_names(self, request):\n model_obj = None\n org = self.get_organization(request, return_obj=True)\n inventory_pk = request.query_params.get('inventory_pk')\n inventory_type = request.query_params.get('inventory_type', 'property')\n if inventory_type in ['property', 'propertystate']:\n if not inventory_pk:\n model_obj = PropertyState.objects.filter(\n organization=org\n ).order_by('-id').first()\n try:\n model_obj = PropertyState.objects.get(id=inventory_pk)\n except PropertyState.DoesNotExist:\n pass\n elif inventory_type in ['taxlot', 'taxlotstate']:\n if not inventory_pk:\n model_obj = TaxLotState.objects.filter(\n organization=org\n ).order_by('-id').first()\n else:\n try:\n model_obj = TaxLotState.objects.get(id=inventory_pk)\n inventory_type = 'taxlotstate'\n except TaxLotState.DoesNotExist:\n pass\n else:\n msg = \"{} is not a valid inventory type\".format(inventory_type)\n raise ParseError(msg)\n if not model_obj:\n msg = \"No {} was found matching {}\".format(\n inventory_type, inventory_pk\n )\n raise NotFound(msg)\n Column.save_column_names(model_obj)\n\n columns = Column.objects.filter(\n organization=model_obj.organization,\n table_name=model_obj.__class__.__name__,\n is_extra_data=True,\n\n )\n columns = ColumnSerializer(columns, many=True)\n return Response(columns.data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "bec2fdba68756c84db2a39467f6b8adb", "score": "0.46814585", "text": "def _populate_from_database(self, row):\n\n (record_id, name, user, agreed_at) = row\n\n self['record_id'] = record_id\n self['message_name'] = name\n self['agreed_by'] = user\n self['agreed_at'] = from_utimestamp(agreed_at)", "title": "" }, { "docid": "e6dd36a0c0f13e0559d215c4116acfb6", "score": "0.46798655", "text": "def __init__(self, x=None, y=None, fold_column='', blending_avg=True, inflection_point=3, smoothing=1):\n\n self._teColumns = x\n self._responseColumnName = y\n self._foldColumnName = fold_column\n self._blending = blending_avg\n self._inflectionPoint = inflection_point\n self._smoothing = smoothing", "title": "" }, { "docid": "85791cb4ca1e09d072aa5dc903369219", "score": "0.4676823", "text": "def __init__(self, message, status=None, event_status=None, events=None):\n self.status = status\n self.event_status = event_status\n self.events = events\n 
super().__init__(self, message)", "title": "" }, { "docid": "b65fa6d4855864214fe624fa2c407167", "score": "0.46701154", "text": "def emit_basic(data):\n #~ pass\n res = {}\n i = 0\n if 'uuid' in data:\n data = {0:data}\n if 'uuid' in data[data.keys()[0]]:\n data = {0:data}\n for idx1 in data:\n logger.debug('Basic event idx1 :%s,%s' % (idx1,data[idx1]))\n if idx1 not in res:\n res[idx1]={}\n for idx2 in data[idx1]:\n logger.debug('Basic event idx2 :%s,%s' % (idx2,data[idx1][idx2]))\n res[idx1][idx2] = data[idx1][idx2]\n logger.debug(u'Basic event :%s', res)\n self.socketio.emit('my basics response',\n {'data':res},\n namespace='/janitoo')", "title": "" }, { "docid": "bd08c3274924405aa7480f0899e25fe2", "score": "0.46612448", "text": "def _json_object_hook(dat):\n return namedtuple('X', dat.keys())(*dat.values())", "title": "" }, { "docid": "ba540ddbdd51ea164d85b9f3b4b66154", "score": "0.46515137", "text": "def __init__(self,test=False,remote=False):\n self._test = test\n self.dbh = DBHandler(league=LEAGUES[0],test=self._test,remote=remote)\n self.data = {_l:{_y:(None,None) for _y in YEARS} for _l in LEAGUES}\n self._indces = {_l:0 for _l in LEAGUES}", "title": "" }, { "docid": "85d18835daeaf3460a228c8b966a7c26", "score": "0.46498805", "text": "def __init__(self, row):\n \n self.id = row['id']\n \n self.screen_name = None\n self.name = None\n self.lang = None\n self.location = None\n self.private = None\n \n if row['screen_name'] != None:\n # I don't think I need the single_unescape here, the python library \n # handles it; but in theory if I correctly escape thing and unescape\n # things all the time--it should be fine.\n self.screen_name = row['screen_name']\n\n if row['name'] != None:\n self.name = row['name']\n\n if row['lang'] != None:\n self.lang = row['lang']\n\n if row['location'] != None:\n self.location = row['location']\n\n if row['private'] != None:\n self.private = row['private']\n\n self.friends = row['friends']", "title": "" }, { "docid": "d43a2c77b5ca36b90f91e58ee131012f", "score": "0.46496642", "text": "def __init__(self, num_columns):\n\n self.row_values = []\n self.num_columns = int(num_columns)", "title": "" }, { "docid": "55d2927057e0adfa328d29c57df4b3c1", "score": "0.4648", "text": "def event_names(self):\n pass", "title": "" }, { "docid": "cac80d9fb89be12c87c014fa8cf371f4", "score": "0.46443748", "text": "def __init__(self, *args, **kwargs):\n super(DnsResponse, self).__init__(*args, **kwargs)\n self._process_records()", "title": "" }, { "docid": "266214551c4cbf23f0c47624f4701480", "score": "0.46403676", "text": "def _init_empty(self):\n self._data = [[None for _ in self._columns] for _ in self._index]", "title": "" }, { "docid": "039d8356db5ff32889c4e696ff7ced98", "score": "0.46364927", "text": "def __init__(self):\n self.hits = {}\n self.timestamp_arr = []", "title": "" }, { "docid": "7a3f0c0975a95c7bc5e6abf1e2473e47", "score": "0.46327257", "text": "def test_json_columns(self):\n\n class MyTable(Table):\n column_a = JSON()\n column_b = JSONB()\n\n self.assertEqual(\n MyTable._meta.json_columns, [MyTable.column_a, MyTable.column_b]\n )", "title": "" }, { "docid": "69c0396e7ad7180d315f9ea4b1cea260", "score": "0.4631868", "text": "def __init__(self, *args, **kwds):\n if args or kwds:\n super(mpu_valuesResponse, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.ax is None:\n self.ax = 0.\n if self.ay is None:\n self.ay = 0.\n if self.az is None:\n self.az = 0.\n else:\n self.ax = 0.\n self.ay = 0.\n self.az 
= 0.", "title": "" }, { "docid": "cafdf92001be20639b59c28ebdc571b7", "score": "0.4631288", "text": "def __init__(\n self,\n response: dict\n ):\n self.__more = read_value(\n \"more\", response, bool, True)\n self.__total_count = read_value(\n \"totalCount\", response, int, True)\n self.__filtered_count = read_value(\n \"filteredCount\", response, int, True)\n self.__items = read_value(\n \"items\", response, Row, True)", "title": "" }, { "docid": "8b59ff681ac08c7d193accaa60e5dc08", "score": "0.4620476", "text": "def __init__(self):\n\n self._column_dict = {\"UI\": [self._USER, self._ITEM],\n \"UIR\": [self._USER, self._ITEM, self._RATING],\n \"UIT\": [self._USER, self._ITEM, self._TIME],\n \"UIRT\": [self._USER, self._ITEM, self._RATING, self._TIME]}\n self._column_name = None\n self._config = OrderedDict()\n self.all_data = None\n self.train_data = None\n self.valid_data = None\n self.test_data = None\n self.user2id = None\n self.item2id = None\n self._dir_path = None\n self._data_name = \"\"\n self._split_manner = \"\"\n self._user_min = 0\n self._item_min = 0", "title": "" }, { "docid": "d94b2698c08a68be9684d8a34e42c7f0", "score": "0.46192908", "text": "def __init__(self, records):\n self._data = [record.data for record in records]", "title": "" }, { "docid": "b96841bd8ecb0daaadb2c03d92c462a6", "score": "0.46117195", "text": "def __init__(self):\n super(AggHandler, self).__init__() # makes sure variables from super \"__init__\" are also inherited\n self.timestamp_loc = {'start': 1, 'end': 9}\n self.transtype_loc = 9\n self.volume_loc = {'short':{'start':19, 'end': 25}, 'long':{'start':19, 'end':28}}\n self.contra_id_loc = {'short':{'start':34, 'end': 43}, 'long': {'start':38, 'end':47}}\n self.passive_id_loc = {'start': 10, 'end': 19}\n self.traderef_loc = {'short':{'start':25, 'end': 34}, 'long': {'start':29, 'end':38}}\n self.reset_cache()", "title": "" }, { "docid": "b26e42edab1a5b72991e902d9ec8a3c9", "score": "0.46102205", "text": "def __init__(\n self,\n pre_event: list = [],\n event: list = [],\n post_event: list = [],\n title: str = '',\n event_type: EventType = EventType.UNDEFINED,\n event_detail: EventDetail = EventDetail(),\n ) -> None:\n self.pre_event: list = pre_event\n self.event: list = event\n self.post_event: list = post_event\n self.title = title\n self.event_type: EventType = event_type\n self.event_detail: EventDetailType = event_detail", "title": "" }, { "docid": "3fd6d0b0dd67de7848312dcb8846c93a", "score": "0.46096426", "text": "def __init__(self, commandsetresponse):\n self.metrics = {Metric(i).uniquetuple: Metric(i).value for i in commandsetresponse.parsed_message}\n self.executed_at = commandsetresponse.executed_at\n self.host = commandsetresponse.device", "title": "" }, { "docid": "52ce965b73f05182ec52e483795e69a4", "score": "0.4606544", "text": "def __init__(self, sample_dict: dict, sample_ordinal: int, illumina_naming: bool=False, sample_barcode_column: Optional[str]=None):\n self.__dict: dict = sample_dict\n self.__sample_ordinal: int = sample_ordinal\n self.__illumina_naming: bool = illumina_naming\n self.__sample_barcode_column: str = sample_barcode_column\n assert sample_dict['sample_name'], \"'sample_name' not in the sample dictionary\"\n assert sample_dict['sample_id'], \"'sample_id' not in the sample dictionary\"\n assert 0 < self.__sample_ordinal, f\"sample_ordinal must be greater than zero, was {sample_ordinal}\"", "title": "" }, { "docid": "fc7c35b3b3fceaa594e64044090ad0d1", "score": "0.46021602", "text": "def __init__(self, fields, 
ignore_empty=False):\n\n self.field_names = (str(field) for field in fields)", "title": "" }, { "docid": "868061b649c3e8fb70bb46f2d5997d24", "score": "0.4600528", "text": "def __init__(self, *args, **kwds):\n if args or kwds:\n super(Event, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.type is None:\n self.type = 0\n if self.ball1 is None:\n self.ball1 = billiards_msgs.msg.BallState()\n if self.ball2 is None:\n self.ball2 = billiards_msgs.msg.BallState()\n if self.string_rep is None:\n self.string_rep = ''\n else:\n self.type = 0\n self.ball1 = billiards_msgs.msg.BallState()\n self.ball2 = billiards_msgs.msg.BallState()\n self.string_rep = ''", "title": "" }, { "docid": "a2ced6db26e3bd4921ff0bca6d8b3698", "score": "0.45960116", "text": "def __init__(self, read_data_events=None, write_data_events=None, activity_tracker_crn=None):\n self.read_data_events = read_data_events\n self.write_data_events = write_data_events\n self.activity_tracker_crn = activity_tracker_crn", "title": "" }, { "docid": "29f06b9a961d014db62d0431838a80a1", "score": "0.45852238", "text": "def __init__(self):\n self.keys = {}\n self.timestamps = {}", "title": "" }, { "docid": "8040cc31a768a3fc2078241cb1bba3a2", "score": "0.45846277", "text": "def header(self):\n return dict(\n [\n (key, value)\n for key, value in self.__dict__.items()\n if key not in [\"tseries\", \"dseries\"]\n ]\n )", "title": "" }, { "docid": "8f22259c630adccf7c6bacd9e72b1da9", "score": "0.45820498", "text": "def __init__(self, **dataset):\n for key, value in dataset.items():\n insertion_information = asarray(value)\n setattr(self, key, insertion_information)", "title": "" }, { "docid": "7e07e784ac45daf63f02fccfe53099d8", "score": "0.45713317", "text": "def events():\n is_empty = False\n\n con = connect_database()\n\n sql_query = \"SELECT * FROM prithvidb.disastersFinal ORDER BY date\"\n\n cursor = con.cursor()\n cursor.execute(sql_query)\n data = cursor.fetchall()\n if len(data) == 0:\n is_empty = True\n cursor.close()\n\n result = []\n\n if data is not None:\n app.logger.info(\"Success\")\n for row in data:\n row_data = dict()\n # app.logger.info(row)\n row_data['date'] = row[1]\n row_data['title'] = row[2]\n row_data['description'] = row[3]\n row_data['country'] = row[4]\n row_data['city'] = row[5]\n row_data['latitude'] = row[6]\n row_data['longitude'] = row[7]\n row_data['name'] = row[8]\n result.append(row_data)\n else:\n error = \"No user could be found\"\n return json.dumps(result)", "title": "" } ]
9567da02ab3490901b639507bf63db42
Returns a list of locales found in the "locales" property of the manifest. This will convert locales found in the SHORTER_LANGUAGES setting to their full locale. It will also remove locales not found in AMO_LANGUAGES.
[ { "docid": "b3e587f2da19952e54367eb789e2dd40", "score": "0.6775279", "text": "def get_supported_locales(manifest):\n return sorted(filter(None, map(find_language, set(\n manifest.get('locales', {}).keys()))))", "title": "" } ]
[ { "docid": "c7eeb0b6810a336325fe917618b5dcf9", "score": "0.74035686", "text": "def get_locales(app: Sphinx) -> List[str]:\n # Manually configured list of locales\n sitemap_locales: Optional[List[str]] = app.builder.config.sitemap_locales\n if sitemap_locales:\n # special value to add nothing -> use primary language only\n if sitemap_locales == [None]:\n return []\n\n # otherwise, add each locale\n return [locale for locale in sitemap_locales]\n\n # Or autodetect locales\n locales = []\n for locale_dir in app.builder.config.locale_dirs:\n locale_dir = os.path.join(app.confdir, locale_dir)\n if os.path.isdir(locale_dir):\n for locale in os.listdir(locale_dir):\n if os.path.isdir(os.path.join(locale_dir, locale)):\n locales.append(locale)\n return locales", "title": "" }, { "docid": "7c2f34c990e34f7b0561f72ee92e445d", "score": "0.60102385", "text": "def discover_resources():\n locale_discovery_paths = list(settings.TRANZ_LOCALE_PATHS)\n if settings.TRANZ_SEARCH_LOCALE_IN_APPS:\n locale_discovery_paths += [os.path.join(app.path, settings.TRANZ_DIR_NAME) for app in list(apps.app_configs.values())]\n \n APP_LANGUAGES = [l[0] for l in settings.TRANZ_LANGUAGES]\n\n resources = []\n for path in locale_discovery_paths:\n if not os.path.isdir(path):\n continue\n\n # Try to match direct children or discovery paths\n for file in os.listdir(path):\n if os.path.isfile(os.path.join(path, file)):\n try:\n domain, lang, format = file.split('.')\n except ValueError as e:\n continue\n resources.append((format, os.path.join(path, file), lang, domain))\n \n \n # Try to match django's LC_MESSAGES directories\n if settings.TRANZ_REPLACE_DJANGO_TRANSLATIONS:\n for lang in APP_LANGUAGES:\n if os.path.isdir(os.path.join(path, lang)):\n LC_MESSAGES_PATH = os.path.join(path, lang, 'LC_MESSAGES')\n if os.path.isdir(LC_MESSAGES_PATH):\n for file in os.listdir(LC_MESSAGES_PATH):\n try:\n domain, format = file.split('.')\n except ValueError as e:\n continue\n resources.append((format, os.path.join(LC_MESSAGES_PATH, file), lang, domain))\n return resources", "title": "" }, { "docid": "6ae3fb7a33fd6a008941607e1e77abe3", "score": "0.5900377", "text": "def submission_locales(self):\n return self._submission_locales", "title": "" }, { "docid": "ac68e878272a6ee481b4187ed32129b2", "score": "0.5781434", "text": "def get_all_locale():\n wb = openpyxl.load_workbook(__matrix_file_name__)\n sheet = wb[\"RYI\"]\n\n res = []\n for index in range(4, 35):\n locale = sheet[\"B{}\".format(index)].value.split()[0]\n res.append(locale)\n\n return res", "title": "" }, { "docid": "4abe104d5dd647ac97f4b89e7230fb1d", "score": "0.57366514", "text": "def parse_locales(paths):\n\n\tif not paths:\n\t\traise ValueError(\"Invalid argument: no paths were passed.\")\n\n\treturn set([parse_locale(path) for path in paths])", "title": "" }, { "docid": "19b9a8e3cb01b0ff5fc13c7ae5e99982", "score": "0.5657871", "text": "def interface_locales(self):\n return self._interface_locales", "title": "" }, { "docid": "02f4e2537c49240e2b03799638be48ea", "score": "0.5627225", "text": "def regularize_locales(locales):\n locales = [regularize_locale(loc) for loc in locales]\n locales_set = set(locales)\n for loc in locales:\n yield loc\n parts = loc.split('_')\n if len(parts) > 1 and parts[0] not in locales_set:\n # Insert \"fr\" after \"fr_fr\" if it's not somewhere in the list\n yield parts[0]\n alias = ALIASES.get(loc)\n if alias and alias not in locales_set:\n # Insert \"fr_fr\" after \"fr\" if it's not somewhere in the list\n yield alias\n if 'en' not in 
locales_set and 'en_us' not in locales_set:\n yield 'en'\n yield 'en_us'", "title": "" }, { "docid": "16d43ff401058c5f2e8ec48dcf083b83", "score": "0.5538066", "text": "def locales(ctx):\n # https://docs.djangoproject.com/en/dev/ref/django-admin/#django-admin-makemessages\n \"\"\"\n python manage.py makemessages -v 3 --no-wrap --ignore \".*\" --locale=pl_PL\n python manage.py compilemessages -v 3\n \"\"\"\n tmp = ROOT_DIR / \".tmp\"\n if not tmp.exists():\n os.makedirs(str(tmp))\n locale = ROOT_DIR / \"src\" / \"locale\"\n if not locale.exists():\n os.makedirs(str(locale))\n # http://babel.edgewall.org/wiki/BabelDjango\n pybabel = str(VENV_BIN / 'pybabel')\n ctx.run(pybabel + \" extract -F locale/babel.cfg -o locale/django.pot --no-wrap --sort-output .\")\n # create locales firs\n # pybabel init -D django -i locale/django.pot -d locale -l es\n # http://babel.edgewall.org/wiki/BabelDjango#CreatingandUpdatingTranslationsCatalogs\n # ctx.run(pybabel + \" update -D django -i locale/django.pot -d locale --ignore-obsolete\")\n ctx.run(pybabel + \" update -D django -i locale/django.pot -d locale --previous --no-wrap\")\n ctx.run(pybabel + \" compile -D django -d locale --statistics\")\n log.info(\"JavaScript locales\")\n # ctx.run(pybabel + \" update -D djangojs -i locale/djangojs.pot -d locale --previous --no-wrap\")\n ctx.run(\"django-admin makemessages -d djangojs -i static -i node_modules\")\n ctx.run(pybabel + \" compile -D djangojs -d locale --statistics\")", "title": "" }, { "docid": "8cf42f69eb6352d8aaf8d51d5d0eaaa2", "score": "0.5526054", "text": "def locale_resources(self, locale):\n resources = []\n project_files = self.get_or_set_project_files(locale.code)\n\n for resource in self.vcs_project.db_project.resources.all():\n absolute_resource_path = os.path.join(\n self.vcs_project.source_directory_path,\n resource.path,\n )\n\n if project_files.match(absolute_resource_path):\n resources.append(resource)\n\n return resources", "title": "" }, { "docid": "f881f0110cd5024ed4c3ca358857b5a8", "score": "0.5444359", "text": "def get_localizations(self):\n localizations = []\n for glat in self.haldane_lattices:\n localizations.append(glat.get_localization())\n\n return localizations", "title": "" }, { "docid": "0fc3416ca41e00704adea508153c7958", "score": "0.540855", "text": "def get_available_languages(domain):\r\n if domain in _AVAILABLE_LANGUAGES:\r\n return copy.copy(_AVAILABLE_LANGUAGES[domain])\r\n\r\n localedir = '%s_LOCALEDIR' % domain.upper()\r\n find = lambda x: gettext.find(domain,\r\n localedir=os.environ.get(localedir),\r\n languages=[x])\r\n\r\n # NOTE(mrodden): en_US should always be available (and first in case\r\n # order matters) since our in-line message strings are en_US\r\n language_list = ['en_US']\r\n # NOTE(luisg): Babel <1.0 used a function called list(), which was\r\n # renamed to locale_identifiers() in >=1.0, the requirements master list\r\n # requires >=0.9.6, uncapped, so defensively work with both. We can remove\r\n # this check when the master list updates to >=1.0, and update all projects\r\n list_identifiers = (getattr(localedata, 'list', None) or\r\n getattr(localedata, 'locale_identifiers'))\r\n locale_identifiers = list_identifiers()\r\n\r\n for i in locale_identifiers:\r\n if find(i) is not None:\r\n language_list.append(i)\r\n\r\n # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported\r\n # locales (e.g. 
'zh_CN', and 'zh_TW') aren't supported even though they\r\n # are perfectly legitimate locales:\r\n # https://github.com/mitsuhiko/babel/issues/37\r\n # In Babel 1.3 they fixed the bug and they support these locales, but\r\n # they are still not explicitly \"listed\" by locale_identifiers().\r\n # That is why we add the locales here explicitly if necessary so that\r\n # they are listed as supported.\r\n aliases = {'zh': 'zh_CN',\r\n 'zh_Hant_HK': 'zh_HK',\r\n 'zh_Hant': 'zh_TW',\r\n 'fil': 'tl_PH'}\r\n for (locale, alias) in six.iteritems(aliases):\r\n if locale in language_list and alias not in language_list:\r\n language_list.append(alias)\r\n\r\n _AVAILABLE_LANGUAGES[domain] = language_list\r\n return copy.copy(language_list)", "title": "" }, { "docid": "28a92844e27af4433511216282afe0bf", "score": "0.53916764", "text": "def list_translations(dirname='locale'):\n if not os.path.isdir(dirname):\n return []\n\n result = []\n for folder in sorted(os.listdir(dirname)):\n if os.path.isdir(os.path.join(dirname, folder, 'LC_MESSAGES')):\n result.append(Locale.parse(folder))\n\n return result", "title": "" }, { "docid": "898393fb9181012607099ab8a0f798d0", "score": "0.53290695", "text": "def getlocalenames(self):\n if rootdir:\n o = gui.rootrun('chroot %s locale -a' % rootdir)[1]\n else:\n o = Popen(['locale', '-a'], stdout=PIPE).communicate()[0]\n ll = [ l for l in o.splitlines() if '.' in l ]\n return ['C', 'POSIX'] + ll", "title": "" }, { "docid": "0f2ee6c7bcc3323b6b02d92fedb8bf37", "score": "0.5278709", "text": "def list_languages():\n logger.info(\"retrieving list of supported languages\")\n return [lang.to_dict() for lang in installation.languages]", "title": "" }, { "docid": "411b711418162e4e8e423f01a8311886", "score": "0.5230266", "text": "def ordered_locales(request):\n\n accept_language = get_accept_language(request)\n langs = accept_language.split(\",\")\n pairs = map( lambda(lang): lang.split(\";\"), langs)\n\n for pair in pairs:\n if len(pair) < 2: pair.append(1)\n else: pair[1] = float(pair[1][2:])\n pairs.sort(key=lambda(x):x[1], reverse=True)\n\n return [lang for (lang,val) in pairs if val > 0.0]", "title": "" }, { "docid": "468535c7e7d5aafa90b517d27678c6b1", "score": "0.52168137", "text": "def langs(self) -> list[str]:\n return list(self._translations.keys())", "title": "" }, { "docid": "c4f5ffab6cb4c3fd0c4a1234cbdfa243", "score": "0.5147565", "text": "def _get_test_locales(config):\n return eval(config.getoption(\"nl\")) if is_multilocale_arkouda() else 1", "title": "" }, { "docid": "5b4af42c6dccda2ea5b54f33e7b80c03", "score": "0.5137169", "text": "def released_languages_list(self):\n if not self.released_languages.strip():\n return []\n\n languages = [lang.lower().strip() for lang in self.released_languages.split(',')]\n # Put in alphabetical order\n languages.sort()\n return languages", "title": "" }, { "docid": "99a090f6616d7d5a5f6d035f7e9a2d8a", "score": "0.51151115", "text": "def locale_path_locales(self, repo_checkout_path):\n locale_path_locales = {}\n\n for locale in self.db_project.locales.all():\n locale_directory = self.locale_directory_paths[locale.code]\n path = locale_directory[len(repo_checkout_path) :].lstrip(os.sep)\n path = os.path.join(path, \"\") # Ensure the path ends with os.sep\n locale_path_locales[path] = locale\n\n return locale_path_locales", "title": "" }, { "docid": "ee0b6d30bf35b8accab51f9b4bcc365b", "score": "0.50602275", "text": "def findTranslationResources():\n\n\ttranslations_dir = QtCore.QDir(':/')\n\tfile_names = 
translations_dir.entryList(['*.qm'],\n\t\tQtCore.QDir.Files, QtCore.QDir.Name)\n\n\ttranslator = QtCore.QTranslator()\n\n\ttranslation_files = []\n\tfor file_name in file_names:\n\t\tresource_path = translations_dir.filePath(file_name)\n\n\t\ttry:\n\t\t\tlocale = _parseTranslationFileName(str(file_name))\n\t\texcept:\n\t\t\t# theoretically, this should not happen, because these files \n\t\t\t# have already passed through findTranslationFiles()\n\t\t\twarning(\"Failed to parse translation file name: \" + file_name)\n\t\t\tcontinue\n\n\t\tif not translator.load(resource_path):\n\t\t\twarning(\"Failed to load translation file \" + resource_path)\n\t\t\tcontinue\n\n\t\tlocale_name, full_name = _getLocaleInfo(locale)\n\n\t\t# do not include default locale to list, we do not want to show it to user\n\t\t# it will be used only as a backup one\n\t\tif locale_name == _QT_UNKNOWN_LOCALE:\n\t\t\tcontinue\n\n\t\ttranslation_files.append((locale_name, full_name, resource_path))\n\n\treturn translation_files", "title": "" }, { "docid": "2797cdbc5afd29c56928def62bf73f2a", "score": "0.5051008", "text": "def get_languages(self):\n global GENGO_LANGUAGE_CACHE\n if not GENGO_LANGUAGE_CACHE:\n resp = self.gengo_api.getServiceLanguages()\n GENGO_LANGUAGE_CACHE = tuple(\n [item['lc'] for item in resp['response']])\n return GENGO_LANGUAGE_CACHE", "title": "" }, { "docid": "173dd799b4a1a88a3263f83ce54d1972", "score": "0.5037178", "text": "def get_languages(self):\n return self._data.get_data(const.LANGUAGES_COLLECTION)", "title": "" }, { "docid": "1e66e4297e1b4215f4a020b572013aa6", "score": "0.503531", "text": "def get_slang_abbreviations():\n with open(get_root_directory()+'/resources/twitter_slang_abbreviations.json') as data_file:\n return json.load(data_file)", "title": "" }, { "docid": "b36a03eba590e30d686fc6112bf4f9bb", "score": "0.50228155", "text": "def _get_languages(self):\r\n LOGGER.log()\r\n gtk_lang_mgr = gtksourceview2.language_manager_get_default()\r\n language_ids = gtk_lang_mgr.get_language_ids()\r\n language_names = []\r\n for language_id in language_ids:\r\n language = gtk_lang_mgr.get_language(language_id)\r\n language_names.append(language.get_name())\r\n language_names.sort(lambda a, b:\r\n cmp(a.lower(), b.lower()))\r\n return language_names", "title": "" }, { "docid": "83a10795cb06800ba71d8a4e424d41f3", "score": "0.5002563", "text": "def supported_languages(self) -> list[str]:", "title": "" }, { "docid": "64ba48589172e3de8af030f931846647", "score": "0.50003403", "text": "def getTranslatedThemeList(self, category=None):\n pre = \"theme_\"\n themes = [ (_(pre+t), t) for t in self.getThemes() ]\n \n return themes", "title": "" }, { "docid": "f5db54f7b92bee22e8e1f8f19eb3a102", "score": "0.49863845", "text": "def get_mapping_languages(self) -> List[str]:\n return sorted(lang for _, lang in self.cfg[\"mapping\"].items())", "title": "" }, { "docid": "83e8ebb1b115a295e3ae4f31974be6fa", "score": "0.49761966", "text": "def get_available_languages():\n _set_environment()\n proc = subprocess.Popen([TESSERACT_CMD, \"--list-langs\"],\n startupinfo=g_subprocess_startup_info,\n creationflags=g_creation_flags,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n langs = proc.stdout.read().decode('utf-8').splitlines(False)\n ret = proc.wait()\n if ret != 0:\n raise TesseractError(ret, \"unable to get languages\")\n\n return [lang for lang in langs if lang and lang[-1] != ':']", "title": "" }, { "docid": "48149a715a8e25c9e9beb8d0ee4b2480", "score": "0.49359855", "text": "def get_translation_list(obj, 
language_codes=[lang['code'] for lang in settings.PARLER_LANGUAGES[None]]):\n prefetched_translations = getattr(obj, 'translation_list', [])\n filtered_prefetched = [\n translation for translation in prefetched_translations if translation.language_code in language_codes\n ]\n return filtered_prefetched if prefetched_translations else obj.translations.filter(language_code__in=language_codes)", "title": "" }, { "docid": "79a7bda2cb146f6ecfacfb4bfffd9ce2", "score": "0.4918901", "text": "def ADDITIONAL_LANGUAGES(self) -> Iterable[str]:\n return self._setting(\n \"ADDITIONAL_LANGUAGES\", self.config[\"additional_languages\"]\n )", "title": "" }, { "docid": "5ea62ac62cbc7d56e16fd0db6e3da884", "score": "0.48982581", "text": "def languages(self) -> set[str]:\n langs = set()\n for cat in self.entry_page.categories:\n if (m := DASH_LANG_SEARCH(cat)) and (lang := LANG_ABBREV_MAP.get(m.group(1))):\n langs.add(lang)\n break\n return langs", "title": "" }, { "docid": "21f8b41900cf0ebcc66381afb60aa0f2", "score": "0.48617068", "text": "def langs(cls):\n return cls._langs", "title": "" }, { "docid": "921d037d8efa8843abe9d72cb279e08c", "score": "0.48374712", "text": "def languages(self):\n if not self._languages:\n # query API only the first time\n self._languages = self.get_languages()\n return self._languages", "title": "" }, { "docid": "b9f7e0677792e6ad99e5f24f32720b52", "score": "0.48260513", "text": "def monster_language_set(data):\n languages = set()\n for m in data:\n languages.update([x.strip() for x in m[\"Languages\"].split(\",\")])\n return languages", "title": "" }, { "docid": "7b775d5acb15d705dda67041b0fdb9e1", "score": "0.48153034", "text": "def get_available_languages():\n pass", "title": "" }, { "docid": "dcd43e1d1d67bfe446574aba56a9d96f", "score": "0.47988316", "text": "def supported_languages(self) -> list[str]:\n return [\"en\"]", "title": "" }, { "docid": "c33d85380325b9ba9fabc7b7e610e774", "score": "0.4771139", "text": "def realm_names(self):\n return sorted(self._realms)", "title": "" }, { "docid": "9bb3849a3dc981c05195b6b3ab968be3", "score": "0.4760957", "text": "def get_languages(self):\n languages = cache.get(self.PAGE_LANGUAGES_KEY % (self.id))\n if languages:\n return languages\n\n languages = [c['language'] for\n c in Content.objects.filter(page=self,\n type=\"slug\").values('language')]\n languages = list(set(languages)) # remove duplicates\n languages.sort()\n cache.set(self.PAGE_LANGUAGES_KEY % (self.id), languages)\n return languages", "title": "" }, { "docid": "e9b31ef160f8f215c66dad0978c45271", "score": "0.4718545", "text": "def get_languages(self):\r\n if self._languages:\r\n return self._languages\r\n self._languages = cache.get(self.PAGE_LANGUAGES_KEY % (self.id))\r\n if self._languages is not None:\r\n return self._languages\r\n\r\n languages = [c['language'] for\r\n c in Content.objects.filter(page=self,\r\n type=\"slug\").values('language')]\r\n # remove duplicates\r\n languages = list(set(languages))\r\n languages.sort()\r\n cache.set(self.PAGE_LANGUAGES_KEY % (self.id), languages)\r\n self._languages = languages\r\n return languages", "title": "" }, { "docid": "d15f3e4606fefb752190146427cdae71", "score": "0.46740437", "text": "def locale(self) -> str | None:\n settings = self.app.settings\n\n locale = settings.i18n.locale_negotiator(self.app.locales, self)\n\n return locale or self.app.default_locale", "title": "" }, { "docid": "258dad49294b10163e12099f0f59ee39", "score": "0.46649918", "text": "def get_language_list(client, db_name):\n dbo = client[db_name]\n 
distinct_lang = dbo.tweets.distinct(\"lang\")\n return unicode_to_utf(distinct_lang)", "title": "" }, { "docid": "275e0aedb414f63988ac2b10d563046c", "score": "0.46614388", "text": "def locale_codes (self) :\n codes = self.get_user_locale_codes ()\n if not codes :\n codes = self.__super.locale_codes\n return codes", "title": "" }, { "docid": "4fc084dd4a56df251818f427efb2c9da", "score": "0.46555066", "text": "def list_message_files(suffix=\".po\"):\n _files = glob(\"locale/*\" + suffix)\n _list = []\n for _file in _files:\n # basename (without extension) is a locale name\n _locale = os.path.splitext(os.path.basename(_file))[0]\n _list.append((_file, os.path.join(\n \"share\", \"locale\", _locale, \"LC_MESSAGES\", \"roundup.mo\")))\n return _list", "title": "" }, { "docid": "9a171b1fc9ebc6f1da52a13e6c4d5840", "score": "0.46511155", "text": "def test_empty_target_locale(self):\n for addon in Addon.objects.all():\n addon.target_locale = ''\n addon.save()\n response = self.client.get(self.url, follow=True)\n eq_(response.status_code, 200)\n eq_(response.context['locales'], [])", "title": "" }, { "docid": "f9ed332aa7e5c75dd037e2ac30a9aceb", "score": "0.46353132", "text": "def get_locale() :\n return app.config.get('LANGUAGE')", "title": "" }, { "docid": "4237276968868a362f8a66160ba823a8", "score": "0.46096516", "text": "def available_domains(self) -> list:\r\n\r\n response = requests.get(\"https://api4.temp-mail.org/request/domains/format/json\", headers={\"User-Agent\": \"okhttp/3.12.6\"})\r\n domains = json.loads(response.text)\r\n \r\n return list(domains)", "title": "" }, { "docid": "a30adba10f5259116802ed65996b125d", "score": "0.45978656", "text": "def list_resources() -> List[str]:\n return []", "title": "" }, { "docid": "014072c33a315b14854cfc2d5e083edb", "score": "0.45900485", "text": "def get_all_manga(self):\n results = []\n for name in self.manga.keys():\n manga = self.get_manga(name)\n if not manga or not manga[2]: continue\n results.append(manga)\n return results", "title": "" }, { "docid": "715fd5a61ea1af5aa35a43bd08b55c3c", "score": "0.4580036", "text": "def find_pos(lang, include_djangos = False, include_rosetta = False):\n \n paths = []\n \n # project/locale\n parts = settings.SETTINGS_MODULE.split('.')\n project = __import__(parts[0], {}, {}, [])\n paths.append(os.path.join(os.path.dirname(project.__file__), 'locale'))\n \n # django/locale\n if include_djangos:\n paths.append(os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale'))\n \n # settings \n for localepath in settings.LOCALE_PATHS:\n if os.path.isdir(localepath):\n paths.append(localepath)\n \n # project/app/locale\n for appname in settings.INSTALLED_APPS:\n \n if 'rosetta' == appname and include_rosetta == False:\n continue\n \n if rosetta_settings.EXCLUDED_APPLICATIONS and appname in rosetta_settings.EXCLUDED_APPLICATIONS:\n continue\n \n p = appname.rfind('.')\n if p >= 0:\n app = getattr(__import__(appname[:p], {}, {}, [appname[p+1:]]), appname[p+1:])\n else:\n app = __import__(appname, {}, {}, [])\n\n apppath = os.path.join(os.path.dirname(app.__file__), 'locale')\n\n if os.path.isdir(apppath):\n paths.append(apppath)\n \n ret = set()\n rx=re.compile(r'(\\w+)/../\\1')\n langs = (lang,)\n if u'-' in lang:\n _l,_c = map(lambda x:x.lower(),lang.split(u'-'))\n langs += (u'%s_%s' %(_l, _c), u'%s_%s' %(_l, _c.upper()), )\n elif u'_' in lang:\n _l,_c = map(lambda x:x.lower(),lang.split(u'_'))\n langs += (u'%s-%s' %(_l, _c), u'%s-%s' %(_l, _c.upper()), )\n \n for path in paths:\n for lang_ in 
langs:\n dirname = rx.sub(r'\\1', '%s/%s/LC_MESSAGES/' %(path,lang_))\n for fn in ('django.po','djangojs.po',):\n if os.path.isfile(dirname+fn):\n ret.add(os.path.abspath(dirname+fn))\n return list(ret)", "title": "" }, { "docid": "aee5d3e87db8c9ce0d11dc0a80d74a5b", "score": "0.45782062", "text": "def getSupportedLanguages():\n\tlangs = []\n\tfor fullName, (ivoId,descr) in SUPPORTED_LANGUAGES.iteritems():\n\t\ttry:\n\t\t\tname, version = fullName.split(\"-\", 1)\n\t\texcept ValueError: \n\t\t\t# fullName has no version info, there must be at least one entry\n\t\t\t# that includes a version, so skip this one.\n\t\t\tcontinue\n\t\tlangs.append((name, version, descr, ivoId))\n\treturn langs", "title": "" }, { "docid": "2c84139a392d0384d520ed6bdb77d2ce", "score": "0.4571465", "text": "def list_languages():\n load_languages()\n langs = []\n visited = set([])\n for lg in language_map:\n L = language_map[lg]\n if L.code:\n if not L.code in visited:\n langs.append(L)\n visited.add(L.code)\n if L.code_iso3:\n if not L.code_iso3 in visited:\n langs.append(L)\n visited.add(L.code_iso3)\n return langs", "title": "" }, { "docid": "284475dd42bbb2f3572125483dac599c", "score": "0.45637026", "text": "def months():\n x = None\n return [x for x in calendar.month_name]", "title": "" }, { "docid": "b48ec1dec1d5b8fa56a731658ea009ed", "score": "0.45559436", "text": "def interpreterStrings(self):\n if self.interpreterclass():\n return sorted(self.interpreterclass().getDefaultConfig()['translations'])\n return []", "title": "" }, { "docid": "b48ec1dec1d5b8fa56a731658ea009ed", "score": "0.45559436", "text": "def interpreterStrings(self):\n if self.interpreterclass():\n return sorted(self.interpreterclass().getDefaultConfig()['translations'])\n return []", "title": "" }, { "docid": "630b5e7cb7ac337565f6b767020670d5", "score": "0.454772", "text": "def obtener_monedas(self):\n\n tabla = self.derivado_generico.flujos_valorizados[[\"ID\",\"ActivoPasivo\", \"Fecha\", \"FechaFixing\", \"FechaFlujo\", \"FechaPago\", \"Flujo\", \"ValorPresenteMonFlujo\", \"Moneda\", \"MonedaBase\"]]\n monedas = tabla[\"Moneda\"]\n arreglo_monedas = []\n for i in range(len(monedas)):\n\n moneda = monedas[i]\n if moneda in arreglo_monedas: continue\n arreglo_monedas.append(moneda)\n\n return arreglo_monedas", "title": "" }, { "docid": "b5d40bb035046db69162da52a158bc73", "score": "0.4544738", "text": "def all():\n return PLUGINS.values()", "title": "" }, { "docid": "f718746fe6b1180c5dff1a37b5fa3223", "score": "0.45395854", "text": "def locale_filter(project):\n # a silly example: display only Locales whose code starts with the \n # second letter of the project label\n return Locale.objects.filter(code__istartswith=project.label[1])", "title": "" }, { "docid": "eb9a595d86e9c466c1dc3b29f2d8bb7e", "score": "0.45247325", "text": "def month_all_foo():\n all_month = [\n 'Январь', 'Февраль', 'Март', 'Апрель',\n 'Май', 'Июнь', 'Июль', 'Август',\n 'Сентябрь', 'Октябрь', 'Ноябрь', 'Декабрь'\n ]\n return all_month", "title": "" }, { "docid": "8670cbc17514795a0fdee600640b89a3", "score": "0.45229542", "text": "def listall(self):\n if self._cache_allmedia is None:\n self._cache_allmedia = []\n for collection in self.collections:\n self._cache_allmedia.extend(collection.listall())\n # else returns the cache value\n return self._cache_allmedia", "title": "" }, { "docid": "bec70a3efa2c83d3fde7c49ddd649bd2", "score": "0.4517889", "text": "def supportedLanguages(self):\n return [\"de\", \"en\", \"es\", \"fr\", \"it\", \"ja\", \"pt\", \"ru\", ]", "title": "" }, { 
"docid": "b1409b783cd214f06c75c03470391d6d", "score": "0.45082396", "text": "def all_manifests(request):\r\n \r\n return _phas(request, smart_only=True)", "title": "" }, { "docid": "88916e9fd30042f6bfb235a75ca5bb0b", "score": "0.4497977", "text": "def beta_languages_list(self):\n if not self.beta_languages.strip():\n return []\n\n languages = [lang.lower().strip() for lang in self.beta_languages.split(',')]\n # Put in alphabetical order\n languages.sort()\n return languages", "title": "" }, { "docid": "8d72a980d7df2fdcd1e8c9690a1436bd", "score": "0.44638142", "text": "def get_locale():\n return request.accept_languages.best_match(current_app.config['LANGUAGES'])", "title": "" }, { "docid": "8a2d77aeb6d9ad2dfe6d108dd5af6b03", "score": "0.44614086", "text": "def registered_manga(self):\n return sorted(self.manga.keys())", "title": "" }, { "docid": "8ce53372d1b14428acc8a6d066d336e9", "score": "0.44600862", "text": "def build_bot_locales(\n self,\n bot_id: str,\n bot_locale_ids: List[str],\n bot_version: str = DRAFT_VERSION,\n max_concurrent_builds: int = 5,\n ) -> None:\n self._bot_manager.build_bot_locales(\n bot_id=bot_id,\n bot_locale_ids=bot_locale_ids,\n bot_version=bot_version,\n max_concurrent_builds=max_concurrent_builds,\n )", "title": "" }, { "docid": "227241ae37f80892f0501e38cfeb61f2", "score": "0.44542634", "text": "def test_missing_locale(self):\n wa = dict(self.locales)['wa']\n eq_(wa.display, 'Walloon Language Pack (wa)')\n eq_(wa.native, '')", "title": "" }, { "docid": "e7792195b156a1cf3353245b7d5c3c8c", "score": "0.4453551", "text": "def list_languages():\n translate_client = translate.Client()\n\n results = translate_client.get_languages()\n\n for language in results:\n print(u'{name} ({language})'.format(**language))", "title": "" }, { "docid": "3f1a8b3911940aaf1231d135640965a5", "score": "0.44452986", "text": "def changed_locales_files(self):\n files = {}\n\n # VCS changes\n repos = self.db_project.translation_repositories()\n if self.repo_locales:\n repos = repos.filter(pk__in=self.repo_locales.keys())\n\n for repo in repos:\n if repo.multi_locale:\n locales = (\n self.repo_locales[repo.pk]\n if self.repo_locales\n else self.db_project.locales.all()\n )\n for locale in locales:\n changed_files = get_changed_files(\n repo.type,\n repo.locale_checkout_path(locale),\n repo.get_last_synced_revisions(locale.code),\n )[0]\n\n for path in changed_files:\n files.setdefault(path, []).append(locale)\n else:\n changed_files = get_changed_files(\n repo.type, repo.checkout_path, repo.get_last_synced_revisions()\n )[0]\n\n log.info(\n \"Changed files in {} repository, all: {}\".format(\n self.db_project, changed_files\n )\n )\n\n # Include only relevant (localizable) files\n if self.configuration:\n files = self.get_relevant_files_with_config(changed_files)\n else:\n files = self.get_relevant_files_without_config(\n changed_files, self.locale_path_locales(repo.checkout_path)\n )\n\n log.info(\n \"Changed files in {} repository, relevant for enabled locales: {}\".format(\n self.db_project, files\n )\n )\n\n # DB changes\n vcs = files\n db = self.db_project.changed_resources(self.now)\n for path in set(list(vcs.keys()) + list(db.keys())):\n if path in vcs and path in db:\n vcs[path] = set(list(vcs[path]) + list(db[path]))\n\n else:\n vcs[path] = vcs[path] if path in vcs else db[path]\n\n return files", "title": "" }, { "docid": "55dde4d91abaa326f50554d2d61f1edb", "score": "0.44370285", "text": "def get_all_org_units_from_mo(self) -> list:\n logger.info(\"Fetching all org units from 
OS2mo\")\n query = gql(\n \"\"\"\n query OrgUnitQuery {\n org_units {\n objects {\n current {\n uuid\n org_unit_level {\n name\n }\n name\n }\n }\n }\n }\n \"\"\"\n )\n\n r = self.gql_client.execute(query)\n\n units = [o[\"current\"] for o in r[\"org_units\"][\"objects\"]]\n # filter by org_unit_level if configured in settings\n if org_unit_levels := self.settings.integrations_kle_xlsx_org_unit_levels:\n units = [\n o for o in units if o[\"org_unit_level\"].get(\"name\") in org_unit_levels\n ]\n logger.info(\"Found {} units\".format(len(units)))\n return units", "title": "" }, { "docid": "20d32e4f201db992d607638c15dddf62", "score": "0.44329232", "text": "def test_getWindowsLocale(self):\n self.assertEqual(l10n.getWindowsLocale(\"en_EN.UTF-8\"), \"english\")", "title": "" }, { "docid": "6ae28ee980f31cef586387ea8a8b747d", "score": "0.4432367", "text": "def build_loaders_for_all_locales():\n for locale in supported_locales:\n with open(\"m32cmploader_{}.js\".format(locale), \"w+\") as f:\n f.write(preppy.getModule(\"cmp_loader.prep\").get(locale))", "title": "" }, { "docid": "82f4ec748d7760c4158e2663d8e7bd36", "score": "0.4426602", "text": "def get_locale_msgs(nestData):\n msgs = None\n locale = nestData.get_locale()\n print \"locale = \", locale\n try:\n if locale:\n with open('config_' + locale + '.json') as jsonfile:\n msgs = json.loads(jsonfile.read())\n except Exception as ex:\n print \"Error trying to open configuration file for locale: \", locale, ex\n\n if not msgs:\n try:\n with open('config.json') as jsonfile:\n msgs = json.loads(jsonfile.read())\n except Exception as ex1:\n print \"Error trying to open config.json: \", ex1\n\n if not msgs:\n msgs = {}\n if \"labels\" not in msgs:\n msgs[\"labels\"] = {}\n if \"fa-icons\" not in msgs:\n msgs[\"fa-icons\"] = {}\n return msgs", "title": "" }, { "docid": "c1bb6b090af2174da74c85d0f79882a4", "score": "0.44257262", "text": "def _list(self):\n conn = self._connect()\n try:\n names = conn.listDefinedDomains()\n except libvirt.libvirtError:\n raise CuckooMachineError(\"Cannot list domains\")\n finally:\n self._disconnect(conn)\n return names", "title": "" }, { "docid": "27f4fdd267966d2b68eb7cf0d85609fc", "score": "0.44242364", "text": "def supportedLanguages(self):\n return [\"ar\", \"be\", \"bg\", \"bs\", \"ca\", \"cs\", \"da\", \"de\", \"el\", \"en\",\n \"es\", \"et\", \"fi\", \"fr\", \"ga\", \"gl\", \"hi\", \"hr\", \"hu\", \"id\",\n \"is\", \"it\", \"iw\", \"ja\", \"ka\", \"ko\", \"lt\", \"lv\", \"mk\", \"mt\",\n \"nl\", \"no\", \"pl\", \"pt\", \"ro\", \"ru\", \"sk\", \"sl\", \"sq\", \"sr\",\n \"sv\", \"th\", \"tl\", \"tr\", \"uk\", \"vi\", \"zh-CN\", \"zh-TW\",\n ]", "title": "" }, { "docid": "20bef12eba86253350e228449b6c41c9", "score": "0.44210565", "text": "def get_theme_list(self):\n themes = [theme.replace('.scienv_theme', '') for theme in os.listdir(\n self._config['General']['themes_path'])]\n return themes", "title": "" }, { "docid": "9c081877e61833fb709543c211001579", "score": "0.44178656", "text": "def _get_domain_list(self):\n return reduce(operator.add, [device.get_configured_domains() for device in self.devices])", "title": "" }, { "docid": "d7c412e499b4bd0c3656800cdb033b39", "score": "0.4417432", "text": "def pick_locale(request):\n\n available_parm = request.get(\"available\")\n if not available_parm:\n available_parm = \"none\"\n available = available_parm.split(\",\")\n preferred = ordered_locales(request)\n\n best = pick(available, preferred)\n return best", "title": "" }, { "docid": "06cded6642373374f63402f59e68df4e", 
"score": "0.44165623", "text": "def fetch_languages(self):\n\n cds_url = TRANSIFEX_CDS_URLS['FETCH_AVAILABLE_LANGUAGES']\n languages = []\n\n try:\n last_response_status = 202\n while last_response_status == 202:\n response = requests.get(\n self.host + cds_url,\n headers=self._get_headers(),\n )\n last_response_status = response.status_code\n\n if not response.ok:\n logger.error(\n 'Error retrieving languages from CDS: `{}`'.format(\n response.reason\n )\n )\n response.raise_for_status()\n\n json_content = response.json()\n languages = json_content['data']\n\n except (KeyError, ValueError):\n # Compatibility with python2.7 where `JSONDecodeError` doesn't\n # exist\n logger.error(\n 'Error retrieving languages from CDS: Malformed response')\n except requests.ConnectionError:\n logger.error(\n 'Error retrieving languages from CDS: ConnectionError')\n except Exception as e:\n logger.error('Error retrieving languages from CDS: UnknownError '\n '(`{}`)'.format(str(e)))\n\n return languages", "title": "" }, { "docid": "58ef9194e6846d80e6f37a024d7ea01b", "score": "0.4413733", "text": "def locale(self):\n return self._locale", "title": "" }, { "docid": "99b1142e2dce1afc7eefead33742f322", "score": "0.44072366", "text": "def langs(self, cache=True):\n try:\n if not self.cache['languages'] and cache:\n # data = urlencode({'key': self.api_key, 'ui' : self.ui})\n data = urlencode({'key': self.api_key, 'ui': self.ui})\n result = urlopen(self.api_urls['langs'] % data).read()\n # self.cache['languages'] = loads(result.decode(\"utf-8\"))['dirs']\n self.cache['languages'] = loads(result.decode(\"utf-8\"))\n except IOError:\n raise YandexTranslateException(self.error_codes[503])\n except ValueError:\n raise YandexTranslateException(result)\n return self.cache['languages']", "title": "" }, { "docid": "7fa37a62c70699e7f309bc496f2b160e", "score": "0.43963456", "text": "def test_null_target_locale(self):\n for addon in Addon.objects.all():\n addon.target_locale = None\n addon.save()\n response = self.client.get(self.url, follow=True)\n eq_(response.status_code, 200)\n eq_(response.context['locales'], [])", "title": "" }, { "docid": "5b083dda265a01d5761412b7e6077ed2", "score": "0.4396338", "text": "def get_languages():\r\n\r\n url = \"http://accent.gmu.edu/browse_language.php\"\r\n html = get(url)\r\n soup = BeautifulSoup(html.content, 'html.parser')\r\n languages = []\r\n language_lists = soup.findAll('ul', attrs={'class': 'languagelist'})\r\n for ul in language_lists:\r\n for li in ul.findAll('li'):\r\n languages.append(li.text)\r\n return languages", "title": "" }, { "docid": "3cbc6edcb3d08a196e270f8f711393cc", "score": "0.43886054", "text": "def artist_name_translation_list(self):\n return self._artist_name_translation_list", "title": "" }, { "docid": "28b107d02928592138b142de500ecf18", "score": "0.43792656", "text": "def test_sorting(self):\n displays = [locale.display for lang, locale in self.locales]\n eq_(displays, sorted(displays))", "title": "" }, { "docid": "c3d4cce6c8c08444d54aeb700ec13540", "score": "0.4378617", "text": "def topodic_supported_languages():\n return settings.LANGUAGE_DICT", "title": "" }, { "docid": "08305dfa8b57985c1884bc191c321884", "score": "0.43781117", "text": "def languages(self):\n if self.tool is None:\n return []\n\n bound = self.tool.getLanguageBindings()\n current = bound[0]\n\n def merge(lang, info):\n info[\"code\"] = lang\n if lang == current:\n info['selected'] = True\n else:\n info['selected'] = False\n return info\n\n languages = [merge(lang, info) for (lang, info) 
in\n self.tool.getAvailableLanguageInformation().items()\n if info[\"selected\"]]\n\n # sort supported languages by index in portal_languages tool\n supported_langs = self.tool.getSupportedLanguages()\n\n def index(info):\n try:\n return supported_langs.index(info[\"code\"])\n except ValueError:\n return len(supported_langs)\n\n return sorted(languages, key=index)", "title": "" }, { "docid": "3bc84a436b4e7d8b9be6239e61f8a822", "score": "0.4377666", "text": "def idndl_DownlevelGetLocaleScripts(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"lpLocaleName\", \"lpScripts\", \"cchScripts\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "title": "" }, { "docid": "1968d94ba370fa7ff834ab42f4d470bf", "score": "0.43686083", "text": "def get_supported_language_names(self) -> typing.Iterable[str]:\n return [LanguageClassLoader.to_language_name(s) for s in self._ln_loader.config.sections()]", "title": "" }, { "docid": "a3f292799849960bf344fcc794461d47", "score": "0.4364841", "text": "def extended_managers(self):\n return self.properties.get('ExtendedManagers', StringCollection())", "title": "" }, { "docid": "f176e33f7e46ae0ecc75cfdf55dadbfa", "score": "0.43625548", "text": "def getTranslatedSoundStringList(self, category=None):\n sound_list = \", \".join( [ _(\"lang_\" + s) for s in self.getSounds()]) \n\n return sound_list", "title": "" }, { "docid": "ca580efa90b985f4586af91464af56ec", "score": "0.43622023", "text": "def getAvailableReportersVocab(self):\n members = self.getReporters()\n return DisplayList([(m.UID(), m.Title()) for m in members])", "title": "" }, { "docid": "f677170e9a668d68143f2f3daf85758f", "score": "0.43580335", "text": "def get_official_abbreviations():\n with open(get_root_directory()+'/resources/official_abbreviations.json') as data_file:\n return json.load(data_file)", "title": "" }, { "docid": "ad679bd993a1a86a4109f6fc2f2706b7", "score": "0.43578392", "text": "def get_providers() -> List[CalendarProvider]:\r\n return [x.value for x in CalendarProviders]", "title": "" }, { "docid": "e6ac1b8337ed73f1b0a4a45a1d3c30f0", "score": "0.4356482", "text": "def get_supported_languages(translate_client, verbose=False):\n results = translate_client.get_languages()\n if verbose:\n for language in results:\n print(u\"{name} ({language})\".format(**language))\n return [r[\"language\"] for r in results]", "title": "" }, { "docid": "c73e90a4ea69c2d62e64d9062419c15f", "score": "0.43560612", "text": "def ls_arrs(self):\r\n return self.conf.ls_arrs(self.dir)", "title": "" }, { "docid": "08dfd97d9e976fbb97a5c5f951701a77", "score": "0.43514547", "text": "def supported_languages(self):\n return SUPPORTED_LANGUAGES", "title": "" }, { "docid": "fb0f98a673fe12f3fdfa1f850c7e1075", "score": "0.43497854", "text": "def get_language_choices():\r\n DEFAULT_LANGUAGES = (\r\n (\"bash\", \"Bash/Shell\"),\r\n (\"css\", \"CSS\"),\r\n (\"diff\", \"diff\"),\r\n (\"html\", \"HTML\"),\r\n (\"javascript\", \"Javascript\"),\r\n (\"json\", \"JSON\"),\r\n (\"python\", \"Python\"),\r\n (\"scss\", \"SCSS\"),\r\n (\"yaml\", \"YAML\"),\r\n )\r\n\r\n return getattr(settings, \"WAGTAIL_CODE_BLOCK_LANGUAGES\", DEFAULT_LANGUAGES)", "title": "" }, { "docid": "e2b8290a300dc51fa45eb3bfe0af973d", "score": "0.43403623", "text": "def get_all_domains(self):\n return self.domains.values()", "title": "" }, { "docid": "1b04a5e8d61d33c9a2453c5feab8143a", "score": "0.43337968", "text": "def locale_directory_paths(self):\n locale_directory_paths = {}\n parent_directories = set()\n\n for 
locale in self.locales:\n try:\n if self.configuration:\n locale_directory_paths[locale.code] = self.configuration.l10n_base\n else:\n locale_directory_paths[locale.code] = locale_directory_path(\n self.checkout_path,\n locale.code,\n parent_directories,\n )\n parent_directory = get_parent_directory(\n locale_directory_paths[locale.code]\n )\n\n except OSError:\n if not self.db_project.has_multi_locale_repositories:\n source_directory = self.source_directory_path\n parent_directory = get_parent_directory(source_directory)\n\n locale_code = locale.code\n if uses_undercore_as_separator(parent_directory):\n locale_code = locale_code.replace(\"-\", \"_\")\n\n locale_directory = os.path.join(parent_directory, locale_code)\n\n # For asymmetric formats, create empty folder\n if is_asymmetric_resource(next(self.relative_resource_paths())):\n os.makedirs(locale_directory)\n\n # For other formats, copy resources from source directory\n else:\n shutil.copytree(source_directory, locale_directory)\n\n for root, dirnames, filenames in os.walk(locale_directory):\n for filename in filenames:\n path = os.path.join(root, filename)\n if is_resource(filename):\n os.rename(path, source_to_locale_path(path))\n else:\n os.remove(path)\n\n locale_directory_paths[locale.code] = locale_directory\n\n else:\n raise MissingLocaleDirectoryError(\n f\"Directory for locale `{locale.code}` not found\"\n )\n\n parent_directories.add(parent_directory)\n\n return locale_directory_paths", "title": "" }, { "docid": "fe3ca67aaeadd7b2c5e7eae397c744b1", "score": "0.4326779", "text": "def clashing_months(self) -> typing.Optional[typing.List[date]]:\n if (\n not self.preflight\n or \"months\" not in self.preflight\n or \"format_version\" not in self.preflight\n or self.preflight[\"format_version\"] != self.PREFLIGHT_FORMAT_VERSION\n ):\n return None\n\n # get actual orgnizations\n organizations_with_names = self.organizations_from_data()\n if any(e[0] is None for e in organizations_with_names):\n # Unable to resolve organization from data => can determine whether\n # there are clashing data present\n return None\n organizations = [e[1] for e in organizations_with_names] or [self.organization]\n\n # preflight was performed\n return sorted(\n {\n e.date\n for e in ImportBatch.objects.filter(\n report_type=self.report_type,\n platform=self.platform,\n organization__in=organizations,\n date__in=[e for e in self.preflight[\"months\"]],\n )\n }\n )", "title": "" } ]
0514c3e3f00ed75fc520a4e64741f14f
Renders a template from the given template source string with the given context. Template variables will be autoescaped.
[ { "docid": "a770a3bef0b5686c817732c498e625f6", "score": "0.8366585", "text": "def render_template_string(source, **context):\n ctx = _app_ctx_stack.top\n ctx.app.update_template_context(context)\n return _render(ctx.app.jinja_env.from_string(source), context, ctx.app)", "title": "" } ]
[ { "docid": "2591aad30c9c871ce95371d363072d1e", "score": "0.8500779", "text": "def render_template_string(source, **context):\n ctx = stack.top\n lookup = _lookup(ctx.app)\n template = Template(source, lookup=_lookup(ctx.app), **lookup.template_args)\n return _render(template, context, ctx.app)", "title": "" }, { "docid": "a015558512ae75e88f118dbe614b433e", "score": "0.774367", "text": "def render(self, template_string, context={}):\n try:\n t = template.Template(template_string)\n c = template.Context(context)\n return t.render(c)\n except Exception, e:\n return e", "title": "" }, { "docid": "0eafb67497567658857cbbca433a13a1", "score": "0.74834913", "text": "def render(text, context):\n return Template(text).render(context)", "title": "" }, { "docid": "0f37767455aa3ef260766b47bba8bd4e", "score": "0.7334508", "text": "def render(self, template_str: str, context: (dict, None)) -> str:\n if context is None:\n context = self.context\n t = self._env.from_string(template_str)\n return t.render(context)", "title": "" }, { "docid": "73a668ba9ee559f7619a6286f9e727e3", "score": "0.7262923", "text": "def render_template_to_string(self, template, context):\r\n return self.lookup.from_string(template).render(**context)", "title": "" }, { "docid": "9bd6406d3f4fbc0d702248d40f4c7aab", "score": "0.7225489", "text": "def render_template(process, template_string, context):\n from resolwe.flow.managers import manager\n\n # Get the appropriate expression engine. If none is defined, do not evaluate\n # any expressions.\n expression_engine = process.requirements.get(\"expression-engine\", None)\n if not expression_engine:\n return template_string\n\n return manager.get_expression_engine(expression_engine).evaluate_block(\n template_string, context\n )", "title": "" }, { "docid": "347e04caad1dc357e14b8a02dd6732bd", "score": "0.722019", "text": "def render_template(self, template_string):\n original_autoescape = self.context.autoescape\n self.context.autoescape = False\n\n template = Template(\"\")\n template_debug = getattr(\n settings, \"TEMPLATE_DEBUG\", template.engine.debug if hasattr(template, \"engine\") else False\n )\n if template_debug is True:\n origin = Origin(template_string)\n else:\n origin = None\n\n template.nodelist = self.compile_string(template_string, origin, template_debug)\n\n rendered = template.render(self.context)\n self.context.autoescape = original_autoescape\n return rendered", "title": "" }, { "docid": "8b9f4cc476e5b4b0aab14a45b3361a3a", "score": "0.71682847", "text": "def render_template(self, _filename, **context):\r\n template = self.environment.get_template(_filename)\r\n return template.render_unicode(**context)", "title": "" }, { "docid": "33a96759c3797eea318e2dc866eaf9cb", "score": "0.71345305", "text": "def render_from_string(template: str, ctx: Context) -> str:\n template = Template(template)\n return template.render(ctx)", "title": "" }, { "docid": "6f912dd17f5ab42b0e0e77e85d926a0a", "score": "0.7085872", "text": "def _render_string_from_template(\n source: str, template: _BaseInstallationTemplate\n) -> str:\n # TODO: we could use a while loop or recursive function to render the template until\n # there are no jinja-specific things. At this point, we support one level of\n # nesting.\n n_renders = 0\n max_renders = 20\n\n err = (\n \"A template included in this renderer raised an error. Please check the\"\n \" template definition. A required argument might not be included in the\"\n \" required arguments part of the template. 
Variables in the template should\"\n \" start with `self.`.\"\n )\n\n # Render the string again. This is sometimes necessary because some defaults in the\n # template are rendered as {{ self.X }}. These defaults need to be rendered again.\n\n while (\n _jinja_env.variable_start_string in source\n and _jinja_env.variable_end_string in source\n ):\n source = source.replace(\"self.\", \"template.\")\n tmpl = _jinja_env.from_string(source)\n try:\n source = tmpl.render(template=template)\n except jinja2.exceptions.UndefinedError as e:\n raise RendererError(err) from e\n n_renders += 1\n\n if n_renders > max_renders:\n raise RendererError(\n f\"reached maximum rendering iterations ({max_renders}). Templates\"\n f\" should not nest variables more than {max_renders} times.\"\n )\n return source", "title": "" }, { "docid": "648c29585ddd6bb2292ce23c335eb606", "score": "0.70501155", "text": "def render_template(self, _filename, **context):\r\n return self.environment.get_template(_filename).render(**context)", "title": "" }, { "docid": "5225976f5bf500b0ff313cb44cfe5977", "score": "0.70279455", "text": "def render_template_to_string(self, template, context):\r\n\r\n context = context.update(self.filters)\r\n\r\n return Template(template).render(**context)", "title": "" }, { "docid": "89c8169fbe0f615c5b3582dd4f0ba4de", "score": "0.69808406", "text": "def render_template(template, **kwargs):\n if template is None or not isinstance(template, basestring):\n return template\n tmpl = _get_environment().from_string(template)\n ctx = dict(kwargs)\n if global_context is not None:\n for k, v in global_context.iteritems():\n if not k in ctx:\n ctx[k] = v\n # expose the global context\n ctx['_ctx'] = global_context\n if 'self' in ctx:\n del ctx['self']\n try:\n return tmpl.render(**ctx)\n except TemplateSyntaxError as err:\n raise RenderError(template, err.message, line=err.lineno)", "title": "" }, { "docid": "90c7fd7858f045df59d08cba92720317", "score": "0.69634664", "text": "def render(template, context, template_dir=\"templates\"):\n j2env = Environment(loader=FileSystemLoader(template_dir))\n\n tpl = j2env.get_template(template)\n\n return tpl.render(context)", "title": "" }, { "docid": "5e5e87fc7c77e90c3c3d443ac311ecf8", "score": "0.68901604", "text": "def render_to_string(template_name, context):\n f = open(settings.PLSQL_TEMPLATE_DIR + '/' + template_name)\n template_string = f.read()\n f.close()\n\n t = Template(template_string, template_string)\n return t.render(context)", "title": "" }, { "docid": "abd6a0c5fefa51b56e45aefa549df2bd", "score": "0.68708706", "text": "def render_template(template, **context):\n if ':' not in template:\n template = '%s:%s' % (request.package, template)\n return jinja_env.get_template(template).render(context)", "title": "" }, { "docid": "0b71038c9e116f603838a6ebc2dc3d4e", "score": "0.6852936", "text": "def render_template(template_name, context={}):\n \n path = os.path.join(os.path.dirname(__file__), 'templates', template_name)\n return template.render(path, context)", "title": "" }, { "docid": "2bd970858079f8dee89e5a2075531f7b", "score": "0.68046623", "text": "def render(charm_dir, source, context):\n templates_dir = os.path.join(charm_dir, 'templates')\n template_env = Environment(loader=FileSystemLoader(templates_dir))\n\n try:\n template = template_env.get_template(source)\n except exceptions.TemplateNotFound as e:\n logger.error('Could not load template %s from %s.' 
%\n (source, templates_dir))\n raise e\n return template.render(context)", "title": "" }, { "docid": "7b8d23c2bd57c6c45704ce6be9e54e8b", "score": "0.68041766", "text": "def render_template(template_name, **context):\n ctx = stack.top\n return _render(_lookup(ctx.app).get_template(template_name),\n context, ctx.app)", "title": "" }, { "docid": "5c58b96dbf471fff73e438d854519db4", "score": "0.6776268", "text": "def _render(template, context, app):\n app.update_template_context(context)\n try:\n rv = template.render_unicode(**context)\n template_rendered.send(app, template=template, context=context)\n return rv\n except:\n translated = TemplateError(template)\n raise translated", "title": "" }, { "docid": "3f4b9513e740f73db1c03d979526a005", "score": "0.67737067", "text": "def render_template(self, template_path, context={}):\n template_str = self.load_resource(template_path)\n return Template(template_str).render(Context(context))", "title": "" }, { "docid": "edbc06a93a201c5dfc476c03adbb3400", "score": "0.67288864", "text": "def render_template(template, context):\n for key, value in context.iteritems():\n template = template.replace('{{ %s }}' % (key,), value)\n return template", "title": "" }, { "docid": "1c81d9467257d4ac56d374411817eced", "score": "0.6688212", "text": "def render_template(self, template_name, output_name, context):\r\n context['striphtml'] = striphtml\r\n template = self.lookup.get_template(template_name)\r\n data = template.render_unicode(**context)\r\n if output_name is not None:\r\n makedirs(os.path.dirname(output_name))\r\n with open(output_name, 'w+') as output:\r\n output.write(data)\r\n return data", "title": "" }, { "docid": "34bcc21ccca1219e7ca38523596bf115", "score": "0.6605197", "text": "def render_template(self, template_name, output_name, context):\r\n raise NotImplementedError()", "title": "" }, { "docid": "d41f469f1f76fbbe9b3b983200990041", "score": "0.6602932", "text": "def render(request, template, context):\n ctx = RequestContext(request, context)\n return render_to_response(template, ctx)", "title": "" }, { "docid": "0fc772dce5f52691fa6e1668d92e610a", "score": "0.6592469", "text": "def render(template, **context):\n return render_response(template, context)", "title": "" }, { "docid": "9ab7aa16c0d690df68425548f45a7ef1", "score": "0.6583847", "text": "def render_str(self, *template, **kw):\n tmp = JINJA_ENV.get_template(*template)\n return tmp.render(**kw)", "title": "" }, { "docid": "fb80040988830a32af5ba5f22e9493aa", "score": "0.65768784", "text": "def render_template(template_file, **context):\n t_env = get_template_env('templates')\n template = t_env.get_template(template_file)\n body = template.render(**context or {})\n return body", "title": "" }, { "docid": "6104a01a8f2545752b6e05836470eb9f", "score": "0.65703696", "text": "def _interpolate_str(\n data: str, context: Dict[str, Any], strict: bool = True\n) -> RenderedString:\n # Render the given string as a Django template with the given context.\n template = Template(data)\n template_context = Context(context, autoescape=False)\n rendered_string = RenderedString(template.render(template_context))\n\n # Extract a dict of variables used to render the string.\n rendered_context = {\n var: template_context.get(var, NOT_PROVIDED)\n for var in frozenset(\n v.filter_expression.var.lookups[0]\n for v in template.nodelist\n if isinstance(v, VariableNode)\n )\n }\n\n # Attach the render context to the string.\n rendered_string.__context__ = rendered_context\n\n missing_variables = frozenset(\n k for k, v in 
rendered_context.items() if v is NOT_PROVIDED\n )\n if strict and missing_variables:\n raise LookupError(\n f\"The template references variables that were not in the context \"\n f'provided: {\", \".join(missing_variables)}'\n )\n\n return rendered_string", "title": "" }, { "docid": "935008cf3c13f96af87356adb922d37a", "score": "0.6564538", "text": "def render_template(out, name, context, templates_dir, prefix=None):\n\n # support \"::\" syntax\n pp = [tenjin.PrefixedLinePreprocessor(prefix=prefix)\n if prefix else tenjin.PrefixedLinePreprocessor()]\n # disable HTML escaping\n template_globals = {\"to_str\": str, \"escape\": str}\n if templates_dir:\n engine = TemplateEngine(path=[templates_dir], pp=pp, cache=False)\n else:\n engine = TemplateEngine(pp=pp, cache=False)\n out.write(engine.render(name, context, template_globals))\n if 'KD_DICT' in context:\n return context['KD_DICT']\n if 'P4TBL_TYPES' in context:\n return context['P4TBL_TYPES']\n else:\n return None", "title": "" }, { "docid": "c6c91d4b2c0a27b962911b9852294b95", "score": "0.6560027", "text": "def render_to_string(template_name, context=None, request=None,\r\n processors=None):\r\n context = dict(context or {})\r\n if request is not None:\r\n context['request'] = request\r\n for processor in chain(get_standard_processors(), processors or ()):\r\n context.update(processor(request))\r\n return get_template(template_name).render(context)", "title": "" }, { "docid": "51c6b29d48abd64ebf5febd4799c058f", "score": "0.6551095", "text": "def render_template_to_string(self, template, context):\r\n raise NotImplementedError()", "title": "" }, { "docid": "f08ec913ff12add5f8152ff7c9b8b7f9", "score": "0.6512847", "text": "def render_template(self, template_file, **context) -> str:\n if not self.template_dir: raise InsufficientError('template_dir')\n try: \n with open(Path(self.template_dir)/template_file, 'r') as f:\n template:Template = Template(f.read())\n return template.render(context)\n except FileNotFoundError: raise TemplateNotFoundError(template_file)\n except exceptions.UndefinedError: raise Jinja2ContextDataError", "title": "" }, { "docid": "1198088871e9f64196d46307c077b79c", "score": "0.65125775", "text": "def _render(template, context, app):\n\n before_render_template.send(app, template=template, context=context)\n rv = template.render(context)\n template_rendered.send(app, template=template, context=context)\n return rv", "title": "" }, { "docid": "2a4ba2129529b076af1fe4fa76bf41e1", "score": "0.64339757", "text": "def _render(self, context):\n if not context:\n context = Context({})\n\n plain = self.template_plain.render(context)\n html = self.template_html.render(context)\n css = get_template(self.template_style).render(Context({}))\n\n p = Pynliner()\n html = p.from_string(html).with_cssString(css).run()\n\n return plain, html", "title": "" }, { "docid": "59b62f38b09a4d02a8b2b835178f1aa4", "score": "0.6406535", "text": "def render_template_with_globals(template, args):\n args = fill_template_arguments(args)\n debug('Rendering {} with args: {}'.format(template, args))\n return render_template(template, **args)", "title": "" }, { "docid": "1c76c93b75703a9cf070cddaf608e7da", "score": "0.6400795", "text": "def render(template_name, context={}):\n template = myenv.get_template(template_name)\n context['user'] = get_user()\n context['path'] = cherrypy.request.path_info\n context['params'] = cherrypy.request.params\n return template.render(context)", "title": "" }, { "docid": "aff98d638dfbb556b3716c50c2fab03c", "score": 
"0.63886666", "text": "def render_str(self, template, **params):\n t = JINJA_ENV.get_template(template)\n return t.render(params)", "title": "" }, { "docid": "03785d27a1ceb854ca3a56528ce23931", "score": "0.63627714", "text": "def render_str(template, **params):\n t = JINJA_ENV.get_template(template)\n return t.render(params)", "title": "" }, { "docid": "7876a6de1d41e75c7d8ed7ba1adb41f6", "score": "0.6349783", "text": "def render(self, context):\n try:\n template = self.template.resolve(context)\n # Does this quack like a Template?\n if not callable(getattr(template, 'render', None)):\n # If not, we'll try our cache, and get_template()\n template_name = template\n cache = context.render_context.dicts[0].setdefault(self, {})\n template = cache.get(template_name)\n if template is None:\n template = context.template.engine.get_template(template_name)\n cache[template_name] = template\n # Use the base.Template of a backends.django.Template.\n elif hasattr(template, 'template'):\n template = template.template\n values = {\n name: var.resolve(context)\n for name, var in self.extra_context.items()\n }\n if self.isolated_context:\n return template.render(context.new(values))\n with context.push(**values):\n return template.render(context)\n except Exception as e:\n if context.template.engine.debug:\n raise\n template_name = getattr(context, 'template_name', None) or 'unknown'\n warnings.warn(\n \"Rendering {%% include '%s' %%} raised %s. In Django 2.1, \"\n \"this exception will be raised rather than silenced and \"\n \"rendered as an empty string.\" %\n (template_name, e.__class__.__name__),\n RemovedInDjango21Warning,\n )\n logger.warning(\n \"Exception raised while rendering {%% include %%} for \"\n \"template '%s'. Empty string rendered instead.\",\n template_name,\n exc_info=True,\n )\n return ''", "title": "" }, { "docid": "c2547c14902b5c626cd62cb0a3511787", "score": "0.6347726", "text": "def render_str(template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)", "title": "" }, { "docid": "32dec2cb212a8ff0a093da4dc25f76b1", "score": "0.63275397", "text": "def render_str(self, template, **params):\n templ = jinja_env.get_template(template)\n return templ.render(params)", "title": "" }, { "docid": "035646f2e1a265afd52ddb35ace6c20f", "score": "0.6326421", "text": "def render_filter(ctx, template):\n if isinstance(template, basestring):\n template = ctx.environment.from_string(template)\n return template.render(ctx)", "title": "" }, { "docid": "3104d1fe5af67f1c9f68dff95c73d95a", "score": "0.6316073", "text": "def render_body(template, context):\n for key, value in context.iteritems():\n template = template.replace('{{ %s }}' % key, value)\n return template", "title": "" }, { "docid": "9089eb52191d6da8561798849a121fd2", "score": "0.63057363", "text": "def render_template(self, template_name, output_name, context):\r\n if jinja2 is None:\r\n req_missing(['jinja2'], 'use this theme')\r\n template = self.lookup.get_template(template_name)\r\n output = template.render(**context)\r\n if output_name is not None:\r\n makedirs(os.path.dirname(output_name))\r\n with open(output_name, 'w+') as output:\r\n output.write(output.encode('utf8'))\r\n return output", "title": "" }, { "docid": "d58483926e6356482bbc371f740136e0", "score": "0.62967765", "text": "def render_template(\n searchpath: Path,\n template_name: str,\n context: Dict[str, Any],\n extension: str,\n autoescape: bool = True,\n keep_trailing_newline: bool = False,\n) -> str:\n import jinja2\n\n template_loader = 
jinja2.FileSystemLoader(searchpath=searchpath)\n template_env = jinja2.Environment(\n loader=template_loader,\n undefined=jinja2.StrictUndefined,\n autoescape=autoescape,\n keep_trailing_newline=keep_trailing_newline,\n )\n template = template_env.get_template(f\"{template_name}_TEMPLATE{extension}.jinja2\")\n content: str = template.render(context)\n return content", "title": "" }, { "docid": "b8b9b618f43a1b23b763aee6e6848f07", "score": "0.628353", "text": "def render(self, **context):\n template = JinjaTemplate(self.get_content())\n return template.render(**context)", "title": "" }, { "docid": "3582dd4fc5f98e7183b06c9921d72f3b", "score": "0.6250179", "text": "def render(template_name, **kwargs):\r\n with open(template_name) as f:\r\n template = Template(f.read())\r\n return template.render(**kwargs)", "title": "" }, { "docid": "fe16f26c2d4bddac69e43d3ea6af479b", "score": "0.62370026", "text": "def render_template(self, name, **ctx):\n tmpl = self.get_template(name)\n return tmpl.render(ctx)", "title": "" }, { "docid": "0c9f6f3003022bd31ce361f1a0653f66", "score": "0.62352145", "text": "def render(self, template=None, context=None, encoding=None):\n template = template or self.template\n context = context or self.context\n\n template = self.render_sections(template, context)\n result = self.render_tags(template, context)\n if encoding is not None:\n result = result.encode(encoding)\n return result", "title": "" }, { "docid": "051856278a0e4d25425631ac1f3c71ee", "score": "0.6229935", "text": "def render_context(self, context, *args, **kwargs):\r\n if getattr(context, '_with_template', None) is None:\r\n context._with_template = self\r\n runtime._render_context(self, \r\n self.callable_, \r\n context, \r\n *args, \r\n **kwargs)", "title": "" }, { "docid": "9cf62eb638fe4f4f05606c1d5d6fe34d", "score": "0.6182085", "text": "def include(self, template_name, **kwargs):\n frame = sys._getframe(1)\n locals = frame.f_locals\n globals = frame.f_globals\n context = locals[\"_context\"].copy()\n context.update(kwargs)\n template = self.get_template(template_name, context, globals)\n return template.render(context, globals, _buf=locals[\"_buf\"])", "title": "" }, { "docid": "4afca3aa29d60bb5945acb025893dbd4", "score": "0.6169488", "text": "def render_template(template=None, context=None,\n method=None, string=None):\n kajiki = current_app.extensions['kajiki']\n method = kajiki._method_for(template, method)\n template = generate_template(template, context, method, string)\n # TODO kajiki has no arguments for the serializer\n #render_args = dict(method=kajiki.methods[method]['serializer'])\n #if 'doctype' in kajiki.methods[method]:\n # render_args['doctype'] = kajiki.methods[method]['doctype']\n #return template.render(**render_args)\n return template.render()", "title": "" }, { "docid": "1f473c3e91f689a1814c9eef19827c1d", "score": "0.61638755", "text": "def check_template_tag(self, template_string: str, context: dict, result_string: str) -> None:\n t = Template(template_string)\n c = Context(context)\n output = t.render(c)\n self.assertEqual(output, result_string)", "title": "" }, { "docid": "4a40c80b6114e508b33596942dd485eb", "score": "0.61482924", "text": "def render(self, template, **kw): \n render_out = \"\"\n jinja_env = self.app.config.get(\"jinja_env\")\n if jinja_env:\n t = jinja_env.get_template(template)\n render_out = t.render(kw)\n self.response.out.write(render_out)", "title": "" }, { "docid": "cd8af326978fb812beedac7473398a9e", "score": "0.61314666", "text": "def render_email(template, 
context=None):\n if context is None:\n context = {}\n email_body = render_jinja2_template(template, context)\n return email_body", "title": "" }, { "docid": "cc2546b8d00fe3c695455667d185579a", "score": "0.61176956", "text": "def render_template(cls, template_name: str, **kwargs) -> str:\n template_filename: str = \"{}.tmp\".format(template_name)\n template = cls.env.get_template(template_filename)\n return template.render(kwargs['input_data'])", "title": "" }, { "docid": "32df02fd23a0825fbff625132b084641", "score": "0.6117059", "text": "def render_template(template_name_or_list, **context):\n ctx = _app_ctx_stack.top\n ctx.app.update_template_context(context)\n return _render(\n ctx.app.jinja_env.get_or_select_template(template_name_or_list),\n context,\n ctx.app,\n )", "title": "" }, { "docid": "1407eb8f5f05f061d06032aef7bd8c3a", "score": "0.6114893", "text": "def render_and_write(template_name, context, output_name, output_dir):\n\n template = templates_env.get_template(template_name)\n f = open(path.join(output_dir, output_name), \"w\")\n f.write(template.render(**context))\n f.close()", "title": "" }, { "docid": "f255e2720129c505d4bdc6285915990f", "score": "0.61102927", "text": "def _render_template(\n self,\n mako_template_filepath: str,\n context: dict,\n translator: Translator\n ) -> str:\n try:\n template = Template(filename=mako_template_filepath)\n return template.render(\n _=translator.get_translation,\n config=self.config,\n **context\n )\n except Exception as exc:\n logger.exception(self, 'Failed to render email template: {}'.format(exc.__str__()))\n raise EmailTemplateError('Failed to render email template: {}'.format(exc.__str__()))", "title": "" }, { "docid": "c3ce590a89a49cdbbb8818fec833c8c2", "score": "0.6107454", "text": "def render_template(template, dirs = [ ], context = None, **kwargs):\n dirs = sequencify(dirs)\n if PATH[\"TEMPLATES\"] not in dirs:\n dirs.append(PATH[\"TEMPLATES\"])\n\n dirs = [osp.abspath(dir_) for dir_ in dirs]\n\n logger.info(\"Searching for templates within directories: %s\" % dirs)\n\n path = None\n for dir_ in dirs:\n temp = osp.join(dir_, template)\n if osp.exists(temp):\n path = temp\n break\n \n if not path:\n raise TemplateNotFoundError(\"Template %s not found.\" % template)\n \n string = read(path)\n rendered = string\n\n if not context:\n context = kwargs\n\n if context:\n template = Template(string)\n rendered = template.substitute(context)\n \n return rendered", "title": "" }, { "docid": "5da6a4d98552f41635d04f122af46184", "score": "0.60734165", "text": "def render_to_string(template_path: str, ctx: Context) -> str:\n template = loader.get_template(template_path)\n return template.render(ctx)", "title": "" }, { "docid": "b2819a830cb5664f3875f2e2d528389f", "score": "0.6047136", "text": "def _render(tpl, ctx, req):\n return render_to_response(tpl, ctx, RequestContext(req))", "title": "" }, { "docid": "2d7a4ba402fc250cd85678028ab2696b", "score": "0.6034613", "text": "def render_template(src: Path, dst: Path, context: dict[str, Any]) -> None:\n dst.parent.mkdir(parents=True, exist_ok=True)\n\n with src.open() as src_fd, dst.open(\"w\") as dst_fd:\n template = DottedIDsTemplate(src_fd.read())\n dst_fd.write(template.substitute(context))", "title": "" }, { "docid": "a882c36c2eb04afa041d6aa002fdb349", "score": "0.6016755", "text": "def render_tags(self, template, context):\n while 1:\n match = self.tag_re.search(template)\n if match is None:\n break\n\n tag, tag_type, tag_name = match.group(0, 1, 2)\n tag_name = tag_name.strip()\n try:\n func 
= modifiers[tag_type]\n replacement = func(self, tag_name, context)\n template = template.replace(tag, replacement)\n except (SyntaxError, KeyError):\n return u\"{{invalid template}}\"\n\n return template", "title": "" }, { "docid": "b1c304b9f441b7f293a350704788c8a0", "score": "0.60144174", "text": "def render_template(self, **data):\n return self.get_template().format(**data)", "title": "" }, { "docid": "4ba954284abae6a27d1241c5681b22eb", "score": "0.6013257", "text": "def render_email(context, text_template, html_template=None):\n try:\n t = loader.get_template(text_template)\n text_content = t.render(context)\n except TemplateDoesNotExist:\n text_content = None\n\n if html_template:\n try:\n html_content = Pynliner().from_string(\n render_to_string(html_template, context)).run()\n except TemplateDoesNotExist:\n html_content = None\n else:\n html_content = None\n\n if not text_content and not html_content:\n #print \"None of the following two templates are found: %s and %s\" % (text_template, html_template)\n raise NoContent(\"None of the following two templates are found: %s and %s\" % (text_template, html_template))\n\n return text_content, html_content", "title": "" }, { "docid": "7ef0bcc8f9a45fba0aa2675c192a5c4c", "score": "0.5996987", "text": "def handlebars_template(package, name, context):\n\n handlebars_check_context(context)\n\n package_name = package.replace(\"-\", \"_\")\n function_name = name.replace(\"-\", \"_\")\n\n # Enable for debugging\n if False:\n logging.info(\"Rendering template %s.%s with context: %s\" %\n (package, name, api.jsonify.jsonify(context)))\n\n module_name = (\"compiled_templates.%s_package.%s\"\n % (package_name, function_name))\n\n function = None\n\n if App.is_dev_server:\n # In dev mode, load all templates dynamically\n function = handlebars_dynamic_load(package, name)\n\n else:\n # In production mode, dynamically load the compiled template module and\n # find the function\n\n if not module_name in sys.modules:\n try:\n __import__(module_name)\n except ImportError:\n logging.info(\"Import error: %s\" % traceback.format_exc())\n\n if module_name in sys.modules:\n function = getattr(sys.modules[module_name], function_name)\n\n if function:\n try:\n ret = function(context,\n helpers=handlebars_helpers,\n partials=handlebars_partials)\n return u\"\".join(ret)\n except:\n logging.error(\"Exception running Handlebars template: %s\"\n % traceback.format_exc())\n return u\"\"\n else:\n return u\"\"", "title": "" }, { "docid": "3c2d9419beac6f0ccc8bf349ff62acac", "score": "0.5990973", "text": "def render(self, template, **kw):\r\n t = JINJA_ENV.get_template(template)\r\n self.response.write(t.render(kw))", "title": "" }, { "docid": "078653ed86a49646688d14b95ffba395", "score": "0.59891635", "text": "def render_template(\n template: str, destination: str, context: Dict[str, Any]) -> None:\n template_file = os.path.join(charm_dir(), 'templates', template)\n with open(template_file) as f:\n template_object = Template(f.read())\n\n with open(destination, 'w') as f:\n f.write(juju_header())\n f.write(template_object.render(context))\n os.chmod(destination, 0o444)", "title": "" }, { "docid": "9f2e44b0396835eb61feaa32985b7624", "score": "0.5973079", "text": "def text_template(text, variables):\n template = string.Template(text)\n return template.safe_substitute(variables)", "title": "" }, { "docid": "18f895a66a2e75d8122306225e5c33bc", "score": "0.5933755", "text": "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "title": "" }, { 
"docid": "18f895a66a2e75d8122306225e5c33bc", "score": "0.5933755", "text": "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "title": "" }, { "docid": "18f895a66a2e75d8122306225e5c33bc", "score": "0.5933755", "text": "def render(self, template, **kw):\n self.write(self.render_str(template, **kw))", "title": "" }, { "docid": "c5aadb562830e00c0f6529950ec69b61", "score": "0.59312475", "text": "def render_template(email, member=None):\n\n f = open(\"./template/template.html\", \"r\")\n html = f.read()\n f.close()\n\n text = \"\"\n\n for line in email.text.split(\"\\n\"):\n text += \"<p>%s</p>\" % line\n\n if member is not None:\n # Replace variables with personalized values for each member\n text = text.replace(\"{{first}}\", member.first).replace(\"{{last}}\", member.last).replace(\"{{email}}\", member.email)\n\n html = html.replace(\"{{content}}\", text)\n\n f = open(\"./template/template.css\", \"r\")\n css = f.read()\n f.close()\n\n return Pynliner().from_string(html).with_cssString(css).run()", "title": "" }, { "docid": "ebd7bd359aeae876d51e8b8d9d2cbf0d", "score": "0.5930599", "text": "def substitute(self, _str, ctx):\n if isinstance(_str, str) or isinstance(_str, unicode):\n _str = Template(_str).safe_substitute(ctx)\n return _str", "title": "" }, { "docid": "73b2516a2b677e9f6413d9340f073724", "score": "0.59170884", "text": "def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n params['username'] = self.username\n return t.render(params)", "title": "" }, { "docid": "487571eb4aa47b9569cb795f843aee1a", "score": "0.5910275", "text": "def compile_template_to_string(\n template: Path,\n context: Context,\n shell_command_working_directory: Optional[Path] = None,\n) -> str:\n if not shell_command_working_directory:\n shell_command_working_directory = template.parent\n\n env = jinja_environment(\n templates_folder=template.parent,\n shell_command_working_directory=shell_command_working_directory,\n )\n jinja_template = env.get_template(name=template.name)\n\n return jinja_template.render(context)", "title": "" }, { "docid": "809faa721ef9f9f8ca8e6eecf6dc443c", "score": "0.5877433", "text": "def render(self, *template, **kw):\n self.write(self.render_str(*template, **kw))", "title": "" }, { "docid": "67ccbc45d8280debb030cedd5b10dcc0", "score": "0.58703214", "text": "def _render_template(self):\n template = templates.Template.from_file(\n os.path.join(\n self.project.directory, \n 'templates',\n self.template_name)\n )\n contents = {\n 'content':self.markup(),\n }\n # add additional headers from the source into template context\n contents.update(self.page)\n return template.render(contents)", "title": "" }, { "docid": "1800e0948eb8044f7d80156d8d571011", "score": "0.5851413", "text": "def render(self, template):\n return re.sub(r'\\$\\$|\\${\\w+}', self._render_sub, template)", "title": "" }, { "docid": "2e3763cb7eb7c4eca591d34e9191c636", "score": "0.5844247", "text": "def template(ctx: Context, path: \"string\"):\n jinja_env = _get_template_engine(ctx)\n template_path = _extend_path(ctx, path)\n if template_path in tcache:\n template = tcache[template_path]\n else:\n template = jinja_env.get_template(template_path)\n tcache[template_path] = template\n\n resolver = ctx.get_resolver()\n\n try:\n out = template.render({\"{{resolver\": resolver})\n return out\n except UndefinedError as e:\n raise NotFoundException(ctx.owner, None, e.message)", "title": "" }, { "docid": "6e2cf5320a0949dac73ea9b982de71be", "score": "0.58427966", "text": "def 
EvaluateTemplate(template, env, escape=True):\n for key, val in env.items():\n if escape and not key.endswith(\"_unescaped\"):\n val = cgi.escape(val)\n template = template.replace('{{%s}}' % key, val)\n return template", "title": "" }, { "docid": "d8093edc88f306e5dc4c95141895888e", "score": "0.582872", "text": "def render(template, **params):\n return re.sub(r'{{\\s*([^}\\s]+)\\s*}}',\n lambda match: str(params.get(match.group(1), match.group(0))),\n template)", "title": "" }, { "docid": "71f584ab8db6160f2b60ee37e6e7739f", "score": "0.5822259", "text": "def rstjinja(app, docname, source):\n # Make sure we're outputting HTML\n if app.builder.format != \"html\":\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered", "title": "" }, { "docid": "32348fcf3387e0f180b45c1f3bd3e026", "score": "0.58207905", "text": "def render_email_template( template, **context ):\n\n subject, body = render_to_string( template, context ).split( '\\n', 1 )\n return subject.strip(), body", "title": "" }, { "docid": "7a3d3eddd67ac0ee5850f4c59f5bfa7b", "score": "0.5817247", "text": "def from_string(self, source, globals=None, template_class=None):\r\n globals = self.make_globals(globals)\r\n cls = template_class or self.template_class\r\n return cls.from_code(self, self.compile(source), globals, None)", "title": "" }, { "docid": "ec7ba75b381a7085d5a8f9b2684c4b2b", "score": "0.5808121", "text": "def renderfile(tpl, path, context):\n file_h = open(path, 'w')\n file_h.write(tpl.render(context))\n file_h.close()", "title": "" }, { "docid": "df10b36aab515c36a493de3dbd825b3f", "score": "0.58006716", "text": "def render_template(template_file, template_vars):\n templateLoader = jinja2.FileSystemLoader(searchpath=\"./templates/\")\n template_env = jinja2.Environment(loader=templateLoader)\n template = template_env.get_template(template_file)\n return template.render( template_vars )", "title": "" }, { "docid": "20e14542ebdc05ab47534887ccd14bf9", "score": "0.5797771", "text": "def render_to_response(self, context):\n return TemplateResponse(\n request=self.request, template=self.get_template_names(), context=context\n )", "title": "" }, { "docid": "d7eb993c4077c2525cbd4d9519da7800", "score": "0.5795039", "text": "def render_template(\n template: str,\n request: 'CoreRequest',\n content: dict[str, Any],\n suppress_global_variables: bool = True\n) -> str:\n\n if suppress_global_variables == 'infer':\n suppress_global_variables = template.startswith('mail_')\n\n registry = request.app.config.template_engine_registry\n page_template = registry._template_loaders['.pt'][template]\n\n variables = get_default_vars(\n request, content, suppress_global_variables=suppress_global_variables)\n\n return page_template.render(**variables)", "title": "" }, { "docid": "7e4a85d52eaa6a97c2a4b91f783ed911", "score": "0.5780865", "text": "def render_template(text, **context_args):\n template = Template(\"{% load bootstrap3 %}\" + text)\n if not 'form' in context_args:\n context_args['form'] = TestForm()\n return template.render(Context(context_args))", "title": "" }, { "docid": "05456eac42feb6620109966140d30fa8", "score": "0.5766187", "text": "def render(self, template, **params):\n self.write(self.render_str(template,\n current_user = self.get_current_user(), **params))", "title": "" }, { "docid": "7fafb027cf1e439c3f24d04308a42bab", "score": "0.5762089", "text": "def __call__(self, template_name, template_vars, **kwargs):\n config = tg.config._current_obj()\n\n # Gets 
template format from content type or from config options\n format = kwargs.get('format')\n if not format:\n format = self.format_for_content_type.get(tg.response.content_type)\n if not format:\n format = config.get('templating.chameleon.genshi.format')\n if not format:\n format = config.get('templating.genshi.method')\n if not format or format not in ('xml', 'text'):\n format = 'xml'\n\n def render_template():\n template = self.load_template(template_name, format=format)\n return literal(template.render(**template_vars))\n\n return cached_template(template_name, render_template,\n ns_options=('doctype', 'method'), **kwargs)", "title": "" }, { "docid": "c646fc053b655bc00c60e5db4bfee703", "score": "0.5760994", "text": "def compile(self, source, name=None, filename=None, raw=False,\r\n defer_init=False):\r\n source_hint = None\r\n try:\r\n if isinstance(source, basestring):\r\n source_hint = source\r\n source = self._parse(source, name, filename)\r\n if self.optimized:\r\n source = optimize(source, self)\r\n source = self._generate(source, name, filename,\r\n defer_init=defer_init)\r\n if raw:\r\n return source\r\n if filename is None:\r\n filename = '<template>'\r\n else:\r\n filename = _encode_filename(filename)\r\n return self._compile(source, filename)\r\n except TemplateSyntaxError:\r\n exc_info = sys.exc_info()\r\n self.handle_exception(exc_info, source_hint=source)", "title": "" }, { "docid": "d4b2550dddab8e6ce4c66ac855c6e2bd", "score": "0.57590085", "text": "def _exec_template(callable_, context, args=None, kwargs=None):\r\n template = context._with_template\r\n if template is not None and \\\r\n (template.format_exceptions or template.error_handler):\r\n error = None\r\n try:\r\n callable_(context, *args, **kwargs)\r\n except Exception, e:\r\n _render_error(template, context, e)\r\n except: \r\n e = sys.exc_info()[0]\r\n _render_error(template, context, e)\r\n else:\r\n callable_(context, *args, **kwargs)", "title": "" }, { "docid": "5a8791f85ee75c2e9c457867b9e13164", "score": "0.5756796", "text": "def render_template_text(email, member=None):\n\n text = email.text\n\n # Add extra newlines in plaintext emails\n text = text.replace(\"\\n\", \"\\n\\n\")\n\n # Replace anchor tags with plaintext links\n for anchor in re.finditer(\"<a href=\\\"([^\\\"]+)\\\">([^<]+)<\\/a>\", text):\n url = anchor.group(1)\n content = anchor.group(2)\n text = text.replace(\"<a href=\\\"%s\\\">%s</a>\" % (url, content), \"%s (%s)\" % (content, url))\n\n if member is not None:\n # Replace variables with personalized values for each member\n text = text.replace(\"{{first}}\", member.first).replace(\"{{last}}\", member.last).replace(\"{{email}}\", member.email)\n\n if \"<br>\" in text or \"<br/>\" in text or \"<br />\" in text:\n # Replace HTML newlines with plaintext newlines\n text = text.replace(\"<br>\", \"\\n\").replace(\"<br/>\", \"\\n\").replace(\"<br />\", \"\\n\")\n\n return text", "title": "" }, { "docid": "77465f32502bb5ca75429ca33174a884", "score": "0.5743868", "text": "def process_template(self, template_text, data_dict=None):\n if data_dict is None:\n data_dict = {}\n else:\n assert isinstance(data_dict, dict)\n data_dict = dict(data_dict)\n assert \"algo\" not in data_dict, \"algo already set by user data\"\n data_dict[\"algo\"] = self\n\n template = jinja2.Template(template_text)\n return template.render(data_dict)", "title": "" }, { "docid": "e6b2a4fc618839fa102fa8466272ee57", "score": "0.57370764", "text": "def test_context(self):\r\n collection = lookup.TemplateLookup()\r\n\r\n 
collection.put_string('main.html', \"\"\"\r\n <%namespace name=\"comp\" file=\"defs.html\"/>\r\n\r\n this is main. ${comp.def1()}\r\n ${comp.def2(\"there\")}\r\n\"\"\")\r\n\r\n collection.put_string('defs.html', \"\"\"\r\n <%def name=\"def1()\">\r\n def1: x is ${x}\r\n </%def>\r\n\r\n <%def name=\"def2(x)\">\r\n def2: x is ${x}\r\n </%def>\r\n\"\"\")\r\n\r\n assert flatten_result(collection.get_template('main.html').render(x=\"context x\")) == \"this is main. def1: x is context x def2: x is there\"", "title": "" }, { "docid": "08838bc6bb3c5d059ba8c0079ec334dd", "score": "0.5736208", "text": "def render_template(data, template_name, filters=None):\n env = Environment(loader=FileSystemLoader(''))\n\n if filters is not None:\n for key, value in filters.iteritems():\n env.filters[key] = value\n\n template = env.get_template(template_name)\n return template.render(feed=data).encode('utf-8')", "title": "" } ]
6287c1dd2a1e7937b08a68ddf5349771
T.__new__(S, ...) -> a new object with type S, a subtype of T
[ { "docid": "4cd774e88eb43b6786dfb6dbd212399c", "score": "0.72122014", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\r\n pass", "title": "" } ]
[ { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "4fb50b5679d0bc2205d77fa7704c8195", "score": "0.74194497", "text": "def __new__(self,S, ):\n pass", "title": "" }, { "docid": "3843fb29d325d0e9a5ccde53d9af8436", "score": "0.7382608", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "3843fb29d325d0e9a5ccde53d9af8436", "score": "0.7382608", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "3843fb29d325d0e9a5ccde53d9af8436", "score": "0.7382608", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "3843fb29d325d0e9a5ccde53d9af8436", "score": "0.7382608", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "84aa418b6c50688106f5e1f5c04561bd", "score": "0.7366072", "text": "def _new_object(cls):\n return cls.__new__(cls)", "title": "" }, { "docid": "84aa418b6c50688106f5e1f5c04561bd", "score": "0.7366072", 
"text": "def _new_object(cls):\n return cls.__new__(cls)", "title": "" }, { "docid": "0ca538c0ae200206717a9486704074a6", "score": "0.7291962", "text": "def new(self, t, **attrs):\n\n if 'recurse' in attrs:\n attrs['recurse'].update(self.attributes)\n else:\n attrs['recurse'] = self.attributes\n\n attrs.setdefault('parent', self)\n\n # instantiate an instance if we're given a type\n if not(istype(t) or isinstance(t,generic)):\n raise error.TypeError(self, 'base.new', message='%r is not a ptype class'% t.__class__)\n\n # if it's a type, then instantiate it\n if istype(t):\n t = t(**attrs)\n\n # if already instantiated, then update it's attributes\n elif isinstance(t,generic):\n t.update_attributes(**attrs)\n\n # give the instance a default name\n t.__name__ = attrs.get('__name__', hex(id(t)) )\n return t", "title": "" }, { "docid": "a453149e7ab3d88db00f8da5f8f78338", "score": "0.7284284", "text": "def __new__(cls):\n return object.__new__(cls)", "title": "" }, { "docid": "a453149e7ab3d88db00f8da5f8f78338", "score": "0.7284284", "text": "def __new__(cls):\n return object.__new__(cls)", "title": "" }, { "docid": "600b65c4dd051d7d3fbab32dd7b1ae98", "score": "0.72610724", "text": "def __newobj__(cls, *args):\n return cls.__new__(cls, *args)", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": 
"4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "4ed5c9722106eee1fdf63563e15eab4c", "score": "0.72600895", "text": "def __new__(S, *more): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "87940601e25435f4264d51c2d912cd54", "score": "0.7111726", "text": "def 
__new__(cls, *args, **kwargs) -> None:\n return object.__new__(cls)", "title": "" }, { "docid": "7d3fe66ade8448e03c921da394780ab2", "score": "0.7001853", "text": "def __new__(cls, *more):\r\n pass", "title": "" }, { "docid": "2733724a81729db4b9c2ba785677a254", "score": "0.69560206", "text": "def __new__(cls, *args, **kwargs):\n obj = object.__new__(cls)\n obj.__init__(*args, **kwargs)\n return obj()", "title": "" }, { "docid": "0b25d781608a505fa11b511e1c5a53c6", "score": "0.6847844", "text": "def __new__(cls, *args, **kwargs):\n\t\tif cls != type(cls._inst):\n\t\t\tcls._inst = object.__new__(cls, *args, **kwargs)\n\t\treturn cls._inst", "title": "" }, { "docid": "22660530c5d8ab09984c7997a39a7a6a", "score": "0.6818508", "text": "def __new__(metacls, nom, bases, dict):\n print(metacls, nom)\n return type.__new__(metacls, nom, bases, dict)", "title": "" }, { "docid": "d1b686bcf11d1209814ad5910c4213c8", "score": "0.67791396", "text": "def new_instance_of_same_type(parent):\n instance = type(parent).__new__(type(parent))\n instance.__init__()\n return instance", "title": "" }, { "docid": "ec006e0c7be72e9929c1168c2783a42b", "score": "0.67152846", "text": "def __basicnew__(self):\n return self.__new__(self)", "title": "" }, { "docid": "f28f1d277624952bf336eee9458e8b07", "score": "0.66792864", "text": "def __new__(cls, *na, **kwna):\n if not cls.is_initialized():\n cls._instances[cls] = super().__new__(cls)\n return cls._instances[cls]", "title": "" }, { "docid": "33f985d7d74f27ab690601037ed51383", "score": "0.66685194", "text": "def __new__(cls, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "1e56279f8f4a17ef7f5c19aa884b0405", "score": "0.664382", "text": "def __new__(cls, *args, **kwargs): # type: ignore\n if cls not in cls._instances:\n cls._instances[cls] = object.__new__(cls, *args, **kwargs)\n return cls._instances[cls]", "title": "" }, { "docid": "ec8211b49baad98d656be4eb4678ef74", "score": "0.66253453", "text": "def __new__(cls):\n return cls", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real 
signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "47f8d874943fad63102963dcc9b0a072", "score": "0.6620953", "text": "def __new__(*args, **kwargs): # real signature unknown\n pass", "title": "" } ]
b5590739bfefe0c7424bd86d5dede73b
returns a connection to Mechanical Turk.
[ { "docid": "3b946ba33bb556f7b8bda8e254aea043", "score": "0.79187876", "text": "def get_mt_conn(sandbox=settings.SANDBOX):\n if sandbox:\n host=\"mechanicalturk.sandbox.amazonaws.com\"\n else:\n host=\"mechanicalturk.amazonaws.com\"\n\n return connection.MTurkConnection(\n aws_access_key_id=settings.aws_id,\n aws_secret_access_key=settings.aws_secret,\n host=host)", "title": "" } ]
[ { "docid": "56a5a7204653cff73a19f5b0b1ac1780", "score": "0.627893", "text": "def connection():\n return get_connection()", "title": "" }, { "docid": "c29a8ed3dbdcb9193cd9222e6b99893b", "score": "0.6158203", "text": "def _connect(self):\n return httplib.HTTPConnection(self._host, self._port)", "title": "" }, { "docid": "3536ef050714fc24c3a51b4e3e47b3a1", "score": "0.61508757", "text": "def remote_connection(self):\n return DriverRemoteConnection(self.endpoint,'g')", "title": "" }, { "docid": "ea4b4310c7c8224cfa8fb9ed30aafa5d", "score": "0.6127698", "text": "def __get_tg_connection___(config: TgcliConfiguration, graph_name: Optional[str] = None) -> TigerGraphConnection:\n return TigerGraphConnection(\n host=config.server,\n username=config.username,\n password=config.password,\n clientVersion=config.client_version,\n graphname=graph_name or '',\n restppPort=config.restpp_port,\n gsPort=config.gs_port,\n apiToken='',\n useCert=config.use_auth\n )", "title": "" }, { "docid": "a1919717fa4acfa293e9f2b7304b099f", "score": "0.611027", "text": "def get_connection():\n pass", "title": "" }, { "docid": "c4e95a23eced284c110c543e1f8af436", "score": "0.6091545", "text": "def bot_connection():\n return BotConnection('dummybot', 'http://dummybot')", "title": "" }, { "docid": "8c33542b21ef72c67f61771dd2ecf9af", "score": "0.6083142", "text": "def get_conn(self):\n http_authorized = self._authorize()\n return build(\n 'dataflow', 'v1b3', http=http_authorized, cache_discovery=False)", "title": "" }, { "docid": "4143d72a156531e2856e4e121ad85448", "score": "0.60658413", "text": "def connect(self):\n self.wrdsconn = wrds.Connection()\n self.conn = self.wrdsconn.connect()\n return self.wrdsconn", "title": "" }, { "docid": "785ceaf1326da26e85971bf3c7a8ae99", "score": "0.59902596", "text": "def get_connection(self):\n return self.conn", "title": "" }, { "docid": "e5e344d04fb18a0123bad93b80fc1c59", "score": "0.59759575", "text": "def connection(self):\n return self.connection", "title": "" }, { "docid": "1ddc2b2ea4bfcf4a866b105fcdab8953", "score": "0.5972781", "text": "def connect(self):\n conn = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=self.settings.host,\n credentials=pika.credentials.PlainCredentials(\n username=self.settings.credentials.username,\n password=self.settings.credentials.password)))\n return conn", "title": "" }, { "docid": "654e24bd55ec03901ac9ff593fd70f15", "score": "0.5970635", "text": "def get_connection(self):\n return self.connection", "title": "" }, { "docid": "89a6ce69cffae26ea401dc9f9ef69a0a", "score": "0.59662926", "text": "def connect(self):\n # TODO: sockets, etc.\n host = self._connection_options.get('host', DEFAULT_HOST)\n port = self._connection_options.get('port', DEFAULT_PORT)\n self.connection = Tyrant(host=host, port=port)", "title": "" }, { "docid": "b10f42e12613f4a92f2c6792821af459", "score": "0.5954163", "text": "def connect_reddit(self):\r\n lg.debug('CointipBot::connect_reddit(): connecting to Reddit...')\r\n\r\n conn = praw.Reddit(user_agent = self.conf.reddit.auth.user)\r\n conn.login(self.conf.reddit.auth.user, self.conf.reddit.auth.password)\r\n\r\n lg.info(\"CointipBot::connect_reddit(): logged in to Reddit as %s\", self.conf.reddit.auth.user)\r\n return conn", "title": "" }, { "docid": "d65e9cb550e3306a9cbda4e6db471c36", "score": "0.5935847", "text": "def _Conn(self):\n cls_name = '{scheme}Connection'.format(scheme=self.scheme.upper())\n return getattr(http.client, cls_name)", "title": "" }, { "docid": "af580b2348f91ff2dc1b44a61cafe6bf", "score": 
"0.59355676", "text": "def conn(self):\n client = RiakClient(\n protocol=self.conf.get(\"protocol\"),\n http_port=self.conf.get(\"port\"),\n host=self.conf.get(\"host\"))\n\n conn = client.bucket(self.conf.get(\"db\"))\n conn.enable_search()\n return conn", "title": "" }, { "docid": "0f662151f658966cbf276f4dc9412293", "score": "0.59076524", "text": "def connection(self):\n\t\treturn self._connection", "title": "" }, { "docid": "40fad0c19bd62bd037720d99ec96780d", "score": "0.5906515", "text": "def get_connection():\n return Connection.from_environment()", "title": "" }, { "docid": "21efa182d470e392d9bb33f30eec02d0", "score": "0.5885212", "text": "def connect(self):\n self._client.session = self._client.create_session()\n return self._client.session", "title": "" }, { "docid": "02f519b9ed1ca4a3e47ad06b49b91881", "score": "0.58223546", "text": "def connection(self):\n # if there isn't an open connection, open one...\n if self._connection is None:\n self._connection = self.connect_fn(**self.connect_kwargs)\n\n return self._connection", "title": "" }, { "docid": "855bc012cfd747aff473afe3e2ffd44c", "score": "0.5804371", "text": "def connect(self):\n return self", "title": "" }, { "docid": "d6986303c49abf47402eaa10bd9ce32b", "score": "0.58029765", "text": "def _getURLOpener(self):\n return HTTPConnection(self['host'])", "title": "" }, { "docid": "5132131444313abce79e07eaa2d32c87", "score": "0.5773046", "text": "def get_connection(self, **options):\n options.setdefault('authentication', NTLM)\n options.setdefault('user', self.username)\n options.setdefault('password', self.password)\n return Connection(self.server, **options)", "title": "" }, { "docid": "f23e162f26487042d2091c4a1d0b7ede", "score": "0.57689136", "text": "def getConnection():", "title": "" }, { "docid": "b436acf3bcabaa034337e34a7a7d475e", "score": "0.573757", "text": "def get_connection(self):\n return self.__r, self.__conn", "title": "" }, { "docid": "f88dbf7ed53fd0d1751762afc0d6f23f", "score": "0.5724949", "text": "def connect(self):\n print \"Connecting to \" + str(self._url)\n return pika.SelectConnection(pika.URLParameters(self._url),\n self.on_connection_open,\n stop_ioloop_on_close=False)", "title": "" }, { "docid": "e47a8f83dbb4f6a18118faeceb071c8e", "score": "0.57212406", "text": "def getMQTTConnection(self):\n # Return the internal AWSIoTMQTTClient instance\n return self._AWSIoTMQTTClient", "title": "" }, { "docid": "1f6f8dd0ea57eb6b0a3c77f429af48ff", "score": "0.57123935", "text": "def _getURLOpener(self):\n key, cert = self.getKeyCert()\n return HTTPSConnection(self['host'], key_file=key, cert_file=cert)", "title": "" }, { "docid": "d4ce3d503c63326c2dcea71401fdc324", "score": "0.571099", "text": "def connection(self):\n return self._connection", "title": "" }, { "docid": "d4ce3d503c63326c2dcea71401fdc324", "score": "0.571099", "text": "def connection(self):\n return self._connection", "title": "" }, { "docid": "d5744e067ed87f752820b6df1e5aad87", "score": "0.56917465", "text": "def _get_connection(self):\n return self._acquire()", "title": "" }, { "docid": "a1fc5c741e5d657a71bc8ae25f8009f6", "score": "0.56911755", "text": "def connect(self):\n logger.debug('Connecting to %s', self._url)\n return pika.SelectConnection(parameters=pika.URLParameters(self._url),\n on_open_callback=self.on_connection_open)", "title": "" }, { "docid": "13c15f37579431b6229a8c20b9a0a7b2", "score": "0.56787", "text": "def con(backend):\n return backend.connection", "title": "" }, { "docid": "dcb21b11689091fc3a2bd825b818ae5b", "score": "0.5646509", 
"text": "def make_connection(self):\n connection = None\n channel = None\n try:\n connection = pika.BlockingConnection(self.connection_params)\n channel = connection.channel()\n except:\n print(\"Failed to make connection\")\n return connection, channel", "title": "" }, { "docid": "b9b8e9b0c4ea1f2bed1d7110d3781d38", "score": "0.56456053", "text": "def get_tg_connection(config: TgcliConfiguration,\n graph_name: Optional[str] = None,\n clean_init: bool = False) -> TigerGraphConnection:\n conn = __get_tg_connection___(config, graph_name)\n # Manually download dependencies and set flags in the underlying TigerGraphConnection\n # This gives more flexibility/control into what and where we download dependencies to\n init_dependencies(config, conn, clean_init=clean_init)\n conn.gsqlInitiated = True\n conn.downloadCert = False\n conn.downloadJar = False\n conn.jarLocation = get_jar_folder(config).expanduser().__str__()\n conn.certLocation = get_cert_filepath(config).expanduser().__str__()\n # Still call init for other dependencies (self.url) - but dependencies will not be downloaded\n conn.initGsql(conn.jarLocation, conn.certLocation)\n if config.use_auth and graph_name:\n # Get secret for the graph, if provided\n # TODO: Consider a class function for adding and retrieving secrets\n secret = config.secrets.get(graph_name, None)\n if not secret or clean_init:\n print(f\"Creating new secret for graph {graph_name} and saving to configuration.\")\n conn.graphname = graph_name\n secret = conn.createSecret()\n if not secret:\n raise ValueError(f\"Could not create a secret for the connection to graph {graph_name}.\")\n # Also save the new config\n config.secrets[graph_name] = secret\n upsert_config(config)\n # Finally, get the token\n conn.getToken(secret=secret)\n return conn", "title": "" }, { "docid": "9e1b6090dc3bf039f11c059cff02c1bd", "score": "0.5643282", "text": "def connect(self):\n LOGGER.info('Connecting to %s', self._url)\n return pika.TornadoConnection(pika.URLParameters(self._url),\n self.on_connection_open,\n on_open_error_callback=self.on_open_error)", "title": "" }, { "docid": "dc275b1b2fd580c4df1bec91d41a6739", "score": "0.56326294", "text": "def _getURLOpener(self):\n return HTTPSConnection(self[\"host\"])", "title": "" }, { "docid": "d14e3932a277ee8e1d5bb634b492d723", "score": "0.560438", "text": "def get_connection(name: str = None) -> Connection:\n return get_engine(name).connect()", "title": "" }, { "docid": "c9cfcdd05cb0352cde214d1ae715c754", "score": "0.5597934", "text": "def _connect_to_bdd(self):\n redis_co = redis.Redis(\\\n host=self._host,\n port=self._port,\n password=self._password)\n\n return redis_co", "title": "" }, { "docid": "ac24cb40f3f1c5612accdfcc84150cec", "score": "0.55907065", "text": "def connect(self):\n self.message(\"connect\")\n logger.info(\"going to connect to connection!!\", extra=self.extra)\n logger.debug(str(self.connection), extra=self.extra)\n return self.connection.connect() # will create connection.connection", "title": "" }, { "docid": "8dca176d8682b33d198c37a51c5b5b60", "score": "0.5586824", "text": "def _getURLOpener(self):\n return HTTPSConnection(self['host'])", "title": "" }, { "docid": "87a9954f7f678816c470a91e615829d1", "score": "0.55579937", "text": "def connect(self):\n LOGGER.info('Connecting to %s', self._url)\n return pika.SelectConnection(pika.URLParameters(self._url),\n on_open_callback=self.on_connection_open,\n on_open_error_callback=None,\n on_close_callback=self.on_connection_closed,\n stop_ioloop_on_close=False)", "title": "" }, 
{ "docid": "ca220ca3e152b5df27ed5bc5cbe3896f", "score": "0.5544147", "text": "def createConnection(self):\n\n conn = \"\"\n context = ssl._create_unverified_context()\n if self.type == \"https\":\n conn = httplib.HTTPSConnection(self.ip, context=context)\n sock = socket.create_connection(\n (conn.host, conn.port), conn.timeout, conn.source_address)\n conn.sock = ssl.wrap_socket(sock, conn.key_file, conn.cert_file,\n ssl_version=ssl.PROTOCOL_TLSv1)\n elif self.type == \"ssh\":\n conn = SSHConnection(self.ip, self.username, self.password)\n elif self.type == \"scp\":\n transport = paramiko.Transport((self.ip, 22))\n transport.connect(username=\"root\", password=self.password)\n conn = paramiko.SFTPClient.from_transport(transport)\n elif self.type == \"expect\":\n conn = ExpectConnection(self.ip, self.username, self.password)\n else:\n conn = httplib.HTTPConnection(self.ip)\n return conn", "title": "" }, { "docid": "121deb2178c7f4e14128b323a5f28a18", "score": "0.55438393", "text": "def _connect() -> Connection:\n if entry.options.get(CONF_UNAUTHENTICATED_MODE):\n _LOGGER.debug(\"Connecting in unauthenticated mode, reduced feature set\")\n connection = Connection(url, timeout=CONNECTION_TIMEOUT)\n else:\n _LOGGER.debug(\"Connecting in authenticated mode, full feature set\")\n username = entry.data.get(CONF_USERNAME) or \"\"\n password = entry.data.get(CONF_PASSWORD) or \"\"\n connection = Connection(\n url, username=username, password=password, timeout=CONNECTION_TIMEOUT\n )\n return connection", "title": "" }, { "docid": "41716fc9078a9e7e784509e88fb83ca2", "score": "0.5524809", "text": "def get_backend():\n return Connection()", "title": "" }, { "docid": "41716fc9078a9e7e784509e88fb83ca2", "score": "0.5524809", "text": "def get_backend():\n return Connection()", "title": "" }, { "docid": "41716fc9078a9e7e784509e88fb83ca2", "score": "0.5524809", "text": "def get_backend():\n return Connection()", "title": "" }, { "docid": "853e3b25d8c7a93de2fdd933e252f1ed", "score": "0.55162114", "text": "async def _get_connection(self):\n if self._connection is None:\n self._pool = await self._get_pool()\n self._connection = await self._pool.acquire()\n\n return self._connection", "title": "" }, { "docid": "996a46683674887c775ba2e2cf83a33e", "score": "0.55141294", "text": "def getConnection(**cParms) :\n if len(cParms.keys()) == 0 : \n return getConnectionJacuzzi().newConnection(**_defaultConnectionParms)\n else : \n return getConnectionJacuzzi().newConnection(**cParms)", "title": "" }, { "docid": "51919e318b88bbb5b86b0f9459a98fd7", "score": "0.5505527", "text": "def connect(hostname, securely):\n if securely:\n https_connection = httplib.HTTPSConnection(hostname)\n return https_connection\n else:\n http_connection = httplib.HTTPConnection(hostname)\n return http_connection", "title": "" }, { "docid": "d03ff662e5efe38834616e697a0c448b", "score": "0.54912704", "text": "def get_connection(self):\n return vertica_python.connect(**self.conn_info)", "title": "" }, { "docid": "a55a8fef119d748b9fb0242d71e16e64", "score": "0.54886323", "text": "def conn(self):\n return os_conn.Connection(verify=self.verify, **self.os_auth_args)", "title": "" }, { "docid": "2b17c98513cae1dc59eb4c48983f329b", "score": "0.5481048", "text": "def connect(self):\n self.connection, self.channel = self.__connect(self.user, self.password, self.host, self.port,\n self.url_parameters)", "title": "" }, { "docid": "fec30c87354d4c13392f8e69e4cb768d", "score": "0.54759896", "text": "def get_rethink_connection(props):\n\n\trethink_conn = 
r.connect(\n\t\thost=props.get(\"RETHINKDB\", \"RETHINK_HOST\"),\n\t\tport=props.get(\"RETHINKDB\", \"RETHINK_PORT\"),\n\t\tdb=props.get(\"RETHINKDB\", \"RETHINK_DB\"),\n\t\tuser=props.get(\"RETHINKDB\", \"RETHINK_USER\"),\n\t\tpassword=props.get(\"RETHINKDB\", \"RETHINK_PASSWORD\"),\n\t\ttimeout=int(props.get(\"RETHINKDB\", \"RETHINK_TIMEOUT\")),\n\t)\n\treturn rethink_conn", "title": "" }, { "docid": "8400ffead37b52ae849e744ab74325c8", "score": "0.54647315", "text": "def _connect(self) -> ClientLibrary:\n with contextlib.redirect_stderr(io.StringIO()):\n return ClientLibrary(url=self.ip, username=self.username,\n password=self.password, ssl_verify=False)", "title": "" }, { "docid": "6517734f2d07d05b6e4cd3ddf596a370", "score": "0.5463275", "text": "def open_connection():", "title": "" }, { "docid": "7634129384d7eac220f182da1becdb79", "score": "0.5456022", "text": "def getConnection(self):\n raise NotImplementedError(\"Shell.getConnection()\")", "title": "" }, { "docid": "ae3de7866e94ea20cc903ee3efaa1c09", "score": "0.54367614", "text": "def get_conn(self):\n if self.pentaho_cli:\n return self.pentaho_cli\n\n self.pentaho_cli = self.PentahoCarteClient(\n host=self.connection.host,\n port=self.connection.port,\n rep=self.extras.get('rep'),\n username=self.connection.login,\n password=self.connection.password,\n carte_username=self.extras.get('carte_username'),\n carte_password=self.extras.get('carte_password'),\n level=self.level)\n\n return self.pentaho_cli", "title": "" }, { "docid": "02ba2849ed025aa80e89af08882f8e45", "score": "0.5436113", "text": "def make_connection(self):\r\n exchange_class = getattr(ccxt, self.name)\r\n exch = exchange_class({\r\n 'enableRateLimit': True,\r\n })\r\n exch.load_markets()\r\n return exch", "title": "" }, { "docid": "e27cbb8ffaa0c1a27b6fbcae437759b4", "score": "0.54358673", "text": "def solid_connection(self):\n\t\treturn connect(self._dbapi,\n\t\t\tself._maxusage, self._setsession, *self._args, **self._kwargs)", "title": "" }, { "docid": "2794ff242d6e41c73eafb7c96dccc6f1", "score": "0.54164785", "text": "def get_connection(self):\n logging.info(\"[%s] Connecting to %s\", self._consumer_name, str(self._brokers))\n conn = stomp.Connection(host_and_ports=self._brokers)\n conn.set_listener(self._consumer_name, self._listener)\n conn.start()\n conn.connect(self._user, self._passcode, wait=True)\n # Give the connection threads a little breather\n time.sleep(0.5)\n return conn", "title": "" }, { "docid": "226213234bb545d658b19c96c62d18f8", "score": "0.5407084", "text": "def getconn(self):\n return self._conn", "title": "" }, { "docid": "47f4fd6edebbb84805b3907e80acef4d", "score": "0.5404162", "text": "def get_connection(self):\n\t\tfrom pymongo import MongoClient\n\n\t\tif self._connection is None:\n\t\t\tself._connection = MongoClient(host=self.url, max_pool_size=10)\n\n\t\treturn self._connection", "title": "" }, { "docid": "cb8298055a4140ad45c0067574942e08", "score": "0.5395113", "text": "def connection(self):\n # we have to keep this reference here,\n # if we just return self.engine.raw_connection(),\n # any cursor from that connection will fail\n # doing: engine.raw_connection().cursor().execute('') fails!\n if self._connection is None:\n self._connection = self.engine.raw_connection()\n\n # if a task or product calls client.connection.close(), we have to\n # re-open the connection\n if not self._connection.is_valid:\n self._connection = self.engine.raw_connection()\n\n return self._connection", "title": "" }, { "docid": 
"0bfec17a452c7c53a1c0aceb93b5ce72", "score": "0.53897625", "text": "def establish_connection(self):", "title": "" }, { "docid": "9bf886ae84870a408ee0afea2d7e7cf7", "score": "0.53859836", "text": "def getConnection(self):\n return connection['MirrDatabase']", "title": "" }, { "docid": "41365c28fbcbcd7d1d7511c3997bf4a7", "score": "0.53846216", "text": "def make_twitter_connection( self ):\n connection = TwitterConnection( self.credentials_file )\n self.conn = connection.connection", "title": "" }, { "docid": "44b0d66948d57320ffd233e8d628a059", "score": "0.5383322", "text": "def connect(self, keepAliveIntervalSecond=600):\n self._load_callbacks()\n return self._AWSIoTMQTTClient.connect(keepAliveIntervalSecond)", "title": "" }, { "docid": "3c77f14325d3cebee814416c3bbbe644", "score": "0.53798884", "text": "def get_connection(self, conn_handle):\n return self.conn_manager.get_connection(conn_handle)", "title": "" }, { "docid": "b2c27029fe61bff7a968bede64a0aa62", "score": "0.5378017", "text": "def get_connection(region='eu-west-1'):\n return connect_to_region(region)", "title": "" }, { "docid": "43d05f5fd60252924535ef2bd087ef5e", "score": "0.5373982", "text": "def connect(self):\n log.debug('Attempting connection to Accumulo proxy ...')\n log.debug('Connection args: \"%s:%s@%s:%s\"' %\n (self.user, '********', self.host, self.port))\n try:\n self.connection = pyaccumulo.Accumulo(host=self.host,\n port=int(self.port),\n user=self.user,\n password=self.password)\n self.meta.connection = self.connection\n self.meta_search.connection = self.connection\n self.image.connection = self.connection\n self.thumb.connection = self.connection\n self.audit.connection = self.connection\n self.gdelt.connection = self.connection\n except (TTransportException,\n AccumuloSecurityException) as err:\n log.error('Connection error: \"%s\"' % err)\n\n return self.connection", "title": "" }, { "docid": "4ef4888223dfde7340f04e98e9acccd7", "score": "0.53689206", "text": "def get_connection(self, ishttps=False, cookie=None, verbose=False):\r\n if self.__certificate is not None:\r\n if verbose:\r\n print(\"------>Cert\")\r\n return self.get_connection_secure(verbose)\r\n else:\r\n if verbose:\r\n print(\"------>Cookie\")\r\n return httplib.HTTPSConnection(self._ConnectionHandler__connHost,\r\n self._ConnectionHandler__connPort)", "title": "" }, { "docid": "54131b4eddc1bef2853df22d30845c78", "score": "0.5362701", "text": "def get_connection(self):\n\n try:\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect('burgers.db')\n return db\n except:\n # if we cant connect lets throw a 500 with an error message\n abort(500, 'Issue Connecting to the database')", "title": "" }, { "docid": "49add38642c530b07feb0d552ad44221", "score": "0.53365016", "text": "def get_conn():\n if not hasattr(g, 'redis_client'):\n g.redis_client = RedisClient()\n return g.redis_client", "title": "" }, { "docid": "680a7c1e1bf3d7d3bda71c894c332d4e", "score": "0.5328758", "text": "def connect(self):\n return self", "title": "" }, { "docid": "e946d14bf0c51993ac7238387121ca72", "score": "0.53257626", "text": "def __get_connection(self, connection_dict):\n return connect(\n db=connection_dict['db_name'],\n username=connection_dict['user'],\n password=connection_dict['password'],\n host=connection_dict['host'],\n port=connection_dict['port']\n )", "title": "" }, { "docid": "338d8778ed5115b5df1be3abbee016af", "score": "0.53096926", "text": "def _get_connection(self):\n try:\n if self._ncc_connection and 
self._ncc_connection.connected:\n return self._ncc_connection\n else:\n self._ncc_connection = manager.connect(\n host=self._host_ip, port=self._host_ssh_port,\n username=self._username, password=self._password,\n device_params={'name': \"csr\"}, timeout=self._timeout)\n if not self._itfcs_enabled:\n self._itfcs_enabled = self._enable_itfcs(\n self._ncc_connection)\n return self._ncc_connection\n except Exception as e:\n conn_params = {'host': self._host_ip, 'port': self._host_ssh_port,\n 'user': self._username,\n 'timeout': self._timeout, 'reason': e.message}\n raise cfg_exc.ConnectionException(**conn_params)", "title": "" }, { "docid": "302cec3aa12d843ce8f9fd39b0a68de7", "score": "0.5306083", "text": "def connect(self):\n return BlockingConnection(\n parameters=ConnectionParameters(\n host=self.host,\n port=self.port,\n virtual_host=self.vhost,\n credentials=PlainCredentials(\n username=self.user,\n password=self.passwd,\n )\n )\n )", "title": "" }, { "docid": "2bbcff104042e916ebfe176a53c2df26", "score": "0.52994365", "text": "def get_connection():\n\n client = pymongo.MongoClient(CONFIG[\"database\"])\n return client.hitmen", "title": "" }, { "docid": "962e22a8a7bd30d60b2300bae0bd5cdc", "score": "0.5298408", "text": "def conn():\n return __context__[\"netmiko_device\"].get(\"connection\")", "title": "" }, { "docid": "41832f195ce19b9faaddd30c887ef8fe", "score": "0.52926916", "text": "def request_connection():\n\n # If there is no connection in the global (\"g\") variable, try to establish it\n if not hasattr(g, \"conn\"):\n g.conn = None\n try:\n g.conn = sqlite3.connect(DATABASE)\n except Exception as e:\n print(e)\n\n return g.conn", "title": "" }, { "docid": "ef4344c304ebad40a1175b27d4b44d4c", "score": "0.5285611", "text": "def createConnection(self):\n uri = self.reader.createConnection()\n return Pyro.core.getAttrProxyForURI(uri)", "title": "" }, { "docid": "65e5076aefae8f2cdac187ab092a95e6", "score": "0.5269692", "text": "def connect(config_file=qcs.default_filename, section='info', remember_me=False, remember_me_always=False):\n # Retrieve login credentials.\n conf = qcconf.QualysConnectConfig(filename=config_file, section=section, remember_me=remember_me,\n remember_me_always=remember_me_always)\n connect = qcconn.QGConnector(conf.get_auth(),\n conf.get_hostname(),\n conf.proxies,\n conf.max_retries)\n logger.info(\"Finished building connector.\")\n return connect", "title": "" }, { "docid": "4f519480a8c27a392be67578b7e55e5b", "score": "0.5266888", "text": "def connect(*args, **kwargs):\n return Connection.connect(*args, **kwargs)", "title": "" }, { "docid": "14d41aadbfd88e7bcc1f87ad669ad5d8", "score": "0.5255701", "text": "def _connect(self):\n server = self._server\n port = self._port\n headers = self._headers\n ssl_enabled = self._ssl_enabled\n proxy_server, proxy_port, proxy_auth = self._get_proxy_config()\n\n if proxy_server and proxy_port:\n if ssl_enabled:\n context = self._get_ssl_context()\n self._conn = http.client.HTTPSConnection(\n proxy_server, proxy_port, context=context\n )\n else:\n self._conn = http.client.HTTPConnection(proxy_server, proxy_port)\n\n tunnel_headers = None\n if proxy_auth:\n tunnel_headers = {\"Proxy-Authorization\": proxy_auth}\n\n self._conn.set_tunnel(server, port, headers=tunnel_headers)\n else:\n if ssl_enabled:\n context = self._get_ssl_context()\n self._conn = http.client.HTTPSConnection(server, port, context=context)\n else:\n self._conn = http.client.HTTPConnection(server, port)\n\n self._conn.putrequest(\"POST\", self._url)\n 
self._conn.putheader(\"Transfer-Encoding\", \"chunked\")\n for header in headers:\n self._conn.putheader(header, headers[header])\n self._conn.endheaders()\n\n # Set blocking to False prevents recv\n # from blocking while waiting for a response.\n self._conn.sock.setblocking(False)\n self._bytes = b\"\"\n self._reset_retries()\n time.sleep(0.5)", "title": "" }, { "docid": "251e745130aad12529f99a6c4a812bda", "score": "0.52496", "text": "def post_get_connection(\n self, response: repositories.Connection\n ) -> repositories.Connection:\n return response", "title": "" }, { "docid": "f3c008bcd4905e9794ed8cd759904cda", "score": "0.52410686", "text": "def _get_session(self) -> requests.Session:\n session = requests.Session()\n retry = rq_retry.Retry(\n total=self.retries_number,\n read=self.retries_number,\n connect=self.retries_number,\n backoff_factor=self.backoff_factor,\n status_forcelist=self.status_forcelist,\n )\n adapter = rq_adapt.HTTPAdapter(max_retries=retry)\n session.mount(\"https://\", adapter)\n return session", "title": "" }, { "docid": "db944c32a46d21a6402048805b0cebf8", "score": "0.5240774", "text": "def open_connection(self):\n connection = tcp.TCPConnection(config.CONF['vim-api']['rpc_host'],\n config.CONF['vim-api']['rpc_port'])\n connection.connect(config.CONF['vim']['rpc_host'],\n config.CONF['vim']['rpc_port'])\n self._connections.append(connection)\n return connection", "title": "" }, { "docid": "79fa9459a1bdee50bb1ae4dc87286e33", "score": "0.5233544", "text": "def establish_connection(self):\n conninfo = self.client\n for name, default_value in self.default_connection_params.items():\n if not getattr(conninfo, name, None):\n setattr(conninfo, name, default_value)\n if conninfo.ssl:\n conninfo.qpid_transport = 'ssl'\n conninfo.transport_options['ssl_keyfile'] = conninfo.ssl[\n 'keyfile']\n conninfo.transport_options['ssl_certfile'] = conninfo.ssl[\n 'certfile']\n conninfo.transport_options['ssl_trustfile'] = conninfo.ssl[\n 'ca_certs']\n if conninfo.ssl['cert_reqs'] == ssl.CERT_REQUIRED:\n conninfo.transport_options['ssl_skip_hostname_check'] = False\n else:\n conninfo.transport_options['ssl_skip_hostname_check'] = True\n else:\n conninfo.qpid_transport = 'tcp'\n\n credentials = {}\n if conninfo.login_method is None:\n if conninfo.userid is not None and conninfo.password is not None:\n sasl_mech = 'PLAIN'\n credentials['username'] = conninfo.userid\n credentials['password'] = conninfo.password\n elif conninfo.userid is None and conninfo.password is not None:\n raise Exception(\n 'Password configured but no username. SASL PLAIN '\n 'requires a username when using a password.')\n elif conninfo.userid is not None and conninfo.password is None:\n raise Exception(\n 'Username configured but no password. 
SASL PLAIN '\n 'requires a password when using a username.')\n else:\n sasl_mech = 'ANONYMOUS'\n else:\n sasl_mech = conninfo.login_method\n if conninfo.userid is not None:\n credentials['username'] = conninfo.userid\n\n opts = {\n 'host': conninfo.hostname,\n 'port': conninfo.port,\n 'sasl_mechanisms': sasl_mech,\n 'timeout': conninfo.connect_timeout,\n 'transport': conninfo.qpid_transport\n }\n\n opts.update(credentials)\n opts.update(conninfo.transport_options)\n\n conn = self.Connection(**opts)\n conn.client = self.client\n self.session = conn.get_qpid_connection().session()\n self.session.set_message_received_notify_handler(\n self._qpid_message_ready_handler\n )\n conn.get_qpid_connection().set_async_exception_notify_handler(\n self._qpid_async_exception_notify_handler\n )\n self.session.set_async_exception_notify_handler(\n self._qpid_async_exception_notify_handler\n )\n return conn", "title": "" }, { "docid": "358c7306625b06d2a4a24e3aa96e7dcf", "score": "0.5217251", "text": "def connect(*args, **kwargs):\n return Connection(args, kwargs)", "title": "" }, { "docid": "bf1d9bb1baff67401275d80504f2e7b4", "score": "0.52161855", "text": "def connect_server(self) -> rpc.RPCSession:\n tracker = self.connect_tracker()\n session: rpc.RPCSession = tracker.request(\n key=self.tracker_key,\n priority=self.session_priority,\n session_timeout=self.session_timeout_sec,\n )\n return session", "title": "" }, { "docid": "d7a74737e101619910201bd1fa0686ed", "score": "0.52113956", "text": "def get_client(self):\n return self.session", "title": "" }, { "docid": "771964ad6b85edd071234fdc2be90619", "score": "0.51924706", "text": "async def _connect(self):\n logger.debug(\"connecting to the stream\")\n await self.client.setup\n if self.session is None:\n self.session = self.client._session\n kwargs = await self.client.headers.prepare_request(**self.kwargs)\n request = self.client.error_handler(self.session.request)\n\n return await request(timeout=0, **kwargs)", "title": "" }, { "docid": "93843003c088dd5d18f12abef3194e56", "score": "0.5192259", "text": "def connect(hostname, username, password):\n conn = BlitzGateway(username, password,\n host=hostname, secure=True)\n conn.connect()\n conn.c.enableKeepAlive(60)\n return conn", "title": "" }, { "docid": "4590052c41063f9f5a2c63bc5072d646", "score": "0.51867414", "text": "def connection(self, context=None):\n if context is None:\n context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n context.check_hostname = False\n context.verify_mode = ssl.CERT_NONE\n\n address = (self.args.host, self.args.port)\n with socket.create_connection(address, timeout=5) as sock:\n with context.wrap_socket(sock, server_hostname=self.args.host) as ssock:\n yield ssock", "title": "" }, { "docid": "92cec4f398f40733bbc00da42a838add", "score": "0.5182721", "text": "async def open_connection(self) -> _BaseConnector:\n connector = self.connector\n if connector is None:\n raise HekrAPIException('No connector attached to device')\n\n if not connector.is_connected:\n await connector.open_connection()\n\n return connector", "title": "" }, { "docid": "53f1e1f16aad6c0ad2a581c357916cdc", "score": "0.5178644", "text": "def get_connection(self):\n return sqlite3.connect(self.database)", "title": "" }, { "docid": "b2301e98f7801319ff39b021fde35517", "score": "0.51771706", "text": "def connect(cls, url: Optional[str] = None) -> \"YubiHsm\":\n return cls(get_backend(url))", "title": "" }, { "docid": "49bb63a97ecd309e79718a3ab9e12650", "score": "0.51625913", "text": "def connect(self):\n\n if 
self.getConfig().initDone is False:\n logging.debug('connecting without config init')\n raise ConfigError(self.__class__.__name__, Error.errDict['initBad']['errCode'], Error.errDict['initBad']['errMsg'])\n\n headers = {'user-agent': self.userAgent}\n formData = {'client_id': self.getConfig().getClientID(),\n 'client_secret': self.getConfig().getClientSecret(),\n 'grant_type': self.getConfig().getGrantType(),\n 'code': self.getConfig().getAuthorizationCode(),\n 'redirect_uri': self.getConfig().getRedirectURL()}\n r = requests.post(self.getConfig().getTokenServerURL(),\n headers=(headers),\n cert=(self.getConfig().getSSLCertPath(),\n self.getConfig().getSSLKeyPath()),\n data=(formData),\n verify=(self.getConfig().getSSLCertPath()))\n logging.debug(r.status_code)\n logging.debug(r.json())\n if (r.status_code == requests.codes.ok):\n self.connection['status'] = 'connected'\n self.connection['token'] = r.json()['access_token']\n self.connection['expires'] = datetime.datetime.now() + datetime.timedelta(0, r.json()['expires_in'], 0)\n if self.getSessionState() == '':\n self.setSessionState(str(uuid.uuid1()))\n else:\n raise ConnectError(self.__class__.__name__, str(r.status_code), 'Unable to connect to ADP')", "title": "" }, { "docid": "d28faf5e70ae0468852309db28d485a3", "score": "0.5158046", "text": "def __GetConnection(self, transaction):\r\n self.__connection_lock.acquire()\r\n request_tx = transaction and transaction.handle()\r\n if request_tx == 0:\r\n request_tx = None\r\n if request_tx != self.__current_transaction:\r\n raise apiproxy_errors.ApplicationError(\r\n datastore_pb.Error.BAD_REQUEST,\r\n 'Only one concurrent transaction per thread is permitted.')\r\n return self.__connection", "title": "" } ]
9b337da8305531e87506f09a76a8b1cf
Receives a message from the CCM using the configured adapter
[ { "docid": "13b97c92b7ab12f1f249fbb976afc713", "score": "0.73328966", "text": "def receive_message_from_ccm(self) -> str:\n with hw_lock:\n return self.ccm_adapter.receive_message_with_stop_byte()", "title": "" } ]
[ { "docid": "8c6cd8c20e92b52dfd75e49af8253aa7", "score": "0.7003274", "text": "def receive_message(self):\n pass", "title": "" }, { "docid": "586a9036a9863930cd60d2c5f50d037a", "score": "0.6642486", "text": "def rcv_message(self, message):\n pass", "title": "" }, { "docid": "1c961b58bd64ade3bf545d7a8d81ba51", "score": "0.6423138", "text": "def receive_data(self):\n c_message = utils.receive_data(self.active_socket, 'encrypted message')\n print('received encrypted message:\\n{c_m}'.format(c_m=base64.b64encode(c_message)))\n print('decrypting message ...\\n')\n message = utils.aes_decrypt(self.cipher, c_message)\n print('do you want to write the message to a file ? if so input a \"file:absolute_file_path\" (example = '\n 'file:/etc/my_file.txt) if not type no\\n')\n choice = input('input your choice: ')\n if choice[:5] == 'file:':\n utils.write_to_file(message, choice[5:])\n else:\n print('decrypted message: {message}'.format(message=message.decode()))\n self.received_messages.append(message.decode())", "title": "" }, { "docid": "dd5a479fd9ad746f230bada9262dffc1", "score": "0.63875616", "text": "async def recv_message(self):\n ...", "title": "" }, { "docid": "b619eb17ff87f506702d59cdebb42589", "score": "0.6357205", "text": "def receiveAndroid(self):\n try:\n msg = self.client_sock.recv(1024)\n dispatcher.send(message=msg, signal=ts.ANDROID_SIGNAL, sender=ts.ANDROID_SENDER)\n self.sendAndroid(\"This message has been received: \" + msg)\n # logging.info(\"Received \" + msg)\n return msg\n except BluetoothError:\n logging.info(\"Bluetooth Error\")\n self.connect()", "title": "" }, { "docid": "a35fb1577414e35798b91fb33b0b20f4", "score": "0.63511354", "text": "def receive_message(self, message: bytes):\r\n schc_message = SCHCParser.from_bytes(self.sm.protocol, message)\r\n if isinstance(schc_message, SCHCAck):\r\n return self.receive_schc_ack(schc_message)\r\n elif isinstance(schc_message, SCHCReceiverAbort):\r\n return self.receive_schc_receiver_abort(schc_message)\r\n else:\r\n raise ValueError(\"Bytes received could not be decoded\")", "title": "" }, { "docid": "505095bf9efa5bef0de9e96a0371bf44", "score": "0.6346035", "text": "def __receive_message_callback(self, message, counter):\n b = message.get_bytearray()\n size = len(b)\n config.logging.warning('FirebaseTest: __receive_message_callback - Message Received: \\n'\n ' Data: [{0}]'.format(b[:size].decode('utf-8')))\n self._execute_commands(b[:size].decode('utf-8'))\n return IoTHubMessageDispositionResult.ACCEPTED", "title": "" }, { "docid": "4d3426538bdb08e104bef77d754df46b", "score": "0.63265383", "text": "def message_received(self, msg):", "title": "" }, { "docid": "1543518f4d1d432d3d6eb6760c8af93f", "score": "0.62314844", "text": "def _receive_message(self):\n\n return self.connection.getresponse().read().decode()", "title": "" }, { "docid": "380e4a7e57081b1d8c883aa89e01151d", "score": "0.62233067", "text": "def consume(self, msg):\n pass", "title": "" }, { "docid": "05dbf10fc55d1adbb51fba04673a02ed", "score": "0.6222334", "text": "def receive_msg(self, msg):\n return None", "title": "" }, { "docid": "ab2339715ee1c23b49e25d4ee81afa54", "score": "0.6130102", "text": "def _receive(self, msg, msgID):\n raise NotImplementedError(\"The method '_receive' has to \"\n 'be implemented.')", "title": "" }, { "docid": "3fbcbade1a4b6a4aaef371d7fd859d56", "score": "0.6121717", "text": "def receive(self):\n AMOEBA_RECEIVE_DEBUG=0\n if AMOEBA_RECEIVE_DEBUG:\n print \"Receive message.\"\n # Retrieve the received data from the server and process the 
strings.", "title": "" }, { "docid": "64b73eb3d030e8b9b4919a55f8c2e4f3", "score": "0.6078694", "text": "def test_peer_to_peer_receive_short(self):\r\n self.can_messages = [\r\n (TestECU.MsgType.CANRX, 0x00DC0201, [1, 2, 3, 4, 5, 6, 7, 8], 0.0), # TP.CM RTS\r\n ]\r\n\r\n self.pdus = [\r\n (TestECU.MsgType.PDU, 56320, [1, 2, 3, 4, 5, 6, 7, 8], 0),\r\n ]\r\n\r\n self.ecu.subscribe(self._on_message)\r\n self._inject_messages_into_ecu()\r\n # wait until all messages are processed asynchronously\r\n while len(self.pdus)>0:\r\n time.sleep(0.500)\r\n # wait for final processing \r\n time.sleep(0.100)\r\n self.ecu.unsubscribe(self._on_message)", "title": "" }, { "docid": "04ae75e9a7d4cfb4025045b5c344c4c9", "score": "0.6002246", "text": "def receive(self) -> FBSPMessage:\n msg = self.channel.receive(self.timeout)\n if isinstance(msg, ErrorMessage):\n raise self.protocol.exception_for(msg)\n if msg is TIMEOUT:\n raise TimeoutError()\n if msg is INVALID:\n raise Error(\"Invalid response from service\")\n if msg.msg_type is MsgType.CLOSE:\n raise Error(\"Connection closed by service\")\n return msg", "title": "" }, { "docid": "262092244c27897b26fa0ce1fc6ea053", "score": "0.59878373", "text": "def recieve():\n\n while 1:\n msg_len = client.recv(HEADER).decode(FORMAT)\n if msg_len:\n msg_len = int(msg_len)\n msg = client.recv(msg_len).decode(FORMAT)\n if msg == DC_CMD:\n break\n if msg != \"\":\n chat.display_message(msg)", "title": "" }, { "docid": "3e93a651b82d8b4b24e459ba8a9db22f", "score": "0.59755003", "text": "def messageReceived(self, message):\r\n self.sendMessage(message)", "title": "" }, { "docid": "0d55edd0a89211f8b631d58bbd766cfa", "score": "0.59641314", "text": "async def on_message_received(_msg: \"api.ChatMessage\"):", "title": "" }, { "docid": "62a79038957d8091f05ceb6e4f5c70fc", "score": "0.5932269", "text": "def on_message(self, message, transport):", "title": "" }, { "docid": "c64118beb04b40b16a946b6cdb57c5dc", "score": "0.5929551", "text": "def message_arrived(self, msg):\n self.buffer += msg\n #is the whole message here?\n msgLen, msgData = Basic.read_short(self.buffer)\n if len(msgData) >= msgLen:\n msgData = msgData[:msgLen]\n #we just discard the rest of the cell, two messages are never packed in the same cell currently\n self.buffer = \"\"\n #what type of message is this?\n msgType, msgData = Basic.read_byte(msgData)\n #ok, now handle that message:\n for msgName in MESSAGE_CODES.keys():\n if msgType == MESSAGE_CODES[msgName]:\n #if we don't know how to handle this message, just close the circuit\n if msgName not in self.messageHandlers:\n log_msg(\"Remote request for %s, which we do not know how to handle\" % (msgName), 1)\n self.close()\n return\n #get the handler:\n handler = self.messageHandlers[msgName]\n #get the handler function:\n funcName = \"handle_%s\" % (msgName)\n if not hasattr(handler, funcName):\n raise Exception(\"%s cannot handle %s payment message?\" % (handler, msgName))\n f = getattr(handler, funcName)\n f(msgData)\n return\n #uhh, not sure how to handle this message:\n raise Exception(\"Unknown message type for payment message: %s\" % (msgType))", "title": "" }, { "docid": "38a3144e88b5d934fd9de435ca5863cf", "score": "0.59223104", "text": "def receive_request_message(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1806ae014a6b056f38d860d29511c9f0", "score": "0.5918319", "text": "def receive_message(self):\n\n if self._request.client_terminated:\n raise BadOperationException(\n 'Requested receive_message after receiving a closing '\n 
'handshake')\n\n while True:\n \n \n \n frame_type_str = self.receive_bytes(1)\n frame_type = ord(frame_type_str)\n if (frame_type & 0x80) == 0x80:\n \n \n length = self._read_payload_length_hixie75()\n if length > 0:\n _ = self.receive_bytes(length)\n \n \n if not self._enable_closing_handshake:\n continue\n\n if frame_type == 0xFF and length == 0:\n self._request.client_terminated = True\n\n if self._request.server_terminated:\n self._logger.debug(\n 'Received ack for server-initiated closing '\n 'handshake')\n return None\n\n self._logger.debug(\n 'Received client-initiated closing handshake')\n\n self._send_closing_handshake()\n self._logger.debug(\n 'Sent ack for client-initiated closing handshake')\n return None\n else:\n \n bytes = self._read_until('\\xff')\n \n \n \n message = bytes.decode('utf-8', 'replace')\n if frame_type == 0x00:\n return message", "title": "" }, { "docid": "cd68d90ef4eb23ec5e08c31a7d9171e4", "score": "0.5913723", "text": "def receive(self, packet):\n super(Messenger, self).receive(packet)\n\n if packet.data['tk'] == TrnsKind.message:\n if packet.data['pk'] == PcktKind.ack: # more\n self.acked = True\n self.another() # continue message\n elif packet.data['pk'] == PcktKind.resend: # resend\n self.acked = True\n self.resend() # resend missed segments\n elif packet.data['pk'] == PcktKind.done: # completed\n self.acked = True\n self.complete()\n elif packet.data['pk'] == PcktKind.nack: # rejected\n self.reject()", "title": "" }, { "docid": "b827062d0b2801d3ff2c3bfc98eddc3b", "score": "0.5911782", "text": "def c2_listener_callback(self, msg):\n self.channel_two_data = msg.data", "title": "" }, { "docid": "a1b8f3b5299df0a848c2f2fbd58484c9", "score": "0.5896654", "text": "def receive(self, message):\n raise NotImplemented()", "title": "" }, { "docid": "ac8f592c5806606f6e0d9a05ff5eeaa2", "score": "0.58768207", "text": "def receive(self):\n self.__print_verbose(\"Waiting for message\")\n msg = self.f.readline()\n if self.echo:\n self.__print_verbose(\"Received message: \\\"\" + msg +\"\\\"\")\n else:\n self.__print_verbose(\"Received message\")\n return msg", "title": "" }, { "docid": "df0e9157b058028fc3029284127dad62", "score": "0.58523536", "text": "def receiveMessage(self, msg):\r\n super(MessagingGateway, self).receiveMessage(msg)\r\n self.distributeMessage(msg, self.getId())", "title": "" }, { "docid": "33fda0e3cbf0338354068d4d71489f25", "score": "0.5851399", "text": "def _receive(self, raw_bytes, ancdata, src_addr):\n raise NotImplementedError", "title": "" }, { "docid": "e71ce4cf75e7077d00b6af805ab6e7c7", "score": "0.58372515", "text": "def _get_receive_message(self):\n return self.__receive_message", "title": "" }, { "docid": "ef2fd1df2838b638e3e6d9902b646545", "score": "0.58371794", "text": "def test_broadcast_receive_short(self):\r\n self.can_messages = [\r\n (TestECU.MsgType.CANRX, 0x00FEB201, [1, 2, 3, 4, 5, 6, 7, 8], 0.0), \r\n ]\r\n\r\n self.pdus = [\r\n (TestECU.MsgType.PDU, 65202, [1, 2, 3, 4, 5, 6, 7, 8]),\r\n ]\r\n\r\n self.ecu.subscribe(self._on_message)\r\n self._inject_messages_into_ecu()\r\n # wait until all messages are processed asynchronously\r\n while len(self.pdus)>0:\r\n time.sleep(0.500)\r\n # wait for final processing \r\n time.sleep(0.100)\r\n self.ecu.unsubscribe(self._on_message)", "title": "" }, { "docid": "853f945589d241335d75fb04a2f62cfc", "score": "0.5826855", "text": "def receive(self):\n pass", "title": "" }, { "docid": "7cfce78dbfc69eaf48234390349260f6", "score": "0.5818358", "text": "def receive(self, packet):\n super(Messengent, 
self).receive(packet)\n\n # resent message\n if packet.data['tk'] == TrnsKind.message:\n if packet.data['pk'] == PcktKind.message:\n self.message()\n elif packet.data['pk'] == PcktKind.nack: # rejected\n self.reject()", "title": "" }, { "docid": "754d590d3924985dc65d237436439808", "score": "0.5808071", "text": "def receive(self, message):\n body = [b.decode() for t, b in message.bodies('text/plain')][0]\n self.process_update(message.to, body)", "title": "" }, { "docid": "e7e2681514f337c80a6a12db9c697964", "score": "0.5807718", "text": "def data_received(self, message):\n pass", "title": "" }, { "docid": "88d36849c4fc30963fef41223cbe28fc", "score": "0.58055055", "text": "def receive(self):", "title": "" }, { "docid": "fec975a84d9977310d6de48d75fd3173", "score": "0.5766006", "text": "def receive_message(self) -> NetMessage:\n\n header = self.sock.recv(struct.calcsize(MSG_HEADER_FORMAT))\n msg_type, msg_len = struct.unpack_from(MSG_HEADER_FORMAT, header, 0)\n payload = self.sock.recv(msg_len)\n typed_message = NetMessage(msg_type)\n self.message_callbacks[typed_message](payload)\n return typed_message", "title": "" }, { "docid": "4c3616f0047bf9f4d6d43d61fed7e2b6", "score": "0.57630056", "text": "def MsgRead (self, Rcvid, Buffer, Bytes, Offset = 0):\n\treturn self._MsgRead (Rcvid, Buffer, Bytes, Offset)", "title": "" }, { "docid": "db842e1a2dd7520de813175109771f1f", "score": "0.57459533", "text": "def recv(self) -> BatsimMessage:\n if not self.is_connected:\n raise SystemError(\"Connection not opened.\")\n\n return self.__socket.recv_json(object_hook=BatsimMessageDecoder())", "title": "" }, { "docid": "5dc65855ad0f2da3d6ba3c1b6a3e5fc3", "score": "0.5738059", "text": "def _on_mqtt_message(self, client, userdata, msg):\n logging.debug(f\"RCV: {msg.topic}: {str(msg.payload)}\")\n payload = json.loads(msg.payload)\n self._on_message(msg.topic, payload)", "title": "" }, { "docid": "cb301f3a6cce950b0aeffcb228285633", "score": "0.571993", "text": "def read(self):\n # We wait for a message (and its size)\n data_len = self._rfxcom.read()\n hex_data_len = binascii.hexlify(data_len)\n int_data_len = int(hex_data_len, 16)\n print(\"----------------------------\")\n self._log.debug(\"----------------------------\")\n print(\"LENGTH = %s\" % int_data_len)\n self._log.debug(\"LENGTH = %s\" % int_data_len)\n\n if int_data_len != 0:\n # We read data\n data = self._rfxcom.read(int_data_len)\n hex_data = binascii.hexlify(data)\n print(\"DATA = %s\" % hex_data)\n self._log.debug(\"DATA = %s\" % hex_data)\n\n # Process data\n self._process_received_data(hex_data)", "title": "" }, { "docid": "449ae5167395368062ed12429200c9e6", "score": "0.571637", "text": "def send_message_to_ccm(self, message: str):\n with hw_lock:\n self.logger.debug(f\" PI --> CCM: {message}\")\n self.ccm_adapter.send_message(message)", "title": "" }, { "docid": "6e0738608a2d5c9bdbc1b2c5317dd27e", "score": "0.5704182", "text": "def callback_message(self, conn, mess):\n\n # Prepare to handle either private chats or group chats\n type = mess.getType()\n jid = mess.getFrom()\n props = mess.getProperties()\n text = mess.getBody()", "title": "" }, { "docid": "b213c943d54f4355adf4bf09328f5877", "score": "0.5692245", "text": "def extract_message(tcp_buffer):\r\n pass", "title": "" }, { "docid": "967942d059a45cc3f987ae220d06b85a", "score": "0.56857777", "text": "def decode_message(self, message):\n # Current readings request\n if message == \"CR\":\n print(\"Send Current Readings\")\n current_reading = self.get_reading()\n if current_reading:\n 
self.send_current_readings(current_reading)\n # Previous stored readings request\n elif message == \"NA\":\n print(\"Get Network Activity\")\n activity = self.get_network_activity()\n if activity:\n self.send_network_activity(activity)\n # Plot data request\n elif message == \"PD\":\n print(\"Send Plot Data\")\n plot_data = self.get_plot_data()\n if plot_data:\n self.send_plot_data(plot_data)\n # Unknown request\n else:\n print(\"Unsupported message, nothing to do!\")", "title": "" }, { "docid": "f0e931a6df75731713f83b1dc5e78bc3", "score": "0.5682126", "text": "def receive(self):\n while True:\n data = self._conn.recv(1024)\n if data: break\n DEBUG and print(\"Received message: \" + data.decode('utf-8'))\n return data", "title": "" }, { "docid": "a9036cfd8423098367427765fe3f0714", "score": "0.5679466", "text": "def receive(self):\n try:\n self._last_reply = self._last_reply.decode()\n except UnicodeDecodeError:\n raise PLConnectionProtocolError(f\"Can't decode device reply!\\n {self._last_reply}\")\n return LabDeviceReply(body=self._last_reply, parameters=self._last_reply_headers, content_type=\"json\")", "title": "" }, { "docid": "95cb42b3227595e270264dd252ec86ce", "score": "0.5668115", "text": "async def recv(self) -> T:\n if self._listener is None and self._messages.empty():\n raise exceptions.ChannelClosedError(\n \"the channel is closed and no residual message exist\"\n )\n\n loop = asyncio.get_event_loop()\n waiter: Future[T] = loop.create_future()\n\n def _release_waiter(f: Union[Future[T], Future[None]]) -> None:\n if waiter.done():\n return\n elif (exc := f.exception()) :\n waiter.set_exception(exc)\n elif (res := f.result()) :\n waiter.set_result(res)\n else:\n waiter.set_exception(\n exceptions.ChannelClosedError(\"the channel is closed\")\n )\n\n if self._listener:\n self._listener.add_done_callback(_release_waiter)\n waiter.add_done_callback(\n partial(self._listener.remove_done_callback, _release_waiter)\n )\n\n getter = asyncio.create_task(self._messages.get())\n getter.add_done_callback(_release_waiter)\n try:\n return await waiter\n finally:\n getter.cancel()", "title": "" }, { "docid": "1ce900faa0b3fb345628108148dec31a", "score": "0.565768", "text": "def receive(self, message):\n\n self.messages.append(message)\n\n if message.type == MessageTypes.PA_BLOCKS:\n print \"replying blocks\"\n reply = Message(MessageTypes.PA_BLOCKS_REPLY,\n self.public_key,\n self.interactions.get_blocks())\n self.interactions.add_blocks(message.payload)\n self.interface.send(message.sender, reply)\n if message.type == MessageTypes.PA_BLOCKS_REPLY:\n print \"sending score\"\n self.interactions.add_blocks(message.payload)\n reply = Message(MessageTypes.PA_SCORE,\n self.public_key,\n True)\n self.interface.send(message.sender, reply)\n self.endorsements.append(Endorsement([self.public_key, message.sender, True]))\n if message.type == MessageTypes.PA_SCORE:\n print \"replying score\"\n reply = Message(MessageTypes.PA_SCORE_REPLY,\n self.public_key,\n True)\n self.endorsements.append(Endorsement([self.public_key, message.sender, True]))\n self.interface.send(message.sender, reply)\n if message.type == MessageTypes.CHAIN:\n reply = Message(MessageTypes.CHAIN_REPLY,\n self.public_key,\n self.chain)\n self.interface.send(message.sender, reply)\n if message.type == MessageTypes.CHAIN_REPLY:\n self.interactions.add_blocks(message.payload.get_blocks())", "title": "" }, { "docid": "43222a76175ff0eb67b3a466e024da14", "score": "0.56537455", "text": "async def fetch_message(self, channel: SupportsInt, 
message: SupportsInt) -> Dict[str, Any]:\n return await self.request(Route(\n 'GET', '/channels/{channel_id}/messages/{message_id}',\n channel_id=int(channel), message_id=int(message)\n ))", "title": "" }, { "docid": "60da68bc8910fefca393f93340b1c7b4", "score": "0.56529784", "text": "def receive(self,msgQueue, callbackF):\n cParams = pika.ConnectionParameters(host=self.host)\n connection = pika.BlockingConnection(cParams)\n channel = connection.channel()\n\n channel.queue_declare(queue=msgQueue) # ensure queue created\n channel.basic_consume(callbackF, queue=msgQueue, no_ack=True)\n channel.start_consuming()\n # will only be reached once channel stops consuming (assumed in callback)\n connection.close()", "title": "" }, { "docid": "dbdf022702f906189bcf2c45e1334926", "score": "0.5637638", "text": "def _message_callback(mqttc, userdata, msg):\n callback(msg.topic, msg.payload.decode(\"utf-8\"), msg.qos)", "title": "" }, { "docid": "753bd139a774944ba9899d3298541707", "score": "0.56216216", "text": "def on_message(self, message):\n print(\"Received\",message)", "title": "" }, { "docid": "6ee78fbe54ae4defaf534e1e60692454", "score": "0.5619653", "text": "def channel_message(self, message_type, channel, data):\n pass", "title": "" }, { "docid": "d2586ecd517be62ab9783241322393bc", "score": "0.5618884", "text": "def onMessage(self, payload, isBinary):\n self.factory.communicate(self, payload, isBinary)", "title": "" }, { "docid": "b2d10881afea3260524a5407a3535c5e", "score": "0.5614722", "text": "async def receive(self, payload_enc: Union[str, bytes]) -> InboundMessage:\n if self._check_relay_context:\n await self.handle_relay_context(payload_enc)\n self._check_relay_context = False\n\n message = await self.parse_inbound(payload_enc)\n self.receive_inbound(message)\n return message", "title": "" }, { "docid": "eec4e2bab89e097a6f3cd45a747fef51", "score": "0.5613303", "text": "def on_accept_client(self, channel: Channel, msg: HelloMessage) -> None:", "title": "" }, { "docid": "dd6b7d75a38d43507137476ca5b06e01", "score": "0.56121206", "text": "def message_received(self, new_message):\n pass", "title": "" }, { "docid": "3254eb89994bb80fb5c8bb65545bb930", "score": "0.561203", "text": "def receive_messages(self) -> None:\n header = self.client_socket.recv(struct.calcsize(MSG_HEADER_FORMAT))\n msg_type, msg_len = struct.unpack_from(MSG_HEADER_FORMAT, header, 0)\n payload = self.client_socket.recv(msg_len)\n self.message_callbacks[NetMessage(msg_type)](payload)", "title": "" }, { "docid": "5ece0d043535e979ae6596524542a89e", "score": "0.56118125", "text": "def recv_msg(self, decode=True):\n data = self.recv_all(self.sock, self.data_size)\n if decode:\n return data.decode('utf-8')\n return data", "title": "" }, { "docid": "b0a4e4cb7fc8f6328f145f82b7ce2585", "score": "0.5611682", "text": "def __receive_message(self, return_pattern=None, multiline=False):\n try:\n # checking if a connection is there\n if self.__connection is not None:\n # if multiple lines are expected, keep reading lines until no more lines come in\n if multiline:\n answer = \"\"\n while True:\n line = self.__connection.readline()\n if line:\n answer += line.decode(self.standard_encoding)\n else:\n break\n # if just one line is expected, just read one line (faster than always waiting for the timeout)\n else:\n answer = self.__connection.readline()\n # decoding the answer using the standard settings from __init__\n answer = answer.decode(self.standard_encoding)\n\n # if the user wants a code check performed\n if return_pattern is not None:\n if 
type(return_pattern).__name__ != \"SRE_Pattern\":\n raise ValueError(\n \"The return code you specified was not a valid regular expression: {0}\".format(return_pattern)\n )\n\n if re.fullmatch(pattern=return_pattern, string=answer):\n # if the answer matches the desired pattern return the read value\n return re.match(pattern=return_pattern, string=answer).groups()\n else:\n self.logger.critical(\n \"Value Error. Serial device did not return correct return code. Send: \\\"{0}\\\". \"\n \"Received: \\\"{1}\\\".\".format(return_pattern, answer)\n )\n if self.__soft_fail_for_testing:\n return answer.strip()\n else:\n raise ValueError(\n \"Value Error. Serial device did not return correct return code. Send: \\\"{0}\\\". \"\n \"Received: \\\"{1}\\\".\".format(return_pattern, answer)\n )\n else:\n # if no return code checking was requested, just return the read value\n return answer.strip()\n else:\n raise Exception(\"Error. No connection to the device\")\n except Exception as e:\n # This is not very elegant, but lets the user know that the device failed while retaining the error\n # information\n raise Exception(\"Serial device message receive failed. Error Message: {0}\\n{1}\".format(e, e.__traceback__))", "title": "" }, { "docid": "85008ed6dc214544af856a1a74f413d8", "score": "0.5609495", "text": "def munch(self, unused):\n self._channel.add_on_cancel_callback(self.cancel_channel)\n self._consumer_tag = self._channel.basic_consume(self._process_message)", "title": "" }, { "docid": "cb7b51f89276e598e606fd0ce546c9d5", "score": "0.56038445", "text": "def _received_message_handler(self, channel, method, properties, body):\n pass", "title": "" }, { "docid": "a14438dbfcab1d95235b2c86650059cf", "score": "0.55976534", "text": "async def receive(self, text_data=None, bytes_data=None):\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n\n # Send message to room group\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n 'type': 'chat_message',\n 'message': message,\n }\n )", "title": "" }, { "docid": "f1f8e25c69f52107ea8acb8ba2166030", "score": "0.5596549", "text": "def processReceivedMessage(iTag, clsName, msgID, msg): #@NoSelf", "title": "" }, { "docid": "24ca69ae9dd7d572335204d03a0b6e68", "score": "0.55940956", "text": "def receive(self, message):\n\n try:\n # Ensure the messae decodes\n message = loads(message)\n\n # Make sure some basic stuctures are here\n originator = message[\"from\"]\n tx, action = message['Tx'], message['action']\n idx, deps, new_obj, data = tx\n\n except Exception as e:\n raise Exception(\"Badly formatted messages: %s\" % str(e))\n\n # Ignore messages we sent\n if self.name == originator:\n return\n \n if not idx == packageTx(data, deps, len(new_obj))[0]:\n raise Exception(\"Invalid transaction.\")\n\n if not self._within_TX(tx):\n if action == \"process\":\n # We are going to re-route the message on a correct channel\n msg = dumps({ \"action\":\"process\", \"from\":self.name, \"Tx\":tx })\n self.send(tx, msg)\n return\n else:\n raise Exception(\"Transaction not of interest.\")\n\n if action == \"vote\":\n # We process an incoming vote.\n n, l, v = message['vote']\n vote = (n, tuple(l), v)\n self.RLogger.info(\"Receive vote (%s) for %s (%s)\" % (v, idx[:6], self.name))\n \n if vote not in self.pending_vote[idx]:\n self.pending_vote[idx].add( vote )\n self.process(tx)\n \n if action == \"commit\":\n # We process an incoming commit.\n yesno = message['yesno']\n self.RLogger.info(\"Receive commit (%s) for %s (%s)\" % (yesno ,idx[:6], 
self.name))\n\n if yesno:\n self.do_commit_yes(tx)\n else:\n self.commit_no.add(idx)\n\n if action == \"process\":\n # We process a request\n self.process(tx)", "title": "" }, { "docid": "68168949c66b1c88e69521f0c153f83f", "score": "0.5591507", "text": "def receive(self, node, message):\n\t\tif self.recv: self.recv(node, pickle.loads(message))", "title": "" }, { "docid": "916d9907df5467e0865d8040011e350b", "score": "0.5574089", "text": "def on_receive(self, message, from_jid, otr_state):\n pass", "title": "" }, { "docid": "839e7bea280e0091919ad06c61302458", "score": "0.55709845", "text": "async def recv_message(self):\n return await asyncio.sleep(1e10)", "title": "" }, { "docid": "4a9b068e181e6e8d882f05f4785409a0", "score": "0.55663013", "text": "def _receive(self, _unused_frame, queue, control_center):\n\n log.info(\" [*] Waiting for Messages. To exit press CTRL+C\")\n\n self.channels[queue].basic_consume(queue=queue, on_message_callback=control_center.received_message_handler)", "title": "" }, { "docid": "ec225a6af1012c51cc61d4d191befa75", "score": "0.5563348", "text": "def dispatch_msg(self, plugin, message):\n self.__run_plugin(plugin, \"recv_msg\", message)", "title": "" }, { "docid": "4fe638da3b3688859bcb118f441e8135", "score": "0.5562309", "text": "def handle_message(self, msg, rcvtime):\n raise NotImplementedError", "title": "" }, { "docid": "ee71f825bad1937e204d3142d9fb2b45", "score": "0.5556639", "text": "def receive_message(self):\n message = None\n properties = None\n while not self.done:\n # Process message queue events, returning as soon as possible\n self.server_connection.process_data_events(time_limit=0)\n\n method, properties, body = self.server_channel.basic_get(queue=self.response_server_queue,\n no_ack=True)\n # Return as soon as we get a valid message\n if method is not None:\n print(method, properties, body)\n message = json.loads(body.decode('utf-8'))\n logger.info('Received a message: {} | {}'.format(body, properties.reply_to))\n break\n\n return message, properties", "title": "" }, { "docid": "cbaeedd359cd1e03ec2f4ce076ed77ab", "score": "0.5553836", "text": "def messageReceived(self, ammsgtype, msg):\n\t#print \"serial.messageReceived - %s\" % msg.tostring()\n if ammsgtype in self.__handlers:\n for handler in self.__handlers[ammsgtype]:\n handler(ammsgtype, msg[:])\n else:\n self.__logger.debug(msg.tostring())", "title": "" }, { "docid": "82e371c7f077e0146d70e89e35a04f8c", "score": "0.55513245", "text": "async def listen(self):\n\n while True:\n if not self.connected:\n # sleep and hope the checker fixes us\n await asyncio.sleep(5)\n continue\n\n data = await self.read_one_message()\n if data is None:\n await asyncio.sleep(1)\n continue\n\n mtype = self.find_balboa_mtype(data)\n if mtype is None:\n self.log.error(f\"Spa sent an unknown message: {data.hex()}\")\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_MOD_IDENT_RESP:\n self.parse_module_identification(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_STATUS_UPDATE:\n await self.parse_status_update(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_DEVICE_CONFIG_RESP:\n self.parse_device_configuration(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_SYS_INFO_RESP:\n self.parse_system_information(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_SETUP_PARAMS_RESP:\n self.parse_setup_parameters(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_FILTER_INFO_RESP:\n self.parse_filter_cycle_info(data)\n await asyncio.sleep(0.1)\n continue\n 
self.log.error(\"Unhandled mtype {0}\".format(mtype))", "title": "" }, { "docid": "36bf39777f80d1339cabd83aadc4f0e9", "score": "0.5551283", "text": "def sms_receive(self) -> Optional[dict]:\n ...", "title": "" }, { "docid": "0b8b38f07cf3f90ab19a4c48a697b3f3", "score": "0.554542", "text": "def receive(self):\n buffer = self.cnt.get_buffer_and_flush()\n for m in buffer:\n self.messages.append(m)", "title": "" }, { "docid": "4fca0deca003e941070970e746d1aa88", "score": "0.5529326", "text": "def tcp_message(self, flow: mitmproxy.tcp.TCPFlow):", "title": "" }, { "docid": "daa0e736e62df75557bd0889afbbe291", "score": "0.5512056", "text": "def dispatch_msg(self, plugin, message):\n self.__run_plugin(plugin, message.audit_name, \"recv_msg\", message)", "title": "" }, { "docid": "636680db535e96e63531f91db6829e7c", "score": "0.55093056", "text": "def got_message(self, address, message):\n self.callback(message)\n # print(message)\n return \"DONE\"", "title": "" }, { "docid": "5c4ace76e52155089a3ff2727725732f", "score": "0.5507662", "text": "def recv(self):\n logger.debug(\"LinkReceiver receiving message.\")\n \n # Keep trying until we get something\n while True:\n recv_data = self._recv()\n if recv_data is not None:\n return recv_data", "title": "" }, { "docid": "ecf96e3aaf194e4721e7ee691335f140", "score": "0.5503975", "text": "def handle_call(self, context, connection):\n print(\"Received message: %s\" % context.message)", "title": "" }, { "docid": "a557aae9874442022b83c803f8c62d39", "score": "0.54957265", "text": "def c1_listener_callback(self, msg):\n self.channel_one_data = msg.data", "title": "" }, { "docid": "e27f90269d2dc525f2ebe62533fe0011", "score": "0.54953676", "text": "def handleMessage(self, message):", "title": "" }, { "docid": "77f34777b6faed7a74d75e098fb1a3be", "score": "0.5492488", "text": "def on_msg(mqttc, obj, msg):\n # print(msg.topic + \" \" + str(msg.qos) + \" \" + str(msg.payload))\n LOG.debug(\"got message\")\n payload = msg.payload.decode(\"UTF-8\")\n topic = msg.topic\n LOG.debug(\"Message topic : %s\" % topic)", "title": "" }, { "docid": "c4e0a7b379037c42dbfb5060a0c539d1", "score": "0.54916763", "text": "def mqtt_message(self):\n time.sleep(1)\n return self.message", "title": "" }, { "docid": "15b349dddc8baebd5b07c1f0966d7609", "score": "0.549045", "text": "def test_broadcast_receive_long(self):\r\n self.can_messages = [\r\n (TestECU.MsgType.CANRX, 0x00ECFF01, [32, 20, 0, 3, 255, 0xB0, 0xFE, 0], 0.0), # TP.CM BAM (to global Address)\r\n (TestECU.MsgType.CANRX, 0x00EBFF01, [1, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 1\r\n (TestECU.MsgType.CANRX, 0x00EBFF01, [2, 1, 2, 3, 4, 5, 6, 7], 0.0), # TP.DT 2\r\n (TestECU.MsgType.CANRX, 0x00EBFF01, [3, 1, 2, 3, 4, 5, 6, 255], 0.0), # TP.DT 3\r\n ]\r\n\r\n self.pdus = [\r\n (TestECU.MsgType.PDU, 65200, [1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6]),\r\n ]\r\n\r\n self.ecu.subscribe(self._on_message)\r\n self._inject_messages_into_ecu()\r\n # wait until all messages are processed asynchronously\r\n while len(self.pdus)>0:\r\n time.sleep(0.500)\r\n # wait for final processing \r\n time.sleep(0.100)\r\n self.ecu.unsubscribe(self._on_message)", "title": "" }, { "docid": "2257298e838d5b35ca1fc043269858e4", "score": "0.5489604", "text": "def msg(self, msg):\n if self.simulator:\n message_string = \"{}\".format(msg)\n self.com.send(message_string.encode())\n resp = ''\n if '?' 
in msg:\n resp = self.com.recv(BUFF_SIZE).decode()\n return resp\n\n else:\n # Writes message\n message_string = \"{}\\r\\n\".format(msg).encode()\n\n # write(message_string)\n self.com.write(message_string)\n\n # Reads response if queried\n resp = ''\n if \"?\" in msg:\n resp = self.com.readline()\n resp = str(resp[:-2], 'utf-8') # Strips terminating chars\n if not resp:\n raise TimeoutError(\"Device timed out\")\n\n # Must wait 10 ms before sending another command\n time.sleep(.01)\n\n return resp", "title": "" }, { "docid": "2d360ddaddf7133b8317c974faf48c60", "score": "0.5483794", "text": "def handle_message(self, payload):\n assert isinstance(payload, dict)\n if 'getToken' in payload:\n token = channel.create_channel(self.client_id)\n self.send({'token': token})\n elif 'pickup' in payload:\n pickup_id = payload['pickup']\n memcache_key = _PICKUP_MEMCACHE_PREFIX + pickup_id\n results_get_pickup = yield memcache.Client().get_multi_async(\n [memcache_key])\n pickup_data = results_get_pickup.get(memcache_key)\n if pickup_data is not None:\n for service, messages_for_service in pickup_data.iteritems():\n for message in messages_for_service:\n self.send(message, service=service)\n else:\n logging.warning('Memcache read failure.')\n else: logging.error('Unrecognized system service message.')", "title": "" }, { "docid": "6b798b3af11ba9f3d15de282a793d788", "score": "0.5477442", "text": "def process_received_message(self, message):\n self.log.debug('Received \"%s\"', message)\n\n try:\n cmd, kwargs = bcp.decode_command_string(message)\n self.receive_queue.put((cmd, kwargs))\n except ValueError:\n self.log.error(\"DECODE BCP ERROR. Message: %s\", message)\n raise", "title": "" }, { "docid": "cf55405139c42b5244321789642a33af", "score": "0.54757184", "text": "def message(self, user, channel, message):\n pass", "title": "" }, { "docid": "62f7cd8370563af7fac48d838c97e207", "score": "0.54728395", "text": "def receive_data(self):\n return self.message_queue.get(True, 5)\n #return self.message_queue.get_nowait()", "title": "" }, { "docid": "bc18a79e72e810c3a2edcd48f1939667", "score": "0.54724187", "text": "def recv(self, *args, **kwargs):\n return self.connection.recv(*args, **kwargs)", "title": "" }, { "docid": "b9eb88cb32743abd2b664a3d54fd77b4", "score": "0.5469767", "text": "def on_message(self, service, msg):\n pass", "title": "" }, { "docid": "53c1beeda9bf9a6838e71d17447052be", "score": "0.546417", "text": "def receive(self):\n\n logging.info(\"transceiver: start receiving\")\n # request extra packet at start so there is always one ready\n self.xcvr.request_audio_output()\n while 1:\n\n if not self.command_queue.empty():\n break\n\n request = None\n if not self.request_queue.empty():\n request = self.request_queue.get()\n if request == \"frequency\" : \n self.xcvr.request_frequency()\n elif request == \"mode\" : \n self.xcvr.request_mode()\n elif request == \"tx\" : \n self.xcvr.request_TX()\n self.xcvr.request_audio_output()\n data = self.xcvr.get_audio()\n self.player.stdin.write(data)\n\n if request is not None:\n if request == \"frequency\" : \n self.frequency = self.xcvr.get_frequency()\n self.response_queue.put(self.frequency)\n elif request == \"mode\" : \n mode = self.xcvr.get_mode()\n self.response_queue.put(mode)\n if mode != self.mode:\n self.mode = mode\n break\n elif request == \"tx\" : \n tx_rx = self.xcvr.get_TX()\n self.response_queue.put(tx_rx)\n if tx_rx != self.tx_rx:\n self.tx_rx = tx_rx\n break\n\n # when complete, play the extra packet\n data = self.xcvr.get_audio() # process 
extra request once finished\n self.player.stdin.write(data)\n logging.info(\"transceiver: stop receiving\")", "title": "" }, { "docid": "b769d54988d108b754d46a9f81c37383", "score": "0.545968", "text": "def receive_msg():\n\n for msg in messager.listen():\n messages.append(msg)", "title": "" }, { "docid": "b769d54988d108b754d46a9f81c37383", "score": "0.545968", "text": "def receive_msg():\n\n for msg in messager.listen():\n messages.append(msg)", "title": "" }, { "docid": "e31fe1dba11f8a55e93d85bc8b26b353", "score": "0.5456528", "text": "def test_get_message(self):\n headers = { \n 'Accept': 'application/json',\n 'LoginRequired': 'special-key',\n }\n response = self.client.open(\n '/api/v1/channels/{channel_id}/messages/{message_id}'.format(channel_id=56, message_id=56),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "cab724291c29799a58123fd842a693aa", "score": "0.5455665", "text": "async def receive_message(reader: typing_classes.StreamReader) -> str:\n data = await reader.read(1024)\n return data.decode(errors='ignore')", "title": "" }, { "docid": "de30b4d7d5ae564f3cee8a358ca6c4a4", "score": "0.5454776", "text": "def received_msg(account, sender, message, conv, flags):\n\n obj = bus.get_object(\"im.pidgin.purple.PurpleService\", \"/im/pidgin/purple/PurpleObject\")\n purple = dbus.Interface(obj, \"im.pidgin.purple.PurpleInterface\")\n\n pmesg = purple.PurpleMarkupStripHtml(message)\n unread = purple.PurpleConversationGetData(conv, \"unseen-count\")\n\n type = purple.PurpleConversationGetType(conv)\n\n if unread or True:\n if type == 1: # That's PURPLE_CONV_TYPE_IM\n g.notify(\"Received IM message\", purple.PurpleConversationGetTitle(conv), pmesg)\n else:\n g.notify(\"Received Chat message\", sender, pmesg)", "title": "" }, { "docid": "8928138262fd5ba28960d6080a3439f2", "score": "0.5454483", "text": "def lbm_rcv_deliver(self, rcv, msg):\n self.app_rcv_callback(rcv, msg, self.app_clientd)", "title": "" } ]
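The passages above share one recurring pattern: read a raw reply from a serial device or message channel, then validate it against an expected form before handing it back to the caller. A minimal sketch of that pattern, assuming pyserial and a newline-terminated reply — the function name and error text are invented for illustration and do not come from any passage in this set:

import re
import serial

def read_validated(port: serial.Serial, expected_pattern: str) -> str:
    # Read one newline-terminated reply, strip the terminator, and make sure
    # it matches the caller-supplied regular expression before returning it.
    answer = port.readline().decode(errors="ignore").strip()
    if not re.fullmatch(expected_pattern, answer):
        raise ValueError("device replied %r, expected a match for %r"
                         % (answer, expected_pattern))
    return answer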
94d97e50552b686245c4cf9ada41c177
Permission Checking Function to be used as a Dependency for API endpoints. This is used as a helper. This will either return a User object to the calling method if the user meets the authentication requirements, or it will raise a CredentialException and prevent the method that depends on this from continuing.
[ { "docid": "70d2872c3c88f205c82a40f0a7ad2ad2", "score": "0.0", "text": "def current_user_researcher(token: str = Depends(oauth2_scheme)):\n user = get_current_user(token)\n if not any(role in [Roles.admin.name, Roles.researcher.name] for role in user.roles):\n raise CredentialException()\n\n return user", "title": "" } ]
[ { "docid": "5e224920ce0bc70b58729d9fc529e91d", "score": "0.7034291", "text": "def check_auth(func):\n\n @wraps(func)\n def invoke(self, *args, **kwargs):\n try:\n self.user = self.state.get_user()\n if self.user is None:\n print(\"Authentication is needed, run spock auth\")\n return\n return func(self, *args, **kwargs)\n except (tk.Forbidden, tk.NotFound) as e:\n # TODO better error messages\n print(e)\n\n return invoke", "title": "" }, { "docid": "9c183ffd7b906a8c05f0f231f81b6324", "score": "0.6600555", "text": "def authorized_access(func):\n @wraps(func)\n def decor(*args, **kwargs):\n print(get_current_user())\n if get_current_user() is None:\n return unauthorized_handler()\n elif not get_current_user().authorized:\n return abort(403)\n else:\n return func(*args, **kwargs)\n\n return decor", "title": "" }, { "docid": "fd9e443a33c3b891c19039d102f52b2f", "score": "0.64821035", "text": "def authorization_required(func):\n @functools.wraps(func)\n def decorated_function(*pa, **ka): # pylint: disable=missing-docstring\n if auth.is_authorized(ndb.Key(urlsafe=ka['key'])):\n return func(*pa, **ka)\n if not auth.is_logged_in():\n return abort(401)\n return abort(403)\n\n return decorated_function", "title": "" }, { "docid": "cd463cc1d707007b256213b66d9c8904", "score": "0.64415056", "text": "def required(method):\n @functools.wraps(method)\n def inner_method(self, *args, **kwargs):\n if not self.current_user:\n raise tornado.web.HTTPError(401)\n return method(self, *args, **kwargs)\n return inner_method", "title": "" }, { "docid": "abc910645dcc40e9d1560fc0a525fe33", "score": "0.6417043", "text": "def user_required(f):\n def decorator(*args, **kwargs):\n if not g.user:\n abort(401)\n return f(*args, **kwargs)\n return decorator", "title": "" }, { "docid": "94a50332767a575f570154b464d8eed7", "score": "0.63966864", "text": "def permission_required(permission):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.is_authenticated:\n abort(403)\n if not current_user.can(permission, current_user.curr_conf):\n abort(403)\n return f(*args, **kwargs)\n return decorated_function\n return decorator", "title": "" }, { "docid": "1039840cfb67ea036663b1b2462cf129", "score": "0.6382413", "text": "def authorize(func):\n def func_wrapper(*args, **kwargs):\n \"\"\"Wrap the function.\"\"\"\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException(\"Authorization required.\")\n return func(*args, **kwargs)\n return func_wrapper", "title": "" }, { "docid": "6f86255f20bc57edb3370291c40b3811", "score": "0.6320725", "text": "def auth_required():\n\n def decorator(endpoints_method):\n @wraps(endpoints_method)\n def validate(*args, **kwargs):\n \"\"\" Validates the User ID and token sent in an endpoints request.\"\"\"\n\n # Get the headers from the self arg\n headers = args[0].request_state.headers\n token = headers.get('Token')\n if headers.get('User-Id') is None:\n raise endpoints.UnauthorizedException(MESSAGE_UNAUTHORIZED)\n\n user_id = int(headers.get('User-Id'))\n user, timestamp = User.get_by_auth_token(user_id, token)\n if user:\n return endpoints_method(*args, **kwargs)\n else:\n logging.warn('Invalid API request! 
User ID:%s, token:%s'\n %(user_id, token))\n raise endpoints.UnauthorizedException(MESSAGE_UNAUTHORIZED)\n return validate\n return decorator", "title": "" }, { "docid": "0349a07186161c0a71e50c532d092f0f", "score": "0.630414", "text": "def _check_auth(func):\n @functools.wraps(func)\n def deco(*args, **kwargs):\n if 'Authorization' in request.headers:\n try:\n scheme, creds = request.headers['Authorization'].split(\n None, 1\n )\n except ValueError:\n return '', 401\n else:\n if scheme == 'Bearer':\n user = User.verify_auth_token(creds)\n if user is not None:\n g.user = user\n return func(*args, **kwargs)\n return '', 401\n return deco", "title": "" }, { "docid": "fd8e5c0d88e8fb022b045e6d2d774e44", "score": "0.6302787", "text": "def admin_credentials_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if 'username' not in request.headers or 'password' not in request.headers:\n return abort(401)\n if not ldapuser.authenticate(request.headers['username'], request.headers['password']):\n return abort(403)\n if not ldapuser.is_api_authorized(request.headers['username']):\n return abort(403)\n return f(*args, **kwargs)\n return decorated", "title": "" }, { "docid": "a6a4039a26761beeec593737fe030535", "score": "0.6287337", "text": "def requires_auth(self, f):\r\n @wraps(f)\r\n def decorated(*args, **kwargs):\r\n auth = request.authorization\r\n if not auth or not self.check_auth(auth.username, auth.password):\r\n return self.authenticate()\r\n return f(*args, **kwargs)\r\n return decorated", "title": "" }, { "docid": "208fefd390f0cdc23b1fc5870703a1e8", "score": "0.6263697", "text": "def local_user_required(decorated_function):\n def check_for_local_user(self, *kw, **kwargs):\n try:\n current_user = users.get_current_user()\n user = get_user_by_google_id(current_user.user_id())\n if not user:\n raise Exception\n except:\n self.abort(401)\n else:\n decorated_function(self, *kw, **kwargs)\n return check_for_local_user", "title": "" }, { "docid": "bc693f93f343f5a0b8d2025626db9a95", "score": "0.6183728", "text": "def requires_authentication(func):\n def endpoint_wrapper(*args, **kwargs):\n try:\n header = connexion.request.headers['Authorization']\n User.authenticate(header)\n except KeyError:\n logger.info(\"Missing 'Authorization' header\")\n raise AuthException(\"Missing 'Authorization' header\")\n except AuthenticationFailure as e:\n logger.info(\"Unable to authenticate: {}\".format(e))\n raise AuthException(\"Resource requires authentication\")\n else:\n return func(*args, **kwargs)\n\n return endpoint_wrapper", "title": "" }, { "docid": "a9bf9b2cebb9b16cdb7ff1619229446d", "score": "0.61732227", "text": "def check_auth(self, request):\n\n if not request.user.is_authenticated():\n from django.core.exceptions import PermissionDenied\n raise PermissionDenied", "title": "" }, { "docid": "a9bf9b2cebb9b16cdb7ff1619229446d", "score": "0.61732227", "text": "def check_auth(self, request):\n\n if not request.user.is_authenticated():\n from django.core.exceptions import PermissionDenied\n raise PermissionDenied", "title": "" }, { "docid": "a9bf9b2cebb9b16cdb7ff1619229446d", "score": "0.61732227", "text": "def check_auth(self, request):\n\n if not request.user.is_authenticated():\n from django.core.exceptions import PermissionDenied\n raise PermissionDenied", "title": "" }, { "docid": "664a22a4980590a182a4bc2541f3f186", "score": "0.6165246", "text": "def authenticated(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.current_user: \n raise HTTPError(401)\n 
return method(self, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "4a95fe246588bbcae5023ab97beaac59", "score": "0.6143246", "text": "def _auth_required_healthpro_or_config_admin(func):\n\n def wrapped(*args, **kwargs):\n if not is_config_admin(app_util.get_oauth_id()):\n _, user_info = get_validated_user_info()\n if not HEALTHPRO in user_info.get(\"roles\", []):\n logging.warning(\"User has roles {}, but HEALTHPRO or admin is required\".format(user_info.get(\"roles\")))\n raise Forbidden()\n return func(*args, **kwargs)\n\n return wrapped", "title": "" }, { "docid": "bbdbdfa1559f574b9fdd0e0053769e53", "score": "0.6111663", "text": "def require_permission(permission):\n def handler(f, *args, **kwargs):\n request = args[0]\n if check_permission(request, request.current_user, permission):\n return f(*args, **kwargs)\n elif request.current_user:\n raise HTTPForbidden()\n else:\n raise HTTPFound(request.route_url('user.login', _query={'redirect': encode_route(request)}))\n return decorator(handler)", "title": "" }, { "docid": "16e31dc169c8638440859b248e1702c1", "score": "0.61005646", "text": "def require_context(f):\n\n def wrapper(*args, **kwargs):\n if not is_admin_context(args[0]) and not is_user_context(args[0]):\n raise exception.NotAuthorized()\n return f(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "00ec2e2b9cb34ebe00620a63d20b8048", "score": "0.6080321", "text": "def require_authentication():\n if not current_user.is_authenticated:\n return current_app.login_manager.unauthorized()", "title": "" }, { "docid": "fd376234770beaa4d7ccb211694c74c1", "score": "0.6043147", "text": "def login_required(func):\n @wraps(func)\n def decor(*args, **kwargs):\n if get_current_user() is None:\n return unauthorized_handler()\n else:\n return func(*args, **kwargs)\n\n return decor", "title": "" }, { "docid": "81087392ac3ba9f7146145cb87852d79", "score": "0.6038824", "text": "def auth_required(f):\n\n @wraps(f)\n async def _wrapper(request):\n if 'auth' not in request:\n raise HTTPUnauthorized()\n return await f(request)\n\n return _wrapper", "title": "" }, { "docid": "690bfe268358413070982277e8cc2b16", "score": "0.6031002", "text": "async def check(self, req: Request, handler: Callable, client):\n if client.administrator or client.permissions[self.permission.value]:\n return\n\n raise HTTPForbidden(text=\"Not permitted\")", "title": "" }, { "docid": "0acc64c6851d439d7c4204987a0f9843", "score": "0.6015531", "text": "def check_ldap_access(view_func):\n def _decorated_view (request, pw_pk=None, **kwargs):\n if not pw_pk or check_authorization(pw_pk, request.user.username):\n return view_func(request, pw_pk, **kwargs)\n elif check_authorization(pw_pk, request.user.username) == None:\n return view_func(request, None, **kwargs) \n else:\n raise PermissionDenied(str(get_ldap_groups(request.user.username))+request.user.username)\n return wraps(view_func)(_decorated_view)", "title": "" }, { "docid": "e89c01d0aec7d23e69a52504401dd066", "score": "0.5991843", "text": "def session_oriented_request(self, func, *args, **kwargs):\n try:\n session = self.check_authorization()\n user = get_user(session.user)\n\n if user is None:\n return responses.client_error(404, 'User not found')\n\n return func(user, *args, **kwargs)\n except AuthException as ae:\n return responses.client_error(401, '{}'.format(ae))", "title": "" }, { "docid": "1933ecdf77a950c0c67577884966bc1b", "score": "0.59741074", "text": "def api_credentials_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if 'id' in 
request.headers and 'token' in request.headers:\n if not tokens.verify(request.headers['id'], request.headers['token']):\n return abort(403)\n if tokens.is_instance(request.headers['id']):\n if 'instance_id' in kwargs:\n instance = aws.get_instance(kwargs['instance_id'])\n if not instance:\n return abort(400)\n client_ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)\n if instance.private_ip_address != client_ip:\n return abort(403)\n return f(*args, **kwargs)\n\n if 'username' in request.headers and 'password' in request.headers:\n if not ldapuser.authenticate(request.headers['username'], request.headers['password']):\n return abort(403)\n if not ldapuser.is_api_authorized(request.headers['username']):\n return abort(403)\n return f(*args, **kwargs)\n\n return abort(401)\n\n return decorated", "title": "" }, { "docid": "506438b679fbfdde6a949cab44586763", "score": "0.5949683", "text": "def _validate_authorization(context):\n AUTHORIZE(context)\n return context.elevated()", "title": "" }, { "docid": "79812fe61a797b58d6967c2e6468ea2a", "score": "0.59406054", "text": "def requires_login(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if g.user is None:\n return access_denied()\n return f(*args, **kwargs)\n return decorated", "title": "" }, { "docid": "93d256654a930de698e8f8538c3483a2", "score": "0.5940354", "text": "def authenticated(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.current_user:\n logging.error(\"User not authorized: {}\".format(self.current_user))\n raise HTTPError(401)\n return method(self, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "6809dae3bd3c6051563ae62d91091fb8", "score": "0.5909586", "text": "def authenticated(method):\r\n @functools.wraps(method)\r\n def wrapper(self, *args, **kwargs):\r\n if not self.current_user:\r\n if self.request.method in (\"GET\", \"HEAD\"):\r\n self.redirect(\"/\")\r\n return\r\n self.abort(403)\r\n return method(self, *args, **kwargs)\r\n return wrapper", "title": "" }, { "docid": "b4d76201127532935ecfde847caaaa9a", "score": "0.5895598", "text": "def wrap(request):\n token = request.token\n if token is not None:\n user_type = token.user.role.name\n if user_type in user_types:\n return func(request)\n else:\n raise HTTPForbidden(\"You can not access this route\")\n else:\n raise HTTPForbidden(\"Need token for access\")", "title": "" }, { "docid": "f323651b4f73f44c03dc32facf452a8f", "score": "0.58904517", "text": "def is_accessible(self):\n return auth.check()", "title": "" }, { "docid": "64e07618fa76b10986aea0de46066886", "score": "0.5880261", "text": "def __get_user_permissions(request):\r\n auth_func = getattr(settings, 'AMF_AUTH_FUNC')\r\n if hasattr(request, 'amfcredentials'):\r\n username = request.amfcredentials.get('username', None)\r\n password = request.amfcredentials.get('password', None)\r\n else:\r\n username = None\r\n password = None\r\n if isinstance(auth_func, FunctionType):\r\n result = auth_func(request, username, password)\r\n elif isinstance(auth_func, StringTypes):\r\n func = amf.utils.get_func(auth_func)\r\n result = func(request, username, password)\r\n\r\n if isinstance(result, (StringTypes, ListType, TupleType)):\r\n return result\r\n else:\r\n return ()", "title": "" }, { "docid": "063288c19eb91ff85f8a8e8825f7f51a", "score": "0.5871366", "text": "def requires_authn(): # noqa: D202\n\n def wrapper(fn):\n @wraps(fn)\n def decorator(*args, **kwargs):\n check_authn(request)\n return fn(*args, **kwargs)\n\n return decorator\n\n return wrapper", 
"title": "" }, { "docid": "39394503a311be399512a17ff3886f5e", "score": "0.5844865", "text": "def check_permissions(self, request):\n obj = (\n hasattr(self, 'get_controlled_object') and self.get_controlled_object() or\n hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)\n )\n user = request.user\n\n # Get the permissions to check\n perms = self.get_required_permissions(self)\n\n # Check permissions\n has_permissions = self.perform_permissions_check(user, obj, perms)\n\n if not has_permissions and not user.is_authenticated:\n return HttpResponseRedirect('{}?{}={}'.format(\n resolve_url(self.login_url),\n self.redirect_field_name,\n quote(request.get_full_path())\n ))\n elif not has_permissions:\n raise PermissionDenied", "title": "" }, { "docid": "89c1e34e33b6705e0f0a9e96873cf2b5", "score": "0.5838557", "text": "def auth_required(handler_method):\n\n def check_auth(self, *args):\n root_url = Apputil.Url.get_root_url(self)\n redirect_url = Social.Gapi.get_redirect_uri(root_url)\n\n userinfo = _load_session_userinfo(self)\n\n logging.info(userinfo)\n\n if userinfo is None:\n return self.redirect(redirect_url)\n\n\n self.userid = Core.Session.load_session_userid(self)\n self.credentials = userinfo.credentials\n\n self.mirror_service = Apputil.Service.create('mirror', 'v1', self.credentials)\n\n if self.credentials:\n try:\n self.credentials.refresh(httplib2.Http())\n return handler_method(self, *args)\n except AccessTokenRefreshError:\n # Access has been revoked.\n Core.Session.store_userid(self, '')\n credentials_entity = userinfo.credentials\n if credentials_entity:\n credentials_entity.delete()\n\n self.redirect(redirect_url)\n\n return check_auth", "title": "" }, { "docid": "146020e8617698fa76d769a8ec8f9bae", "score": "0.5824737", "text": "def check_if_user_linked(provider_identifier, func, *args, **kwargs):\n\n if current_user.is_authenticated:\n if getattr(current_user, provider_identifier):\n return func(*args, **kwargs)\n else:\n return abort(403)\n\n else:\n return abort(401)", "title": "" }, { "docid": "17807e9f22433da0eceb8a073641062d", "score": "0.58089256", "text": "async def check_permission(request: web.Request, permission: Union[str, enum.Enum],\n context: Any = None) -> None:\n\n await check_authorized(request)\n allowed = await permits(request, permission, context)\n if not allowed:\n raise web.HTTPForbidden(reason=\"User does not have '{}' permission\".format(permission))", "title": "" }, { "docid": "d6b7dac13a60f412b30e4be27b38f197", "score": "0.58044344", "text": "def authenticate(func):\n @wraps(func)\n def with_authentication(self, *args):\n user_id = self.get_argument('userId', None)\n pwd = self.get_argument('pwd', None)\n\n if None in (user_id, pwd):\n log.warning(\"Authentication failed! Missing auth parameters\")\n return self.error({\"message\": \"Authentication failed! Missing auth parameters\"}, 401)\n\n claro_user = self.settings['config']['claro/v1/user']['value']\n claro_pass = self.settings['config']['claro/v1/password']['value']\n\n if claro_user != user_id or claro_pass != pwd:\n log.error(\"Could not authenticate such a combination: userId {0} and pwd {1}\".format(user_id, pwd))\n return self.error({\"message\": \"Could not authenticate! 
Wrong userId/pwd combination\"}, 401)\n\n return func(self, *args)\n return with_authentication", "title": "" }, { "docid": "c253cad6c4681ddee550853c7902269c", "score": "0.58027345", "text": "def requires_auth(f):\n\n @wraps(f)\n def decorated(*decorator_args, **decorator_kwargs):\n auth = request.authorization\n\n if args.enable_auth:\n if not auth or not verify_authentication(auth.username, auth.password):\n response = Response(\"Need basic auth to request the resources\", 401, {\n \"WWW-Authenticate\": '\"Basic realm=\"Login Required\"'\n })\n return response\n\n return f(*decorator_args, **decorator_kwargs)\n\n return decorated", "title": "" }, { "docid": "4c18347ee35ed7558eb8729321c069bc", "score": "0.5783026", "text": "def perm_check(dummy): # pragma: no cover\n def wrap_outer(func):\n def wrap_inner(*args, **kargs):\n # get the argument frrom the func (method route(cls, request)\n kclass = args[0]\n request = args[1]\n\n # count number of permisson passed\n passed_count = 0\n flag_pass = False\n flag_login_required = False\n for perm in kclass.x_perm:\n if perm == 'login':\n flag_login_required = True\n if Utils.has_perm(request, perm):\n passed_count = passed_count + 1\n\n # determine pass or not\n if len(kclass.x_perm) == passed_count:\n flag_pass = True\n\n # handle failed permission\n if flag_pass == False:\n if flag_login_required:\n result = {'return':'302',\n 'msg':'Please login to continue',\n 'redirect_url':'/login/' }\n return kclass()._render(request, result)\n else:\n result = {'return':'302',\n 'msg':'Sorry, you do not have permission to continue',\n 'redirect_url':'/'}\n return kclass()._render(request, result)\n # handle passed permission\n return func(*args, **kargs)\n return wrap_inner\n return wrap_outer", "title": "" }, { "docid": "f40725d2259e30628b97bbf42db159ce", "score": "0.5781028", "text": "def UserRequired(handler):\n def checkLogin(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect(self.uri_for('recruiterLogin'), abort=True)\n else:\n return handler(self, *args, **kwargs)\n return checkLogin", "title": "" }, { "docid": "7282b26766954256db3a31237877f3b2", "score": "0.57790387", "text": "def require_auth(f):\n\n @wraps(f)\n def wrapped(*args, **kwargs):\n request = args[0]\n if not request.user or not request.user.is_authenticated:\n return redirect('/admin/login')\n else:\n return f(*args, **kwargs)\n\n return wrapped", "title": "" }, { "docid": "f4f4d5ee7e0a427ef8165df402f1aeaf", "score": "0.5778319", "text": "def user_required(handler):\n def check_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect(self.uri_for('login'), abort=True)\n else:\n return handler(self, *args, **kwargs)\n\n return check_login", "title": "" }, { "docid": "ec4a6d5c5637835f46fa2c623ba0a589", "score": "0.57674855", "text": "def admin_required(func):\n @functools.wraps(func)\n def decorated_function(*pa, **ka): # pylint: disable=missing-docstring\n if auth.is_admin():\n return func(*pa, **ka)\n if not auth.is_logged_in():\n return abort(401)\n return abort(403)\n\n return decorated_function", "title": "" }, { "docid": "9ecf09da57e965947198d0e2d92d523a", "score": "0.57638514", "text": "def CheckAccess(self, handler, username):\n handler_name = handler.__class__.__name__\n\n if (self.api_groups and\n not self.CheckPermissions(username, self.api_access)):\n stats.STATS.IncrementCounter(\"grr_api_auth_fail\",\n fields=[handler_name, username])\n raise access_control.UnauthorizedAccess(\n 
\"User %s not in groups %s, authorized for %s API access.\" % (\n username, self.api_groups, self.api_access))\n\n if handler_name in self.ACLedHandlers():\n if not self.CheckPermissions(username, handler_name):\n stats.STATS.IncrementCounter(\"grr_api_auth_fail\",\n fields=[handler_name, username])\n raise access_control.UnauthorizedAccess(\n \"User %s not authorized for handler %s.\" % (\n username, handler_name))\n elif not handler.enabled_by_default:\n raise access_control.UnauthorizedAccess(\n \"%s has enabled_by_default=False and no explicit ACL set. Add ACL\"\n \" to ACL list (see API.HandlerACLFile config option) to use \"\n \"this API\" % handler)\n\n logging.debug(\"Authorizing %s for API %s\", username, handler_name)\n stats.STATS.IncrementCounter(\"grr_api_auth_success\",\n fields=[handler_name, username])", "title": "" }, { "docid": "e5f3c177a107f9ade06f0a8eb8f76193", "score": "0.57541656", "text": "def admin_only(func):\n\n @functools.wraps(func)\n def wraps(*args, **kwargs):\n if slack_blueprint.config['DEBUG']:\n return func(*args, **kwargs)\n slack_token = slack_blueprint.config['SLACK_OAUTH_TOKEN']\n if not slack_token:\n flask.abort(401)\n slack = slacker.Slacker(slack_token)\n user_id = flask.request.form['user_id']\n flask.current_app.logger.info(f'[access]: performing admin check...')\n try:\n info = slack.users.info(user_id)\n except slacker.Error:\n flask.current_app.logger.info(f'[access]: {user_id} not found')\n flask.abort(403)\n if info.body['user']['is_admin']:\n return func(*args, **kwargs)\n flask.current_app.logger.error('[access]: failed admin-only test')\n flask.abort(403)\n\n return wraps", "title": "" }, { "docid": "71ce77a8a7e677ea18fc31fe3fb7bc51", "score": "0.5752249", "text": "def auth_required(role_allowed_list):\n\n if not role_allowed_list:\n raise AssertionError(\"Can't call auth_required with empty role_allowed_list.\")\n\n if not isinstance(role_allowed_list, list):\n role_allowed_list = [role_allowed_list]\n\n def auth_required_wrapper(func):\n def wrapped(*args, **kwargs):\n appid = GAE_PROJECT\n request.log_record = base_api.log_api_request()\n # Only enforce HTTPS and auth for external requests; requests made for data generation\n # are allowed through (when enabled).\n acceptable_hosts = (\"None\", \"testbed-test\", \"testapp\", \"localhost\", \"127.0.0.1\")\n # logging.info(str(request.headers))\n if not is_self_request():\n if request.scheme.lower() != \"https\" and appid not in acceptable_hosts:\n raise Unauthorized(f\"HTTPS is required for {appid}\", www_authenticate='Bearer realm=\"rdr\"')\n check_auth(role_allowed_list)\n request.logged = False\n result = func(*args, **kwargs)\n if request.logged is False:\n try:\n base_api.log_api_request(log=request.log_record)\n except RuntimeError:\n # Unittests don't always setup a valid flask request context.\n pass\n return result\n\n return wrapped\n\n return auth_required_wrapper", "title": "" }, { "docid": "6046ec6c668f464503a4c46e1ea84261", "score": "0.5748685", "text": "async def __call__(self, request: Request):\n checked = False\n for permission_class in self.permissions_classes:\n checked = await permission_class(request=request).check_permission()\n if checked:\n break\n\n if not checked:\n raise HTTPException(status_code=self.status_code, detail={\"detail\": self.error_msg})", "title": "" }, { "docid": "670f6e89b529b78b9bb1bc3c2c056dff", "score": "0.57462764", "text": "def _auth_advice(*args, **kwargs):\n token = self._auth_context.current_token\n\n if token is None:\n raise 
AuthMissingTokenError(\n \"Denied access to a protected service: Method was \"\n \"called outside of an Authorization Context?\"\n )\n\n self._auth_service.check_permission(\n access_token=token,\n in_domain=domain,\n to_execute=method_name,\n args=args,\n kwargs=kwargs\n )\n\n return wrapped_f(*args, **kwargs)", "title": "" }, { "docid": "46e6a1057420f547a3948f15f5781d89", "score": "0.57433856", "text": "def require_user(func):\n def decorated(self, *args, **kwargs):\n if not self.user:\n self.redirect(users.create_login_url(self.request.url))\n else:\n return func(self, *args, **kwargs)\n return decorated", "title": "" }, { "docid": "8d6510a8538b8e58a503c2bf8562525b", "score": "0.5741873", "text": "def requires_authentication(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n authenticator = current_app.authenticator\n authentic, status, message = authenticator.authenticate(request)\n if not authentic:\n _message = json.dumps(dict(errors=dict(mauth=[message])))\n return Response(response=_message, status=status, mimetype=\"application/json\")\n return func(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "f5d11787e5bc194976e0e85e05b612b5", "score": "0.5739391", "text": "def permission_required(perm, login_url=None, raise_exception=False):\n def check_perms(user):\n # First check if the user has the permission (even anon users)\n if user.has_perm(perm):\n return True\n # In case the 403 handler should be called raise the exception\n if raise_exception:\n try:\n app_label, codename = perm.split('.')\n permission = Permission.objects.get(content_type__app_label=app_label, codename=codename)\n except (ObjectDoesNotExist, ValueError):\n permission = perm\n user.permission_error = permission\n raise PermissionDenied\n # As the last resort, show the login form\n return False\n return user_passes_test(check_perms, login_url=login_url)", "title": "" }, { "docid": "ce74f305b41af98542bdadebd2fb864d", "score": "0.57391584", "text": "def abort_if_no_google_user(decorated_function):\n def check_current_user(self, *kw, **kwargs):\n current_user = users.get_current_user()\n if not current_user:\n self.abort(401)\n else:\n decorated_function(self, *kw, **kwargs)\n return check_current_user", "title": "" }, { "docid": "cd9e06b783f23f29186e2a3870cc3990", "score": "0.57304513", "text": "def user_required(handler):\r\n def check_login(self, *args, **kwargs):\r\n auth = self.auth\r\n if not auth.get_user_by_session():\r\n self.redirect(self.uri_for('login'), abort=True)\r\n else:\r\n return handler(self, *args, **kwargs)\r\n\r\n return check_login", "title": "" }, { "docid": "7c30c9166b41b91975984b56deac1e61", "score": "0.5723275", "text": "def require_auth(f, self = None, *args, **kw):\n\n if not self.sid:\n raise NotLoggedIn\n\n return f(self, *args, **kw)", "title": "" }, { "docid": "0b00621708363e97543aaea45bb5f71b", "score": "0.5713961", "text": "def test_it_should_raise_a_PermissionError_if_user_does_not_have_permission(self, current_user):\n func = Mock()\n current_user.has_permission.return_value = False\n current_user.is_admin = False\n current_user.return_value = current_user\n decorated_func = permission(func, permission=\"permission\")\n\n with pytest.raises(PermissionError):\n decorated_func()", "title": "" }, { "docid": "0b7bc055a4c6fc708beaa0786ce0b0af", "score": "0.570744", "text": "def test_user_does_not_need_permission(self, current_user):\n func = Mock()\n current_user.return_value = current_user\n decorated_func = permission(func)\n\n assert 
decorated_func().is_called", "title": "" }, { "docid": "1693e4b1307d7b17be2c38eb35b7e32b", "score": "0.5705757", "text": "def service_permission(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n # if we get here, user has scrooge permission\n if (\n (\n 'service' in kwargs and\n has_permission_to_service(request.user, kwargs['service'])\n ) or\n 'service' not in kwargs\n ):\n return view_func(request, *args, **kwargs)\n return HttpResponseForbidden(\n json.dumps({'message': 'No permission to service'}),\n content_type=\"application/json\"\n )\n return login_required(_wrapped_view)", "title": "" }, { "docid": "72b2053eb9d9aa129aea92df54627ad7", "score": "0.5698636", "text": "def helper_auth_helper(request):\n if request.user and request.user.is_authenticated:\n return None\n # source, http://djangosnippets.org/snippets/243/\n if \"HTTP_AUTHORIZATION\" in request.META:\n auth = request.headers[\"Authorization\"].split()\n if len(auth) == 2 and auth[0].lower() == \"basic\":\n uname, passwd = (\n base64.b64decode(auth[1].encode(\"utf-8\")).decode(\"utf-8\").split(\":\")\n )\n user = authenticate(username=uname, password=passwd)\n if user:\n request.user = user\n return None\n\n return HttpResponseNotAuthorized()", "title": "" }, { "docid": "d48a6e2ffe8186453bd4ee6894b503e8", "score": "0.5694516", "text": "def can_handle_filterquery(func):\n def wrapper(self, request, **kwargs):\n obj = self.get_object()\n is_owner = obj.user == self.request.user\n can_handle = obj.for_everyone and obj.has_global_perm(self.request.user)\n if not is_owner and not can_handle:\n raise PermissionDenied\n\n return func(self, request, **kwargs)\n return wrapper", "title": "" }, { "docid": "d0e15feec485c2eb7d74ac8e3a9e99a3", "score": "0.5689806", "text": "def login_required(func):\n @functools.wraps(func)\n def decorated_function(*pa, **ka): # pylint: disable=missing-docstring\n if auth.is_logged_in():\n return func(*pa, **ka)\n return abort(401)\n\n return decorated_function", "title": "" }, { "docid": "3694a1c1b2c34d4f83baeca8f5ccab4c", "score": "0.5689007", "text": "def requires_auth(f):\n @wraps(f)\n def wrapper(*args, **kwds):\n if 'uid' not in session:\n return login(\"You are not logged in yet. Please log in\")\n return f(*args, **kwds)\n return wrapper", "title": "" }, { "docid": "18a5f62370543658432ce13df93782a8", "score": "0.5688777", "text": "def check_user_access(**kwargs):\n su = kwargs.get(\"su\", False)\n rules = kwargs.get(\"rules\", [])\n match_all = kwargs.get(\"match_all\", False)\n read_required = kwargs.get(\"read_required\", False)\n write_required = kwargs.get(\"write_required\", False)\n cls = kwargs.get(\"cls\", Rest)\n \n # no rules applied for super user (no need to execute rules)\n if su: return\n # no read/write access required at all (no need to execute rules)\n if not read_required and not write_required: return\n \n r = RulesEngine.execute_rule(rules)\n # local user should always be returned in rule, if not then allow\n # exception to occur. 
Also, all rule_dn's passed to execute_rule\n # must have r/w attribute per user returned\n match_count = 0\n for dn in rules:\n access = r[dn][\"users\"][g.user.username]\n if read_required and write_required:\n if access[\"write\"] and access[\"read\"]: match_count+=1\n elif match_all: abort(403, MSG_403)\n elif read_required:\n if access[\"read\"]: match_count+=1\n elif match_all: abort(403, MSG_403)\n elif write_required:\n if access[\"write\"]: match_count+=1\n elif match_all: abort(403, MSG_403)\n\n # check if any rules were matched\n if match_count == 0:\n abort(403, MSG_403)", "title": "" }, { "docid": "cec496ee4c5734d2c662ab95f23a8ca3", "score": "0.56752", "text": "def auth_required(function):\n\n def wrap(request, *args, **kwargs):\n if request.user.is_authenticated:\n return function(request, *args, **kwargs)\n\n resp = HttpResponse()\n resp.status_code = 401\n return resp\n\n return wrap", "title": "" }, { "docid": "cddc3d21bac4d0f14c7b56f8ec6c06b8", "score": "0.56547445", "text": "def requires_auth(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n # return f(*args, **kwargs)\n self = args[0]\n request = args[1]\n if six.PY3:\n token = request.args.get(b'token', [b''])[0].decode(\"utf8\")\n else:\n token = str(request.args.get('token', [''])[0])\n if self.config['token'] is None or token == self.config['token']:\n return f(*args, **kwargs)\n request.setResponseCode(401)\n return 'unauthorized'\n\n return decorated", "title": "" }, { "docid": "88e9310993c82d8541f71eaf94e0386d", "score": "0.5654406", "text": "def check_if_user_not_linked(provider_identifier, func, *args, **kwargs):\n\n if current_user.is_authenticated:\n if not getattr(current_user, provider_identifier):\n return func(*args, **kwargs)\n else:\n return abort(403)\n\n else:\n return abort(401)", "title": "" }, { "docid": "18b1e4ea635e0c940c27a65e22b02e97", "score": "0.56495607", "text": "def xmlauthenticated(method):\n \n def inner(*args, **kwargs):\n if args[0].current_user is None:\n try:\n raise404(args[0], \"Access forbidden\")\n except ForbiddenOperationException as e:\n pass\n return\n else:\n return method(*args, **kwargs)\n return inner", "title": "" }, { "docid": "30b52a7bc4cefa6047b66d2a13c15cfa", "score": "0.56485206", "text": "def require_login(definition: Callable) -> Callable:\n\n @functools.wraps(definition)\n def wrapper(state: State, *args, **kwargs) -> flask.Response:\n if state.user is None: # If No User Exists In State\n flask.abort(403)\n else:\n return definition(*args, **kwargs, state=state) # Continue Request\n\n return wrapper", "title": "" }, { "docid": "10ef830457f0392a45aae5804cab992e", "score": "0.5641129", "text": "def _authenticate(self, request):\n\n def anonymous_access(exc_obj):\n \"\"\" Check whether anonymous access is allowed. 
\"\"\"\n if not request.user and not self.allow_anonymous:\n raise exc_obj\n\n request.user = None\n if self.authentication:\n try:\n request.user = self.authentication.authenticate(request)\n except Unauthorized, exc:\n anonymous_access(exc)\n else:\n anonymous_access(Forbidden())\n return request.user", "title": "" }, { "docid": "15a65d9fceb039a4c11b252d76e5c92c", "score": "0.5638501", "text": "def authenticate(self, handler, data=None):\n userdict = yield super().authenticate(handler, data)\n denylist = os.environ.get('GITHUB_ORGANIZATION_DENYLIST')\n if denylist:\n self.log.debug(\"Denylist `%s` found.\" % denylist)\n denylist = denylist.split(',')\n denied = yield self._check_denylist(userdict, denylist)\n if denied:\n self.log.warning(\"Rejecting user: denylisted\")\n userdict = None\n return userdict", "title": "" }, { "docid": "261fbaa3390dd85814fc5e37f08f0ea8", "score": "0.5632094", "text": "def privilege_required(view_func):\n def _decorated_view(request, machine_name, **kwargs):\n if _is_privileged_on(request, machine_name):\n return view_func(request, machine_name, **kwargs)\n else:\n raise PermissionDenied\n return wraps(view_func)(_decorated_view)", "title": "" }, { "docid": "b972b8da2ace9151179b39c4f62a5da2", "score": "0.5623965", "text": "def _get_auth_safe_user(\n self,\n request: 'TracimRequest',\n ) -> User:\n app_config = request.registry.settings['CFG']\n uapi = UserApi(None, session=request.dbsession, config=app_config)\n login = ''\n try:\n login = request.authenticated_userid\n if not login:\n raise UserNotFoundInTracimRequest('You request a current user but the context not permit to found one') # nopep8\n user = uapi.get_one(login)\n if not user.is_active:\n raise UserAuthenticatedIsNotActive('User {} is not active'.format(login))\n except (UserDoesNotExist, UserNotFoundInTracimRequest) as exc:\n raise NotAuthenticated('User {} not found'.format(login)) from exc\n return user", "title": "" }, { "docid": "ff9301ed2477d78fdf8f2ef4fa15cd12", "score": "0.5601441", "text": "def http_auth(func):\n @wraps(func)\n def inner(request, *args, **kwargs):\n result = __http_auth_helper(request)\n if result is not None:\n return result\n return func(request, *args, **kwargs)\n return inner", "title": "" }, { "docid": "59eef6ab037351aca6df5df49acd2841", "score": "0.5600946", "text": "def get_authenticated_user(self, auth_func, realm):\n try:\n auth = self.request.headers.get('Authorization')\n \n if auth == None: return self._request_auth(realm)\n if not auth.startswith('Basic '): return self._request_auth(realm)\n \n auth_decoded = base64.decodestring(auth[6:])\n username, password = auth_decoded.split(':', 1)\n \n if auth_func(self, realm, username, password):\n self._current_user = username\n return True\n else:\n return self._request_auth(realm)\n except Exception, e:\n return self._request_auth(realm)", "title": "" }, { "docid": "4b77562199f407806fecbafe4cc83728", "score": "0.5593828", "text": "def _special_Trusted(self, request, name, dowhat, rightsdict):\n if (request.user.name == name and\n request.user.auth_method in request.cfg.auth_methods_trusted):\n return rightsdict.get(dowhat)\n return None", "title": "" }, { "docid": "2540dedef3b39496ac968ac4743b422a", "score": "0.55937165", "text": "def _validate_authorization(context, action):\n #We want to use the more granular version, but can't until it exists\n AUTHORIZE(context, action=action)\n return context.elevated()", "title": "" }, { "docid": "fb810051923c44c7726a16aba5291ea6", "score": "0.55935144", "text": "def 
validate(*args, **kwargs):\n\n # Get the headers from the self arg\n headers = args[0].request_state.headers\n token = headers.get('Token')\n if headers.get('User-Id') is None:\n raise endpoints.UnauthorizedException(MESSAGE_UNAUTHORIZED)\n\n user_id = int(headers.get('User-Id'))\n user, timestamp = User.get_by_auth_token(user_id, token)\n if user:\n return endpoints_method(*args, **kwargs)\n else:\n logging.warn('Invalid API request! User ID:%s, token:%s'\n %(user_id, token))\n raise endpoints.UnauthorizedException(MESSAGE_UNAUTHORIZED)", "title": "" }, { "docid": "b98589612c4b1750897b75db9f005c99", "score": "0.55931556", "text": "def permissions_required(app_label, login_url=None, raise_exception=False):\n def check_perms(user):\n # First check if the user has the permission (even anon users)\n if user.has_module_perms(app_label):\n return True\n # In case the 403 handler should be called raise the exception\n if raise_exception:\n user.permission_error = app_label\n raise PermissionDenied\n # As the last resort, show the login form\n return False\n\n return user_passes_test(check_perms, login_url=login_url)", "title": "" }, { "docid": "600144fd3f5625fd30047464f024f2c4", "score": "0.5591398", "text": "def login_required(f):\n def decorator(*args, **kwargs):\n if not current_user.is_authenticated:\n abort(401)\n return f(*args, **kwargs)\n return decorator", "title": "" }, { "docid": "c074381197bc17c810a71bae0dab05a0", "score": "0.5590082", "text": "def authenticate(self, **kwargs):\n authenticated = False\n if 'password' and 'username' in kwargs:\n try:\n user = TaskerUser.objects.get_by_natural_key(\n kwargs.get('username'))\n authenticated = user.check_password(kwargs.get('password'))\n except TaskerUser.DoesNotExist:\n pass\n elif 'google_user' in kwargs:\n google_user = kwargs.get('google_user')\n email = google_user['email']\n try:\n user = TaskerUser.objects.get_by_natural_key(email)\n authenticated = True\n except TaskerUser.DoesNotExist:\n email = google_user['email']\n try:\n user = TaskerUser.objects.get_by_natural_key(email)\n authenticated = True\n except TaskerUser.DoesNotExist:\n pass\n if authenticated:\n return user\n return None", "title": "" }, { "docid": "4c61841bfa3da2370fd955ada28736ea", "score": "0.5589802", "text": "def _check_basic_auth (self, user, password):\n if self._is_basic_auth_enabled is False: return True\n\n try:\n return self.basic_auth_function(self, user, password)\n except TypeError:\n return self.basic_auth_function(user, password)", "title": "" }, { "docid": "96b0b326964489b2b46f65677b838a6a", "score": "0.5589744", "text": "def requires_auth(f):\n @functools.wraps(f)\n def decorated(*args, **kwargs):\n\n potential_user_id = BaseController().is_logged_in()\n \n if not potential_user_id:\n return redirect(\"/login\", code=302)\n\n request.user_id = potential_user_id\n return f(*args, **kwargs)\n\n return decorated", "title": "" }, { "docid": "1897ba7745e99f68995f290c0f6693f4", "score": "0.55829257", "text": "async def __call__(self, request: Request):\n credentials: HTTPAuthorizationCredentials = await super().__call__(request)\n credentials = credentials.credentials\n if not credentials:\n raise HTTPException(status_code=403, detail=AuthState.NO_TOKEN)\n\n try:\n token_data = jwt.decode(credentials, Server.JWT_SECRET)\n except JWTError:\n raise HTTPException(status_code=403, detail=AuthState.INVALID_TOKEN.value)\n\n user_id, token_salt = token_data[\"id\"], token_data[\"salt\"]\n\n user_state = await request.state.db_conn.fetchrow(\n \"SELECT 
is_banned, is_mod, key_salt FROM users WHERE user_id = $1\",\n int(user_id)\n )\n\n # Handle bad scenarios\n if user_state is None or user_state[\"key_salt\"] != token_salt:\n raise HTTPException(status_code=403, detail=AuthState.INVALID_TOKEN.value)\n elif user_state[\"is_banned\"]:\n raise HTTPException(status_code=403, detail=AuthState.BANNED.value)\n elif self.is_mod_endpoint and not user_state[\"is_mod\"]:\n raise HTTPException(status_code=403, detail=AuthState.NEEDS_MODERATOR.value)\n\n request.state.user_id = int(user_id)\n return credentials", "title": "" }, { "docid": "4d33e68e2049b70e9c1b250ff0db18e5", "score": "0.55694336", "text": "def check_user_permissions(self, user, method, status, url, *args):\n if (user):\n self.client.login(username=user, password='password')\n\n if (method == 'get'):\n response = self.client.get(url, format='json')\n elif (method == 'post'):\n response = self.client.post(url, args[0], format='json')\n elif (method == 'put'):\n response = self.client.put(url, args[0], format='json')\n elif (method == 'delete'):\n response = self.client.delete(url, format='json')\n\n self.assertEqual(response.status_code, status)", "title": "" }, { "docid": "564bb62b641f28c3aa60d9fe2fdd8fc6", "score": "0.55652887", "text": "def admin_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if not users.is_current_user_admin():\n raise exceptions.MethodNotAllowed('Admin credentials required.')\n return func(*args, **kwargs)\n return decorated_view", "title": "" }, { "docid": "d50f72a47505787e139ef4899e2ad4d7", "score": "0.5563556", "text": "def user_required(handler):\n\n def check_login(self, *args, **kwargs):\n \"\"\"\n If handler has no login_url specified invoke a 403 error\n \"\"\"\n if self.request.query_string != '':\n query_string = '?' 
+ self.request.query_string\n else:\n query_string = ''\n\n continue_url = self.request.path_url + query_string\n login_url = self.uri_for('login', **{'continue': continue_url})\n\n try:\n user = self.auth.get_user_by_session()\n if not user:\n try:\n self.redirect(login_url, abort=True)\n except (AttributeError, KeyError), e:\n self.abort(403)\n except AttributeError, e:\n # avoid AttributeError when the session was delete from the server\n logging.error(e)\n self.user.unset_session()\n self.redirect(login_url)\n\n return handler(self, *args, **kwargs)\n return check_login", "title": "" }, { "docid": "3786d87c21418424b921cc176ef2ba45", "score": "0.5563208", "text": "def administrator(method):\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n if not self.current_user:\n if self.request.method == \"GET\":\n self.redirect(self.get_login_url())\n return\n raise tornado.web.HTTPError(403)\n elif not self.current_user.administrator:\n if self.request.method == \"GET\":\n self.redirect(\"/\")\n return\n raise tornado.web.HTTPError(403)\n else:\n return method(self, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "d7ddbdb3b819025e3fbf8a291025f20c", "score": "0.5543244", "text": "def superuser_permission(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(request, *args, **kwargs):\n if request.user.is_superuser:\n return view_func(request, *args, **kwargs)\n return HttpResponseForbidden(\n json.dumps({'message': 'No permission to service'}),\n content_type=\"application/json\"\n )\n return login_required(_wrapped_view)", "title": "" }, { "docid": "949aac04f4b355d4c02237b73cfc6e50", "score": "0.5542497", "text": "def authorization(func):\n def wrapper(*args, **kwargs):\n if 'Authorization' not in args[0].headers:\n response = error_handler(error_status=401, message=f'Security token is missing!')\n else:\n response = func(*args, **kwargs)\n return response\n return wrapper", "title": "" }, { "docid": "806cc471ae62c3f73229680d3259043c", "score": "0.55357605", "text": "def auth_token_required(fn):\r\n\r\n @wraps(fn)\r\n def decorated(*args, **kwargs):\r\n if _check_token():\r\n return fn(*args, **kwargs)\r\n return _get_unauthorized_response()\r\n return decorated", "title": "" }, { "docid": "1ae8c2cb6c4210f778d60c0f952834b5", "score": "0.5535683", "text": "def wrapper(*args, **kwargs):\n if not _is_admin():\n abort(403, message='Only admins can do this')\n\n return fn(*args, **kwargs)", "title": "" }, { "docid": "837034616a13848ea50ec9be3d73fc67", "score": "0.5534459", "text": "def has_access(func):\n @wraps(func)\n def _wrapper(bot, update, conf, *args, **kwargs):\n try:\n user = update.message.from_user\n id = int(user['id'])\n if id in conf[\"white_list\"]:\n return func(bot, update, conf=conf, *args, **kwargs)\n update.message.reply_text(\"You don't have access to me yet.\")\n except Exception as e:\n update.message.reply_text(\"Error occur in has_access: \" + str(e))\n return ConversationHandler.END\n return _wrapper", "title": "" }, { "docid": "55456d9ee8d1b4da3a2844abf7dc211a", "score": "0.5519779", "text": "def authorizeRequireWarp(fn):\n @wraps(fn)\n def wrapper(self, session, *args, **kwargs):\n try:\n if SessionManager.Check(session) is True:\n return fn(self, session, *args, **kwargs)\n else:\n return False, CController.Unauthorized(session)\n except Exception as e:\n print \"Exception in COrgan: %s\" % str(e)\n return False, e\n return wrapper", "title": "" }, { "docid": "5de27c3687c0909d2d8c9187af9e0b51", "score": 
"0.5511475", "text": "def isOwner():\n def wrapper(func):\n @wraps(func)\n def decorator(*args, **kwargs):\n current_user = User.query.get(get_jwt_identity())\n if current_user:\n if current_user.id == int(kwargs['id']):\n return func(*args, **kwargs)\n return abort(401)\n return abort(401)\n return decorator\n return wrapper", "title": "" }, { "docid": "cb9ff2a8a781203479484fcd63d5120f", "score": "0.5503048", "text": "def RequireAdministrator(func):\n\t@wraps(func)\n\tdef f(request, *args, **kwargs):\n\t\tif 'Administrator' not in request.session['MHL_Users']:\n\t\t\treturn err403(request)\n\n\t\treturn func(request, *args, **kwargs)\n\n\treturn f", "title": "" }, { "docid": "a14ce9ca125173a4e8a48fc4efe09e0b", "score": "0.55030423", "text": "def superuser_required(func):\n @wraps(func)\n def wrapper(request, *args, **kwargs):\n user = getCurrentUser(request)\n if not user.getUserpermission('superuser'):\n return util.ctrl.infoMsg(_(\"您不具有{}权限\").format(_(\"超级管理员\")))\n return func(request, *args, **kwargs)\n return wrapper", "title": "" }, { "docid": "e5e0da68fc98969c0945e1dde6e89fb9", "score": "0.55025524", "text": "def _check_credentials(\n self, user: Optional[TUser], handle: str, password: str\n ) -> TUser:\n if not user or not user.check_password(password):\n raise AuthenticationFailure(\n self._strings.wrong_email\n if \"@\" in handle\n else self._strings.wrong_username\n )\n\n if self._require_activation and not user.is_activated:\n raise AuthenticationFailure(self._strings.inactive_account)\n return user", "title": "" }, { "docid": "317a7efee883d520f7770631e97b152a", "score": "0.5497476", "text": "def require_login(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if _has_logged_in():\n return func(*args, **kwargs)\n else:\n # Forbidden, login invalid or non existant.\n # TODO: Redirect to login page.\n abort(403)\n return wrapper", "title": "" }, { "docid": "645ad852b7e1118d13065488a0df3ae6", "score": "0.5497418", "text": "def basic_http_auth(func):\n\n @wraps(func)\n def _inner(request, *args, **kwargs):\n result = helper_auth_helper(request)\n if result is not None:\n return result\n return func(request, *args, **kwargs)\n\n return _inner", "title": "" } ]
814d29215dd48fa3003c1a2a39a7b4ff
Looks up the UID of the resource with the given name.
[ { "docid": "7ccbfb8989ccbd49b774db442cfa0174", "score": "0.8512743", "text": "def resourceUIDForName(self, name):\n uid = self._db_value_for_sql(\"select UID from RESOURCE where NAME = :1\", name)\n\n return uid", "title": "" } ]
[ { "docid": "22c43a1d43961de77cb6605b3291cf35", "score": "0.6511902", "text": "def resource_uid(self) -> Optional[str]:\n return pulumi.get(self, \"resource_uid\")", "title": "" }, { "docid": "bd16aa5bb834eff63cf6559219c3e5ea", "score": "0.65088683", "text": "def _get_uid(self, name):\n if name.startswith(\"http\"):\n name = self._get_path_from_url(name)\n\n if not name.startswith(self.storage_dir):\n return name\n\n uid = path.dirname(name)\n uid = path.relpath(uid, start=self.storage_dir)\n return uid", "title": "" }, { "docid": "7e4700dcd3c51083415bfb4a93ce47d8", "score": "0.63126373", "text": "def get_by_name(self, name):\n if not name:\n raise TypeError('name must be a non empty string.')\n\n uid = self._name_uid_map.get(name)\n return self.get(uid)", "title": "" }, { "docid": "80c5d982b67a87365519f6edebfb79f2", "score": "0.62900656", "text": "def resourceExists(self, name):\n uid = self._db_value_for_sql(\"select UID from RESOURCE where NAME = :1\", name)\n return uid is not None", "title": "" }, { "docid": "808824b59148320eeb5b431fcdf0e707", "score": "0.628813", "text": "def _lookupUid(self, short_uid):", "title": "" }, { "docid": "af59f42b118ebc6d0a74e3fad386d1e1", "score": "0.6165063", "text": "def resourceNameForUID(self, uid):\n result = None\n\n for name in self.resourceNamesForUID(uid):\n assert result is None, \"More than one resource with UID %s in address book collection %r\" % (uid, self)\n result = name\n\n return result", "title": "" }, { "docid": "2395f4950f18a3656a8e4801cdb9e8ff", "score": "0.6115621", "text": "def get_uid(self):\n\t\tresult, data = self.connection.uid('search', None, \"ALL\")\n\t\tif result == \"OK\":\n\t\t\tself.uid = (data[0]).split()\n\t\t\tprint(\"[ i ] UIDs fetched.\")\n\t\telse:\n\t\t\tprint(\"[ e ] Searching for UIDs failed.\")", "title": "" }, { "docid": "d4a86f8782fb0839b53449419106f330", "score": "0.60574347", "text": "def _nameToUID(self, name):\n return name.rsplit(\".\", 1)[0]", "title": "" }, { "docid": "b9cd2b58e15eaa1c42d1485869549c3b", "score": "0.59734124", "text": "def _match_name(self, uids: Iterable) -> Optional[str]:\n for uid in uids:\n if parseaddr(uid)[0] == self.provided_name:\n return uid", "title": "" }, { "docid": "0ef31118ddd85bead69e7a65b6642a85", "score": "0.59275097", "text": "def get_resource_id(self, resource, name):\n response = self._request.get(resource, params={'links': 'false', 'query': name})\n if not response[resource]:\n return None\n if len(response[resource]) > 1:\n raise Dhis2ApiQueryError('Query returned multiple results')\n return response[resource][0]['id']", "title": "" }, { "docid": "e4bac146f5b50fea845eec6a70249ec7", "score": "0.58771193", "text": "def get_uid():\n with uid_lock:\n out = next(uid)\n return out", "title": "" }, { "docid": "3b928f9c1708f4e1da0e182658a471f2", "score": "0.5778452", "text": "def uid(self) -> Optional[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "3b928f9c1708f4e1da0e182658a471f2", "score": "0.5778452", "text": "def uid(self) -> Optional[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "3b928f9c1708f4e1da0e182658a471f2", "score": "0.5778452", "text": "def uid(self) -> Optional[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "3b928f9c1708f4e1da0e182658a471f2", "score": "0.5778452", "text": "def uid(self) -> Optional[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "de3ef9111a5a0f839647ce44c12b9d10", "score": "0.572037", "text": "def resolve_uid(uid):\n site = get_site()\n if site is None:\n 
return\n catalog = getToolByName(site, 'portal_catalog', None)\n if catalog is None:\n return\n result = catalog.unrestrictedSearchResults(UID=uid)\n if len(result) != 1:\n return\n return result[0]._unrestrictedGetObject()", "title": "" }, { "docid": "a2024e61892b6a4ee115c2772592169f", "score": "0.57159597", "text": "def get_uid(self, idx):\n try:\n return self.__uid_cache[idx]\n except KeyError:\n return self[idx].uid", "title": "" }, { "docid": "daaa143455b31a5d30e2194122f15b4f", "score": "0.5692868", "text": "def get_or_generate_uid(file_name: str, generator: callable, validator: [callable, None]):\n full_path = os.path.join(get_uid_path(), file_name)\n uid = None\n if os.path.exists(full_path):\n with open(full_path, 'r') as file:\n uid = file.readline().strip()\n\n if uid is not None and (validator is not None and not validator(uid)):\n uid = None\n\n if uid is None:\n uid = generator()\n save_uid_to_file(full_path, uid)\n return uid", "title": "" }, { "docid": "caf32cf3c075b5b075abdd43fcd9f3c9", "score": "0.5673893", "text": "def resource_id(self):\n\t\treturn self.name", "title": "" }, { "docid": "4a14dbf3aa274bbe68bed063d14088ad", "score": "0.5669674", "text": "def name(self) -> str:\n uid_string = str(self)\n if uid_string in UID_dictionary:\n return UID_dictionary[self][0]\n\n return uid_string", "title": "" }, { "docid": "d92e92b43ce90c20d1b7a5d6a3239c4c", "score": "0.5653038", "text": "def getUID(self):\n uid = \"%s_%s_%s_%s\" % (self.res_sid, self.resid, self.chain_id, self.model_id)\n return uid", "title": "" }, { "docid": "04f2b4888f7478884ecd2c88087931d1", "score": "0.5642538", "text": "def uid(self) -> str:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "d2dcd55f000a5d6c3fa75f1b8fc78155", "score": "0.56291854", "text": "def uid(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "9d31f9273d459d1ae3c91c0b7a1de10b", "score": "0.5614781", "text": "def lookup_name(name):\n resp = rx.get('/rxcui.json', params=dict(name=name))\n resp.raise_for_status()\n\n data = resp.json()\n return extract_cui(data)", "title": "" }, { "docid": "51edd26527de2a839f754577e05629c5", "score": "0.56011206", "text": "def get_id_for_name(self, name):\n client = MongoClient(MONGO_HOST, 27017)\n coll = client[MONGO_DBNAME]['signature']\n resp = coll.find({\"name\": name}).next()\n oid = str(resp.get(\"_id\"))\n return oid", "title": "" }, { "docid": "331fca09de3fdd0d89c97faa6048d8a0", "score": "0.5601014", "text": "def __format_id_from_name(self, name):\n\n query = Query(\"format\", \"id\")\n query.where(query.Condition(\"name\", name))\n rows = query.execute(self.cursor).fetchall()\n return rows[0].id if rows else None", "title": "" }, { "docid": "e60bf14ba074ef633f1aac4d361416ec", "score": "0.5573876", "text": "def uid(self) -> int:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "e60bf14ba074ef633f1aac4d361416ec", "score": "0.5573876", "text": "def uid(self) -> int:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "9ed7faeddb6ba6abb3162e0e1626d0bb", "score": "0.55551934", "text": "def uid(self):\n\n return self.metadata[0]", "title": "" }, { "docid": "29bad0aa5d80ced3831a1e59cd142cdf", "score": "0.5533407", "text": "def avatar_for_name(name=None):\n avatars = avatars_by_name()\n if name in avatars:\n return avatars[name]\n \n return all_avatars()[0]", "title": "" }, { "docid": "ae10603100564df7b731f87e9d07e1c7", "score": "0.55214924", "text": "def _fetch_register(self, name: str) -> int:\n with 
open(os.path.join(self.statedir, name), \"r\") as f:\n return int(f.read())", "title": "" }, { "docid": "f4217f7e735ad2811fe3a3302d79842b", "score": "0.55143017", "text": "def GetUserid(self):\n if self.scmRef == None or self.ref == None or \\\n self.ver == None or self.type == None:\n raise KeyError, \"getuid: Params can't be empty\"\n\n if self.debug:\n print \"GetUserID(%s, %s, %s)\" % \\\n (self.scmRef, self.ver, self.type)\n\n try:\n userid = \\\n self.db[self.scmRef][self.ref][self.ver][self.type]['misc'][0]\n except ( IndexError, KeyError):\n userid = self.userid\n except:\n raise\n\n if self.debug:\n print \"GetUserid: %s\" % userid\n\n return string.strip(userid)", "title": "" }, { "docid": "ba79ec5e9649c41de81a2b8072874008", "score": "0.5513815", "text": "def uid(self):\n self._uid = getattr(self, \"_uid\", unicode(uuid.uuid4().hex))\n return self._uid", "title": "" }, { "docid": "50391a2ae318443df3b32fb11647d064", "score": "0.5499823", "text": "def find_resourceid_by_name_or_id(resource_client, name_or_id):\n try:\n # Try to return an uuid\n return str(uuid.UUID(name_or_id))\n except ValueError:\n # Not an uuid => assume it is resource name\n pass\n\n resources = resource_client.list()\n candidate_ids = [r['id'] for r in resources if r.get('name') == name_or_id]\n if not candidate_ids:\n raise exceptions.ResourceNotFound(\n 'Could not find resource with name \"%s\"' % name_or_id)\n elif len(candidate_ids) > 1:\n str_ids = ','.join(candidate_ids)\n raise exceptions.NoUniqueMatch(\n 'Multiple resources with name \"%s\": %s' % (name_or_id, str_ids))\n return candidate_ids[0]", "title": "" }, { "docid": "14b8f2669ccf939f8183bbf2e5312b31", "score": "0.54843956", "text": "def get_res_by_name(self, name):\n for res in self.resources:\n if res.name == name:\n return res\n return None", "title": "" }, { "docid": "07815e8e739e92c2cf1d4e2e2f7102a0", "score": "0.54808587", "text": "def from_name(self, name):\n if name not in self._lookup:\n raise ValueError('Unkown name {} in registry'.format(name))\n return self._lookup[name]", "title": "" }, { "docid": "5e02a5b4aca376971561e574eafa86a2", "score": "0.5473876", "text": "def getUserId(request, searchName):\n if not searchName:\n return None\n cfg = request.cfg\n try:\n _name2id = cfg._name2id\n except AttributeError:\n arena = 'user'\n key = 'name2id'\n cache = caching.CacheEntry(request, arena, key)\n try:\n _name2id = pickle.loads(cache.content())\n except (pickle.UnpicklingError, IOError, EOFError, ValueError):\n _name2id = {}\n cfg._name2id = _name2id\n id = _name2id.get(searchName, None)\n if id is None:\n for userid in getUserList(request):\n name = User(request, id=userid).name\n _name2id[name] = userid\n cfg._name2id = _name2id\n arena = 'user'\n key = 'name2id'\n cache = caching.CacheEntry(request, arena, key)\n cache.update(pickle.dumps(_name2id, PICKLE_PROTOCOL))\n id = _name2id.get(searchName, None)\n return id", "title": "" }, { "docid": "ec5aa89179643410573bb78d4ec6a452", "score": "0.54694355", "text": "def uid(self):\n return self.__dict__.get('id')", "title": "" }, { "docid": "0eae1f89b7ff4e70d19c80ce5df7b991", "score": "0.5454802", "text": "def get_image_id(self, name):\n if name.startswith(\"sha256:\"):\n return name[7:]\n image = self.get_image(name)\n if not image:\n return\n return image[\"id\"]", "title": "" }, { "docid": "4bc789a080c9e6385ee0434f6cf443e6", "score": "0.5448324", "text": "def getUID(self):\n return self.uid", "title": "" }, { "docid": "f966a42b639f5d8bc951adb2f4d144dd", "score": "0.54480475", "text": 
"def get_uid(prefix=''):\n _uid_prefixes[prefix] += 1\n return _uid_prefixes[prefix]", "title": "" }, { "docid": "b0bc584418fa16f9036bdb92151db333", "score": "0.5398736", "text": "def get_sensor_id_by_name(self, name):\n sensors = self.get_sensor()\n for sensor_id in sensors:\n if PY3K:\n if name == sensors[sensor_id][\"name\"]:\n return sensor_id\n else:\n if name.decode(\"utf-8\") == sensors[sensor_id][\"name\"]:\n return sensor_id\n return False", "title": "" }, { "docid": "33dcf5fccfdbdc4b31e647f3e928e439", "score": "0.5386785", "text": "def name(self) -> str:\n return parseaddr(self.uid)[0]", "title": "" }, { "docid": "b536a2b0f59e2c2159c8bc327c32087d", "score": "0.5367091", "text": "def get_ref_by_name(self, name_str):\n\t\tuser_dict = self.get_user_dict_by_name(name_str)\n\t\tif user_dict:\n\t\t\treturn user_dict['ref']\n\t\telse:\n\t\t\treturn None", "title": "" }, { "docid": "9873c8b2e054f1389e4822f2650c5561", "score": "0.5353032", "text": "def uid(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"uid\"),\n )", "title": "" }, { "docid": "9873c8b2e054f1389e4822f2650c5561", "score": "0.5353032", "text": "def uid(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"uid\"),\n )", "title": "" }, { "docid": "9873c8b2e054f1389e4822f2650c5561", "score": "0.5353032", "text": "def uid(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"uid\"),\n )", "title": "" }, { "docid": "9c3c3bbbb52adc21041ba1bfebd8abac", "score": "0.53486484", "text": "def uid(self) -> Optional[str]:\n return self.__uid", "title": "" }, { "docid": "11da66e6ab081c7cdb521677905c782d", "score": "0.5337882", "text": "def getUser(name):", "title": "" }, { "docid": "43602a2b149932d47487094947ddb482", "score": "0.5333861", "text": "def id_from_name(uname_raw: str):\n uname_low: str = uname_raw.lower()\n response = requests.get(\n \"https://api.mojang.com/users/profiles/minecraft/{}\".format(uname_low)\n )\n log.f(\"WLME_RESP\", str(response))\n if response.status_code == 200:\n return {\"code\": response.status_code, \"udat\": response.json()}\n else:\n return {\"code\": response.status_code}", "title": "" }, { "docid": "0a906cc5d219648489ec8b446d34d18f", "score": "0.5323687", "text": "def lookup(self, name):\n return self._name_to_spec.get(name, None)", "title": "" }, { "docid": "7f9cce8bd678f8abd1b56e19bee54cd5", "score": "0.53209287", "text": "def get_uid(self):\n dat = self._handle.controlRead(Panda.REQUEST_IN, 0xc3, 0, 0, 12)\n return binascii.hexlify(dat).decode()", "title": "" }, { "docid": "c85907d06bc3481af8dafa3ed35786b9", "score": "0.53176194", "text": "def getUID(self):\r\n return self.id", "title": "" }, { "docid": "37d9a2bdffc1223298670875ca5f43ec", "score": "0.5313008", "text": "def fetchName(self, name, pwonly, decrypt, fulldecrypt):\n\t\tret = self.find(name, nameOnly=True)\n\t\tif not ret:\n\t\t\treturn\n\t\tif len(ret) > 1:\n\t\t\tlog.error(\"found more than 1 record, only returning the first.\")\n\t\tuuid = ret[0]['uuid']\n\t\treturn self.fetchUUID(uuid, pwonly, decrypt, fulldecrypt)", "title": "" }, { "docid": "96282b58f8328b10f1442d16d8752fea", "score": "0.5308342", "text": "def get_resource(name, decode=True):\n # get the resource using relative names\n with open(os.path.join(_pwd, name), 'rb') as f:\n resource = f.read()\n\n # make sure we return it as a string if asked\n if decode and hasattr(resource, 'decode'):\n return resource.decode('utf-8')\n\n return resource", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.53070873", 
"text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.53070873", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "0d2de678dd016aca72d7adeaaa79f0d2", "score": "0.53070873", "text": "def uid(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uid\")", "title": "" }, { "docid": "89e23ddf58072ae5da604b0fbcd450a0", "score": "0.5299741", "text": "def _findId(self, name):\n isinstance(name, str)\n\n for key_id in list(self.filenames.keys()):\n # data with title\n if self.filenames[key_id].name:\n input = self.filenames[key_id].name\n # data without title\n else:\n input = str(key_id)\n if name in input:\n return key_id", "title": "" }, { "docid": "cce1863b16d563a09cb86014456b4482", "score": "0.5275912", "text": "def findUser(self, name):\n name = name.lower()\n ul = self._getUserlist()\n udi = dict(zip([u.name for u in ul], ul))\n cname = None\n for n in udi.keys():\n if name in n:\n if cname: return None #ambiguous!!\n cname = n\n if cname: return udi[cname]\n else: return None", "title": "" }, { "docid": "6c09866794796dd8d40defa98b5e640c", "score": "0.52732617", "text": "def _get_uid_attr(self, ldap_userfolder):\n uid_attr = ldap_userfolder._uid_attr\n schema_dicts = ldap_userfolder.getSchemaDict()\n for schema_map in schema_dicts:\n if uid_attr == schema_map['ldap_name']:\n return schema_map['public_name']\n return uid_attr", "title": "" }, { "docid": "c700595a317f90df1f1246d734ca25da", "score": "0.52654034", "text": "def getuid(): # type: ignore\n return 0", "title": "" }, { "docid": "4c4d3ac3c0e90699737789302ccf5cb7", "score": "0.5258746", "text": "def name_to_id(self, obj_name):\n comp_map = self.get_comp_map()\n\n res = self.redis.hget(comp_map, obj_name)\n\n if res is not None:\n return res\n\n redir_map = self.get_redir_map()\n if redir_map:\n return self.redis.hget(redir_map, obj_name)", "title": "" }, { "docid": "440059df1cccfbad3baaa78ea75ce362", "score": "0.5256874", "text": "def get_node_by_name(self, name):\n for res in self.resources:\n for node in res.nodes:\n if node.name == name:\n return node\n return None", "title": "" }, { "docid": "90f4210057524d28a925310179ea3687", "score": "0.52474374", "text": "def uid(self):\n return self._uid", "title": "" }, { "docid": "90f4210057524d28a925310179ea3687", "score": "0.52474374", "text": "def uid(self):\n return self._uid", "title": "" }, { "docid": "90f4210057524d28a925310179ea3687", "score": "0.52474374", "text": "def uid(self):\n return self._uid", "title": "" }, { "docid": "90f4210057524d28a925310179ea3687", "score": "0.52474374", "text": "def uid(self):\n return self._uid", "title": "" }, { "docid": "90f4210057524d28a925310179ea3687", "score": "0.52474374", "text": "def uid(self):\n return self._uid", "title": "" }, { "docid": "90f4210057524d28a925310179ea3687", "score": "0.52474374", "text": "def uid(self):\n return self._uid", "title": "" }, { "docid": "cf25c51e21a3b8cd752c4e42745148cd", "score": "0.523516", "text": "def get_resource_from_uri(self, uri: str):\n pat = self.oidch.get_new_pat()\n resource_reg_endpoint = self.wkh.get(TYPE_UMA_V2, KEY_UMA_V2_RESOURCE_REGISTRATION_ENDPOINT)\n #r=self.mongo.get_id_from_uri(icon_uri)\n #if not r: return False\n resources = self.get_resources()\n for resource_id in resources:\n data = resource.read(pat, resource_reg_endpoint, resource_id, self.verify)\n print(data)\n if \"icon_uri\" in data and data[\"icon_uri\"] in uri:\n 
return data[\"_id\"]\n return None", "title": "" }, { "docid": "0de1f12c168b0df92bdc96a3805d7aa9", "score": "0.5232988", "text": "def _resolve_id(self, id):\n return self._lookupUid(id) if len(id) < 10 else id", "title": "" }, { "docid": "56e6ab3b00c42aaf84fcb712352c8e00", "score": "0.52116", "text": "def _match_uid(self, uids: Iterable) -> Optional[str]:\n return self.provided_id if self.provided_id in uids else None", "title": "" }, { "docid": "ad08cffdc09dd4ac42e1e712db3a76ec", "score": "0.5196318", "text": "def _find_resource_by_id_or_name(id_or_name, resource_list,\n resource_type_name):\n search_result_by_id = [res for res in resource_list\n if res.id == id_or_name]\n if search_result_by_id:\n return search_result_by_id[0]\n else:\n # try to find resource by name\n search_result_by_name = [res for res in resource_list\n if res.name == id_or_name]\n match_count = len(search_result_by_name)\n if match_count > 1:\n message = (\"Ambiguous {0} name '{1}'. Found more than one \"\n \"{0} for this name in Manila.\"\n ).format(resource_type_name, id_or_name)\n raise exceptions.NoUniqueMatch(message)\n elif match_count == 1:\n return search_result_by_name[0]\n else:\n raise heat_exception.EntityNotFound(entity=resource_type_name,\n name=id_or_name)", "title": "" }, { "docid": "308d626c79052e54aa57c2d63e981684", "score": "0.5190388", "text": "def _lookup_nsid(self, username):\n find_resp = self.api.people.findByUsername(username=username, format=\"parsed-json\")\n nsid = None\n if find_resp[\"stat\"] == \"ok\":\n nsid = find_resp[\"user\"][\"nsid\"]\n log.debug(\"Looking up username %s returned %s\", username, nsid)\n return nsid", "title": "" }, { "docid": "012cde7994eabdfc45701399fa874ef6", "score": "0.5184868", "text": "def uid(self):\n if not self._uid:\n self._uid = uuid4()\n return self._uid", "title": "" }, { "docid": "d5797af7e151ec1d307086df70936dd0", "score": "0.51786214", "text": "def uid(self):\n return self._id", "title": "" }, { "docid": "2f78b1570d0443131a6ec2dd619ce6fa", "score": "0.515076", "text": "def owner_lookup(self, name, rtype='ANY'):\n return self._request(FROM_OWNER_URL.format(value=name, rtype=rtype))", "title": "" }, { "docid": "993fa9c5ec513baf8e56d738f4641aac", "score": "0.5140477", "text": "def uid(self) -> str:\n return self.signing_key[\"uid\"]", "title": "" }, { "docid": "041d61ae1251659360af10c6c9cd8b1b", "score": "0.513205", "text": "def get_unit_guid(self, name):\n unit = next((unit for unit in self.units() if unit['name'].find(name) >= 0), None)\n return None if unit is None else unit['unitGuid']", "title": "" }, { "docid": "e2a9a30ef7fa54aeb7d4669673f9c8d6", "score": "0.51276284", "text": "def get_resource_string(manager, resource_name):", "title": "" }, { "docid": "e2a9a30ef7fa54aeb7d4669673f9c8d6", "score": "0.51276284", "text": "def get_resource_string(manager, resource_name):", "title": "" }, { "docid": "38a24d8785c0eaa37573099980c9e9a8", "score": "0.5092694", "text": "def _uuid_find(context, host, name_label):\n for i in objects.InstanceList.get_by_host(context, host):\n if i.name == name_label:\n return i.uuid\n return None", "title": "" }, { "docid": "fff2651be6cc6bd45e8f8bdf5074f32e", "score": "0.50889", "text": "def get_rs_id_by_name(name, response_sets):\n\n for rs in response_sets:\n if rs['name'] == name:\n return rs", "title": "" }, { "docid": "18d2caf8c5727c724773faaea6fb5ea1", "score": "0.5084684", "text": "def lookupUser(conn, uid):\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n curs.execute('''select * from user where username = %s''', 
[uid])\n return curs.fetchone()", "title": "" }, { "docid": "d988ae903e6488eaeaf0e117f2819dc7", "score": "0.50842035", "text": "def get_rv(self, name):\r\n if name in self._name_lookup:\r\n return self._name_lookup[name]\r\n return None", "title": "" }, { "docid": "707512f47bcf675d558e31db4b759f7b", "score": "0.50782555", "text": "def get_system_id(self, name):\n for k, v in self.nodes_iter(data=True):\n if 'name' in v and v['name'].lower() == name.lower():\n return k", "title": "" }, { "docid": "e2ee30c9f9b0c3e71a79b3ced9215dde", "score": "0.50745374", "text": "def __init__(self, name: str, uid: str):\n\n self.uid = uid\n self.name = name", "title": "" }, { "docid": "5506f6bc160101f8f5699413aa0b07ae", "score": "0.50717187", "text": "def insert_and_get_uid(self, username):\n uid = -1\n\n with sqlite.connect(dbfile) as conn:\n cur = conn.cursor()\n q1 = safe_unicode(u\"INSERT OR IGNORE INTO user VALUES (null, ?);\")\n cur.execute(q1, [username])\n conn.commit()\n\n q2 = safe_unicode(u\"SELECT uid FROM user WHERE name = ?;\")\n res = cur.execute(q2, [username])\n uid = res.fetchone()[0]\n return uid", "title": "" }, { "docid": "c89418a274b3703f8825022a3585e82d", "score": "0.50656986", "text": "def _testNameFromId(self, name):\n m = self.idpat.match(name)\n if m is None:\n return None\n\n id_ = int(m.groups()[0])\n\n self.loadIds()\n # Translate to test's real ID\n try:\n return self.ids[id_]\n except KeyError:\n return None", "title": "" }, { "docid": "a9e3d52baf0db68609800e6b102eda13", "score": "0.5058789", "text": "def get_uid(self, cached_robot):\n return cached_robot.attr[\"uid\"]", "title": "" }, { "docid": "50ce15db50cd47eaa08c32c9e1af0464", "score": "0.50444424", "text": "def get_entry_uid(self, entry_data):\n dl_url = self.get_entry_download_url(entry_data)\n if not dl_url:\n raise NotImplementedError('%s must implement get_entry_uid '\n 'method' % self.__class__.__name__)\n return md5(self.__class__.__name__ + dl_url).hexdigest()", "title": "" }, { "docid": "3a860e4c4af4e4d4f5ede7f32a72c198", "score": "0.5041299", "text": "def resource(self, name):\n if name in self.resources:\n res = self.resources[name]\n else:\n url = self.url + '/' + name.strip('/') + '/'\n res = RestResource(url)\n self.resources[name] = res\n return res", "title": "" }, { "docid": "fb9b01b5d7d2e429bc8641077e185aca", "score": "0.50398725", "text": "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "title": "" }, { "docid": "fb9b01b5d7d2e429bc8641077e185aca", "score": "0.50398725", "text": "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "title": "" }, { "docid": "fb9b01b5d7d2e429bc8641077e185aca", "score": "0.50398725", "text": "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "title": "" }, { "docid": "fb9b01b5d7d2e429bc8641077e185aca", "score": "0.50398725", "text": "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "title": "" }, { "docid": "fb9b01b5d7d2e429bc8641077e185aca", "score": "0.50398725", "text": "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "title": "" }, { "docid": "fb9b01b5d7d2e429bc8641077e185aca", "score": "0.50398725", "text": "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "title": "" }, { "docid": "fb9b01b5d7d2e429bc8641077e185aca", "score": "0.50398725", "text": "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "title": "" } ]
7fedc36119e96a11cbfecfce4483d863
Creates a view that allows ANY logged in user to submit a field trip request but checks to make sure the admin is accepting requests first.
[ { "docid": "d808caee7bdabd7b0bddaebc979f5bd0", "score": "0.66285205", "text": "def create(request):\n admin_option = AdminOption.objects.get()\n if not admin_option.window_open:\n return HttpResponse(\"New field trip requests have been disabled.\")\n\n title = \"Submit a Field Trip Request\"\n cards = (\n (\"General Information\", FORMS + 'general.html'),\n (\"Transportation\", FORMS + 'transportation.html'),\n (\"Funding\", FORMS + 'funding.html'),\n (\"Curriculum\", FORMS + 'curriculum.html'),\n )\n buttons = (\n ('submit', \"Submit\"),\n ('draft', \"Save as Draft\"),\n )\n ChaperoneFormSet = inlineformset_factory(FieldTrip, Chaperone, extra=1,\n form=ChaperoneForm)\n if request.method == 'POST':\n field_trip = FieldTrip(submitter=request.user)\n # status defaults to IN_PROGRESS, make it DRAFT for a draft\n if 'draft' in request.POST:\n field_trip.status = FieldTrip.DRAFT\n form = CreateForm(request.POST, request.FILES, instance=field_trip)\n chaperones = ChaperoneFormSet(request.POST, instance=field_trip)\n if form.is_valid() and chaperones.is_valid():\n form.save()\n chaperones.save()\n return redirect('field_trips:index')\n else:\n chaperones = ChaperoneFormSet()\n form = CreateForm()\n return render(request, 'field_trips/show_cards.html', {\n 'title': title,\n 'cards': cards,\n 'buttons': buttons,\n 'form': form,\n 'chaperones': chaperones,\n 'enctype': \"multipart/form-data\",\n 'action': reverse('field_trips:create'),\n 'admin_option': admin_option,\n })", "title": "" } ]
[ { "docid": "34a30345c20e0a3d5e61f19441df8845", "score": "0.6473721", "text": "def home():\n\n form = TripForm()\n \n if form.validate_on_submit():\n \n if validate_dates(form.start_date_time.data, form.end_date_time.data, form):\n \n return render_template('create_trip.html', form=form)\n\n if validate_number_of_people(form.number_of_people.data):\n\n form.number_of_people.errors = [\"Please enter at least 1 person\"]\n\n return render_template('create_trip.html', form=form)\n\n # check if user is logged in if not log them in as a guest user\n \n if 'user_id' not in session:\n User.log_in_as_guest()\n\n \n trip = Trip(start_date_time=form.start_date_time.data,\n end_date_time=form.end_date_time.data,\n number_of_people=form.number_of_people.data,\n name=form.name.data,\n user_id=session.get('user_id')\n )\n \n db.session.add(trip)\n db.session.commit()\n\n session['trip_id'] = trip.id\n\n return redirect(url_for('select_meals', trip_id=trip.id))\n \n return render_template('create_trip.html', form=form)", "title": "" }, { "docid": "11820e2a784e3f55cabea6763cdaae97", "score": "0.6250833", "text": "def user_trips(request):\n # extremely necessary to make the golobal vaiables\n # global display1\n # global display2\n global locations\n global matching_tlist\n tform1 = TripCreationForm1(request.user)\n tform2 = TripCreationForm2()\n tmform = TripModifyForm(request.user)\n lis = []\n for o, dis in matching_tlist:\n d = {}\n d[\"trip_name\"] = o.trip_id\n d[\"trip_date\"] = o.start_time\n d[\"trip_participants\"] = [x.username for x in o.participants.all()]\n d[\"disable\"] = dis\n d[\"trip_locations\"] = [\n x.location.location_name for x in TripPoint.objects.filter(trip=o)]\n lis.append(d)\n req_lis = Notification.objects.filter(\n user_to=request.user, notif_type=\"Trip Related\", resolved=\"No\")\n display1 = len(locations) != 0\n display2 = len(lis) != 0\n\n tlist = Trip.objects.filter(participants__in=[request.user])\n for t in tlist:\n # and (datetime.datetime.now()+datetime.timedelta(hours=5.5)-j.start_time.replace(tzinfo=utc)).days>10)\n if(t.start_time < (datetime.datetime.now()+datetime.timedelta(hours=5.5)).replace(tzinfo=utc)):\n if(t.posted):\n t.closed = True\n t.save()\n\n return render(request, 'info/trips.html', {\"tform1\": tform1, \"tform2\": tform2, \"tmform\": tmform,\n \"display1\": display1, \"display2\": display2, \"lis\": lis, \"locations\": locations,\n \"req_lis\": req_lis, \"tlist\": tlist})", "title": "" }, { "docid": "090407cebc8e21af4c863e281f240ec4", "score": "0.61548936", "text": "def trip_request_add_handler(request):\n pprint(request.POST)\n global matching_tlist\n n = len(matching_tlist)\n user = request.user\n print(n)\n index = 0\n for i in range(n):\n if(\"submit\"+str(i) in request.POST):\n index = i\n print(\"index\", index)\n trp = matching_tlist[index][0]\n description = \"User \"+user.username+\" wants to be added to the \" + \\\n trp.trip_id+\" in which you are a participant.\"\n title = \"Trip Add request\"\n # for ut in trp.participants.all():\n Notification.objects.create(user_to=trp.leader, user_from=user, title=title, description=description,\n notif_type=\"Trip Related\", creation_time=datetime.datetime.now()+datetime.timedelta(hours=5.5),\n travel_id=trp.trip_id)\n matching_jlist[index][1] = True\n return redirect(\"/user_trips/\")", "title": "" }, { "docid": "37b8dae6a16b4fde1251a4fe16cc1220", "score": "0.59939307", "text": "def post(self, request, *args, **kwargs):\n c = self.getContext(request)\n if 'choice' not in request.POST:\n 
c.update({\n 'error': True,\n 'message': 'No one was selected'\n })\n return render_to_response('display_message.html', c)\n tutor_name = request.POST['choice'].split('^?^')\n c.update({ 'firstname': tutor_name[0], 'lastname': tutor_name[1], 'username': tutor_name[2], 'specific_request': True })\n c.update({ \n 'times': range(1,24),\n 'days': ['Mon','Tues','Wed','Thurs','Fri','Sat','Sun']\n })\n return render_to_response('request_help.html',c)", "title": "" }, { "docid": "9c906c53aa964d8f345f8d803aa47f20", "score": "0.59727734", "text": "def create(request):", "title": "" }, { "docid": "b24f9e345d203cf3da1ebe2d1d1df526", "score": "0.58876973", "text": "def trip_creation_handler2(request):\n # function to handle the creation of new trip\n global locations\n pprint(locations)\n pprint(request.POST)\n trip_name = request.POST.get(\"trip_name\")\n trip_date = request.POST.get(\"travel_date\")\n cotravel_number = request.POST.get(\"cotravel_number\")\n source = request.POST.get(\"source\")\n duration = request.POST.get(\"duration\")\n expected_budget = request.POST.get(\"expected_budget\")\n trip_info = request.POST.get(\"trip_info\")\n\n trp = Trip(trip_id=trip_name, start_time=parser.parse(trip_date),\n source=source, cotravel_number=cotravel_number, duration=duration, expected_budget=expected_budget,\n trip_info=trip_info, leader=request.user)\n trp.save()\n trp.participants.add(request.user)\n # for each checkpoint create a new trip point with foreignkey to location and the trip objects\n for i, x in enumerate(locations):\n loc = LocationPoint.objects.get(\n location_name=x[\"location\"], user=request.user, location_type=\"Trip Point\")\n TripPoint.objects.create(location=loc, trip=trp)\n locations = []\n return redirect(\"/user_trips/\")", "title": "" }, { "docid": "9c3d3e9c22aa87484c485af03b10f769", "score": "0.5838591", "text": "def get(self, request, *args, **kwargs):\n c = self.getContext(request) \n is_tutor = False\n if tutor(c['user'].username):\n is_tutor = True\n c.update({'tutor': is_tutor})\n tutee_list = Tutee.objects.all()\n requests = Request.objects.all()\n help_requests = []\n for tutee in tutee_list:\n for req in requests:\n if tutee.user.username == req.user and req.user != c['user'].username:\n if not req.accepted_by:\n help_requests.append(req)\n \n c.update(csrf(request))\n c.update({'help_requests': help_requests})\n return render_to_response('claim_tutee.html', c)", "title": "" }, { "docid": "0dd84a9eb01d218f5047fd36da5ec442", "score": "0.5813315", "text": "def onflight(request):\n\n # Authentication check\n if not request.user.is_authenticated():\n # TODO redirect to error page\n return redirect(reverse('index'))\n\n user = request.user\n\n sia_user = get_object_or_404(SIAUser, user=user)\n return render(request, 'users/on-flight.html', {'SiaUser':sia_user})", "title": "" }, { "docid": "c8bf2c8a119f844f584cdc2729223166", "score": "0.5789913", "text": "def get(self, request, *args, **kwargs):\n c = RequestContext(request)\n c.update(csrf(request))\n if c['user'].is_superuser:\n return redirect('/promote_user/', permanant=True)\n \n is_tutor = False\n if tutor(c['user'].username):\n is_tutor = True\n\n requests = Request.objects.all()\n pending_requests = []\n my_tutors = []\n if is_tutor:\n my_tutees = []\n for req in requests:\n if req.user == c['user'].username:\n if not req.accepted_by:\n pending_requests.append(req)\n else:\n my_tutors.append((Tutee.objects.get(user__username=req.accepted_by), req))\n elif is_tutor and req.accepted_by == c['user'].username:\n 
my_tutees.append((Tutee.objects.get(user__username=req.user), req))\n\n c.update(csrf(request))\n c.update({'pending_requests': pending_requests})\n c.update({'my_tutors': my_tutors})\n c.update({'tutor': is_tutor})\n if is_tutor:\n c.update({'my_tutees': my_tutees})\n return render_to_response('profile.html', c)", "title": "" }, { "docid": "67ac31aa35b321fa6f0b1e7be751644f", "score": "0.5786391", "text": "def denied_add():\n messages.error(request, \"You don't have permission to create topologies.\")\n return HttpResponseRedirect('/login/')", "title": "" }, { "docid": "e51ce1763db0292fcfe32cbd41d23b1e", "score": "0.5765464", "text": "def new_ticket(request, *arg):\n if request.method == 'POST':\n form = TicketForm(request.POST)\n if form.is_valid():\n t = Ticket()\n t.ticket_type = form.cleaned_data['ticket_type']\n t.project = form.cleaned_data['project']\n t.component = form.cleaned_data['component']\n t.summary = form.cleaned_data['summary']\n t.description = form.cleaned_data['description']\n t.created_by = request.user\n t.updated_by = request.user\n # workflow related stuff\n ticket_type = t.ticket_type\n wm = WorkflowManager()\n wm.workflow = ticket_type.workflow\n wm.created_by = request.user\n wm.save()\n r = Role.objects.get(id=settings.ROLE_SUBMITTER)\n p = Participant()\n p.user = request.user\n p.role = r\n p.workflowmanager=wm\n p.save()\n t.workflow_manager=wm\n t.save()\n wm.start(p)\n # Generates a confirmation email to send to the new user\n current_site = Site.objects.get_current() \n site_name = current_site.name\n domain = current_site.domain\n tplt = loader.get_template(settings.NEW_TICKET_EMAIL_TEMPLATE)\n c = {\n 'email': request.user.email,\n 'domain': domain,\n 'site_name': site_name,\n 'user': request.user,\n 'protocol': settings.REGISTRATION_USE_HTTPS and 'https' or 'http',\n 'ticket': t,\n }\n send_mail(_(\"Confirmation of new ticket on IssueTracker\"),\n tplt.render(Context(c)), None, [request.user.email])\n \n request.user.message_set.create(message=_(\"The ticket has been\"\\\n \" successfully created.\"))\n return HttpResponseRedirect('/ticket/%d'%t.id)\n else:\n form = TicketForm()\n c = RequestContext(request, {'form': form})\n return render_to_response('new.html', c)", "title": "" }, { "docid": "f9de72b64e34113d105db135061cac5d", "score": "0.5754959", "text": "def get(self, request):\n if request.user.is_authenticated():\n return self.redirect_to(request)\n else:\n return render(request, 'account/signup.html', {'form': self.form()})", "title": "" }, { "docid": "20851bfd02b455b3643f59c1122d68cf", "score": "0.5754119", "text": "def get(self, request):\n if request.user.status == 2:\n return redirect('orders:pickup_service')\n else:\n return super().get(request)", "title": "" }, { "docid": "e6caf7263eecd8a03f00f1c60a0a949e", "score": "0.5731373", "text": "def validate_trip():\n\n email = session.get('email')\n user = User.query.filter_by(email=email).first()\n trip_name = request.form.get('trip-name')\n start_date = request.form.get('start-date')\n end_date = request.form.get('end-date')\n\n new_trip = Trip(creator_id=user.user_id,\n trip_name=trip_name,\n start_date=start_date,\n end_date=end_date)\n\n db.session.add(new_trip)\n\n db.session.commit()\n\n return redirect(url_for('trip_itinerary',\n trip_name=new_trip.trip_name, \n trip_id=new_trip.trip_id))", "title": "" }, { "docid": "85ea1b84eee129c3638cf22bcbc1a38d", "score": "0.5671864", "text": "def create(self,request):# create function is called when a http post request is made to the ViewSet\n\n 
return ObtainAuthToken().post(request)", "title": "" }, { "docid": "08d1f6fe285eedcd9c99ff813199d95f", "score": "0.56620723", "text": "def new_request_widget(self):\n if self.user_id == 0:\n self.login.click()\n if self.user_id == 0:\n return\n new_request_form = ui.NewRequest(self)\n new_request_form.move(get_popup_pos(self, new_request_form))\n new_request_form.user_id = self.user_id\n new_request_form.request.setFocus()\n new_request_form.exec_()", "title": "" }, { "docid": "a4e7bea2a630bd478d6a762a41642f75", "score": "0.5627527", "text": "def questionnaire(request):\n if request.method == \"POST\":\n # If is valid\n if not request.user.is_superuser:\n set_user_step(request.user, step=3, phase=1)\n return http.HttpResponseRedirect('/phase1/step3/')\n\n return http.HttpResponseBadRequest(\"Only accesible with POST\")", "title": "" }, { "docid": "5fe75ff2da849df7fa2224978dcd31e4", "score": "0.56189126", "text": "def post(self, request, *args, **kwargs):\n c = self.getContext(request)\n validate = validate_help_request(request.POST)\n if validate is not None:\n c.update(validate)\n c.update(csrf(request))\n return render_to_response('request_help.html', c)\n\n fc = request.POST['for_class']\n desc = request.POST['description']\n d = request.POST['day']\n t = request.POST['time']\n users_requests = Request.objects.filter(user=c['user'].username)\n valid = True\n \"\"\"\n If the user has ten requests, then they are not able to create a new one.\n If the user has already requested help for a specific class then they are not able to request help for that class again\n \"\"\"\n if len(users_requests) < 11:\n for req in users_requests:\n if req.for_class == fc:\n c.update({'request_exists_error': 'A help request for this class already exists'})\n valid = False\n else:\n valid = True\n elif len(users_requests) > 10:\n c.update({'too_many_error': 'You have too many open requests'})\n valid = False\n\n if valid:\n helprequest = Request(user=c['user'].username, first_name=c['user'].first_name, last_name=c['user'].last_name, for_class=fc, description=desc, days=d, time=t)\n \"\"\"\n if a tutor is being specifically requested then they will recieve an email informing them of the request\n \"\"\"\n if 'specific_request' in request.POST:\n helprequest.requested = request.POST['requested']\n helprequest.save()\n target = Tutee.objects.get(user__username=request.POST['requested']).user\n emailer = emails()\n message = 'You have been requested as a tutor by ' +\\\n c['user'].first_name + ' ' + c['user'].last_name +\\\n '. 
Log in to RPI Tutor Time to view their request.'\n emailer.send_email(target, message, 'Tutor Request')\n else:\n helprequest.save()\n c.update({\n 'error': False,\n 'message': 'Request successfully submitted.'\n })\n return render_to_response('display_message.html', c)\n\n return render_to_response('request_help.html', c)", "title": "" }, { "docid": "69c59ca5c155c48f96160ae758760e06", "score": "0.56134325", "text": "def login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwags):\n if g.user is None:\n # import pdb; pdb.set_trace()\n abort(404)\n return view(**kwags)\n return wrapped_view", "title": "" }, { "docid": "057f2be48aed0d8c3615532e742a7833", "score": "0.5605659", "text": "def get(self, request):\n today = datetime.date.today()\n random_string = hashlib.md5(bytes(str(random.random()).encode('utf-8'))).hexdigest()\n user_id = today.strftime('lmdemo-%y%m%d-')\n user_id += random_string[:3]\n\n initial_form = {\n 'lm_url': '/',\n 'first_name': 'Test',\n 'last_name': 'Example',\n 'user_id': user_id,\n 'email': '{}@example.com'.format(user_id),\n 'postal_code': '63303',\n 'endpoint': loginbypass.PROD_ENDPOINT,\n 'key': settings.LOGIN_BYPASS_KEY,\n }\n form = forms.LoginBypassForm(initial=initial_form)\n context = {'form': form}\n return render(request, self.template_name, context)", "title": "" }, { "docid": "1d63e3ddd34926b93d7150372fb06513", "score": "0.5597213", "text": "def create_view(self):\n if flask.request.method == \"POST\":\n try:\n self.start_task()\n flash(\"The task was successfully started.\", \"success\")\n except Exception as e:\n current_app.logger.exception(e)\n flash(\"An error occurred while starting the task.\", \"error\")\n return redirect(request.path) # Redirect after POST\n\n return_url = get_redirect_target() or self.get_url(\".index_view\")\n\n return self.render(\n \"cds_ils_admin/create_task.html\", return_url=return_url\n )", "title": "" }, { "docid": "cadb06ee4bbee3c5c67cde8a06320fd1", "score": "0.558085", "text": "def show_create_page():\n\n return render_template('user_form.html')", "title": "" }, { "docid": "52df3e9d53b6a9222a7ca6b5829a4155", "score": "0.5577324", "text": "def plans(request):\n if request.user.is_authenticated:\n if request.method == \"POST\":\n try:\n start_date = request.POST['start_date']\n start_time = request.POST['start_time']\n end_date = request.POST['end_date']\n end_time = request.POST['end_time']\n title = request.POST['title']\n start_datetime = pytz.utc.localize(datetime.combine(\n datetime.strptime(start_date, \"%Y-%m-%d\"),\n datetime.strptime(start_time, \"%H:%M\").time()))\n end_datetime = pytz.utc.localize(datetime.combine(\n datetime.strptime(end_date, \"%Y-%m-%d\"),\n datetime.strptime(end_time, \"%H:%M\").time()))\n\n Plan.objects.create(\n user=request.user,\n start_time=start_datetime,\n end_time=end_datetime,\n title=title)\n message = {\n 'mtype': \"success\",\n 'text': \"Pomyślnie dodano do bazy danych\",\n 'bold': \"Sukces!\"\n }\n\n return render(request, 'polls/add_plans.html', {'message': message})\n except Exception as ex:\n message = {\n 'mtype': \"danger\",\n 'text': \"Nie udało się dodać planów do bazy danych\",\n 'bold': \"Błąd!\"\n }\n print(ex)\n return render(request, 'polls/add_plans.html', {'message': message})\n return render(request, 'polls/add_plans.html')\n return render(request,\n 'polls/user_profile.html',\n {'error_message': \"Musisz być zalogowany!\"})", "title": "" }, { "docid": "152da2b87c003c66224a55e443d82f1d", "score": "0.5573352", "text": "def user(request):\n 
logging_user = LoginUser.get_login_user()\n if not logging_user.is_view_right():\n return\n auth_type = None\n type_form = GacoiForm(\"typeForm\", \"/user/\", \"POST\")\n\n keu_form = LagForm()\n keu_form.set_title(\"User List\")\n type_form.set_view(\"id,user_name,login_name,address,phone,email,course,gender,roles,teacher_id\")\n type_form.set_key(\"id\")\n\n if logging_user.is_add_right():\n type_form.set_insert(\"user_name,login_name,address, phone,course,email,gender,roles,teacher_id\")\n if logging_user.is_update_right():\n type_form.set_update(\"user_name,login_name,address, phone,course,email,gender,roles,teacher_id\")\n if logging_user.is_delete_right():\n type_form.set_option_deletable(True)\n\n type_form.set_type('password', GacoiFormFieldType.Password)\n type_form.set_required('user_name,login_name')\n type_form.set_type('updated_datetime', GacoiFormFieldType.DateTime)\n type_form.set_type('created_datetime', GacoiFormFieldType.DateTime)\n\n dept_id = AuthMetaTypeDefine.Department.code\n genders = UserLogic.get_genders_dict()\n teachers = UserLogic.get_teachers_dict()\n roles = UserLogic.get_roles_dict()\n type_form.get_field(\"gender\").set_drop_down_list_values(genders)\n type_form.get_field(\"teacher_id\").set_drop_down_list_values(teachers)\n type_form.get_field(\"roles\").set_drop_down_list_values(roles)\n type_form.get_field(\"course\").set_multi_choices(\"select id,name from course id\",\n \"select c.id from course c inner join base_user_course u \"\n \"on u.course_id = c.id where u.user_id = [id]\", 1)\n\n type_form.set_search('user_name,login_name, address,gender,roles,teacher_id')\n type_form.set_order('id,user_name,login_name,address,teacher_id,roles')\n type_form.init(request)\n if logging_user.is_update_right():\n type_form.add_inline_user_button(InlineUserButton(\"Change password\", \"change_password\",\n action=\"do_change_password([id])\"))\n\n reset_id = None\n reset_password = False\n\n # Check request\n # if typeForm_action = reset_password -> handle request password\n params = request.POST\n\n if params.get('typeForm_action') == 'reset_password':\n reset_id = params.get('reset_id')\n # transaction.set_autocommit(False)\n try:\n user_changed = User.objects.get(id=reset_id)\n user_changed.password = UserLogic.hash_password(params.get('new_password', None))\n user_changed.save()\n reset_password = False\n except ObjectDoesNotExist:\n # transaction.rollback()\n print(\"aaa\")\n\n # transaction.commit()\n\n # Check type_form.get_inline_user_button_return_value() return change_password button to open modal\n if logging_user.is_update_right():\n if \"change_password\" == type_form.get_inline_user_button_return_value():\n reset_id = type_form.get_key_value(\"id\")\n reset_password = True\n\n # call to edit in form view, if not: edit in grid view\n # type_form.set_option_update_in_form_view(True)\n\n if type_form.is_action(GacoiFormAction.DeleteDone):\n if logging_user.is_delete_right():\n try:\n user_deleted = User.objects.get(pk=type_form.get_key_value(\"id\"))\n user_deleted.delete()\n LogOperation.log(LogModule.User, LogType.Delete, LogResult.Success, type_form.get_key_value(\"id\"))\n except ObjectDoesNotExist:\n LogOperation.log(LogModule.User, LogType.Delete, LogResult.Fail, type_form.get_key_value(\"id\"))\n insert = False\n update = False\n if type_form.is_action(GacoiFormAction.InsertStart):\n keu_form.set_title(\"Create New User\")\n insert = True\n if type_form.is_action(GacoiFormAction.UpdateStart):\n update = True\n # Insert\n if 
type_form.is_action(GacoiFormAction.InsertDone):\n try:\n User.objects.get(login_name=type_form.get_field('login_name').get_value_blank2none())\n error_message = \"The login user is exist!\"\n type_form.set_action(GacoiFormAction.InsertStart)\n type_form.set_error_message(error_message)\n LogOperation.log(LogModule.User, LogType.Insert, LogResult.Fail)\n except ObjectDoesNotExist:\n # transaction.set_autocommit(False)\n try:\n role = type_form.get_field('roles').get_value_blank2none()\n teacher_id = type_form.get_field('teacher_id').get_value_blank2none()\n user_courses = type_form.get_field('course').get_multi_choices_selected_values()\n if int(role) != UserRoles.Student.code:\n teacher_id = None\n user_new = UserLogic.create_user(type_form.get_field('user_name').get_value_blank2none(),\n type_form.get_field('login_name').get_value_blank2none(),\n type_form.get_field('gender').get_value_blank2none(),\n role,\n type_form.get_field('phone').get_value_blank2none(),\n type_form.get_field('email').get_value_blank2none(),\n type_form.get_field('address').get_value_blank2none(),\n teacher_id,\n password=type_form.get_field('password').get_value_blank2none())\n UserLogic.store_user_courses(user_new.id, user_courses)\n type_form.set_action(1000)\n\n except ObjectDoesNotExist as e:\n\n # transaction.rollback()\n LogOperation.log(LogModule.User, LogType.Insert, LogResult.Fail, None, e)\n\n # transaction.commit()\n insert = False\n return HttpResponseRedirect(\"/user\")\n\n if type_form.is_action(GacoiFormAction.UpdateDone):\n today = datetime.datetime.today()\n # transaction.set_autocommit(False)\n try:\n user_updated = User.objects.get(id=type_form.get_key_value('id'))\n print(user_updated.login_name)\n new_user_name = type_form.get_field('user_name').get_value_blank2none()\n new_login_name = type_form.get_field('login_name').get_value_blank2none()\n # if user_updated.user_name != new_user_name or user_updated.login_name != new_login_name:\n user_updated.user_name = new_user_name\n user_updated.login_name = new_login_name\n user_updated.updated_datetime = today\n role = type_form.get_field('roles').get_value_blank2none()\n teacher_id = type_form.get_field('teacher_id').get_value_blank2none()\n user_courses = type_form.get_field('course').get_multi_choices_selected_values()\n if int(role) != UserRoles.Student.code:\n teacher_id = None\n user_updated.gender = type_form.get_field('gender').get_value_blank2none()\n user_updated.roles = role\n user_updated.phone = type_form.get_field('phone').get_value_blank2none()\n user_updated.email = type_form.get_field('email').get_value_blank2none()\n user_updated.address = type_form.get_field('address').get_value_blank2none()\n user_updated.teacher_id = teacher_id\n\n type_form.get_field('roles').get_value_blank2none()\n user_updated.save()\n UserLogic.update_user_courses(user_updated.id, user_courses)\n\n LogOperation.log(LogModule.User, LogType.Update, LogResult.Success, type_form.get_key_value('id'))\n except ObjectDoesNotExist:\n # transaction.rollback()\n LogOperation.log(LogModule.User, LogType.Update, LogResult.Fail)\n # transaction.commit()\n update = False\n type_form.set_paging_value(10)\n\n # data = User.objects.all().values_list('id',flat=False)\n data = User.objects.all()\n # Order\n if type_form.order_field:\n if type_form.order_type == 'desc':\n data = data.order_by(\"-\" + type_form.order_field)\n else:\n data = data.order_by(type_form.order_field)\n\n # Search user_name,login_name\n search = type_form.get_field(\"user_name\").get_search_value()\n 
if search is not None and search != '':\n data = data.filter(user_name__contains=search)\n search = type_form.get_field(\"login_name\").get_search_value()\n if search is not None and search != '':\n data = data.filter(login_name__contains=search)\n search = type_form.get_field(\"roles\").get_search_value()\n if search:\n l = list(User.objects.filter(roles__in=search).values_list('id', flat=True))\n data = data.filter(id__in=l)\n search = type_form.get_field(\"gender\").get_search_value()\n if search:\n l = list(User.objects.filter(gender__in=search).values_list('id', flat=True))\n data = data.filter(id__in=l)\n\n type_form.set_form_data(data)\n # type_form.set_form_model(User)\n type_form.set_caption([\"id,user_name,login_name,address,phone,email,gender,roles, teacher_id, course\",\n \"ID,Username,Login name,Address, Phone, Email, Gender,Roles, Teacher, Course\"])\n info_form = None\n context = { # \"select id,name,email,password from user\"\n 'authType': auth_type,\n 'user': logging_user,\n 'infoForm': info_form,\n 'typeForm': type_form,\n 'keuForm': keu_form,\n 'screen_name': ScreenName.User,\n 'reset_password': reset_password,\n 'update': update,\n 'insert': insert,\n 'reset_id': reset_id,\n }\n\n return render(request, 'user.html', context)", "title": "" }, { "docid": "013138d001bdc09426a8a84089b87ee5", "score": "0.5567037", "text": "def test_create_regular(self):\n self.client.force_authenticate(user=self.regular_user)\n response = self.client.post(_get_list_url(1), submission_data(self.regular_user))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "b1afd3575e3c44f26bb4c57a869500de", "score": "0.5560713", "text": "def tickets_view_one(request, pk):\n # get ticket details\n ticket = get_object_or_404(Ticket, pk=pk)\n # increase number of views by 1\n ticket.views += 1\n ticket.save()\n # filter comments on specific ticket\n comments = Comment.objects.filter(ticket_id=ticket.pk)\n # filter upvotes on specific ticket by user ID\n upvotes = Upvote.objects.filter(ticket_id=ticket.pk).values(\"user_id\")\n voters = [vote[\"user_id\"] for vote in upvotes]\n donors = User.objects.filter(id__in=voters)\n # render drop-down list for superusers to update status\n ticket_status_list = TicketStatus.objects.all()\n tkt_status = ticket.ticket_status.id\n # POST methods\n if request.method == \"POST\":\n comment_form = CommentForm(request.POST)\n if comment_form.is_valid():\n comment_form.instance.commenter = request.user\n comment_form.instance.ticket = ticket\n comment_form.save()\n # remove two ticket views to avoid duplicates on POST\n ticket.views -= 2\n ticket.save()\n messages.success(\n request, f\"Comment successfully added!\")\n return redirect(tickets_view_one, ticket.pk)\n else:\n comment_form = CommentForm()\n donation_form = DonationForm()\n context = {\n \"comment_form\": comment_form,\n \"comments\": comments,\n \"donation_form\": donation_form,\n \"donors\": donors,\n \"publishable\": settings.STRIPE_PUBLISHABLE,\n \"ticket\": ticket,\n \"ticket_status_list\": ticket_status_list,\n \"tkt_status\": tkt_status,\n \"voters\": voters,\n }\n return render(request, \"tickets_view_one.html\", context)", "title": "" }, { "docid": "a5e87b6147606310816795316cd2cd9f", "score": "0.5551106", "text": "def post(self, request, *args, **kwargs):\n poll = self.get_object()\n\n if request.user.id != poll.user.id:\n raise PermissionDenied(\"You can not create choice for this poll.\")\n\n return super().post(request, *args, **kwargs) # pylint: 
disable=no-member", "title": "" }, { "docid": "9ae82132ff4ca7b1410c3fd77247c009", "score": "0.55449873", "text": "def trip_creation_handler1(request):\n global locations\n if(\"add_checkpoint\" in request.POST):\n pprint(request.POST)\n d = {}\n d[\"location\"] = request.POST.get(\"location\")\n locations.append(d)\n return redirect(\"/user_trips/\")", "title": "" }, { "docid": "286e0637132874f32ec62bbdbec0d5d3", "score": "0.5543584", "text": "def get(self, request, user_id):\n user = User.objects.get(pk=request.user.id)\n volunteer_user = User.objects.get(pk=user_id)\n if user.id == volunteer_user.id:\n volunteer = get_object_or_404(Volunteer_User_Add_Ons, user=user_id)\n infoForm = volunteerSignupForm(instance=volunteer)\n\n return render(\n request,\n self.url_volunteer_edit,\n {\n 'infoForm': infoForm,\n 'user_id': volunteer.user.id,\n 'volunteer': volunteer\n }\n )\n else:\n return redirect(self.login_url)", "title": "" }, { "docid": "4abd47fd823fb7c057a2cb8156f64866", "score": "0.5537861", "text": "def hoteliers_only(view):\n def new_view(request, *args, **kwargs):\n response = view(request, *args, **kwargs)\n id = request.user.id\n my_user = Hotelier.objects.get(pk=id)\n if my_user != None:\n return response\n # TODO: Add your own forbidden view\n return render(request, 'thanks.html', {'message': 'شما مجاز به انجام این تغییرات نیستید.', 'redir' : '/simorgh/home/'})\n\n return new_view", "title": "" }, { "docid": "e3f21d26efe8164e5acee4c06a619dca", "score": "0.5534656", "text": "def request_access(request):\n context = {}\n if request.method == 'POST':\n context.update(request.POST)\n context['name'] = request.POST.get('name')\n context['email_address'] = request.POST.get('email_address')\n context['username'] = request.POST.get('username')\n context['affiliation'] = request.POST.get('affiliation')\n context['reason'] = request.POST.get('reason')\n\n if not request.POST.get('name'):\n context['error'] = 'name is required'\n elif not request.POST.get('username'):\n context['error'] = 'username is required'\n elif not request.POST.get('email_address'):\n context['error'] = 'email address is required.'\n elif not request.POST.get('reason'):\n context['error'] = 'A reason is required'\n if not context.get('error'):\n msg = \"\"\"User Requesting Access to Reactors Service\\n\n Name:{}\n username: {}\n email address: {}\n affiliation: {}\n reason: {}\"\"\".format(context['name'],\n context['username'],\n context['email_address'],\n context.get('affiliation'),\n context['reason'])\n # write the request to a local file as backup:\n with open(os.path.join(settings.REACTORS_REQUESTS_DIR, context['username']), 'w+') as f:\n f.write(msg)\n # attempt to notify\n try:\n send_mail(msg,\n \"do-not-reply@reactors.tacc.cloud\",\n settings.REACTORS_EMAIL_LIST)\n except Exception as e:\n print(\"Error sending email for context: {}\".format(context))\n context['success'] = \"You request has been submitted! 
You will hear back from TACC stuff soon.\"\n return render(request, 'abaco/request_access.html', context, content_type='text/html')", "title": "" }, { "docid": "751d5244512674429a573251c4bb4d4d", "score": "0.5526437", "text": "def onflightfr(request):\n \n # Authentication check\n if not request.user.is_authenticated():\n # TODO redirect to error page\n return redirect(reverse('index'))\n\n user = request.user\n\n sia_user = get_object_or_404(SIAUser, user=user)\n return render(request, 'users/on-flight-fr.html', {'SiaUser':sia_user})", "title": "" }, { "docid": "2aa10841b1582990c7f7036e79c87d8a", "score": "0.55042714", "text": "def customer_login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if session.get('user_type') != 'customer':\n return jsonify({'status': 'failed',\n 'msg': 'Operation not permitted.'})\n return view(**kwargs)\n\n return wrapped_view", "title": "" }, { "docid": "241dca1a0c3ece54e6e94a41fe9ef234", "score": "0.55041355", "text": "def show_pickup_analyzer():\n #Check that user is logged in\n if 'user' in session:\n user = User.query.get(session['user']['username'])\n teams = user.teams\n return render_template('pickup-analyzer.html', user=user, teams=teams, cats=categories)\n else:\n flash('Please log in first.')\n return redirect(url_for('show_login_form'))", "title": "" }, { "docid": "f9c6b1d3a9aa02ffb1cb6cf707e0d057", "score": "0.54990375", "text": "def login_required(view):\n @ wraps(view)\n def wrapped_view(**kwargs):\n db = get_db()\n if g.user is None:\n\n return redirect(url_for(\"login\", next=request.url))\n if db.execute(\"\"\"SELECT isBanned FROM users WHERE user_id = ? \"\"\", (g.user,)).fetchone()[\"isBanned\"] == 1:\n return redirect(url_for(\"login\"))\n return view(**kwargs)\n return wrapped_view", "title": "" }, { "docid": "70e2f2a4887e3c643a932dec058adc16", "score": "0.54793507", "text": "def residence_add(request):\n if request.method == 'POST':\n form = ResidenceForm(request.POST)\n if form.is_valid():\n home = form.save(commit=False)\n home.user = request.user # The logged user\n home.save()\n return redirect('homepage')\n else:\n form = ResidenceForm()\n return render(request, 'myapp/residence/residence_add.html', {\n 'form': form,\n 'user': request.user\n })", "title": "" }, { "docid": "4c5405c1259d0a6135f7447b2455f7ed", "score": "0.54747885", "text": "def add_patient(request):\n form = PatientForm()\n print('Getting the form')\n if request.method==\"POST\":\n form = PatientForm(request.POST)\n rai_list = ['age',\n 'snf',\n 'nephrologist',\n 'chf',\n 'sob',\n 'cancer',\n 'weight_loss',\n 'appetite',\n 'memory',\n 'mobility',\n 'eating',\n 'toileting',\n 'hygiene']\n features = [int(form.data[r]) for r in rai_list]\n \n\n \n rai = utils.get_rai(form)\n\n\n if form.is_valid():\n data = form.save(commit=False)\n data.created_by = request.user\n data.save()\n \n print(request.user)\n return render (request, \n 'show_rai.html', \n context = {'rai':rai,\n 'Age':form.data['age'],\n 'FirstName': form.data['first_name'],\n 'LastName': form.data['last_name'],\n 'MiddleInit': form.data['middle_initial'],\n 'SSN':form.data['SSN']})\n\n form = PatientForm()\n else:\t\t\n form=PatientForm\n return render(request, 'add_patient.html',{'form':form})", "title": "" }, { "docid": "4d9a2e8895051d3bbaa68325ceb9c8cf", "score": "0.5472861", "text": "def journey_creation_handler2(request):\n # function to handle the creation of new jounrey\n global checkpoints\n # form = JourneyCreationForm1(request.POST)\n # if form.is_valid():\n 
pprint(checkpoints)\n pprint(request.POST)\n journey_name = request.POST.get(\"journey_name\")\n journey_date = request.POST.get(\"travel_date\")\n cotravel_number = request.POST.get(\"cotravel_number\")\n jrny = Journey(journey_id=journey_name, start_time=parser.parse(journey_date),\n source=checkpoints[0][\"checkpointA\"], destination=checkpoints[-1][\"checkpointB\"],\n cotravel_number=cotravel_number)\n jrny.save()\n # save the journey and the sole participant of the journey is the request user\n jrny.participants.add(request.user)\n\n # for each checkpoint create a new journe point with foreignkey to location and the journey objects\n for i, x in enumerate(checkpoints):\n loc = LocationPoint.objects.get(\n location_name=x[\"checkpointA\"], user=request.user, location_type=\"Journey Point\")\n JourneyPoint.objects.create(\n location=loc, transport=x[\"means\"], point_id=i, journey=jrny)\n if(len(checkpoints) != 0):\n loc = LocationPoint.objects.get(\n location_name=checkpoints[-1][\"checkpointB\"], user=request.user, location_type=\"Journey Point\")\n JourneyPoint.objects.create(\n location=loc, transport=x[\"means\"], point_id=len(checkpoints), journey=jrny)\n checkpoints = []\n return redirect(\"/user_journeys/\")", "title": "" }, { "docid": "85f036be1088d98ef307d2da6c9b03aa", "score": "0.5469385", "text": "def create(self, request, *args, **kwargs):\n if not request.user.is_superuser:\n raise exceptions.NotFound\n\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "bfc6cd42bd424dcadb879965772268b3", "score": "0.5464898", "text": "def instructor(request):\n\n # get all lesson sets, display\n if request.method == 'POST':\n # Will this include where the data is updated? Such as selecting different visuals to interact with?\n if user_auth_inst(request):\n # Take instructor to data view\n if lesson_set_auth(request):\n return redirect(\"/tutor/tutor\")\n else:\n return redirect(\"accounts:profile\")\n else:\n return redirect(\"/accounts/settings\")\n else:\n\n return render(request, \"data_analysis/visual.html\")", "title": "" }, { "docid": "d5ac9ebd0bd2f55f99ed73f7079bc737", "score": "0.54510874", "text": "def post(self, request, *args, **kwargs):\n c = self.getContext(request)\n is_tutor = False\n if tutor(c['user'].username):\n is_tutor = True\n c.update({'tutor': is_tutor})\n msg = 'Good news, your request has been accepted by ' +\\\n c['user'].first_name + \" \" + c['user'].last_name +\\\n \". 
They should be contacting you directly shortly\"\n request_user_and_id = request.POST['choice'].split('?^?')\n requests = Request.objects.all()\n for req in requests:\n if req.user == request_user_and_id[0]:\n if req.id == int(request_user_and_id[1]):\n req.accepted_by = c['user'].username\n req.save()\n usr = Tutee.objects.get(user__username=req.user).user\n c.update({'target': req.user})\n #Email the student\n emailer = emails()\n emailer.send_email(usr, msg, \"Good News\")\n \n c.update(csrf(request))\n return render_to_response('email_tutee.html', c)", "title": "" }, { "docid": "51010e31b102db3425a4dfa28e91e371", "score": "0.5438009", "text": "def trip_request_resolve_handler(request):\n req_lis = Notification.objects.filter(\n user_to=request.user, notif_type=\"Trip Related\", resolved=\"No\")\n print(request.POST)\n index = 0\n typ = 0\n for i in range(len(req_lis)):\n if(\"accept\"+str(i) in request.POST):\n index = i\n typ = 1\n elif(\"reject\"+str(i) in request.POST):\n index = i\n typ = 0\n print(index, typ)\n req = req_lis[index]\n if(typ == 0):\n # typ 0 means the request is rejected.\n # create a new notification meant for all the partiipants and the request user\n req.resolved = \"Request rejected by \"+request.user.username\n trp_id = req.travel_id\n print(trp_id)\n trp = Trip.objects.get(participants__in=[request.user], trip_id=trp_id)\n\n desc = request.user.username+\" rejected the request by \"+req.user_from.username\n for par in trp.participants.all():\n Notification.objects.create(user_to=par, user_from=request.user, title=\"Trip add request rejected\", description=desc,\n notif_type=\"Trip Related\", creation_time=datetime.datetime.now()+datetime.timedelta(hours=5.5), resolved=\"Yes\")\n Notification.objects.create(user_to=req.user_from, user_from=request.user, title=\"Journey add request rejected\", description=desc,\n notif_type=\"Journey Related\", creation_time=datetime.datetime.now()+datetime.timedelta(hours=5.5),\n resolved=\"Yes\")\n if(typ == 1):\n # typ 1 means the request is rejected.\n # create a new notification meant for all the partiipants and the request user\n req.resolved = \"Request accepted by \"+request.user.username\n print(req.description.split())\n trp_id = req.travel_id\n print(trp_id)\n trp = Trip.objects.get(participants__in=[request.user], trip_id=trp_id)\n\n trp.participants.add(req.user_from)\n desc = request.user.username+\" accepted the request by \"+req.user_from.username\n for par in jrny.participants.all():\n Notification.objects.create(user_to=par, user_from=request.user, title=\"Trip add request accepted\", description=desc,\n notif_type=\"Journey Related\", creation_time=datetime.datetime.now()+datetime.timedelta(hours=5.5),\n resolved=\"Yes\")\n req.save()\n return redirect(\"/user_trips/\")", "title": "" }, { "docid": "a073faf23c82ae9426ad206fff18dd59", "score": "0.54373556", "text": "def dispatch_request(self, **kwargs):\n not_authorized = self.pre_tests(**kwargs)\n\n if not_authorized:\n return not_authorized\n\n to_redirect = 0\n\n self.set_choices(**kwargs)\n\n if helpers.really_submitted(self.create_form):\n self.handle_create(**kwargs)\n\n elif helpers.really_submitted(self.process_form):\n to_redirect = self.handle_process(**kwargs)\n\n #TODO: maybe not the cleanest way to do it!!\n if to_redirect == 0 or to_redirect is None:\n self.reset_fields()\n\n return render_template(self.template,\n create_form=self.create_form,\n process_form=self.process_form,\n **self.template_kwargs)\n else:\n return redirect(to_redirect)", 
"title": "" }, { "docid": "dec6d521a9717dc4453718b7247a6a20", "score": "0.5433963", "text": "def dispatch_assignent_post_request(request):\n\n return create_new_homework(request)", "title": "" }, { "docid": "2c047b4d0c5deabb3530c48ab646fab1", "score": "0.5433689", "text": "def show_create_user_form():\n\n return render_template(\"create_user_form.html\")", "title": "" }, { "docid": "de2503a629885b0d4219f03c6453b269", "score": "0.5425061", "text": "def add_view(self, request, form_url='', extra_context=None):\n data = request.GET.copy()\n\n origin_types = TblOriginType.objects.all()\n if len(origin_types) > 0:\n data['origin_type'] = origin_types[len(origin_types)-1]\n\n environmental_ids = TblEnvironmental.objects.all()\n if len(environmental_ids) > 0:\n data['environmentalid'] = environmental_ids[len(environmental_ids)-1]\n \n taxonomy_ids = TblTaxonomy.objects.all()\n if len(taxonomy_ids) > 0:\n data['taxonomyid'] = taxonomy_ids[len(taxonomy_ids)-1]\n\n request.GET = data\n return super().add_view(request, form_url=form_url, extra_context=extra_context)", "title": "" }, { "docid": "a5414096496e75485277b253ac942a64", "score": "0.54199666", "text": "def test_options_user(self):\n request = self.factory.get(reverse('planet:options'))\n request.user = self.user\n response = OptionsView.as_view()(request)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "b5fc56e9aaf0f28128d8a20085f704a1", "score": "0.5411256", "text": "def show_add_user_form():\n\n return render_template (\"new_user.html\")", "title": "" }, { "docid": "9f0b384f4bde6e16666a43f1e90709e5", "score": "0.5405932", "text": "def candidate_register(request):\n if request.user.is_authenticated:\n return redirect(\"index\")\n else:\n if request.method == 'POST':\n logic.candidate_register_logic(request)\n return redirect(\"index\")\n else:\n skills = Skill.objects.all()\n return render(request,'candidate/login_signup.html',{\"skills\":skills})", "title": "" }, { "docid": "acf6dbc843d171c5c468b2b24ddd9eaa", "score": "0.5396369", "text": "def agent_login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if session.get('user_type') != 'agent':\n return jsonify({'status': 'failed',\n 'msg': 'Operation not permitted.'})\n return view(**kwargs)\n\n return wrapped_view", "title": "" }, { "docid": "fa934ebaaba98a05256eb6b23dcbe6a5", "score": "0.5386511", "text": "def sign_up_view(request):\n if request.method == 'POST':\n form = ProjectPlannerUserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n # log in the user\n accounts_user = form.save()\n login(request, accounts_user)\n\n # Message\n messages.success(request, 'Welcome!')\n\n return redirect('project:index')\n else:\n # Message\n messages.warning(request, 'Some fields are not correct!')\n else:\n form = ProjectPlannerUserCreationForm()\n\n context = {\n 'form': form\n }\n\n return render(request, 'accounts/signUp.html', context)", "title": "" }, { "docid": "fa9ab1d77fda619867dbcfaa5705c298", "score": "0.5375764", "text": "def add(request):\n\n if datetime.now() > datetime.strptime(settings.DEADLINE_SUBMISSION, '%Y-%m-%d %H:%M'):\n return redirect('/')\n else:\n # save form\n if request.method == 'POST':\n new_entry = Entry()\n entryform = EntryForm(request.POST, request.FILES, instance=new_entry)\n if entryform.is_valid():\n if request.user.is_authenticated():\n new_entry.user = request.user\n entryform.save()\n vis_thumbnail(new_entry.screenshot)\n return render_to_response('submission/thankyou.html', locals(), 
context_instance=RequestContext(request))\n # show empty form\n else:\n user = request.user\n entryform = EntryForm()\n return render_to_response('submission/add.html', locals(), context_instance=RequestContext(request))", "title": "" }, { "docid": "9780a2142149d141bbff62048bd05538", "score": "0.53741014", "text": "def show_add_form():\n return render_template('newuser.html')", "title": "" }, { "docid": "66148209a2aa0aee601c0fd10bcf05cb", "score": "0.5363834", "text": "def staff_login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n print(session)\n if session.get('user_type') != 'staff':\n return jsonify({'status': 'failed',\n 'msg': 'Operation not permitted.'})\n return view(**kwargs)\n\n return wrapped_view", "title": "" }, { "docid": "a93e38e2a807db60aca04bcdc89bced9", "score": "0.5361074", "text": "def set_tasks(request):\n user = request.user\n form = CreateTaskForm(user=request.user)\n\n # Users can only set tasks for tables they are part of\n tables = CafeTable.objects.filter(\n university=user.university,\n table_id__in=user.cafe_table_ids.values_list('table_id', flat=True))\n form.fields['table_id'].queryset = tables\n\n # reset number of tasks set today if necessary\n if user.tasks_set_today >= 2 and \\\n user.next_possible_set == datetime.date.today():\n user.tasks_set_today = 0\n user.save()\n\n # students can set max 2 tasks per day (to avoid spamming)\n if user.tasks_set_today >= 2 and not user.is_staff:\n return redirect(\"dashboard\")\n\n context = {'form': form, 'num_users': get_number_current_users()}\n\n if request.method == 'POST':\n # create task with provided data\n form = CreateTaskForm(request.POST, user=request.user)\n if form.is_valid():\n task_name = form.cleaned_data.get('task_name')\n table_id = form.cleaned_data.get('table_id')\n task_content = form.cleaned_data.get('task_content')\n points = form.cleaned_data.get('points')\n recurrence_interval = form.cleaned_data.get('recurrence_interval')\n max_repeats = form.cleaned_data.get('max_repeats')\n Task.objects.create( # removed task =\n task_name=task_name,\n created_by=user,\n table_id=table_id,\n task_content=task_content,\n points=points,\n recurrence_interval=recurrence_interval,\n max_repeats=max_repeats\n )\n user.tasks_set_today += 1\n user.save()\n\n # Add notification\n task_text = user.first_name + user.last_name + \" has added '\" + \\\n task_name + \"' as a new task - go check it out!\"\n notification = Notification(table_id=table_id, notification_type=3,\n text_preview=task_text)\n notification.save()\n\n # if student, redirect if can't set more tasks now\n if user.tasks_set_today >= 2 and not user.is_staff:\n user.next_possible_set = datetime.date.today() + \\\n datetime.timedelta(days=1)\n user.save()\n return redirect(\"dashboard\")\n\n else:\n context[\"CreateTaskForm\"] = form\n else:\n context[\"CreateTaskForm\"] = form\n\n # by default, tasks are not recurring\n form.fields['recurrence_interval'].initial = \"n\"\n return render(request, 'set_tasks.html', context)", "title": "" }, { "docid": "cbb92d218481c2941900bb68bb87d8e1", "score": "0.53457695", "text": "def tasks():\n # if user reached route via GET (as by clicking a link or via redirect)\n if request.method == \"GET\":\n # get tasks of current user and pass to template\n cur_user_tasks = select_task(session[\"user_id\"])\n return render_template(\"tasks.html\", tasks = cur_user_tasks)\n \n # else if user reached route via POST (as by submitting a form via POST)\n elif request.method == \"POST\":\n # delete 
completed tasks\n delete_task(session[\"user_id\"])\n return redirect(url_for(\"tasks\"))", "title": "" }, { "docid": "e0698c9b2edb9940969beaf0ccdf97dd", "score": "0.53387177", "text": "def add_operator(request, t):\n try:\n messages = None\n admin = get_operator_by_username(request.session['username'])\n params = {\n 'rao': get_attributes_RAO(),\n 'is_admin': is_admin(request.session['username']),\n 'active_operator': admin\n }\n form = ()\n if request.method == 'POST':\n if 'add_operator' not in request.POST:\n form = NewOperatorForm(request.POST)\n else:\n form = NewOperatorPinForm(request.POST)\n\n if form.is_valid():\n\n if 'add_operator' not in request.POST:\n params['operator'] = True\n return render(request, settings.TEMPLATE_URL_AGENCY + 'add_operator.html',\n {'params': params, 'token': t, 'form': form})\n elif admin and not admin.signStatus:\n return HttpResponseRedirect(reverse('agency:logout_agency'))\n else:\n params['operator'] = False\n result, pin = create_operator(request.session['username'], request.POST)\n if result == StatusCode.OK.value:\n params = {\n 'username': request.session['username'],\n 'operator': request.POST.get('fiscalNumber').upper(),\n 'is_admin': is_admin(request.session['username']),\n 'timestamp': datetime.datetime.strftime(datetime.datetime.utcnow(), '%Y-%m-%d %H:%M')\n }\n request.session['pin'] = pin\n t = signing.dumps(params)\n LOG.info(\"admin: {}, operator: {} - Operatore creato con successo\".format(\n request.session['username'], request.POST.get('fiscalNumber').upper()),\n extra=set_client_ip(request))\n return HttpResponseRedirect(reverse('agency:list_operator', kwargs={'t': t, 'page': 1}))\n elif result == StatusCode.ERROR.value:\n messages = display_alert(AlertType.DANGER,\n \"Si è verificato un problema durante l'inserimento del nuovo operatore.\"\n \" Esiste già un operatore con questa email e/o codice fiscale.\")\n elif result == StatusCode.SIGN_NOT_AVAILABLE.value:\n messages = display_alert(AlertType.DANGER,\n \"Pin errato per la terza volta. 
Il tuo PIN non è più valido.\")\n elif result == StatusCode.UNAUTHORIZED.value:\n messages = display_alert(AlertType.DANGER, \"Il PIN inserito è errato.\")\n else:\n messages = display_alert(AlertType.DANGER,\n \"Si è verificato un problema durante l'inserimento del nuovo operatore.\"\n \" Invio della mail non riuscito.\")\n\n return render(request, settings.TEMPLATE_URL_AGENCY + 'add_operator.html',\n {'params': params, 'token': t, 'form': form, 'messages': messages})\n except Exception as e:\n LOG.error(\"Exception: {}\".format(str(e)), extra=set_client_ip(request))\n params = {\n 'rao': get_attributes_RAO(),\n 'is_admin': is_admin(request.session['username']),\n 'active_operator': get_operator_by_username(request.session['username'])\n }\n return render(request, settings.TEMPLATE_URL_AGENCY + 'error.html',\n {\"statusCode\": StatusCode.EXC.value, 'params': params,\n \"message\": \"Errore durante l'inserimento nuovo operatore\"})", "title": "" }, { "docid": "fefb3802f746641f2e8cd19de6320459", "score": "0.53360593", "text": "def create(self, request, *args, **kwargs):\n response = self.lib.create_trip(request.user, request_to_dict(request))\n if (response['error']):\n return rc.BAD_REQUEST\n elif (response['trip']):\n return response['trip']", "title": "" }, { "docid": "d3d55dc6fcde6af1f2f0fda9d6450d52", "score": "0.5322102", "text": "def login_not_required(view):\n setattr(view, LOGIN_NOT_REQUIRED_MARKER, True)\n return view", "title": "" }, { "docid": "888c484e48be828ad1ae45563f319a06", "score": "0.5321672", "text": "def form_valid(self, form):\n form.instance.user = self.request.user\n form.instance.issue = bug_or_feature(self)\n return super(TicketCreateView, self).form_valid(form)", "title": "" }, { "docid": "9525759e43f08c5e81f36bd18428090a", "score": "0.53206843", "text": "def checkAccess(self):\n self.mutator.taskFromKwargs(comments=True, work_submissions=True)\n self.data.is_visible = self.check.isTaskVisible()\n\n if task_logic.updateTaskStatus(self.data.task):\n # The task logic updated the status of the task since the deadline passed\n # and the GAE task was late to run. Reload the page.\n raise RedirectRequest('')\n\n if self.request.method == 'POST':\n # Access checks for the different forms on this page. 
Note that there\n # are no elif clauses because one could add multiple GET params :).\n self.check.isProfileActive()\n\n if 'reply' in self.data.GET:\n # checks for posting comments\n # valid tasks and profile are already checked.\n self.check.isBeforeAllWorkStopped()\n self.check.isCommentingAllowed()\n\n if 'submit_work' in self.data.GET:\n self.check.isBeforeAllWorkStopped()\n if not task_logic.canSubmitWork(self.data.task, self.data.profile):\n self.check.fail(DEF_NOT_ALLOWED_TO_UPLOAD_WORK)\n\n if 'button' in self.data.GET:\n # check for any of the buttons\n button_name = self._buttonName()\n\n buttons = {}\n TaskInformation(self.data).setButtonControls(buttons)\n if not buttons.get(button_name):\n self.check.fail(DEF_NOT_ALLOWED_TO_OPERATE_BUTTON % button_name)\n\n if 'send_for_review' in self.data.GET:\n self.check.isBeforeAllWorkStopped()\n if not task_logic.isOwnerOfTask(self.data.task, self.data.profile) or \\\n not self.data.work_submissions:\n self.check.fail(DEF_CANT_SEND_FOR_REVIEW)\n\n if 'delete_submission' in self.data.GET:\n self.check.isBeforeAllWorkStopped()\n id = self._submissionId()\n work = GCIWorkSubmission.get_by_id(id, parent=self.data.task)\n\n if not work:\n self.check.fail(DEF_NO_WORK_FOUND %id)\n\n time_expired = work.submitted_on - datetime.datetime.now()\n if work.user.key() != self.data.user.key() or \\\n time_expired > task_logic.DELETE_EXPIRATION:\n self.check.fail(DEF_NOT_ALLOWED_TO_DELETE)", "title": "" }, { "docid": "f82efd90a272c120d130f50fa16fa763", "score": "0.5315018", "text": "def addCar(request):\n if 'user_type' in request.session.keys():\n if request.session.get('user_type') == 1:\n return render(request, 'addCar.html')\n else:\n return HttpResponseForbidden()\n else:\n return redirect('login')", "title": "" }, { "docid": "03ab43ca5fe2a3b83290df5bbcee6e01", "score": "0.53149116", "text": "def canCreate(self, request):\n return False", "title": "" }, { "docid": "4835ecd887134dc82ec4d1b8824ebd0b", "score": "0.5306381", "text": "def create_user(self, request, *args, **kwargs):\n return self.create(request, *args, **kwargs)", "title": "" }, { "docid": "dabe37ee3cc2de0c3c86ee65e7d2db45", "score": "0.53038955", "text": "def create(self, request, *args, **kwargs):\n return super().create(request, *args, **kwargs)", "title": "" }, { "docid": "c93313e21128ee4716fc07ae092bc38c", "score": "0.5301878", "text": "def allow(self, user, action):\n if action == 'view':\n return True\n else:\n return False", "title": "" }, { "docid": "5579ebe20458e78b103955434e693cd9", "score": "0.529623", "text": "def isNotCompetentToAuthenticate(request):", "title": "" }, { "docid": "751fd3d37ac51672d1be06352d806e40", "score": "0.529372", "text": "def new(request):\n\treturn render(request, 'semi_restful/add.html')", "title": "" }, { "docid": "c3f8276ee454fc74888d031a81a8b90a", "score": "0.5291223", "text": "def post(self, request):\n context = {\n 'signed_in_volunteers': VolunteerLog.logged_in_volunteers_objects.all(),\n 'log_sign_in_form': LogSignInForm\n }\n if request.method == 'POST':\n log_sign_out_form = LogSignOutForm(request.POST)\n if log_sign_out_form.is_valid():\n log_sign_out_form.save()\n return super(VolunteerSignInView, self).form_valid(log_sign_out_form)\n\n if request.method == 'POST' and not log_sign_out_form.is_valid():\n log_sign_in_form = LogSignInForm(request.POST)\n if log_sign_in_form.is_valid():\n log_sign_in_form.save()\n return render(request, self.template_name, context)\n else:\n messages.error(request, \"Please correct the errors below and 
resubmit.\")\n context['log_sign_in_form'] = log_sign_in_form\n return render(request, self.template_name, context)", "title": "" }, { "docid": "e3cf5086ff32730417e9d87c44acc7df", "score": "0.5285659", "text": "def topology_access_check(request, callee, action, **kwargs):\n\n def denied():\n \"\"\"Generate an error message and redirect if we try do something to a\n topology we're not allowed to\"\"\"\n messages.error(request, \"Either this topology doesn't exist or you don't \"\n \"have permission to %s it.\" % action)\n return HttpResponseRedirect('/login/')\n\n def denied_add():\n \"\"\"Generate an error message and redirect if we try to create a topology\n and are not allowed to\"\"\"\n messages.error(request, \"You don't have permission to create topologies.\")\n return HttpResponseRedirect('/login/')\n\n \n # If we're trying to add a template, don't need to get the template itself\n if action == \"add\":\n if permissions.allowed_topology_access_create(request.user):\n return callee(request)\n else:\n return denied_add()\n\n else:\n\n # Try getting the template - if it doesn't exist, show the same message\n # as for permission denied. If we don't have a \"tid\" argument, django\n # will show an internal error, which is what we want.\n tid = int(kwargs[\"tid\"])\n kwargs[\"tid\"] = tid\n try :\n topo = db.Topology.objects.get(pk=tid)\n except db.Topology.DoesNotExist:\n return denied()\n\n if action == \"use\":\n # See if there is an HTTP GET token - if there is, try to use the token\n # method for authentication\n try:\n token = request.GET[\"token\"]\n except KeyError:\n pass\n else:\n # See if the token is valid\n user = crypto.validate_token(token)\n if user != None and permissions.allowed_topology_access_use(user, topo):\n request.user = user\n return callee(request, topo=topo, **kwargs)\n if permissions.allowed_topology_access_use(request.user, topo):\n return callee(request, topo=topo, **kwargs)\n else:\n return denied()\n\n elif action == \"change\":\n if permissions.allowed_topology_access_change(request.user, topo):\n return callee(request, topo=topo, **kwargs)\n else:\n return denied()\n elif action == \"delete\":\n if permissions.allowed_topology_access_delete(request.user, topo):\n return callee(request, topo=topo, **kwargs)\n else:\n return denied()\n else:\n raise ValueError(\"Unknown action: %s\" % options[\"action\"])", "title": "" }, { "docid": "4cadcc43d8373a9636811caf391ee728", "score": "0.52789825", "text": "def createlisting(request):\n\t# Check if user submitted a new listing post\n\tif request.method == \"POST\":\n\n\t\tform = NewListingForm(request.POST)\n\t\t# Validate the form\n\t\tif form.is_valid():\n\t\t\tlisting = form.save(commit=False) # allow us to change fields manually\n\t\t\t# Add information that wasn't on user form\n\t\t\tlisting.current_price = form.cleaned_data[\"starting_bid\"]\n\t\t\t#listing.current_price = 0.0\n\t\t\tlisting.user = request.user\n\t\t\n\t\t# Save the listing to the Database\n\t\tlisting.save()\n\t\t# Return User to New Listing\n\t\treturn HttpResponseRedirect(reverse(\"listing\", args=(listing.id,)))\n\telse:\n\t\treturn render(request, \"auctions/createlisting.html\", {\"form\": NewListingForm()})", "title": "" }, { "docid": "51c8105bb6ba8aff252af65752efaf46", "score": "0.5276149", "text": "def show_new_user_form():\n return render_template('add_user_form.html')", "title": "" }, { "docid": "b37c2d9133b8896585b13f4746c34b70", "score": "0.5267732", "text": "def create_ticket(request, ticket_type):\n if request.method == 
\"POST\":\n form = TicketForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Ticket added\")\n if ticket_type == \"FEATURE\":\n return redirect(all_features)\n else:\n return redirect(all_bugs)\n else:\n form = TicketForm()\n form.fields[\"ticket_type\"].initial = ticket_type\n return render(request, 'ticketform.html', {'form': form})", "title": "" }, { "docid": "4d5c536d79aaf059cd621f8aab6c0a4d", "score": "0.52633303", "text": "def get(self, request):\n if request.user.is_authenticated and not request.user.is_verified:\n return render(request, 'account/signup_done.html')\n return redirect('home')", "title": "" }, { "docid": "2c59f41e3ae926cf90b9ff7627f6a4da", "score": "0.526257", "text": "def __call__(self):\n # aq_inner is needed in some cases like in the portlet renderers\n # where the context itself is a portlet renderer and it's not on the\n # acquisition chain leading to the portal root.\n # If you are unsure what this means always use context.aq_inner\n context = self.context.aq_inner\n form = self.request.form\n self.userid = form.get(\"userid\", \"\")\n self.loginid = form.get(\"loginid\", \"\")\n self.user_edit = form.get(\"form.button.edit\", None) is not None\n self.user_activate = form.get(\"form.button.activate\", None) is not None\n self.user_deactivate = form.get(\n \"form.button.deactivate\", None) is not None\n\n if not self.userid:\n return self.index()\n\n if self.user_edit:\n edit_view = \"%s/user_edit_view?userid=%s\" % (\n self.context.absolute_url(), self.userid)\n self.request.response.redirect(edit_view)\n\n portal_membership = getToolByName(context, 'portal_membership')\n\n if portal_membership.isAnonymousUser():\n raise Unauthorized('You need to login to access this page.')\n\n current_user = portal_membership.getAuthenticatedMember()\n user_groups = current_user.getGroups()\n sm = getSecurityManager()\n portal = getUtility(ISiteRoot)\n\n # user_management_list = context.get_management_dict()\n manage_by_group = context.get_manage_by_groups()\n manage_all = context.get_manage_all()\n\n # TODO: \"checking for * should be done in get_manage_by_groups\"\n if sm.checkPermission(ManagePortal, portal) and not manage_by_group:\n # show all for adminstratior/manager\n manage_by_group = [manage_all]\n\n if not manage_by_group:\n raise Unauthorized(UNAUTHORISED_ERROR)\n\n user = portal_membership.getMemberById(self.userid)\n if not user:\n context.plone_utils.addPortalMessage(\n _(u'This user does not exists.'))\n return self.index()\n\n user_groups = user.getGroups()\n same_groups = user_groups\n if manage_all not in manage_by_group:\n # TODO((ivan) limit the search instead of doing it after that\n same_groups = set(manage_by_group) & set(user_groups)\n if not same_groups:\n raise Unauthorized(UNAUTHORISED_ERROR)\n\n self.user_fullname = user.getProperty('fullname', '')\n self.user_email = user.getProperty('email', '')\n approved_by = user.getProperty('approved_by', '')\n self.user_approved_by = context.get_user_name(\n approved_by)\n self.user_approved_date = user.getProperty('approved_date', '')\n last_updated_by = user.getProperty('last_updated_by', '')\n self.user_last_updated_by = context.get_user_name(\n last_updated_by)\n self.user_last_updated_date = user.getProperty('last_updated_date', '')\n self.user_status = context.get_status(user)\n self.user_is_active = context.is_active(user)\n # display the groups based from the login user management list\n group_names = context.get_groups_title(same_groups)\n self.user_group = \", 
\".join(\n [group_name[\"group_title\"] for group_name in group_names])\n\n if self.user_activate:\n context.user_activate(self.userid, self.request)\n\n profile_view = \"%s/user_profile_view?userid=%s\" % (\n self.context.absolute_url(), self.userid)\n self.request.response.redirect(profile_view)\n\n if self.user_deactivate:\n context.user_deactivate(self.userid)\n\n profile_view = \"%s/user_profile_view?userid=%s\" % (\n self.context.absolute_url(), self.userid)\n self.request.response.redirect(profile_view)\n\n return self.index()", "title": "" }, { "docid": "64f784f505cdbf2c353f89c4151ca79f", "score": "0.52621067", "text": "def trip_itinerary(trip_name, trip_id):\n \n email = session.get('email')\n user = User.query.filter_by(email=email).first()\n users = User.query.filter(User.email!=email).all()\n trip = Trip.query.filter_by(trip_id=trip_id).first()\n\n return render_template('itinerary.html',\n trip_name=trip_name,\n trip_id=trip_id,\n trip=trip,\n username=user.username,\n users=users)", "title": "" }, { "docid": "0e4cb222be15ae7423c55b127040c9f4", "score": "0.5257335", "text": "def home_signup_view(request:Request, email:str = Form(...)):\n\n airtable_client = Airtable(\n base_id = AIRTABLE_BASE_ID,\n api_key = AIRTABLE_API_KEY,\n table_name = AIRTABLE_TABLE_NAME,\n )\n\n # to send email to airtable\n did_send = airtable_client.create_records({\"email\":email})\n return templates.TemplateResponse(\"home.html\",{\"request\":request, \"submitted_email\":email, \"did_send\":did_send})", "title": "" }, { "docid": "87e1f9786a4c9efd33e97b98bb7aa81f", "score": "0.52547777", "text": "def tickets_new_feature(request):\n if request.method == \"POST\":\n ticket_form = TicketForm(request.POST)\n donation_form = DonationForm(request.POST)\n if ticket_form.is_valid() and donation_form.is_valid():\n # amount to pay / donate\n donation_amount = 0\n donation_amount += int(request.POST.get(\"donation_amount\"))\n try:\n # build Stripe payment\n token = request.POST[\"stripeToken\"]\n customer = stripe.Charge.create(\n amount=int(donation_amount * 100),\n currency=\"EUR\",\n description=(\n request.user.email +\n \" (\" + request.user.get_full_name() + \")\"),\n source=token,\n )\n except stripe.error.CardError:\n # Stripe payment error\n messages.error(request, f\"Your card was declined!\")\n # authorization is valid - payment successful\n if customer.paid:\n ticket_form.instance.author = request.user\n ticket_form.instance.ticket_type_id = 2\n ticket_form.instance.total_donations = donation_amount\n # update user Profile with additional donation amount\n get_user_donations = Profile.objects.values_list(\n \"total_donated\", flat=True).get(user_id=request.user.id)\n new_user_donations = get_user_donations + donation_amount\n Profile.objects.filter(\n user_id=request.user.id).update(\n total_donated=new_user_donations)\n # update ticket status to In Progress if €100 goal is achieved\n if donation_amount >= int(100):\n ticket_form.instance.ticket_status_id = 2\n else:\n ticket_form.instance.ticket_status_id = 1\n new_ticket = ticket_form.save()\n new_ticket_id = new_ticket.pk\n messages.success(\n request, f\"Thank you for your Feature Request!\\\n €{donation_amount} was charged to your card.\")\n return redirect(tickets_view_one, new_ticket_id)\n else:\n messages.error(request, f\"Unable to take payment!\")\n else:\n messages.error(request, f\"There was an error. 
Please try again.\")\n else:\n ticket_form = TicketForm()\n donation_form = DonationForm()\n context = {\n \"donation_form\": donation_form,\n \"ticket_form\": ticket_form,\n \"publishable\": settings.STRIPE_PUBLISHABLE\n }\n return render(request, \"tickets_new_feature.html\", context)", "title": "" }, { "docid": "77df534e230304382a17d6b64b34905c", "score": "0.52546364", "text": "def before_request():\n g.user = current_user", "title": "" }, { "docid": "f38f0acf2569f2adef9f69a1e8305274", "score": "0.5254325", "text": "def vouch_required(f):\n @login_required\n @wraps(f)\n def wrapped(request, *args, **kwargs):\n if request.user.get_profile().is_vouched:\n return f(request, *args, **kwargs)\n else:\n log.warning('vouch_required forbidding access')\n return HttpResponseForbidden(_('You must be vouched to do this.'))\n\n return wrapped", "title": "" }, { "docid": "9d56c55b292834e2d79c1009c4813918", "score": "0.52485853", "text": "def new_food_entry_form(user_id):\n\t\n\treturn render_template(\"entry.html\", user_id=user_id)", "title": "" }, { "docid": "44966277b7e240553f5bc4381193f23b", "score": "0.5245977", "text": "def test_func(self):\n\n todo = self.get_object()\n return self.request.user.is_superuser or \\\n self.request.user == todo.assigned_user", "title": "" }, { "docid": "0a8e417dcb45cd61a48ed67dc518567f", "score": "0.5244312", "text": "def create_task_form(self,req):\r\n\t\ttemp_set=loader.get_template(\"form_set.html\")\r\n\t\tset_form=task_write()\r\n\t\tcontext={\r\n\t\t\t\"form\":set_form,\r\n\t\t\t\"title\":u\"Authorization Setup\",\r\n\t\t\t\"act_cn\":u\"Authorization Management\",\r\n\t\t\t\"act\":\"authset\",\r\n\t\t\t}\r\n\t\trt=temp_set.render(context,req)\r\n\t\treturn rt", "title": "" }, { "docid": "f4551a4ecb5421c1b161a404fdf27681", "score": "0.5242026", "text": "def login_router(request):\n\n user = request.user\n\n # Checks if an AllSkills model is attached to the user\n # If not create and send to edit page\n try:\n user = user.allskills\n except AttributeError:\n models.AllSkills.objects.create(\n user_id=user.pk\n )\n\n # Redirect to the main edit page\n return redirect('profiles:edit')\n\n else:\n # redirect to main profile page\n return redirect('profiles:profile', pk=request.user.pk)", "title": "" }, { "docid": "b7f9a95a0d7a27fc246ac6c8d5991503", "score": "0.5235917", "text": "def shared(request):\n print(request.method)\n if request.method == \"POST\":\n set_user_step(request.user, step=4, phase=1)\n result = 'success'\n json_response = {'result': result}\n return http.JsonResponse(json_response)\n\n return http.HttpResponseBadRequest(\"Only accesible with POST\")", "title": "" }, { "docid": "442e9ff84517371839822f77d41d2a57", "score": "0.5216749", "text": "def main(request):\n\n if request.user.is_authenticated:\n return redirect(\"question\")\n else:\n return redirect(\"login\")", "title": "" }, { "docid": "966c6a282b653994b33a2d74ed999101", "score": "0.52164596", "text": "def test_view_uses_correct_template(self):\n self.client.login(username=\"john.doe\", password=\"XVAP11!0$\")\n response = self.client.get(reverse(\"meal-add\"))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"meals/meal_form.html\")", "title": "" }, { "docid": "8dde170298c7585c4318c3b2cb303907", "score": "0.52153194", "text": "def add_single_view(self, request, form_url='', extra_context=None):\n return self.add_view(\n request, form_url=form_url, extra_context=extra_context)", "title": "" }, { "docid": "194b028581b1132edb8149ac5a44e8da", "score": 
"0.521503", "text": "def dashboard():\n\n client_form = AddClientForm(prefix='client_form')\n prop_form = AddPropertyForm(prefix='prop_form')\n showing_form = AddShowingForm(prefix='showing_form')\n showings = query.getShowingByUser(current_user.username)\n table = query.getClientsForUser(current_user.username)\n\n if prop_form.validate_on_submit() and prop_form.submit.data:\n query.createProperty({\n 'List_Price': prop_form.list_price.data,\n 'Location': prop_form.location.data,\n 'Trend_Link': prop_form.trend_link.data})\n\n return redirect(\"dashboard\")\n\n if showing_form.validate_on_submit() and showing_form.submit.data:\n \n r = query.createShowing({\n 'client_id': showing_form.client_id.data,\n 'Property_ID': showing_form.Property_ID.data,\n 'Feedback': showing_form.Feedback.data,\n 'Rating': showing_form.Rating.data})\n print(r)\n\n showings = query.getShowingByUser(current_user.username)\n\n return redirect(\"dashboard\")\n\n # on submit re-render template using form data and query\n if client_form.validate_on_submit() and client_form.submit.data:\n\n query.createClient(\n {\n \"first_name\": client_form.first_name.data,\n \"last_name\": client_form.last_name.data,\n \"email\": client_form.email.data,\n \"phone\": client_form.phone.data,\n \"user_id\": current_user.id,\n }\n )\n\n table = query.getClientsForUser(current_user.username)\n\n return redirect(\"dashboard\")\n\n return render_template(\n \"home/dashboard.html\",\n title=\"Dashboard\",\n table=table,\n user=current_user,\n showings=showings,\n showing_form=showing_form,\n client_form=client_form,\n prop_form=prop_form,\n )", "title": "" }, { "docid": "dfdd30fbabecd233635b20197681a729", "score": "0.52150214", "text": "def login_required(view):\n def view_wrapper(request, *args, **kwargs):\n if request.user.is_authenticated:\n return view(request, *args, **kwargs)\n else:\n if request.is_ajax():\n return JsonResponse({'error': _(u'Debes estar logueado.')}, status=400)\n else:\n path = request.get_full_path()\n return redirect_to_login(path)\n return view_wrapper", "title": "" }, { "docid": "d3ac053c38a9d0cdfce3cc9ea28d9d7b", "score": "0.5206036", "text": "def plan_view(request, plan_id):\n\n plan = Plan.objects.get(pk=plan_id)\n\n if request.user==plan.user:\n motes = plan.motes.all()\n return {'plan': plan, 'motes': motes,}\n else:\n return redirect('login')", "title": "" }, { "docid": "6d00f31420d4523019a4218ad23f6d33", "score": "0.52053094", "text": "def view(self, user, financing, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return True\n\n if user.is_advisor:\n return True\n\n return self.admin_permission(user, financing, *args)", "title": "" }, { "docid": "652b785db101bc9a7e11ddae86875afe", "score": "0.520398", "text": "def create(self, request):\n\n\t\treturn ObtainAuthToken().post(request)", "title": "" }, { "docid": "4454f383ca75e348bf7ed7f2a2707f88", "score": "0.5203732", "text": "def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user')\n self.draw = kwargs.pop('draw')\n super(BuyTicketsForm, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "7de9557587be1cb02d0322356f05e65e", "score": "0.52032346", "text": "def protected_view(request):\n return render(request, 'bands/protected.html', {'current_user': request.user})", "title": "" }, { "docid": "d563c679a14a0b955ac40e5daeeea3ed", "score": "0.5195981", "text": "def show_new_user_form():\n\n return render_template('users/new_user.html')", "title": "" }, 
{ "docid": "b5d0307b5ea8cae318c54d98cd5b9a9b", "score": "0.5190887", "text": "def request_add_handler(request):\n pprint(request.POST)\n global matching_jlist\n n = len(matching_jlist)\n user = request.user\n print(n)\n index = 0\n for i in range(n):\n if(\"submit\"+str(i) in request.POST):\n index = i\n print(\"index\", index)\n jrny = matching_jlist[index][0]\n description = \"User \"+user.username+\" wants to be added to the \" + \\\n jrny.journey_id+\" in which you are a participant.\"\n title = \"Journey Add request\"\n for ut in jrny.participants.all():\n Notification.objects.create(user_to=ut, user_from=user, title=title, description=description,\n notif_type=\"Journey Related\", creation_time=datetime.datetime.now()+datetime.timedelta(hours=5.5),\n travel_id=jrny.journey_id)\n matching_jlist[index][1] = True\n return redirect(\"/user_journeys/\")", "title": "" }, { "docid": "4d366c6b0f8b3efbe38ebde5c749dd98", "score": "0.5189515", "text": "def test_add_flight_functionality(self):\n with self.subTest('Test normal user cannot add flight.'):\n self.client.force_authenticate(user=self.normal_user)\n response = self.client.post(self.url, data=self.data, format='json')\n self.assertEqual(response.status_code, 403)\n self.assertEqual(response.data['error'], 'PermissionDenied')\n self.assertEqual(response.data['error_description'],\n 'You do not have permission to perform this action.')\n\n with self.subTest('Test a staff can add flight.'):\n self.client.force_authenticate(user=self.staff_user)\n response = self.client.post(self.url, data=self.data, format='json')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.data['name'], 'FLIGHT VALERIA')\n self.assertEqual(Flight.objects.all().count(), 1)\n\n with self.subTest('Test a super user can add flight.'):\n self.data['name'] = 'flight1'\n self.client.force_authenticate(user=self.super_user)\n response = self.client.post(self.url, data=self.data, format='json')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.data['name'], 'FLIGHT1')\n self.assertEqual(Location.objects.all().count(), 2)", "title": "" }, { "docid": "1bbce2b702c17f914f37dabd1c1dfd8d", "score": "0.51842207", "text": "def admin_login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if session.get('user_type') != 'admin':\n return json\n return view(**kwargs)\n\n return wrapped_view", "title": "" }, { "docid": "e23a66c0b804869ef276b1106c07f5e4", "score": "0.51601136", "text": "def post(self, request: HttpRequest, *args, **kwargs) -> Any:\n\n self.set_application_and_task()\n\n self.application.cleared_by.add(self.request.user)\n\n if self.kwargs[\"case_type\"] == \"import\":\n view_name = \"Search Import Applications\"\n else:\n view_name = \"Search Certificate Applications\"\n\n messages.success(request, f\"Case cleared, it can still be viewed in the {view_name} page.\")\n\n return redirect(reverse(\"workbasket\"))", "title": "" } ]
99d3bdd659d55dca74810a5a89293664
Write a pd.DataFrame to a Google Sheets sheet
[ { "docid": "eaa125b9edb84e9ad4fed7d55fc2e99d", "score": "0.63241166", "text": "def write_df(wks, df, row, col):\n\n # header row\n for j, col_name in enumerate(df.columns):\n if 'AOL' in col_name:\n wks.update_cell(row+1, col+j+1, 'AOL')\n wks.update_cell(row+2, col+j+1, col_name.replace('AOL', ''))\n elif 'Keen' in col_name:\n wks.update_cell(row+1, col+j+1, 'Keen')\n wks.update_cell(row+2, col+j+1, col_name.replace('Keen', ''))\n else:\n wks.update_cell(row+2, col+j+1, col_name)\n # data\n for i, row_of_data in df.iterrows():\n for j, col_name in enumerate(df.columns):\n wks.update_cell(row+3+i, col+j+1, row_of_data[j])", "title": "" } ]
[ { "docid": "f2cbcdddc55b0d33b6dba497953238b6", "score": "0.7028948", "text": "def write_google_sheet(results_dict, row=2, name='results_layers', sheet_name='run1'):\n # Google API stuff\n scope = [\"https://www.googleapis.com/auth/drive\"]\n creds = ServiceAccountCredentials.from_json_keyfile_name('config/gcp-credentials.json', scope)\n client = gspread.authorize(creds)\n sheet = client.open(name).worksheet(sheet_name)\n sheet.update(f'B{row}:D{row}', [list(results_dict.values())])", "title": "" }, { "docid": "78602bd0955a460b27245fc8415e5a3a", "score": "0.6928508", "text": "def write_sheet(writer, name, df, index=False):\n if index:\n df = df.reset_index()\n df.to_excel(writer, name, index=False)\n for i, col in enumerate(df.columns):\n if df.dtypes[col].name.startswith((\"float\", \"int\")):\n width = len(str(col)) + 2\n else:\n width = (\n max([df[col].map(lambda x: len(str(x or \"None\"))).max(), len(col)]) + 2\n )\n writer.sheets[name].set_column(i, i, width) # assumes xlsxwriter as engine", "title": "" }, { "docid": "256e4ad6fd9f80ade2a6a4d0a44a951d", "score": "0.69066215", "text": "def _xlsheet(writer, df, sheet_name, **kwargs):\r\n\r\n df.to_excel(writer, sheet_name, **kwargs)\r\n #tweak column width\r\n for i in range(df.shape[1]):\r\n writer.sheets[sheet_name].set_column(i, i+1, len(df.columns[i]) + 4)", "title": "" }, { "docid": "eefd16ba6ef9b81eb9b0a1cdeb078a2a", "score": "0.661218", "text": "def write_file(df, file_name):\n writer = pd.ExcelWriter(f\"{file_name}\", engine=\"openpyxl\")\n df.to_excel(writer, sheet_name=\"Sheet1\", startrow=1)\n writer.save()", "title": "" }, { "docid": "482c9dbb15055c9e7f453ac116d14f7c", "score": "0.65816694", "text": "def save_sheet(svc_sheet, spreadsheet_id, tab_name, header, records):\n if len(records) == 0:\n return\n\n count_columns = len(header)\n count_rows = len(records) + 1\n workbook = svc_sheet.open_by_key(spreadsheet_id)\n sheets = dict()\n for sheet in workbook.worksheets():\n sheets[sheet.title] = sheet\n\n if tab_name not in sheets:\n worksheet = workbook.add_worksheet(tab_name, count_rows, count_columns)\n\n else:\n worksheet = sheets[tab_name]\n\n worksheet.resize(rows=count_rows, cols=count_columns)\n range_text = 'A1:{}'.format(rowcol_to_a1(count_rows, count_columns))\n logging.info('accessing range {}'.format(range_text))\n cells = worksheet.range(range_text)\n for cell in cells:\n count_row = cell.row - 1\n count_col = cell.col - 1\n field = header[count_col]\n if count_row == 0:\n cell.value = field\n\n else:\n row_data = records[count_row - 1]\n cell.value = row_data[field]\n\n worksheet.update_cells(cells)", "title": "" }, { "docid": "0377b94a25e49272918533b5a48ad464", "score": "0.64087117", "text": "def write_sheet(writer, tables, id_name, ligand_name):\n\tfor table in tables:\n\t\ttable_id = table.get('id')\n\t\tif table_id == id_name:\n\t\t\ttable_df = pd.concat(pd.read_html(table.prettify()))\n\t\t\tdrop_list = [i for i in table_df.index if i%2!=0]\n\t\t\ttable_df = table_df.drop(drop_list, axis=0)\n\t\t\tdrop_col = [\"Unnamed: \"+str(i) for i in range(1, 10)]\n\t\t\ttable_df = table_df.drop(drop_col, axis=1)\n\t\t\ttable_df.to_excel(writer, sheet_name=id_name, index=False)\n\t\t\tprint(\"Sheet name \"+ligand_name+\" is add\")", "title": "" }, { "docid": "7343d3fff603e2f33dbaf85c80e46764", "score": "0.6363107", "text": "def save_styled_dataframe(\n filepath_with_ext: str,\n sheet_name_to_styler: Dict[str, Any],\n ) -> None:\n with pd.ExcelWriter(filepath_with_ext) as excel_writer:\n for sheet_name, styler_obj in 
sheet_name_to_styler.items():\n styler_obj.to_excel(\n excel_writer=excel_writer,\n sheet_name=sheet_name,\n index=False,\n engine='xlsxwriter',\n )\n return None", "title": "" }, { "docid": "0cc79258ffe88e4964b9eb467f5155e9", "score": "0.63454306", "text": "def write_exel(df, output_name):\n # create excel writer object\n writer = pd.ExcelWriter('covid_data.xlsx')\n # write dataframe to excel\n covid_data.to_excel(writer)\n # save the excel\n writer.save()\n print('DataFrame is written successfully to Excel File.')", "title": "" }, { "docid": "61cd408ea60be3df2ad61d27e0c4db45", "score": "0.63329643", "text": "def write_output_excel(excel_book_path, output_dict, sheetname):\n book = xw.Book(excel_book_path)\n sheet = book.sheets[sheetname]\n sheet.range(\"D12\").options(index=False, header=False).value = pd.DataFrame(output_dict)", "title": "" }, { "docid": "bcb3506fdd897e62e7b2b860095989b5", "score": "0.6283218", "text": "def generate_excel_effectiveness(dataframe_list, sheet_name_list, filename, project_io):\n with closing(BytesIO()) as output:\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n workbook = writer.book\n data_format1 = workbook.add_format({'bg_color': '#BBCCE2'})\n data_format2 = workbook.add_format({'bg_color': '#DEE6EF'})\n\n format_header = workbook.add_format({'text_wrap': True})\n\n workbook.formats[0].set_font_size(15)\n for df, name in zip(dataframe_list, sheet_name_list):\n df.to_excel(writer, sheet_name=name)\n worksheet = writer.sheets[name]\n worksheet.set_row(0, 20, format_header)\n worksheet.set_column('A:A', 5)\n worksheet.set_column('B:D', 30)\n for row in range(1, len(df) + 1, 2):\n worksheet.set_row(row, cell_format=data_format1)\n worksheet.set_row(row + 1, cell_format=data_format2)\n writer.save()\n if project_io is not None:\n project_io.save_data(filename, output.getvalue(), overwrite=True)\n else:\n with open(filename, 'wb') as out:\n out.write(output.getvalue())", "title": "" }, { "docid": "8787549f8820474d8210f1d94fb7b74e", "score": "0.6259219", "text": "def send_to_gsheet(service):\n sorted_elo_player = sorted(ALL_PLAYERS.values(), key=operator.attrgetter('elo'))\n sorted_elo_player.reverse()\n all_players_gsheet_format = []\n rank = 1\n longest_line = 0\n for player in sorted_elo_player:\n rank_string = str(rank)\n if len(player.tournaments) == 1:\n evolution = 0\n else:\n evolution = compute_evolution(player)\n if evolution > 0:\n rank_string += \"(+\"+str(evolution)+\")\"\n elif evolution < 0:\n rank_string += (\"(\"+str(evolution)+\")\")\n else:\n rank_string += (\"(=)\")\n rank += 1\n player_line = []\n player_line.append(rank_string)\n player_line.append(player.name)\n player_line.append(str(player.elo).replace(\".\", \",\"))\n player_line.append(len(player.tournaments))\n player_line.append(pretty_dict(player.elo_history).replace(\".\", \",\"))\n\n if len(player_line) > longest_line:\n longest_line = len(player_line)\n all_players_gsheet_format.append(player_line)\n\n rank_range = RANK_SHEET_NAME+\"!A1:\"+str(column_to_letter(longest_line))+str(len(all_players_gsheet_format))\n\n body = {\n 'values': all_players_gsheet_format\n }\n\n clear_sheet(service, ELO_SPREADSHEET_ID, RANK_SHEET_NAME)\n result = service.spreadsheets().values().update(spreadsheetId=ELO_SPREADSHEET_ID,\n range=rank_range,\n valueInputOption=\"RAW\",\n body=body).execute()\n logger.info(\"%s cells updated.\", result.get('updatedCells'))", "title": "" }, { "docid": "d492b17a949346070cb2edf10c93fb37", "score": "0.61341095", "text": 
"def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,\r\n truncate_sheet=False, \r\n **to_excel_kwargs):\r\n\r\n\r\n # ignore [engine] parameter if it was passed\r\n if 'engine' in to_excel_kwargs:\r\n to_excel_kwargs.pop('engine')\r\n\r\n writer = pd.ExcelWriter(filename, engine='openpyxl')\r\n\r\n try:\r\n FileNotFoundError\r\n except NameError:\r\n FileNotFoundError = IOError\r\n\r\n\r\n try:\r\n # try to open an existing workbook\r\n writer.book = load_workbook(filename)\r\n\r\n # get the last row in the existing Excel sheet\r\n # if it was not specified explicitly\r\n if startrow is None and sheet_name in writer.book.sheetnames:\r\n startrow = writer.book[sheet_name].max_row\r\n\r\n # copy existing sheets\r\n writer.sheets = {ws.title:ws for ws in writer.book.worksheets}\r\n except FileNotFoundError:\r\n print (\"File not found\")# file does not exist yet, we will create it\r\n pass\r\n\r\n if startrow is None:\r\n startrow = 0\r\n\r\n # write out the new sheet\r\n df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)\r\n\r\n # save the workbook\r\n try:\r\n writer.save()\r\n except:\r\n print(\"Close the target file\")\r\n return", "title": "" }, { "docid": "c417120e645c8679eead9e9fc5440590", "score": "0.6112243", "text": "def to_excel(self, workbook, sheet_name=\"Sheet1\", startrow=0, startcol=0):\n # import pdb\n # pdb.set_trace()\n excel_writer = pd.ExcelWriter(workbook.filename, engine=\"xlsxwriter\")\n excel_writer.book = workbook\n excel_writer.sheets = workbook.sheetnames\n self.data = self.data[self.column_order]\n self.styler.data = self.styler.data[self.column_order]\n new_index, indexer = self.styler.columns.reindex(self.column_order)\n self.styler.columns = new_index\n for styler_rule in self.styler_rules:\n if \"map\" in styler_rule:\n styler = self.styler[styler_rule[\"map\"]]\n else:\n styler = self.styler\n styler.applymap(\n styler_rule[\"func\"],\n subset=styler_rule[\"subset\"] if \"subset\" in styler_rule else None,\n )\n self.styler.to_excel(\n excel_writer,\n sheet_name=sheet_name,\n startrow=startrow,\n startcol=startcol,\n **self.params\n )\n workbook = excel_writer.book\n if self.params[\"index\"]:\n col_add = self.index_width\n else:\n col_add = 0\n if self.params[\"header\"]:\n row_add = self.header_height\n else:\n row_add = 0\n worksheet = workbook.get_worksheet_by_name(sheet_name)\n if self.conditional_formatters:\n # import pdb\n # pdb.set_trace()\n for format_rule in self.conditional_formatters:\n if format_rule[\"style\"]:\n format_obj = workbook.add_format(format_rule[\"style\"])\n format_rule[\"options\"].update({\"format\": format_obj})\n if format_rule[\"cols\"] is not None:\n for col in format_rule[\"cols\"]:\n col_index = list(self.data.columns).index(col) + col_add\n worksheet.conditional_format(\n row_add,\n col_index,\n row_add + len(self.data),\n col_index,\n options=format_rule[\"options\"],\n )\n if format_rule[\"rows\"] is not None:\n for row in format_rule[\"rows\"]:\n row_index = list(self.data.index).index(row) + row_add\n worksheet.conditional_format(\n row_index,\n col_add,\n row_index,\n len(self.data.columns) + col_add,\n options=format_rule[\"options\"],\n )\n if format_rule[\"index\"] is not None:\n # import pdb\n # pdb.set_trace()\n col_start_index = 0 + startcol\n if format_rule[\"index\"] is True:\n col_range = range(\n col_start_index,\n col_start_index + len(self.data.index.names),\n )\n elif isinstance(format_rule[\"index\"], list):\n col_range = [\n (col_start_index + level) for level in 
format_rule[\"index\"]\n ]\n else:\n continue\n for col_index in col_range:\n worksheet.conditional_format(\n 0,\n col_index,\n len(self.data),\n col_index,\n options=format_rule[\"options\"],\n )\n if format_rule[\"header\"] is not None:\n row_start_index = 0 + startrow\n if format_rule[\"header\"] is True:\n row_range = range(\n row_start_index,\n row_start_index + len(self.data.columns.names),\n )\n elif isinstance(format_rule[\"header\"], list):\n row_range = [\n (row_start_index + level) for level in format_rule[\"header\"]\n ]\n else:\n continue\n for row_index in row_range:\n worksheet.conditional_format(\n row_index,\n 0,\n row_index,\n len(self.data.columns),\n options=format_rule[\"options\"],\n )\n if format_rule[\"bool_map\"] is not None:\n # TODO: Finish this\n if format_rule[\"col_wise\"]:\n for col in format_rule[\"cols\"]:\n col_index = list(self.data.columns).index(col) + col_add\n worksheet.conditional_format(\n 1,\n col_index,\n len(self.data),\n col_index,\n options=format_rule[\"options\"],\n )\n else:\n for row in format_rule[\"rows\"]:\n row_index = list(self.data.index).index(row) + row_add\n worksheet.conditional_format(\n row_index,\n 0,\n row_index,\n len(self.data.columns),\n options=format_rule[\"options\"],\n )\n col_rules = [rule for rule in self.cell_formatters if rule[\"cols\"] is not None]\n row_rules = [rule for rule in self.cell_formatters if rule[\"rows\"] is not None]\n bool_rules = [\n rule for rule in self.cell_formatters if rule[\"bool_map\"] is not None\n ]\n index_rules = [rule for rule in self.cell_formatters if rule[\"index\"]]\n header_rules = [rule for rule in self.cell_formatters if rule[\"header\"]]\n if col_rules:\n for col in set(flatten_list([x[\"cols\"] for x in col_rules])) & set(\n list(self.data.columns)\n ):\n all_rules_for_col = [x for x in col_rules if col in x[\"cols\"]]\n combined_style = {}\n combined_options = {}\n for each_rule in all_rules_for_col:\n combined_style.update(each_rule[\"style\"])\n if each_rule[\"options\"] is not None:\n combined_options.update(each_rule[\"options\"])\n width_val, style = process_col_style(\n combined_style, self.data[col], self.styler\n )\n col_index = list(self.data.columns).index(col) + col_add + startcol\n style_format = workbook.add_format(style)\n worksheet.set_column(\n col_index,\n col_index,\n width=width_val,\n cell_format=style_format,\n options=combined_options,\n )\n if row_rules:\n row_vals = list(set(flatten_list([x[\"rows\"] for x in row_rules])))\n # import pdb\n # pdb.set_trace()\n if str(self.data.index.dtype) != \"int64\":\n int_rows = [x for x in row_vals if isinstance(x, int)]\n int_row_names = [self.data.index[x] for x in int_rows]\n row_vals = set(\n [x for x in row_vals if not isinstance(x, int)] + int_row_names\n ) & set(self.data.index)\n for row in row_vals:\n all_rules_for_row = [\n x\n for x in row_rules\n if row in x[\"rows\"] or list(self.data.index).index(row) in x[\"rows\"]\n ]\n combined_style = {}\n combined_options = {}\n for each_rule in all_rules_for_row:\n combined_style.update(each_rule[\"style\"])\n if each_rule[\"options\"] is not None:\n combined_options.update(each_rule[\"options\"])\n height_val, style = process_row_style(\n combined_style, self.data.loc[row], self.styler\n )\n row_index = list(self.data.index).index(row) + row_add + startrow\n style_format = workbook.add_format(style)\n worksheet.set_row(\n row_index,\n height=height_val,\n cell_format=style_format,\n options=combined_options,\n )\n if self.params[\"index\"] and index_rules:\n # 
import pdb\n # pdb.set_trace()\n if not isinstance(self.data.index, pd.MultiIndex):\n width_val, style, options = create_index_format(\n index_rules, self.data.index\n )\n col_index = 0 + startcol\n style_format = workbook.add_format(style)\n worksheet.set_column(\n col_index,\n col_index,\n width=width_val,\n cell_format=style_format,\n options=options,\n )\n else:\n for level in range(0, len(self.data.index.names)):\n all_rules_for_index = [\n x\n for x in index_rules\n if level in x[\"index\"] or x[\"index\"] is True\n ]\n width_val, style, options = create_index_format(\n all_rules_for_index, self.data.index.get_level_values(level)\n )\n col_index = level + startcol\n style_format = workbook.add_format(style)\n worksheet.set_column(\n col_index,\n col_index,\n width=width_val,\n cell_format=style_format,\n options=options,\n )\n if self.params[\"header\"] and header_rules:\n # import pdb\n # pdb.set_trace()\n if not isinstance(self.data.columns, pd.MultiIndex):\n height_val, style, options = create_header_format(\n header_rules, self.data.columns\n )\n row_index = 0 + startrow\n style_format = workbook.add_format(style)\n worksheet.set_row(\n row_index,\n height=height_val,\n cell_format=style_format,\n options=options,\n )\n else:\n for level in range(0, len(self.data.columns.names)):\n all_rules_for_header = [\n x\n for x in header_rules\n if level in x[\"header\"] or x[\"header\"] is True\n ]\n height_val, style, options = create_index_format(\n all_rules_for_header, self.data.columns.get_level_values(level)\n )\n row_index = level + startrow\n style_format = workbook.add_format(style)\n worksheet.set_row(\n row_index,\n height=height_val,\n cell_format=style_format,\n options=options,\n )\n if bool_rules:\n pass\n # TODO: Finish this\n # excel_writer.save()\n # excel_writer.close()", "title": "" }, { "docid": "9bf40cf076d1b36619eb79204177b504", "score": "0.6102703", "text": "def append_df_to_excel(filename, df, sheet_name, startrow=0,truncate_sheet=False,**to_excel_kwargs):\n\n\n # ignore [engine] parameter if it was passed\n if 'engine' in to_excel_kwargs:\n to_excel_kwargs.pop('engine')\n\n writer = pd.ExcelWriter(filename, engine='openpyxl')\n\n # Python 2.x: define [FileNotFoundError] exception if it doesn't exist\n try:\n FileNotFoundError\n except NameError:\n FileNotFoundError = IOError\n\n\n try:\n # try to open an existing workbook\n writer.book = load_workbook(filename)\n\n # get the last row in the existing Excel sheet\n # if it was not specified explicitly\n if startrow is None and sheet_name in writer.book.sheetnames:\n startrow = writer.book[sheet_name].max_row\n\n # truncate sheet\n if truncate_sheet and sheet_name in writer.book.sheetnames:\n # index of [sheet_name] sheet\n idx = writer.book.sheetnames.index(sheet_name)\n # remove [sheet_name]\n writer.book.remove(writer.book.worksheets[idx])\n # create an empty sheet [sheet_name] using old index\n writer.book.create_sheet(sheet_name, idx)\n\n # copy existing sheets\n writer.sheets = {ws.title:ws for ws in writer.book.worksheets}\n except FileNotFoundError:\n # file does not exist yet, we will create it\n pass\n\n if startrow is None:\n startrow = 0\n\n # write out the new sheet\n df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)\n ws = writer.book[sheet_name]\n for column_cells in ws.columns:\n length = max(map(lambda cell: len(str(cell.value)) if cell.value else 0, column_cells))\n ws.column_dimensions[column_cells[0].column_letter].width = length+3\n # save the workbook\n writer.save()", 
"title": "" }, { "docid": "28a242f432037c09848321fa74f853ca", "score": "0.60957867", "text": "def generate_excel_measure(dataframe_list, sheet_name_list, filename, project_io):\n with closing(BytesIO()) as output:\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n workbook = writer.book\n data_format1 = workbook.add_format({'bg_color': '#BBCCE2'})\n data_format2 = workbook.add_format({'bg_color': '#DEE6EF'})\n\n format_header = workbook.add_format({'text_wrap': True})\n\n workbook.formats[0].set_font_size(15)\n for df, name in zip(dataframe_list, sheet_name_list):\n df.to_excel(writer, sheet_name=name)\n worksheet = writer.sheets[name]\n worksheet.set_row(0, 30, format_header)\n worksheet.set_column('A:A', 5)\n worksheet.set_column('B:B', 30)\n worksheet.set_column('C:C', 30)\n worksheet.set_column('D:D', 15)\n worksheet.set_column('F:G', 35)\n worksheet.set_column('H:AH', 20)\n for row in range(1, len(df) + 1, 2):\n worksheet.set_row(row, cell_format=data_format1)\n worksheet.set_row(row + 1, cell_format=data_format2)\n writer.save()\n if project_io is not None:\n project_io.save_data(filename, output.getvalue(), overwrite=True)\n else:\n with open(filename, 'wb') as out:\n out.write(output.getvalue())", "title": "" }, { "docid": "bdc52290fb473595c31cc3dbea88385e", "score": "0.6046522", "text": "def write_excel_df(save_file_path: str,\n df_list: List, sheet_name_list: List):\n writer = pd.ExcelWriter(save_file_path, engine='xlsxwriter')\n # Write each dataframe to a different worksheet\n assert len(df_list) == len(sheet_name_list)\n for index in range(len(df_list)):\n df_list[index].to_excel(writer, sheet_name=sheet_name_list[index])\n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n return", "title": "" }, { "docid": "66e553c1e866d8bc58ea30574aff37d9", "score": "0.60190165", "text": "def save_large_df_to_excel(df, file_path, sheet_prefix='page'):\n\n def _split_file_path():\n \"\"\"Returns a list representing the file path containing the file.\"\"\"\n if '/' in file_path:\n # Split file path into a list\n path_parts = file_path.split('/')\n # Take all except last entry and append together\n file_dir = '/'.join(path_parts[:-1])\n file_name = path_parts[-1]\n\n return file_dir, file_name\n else:\n # Otherwise, return current directory as '.'\n return '.', file_path\n\n\n file_dir, file_name = _split_file_path()\n # Prevents overwriting if file already exists\n if file_name in os.listdir(file_dir):\n raise ValueError('Filename {} already exists'.format(file_name))\n writer = pd.ExcelWriter(file_path, engine='xlsxwriter')\n\n i = 0\n while True:\n # Remove one row to leave space for header\n max_num_rows = 2**20 - 1\n # Subselected DataFrame to write as a single sheet\n temp_df = df.iloc[max_num_rows*i: max_num_rows*(i+1)]\n\n # Terminate if we have reached the end of the DataFrame\n if temp_df.shape[0] == 0:\n break\n\n # Save sheet to excel file\n temp_df.to_excel(writer, sheet_name='{}_{}'.format(sheet_prefix, i))\n i += 1\n\n writer.save()", "title": "" }, { "docid": "803f84dc291b5fc63f5b33dc08da6220", "score": "0.6018584", "text": "def send_to_gs(df, wks_name):\n\n d2g.upload(df,\n spreadsheet_key,\n wks_name,\n credentials=credentials,\n row_names=True)", "title": "" }, { "docid": "ce8937d80ee49a96e0035ddef6e6fede", "score": "0.6017467", "text": "def save_dict_xlsx(data_dict: Dict[Any, Any], save_location: str, sheet_name: str) -> None:\n pd_df = pd.DataFrame.from_dict(data_dict, orient=\"index\")\n\n with 
pd.ExcelWriter(save_location, engine=\"xlsxwriter\") as writer: # pylint: disable=abstract-class-instantiated\n for n, df in (pd_df).items():\n df.to_excel(writer, sheet_name=sheet_name)", "title": "" }, { "docid": "2b89a258259a56c6995b575754f255ce", "score": "0.5974723", "text": "def list_to_spreadsheet(spreadsheet_name, column_names, mylist):\n records = [dict(zip(column_names, row)) for row in mylist]\n dataframe = pandas.DataFrame.from_records(\n records, index=column_names[0], columns=column_names\n )\n dataframe.to_excel(writer, sheet_name=spreadsheet_name)", "title": "" }, { "docid": "ee90ff375db44056af145452f293aa70", "score": "0.5937404", "text": "def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,\r\n truncate_sheet=False, \r\n **to_excel_kwargs):\r\n from openpyxl import load_workbook\r\n\r\n import pandas as pd\r\n\r\n # ignore [engine] parameter if it was passed\r\n if 'engine' in to_excel_kwargs:\r\n to_excel_kwargs.pop('engine')\r\n\r\n writer = pd.ExcelWriter(filename, engine='openpyxl')\r\n\r\n # Python 2.x: define [FileNotFoundError] exception if it doesn't exist \r\n try:\r\n FileNotFoundError\r\n except NameError:\r\n FileNotFoundError = IOError\r\n\r\n\r\n try:\r\n # try to open an existing workbook\r\n writer.book = load_workbook(filename)\r\n\r\n # get the last row in the existing Excel sheet\r\n # if it was not specified explicitly\r\n if startrow is None and sheet_name in writer.book.sheetnames:\r\n startrow = writer.book[sheet_name].max_row\r\n\r\n # truncate sheet\r\n if truncate_sheet and sheet_name in writer.book.sheetnames:\r\n # index of [sheet_name] sheet\r\n idx = writer.book.sheetnames.index(sheet_name)\r\n # remove [sheet_name]\r\n writer.book.remove(writer.book.worksheets[idx])\r\n # create an empty sheet [sheet_name] using old index\r\n writer.book.create_sheet(sheet_name, idx)\r\n\r\n # copy existing sheets\r\n writer.sheets = {ws.title:ws for ws in writer.book.worksheets}\r\n except FileNotFoundError:\r\n # file does not exist yet, we will create it\r\n pass\r\n\r\n if startrow is None:\r\n startrow = 0\r\n\r\n # write out the new sheet\r\n df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)\r\n\r\n # save the workbook\r\n writer.save()", "title": "" }, { "docid": "093a902d626edf70cd431a2b64705baa", "score": "0.5931121", "text": "def to_excel(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "d70cf37bab07a7e56ca22281eb9174c9", "score": "0.5927741", "text": "def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,\n startcol=None, truncate_sheet=False, \n **to_excel_kwargs):\n from openpyxl import load_workbook\n\n import pandas as pd\n\n # ignore [engine] parameter if it was passed\n if 'engine' in to_excel_kwargs:\n to_excel_kwargs.pop('engine')\n\n writer = pd.ExcelWriter(filename, engine='openpyxl')\n\n # Python 2.x: define [FileNotFoundError] exception if it doesn't exist \n try:\n FileNotFoundError\n except NameError:\n FileNotFoundError = IOError\n\n\n try:\n # try to open an existing workbook\n writer.book = load_workbook(filename)\n\n # get the last row in the existing Excel sheet\n # if it was not specified explicitly\n if startrow is None and sheet_name in writer.book.sheetnames:\n startrow = writer.book[sheet_name].max_row\n # if startcol is None and sheet_name in writer.book.sheetnames:\n # startcol = writer.book[sheet_name].max_col\n # truncate sheet\n if truncate_sheet and sheet_name in writer.book.sheetnames:\n # index of [sheet_name] sheet\n idx = 
writer.book.sheetnames.index(sheet_name)\n # remove [sheet_name]\n writer.book.remove(writer.book.worksheets[idx])\n # create an empty sheet [sheet_name] using old index\n writer.book.create_sheet(sheet_name, idx)\n\n # copy existing sheets\n writer.sheets = {ws.title:ws for ws in writer.book.worksheets}\n except FileNotFoundError:\n # file does not exist yet, we will create it\n pass\n\n if startrow is None:\n startrow = 0\n if startcol is None:\n startcol = 0\n\n # write out the new sheet\n df.to_excel(writer, sheet_name, startrow=startrow, startcol=startcol, **to_excel_kwargs)\n\n # save the workbook\n writer.save()", "title": "" }, { "docid": "21f1698790968ba9a0f551c30c69b498", "score": "0.592436", "text": "def saveSpreadSheet(self):\r\n try:\r\n self.wb.save(self.fullFilePath)\r\n logger.info('In module ' + __name__ \r\n + '. saveSpreadSheet.')\r\n except Exception as e:\r\n print('ExcelWriter.saveSpreadSheet: ' + str(e)) \r\n logger.error('ExcelWriter.saveSpreadSheet: ' + str(e))", "title": "" }, { "docid": "933ef31907dee5effbf16e13207f5bef", "score": "0.59137565", "text": "def data_to_xls(path, data, sheet_name):\n df = pd.DataFrame(data, index=[0]).T\n df.to_excel(path, sheet_name=sheet_name)", "title": "" }, { "docid": "3a911c35e3a4a7ac7e55e637e2d9a57b", "score": "0.5850379", "text": "def Test():\n\n # https://docs.google.com/spreadsheets/d/1CR_8w8ZYeu8gD9X3UwwtuZA2djj5jCpNSFycNSXM_GY/edit#gid=1608909088\n # setup\n\n service = GetService()\n # - preface as upload.GetService() if use in another file\n\n # doc: db_adwords\n spreadsheetId = \"1CR_8w8ZYeu8gD9X3UwwtuZA2djj5jCpNSFycNSXM_GY\"\n\n # READ\n read_sheet = \"Sheet2\" # please make sure this exists first\n read_data = GetGSheetsRange(service, spreadsheetId, read_sheet, quiet=False)\n print len(read_data)\n\n # WRITE\n write_sheet = \"Sheet1\"\n newValues = [[\"a\", \"bbbb\", \"ccc\"], [\"235235\"], [\"325235235\", \"dgdfg\", \"fdgfdg\"]]\n OverwriteCells(service, spreadsheetId, write_sheet, newValues, quiet=False)\n\n # DELETE\n DeleteSheetId = 1608909088\n DeleteGSheetsRange(service, spreadsheetId, DeleteSheetId, 0, 3, quiet=False)", "title": "" }, { "docid": "8edbb3584cae0c4ffa59d1409b2345be", "score": "0.584908", "text": "def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,\n truncate_sheet=False, \n **to_excel_kwargs):\n from openpyxl import load_workbook\n\n import pandas as pd\n\n # ignore [engine] parameter if it was passed\n if 'engine' in to_excel_kwargs:\n to_excel_kwargs.pop('engine')\n\n writer = pd.ExcelWriter(filename, engine='openpyxl')\n\n # Python 2.x: define [FileNotFoundError] exception if it doesn't exist \n try:\n FileNotFoundError\n except NameError:\n FileNotFoundError = IOError\n\n\n try:\n # try to open an existing workbook\n writer.book = load_workbook(filename)\n\n # get the last row in the existing Excel sheet\n # if it was not specified explicitly\n if startrow is None and sheet_name in writer.book.sheetnames:\n startrow = writer.book[sheet_name].max_row\n\n # truncate sheet\n if truncate_sheet and sheet_name in writer.book.sheetnames:\n # index of [sheet_name] sheet\n idx = writer.book.sheetnames.index(sheet_name)\n # remove [sheet_name]\n writer.book.remove(writer.book.worksheets[idx])\n # create an empty sheet [sheet_name] using old index\n writer.book.create_sheet(sheet_name, idx)\n\n # copy existing sheets\n writer.sheets = {ws.title:ws for ws in writer.book.worksheets}\n except FileNotFoundError:\n # file does not exist yet, we will create it\n pass\n\n if startrow is 
None:\n startrow = 0\n\n # write out the new sheet\n df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)\n\n # save the workbook\n writer.save()", "title": "" }, { "docid": "4eb6c084084054d7b8a99bc49dc2a0d2", "score": "0.5847653", "text": "def excel(Df, index=True):\n\n writer = pd.ExcelWriter(r'C:\\Users\\Administrator\\PycharmProjects\\Stock\\Output\\Output.xlsx', engine='xlsxwriter')\n Df.to_excel(writer, sheet_name='Sheet1', index= index)", "title": "" }, { "docid": "5692c43f10c52a25bf5de3b02a9673b6", "score": "0.5833753", "text": "def save_as_xlsx(df, file, index=True, **to_excel_kwargs):\n\n def localize(df):\n \"\"\"\n Remove tz from datetime cols since Excel doesn't allow\n \"\"\"\n tz_cols = df.select_dtypes(include=[\"datetimetz\"]).columns\n for tz_col in tz_cols:\n df[tz_col] = df[tz_col].dt.tz_localize(None)\n return df\n\n import xlsxwriter\n\n df = df.copy()\n df = localize(df)\n with pd.ExcelWriter(\n file,\n engine=\"xlsxwriter\",\n date_format=\"m/d/yyy\",\n datetime_format=\"m/d/yyy h:mmAM/PM\",\n ) as writer:\n df.to_excel(writer, index=index, **to_excel_kwargs)", "title": "" }, { "docid": "95dad6f82acdfb6b791e83e8e00e1790", "score": "0.5817795", "text": "def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,\n truncate_sheet=False,\n **to_excel_kwargs):\n\n # ignore [engine] parameter if it was passed\n if 'engine' in to_excel_kwargs:\n to_excel_kwargs.pop('engine')\n\n writer = pd.ExcelWriter(filename, engine='openpyxl')\n\n try:\n # try to open an existing workbook\n writer.book = load_workbook(filename)\n\n # get the last row in the existing Excel sheet\n # if it was not specified explicitly\n if startrow is None and sheet_name in writer.book.sheetnames:\n startrow = writer.book[sheet_name].max_row\n\n # truncate sheet\n if truncate_sheet and sheet_name in writer.book.sheetnames:\n # index of [sheet_name] sheet\n idx = writer.book.sheetnames.index(sheet_name)\n # remove [sheet_name]\n writer.book.remove(writer.book.worksheets[idx])\n # create an empty sheet [sheet_name] using old index\n writer.book.create_sheet(sheet_name, idx)\n\n # copy existing sheets\n writer.sheets = {ws.title:ws for ws in writer.book.worksheets}\n except FileNotFoundError:\n # file does not exist yet, we will create it\n pass\n\n if startrow is None:\n startrow = 0\n\n # write out the new sheet\n df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)\n\n # save the workbook\n writer.save()", "title": "" }, { "docid": "e2bd06eccf5ab13d8e9d3d835029849c", "score": "0.57952166", "text": "def WriteGSheetsRange(\n service, spreadsheetId, range_name, values, value_input_option=\"RAW\", quiet=True\n):\n body = {\"values\": values}\n result = (\n service.spreadsheets()\n .values()\n .update(\n spreadsheetId=spreadsheetId,\n range=range_name,\n valueInputOption=value_input_option,\n body=body,\n )\n .execute()\n )\n\n if not quiet:\n print \" WRITE RESULT\"\n for k, v in result.iteritems():\n print \" %s: %s\" % (k, v)\n print\n \"\"\"\n # example\n spreadsheetId: 1d9Cb9-_-XTYHevDHUfVvQAZjJY9EN6yeZmEcR4DsFVw\n updatedRange: tb_staging_camp_day!A1:O630\n updatedCells: 9450\n updatedRows: 630\n updatedColumns: 15\n \"\"\"", "title": "" }, { "docid": "5e66375247f6a897a78555151e6b1838", "score": "0.5775305", "text": "def save_to_file(out_folder, animal_name, dataframe):\n file_name = \"%s.xls\" % (animal_name)\n file_name = os.path.join(out_folder, file_name)\n\n\n cols = dataframe.columns.tolist()\n #print ('original:', cols)\n\n #neworder=[11, 0, 17, 19, 
15, 18, 21, 5, 4, 20, 3, 16, 1, 12, 7, 14, 6, 8, 2, 10, 9, 13]\n #cols = [cols[i] for i in neworder]\n #print ('reorderd:', cols)\n #neworder = [[]]\n neworder = ['date','animal','program','duration','trials','hit','miss','false','withold','hit_rate','false_positive_rate','dprime','recall','precision','F_score','500 Hz', '1000 Hz','2000 Hz','4000 Hz', '8000 Hz', '16000 Hz', '32000 Hz']\n\n #cols = list(neworder.columns.values)\n dataframe = dataframe[neworder]\n\n\n\n# df = df[['f','f']]\n# cols = list(df.columns.values)\n\n\n dataframe.to_csv(file_name, sep='\\t', index=False)\n return", "title": "" }, { "docid": "2b23817b6ce8b42c15f784340d94116e", "score": "0.575602", "text": "def write_xls(table_2Dlist, path, sheet_name, data_line=None, start_from_row1=False):\n workbook = xlwt.Workbook(encoding=sys.stdout.encoding)\n sheet = workbook.add_sheet(sheet_name)\n if data_line != None:\n for index, value in enumerate(data_line):\n sheet.write(0, index, value)\n if start_from_row1 == False:\n for row in range(len(table_2Dlist)):\n for col in range(len(table_2Dlist[row])):\n sheet.write(row+1, col, table_2Dlist[row][col])\n else:\n for row in range(len(table_2Dlist)):\n if row == 0:\n pass\n else:\n for col in range(len(table_2Dlist[row])):\n sheet.write(row+1, col, table_2Dlist[row][col])\n else:\n for row in range(len(table_2Dlist)):\n for col in range(len(table_2Dlist[row])):\n sheet.write(row, col, table_2Dlist[row][col])\n workbook.save(path)", "title": "" }, { "docid": "99d53caa93d338aae767269f6852c5a8", "score": "0.5753641", "text": "def export_scatter_diagram(self, filename):\n\n writer = pd.ExcelWriter(filename, engine=\"xlsxwriter\")\n\n # Seastates sheet\n self.df_ss.to_excel(writer, sheet_name=\"Sea States\", na_rep=\"N/A\", float_format=\"%.2f\")\n ws = writer.sheets[\"Sea States\"]\n ws.set_column(\"A:A\", 11)\n # wb = writer.book\n # fmt = wb.add_format({'num_format': '0.00'})\n # ws.set_column('B:Y', None, fmt)\n\n # Seascatter sheet\n # Replace zeros with blanks\n df_scatter = self.df_scatter.replace({0: \"\"})\n df_scatter.to_excel(writer, sheet_name=\"Sea Scatter Diagram\", float_format=\"%.2f\")\n ws.set_column(\"A:A\", 18)\n writer.save()", "title": "" }, { "docid": "e3c2ab4f8c7e83a2cde2573e974a483f", "score": "0.572457", "text": "def __generate_sheet__(self, app, xls_doc):\n # Create the worksheet\n sheet = xls_doc.add_sheet(app)\n \n # Create header style\n hdr_font = xlwt.Font()\n hdr_font.name = 'Times New Roman'\n hdr_font.colour_index = 2\n hdr_font.bold = True\n hdr_style = xlwt.XFStyle()\n hdr_style.font = hdr_font\n \n # Create header\n sheet.write(0, 0, \"Count\")\n sheet.write(0, 1, \"DB Name\")\n sheet.write(0, 2, \"DB ID\")\n sheet.write(0, 3, \"GO Name\")\n sheet.write(0, 4, \"GO ID\")\n sheet.write(0, 5, \"DB Link\")\n sheet.write(0, 6, \"GO Link\")\n \n # Write a row to the spreadsheet for each row in iprstatsdata\n length = self.cache.get_match_length(app)\n for r in range(length):\n row = self.cache.get_one_row(app, r)\n if row:\n sheet.write(r+1, 0, str(row[1])) # Count\n sheet.write(r+1, 1, row[0]) # DB Name\n sheet.write(r+1, 2, row[3]) # DB ID\n sheet.write(r+1, 3, row[4]) # GO Name\n sheet.write(r+1, 4, row[2]) # GO ID\n sheet.write(r+1, 5, self.cache.get_url(app, r)) # DB URL\n sheet.write(r+1, 6, self.cache.get_url(app, r, True))#GO URL\n if len(row) == 7:\n sheet.write(r, 7, row[6]) # GO Definition\n else:\n break", "title": "" }, { "docid": "3bb0b535c4b0b29b7dc6fb8a131ee981", "score": "0.5717932", "text": "def hstFailuresToExcel(df, sheet):\n\n 
sheet = openSheet(sheet)\n\n failure_cells = ['D17', 'D18', 'D19', 'D20', 'D21', 'D22', 'D23', 'D24', 'D25', 'D26', 'D27', 'D28', 'D29', 'D30',\n 'D31', 'D32', 'D33', 'D34', 'D35']\n failures = df['Failure'].dropna().tolist()\n for i in range(len(failures)):\n sheet.range(failure_cells[i]).value = failures[i]", "title": "" }, { "docid": "8331960356cc03f22b609bbfecb7590e", "score": "0.5711396", "text": "def save_df(write_df, outputfile):\n print('Saving File....')\n if outputfile.split('.')[-1] == 'csv':\n write_df.to_csv(outputfile)\n\n elif outputfile.split('.')[-1] == 'xls' or outputfile.split('.')[-1] == 'xlsx':\n writer = pd.ExcelWriter(outputfile)\n write_df.to_excel(writer,'Result')\n writer.save()\n else:\n print('csv or excel needs to be selected for file {}!'.format(outputfile))\n raise NotImplementedError\n write_df.to_clipboard()\n print('...done!')", "title": "" }, { "docid": "be585cf10696b87075b8f27848f983b6", "score": "0.5699932", "text": "def xlsave(output_file, frames, sheets, **kwargs):\r\n\r\n with pd.ExcelWriter(\r\n output_file,\r\n options = {\r\n 'remove_timezone': True,\r\n },\r\n ) as writer:\r\n _l.info(f'{me()} saving to file {output_file} ...')\r\n #if only one sheet:\r\n if isinstance(sheets, str):\r\n _xlsheet(writer, frames, sheets, **kwargs)\r\n #if multiple sheets\r\n else:\r\n for f, s in zip(frames, sheets):\r\n _xlsheet(writer, f, s, **kwargs)\r\n _l.info(f'{me()} done')", "title": "" }, { "docid": "910bb5610f9971b92df06e942514e4b3", "score": "0.569453", "text": "def write_to_excel(dest_fullpath:str, dfs_map:dict, include_index:bool=True)->None:\n with pd.ExcelWriter(dest_fullpath) as writer:\n for k, df in dfs_map.items():\n df.to_excel(writer, sheet_name=k, index=include_index)", "title": "" }, { "docid": "6839f9f4a8fa4ed796ea5d74d6219a19", "score": "0.5687497", "text": "def export_dataframe(self, dataframe, filename, sep = ','):\n\t\tdataframe.to_excel(filename, index = False)", "title": "" }, { "docid": "0eb125fe416312607ed17881d2b21f9a", "score": "0.56730133", "text": "def _create_bom_worksheet(df, writer, header_format):\n df.to_excel(\n writer,\n sheet_name='bom',\n index=False,\n header=False,\n startrow=1\n )\n\n worksheet = writer.sheets['bom']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n worksheet.set_column(0, 0, 16) # Assembly\n worksheet.set_column(1, 1, 26) # Assembly_Name\n worksheet.set_column(5, 5, 16) # Dwg_No\n worksheet.set_column(6, 6, 34) # Component", "title": "" }, { "docid": "6fde5d0e01e97d68cfba7f5ea4f15eee", "score": "0.5660677", "text": "def write_to_sheet(sheet, row, col, exec_time, size, graph, path):\n sheet.write(0, 0, \"Expanded States\")\n sheet.write(0, 1, \"Solution Size\")\n sheet.write(0, 2, \"Time\")\n sheet.write(0, 3, \"Puzzle Size\")\n sheet.write(row, col, graph.expanded_states())\n if path is not None:\n sheet.write(row, col + 1, len(path))\n else:\n sheet.write(row, col + 1, \"Failed\")\n sheet.write(row, col + 2, exec_time)\n sheet.write(row, col + 3, size)", "title": "" }, { "docid": "dd864ea0d7ff343ce51ee078edd77628", "score": "0.5633066", "text": "def output_data(self, path):\n self.df = self.treat_data(self.df)\n self.df.to_excel(path, index=False, header=True)", "title": "" }, { "docid": "c580c6664e5964f95832d7599f7f0d72", "score": "0.5631117", "text": "def _create_indented_bom_worksheet(df, writer, header_format):\n df.to_excel(\n writer,\n sheet_name='indented_bom',\n index=False,\n header=False,\n startrow=1,\n )\n\n worksheet = 
writer.sheets['indented_bom']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n worksheet.set_column(2, 2, 21) # Dwg_No\n worksheet.set_column(3, 3, 60) # Component", "title": "" }, { "docid": "703bb7dad47b570920adce15b167cdf3", "score": "0.5628814", "text": "def write_xls(data, file_name):\r\n try:\r\n book = xlwt.Workbook()\r\n sheet1 = book.add_sheet(\"Jul-15\")\r\n\r\n for num in range(len(data)):\r\n row = sheet1.row(num)\r\n vals = data[num]\r\n for index, col in enumerate(vals):\r\n row.write(index, col)\r\n xls_name = '%s/%s.xls' % (MEDIA_ROOT, file_name)\r\n book.save(xls_name)\r\n except Exception, e:\r\n raise e", "title": "" }, { "docid": "acd8e8bd77e107629deaa7cde8c57687", "score": "0.56226164", "text": "def write_excel(sheets, sheet_names):\n wb = xlwt.Workbook()\n for sheet, sheet_name in zip(sheets, sheet_names):\n ws = wb.add_sheet(sheet_name)\n for row_num, row in enumerate(sheet):\n for col_num, cell in enumerate(row):\n try:\n as_float = float(cell)\n ws.write(row_num, col_num, as_float) # write number to excel\n except Exception, e:\n ws.write(row_num, col_num, cell) # write without changing to number\n wb.save('migrants.xls')", "title": "" }, { "docid": "0f8f834e72a853fd420181f4d5a10ebf", "score": "0.56202", "text": "def update(service):\n\n # Call the Sheets API\n # Compute a timestamp and pass the first two arguments\n values = [[1]]\n body = {'values': values}\n result = service.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID,\n range=RANGE_NAME,\n # How the input data should be interpreted.\n valueInputOption='USER_ENTERED',\n # How the input data should be inserted.\n # insertDataOption='INSERT_ROWS'\n body=body\n ).execute()", "title": "" }, { "docid": "404bfd7abf9bcb03d2fadd8a104ea8bc", "score": "0.5614299", "text": "def _create_import_file_worksheet(df, writer, header_format):\n df.to_excel(\n writer,\n sheet_name='import',\n index=False,\n startrow=1,\n header=False\n )\n\n worksheet = writer.sheets['import']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n # worksheet.set_column(0, 0, 16) # Assembly\n # worksheet.set_column(1, 1, 26) # Assembly_Name", "title": "" }, { "docid": "f3b9a078a0cf72a23899207be645f889", "score": "0.56134135", "text": "def sendToExcel(studies, sheet, provider):\n\n # excel sheet passed in from previous functions\n sheet = openSheet(sheet)\n\n # mapping for spreadsheet locations\n kgw_cells = {'PSG': 'K12', 'EEG': 'K13', 'ETCO2': 'K14', 'SPLIT': 'K15', 'HST': 'K16', 'MSLT': 'K17', 'MWT': 'K18',\n 'OAT': 'K19', 'Inspire': 'K20', 'PAP': 'K21', 'NAP': 'K22', 'FHST': 'K23', 'NS': 'K24'}\n mb_cells = {'PSG': 'P12', 'EEG': 'P13', 'ETCO2': 'P14', 'SPLIT': 'P15', 'HST': 'P16', 'MSLT': 'P17', 'MWT': 'P18',\n 'OAT': 'P19', 'Inspire': 'P20', 'PAP': 'P21', 'NAP': 'P22', 'FHST': 'P23', 'NS': 'P24'}\n qr_cells = {'PSG': 'U12', 'EEG': 'U13', 'ETCO2': 'U14', 'SPLIT': 'U15', 'HST': 'U16', 'MSLT': 'U17', 'MWT': 'U18',\n 'OAT': 'U19', 'Inspire': 'U20', 'PAP': 'U21', 'NAP': 'U22', 'FHST': 'U23', 'NS': 'U24'}\n jf_cells = {'PSG': 'AA12', 'EEG': 'AA13', 'ETCO2': 'AA14', 'SPLIT': 'AA15', 'HST': 'AA16', 'MSLT': 'AA17',\n 'MWT': 'AA18', 'OAT': 'AA19', 'Inspire': 'AA20', 'PAP': 'AA21', 'NAP': 'AA22', 'FHST': 'AA23', 'NS': 'AA24'}\n dr_cells = {'PSG': 'AG12', 'EEG': 'AG13', 'ETCO2': 'AG14', 'SPLIT': 'AG15', 'HST': 'AG16', 'MSLT': 'AG17',\n 'MWT': 'AG18', 'OAT': 'AG19', 'Inspire': 'AG20', 'PAP': 'AG21', 'NAP': 'AG22', 'FHST': 
'AG23', 'NS': 'AG24'}\n\n # assign correct cell mapping by provider\n if provider == 'kgw':\n cells = kgw_cells\n elif provider == 'mb':\n cells = mb_cells\n elif provider == 'qr':\n cells = qr_cells\n elif provider == 'jf':\n cells = jf_cells\n elif provider == 'dr':\n cells = dr_cells\n else:\n print('Provider {} is unknown'.format(provider))\n return\n \n if studies[0] == 'PSG':\n sheet.range(cells['PSG']).value = studies[1]\n elif 'EEG' in studies[0]:\n sheet.range(cells['EEG']).value = studies[1]\n elif 'EtCO2' in studies[0]:\n sheet.range(cells['ETCO2']).value = studies[1]\n elif 'Split' in studies[0]:\n sheet.range(cells['SPLIT']).value = studies[1]\n elif studies[0] == 'HST':\n sheet.range(cells['HST']).value = studies[1]\n elif 'MSLT' in studies[0]:\n sheet.range(cells['MSLT']).value = studies[1]\n elif 'MWT' in studies[0]:\n sheet.range(cells['MWT']).value = studies[1]\n elif 'OAT' in studies[0]:\n sheet.range(cells['OAT']).value = studies[1]\n elif studies[0] == 'PAP':\n sheet.range(cells['PAP']).value = studies[1]\n elif studies[0] == 'PAP Nap':\n sheet.range(cells['NAP']).value = studies[1]\n elif studies[0] == 'Failed HST':\n sheet.range(cells['FHST']).value = studies[1]\n elif studies[0] == 'Failed in lab':\n sheet.range(cells['NS']).value = studies[1]\n elif studies[0] == 'Inspire':\n sheet.range(cells['Inspire']).value = studies[1]\n elif 'Other' in studies[0]:\n print(studies[0], studies[1])\n else:\n print('Study type, {} is not indexed'.format(studies[0]))\n return", "title": "" }, { "docid": "cea8111b49f1c6cec99d3f767902cb4a", "score": "0.561059", "text": "def put_data(self, google_sheet_id, sheet_a1_notation, rows, value_input_option=\"USER_ENTERED\", overwrite_or_append=\"OVERWRITE\"):\n with googleapiclient.discovery.build('sheets', 'v4', credentials=self.creds) as service:\n if overwrite_or_append == \"OVERWRITE\": # default\n service.spreadsheets().values().clear(spreadsheetId=google_sheet_id,\n range=sheet_a1_notation,\n body={}).execute()\n\n body = {\"values\": rows}\n return service.spreadsheets().values().append(\n spreadsheetId=google_sheet_id,\n range=sheet_a1_notation,\n valueInputOption=value_input_option,\n body=body).execute()", "title": "" }, { "docid": "8c5956e70bd5debe82ccc768860e8689", "score": "0.5608833", "text": "def writeto_excel(cls,path,names,excelname):\n\n df = pd.DataFrame.from_dict(data={'ImageName': [i for i in names.keys()], 'EmpName': [i for i in names.values()]},orient='columns')\n print(f'[INFO] Save in - {path}')\n \n fullpath = os.path.join(path,excelname)\n if os.path.isfile(fullpath):\n os.remove(fullpath)\n\n writer = pd.ExcelWriter(path +'data.xlsx')\n # writer = pd.ExcelWriter('test.xlsx', engine='xlsxwriter')\n df.to_excel(writer,sheet_name='test1',index=False)\n writer.save()", "title": "" }, { "docid": "71df5d4cc7d9f4b9864b3bbc96501fb9", "score": "0.5601026", "text": "def OverwriteCells(service, spreadsheetId, sheet, newValues, quiet=True):\n print \"Sheet: %s\" % sheet\n print \"Command: Upload to sheets, overwriting data\"\n try:\n print \"Reading existing values...\"\n preview = 3\n existingValues = GetGSheetsRange(\n service, spreadsheetId, sheet, preview, quiet=quiet\n )\n print \"Emptying table...\"\n emptyValues = MakeEmptyTable(existingValues)\n WriteGSheetsRange(service, spreadsheetId, sheet, emptyValues, quiet=quiet)\n print \"Uploading new data...\"\n WriteGSheetsRange(\n service,\n spreadsheetId,\n sheet,\n newValues,\n value_input_option=\"USER_ENTERED\",\n quiet=quiet,\n )\n print\n print \"Done.\"\n\n except 
google_errors.HttpError as h:\n print \"ERROR\"\n print \" - Http connection to Google Sheets failed!\"\n print \" - %s\" % h\n print\n print", "title": "" }, { "docid": "f53cb7fb9e7e9587b9048453a6018b98", "score": "0.55847424", "text": "def write_xlsx(outs, *args, **kwargs):\n\tdf = pd.DataFrame(outs)\n\tdf.to_excel(\"output.xlsx\")\n\treturn", "title": "" }, { "docid": "b2a43077029ba2b0fd750261b5f91709", "score": "0.5560496", "text": "def upload_to_goole_sheets(df_loc: str) -> None:\n # must have your custom creds\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n \"gdrive_creds.json\", scope\n )\n client = gspread.authorize(credentials)\n spreadsheet = client.open(\"dc_crash_bot\") # Name of spread sheet\n\n with open(df_loc, \"r\") as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)", "title": "" }, { "docid": "b2a43077029ba2b0fd750261b5f91709", "score": "0.5560496", "text": "def upload_to_goole_sheets(df_loc: str) -> None:\n # must have your custom creds\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n \"gdrive_creds.json\", scope\n )\n client = gspread.authorize(credentials)\n spreadsheet = client.open(\"dc_crash_bot\") # Name of spread sheet\n\n with open(df_loc, \"r\") as file_obj:\n content = file_obj.read()\n client.import_csv(spreadsheet.id, data=content)", "title": "" }, { "docid": "5813f267a04e48761cf5fa4bd90b763c", "score": "0.555385", "text": "def _create_new_revision_worksheet(df, writer, header_format):\n df.to_excel(\n writer,\n sheet_name='new_revision',\n index=False,\n startrow=1,\n header=False\n )\n\n worksheet = writer.sheets['new_revision']\n for col_num, value in enumerate(df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\n worksheet.set_column(0, 0, 16) # Assembly\n worksheet.set_column(1, 1, 26) # Assembly_Name\n worksheet.set_column(2, 1, 15) # Revision", "title": "" }, { "docid": "a96905cfbe210adbb7ea0ed89fa49416", "score": "0.55521977", "text": "def write(self, values, spreadsheet_id, cell, sheet=None, row_major=True, raw=False):\n self._spreadsheets.values().update(\n spreadsheetId=spreadsheet_id,\n range=self._get_range(sheet, cell, spreadsheet_id),\n valueInputOption='RAW' if raw else 'USER_ENTERED',\n body={\n 'values': self._values(values),\n 'majorDimension': 'ROWS' if row_major else 'COLUMNS',\n },\n ).execute()", "title": "" }, { "docid": "202354bac8caab1a4c9cdd03f738ec55", "score": "0.5541308", "text": "def write_to_excel(data_matrix, filename):\n wb = Workbook()\n ws = wb.create_sheet(\"data\", 0)\n \n for row in data_matrix:\n ws.append(row)\n \n save_file = filename + '.xlsx'\n wb.save( save_file )", "title": "" }, { "docid": "a23bb2bda49b49b9298a6b30c012e9bc", "score": "0.5529477", "text": "def save_results(output):\r\n # extension = '.xlsx'\r\n # if filename.lower().endswith('.xlsx'):\r\n # output_file = os.path.join(output, filename)\r\n # else:\r\n # output_file = os.path.join(output, filename+extension)\r\n for key, value in output_df.iteritems():\r\n output_file = os.path.join(output, key)\r\n writer = pd.ExcelWriter(output_file, engine='xlsxwriter')\r\n value.to_excel(writer)\r\n writer.save()", "title": "" }, { "docid": "acd6f6b3e142b8e980cd54d17ed1ba0a", "score": "0.5527286", "text": "def __write_xlsx(self, data_f, name):\n file_path = os.path.join(self.__get_file_path(), self.__get_file_name() + \"_\" + name)\n # Github open ticket for the abstract method\n writer = pd.ExcelWriter('%s.xlsx' % file_path, engine='xlsxwriter')\n 
data_f.to_excel(writer, sheet_name=name)\n writer.save()", "title": "" }, { "docid": "3515a98e30034850fd351980c60dace6", "score": "0.55242693", "text": "def update_worksheet(data, sheet_name):\n print(f'Updating {sheet_name} worksheet...{newline}')\n updated_worksheet = SHEET.worksheet(sheet_name)\n updated_worksheet.append_row(data)\n print(f'{sheet_name} worksheet updated successfully!{newline}')", "title": "" }, { "docid": "2a6bddcee0299d534df260f0aa6600dc", "score": "0.5509846", "text": "def write_result(header, grouped_data_items, num_sheets, highlight_cols=None):\n\n header = [\"Номер групи\"] + header\n if highlight_cols is None:\n highlight_cols = []\n\n filename = 'formatted_{:%Y-%m-%d_%H:%M:%S}.xlsx'.format(datetime.now())\n print('Writing to XLSX workbook \"{}\"'.format(filename))\n workbook = xlsxwriter.Workbook(filename)\n\n groups_per_sheet = len(grouped_data_items) // num_sheets\n for sheet_num in range(num_sheets):\n worksheet = workbook.add_worksheet('Book{}'.format(sheet_num + 1))\n # Pagination bounds\n lower_bound = groups_per_sheet * sheet_num\n if sheet_num == num_sheets - 1:\n upper_bound = None\n else:\n upper_bound = groups_per_sheet * (sheet_num + 1)\n sheet_groups = grouped_data_items[lower_bound:upper_bound]\n\n row_pointer = 0 # Current row in the worksheet\n worksheet.write_row(row_pointer, 0, header)\n for group_num, rows in enumerate(sheet_groups):\n for row_num, row in enumerate(rows):\n row_pointer += 1\n prev_group = \"\"\n\n for col, cell in enumerate([group_num] + row):\n # Here we are using number in front of header of current\n # column to determine if we are still in the same group\n new_group_started = True\n if len(header) > col:\n m = re.match(\"(\\d+)\", header[col])\n if m:\n if m.group(1) == prev_group:\n new_group_started = False\n else:\n prev_group = m.group(1)\n\n orig_col = col - 1\n format = workbook.add_format()\n # Highlight columns that contain non-matching cells\n if orig_col in highlight_cols:\n if any([x[orig_col] != cell for i, x in enumerate(rows) if i != row_num]):\n format.set_bg_color('red')\n # Handle the group borders\n format.set_border_color('#DDDDDD')\n format.set_border(1)\n\n if new_group_started:\n format.set_left_color('black')\n\n if row_num == 0:\n format.set_top_color('black')\n elif row_num == len(rows) - 1:\n format.set_bottom_color('black')\n\n # Filenames might sometimes be detected as numbers and we don't want this\n if col == COL_FILENAME:\n worksheet.write_string(row_pointer, col, cell, format)\n else:\n worksheet.write(row_pointer, col, cell, format)\n\n # Disabled for now\n # # Add an empty row between the groups\n # row_pointer += 1\n # worksheet.write_row(row_pointer, 0, [])\n print('Wrote {} rows for sheet {}'.format(row_pointer + 1, sheet_num))\n\n workbook.close()", "title": "" }, { "docid": "90020d289fa0394def3ec56edac70671", "score": "0.5501802", "text": "def create_sheet(self, name):\n return ODSSheetWriter(self.native_book, None, name)", "title": "" }, { "docid": "005e54560235314bcecf0d0622c632d8", "score": "0.54850245", "text": "def export_list(self, sheet):\n if self.groupby:\n idx = self.fields.index(self.groupby)\n groups = []\n uniquekeys = []\n queryset = sorted(self.data, key=lambda x: x[idx])\n for key, group in groupby(queryset, key=lambda x: x[idx]):\n groups.append(list(group))\n uniquekeys.append(key)\n row = 1\n for grouper, group in zip(uniquekeys, groups):\n sheet.write(row, 0, u'', styles['data'])\n sheet.write(row + 1, 0, grouper, styles['subheader'])\n row += 2\n for line in 
group:\n col = 0\n for val in line:\n self.write_cell_data(sheet, (row, col), val)\n col += 1\n row += 1\n else:\n row = 1\n for line in self.data:\n col = 0\n for val in line:\n self.write_cell_data(sheet, (row, col), val)\n col += 1\n row += 1", "title": "" }, { "docid": "8e3af6fdf857b9f948d85ee4bdf89d31", "score": "0.54808885", "text": "def getExcel(f):\n return pd.ExcelWriter(f)", "title": "" }, { "docid": "b03056eec848a8cfb8ebed16e67461fa", "score": "0.5475697", "text": "def to_excel(self, excel_writer='output.xlsx', sheet_name='Sheet1', *, protect_sheet=False,\n right_to_left=False, columns_to_hide=None, add_filters=False, replace_sheet=False,\n auto_fit=None, **kwargs):\n save = kwargs.pop('save', isinstance(excel_writer, str))\n\n # pandas.to_excel defaults\n index = kwargs.pop('index', True)\n header = kwargs.pop('header', True)\n columns = kwargs.pop('columns', None)\n engine = kwargs.pop('engine', 'openpyxl')\n startcol = kwargs.pop('startcol', 0)\n startrow = headerrow = kwargs.pop('startrow', 0)\n\n index_label = kwargs.get('index_label', self.index.name)\n\n if self._table_args and not header:\n raise ValueError('Cannot format as table without headers.')\n\n if columns is not None:\n return self.loc[:, columns].to_excel(\n excel_writer=excel_writer, sheet_name=sheet_name, protect_sheet=protect_sheet,\n right_to_left=right_to_left, columns_to_hide=columns_to_hide, add_filters=add_filters,\n replace_sheet=replace_sheet, auto_fit=auto_fit, header=header, index=index,\n startcol=startcol, startrow=startrow, engine=engine, save=save, **kwargs\n )\n\n if isinstance(excel_writer, str):\n excel_writer = self.ExcelWriter(excel_writer)\n elif 'openpyxl' not in excel_writer.engine:\n raise ValueError('Engine for excel_writer must be openpyxl.')\n\n if _os.path.splitext(excel_writer.path)[1] not in excel_writer.supported_extensions:\n raise ValueError(\n 'Unsupported file extension {}. Use {}.'.format(\n _os.path.splitext(excel_writer.path)[1], '/'.join(excel_writer.supported_extensions)\n )\n )\n\n if replace_sheet:\n if sheet_name in excel_writer.book:\n del excel_writer.book[sheet_name]\n if sheet_name in excel_writer.sheets:\n del excel_writer.sheets[sheet_name]\n\n self.dataframe.to_excel(\n excel_writer, sheet_name=sheet_name, engine=engine, header=header,\n index=index, startcol=startcol, startrow=startrow, columns=columns, **kwargs\n )\n\n book = excel_writer.book\n sheet = book[sheet_name]\n sheet.sheet_view.rightToLeft = right_to_left\n\n # add named styles. 
Rename any whose name is already taken within book.\n renamed_styles = self._add_named_styles(book)\n\n if auto_fit is not None and auto_fit is not False:\n if auto_fit is True:\n auto_fit = self.columns\n self.auto_fit(auto_fit, index=index, include_header=bool(header))\n\n # index styles\n if index:\n current_cell = sheet.cell(row=startrow + 1, column=startcol + 1)\n if header and not index_label and self._table_args:\n index_label = 'index'\n current_cell.value = index_label # Otherwise formatting as table will auto give it a ColumnX name.\n if header: # TODO: Style this cell properly\n current_cell.style = self._header_styles.iat[0]\n offset = 2 if header else 1\n for row_index, index_style in enumerate(self._index_styles.iteritems()):\n index_value, style = index_style\n current_cell = sheet.cell(row=row_index + startrow + offset, column=startcol + 1)\n current_cell.style = renamed_styles.get(style, style)\n # set index width\n sheet.column_dimensions[self.get_column_letter(startcol)].width = self._index_width\n # adjust startcol for added index column\n startcol += 1\n\n # header styles\n if header:\n for col_index, col_style in enumerate(self._header_styles.iteritems()):\n col_name, style = col_style\n current_cell = sheet.cell(row=startrow + 1, column=col_index + startcol + 1)\n current_cell.style = renamed_styles.get(style, style)\n # set header height\n sheet.row_dimensions[startrow + 1].height = self.header_height\n # adjust startrow for header row\n startrow += 1\n\n # data styles\n for col_index, col_series in enumerate(self._styleframe.iteritems()):\n col_name, column = col_series\n for row_index, index_style in enumerate(column.iteritems()):\n index_value, style = index_style\n current_cell = sheet.cell(row=row_index + startrow + 1, column=col_index + startcol + 1)\n current_cell.style = renamed_styles.get(style, style)\n\n # add any hyperlinks\n if self._hyperlinks is not None:\n for col_index, col_series in enumerate(self._hyperlinks.iteritems()):\n col_name, column = col_series\n\n try:\n col_index = self.columns.get_loc(col_name)\n except KeyError: # col_name not in dataframe\n if index and col_name in (index_label, self.index.name, 'index'):\n col_index = -1\n else:\n continue\n\n for row_index, index_link in enumerate(column.iteritems()):\n index_value, hyperlink = index_link\n if isinstance(hyperlink, _Hyperlink):\n # make sure each cell has a unique Hyperlink obj to receive the ref of that cell\n hyperlink = _copy(hyperlink)\n\n current_cell = sheet.cell(row=row_index + startrow + 1, column=col_index + startcol + 1)\n # openpyxl assigns cell ref to hyperlink\n current_cell.hyperlink = hyperlink\n\n # set column widths\n for col_index, column_width in enumerate(self._column_widths.iteritems()):\n column, width = column_width\n column_letter = self.get_column_letter(col_index, startcol=startcol)\n sheet.column_dimensions[column_letter].width = width\n\n # set row heights\n for row_index, row_height in enumerate(self._row_heights.iteritems()):\n row, height = row_height\n sheet.row_dimensions[startrow + row_index + 1].height = height\n\n # format as table if needed\n if self._table_args:\n tables = {tbl.name for sht in book.worksheets for tbl in sht._tables}\n\n rows = None if not self.dataframe.empty else (0, 1)\n self._table_args['ref'] = self._get_range_as_str(\n row_index=rows, startcol=startcol, startrow=headerrow, index=index\n )\n\n if 'name' in self._table_args:\n self._table_args['displayName'] = self._table_args.pop('name')\n if 
self._table_args.get('displayName', None) in tables:\n raise ValueError('Table name \"{}\" already exists in book.'.format(self._table_args['displayName']))\n\n if 'displayName' not in self._table_args:\n # find next available table name\n for i in _count(1):\n if 'Table{}'.format(i) not in tables:\n tbl = _table.Table(\n displayName='Table{}'.format(i),\n **self._table_args\n )\n break\n else:\n tbl = _table.Table(\n **self._table_args\n )\n\n sheet.add_table(tbl)\n\n elif add_filters:\n sheet.auto_filter.ref = self._get_range_as_str(\n row_index=0, startcol=startcol, startrow=headerrow, index=index\n )\n\n # Hide columns\n if columns_to_hide:\n if isinstance(columns_to_hide, (str, int)):\n column_letter = self.get_column_letter(columns_to_hide, startcol=startcol)\n sheet.column_dimensions[column_letter].hidden = True\n else:\n for column in columns_to_hide:\n column_letter = self.get_column_letter(column, startcol=startcol)\n sheet.column_dimensions[column_letter].hidden = True\n\n # Protect sheet\n if protect_sheet:\n sheet.protection.autoFilter = False\n sheet.protection.enable()\n\n if save:\n excel_writer.save()\n\n return excel_writer", "title": "" }, { "docid": "d117f0ba4b793b01d4cfb49dbca8761e", "score": "0.5475479", "text": "def fill_xls(ws, row, task_title, milestone, start_date, end_date, estimated_hours, qa, _developer, priority, _type,\n description):\n ws.write(row, 0, task_title)\n ws.write(row, 1, milestone)\n ws.write(row, 2, start_date)\n ws.write(row, 3, end_date)\n ws.write(row, 4, estimated_hours)\n ws.write(row, 5, qa)\n ws.write(row, 6, _developer)\n ws.write(row, 7, priority)\n ws.write(row, 8, _type)\n ws.col(9).width = 100 * 256\n ws.write(row, 9, description.encode('utf-8').replace(\"\\r\\n\", \"\").strip())", "title": "" }, { "docid": "dbe4d4315ba3009a6859dd6474b182c4", "score": "0.54749334", "text": "def write_excel(filename, sheetnames, arrays):\n if len(sheetnames) != len(arrays):\n raise IndexError(\"Array and sheet number must be equal.\")\n \n book = xlwt.Workbook()\n \n for name, array in zip(sheetnames, arrays):\n sheet = book.add_sheet(name)\n cols, rows = array.shape\n \n for row in range(rows):\n for col in range(cols):\n sheet.write(row, col, array[col][row])\n \n book.save(filename)", "title": "" }, { "docid": "a8b5761fc33d5831f01f1e613998bedf", "score": "0.54558617", "text": "def save_excel(self, file):\n if not file.endswith('.xlsx'):\n file = file + '.xlsx'\n df = pd.DataFrame(self._items, columns=['DOI'])\n df.to_excel(file)", "title": "" }, { "docid": "bc8ddb688c107d1c406b28972c8cf94b", "score": "0.54516715", "text": "def create_doc(title):\n\n spreadsheet_body = {\n 'properties': {\n 'locale': 'zh_CN',\n 'timeZone': 'Etc/GMT',\n 'autoRecalc': 'ON_CHANGE',\n 'title': title\n },\n }\n return service.spreadsheets().create(body = spreadsheet_body).execute()", "title": "" }, { "docid": "1119a6786ae6317fa2dede14acea2a1b", "score": "0.544675", "text": "def InsertGSheetsRows(\n service, spreadsheetId, range_name, values, value_input_option=\"RAW\", quiet=True\n):\n body = {\"values\": values}\n result = (\n service.spreadsheets()\n .values()\n .append(\n spreadsheetId=spreadsheetId,\n range=range_name,\n valueInputOption=value_input_option,\n body=body,\n )\n .execute()\n )\n print result", "title": "" }, { "docid": "5c47401fc4029867494317494e0f9174", "score": "0.5435767", "text": "def get_Data_temblor(idsheet, rangesheet):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = 
('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http, discoveryServiceUrl=discoveryUrl)\n# spreadsheetId = 'Necesidades de Zonas Afectadas y Albergues '\n spreadsheetId = 'Centros de Acopio'\n #####DAÑOS Y DERRUMBES VERIFICADOS\n #####Para descargar otras páginas cambiar el nombre en el campo range\n result = service.spreadsheets().values().get(\n spreadsheetId=idsheet,\n range=rangesheet).execute()\n values = result.get('values', [])\n return pd.DataFrame(values)", "title": "" }, { "docid": "01d33f342016389413e0957bb66237fc", "score": "0.54277146", "text": "def _save_values(self, nm):\n try:\n result = self.sheet.values().get(\n spreadsheetId=self.sheet_id,\n range=nm).execute()\n values = result.get('values', [])\n df = pd.DataFrame(\n values[4:], columns=values[3])\n df.to_csv(\n 'data/Canada-{}.csv'.format(nm),\n index=False)\n except:\n print(\"Retrieving {} failed.\".format(nm))", "title": "" }, { "docid": "c01547aa9dce285db91bf405caa9e215", "score": "0.5426704", "text": "def save_report_file(assembly, bom, indented_bom, ebom, prev):\n writer = pd.ExcelWriter(assembly + '.xlsx', engine='xlsxwriter')\n workbook = writer.book\n header_format = workbook.add_format({\n 'bold': True,\n 'text_wrap': False,\n 'fg_color': '#DCE6F1',\n 'border': 1\n })\n\n _create_bom_worksheet(bom, writer, header_format)\n _create_indented_bom_worksheet(indented_bom, writer, header_format)\n _create_import_file_worksheet(ebom, writer, header_format)\n _create_new_revision_worksheet(prev, writer, header_format)\n\n writer.close()", "title": "" }, { "docid": "08bfcc9ad3d5d2bae36b6cbe1469a98c", "score": "0.54227155", "text": "def save_excel(self, file):\n if not file.endswith('.xlsx'):\n file = file + '.xlsx'\n df = pd.DataFrame(self._items, columns=self.field_names)\n df.to_excel(file)", "title": "" }, { "docid": "dc4f5ac48ce66fdbd5298200b1c57742", "score": "0.54178506", "text": "def gs(credentials, title, output_path):\n\n con = create_database(output_path)\n\n loader = simplesqlite.loader.GoogleSheetsTableLoader()\n loader.source = credentials\n loader.title = title\n\n for tabledata in loader.load():\n click.echo(\n \"convert '%s' to '%s' table\" % (title, tabledata.table_name))\n con.create_table_from_tabledata(tabledata)\n\n return 0", "title": "" }, { "docid": "8eee5ff2e2dfb0baaede0560768c0ecf", "score": "0.54152787", "text": "def export_plot_data_to_excel(self):\n\n all_srs = self.plot_settings.axis1_series_list + self.plot_settings.axis2_series_list\n\n # Collate all time series to dataframe\n df_ts_list = []\n populated_srs = (srs for srs in all_srs if len(srs.y) > 0)\n for srs in populated_srs:\n col = self._create_label(srs)\n df = pd.DataFrame(srs.y, index=srs.x, columns=[col])\n df_ts_list.append(df)\n\n y = srs.y_filt\n if len(y) > 0:\n col = self._create_label(srs, filtered=True)\n df = pd.DataFrame(y, index=srs.x, columns=[col])\n df_ts_list.append(df)\n\n # Collate all psd series to dataframe\n df_psd_list = []\n populated_srs = (srs for srs in all_srs if len(srs.pxx) > 0)\n for srs in populated_srs:\n col = self._create_label(srs)\n df = pd.DataFrame(srs.pxx, index=srs.freq, columns=[col])\n df_psd_list.append(df)\n\n pxx = srs.pxx_filt\n if len(pxx) > 0:\n col = self._create_label(srs, filtered=True)\n df = pd.DataFrame(pxx, index=srs.freq, columns=[col])\n df_psd_list.append(df)\n\n # Concatenate to sheet dataframe\n df_ts: pd.DataFrame\n df_ts = pd.concat(df_ts_list, axis=1, sort=False)\n df_ts.index.name = \"Time (s)\"\n 
df_psd = pd.concat(df_psd_list, axis=1, sort=False)\n df_psd.index.name = \"Freq (Hz)\"\n\n # Write to Excel\n filename, _ = QtWidgets.QFileDialog.getSaveFileName(\n self, caption=\"Export Plot Data\", filter=\"Excel File (*.xlsx)\"\n )\n if filename:\n writer = pd.ExcelWriter(filename, engine=\"xlsxwriter\")\n wb = writer.book\n fmt1 = wb.add_format({\"text_wrap\": True, \"bold\": True})\n fmt2 = wb.add_format({\"num_format\": \"0.000E+00\"})\n\n # Time Series sheet\n df_ts.to_excel(writer, sheet_name=\"Time Series\")\n ws = writer.sheets[\"Time Series\"]\n ws.set_column(1, len(df_ts.columns), 12, fmt2)\n\n # Need to rewrite column names to add desired format\n for i, col in enumerate(df_ts.columns):\n ws.write(0, i + 1, col, fmt1)\n\n # PSD sheet\n df_psd.to_excel(writer, sheet_name=\"PSD\")\n ws = writer.sheets[\"PSD\"]\n ws.set_column(1, len(df_psd.columns), 12, fmt2)\n\n # Need to rewrite column names to add desired format\n for i, col in enumerate(df_psd.columns):\n ws.write(0, i + 1, col, fmt1)\n\n writer.save()\n msg = \"Plot data exported successfully.\"\n QtWidgets.QMessageBox.information(self, \"Export Plot Data\", msg)", "title": "" }, { "docid": "b2a494b3bb6b423ad7e85594fa6a25c5", "score": "0.54057837", "text": "def toExcel(fname,tables):\r\n writer = pd.ExcelWriter(fname)\r\n for k,v in tables.items():\r\n # Make sure the order of columns complies the specs\r\n record = [r for r in V4.records if r.__class__.__name__.upper()==k]\r\n if len(record)==0:\r\n print(\"Ignore exporting table %s: No such record type exists.\" %k)\r\n else:\r\n columns = [field[0] for field in record[0].fieldMap]\r\n v.to_excel(writer,sheet_name=k,columns=columns,index=False,na_rep=\"N/A\")\r\n writer.save()", "title": "" }, { "docid": "900e1abdf76864c909b42e6d3e90893c", "score": "0.54052615", "text": "def save(self, worksheet, workbook, startrow=0, startcol=0):\n if self.name and self.show_title:\n write_title(self.name, worksheet, workbook, startrow, startcol, self.width)\n startrow += 1\n self.to_excel(workbook, worksheet.name, startrow=startrow, startcol=startcol)\n return workbook.get_worksheet_by_name(worksheet.name)", "title": "" }, { "docid": "b83887975b4d69d609d3298d95402e2a", "score": "0.5397788", "text": "def update_worksheet(data,worksheet):\n print(f\"Updating {worksheet} worksheet...\\n\")\n worksheet_to_update = SHEET.worksheet(worksheet)\n worksheet_to_update.append_row(data)\n print(f\"{worksheet} worksheet updated!\")", "title": "" }, { "docid": "59abed42a585384f2eca9ce05a3ff0a9", "score": "0.53878915", "text": "def export_df(df: pd.DataFrame, export_path: str,\r\n export_format: str = \"pickle\",\r\n hdf_key: Optional[str] = None\r\n ) -> None:\r\n _, ext = os.path.splitext(export_path)z\r\n use_ext = '.' 
+ export_format if len(ext) == 0 else ''\r\n if export_format == \"html\":\r\n color_dict = {\r\n 'POSITIVE': 'limegreen',\r\n 'NEGATIVE': 'red',\r\n 'NEUTRAL': 'lightgrey',\r\n 'MIXED': 'yellow'\r\n }\r\n spk_dict = {\r\n 'ch_0': '#F0F8FF',\r\n 'spk_0': '#F0F8FF',\r\n 'ch_1': '#FFF8DC',\r\n 'spk_1': '#FFF8DC'\r\n }\r\n df.set_index(\r\n ['transcript', 'recording', 'speaker', 'index']\r\n ).to_html(export_path + use_ext, encoding='utf-8',\r\n formatters={\r\n 'pred_sent': lambda sent: f'<span style=\"background-color:{color_dict[sent]}\">{sent}</span>',\r\n 'speaker': lambda speaker: f'<span style=\"background-color:{spk_dict[speaker]}\">{speaker}</span>'\r\n }, escape=False)\r\n elif export_format == \"csv\":\r\n df.to_csv(export_path + use_ext)\r\n elif export_format == \"json\":\r\n df.to_json(export_path + use_ext)\r\n elif export_format == \"parquet\":\r\n df.to_parquet(export_path + use_ext)\r\n elif export_format == \"pickle\":\r\n df.to_pickle(export_path + use_ext)\r\n elif export_format == \"excel\":\r\n if len(use_ext) > 0:\r\n use_ext = '.xlsx'\r\n df.to_excel(export_path + use_ext, index=False)\r\n elif export_format == \"hdf\":\r\n assert hdf_key is not None, \"Parameter hdf_key must be informed if export format is hdf.\"\r\n df.to_hdf(export_path + use_ext, hdf_key)\r\n else:\r\n raise ValueError(f\"Unknown export format: {export_format}\")", "title": "" }, { "docid": "86fee2d337aaac1a263aea1dced745ef", "score": "0.5387516", "text": "def _create_spreadsheet(self, ws_name: str, columns: List[str]) -> None:\n ws = self.workbook.create_sheet(ws_name)\n self.workbook.active = ws\n ws.append(columns)\n self.workbook.save(self.wb_name)", "title": "" }, { "docid": "1009060490d1e796dfb6313590e8848a", "score": "0.5387063", "text": "def make_worksheet(self, sheet_name, query_results, **kwargs):\n try:\n self.workbook_builder.add_worksheet(\n sheet_name=sheet_name,\n field_names=query_results.field_names,\n sheet_data=query_results.result_data,\n **kwargs\n )\n except:\n error = \"Unable to add worksheet {}\".format(sheet_name)\n self.logger.exception(error)", "title": "" }, { "docid": "6664e94d2265df84fe471f9a4543c9ad", "score": "0.5382799", "text": "def export_excel(table, file_name, attributes):\r\n \r\n writer = pd.ExcelWriter(file_name, engine='xlsxwriter')\r\n table.to_excel(writer, index=False, header=False, startrow=1,\r\n sheet_name = 'Sheet1')\r\n num_rows, num_cols = table.shape\r\n num_cols -= 1\r\n workbook = writer.book\r\n worksheet = writer.sheets['Sheet1']\r\n \r\n # Add attribute labels: Row 1 merge (b-d), (e-g), etc basiac\r\n alphabet = string.ascii_uppercase\r\n\r\n offset = 1\r\n width = 3\r\n row = 1\r\n \r\n merge_format = workbook.add_format({\r\n 'bold': 1,\r\n 'border': 1,\r\n 'align': 'center',\r\n 'valign': 'vcenter'})\r\n \r\n for i, attribute in enumerate(attributes):\r\n start_col = offset + i*width\r\n end_col = start_col + width - 1\r\n positions = \"{0}{2}:{1}{2}\".format(alphabet[start_col],\r\n alphabet[end_col], row)\r\n \r\n\r\n worksheet.merge_range(positions, attribute, merge_format)\r\n\r\n writer.save()", "title": "" }, { "docid": "9d57cc42f8e4fe93df0d9a84580a1a17", "score": "0.5378535", "text": "def excel(self): \n nombres =pandas.read_excel('C:/Videojuego/Proyecto/Usuarios/Nombres.xlsx',header = None)\n nombres = list(nombres.iloc[:,0])\n nombres.append(self.text2)\n clave=nombres.pop(0)\n dic={clave:nombres}\n\n df = pandas.DataFrame(dic)\n df =df[['Nombres']]\n writer =ExcelWriter('C:/Videojuego/Proyecto/Usuarios/Nombres.xlsx') \n 
df.to_excel(writer,sheet_name='Alumnos',index=False)\n writer.save()\n\n df2 = pandas.DataFrame({'Juego':[\"----------\"],'Estatus':[\"----------\"],'Tiempo':[\"----------\"],'Real':[\"----------\"],'Ideal':[\"----------\"],'Fecha':[\"----------\"]})\n df2=df2[['Juego','Estatus','Tiempo','Real','Ideal','Fecha']]\n writer = ExcelWriter('C:/Videojuego/Proyecto/Usuarios/Alumnos/'+self.text2+'.xlsx')\n df2.to_excel(writer,sheet_name=self.text2,index=False)\n writer.save()", "title": "" }, { "docid": "0939f485d27b8f78aca961be7ecddb6f", "score": "0.5359601", "text": "def write_cell_data(self, sheet, pos, val):\n row, col = pos\n if isinstance(val, (date, datetime)):\n sheet.write(row, col, label=val, style=styles['date'])\n else:\n sheet.write(row, col, label=val, style=styles['data'])", "title": "" }, { "docid": "00cfdd4549accbbf00151cc8943187cc", "score": "0.53470063", "text": "def tableFormat(sheetData, sheetName, wbook):\n # Nothing to format, so return.\n if sheetData.shape[0] == 0:\n return\n sheet = wbook.sheets[sheetName]\n sheet.freeze_panes(1, 0)\n # Set the autofilter for the sheet.\n sheet.autofilter(0, 0, sheetData.shape[0], sheetData.shape[1]-1)\n # Set document formatting.\n docFormat = wbook.book.add_format({'font': 'Calibri', 'font_size': 11})\n acctFormat = wbook.book.add_format({'font': 'Calibri', 'font_size': 11, 'num_format': 44})\n commaFormat = wbook.book.add_format({'font': 'Calibri', 'font_size': 11, 'num_format': 3})\n newFormat = wbook.book.add_format({'font': 'Calibri', 'font_size': 11, 'bg_color': 'yellow'})\n movedFormat = wbook.book.add_format({'font': 'Calibri', 'font_size': 11, 'bg_color': '#FF9900'})\n # Format and fit each column.\n i = 0\n # Columns which get shrunk down in reports.\n hideCols = ['Technology', 'Excel Part Link', 'Report Part Nbr Link', 'MFG Part Description',\n 'Focus', 'Part Class Name', 'Vendor ID', 'Invoice Detail Nbr', 'Assigned Account Rep',\n 'Recipient', 'DKLI Report Date', 'Invoice Date Group', 'Comments', 'Sales Channel']\n coreCols = ['Must Contact', 'End Product', 'How Contacted', 'Information for Digikey']\n for col in sheetData.columns:\n acctCols = ['Unit Price', 'Invoiced Dollars']\n if col in acctCols:\n formatting = acctFormat\n elif col == 'Quantity':\n formatting = commaFormat\n else:\n formatting = docFormat\n maxWidth = max(len(str(val)) for val in sheetData[col].values)\n # Set maximum column width at 50.\n maxWidth = min(maxWidth, 50)\n if col in hideCols:\n maxWidth = 0\n elif col in coreCols:\n maxWidth = 25\n sheet.set_column(i, i, maxWidth+0.8, formatting)\n i += 1\n # Highlight new root customer and moved city rows.\n try:\n for row in sheetData.index:\n ind = str(sheetData.loc[row, 'TAARCOM Comments']).lower().rstrip() == 'individual'\n no_root_cust = sheetData.loc[row, 'Root Customer..'] == ''\n if ind or no_root_cust:\n continue\n root_cust_loc = int(np.where(sheetData.columns == 'Root Customer..')[0])\n city_loc = int(np.where(sheetData.columns == 'City on Acct List')[0])\n if sheetData.loc[row, 'New T-Cust'] == 'Y':\n sheet.write(row + 1, root_cust_loc, sheetData.loc[row, 'Root Customer..'], newFormat)\n elif not sheetData.loc[row, 'City on Acct List']:\n pass\n elif sheetData.loc[row, 'Customer City'] not in sheetData.loc[row, 'City on Acct List'].split(', '):\n sheet.write(row + 1, root_cust_loc, sheetData.loc[row, 'Root Customer..'], movedFormat)\n sheet.write(row + 1, city_loc, sheetData.loc[row, 'City on Acct List'], movedFormat)\n except KeyError:\n print('Error locating Sales and/or City on Acct List 
columns.\\n'\n 'Unable to highlight without these columns.\\n---')", "title": "" }, { "docid": "8f24d0a7e51812a100d1a19aac8a1043", "score": "0.53447926", "text": "def export_to_pdf(whpa, export_path):\n wb = xlwt.Workbook()\n ws = wb.add_sheet(whpa)\n\n header = Header(*range(0, len(Header._fields)))\n\n for field, index in header._asdict().items():\n ws.write(0, index, field)\n\n with psql.connect(\"dbname=gwi_csi user=eric\") as conn:\n with conn.cursor() as cur:\n cur.execute('SELECT facility_id, facility_name, whpa_id, address,\\\n phone, property_type, facility_type, facility_status,\\\n csi_zone, \"date\", comments, location, urls, issues\\\n from whpa_report WHERE facility_id LIKE %s',\n (\"%{}%\".format(whpa),))\n\n row_index = 1\n\n for row in cur.fetchall():\n facility = Facility(*row)\n\n for field, value in facility._asdict().items():\n if field == 'location':\n x = float(facility.location.split(' ')[0].lstrip('POINT('))\n y = float(facility.location.split(' ')[1].rstrip(')'))\n\n ws.write(row_index, header.X, x)\n ws.write(row_index, header.Y, y)\n elif field == 'issues' and facility.issues is not None:\n ws.write(row_index, header.issues, \", \".join(facility.issues))\n else:\n ws.write(row_index, getattr(header, field), value)\n row_index += 1\n\n wb.save(os.path.join(export_path, \"{}.xlsx\".format(whpa)))", "title": "" }, { "docid": "1f42799db991eecb9748aa6323344809", "score": "0.53431857", "text": "def upload_as_gsheet(file_to_upload, filename): \r\n drive_service = initialize_drive_service()\r\n file_found = False\r\n \r\n try:\r\n extension = file_to_upload.split(\".\")[-1]\r\n \r\n file_metadata = {\r\n 'name' : filename,\r\n 'mimeType' : 'application/vnd.google-apps.spreadsheet'\r\n }\r\n \r\n if (extension == 'xls'):\r\n media = MediaFileUpload('%s' % (file_to_upload),\r\n mimetype='application/vnd.ms-excel',\r\n resumable=True)\r\n file_found = True\r\n elif (extension == 'xlsx'):\r\n media = MediaFileUpload('%s' % (file_to_upload),\r\n mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\r\n resumable=True)\r\n file_found = True\r\n else:\r\n pass \r\n except IndexError:\r\n pass\r\n \r\n if (file_found == True):\r\n file = drive_service.files().create(body=file_metadata,\r\n media_body=media,\r\n fields='id').execute()\r\n log_file().info(\"Source Compiler.log\", \"File \\\"{0}\\\" uploaded as: {1} (ID: {2}).\".format(file_to_upload, filename, file.get('id')))\r\n print(\"File \\\"{0}\\\" uploaded as: {1} (ID: {2}).\".format(file_to_upload, filename, file.get('id'))) \r\n else:\r\n log_file().error(\"Source Compiler.log\", \"Invalid file name or extension. Provide full file name with .xls or .xlsx extensions.\")\r\n print(\"Invalid file name or extension. Provide full file name with .xls or .xlsx extensions.\")", "title": "" }, { "docid": "2e847e36617b1ec476406e82f662fbe2", "score": "0.53355503", "text": "def create_main_tables_structure(ws, df, date_str):\r\n ws['A1']=\"Data is provided for reference purposes only. Data is the property of PV InfoLink.\\nUsers should respect the legitimate rights for use based on the principle of integrity.\"\r\n ws['A1'].alignment=Alignment(wrap_text=True)\r\n ws.merge_cells('A1:D1')\r\n ws['A2']=\"Please do not modify this sheet manually. 
Use a copy for playing with the data\"\r\n ws.merge_cells('A2:D2')\r\n ws['A3']=df.columns[-3]+\", \"+ws.title\r\n ws['A3'].font=Font(bold=True)\r\n ws.merge_cells('A3:D3')\r\n for r in dataframe_to_rows(df[[\"Category\", \"Item\", \"Unit\", \"High Price\"]], index=False, header=True):\r\n ws.append(r)\r\n ws.cell(row=ws.max_row+2, column=1, value=df.columns[-1]+\", \"+ws.title)\r\n ws.cell(row=ws.max_row, column=1).font=Font(bold=True)\r\n ws.merge_cells(start_row=ws.max_row, start_column=1, end_row=ws.max_row, end_column=ws.max_column)\r\n for r in dataframe_to_rows(df[[\"Category\", \"Item\", \"Unit\", \"Average Price\"]], index=False, header=True):\r\n ws.append(r)\r\n ws.cell(row=ws.max_row+2, column=1, value=df.columns[-2]+\", \"+ws.title)\r\n ws.cell(row=ws.max_row, column=1).font=Font(bold=True)\r\n ws.merge_cells(start_row=ws.max_row, start_column=1, end_row=ws.max_row, end_column=ws.max_column)\r\n for r in dataframe_to_rows(df[[\"Category\", \"Item\", \"Unit\", \"Low Price\"]], index=False, header=True):\r\n ws.append(r)\r\n\r\n ws.cell(row=ws.max_row+2, column=1).value='*The quote of mono wafers is low resistivity product.'\r\n ws.merge_cells(start_row=ws.max_row, start_column=1, end_row=ws.max_row, end_column=ws.max_column)\r\n ws.cell(row=ws.max_row+1, column=1).value='**Mono-Si wafer quotes are based on those of 180µm. Prices of thinner ones are calculated with formula.'\r\n ws.merge_cells(start_row=ws.max_row, start_column=1, end_row=ws.max_row, end_column=ws.max_column)\r\n ws.cell(row=ws.max_row+1, column=1).value='***US and Indian module prices showed on the PV InfoLink website is after-tax price (punitive tariffs). Others are FOB price.'\r\n ws.merge_cells(start_row=ws.max_row, start_column=1, end_row=ws.max_row, end_column=ws.max_column)\r\n\r\n swap_headers(ws, \"Low Price\", date_str)\r\n swap_headers(ws, \"Average Price\", date_str)\r\n swap_headers(ws, \"High Price\", date_str)\r\n ws.column_dimensions['B'].width=51\r\n ws.column_dimensions['A'].width=16\r\n ws.column_dimensions['D'].width=10\r\n ws.row_dimensions[1].height=30", "title": "" }, { "docid": "5ee69d526e0731440c03eff2c10a0eaf", "score": "0.53336", "text": "async def spreadsheet(ctx,*,request : str=''):\n await ctx.send(r\"https://docs.google.com/spreadsheets/d/11ZXN22CTmItPMVduRdpEwf6wR9KRMJR7Ro9HensEWSk/edit#gid=656420900\")", "title": "" }, { "docid": "38eef9c9098476f332c511ef6042d32b", "score": "0.53316915", "text": "def save_dataframe(df, filename):\n df.to_csv(filename, header=True, index=True, index_label=False, mode='wb',\n encoding='utf-8', na_rep='NaN', sep='|')\n print \"DataFrame Saved to:\\n-->\\033[1;36m{}\\033[0m\\n\".format(filename)", "title": "" }, { "docid": "1d794bfb31cc731373cdd1798728d10e", "score": "0.533051", "text": "def write_rows(self, sheet_range, values=[]):\n body = {\n 'values': values\n }\n result = self.service.spreadsheets().values().append(\n spreadsheetId=self.spreadsheet_id,\n range=sheet_range,\n valueInputOption='USER_ENTERED',\n insertDataOption='INSERT_ROWS',\n body=body).execute()\n return result", "title": "" }, { "docid": "3e12938908cc62c01c3c4a3ea2081ac9", "score": "0.5330464", "text": "def export_qs(self, sheet):\n opts = self.data.model._meta\n row = 4\n for item in self.data:\n col = 0\n for name in self.fields:\n val = getattr(item, name, '')\n self.write_cell_data(sheet, (row, col), val)\n col += 1\n row += 1", "title": "" }, { "docid": "f060c38e3752d6c8d03a8ceba4655cb4", "score": "0.5324046", "text": "def to_xls(scraped_data):\n book = xlwt.Workbook()\n 
sheet = book.add_sheet(\"labs\")\n\n data = [COL_HEADERS] + to_lists(scraped_data)\n\n for i, data_row in enumerate(data):\n sheet_row = sheet.row(i)\n for j, item in enumerate(data_row):\n sheet_row.write(j, item)\n\n book.save(FILEPATH_TEMPLATE.format(\"xls\"))", "title": "" }, { "docid": "af880beb5950a3ab6e3f49b0273f016d", "score": "0.5315864", "text": "def export_inventory_to_excel(inventory, day):\r\n\r\n df = pd.DataFrame(inventory)\r\n writer = pd.ExcelWriter(f'inventory_{day}.xlsx', engine='xlsxwriter')\r\n df.to_excel(writer, sheet_name='Sheet1', startrow=1, header=False)\r\n workbook = writer.book\r\n worksheet = writer.sheets['Sheet1']\r\n header = ['Product Name', 'Count', 'Expiration Date']\r\n header_format = workbook.add_format({\r\n 'bold': True,\r\n 'text_wrap': True,\r\n 'valign': 'top',\r\n 'fg_color': '#D7E4BC',\r\n 'border': 1})\r\n for col_num, value in enumerate(header):\r\n worksheet.write(0, col_num + 1, value, header_format)\r\n writer.save()", "title": "" }, { "docid": "4553a70c20c9d6419d0c556130d7ce06", "score": "0.5311012", "text": "def add_to_worksheet(worksheet, data):\n print(f'Updating {worksheet} worksheet...\\n')\n worksheet_to_update = SHEET.worksheet(worksheet)\n worksheet_to_update.append_row(data)\n print(f'{worksheet} worksheet updated successfully.\\n')", "title": "" }, { "docid": "5a5444b140b762536c7b10ab8676bf1b", "score": "0.52888787", "text": "def df_to_excel(self, df, filename, **kwargs):\n if filename.startswith('/'):\n filename = filename[1:]\n if (\n 'index' not in kwargs\n ): # make the default index=False if not specified in input\n kwargs = dict({'index': False}, **kwargs)\n return df.to_excel(self.filepath(filename), **kwargs)", "title": "" }, { "docid": "469f76923658fc90a75e63e4787ed69f", "score": "0.5274583", "text": "def _save(tables, writer, sheet_name='Sheet1', n_row=1):\n for t in tables:\n label = t.columns.name\n t.to_excel(writer, sheet_name, \n startrow=n_row, index_label=label)\n n_row += len(t.index) + 2\n \n return n_row", "title": "" }, { "docid": "ef4530a1cccaab30ecd88c61ff362744", "score": "0.52736664", "text": "def new_excel(seatNo, ID, gender, first_name, last_name, seatName, filename):\r\n wb = xlwt.Workbook(encoding='utf-8')\r\n sheet1 = wb.add_sheet('Sheet1')\r\n\r\n row = 0\r\n sheet1.write(row, 0, u\"ลำดับ\".encode('utf-8'))\r\n sheet1.write(row, 1, u\"รหัสนิสิต\".encode('utf-8'))\r\n # sheet1.write(row, 2, u\"คำนำหน้า\".encode('utf-8'))\r\n sheet1.write(row, 2, u\"ชื่อ - นามสกุล\".encode('utf-8'))\r\n sheet1.write(row, 3, u\"เลขที่นั่ง\".encode('utf-8'))\r\n for row in range(0, len(ID)):\r\n sheet1.write(row+1, 0, seatNo[row])\r\n sheet1.write(row+1, 1, ID[row])\r\n sheet1.write(row+1, 2, gender[row] + first_name[row] + \" \" + last_name[row])\r\n sheet1.write(row+1, 3, seatName[row])\r\n\r\n wb.save(filename + '_new.xls')", "title": "" } ]
9410c012f0a34a67a772c66287a39474
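For reference, the export helpers above share one pattern: write the DataFrame with pandas, then grab the xlsxwriter workbook/worksheet objects to apply header and number formats. A minimal self-contained sketch of that pattern (file name and data are illustrative; recent pandas versions close the writer via a context manager instead of writer.save()):

    import pandas as pd

    df = pd.DataFrame({"Freq (Hz)": [1.0, 2.0], "PSD": [1.5e-4, 2.3e-4]})

    with pd.ExcelWriter("export_example.xlsx", engine="xlsxwriter") as writer:
        df.to_excel(writer, sheet_name="PSD", index=False)
        workbook = writer.book
        worksheet = writer.sheets["PSD"]
        header_fmt = workbook.add_format({"bold": True, "text_wrap": True})
        num_fmt = workbook.add_format({"num_format": "0.000E+00"})
        # column width and scientific number format for the data columns
        worksheet.set_column(0, len(df.columns) - 1, 12, num_fmt)
        # rewrite the header row with the bold, wrapped format
        for col_num, name in enumerate(df.columns):
            worksheet.write(0, col_num, name, header_fmt)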
moves item identified by key to the head of the list
[ { "docid": "82f0a5cf6411760d15775794141626bc", "score": "0.814508", "text": "def movehead(self, key):\n #remove from old position in list\n n = self.d[key]\n n.next.prev = n.prev\n n.prev.next = n.next\n #put in front position of list\n self.l.next.prev = n\n n.next = self.l.next \n n.prev = self.l\n self.l.next = n", "title": "" } ]
[ { "docid": "67072f43b5e8d065011b2c282e4bfc4b", "score": "0.67528284", "text": "def change_key(self, key, newkey):\n if key in self.list:\n self.list.remove(key)\n self.list.append(newkey)\n self.sort()\n else:\n raise KeyError('No such key')", "title": "" }, { "docid": "9c42b964b276d53263b68eafbd0d83ef", "score": "0.6639715", "text": "def enqueue(self, key):\n self.list.append(key)\n self.sort()", "title": "" }, { "docid": "def28f2e110ad711e55f7e5d2702a478", "score": "0.65499777", "text": "def insert_front(self, key):\n new_node = Node(key)\n if self.head:\n self.head.previous = new_node\n new_node.next = self.head\n self.head = new_node", "title": "" }, { "docid": "e70c3b23b8a8948d8a5a26469e922eee", "score": "0.64309484", "text": "def remove(self, key):\n ptr = self.head\n if ptr is not None:\n if ptr.data == key:\n self.head = ptr.next\n ptr = None\n return\n while ptr:\n if ptr.data == key:\n break\n prev = ptr\n ptr = ptr.next\n if not ptr:\n return \n prev.next = ptr.next\n ptr.next = None", "title": "" }, { "docid": "e107d0e308f6c3dda44367433ee108e4", "score": "0.6384707", "text": "def change_key(self, item, key):\n index = item._index\n if (self._list[index]._key == item._key and\n self._list[index]._value == item._value):\n self._list[index]._key = key\n self._repair_heap_up(index)\n self._repair_heap_down(index)", "title": "" }, { "docid": "28cd21badddfcd92da2b3a50b74ab783", "score": "0.6366439", "text": "def inc(self, key):\n cur = self.dt[key] if key in self.dt else self.head\n cur.keys.discard(key)\n\n if cur.cnt + 1 == cur.next.cnt:\n new = cur.next\n else:\n new = ListNode(key, cur.cnt + 1)\n self.insert(cur, new)\n\n self.dt[key] = new\n if not cur.keys and cur.cnt != 0:\n cur.pre.next, cur.next.pre = cur.next, cur.pre", "title": "" }, { "docid": "c9939238bec7bd86c85717335e9d235c", "score": "0.6358132", "text": "def insert(self, key):\n new_node = Node(key)\n self.size += 1\n if not self.head:\n self.head = new_node\n return\n tmp = self.head\n new_node.next = tmp\n self.head = new_node", "title": "" }, { "docid": "b78e80ff3d6236d444bf2597e791a4a9", "score": "0.6307552", "text": "def ListLeftPush(self, key, value):\n if key in self.data:\n self.data[key].insert(0, value)\n else:\n self.data[key] = [value]\n\n return Operation(success=len(self.data[key]))", "title": "" }, { "docid": "13aad791916436a1902759f16f0bc7db", "score": "0.6242076", "text": "def move_to_start(self, key):\n super().move_to_end(key, last=False)", "title": "" }, { "docid": "925c2feb33d3fe5eafc422c2b99dbb70", "score": "0.6210368", "text": "def put(self, key, value):\n # If the key already exists in the self.map dict,\n # replace the key-value pair stored in the linked list. \n # If the key does not exist, add this key-value pair to self.list. 
If\n # self.map is over capacity, evict the least recent used key-value.\n # Also be sure to update the LRU order in self.list.\n\n if key in self.map:\n self.list.remove(self.map[key])\n new_node = ListNode((key, value))\n self.list.prepend(new_node) # Add new node to order list\n self.map[key] = new_node # Add new node to hash table\n\n # If hash table overflows, evict the LRU key-value.\n if len(self.map) > self.capacity:\n new_node = self.head.next\n self.list.remove(new_node) # Pop node that stores key-value pair from order list\n self.map.pop(new_node[0], None) # Pop key-value pair from hash table", "title": "" }, { "docid": "aaa99bf523f03549a86de938e8130994", "score": "0.6200396", "text": "def remove(self, key: int) -> None:\n index=key%self.size\n if self.a[index] != None:\n tempHead = self.a[index]\n if tempHead.key == key:\n self.a[index] = tempHead.next\n else:\n prevNode,current_Node = tempHead, tempHead.next\n while current_Node:\n if current_Node.key == key:\n prevNode.next = current_Node.next\n break\n prevNode = prevNode.next\n current_Node = current_Node.next\n \n else:\n return", "title": "" }, { "docid": "d86678b34d4d4389087645e0eafe98cc", "score": "0.62001115", "text": "def remove(self, key):\n hashKey = self.hash(key)\n head = self.table[hashKey]\n if head is None:\n return\n last = None\n if head.key == key:\n self.table[hashKey] = head.next\n return\n while head is not None:\n if head.key == key:\n last.next = head.next\n return\n last = head\n head = head.next", "title": "" }, { "docid": "2831581e8edfd4d1c4bca93c85e9fbbf", "score": "0.6199767", "text": "def add_front(self, key, value):\r\n new_node = SLNode(key, value)\r\n new_node.next = self.head\r\n self.head = new_node\r\n self.size = self.size + 1", "title": "" }, { "docid": "ae25dc1a948d4f705378040597905831", "score": "0.6171246", "text": "def inc(self, key: str) -> None:\n if key not in self.memo:\n self.memo[key] = self.add(self.head, key)\n else:\n node = self.memo[key]\n self.memo[key] = self.add(node, key)\n node.data.remove(key)\n if not node.data:\n node.pre.next, node.next.pre = node.next, node.pre\n node.next = node.pre = None", "title": "" }, { "docid": "417b4bfe2562690f79c8105bb9418e82", "score": "0.61354256", "text": "def __setitem__(self, key, newValue):\n list.__setitem__(self, key - self.start, newValue)", "title": "" }, { "docid": "1bda2f7dd2fb7bbbe31f33b6aa308c44", "score": "0.6091285", "text": "def remove(self, key: str) -> None:\n # get the list at hash location\n list_ = self._get_list(key)\n list_.remove(key)\n self.size -= 1", "title": "" }, { "docid": "90fc6c1e9a908e1ccf0208d54a817981", "score": "0.6082191", "text": "def add_to_start(self, key, value):\n self[key] = value # adding\n self.move_to_start(key) # moving", "title": "" }, { "docid": "a2b6981f2124c7ec9219912aa17dd75d", "score": "0.60670775", "text": "def _remove_from_list(self, item, list_key):\n with self.mutable(list_key) as l:\n if item in l:\n l.remove(item)", "title": "" }, { "docid": "2caed29444929368776511c5aab179e2", "score": "0.60443157", "text": "def put(self, key: int, value: int) -> None:\n \n index = key%self.size\n if(self.a[index]==None):\n self.a[index] = ListNode(key,value)\n else:\n current_Node = self.a[index]\n #update value of head node to required key\n while True:\n if current_Node.key == key:\n current_Node.val = value\n return\n if current_Node.next == None:\n break\n current_Node = current_Node.next\n \n current_Node.next = ListNode(key,value)", "title": "" }, { "docid": 
"a6f08561c4c3b5ded5b765d78aaa1d64", "score": "0.6027231", "text": "def remove(self, key: int) -> None:\n data = self.data\n\n\n cell = data[self.hash(key)]\n if cell:\n if cell.key == key:\n data[self.hash(key)] = cell.next\n self.n -= 1\n return \n cur = cell\n while cur:\n if cur.next and cur.next.key == key:\n cur.next = cur.next.next\n self.n -=1\n break\n cur = cur.next\n return\n else:\n return", "title": "" }, { "docid": "3a5b48e3fc31ad6c9d4ed2c1a6c2e000", "score": "0.5985992", "text": "def _add_to_list(self, item, list_key):\n with self.mutable(list_key) as l:\n l.append(item)", "title": "" }, { "docid": "b3bfe81ec4139a9c5f4ade577090f9da", "score": "0.5975116", "text": "def enqueue(self, key):\n element = Element(key, None)\n if self.empty():\n self.head = element\n else:\n self.tail.pointer = element\n self.tail = element", "title": "" }, { "docid": "1d5f2c70f549a1674721b317ab8ae231", "score": "0.5974976", "text": "def __setitem__(self, key, value):\n self._list[key] = value\n return None", "title": "" }, { "docid": "3afc178dc71371ca54b36b45ef85a515", "score": "0.597273", "text": "def removeByKey(self, key):\n current=self.head\n\n while current != None:\n if current.data[0] == key:\n self.remove(current.data)\n return\n\n current=current.next\n \n raise KeyError(\"Value not present\")", "title": "" }, { "docid": "cc4ab1cf3b91327f02e3703c348c3583", "score": "0.5965164", "text": "def delete(self, key):\n # find hash index:\n index = self.hash_index(key)\n # if current is None, print key not found:\n if self.storage[index] is None:\n return\n else:\n # set previous and current variables:\n previous = None\n current = self.storage[index]\n # while there's more than 1 node in the linked list:\n while current.next is not None:\n # if keys match:\n if current.key == key:\n # if key is first node:\n if previous is None:\n # change pointer from first node to next:\n self.storage[index] = current.next\n else:\n # skip over node so previous points to next:\n previous.next = current.next\n # decrease count\n self.counter -= 1\n break\n else:\n # keep moving down the list looking for matching key\n previous = current\n current = current.next\n # if only 1 item in the list and keys match, replace with None\n if previous is None and current.key == key:\n self.storage[index] = None\n self.counter -= 1\n if (self.counter/self.capacity) >= .7:\n self.resize(new_capacity=self.capacity*2)\n return", "title": "" }, { "docid": "903c404450235e480f6be7df4cd3f9ba", "score": "0.5952744", "text": "def insert(self, item, key):\n\n index = len(self.nodes)\n self.nodes.append((item, key))\n\n while index > 0:\n parent = self._parent(index)\n if self.nodes[parent][1] > key:\n self.nodes[parent], self.nodes[index] = \\\n self.nodes[index], self.nodes[parent]\n index = parent", "title": "" }, { "docid": "bea12c718df9faaa5c0770e66dbcca68", "score": "0.5937095", "text": "def append(self, key):\n new_node = Node(key)\n if self.head is None:\n self.head = new_node\n else:\n current_node = self.head\n while current_node.next:\n current_node = current_node.next\n current_node.next = new_node\n new_node.previous = current_node", "title": "" }, { "docid": "6066ddf177bd4d30a509c9ad8d31cbb4", "score": "0.5899931", "text": "def remove(self, key):\n # your code here\n prev, curr = self._linear_search(key)\n if curr is None:\n value = None\n else:\n value = curr._value\n if prev is None:\n self._front = curr._next\n else:\n prev._next = curr._next\n \n return value", "title": "" }, { "docid": 
"c91cf7a2db64069d6f26464e2b4d59c9", "score": "0.58926606", "text": "def remove(self, key):\n temp=self.head.next\n prev=self.head\n while(temp.next!=None):\n if(temp.data==key):\n return key\n else:\n temp=temp.next\n prev=prev.next\n return None", "title": "" }, { "docid": "9c82e4e46b46627f835c277dddb4524b", "score": "0.5888624", "text": "def remove(self, key):\n if(self.head == None):\n return 'Empty LinkedList'\n if(self.head.data == key):\n self.head = self.head.next\n return 'Done'\n prev = self.head\n temp = self.head.next\n while(temp!=None):\n if(temp.data == key):\n prev.next = temp.next\n return 'Done'\n prev = temp\n temp = temp.next\n return 'Key not found'", "title": "" }, { "docid": "ee94e045035d1267c55c48caa11a57af", "score": "0.5881477", "text": "def replace_head(input_list):\n input_list[0]=42\n return input_list", "title": "" }, { "docid": "d736392ea712bd6c528f854207190d72", "score": "0.5881373", "text": "def unshift(self, value): # O(1)\n self._items.unshift(value)", "title": "" }, { "docid": "3c89bcbcd6f26a1d44016508d8785a3f", "score": "0.58781224", "text": "def put(self, key, item):\n if key is not None and item is not None:\n last = len(self.all_keys) - 1\n self.all_keys.append(key)\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n removing = self.all_keys.pop(last)\n del self.cache_data[removing]\n print(\"DISCARD: {}\".format(removing))", "title": "" }, { "docid": "b6fc092df7daffa5a25b7434559aaac4", "score": "0.5876702", "text": "def _adjust_entries_in_queue(self,key): ## internal method \n if key in self._q:\n self._q.remove(key)\n self._q.appendleft(key)\n else:\n self._q.appendleft(key)", "title": "" }, { "docid": "d0cb599f650ca4e47ab36f944b6580f6", "score": "0.58751494", "text": "def put(self, key: int, value: int) -> None:\n hash_id=self.get_index(key)\n if self.hashmap[hash_id] == None:\n self.hashmap[hash_id]=Listnode((key,value))\n else:\n current=self.hashmap[hash_id]\n while current:\n k,v =current.key_value\n if k == key:\n current.key_value = (key,value)\n return\n if current.next == None:\n current.next = Listnode((key, value))\n return\n else:\n current=current.next\n return", "title": "" }, { "docid": "b35985da4eeeada58df01a2e82a1590d", "score": "0.58726424", "text": "def _put(self, node, key, index):\n if node is None:\n node = Node()\n if index == len(key):\n return node\n order = ord(key[index])\n node.next[order] = self._put(node.next[order], key, index+1)\n node.size += 1\n return node", "title": "" }, { "docid": "fef541a5882ce810736de371016c6a98", "score": "0.58448285", "text": "def insert_item_before_item(self, item, data):\t\t\n\t\t\n\n\t\tif item == self.head.data:\n\t\t\tvalue = Node(data)\n\t\t\tvalue.next = self.head\n\t\t\tself.head = value\n\t\t\treturn \n\n\n\t\tvalue = Node(data)\n\t\tstart = self.head\n\t\twhile start.next != None:\n\t\t\tif start.next.data == item:\n\t\t\t\tbreak\n\t\t\tstart = start.next\n\t\tif start.next == None:\n\t\t\tprint(item,\"is not in this list\")\n\t\telse:\n\t\t\tvalue.next = start.next\n\t\t\tstart.next = value", "title": "" }, { "docid": "2605d3d44c09743dcaa3de29ed8a56a2", "score": "0.5843531", "text": "def insert_before(self, key, next_node):\n new_node = Node(key)\n if next_node is None:\n print (\"Next node is None. 
Did nothing.\")\n return \n new_node.previous = next_node.previous\n new_node.next = next_node\n next_node.previous = new_node\n if new_node.previous:\n new_node.previous.next = new_node", "title": "" }, { "docid": "212554791e72660a5de415d5f9794218", "score": "0.58359325", "text": "def __getitem__(self, key):\n if key in self.index:\n node = self.index[key]\n node.remove()\n node.setnext(self.head)\n return self.index[key].value\n return None", "title": "" }, { "docid": "5f3fa6483355ba3fdcdd7f6759032854", "score": "0.58045965", "text": "def delete(self, key):\n current_node = self.head\n previous_node = None\n while current_node:\n if current_node.key == key:\n if previous_node:\n previous_node.next = current_node.next\n else:\n self.head = current_node.next\n if current_node.next:\n current_node.next.previous = previous_node\n return current_node\n previous_node = current_node\n current_node = current_node.next\n return current_node", "title": "" }, { "docid": "37b5f58ab69793831abb605b6c669dfc", "score": "0.5793984", "text": "def appendleft(self, key, value):\n n = node()\n n.key = key\n n.value = value\n #add n to front of list\n self.l.next.prev = n\n n.next = self.l.next \n n.prev = self.l\n self.l.next = n\n #add n to the map\n self.d[key] = n", "title": "" }, { "docid": "2de8ffc0bcbc09c85ba2e63c8e4fc71a", "score": "0.57900405", "text": "def append(self, key):\n new_node = Node(key)\n self.size += 1\n if not self.head:\n self.head = new_node\n return\n\n tmp = self.head\n while tmp.next:\n tmp = tmp.next\n tmp.next = new_node", "title": "" }, { "docid": "a33acb2f7ff213d7570e0dd58cfff488", "score": "0.57731175", "text": "def __delitem__(self, key):\n self.pop(self.pop(key))", "title": "" }, { "docid": "f959c33df369c4597f2e054a27e4db4c", "score": "0.57515085", "text": "def __delitem__(self, key: str) -> None:\n del_key = key.lower().encode(\"latin-1\")\n\n pop_indexes = []\n for idx, (item_key, item_value) in enumerate(self._list):\n if item_key == del_key:\n pop_indexes.append(idx)\n\n for idx in reversed(pop_indexes):\n del self._list[idx]", "title": "" }, { "docid": "c4f12370809f4fb84b8513ff663d9ef2", "score": "0.5717696", "text": "def put(self, key, item):\n if key is None or item is None:\n pass\n if key and item:\n self.cache_data[key] = item\n if key not in self.LRUkeys:\n self.LRUkeys.append(key)\n else:\n self.LRUkeys.append(self.LRUkeys.pop(self.LRUkeys.index(key)))\n if len(self.LRUkeys) > BaseCaching.MAX_ITEMS:\n discard = self.LRUkeys.pop(0)\n del self.cache_data[discard]\n print(\"DISCARD: {}\".format(discard))", "title": "" }, { "docid": "671b81c85b7a63288d965949c14c3db2", "score": "0.57173526", "text": "def __delitem__(self, key):\n if self.is_empty():\n raise KeyError(\"Unable to delete item: key is missing!\")\n\n if not isinstance(key, type(self.contents[0].key)):\n raise TypeError(\"Unable to delete item: key of incorrect type!\")\n\n (boolean, current, counter) = self._retrieve(key)\n if not boolean:\n raise KeyError(\"Unable to delete item: key is missing!\")\n\n # If there are multiple items with the same searchkey, removing only one of those will suffice.\n if len(current.contents[counter].node_content) > 1:\n del current.contents[counter].node_content[0] # deleting first item in node_content\n return\n\n if current._no_children():\n # The node with the given key will have to be deleted.\n del current.contents[counter]\n if current.parent is not None:\n # If the (sub)tree is an empty leaf, the fix-algorithm will be called.\n if current.is_empty():\n 
current._fix()\n\n # If we're dealing with an internal node, then it must swap positions with it's inorder successor.\n else:\n inord_succ = current.children[counter+1]._inorder_successor()\n # The inorder successor is in the left TreeItem, always, that is the whole essence of the inorder successor.\n current.contents[counter] = inord_succ.contents[0]\n del inord_succ.contents[0]\n if inord_succ.is_empty(): # If the inorder successor's node is empty after deleting, the fix algorithm\n inord_succ._fix() # must be called.\n return", "title": "" }, { "docid": "5dfd5ebe720dbac74c0aa9a00c39e6d0", "score": "0.57126516", "text": "def delete_by_key(self, key):\n\n current = self.head\n previous = None\n found = False\n\n while current and not found:\n if current.data == key and current == self.head:\n found = True\n self.head = current.next_node\n elif current.data == key:\n found = True\n previous.next_node = current.next_node\n else:\n previous = current\n current = current.next_node\n return current", "title": "" }, { "docid": "90ef553c3dcb9b5f43796712ced2eecd", "score": "0.5711856", "text": "def __getitem__(self, key):\n rv = self._mapping[key]\n if self._queue[-1] != key:\n self._remove(key)\n self._append(key)\n return rv", "title": "" }, { "docid": "bc0287f09a699d6397ac7f7bd5b2b726", "score": "0.5691216", "text": "def remove(self, key):\r\n i = key % self.base\r\n if self.keyList[i] != 0:\r\n # 为0时证明key根本不在,不用考虑,所以只考虑不为0时的\r\n root = self.keyList[i]\r\n # 根节点\r\n if root.key == key:\r\n # 第一个点就是目标点\r\n if root.next:\r\n # 如果存在下一个节点,将keyList直接链接到下一个点上\r\n self.keyList[i] = root.next\r\n else:\r\n # 不存在下一个节点,即这个链表只有目标点时,重置链表\r\n self.keyList[i] = 0\r\n else:\r\n while root.next:\r\n if root.next.key == key:\r\n # 找到目标点\r\n root.next = root.next.next\r\n break\r\n root = root.next", "title": "" }, { "docid": "8d19d272e00a045be7611e5c5a386495", "score": "0.56843966", "text": "def insert(self, key, value):\n index = len(self._list)\n item = PriorityQueueItem(key, value, index)\n self._list.append(item)\n self._repair_heap_up(index)", "title": "" }, { "docid": "0075ce38d7a614b18392836531ec62cd", "score": "0.5678694", "text": "def remove(self, key: int) -> None:\n \n \n hc = hash(key)\n index = self.getCompressed(hc)\n \n head = self.buckets[index]\n \n prev = None\n \n while head is not None:\n \n if head.key==key:\n \n # do \n \n if prev is None:\n self.buckets[index] = head.next\n else:\n prev.next = head.next\n self.count-=1\n \n prev = head\n head = head.next", "title": "" }, { "docid": "7d4db56317c78093849fc6b2733d8ad0", "score": "0.56628597", "text": "def __delitem__(self, key):\n if key in self.index:\n node = self.index[key]\n if node == self.tail:\n self.tail = node.prev\n if node == self.head:\n self.head = node.successor\n del self.index[key]\n self.count -= 1\n node.remove()\n self.checksize()", "title": "" }, { "docid": "8e84ff78e5e3400a820cda6a9c56a248", "score": "0.5650674", "text": "def insertBefore(list, nextItem, x):\n\n pass", "title": "" }, { "docid": "5757772e85c73176f600c66ef6a25f4b", "score": "0.5650055", "text": "def insert(self, key, id_):\n self.A.append((key, id_))\n self._siftup(len(self.A) - 1)", "title": "" }, { "docid": "8e9a83708be74bf0d2ab6e98d7324d2a", "score": "0.5638734", "text": "def __delitem__(self, key):\n n = self.d[key]\n n.next.prev = n.prev\n n.prev.next = n.next\n del self.d[key]", "title": "" }, { "docid": "0f8cbbb55b4d22fb341630e6411b952d", "score": "0.563836", "text": "def unshift(self, item):\n\n # create new node. forward to head's next. 
back to head\n new = Node(item, succeeding=self.head.succeeding, previous=self.head)\n self.cursor = self.head.succeeding\n self.head.succeeding.previous = new\n self.head.succeeding = new\n self._length += 1", "title": "" }, { "docid": "81fb245179f80d45d46343f7c3a226c0", "score": "0.56381696", "text": "def remove(self, key):\n index = key % self.size\n if self.hashlist[index]:\n singlenode = self.hashlist[index]\n prenode = singlenode\n if key == singlenode.key:\n self.hashlist[index] = singlenode.next\n return\n singlenode = singlenode.next\n while singlenode:\n if key == singlenode.key:\n prenode.next = singlenode.next\n return\n prenode = singlenode\n singlenode = singlenode.next\n\n\n\n # Your MyHashMap object will be instantiated and called as such:\n # obj = MyHashMap()\n # obj.put(key,value)\n # param_2 = obj.get(key)\n # obj.remove(key)", "title": "" }, { "docid": "db547f98caadaadac19f4bf19cde3137", "score": "0.5613023", "text": "def remove(self, key: int) -> None:\n i = self.index(key)\n if not self.nodes[i]:\n return\n prev = self.find(self.nodes[i], key)\n if not prev.next:\n return\n else:\n prev.next = prev.next.next\n return", "title": "" }, { "docid": "b52d99b20b19d437844f05c5171ee4b7", "score": "0.55961514", "text": "def put(self, key: str, value: object) -> None:\n # get the list at hash location\n list_ = self._get_list(key)\n\n # if key exists, replace value\n node = list_.contains(key)\n if node is not None:\n node.value = value\n return\n\n # key does not yet exist, add key/value pair\n list_.insert(key, value)\n self.size += 1", "title": "" }, { "docid": "eeb4875e92c6e5807d8908baab10b7ce", "score": "0.5570143", "text": "def __delitem__(self, k):\n h = self.head\n while h.next:\n if h.next.key == k:\n h.next = h.next.next\n self.n -= 1\n return\n h = h.next\n raise KeyError(k)", "title": "" }, { "docid": "fae8e772033bc69d12352f78d6da67e5", "score": "0.5554949", "text": "def prepend(self, item):\n self.insert(0, item)", "title": "" }, { "docid": "2ec70a40ff1f466e2e00e247152c1eec", "score": "0.5539818", "text": "def remove(self, key):\n ll = self.hashtable[self._hash_key(key)]\n if ll is None:\n raise TypeError\n temp = ll.head\n if ll.head.key is key:\n temp = ll.head\n ll.head = ll.head.next\n return temp.val\n while ll:\n if ll.head.next.key is key:\n temp = ll.head.next\n ll.head.next = ll.head.next.next\n return temp.val\n if ll.head is None:\n self.hashtable[self._hash_key(key)] = None\n raise TypeError", "title": "" }, { "docid": "f1d8f0cbb35ab4cb01b05a4edc16c2d1", "score": "0.55357224", "text": "def set(self, _key, _value, **kw):\n if kw:\n _value = _options_header_vkw(_value, kw)\n _key = _unicodify_header_value(_key)\n _value = _unicodify_header_value(_value)\n self._validate_value(_value)\n if not self._list:\n self._list.append((_key, _value))\n return\n listiter = iter(self._list)\n ikey = _key.lower()\n for idx, (old_key, _old_value) in enumerate(listiter):\n if old_key.lower() == ikey:\n # replace first occurrence\n self._list[idx] = (_key, _value)\n break\n else:\n self._list.append((_key, _value))\n return\n self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]", "title": "" }, { "docid": "9e9186b44eaec30633cc183b1919bf8b", "score": "0.55349404", "text": "def remove(self, key: int) -> None:\n hash_id=self.get_index(key)\n if self.hashmap[hash_id] == None:\n return\n else:\n current=self.hashmap[hash_id]\n prev=None\n while current:\n k,v =current.key_value\n if k == key:\n if prev == None:\n self.hashmap[hash_id]=None\n return\n else:\n 
prev.next=current.next\n return\n else:\n prev=current\n current=current.next\n return", "title": "" }, { "docid": "cae190aa6fa0d127b86ee74d7bda300c", "score": "0.5534382", "text": "def push_key(self,key):\n return _ldns.ldns_key_list_push_key(self,key)", "title": "" }, { "docid": "09fefad60bba70775cd634e63d81cbaa", "score": "0.5530455", "text": "def insert(self, key) -> None:\n self.heap_size += 1\n self.A.append(-math.inf)\n self.increase_key(self.heap_size, key)", "title": "" }, { "docid": "a72116d3ef57213e1c0a87f0288f227f", "score": "0.5528654", "text": "def __delitem__(self, key):\n self.pop(key)", "title": "" }, { "docid": "34134b3a4d0231765fc74442fca690d8", "score": "0.55276364", "text": "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n last_key = self.stk.pop()\n print(\"DISCARD: {}\".format(last_key))\n del self.cache_data[last_key]\n self.stk.append(key)", "title": "" }, { "docid": "fffb72515f81164b3e916fba39109c16", "score": "0.5525205", "text": "def delete(self, key):\n # Your code here\n keyHash = self.hash_index(key)\n cur = self.capacity[keyHash]\n if cur == None:\n print('warning Key not found')\n # if key is head\n if cur.key == key:\n cur.value = None\n cur = cur.next\n return cur\n else: # key not head\n while cur != None:\n prev = cur\n #see if keys matchs\n cur = cur.next\n if cur.key == key:\n #set prev next to cur.next, to skip over current\n prev.next = cur.next\n #self.get_load_factor()\n return cur.value\n \n #set current to next\n \n #otherwise check the next node", "title": "" }, { "docid": "77b2d824e18ec8c8d331fec1d2372dbf", "score": "0.5521774", "text": "def put(self, key, item):\n if key is not None and item is not None:\n if key in self.cache_data:\n self.cache_data[key] = item\n self.frequency[key] += 1\n self.lfu_order.remove(key)\n else:\n if len(self.cache_data) >= self.MAX_ITEMS:\n min_value = min(self.frequency.values())\n min_keys = [k for k in self.frequency\n if self.frequency[k] == min_value]\n for i in range(len(self.lfu_order)):\n if self.lfu_order[i] in min_keys:\n break\n del self.cache_data[self.lfu_order[i]]\n del self.frequency[self.lfu_order[i]]\n print(\"DISCARD:\", self.lfu_order[i])\n self.lfu_order.pop(i)\n self.cache_data[key] = item\n self.frequency[key] = 1\n self.lfu_order.append(key)", "title": "" }, { "docid": "ee58fcbe0e18eaa8e72197820e062ab4", "score": "0.54907763", "text": "def remove(self, key):\r\n if self.head is None:\r\n return False\r\n if self.head.key == key:\r\n self.head = self.head.next\r\n self.size = self.size - 1\r\n return True\r\n cur = self.head.next\r\n prev = self.head\r\n while cur is not None:\r\n if cur.key == key:\r\n prev.next = cur.next\r\n self.size = self.size - 1\r\n return True\r\n prev = cur\r\n cur = cur.next\r\n return False", "title": "" }, { "docid": "dfc7747db410ad327397dd27616b664b", "score": "0.5484711", "text": "def __setitem__(self, key, value):\n self.insert(key, value)", "title": "" }, { "docid": "fe571cf74644ec17c60236bf08cd5eb0", "score": "0.54834", "text": "def move_object_to_top(id):", "title": "" }, { "docid": "d5aad2e411522af5ec616503c20eb4f6", "score": "0.54734606", "text": "def dec(self, key):\n cur = self.dt[key]\n cur.keys.discard(key)\n\n if cur.cnt - 1 == cur.pre.cnt:\n new = cur.pre\n else:\n new = ListNode(key, cur.cnt - 1)\n self.insert(cur.pre, new)\n\n self.dt[key] = new\n if not cur.keys and cur.cnt != 0:\n cur.pre.next, cur.next.pre = cur.next, cur.pre", "title": "" }, { "docid": 
"595591804ca0d10665b8784ca05caa83", "score": "0.5471652", "text": "def update(self, key):\n if key == pg.K_w or key == pg.K_UP:\n self.prev_item()\n elif key == pg.K_s or key == pg.K_DOWN:\n self.next_item()\n elif key == pg.K_RETURN or key == pg.K_f:\n self.choose_item()", "title": "" }, { "docid": "ee9c13ddd98a1504efd4b4680ad6e24e", "score": "0.54651386", "text": "def remove(self, key):\n pass", "title": "" }, { "docid": "9654c5c0f1a311aa650c8733cd058da7", "score": "0.54557216", "text": "def __setitem__(self, key, value):\n node = None\n if key in self.index:\n node = self.index[key]\n node.remove()\n node.setnext(self.head)\n self.head = node\n node.value = value\n else:\n node = Node(key, value)\n self.index[key] = node\n if not self.head:\n self.tail = node\n node.setnext(self.head)\n self.head = node\n self.count += 1\n self.checksize()", "title": "" }, { "docid": "23c87d9a2cd44b70bd900de63bb680bf", "score": "0.54512745", "text": "def __setitem__(self, key, value):\n position = self.hash(key)\n count = 0\n\n\n if self.array[position] == None:\n linklist = List()\n pair = (key,value)\n linklist.insert(count,pair)\n head = linklist\n set = (count+1,head) #(number_of_items,pointer_to_linklist)\n self.array[position] = set\n\n return\n\n elif self.array[position]!= None:\n copy = self.array[position][1].head\n\n\n while copy!= None:\n if copy.item[0] == key:\n new_tuple = (key,value)\n copy.item = new_tuple\n return\n\n copy = copy.next\n\n\n self.array[position][1].insert(self.array[position][0],(key,value))\n new_tuple = (self.array[position][0]+1,self.array[position][1])\n self.array[position] = new_tuple\n self.collisions+=1\n\n return", "title": "" }, { "docid": "7a7edf279809d8ef6e52a7a58036f5cc", "score": "0.54457074", "text": "def put(self, key, value):\n index = key % self.size\n if not self.hashlist[index]:\n singlenode = Node(key, value)\n self.hashlist[index] = singlenode\n else:\n singlenode = self.hashlist[index]\n while singlenode.next:\n if key == singlenode.key:\n singlenode.val = value\n return\n singlenode = singlenode.next\n if key == singlenode.key:\n singlenode.val = value\n return\n singlenode.next = Node(key, value)", "title": "" }, { "docid": "599848974fef85c4dab2c02796cd0810", "score": "0.54426867", "text": "def pop(self, key, default=...):\n ...", "title": "" }, { "docid": "d4df67cfe7d6eb708a76e057e499f225", "score": "0.5435662", "text": "def pop_first(self):\n\t\tif self.head == None:\n\t\t\tprint(\"list has no element\")\n\t\t\treturn \n\t\tself.head = self.head.next", "title": "" }, { "docid": "241d98ae4fcd6feb4c56255138a79366", "score": "0.54346967", "text": "def put(self, key, value):\n node = Node(key, value)\n hashKey = self.hash(key)\n head = self.table[hashKey]\n if head is None:\n self.table[hashKey] = node\n return\n last = None\n while head is not None:\n if(head.key == key):\n head.value = value\n return\n last = head\n head = head.next\n last.next = node", "title": "" }, { "docid": "5ff8de1a15420d6b599167060178660e", "score": "0.54343766", "text": "def shift_left(my_list):\r\n #my_list = []\r\n new_list = my_list[1:]\r\n new_list.insert(2,my_list[0])\r\n return new_list", "title": "" }, { "docid": "364156383e293b7a6ad9defbe164c522", "score": "0.5433465", "text": "def __setitem__(self, k, o):\n try:\n (age, item) = self.items[k]\n del self.age_to_items[age]\n except KeyError:\n pass\n\n self._set_newest(k, o)\n\n if len(self.items) > self.size:\n # Maximum number of keys reached\n # Delete the last recently used key\n while self.oldest not in 
self.age_to_items:\n self.oldest += 1\n\n del self.items[self.age_to_items.pop(self.oldest)]", "title": "" }, { "docid": "043ec5618c9a0fbaa7f3f7afd453abc4", "score": "0.5432182", "text": "def put(self, key, item):\n if key and item:\n self.cache_data[key] = item\n if len(self.cache_data.keys()) <= self.MAX_ITEMS:\n pass\n else:\n del self.cache_data[self.last_key]\n print(f'DISCARD: {self.last_key}')\n self.last_key = key", "title": "" }, { "docid": "8b359e6f0b1a108ca333555d889a2d6a", "score": "0.54296947", "text": "def sort(self, key):\r\n self.list_places = sorted(self.list_places, key=attrgetter(key, \"name\"))", "title": "" }, { "docid": "9608066f928e0c91cee07292de5c4a3e", "score": "0.5416643", "text": "def insert(self, key, value):\n\n if key in self.hash_map:\n node = self.hash_map[key]\n node.value = value\n\n if self.head != node:\n self.remove(node)\n self.set_head(node)\n else:\n new_node = QNode(key, value)\n if self.current_size == self.capacity:\n del self.hash_map[self.end.key]\n self.remove(self.end)\n self.set_head(new_node)\n self.hash_map[key] = new_node", "title": "" }, { "docid": "c60e2f0c850e3160267b515d7da1a8d3", "score": "0.5410061", "text": "def insert_item_after_item(self, item, data):\n\t\tvalue = Node(data)\n\t\tstart = self.head\n\t\twhile start != None:\n\t\t\tif start.data == item:\n\t\t\t\tbreak\n\t\t\tstart = start.next\n\n\t\tif start == None:\n\t\t\tprint(item, \"is not in this list\")\n\t\telse:\n\t\t\tvalue.next = start.next\n\t\t\tstart.next = value\n\t\t\treturn \t\t\n\t\t\t# value.next = start.next\n\t\t\t# start.next = value", "title": "" }, { "docid": "7afcf72b7ab794358df4970f0a00bb13", "score": "0.54066145", "text": "def __setitem__(self, key, item):", "title": "" }, { "docid": "80b15888d56a9f2149c5db5761bac2f1", "score": "0.5404773", "text": "def put(self, key, item):\n if key and item:\n if key in self.cache_data:\n self.queue.remove(key)\n elif len(self.cache_data) >= self.MAX_ITEMS:\n popped = self.queue.popleft()\n del self.cache_data[popped]\n print(\"DISCARD: \" + str(popped))\n self.queue.append(key)\n self.cache_data[key] = item", "title": "" }, { "docid": "1929c5bf39edcdee1df2bce4a33538dc", "score": "0.54008895", "text": "def put(self, key, item):\n if key is None or item is None:\n return\n keys_list = self.cache_data.keys()\n if len(keys_list) == super().MAX_ITEMS:\n if key in keys_list:\n del self.cache_data[key]\n else:\n last_key = list(keys_list)[-1]\n print(f'DISCARD: {last_key}')\n del self.cache_data[last_key]\n self.cache_data[key] = item", "title": "" }, { "docid": "d8b345acdf0276910bb90522843c8f12", "score": "0.5395307", "text": "def put(self, key, value):\n index = self._get_index(key)\n head = self._bucket_list[index]\n while head is not None:\n if head.key == key:\n head.value = value\n return\n head = head.next\n self._size += 1\n head = self._bucket_list[index]\n new_node = Node(key, value)\n new_node.next = head\n self._bucket_list[index] = new_node\n if self._should_expand():\n self.expand()", "title": "" }, { "docid": "394fc4ceb62cf37962fd73a39d6d4ca4", "score": "0.5394903", "text": "def put(self, key: int, value: int) -> None:\n index = key % self.MOD\n values = self.ht[index]\n for i, v in enumerate(values):\n if key == v[0]:\n values[i] = (key, value)\n break\n else:\n values.append((key, value))", "title": "" }, { "docid": "e4b0f25ed98a8abe8e2fd4820d7a0c0d", "score": "0.5386447", "text": "def remove(self, item):\t\t\n\t\tif self.head == None:\n\t\t\tprint(\"No item in this list\")\n\t\t\treturn \n\n\t\tif 
self.head.data == item:\n\t\t\tself.head = self.head.next\n\t\t\treturn\n\n\t\tstart = self.head\n\t\twhile start.next != None:\n\t\t\tif start.next.data == item:\n\t\t\t\tbreak \n\t\t\tstart = start.next\n\t\tif start.next == None:\n\t\t\tprint(item, \"is not present in this list\")\n\t\telse:\n\t\t\tstart.next = start.next.next", "title": "" }, { "docid": "7195ccec227b70b353680f434f97cca2", "score": "0.5383614", "text": "def remove(self, key):\n raise NotImplementedError('Method from abstract implementation')", "title": "" }, { "docid": "499e1eb9e2406f36ffd4f8e0c2dcb05e", "score": "0.5374034", "text": "def add(self, key, value):\n index = self.hash(key)\n if self.array[index] is not None:\n for value in self.array[index]:\n if value[0] == key:\n value[1] = value\n break\n else:\n self.array[index].append([key, value])\n\n else:\n self.array[index] = []\n self.array[index].append([key, value])", "title": "" }, { "docid": "ab4f717a412c8ad285e740fd12d734d6", "score": "0.5363762", "text": "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "title": "" }, { "docid": "abdd4cd5417a265fea0cf40a019685fc", "score": "0.53632635", "text": "def set(self, key, value):\n # If key is in map, update the value and move node to front of queue\n evict = None\n if key in self.map:\n # Update value of key\n if isinstance(self.map[key][0], set):\n self.map[key][0].add(value)\n else:\n self.map[key][0] = value\n node = self.map[key][1]\n # Update Queue using the following steps:\n # Update tail if current node is tail and there is more than one element in cache\n if self._tail == node and node.prev != self._dummy_head:\n self._tail = node.prev\n # Extract node from current position in Queue\n node.prev.next = node.next\n if node.next:\n node.next.prev = node.prev\n # Modify node's prev and next references\n node.prev, node.next = self._dummy_head, self._dummy_head.next\n # Transplant node into beginning of Queue\n if self._dummy_head.next is not None:\n self._dummy_head.next.prev = node\n self._dummy_head.next = node\n return None\n elif self._size == self._capacity:\n # Evict LRU element using key of LRU\n lru = self._tail\n evict = (lru.data, self.map[lru.data][0])\n self.map.pop(lru.data)\n self._tail = lru.prev if lru.prev != self._dummy_head else None\n lru.prev.next = None\n self._size -= 1\n\n node = ListNode(key, self._dummy_head, self._dummy_head.next)\n if self._dummy_head.next is not None:\n self._dummy_head.next.prev = node\n self._dummy_head.next = node\n if self._tail is None:\n self._tail = node\n self.map[key] = [value, node]\n self._size += 1\n return evict", "title": "" }, { "docid": "6e59409f484f43b4df5428f8fe8a2bc2", "score": "0.53602254", "text": "def __setitem__(self, key, lumi):\n try:\n oldLumi = sorted(self.eventsPerLumi.keys())[key] # Extract the lumi from the sorted list\n del self.eventsPerLumi[oldLumi] # Delete it and add the new one\n except IndexError:\n pass\n self.appendLumi(lumi)", "title": "" }, { "docid": "80277a93cec72990c66cf62aa4d8826e", "score": "0.5342852", "text": "def remove_key(self, key):\n for index, element in enumerate(self.heap): # pragma: no cover\n if element[0] == key:\n last_element = self.heap.pop()\n self.heap[index] = last_element\n self._build_push_down_heapify(index)\n return self.heap\n return self.heap", "title": "" } ]
fe32bdc2e08352e3b214f2ef83cc9a05
Return vector of distances (from vector a to each vector in matrix B).
[ { "docid": "f90a260f3c1a7dec66483c160d36697c", "score": "0.74742806", "text": "def distance(self, a, B):\n return np.linalg.norm(a-B, axis=1, ord=2)", "title": "" } ]
[ { "docid": "b999fcf1356d308e2fae0525e6491457", "score": "0.6998436", "text": "def calculate_distance_list(rA, rB):\n squared_sum = 0\n for dim in range(len(rA)):\n squared_sum += (rA[dim] - rB[dim])**2\n \n distance = np.sqrt(squared_sum)\n return distance", "title": "" }, { "docid": "751f44a0928531d323338f05df29c154", "score": "0.698104", "text": "def euclidean_distance(a, b):\n n = a.shape[0]\n m = b.shape[0]\n result = np.empty((n, m), dtype='float32')\n\n for i in range(n):\n result[i] = np.linalg.norm(b - a[i], axis=1)\n\n return result", "title": "" }, { "docid": "b18ecd513476f97773edb60cb8236b67", "score": "0.69747764", "text": "def compute_distance(A, B):\n if len(A) != len(B) or len(A[1]) != len(B[1]):\n raise IndexError\n n = len(A)\n m = len(A[1])\n distance = 0\n for i in range(n):\n for j in range(m):\n distance += pow((A[i][j] - B[i][j]), 2)\n return distance", "title": "" }, { "docid": "ac14e43c04de1d8c5c30e2872ea3d249", "score": "0.6846432", "text": "def calc_distance(A, B):\n dist = np.linalg.norm(B - A)\n return dist", "title": "" }, { "docid": "f0330e4b708a55ecd098fa2a52c9bf7a", "score": "0.67587334", "text": "def distance(a: np.ndarray, b: np.ndarray) -> float:\n return np.linalg.norm( np.subtract(a , b))", "title": "" }, { "docid": "f0025bc9ebf2972bfb1935c5693e3a85", "score": "0.66584617", "text": "def calculate_distance(rA, rB):\n dist_vec = (rA-rB)\n distance = np.linalg.norm(dist_vec)\n return distance", "title": "" }, { "docid": "2618c4156e371b7e28fe193adc676348", "score": "0.66153526", "text": "def distance(a, b):\n return norme_vecteur(difference_vecteur(a, b))", "title": "" }, { "docid": "7e3bbb893ed8f22acfbc50231c647003", "score": "0.64958817", "text": "def getDistancia(A, B):\n return math.sqrt(((A[0] - B[0]) ** 2) + ((A[1] - B[1]) ** 2))", "title": "" }, { "docid": "7a07b9e88e6e9da8d2ebb6f4384d988e", "score": "0.64586884", "text": "def pairwise_euclidean_distances(A, B):\n with tf.variable_scope('pairwise_euclidean_dist'):\n # squared norms of each row in A and B\n na = tf.reduce_sum(tf.square(A), 2)\n nb = tf.reduce_sum(tf.square(B), 2)\n\n # na as a row and nb as a co\"lumn vectors\n na = tf.expand_dims(na, 2)\n nb = tf.expand_dims(nb, 1)\n\n # return pairwise euclidead difference matrix\n D = tf.reduce_mean(tf.reduce_sum(tf.reduce_sum(tf.sqrt(tf.maximum(na - 2 * tf.matmul(A, B, False, True) + nb, 0.0)),axis=1), axis=1))\n return D", "title": "" }, { "docid": "df2d115ce1582708c6347ddd738dcc71", "score": "0.6414005", "text": "def dist(iter_a, iter_b):\r\n return math.sqrt(sum((a - b)**2 for a, b in zip_longest(iter_a, iter_b,\r\n fillvalue=0)))", "title": "" }, { "docid": "7efe188629f9a2ec89b43b037780bcf2", "score": "0.63694096", "text": "def distance_nodes(self,a,b):\n distance = np.linalg.norm(np.array(a) - np.array(b))\n return distance", "title": "" }, { "docid": "5829c82155685e0af0a00ff73385bbbe", "score": "0.63446283", "text": "def distance(a, b):\n sum = 0\n if len(a) == len(b):\n for i, a_i in enumerate(a):\n diff = a_i - b[i]\n sum += diff * diff\n return sum\n return 0", "title": "" }, { "docid": "f77dc008fa17d5c206d9cfad7efc46af", "score": "0.6327542", "text": "def distance(a, b):\n [a_tonnetz, b_tonnetz] = [_to_tonnetz(x) for x in [a, b]]\n return np.linalg.norm(b_tonnetz - a_tonnetz)", "title": "" }, { "docid": "56cd42a42894b7a6242ead174cd4f7e2", "score": "0.6306227", "text": "def dist_euclidean(A, B):\n return np.linalg.norm(A - B)", "title": "" }, { "docid": "47260e8564f54d4254d1761b8464048f", "score": "0.629432", "text": "def distance(a, 
b):\n\n return ((a[0] - b[0])**2 + (a[1] - b[1])**2)**0.5", "title": "" }, { "docid": "2895144164bbbc9d90c7f6f2dabbec86", "score": "0.62916136", "text": "def distances(a, b):\n\n # a[i] and b[j]\n # set up an empty matrix output[i][j] with extra row on top and column to the left\n output = [[(0, None) for x in range(len(b) + 1)] for y in range(len(a) + 1)]\n\n # fill in the top row\n for i in range(len(a)):\n output[i + 1][0] = (i + 1, Operation.DELETED)\n\n # fill in the first column\n for j in range(len(b)):\n output[0][j + 1] = (j + 1, Operation.INSERTED)\n\n # navigate through the output matrix\n for i in range(len(a)):\n for j in range(len(b)):\n # calculate costs for deletion, insertion and substitution\n deletion = (output[i][j + 1][0] + 1, Operation.DELETED)\n insertion = (output[i + 1][j][0] + 1, Operation.INSERTED)\n substitution = (output[i][j][0] + (0 if a[i] == b[j] else 1), Operation.SUBSTITUTED)\n\n # choose based on lowest cost\n if insertion[0] <= deletion[0] and insertion[0] <= substitution[0]:\n output[i + 1][j + 1] = insertion\n elif deletion[0] <= substitution[0]:\n output[i + 1][j + 1] = deletion\n else:\n output[i + 1][j + 1] = substitution\n\n return output", "title": "" }, { "docid": "accf79f739fe5010b07688cc42271b5c", "score": "0.62498426", "text": "def dist(A, B) :\n n = len(A.data)\n Block.comparisons += 1\n\n D = A.data - B.data\n return np.sum(D**2) # // n**2 - (np.sum(D)**2 // n**2) )", "title": "" }, { "docid": "ae4d608acb619b54c86e3bc69c1b8662", "score": "0.6209211", "text": "def dist(self, a, b):\n return math.sqrt(sum([(a[i] - b[i])**2 for i in range(3)]))", "title": "" }, { "docid": "4d2079160dc5af00b696c21816bff921", "score": "0.620893", "text": "def _distance(self, a, b):\n _sum = 0\n if (not a) or (not b):\n return 1\n for d in xrange(self._dim):\n difference_sq = (a.flist[d].iweight - b.flist[d].iweight) ** 2\n _sum += difference_sq\n return sqrt(_sum)", "title": "" }, { "docid": "c70c2c3ee1f8a40a8befb76dd55d00a6", "score": "0.61898935", "text": "def calculate_distances(train: np.ndarray, test: np.ndarray) -> np.ndarray:\n vector_diff = test[:, np.newaxis] - train\n distances = np.linalg.norm(vector_diff, ord=2, axis=2)\n return distances", "title": "" }, { "docid": "ce9ae8d751122823a0aaf82b364dbe44", "score": "0.6180827", "text": "def get_dist(self, a, b):\n if self.cl is True:\n return math.sqrt(((a - b).T * (a - b)).todense())\n else:\n return (a.T * b).todense()", "title": "" }, { "docid": "965ef0ef43bfb1102cac1fbb1ee8e34d", "score": "0.6180034", "text": "def calculate_distance(rA, rB):\n \n d = rA - rB\n dist = np.linalg.norm(d)\n return dist", "title": "" }, { "docid": "32efcfa42bf8202e1b840133810cb6b7", "score": "0.6155911", "text": "def get_distances(list_of_locs:np.ndarray, point:np.ndarray) -> np.ndarray:\n return np.linalg.norm(list_of_locs - point, ord=2, axis=1)", "title": "" }, { "docid": "e6eea85ad83cad598746328c9d6da344", "score": "0.614864", "text": "def distance(a, b):\n\n vector = vector_by(a,b)\n _distance = absolute(vector)\n return _distance", "title": "" }, { "docid": "99998ea6ae0931638b6c38ea19806064", "score": "0.6130537", "text": "def calculateDistance(vector1,vector2):\n calc_sum = 0.0\n pairs = zip(vector1,vector2)\n for pair in pairs:\n calc_sum = calc_sum + ( pair[0] - pair[1] ) ** 2\n return calc_sum ** 0.5", "title": "" }, { "docid": "906a6c58cc13f19b83b5a3d5be3b494c", "score": "0.61228305", "text": "def dist(a, b):\n if a.shape != b.shape:\n raise ValueError\n return np.sqrt(np.sum(np.square(a - b)))", "title": "" }, 
{ "docid": "888d41beb653cb261780928282362fdc", "score": "0.6085567", "text": "def dist(positions):\n return array([abs(array(positions) - i) for i in positions])", "title": "" }, { "docid": "ecb450d6b1f4472a89a7fa98372d2ee3", "score": "0.60848695", "text": "def compute_distances_two_loops(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in xrange(num_test):\n for j in xrange(num_train):\n dists[i, j] = np.linalg.norm(X[i] - self.X_train[j])\n return dists", "title": "" }, { "docid": "b9c1cb7d2fd198e60f058d11d6c4815c", "score": "0.60709256", "text": "def distance(a, b):\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n, m)) space\n a, b = b, a\n n, m = m, n\n\n current_row = range(n + 1) # Keep current and previous row, not entire matrix\n for i in range(1, m + 1):\n previous_row, current_row = current_row, [i] + [0] * n\n for j in range(1, n + 1):\n add, delete, change = previous_row[j] + 1, current_row[j - 1] + 1, previous_row[j - 1]\n if a[j - 1] != b[i - 1]:\n change += 1\n current_row[j] = min(add, delete, change)\n\n return current_row[n]", "title": "" }, { "docid": "69aae99e54acd3cdda662bcf5f280e50", "score": "0.6053983", "text": "def euclidean_distance(a, b):\n return(sqrt(sum([(a_i - b_i)**2 for a_i, b_i in zip(a, b)])))", "title": "" }, { "docid": "27264f67409e25c2f8f51ec3bbbbcc57", "score": "0.6053733", "text": "def compute_euclidean_distance(vector_a, vector_b):\r\n return np.linalg.norm(vector_a - vector_b)", "title": "" }, { "docid": "c07d097f6feac5a0e1b04450c71a13f5", "score": "0.6016696", "text": "def euclidean_distances(X, Y):\r\n\r\n M = len(X)\r\n if Y.ndim == 1:\r\n N = 1\r\n D = np.zeros((M,N))\r\n\r\n for m in range(0,M):\r\n D[m] = np.linalg.norm(X[m] - Y)\r\n else:\r\n N = len(Y)\r\n D = np.zeros((M,N))\r\n\r\n for m in range(0,M):\r\n for n in range(0,N):\r\n D[m,n] = np.linalg.norm(X[m] - Y[n])\r\n\r\n return D", "title": "" }, { "docid": "c527171be66fb083db6499383a90aac4", "score": "0.6016545", "text": "def edist(A, B):\n B_transpose = tf.transpose(B) # (DxM)\n A_norm = tf.reduce_sum(A ** 2, axis=1, keepdims=True) # (Nx1)\n B_norm = tf.reduce_sum(B_transpose ** 2, axis=0, keepdims=True) # (1xM)\n A_dot_B = tf.matmul(A, B_transpose) # (NxM)\n dist = tf.sqrt(A_norm + B_norm - 2 * A_dot_B) # (NxM)\n return dist", "title": "" }, { "docid": "308a1c36befae803a1f5a804485fcfaf", "score": "0.59987545", "text": "def distances(self):\n dists = list()\n for p0, p1 in self.edges():\n dists.append(distance(p0, p1))\n return np.array(dists)", "title": "" }, { "docid": "5555ab08faba01d4776c0c967fac8d65", "score": "0.59825456", "text": "def distance(a, b):\n return norm(subs3(a, b))", "title": "" }, { "docid": "2a9e53000725e4777a4b5b6056da83b6", "score": "0.59783965", "text": "def distance_xy(A):\n d_ = np.sqrt(np.sum((A[1:,:] - A[:-1,:])**2, axis=1))\n d = [0]\n for a in d_:\n d.append(d[-1]+a)\n return np.array(d)", "title": "" }, { "docid": "9038eda52b41d394fb5d762a7bc8377e", "score": "0.5967414", "text": "def euclidean_distances(X, Y):\n\n M = X.shape[0]\n N = Y.shape[0]\n D = np.zeros([M, N])\n\n for i in range(M):\n for j in range(N):\n D[i, j] = np.sqrt(np.dot(X[i] - Y[j], X[i] - Y[j]))\n\n return D", "title": "" }, { "docid": "d08285dff2863d78124e306b81a0a347", "score": "0.5966403", "text": "def batched_pairwise_l2_distance(A, B):\n batch = tf.shape(A)[0]\n row_norms_A = tf.math.reduce_sum(tf.square(A), axis=-1)\n row_norms_A = tf.reshape(row_norms_A, [batch, -1, 1]) # Column 
vector.\n\n row_norms_B = tf.math.reduce_sum(tf.square(B), axis=-1)\n row_norms_B = tf.reshape(row_norms_B, [batch, 1, -1]) # Row vector.\n\n dist = row_norms_A - 2. * tf.matmul(A, B, transpose_b=True) + row_norms_B\n return tf.math.maximum(dist, 0.)", "title": "" }, { "docid": "52d311520e7a5729489701d1e67f4421", "score": "0.596113", "text": "def matrix_subtraction(a, b):\n c = [[0 for y in range(len(a[0]))] for x in range(len(a))]\n for i in range(len(a)):\n for j in range(len(a[0])):\n c[i][j] = a[i][j] - b[i][j]\n return c", "title": "" }, { "docid": "3e70fe5dc1aba3f9f30be98680ad36db", "score": "0.5939913", "text": "def distance(first, second):\n return sqrt(sum(((first - second) ** 2).ravel()))", "title": "" }, { "docid": "7a3d4c5db520737ef5a4a7ae2976b72d", "score": "0.59383345", "text": "def pairwise_euclidean_distances( coord_array_a, coord_array_b ):\n N = len(coord_array_a)\n M = len(coord_array_b)\n assert coord_array_a.shape[-1] == coord_array_b.shape[-1]\n ndim = coord_array_a.shape[-1]\n\n distances = numpy.zeros((N,M), dtype=numpy.float32)\n for i in range(ndim):\n tmp = numpy.empty_like(distances) # force float32\n pairwise_subtracted = numpy.subtract.outer(coord_array_a[:,i], coord_array_b[:,i], out=tmp)\n squared = numpy.power(pairwise_subtracted, 2, out=pairwise_subtracted)\n distances[:] += squared\n\n numpy.sqrt(distances, out=distances)\n return distances", "title": "" }, { "docid": "6311c505ed6123bcc29a15ee5fd8364b", "score": "0.593795", "text": "def dist_numba(A, B, p):\n sum = 0\n for i in prange(A.shape[0]):\n for j in prange(A.shape[1]):\n sum += abs(A[i, j] - B[i, j]) ** p\n return sum ** (1 / p)", "title": "" }, { "docid": "2ba6900881ece7a66bb40934ce97b3ca", "score": "0.5934452", "text": "def euclidean_distance(vects):\n x, y = vects\n return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))", "title": "" }, { "docid": "629b9b7e1daafa6b976b49d7b58580d8", "score": "0.5930885", "text": "def distance(vector1, vector2):\n return np.linalg.norm(vector1 - vector2)", "title": "" }, { "docid": "a72e8f5f3c3ed77103dccd231a2a1e38", "score": "0.59288937", "text": "def distance_to(self, vector):\n # pythagoras in 3D\n dist = 0.0\n for i, j in zip(self, vector):\n dist += (i - j)**2\n return dist**0.5", "title": "" }, { "docid": "5b53d330753cc75ec1317fbc19b5bcc2", "score": "0.5922461", "text": "def vector_distance(v1, v2):\n return math.sqrt(sum([(x1-x2)*(x1-x2) for x1, x2 in zip(v1, v2)]))", "title": "" }, { "docid": "4f467ce24862481d3dfb85dfcb0d4283", "score": "0.5914176", "text": "def distance(self):\n costs = self.calculate_costs()\n print(cols[:self.N])\n #M1 = nx.to_numpy_matrix(self.g1)\n #M2 = nx.to_numpy_matrix(self.g2)\n \n self.Mindices = cols[:self.N]\n return np.sum(costs)", "title": "" }, { "docid": "a0a1cb4e7d0bf558c597e4c9dd07ce58", "score": "0.59123456", "text": "def pairwise_distances(a: torch.Tensor, b: torch.Tensor, p=2):\n\n if len(a.shape) != 3:\n raise ValueError(\"Invalid shape for a. Must be [m, n, d] but got\", a.shape)\n if len(b.shape) != 3:\n raise ValueError(\"Invalid shape for a. Must be [m, n, d] but got\", b.shape)\n return (a.unsqueeze(2) - b.unsqueeze(1)).abs().pow(p).sum(3)", "title": "" }, { "docid": "a0a1cb4e7d0bf558c597e4c9dd07ce58", "score": "0.59123456", "text": "def pairwise_distances(a: torch.Tensor, b: torch.Tensor, p=2):\n\n if len(a.shape) != 3:\n raise ValueError(\"Invalid shape for a. Must be [m, n, d] but got\", a.shape)\n if len(b.shape) != 3:\n raise ValueError(\"Invalid shape for a. 
Must be [m, n, d] but got\", b.shape)\n return (a.unsqueeze(2) - b.unsqueeze(1)).abs().pow(p).sum(3)", "title": "" }, { "docid": "efd2264c774ad977c48bf888c4045348", "score": "0.59114164", "text": "def euclidean_distance(vectors):\n # Todo: Possibly Euclidean distance is also not useful, but angular distance is needed for high-dimensional space\n\n # unpack the vectors into separate lists\n (featsA, featsB) = vectors\n # compute the sum of squared distances between the vectors\n sum_squared = K.sum(K.square(featsA - featsB), axis=1, keepdims=True)\n # return the euclidean distance between the vectors\n return K.sqrt(K.maximum(sum_squared, K.epsilon()))", "title": "" }, { "docid": "dcc26fb00e78e7891e5d656cb2ae8e73", "score": "0.59098953", "text": "def min_distances(A, B, distances):\n assert A[0].size == A[1].size\n assert B[0].size == B[1].size\n assert A[0].size > 0 and B[0].size > 0\n for i in range(A[0].size):\n distances[i] = euclidean_distance(A[0][i], A[1][i], B[0][0], B[1][0])\n for j in range(B[0].size):\n d = euclidean_distance(A[0][i], A[1][i], B[0][j], B[1][j])\n if d < distances[i]:\n distances[i] = d", "title": "" }, { "docid": "4acaf2c7d8562ca1dc82fc64c3c3a7f9", "score": "0.5896878", "text": "def distance(a, b):\n dx = abs(a[0] - b[0])\n x = min(dx, abs(A - dx))\n \n dy = abs(a[1] - b[1])\n y = min(dy, abs(B - dy))\n \n dz = abs(a[2] - b[2])\n z = min(dz, abs(C - dz))\n \n return sp.sqrt(x**2 + y**2 + z**2)", "title": "" }, { "docid": "cab4cb30e05bd265b2cf1b23235cd489", "score": "0.589407", "text": "def distM(self, x):\n\n result = np.zeros((len(x), len(x)))\n for i in range(len(x)):\n for j in range(len(x)):\n result[i,j] = euclid_distance(x[i],x[j])\n return result", "title": "" }, { "docid": "63980fa1e70d05424c29b2765c1cba0e", "score": "0.58916867", "text": "def distance(vector):\n\n return np.sqrt(np.sum(np.square(vector)))", "title": "" }, { "docid": "e9d6aba03a6409f9ab13897c9e147ec0", "score": "0.58781874", "text": "def euclidian_distance(vector_a, vector_b):\r\n\r\n return math.sqrt(sum([(vector_a - vector_b) ** 2 for vector_a, vector_b in zip(vector_a, vector_b)]))", "title": "" }, { "docid": "552c5484cbf2bd029cff8c9cfd619ab1", "score": "0.58758956", "text": "def distance(a, b):\n if a is None or b is None:\n raise Exception(\"Error: Neither a nor b can be none.\")\n elif len(a) != len(b):\n raise Exception(\"Error: a and b should have the same dimension\")\n sum = 0\n for index in range(len(a)):\n sum += (a[index] - b[index])**2\n return round(math.sqrt(sum), 2)", "title": "" }, { "docid": "291403ba8924d35391970c414c7b437e", "score": "0.58720523", "text": "def dist_cpu(A, B, p):\n sum = 0\n for i in range(A.shape[0]):\n for j in range(A.shape[1]):\n sum += abs(A[i,j] - B[i,j])**p\n return sum**(1/p)", "title": "" }, { "docid": "d6e3c5226f662a83c855cc98a27b1b9c", "score": "0.5867506", "text": "def distance_point_point(a, b):\n ab = subtract_vectors(b, a)\n return length_vector(ab)", "title": "" }, { "docid": "83ff10c9abe0bea86301139fe92228d6", "score": "0.58656734", "text": "def squared_dist(A):\r\n expanded_a = tf.expand_dims(A, 1)\r\n expanded_b = tf.expand_dims(A, 0)\r\n distances = tf.reduce_mean(tf.squared_difference(expanded_a, expanded_b), 2)\r\n return distances", "title": "" }, { "docid": "91bffd7513ad7e4e6421e4726dc240a7", "score": "0.5826428", "text": "def euclidean_distance(a, b):\n return(\n sqrt(\n sum(\n [(a_i - b_i)**2 for a_i, b_i in zip(a, b)]\n )\n )\n )", "title": "" }, { "docid": "8359c323b358d437c8b6582bbf0c61c7", "score": "0.58201265", "text": 
"def distance(vec1, vec2):\n return np.dot(vec1, vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2))", "title": "" }, { "docid": "7a742570f410803c6f7203f5e51b3a9c", "score": "0.58172035", "text": "def distance_matrix_l1(x, y):\n m = pairwise_difference_vector_batch(x, y)\n \n return m.abs().sum(2)", "title": "" }, { "docid": "c38aa5733c8bbc7db559fb796c8b7595", "score": "0.58076125", "text": "def pairwise_edist(A, B, keepdims=False):\n A_norm = tf.reduce_sum(A ** 2, axis=1, keepdims=keepdims)\n B_norm = tf.reduce_sum(B ** 2, axis=1, keepdims=keepdims)\n pw_A_dot_B = tf.reduce_sum(tf.multiply(A, B), axis=1, keepdims=keepdims)\n dist = tf.sqrt(A_norm + B_norm - 2 * pw_A_dot_B)\n return dist", "title": "" }, { "docid": "0192c9f75ddc5c46e9e4d54f26fe63c1", "score": "0.57975763", "text": "def eucli_distance(a, b):\n if not len(a) == len(b):\n raise ValueError(\"a and b must be of the same size\")\n return math.sqrt(np.power(a-b, 2).sum())", "title": "" }, { "docid": "9014ccfd35f9867413835af60b74b16f", "score": "0.5791496", "text": "def euclidian_distance(a, b):\r\n return np.linalg.norm(a - b, 2)", "title": "" }, { "docid": "4ab65ebff8ae1427f9c809936e2d0c1a", "score": "0.578901", "text": "def distance_tensor(self, X: torch.Tensor, Nbrs: torch.Tensor,\n box_size: Union[torch.Tensor, None], B: int, N: int,\n M: int, d: int) -> torch.Tensor:\n if box_size is not None and len(box_size) != d:\n raise ValueError(\"Length of `box_size` must be equal to `d`\")\n\n flat_neighbors = torch.reshape(Nbrs, (-1, N * M))\n neighbor_coords = torch.stack(\n [X[b, flat_neighbors[b]] for b in range(B)])\n neighbor_coords = torch.reshape(neighbor_coords, (-1, N, M, d))\n D = neighbor_coords - torch.unsqueeze(X, 2)\n if box_size is not None:\n box_size = torch.reshape(box_size, (1, 1, 1, d))\n D -= torch.round(D / box_size) * box_size\n\n return D", "title": "" }, { "docid": "e715c4f8b6d31937ea20dcbd4f94d0ef", "score": "0.5784734", "text": "def dist(vec1, vec2):\n return np.linalg.norm(vec1-vec2)", "title": "" }, { "docid": "6e7a46f3ec24a18f46a92c8d45f4e3e2", "score": "0.57786006", "text": "def get_distance(a, b):\n return abs(a[0] - b[0]) + abs(a[1] - b[1])", "title": "" }, { "docid": "80cd72af8d92189a8d76ed495f22d379", "score": "0.57643646", "text": "def distance_matrix(self, D: torch.Tensor) -> torch.Tensor:\n return torch.sqrt(torch.sum(torch.mul(D, D), 3))", "title": "" }, { "docid": "7d24374af0a8cff8f3b27c0e8ba9682a", "score": "0.57495534", "text": "def squared_dist(A):\n expanded_a = tf.expand_dims(A, 1)\n expanded_b = tf.expand_dims(A, 0)\n distances = tf.reduce_mean(tf.math.squared_difference(expanded_a, expanded_b), 2)\n return distances", "title": "" }, { "docid": "13d635838a5fe52238dc23b885f5f748", "score": "0.5749381", "text": "def calc_dist(first, second):\n\tdistance = 0\n\tfor x in range(1,len(first)):\n\t\tdistance += pow(first[x] - second[x], 2)\n\treturn math.sqrt(distance)", "title": "" }, { "docid": "0e17633d4606fe821c38c34e4c5ff465", "score": "0.57466865", "text": "def measure_distance(self, vector_1:pd.Series, vector_2:pd.Series) -> float:", "title": "" }, { "docid": "168a87b75d58bbdd383a07264a419f95", "score": "0.57386404", "text": "def cosine_distances(X, Y):\r\n M = len(X)\r\n if Y.ndim == 1:\r\n N = 1\r\n D = np.zeros((M,N))\r\n\r\n for m in range(0,M):\r\n dot = np.dot(X[m], Y)\r\n Ux = np.linalg.norm(X[m])\r\n Uy = np.linalg.norm(Y)\r\n\r\n s = dot / (Ux * Uy)\r\n\r\n D[m] = 1 - s\r\n else:\r\n N = len(Y)\r\n D = np.zeros((M,N))\r\n\r\n for m in range(0,M):\r\n for n in range(0,N):\r\n dot = 
np.dot(X[m], Y[n])\r\n Ux = np.linalg.norm(X[m])\r\n Uy = np.linalg.norm(Y[n])\r\n\r\n s = dot / (Ux * Uy)\r\n\r\n D[m,n] = 1 - s\r\n\r\n return D", "title": "" }, { "docid": "734d7a4f8c195018fbafb699bd846b73", "score": "0.5736716", "text": "def pairwise_siamese_dist(self, A, B):\n with tf.compat.v1.variable_scope(\"pair_siam_dist\"), tf.device(\n self.next_device()):\n xx = tf.expand_dims(A, -1)\n xx = tf.tile(xx, tf.stack([1, 1, tf.shape(B)[0]]))\n\n yy = tf.expand_dims(B, -1)\n yy = tf.tile(yy, tf.stack([1, 1, tf.shape(A)[0]]))\n yy = tf.transpose(yy, perm=[2, 1, 0])\n zz = tf.reduce_sum(tf.abs(xx - yy) * tf.exp(self.alpha), 1)\n return zz", "title": "" }, { "docid": "551d1ba8eeea8f02079ea6aca3ea1151", "score": "0.57265234", "text": "def distance_matrix_l2(x, y):\n m = pairwise_difference_vector_batch(x, y)\n \n return m.pow(2).sum(2).sqrt().squeeze()", "title": "" }, { "docid": "097f14e07636252efd3eda83fe8ca62c", "score": "0.5726431", "text": "def difference_vecteur(a, b):\n \n return [a[0] - b[0], a[1] - b[1]]", "title": "" }, { "docid": "b1e826acf3518a1b12d18567b6fb0f10", "score": "0.5721511", "text": "def beam_distances(self):\n dist = np.zeros(len(self.beams))\n for n, beam in enumerate(self.beams):\n dist[n] = beam.total_distance()\n return dist", "title": "" }, { "docid": "01355bb6f218f6b0b73ae4d767cbb581", "score": "0.57180476", "text": "def euclidean_distances(X):\n\n Y = X\n X = _transpose_on_first_two_axes(X)\n\n XX = K.expand_dims(K.sum(K.square(X), axis=1), 0)\n YY = _transpose_on_first_two_axes(XX)\n\n distance = _dot(X, Y)\n distance_shapes = K.int_shape(distance)\n distance *= -2\n distance += XX\n distance += YY\n distance = K.maximum(distance,\n K.constant(0, dtype=distance.dtype))\n distance._keras_shape = distance_shapes\n return distance", "title": "" }, { "docid": "f703c1930aae94c96b52941f46401944", "score": "0.5696638", "text": "def dist_sq(a, b):\n return sum([(xa - xb) ** 2 for xa, xb in zip(a, b)])", "title": "" }, { "docid": "29d2595d8a3f9070debcded043368517", "score": "0.56800634", "text": "def get_distances(centroid, points):\n return np.linalg.norm(points - centroid, axis=1)", "title": "" }, { "docid": "b8b6c08872e988f5f58dcc051676d898", "score": "0.56785434", "text": "def squared_euclidean_distance(a, b):\n result = numpy.float32(0.0)\n for i in range(len(a)):\n result += (a[i] - b[i])**2\n return result", "title": "" }, { "docid": "82d7114065e54fbcfec6f9548a4eaad2", "score": "0.5678065", "text": "def cosine_distances(X, Y):\n\n M = X.shape[0]\n N = Y.shape[0]\n D = np.zeros([M, N])\n\n for i in range(M):\n for j in range(N):\n n_1 = np.sqrt(np.dot(X[i], X[i]))\n n_2 = np.sqrt(np.dot(Y[j], Y[j]))\n # This yield the cosine value of the two vectors, which range from 1 to -1\n D[i, j] = np.dot(X[i], Y[j]) / (n_1 * n_2)\n\n # Larger value (closer to 1) means the two vectors have similar direction, so they\n # should have smaller distance\n return 1 - D", "title": "" }, { "docid": "4e09d3d7171f5f847a8fb073ad2caee5", "score": "0.56737864", "text": "def calc_dis(point_list1, point_list2):\r\n y = []\r\n for i in range(point_list1.__len__()):\r\n y.append(point_list2)\r\n y = np.array(y)\r\n x=[]\r\n for i in range(point_list2.__len__()):\r\n x.append(point_list1)\r\n x = np.array(x)\r\n x = x.transpose(1, 0, 2)\r\n distance_matrix = np.linalg.norm(np.array(x) - np.array(y), axis=2)\r\n return distance_matrix", "title": "" }, { "docid": "23d73f5120348cd47fdcda47ed685a06", "score": "0.5671712", "text": "def pairwise_l2_distance(A, B):\n row_norms_A = 
tf.math.reduce_sum(tf.square(A), axis=1)\n row_norms_A = tf.reshape(row_norms_A, [-1, 1]) # Column vector.\n\n row_norms_B = tf.math.reduce_sum(tf.square(B), axis=1)\n row_norms_B = tf.reshape(row_norms_B, [1, -1]) # Row vector.\n\n dist = row_norms_A - 2. * tf.matmul(A, B, transpose_b=True) + row_norms_B\n return tf.math.maximum(dist, 0.)", "title": "" }, { "docid": "3f17710c371f07fe87682760e442a93a", "score": "0.5666328", "text": "def dist(self, b):\n distance = sqrt((b.x - self.x) ** 2 + (b.y - self.y) ** 2)\n return distance", "title": "" }, { "docid": "e045377b5fe9fe603cf5c878cd888650", "score": "0.5661958", "text": "def euclidian(A,B):\n return np.linalg.norm(A-B)", "title": "" }, { "docid": "c69b1355759f7189fe1275de58b23d24", "score": "0.5659475", "text": "def euclidean_distance(vector1, vector2):\n z = 0\n for (v1, v2) in zip(vector1, vector2):\n z += (v1 - v2) ** 2\n return math.sqrt(z)", "title": "" }, { "docid": "72533805c3077426cc35e00448bb4427", "score": "0.5652093", "text": "def distance(a, b):\r\n x1, y1 = a[0], a[1]\r\n x2, y2 = b[0], b[1]\r\n return round(math.sqrt((x1-x2)**2 + (y1-y2)**2))", "title": "" }, { "docid": "169227d4de47447b759a496fd88d9c12", "score": "0.56466854", "text": "def calculate_distance(lhs, rhs):\n return numpy.linalg.norm((numpy.array(lhs) - numpy.array(rhs)))", "title": "" }, { "docid": "f160015286ab4aec62b3d1d51740807f", "score": "0.56271565", "text": "def produit_vectoriel(a, b):\n \n return a[0] * b[1] - a[1] * b[0]", "title": "" }, { "docid": "1e58157569d33d5c2b7429807c98d3b4", "score": "0.5619837", "text": "def subtractMatrices(matrixA, matrixB):\n return map(lambda i: map(lambda x, y: x - y, matrixA[i], matrixB[i]),\n xrange(len(matrixA)))", "title": "" }, { "docid": "08a2c9fa437d258abda07054c6b13c8e", "score": "0.5617198", "text": "def compute_distances_one_loop(self, X):\n num_test = X.shape[0]\n num_train = self.X_train.shape[0]\n dists = np.zeros((num_test, num_train))\n for i in xrange(num_test):\n dists[i] = np.linalg.norm(self.X_train - X[i], axis=1)\n return dists", "title": "" }, { "docid": "5acedc0b30bd5fe1cf2fffbca00cdfac", "score": "0.5617071", "text": "def dist(a, b):\n from math import sqrt\n return sqrt((a.y - b.y)**2 + (a.x - b.x)**2)", "title": "" }, { "docid": "f87baaf1bac246cedae642c3769fdca2", "score": "0.5614947", "text": "def matrix_dist(X, Y):\n return numpy.sqrt((X[:, 0] - Y[:, 0]) ** 2 + (X[:, 1] - Y[:, 1]) ** 2)", "title": "" }, { "docid": "118d6a4851403b31e82ca97d3618e4a0", "score": "0.56115663", "text": "def dist(a, b):\n dx = a.x - b.x\n dy = a.y - b.y\n return math.sqrt(dx * dx + dy * dy)", "title": "" }, { "docid": "1a24f020b9ccb54ff82918465e039ec3", "score": "0.56030965", "text": "def distance(a,b):\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n,m)) space\n a,b = b,a\n n,m = m,n\n current = range(n+1)\n for i in range(1,m+1):\n previous, current = current, [i]+[0]*n\n for j in range(1,n+1):\n add, delete = previous[j]+1, current[j-1]+1\n change = previous[j-1]\n if a[j-1] != b[i-1]:\n change = change + 1\n current[j] = min(add, delete, change)\n return current[n]", "title": "" }, { "docid": "3e121742de1a96883b303cc1643f5a50", "score": "0.5577346", "text": "def distance_matrix(self) -> np.ndarray:\n # TODO check handling of unknown distances\n G = self.directed\n selected_edges = [(u, v) for u, v, d in G.edges(data=True) if DIST in d]\n return (\n nx.to_numpy_array(\n nx.to_undirected(G.edge_subgraph(selected_edges)), weight=DIST\n )\n ** 2\n )", "title": "" }, { "docid": 
"3f27ad03f8c3aafa703ef861cb3d073d", "score": "0.55584216", "text": "def _distance(feature0, feature1):\n return np.power(feature0 - feature1, 2).sum()", "title": "" }, { "docid": "664d949332985e1f628edeacfdf679d0", "score": "0.5557304", "text": "def distance_point_point_sqrd(a, b):\n ab = subtract_vectors(b, a)\n return length_vector_sqrd(ab)", "title": "" }, { "docid": "2e82a00de70d0ce824591f74a24cd60b", "score": "0.5555838", "text": "def eucledian_norm(vec_a, vec_b):\n distance = vec_a - vec_b\n # return np.sqrt(np.sum(np.square(distance)))\n # return np.sqrt(np.einsum('ij, ij->i', distance, distance))\n return np.linalg.norm(distance, ord = 'fro', axis = 1)", "title": "" } ]
e3681f8e80d992b6a48d659329f3e97f
@ param query_embedds = (n, d) @ param target_embedds = (n, d) @ param img_ids = (n,)
[ { "docid": "5aac2fbdaeacbcc9c51d58942cf4ec1f", "score": "0.6189335", "text": "def ranking(query_embedds, target_embedds, img_ids):\n\n cos_sim = torch.mm(query_embedds,target_embedds.T)/ \\\n torch.mm(query_embedds.norm(2, dim=1, keepdim=True),\n target_embedds.norm(2, dim=1, keepdim=True).T)\n _, idx = torch.topk(cos_sim, len(query_embedds)//100, dim=1)\n top20 = idx.cpu().numpy()\n img_ids = np.array(img_ids)\n count = 0\n with open('answer.csv', 'w') as f:\n f.write(\"Descritpion_ID,Top_20_Image_IDs\\n\")\n for i, img_id in enumerate(img_ids):\n top_imgs = img_ids[top20[i]]\n top_imgs_str = \" \".join(list(top_imgs))\n text_id = img_id.split(\".\")[0]+\".txt\"\n f.write(text_id+\",\"+top_imgs_str+\"\\n\")\n if img_id in list(top_imgs):\n count+=1\n print(\"count\", count)", "title": "" } ]
[ { "docid": "2a0d8c3f6444f00756c4da15a4161f82", "score": "0.6125822", "text": "def get_images(k):\n\n keys = np.linspace(0, 99, 50)\n values = np.random.randn(50, 50)\n image_embeddings = dict(zip(keys, values))\n\n # with open(\"image_embeddings.pickle\", \"rb\") as f:\n # image_embeddings = pickle.load(f)\n\n with open(\"url.pickle\", \"rb\") as urls:\n url = pickle.load(urls)\n\n # GETTING THE SIMILAR IDS\n caption_embed = get_query.query() # embedded text\n caption_embed = fu.normalize(caption_embed)\n ids, embeddings = (shit.split(image_embeddings))\n for embed in embeddings:\n embed.reshape((50, 1))\n\n embed_values = []\n for e in embeddings:\n embed_values.append((caption_embed @ e).data)\n embed_values = np.array(embed_values)\n\n max_values = np.argsort(embed_values, axis=0)[:k].flatten()\n similar_image_ids = []\n\n for m in max_values:\n similar_image_ids.append(ids[m])\n # END ID ACQUISITION\n\n image_links = []\n for img_id in similar_image_ids:\n image_links.append(url[img_id])\n\n for i in image_links:\n response = requests.get(i)\n img = Image.open(BytesIO(response.content))\n\n return similar_image_ids", "title": "" }, { "docid": "e22e73abe8ddacf0792bf8c06e485f86", "score": "0.5829802", "text": "def query(self, images):\n if self.buffer_size == 0:\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.img_num < self.buffer_size:\n self.img_num = self.img_num + 1\n self.image_buffer.append(image)\n return_images.append(image)\n else:\n use_buffer = np.random.random() < self.buffer_ratio\n if use_buffer:\n random_id = np.random.randint(0, self.buffer_size)\n image_tmp = self.image_buffer[random_id].clone()\n self.image_buffer[random_id] = image\n return_images.append(image_tmp)\n else:\n return_images.append(image)\n return_images = torch.cat(return_images, 0)\n return return_images", "title": "" }, { "docid": "4c692ea5aa1ea036507e41f5991b7013", "score": "0.5704483", "text": "def query(self, images):\n if self.pool_size == 0:\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size:\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5:\n random_id = random.randint(0, self.pool_size - 1)\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else:\n return_images.append(image)\n return_images = torch.cat(return_images, 0)\n return return_images", "title": "" }, { "docid": "4305676d81fddbf49b61c17aaa65a782", "score": "0.56069136", "text": "def handle_flikr_embed(session,post_dict):\n logging.debug(\"Processing flikr embed\")\n # Extract image link from post dict\n thumbnail_link = post_dict[\"thumbnail_url\"]\n # Download videos if there are any\n media_id_list = download_image_link(session,thumbnail_link)\n logging.debug(\"Finished downloading flikr embed\")\n return media_id_list", "title": "" }, { "docid": "2a8d15e4f2fa1496522854b4f6d7adfe", "score": "0.5513861", "text": "def pack_query_data(people_imgs):\n uids = list(range(-len(people_imgs), 0))\n pids = list(range(-len(people_imgs), 0))\n imgs = None\n\n for person_img in people_imgs:\n pil_img = Image.fromarray(cv2.cvtColor(person_img, cv2.COLOR_BGR2RGB))\n img = TimingRecorder.transformer(pil_img)\n img = img.view(1, 3, 256, 128)\n if imgs is None:\n imgs = img\n else:\n imgs = torch.cat((imgs, img), dim=0)\n\n return uids, pids, imgs", "title": "" 
}, { "docid": "26426e7880382deaafb6dc63fcbf4ba3", "score": "0.55134726", "text": "def _get_multiple_evidences_predictions_normal(self, query_ids: torch.Tensor,\n query_contents: torch.Tensor,\n query_lens: np.ndarray,\n query_sources: torch.Tensor,\n evd_doc_ids: torch.Tensor,\n evd_doc_contents: torch.Tensor,\n evd_docs_lens: np.ndarray,\n evd_sources: torch.Tensor,\n labels: np.ndarray,\n n: int, **kargs) -> torch.Tensor:\n evd_count_per_query = kargs[KeyWordSettings.EvidenceCountPerQuery] # (B, )\n assert evd_doc_ids.size() == evd_docs_lens.shape\n assert query_ids.size(0) == evd_doc_ids.size(0)\n assert query_lens.shape == labels.size()\n assert query_contents.size(0) == evd_doc_contents.size(0) # = batch_size\n _, L = query_contents.size()\n batch_size = query_ids.size(0)\n # prunning at this step to remove padding\\\n e_lens, e_conts, q_conts, q_lens = [], [], [], []\n expaned_labels = []\n for evd_cnt, q_cont, q_len, evd_lens, evd_doc_cont, label in \\\n zip(evd_count_per_query, query_contents, query_lens, evd_docs_lens, evd_doc_contents, labels):\n evd_cnt = int(torch_utils.cpu(evd_cnt).detach().numpy())\n e_lens.extend(list(evd_lens[:evd_cnt]))\n e_conts.append(evd_doc_cont[:evd_cnt, :]) # stacking later\n q_lens.extend([q_len] * evd_cnt)\n q_conts.append(q_cont.unsqueeze(0).expand(evd_cnt, L))\n expaned_labels.extend([int(torch_utils.cpu(label).detach().numpy())] * evd_cnt)\n # concat\n e_conts = torch.cat(e_conts, dim = 0) # (n1 + n2 + ..., R)\n e_lens = np.array(e_lens) # (n1 + n2 + ..., )\n q_conts = torch.cat(q_conts, dim = 0) # (n1 + n2 + ..., R)\n q_lens = np.array(q_lens)\n assert q_conts.size(0) == q_lens.shape[0] == e_conts.size(0) == e_lens.shape[0]\n\n d_new_indices, d_old_indices = torch_utils.get_sorted_index_and_reverse_index(e_lens)\n e_lens = my_utils.gpu(torch.from_numpy(e_lens), self._use_cuda)\n q_new_indices, q_restoring_indices = torch_utils.get_sorted_index_and_reverse_index(query_lens)\n query_lens = my_utils.gpu(torch.from_numpy(query_lens), self._use_cuda)\n # query_lens = my_utils.gpu(torch.from_numpy(query_lens), self._use_cuda)\n additional_paramters = {\n KeyWordSettings.Query_lens: query_lens,\n KeyWordSettings.Doc_lens: evd_docs_lens,\n KeyWordSettings.DocLensIndices: (d_new_indices, d_old_indices, e_lens),\n KeyWordSettings.QueryLensIndices: (q_new_indices, q_restoring_indices, query_lens),\n KeyWordSettings.QuerySources: query_sources,\n KeyWordSettings.DocSources: evd_sources,\n KeyWordSettings.TempLabel: labels,\n KeyWordSettings.DocContentNoPaddingEvidence: e_conts,\n KeyWordSettings.QueryContentNoPaddingEvidence: q_conts,\n KeyWordSettings.EvidenceCountPerQuery: evd_count_per_query,\n KeyWordSettings.FIXED_NUM_EVIDENCES: n\n }\n predictions = self._net(query_contents, evd_doc_contents, **additional_paramters) # (B, )\n # labels.unsqueeze(-1).expand(batch_size, n).reshape(batch_size * n)\n # labels = torch_utils.gpu(torch.from_numpy(np.array(expaned_labels)), self._use_cuda)\n # print(\"Labels: \", labels)\n # mask = (evd_doc_ids >= 0).view(batch_size * n).float()\n return self._loss_func(predictions, labels.float())", "title": "" }, { "docid": "f6844efe7ac0d2f70fb8945117d175ff", "score": "0.54689616", "text": "def get_image_list(releaseid):\n ...", "title": "" }, { "docid": "c36dbba6bc9e1aeb9366f525170254c5", "score": "0.5445434", "text": "def query(self, images):\n if self.pool_size == 0:\n return images\n return_images = []\n for image in images:\n if self.num_imgs < self.pool_size:\n self.num_imgs += 1\n self.images.append(image)\n 
return_images.append(image)\n else:\n p = np.random.rand()\n if p > 0.5:\n random_id = np.random.randint(0, self.pool_size)\n tmp = self.images[random_id].copy()\n self.images[random_id] = image\n return_images.append(tmp)\n else:\n return_images.append(image)\n return np.array(return_images)", "title": "" }, { "docid": "c16af1b378e0e96b52c0f5b88773092e", "score": "0.54368937", "text": "def selectRows(sentEmbeds, idxs):\n results = []\n for idx in idxs:\n if idx == -1:\n results.append(torch.zeros((1, 2 * configuration['kiperwasser']['rnnUnitNum']), dtype=dtype).to(device))\n else:\n results.append(sentEmbeds[idx].view(1, -1).to(device))\n return torch.cat(results)", "title": "" }, { "docid": "6fbf994d8832957b36ce0c53c704befc", "score": "0.5318698", "text": "def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):\n quesIds = quesIds if type(quesIds) == list else [quesIds]\n quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]\n ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]\n\n if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(quesIds) == 0:\n anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa], [])\n else:\n anns = self.dataset['annotations']\n anns = anns if len(quesTypes) == 0 else [\n ann for ann in anns if ann['question_type'] in quesTypes]\n anns = anns if len(ansTypes) == 0 else [\n ann for ann in anns if ann['answer_type'] in ansTypes]\n ids = [ann['image_id'] for ann in anns]\n return ids", "title": "" }, { "docid": "35dcf57bd7045a81857eb964b2f9a9d4", "score": "0.53042674", "text": "def query(self, images):\r\n if self.pool_size == 0: # if the buffer size is 0, do nothing\r\n return images\r\n return_images = []\r\n for image in images:\r\n image = torch.unsqueeze(image.data, 0)\r\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\r\n self.num_imgs = self.num_imgs + 1\r\n self.images.append(image)\r\n return_images.append(image)\r\n else:\r\n p = random.uniform(0, 1)\r\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\r\n random_id = random.randint(0, self.pool_size - 1) # randint is inclusive\r\n tmp = self.images[random_id].clone()\r\n self.images[random_id] = image\r\n return_images.append(tmp)\r\n else: # by another 50% chance, the buffer will return the current image\r\n return_images.append(image)\r\n return_images = torch.cat(return_images, 0) # collect all the images and return\r\n return return_images", "title": "" }, { "docid": "a601f179348f7ceb0465b44616276175", "score": "0.5228106", "text": "def update_images(self):", "title": "" }, { "docid": "3443bad0ccfb4a22cd49606fda782f03", "score": "0.5216751", "text": "def loadImgs(self, ids=[]):\n if type(ids) == list:\n return [self.imgs[id] for id in ids]\n elif type(ids) == int:\n return [self.imgs[ids]]", "title": "" }, { "docid": "5a0d7b609fcfa2106f3be5660bcbd2df", "score": "0.52092993", "text": "def run(conn, params):\n images = []\n\n if params.get(\"Data_Type\") == 'Dataset':\n for dsId in params[\"IDs\"]:\n dataset = conn.getObject(\"Dataset\", dsId)\n if dataset:\n for image in dataset.listChildren():\n images.append(image)\n\n if len(images) == 0:\n return None\n\n for image in images:\n print(\"---- Processing image\", image.id)\n return images", "title": "" }, { "docid": "cab34cf6a80e977703a0613d3b9d2d85", "score": "0.51977825", "text": "def conjunction(images):\n 
pass", "title": "" }, { "docid": "7fee612bb560f597bf3ad8325b0fdd02", "score": "0.5192922", "text": "def query(self, images):\n if self.pool_size == 0: # if the buffer size is 0, do nothing\n return images\n\n return_images = []\n\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\n random_id = random.randint(\n 0, self.pool_size - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else: # by another 50% chance, the buffer will return the current image\n return_images.append(image)\n # collect all the images and return\n return_images = torch.cat(return_images, 0)\n return return_images", "title": "" }, { "docid": "c3f9929a1405b247e339098908966709", "score": "0.509739", "text": "def call(self, inputs):\n batch_size, h, w = tf.shape(inputs)[0], tf.shape(inputs)[1], tf.shape(inputs)[2]\n i = tf.range(w)\n j = tf.range(h)\n x_emb = self.col_embed(i)\n y_emb = self.row_embed(j)\n single_img_emb = tf.concat([\n tf.tile(x_emb[None], (h, 1, 1)),\n tf.tile(y_emb[:, None], (1, w, 1)),\n ],\n axis=-1)\n\n batch_emb = tf.tile(single_img_emb[None], (batch_size, 1, 1, 1))\n return batch_emb", "title": "" }, { "docid": "cbedda7c25a4e5719821d0fc14ee2b00", "score": "0.50831044", "text": "def update_imageset_ids(experiments, reflections):\n # input a list of ordered matching experiments and reflection tables.\n\n next_iset_id = 0\n imagesets_found = OrderedSet()\n for expt, table in zip(experiments, reflections):\n if \"imageset_id\" in table:\n assert len(set(table[\"imageset_id\"])) == 1\n iset = expt.imageset\n if iset not in imagesets_found:\n imagesets_found.add(iset)\n table[\"imageset_id\"] = flex.int(table.size(), next_iset_id)\n next_iset_id += 1\n else:\n iset_id = imagesets_found.index(iset)\n table[\"imageset_id\"] = flex.int(table.size(), iset_id)\n return reflections", "title": "" }, { "docid": "7dd6ea39d2a705595f93509f91dd5810", "score": "0.50658953", "text": "def processImages(self, imsq):\n pass", "title": "" }, { "docid": "b27466e9652a4b147e79196ab24e143b", "score": "0.50580966", "text": "def _embed(embedders, xg, mode=None):\n\n if mode is not None:\n for e in embedders:\n e.mode = mode\n\n return torch.cat([e(xg_j) for e, xg_j in zip(embedders, xg.T)], dim=1)", "title": "" }, { "docid": "62675314977c217eac9e8f1f6331494b", "score": "0.50363404", "text": "def generate_batch(images_embeddings, indexed_captions, batch_size, max_len=None):\n\n # added replace=False to avoid repetitions\n batch_indexes = np.random.choice(len(images_embeddings), batch_size, replace=False)\n batch_image_embeddings = images_embeddings[batch_indexes]\n captions_of_batch_images = indexed_captions[batch_indexes].tolist()\n\n # a little brutto\n batch_captions = []\n captions_batch_indexes = np.random.choice(5, batch_size)\n for i,captions in enumerate(captions_of_batch_images):\n batch_captions += [captions[captions_batch_indexes[i]]]\n\n\n batch_captions_matrix = batch_captions_to_matrix(batch_captions, 0, max_len)\n\n return {decoder.img_embeds: batch_image_embeddings,\n decoder.sentences: batch_captions_matrix}", "title": "" }, { "docid": 
"71cd25755e41e3e0aec5ffeecfca45cc", "score": "0.50174147", "text": "def disjunction(images):\n pass", "title": "" }, { "docid": "437acf87f30c64065fe387e2228ee1d3", "score": "0.5012557", "text": "def queue_image(self, image_id):", "title": "" }, { "docid": "a3a5b9fd3fe746eb865a40f04ed92156", "score": "0.50075275", "text": "def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):\n imgIds = imgIds if type(imgIds) == list else [imgIds]\n quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]\n ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]\n\n if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:\n anns = self.dataset['annotations']\n else:\n if not len(imgIds) == 0:\n anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA], [])\n else:\n anns = self.dataset['annotations']\n anns = anns if len(quesTypes) == 0 else [\n ann for ann in anns if ann['question_type'] in quesTypes]\n anns = anns if len(ansTypes) == 0 else [\n ann for ann in anns if ann['answer_type'] in ansTypes]\n ids = [ann['question_id'] for ann in anns]\n return ids", "title": "" }, { "docid": "63d09bb32009c7c86361258db6b4be89", "score": "0.49977264", "text": "def build_image_embeddings(self):\n\n # parameter initialization\n batch_norm_params = {\n \"is_training\": False,\n \"trainable\": False,\n # decay for the moving averages\n \"decay\": 0.9997,\n # epsilon to prevent 0s in variance\n \"epsilon\": 0.001,\n # collection containing the moving mean and moving variance\n \"variables_collections\": {\n \"beta\": None,\n \"gamma\": None,\n \"moving_mean\": [\"moving_vars\"],\n \"moving_variance\": [\"moving_vars\"],\n }\n }\n\n stddev = 0.1,\n dropout_keep_prob = 0.8\n\n with tf.variable_scope(\"InceptionV3\", \"InceptionV3\", [self.images]) as scope:\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n weights_regularizer=None,\n trainable=False):\n with slim.arg_scope(\n [slim.conv2d],\n weights_initializer=tf.truncated_normal_initializer(stddev=stddev),\n activation_fn=tf.nn.relu,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n net, end_points = inception_v3_base(self.images, scope=scope)\n with tf.variable_scope(\"logits\"):\n shape = net.get_shape()\n net = slim.avg_pool2d(net, shape[1:3], padding=\"VALID\", scope=\"pool\")\n net = slim.dropout(\n net,\n keep_prob=dropout_keep_prob,\n is_training=False,\n scope=\"dropout\")\n net = slim.flatten(net, scope=\"flatten\")\n\n # add summaries\n for v in end_points.values():\n tf.contrib.layers.summaries.summarize_activation(v)\n\n self.inception_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"InceptionV3\")\n\n # map inception output(net) into embedding space\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = tf.contrib.layers.fully_connected(\n inputs=net,\n num_outputs=self.embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n\n # save the embedding size in the graph\n tf.constant(self.embedding_size, name=\"embedding_size\")\n\n self.image_embeddings = image_embeddings", "title": "" }, { "docid": "fee9a2c8e53e8b89649db295c889603b", "score": "0.49957043", "text": "def compile_embedding(self, ids):\n pass", "title": "" }, { "docid": "5aa722c7bbe3ba0105cde528ed3c6e38", "score": "0.49870318", "text": "def prepare_queries(self, queries: List[Instances], mask):\n for x in queries:\n x.pred_boxes = Boxes(x.pred_boxes.tensor.detach())\n x.scores = x.scores.detach()\n x.features = 
x.features.detach()\n \n bs, vis_dim, box_dim = len(queries), queries[0].features.shape[-1], self.hidden_dim\n device = queries[0].features.device\n \n num_queries = max([len(x) for x in queries]) * self.num_duplicates\n\n query_masks = torch.ones((num_queries, num_queries, bs), dtype=torch.bool, device=device)\n query_vis_embs = torch.zeros((num_queries, bs, vis_dim), dtype=torch.float32, device=device)\n query_box_embs = torch.zeros((num_queries, bs, box_dim), dtype=torch.float32, device=device)\n query_boxes = torch.zeros((num_queries, bs, 4), dtype=torch.float32, device=device)\n query_classes = torch.full((num_queries, bs), -1, dtype=torch.int64, device=device)\n query_scores = torch.zeros((num_queries, bs), dtype=torch.float32, device=device)\n for i, x in enumerate(queries):\n size = len(x) * self.num_duplicates\n\n query_vis_embs[:size, i, :] = x.features.repeat(1, self.num_duplicates).view(-1, vis_dim)\n \n box = x.pred_boxes.tensor\n query_boxes[:size, i, :] = box.repeat(1, self.num_duplicates).view(-1, 4)\n \n box_embs_x = self.box_encoding(x.image_size, x.pred_boxes.tensor.clone())\n query_box_embs[:size, i, :] = box_embs_x.repeat(1, self.num_duplicates).view(-1, box_dim)\n\n classes_x = x.pred_classes.unsqueeze(-1).repeat(1, self.num_duplicates).view(-1)\n query_classes[:size, i] = classes_x\n\n scores_x = x.scores.unsqueeze(-1).repeat(1, self.num_duplicates).view(-1)\n query_scores[:size, i] = scores_x\n\n for q in range(max([len(x) for x in queries])):\n stx = q * self.num_duplicates\n end = (q + 1) * self.num_duplicates\n query_masks[stx:end, stx:end, :] = False\n\n query_vis_embs = self.query_norm(self.query_proj(query_vis_embs))\n query_instance = Instances(image_size=tuple(mask.shape[-2:]))\n query_instance.query_feat = query_vis_embs + query_box_embs\n query_instance.query_boxes = query_boxes\n query_instance.query_masks = query_masks\n query_instance.query_classes = query_classes\n query_instance.query_scores = query_scores\n\n return query_instance", "title": "" }, { "docid": "6666ab477e0848306c535fb2bda95a3c", "score": "0.49816033", "text": "def handle_imgur_videos(session,post_dict):# NEW TABLES\n logging.debug(\"Processing imgur videos\")\n # Extract video links from post dict\n imgur_urls = []\n video_items = post_dict[\"player\"]\n for video_item in video_items:\n embed_code = video_item[\"embed_code\"]\n # u'<iframe class=\"imgur-embed\" width=\"100%\" height=\"720\" frameborder=\"0\" src=\"http://i.imgur.com/wSBlRyv.gifv#embed\"></iframe>'\n # http://i.imgur.com/wSBlRyv.gifv\n if embed_code:\n # Process links so YT-DL can understand them\n logging.debug(\"embed_code: \"+repr(embed_code))\n embed_url_regex =\"\"\"src=[\"']([^?\"'#]+)\"\"\"\n embed_url_search = re.search(embed_url_regex, embed_code, re.IGNORECASE|re.DOTALL)\n if embed_url_search:\n embed_url = embed_url_search.group(1)\n imgur_urls.append(embed_url)\n continue\n\n logging.debug(\"imgur_urls: \"+repr(imgur_urls))\n\n # Download videos if there are any\n media_id_list = run_yt_dl_multiple(\n session = session,\n download_urls = imgur_urls,\n extractor_used=\"video_handlers.handle_imgur_videos()\",\n )\n logging.debug(\"Finished downloading imgur_video embeds\")\n return media_id_list", "title": "" }, { "docid": "ff96acb17ad79439fb15f971a9951f4a", "score": "0.497084", "text": "def get_batch(self):\r\n\r\n indices = np.random.choice(self.dataset_len, self.batch_size) # Sample non-repeated indices\r\n img_batch, hair_tags, eye_tags, face_tags, glass_tags = [], [], [], [], []\r\n for i in indices:\r\n 
img, hair_tag, eye_tag, face_tag, glass_tag = self.dataset.get_item(i)\r\n img_batch.append(img.unsqueeze(0))\r\n #print(img.shape)\r\n hair_tags.append(hair_tag.unsqueeze(0))\r\n #print(hair_tag.shape)\r\n #print(hair_tag.unsqueeze(0).shape) #torch.Size([1, 6])\r\n #input()\r\n eye_tags.append(eye_tag.unsqueeze(0)) #unsqueeze dim-0 for later concatenation\r\n face_tags.append(face_tag.unsqueeze(0))\r\n glass_tags.append(glass_tag.unsqueeze(0))\r\n img_batch = torch.cat(img_batch, 0)\r\n #print(img_batch)\r\n #print(img_batch.shape) #torch.Size([batch_size, 3, 128, 128])\r\n hair_tags = torch.cat(hair_tags, 0)\r\n #print(hair_tags)\r\n #print(hair_tags.shape) #torch.Size([128, 6])\r\n eye_tags = torch.cat(eye_tags, 0)\r\n #print(eye_tags.shape) #torch.Size([128, 4])\r\n face_tags= torch.cat(face_tags, 0)\r\n #print(face_tags.shape) #torch.Size([128, 3])\r\n glass_tags= torch.cat(glass_tags, 0)\r\n #print(glass_tags.shape) #torch.Size([128, 2])\r\n #input()\r\n return img_batch, hair_tags, eye_tags, face_tags, glass_tags", "title": "" }, { "docid": "99f9715d6d6b0be63dea1c88e49d7b2a", "score": "0.49688542", "text": "def pull(self, queries):\n self.start_queue()\n self.results = []\n threads = []\n self.info(\"Saving images in: %s\" % self.path)\n for query in queries:\n self.info(\"Searching for \\\"%s\\\" images...\" % query)\n start = 0\n while start < self.max_results:\n api_url = self.make_api_url(query, start)\n if self.is_url_in_history(api_url):\n self.info(\"Already have %s\" % api_url)\n start += 4 # Normal number of results.\n continue\n self.put_url_in_history(api_url)\n data = self.http_get(api_url)\n if data:\n msg = json.loads(data)\n if msg['responseStatus'] == 200 and msg['responseData']:\n # Good response.\n newresults = msg['responseData']['results']\n if not len(newresults):\n self.warn(\"No results in response: %s\" % msg)\n time.sleep(3)\n break\n self.results += newresults\n start += len(newresults)\n # Retrieve.\n for result in newresults:\n image_url = result['unescapedUrl']\n image_id = self.make_hash(image_url)\n image_path = self.get_std_image_path(image_id)\n if 'dogdrip.net/' in image_url:\n # Exceptionally slow domain. :-)\n self.info(\"Skipping %s\" % image_url)\n if self.is_url_in_history(image_url):\n # Seen it already.\n self.info(\"Already have %s\" % image_url)\n else:\n # Put URL in history, to avoid repeats.\n self.put_url_in_history(image_url)\n # Join queue to retrieve image content.\n self.q.put(image_url)\n else:\n # Bad response.\n details = msg.get('responseDetails', '')\n if details.startswith('qps rate exceeded'):\n self.warn('Google just complained about ' +\\\n 'too many queries per second. 
Pausing...')\n time.sleep(15)\n else:\n self.warn(\"Response not OK: %s\" % msg)\n time.sleep(3)\n break\n else:\n # No data in response.\n self.warn(\"No data from %s\" % api_url)\n time.sleep(3)\n break\n # Be nice to Google.\n time.sleep(1.5)\n # Wait for workers to finish processing the queue.\n self.q.join()\n # Summarise results.\n std_count = len(self.get_paths('.std'))\n if not std_count:\n self.warn(\"No images found.\")\n self.info(\"There are %d images.\" % std_count)", "title": "" }, { "docid": "109e3f5b2d4c5cdc9382f971f70c4f28", "score": "0.49677628", "text": "def embed(\n docs: Union[DocumentArray, DocumentArrayMemmap],\n embed_model: AnyDNN,\n device: str = 'cpu',\n) -> None:\n fm = get_framework(embed_model)\n globals()[f'_set_embeddings_{fm}'](docs, embed_model, device)", "title": "" }, { "docid": "e6f1f8c5da25e46c88b1f684d56c0865", "score": "0.49635997", "text": "def get_concat_embeddings(self, word_id_batch: torch.Tensor,\r\n tag_id_batch: torch.Tensor,\r\n deprel_id_batch: torch.Tensor) -> torch.Tensor:\r\n ##****BEGIN YOUR CODE****\r\n word_id_batch_temp = self.word_embed(word_id_batch)\r\n reshape_word = self.reshape_embedded(word_id_batch_temp)\r\n tag_id_batch_temp = self.tag_embed(tag_id_batch)\r\n reshape_tag = self.reshape_embedded(tag_id_batch_temp)\r\n deprel_id_batch_temp = self.deprel_embed(deprel_id_batch)\r\n reshape_deprel = self.reshape_embedded(deprel_id_batch_temp)\r\n x = torch.cat([reshape_word, reshape_tag, reshape_deprel], -1)\r\n ##****END YOUR CODE****\r\n return x", "title": "" }, { "docid": "b4264b3b2f9b502c5cab62e83a87888a", "score": "0.49584693", "text": "def all(ctx):\n print(\"Pushing all images\")", "title": "" }, { "docid": "89b538720820a544b67f4cd3db4c4fbb", "score": "0.49465153", "text": "def _filter_images(self):\n self.new_images = []\n self.total_new_images = 0\n for image_id in self.new_image_ids:\n self.total_new_images += 1\n self.new_images.append(self.images[image_id])", "title": "" }, { "docid": "c2e397fb48d52c84acfcf795c6ded174", "score": "0.4944695", "text": "def loadImgs(self, ids=[]):\n if _isArrayLike(ids):\n return [self.imgs[id] for id in ids]\n elif type(ids) == int:\n return [self.imgs[ids]]", "title": "" }, { "docid": "1f5b7c42e61cda7b06067c5b64b241a4", "score": "0.49264753", "text": "def get_imgs_by_ids(img_ids, coco, c, path_to_imgs):\n imgs = coco.loadImgs(img_ids)\n X = np.empty((c.batch_size, c.img_height, c.img_width, 3), dtype=np.float)\n for i, img in enumerate(imgs):\n path = os.path.join(path_to_imgs, img['file_name'])\n X[i] = image.img_to_array(image.load_img(path,\n target_size=[c.img_height, c.img_width]))/127.5 - 1\n return X", "title": "" }, { "docid": "70a9c0f45f8717a559e9826ae97f2c1f", "score": "0.49138683", "text": "def extract_references(cls, img, query_size, container_size):\n\n num_containers_row = int(np.floor(img.shape[0] / container_size))\n num_containers_col = int(np.floor(img.shape[1] / container_size))\n\n windows = np.zeros((num_containers_row * num_containers_col * 4, query_size, query_size))\n win_idx = 0\n\n mean_all, std_all = cls.moments(img, query_size)\n\n for y_contain in range(1, num_containers_row+1):\n for x_contain in range(1, num_containers_col+1):\n temp = img[(y_contain - 1) * container_size: min(img.shape[0],\n y_contain * container_size),\n (x_contain - 1) * container_size: min(img.shape[1],\n x_contain * container_size)]\n\n mean_contain = mean_all[(y_contain - 1) * container_size + query_size - 1: min(\n mean_all.shape[0] - query_size,\n (y_contain - 1) * container_size 
+ container_size),\n (x_contain - 1) * container_size + query_size - 1:min(\n mean_all.shape[1] - query_size,\n (x_contain - 1) * container_size + container_size)]\n\n std_contain = std_all[\n (y_contain - 1) * container_size + query_size - 1:min(\n mean_all.shape[0] - query_size, (\n y_contain - 1) * container_size + container_size),\n (x_contain - 1) * container_size + query_size - 1:min(\n mean_all.shape[1] - query_size, (\n x_contain - 1) * container_size + container_size)]\n\n y, x = np.where(mean_contain == mean_contain.max())\n if np.prod(y.shape) == 1:\n windows[win_idx, :, :] = temp[int(y):int(y + query_size),\n int(x):int(x + query_size)]\n win_idx = win_idx + 1\n \n y, x = np.where(mean_contain == mean_contain.min())\n if np.prod(y.shape) == 1:\n windows[win_idx, :, :] = temp[int(y):int(y + query_size),\n int(x):int(x + query_size)]\n win_idx = win_idx + 1\n \n y, x = np.where(std_contain == std_contain.max())\n if np.prod(y.shape) == 1:\n windows[win_idx, :, :] = temp[int(y):int(y + query_size),\n int(x):int(x + query_size)]\n win_idx = win_idx + 1\n \n y, x = np.where(std_contain == std_contain.min())\n if np.prod(y.shape) == 1:\n windows[win_idx, :, :] = temp[int(y):int(y + query_size),\n int(x):int(x + query_size)]\n win_idx = win_idx + 1\n\n return windows.copy()", "title": "" }, { "docid": "bcf109783809ee5f22c5af9187f6dbfa", "score": "0.48999664", "text": "def test_embed_list_in_object(self):\n\n obj_id = self.get_list_with_min_size(1)[0].id\n\n # Make sure that this object has a list attached to it.\n resp = self.client.get(reverse(self.api_details_url, kwargs={'pk': obj_id}), {'embed': ','.join([self.embedded_list_ids])})\n self.assertGreater(len(resp.data[self.embedded_list_ids]), 0)\n\n # Make sure the list is of ids integers:\n for obj in resp.data[self.embedded_list_ids]:\n self.assertIsInstance(obj, int)\n\n # Get the object with the embedded list\n resp = self.client.get(reverse(\n self.api_details_url, kwargs={'pk': obj_id}\n ) + '?embed=%s' % self.embedded_obj_p)\n self.assertGreater(len(resp.data[self.embedded_obj_p]), 0)\n\n # Make sure the list is now embedded\n for obj in resp.data[self.embedded_obj_p]:\n self.assertIn('id', obj)", "title": "" }, { "docid": "7ba4e2c6c944021c1fbe91260b313f2f", "score": "0.48985377", "text": "def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n\n position_ids = torch.arange(\n self.padding_idx + 1,\n sequence_length + self.padding_idx + 1,\n dtype=torch.long,\n device=inputs_embeds.device,\n )\n return position_ids.unsqueeze(0).expand(input_shape)", "title": "" }, { "docid": "c8295a19c5cfd8a02c9d630818b03b7e", "score": "0.48903325", "text": "def fetch_images(self):\n ids = [i.id for i in self]\n if len(ids) == 0:\n return {}\n nodes = Item.objects.hfilter(\n {'_class': 'product'}).extra(\n where=[\"(content_item.data -> 'legacy_id') IN (%s)\"\n % ', '.join('%s' for x in ids)],\n params=map(unicode, ids)).hselect(['legacy_id'])\n match = (len(nodes) == self.count())\n ids_table = {}\n for obj in self:\n obj.node = [n for n in nodes if int(n.legacy_id) == obj.id][0] \\\n if match else obj.node\n ids_table.update({obj.node.id: obj.id})\n\n images_qs = Item().graph.get_images(ids=ids_table.keys()).extra(\n select={'item_id': 'T4.from_item_id'})\n for i in images_qs:\n for obj in self:\n if obj.id == ids_table[i.item_id]:\n obj.image = i\n return self", "title": "" }, { "docid": "437ec11d77e8ed5373a670c27df486e5", "score": "0.4886353", "text": 
"def forward_test(self, imgs):", "title": "" }, { "docid": "54225281ac3652dc8b54063698d2544d", "score": "0.48729935", "text": "def compile_embedding(self, ids):\n to_concat = [ids, np.transpose(self.W), np.transpose(np.dot(self.H, self.T))]\n return np.concatenate(to_concat, axis=1)", "title": "" }, { "docid": "fa7207329dfb276bce7c90db2726125b", "score": "0.48617682", "text": "def showimages(images):\n for image in images:\n showimage(image)", "title": "" }, { "docid": "a25ea4030fa895c7765408b8a7962af7", "score": "0.48496383", "text": "async def embeds(self, ctx, search: int = None):\n await self.do_purge(ctx, search, lambda e: len(e.embeds))", "title": "" }, { "docid": "52e33b94fb1af054ad5d77863974d750", "score": "0.48362797", "text": "def make_prediction(image_ids, model, word_to_ind, ind_to_word):\n predictions = []\n vgg_model = preprocess_images.load_prebuilt_model()\n maxlen = model.input_shape[1][1]\n for ii in image_ids:\n prediction = []\n print('Predicting image: ' + str(ii))\n example = image.load_img(ii, target_size=(224, 224))\n example_arr = image.img_to_array(example, dtype='float32')\n example_arr = np.expand_dims(example_arr, axis=0)\n example_arr = preprocess_input(example_arr)\n example_features = vgg_model.predict(example_arr)\n example_features = np.array(example_features).reshape(-1, 4096)\n \n start_string = ['*start*']\n start_ind = list(map(word_to_ind.get, start_string))\n for i in range(maxlen):\n start_seq = pad_sequences([start_ind], maxlen)\n yhat = model.predict([example_features, start_seq])\n yhat = np.argmax(yhat)\n if ind_to_word[yhat] == '*end*':\n break\n prediction.append(ind_to_word[yhat])\n start_ind.append(yhat)\n predictions.append(prediction)\n return predictions", "title": "" }, { "docid": "a3842b8fe5c34232e17c540c9d6a92ce", "score": "0.48355153", "text": "def compute_image_embeddings(model_name, images, device, return_tensor=False):\n model = SentenceTransformer(model_name, device=device)\n if return_tensor:\n embeddings = model.encode(images, device=device, convert_to_tensor=True).cpu()\n else:\n embeddings = model.encode(images, device=device, convert_to_numpy=True)\n return embeddings", "title": "" }, { "docid": "1d14886ff377badb8c0f2ccb5abb9fb6", "score": "0.4835186", "text": "def get_current_images_id(hparams, dataset_name):\n\n train_file = hparams[dataset_name]\n image_dir = os.path.join(hparams['root'], train_file)\n caption_file_path = get_cleaned_captions_path(hparams, train_file)\n img_size = hparams[\"image_size\"]\n transform_pipeline = transforms.Compose([CenteringPad(),\n transforms.Resize(\n (img_size, img_size)),\n transforms.ToTensor()])\n\n coco_train_set = dset.CocoDetection(root=image_dir,\n annFile=caption_file_path,\n transform=transform_pipeline\n )\n train_loader = torch.utils.data.DataLoader(\n coco_train_set, batch_size=hparams[\"batch_size\"])\n break_training_loop_percentage = hparams[\"break_training_loop_percentage\"]\n break_training_loop_idx = max(\n int(len(train_loader) * break_training_loop_percentage / 100) - 1, 0)\n\n img_list = []\n for idx, sample in enumerate(tqdm(train_loader)):\n img_list.extend(sample[1][0][\"image_id\"].tolist())\n if idx == break_training_loop_idx:\n break\n return img_list", "title": "" }, { "docid": "b772d023feb5cfd7d91a51f18602c751", "score": "0.48272422", "text": "def process_embed(embed_items=None,\n embed_tracks=None,\n embed_metadata=None):\n\n result = None\n\n embed = ''\n if embed_items:\n embed = 'items'\n if embed_tracks:\n if embed != '':\n embed += ','\n embed += 
'tracks'\n if embed_metadata:\n if embed != '':\n embed += ','\n embed += 'metadata'\n\n if embed != '':\n result = embed\n\n return result", "title": "" }, { "docid": "7ff37ad5cd9b377f01ce8d6878d099b5", "score": "0.47936964", "text": "def interactImages(images, titles=None, subplotShape=None, figsize=None):\n #determine view\n #fixed dimension\n dimensions = np.arange(3)\n fixedDimensionWidget = widgets.IntSlider(min=0, max=dimensions[-1], value=0)\n \n #determine position on fixed dimension\n # for example z\n \n positionWidget = widgets.IntSlider(min=0)\n \n #updates\n def updatePositionWidget(*args):\n positionWidget.max = pet_scan.IMAGE_SHAPE[fixedDimensionWidget.value] -1\n positionWidget.description = pet_scan.IMAGE_DIMENSION_TITLES[fixedDimensionWidget.value]\n \n fixedDimensionWidget.observe(updatePositionWidget, 'value')\n updatePositionWidget()\n \n def f(fixDim, fixPos):\n #print(fixedDims)\n #determine image axes from fixdim\n dimensions = np.arange(3)\n hDim,vDim = dimensions[dimensions != fixDim]\n \n viewNDImages(images=images, fixedDimensions=[fixDim], fixedValues=[fixPos],\n subplotShape=subplotShape, titles = titles,\n axisLabels=[pet_scan.IMAGE_DIMENSION_TITLES[hDim],pet_scan.IMAGE_DIMENSION_TITLES[vDim]],\n figsize=figsize)\n \n interact(f, fixDim=fixedDimensionWidget, fixPos=positionWidget);", "title": "" }, { "docid": "2c40ebec4ac67575a2f5c95bd1066937", "score": "0.477987", "text": "def slides(self, req):\n\tlength = req.GET.get('length', 10)\n\t\n\n slides = []\n\n # FIXME(nmg): should catch exception if any\n queries = self.db.get_veg_slides()\n\n for query in queries:\n item = {\n 'id': str(query['_id']),\n 'name': query['name'],\n 'photo': query['photo'],\n 'price': query['price'],\n 'mprice': query['mprice'],\n 'size': query['size']\n }\n slides.append(item)\n\n return HttpResponse(slides)", "title": "" }, { "docid": "d8177cdcb0592feec54339f76cf97ac9", "score": "0.47709528", "text": "def wildbook_signal_imgsetid_list(ibs, imgsetid_list=None,\n set_shipped_flag=True,\n open_url_on_complete=True,\n wb_target=None, dryrun=False):\n try:\n wb_url = ibs.get_wildbook_base_url(wb_target)\n except IOError:\n print('[ibs.wildbook_signal_imgsetid_list] Caught IOError, returning None')\n return None\n\n try:\n ibs.assert_ia_available_for_wb(wb_target)\n except Exception:\n pass\n\n if imgsetid_list is None:\n imgsetid_list = ibs.get_valid_imgsetids()\n\n # Check to make sure imagesets are ok:\n for imgsetid in imgsetid_list:\n # First, check if imageset can be pushed\n aid_list = ibs.get_imageset_aids(imgsetid)\n assert len(aid_list) > 0, (\n 'ImageSet imgsetid=%r cannot be shipped with0 annots' % (imgsetid,))\n unknown_flags = ibs.is_aid_unknown(aid_list)\n unnamed_aid_list = ut.compress(aid_list, unknown_flags)\n unnamed_ok_aid_list = ibs.filter_annots_general(\n unnamed_aid_list,\n minqual='ok',\n )\n nUnnamedOk = sum(unnamed_ok_aid_list)\n assert nUnnamedOk == 0, (\n ('ImageSet imgsetid=%r1 cannot be shipped becuase '\n 'annotation(s) %r with an identifiable quality have '\n 'not been named') % (imgsetid, unnamed_ok_aid_list, ))\n\n # Call Wildbook url to signal update\n print('[ibs.wildbook_signal_imgsetid_list] ship imgsetid_list = %r to wildbook' % (\n imgsetid_list, ))\n imageset_uuid_list = ibs.get_imageset_uuid(imgsetid_list)\n print('[ibs.wildbook_signal_imgsetid_list] ship imgset_uuid_list = %r to wildbook' % (\n imageset_uuid_list, ))\n\n url = wb_url + '/ia'\n dbname = ibs.db.get_db_init_uuid()\n occur_url_fmt = (wb_url + 
'/occurrence.jsp?number={uuid}&dbname={dbname}')\n #enc_url_fmt = (wb_url + '/encounters/encounter.jsp?number={uuid}')\n\n # Check and push 'done' imagesets\n status_list = []\n for imgsetid, imageset_uuid in zip(imgsetid_list, imageset_uuid_list):\n print('[_send] URL=%r' % (url, ))\n json_payload = {'resolver': {'fromIAImageSet': str(imageset_uuid) }}\n if dryrun:\n status = False\n else:\n response = requests.post(url, json=json_payload)\n status = response.status_code == 200\n print('response = %r' % (response,))\n if set_shipped_flag:\n ibs.set_imageset_shipped_flags([imgsetid], [status])\n if status and open_url_on_complete:\n view_occur_url = occur_url_fmt.format(uuid=imageset_uuid, dbname=dbname)\n _browser = ut.get_prefered_browser(PREFERED_BROWSER)\n _browser.open_new_tab(view_occur_url)\n status_list.append(status)\n\n try:\n ibs.update_special_imagesets()\n ibs.notify_observers()\n except:\n pass\n\n return status_list", "title": "" }, { "docid": "5978b424ec3226a8ad4d89ff6e7b4a15", "score": "0.4765014", "text": "def zfturbo_convert_targets_from_dataloader(self, targets, image_ids):\n for img_id, t in zip(image_ids, targets):\n\n for box, label in zip(t[\"boxes\"].numpy(), t[\"labels\"].numpy()):\n self.true_image_id_arr.append(img_id)\n self.true_label_arr.append(label)\n self.true_x_min_arr.append(box[0])\n self.true_x_max_arr.append(box[2])\n self.true_y_min_arr.append(box[1])\n self.true_y_max_arr.append(box[3])", "title": "" }, { "docid": "945e83496bc1ca9498ff7d2e5347b0f7", "score": "0.4762966", "text": "def merge_images(sources, targets, opts, k=10):\n _, _, h, w = sources.shape\n row = int(np.sqrt(opts.batch_size))\n merged = np.zeros([3, row * h, row * w * 2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n merged[:, i * h : (i + 1) * h, (j * 2) * h : (j * 2 + 1) * h] = s\n merged[:, i * h : (i + 1) * h, (j * 2 + 1) * h : (j * 2 + 2) * h] = t\n return merged.transpose(1, 2, 0)", "title": "" }, { "docid": "37572145bd7f92d5af541da7abd038af", "score": "0.47581768", "text": "async def dog(self, ctx):\r\n url = \"https://api.thedogapi.com/v1/images/search\"\r\n response = urlopen(url)\r\n result = json.loads(response.read())\r\n actualData = result[0]\r\n url = actualData[\"url\"]\r\n embed = discord.Embed(color=random.randint(1, 0xfffff))\r\n \r\n embed.set_image(url=url)\r\n embed.set_footer(text=f\"Requested by {ctx.author}\", icon_url=ctx.author.avatar_url)\r\n return await ctx.send(embed=embed)", "title": "" }, { "docid": "16d74c7bdc9c202eceff5bf7376e371a", "score": "0.47462964", "text": "def preprocess_image(self, batched_inputs):\n images = [self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n\n# images = ImageList.from_tensors(images, self.size_divisibility)\n images = ImageList.from_tensors(images, 32)\n\n images_whwh = list()\n for bi in batched_inputs:\n h, w = bi[\"image\"].shape[-2:]\n images_whwh.append(torch.tensor([w, h, w, h], dtype=torch.float32, device=self.device))\n images_whwh = torch.stack(images_whwh)\n\n return images, images_whwh", "title": "" }, { "docid": "56d518c8ae03ca4e43f86a63e3837285", "score": "0.47427243", "text": "def fetch_images(self, response):\n pass", "title": "" }, { "docid": "e5108fccdf4ade0e82ce74706a7369e0", "score": "0.47369584", "text": "def create_pids2idxs(data_source):\n pid2imgs = {}\n for idx, (img, target, _) in enumerate(data_source.imgs):\n if target not in pid2imgs:\n pid2imgs[target] = [idx]\n else:\n pid2imgs[target].append(idx)\n return pid2imgs", "title": 
"" }, { "docid": "bcd934d0fee6c7970daa89fdbb6c2bf7", "score": "0.47367933", "text": "def compute_embeddings(images):\n\n # Creates graph from saved GraphDef.\n create_graph()\n filename_to_emb = {}\n config = tf.ConfigProto(device_count = {'GPU': 0})\n bar = progressbar.ProgressBar(widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])\n with tf.Session(config=config) as sess:\n i = 0\n for image in bar(images):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image) \n image_data = tf.gfile.FastGFile(image, 'rb').read()\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n embedding_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n embedding = sess.run(embedding_tensor,\n {'DecodeJpeg/contents:0': image_data})\n filename_to_emb[image] = embedding.reshape(2048)\n i += 1\n # print(image, i, len(images))\n return filename_to_emb", "title": "" }, { "docid": "0b5766afdc9114e55c6ae90ea297dbc9", "score": "0.47360665", "text": "def _get_embeddings(self, input_ids):\n # embedding_matrix = self.model.transformer.wte.weight\n embedding_matrix = self.model_embeddings\n\n vocab_size = embedding_matrix.shape[0]\n one_hot_tensor = self.to(_one_hot(input_ids, vocab_size))\n\n token_ids_tensor_one_hot = one_hot_tensor.clone().requires_grad_(True)\n # token_ids_tensor_one_hot.requires_grad_(True)\n\n inputs_embeds = torch.matmul(token_ids_tensor_one_hot, embedding_matrix)\n return inputs_embeds, token_ids_tensor_one_hot", "title": "" }, { "docid": "0de5bffa8a5d9b236f2ce7cf95223f02", "score": "0.47346297", "text": "def preprocess_batch_imgs(self,generated_content):\n\n # check the largest image width and word len in the batch\n pil_images = [img for img, word in generated_content]\n max_width = max([img.size[0] for img in pil_images])\n max_word_len_batch = max([len(word) for img, word in generated_content])\n\n\n # expand img with to mod 4_ds so that the maxpoolings wil result into\n # well defined integer length for the mapped tdist dimension (\"new width\")\n if max_width % 4 == 0:\n img_w = max_width\n else:\n img_w = max_width + 4 - (max_width % 4)\n\n #augment batch images\n for batch_ind in range(self.batch_size):\n\n # pad the image width with to the largest (fixed) image width\n pil_img = pil_images[batch_ind]\n width, height = pil_img.size\n\n new_img = Image.new(pil_img.mode, (img_w, self.img_h), (255,255,255))\n new_img.paste(pil_img, ((img_w - width) // 2, 0))\n\n # convert to numpy array\n img_arr = np.array(new_img)\n \n #some additional augmentation\n img_arr = self.keras_augmentor.random_transform(img_arr)\n\n # scale with 255 so that the values are between 0 and 1\n # and save to batch, also transpose because the \"time axis\" is width\n generated_content[batch_ind][0] = img_arr.transpose((1,0,2)) / 255\n\n return generated_content, img_w, max_word_len_batch", "title": "" }, { "docid": "cad859d122fe1f8c23b5d88dc4f99f67", "score": "0.4732007", "text": "def update_image_canvas_multi(indices, data, source, max_images=25):\n n_images = len(indices)\n filenames = data['path'].iloc[indices]\n if n_images > max_images:\n filenames = 
filenames[:max_images - 1]\n images = [imread(fn) for fn in filenames]\n if n_images > max_images:\n # from the My First Pixel Art (TM) School of Design\n dotdotdot = np.full((7, 7, 4), 255, dtype=np.uint8)\n dotdotdot[3, 1::2, :3] = 0\n images.append(dotdotdot)\n sidelen = ceil(sqrt(min(n_images, max_images)))\n step_size = 1 / sidelen\n grid_points = np.arange(0, 1 - step_size/2, step_size)\n start_xs, start_ys = np.meshgrid(grid_points, grid_points, indexing='ij')\n n_rows = len(images)\n step_sizes = np.full(n_rows, step_size)\n source.data = {'image': images, 'x': start_xs.ravel()[:n_rows],\n 'y': start_ys.ravel()[:n_rows],\n 'dx': step_sizes, 'dy': step_sizes}\n print(source.data)", "title": "" }, { "docid": "55d59fb7dd4bd9036c703f2e7e056b61", "score": "0.47269878", "text": "def compile_embedding(self, ids):\n to_concat = [ids, np.transpose(self.W), self.T.transpose().dot(self.H.transpose())]\n return np.concatenate(to_concat, axis=1)", "title": "" }, { "docid": "d94b86d20817e2f5da2f4aee98a2088c", "score": "0.47199464", "text": "def compute_contextualized_embeddings(self, sent_w_ids, sent_p_ids):\n\n sent_length = len(sent_p_ids)\n \n sentence_w_embeddings = self.w_embeddings(sent_w_ids) # shape = len of sent (+2) , w_embedding_dim\n sentence_p_embeddings = self.p_embeddings(sent_p_ids) # shape = len of sent (+2) , p_embedding_dim\n\n # embedding for one word is concatenation of the word form embedding and the pos embedding\n sentence_embeddings = torch.cat( [ sentence_w_embeddings, sentence_p_embeddings ], 1 )\n\n # re-set to 0 the hidden vectors (TODO: other initialization?)\n if self.gru_or_lstm == 'lstm':\n # init hidden and cell states h0 c0\n self.rnn_hidden = self.init_lstm_hidden_and_cell()\n else:\n # init hidden state h0\n self.rnn_hidden = self.init_gru_hidden()\n \n #print(\"SENTENCE_EMBEDDINGS.SHAPE\", sentence_embeddings.shape)\n #print(\"INPUT SHAPE TO RNN:\", sentence_embeddings.view(len(sent_length, 1, -1).shape)\n #print(\"RNN_HIDDEN.shape:\", self.rnn_hidden_and_cell[0].shape)\n\n # rnn takes the batch size as second dimension (here batch size=1)\n # rnn_hidden_seq = the output(hidden) vectors at each position in sequence\n # rnn_hidden = if lstm: the output and cell vectors at the last position\n # if gru: the output vector at the last position\n rnn_hidden_seq, self.rnn_hidden = self.rnn(sentence_embeddings.view(sent_length, 1, -1),\n self.rnn_hidden)\n #print(\"LSTM_OUT.shape\", lstm_out.shape)\n #print(\"LSTM_OUT output shape\", lstm_out.view(len(sent_length), -1).shape)\n\n return rnn_hidden_seq.view(sent_length, -1)", "title": "" }, { "docid": "5786e99304dd361a79ca4d230b7ff4dc", "score": "0.47181153", "text": "def get_images(self):\n \"\"\"Returns a list of all the images used. 
The indices\n in the layout refer to sprites in the list returned by\n this function\"\"\"\n pass", "title": "" }, { "docid": "3b7db5a269498eebeb76c33b2d3311c8", "score": "0.4713153", "text": "def get_relevant_imgs(img_lst, img_map, indices, distances,k, form=\"list\", rank=True, img_dir=None):\n df_lst = []\n for img in img_lst:\n df_lst.append(get_similar_imgs(img, img_map, indices, distances, k, img_dir=img_dir))\n \n df = pd.concat(df_lst)\n if rank:\n df = df.sort_values(\"dist\")\n else:\n df = df.sample(k)\n if form == \"list\":\n return df.head(k)[\"img\"].values\n elif form == \"df\":\n return df.head(k)", "title": "" }, { "docid": "b80ec583f7fd29fdea7bc4a1db3c2e9c", "score": "0.471242", "text": "def embedding_lookup_unique(params, ids, name=None):\n with ops.name_scope(name, \"EmbeddingLookupUnique\", [params, ids]):\n ids = ops.convert_to_tensor(ids)\n shape = array_ops.shape(ids)\n ids_flat = array_ops.reshape(\n ids, math_ops.reduce_prod(shape, keep_dims=True))\n unique_ids, idx = array_ops.unique(ids_flat)\n unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)\n embeds_flat = array_ops.gather(unique_embeddings, idx)\n embed_shape = array_ops.concat(\n [shape, array_ops.shape(unique_embeddings)[1:]], 0)\n embeds = array_ops.reshape(embeds_flat, embed_shape)\n embeds.set_shape(ids.get_shape().concatenate(\n unique_embeddings.get_shape()[1:]))\n return embeds", "title": "" }, { "docid": "2e19bfdf2259484153178fbf63f1407a", "score": "0.46960354", "text": "def get_images(self):\n pass", "title": "" }, { "docid": "2a0f64d8eea4fe8bec303882f9da7776", "score": "0.46924624", "text": "async def images(request: Request):\n recommendation_request = RecommendationRequest(**request.raw_args)\n response = await selector.get_recommendations(app.client_session,\n recommendation_request)\n return json(response.json, status=response.status)", "title": "" }, { "docid": "e43500af3356291694915dd671b04e16", "score": "0.4683429", "text": "def create_image_pairs(self, input_images, input_actions):\n\timages = []\n\tactions = []\n\tfuture_images = []\n\tfuture_actions = []\n\tids = []\n\tfuture_ids = []\n\tif len(input_images) < 1 or len(input_actions) < 1:\n\t return [images, actions, future_images, future_actions, ids, future_ids]\n\t\n\t# specify the length of the action sequence based on the maximally possible delta_t\n\tdelta_t = self.min_time_difference\n\tif self.random_time:\n\t delta_t = self.max_time_difference\n\taction_sequence_length = delta_t * len(input_actions[0])\n\timage_sequence_length = delta_t * input_images[0].shape[2] \n\t\n\t# create present-future image/action pairs\n\tfor i in range(len(input_images)):\t \n\t # select time delta\n\t if self.random_time:\n\t\tmax_time_difference = min(len(input_images) - i, self.max_time_difference)\n\t\tif max_time_difference <= self.min_time_difference \\\n\t\t\tor max_time_difference < 1:\n\t\t continue\n\t\tdelta_t = np.random.randint(self.min_time_difference, max_time_difference)\n\t else:\n\t\tif i + delta_t >= len(input_images):\n\t\t break\n\t images.append(input_images[i])\n\t actions.append(input_actions[i])\n\t future_images.append(input_images[i+delta_t])\n\t future_actions.append(input_actions[i+delta_t])\n\t ids.append(i+self.batch_num*self.batch_size)\n\t future_ids.append(i+delta_t+self.batch_num*self.batch_size)", "title": "" }, { "docid": "65afb84fa6dc6bb020c9cf7a7250557b", "score": "0.46827105", "text": "def download_galleries(driver, gallery_id_list, num_galleries):\n for num, id in 
enumerate(gallery_id_list, 1):\n # Only want the first N galleries specified by user\n if num > num_galleries:\n return\n\n # Make a direct link to the gallery\n gallery_link = 'https://imgur.com/gallery/' + id\n\n # Go to the gallery_link page\n driver.get(gallery_link)\n\n # Large galleries will have a \"load more images\" button\n load_more_images_button = driver.find_elements_by_xpath(\"\"\"//a[contains(@class, 'post-loadall')]\"\"\")\n\n # If there is a \"Load More Images\" button, then we must switch to the gallery's grid view\n # so the entire gallery is loaded. Otherwise, we can use the default gallery view.\n if load_more_images_button:\n use_grid_view(driver, id, num)\n else:\n use_normal_view(driver, num)", "title": "" }, { "docid": "f48c75d7cdec57f7ef00c29c6dd28548", "score": "0.46815458", "text": "def load_imgs(self, ids):\n return self._load_helper(self.imgs, ids)", "title": "" }, { "docid": "5448286e9efc61d561e56ae305761b81", "score": "0.46776003", "text": "def get_queued_images(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "f1c6341c877c963c0a7e5c1c5eee8545", "score": "0.4675306", "text": "def handle_flash_embed(session,post_dict):#TODO FIXME\n logging.debug(\"Processing flash embed\")\n logging.warning(\"handle_flash_embed() is not finished yet. FIX IT!\")#TODO FIXME\n \"\"\" u'player': [{u'embed_code': u'<embed width=\"250\" height=\"291\" align=\"middle\" pluginspage=\"http://www.adobe.com/go/getflashplayer\" type=\"application/x-shockwave-flash\" allowfullscreen=\"false\" allowscriptaccess=\"sameDomain\" name=\"xdft\" bgcolor=\"#000000\" scale=\"noscale\" quality=\"high\" menu=\"false\" src=\"http://www.najle.com/idaft/idaft/xdft.swf\">',\n u'width': 250},\n {u'embed_code': u'<embed width=\"400\" height=\"466\" align=\"middle\" pluginspage=\"http://www.adobe.com/go/getflashplayer\" type=\"application/x-shockwave-flash\" allowfullscreen=\"false\" allowscriptaccess=\"sameDomain\" name=\"xdft\" bgcolor=\"#000000\" scale=\"noscale\" quality=\"high\" menu=\"false\" src=\"http://www.najle.com/idaft/idaft/xdft.swf\">',\n u'width': 400},\n {u'embed_code': u'<embed width=\"500\" height=\"582\" align=\"middle\" pluginspage=\"http://www.adobe.com/go/getflashplayer\" type=\"application/x-shockwave-flash\" allowfullscreen=\"false\" allowscriptaccess=\"sameDomain\" name=\"xdft\" bgcolor=\"#000000\" scale=\"noscale\" quality=\"high\" menu=\"false\" src=\"http://www.najle.com/idaft/idaft/xdft.swf\">',\n u'width': 500}],\"\"\"\n # Extract video links from post dict\n found_links = []\n video_items = post_dict[\"player\"]\n for video_item in video_items:\n embed_code = video_item[\"embed_code\"]\n #\n if embed_code:\n # Find links in the field\n field_links = link_handlers.find_links(embed_code)\n found_links += field_links\n continue\n logging.debug(\"handle_flash_embed() found_links: \"+repr(found_links))\n\n # Remove duplicate links\n links = uniquify(found_links)\n\n media_id_list = []\n # Choose which links to save\n for link in links:\n # If link ends in .swf\n if \".swf\" in link[-4:]:\n media_id_list += download_image_link(session,link)\n continue\n # If link ends in .flv\n elif \".flv\" in link[-4:]:\n media_id_list += download_image_link(session,link)\n continue\n continue\n\n return media_id_list", "title": "" }, { "docid": "c4f74a44a5911a768227ce1c7872bd16", "score": "0.46719703", "text": "def main(*args):\n if len(args) >= 1:\n image_source_id = bson.objectid.ObjectId(args[0])\n\n config = global_conf.load_global_config('config.yml')\n db_client = 
database.client.DatabaseClient(config=config)\n\n image_source = None\n s_image_source = db_client.image_source_collection.find_one({'_id': image_source_id})\n if s_image_source is not None:\n image_source = db_client.deserialize_entity(s_image_source)\n del s_image_source\n\n if image_source is not None:\n image_source.begin()\n while not image_source.is_complete():\n image, _ = image_source.get_next_image()\n debug_img = image.data[:, :, ::-1].copy()\n for obj in image.metadata.labelled_objects:\n x, y, w, h = obj.bounding_box\n cv2.rectangle(debug_img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n text_label = str(obj.class_names[0])\n (retval, baseLine) = cv2.getTextSize(text_label, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n text_org = (x, y - 0)\n\n cv2.rectangle(debug_img, (text_org[0] - 5, text_org[1] + baseLine - 5),\n (text_org[0] + retval[0] + 5, text_org[1] - retval[1] - 5), (0, 0, 0), 2)\n cv2.rectangle(debug_img, (text_org[0] - 5, text_org[1] + baseLine - 5),\n (text_org[0] + retval[0] + 5, text_org[1] - retval[1] - 5), (255, 255, 255), -1)\n cv2.putText(debug_img, text_label, text_org, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\n\n cv2.imshow('debug', debug_img)\n cv2.waitKey(0)", "title": "" }, { "docid": "560fa680efd3836987a15f68557e5a71", "score": "0.46693918", "text": "def batch_pad(data_dict,ids):\n batch_info = {}\n b_fp = torch.tensor(())\n b_dfpdX = torch.tensor(())\n b_e_mask = torch.tensor(())\n b_e = torch.tensor(())\n b_f = torch.tensor(())\n\n # Find the largest image in the batch\n N_max = 0\n all_atoms = torch.tensor(())\n for ID in ids:\n N_atoms, N_element = data_dict[ID]['e_mask'].shape\n all_atoms = torch.cat((all_atoms,torch.tensor(N_atoms).float().view(1,1)))\n if N_atoms > N_max:\n N_max = N_atoms\n N_sym = data_dict[ID]['fp'].shape[1]\n\n # Loop through the ids to batch the values\n for ID in ids:\n pad_fp = torch.zeros(N_max,N_sym)\n pad_dfpdX = torch.zeros(N_max,N_sym,N_max,3)\n pad_e_mask = torch.zeros(N_max,N_element)\n pad_f = torch.zeros(N_max,3)\n fp = data_dict[ID]['fp']\n dfpdX = data_dict[ID]['dfpdX']\n e_mask = data_dict[ID]['e_mask']\n pad_fp[:fp.shape[0],:fp.shape[1]] = fp\n pad_dfpdX[:dfpdX.shape[0],:dfpdX.shape[1],:dfpdX.shape[2],:] = dfpdX\n pad_e_mask[:e_mask.shape[0],:e_mask.shape[1]] = e_mask\n pad_f[:fp.shape[0],:] = data_dict[ID]['f'] \n b_fp = torch.cat((b_fp,pad_fp))\n b_dfpdX = torch.cat((b_dfpdX,pad_dfpdX))\n b_e_mask = torch.cat((b_e_mask,pad_e_mask))\n b_e = torch.cat((b_e,data_dict[ID]['e'].view(1,1)),dim=0)\n b_f = torch.cat((b_f,pad_f))\n\n # Update the output dictionary\n batch_info.update({'N_atoms':all_atoms})\n batch_info.update({'b_fp':b_fp.view(len(ids),N_max,N_sym)})\n batch_info.update({'b_dfpdX':b_dfpdX.view(len(ids),N_max,N_sym,N_max,3)})\n batch_info.update({'b_e_mask':b_e_mask.view(len(ids),N_max,N_element)})\n batch_info.update({'b_e':b_e})\n batch_info.update({'b_f':b_f.view(len(ids),N_max,3)})\n return batch_info", "title": "" }, { "docid": "e43b5925594eab6f5f3eff9c3796fe00", "score": "0.4665801", "text": "def get_true_captions(img_ids, coco):\n batch_captions = []\n for img_id in img_ids:\n image_metadata = coco.imgToAnns[img_id]\n # Here, we compute the image metadata\n #print(\"Image metadata is {}\".format(image_metadata))\n captions = []\n for metadata in image_metadata:\n captions.append(metadata['caption'].lower())\n batch_captions.append(captions)\n return batch_captions", "title": "" }, { "docid": "2947ebef2f28671ca4cab0f9856cebd0", "score": "0.46594942", "text": "def 
handle_dailymotion_videos(session,post_dict):\n logging.debug(\"Processing dailymotion video\")\n # Extract video links from post dict\n video_urls = []\n video_items = post_dict[\"player\"]\n for video_item in video_items:\n embed_code = video_item[\"embed_code\"]\n # u'embed_code': u'<iframe src=\"https://www.dailymotion.com/embed/video/x2msryd\" width=\"250\" height=\"139\" frameborder=\"0\" allowfullscreen></iframe>'\n # https://www.dailymotion.com/embed/video/x2msryd\n if embed_code:\n # Process links so YT-DL can understand them\n logging.debug(\"embed_code: \"+repr(embed_code))\n embed_url_regex =\"\"\"src=[\"']([^?\"'#]+)\"\"\"\n embed_url_search = re.search(embed_url_regex, embed_code, re.IGNORECASE|re.DOTALL)\n if embed_url_search:\n embed_url = embed_url_search.group(1)\n video_urls.append(embed_url)\n continue\n logging.debug(\"video_urls: \"+repr(video_urls))\n\n # Download videos if there are any\n media_id_list = run_yt_dl_multiple(\n session = session,\n download_urls = video_urls,\n extractor_used=\"video_handlers.handle_dailymotion_videos()\",\n )\n\n logging.debug(\"Finished downloading dailymotion embeds\")\n return media_id_list", "title": "" }, { "docid": "c227bbc1238334453a77e67cd7660e0e", "score": "0.4656213", "text": "def setHelpEmbedThumbnails():\n for levelSection in botCommands.helpSectionEmbeds:\n for helpSection in levelSection.values():\n for embed in helpSection:\n embed.set_thumbnail(url=botState.client.user.avatar_url_as(size=64))", "title": "" }, { "docid": "33ce80f1bf81423b7379d77e5a97b831", "score": "0.4655339", "text": "def add_images_to_visual(self, visual_id, images=[]):\n for image in images:\n logger.info(\"Adding image %s to visual %d\" % (image, visual_id))\n image_buffer = self._load_file(image)\n return self._post(\"projects/visuals/%d/images/\" % visual_id,\n files={'image': image_buffer})", "title": "" }, { "docid": "5336d6eabcf2ad7d1e70261c7a6fe426", "score": "0.46519732", "text": "def handle_inline(inline_query):\n if not nekowat.is_allowed(inline_query.from_user.id):\n nekowat.answer_inline_query(inline_query.id, [])\n return\n\n # Normalize expression\n expression = inline_query.query.lower().strip()\n\n if not expression:\n # Get all images\n wats = nekowat.get_all_wats()\n\n else:\n # Get by expression\n wats = nekowat.get_wats_by_expression(expression)\n\n try:\n responses = []\n\n for index, wat in enumerate(wats):\n r = telebot.types.InlineQueryResultCachedPhoto(\n str(index),\n # Get smallest file for inline reply\n wat['file_ids'][0],\n parse_mode='' # Workaround for Telegram API error\n )\n\n responses.append(r)\n\n nekowat.answer_inline_query(inline_query.id, responses)\n\n except Exception as e:\n print(e)", "title": "" }, { "docid": "2eb13699da3751bd8ed7ed1093635c96", "score": "0.46486753", "text": "def detection_wanted_img(path_wanted,path_image,portrait,l_reso,l_seuil):\n print(\"calcul des embeddings à rechercher\")\n res=convert_embd(path_wanted)\n list_emb,list_name_wanted=res[0],res[1]\n print(\"calcul terminé\")\n list_emb=np.asarray(list_emb)\n list_name_wanted=np.asarray(list_name_wanted)\n detection_wanted(list_emb,list_name_wanted,path_image,portrait,l_reso,l_seuil)", "title": "" }, { "docid": "255c0a3c0fc2021ebc041ad4c8c10171", "score": "0.46456274", "text": "def get_example(id):\n c = get_chunk(id // aug_single_chunk_size)\n return c[0][id % aug_single_chunk_size].reshape((ImageChannels*aug_image_side**2)), c[1][id % aug_single_chunk_size]", "title": "" }, { "docid": "6db4b7f19262e4107771040e81754350", "score": 
"0.4644313", "text": "def GetQAofImage(id=61512):\n page = 1\n next = '/api/v0/image/' + str(id) + '/qa?page=' + str(page)\n qas = []\n image_map = {}\n while True:\n data = utils.RetrieveData(next)\n for d in data['results']:\n if d['image'] not in image_map:\n image_map[d['image']] = GetImageData(id=d['image'])\n qas.extend(utils.ParseQA(data['results'], image_map))\n if data['next'] is None:\n break\n page += 1\n next = '/api/v0/image/' + str(id) + '/qa?page=' + str(page)\n return qas", "title": "" }, { "docid": "e76ef9f4b53da57548cd606270e2b42f", "score": "0.4642653", "text": "def embedplays(params):\n # set up default values\n default_from, default_to, yesterday, _ = make_default_times()\n\n # get params\n try:\n series = \"onionstudios\"\n from_date = params.get(\"from\", [default_from])[0]\n to_date = params.get(\"to\", [default_to])[0]\n group_by = params.get(\"group_by\", [DEFAULT_GROUP_BY])[0]\n except Exception as e:\n LOGGER.exception(e)\n return json.dumps({\"error\": e.message}), \"500 Internal Error\"\n\n # check the cache\n cache_key = \"{}:{}:{}:{}:{}:{}\".format(memcached_prefix, \"embedplays.json\", series, from_date, to_date, group_by)\n try:\n data = MEMCACHED_CLIENT.get(cache_key)\n if data:\n return data, \"200 OK\"\n except Exception as e:\n LOGGER.exception(e)\n\n # parse from date\n from_date = parse_datetime(from_date)\n if from_date is None:\n LOGGER.error(\"could not parse 'from'\")\n return json.dumps({\"error\": \"could not parse 'from'\"}), \"400 Bad Request\"\n\n # parse to date\n to_date = parse_datetime(to_date)\n if to_date is None:\n LOGGER.error(\"could not parse 'to'\")\n return json.dumps({\"error\": \"could not parse 'to'\"}), \"400 Bad Request\"\n\n # influx will only keep non-aggregated data for a day, so if the from param is beyond that point\n # we need to update the series name to use the rolled up values\n rollup_query = False\n if from_date < yesterday:\n series = update_series(series, \"embedplays\")\n rollup_query = True\n\n # format times\n from_date = format_datetime(from_date)\n to_date = format_datetime(to_date)\n\n # build out the query\n if not rollup_query:\n query = \"SELECT sum(value) as value \" \\\n \"FROM {series} \" \\\n \"WHERE time > '{from_date}' \" \\\n \"AND time < '{to_date}' \" \\\n \"AND event =~ /^embedplay$/ \" \\\n \"GROUP BY time({group_by}) \" \\\n \"fill(0);\"\n else:\n query = \"SELECT sum(value) as value \" \\\n \"FROM {series} \" \\\n \"WHERE time > '{from_date}' \" \\\n \"AND time < '{to_date}' \" \\\n \"GROUP BY time({group_by}) \" \\\n \"fill(0);\"\n args = {\"series\": series, \"from_date\": from_date, \"to_date\": to_date, \"group_by\": group_by}\n\n # send the request\n try:\n res = INFLUXDB_CLIENT.query(query.format(**args))\n\n # capture errors and send them back along with the query (for inspection/debugging)\n except Exception as e:\n LOGGER.exception(e)\n return json.dumps({\"error\": e.message, \"query\": query.format(**args)}), \"500 Internal Error\"\n\n # build the response object\n response = flatten_response(res)\n res = json.dumps(response)\n\n # cache the response\n try:\n MEMCACHED_CLIENT.set(cache_key, res, time=MEMCACHED_EXPIRATION)\n except Exception as e:\n LOGGER.exception(e)\n\n return res, \"200 OK\"", "title": "" }, { "docid": "5f890c7a86c30ef8bdba91e46f3abe82", "score": "0.4639542", "text": "def getPictures(base_url, sizes):\n pictures = []\n widths = []\n names = {}\n for name in sizes:\n width = sizes[name][0]\n widths.append(width)\n names[str(width)] = name\n widths.sort()\n 
noscript = \"\"\n previous = None\n for width in widths:\n name = names[str(width)]\n if width >= 128 and not noscript:\n noscript = base_url + name\n previous = width\n continue\n if not previous:\n continue\n media = \"(min-width: %spx)\" % (previous + 1)\n# media = \"(min-width: %spx)\" % width\n# media = \"(max-width: %spx)\" % width\n image = {'src': base_url + name, 'media': media}\n pictures.append(image)\n previous = width\n media = \"(min-width: %spx)\" % (widths[-1] + 1)\n image = {'src': base_url[:-1], 'media': media}\n pictures.append(image)\n\n #try to find a width > 128px for the noscript image\n return pictures, noscript", "title": "" }, { "docid": "c9591abb4f30cc5e8feb027fd8ad4f26", "score": "0.46377063", "text": "def __woo_upload_metadata(self):\n for item_id in self.active_item_ids:\n self.woo.try_command('upload_images', item_id)", "title": "" }, { "docid": "0e7c69f82972dec8d2f18d44603b04fc", "score": "0.46374556", "text": "def get_multiple_image(curid):\n\tapi_url = 'https://en.wikipedia.org/w/api.php?action=query&prop=revisions&rvprop=content&format=json&pageids='+str(curid)+'&rvsection=0'\n\tresult = _rget(api_url)[u'query'][u'pages']\n\tr = result[unicode(curid)][u'revisions'][0][u'*']\n\twikicode = mwparserfromhell.parse(r)\n\ttemplates = wikicode.filter_templates()\n\tbox = {}\n\tfor template in templates:\n\t\tname = template.name.lstrip().rstrip().lower()\n\t\tif 'image' in name:\n\t\t\tbox_ = {}\n\t\t\tfor param in template.params:\n\t\t\t\tkey = drop_comments(param.name).strip().lower().replace(' ','_')\n\t\t\t\tvalue = drop_comments(param.value).strip()\n\t\t\t\tbox_[key] = value\n\t\t\tbox['image'] = box_\n\t\t\tbreak #Grab only the first one\n\treturn box", "title": "" }, { "docid": "98783ff50dde8dd0e83dc5b01dba0ddf", "score": "0.46356267", "text": "def handle_kaltura_videos(session,post_dict):#TODO FIXME\n logging.debug(\"Processing kaltura video\")\n # Extract video links from post dict\n video_urls = []\n video_items = post_dict[\"player\"]\n for video_item in video_items:\n embed_code = video_item[\"embed_code\"]\n # ...data=\"http://www.kaltura.com/index.php/kwidget/wid/0_jzs5tgmh/uiconf_id/7342382\">...\n # http://www.kaltura.com/index.php/kwidget/wid/0_jzs5tgmh/uiconf_id/7342382\n if embed_code:\n # Process links so YT-DL can understand them\n logging.debug(\"handle_kaltura_videos() embed_code: \"+repr(embed_code))\n embed_url_regex =\"\"\"(?:http://www.)?kaltura.com/index.php/kwidget/wid/[^\"'<>]+\"\"\"\n embed_url_search = re.search(embed_url_regex, embed_code, re.IGNORECASE|re.DOTALL)\n if embed_url_search:\n video_url = embed_url_search.group(0)\n video_urls.append(video_url)\n continue\n logging.debug(\"handle_kaltura_videos() video_urls: \"+repr(video_urls))\n\n # Download videos if there are any\n media_id_list = run_yt_dl_multiple(\n session = session,\n download_urls = video_urls,\n extractor_used=\"video_handlers.handle_kaltura_videos()\",\n )\n logging.debug(\"Finished downloading kaltura embeds\")\n return media_id_list", "title": "" }, { "docid": "527b9b6d0c7dc5bd490b55107abebcb2", "score": "0.46338552", "text": "def encode_img(self,\n images: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n device = images.device\n x = self.vision_encoder(images)[0]\n image_embeds = self.ln_vision(x).to(device)\n image_atts = torch.ones(\n image_embeds.size()[:-1], dtype=torch.long).to(device)\n\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n query_output = self.q_former.bert(\n query_embeds=query_tokens,\n 
encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_atts,\n return_dict=True,\n )\n\n inputs_llama = self.llama_proj(query_output.last_hidden_state)\n atts_llama = torch.ones(\n inputs_llama.size()[:-1], dtype=torch.long).to(images.device)\n return inputs_llama, atts_llama", "title": "" }, { "docid": "f70668f69c033880fc324f7d7f924523", "score": "0.4633523", "text": "def plot_pruned_seqs(model_output, inputs, n_logged_samples=3, max_seq_len=None):\n assert \"images\" in model_output.dense_rec # need pruned predicted images of SH-Pred model\n if inputs is not None:\n batch, n_gt_imgs, channels, res, _ = inputs.traj_seq.shape\n else:\n batch = len(model_output.dense_rec.images)\n assert batch == 1 # con currently only handle batch size 1\n n_gt_imgs, channels, res, _ = model_output.dense_rec.images[0].shape\n MAX_SEQ_LEN = int(n_gt_imgs * 1.5) if not max_seq_len else max_seq_len\n\n im_height = 2 * res\n im_width = (MAX_SEQ_LEN+1) * res\n im = np.asarray(0.7 * np.ones((n_logged_samples, im_height, im_width, 3)), dtype=np.float32)\n pred_imgs = list(map(imgtensor2np, model_output.dense_rec.images[:n_logged_samples]))\n max_len = min(n_gt_imgs, MAX_SEQ_LEN)\n for b in range(n_logged_samples):\n if pred_imgs[b] is None: continue\n seq_len = min(pred_imgs[b].shape[0], MAX_SEQ_LEN)\n max_len = max(max_len, seq_len)\n im[b, -res:, res:(seq_len+1)*res] = pred_imgs[b][:seq_len].transpose(2, 0, 3, 1).reshape(res, seq_len*res, channels)\n if inputs is not None:\n im[:, :res, :(n_gt_imgs*res)] = imgtensor2np(inputs.traj_seq, n_logged_samples).transpose(0, 3, 1, 4, 2)\\\n .reshape(n_logged_samples, res, n_gt_imgs*res, channels)\n if \"actions\" in model_output.dense_rec \\\n and model_output.dense_rec.actions is not None \\\n and (True in [a is not None for a in model_output.dense_rec.actions]) \\\n and (inputs is None or inputs.actions.shape[-1] == 2):\n ac_im = np.asarray(0.7 * np.ones((n_logged_samples, res, im_width, 3)), dtype=np.float32)\n pred_ac = list(map(tensor2np, model_output.dense_rec.actions[:n_logged_samples]))\n for b in range(n_logged_samples):\n if pred_ac[b] is None: continue\n seq_len = min(pred_ac[b].shape[0], MAX_SEQ_LEN)\n ac_im[b, :, :seq_len*res] = batch_action2img(pred_ac[b][None, :seq_len], res, channels).transpose(0, 2, 1, 3, 4)\\\n .reshape(res, seq_len*res, channels)\n im = np.concatenate((im, ac_im), axis=1)\n\n # prepare GIF version\n gif_imgs = np.swapaxes(im.reshape(n_logged_samples, im.shape[1], MAX_SEQ_LEN+1, res, channels), 0, 2)[:max_len+1] \\\n .reshape(max_len+1, im.shape[1], res * n_logged_samples, channels)\n\n return im, gif_imgs", "title": "" }, { "docid": "8c54fed09598be2964aae56673c1e44e", "score": "0.46313572", "text": "def parse_fewshot_anno(self, query: dict, support_list: List) -> dict:\n # prepare n shots examples\n shots = random.sample(support_list, self.num_shots)\n\n # append image path for n shots\n img_path = [shot['img_path'] for shot in shots]\n img_path.append(query['img_path'])\n query['img_path'] = img_path\n\n query['shots'] = [\n dict(\n question=item['question'],\n answer=item['gt_answer'][0],\n ) for item in shots\n ]\n return query", "title": "" }, { "docid": "e229cc38993d00b972a9bc0fd50b3c6f", "score": "0.46307558", "text": "async def search_images(self, **kwargs):\n\n params = {\n 'breed_id': kwargs.pop(\"breed_id\", \"\"),\n 'category_ids': kwargs.pop(\"category_ids\", \"\"),\n 'format': kwargs.pop(\"format\", \"\"),\n 'limit': kwargs.pop(\"limit\", 1),\n 'mime_types': kwargs.pop(\"mime_types\", \"\"),\n 'order': 
kwargs.pop(\"order\", \"RANDOM\"),\n 'size': kwargs.pop(\"size\", \"med\"),\n 'page': kwargs.pop(\"page\", 0),\n }\n\n url = f'{BASE_URL}/images/search'\n\n images = await self.api_get_session(url, params)\n images = [Image(**image) for image in images]\n return images", "title": "" }, { "docid": "7293401296aec8aa177ef92054ed2c40", "score": "0.46290153", "text": "def beam_search_batch(self, src_id, max_decode_length):\n\t\tpass", "title": "" }, { "docid": "cf26fb747e81d58dabe00acc80b4859e", "score": "0.4619074", "text": "def images(self, **query):\n if 'limit' not in query.keys():\n query[\"limit\"] = 1000\n return list(self._list(_image.Image, **query))", "title": "" }, { "docid": "221e7db9aa6d90102f21e2581e0349fd", "score": "0.4618204", "text": "def embed(self, input, batchsize):\n if self.prm[\"wo_tok\"]:\n # emb: [seq_len*nbatch, char_hid]\n emb = self.drop(self.char_encoder(input[\"char\"]))\n # emb: [seq_len, nbatch, char_hid]\n if batchsize is not None:\n emb = emb.reshape(-1, batchsize, emb.shape[-1])\n else:\n emb = emb.reshape(input[\"word\"].shape[0], input[\"word\"].shape[1], -1)\n elif self.prm[\"wo_char\"]:\n # emb: [seq_len, nbatch, tok_emb]\n emb = self.drop(self.word_encoder(input[\"word\"]))\n elif self.prm[\"wo_tok\"] and self.prm[\"wo_char\"]:\n # At least one embedding layer is required.\n assert(False)\n else:\n # emb: [seq_len, nbatch, tok_emb]\n emb_word = self.drop(self.word_encoder(input[\"word\"]))\n # emb: [seq_len*nbatch, char_hid]\n emb_char = self.drop(self.char_encoder(input[\"char\"]))\n # emb: [seq_len, nbatch, char_hid]\n emb_char = emb_char.reshape(input[\"word\"].shape[0], input[\"word\"].shape[1], -1)\n # emb: [seq_len, nbatch, tok_emb + char_hid]\n emb = torch.cat([emb_word, emb_char], dim=2)\n return emb", "title": "" }, { "docid": "0c9fcf47c55227001f30b26cf2f54e2e", "score": "0.46140903", "text": "def get_similar_imgs(img_name, img_map, indices, distances,k, form =\"df\",img_dir=None):\n index = get_key(img_map, img_name)\n distances = distances[index][1:k+1]\n \n if img_dir == None:\n images = [x for x in indices[index]]\n else: \n images = [img_dir+x for x in indices[index]] \n \n images = images[1:k+1]\n \n if form == \"df\":\n return pd.DataFrame({\"img\": images,\"dist\": distances})\n elif form == \"list\": \n return images", "title": "" }, { "docid": "e696a6114c4d79b6bf659778d3a9ba22", "score": "0.46125808", "text": "def attack(self, imgs, targets):\n r = []\n print('go up to', imgs.shape[0])\n for i in range(0, imgs.shape[0], self.batch_size):\n print('tick', i)\n # print(\"imgs[i:i + self.batch_size]\", imgs[i:i + self.batch_size])\n # print(\"targets\", targets)\n r.extend(self.attack_batch(\n imgs[i:i + self.batch_size], targets))\n return np.array(r)", "title": "" }, { "docid": "fecf5e13cd785cb5a966ebf9ea07e75b", "score": "0.4611163", "text": "def parse_fewshot_anno(self, query: dict, support_list: List) -> dict:\n # prepare n shots examples\n shots = random.sample(support_list, self.num_shots)\n\n # append image path for n shots\n img_path = [shot['img_path'] for shot in shots]\n img_path.append(query['img_path'])\n query['img_path'] = img_path\n\n query['shots'] = [dict(caption=item['gt_caption']) for item in shots]\n return query", "title": "" }, { "docid": "580b4ef695c411f550b2c57524a29ae6", "score": "0.46107417", "text": "def DisplayImages(self) -> LayerFilterDisplayImages:", "title": "" }, { "docid": "731c3c12b58e2092c84150a0090c8265", "score": "0.4608333", "text": "def build_image_embedding(self):\r\n\r\n with 
tf.variable_scope(\"D_\"):\r\n with tf.variable_scope(\"image_embedding\") as scope:\r\n image_embeddings = tf.contrib.layers.fully_connected(\r\n inputs=self.inception_output,\r\n num_outputs=self.config.embedding_size,\r\n activation_fn=None,\r\n weights_initializer=self.initializer,\r\n biases_initializer=None,\r\n scope=scope)\r\n\r\n self.image_embeddings = image_embeddings", "title": "" } ]
cf090fe9542868e99484e51b47cd4719
Find the first status matching the given id
[ { "docid": "273f0441220e69f71c097180394e0b39", "score": "0.5996115", "text": "def get_card_status(status_id):\n status = data_manager.execute_select(\n \"\"\"\n SELECT * FROM statuses s\n WHERE s.id = %(status_id)s;\n \"\"\", {\"status_id\": status_id})\n return status", "title": "" } ]
[ { "docid": "3f3256587c96740dc1d2b1c1aaffc75b", "score": "0.7008374", "text": "def get_status(self, status_id: StatusID):\n pass", "title": "" }, { "docid": "7c7e39e58fe3e485e0cc9f321a375e01", "score": "0.6748259", "text": "def get_card_status(status_id):\n statuses = persistence.get_statuses()\n return next((status['title'] for status in statuses if status['id'] == str(status_id)), 'Unknown')", "title": "" }, { "docid": "9caa158321a0245d46485edc6a5f119b", "score": "0.67374897", "text": "def get_card_status(status_id):\n statuses = persistence.get_statuses()\n return next((status['title'] for status in statuses if status['id'] == int(status_id)), 'Unknown')", "title": "" }, { "docid": "6b16f614d576f2ded4c7709c27d3d6fa", "score": "0.65737146", "text": "def find_one_by_id(id) -> Job:\n\n with DB.connection_context():\n return Job.get_or_none(Job.id == id)", "title": "" }, { "docid": "e0477878b783b292ab62b01773fb412b", "score": "0.64558583", "text": "def find(self, id):\n pass", "title": "" }, { "docid": "2372504d4950b8e20a55ea95699000ea", "score": "0.6330566", "text": "def get_status(value):\n query = Status.query.filter(Status.value==value)\n return query.one()", "title": "" }, { "docid": "283fc1d6dbb7af9a64da41147ae9fbf5", "score": "0.6308374", "text": "def tt_lookup(id):\n status = status_lookup([id])\n status = status[0]\n return status['text']", "title": "" }, { "docid": "75572946d0ea26efa6c7e38fcc495da5", "score": "0.6250195", "text": "async def find_by_id(self, id):\r\n return await self.db.find_one({\"_id\": id})", "title": "" }, { "docid": "63ea667cefc45e2f2f5f79aa0553fba6", "score": "0.6240588", "text": "def get_one(self, id):\n return self.coll.find_one({'_id': self._get_valid_id(id)})", "title": "" }, { "docid": "9eb8d47312aabd60ef2004dabd4b5701", "score": "0.6216652", "text": "async def find(self, id):\r\n return await self.find_by_id(id)", "title": "" }, { "docid": "cbbd846d0c0d9bc912535ff12b03d597", "score": "0.62122244", "text": "def find_status_name(cls, status_id):\n statuses = cls.STATUSES\n reversed_statuses = dict(zip(statuses.values(), statuses.keys()))\n return reversed_statuses.get(status_id)", "title": "" }, { "docid": "9e2d4ff4e9b0769f508fb28bfadde235", "score": "0.62011504", "text": "def get(self, id):\n ctx = self.context.copy()\n ctx['active_test'] = False\n results = self.rpc_model.search_read(\n [('id', '=', id)],\n None, None, None, self.fields,\n context=ctx\n )\n return results and results[0] or None", "title": "" }, { "docid": "1985e8e806f83c1f6f770e645121199a", "score": "0.619701", "text": "def findById(self, id):\n return self.model().get(id)", "title": "" }, { "docid": "b99d1060df9ddba39517c685d38ccc25", "score": "0.61848927", "text": "def defaultstatus():\n #NOTE: Takes the first status set as default. 
If there are more of these, it takes whichever is first.\n s = IssueStatus.objects.filter(default=True)\n if len(s) > 0:\n return s[0].id", "title": "" }, { "docid": "e0145e1be3582622b43f181fc83b04ee", "score": "0.61794394", "text": "def find(self,id):\r\n for i in self.__lista:\r\n if i.getId()==id:\r\n return i\r\n raise error_find", "title": "" }, { "docid": "e0145e1be3582622b43f181fc83b04ee", "score": "0.61794394", "text": "def find(self,id):\r\n for i in self.__lista:\r\n if i.getId()==id:\r\n return i\r\n raise error_find", "title": "" }, { "docid": "673c83cc8b21f2b9e01758d4587499ed", "score": "0.6114049", "text": "def get_by_id(self, id):\n return self.collection.find_one({\n 'data.general.externalIds.statistic': int(id)\n })", "title": "" }, { "docid": "30f8871af75eeb9441ebfdf8217727af", "score": "0.6078235", "text": "def status(id):\n return Server.status(id)", "title": "" }, { "docid": "97cf7a8589647e1aa551f22f7084844c", "score": "0.6026908", "text": "def by_id(self, id):\n status, data = self.client.get(self.endpoint + '/%s' % id)\n return self.clazz(self.client, data)", "title": "" }, { "docid": "e6be725825b8d876429d31f0df73fbc7", "score": "0.602375", "text": "def find(self,id):\r\n return RepoClient.find(self, id)", "title": "" }, { "docid": "04c99510386e91074826462ebc87e5b7", "score": "0.60051286", "text": "def _single_version(self, id_):\n s = self.client.search()\n s.params(id=id_)\n result = s.find_versions(limit=1)\n assert result\n return result[0]", "title": "" }, { "docid": "afa870776e24275158966faca3c7f30f", "score": "0.59777516", "text": "def findbyid(self, id):\n l = list(filter(lambda x: x.id == id, self._widgets))\n return None if l == [] else l[0]", "title": "" }, { "docid": "393fb947d3e374a122bb8d2aaaa46708", "score": "0.5957893", "text": "def get(self,id):\r\n item = get_one(id=id)\r\n if not item:\r\n api.abort(404)\r\n else:\r\n return item", "title": "" }, { "docid": "a0a25669d534456c9cf2243d4e7db722", "score": "0.5884681", "text": "def identify(cls, id):\n return cls.query.get(id)", "title": "" }, { "docid": "35ad8700e2980d70d0b958f3419264c5", "score": "0.5871743", "text": "def find_by_id(id):\n with get_db_cursor(commit=True) as cur:\n cur.execute(\n '''SELECT uuid, invoice_id_from, invoice_id_to, created_at, updated_at, amount, status \n FROM public.transaction WHERE id=%s::INT LIMIT 1''',\n (id,))\n result = cur.fetchone()\n if result:\n uid, inv_id_from, inv_id_to, created_at, updated_at, amount, status = result\n return Transaction(inv_id_from, inv_id_to, amount,\n uid=uid, created_at=created_at, updated_at=updated_at, id=id, status=status)\n return None", "title": "" }, { "docid": "25a8e2117f7d37f40d24e53c67350506", "score": "0.5827104", "text": "def by_id(cls, _id):\n return cls.query.filter_by(id=_id).first()", "title": "" }, { "docid": "2469e1ac6fee416acdcbc0ebf871bda7", "score": "0.581723", "text": "def find_by_id(self, id):\n self.cursor.execute(\"SELECT * FROM Accounts WHERE account_id=?\", [id])\n return self.__fetchone_account()", "title": "" }, { "docid": "8903b78b93ed7017abed9012fad01172", "score": "0.5792517", "text": "async def get_by_id(self, id):\r\n return await self.find_by_id(id)", "title": "" }, { "docid": "94cfad0d5805637784ea9ec736b5a471", "score": "0.5790875", "text": "def status_id(self, name):\n statuses = self.statuses()\n name_lower = name.lower()\n for s in statuses:\n if name_lower == s[\"name\"].lower():\n return s[\"id\"]\n else:\n raise JiraShellError(\"unknown status name: %r\" % name)", "title": "" }, { "docid": 
"a61052520a9aba9fba4414e81e545891", "score": "0.57817227", "text": "def get_task_status(job_id, entity_id):\n sql = \"select \" + \", \".join(TaskStatus.list_properties()) + \" from taskstatus where job_id = %s and entity_id = %s\"\n rows = atp_env.mydb.select(sql, (job_id, entity_id))\n if rows and len(rows) > 0:\n return TaskStatus(rows[0])\n else:\n return None", "title": "" }, { "docid": "e64a4e893d27c1c14be23f2832c3e01f", "score": "0.57264507", "text": "def find_by_id(self, book_id):\n raise NotImplementedError(\"find_one not implemented yet\")", "title": "" }, { "docid": "f8a97ef938f6864fc59c8374da72f35b", "score": "0.5718186", "text": "def get_run_status(self, id):\n res = self.api_client.GetRunStatus(id)\n return response_handler(res)", "title": "" }, { "docid": "8cb125c0e44d3974a6a10f1df2fc52cb", "score": "0.5716425", "text": "def by_id(self, id):\n return self._index.get(id, None)", "title": "" }, { "docid": "bbe3a06feba802ea4241e76e06891a8f", "score": "0.57131", "text": "def check_status(self, inspection_id: str) -> Status:", "title": "" }, { "docid": "7fc63cfeb64d2f96e408079dd937e5f8", "score": "0.5692336", "text": "def find_by_id(self, id):\n matches = [device for device in self.get_device_details() if device.id == id]\n\n if len(matches) > 1:\n raise RuntimeError(f\"Found more than one device for id '{id}'\")\n\n if len(matches) < 1:\n raise RuntimeError(f\"Could not find a device for id '{id}'\")\n\n return matches[0]", "title": "" }, { "docid": "c7418996d0c8364e3daa28d4176a80d2", "score": "0.56896317", "text": "def _find_request_by_id(self, request_id):\n response = self.client.get_request(request_id)\n\n if response.ok:\n return self.parser.parse_request(response.json())\n else:\n self._handle_response_failure(response, default_exc=FetchError,\n raise_404=False)", "title": "" }, { "docid": "0d8136dfa0ae4022d4a520fa4b59fa18", "score": "0.56861335", "text": "def call_get_status(board_id):\n global library\n return Status(library.getStatus(board_id))", "title": "" }, { "docid": "c3df2508c2259fb4bca351b120f7be7f", "score": "0.5680991", "text": "def getOrderState(id_):\r\n for i in range(len(myOrders)):\r\n if myOrders[i][\"id\"] == id_:\r\n return myOrders[i][\"state\"]\r\n return None", "title": "" }, { "docid": "9eaed028703edf386b0f6e386ac8577f", "score": "0.5678187", "text": "def findObjectById(self, id, current=None):\n raise NotImplementedError(\"servant method 'findObjectById' not implemented\")", "title": "" }, { "docid": "819e625e8bdb97bc682993a965a27728", "score": "0.56654525", "text": "def find_by_id(self, id: int):\n if self.id == id:\n return self\n for child in self._children:\n ret = child.find_by_id(id)\n if ret is not None:\n return ret\n return None", "title": "" }, { "docid": "710a25a9f8a5381bdd54926c48e9f5d6", "score": "0.5662839", "text": "def get_one(self, name_id):\r\n\r\n try:\r\n o = self.get_one_by_name(unicode(name_id))\r\n\r\n if o:\r\n return o\r\n except:\r\n pass\r\n\r\n try:\r\n o = self.get_one_by_id(int(name_id))\r\n\r\n if o:\r\n return o\r\n except:\r\n pass\r\n\r\n return None", "title": "" }, { "docid": "a780c0da727fea2ced326b9689360891", "score": "0.5659603", "text": "def get_index_status(json2i, index):\n for d in json2i[\"status\"]:\n if d[\"name\"] == index:\n return d[\"status\"]\n return None", "title": "" }, { "docid": "a780c0da727fea2ced326b9689360891", "score": "0.5659603", "text": "def get_index_status(json2i, index):\n for d in json2i[\"status\"]:\n if d[\"name\"] == index:\n return d[\"status\"]\n return None", "title": "" }, { 
"docid": "eea387c2272abe8845da429e2915d553", "score": "0.56534714", "text": "def find_by_id(foo_id):\n return Foo.query.filter_by(id=foo_id).first()", "title": "" }, { "docid": "b44712c0011f56aa706093bf3df58af5", "score": "0.56517404", "text": "def find_label_by_id(self, _id):\n search = True\n i = 0\n while search:\n if i == len(self.labels):\n break;\n\n if self.labels[i].id == _id:\n return self.labels[i]\n search = False\n #print self.labels[i].id\n i += 1\n if search:\n return None", "title": "" }, { "docid": "8ee66c58c8e87e53f58e51e6cb12619c", "score": "0.56329674", "text": "def find_successor(self, id):\n np = self.find_predecessor(id)\n return self.call_rpc(np, 'successor')", "title": "" }, { "docid": "0e526e63605d311488a12b965a5c3b72", "score": "0.5621515", "text": "def find_one(self, _topic, _id):\n return self.find_all(_topic).get(_id)", "title": "" }, { "docid": "cc6a63fca731260585164e83ebe81957", "score": "0.56211346", "text": "def get_by_id(id):\n return Vaga.query.filter_by(\n id=id\n ).one_or_none()", "title": "" }, { "docid": "b6c2e7a910446242ad055460fe8d16bf", "score": "0.5619565", "text": "def find_by_status(status):\n return Inventory.__find_by('status', status)\n # \"\"\" Returns all of the Inventories in a status\n #\n # Args:\n # status (string): the status of the Inventories you want to match\n # \"\"\"\n # return [inventory for inventory in Inventory.data if inventory.status == status]", "title": "" }, { "docid": "ebcfe52c5037cc507ba4bc9b498142cf", "score": "0.5612619", "text": "def resolve_status(cls, status_name):\n status_results = ServerStatusOp.get(name=status_name)\n\n if len(status_results) is not 1:\n raise ServerStatusNotFoundError(f'Not found status name: \"{status_name}\".')\n\n status_id = status_results[0].id\n return status_id", "title": "" }, { "docid": "08bb8cdf9a411df6c77f9adaf9366043", "score": "0.5610765", "text": "def get_state_by_id(cls, context, id):", "title": "" }, { "docid": "3248ea51964120c0d5cedc14abb69d0b", "score": "0.56092995", "text": "def get(model, _id):\n try:\n return model.query.filter_by(id=_id).one()\n except:\n return None", "title": "" }, { "docid": "59562cffb685594757502206e4dec159", "score": "0.56038266", "text": "def find(self, Id):\n if not Id in self.__items.keys():\n raise RepositoryError(\"an item with the given id does not exist\")\n return self.__items[Id]", "title": "" }, { "docid": "ab09995732f145e3c896539eca56702b", "score": "0.55944663", "text": "def getbyId(id, lista):\n for avion in lista:\n if getId(avion) == id:\n return avion\n return None", "title": "" }, { "docid": "137aa8fce3d68058f7a049bafb9a4a7e", "score": "0.5592402", "text": "def get_node(self, id:int):\n for node in self.nodes:\n if node.id == id:\n return node", "title": "" }, { "docid": "031096adf7228a01ece449e3f24aefcb", "score": "0.55861413", "text": "def find(self, id):\n clients = self.__loadFromFile()\n for i in clients:\n if i.getId() == id:\n return i\n return None", "title": "" }, { "docid": "8e65d1e8b0ed3167f521639a8d542864", "score": "0.5570195", "text": "def byId(self, Id):\n\t\tfor test in self._list:\n\t\t\tif test._Id == int(Id):\n\t\t\t\treturn test\n\t\treturn None", "title": "" }, { "docid": "b02dcaa162f590d59aaa4ee57116cac3", "score": "0.55676115", "text": "def find_by_id(cls, id):\n for user in base.session.query(User).filter(User.id == id):\n return user", "title": "" }, { "docid": "e3120b7835f6dc0449d79dfdb2f18450", "score": "0.55611265", "text": "def find_task_by_id(self, id):\n if id == 0:\n return None\n for r in 
self.root_nodes:\n task_with_id = r.find_subtask_by_id(id)\n if task_with_id:\n return task_with_id\n return None", "title": "" }, { "docid": "cb31154eef30e5d678e7afe2d01bb324", "score": "0.5550253", "text": "def api_by_id(self, _id):\n apis = self.db.apis.find({\"id\": _id})\n result = [api for api in apis]\n if result:\n return result[0]", "title": "" }, { "docid": "3b1e903302b9655d60e5473a54274061", "score": "0.5541705", "text": "def get_node_by_id(self, id):\n nodes = self.get_nodes_by_property(\"id\", id)\n if len(nodes) == 0:\n return None\n else:\n return nodes[0]", "title": "" }, { "docid": "9758c4104e0324c184e6c2f8ffbe8074", "score": "0.553512", "text": "def find_successor(self, id):\n #print('find_successor called by node_id = {} for key: {} at timestamp = {}'.format(self.n_id, str(id), self.pr_now()))\n\n if (in_range(id, self.get_id(), self.successor().get_id()) and (self.n_id != self.successor().get_id()) and (id != self.n_id)):\n return self.successor()\n else:\n remote = self.closest_preceding_node(id)\n if self.my_address.get_hash() != remote.my_address.get_hash():\n return remote.find_successor(id)\n else:\n #print('returning self')\n return self", "title": "" }, { "docid": "d42dbeaf20b1900510d8520a3ddb74e0", "score": "0.5524069", "text": "def get_id_by_status(cls, request, status_str):\n return request.dbsession.query(cls)\\\n .filter_by(status=status_str).one().id", "title": "" }, { "docid": "8a174c993de5313bff38008d2b21965b", "score": "0.5523496", "text": "def find_by_id(self, _id):\n entity = new_instance_with_primary_key(_id)\n req, params = build_select_req(entity)\n return self.dao.do_select(req, params)", "title": "" }, { "docid": "86e24e980af27207fb55034dfea85c03", "score": "0.5522255", "text": "def _get_current_status():\n return Status.query.get(1)", "title": "" }, { "docid": "9984aafd84ebbcaa4e5963d6e67f9c38", "score": "0.5510831", "text": "def status_lookup(ids):\n ids = list(set(ids))\n len_ids = len(ids) # @DEBUG\n ids = ','.join(ids)\n tw = Twython(CREDENTIALS['APP_KEY'],\n CREDENTIALS['APP_SECRET'],\n CREDENTIALS['ACCESS_KEY'],\n CREDENTIALS['ACCESS_SECRET'])\n res = tw.lookup_status(id=ids)\n\n return res", "title": "" }, { "docid": "97fb5c5de56ff9be377522f314a8b3b4", "score": "0.5509235", "text": "def find_first(self, label):\n results = self.find(label)\n\n if results:\n return results[0]\n\n return None", "title": "" }, { "docid": "c6025b852ed3cafb91c0903cfb68a643", "score": "0.5505244", "text": "def get(id_: str) -> dict:\n return __col.find_one({ID: id_})", "title": "" }, { "docid": "278ea3aadc66386e21e3a3f848dd7cd7", "score": "0.5503628", "text": "def get_by_id(self, id):\n\n return self.ent_dict[id]", "title": "" }, { "docid": "e96af5cc6e720aa83c9d4821766b3f35", "score": "0.55035645", "text": "def __getStateById(self, stateId):\n for state in self.__listState:\n if state.getId() == stateId:\n return state\n return None", "title": "" }, { "docid": "c1983281689db8dbd10a6466289f330e", "score": "0.5500628", "text": "def process_with_id(self, id):\n # ...\n for process in self.runningStack:\n if process.id == id:\n return process\n for process in self.waitingList:\n if not process == None:\n if process.id == id:\n return process\n return None", "title": "" }, { "docid": "93ca593ff87abba1fd460809570bdf60", "score": "0.5497836", "text": "def getOne(self, _id, **kwargs):\n kwargs['id'] = self.sanitize_id(_id)\n kwargs['url'] = self.SINGLE_URL\n return self.transport.set_method('GET').request(**kwargs)", "title": "" }, { "docid": 
"8658c8283a3974327caf9f8902c3209c", "score": "0.5490123", "text": "def get_object(self, id=None):\n # try:\n # obj = UpdateModel.objects.get(id=id)\n # except UpdateModel.DoesNotExist:\n # obj = None\n\n if id is None:\n return None\n\n qs= self.get_queryset().filter(id=id)\n if qs.count() == 1:\n return qs.first()\n return None", "title": "" }, { "docid": "7d2cc1e203f90ca4d0c269d8453b97de", "score": "0.5486001", "text": "def find_job(context, job_id, state=None):\n jsondata = context.response.json()\n jobs = jsondata['jobs']\n job_ids = [job[\"job_id\"] for job in jobs]\n assert job_id in job_ids\n if state is not None:\n job = get_job_by_id(jobs, job_id)\n assert job is not None\n assert job[\"state\"] is not None\n assert job[\"state\"] == state", "title": "" }, { "docid": "c46811d19a5a1e8b2179d91cd8621f5a", "score": "0.54794425", "text": "def fetch_record_or_abort(id):\n t = Todo.query.get(id)\n if not t:\n abort(404, message=\"To-do %s doesn't exist\" % id)\n return t", "title": "" }, { "docid": "88827ae23be3510f7ad3ceeb72e578fc", "score": "0.546806", "text": "def get_one_book(id):\n one_book = BookModel.query.get(id)\n if not one_book:\n return None\n return one_book", "title": "" }, { "docid": "cd170f236416826f6f51cfbe89ce787e", "score": "0.5467214", "text": "def getOne(self, _id, **kwargs):\n kwargs['url'] = self.SINGLE_URL\n kwargs['id'] = self.sanitize_id(_id)\n return self.transport.set_method('GET').request(**kwargs)", "title": "" }, { "docid": "e88996f5f5636f98775da1c096a77a74", "score": "0.5462106", "text": "def get_by_id(id, lista):\n for vanzare in lista:\n if get_id(vanzare) == id:\n return vanzare\n return None", "title": "" }, { "docid": "a11e7749007f2dff1cb1cee54a7bf898", "score": "0.5455481", "text": "def get_earliest_status(self):\n earliest = None\n if self.data:\n sorted_statuses = sorted(self.data.values(), key=lambda status: status.created_at)\n earliest = sorted_statuses[0]\n return earliest", "title": "" }, { "docid": "5da32f847a364029abdc1f497b887639", "score": "0.54494977", "text": "def single_user_by_id(id):\n all_users = allusers()\n for user in all_users:\n if str(user.key.id()) == str(id):\n return user\n return None", "title": "" }, { "docid": "66116039de789d26f77a719b0f7a59da", "score": "0.54458266", "text": "def get_object(self, id=None):\n # try:\n # obj = UpdateModel.objects.get(id=id)\n # except UpdateModel.DoesNotExist:\n # obj = None\n qs= UpdateModel.objects.filter(id=id)\n if qs.count() == 1:\n return qs.first()\n return None", "title": "" }, { "docid": "da2226493c1e57c1f59ca9ee6480b9d4", "score": "0.5440847", "text": "def _get(self, id):\n return self.client.get(\n id=id\n )", "title": "" }, { "docid": "550aeffc0a6a46d2c7844e8959420294", "score": "0.54386216", "text": "def get(self, id):\n return self.model_class.query.get(id)", "title": "" }, { "docid": "019bd99e4ffbea60d156b40876daf442", "score": "0.542419", "text": "def get_draw_or_404(cls, id_):\n obj = cls.query.get(id_)\n if not obj: # Not found, try to search via private_id\n obj = cls.query.filter_by(private_id=id_).first_or_404()\n return obj", "title": "" }, { "docid": "b2cc29e67ea5a043f5e36d7f7820f3b6", "score": "0.54189336", "text": "def get(self, id):\n return self._get(id)", "title": "" }, { "docid": "fee45f7b759fd0025df3ec1f77fbd778", "score": "0.5413169", "text": "def returnClassFromID(dictionary, id):\n if id == \"\":\n print(\"Complaint ID field is empty.\")\n return None\n else:\n id = int(id)\n for key in dictionary:\n if key == id:\n return dictionary[key]\n print(str(id) + 
\" is not in dataset.\")", "title": "" }, { "docid": "21618ee5fa2cbc37d7000714dc85e9f9", "score": "0.5408651", "text": "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "title": "" }, { "docid": "43706cc72239748e530c0225cc4232a3", "score": "0.54076946", "text": "def findit(self,idnum):\r\n \r\n for student in self.students:\r\n if student.get_id() == idnum:\r\n return student\r\n return \"Student not found\"", "title": "" }, { "docid": "421fe2e199eab214a569c850d4ae2301", "score": "0.5399659", "text": "def get_order_status_from_id(self, order_id):\r\n url = self.address + '/order/status/?id=%s' % order_id\r\n with self.opener.open(url) as result:\r\n response = result.read()\r\n if result.getcode() != 200:\r\n raise Exception(response)\r\n data = json.loads(response)\r\n closed = data['closed']\r\n if closed is not None:\r\n closed = closed.split('.', 1)[0]\r\n closed = datetime.strptime(closed, \"%Y-%m-%d %H:%M:%S\")\r\n return data['status'], data['progress'], closed", "title": "" }, { "docid": "365b17cf6694bb6f88ecbdcb78bfdef3", "score": "0.5398555", "text": "def get(self, id):\n return self.model.get(id)", "title": "" }, { "docid": "f1c020e26a7aab65df4e46a8decb5f9d", "score": "0.53943", "text": "def _get_single_instance(self, id):\n r = self._conn.get_all_instances(id)\n return r[0].instances[0] if r and r[0].instances else None", "title": "" }, { "docid": "1511a2b15680f62ad215d7fad5149374", "score": "0.5387435", "text": "def get(id):\n try:\n obj = _store.objects[id]\n if (obj is not None):\n return obj\n else:\n raise Exception('Object not found')\n except KeyError:\n time.sleep(0.0001)\n logging.debug(\"No matching object found, trying again...\")\n return get(id)", "title": "" }, { "docid": "79f55a4854504f7479a0def5112721f8", "score": "0.5371744", "text": "def _find_item(self, item_id):\n for item in self.library_items[int(item_id) - 1]:\n if item.id == item_id:\n return item\n return None", "title": "" }, { "docid": "c34848c9fc527e7aa65d870b18c12289", "score": "0.537127", "text": "def find_one(self):\n self.query_params[\"limit\"] = 1\n return self.__execute_network_call()", "title": "" }, { "docid": "35fb629615a0d589ad2e32cbe19ef6ed", "score": "0.53695875", "text": "def retrieve(self, request, pk=None):\n\n try:\n status = Status.objects.get(pk=pk)\n serializer = StatusSerializer(status, context={'request': request})\n return Response(serializer.data)\n\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "bab35dc3772a86dbd2a8dce385b28ccd", "score": "0.5361243", "text": "def get(self, id):\n return Team.query.filter(Team.id == id).one()", "title": "" }, { "docid": "a56c138e971efdac002e67888ccffad0", "score": "0.536081", "text": "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "title": "" }, { "docid": "24783f95d383b6d5aec6c7c0e4e850c4", "score": "0.53573316", "text": "def find_student(self, id_s):\n if id_s in self.__students:\n return self.__students[id_s]\n else:\n raise RepositoryFind", "title": "" }, { "docid": "9701e24dc07f76665fee58be1a192473", "score": "0.53505886", "text": "async def find_object(self, cls, _id):\n raise NotImplementedError", "title": "" }, { "docid": "6a9ee22ea5b7f46895bf0a1dc2fdd8d1", "score": "0.5342875", "text": "def get_entity(self, id):\n return self.entities.get(id, (None, None, None))[2]", "title": "" } ]
94c53df768d04da17d323d893257f0e7
Testing the building class function.
[ { "docid": "654d6599c56fe9c783f45854ff6d073c", "score": "0.6755711", "text": "def test_building_class():\n # Testing the ruleset for classifying Hazus building class\n res = []\n ref_class = ['WSF', 'WMUH']\n ref = np.ones(2)\n for i in range(2):\n data_dir = os.path.join(cur_dir, base_input_path, 'BuildingClass_Data',\n 'building_class_test_' + str(i+1) + '.json')\n with open(data_dir) as f:\n data_input = json.load(f)\n tmp = parse_BIM(data_input['GI'])\n data_output = building_class(tmp)\n print(data_output)\n res.append(int(data_output == ref_class[i]))\n # Check\n assert_allclose(res, ref, atol=1e-5)", "title": "" } ]
[ { "docid": "79785cbd6d603ba51a4083520d3df1f4", "score": "0.7564869", "text": "def test_01_BuildObjects(self):\n pass", "title": "" }, { "docid": "3b0ee69e1abd41003d408ff9161a2153", "score": "0.7322961", "text": "def test_build(self): \n\n Builder(self.app)\n\n assert 'labels' in dir(self.app)\n assert 'containers' in dir(self.app)\n assert 'entrys' in dir(self.app)\n assert 'button' in dir(self.app)", "title": "" }, { "docid": "472c03cf677f5e41336f6ca3a0f5ef5e", "score": "0.71330905", "text": "def test_create_build_type(self):\n pass", "title": "" }, { "docid": "171440fb99c27f79f66746fa5e94a0a5", "score": "0.7018632", "text": "def test_builder():", "title": "" }, { "docid": "1206e6098ea5779aa83f574c5fd68c6a", "score": "0.69523484", "text": "def build(self):\r\n pass", "title": "" }, { "docid": "496821a7b2f39891834fd89d6a116e6b", "score": "0.69168097", "text": "def _build(cls):\n from draco2.model.check import CheckVisitor\n from draco2.model.build import BuildVisitor\n checker = CheckVisitor()\n checker.visit(cls)\n builder = BuildVisitor()\n builder.visit(cls)", "title": "" }, { "docid": "3ec91b74f79c99c1f15cf8796c7e7439", "score": "0.6856415", "text": "def __build(self):\n pass", "title": "" }, { "docid": "a0ac9565236a2e1b9e78346c2d88f34d", "score": "0.6850931", "text": "def test_constructor_fill_fields(self):\r\n builder = AbstractBuilder(\"url\")\r\n\r\n self.assertEqual(builder.url, \"url\")\r\n self.assertEqual(builder.data, {})", "title": "" }, { "docid": "d6e74b6ce1bef395f1e232911019c953", "score": "0.68274146", "text": "def build(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "cfe7d12c13df9ea6847862016d1692a4", "score": "0.67328924", "text": "def test_init_creates_builder_object(self):\n\n self.assertEqual(self.director._builder, None)", "title": "" }, { "docid": "5b8a906fcd75592fc8da26dd57b4246b", "score": "0.6689936", "text": "def test_constructor(self):\n self.exporter_class()", "title": "" }, { "docid": "9fb5ddcc23fbe7e0b80120772bb6c804", "score": "0.66652966", "text": "def _build(self):\n raise NotImplementedError()", "title": "" }, { "docid": "bdd25cd8c38bbf4bf35041dad52203eb", "score": "0.6648575", "text": "def build(self):\n raise NotImplementedError(\"Must be implemented by subclass.\")", "title": "" }, { "docid": "00715bf309783b709593feac3fffeb5c", "score": "0.66266096", "text": "def build(self):\n pass", "title": "" }, { "docid": "00715bf309783b709593feac3fffeb5c", "score": "0.66266096", "text": "def build(self):\n pass", "title": "" }, { "docid": "00715bf309783b709593feac3fffeb5c", "score": "0.66266096", "text": "def build(self):\n pass", "title": "" }, { "docid": "00715bf309783b709593feac3fffeb5c", "score": "0.66266096", "text": "def build(self):\n pass", "title": "" }, { "docid": "00715bf309783b709593feac3fffeb5c", "score": "0.66266096", "text": "def build(self):\n pass", "title": "" }, { "docid": "ce715e484866b52cd69afd03acf27b54", "score": "0.6613739", "text": "def build(self):", "title": "" }, { "docid": "960babe19fdbec85b450fa6acfbb0c46", "score": "0.65682083", "text": "def test_constructor(self):\n self.build_preprocessor()", "title": "" }, { "docid": "dc9a4a9936b3508c4161ba98e3b52d5d", "score": "0.65465474", "text": "def test_builderBender_2():", "title": "" }, { "docid": "910b4f85243dee8227382619284a094c", "score": "0.6523219", "text": "def test_builds_hierarchy(self):\n class A(object):\n seed = 101\n\n class B(object):\n pass\n\n builders = {A: mock.Mock()}\n extra_builders = {B: mock.Mock()}\n\n a = A()\n b = B()\n\n network_a = 
mock.Mock()\n network_a.seed = None\n network_a.connections = []\n network_a.ensembles = [a]\n network_a.nodes = []\n network_a.networks = []\n network_a.probes = []\n network_a.config = mock.Mock(name=\"config\")\n\n network_b = mock.Mock()\n network_b.seed = None\n network_b.connections = []\n network_b.ensembles = []\n network_b.nodes = [b]\n network_b.networks = []\n network_b.probes = []\n network_b.config = mock.Mock(name=\"config\")\n\n network = mock.Mock()\n network.seed = None\n network.connections = []\n network.ensembles = []\n network.nodes = []\n network.networks = [network_a, network_b]\n network.probes = []\n network.config = mock.Mock(name=\"config\")\n\n # Patch the default builders\n with patch.object(Model, \"builders\", new=builders):\n # Create a model and build the mock network\n model = Model()\n model.build(network, extra_builders=extra_builders)\n\n # Assert that the config was stored in the model\n assert model.config is network.config\n\n # Assert that seeds were supplied\n assert model.seeds[a] == a.seed\n assert model.seeds[b] is not None\n\n # Assert the builders got called\n builders[A].assert_called_once_with(model, a)\n extra_builders[B].assert_called_once_with(model, b)", "title": "" }, { "docid": "3e57475e01715c5eecb9121be152687f", "score": "0.6487616", "text": "def test_builderBender_1():", "title": "" }, { "docid": "0539b1e0837358d04c16e0f3afe1fede", "score": "0.6480443", "text": "def test_construct_all(self):\n self.constructors.construct_all()\n self.check_all_constructions()", "title": "" }, { "docid": "981676979f4eef8174b74263f424d5db", "score": "0.64783055", "text": "def test_class_creation(self, us):\n assert us.subject == \"Subject\"\n assert us.epic == \"Epic\"\n assert us.tags == [\"expedientes\"]", "title": "" }, { "docid": "d0a58ae9df803358baba692f85001d7c", "score": "0.6455311", "text": "def test_constructor(self):\n zone_name = 'example.com'\n builder = ZoneBuilder(zone_name)\n self.assertTrue(hasattr(builder, 'zone'))\n self.assertEqual(builder.zone, zone_name)", "title": "" }, { "docid": "e5c202ae8336013a9105ef787112c70a", "score": "0.64339846", "text": "def __init__(self, builder):\n self.builder = builder", "title": "" }, { "docid": "e322e19025aa35a6340bae2365c763a8", "score": "0.63910186", "text": "def test_create_instance(self):\n pass", "title": "" }, { "docid": "e322e19025aa35a6340bae2365c763a8", "score": "0.63910186", "text": "def test_create_instance(self):\n pass", "title": "" }, { "docid": "cd4af922f0d6fb7d9b83f68ce3260a85", "score": "0.6387344", "text": "def test_creation(self):", "title": "" }, { "docid": "e291569a352687c4950fc70f60b8f3c9", "score": "0.63830835", "text": "def build(self, *args, **kwargs):", "title": "" }, { "docid": "bbb422bd58c676c8f406162c045b69f8", "score": "0.6379992", "text": "def test_object_creation(self, dcc):\n dcc", "title": "" }, { "docid": "e6d2a1d0e878bf61f725d43c158ddc0a", "score": "0.6374741", "text": "def test_constructor_element_default(self):", "title": "" }, { "docid": "d18986f77987b58f8c56e7703ef5915b", "score": "0.63620514", "text": "def test_instantiation(self):\n projectbuild = ProjectBuild.objects.create(\n project=self.project, requested_by=self.user)\n self.assertEqual(self.user, projectbuild.requested_by)\n self.assertIsNotNone(projectbuild.requested_at)\n self.assertIsNone(projectbuild.ended_at)\n self.assertEqual(\"UNKNOWN\", projectbuild.status)\n self.assertEqual(\"UNKNOWN\", projectbuild.phase)", "title": "" }, { "docid": "42c606f46e9fce84fbe465235d38b252", "score": 
"0.63536096", "text": "def test__init__(self):\n # tests based on basic object creation from self.create_self():\n assert self.l2_service_builder.driver is self.driver, \\\n \"Driver provisioning test\"\n assert self.l2_service_builder.conf is self.driver.conf, \\\n \"Dirver conf provisioning test\"\n assert self.l2_service_builder.f5_global_routed_mode is \\\n self.f5_global_routed_mode, \\\n \"f5_global_routed_mode provisioning test\"\n self.system_helper.assert_called_once_with()\n self.network_helper.assert_called_once_with()\n self.service_model_adopter.assert_called_once_with(self.driver.conf)\n\n # further tests needed:\n # add tests for...\n # - f5_external_physical_mappings assignment test\n # - vlan_binding_driver assignment/importation test\n # Suggest a refactor for implementing the above in unit tests...", "title": "" }, { "docid": "5c84bdcac04eb10a73112b0046a35461", "score": "0.63439816", "text": "def __init__(self, name, packages=None, tests=None):\n if not packages:\n packages = []\n if not tests:\n tests = []\n\n # Assert that the correct objects are passed to the constructor\n # TODO: Fix validation after optimization is done...\n validate_build_contents(packages, tests)\n super(Build, self).__init__(name, packages, tests)", "title": "" }, { "docid": "0ced0a322722630d75b8330157c2f2f8", "score": "0.63296765", "text": "def build(self) -> None: # type: ignore[override]\n pass", "title": "" }, { "docid": "0a7adf77b116c0ca5fb70df358241704", "score": "0.6324664", "text": "def _build(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "0a7adf77b116c0ca5fb70df358241704", "score": "0.6324664", "text": "def _build(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "326be520d503261de5270ed2d2f36650", "score": "0.6323604", "text": "def build(self, *args, **kwargs) -> None:\n pass", "title": "" }, { "docid": "199f3464672f4568fafbe5884b2c1ba9", "score": "0.6310089", "text": "def build(self):\n raise NotImplementedError(\"This method must be overwritten\")", "title": "" }, { "docid": "bd3a9e5b3e51173a2a3e3e19be8051a1", "score": "0.6307875", "text": "def test_e6_example_create_building(self):\n from teaser.examples import e6_generate_building as e6\n\n prj = e6.example_create_building()", "title": "" }, { "docid": "232931a9c5de82e24aafba0e69cbe5d8", "score": "0.6306217", "text": "def build(self):\n raise NotImplementedError", "title": "" }, { "docid": "892b93af5f554ca51c4d84d50ea85b70", "score": "0.6286135", "text": "def construct(self):\n\n if tools.toolbox.get_setting('control', 'do_construct')=='0':\n print 'skip constructing.'\n return\n\n def build(building):\n \"\"\"\n constructs building\n \"\"\"\n self.open('main')\n\n try:\n self.browser.follow_link(self.browser.find_link(url_regex=\n re.compile(\"upgrade_building.+id=%s\"%building)))\n print_cstring(\"Building: [%s]\"%building, 'turq')\n self.statistics['buildings_constructed']+=1\n except LinkNotFoundError:\n print_cstring('fuck that shit, not enough ressources to build [%s]'%building, 'red')\n\n self.open('main')\n html=self.browser.response().read()\n farm_building='\\t\\t\\tBauernhof<br />\\n\\t\\t\\tStufe' in html\n if farm_building:\n self.pop_critical=0\n\n storage_building='\\t\\t\\tSpeicher<br />\\n\\t\\t\\tStufe' in html\n\n if self.buildings['under_construction']:\n return\n\n if self.pop_critical:\n if 'farm' in self.buildable:\n build('farm')\n if self.storage_critical and 'storage' in self.buildable and not storage_building:\n build('storage')\n\n\n elif 
self.next_building in self.buildable and not self.pop_critical:\n build(self.next_building)", "title": "" }, { "docid": "8f49279d1bb317c91979afb820dd7ea2", "score": "0.62846243", "text": "def test_basic_build(build_turbine):\n m = build_turbine", "title": "" }, { "docid": "4ff29206f93e9f7110d7dd781f5b764d", "score": "0.6265389", "text": "def build(self):\n raise NotImplementedError()", "title": "" }, { "docid": "4ff29206f93e9f7110d7dd781f5b764d", "score": "0.6265389", "text": "def build(self):\n raise NotImplementedError()", "title": "" }, { "docid": "f1b4367da5be9f618c144e2cc54afbd9", "score": "0.62642974", "text": "def test_build_channel_class():\n channel_class = build_channel_class(channel_number=2, mcaroi_numbers=(1, 2, 3))\n\n assert hasattr(channel_class, \"channel_number\")\n assert channel_class.channel_number == 2\n\n assert hasattr(channel_class, \"sca\")\n assert hasattr(channel_class, \"mca\")\n assert hasattr(channel_class, \"mca_sum\")\n\n # there should be 3 MCAROI attributes: mcaroi01, mcaroi02, mcaroi3\n expected_mcaroi_attr_names = {\n f\"mcaroi{mcaroi_i:02d}\" for mcaroi_i in range(1, 3 + 1)\n }\n\n # there should be no other MCAROI attributes\n all_mcaroi_attr_names = {\n attr_name\n for attr_name in dir(channel_class)\n if re.match(r\"mcaroi\\d{2}\", attr_name)\n }\n\n assert expected_mcaroi_attr_names == all_mcaroi_attr_names", "title": "" }, { "docid": "03009a8e9b7e52e272271c86bdc1036c", "score": "0.6263392", "text": "def __init__(self, build=None):\n\t\tself.build = build", "title": "" }, { "docid": "24f5a49a23ee6dbf1fdd33694f266544", "score": "0.62596506", "text": "def test_01_BuildObjects(self):\n self.assertNotEqual(self.m_pyhouse_obj.House.Hvac, None)", "title": "" }, { "docid": "5b0ed901d8258b242825903cab009e1f", "score": "0.62589186", "text": "def build(self):\n raise NotImplementedError(\"Subclasses should implement this!\")", "title": "" }, { "docid": "0cc6efff5fc99c83ea7928f3ed3746e2", "score": "0.625586", "text": "def construct(self):\n pass", "title": "" }, { "docid": "0cc6efff5fc99c83ea7928f3ed3746e2", "score": "0.625586", "text": "def construct(self):\n pass", "title": "" }, { "docid": "d17bc4ec97602f1479241c306abe35df", "score": "0.62191135", "text": "def init_test_object(self, method):\n if self.func in self.use_seed:\n paddle.seed(self.seed)\n if method == \"BuildClass\" or method == \"BuildClassWithInputSpec\":\n # 仅实例化一次,防止多次实例化后,因为随机种子不固定导致多个结果值res不相等\n obj = BuildClass(self.in_params, self.func)\n obj.eval()\n elif method == \"BuildFunc\" or method == \"BuildFuncWithInputSpec\":\n obj = BuildFunc(self.in_params, self.func)\n obj.eval()\n elif method == \"naive_func\":\n obj = naive_func\n\n return obj", "title": "" }, { "docid": "4019fed8632cdbd1b4bbd90ee5deed0e", "score": "0.621247", "text": "def test__build(self):\n x = numpy.array([[1, 2],\n [3, 4],\n [5, 6]])\n # start with default initialization\n subject = gandy.models.bnns.BNN((2,), (4,), train_size=len(x))\n self.assertTrue(isinstance(subject.model, tf.keras.Model))\n self.assertTrue(subject.model._compile_was_called)\n self.assertTrue(subject.model.built)\n self.assertEqual(tuple(subject.model.input.shape.as_list())[1:],\n subject.xshape)\n predict = subject.model.predict(x)\n self.assertTrue(predict.shape == (3, 4))\n out = subject.model(x)\n self.assertTrue(isinstance(out, tfp.distributions.Distribution))\n\n # test keyword assignment\n subject = gandy.models.bnns.BNN((2,), (4,),\n train_size=len(x),\n optimizer='RMSprop')\n self.assertTrue(isinstance(subject.model.optimizer,\n 
tf.keras.optimizers.RMSprop))\n subject = gandy.models.bnns.BNN((2,), (4,),\n train_size=len(x),\n optimizer=tf.keras.optimizers.RMSprop)\n self.assertTrue(isinstance(subject.model.optimizer,\n tf.keras.optimizers.RMSprop))\n opt = tf.keras.optimizers.RMSprop()\n subject = gandy.models.bnns.BNN(\n (2,), (4,),\n train_size=len(x),\n optimizer=opt\n )\n self.assertTrue(subject.model.optimizer is opt)\n return", "title": "" }, { "docid": "75e5fa6e2b104b3873ddebfaecd1c525", "score": "0.6196077", "text": "def build(self) -> BaseHamilton:\r\n pass", "title": "" }, { "docid": "954f0d5b9a294c6621047ec5fbe8cb09", "score": "0.6177918", "text": "def build(self):\n raise NotImplementedError('build not implemented')", "title": "" }, { "docid": "a0243886ee1575ea81a670f223c79fb5", "score": "0.61775076", "text": "def build(cls, **kwargs):\n return cls(**kwargs)", "title": "" }, { "docid": "320dd0725c028e81ddd15a2fa2d416bb", "score": "0.6166137", "text": "def setup_class(klass):", "title": "" }, { "docid": "287e22c11c3146d1d4879fb7d20d2367", "score": "0.61563617", "text": "def test_creation(self):\n classifier = LookUpClassifier()\n self.assertTrue(classifier)", "title": "" }, { "docid": "3304793c6d84b318dab1a82509cae13e", "score": "0.6152292", "text": "def setup_class(self):", "title": "" }, { "docid": "3304793c6d84b318dab1a82509cae13e", "score": "0.6152292", "text": "def setup_class(self):", "title": "" }, { "docid": "3304793c6d84b318dab1a82509cae13e", "score": "0.6152292", "text": "def setup_class(self):", "title": "" }, { "docid": "c24da61c1c630167ea96036f654b6907", "score": "0.6150095", "text": "def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')", "title": "" }, { "docid": "c24da61c1c630167ea96036f654b6907", "score": "0.6150095", "text": "def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')", "title": "" }, { "docid": "a1f69d8e7f30dd22e97c8541192f8495", "score": "0.6137967", "text": "def test_build(self):\n self.request.add_attribute(SearchAttribute, 'Test', 123)\n built = self.request.build()\n expected = {\n \"TableName\": \"testtable\",\n \"Test\": 123,\n }\n assert built == expected", "title": "" }, { "docid": "bf46fc898a1002dffb975da236e47ef8", "score": "0.6135326", "text": "def test_registerClass():", "title": "" }, { "docid": "ff5f80ce9487551ad2145db63945af70", "score": "0.61318994", "text": "def test_constructor(self):\n Exporter()", "title": "" }, { "docid": "7e47ac8a2242fcea108249ebb4eed07c", "score": "0.6104577", "text": "def setUpClass(cls):\n annofile = os.path.join(util.UTFILES_DIR, \"make-3.82-emake-5.3.0.xml\")\n cls.build = annolib.AnnotatedBuild(annofile)\n\n # Collect all the jobs (so that we have the metrics)\n cls.jobs = cls.build.getAllJobs()\n\n cls.build.close()", "title": "" }, { "docid": "4be12dc2ece63416767cc9fd0a4b0420", "score": "0.61017764", "text": "def example_type_building():\n\n from teaser.project import Project\n\n \"\"\"We instantiate the Project class. 
The parameter load_data = True indicates\n that we load the XML data bases into our Project.\n This can take a few sec.\"\"\"\n\n prj = Project(load_data=True,used_data_country=\"Belgium\") #we maken een project aan,\n # true because data bindings for\n #type elements and use conditions should be loaded\n #dus instance of dataclass() wordt aangemaakt\n #de 3 input XML's worden geladen en de bijhorende scripts aangeroepen\n #om de nodige waarden te kunnen uitlezen\n prj.name = \"ArchetypeBuildings_Ref_ANNEX_bel\" #de naam van het project is ...\n\n \"\"\"The five functions starting with type_bldg giving us the opportunity to\n create the specific type building (e.g. type_bldg_residential). The function\n automatically calculates all the necessary parameter. If not specified different\n it uses vdi calculation method.\n Dus type_bldg_residential maakt een instance aan van building() en zal vervolgens alles berekenen\n neen!! geen instantie van building() wel van singlefamilydwelling() dit is een specifieker gebouwtype\n \"\"\"\n\n prj.type_bldg_residential(name=\"ResidentialBuilding\",\n year_of_construction=1970,\n number_of_floors=2,\n height_of_floors=3.5,\n net_leased_area=100,\n construction_type=\"heavy\")\n\n '''Or we use Annex60 method (e.g with four elements). Which exports one\n Model per zone'''\n\n prj.used_library_calc = 'Annex60'\n prj.number_of_elements_calc = 4\n prj.merge_windows_calc = False\n prj.calc_all_buildings(raise_errors=True)\n prj.export_annex()", "title": "" }, { "docid": "e85909cd18b3ac27f53865a64953be41", "score": "0.6098726", "text": "def build_test(self):\n\n super(AggregateBase, self).build_test()\n self.set_common_expected_parms()\n self.set_common_optional_parms()", "title": "" }, { "docid": "7b98de1fefbecf8e02cf5a6d245da365", "score": "0.6096737", "text": "def build(self, *args, **kw):\n raise NotImplementedError", "title": "" }, { "docid": "2c7bbb4b8571925c0bace6890a5b0386", "score": "0.6086688", "text": "def test_creation(self):\n classifier = DecisionTreeClassifier()\n self.assertTrue(classifier)", "title": "" }, { "docid": "5cffd8588be512521a72cc019450fd47", "score": "0.60760254", "text": "def test_node_object_generation(self):", "title": "" }, { "docid": "6c093f4399cd54397c2b17257cc011b2", "score": "0.60751545", "text": "def test_class_creation(self, report):\n assert report.project == \"SIEEL\"", "title": "" }, { "docid": "782bb3da51cc0ba09cd6f76a807748da", "score": "0.6072306", "text": "def __call_creator_constructor_test(self, sport=None, prize_structure=None, start=None,\n duration=None,\n draft_group=None):\n if sport is None:\n sport = self.sport\n if prize_structure is None:\n class PrizeStructureChild(PrizeStructure):\n def __init__(self):\n pass\n\n prize_structure = PrizeStructureChild\n if start is None:\n start = timezone.now()\n if duration is None:\n duration = int(300)\n\n # try to construct a ContestPool\n try:\n # print('sport:', str(sport))\n self.assertRaises(IncorrectVariableTypeException,\n lambda: ContestPoolCreator(sport, prize_structure, start, duration,\n draft_group))\n except:\n # there might be other exceptions from\n # inner objects, but those arent tested here!\n pass", "title": "" }, { "docid": "a594555aedaf1c3e6691cfa6639be2cd", "score": "0.6063685", "text": "def test_creation(self):\n classifier = MajorityClassifier()\n self.assertTrue(classifier)", "title": "" }, { "docid": "01c0508071cc479a368388b7522f7e3d", "score": "0.6058457", "text": "def test_valid_construction(constructor_args):\n boxmc = 
hoomd.hpmc.update.BoxMC(**constructor_args)\n\n # validate the params were set properly\n obj_attr_check(boxmc, constructor_args)", "title": "" }, { "docid": "d32f8ab65ec268511bd8d5293ce941db", "score": "0.60434175", "text": "def test_constructor(self):\n assert isinstance(self.j, Jungle)", "title": "" }, { "docid": "67342dca7cd52466a19fbe618139e89e", "score": "0.60257757", "text": "def __init__(self, name, params):\n Tester.__init__(self, name, params)", "title": "" }, { "docid": "b9860cd8a7a7fa554d317b27f4bdd0e2", "score": "0.60252756", "text": "def test_class_created():\n check_class_exists(task_22_1, \"Topology\")", "title": "" }, { "docid": "116551fe248c6d21c8c43401a439c5f7", "score": "0.6021173", "text": "def test_facade_builder(self):\n\n\t\tgenerated_builder = self.manual_facade.get_object_builder()\n\t\tgenerated_builder.load_from_config(\"small_red_cube\")\n\t\tsmall_red_cube = generated_builder.create(\"small_red_cube\", \"small_offset\")\n\t\tself.assertEqual(small_red_cube.get_descriptor(), \"cube\")", "title": "" }, { "docid": "e717c99c03b2e63b07daac6e4a99c1d2", "score": "0.6010992", "text": "def test_setup(self):\n self.create_iinfo_class()\n self.create_info_class()", "title": "" }, { "docid": "d4f8eb698b46a2eba74506350d064519", "score": "0.6007934", "text": "def setUpClass(cls):", "title": "" }, { "docid": "d4f8eb698b46a2eba74506350d064519", "score": "0.6007934", "text": "def setUpClass(cls):", "title": "" }, { "docid": "7a54e2d90d78a39cfaed900ade004d47", "score": "0.60028803", "text": "def test_new(self):", "title": "" }, { "docid": "7a54e2d90d78a39cfaed900ade004d47", "score": "0.60028803", "text": "def test_new(self):", "title": "" }, { "docid": "10fb8bfe004bc3b095ad573daca00143", "score": "0.60009766", "text": "def test_creation(self):\n classifier = AdaBoost()\n self.assertTrue(classifier)", "title": "" }, { "docid": "0ba8de071134389fefb87625cdb93dfc", "score": "0.59946126", "text": "def build(self,\n environment: 'Environment',\n bug: bugzoo.Bug\n ) -> 'TestSuite':\n ...", "title": "" }, { "docid": "1b85105730caa397bf087b8e775e6d4c", "score": "0.599112", "text": "def construct(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "430fd8ae59bc9180c21fa0d9371678d9", "score": "0.5982696", "text": "def test___init__(self):\n pass", "title": "" }, { "docid": "2f084f3f6df37de5f4e25574ad0a0fd6", "score": "0.598147", "text": "def test_create(self):\r\n\r\n self.assertTrue(isinstance(self.DUT, Model))\r\n\r\n # Verify Hardware class was properly initialized.\r\n self.assertEqual(self.DUT.revision_id, None)\r\n self.assertEqual(self.DUT.category_id, 0)\r\n\r\n # Verify Relay class was properly initialized.\r\n self.assertEqual(self.DUT.category, 6)\r\n self.assertEqual(self.DUT.quality, 0)\r\n self.assertEqual(self.DUT.construction, 0)\r\n self.assertEqual(self.DUT.q_override, 0.0)\r\n self.assertEqual(self.DUT.base_hr, 0.0)\r\n self.assertEqual(self.DUT.piQ, 0.0)\r\n self.assertEqual(self.DUT.piE, 0.0)", "title": "" }, { "docid": "3c86562f72e8f338ee82475a2446a095", "score": "0.5974097", "text": "def test_inferior_asserting(self):\n self.build()\n self.inferior_asserting()", "title": "" }, { "docid": "7dcfbcc391862e179da70f514c9dcbdc", "score": "0.5963725", "text": "def test__init__(self):\n pass", "title": "" }, { "docid": "173d16d18e39af15b541eb1ddb227fb8", "score": "0.5959179", "text": "def test_create_build_type_template(self):\n pass", "title": "" }, { "docid": "cf08cc42b0a0d405427e99f1d114c3b9", "score": "0.595564", "text": "def setup_class(cls):", "title": 
"" }, { "docid": "cf08cc42b0a0d405427e99f1d114c3b9", "score": "0.595564", "text": "def setup_class(cls):", "title": "" }, { "docid": "cf08cc42b0a0d405427e99f1d114c3b9", "score": "0.595564", "text": "def setup_class(cls):", "title": "" }, { "docid": "64bd06278ad63546f01744fc65c2f6fa", "score": "0.595478", "text": "def compile_class(self):", "title": "" }, { "docid": "d190d5ec12b2a30a6a63bc3ff6f1a7ae", "score": "0.5954159", "text": "def construct(self):\n pass # To be implemented in subclasses", "title": "" } ]
b9dd9e8181293cb24fa0c3e0b144cff8
Entry point of program
[ { "docid": "7b29257fa5d2d624779dbb9d8e13a809", "score": "0.0", "text": "def main():\n client.run(secrets.TOKEN)", "title": "" } ]
[ { "docid": "1d4484b0529dbe0541b241834cb12a3e", "score": "0.8570562", "text": "def main():\n\t\tpass", "title": "" }, { "docid": "ec021328057f10f8af523fff413bbe9a", "score": "0.8506079", "text": "def main ():", "title": "" }, { "docid": "ec021328057f10f8af523fff413bbe9a", "score": "0.8506079", "text": "def main ():", "title": "" }, { "docid": "82c54a749d7508e492693a3059faed30", "score": "0.84507304", "text": "def main():\n return", "title": "" }, { "docid": "82c54a749d7508e492693a3059faed30", "score": "0.84507304", "text": "def main():\n return", "title": "" }, { "docid": "07e76a2dea489b69c96fdd82461f8592", "score": "0.8364074", "text": "def main(self):\n pass", "title": "" }, { "docid": "07e76a2dea489b69c96fdd82461f8592", "score": "0.8364074", "text": "def main(self):\n pass", "title": "" }, { "docid": "6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.8349577", "text": "def main() -> None:", "title": "" }, { "docid": "6a72f0523c7f4b19a3ff495c1baaa5c5", "score": "0.8349577", "text": "def main() -> None:", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": "0.8306582", "text": "def main():", "title": "" }, { "docid": "08aa0c5cc07c5efe590ba89320b45dbc", "score": 
"0.8306582", "text": "def main():", "title": "" }, { "docid": "4301ce0ec56b79f3f0e020a93f26bdee", "score": "0.82826084", "text": "def main():\n \n pass", "title": "" }, { "docid": "d01bb886890ed36b0f4c3f9b6f39bbe6", "score": "0.82752603", "text": "def main():\r\n pass", "title": "" }, { "docid": "d01bb886890ed36b0f4c3f9b6f39bbe6", "score": "0.82752603", "text": "def main():\r\n pass", "title": "" }, { "docid": "c8e238f9ea21384e3be28d52a1ee7939", "score": "0.82637316", "text": "def main():\n\tpass", "title": "" }, { "docid": "cfc083a503ff24be9d8e1d2ad2fd1393", "score": "0.82436484", "text": "def main(self):\n return", "title": "" }, { "docid": "59a11114512ac46a2a2d976a77d1c6d6", "score": "0.8186553", "text": "def main(self, *args):\n pass", "title": "" }, { "docid": "59a11114512ac46a2a2d976a77d1c6d6", "score": "0.8186553", "text": "def main(self, *args):\n pass", "title": "" }, { "docid": "29ddf12471e98d1f15306c861cce2085", "score": "0.8155023", "text": "def main():\n print(\"Call your main application code here\")", "title": "" }, { "docid": "29ddf12471e98d1f15306c861cce2085", "score": "0.8155023", "text": "def main():\n print(\"Call your main application code here\")", "title": "" }, { "docid": "29ddf12471e98d1f15306c861cce2085", "score": "0.8155023", "text": "def main():\n print(\"Call your main application code here\")", "title": "" }, { "docid": "566036b3a07ddd1af043005be3b82644", "score": "0.80748236", "text": "def main(args=None):\r\n pass", "title": "" }, { "docid": "1e8b2ebbe42aca7bf8565023e7af8326", "score": "0.8069544", "text": "def main(args=None):", "title": "" }, { "docid": "737edbcdda02609a89cb40f455a292fb", "score": "0.8060648", "text": "def main(args):\n pass", "title": "" }, { "docid": "321784f4004341db82df14bd42654ecc", "score": "0.80290717", "text": "def main():\n _main(sys.argv[1:])", "title": "" }, { "docid": "e302ae8a2be1a50834705897601e1c4f", "score": "0.79930955", "text": "def _main():\n\tpass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def 
main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "bc0d536b098278d704e13b8e318d6af5", "score": "0.79857004", "text": "def main():\n pass", "title": "" }, { "docid": "8105ce55096699eab7dce0d326a51948", "score": "0.79610205", "text": "def main():\n print(\"start ...\")", "title": "" }, { "docid": "8a52887b08fa7fd181a79c9d75450e83", "score": "0.79189616", "text": "def main():\n if __name__ == '__main__':\n pass", "title": "" }, { "docid": "5e4cec8ea848b3d798bca3b0fced58b1", "score": "0.78941566", "text": "def main(args):\n Run()", "title": "" }, { "docid": "a4873d5cb2b9c8e72f57155f6e9b8d43", "score": "0.78750134", "text": "def main(args):\n\treturn 0", "title": "" }, { "docid": "05b99850a4683aae56ee599491955a1b", "score": "0.7862574", "text": "def _main() -> None:\n pass", "title": "" }, { "docid": "64a1a4261cea121e609f61fd60d0b36f", "score": "0.7859942", "text": "def main():\n return None", "title": "" }, { "docid": "b816c59d61979d6c3de629bfe657acc3", "score": "0.78479403", "text": "def main():\n CLI().run(argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.7813415", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.7813415", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.7813415", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.7813415", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "ff6b5f435e231cc978bcc5e9148a452d", "score": "0.7813415", "text": "def run():\n main(sys.argv[1:])", "title": "" }, { "docid": "a8550ef16b860a6cc3bad917dfb04f42", "score": "0.7802033", "text": "def main():\n ...", "title": "" }, { "docid": "8e20fe7656852b06a2bcefab39b3b901", "score": "0.7796682", "text": "def main():\n print(run())", "title": "" }, { "docid": 
"97569561f99d1987cecc56802d6aef43", "score": "0.7781605", "text": "def main():\n flag = init()\n run( flag )", "title": "" }, { "docid": "04688cd98d9fcf835947139440a6440f", "score": "0.7780548", "text": "def main():\n\n pass\n\n return None", "title": "" }, { "docid": "f8826e58b73a5885ad66a1bd972804dc", "score": "0.7761393", "text": "def main():\n app = Application(argv)\n app.run()", "title": "" }, { "docid": "93e2db98c51fbce39b84a61c48c918ca", "score": "0.7745705", "text": "def main(args):\n return 0", "title": "" }, { "docid": "f64ecdad4bada7fe6ab976b65fd314c4", "score": "0.7737328", "text": "def main():\n cli()", "title": "" }, { "docid": "535c6e7d32028af3cc3d681a0804815d", "score": "0.7719438", "text": "def main(args=None):\n return 0", "title": "" }, { "docid": "b8d45552f96ceaa95dc134f6f5d74cd7", "score": "0.76849943", "text": "def main():\n\n print(\"Have a nice day!\")", "title": "" }, { "docid": "a8577939272433d16335be4b6d98c8f1", "score": "0.7656128", "text": "def main(argv):\n\treturn run()", "title": "" }, { "docid": "c3cab5de9a654ae1f5d2da3c963dbd93", "score": "0.76134276", "text": "def main():\n log.debug(\"Calling CLI instance...\")\n input()\n log.debug(\"Application exit.\")", "title": "" }, { "docid": "a01e5ea8aee35b5cf5d09a93b93ad122", "score": "0.758695", "text": "def main():\n\n execute_cli()", "title": "" } ]