Dataset columns:
  query     : string, length 9 to 9.05k
  document  : string, length 10 to 222k
  negatives : list, 19 to 20 items
  metadata  : dict
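Each row below pairs a natural-language query with one matching code snippet (document) and a list of hard-negative snippets. As a rough sketch of that layout — the class name and type hints are illustrative assumptions, not part of the dataset — a record could be modelled in Python as:

from dataclasses import dataclass, field
from typing import Any

@dataclass
class RetrievalRecord:
    # One row of the dataset; field names match the columns above.
    query: str                  # natural-language description, typically a docstring
    document: str               # the matching (positive) code snippet
    negatives: list[str]        # 19-20 non-matching code snippets
    metadata: dict[str, Any] = field(default_factory=dict)  # training-objective spec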
Get float value of money.
def GetFloat(self): return self.Amount / float(Money.HiCost)
[ "def cash_to_float(amount):\n\n suffixes = {\n 'k': 3,\n 'm': 6,\n 'b': 9,\n 't': 12,\n 'p': 15,\n }\n\n if amount:\n try:\n quotient = float(amount)\n #return \"{:.2f}\".format(quotient) # already a float!\n return round(quotient, 2)\n except ValueError as e:\n suffix = amount[-1].lower()\n\n if suffixes.has_key(suffix):\n #return \"{:.2f}\".format(float(amount[0:-1]) * (10**suffixes[suffix]))\n return round(float(amount[0:-1]) * (10**suffixes[suffix]), 2)\n\n return None", "def getfloat(self, key) -> float:\n\t\tvalue = self._data[key]\n\t\treturn float(value)", "def getAmount():\r\n amount = input()\r\n if amount.isdecimal() is True:\r\n return float(amount)\r\n else:\r\n getAmount()", "def getFloat(self, *args):\r\n return _osgDB.Field_getFloat(self, *args)", "def getfloat(self, section, setting):\n value = self.get(section, setting)\n if value is None:\n raise ValueError(\"%s.%s : is not a number\" % (section, setting))\n return float(self.get(section, setting))", "def get_transaction_value():\r\n # Get the user input, transform it from a string to a float and store it in user_input\r\n return float(input('Your transaction amount : '))", "def _float(value, user: User = flask_security.current_user):\n try:\n value = float(value)\n except (ValueError, TypeError):\n flask.abort(400) if _is_admin(user) else flask.abort(403)\n return value", "def get_coin_value(self, coin):\r\n url = self.url_base + 'coin=' + str(coin)\r\n \r\n if self.debug == 1:\r\n print url\r\n\r\n try:\r\n result = requests.get(url, timeout=self.timeout)\r\n except requests.exceptions.RequestException as exception:\r\n print exception\r\n return \"ERROR\"\r\n\r\n return float(result.text)", "def receive_float(self):\n\t\treturn struct.unpack('>f', self.receive_and_decrypt())[0]", "def float( self, unit ) :\n\n return( self[0].float( unit ) )", "def asFloat(cls, value, default=None):\n try:\n return float(value)\n except (ValueError, TypeError, KeyError):\n pass\n return default", "def SoFloatElement_get(stackIndex: 'int const', state: 'SoState') -> \"float\":\n return _coin.SoFloatElement_get(stackIndex, state)", "def get(stackIndex: 'int const', state: 'SoState') -> \"float\":\n return _coin.SoFloatElement_get(stackIndex, state)", "def get_buy_amount(self):\r\n return self.balance / 3", "def SoDecimationPercentageElement_get(state: 'SoState') -> \"float\":\n return _coin.SoDecimationPercentageElement_get(state)", "def get_buy_price(self) -> float:\n return self.buy_price", "def cash(self):\n return self.cents / 100", "def getUninvested(self) -> float:\n record = self.conn.execute(\"\"\"SELECT amount FROM uninvested\"\"\").fetchone()\n if record:\n return float(record[0])\n else:\n return 0", "def pct_to_float(number):\n return float(number.strip(\"%\").replace(\".\", \"\").replace(\",\", \".\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Distribute the money value among len(ks) money objects according to the given coefficients.
def Distribute(self, ks):
    # Count of coefficients.
    n = len(ks)
    if n == 0:
        # No distribution.
        raise ValueError("No factors to distribute money.")
    if n == 1:
        # Only one factor.
        return self
    # First normalize the list of coefficients.
    nks = ut.npa_norm(ks)
    # Create array for the new money objects.
    ms = [0] * n
    # Initialize the amounts of all but the last money object.
    rest = self.Amount
    for i in range(n - 1):
        am = int(round(self.Amount * nks[i]))
        rest -= am
        ms[i] = Money.FromAmount(am)
    # The last element is calculated from the rest.
    ms[n - 1] = Money.FromAmount(rest)
    return ms
[ "def compute_coefficients_ref(ks):\n coeffs = [1]\n for k in ks:\n coeffs = zipWith(lambda x,y:x+y,coeffs+[0],[0]+[-k*c for c in coeffs])\n return coeffs", "def basis_dk(k, i, p, u, u_list):\n if k > p:\n raise ValueError('k should not exceed p')\n elif k == 0:\n return basis(i, p, u, u_list)\n a = div(p, u_list[i + p] - u_list[i]) * basis_dk(k - 1, i, p - 1, u, u_list)\n b = div(p, u_list[i + p + 1] - u_list[i + 1]) * basis_dk(k - 1, i + 1, p - 1, u, u_list)\n return a - b", "def price(self, comm, ucomm, price):\r\n try:\r\n units = self[comm]\r\n except KeyError:\r\n return\r\n wdiff = Wallet()\r\n wdiff[comm] = -units\r\n wdiff[ucomm] = units * price\r\n self += wdiff", "def poissonDistribution(a, k):\n from math_ import factorial, exp\n\n if a < 0 or k < 0:\n return 0\n\n return float(pow(a, k)) * exp(-a) / factorial(k)", "def binomial_cdf(q, N, k):\n # Useful as sanity check that gmpy2 is providing sufficient precision.\n # g.bincoef is essential for ensuring precision.\n tmp_list = [mpfr(\"0\")]\n for i in range(0,k+1):\n tt1 = g.mul(g_pow(q,i),g.mul(g.bincoef(N,i),g_pow(1-q,N-i)))\n tmp_list.append( tt1 ) \n tmp1 = g.fsum(tmp_list)\n return tmp1", "def cdf(self, k):\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n return (sum([self.pmf(n) for n in range(k + 1)]))", "def test_kl_div(self):\r\n import numpy as np\r\n import cvxpy as cp\r\n\r\n kK=50\r\n kSeed=10\r\n\r\n prng=np.random.RandomState(kSeed)\r\n #Generate a random reference distribution\r\n npSPriors=prng.uniform(0.0,1.0,kK)\r\n npSPriors=npSPriors/np.sum(npSPriors)\r\n\r\n #Reference distribution\r\n p_refProb=cp.Parameter(kK,1,sign='positive')\r\n #Distribution to be estimated\r\n v_prob=cp.Variable(kK,1)\r\n objkl=0.0\r\n for k in xrange(kK):\r\n objkl += cp.kl_div(v_prob[k,0],p_refProb[k,0])\r\n\r\n constrs=[__builtins__['sum']([v_prob[k,0] for k in xrange(kK)])==1]\r\n klprob=cp.Problem(cp.Minimize(objkl),constrs)\r\n p_refProb.value=npSPriors\r\n result = klprob.solve(solver=CVXOPT, verbose=True)\r\n self.assertItemsAlmostEqual(v_prob.value, npSPriors)\r\n result = klprob.solve(solver=SCS, verbose=True)\r\n self.assertItemsAlmostEqual(v_prob.value, npSPriors, places=3)", "def make_change(bill_amount_in_dollars, dollars_cost, cents_cost):\n total_change_cents = dollars_cost * 100 + cents_cost\n remaining_change_in_cents = (bill_amount_in_dollars * 100 -\n total_change_cents)\n coin_types = [100, 25, 10, 5, 1]\n coins_to_return = []\n for coin in coin_types:\n num_coins, remaining_change_in_cents = divmod(\n remaining_change_in_cents, coin)\n coins_to_return.append(num_coins)\n return coins_to_return", "def victor_miller_basis(k, prec=10, cusp_only=False, var='q'):\n k = Integer(k)\n if k%2 == 1 or k==2:\n return Sequence([])\n elif k < 0:\n raise ValueError(\"k must be non-negative\")\n elif k == 0:\n return Sequence([PowerSeriesRing(ZZ,var)(1).add_bigoh(prec)], cr=True)\n e = k.mod(12)\n if e == 2: e += 12\n n = (k-e) // 12\n\n if n == 0 and cusp_only:\n return Sequence([])\n\n # If prec is less than or equal to the dimension of the space of\n # cusp forms, which is just n, then we know the answer, and we\n # simply return it.\n if prec <= n:\n q = PowerSeriesRing(ZZ,var).gen(0)\n err = bigO(q**prec)\n ls = [0] * (n+1)\n if not cusp_only:\n ls[0] = 1 + err\n for i in range(1,prec):\n ls[i] = q**i + err\n for i in range(prec,n+1):\n ls[i] = err\n return Sequence(ls, cr=True)\n\n F6 = eisenstein_series_poly(6,prec)\n\n if e == 0:\n A = Fmpz_poly(1)\n elif e == 4:\n A = 
eisenstein_series_poly(4,prec)\n elif e == 6:\n A = F6\n elif e == 8:\n A = eisenstein_series_poly(8,prec)\n elif e == 10:\n A = eisenstein_series_poly(10,prec)\n else: # e == 14\n A = eisenstein_series_poly(14,prec)\n\n if A[0] == -1 :\n A = -A\n\n if n == 0:\n return Sequence([PowerSeriesRing(ZZ,var)(A.list()).add_bigoh(prec)],cr=True)\n\n F6_squared = F6**2\n F6_squared._unsafe_mutate_truncate(prec)\n D = _delta_poly(prec)\n Fprod = F6_squared\n Dprod = D\n\n if cusp_only:\n ls = [Fmpz_poly(0)] + [A] * n\n else:\n ls = [A] * (n+1)\n\n for i in range(1,n+1):\n ls[n-i] *= Fprod\n ls[i] *= Dprod\n ls[n-i]._unsafe_mutate_truncate(prec)\n ls[i]._unsafe_mutate_truncate(prec)\n\n Fprod *= F6_squared\n Dprod *= D\n Fprod._unsafe_mutate_truncate(prec)\n Dprod._unsafe_mutate_truncate(prec)\n\n\n P = PowerSeriesRing(ZZ,var)\n if cusp_only :\n for i in range(1,n+1) :\n for j in range(1, i) :\n ls[j] = ls[j] - ls[j][i]*ls[i]\n\n return Sequence([P(l.list()).add_bigoh(prec) for l in ls[1:]],cr=True)\n else :\n for i in range(1,n+1) :\n for j in range(i) :\n ls[j] = ls[j] - ls[j][i]*ls[i]\n\n return Sequence([P(l.list()).add_bigoh(prec) for l in ls], cr=True)", "def test_kl_div(self):\r\n import numpy as np\r\n import cvxpy as cp\r\n\r\n kK=50\r\n kSeed=10\r\n\r\n prng=np.random.RandomState(kSeed)\r\n #Generate a random reference distribution\r\n npSPriors=prng.uniform(0.0,1.0,kK)\r\n npSPriors=npSPriors/sum(npSPriors)\r\n\r\n #Reference distribution\r\n p_refProb=cp.Parameter(kK,1,sign='positive')\r\n #Distribution to be estimated\r\n v_prob=cp.Variable(kK,1)\r\n objkl=0.0\r\n for k in xrange(kK):\r\n objkl += cp.kl_div(v_prob[k,0],p_refProb[k,0])\r\n\r\n constrs=[sum([v_prob[k,0] for k in xrange(kK)])==1]\r\n klprob=cp.Problem(cp.Minimize(objkl),constrs)\r\n p_refProb.value=npSPriors\r\n result = klprob.solve(solver=SCS, verbose=True)\r\n self.assertItemsAlmostEqual(v_prob.value, npSPriors)", "def distribute_gain(self, db, player_key, position, room_key):\n distribution = db.child('game').child(\n room_key).child('distribution').get()\n contribution = db.child('game').child(\n room_key).child('contribution').get()\n try:\n whole = contribution.val()\n part = distribution.val()[position]\n earning = int((part * whole)+whole)\n if earning > 0:\n db.child('game').child(room_key).child('players').child(\n player_key).update({'gain': distribution.val()[position]})\n db.child('game').child(room_key).child('players').child(player_key).update(\n {'earning': earning})\n else:\n db.child('game').child(room_key).child('players').child(\n player_key).update({'gain': distribution.val()[position]})\n db.child('game').child(room_key).child('players').child(player_key).update(\n {'earning': 0})\n\n except KeyError:\n db.child('game').child(room_key).child(\n 'players').child(player_key).update({'gain': -1})", "def get_money(self, *args):\n for coin in args:\n self.value_entered += coin.value", "def drop_money(quantity, **wallet):\n output_sum = 0\n output_wallet = {}\n additional_wallet = wallet\n additional_wallet_1 = {}\n additional_wallet_2 = {}\n max_key = \"\"\n for key in wallet:\n output_wallet[key] = 0\n while quantity != 0:\n value = 0\n min_key = \"100 Rub\"\n\n for key in additional_wallet:\n if additional_wallet[key] != 0:\n additional_wallet_1[key] = additional_wallet[key]\n else:\n pass\n\n for key in additional_wallet_1:\n if int(str(key)[:-4]) > quantity:\n break\n else:\n if additional_wallet_1[key] == value:\n additional_wallet_2[key] = additional_wallet_1[key]\n value = additional_wallet_1[key]\n 
max_key = key\n elif additional_wallet_1[key] > value:\n additional_wallet_2.clear()\n additional_wallet_2[key] = additional_wallet_1[key]\n value = additional_wallet_1[key]\n max_key = key\n else:\n pass\n\n for key in additional_wallet_1:\n if int(str(key)[:-4]) > quantity:\n break\n else:\n if additional_wallet_1[key] == value:\n additional_wallet_2[key] = additional_wallet_1[key]\n value = additional_wallet_1[key]\n elif additional_wallet_1[key] < value:\n additional_wallet_2.clear()\n additional_wallet_2[key] = additional_wallet_1[key]\n value = additional_wallet_1[key]\n else:\n pass\n\n for key in additional_wallet_1:\n if (int(str(min_key)[:-4])) < (int(str(key)[:-4])):\n pass\n else:\n min_key = key\n\n if quantity % (int(str(min_key)[:-4])) != 0:\n print(\"\\nThe requested amount must be a multiple of:\", min_key)\n return \"Not Ok\"\n else:\n pass\n\n if value <= 2:\n for key in additional_wallet_1:\n if int(str(key)[:-4]) > quantity:\n break\n else:\n max_key = key\n output_wallet[max_key] = output_wallet[max_key] + 1\n additional_wallet[max_key] = additional_wallet[max_key] - 1\n output_sum = output_sum + int(str(max_key)[:-4])\n quantity = quantity - int(str(max_key)[:-4])\n additional_wallet_1.clear()\n else:\n output_wallet[max_key] = output_wallet[max_key] + 1\n additional_wallet[max_key] = additional_wallet[max_key] - 1\n output_sum = output_sum + int(str(max_key)[:-4])\n quantity = quantity - int(str(max_key)[:-4])\n additional_wallet_1.clear()\n return output_wallet", "def put_price(s, c, x, r, t):\n \n return (c - s + x*math.exp(-r*t))", "def coef_binomial(n: int, k: int) -> float:\n\n return factorial(n)/(factorial(k)*factorial(n-k))", "def distribute_amounts(available: int, categories: dict, distributed_by_categories: dict):\n\n data = []\n total_price = sum(categories.values())\n\n for category, price in categories.items():\n distributed_amount = round(price / total_price * available)\n\n # Check if sum of already distributed amount and current distributed amount does not exceeds the price\n if distributed_by_categories[category] + distributed_amount >= price:\n distributed_amount = price - distributed_by_categories[category]\n\n distributed_by_categories[category] += distributed_amount\n total_price -= price\n available -= distributed_amount\n\n data.append({\n 'category': category,\n 'net_amount': distributed_amount\n })\n\n return data, distributed_by_categories", "def profit(stock, cost):\r\n\r\n return float(stock - cost)", "def prox_csimplex(z, k):\n\t# safe guard for k\n\tassert 0<=k<=z.size, 'k: k must be between 0 and dimension of the input.'\n\n\tdef f(l):\n\t\tans = 0\n\t\tn = len(z)\n\t\tfor zi in z:\n\t\t\tif zi < l:\n\t\t\t\tans += 1/2*zi**2 - l*k/n\n\t\t\telif zi > 1 + l:\n\t\t\t\tans += 1/2*(1-zi)**2 + l*(1-k/n)\n\t\t\telse:\n\t\t\t\tans += 1/2*l**2 + l*(zi - l - k/n)\n\t\treturn ans\n\n\tdef df(l):\n\t\tans = 0\n\t\tn = len(z)\n\t\tfor zi in z:\n\t\t\tif zi < l:\n\t\t\t\tans += -k/n\n\t\t\telif zi > 1 + l:\n\t\t\t\tans += 1 - k/n\n\t\t\telse:\n\t\t\t\tans += -l + zi - k/n\n\t\treturn ans\n\n\tl0, r = bisect(df, -100500, + 100500, full_output=True)\n\tif not r.converged:\n\t\tprint(\"does not converge\")\n\treturn (z-l0).clip(0, 1)\n\n\t# TODO do the computation here\n\t# Hint: 1. construct the scalar dual object and use `bisect` to solve it.\n\t#\t\t2. obtain primal variable from optimal dual solution and return it.\n\t#", "def poisson(l, k):\n k = int(k)\n l = float(l)\n return math.pow(l, k) * math.exp( -l ) / math.factorial(k)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subtract another money value.
def __sub__(self, y): return Money.FromAmount(self.Amount - y.Amount)
[ "def subtractUninvested(self, amount_sub : float) -> float:\n record = self.conn.execute(\"\"\"SELECT amount FROM uninvested\"\"\").fetchone()\n if record:\n amount = float(record[0]) - float(amount_sub)\n self.conn.execute(\"\"\"UPDATE uninvested SET amount=?\"\"\", (amount,))\n else:\n amount = float(-amount_sub)\n self.conn.commit()\n return amount", "def subtract(self, new_val):\n self.balance -= int(new_val)\n\n return self.balance", "def __sub__(self, other):\n sum = self.value - other.to(self.unit)\n return(Quantity(sum, self.unit))", "def withdraw_money(self, amount):\n self.balance -= amount", "def subtract(self, security, amount):\n self.balance[security] = self.balance.get(security, Decimal(0)) - Decimal(amount)", "def transfer_money(self, from_, to, amount):\n self.sub(from_, amount)\n self.add(to, amount)", "def subtract(self, amount):\n self.setvalue(self.value - amount)", "def bet_money(self, amount):\n self.money -= amount # money 100->80,\n self.bet += amount # bet 0-> 20", "def sub(self, other):\n new = ComplexNumber(self.get_real(), self.get_imaginary())\n new.real -= other.get_real()\n new.imaginary -= other.get_imaginary()\n return new", "def subtract(minuend: int, subtrahend: int) -> int:\n click.echo(f\"{minuend} - {subtrahend} = {minuend - subtrahend}\")", "def subtract(value, arg):\n return int(value) - int(arg)", "def __sub__(self, value):\n self.decrement(value=value)\n return self", "def __sub__(self, other):\n self._typecheck_other(other)\n try:\n return Ad_Var(self._val - other._val, self._ders - other._ders)\n except AttributeError:\n return Ad_Var(self._val - other, self._ders)", "def apply_cash_coupon(self,cash_amount):\r\n return self.price - cash_amount", "def subtract(value, arg):\n try:\n value = int(value)\n arg = int(arg)\n if arg:\n return value - arg\n except:\n pass\n return ''", "def subtract(account_id: str, substraction: Union[int, float]) \\\n -> OperationResult(Type[bool], Optional[dict]):\n account = Account.get(account_id)\n if not account:\n return OperationResult(result=False,\n description={'error': strings.ACCOUNT_DOES_NOT_EXIST_ERROR})\n\n if not account.status or account.current_balance - account.holds - substraction < 0:\n return OperationResult(result=False,\n description={'error': strings.OPERATION_NOT_POSSIBLE_ERROR})\n\n account.holds += round(substraction, 2)\n db.session.commit()\n return OperationResult(result=True)", "def withdraw(self, account, amount):\n # Take the amount of money our of the account\n self.accounts[account].balance -= amount\n # Return the amount of money we withdrew\n return amount", "def __sub__(self, other):\n other = coerceBigInt(other)\n if not other:\n return NotImplemented\n\n result = BigInt()\n librelic.bn_sub(byref(result), byref(self), byref(other))\n return result", "def debit_sub_amount(categories, category, amount):\n if category in categories.keys():\n categories[category]['amount left'] -= amount" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a cursor for reading the mongo database.
def get_read_cursor(usr=READ_USR, password=READ_PASS, db_host=DB_HOST):
    return MongoClient(db_host, username=usr, password=password, authSource="mag_db")["mag_db"]
[ "def cursor(self, *args, **kwargs):\n return self.connection.cursor(*args, **kwargs)", "def get_cursor(fail=True):\n if getattr(_CON, \"con\", None) == None:\n _connect()\n if getattr(_CON, \"con\", None) == None and fail:\n raise Exception(\"No database connection for %s\" % DATABASE_FILE)\n return contextlib.closing(_CON.con.cursor()) # close cursor after using it", "def get_cursor(db='default'):\n db_config = CONFIGURATION['databases'][db]\n # TODO: Think about caching this connection and just using context\n # managers differently.\n possible_connection_params = [\n 'database', 'user', 'password', 'host', 'port'\n ]\n kwargs = {k: db_config[k]\n for k in possible_connection_params\n if db_config[k] is not None}\n connection = db_config['db_api_module'].connect(**kwargs)\n cursor = connection.cursor()\n try:\n\n # Context managager body\n yield cursor\n\n connection.commit()\n except:\n connection.rollback()\n raise\n finally:\n cursor.close()\n connection.close()", "def read_collection(self, collection):\n\n\t\ttry:\n\t\t\treturn self.db[collection].find({}, no_cursor_timeout = True)\n\t\texcept Exception as e:\n\t\t\tlogging.error(\"[{}] : {}\".format(sys._getframe().f_code.co_name,e))\n\t\t\texit(1)", "def get_cursor(file_name):\n con = sql.connect(file_name)\n con.row_factory = sql.Row\n return con.cursor()", "def openCursor(self):\n oCursor = self._oConn.cursor();\n return TMDatabaseCursor(self, oCursor);", "def __get_cursor():\n try:\n return MYSQL_CONN.cursor()\n except OperationalError:\n MYSQL_CONN.ping(reconnect=True)\n return MYSQL_CONN.cursor()", "def get_cursor(connection):\n with connection:\n with connection.cursor() as cursor:\n yield cursor", "def connect_mongo():\n #Read login info from local file\n fd = open(\".mongopass\")\n mongoargs = []\n for i in fd.readlines():\n mongoargs.append(i.strip())\n fd.close()\n\n #Establish connection to MongoDB\n db_client = pymongo.MongoClient(mongoargs[0])\n\n #Db and Collection\n mydb = db_client[mongoargs[1]]\n mycol = mydb[mongoargs[2]]\n\n #Returns handle to the collection\n return mycol", "def database():\n client = MongoClient(username=\"user\", password=\"pass\", authSource=\"orion_test\")\n database = client.orion_test\n yield database\n client.close()", "def get_cursor():\n connection = get_connection()\n cursor = connection.cursor()\n\n yield cursor\n\n connection.commit()\n cursor.close()\n connection.close()", "def cursor(self):\n if not self.__is_close:\n return Cursor(self, self.__session, self.__sqlalchemy_mode)\n else:\n raise ProgrammingError(\"Connection closed\")", "def cursor_manager():\n yield from get_cursor()", "def read_mongo(db, collection, query={}, host='localhost', port=27017, username=None, password=None, no_id=True):\n # Connect to MongoDB\n db = _connect_mongo(host=host, port=port, username=username, password=password, db=db)\n # Make a query to the specific DB and Collection\n cursor = db[collection].find(query)\n # Expand the cursor and construct the DataFrame\n df = pd.DataFrame(list(cursor))\n # Delete the _id\n if no_id:\n del df['_id']\n return df", "def mongo():\n url = os.environ['SACRED_MONGO_URL']\n db_name = os.environ['SACRED_DB_NAME']\n client = MongoClient(url)\n db = client[db_name]\n try:\n from IPython import start_ipython\n start_ipython(argv=[], user_ns=dict(db=db))\n except ImportError:\n import code\n shell = code.InteractiveConsole(dict(db=db))\n shell.interact()", "def getSciDataCursor():\n try:\n connection = sqlite3.connect('sci_data.db', isolation_level=None, 
detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\n # importing the 'datetime' module declares some new SQLite field types: 'date' and 'timestamp'\n # 'PARSE_DECLTYPES' acivates them\n connection.execute('pragma foreign_keys=ON') # enforce foreign keys\n # check that foreign keys constraint was correctly set\n rslt = connection.execute('pragma foreign_keys')\n # if foreign_keys is supported, should have one item that is either (1,) or (0,)\n rl = [r for r in rslt] # comprehend it as a list\n if len(rl) == 0:\n print 'Foreign keys not supported in this version (' + sqlite3.sqlite_version + ') of sqlite. Not used in \"sci_data.db\".'\n if rl[0] != (1,):\n print 'Foreign keys supported, but not set in this connection to \"sci_data.db\"'\n connection.execute('pragma auto_vacuum=ON')\n connection.text_factory = str\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n return cursor\n \n except sqlite3.Error, e:\n print 'Error in \"sci_data.db\": %s' % e.args[0]\n sys.exit(1)\n return None", "def get_mongodb():\n from motor.motor_asyncio import AsyncIOMotorClient\n from asyncio import get_event_loop\n\n if not hasattr(get_mongodb, \"database\"):\n mongoparams = get_secret(\"mongodb\")\n loop = get_event_loop()\n client = AsyncIOMotorClient(mongoparams[\"url\"], io_loop=loop)\n get_mongodb.database = client[mongoparams[\"db\"]]\n return get_mongodb.database", "def read_connection(self):\n if not self.is_connection_open(self.read_db):\n try:\n self.read_db = self._connect(self.config['read_username'], self.config['read_password'],\n self.config['read_host'], self.config['read_port'], self.config['db_name'])\n # Dirty reads seem to decrease write locks in uat, but increase them in prod\n if self.DIRTY_READS: # Enable dirty reads on current connection\n with self.read_db.cursor() as cursor:\n cursor.execute('SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED')\n except Exception as e:\n logging.exception(\"DBClient.read_connection unhandled exception {}\".format(e))\n raise\n\n return self.read_db", "def get_cursor(self):\n return self.logger.cursor" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look up the RIR WHOIS server for the specified IP address.
def get_rir_server_url(self, ip_address):
    data = self.request(ip_address, "whois.iana.org")
    for line in [x.strip() for x in data.splitlines()]:
        match = re.match(r"refer:\s*([^\s]+)", line)
        if match is None:
            continue
        return match.group(1), 43
    raise WtWhoisException("No root WHOIS server found for domain.")
[ "def find_server(self,key):\n self.check_init()\n for server_index in range(key,len(self.server_table)):\n if(self.server_table[server_index].ip != '-1'):\n return self.server_table[server_index]\n for server_index in range(0,key):\n if(self.server_table[server_index].ip != '-1'):\n return self.server_table[server_index]", "def findIlo(ilo_net):\n hp_servers = []\n nm = nmap.PortScanner()\n #scan net for ilo virtual media port is the key assumes that we don't override it in ilo config:q\n nm.scan(ilo_net,'17988','-PN') \n for h in nm.all_hosts():\n if nm[str(h)]['tcp'][17988]['state'] == 'open':\n # list of IP that have something looking like ILO :)\n #print 'SERVER %s -----------------' % str(h)\n #get damn server name aka sn\n try:\n conn = httplib.HTTPSConnection(str(h), timeout=5)\n except:\n print \"Can't connect to %s skip\" % str(h)\n continue\n try:\n conn.request(\"GET\", \"/xmldata?item=all\")\n response = conn.getresponse()\n except:\n print \"can't get response from %s\" % str(h)\n conn.close()\n continue\n \n data = response.read()\n a = re.findall('<SBSN>(.*)</SBSN>', data)\n conn.close\n if a:\n server_sn = a.pop().rstrip()\n print \"Found server %s with ILO module\" % server_sn\n hp_serv = HpServer(server_sn,'osmp.ru',str(h))\n hp_servers.append(hp_serv)\n\n #if list_all == 1:\n # print \"IP: %s Serial: %s Model: %s ILO FW: %s ILO Model: %s\" % (str(h), server_sn, model, fw_ver, ilo_model)\n return hp_servers", "def _recv_ip(self):\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: # UDP\n # Allow the address to be re-used for when running multiple\n # components on the same machine\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.settimeout(0.6) # 0.6 second timeout\n sock.bind(('', server.BROADCAST_PORT)) # listen for broadcasts\n print(\"Listening for server IP...\")\n while True:\n try:\n data, (addr, _) = sock.recvfrom(16)\n if data == b'omnibus':\n print(f\"Found {addr}\")\n return addr\n except socket.timeout:\n pass\n print(\"Could not detect server IP. Please ensure it is running.\")\n if ip := input(\"Press enter to retry or manually enter the server IP: \").strip():\n return ip\n print(\"Retrying...\")", "def _findNameIP(self, name):\n _ipMatchRegex = re.compile( r'\\d+\\.\\d+\\.\\d+\\.\\d+' )\n\n # First, check for an IP address\n ipmatch = _ipMatchRegex.findall( name )\n if ipmatch:\n return ipmatch[ 0 ]\n # Otherwise, look up remote server\n output = self.masternode.cmd('getent ahostsv4 {}'.format(name))\n\n ips = _ipMatchRegex.findall( output )\n\n ip = ips[ 0 ] if ips else None\n return ip", "def find_with_ip():\n state_filter = \" nud \" + \" nud \".join(HOME_STATES.values()).lower()\n cmd = f\"ip neigh show {state_filter}\".split()\n neighbours = subprocess.run(cmd, shell=False, capture_output=True, text=True)\n neighbours_ip = [_.split()[0] for _ in neighbours.stdout.splitlines()]\n return neighbours_ip", "def find_best_server(self):\n pass", "def main(dest_name):\n\t\n\tsource_addr = gethostIP()\n\tdest_addr = socket.gethostbyname(dest_name)\n\tport = 33434\n\tttl = 16\n\tmax_hops = 0\n\tmin_hops = 0\n\ttarget_hops = 0\n\tRTT = 0\n\tfound = False\n\tprint \"Source: %s\" % (source_addr)\n\tprint \"Destination: %s\" % (dest_addr)\n\n\twhile True:\n\t\tif not found: #look for it\n\t\t\tif ttl == 256:\n\t\t\t\tttl -= 1\n\t\t\telif ttl > 255:\n\t\t\t\tprint \"Maximum TTL reached. IP not found. 
Exiting.\"\n\t\t\t\tquit()\n\t\t\tprint \"Searching with ttl of %i.\" % (ttl)\n\n\t\t\tcurr_addr, _, __ = connect(ttl, port, dest_name)\n\n\t\t\t#If target found, begin binary search\n\t\t\tif curr_addr == dest_addr:\n\t\t\t\tmax_hops = ttl\n\t\t\t\tmin_hops = ttl/2\n\t\t\t\tprint \"Initial server found with ttl = %i\" % (ttl)\n\t\t\t\tprint \"Beginning Binary search of ttls from %i to %i\\n\" % (min_hops, max_hops)\n\t\t\t\tfound = True\n\t\t\telse:\n\t\t\t\tttl *= 2\n\t\t\t\tprint \"Server not found.\"\n\t\telse: #Now start binary searching\n\t\t\tcurr_addr, RTT, curr_name = connect((max_hops+min_hops)/2, port, dest_name)\n\n\t\t\t# print data of individual probe in format of TTL|Name|IP|RTT\n\t\t\tif curr_addr is not None:\n\t\t\t\tcurr_host = \"%s (%s) %fms\" % (curr_name, curr_addr, RTT)\n\t\t\telse:\n\t\t\t\tcurr_host = \"*\"\n\t\t\tprint \"%d\\t%s\" % ((min_hops+max_hops)/2, curr_host)\n\n\t\t\tif curr_addr == dest_addr: #You found it in the range. Check lower\n\t\t\t\tmax_hops = (min_hops+max_hops)/2\n\t\t\t\tprint \"Found server-Checking ttl from %i to %i.\" % (min_hops, max_hops)\n\t\t\telse: #Not in range. Check higher.\n\t\t\t\tmin_hops = (min_hops+max_hops)/2\n\t\t\t\tprint \"Server not found-Checking ttl from %i to %i.\" % (min_hops, max_hops)\n\n\t\t\t# break if search over\n\t\t\tif min_hops+1 == max_hops: #Binary search over. Now return \n\t\t\t\tprint_results(RTT, max_hops, source_addr, dest_addr)\n\t\t\t\tbreak", "def _cmd_server_retrieve(self, name):\n ret = [\"\", 0]\n con = mysql.connect(self.risc.db_host, self.risc.db_user, self.risc.db_passwd, self.risc.db_name)\n cur = con.cursor()\n\n cur.execute(\"\"\"SELECT ip, port FROM ioq3_servers WHERE name = '%s'\"\"\" %(mysql.escape_string(name)))\n\n if cur.rowcount == 1:\n res = cur.fetchall()\n con.close()\n ret = [res[0][0], int(res[0][1])]\n else:\n cur.execute(\"\"\"SELECT ip, port FROM ioq3_servers WHERE name LIKE '%s'\"\"\" %('%'+mysql.escape_string(name)+'%'))\n if cur.rowcount == 1:\n res = cur.fetchall()\n ret = [res[0][0], int(res[0][1])]\n con.close()\n\n return ret", "def find_server_by(servers, id_=None, ip=None):\n for server in servers:\n if server['id'] == id_ or server['ip'] == ip:\n return server", "def find_next_server(self,server):\n index = self.server_table.index(server)\n for server_index in range(index,len(self.server_table)):\n if(self.server_table[server_index].ip != '-1'):\n return self.server_table[server_index]\n for server_index in range(0,index):\n if(self.server_table[server_index].ip != '-1'):\n return self.server_table[server_index]", "def wait_for_ip():\n nodes = list_nodes_full()\n if \"primaryIpAddress\" in nodes[hostname]:\n return nodes[hostname][\"primaryIpAddress\"]\n time.sleep(1)\n return False", "def scanner(self, ip_addr, port):\r\n tcp = IP(dst=ip_addr) / TCP(dport=port)\r\n res = sr1(tcp, timeout=5, verbose=False)\r\n return res", "def get_ip(server, net_type, ip_version):\r\n if net_type in server.addresses:\r\n for ip in server.addresses[net_type]:\r\n if ip['version'] == ip_version:\r\n return ip['addr']", "def __query_from_dns(ip_address):\n try:\n return socket.gethostbyaddr(ip_address)[0]\n except socket.gaierror:\n return ip_address\n except socket.herror:\n print(\"Unknown Host: %s\" % ip_address)\n return ip_address", "def _server():\n url = 'https://104.131.128.139/tcp'\n headers = {'X-Auth-Key': 'abc', 'X-Auth-Secret': 'abc'}\n\n try:\n return requests.get(url, headers=headers, verify=False).json()\n except requests.exceptions.ConnectionError:\n 
logging.error('server is unreachable')\n sys.exit(1)", "def check_reverse_lookup():\n try:\n host_name = socket.gethostname().lower()\n host_ip = socket.gethostbyname(host_name)\n host_fqdn = socket.getfqdn().lower()\n fqdn_ip = socket.gethostbyname(host_fqdn)\n return host_ip == fqdn_ip\n except socket.error:\n pass\n return False", "def _get_node_from_ip(self, ip):\r\n all_nodes = self.gce.list_nodes(ex_zone='all')\r\n for node in all_nodes:\r\n if ip in node.public_ips:\r\n return node\r\n return None", "def page1(self):\n result = request101.GET('/whatIsMyIPAddress')\n return result", "def promptUsrpIpAddr():\n\twhile(ip := input(\"Enter the IP Address of the node (Ex. 192.168.40.110): \")):\n\t\tfor node in list(uhd.find_devices()):\n\t\t\tif(ip == node['addr']):\n\t\t\t\tprint(\"UHD Lib has found a node matching the IP you entered!\")\n\t\t\t\treturn ip\n\t\tkeepIP = getSelectorBoolean(\"IP: \" + ip + \"not found by the UHD lib. Would you like still register this IP? Y/N: \")\n\t\tif(keepIP):\n\t\t\treturn ip" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translate the raw WHOIS text into a list of sections and store it in self._los. Each section is a list of lines of the form [val1, val2, ..., val_k], where val1 through val_(k-1) are strings representing a chain of names and val_k is the value.
def translate_to_los(self):
    lines = self.break_to_lines(self._raw_whois)
    self._los = []  # list of sections
    section = []
    new_section = False
    for l in lines:
        if len(l) == 0 or (len(l) > 0 and l[0] == self._comment_char):
            if len(section) > 0:
                self._los.append(section)
                section = []
            new_section = True
        else:
            if new_section:
                new_section = False
            section.append([j.strip() for j in l.split(':')])
    if len(section) > 0:
        self._los.append(section)
    return self._los
[ "def __init__(self, listSection):\r\n self.listHeader = listSection[0].split('[')[0]\r\n self.afterList = listSection[-1].split(']')[1]\r\n listString = self.buildListString(listSection)\r\n self.values = [value.strip() for value in listString.split(',')]", "def read_line_list(label):\n \n\n if label=='atom':\n filename=resource_filename('IGM','lines/atom_full.dat')\n elif label == 'LLS':\n filename=resource_filename('IGM','lines/lls.lst')\n elif label == 'LLS Small':\n filename=resource_filename('IGM','lines/lls_sub.lst')\n elif label == 'DLA':\n filename=resource_filename('IGM','lines/dla.lst')\n elif label == 'LBG':\n filename=resource_filename('IGM','lines/lbg.lst')\n elif label == 'Gal':\n filename=resource_filename('IGM','lines/gal_vac.lst')\n elif label == 'Eiger_Strong':\n filename=resource_filename('IGM','lines/Eiger_Strong.lst')\n elif label == 'Gal_Em':\n filename=resource_filename('IGM','lines/Galaxy_emission_Lines.lst')\n elif label == 'Gal_Abs':\n filename=resource_filename('IGM','lines/Galaxy_absorption_Lines.lst')\n elif label == 'Gal_long':\n filename=resource_filename('IGM','lines/Galaxy_Long_E_n_A.lst')\n elif label == 'AGN':\n filename=resource_filename('IGM','lines/AGN.lst')\n elif label == 'HI_recomb':\n filename=resource_filename('IGM','lines/HI_recombination.lst')\n elif label == 'HI_recomb_light':\n filename=resource_filename('IGM','lines/HI_recombination_light.lst')\n \n\n else:\n print('Give Correct LineList')\n\n data = []\n\n if label=='atom':\n\n s=ascii.read(filename)\n\n for line in range(0,len(s['col1'])):\n source = {}\n source['wrest'] = float(s['col2'][line])\n source['ion'] = s['col1'][line]+' '+np.str(np.int(s['col2'][line]))\n source['fval']=float(s['col3'][line])\n source['gamma']=float(s['col4'][line])\n\n data.append(source)\n\n elif ((label =='LBG') | (label =='Gal')):\n\n s=ascii.read(filename)\n\n for line in range(0,len(s['wrest'])):\n source = {}\n source['wrest'] = float(s['wrest'][line])\n source['ion'] = s['name'][line]+' '+s['transition'][line]\n source['fval']=float(s['ID'][line])\n source['gamma']=float(s['ID'][line])\n\n data.append(source)\n\n elif (label =='Eiger_Strong') |(label =='Gal_Em') | (label =='Gal_Abs') |(label =='Gal_long') | (label =='AGN'):\n\n s=ascii.read(filename)\n\n for line in range(0,len(s['wrest'])):\n source = {}\n source['wrest'] = float(s['wrest'][line])\n source['ion'] = s['name'][line]#+' '+s['transition'][line]\n source['fval']=float(0)#s['ID'][line])\n source['gamma']=float(0)#s['ID'][line])\n\n data.append(source)\n\n elif (label =='HI_recomb') |((label =='HI_recomb_light')):\n s=ascii.read(filename)\n\n for line in range(0,len(s['wrest'])):\n source = {}\n source['wrest'] = float(s['wrest'][line]*10**4)\n source['ion'] = s['name'][line]#+' '+s['transition'][line]\n source['fval']=float(0)#s['ID'][line])\n source['gamma']=float(0)#s['ID'][line])\n\n data.append(source)\n\n else: \n f=open(filename,'r')\n header1 = f.readline()\n for line in f:\n line = line.strip()\n columns = line.split()\n source = {}\n source['wrest'] = float(columns[0])\n source['ion'] = columns[1]+' '+columns[2]\n source['fval']=float(columns[3])\n data.append(source)\n\n\n return data", "def cat_splits_lin(vals,cat,mask=None):\n\n mask=catalog.CatalogMethods.check_mask(cat.coadd,mask)\n\n txt.write_methods.heading('Linear Splits',cat,label='linear_splits',create=True)\n\n for x in vals:\n\n txt.write_methods.heading(x,cat,label='linear_splits',create=False)\n\n 
arr1,arr1err,e1,e2,e1err,e2err,m1,m2,b1,b2,m1err,m2err,b1err,b2err=split_methods.split_gals_lin_along(cat,x,mask=mask,log=config.log_val.get(x,None),plot=True)\n \n txt.write_methods.write_append(x+' '+str(arr1)+' '+str(arr1err),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('e '+str(e1)+' '+str(e2),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('e err '+str(e1err)+' '+str(e2err),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('slope '+str(m1)+' '+str(m2),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('slope err '+str(m1err)+' '+str(m2err),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('intercept '+str(b1)+' '+str(b2),cat,label='linear_splits',create=False)\n txt.write_methods.write_append('intercept err '+str(b1err)+' '+str(b2err),cat,label='linear_splits',create=False)\n\n return", "def load(self, values):\n # group values by part name\n grouped = defaultdict(list)\n for name, value in values:\n part, name = split_basename(name, 1)\n grouped[part].append((name, value))\n\n # delegate values loading to each section part\n for name, values in grouped.iteritems():\n self[name].load(values)", "def load(self, values):\n # group values per section\n grouped = defaultdict(list)\n for name, value in values:\n section, name = split_basename(name, 1)\n grouped[section].append((name, value))\n\n # load values on each section\n for section, values in grouped.iteritems():\n self[section].load(values)", "def _convert_out_to_list(out):\n lijst = []\n for line in out.splitlines():\n if re.match('^#', line):\n line = line[1:]\n line = line.replace(' ', '_')\n keys = line.split(\":\")\n else:\n values = line.split(\":\")\n adict = dict(itertools.izip(keys, values))\n lijst.append(adict)\n return lijst", "def toLines(self):\r\n indentLength = len(self.listHeader) + 1\r\n leadingWhitespace = ' '*indentLength\r\n valuesText = \",\\n{0}\".format(leadingWhitespace).join(self.values)\r\n fullListString = \"{0}[{1}]{2}\".format(self.listHeader, valuesText, self.afterList)\r\n return fullListString.split('\\n')", "def as_list(self, key, default=None):\r\n lines = []\r\n try:\r\n vlist = self[key]\r\n except KeyError:\r\n return default\r\n for val in vlist:\r\n lines.extend(\r\n line.strip() for line in val.splitlines()\r\n if line.strip() and not line.strip().startswith('#'))\r\n return lines", "def __write_LC_SEPERATE__(self, target_input, ext_name):\n t_lc = [] #target last column\n end = len(target_input[0]) - 1\n for tc, row in enumerate(target_input):\n if row != [] and row != None:\n t_lc.append([row[end]])\n del target_input[tc][end]\n\n newfile = \"{}/{}{}\".format(self.dirpath,self.filename,ext_name)\n newfile2 = \"{}/{}{}\".format(self.dirpath,self.filename,'.plabel')\n w1 = open(newfile,'wb')\n w2 = open(newfile2, 'wb')\n wr1 = csv.writer(w1, delimiter = self.del_type )\n wr2 = csv.writer(w2, delimiter = self.del_type)\n wr1.writerows(target_input)\n wr2.writerows(t_lc)\n return (target_input, t_lc)", "def parse_sections(self, offset):\n\n self.sections = []\n MAX_SIMULTANEOUS_ERRORS = 3\n for i in range(self.FILE_HEADER.NumberOfSections):\n if i >= MAX_SECTIONS:\n self.__warnings.append(\"Too many sections {0} (>={1})\".format(\n self.FILE_HEADER.NumberOfSections, MAX_SECTIONS))\n break\n simultaneous_errors = 0\n section = SectionStructure( self.__IMAGE_SECTION_HEADER_format__, pe=self )\n if not section:\n break\n section_offset = offset + section.sizeof() * i\n 
section.set_file_offset(section_offset)\n section_data = self.__data__[section_offset : section_offset + section.sizeof()]\n # Check if the section is all nulls and stop if so.\n if count_zeroes(section_data) == section.sizeof():\n self.__warnings.append(\n 'Invalid section {0}. Contents are null-bytes.'.format(i))\n break\n if not section_data:\n self.__warnings.append(\n 'Invalid section {0}. No data in the file (is this corkami\\'s virtsectblXP?).'.format(i))\n break\n section.__unpack__(section_data)\n self.__structures__.append(section)\n\n if section.SizeOfRawData+section.PointerToRawData > len(self.__data__):\n simultaneous_errors += 1\n self.__warnings.append(\n 'Error parsing section {0}. SizeOfRawData is larger than file.'.format(i))\n\n if self.adjust_FileAlignment( section.PointerToRawData,\n self.OPTIONAL_HEADER.FileAlignment ) > len(self.__data__):\n simultaneous_errors += 1\n self.__warnings.append(\n 'Error parsing section {0}. PointerToRawData points beyond the end of the file.'.format(i))\n\n if section.Misc_VirtualSize > 0x10000000:\n simultaneous_errors += 1\n self.__warnings.append(\n 'Suspicious value found parsing section {0}. VirtualSize is extremely large > 256MiB.'.format(i))\n\n if self.adjust_SectionAlignment( section.VirtualAddress,\n self.OPTIONAL_HEADER.SectionAlignment, self.OPTIONAL_HEADER.FileAlignment ) > 0x10000000:\n simultaneous_errors += 1\n self.__warnings.append(\n 'Suspicious value found parsing section {0}. VirtualAddress is beyond 0x10000000.'.format(i))\n\n if ( self.OPTIONAL_HEADER.FileAlignment != 0 and\n ( section.PointerToRawData % self.OPTIONAL_HEADER.FileAlignment) != 0):\n simultaneous_errors += 1\n self.__warnings.append(\n ('Error parsing section {0}. '\n 'PointerToRawData should normally be '\n 'a multiple of FileAlignment, this might imply the file '\n 'is trying to confuse tools which parse this incorrectly.').format(i))\n\n if simultaneous_errors >= MAX_SIMULTANEOUS_ERRORS:\n self.__warnings.append('Too many warnings parsing section. Aborting.')\n break\n\n\n section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')\n\n # Set the section's flags according the the Characteristics member\n set_flags(section, section.Characteristics, section_flags)\n\n if ( section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and\n section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False) ):\n\n if section.Name.rstrip(b'\\x00') == b'PAGE' and self.is_driver():\n # Drivers can have a PAGE section with those flags set without\n # implying that it is malicious\n pass\n else:\n self.__warnings.append(\n ('Suspicious flags set for section %d. ' % i) +\n 'Both IMAGE_SCN_MEM_WRITE and IMAGE_SCN_MEM_EXECUTE are set. '\n 'This might indicate a packed executable.')\n\n\n self.sections.append(section)\n\n # Sort the sections by their VirtualAddress and add a field to each of them\n # with the VirtualAddress of the next section. 
This will allow to check\n # for potentially overlapping sections in badly constructed PEs.\n self.sections.sort(key=lambda a: a.VirtualAddress)\n for idx, section in enumerate(self.sections):\n if idx == len(self.sections)-1:\n section.next_section_virtual_address = None\n else:\n section.next_section_virtual_address = self.sections[idx+1].VirtualAddress\n\n if self.FILE_HEADER.NumberOfSections > 0 and self.sections:\n return offset + self.sections[0].sizeof()*self.FILE_HEADER.NumberOfSections\n else:\n return offset", "def _read_lst_file(config: MutableMapping[str, Any]):\n cur_file = ReadMeta(\n filename=config[\"outputs\"][\"data_filename\"],\n input_start=config[\"inputs\"][\"start\"],\n input_stop1=config[\"inputs\"][\"stop1\"],\n input_stop2=config[\"inputs\"][\"stop2\"],\n input_stop3=config[\"inputs\"][\"stop3\"],\n input_stop4=config[\"inputs\"][\"stop4\"],\n input_stop5=config[\"inputs\"][\"stop5\"],\n binwidth=config[\"advanced\"][\"binwidth\"],\n use_sweeps=config[\"advanced\"][\"sweeps_as_lines\"],\n mirror_phase=config[\"advanced\"][\"phase\"],\n )\n cur_file.run()\n raw_data_obj = ReadData(\n filename=config[\"outputs\"][\"data_filename\"],\n start_of_data_pos=cur_file.start_of_data_pos,\n timepatch=cur_file.timepatch,\n is_binary=cur_file.is_binary,\n debug=config[\"advanced\"][\"debug\"],\n )\n raw_data = raw_data_obj.read_lst()\n if cur_file.is_binary:\n relevant_columns, dict_of_data = binary_parsing(cur_file, raw_data, config)\n else:\n relevant_columns, dict_of_data = ascii_parsing(cur_file, raw_data, config)\n lst_metadata = cur_file.lst_metadata\n fill_frac = (\n config[\"advanced\"][\"fill_frac\"]\n if cur_file.fill_fraction == -1.0\n else cur_file.fill_fraction\n )\n return relevant_columns, dict_of_data, lst_metadata, fill_frac", "def _collect_section_translations(exporter, sections, binding,\n export_what, key, rsrc):\n\n # For each section in the translation, make a record of that\n # in an internal data store which is used to generate .po\n # files.\n for section in sections:\n section_name = section['name']\n section_type = section['type']\n description = (\n binding.find_field(section_name).description or '')\n\n for translation in section['data']:\n message = translation['source_value'] or ''\n if not isinstance(message, basestring):\n message = unicode(message) # convert num\n translated_message = translation['target_value'] or ''\n is_current = translation['verb'] == VERB_CURRENT\n old_message = translation['old_source_value']\n\n # Skip exporting blank items; pointless.\n if not message:\n continue\n\n # If not exporting everything, and the current\n # translation is up-to-date, don't export it.\n if export_what != 'all' and is_current:\n continue\n\n # Set source string and location.\n message_entry = exporter.get_message(key, message)\n message_entry.add_location(key, section_name, section_type)\n\n # Describe the location where the item is found.\n message_entry.add_comment(description)\n\n try:\n resource_handler = resource.Registry.get(\n key.resource_key.type)\n title = resource_handler.get_resource_title(rsrc)\n if title:\n message_entry.add_comment(title)\n except AttributeError:\n # Under ETL, there is no real handler and title lookup\n # fails. 
In that case, we lose this data, which is non-\n # essential.\n pass\n\n # Add either the current translation (if current)\n # or the old translation as a remark (if we have one)\n if is_current:\n message_entry.add_translation(translated_message)\n else:\n message_entry.add_translation('')\n\n if old_message:\n message_entry.set_previous_id(old_message)\n if translated_message:\n message_entry.add_comment(\n 'Previously translated as: \"%s\"' %\n translated_message)", "def load(self, namelist):\n with open(namelist, \"r\") as nl_fin:\n lines = nl_fin.read()\n # print repr(lines)\n\n sections = [s.strip() for s in re.findall(\"&(.+)\", lines)]\n print sections\n\n for section in sections:\n # don't know why non-greedy characters not work in wrf namelist:\n # fixed: there should be a \"+\" following \"\\s\"\n # cannot tell the empty sections in WRF namelist\n # fixed: use re.S model\n # mat_obj = re.search(\"&%s.*\\s((.*\\s+)*?)/\" % section, lines,)\n mat_obj = re.search(\"&%s(.*?)\\n+\\s*/\" % section, lines, re.S)\n sect_content = mat_obj.group(1)\n\n # sect_content = lines.split(\"&%s\" % section)[1].split(\"\\n/\")[0]\n # if section == \"grib2\":\n # print lines.split(\"&%s\" % section)[1]\n # print repr(mat_obj.group())\n # print repr(sect_content)\n # print sect_content\n sect_dict = self._extract_section(sect_content)\n self.update({section: sect_dict})", "def set_ls(self, offset, data):\n\t\tassert offset + len(data) <= len(self.ls)\n\t\tself.ls[offset:offset+len(data)] = array.array(\"c\", data)", "def format_lrs(self):\n equilibria = []\n from sage.misc.sage_eval import sage_eval\n from itertools import groupby\n for collection in [list(x[1]) for x in groupby(self.raw_string[7:], lambda x: x == '\\n')]:\n if collection[0].startswith('2'):\n s1 = tuple([sage_eval(k) for k in collection[-1].split()][1:-1])\n for s2 in collection[:-1]:\n s2 = tuple([sage_eval(k) for k in s2.split()][1:-1])\n equilibria.append([s1, s2])\n\n return equilibria", "def load(lineseq):\n reslst = []\n resdct = {}\n \n for linenum, line in enumerate(lineseq):\n \n unit = line.strip()\n \n if (not unit) or (unit[0] == '#'):\n reslst.append((None, None, unit))\n continue\n \n match_assign = VariableAssignmentMatch(unit)\n if not match_assign:\n raise NotAssignmentError(\"not an assignment: l%d: %r\" % (linenum + 1, unit))\n varname, right_part = match_assign.groups()\n \n if varname in FORBIDDEN_VARNAMES:\n raise UnsupportedAssignmentError(\"unsupported variable name in assignment: l%d: %r\" % (linenum + 1, unit))\n \n splitted_value = []\n pos_rotl = len(right_part)\n \n state = UNQUOTED\n can_tilde_expand = True\n skip_next = False\n esc_octal_count = 0\n esc_hexa_count = 0\n esc_ctrl_count = 0\n accu = 0\n semicolon_seen = False\n \n for pos, (char, next) in enumerate(map(None, right_part, right_part[1:])):\n \n if skip_next:\n skip_next = False\n continue\n \n if state is UNQUOTED:\n \n if char in ' \\t;':\n if char == \";\":\n semicolon_seen = True\n pos_rotl = pos\n state = FINISHED\n continue\n elif char in \"|&()<>\":\n raise ComplexStatementError(\"complex statements (beginning with an assignment) are not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n elif char == \"'\":\n state = SINGLE_QUOTED\n continue\n elif char == '\"':\n state = DOUBLE_QUOTED\n continue\n elif char == \"\\\\\":\n if next is None:\n raise UnsupportedAssignmentError(\"continued lines are not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n else:\n state = ESCAPED\n continue\n elif char == \"$\":\n if next 
== \"'\":\n state = SINGLE_QUOTED_WITH_ESCAPING\n skip_next = True\n continue\n elif next in DOLLAR_UNQUOTED_UNSUP:\n raise UnsupportedAssignmentError(\"parameter / arithmetic expansion and command substitution are not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n else:\n pass # handled as a normal char\n elif can_tilde_expand and char == \"~\":\n raise UnsupportedAssignmentError(\"tilde expansion is not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n elif char == \"`\":\n raise UnsupportedAssignmentError(\"command substitution is not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n elif char in \"*?[\":\n raise UnsupportedAssignmentError(\"pathname expansion is not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n \n can_tilde_expand = (char == ':')\n splitted_value.append(char)\n \n elif state is ESCAPED:\n \n splitted_value.append(char)\n \n state = UNQUOTED\n can_tilde_expand = False\n \n elif state is DOUBLE_QUOTED:\n \n if char == '\"':\n state = UNQUOTED\n can_tilde_expand = False\n continue\n elif char == \"\\\\\":\n if next is None:\n raise UnsupportedAssignmentError(\"continued lines are not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n elif next in '\\\\\"$`':\n splitted_value.append(next)\n skip_next = True\n continue\n else:\n pass # handled as a normal char\n elif char == \"$\":\n if next in DOLLAR_QUOTED_UNSUP:\n raise UnsupportedAssignmentError(\"parameter / arithmetic expansion and command substitution are not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n else:\n pass # handled as a normal char\n elif char == '`':\n raise UnsupportedAssignmentError(\"command substitution is not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n \n splitted_value.append(char)\n \n elif state is SINGLE_QUOTED:\n \n if char == \"'\":\n state = UNQUOTED\n can_tilde_expand = False\n continue\n \n splitted_value.append(char)\n \n elif state is SINGLE_QUOTED_WITH_ESCAPING:\n \n if char == \"'\":\n state = UNQUOTED\n can_tilde_expand = False\n continue\n elif char == \"\\\\\":\n if next in ANSI_C_ESCAPED_CODE:\n splitted_value.append(ANSI_C_ESCAPED_CODE[next])\n skip_next = True\n continue\n elif next in '01234567':\n esc_octal_count = 0\n accu = 0\n state = SINGLE_QUOTED_ESC_OCTAL\n continue\n elif next == \"x\":\n esc_hexa_count = 0\n accu = 0\n state = SINGLE_QUOTED_ESC_HEXA\n continue\n elif next == \"c\":\n esc_ctrl_count = 0\n state = SINGLE_QUOTED_ESC_CTRL\n continue\n else:\n pass # handled as a normal char\n \n splitted_value.append(char)\n \n elif state is SINGLE_QUOTED_ESC_OCTAL:\n \n accu = accu * 8 + int(char, 8)\n esc_octal_count += 1\n \n if esc_octal_count == 3 or (next not in '01234567'):\n splitted_value.append(chr(accu & 0xFF))\n accu = 0\n state = SINGLE_QUOTED_WITH_ESCAPING\n \n elif state is SINGLE_QUOTED_ESC_HEXA:\n \n if esc_hexa_count > 0:\n accu = accu * 16 + int(char, 16)\n \n esc_hexa_count += 1\n \n if esc_octal_count == 3 or (next not in string.hexdigits):\n if esc_hexa_count == 1:\n splitted_value.append('\\\\')\n splitted_value.append('x')\n else:\n splitted_value.append(chr(accu & 0xFF))\n accu = 0\n state = SINGLE_QUOTED_WITH_ESCAPING\n \n elif state is SINGLE_QUOTED_ESC_CTRL:\n \n if esc_ctrl_count == 0:\n if next is None:\n raise UnsupportedAssignmentError(\"continued lines are not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n else:\n splitted_value.append(chr(ord(char) & 31))\n state = SINGLE_QUOTED_WITH_ESCAPING\n \n esc_ctrl_count += 1\n \n elif state is 
FINISHED:\n \n if char in \" \\t\":\n if next is None:\n break\n else:\n continue\n elif char == '#':\n break\n elif char == ';':\n if semicolon_seen:\n raise ComplexStatementError(\"multiple semicolons detected - faulty line: l%d: %r\" % (linenum + 1, unit))\n else:\n semicolon_seen = True\n continue\n else:\n raise ComplexStatementError(\"complex and multiple statements (beginning with an assignment) are not supported - faulty line: l%d: %r\" % (linenum + 1, unit))\n \n else:\n if state is UNQUOTED:\n state = FINISHED\n if state is not FINISHED:\n raise UnsupportedAssignmentError(\"state is %s but should be FINISHED - probably trying to parse a continued line - this is not supported - faulty line: l%d: %r\" % (state.name, linenum + 1, unit))\n \n value = ''.join(splitted_value)\n rotl = right_part[pos_rotl:]\n \n reslst.append((varname, value, rotl))\n resdct[varname] = value\n \n return reslst, resdct", "def test_ParseSectionBlock(self):\n PSB_test = [\n {\n 'string': r'value1=test1,value2=test2,value3=test3',\n 'values': {'value1': 'test1', 'value2': 'test2', 'value3': 'test3'}\n },\n {\n 'string': r'TYPE=TRFC,NAME=\\\"Test customer, Cupertino\\\",PORTS=ETH-3&HSL-10,UPORTS=,SPORTS=',\n 'values': {'uports': '', 'sports': '', 'type': 'TRFC', 'name': r'Test customer, Cupertino', 'ports': 'ETH-3&HSL-10'}\n },\n {\n 'string': r'NAME=Test\\,value1=test1',\n 'values': {'name': 'Test\\\\', 'value1': 'test1'}\n }\n ]\n for e in PSB_test:\n res = tl1.ParseSectionBlock(e['string'])\n self.assertEqual(res, e['values'], 'Incorrectly parsed section block, expected: %s output: %s' % (e['values'], res))", "def section1ToDo(report):\n toDos = {\n 'r':[],\n #\"required\"\n 's':[]\n #\"suggested\"\n }\n slos = SLOInReport.objects.filter(report=report).order_by(\"number\")\n if not report.author:\n toDos['r'].append((\"Add author to report\",0))\n if not report.date_range_of_reported_data:\n toDos['s'].append((\"Add date range of reported data\",0))\n if slos.count() is 0:\n toDos['r'].append((\"Create an SLO\",1))\n if SLOsToStakeholder.objects.filter(report=report).count() is 0:\n toDos['r'].append((\"Add description of how SLOs are communicated to stakeholders\",1))\n for slo in slos:\n b = blooms_suggestion(slo.goalText)\n if b and b != slo.slo.get_blooms_display and b!=\"none\":\n toDos['s'].append((\"Set the Bloom's level of SLO \"+str(slo.number)+\" to \"+b,1))\n if is_complex(slo.goalText):\n toDos['s'].append((\"Simplify or split SLO \"+str(slo.number)+\" into multiple, focused SLOs\",1))\n return toDos, slos", "def PolyLine2PipeLines(polyLine, parts, source = '', country_code = ''): \n # Initialization\n RetPipeLines = []\n # total lenght of polyline\n Length = 0\n\n # finding last element of polyline for later looop\n parts.append(len(polyLine.long))\n \n \n # go through each double entry and in corresponding \n countSegments = 0\n for pp in range(len(parts) - 1):\n Length = 0\n # Creation of lat/long pair of current range\n Line = K_Component.PolyLine(lat = [], long = [])\n for ii in range(parts[pp], parts[pp + 1]):\n Line.lat.append((polyLine.lat[ii]))\n Line.long.append((polyLine.long[ii]))\n \n # Determination of the lengthof each segment\n for ii in range(len(polyLine.long) - 1):\n try:\n long1 = polyLine.long[ii]\n lat1 = polyLine.lat[ii]\n long2 = polyLine.long[ii+1]\n lat2 = polyLine.lat[ii+1]\n if math.isfinite(long1) and math.isfinite(long2) and math.isfinite(lat1) and math.isfinite(lat2):\n Length = Length + M_DataAnalysis.distance(long1, lat1, long2, lat2)\n except:\n pass\n \n\n id 
= str(countSegments)\n node_id = [pp*2, pp*2 + 1]\n name = str(countSegments)\n source_id = [source + '_' + str(countSegments)]\n lat = Line.lat\n long = Line.long\n \n\n RetPipeLines.append(K_Component.PipeLines(id = id, node_id = node_id, name = name, \n source_id = source_id, lat = lat, long = long, \n country_code = [country_code, country_code], param = {'length': Length}))\n \n countSegments = countSegments + 1\n\n\n \n return RetPipeLines" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find first section with name = section_name
def find_first_section(self, section_name):
    assert isinstance(section_name, tuple) or isinstance(section_name, list)
    for s in self._los:
        if self.list_le(section_name, s[0]):
            return s
    return None
[ "def sectionByName(self, name):\n for section in self._sections:\n if name == section.name:\n return section\n return None", "def get_section(section):", "def get_section_by_name(self, name):\r\n # The first time this method is called, construct a name to number\r\n # mapping\r\n #\r\n if self._section_name_map is None:\r\n self._section_name_map = {}\r\n for i, sec in enumerate(self.iter_sections()):\r\n self._section_name_map[sec.name] = i\r\n secnum = self._section_name_map.get(name, None)\r\n return None if secnum is None else self.get_section(secnum)", "def get_section_by_name(self, name):\n # The first time this method is called, construct a name to number\n # mapping\n #\n if self._section_name_map is None:\n self._section_name_map = {}\n for i, sec in enumerate(self.iter_sections()):\n self._section_name_map[sec.name] = i\n secnum = self._section_name_map.get(name, None)\n return None if secnum is None else self.get_section(secnum)", "def _getTopSection(self, name):\n section = self.query(name)\n assert len(section) == 1\n return section[0]", "def __getitem__(self, section_name):\n return self._toml[section_name]", "def get_section_from_chunk(chunk, sectionname, item):\n section = []\n in_section = False\n is_done = False\n i = -1\n\n# print(\"Looking for: \"+sectionname)\n for line in chunk:\n if is_done:\n # Something in the previous iteration decided we should stop processing this chunk\n break\n i += 1\n if line == sectionname:\n # We found the section we're looking for\n in_section = True\n continue\n if in_section:\n for check_section_name in SECTION_NAMES:\n # Check to see if we've hit another section\n if line == check_section_name+\":\":\n # We've hit another section, signal the outer loop to stop\n is_done = True\n break\n if not is_done:\n # We're still in our section, so store the line\n section.append(line)\n if section[-1] == \"\":\n # Sections usually end with a blank line, but we don't want it, so remove it\n section.pop()\n if \"\" in section and sectionname not in [\"Notes:\", \"Examples:\"]:\n # Having removed any final blank lines, there should be no further blank lines, but we found one\n message = \"%s has a blank line in %s\" % (item[\"signature\"], sectionname)\n warn(message)\n LINTS.append({\n \"file\": item[\"file\"],\n \"line\": int(item[\"lineno\"]) + 3,\n \"title\": \"Blank lines should not occur within sections\",\n \"message\": message,\n \"annotation_level\": \"failure\"\n\n })\n\n return section", "def get_section(cfg, section):\n section_lines = []\n is_append_section = False\n\n for line in cfg.splitlines():\n line = line.strip()\n\n if line.startswith('section') and not is_append_section:\n cfg_section = line.split('=', 1)[1].strip()\n if cfg_section == section:\n is_append_section = True\n elif line.startswith('section') and is_append_section:\n break # skip any subsequent sections\n\n if is_append_section:\n section_lines.append(line)\n\n return section_lines", "def _section_from_spec(self, spec):\r\n try:\r\n num = int(spec)\r\n if num < self.elffile.num_sections():\r\n return self.elffile.get_section(num)\r\n else:\r\n return None\r\n except ValueError:\r\n # Not a number. Must be a name then\r\n return self.elffile.get_section_by_name(str2bytes(spec))", "def _section_from_spec(self, spec):\n try:\n num = int(spec)\n if num < self.elffile.num_sections():\n return self.elffile.get_section(num)\n else:\n return None\n except ValueError:\n # Not a number. 
Must be a name then\n return self.elffile.get_section_by_name(str2bytes(spec))", "def get_section_by_offset(self, offset):\n\n for section in self.sections:\n if section.contains_offset(offset):\n return section\n\n return None", "def parse_section(self, name):\n options = dict(self.parser.items(name))\n factory_string = self._get_string(name, 'recipe', DEFAULT_RECIPE)\n recipe = self.load_recipe(factory_string, name, options)\n requirements = self._get_list(name, 'requires')\n recipe.requirements = [self.parse_section(req) for req in requirements]\n parts = self._get_list(name, 'parts')\n recipe.parts = [self.parse_section(part) for part in parts]\n return recipe", "def get_section(section_index):\n return lp_start_end_data[section_index]", "def section_at(self, offset: int) -> Section | None:\n sections = self._sections\n\n def after(i: int) -> bool:\n return sections[i].start > offset\n\n idx = search(0, len(sections), after)\n if idx != 0:\n section = sections[idx - 1]\n if section.start <= offset < section.end:\n return section\n return None", "def find_section_text(lines, section, go_to_end=False, section2=\"\"):\n if len(lines) == 0:\n return \"\"\n n = 0\n for line in lines:\n line_mod = line.replace(\" \", \"\")\n if line_mod.startswith(\"==%s\" % section) \\\n or (section2 != \"\" and line_mod.startswith(\"==%s\" % section2)):\n # Section started\n n += 1\n doc = \"\"\n # collect the documents till next section or the end \n newline = lines[n]\n while (go_to_end or not newline.strip().startswith('==')) \\\n and not newline.strip().startswith('[[Category'):\n doc += newline + '\\n'\n n += 1\n if n < len(lines):\n newline = lines[n]\n else:\n break\n return doc\n n += 1\n \n return \"\"", "def getSection(self,index):\n addr = HopperLowLevel.getSectionAddress(self.__internal_segment_addr__, index)\n if addr == 0:\n return None\n return Section(addr)", "def getSectionAtAddress(self,addr):\n idx=self.getSectionIndexAtAddress(addr)\n if idx == -1:\n return None\n return self.getSection(idx)", "def search(tokens, section):\n for t in tokens:\n if t[0] == section:\n return t[1:]\n return []", "def get_section(soup, attrs={}, name='div', all=False):\n if all == False:\n if isinstance(attrs, dict):\n return soup.find(name=name, attrs=attrs)\n else:\n tag = soup\n for ss in attrs:\n tag = tag.find(name=name, attrs=ss)\n return tag\n else:\n if isinstance(attrs, dict):\n return soup.findAll(name=name, attrs=attrs)\n else: # not sure how to handle this, so I'm forcing exit\n print(\"haven't coded this yet\")\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check if ip_address lies in ip_address range
def ip_in_range(self, ip, range):
    ip_lst = ip.split('.')
    for i1, i2, i3 in zip(range[0], ip_lst, range[1]):
        if int(i1) == int(i2) and int(i2) == int(i3):
            continue
        elif int(i1) <= int(i2) <= int(i3):
            return True
        else:
            return False
[ "def check_ip_in_subnet(self, ip_address):\n flag = False\n if compare_2_ips(ip_address.ip, self.ip) >=0 and compare_2_ips(ip_address.ip, self.max_ip) <= 0:\n flag = True\n return flag", "def inIPv4Range(ip: int, ipRange: rules.Ipv4Range) -> bool:\r\n\r\n if ipRange.mask > 32 or ipRange.mask < 0:\r\n raise ValueError(\"The mask of ipRange is invalid: %d. Should be in [0,32]\" % ipRange.mask)\r\n\r\n mask = ~((1 << (32 - ipRange.mask)) - 1)\r\n return ipRange.ip & mask == ip & mask", "def check_ip_from_defined_network(address):\n ip = ipaddress.ip_address(address)\n if not Network.objects.filter(\n min_ip__lte=int(ip), max_ip__gte=int(ip)\n ):\n raise ValidationError(\n 'IP {} doesn\\'t belong to any network!'.format(address)\n )", "def ip(indicator):\n try:\n ipaddress.ip_address(indicator)\n except ValueError:\n return False\n else:\n return True", "def _is_valid_ip_address(cls, s):\n return iptools.ipv4.validate_ip(s) or iptools.ipv6.validate_ip(s)", "def check_ip(ip_a: str, ip_b: str) -> bool:\n return ip_a.split(\".\")[:2] == ip_b.split(\".\")[:2]", "def is_valid_ip_address(address, family=socket.AF_INET):\r\n try:\r\n socket.inet_pton(family, address)\r\n except socket.error:\r\n return False\r\n\r\n return True", "def is_ip_valid(ip):\n try:\n ipaddress.ip_address(unicode(ip))\n except:\n return False\n return True", "def validate_ip_min_max(min_ip: str, max_ip: str) -> bool:\n min_ip_bytes = ip_str_to_byte_array(min_ip)\n if not min_ip_bytes:\n logging.error(\"invalid ip address: {}\".format(min_ip))\n return False\n max_ip_bytes = ip_str_to_byte_array(max_ip)\n if not max_ip_bytes:\n logging.error(\"invalid ip address: {}\".format(max_ip))\n return False\n min_ip_int = ip_bytes_to_int(min_ip_bytes)\n max_ip_int = ip_bytes_to_int(max_ip_bytes)\n if min_ip_int >= max_ip_int:\n return False\n return True", "def is_cloud_ip(self, ip_address):\n if self.NODE_ID_REGEX.match(ip_address):\n return True\n elif self.IP_REGEX.match(ip_address):\n return False\n else:\n self.invalid(\"IP: {} does not match ip or node-id formats.\".format(\n ip_address))", "def is_in_boundary(x, start, end):\n return x >= start and x <= end", "def ip_in_subnet(ip, subnet):\n try:\n ip = IP(ip) \n subnet = IP(subnet)\n\n except: \n print('%s is not a valid ip or %s a valid subnet' %(ip, subnet))\n return False\n else:\n result = ip in subnet\n if not result:\n print('%s not in %s' %(ip, subnet))\n return result", "def _in_range(value, range):\n # TODO: Implement this\n return True", "def ip_in_list(ip_to_check, addresses, delimiter=','):\n\n try:\n ip_to_check_converted = ipaddress.ip_address(ip_to_check)\n except:\n print(f'{ip_to_check} is not a valid IPv4 address')\n sys.exit(1)\n\n list_of_ranges = [item.strip() for item in addresses.split(delimiter)]\n\n for item in list_of_ranges:\n if '/' in item:\n try:\n network = convert_ip_cidr(item)\n if ip_to_check_converted in network:\n return True, item \n except Exception as e:\n print(e)\n pass\n else:\n try:\n ip_object_list = convert_ip(item)\n if ip_to_check_converted in ip_object_list:\n return True, item\n except Exception as e:\n print(e)\n pass \n\n return False, None", "def ips_in_pools(ips: list, pools: list)->bool:\n for pool in pools:\n min_ip_str = pool[\"ipRangeStart\"]\n max_ip_str = pool[\"ipRangeEnd\"]\n min_ip_int = ip_str_to_int(min_ip_str)\n if min_ip_int == -1:\n logging.error(\"invalid ip address: {}\".format(min_ip_str))\n return False\n max_ip_int = ip_str_to_int(max_ip_str)\n if max_ip_int == -1:\n logging.error(\"invalid ip address: 
{}\".format(max_ip_str))\n for ip in ips:\n ip_int = ip_str_to_int(ip)\n if ip_int == -1:\n logging.error(\"invalid ip address: {}\".format(ip))\n if ip_int < min_ip_int or ip_int > max_ip_int:\n return False\n return True", "def ext_network_ok(default_gateway, external_netmask, external_ip_range):\n ext_network = to_network(default_gateway, external_netmask)\n ext_ip_low, ext_ip_high = [ipaddress.IPv4Address(x) for x in external_ip_range]\n high_ip_ok = ext_ip_high in ext_network\n low_ip_ok = ext_ip_low in ext_network\n answer = high_ip_ok and low_ip_ok\n return answer", "def _is_satisfied(self, what) -> bool:\n try:\n ip_addr = ipaddress.ip_address(what)\n net = ipaddress.ip_network(self.value)\n except ValueError:\n return False\n return ip_addr in net", "def is_ip(addr):\n for family in [socket.AF_INET, socket.AF_INET6]:\n try:\n socket.inet_pton(family, addr)\n return True\n except socket.error:\n pass\n\n return False", "def isIP(arg):\n valid = ipFmt.match(arg)\n if valid:\n return True\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
inetnum section parser. Write org_name and country into nac
def parse_inetnum_sec(self, inetnum_sec, nac):
    country_lst = self.find_all_items(inetnum_sec, ('country',))
    if len(country_lst) == 0:
        self._messanger.send_message("Can't find country in inetnum section")
    else:
        nac[COUNTRY] = country_lst[0]

    org_name_lst = self.find_all_items(inetnum_sec, ('descr',))
    if len(org_name_lst) == 0:
        self._messanger.send_message("Can't find description in inetnum section")
    else:
        nac[ORGNAME] = org_name_lst[0]
[ "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def parse_org_sec(self, org_section, nac):\n org_name_lst = self.find_all_items(org_section, ('org-name',))\n if len(org_name_lst) == 0:\n self._messanger.send_message(\"Can't find organisation name in organisation section\")\n else:\n nac[ORGNAME] = org_name_lst[0]\n\n org_address_lst = self.find_all_items(org_section, ('address',))\n if len(org_address_lst) == 0:\n self._messanger.send_message(\"Can't find organisation address in organisation section\")\n else:\n nac[ORGADDRESS] = org_address_lst", "def parse_person_sec(self, person_section, nac):\n person_name = self.find_first_item(person_section, ('person',))\n\n if person_name is None:\n self._messanger.send_message(\"Can't find name in person section\")\n else:\n nac[ORGNAME] = person_name\n\n address_lst = self.find_all_items(person_section, ('address',))\n if len(address_lst) == 0:\n self._messanger.send_message(\"Can't find address in person section\")\n else:\n nac[ORGADDRESS] = address_lst", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n\n if self.check_simple_org_format():\n org_name = self.parse_arin_simple_org()\n nac[ORGNAME] = org_name\n else:\n ref_ser = self.find_referral_server()\n if ref_ser:\n server_name, port_number = ref_ser\n # raw_whois = self.receive_raw_whois(ip_address, server_name, port_number)\n whois_parser = self._manager.create_parser(self._ip_address, server_name, port_number)\n whois_parser.receive_raw_whois()\n nac = whois_parser.parse()\n else:\n self.parse_arin_org(nac)\n return nac", "def give_ipv4(self):\n ip_8_bit_pos = 0\n ip_16_bit_pos = 128\n ip_24_bit_pos = 1\n for component in self.netkit_components:\n for IF in component.attr['IF']:\n\tprefix = component.attr['map_IF_prefix'][IF]\n\tprefix_length = len(prefix)\n\tif prefix_length <= 3: # 8 first bits\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_8_bit_pos)+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 7: # ex : 123.201\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 11:\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_24_bit_pos)\n\telse :\n\t print \"Error in prefix, this length is not supported\"\n\t sys.exit(-1)\n if ip_24_bit_pos < 255:\n\t ip_24_bit_pos+=1\n\telse:\n\t ip_24_bit_pos = 1\n\t if ip_16_bit_pos < 255:\n\t ip_16_bit_pos+=1\n\t else:\n\t ip_16_bit_pos=128\n\t if ip_8_bit_pos < 255:\n\t ip_8_bit_pos +=1\n\t else:\n\t print \"Error, to much elements. 
trololol this error will never be printed\"\n\t sys.exit(-1)", "def parse_conf(infile,sntx):\n parse = CiscoConfParse(infile, syntax=sntx, factory=True)\n\n int_cmds=parse.find_objects(r'^interface ')\n\n result={}\n result['interfaces']={}\n\n for interface_cmd in int_cmds:\n\n # get the interface name (remove the interface command from the configuration line)\n intf_name = interface_cmd.text[len(\"interface \"):]\n result[\"interfaces\"][intf_name] = {}\n\n # search for the description command, if not set use \"not set\" as value\n result[\"interfaces\"][intf_name][\"description\"] = \"not set\"\n for cmd in interface_cmd.re_search_children(r\"^\\s+description \"):\n result[\"interfaces\"][intf_name][\"description\"] = cmd.text.strip()[len(\"description \"):]\n\n IPv4_REGEX = r\" ip\\saddress\\s((\\d+\\.\\d+\\.\\d+\\.\\d+)\\s(\\d+\\.\\d+\\.\\d+\\.\\d+)|(\\d+\\.\\d+\\.\\d+\\.\\d+)\\/\\S+)$\"\n for cmd in interface_cmd.re_search_children(IPv4_REGEX):\n result[\"interfaces\"][intf_name][\"ip_conf\"] = {}\n ipv4_addr = interface_cmd.re_match_iter_typed(IPv4_REGEX, result_type=IPv4Obj)\n result[\"interfaces\"][intf_name][\"ip_conf\"].update({\n \"ipv4\": {\n \"address\": ipv4_addr.ip.exploded,\n \"netmask\": ipv4_addr.netmask.exploded\n }\n })\n IPv4_REGEX_S = r\" ip\\saddress\\s((\\d+\\.\\d+\\.\\d+\\.\\d+)\\s(\\d+\\.\\d+\\.\\d+\\.\\d+)|(\\d+\\.\\d+\\.\\d+\\.\\d+)\\/\\S+)\\ssecondary$\"\n for cmd in interface_cmd.re_search_children(IPv4_REGEX_S):\n ipv4_addr = interface_cmd.re_match_iter_typed(IPv4_REGEX_S, result_type=IPv4Obj)\n result[\"interfaces\"][intf_name][\"ip_conf\"].update({\n \"secondary\": {\n \"address\": ipv4_addr.ip.exploded,\n \"netmask\": ipv4_addr.netmask.exploded\n \n }\n })\n \n\n HSRP_REGEX = r\"^ standby\\s\\d{1,3}\\sip\\s(\\S+)\"\n for cmd in interface_cmd.re_search_children(HSRP_REGEX):\n ipv4_addr = interface_cmd.re_match_iter_typed(HSRP_REGEX, result_type=IPv4Obj)\n result[\"interfaces\"][intf_name][\"ip_conf\"].update({\n \"hsrp\": {\n \"address\": ipv4_addr.ip.exploded,\n \"netmask\": ipv4_addr.netmask.exploded\n }\n })\n\n\n result[\"interfaces\"][intf_name][\"is_ether\"]=interface_cmd.is_ethernet_intf\n result[\"interfaces\"][intf_name][\"is_port_chan\"]=interface_cmd.is_portchannel_intf\n result[\"interfaces\"][intf_name][\"is_subintf\"]=interface_cmd.is_subintf\n result[\"interfaces\"][intf_name][\"port_type\"]=interface_cmd.port_type\n result[\"interfaces\"][intf_name][\"ordinal_list\"]=interface_cmd.ordinal_list\n if interface_cmd.has_ip_hsrp:\n result[\"interfaces\"][intf_name][\"ip_conf\"].update({\n \"hsrp\": {\n \"address\": interface_cmd.hsrp_ip_addr,\n \"netmask\": '255.255.255.255'\n }\n })\n\n if interface_cmd.intf_in_portchannel:\n result[\"interfaces\"][intf_name][\"member_of\"]=interface_cmd.portchannel_number\n \n pprint(result)\n return result", "def parse_person(self, s, nac):\n org_name = self.find_first_item(s, ('person',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find person in Person section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Person section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Person section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac", "def num_to_country(self, cluster_num, type=1):\n if type:\n for n in cluster_num:\n print(n, \": \", self.header[n])\n print(\"------------\")\n else:\n for 
n in cluster_num:\n print(n, \": \", self.header[n], \", \", end=\"\")\n print()", "def three_addr_form(self):\n for fsym, cfg in self.cfgs.iteritems():\n print(\"three_addr_form in \" + fsym.name)\n cfg.to_three_addr_form()", "def _ip_address_spec(output, ipaddress, netmask, interface, scope, active ):\n output.beginAssembling(\"IPaddressSpec\")\n output.setVirtualNameValue(\"IPaddress\", ipaddress)\n output.setVirtualNameValue(\"IPnetmask\", netmask)\n output.setVirtualNameValue(\"InterfaceName\", interface)\n output.setVirtualNameValue(\"Active\", scope)\n output.setVirtualNameValue(\"Scope\", active)\n output.endAssembling(\"IPaddressSpec\")", "def get_network_info():\n f = open('/proc/net/route', 'r')\n lines = f.readlines()\n words = lines[1].split()\n local_network_ip = words[1]\n subnet_mask = words[7]\n local_network_array = []\n subnet_mask_array = []\n for i in range(8, 1, -2):\n octet = local_network_ip[i-2:i]\n octet = int(octet, 16)\n local_network_array.append(octet)\n octet = subnet_mask[i-2:i]\n octet = int(octet, 16)\n subnet_mask_array.append(octet)\n for i in range(4):\n local_network_start.append(local_network_array[i] & subnet_mask_array[i])\n local_network_end.append(local_network_array[i] | ((~subnet_mask_array[i]) & 0xFF))", "def parsed_metadata(self, msg):\n ips = msg.get_header_ips()\n result = []\n for ipaddr in ips:\n country = self.get_country(ipaddr)\n result.append(str(country))\n if result:\n result = \" \".join(result)\n msg.headers[\"X-Relay-Countries\"].append(result)\n self.ctxt.log.debug(\"X-Relay-Countries: '%s'\", result)\n msg.plugin_tags[\"RELAYCOUNTRY\"] = result", "def ip2asn(ipaddr):\n\n try:\n ip = ipaddress.ip_network(ipaddr)\n except ValueError:\n return {}\n\n if ip.is_private:\n return {}\n\n if ip.version == 4:\n\n a, b, c, d = str(ip.exploded).split('/')[0].split('.')\n reversed = \"%s.%s.%s.%s\" % (d, c, b, a)\n name = \"%s.origin.asn.cymru.com\" % (reversed)\n\n else:\n only_addr = str(ip.exploded).split('/')[0].replace(':', '')\n\n reversed = ''\n\n for number in only_addr[::-1]:\n\n reversed += number\n reversed += '.'\n\n reversed = reversed.rstrip('.')\n\n name = \"%s.origin6.asn.cymru.com\" % (reversed)\n\n try:\n response = dns.resolver.resolve(name, 'TXT')\n except:\n return {}\n\n # \"15169 | 8.8.4.0/24 | US | arin |\"\n r = {}\n r['asn'] = response[0].to_text().split('|')[0].strip(\" \\\"\").split(' ')[0]\n r['net'] = response[0].to_text().split('|')[1].strip(\" \\\"\")\n r['cc'] = response[0].to_text().split('|')[2].strip(\" \\\"\")\n r['rir'] = response[0].to_text().split('|')[3].strip(\" \\\"\").upper()\n r['asname'] = 'unknown'\n\n # Get AS Name\n # \"15169 | US | arin | 2000-03-30 | GOOGLE - Google Inc.,US\"\n try:\n name = \"AS%s.asn.cymru.com\" % (r['asn'])\n response = dns.resolver.resolve(name, 'TXT')\n r['asname'] = response[0].to_text().split('|')[4].strip(\" \\\"\")\n except:\n pass\n\n return(r)", "def build_ip_header(s,num,ttl,host):\n source_ip, port = s.getsockname()\n\n ip_version = 4\n ip_internet_header_length = 5\n ip_tos = 0\n ip_total_length = 220\n ip_identification = num\n ip_fragment_offset = 0 \n ip_ttl = ttl\n ip_protocol = 1 # 1 = ICMP\n ip_checksum = 0 # Depending on implementation, the kernel or the hardware will calculate this for us :)\n ip_source = socket.inet_aton(source_ip)\n ip_destination = socket.inet_aton(host)\n\n\n ip_ver_ihl = ( ip_version << 4) + ip_internet_header_length\n\n # The ! 
mark means network order\n # This code was written for an Intel Mac\n # Intel Macs are based on the Berkeley-derived kernels, which require a different byte order for\n # IP Headers.\n\n # On many Berkeley-derived kernels, all fields are in the \n # network byte order except ip_len and ip_off, which are in host byte order\n \n ip_header = (struct.pack('!BB',ip_ver_ihl,ip_tos) + struct.pack('H',ip_total_length) + \n struct.pack('!H',ip_identification) + struct.pack('H',ip_fragment_offset) + \n struct.pack('!BB',ip_ttl,ip_protocol) + struct.pack('!H',ip_checksum) + \n struct.pack('!4s4s',ip_source,ip_destination))\n\n return ip_header", "def get_asn_number(url):\n try:\n with geoip2.database.Reader(PATH + 'GeoLite2-ASN.mmdb') as reader:\n if valid_ip(url['host']):\n ip = url['host']\n else:\n ip = resolver.query(url['host'], 'A')\n ip = ip[0].to_text()\n\n if ip:\n response = reader.asn(ip)\n return response.autonomous_system_number\n else:\n return '-1'\n except Exception:\n return '-1'", "def parse_ipaddr(self):\n\t\tipaddr_dict = defaultdict(int)\n\t\ttry:\n\t\t\twith open(self.LOG_FILE, \"r\") as log_text:\n\t\t\t # Reads file line by line without loading the whole contents into memory\n\t\t\t for columns in ( raw.strip().split() for raw in log_text ): \n\t\t\t ipaddr_dict[str(columns[2])]=1\t\n\t\texcept IOError:\n\t\t\tprint \"IOError: Could not read file:\", LOG_FILE\n\t\treturn ipaddr_dict", "def main(response):\n with open(response, 'r') as xmlfile:\n for i, line in enumerate(xmlfile):\n ip = parseIP(line)\n if ip != -1:\n print ip\n xmlfile.close()", "def formatNetAdrs(ipbytes,maskbits):\n return formatIpAdrs(ipbytes)+(\"/%d\" % maskbits)", "def _parseIp(self):\n if self._is_ip:\n self._src_ip0 = self._src_ip = socket.inet_ntoa(self._hdr.src)\n self._dst_ip0 = self._dst_ip = socket.inet_ntoa(self._hdr.dst)\n self.proto = self.handled_protocols.get(self.proto_num)\n\n # If this is a transport protocol we handle...\n if self.proto:\n self._tcpudpcsum0 = self._hdr.data.sum\n self._sport0 = self._sport = self._hdr.data.sport\n self._dport0 = self._dport = self._hdr.data.dport\n self.skey = self._genEndpointKey(self._src_ip, self._sport)\n self.dkey = self._genEndpointKey(self._dst_ip, self._dport)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
organization section parser. Write org_name and address to nac
def parse_org_sec(self, org_section, nac):
    org_name_lst = self.find_all_items(org_section, ('org-name',))
    if len(org_name_lst) == 0:
        self._messanger.send_message("Can't find organisation name in organisation section")
    else:
        nac[ORGNAME] = org_name_lst[0]

    org_address_lst = self.find_all_items(org_section, ('address',))
    if len(org_address_lst) == 0:
        self._messanger.send_message("Can't find organisation address in organisation section")
    else:
        nac[ORGADDRESS] = org_address_lst
[ "def parse_person_sec(self, person_section, nac):\n person_name = self.find_first_item(person_section, ('person',))\n\n if person_name is None:\n self._messanger.send_message(\"Can't find name in person section\")\n else:\n nac[ORGNAME] = person_name\n\n address_lst = self.find_all_items(person_section, ('address',))\n if len(address_lst) == 0:\n self._messanger.send_message(\"Can't find address in person section\")\n else:\n nac[ORGADDRESS] = address_lst", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def parse_role_sec(self, role_section, nac):\n org_name_lst = self.find_all_items(role_section, ('role',))\n if len(org_name_lst) == 0:\n self._messanger.send_message(\"Can't find organisation name in role section\")\n else:\n nac[ORGNAME] = org_name_lst[0]\n\n org_address_lst = self.find_all_items(role_section, ('address',))\n if len(org_address_lst) == 0:\n self._messanger.send_message(\"Can't find organisation address in role section\")\n else:\n nac[ORGADDRESS] = org_address_lst", "def organizations():", "def create_org_payload(self):\n organizations = ET.Element(\"organizations\")\n organization = ET.Element(\"organization\")\n organizations.append(organization)\n name = ET.SubElement(organization, \"name\")\n name.text = self._module.paramgram[\"org_name\"]\n fullName = ET.SubElement(organization, \"fullName\")\n fullName.text = self._module.paramgram[\"org_display_name\"]\n description = ET.SubElement(organization, \"description\")\n description.text = self._module.paramgram[\"org_description\"]\n if self._module.paramgram[\"uri\"] == \"/phoenix/rest/organization/add\":\n adminUser = ET.SubElement(organization, \"adminUser\")\n adminUser.text = self._module.paramgram[\"org_admin_username\"]\n adminPwd = ET.SubElement(organization, \"adminPwd\")\n adminPwd.text = self._module.paramgram[\"org_admin_password\"]\n adminEmail = ET.SubElement(organization, \"adminEmail\")\n adminEmail.text = self._module.paramgram[\"org_admin_email\"]\n includeRange = ET.SubElement(organization, \"includeRange\")\n includeRange.text = self._module.paramgram[\"org_include_ip_range\"]\n excludeRange = ET.SubElement(organization, \"excludeRange\")\n excludeRange.text = self._module.paramgram[\"org_exclude_ip_range\"]\n\n if self._module.paramgram[\"uri\"] == \"/phoenix/rest/organization/add\":\n custResource = ET.Element(\"custResource\")\n organization.append(custResource)\n eps = ET.SubElement(custResource, \"eps\")\n eps.text = self._module.paramgram[\"org_eps\"]\n max_devices = ET.SubElement(custResource, \"configItem\")\n max_devices.text = str(self._module.paramgram[\"org_max_devices\"])\n\n # CONCAT COLLECTORS BEFORE APPENDING IF SPECIFIED\n if self._module.paramgram[\"org_collectors\"]:\n # EXPECTS A LIST\n collector_data = self._module.paramgram[\"org_collectors\"]\n if isinstance(collector_data, list):\n # collector_xml = \"<collectors>\"\n 
collectors = ET.Element(\"collectors\")\n organization.append(collectors)\n for col in collector_data:\n collector = ET.SubElement(collectors, \"collector\")\n col_eps = ET.SubElement(collector, \"eps\")\n col_eps.text = col[\"eps\"]\n col_name = ET.SubElement(collector, \"name\")\n col_name.text = col[\"name\"]\n\n # OR IF A SINGLE COLLECTOR VIA PARAMETERS IS DEFINED\n elif self._module.paramgram[\"org_collector_name\"] and self._module.paramgram[\"org_collector_eps\"]:\n collectors = ET.Element(\"collectors\")\n organization.append(collectors)\n collector = ET.SubElement(collectors, \"collector\")\n col_eps = ET.SubElement(collector, \"eps\")\n col_eps.text = self._module.paramgram[\"org_collector_eps\"]\n col_name = ET.SubElement(collector, \"name\")\n col_name.text = self._module.paramgram[\"org_collector_name\"]\n\n xmlstr = ET.tostring(organizations, 'utf-8')\n return xmlstr", "def pt_organization_uri(name):\n\n\treturn 'organization/' + alphaNumeric(name.strip().lower(), '')", "def parse_organism(self):\n string = self.organism\n name, host_genus = \\\n basic.parse_names_from_record_field(string)\n self._organism_name = name\n self._organism_host_genus = host_genus", "def check_organizations(self,words,entity_tag):\n idWord=0\n last_org=-1\n for word in words:\n if word.title():\n first_letter=word[0]\n if re.search('[ÑA-Z]', first_letter)!=None and re.compile(self.organizations[first_letter]).search(word):\n entity_tag[idWord]='ORG'\n last_org=idWord\n idWord+=1", "def is_organization(word):\n letters = string.ascii_uppercase + 'Ñ'\n organizations = dict.fromkeys(letters, [])\n \n load_array('files/ner/org_esp_s.dat', organizations)\n\n if word.title():\n first_letter=word[0]\n if re.search('[ÑA-Z]', first_letter)!=None and re.compile(organizations[first_letter]).search(word):\n return 'ORG'\n else:\n return False", "def getOrganization(self, organizations):\n \n dict_organizations = {} #Empty dictionary to save the organizations\n\n for org in organizations:\n #Empty dictionary to save the attributes of the Organization\n org_att = {}\n org_att['name'] = org.attrib['name']\n org_att['description'] = org.attrib['description']\n org_att['id_equipments'] = org.attrib['id_equipments'].replace(\" \", \"\").split(',')\n\n # If the required information for the Organization is not given the RORI evaluation cannot be done\n if (org_att['name'] or org_att['id_equipments'] or org.attrib['id']) == \"\":\n self.informationRequired = \"Org\"\n\n #Append the attributes to the list of Organizations\n dict_organizations[org.attrib['id']] = org_att\n \n return dict_organizations", "def parse_person(self, s, nac):\n org_name = self.find_first_item(s, ('person',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find person in Person section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Person section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Person section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac", "def _assembly_organism_name(self, refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file, output_organism_name_file):\n\n fout = open(output_organism_name_file, 'w')\n for assembly_file in [refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, 
genbank_bacteria_assembly_file]:\n with open(assembly_file) as f:\n f.readline()\n header = f.readline().strip().split('\\t')\n org_name_index = header.index('organism_name')\n\n for line in f:\n line_split = line.strip().split('\\t')\n\n gid = line_split[0]\n if gid.startswith('GCA_'):\n gid = 'GB_' + gid\n else:\n gid = 'RS_' + gid\n org_name = line_split[org_name_index]\n fout.write('%s\\t%s\\n' % (gid, org_name))\n fout.close()", "def convert_org(data_in):\n with open('output.org', 'w') as file:\n for row in range(len(data) - 1):\n this_row = data_in.iloc[row, :]\n file.write(f\"* {row}\\n\") # creates heading. starts at 0\n file.write(\"#+BEGIN_VERSE:\\n\") # verse keeps whitespace\n file.write(str(this_row['quote']) + \"\\n\")\n file.write(\"#+END_VERSE\\n\")\n file.write(str(this_row['author']) + \"\\n\")\n file.write(str(this_row['source']) + \"\\n\")\n file.write(str(this_row['created time']) + \"\\n\")", "def orcid_author_get_parser(orcid):\n \n out_file = \"data/orcid_author_get.xml\"\n fout = open(out_file, \"w\")\n print(ORCID.orcid_author_get(orcid, kind=\"xml\").encode('utf-8'), file=fout)\n fout.close()\n \n tree = ET.parse(out_file)\n root_element = tree.getroot()\n ns = '{http://www.orcid.org/ns/orcid}'\n \n author = {'othernames': [], 'urls': [], 'identifiers': []}\n \n for child1 in root_element:\n if(child1.tag == ns + 'orcid-profile'):\n for child2 in child1:\n if(child2.tag == ns + 'orcid-identifier'):\n for child3 in child2:\n if(child3.tag == ns + 'path'):\n author['orcid'] = child3.text\n elif(child2.tag == ns + 'orcid-bio'):\n for child3 in child2:\n if(child3.tag == ns + 'personal-details'):\n for child4 in child3:\n if(child4.tag == ns + 'given-names'):\n author['firstname'] = child4.text\n elif(child4.tag == ns + 'family-name'):\n author['lastname'] = child4.text\n elif(child4.tag == ns + 'other-names'):\n for child5 in child4:\n if(child5.tag == ns + 'other-name'):\n author['othernames'].append(child5.text)\n elif(child3.tag == ns + 'researcher-urls'):\n for child4 in child3:\n if(child4.tag == ns + 'researcher-url'):\n for child5 in child4:\n if(child5.tag == ns + 'url'):\n author['urls'].append(child5.text)\n elif(child3.tag == ns + 'contact-details'):\n for child4 in child3:\n if(child4.tag == ns + 'email'):\n author['email'] = child4.text\n elif(child3.tag == ns + 'external-identifiers'):\n for child4 in child3:\n if(child4.tag == ns + 'external-identifier'):\n identifier = {}\n for child5 in child4:\n if(child5.tag == ns + 'external-id-common-name'):\n key = None\n if(child5.text == 'ResearcherID'):\n key = 'ResearcherID'\n elif(child5.text == 'Scopus Author ID'):\n key = 'ScopusID'\n elif(child5.tag == ns + 'external-id-reference'):\n value = child5.text\n if key is not None:\n identifier[key] = value\n author['identifiers'].append(identifier)\n \n return author", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n\n if self.check_simple_org_format():\n org_name = self.parse_arin_simple_org()\n nac[ORGNAME] = org_name\n else:\n ref_ser = self.find_referral_server()\n if ref_ser:\n server_name, port_number = ref_ser\n # raw_whois = self.receive_raw_whois(ip_address, server_name, port_number)\n whois_parser = self._manager.create_parser(self._ip_address, server_name, port_number)\n whois_parser.receive_raw_whois()\n nac = whois_parser.parse()\n else:\n self.parse_arin_org(nac)\n return nac", "def organisation():\n\n otable = db.org_organisation\n\n otable.acronym.readable = False\n otable.acronym.writable = 
False\n field = otable.sector_id\n field.readable = True\n field.writable = True\n field.label = T(\"Industry Sector\")\n org_has_items_field = otable.has_items\n org_has_items_field.default = True\n s3.filter = (org_has_items_field == True)\n\n if not s3_has_role(STAFF):\n # Tweak the breadcrumb\n breadcrumbs[2] = (T(\"Organization Profile\"), False,\n URL(c=request.controller,\n f=request.function,\n args=request.args))\n\n def corporation_rheader(r, tabs = []):\n \"\"\" Corporation rheader \"\"\"\n\n if r.representation == \"html\":\n\n if r.record is None:\n # List or Create form: rheader makes no sense here\n return None\n\n tabs = [(T(\"Basic Details\"), None),\n (T(\"Contacts\"), \"contact\"),\n (T(\"Donate Goods\"), \"don_item\", dict(item=\"goods\")),\n (T(\"Donate Services \"), \"don_item\", dict(item=\"services\")),\n (T(\"Donate Facilities \"), \"don_item\", dict(item=\"facilities\")),\n ]\n if \"register\" not in request.vars:\n tabs.append( (T(\"Donations\"), \"commit\") )\n rheader_tabs = s3_rheader_tabs(r, tabs)\n\n organisation = r.record\n if organisation.sector_id:\n _sectors = org_sector_represent(organisation.sector_id)\n else:\n _sectors = None\n\n sector_label = T(\"Industry Sector(s)\")\n\n rheader = DIV(TABLE(\n TR(\n TH(\"%s: \" % T(\"Corporation\")),\n organisation.name,\n TH(\"%s: \" % sector_label),\n _sectors\n )),\n rheader_tabs\n )\n return rheader\n return None\n\n ADD_CORPORATION = T(\"Add Corporation / Organization\")\n LIST_CORPORATIONS = T(\"List Corporations & Organizations\")\n s3.crud_strings[\"org_organisation\"] = Storage(\n title_create = ADD_CORPORATION,\n title_display = T(\"Corporation / Organization Details\"),\n title_list = LIST_CORPORATIONS,\n title_update = T(\"Edit Corporation / Organization\"),\n title_search = T(\"Search Corporations & Organizations\"),\n subtitle_create = T(\"Add New Corporation / Organization\"),\n subtitle_list = T(\"Corporations & Organizations\"),\n label_list_button = LIST_CORPORATIONS,\n label_create_button = ADD_CORPORATION,\n label_delete_button = T(\"Delete Corporation / Organization\"),\n msg_record_created = T(\"Corporation / Organization added\"),\n msg_record_modified = T(\"Corporation / Organization updated\"),\n msg_record_deleted = T(\"Corporation / Organization deleted\"),\n msg_list_empty = T(\"No Corporations & Organizations currently registered\"))\n\n def prep(r):\n don_item_filter(lambda query: \\\n r.resource.add_component_filter(\"don_item\", query))\n if r.component:\n if r.component.name == \"don_item\":\n itable = db.don_don_item\n itable.currency.readable = False\n elif r.component.name == \"human_resource\":\n hrtable = db.hrm_human_resource\n hrtable.type.writable = hrtable.type.readable = False\n hrtable.status.writable = hrtable.status.readable = False\n hrtable.focal_point.writable = hrtable.focal_point.readable = False\n hrtable.job_title.readable = hrtable.job_title.writable = False\n s3.jquery_ready.append(\"$('#hrm_human_resource_person_id__row1').hide();\")\n\n s3.crud_strings[\"hrm_human_resource\"] = Storage(\n title_create = T(\"Add Contact\"),\n title_display = T(\"Contact Details\"),\n title_list = T(\"Contacts\"),\n title_update = T(\"Edit Contact\"),\n title_search = T(\"Search Contacts\"),\n subtitle_create = T(\"Additional Contacts (optional)\"),\n subtitle_list = T(\"Contacts\"),\n label_list_button = T(\"List Contacts\"),\n label_create_button = T(\"Add Contacts\"),\n label_delete_button = T(\"Delete Contact\"),\n msg_record_created = T(\"Contact added\"),\n 
msg_record_modified = T(\"Contact updated\"),\n msg_record_deleted = T(\"Contact deleted\"),\n msg_no_match = T(\"No Contacts Found\"),\n msg_list_empty = T(\"Currently there are no Contact registered\"))\n\n list_fields = s3mgr.model.get_config(\"hrm_human_resource\", \"list_fields\")\n list_fields.remove(\"job_title\")\n configure(\"hrm_human_resource\",\n list_fields = list_fields\n )\n elif r.component.name == \"contact\":\n # Donation Organization Registration Workflow\n if \"register\" in request.vars:\n # Only force the open on 1st run\n s3.show_listadd = True\n configure(\"org_contact\",\n create_next = URL(c=\"don\", f=\"organisation\",\n args = [r.record.id, \"don_item\"],\n vars = dict(item=\"goods\"))\n )\n elif r.component.name == \"commit\":\n s3.crud_strings[\"req_commit\"].subtitle_list = T(\"Donations\")\n configure(\"req_commit\",\n list_fields = [\"req_id\",\n \"status\",\n \"donated_by_id\",\n \"datetime\",\n (T(\"Donated Resource\"),\"item\"),\n \"specs\",\n \"quantity_commit\",\n \"pack_value\",\n \"datetime_available\",\n \"type\",\n \"loan_value\",\n \"return_contact_id\",\n \"site_id\",\n \"datetime_return\",\n \"return_penalty\",\n \"return_instruct\",\n \"insured\",\n \"insure_details\",\n \"warrantied\",\n \"warranty_details\",\n \"transport_req\",\n \"security_req\",\n \"committer_id\",\n \"upload\",\n \"upload_additional\",\n \"comments\"\n ],\n insertable = False,\n editable = False,\n deletable = False,\n )\n \n\n configure(\"org_organisation\",\n list_fields = [\"id\",\n \"name\",\n #\"type\",\n \"sector_id\",\n \"address\",\n \"address_2\",\n \"L3\",\n \"L1\",\n \"upload\",\n \"phone\",\n (T(\"Contact Email\"), \"org_contact_email\"),\n #\"country\",\n #\"website\"\n ])\n\n # req CRUD strings\n REQ = T(\"Donation\")\n #ADD_REQ = T(\"Add Donation\")\n LIST_REQ = T(\"List Donations\")\n s3.crud_strings[\"req_req\"] = Storage(\n #title_create = ADD_REQ,\n title_display = T(\"Donation Details\"),\n title_list = LIST_REQ,\n #title_update = T(\"Edit Donation\"),\n title_search = T(\"Search Donations\"),\n #subtitle_create = ADD_REQ,\n subtitle_list = T(\"Donations\"),\n label_list_button = LIST_REQ,\n #label_create_button = ADD_REQ,\n #label_delete_button = T(\"Remove Donations\"),\n #msg_record_created = T(\"Donation Added\"),\n #msg_record_modified = T(\"Donation updated\"),\n #msg_record_deleted = T(\"Donation removed\"),\n msg_list_empty = T(\"No Donations from this Corporation\"))\n\n return organisation_controller(organisation_rheader = corporation_rheader,\n org_prep = prep)", "def find_organization_element(data_identification_element):\n return data_identification_element.find(\"{gmd}:pointOfContact/{gmd}:CI_ResponsibleParty/{gmd}:organisationName\"\n .format(gmd=NameSpaces.GMD.value[0]),\n Generator.generate_namespace_map())", "def test_put_organization(self):\n pass", "def org(value):\n start_token = '<org>'\n end_token = '</org>'\n return value.replace(start_token,'<i class=\"organisation\">').replace(end_token,'</i>&nbsp;<sup><i class=\"fa fa-briefcase\"></i></sup>')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
role section parser. Write org_name, address to nac
def parse_role_sec(self, role_section, nac):
    org_name_lst = self.find_all_items(role_section, ('role',))
    if len(org_name_lst) == 0:
        self._messanger.send_message("Can't find organisation name in role section")
    else:
        nac[ORGNAME] = org_name_lst[0]

    org_address_lst = self.find_all_items(role_section, ('address',))
    if len(org_address_lst) == 0:
        self._messanger.send_message("Can't find organisation address in role section")
    else:
        nac[ORGADDRESS] = org_address_lst
[ "def parse_role(self, s, nac):\n org_name = self.find_first_item(s, ('role',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find role in Role section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Role section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Role section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac", "def parse_org_sec(self, org_section, nac):\n org_name_lst = self.find_all_items(org_section, ('org-name',))\n if len(org_name_lst) == 0:\n self._messanger.send_message(\"Can't find organisation name in organisation section\")\n else:\n nac[ORGNAME] = org_name_lst[0]\n\n org_address_lst = self.find_all_items(org_section, ('address',))\n if len(org_address_lst) == 0:\n self._messanger.send_message(\"Can't find organisation address in organisation section\")\n else:\n nac[ORGADDRESS] = org_address_lst", "def parse_person_sec(self, person_section, nac):\n person_name = self.find_first_item(person_section, ('person',))\n\n if person_name is None:\n self._messanger.send_message(\"Can't find name in person section\")\n else:\n nac[ORGNAME] = person_name\n\n address_lst = self.find_all_items(person_section, ('address',))\n if len(address_lst) == 0:\n self._messanger.send_message(\"Can't find address in person section\")\n else:\n nac[ORGADDRESS] = address_lst", "def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def create_opportunity_contact_role():", "def massage_roles(self):\n if not self.opts.role:\n self.guess_role()\n if self.opts.role:\n self.opts.role = [xx.lower() for xx in self.opts.role]\n for role in [nrole for nrole in VALID_ROLES\n if nrole[:4] == 'node']:\n if role in self.opts.role and not 'node' in self.opts.role:\n self.opts.role.append('node')\n if 'broker' in self.opts.role and not 'client' in self.opts.role:\n self.opts.role.append('client')\n self.logger.info('Please note: --role=broker implicitly '\n 'enables --role=client to ensure /usr/bin/rhc '\n 'is available for testing and '\n 'troubleshooting.')", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def readPersona_role(file,output):\n personas = []\n 
num_of_conversation = 0\n persona_tmp = []\n current_role = \"\"\n with open(file,'r') as f:\n for line in f.readlines():\n line = line.split()\n if line[0] == '1':\n num_of_conversation += 1\n current_role = \"new\"\n if line[2] == \"persona:\":\n role = line[1]\n if (role != current_role) & (current_role != \"\"):\n personas.append(\" \".join(persona_tmp) + '\\n')\n persona_tmp = []\n persona_tmp.append(\" \".join(line[3:]))\n current_role = role\n\n with open(output,'w') as op:\n op.writelines(personas)", "def pep_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n pepnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.pep_base_url + 'pep-%04d' % pepnum\n sn = nodes.strong('PEP ' + text, 'PEP ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def __init__(self, role_name, org='', course_key=None):\n super().__init__()\n\n self.org = org\n self.course_key = course_key\n self._role_name = role_name", "async def roleinfo(self, ctx, *, role: str):\n\n role = await helpers.role_by_substring(ctx, role)\n\n embed = discord.Embed(title=role.name)\n embed.colour = role.color\n embed.description = f\"{role.id} | Members: {len(role.members)}\"\n embed.add_field(name=\"Color\", value=f\"{role.color}\", inline=False)\n\n if role.permissions.administrator:\n embed.add_field(name=\"Administrator\", value=True)\n\n else:\n paginator = commands.Paginator(prefix=\"\", suffix=\"\")\n\n for permission, value in role.permissions:\n if value:\n paginator.add_line(str(permission).capitalize().replace(\"_\", \" \"))\n\n for page in paginator.pages:\n embed.add_field(name=\"Permissions\", value=page)\n\n await ctx.send(embed=embed)", "def build_role(template=Template(), section=\"CFNRole\", project_name: str = None, **kwargs) -> Ref:\n template.set_version('2010-09-09')\n assert project_name\n project_name += 'Role'\n\n # NOTE: By default CodeBuild manages the policies for this role. If you delete a CFN stack and try to recreate the project\n # or make changes to it when the Codebuild managed Policy still exists, you'll see an error in the UI:\n # `The policy is attached to 0 entities but it must be attached to a single role`. 
(CFN fails with fail to update)\n # Orphaned policies created by CodeBuild will have CodeBuildBasePolicy prepended to them; search for policies with this\n # name and no role and delete to clear the error.\n # TODO: Get a CloudFormation feature request to turn this off for project creation- let CFN manage the policy.\n role_id = template.add_resource(\n Role(\n project_name,\n Path='/',\n AssumeRolePolicyDocument=PolicyDocument(\n Statement=[\n Statement(\n Effect=Allow,\n Action=[AssumeRole],\n Principal=Principal(\"Service\", [\"codebuild.amazonaws.com\"])\n )\n ]\n )\n )\n )\n\n template.add_output([Output(project_name, Value=Ref(role_id))])\n return Ref(role_id)", "def init_role(role): # -> None:\n ...", "def build_acl(self, file):\n \n self._acl = {}\n roles_to_create = {}\n dom = parse(file)\n \n # Find roles to create\n roles_nodes = dom.getElementsByTagName('roleSet')\n for roles_node in roles_nodes:\n role_nodes = roles_node.getElementsByTagName('role')\n for role_node in role_nodes:\n name_nodes = role_node.getElementsByTagName('name')\n parent_nodes = role_node.getElementsByTagName('inheritFrom')\n role_name = name_nodes[0].childNodes[0].data\n roles_to_create[role_name] = []\n \n # Find role parents\n for parent_node in parent_nodes:\n roles_to_create[role_name].append(parent_node.childNodes[0].data)\n \n # build inheritence chain\n for role, parents in roles_to_create.iteritems():\n self.set_role(self._create_role(role, roles_to_create))\n \n # assign permissions\n permissions = dom.getElementsByTagName('permissions')\n for permissions_node in permissions:\n permission_nodes = permissions_node.getElementsByTagName('permission')\n for permission_node in permission_nodes:\n resource_nodes = permission_node.getElementsByTagName('resource')\n role_nodes = permission_node.getElementsByTagName('role')\n privilege_nodes = permission_node.getElementsByTagName('privilege')\n \n for resource_node in resource_nodes:\n resource = Resource()\n resource.name = resource_node.childNodes[0].data\n for privilege_node in privilege_nodes:\n resource.set_privilege(privilege_node.childNodes[0].data)\n \n for role_node in role_nodes:\n try:\n role = self._acl[role_node.childNodes[0].data]\n except:\n raise AccessError('Role in permission is not defined.')\n \n role.set_resource(resource)", "async def settagrole(self, ctx, *, role : str = None):\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\t\tif role == None:\r\n\t\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", \"\")\r\n\t\t\tmsg = 'Add/remove tags now *admin-only*.'\r\n\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\troleName = role\r\n\t\t\trole = DisplayName.roleForName(roleName, ctx.message.guild)\r\n\t\t\tif not role:\r\n\t\t\t\tmsg = 'I couldn\\'t find *{}*...'.format(roleName)\r\n\t\t\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\t\t\tawait ctx.message.channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we can add it\r\n\t\tself.settings.setServerStat(ctx.message.guild, \"RequiredTagRole\", role.id)\r\n\r\n\t\tmsg = 'Role required for add/remove tags set to **{}**.'.format(role.name)\r\n\t\tmsg = Utils.suppressed(ctx,msg)\r\n\t\tawait ctx.message.channel.send(msg)", "def get_role(row):\n role = row[6]\n\n # Normalize roles Lead Link and Rep Link, as they contain the circle name as well\n if \"Lead Link\" in role:\n role = \"Lead Link\"\n\n if \"Rep Link\" in role:\n role 
= \"Rep Link\"\n\n return role", "def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'ALTERNATE': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ALTERNATE': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}},), is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='identityref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with identityref\"\"\",\n 'defined-type': \"openconfig-spanning-tree:identityref\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ROOT': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:DESIGNATED': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'ALTERNATE': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:ALTERNATE': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}, 'oc-stp-types:BACKUP': {'@module': 'openconfig-spanning-tree-types', '@namespace': 'http://openconfig.net/yang/spanning-tree/types'}},), is_leaf=True, yang_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='identityref', is_config=False)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()", "async def role(self, context, *text):\n \n if text[0] in config[\"roles\"].keys():\n subrole = \" \".join(text[1:])\n if subrole in config[\"roles\"].keys():\n await self.bot.say(toggle_role_subrole(text[0], subrole))\n else:\n await self.bot.say(\"One or more of the roles you used is not yet 
configured or does not exist.\")", "def organizations():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
person section parser. Write person name, address to nac
def parse_person_sec(self, person_section, nac):
    person_name = self.find_first_item(person_section, ('person',))

    if person_name is None:
        self._messanger.send_message("Can't find name in person section")
    else:
        nac[ORGNAME] = person_name

    address_lst = self.find_all_items(person_section, ('address',))
    if len(address_lst) == 0:
        self._messanger.send_message("Can't find address in person section")
    else:
        nac[ORGADDRESS] = address_lst
[ "def parse_person(self, s, nac):\n org_name = self.find_first_item(s, ('person',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find person in Person section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Person section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Person section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac", "def convert_to_person_line_delimited(person):\n person_fields = ['RECTYPE', 'YEAR', 'DATANUM', 'SERIAL', 'PERNUM', 'PERWT',\n 'SLWT', 'SLREC', 'RESPONDT', 'FAMUNIT', 'FAMSIZE', 'SUBFAM',\n 'SFTYPE', 'SFRELATE', 'MOMLOC', 'STEPMOM', 'MOMRULE_HIST',\n 'POPLOC', 'STEPPOP', 'POPRULE_HIST', 'SPLOC', 'SPRULE_HIST',\n 'NCHILD', 'NCHLT5', 'NSIBS', 'ELDCH', 'YNGCH', 'RELATE',\n 'RELATED', 'SEX', 'AGE', 'AGEMONTH', 'MARST', 'MARRNO',\n 'AGEMARR', 'CHBORN', 'RACE', 'RACED', 'HISPAN', 'HISPAND',\n 'BPL', 'BPLD', 'MBPL', 'MBPLD', 'FBPL', 'FBPLD', 'NATIVITY',\n 'CITIZEN', 'MTONGUE', 'MTONGUED', 'SPANNAME', 'HISPRULE',\n 'SCHOOL', 'HIGRADE', 'HIGRADED', 'EDUC', 'EDUCD', 'EMPSTAT',\n 'EMPSTATD', 'LABFORCE', 'OCC', 'OCC1950', 'IND', 'IND1950',\n 'CLASSWKR', 'CLASSWKRD', 'WKSWORK1', 'WKSWORK2', 'HRSWORK1',\n 'HRSWORK2', 'DURUNEMP', 'UOCC', 'UOCC95', 'UIND', 'UCLASSWK',\n 'INCWAGE', 'INCNONWG', 'OCCSCORE', 'SEI', 'PRESGL', 'ERSCOR50',\n 'EDSCOR50', 'NPBOSS50', 'MIGRATE5', 'MIGRATE5D', 'MIGPLAC5',\n 'MIGMET5', 'MIGTYPE5', 'MIGCITY5', 'MIGSEA5', 'SAMEPLAC',\n 'SAMESEA5', 'MIGCOUNTY', 'VETSTAT', 'VETSTATD', 'VET1940',\n 'VETWWI', 'VETPER', 'VETCHILD', 'HISTID', 'SURSIM', 'SSENROLL']\n\n line_list = []\n for field in person_fields:\n line_list.append(person[field])\n\n # append a new line at the end\n # line_list.append(\"\\n\")\n\n line = '|'.join(line_list)\n line = line + \"\\n\"\n return line", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def parse_org_sec(self, org_section, nac):\n org_name_lst = self.find_all_items(org_section, ('org-name',))\n if len(org_name_lst) == 0:\n self._messanger.send_message(\"Can't find organisation name in organisation section\")\n else:\n nac[ORGNAME] = org_name_lst[0]\n\n org_address_lst = self.find_all_items(org_section, ('address',))\n if len(org_address_lst) == 0:\n self._messanger.send_message(\"Can't find organisation address in organisation section\")\n else:\n nac[ORGADDRESS] = org_address_lst", "def parse_personal(mdb):\n # Parse Id\n try:\n personalid = mdb.find(\"id\").get_text()\n except Exception:\n personalid = None\n # Parse First Name\n try:\n firstname = mdb.find(\"vorname\").get_text()\n except Exception:\n firstname = None\n # Parse Last Name\n try:\n lastname = mdb.find(\"nachname\").get_text()\n except Exception:\n lastname = None\n # Parse Academic 
Title\n try:\n acad = mdb.find(\"akad_titel\").get_text()\n if acad == \"\":\n acad = None\n except Exception:\n acad = None\n # Parse Year of Birth\n try:\n birthyear = mdb.find(\"geburtsdatum\").get_text()\n except Exception:\n birthyear = None\n # Parse Place of Birth\n try:\n birthplace = mdb.find(\"geburtsort\").get_text()\n except Exception:\n birthplace = None\n # Parse Year of Death\n try:\n death = mdb.find(\"sterbedatum\").get_text()\n if death == \"\":\n death = None\n except Exception:\n death = None\n # Parse Gender\n try:\n gender = mdb.find(\"geschlecht\").get_text()\n except Exception:\n gender = None\n # Parse Party\n try:\n party = mdb.find(\"partei_kurz\").get_text()\n except Exception:\n party = None\n # Parse Occupation\n try:\n occupation = mdb.find(\"beruf\").get_text()\n if occupation == \"\":\n occupation = None\n else:\n occupation = occupation.split(\", \")\n occupation = \";\".join(occupation)\n except Exception:\n occupation = None\n # Parse Parliamentary Periods\n try:\n period = mdb.find_all(\"wp\")\n period = [x.get_text() for x in period]\n period = \";\".join(period)\n except Exception:\n period = None\n # Parse Vita\n try:\n vita = mdb.find(\"vita_kurz\").get_text()\n except Exception:\n vita = None\n \n # Collect to Dict\n personal_dict = {\n \"ID\" : personalid,\n \"FirstName\" : firstname,\n \"LastName\" : lastname,\n \"Name\" : \" \".join([firstname, lastname]),\n \"AcademicTitle\" : acad,\n \"BirthYear\" : birthyear,\n \"BirthPlace\" : birthplace,\n \"DeathYear\" : death,\n \"Gender\" : gender,\n \"Party\" : party,\n \"Occupation\" : occupation,\n \"Period\" : period,\n \"Vita\" : vita\n }\n \n return(personal_dict)", "def parse_positions_person(line):\n dictionary = dict()\n dictionary[\"RECTYPE\"] = line[0:1]\n dictionary[\"YEAR\"] = line[1:5]\n dictionary[\"DATANUM\"] = line[5:7]\n dictionary[\"SERIAL\"] = line[7:15]\n dictionary[\"PERNUM\"] = line[15:19]\n dictionary[\"PERWT\"] = line[19:29]\n dictionary[\"SLWT\"] = line[29:39]\n dictionary[\"SLREC\"] = line[39:40]\n dictionary[\"RESPONDT\"] = line[40:41]\n dictionary[\"FAMUNIT\"] = line[41:43]\n dictionary[\"FAMSIZE\"] = line[43:45]\n dictionary[\"SUBFAM\"] = line[45:46]\n dictionary[\"SFTYPE\"] = line[46:47]\n dictionary[\"SFRELATE\"] = line[47:48]\n dictionary[\"MOMLOC\"] = line[48:50]\n dictionary[\"STEPMOM\"] = line[50:51]\n dictionary[\"MOMRULE_HIST\"] = line[51:52]\n dictionary[\"POPLOC\"] = line[52:54]\n dictionary[\"STEPPOP\"] = line[54:55]\n dictionary[\"POPRULE_HIST\"] = line[55:56]\n dictionary[\"SPLOC\"] = line[56:58]\n dictionary[\"SPRULE_HIST\"] = line[58:59]\n dictionary[\"NCHILD\"] = line[59:60]\n dictionary[\"NCHLT5\"] = line[60:61]\n dictionary[\"NSIBS\"] = line[61:62]\n dictionary[\"ELDCH\"] = line[62:64]\n dictionary[\"YNGCH\"] = line[64:66]\n dictionary[\"RELATE\"] = line[66:68]\n dictionary[\"RELATED\"] = line[68:72]\n dictionary[\"SEX\"] = line[72:73]\n dictionary[\"AGE\"] = line[73:76]\n dictionary[\"AGEMONTH\"] = line[76:78]\n dictionary[\"MARST\"] = line[78:79]\n dictionary[\"MARRNO\"] = line[79:80]\n dictionary[\"AGEMARR\"] = line[80:82]\n dictionary[\"CHBORN\"] = line[82:84]\n dictionary[\"RACE\"] = line[84:85]\n dictionary[\"RACED\"] = line[85:88]\n dictionary[\"HISPAN\"] = line[88:89]\n dictionary[\"HISPAND\"] = line[89:92]\n dictionary[\"BPL\"] = line[92:95]\n dictionary[\"BPLD\"] = line[95:100]\n dictionary[\"MBPL\"] = line[100:103]\n dictionary[\"MBPLD\"] = line[103:108]\n dictionary[\"FBPL\"] = line[108:111]\n dictionary[\"FBPLD\"] = line[111:116]\n 
dictionary[\"NATIVITY\"] = line[116:117]\n dictionary[\"CITIZEN\"] = line[117:118]\n dictionary[\"MTONGUE\"] = line[118:120]\n dictionary[\"MTONGUED\"] = line[120:124]\n dictionary[\"SPANNAME\"] = line[124:125]\n dictionary[\"HISPRULE\"] = line[125:126]\n dictionary[\"SCHOOL\"] = line[126:127]\n dictionary[\"HIGRADE\"] = line[127:129]\n dictionary[\"HIGRADED\"] = line[129:132]\n dictionary[\"EDUC\"] = line[132:134]\n dictionary[\"EDUCD\"] = line[134:137]\n dictionary[\"EMPSTAT\"] = line[137:138]\n dictionary[\"EMPSTATD\"] = line[138:140]\n dictionary[\"LABFORCE\"] = line[140:141]\n dictionary[\"OCC\"] = line[141:145]\n dictionary[\"OCC1950\"] = line[145:148]\n dictionary[\"IND\"] = line[148:152]\n dictionary[\"IND1950\"] = line[152:155]\n dictionary[\"CLASSWKR\"] = line[155:156]\n dictionary[\"CLASSWKRD\"] = line[156:158]\n dictionary[\"WKSWORK1\"] = line[158:160]\n dictionary[\"WKSWORK2\"] = line[160:161]\n dictionary[\"HRSWORK1\"] = line[161:163]\n dictionary[\"HRSWORK2\"] = line[163:164]\n dictionary[\"DURUNEMP\"] = line[164:167]\n dictionary[\"UOCC\"] = line[167:170]\n dictionary[\"UOCC95\"] = line[170:173]\n dictionary[\"UIND\"] = line[173:176]\n dictionary[\"UCLASSWK\"] = line[176:177]\n dictionary[\"INCWAGE\"] = line[177:183]\n dictionary[\"INCNONWG\"] = line[183:184]\n dictionary[\"OCCSCORE\"] = line[184:186]\n dictionary[\"SEI\"] = line[186:188]\n dictionary[\"PRESGL\"] = line[188:191]\n dictionary[\"ERSCOR50\"] = line[191:195]\n dictionary[\"EDSCOR50\"] = line[195:199]\n dictionary[\"NPBOSS50\"] = line[199:203]\n dictionary[\"MIGRATE5\"] = line[203:204]\n dictionary[\"MIGRATE5D\"] = line[204:206]\n dictionary[\"MIGPLAC5\"] = line[206:209]\n dictionary[\"MIGMET5\"] = line[209:213]\n dictionary[\"MIGTYPE5\"] = line[213:214]\n dictionary[\"MIGCITY5\"] = line[214:218]\n dictionary[\"MIGSEA5\"] = line[218:221]\n dictionary[\"SAMEPLAC\"] = line[221:222]\n dictionary[\"SAMESEA5\"] = line[222:223]\n dictionary[\"MIGCOUNTY\"] = line[223:227]\n dictionary[\"VETSTAT\"] = line[227:228]\n dictionary[\"VETSTATD\"] = line[228:230]\n dictionary[\"VET1940\"] = line[230:231]\n dictionary[\"VETWWI\"] = line[231:232]\n dictionary[\"VETPER\"] = line[232:233]\n dictionary[\"VETCHILD\"] = line[233:234]\n dictionary[\"HISTID\"] = line[234:270]\n dictionary[\"SURSIM\"] = line[270:272]\n dictionary[\"SSENROLL\"] = line[272:273]\n return dictionary", "def personparsing(page, thread_ident, profile):\n try: # Handle empty webdl failure\n soup = bs4.BeautifulSoup(page.text, 'lxml')\n except AttributeError:\n return profile\n e = profile\n\n \"\"\"VCF parsing subsection, kills early if vcf parse fails\"\"\"\n vcfregex = re.compile(r\"\\.vcf\")\n vcf_parent = soup.find_all('a', {'class': 'link download'}, href=True)\n for potential_link in vcf_parent:\n pot_link = potential_link['href']\n if vcfregex.findall(pot_link):\n e['VCard'] = pot_link.replace('.', '', 2)\n else:\n e['Bio'] = pot_link.replace('.', '', 2)\n try:\n vcf_link = e['VCard']\n to_add = vcfmuncher(vcf_link, thread_ident, e['Full Name'])\n if not to_add:\n print('[Error-{} vcfmuncher]: VCF could not be downloaded/parsed'.format(thread_ident))\n return profile\n else:\n e.update(to_add)\n except KeyError:\n print('[Error-{} personparser]: VCF element could not be located'.format(thread_ident))\n return profile\n\n # \"\"\"Page parsing subsection, expand/comment out as needed\"\"\"\n # def pythonicparser(title, bs4):\n # spec_parent = soup.find(bs4)\n # if spec_parent:\n # spec_el = spec_parent.find_all('li')\n # combined_spec = ''\n # for el in 
spec_el:\n # if el.get_text:\n # spec = el.get_text()\n # combined_spec += spec + ', '\n # e[str(title)] = combined_spec\n #\n # pythonicparser('Specialities', \"'div', {'id': MasterPage_ctl00_ContentPlaceHolder1_divAreasOfSpecialization\")\n #\n # experience_parents = soup.find_all('span', {'style': 'font-size: 8pt; font-weight: bold;'})\n # for el in experience_parents:\n # if el.get_text() == 'Years of Experience':\n # outer_el = el.parent\n # exp = outer_el.text.replace('Years of Experience', '')\n # e['Experience'] = exp.strip()\n # else:\n # continue\n\n return e", "def record_address(address, addrlen, motif_node, motif_node_dict):\n\tif motif_node.mn_has_name_recorded:\n\t\treturn None, None\n\telse:\n\t\tnew_motif_node = MotifNode('address')\n\t\t_, rtm_tree_named_node = record_relation(relation_to_str('RL_NAMED'), new_motif_node, motif_node, None, None, motif_node_dict)\n\t\tmotif_node.mn_has_name_recorded = True\n\treturn None, rtm_tree_named_node", "def build_person_line(serial, age, hisp, race, serial_len=8):\n person_line = get_sample_1940_person()\n person = parse_positions_person(person_line)\n person = modify_serial(person, serial, serial_len=serial_len)\n person = modify_age(person, age)\n person = modify_hisp(person, hisp)\n person = modify_race(person, race)\n\n person_line = convert_to_person_line_delimited(person)\n return person_line", "def parse_organism(self):\n string = self.organism\n name, host_genus = \\\n basic.parse_names_from_record_field(string)\n self._organism_name = name\n self._organism_host_genus = host_genus", "def parse(self, data, normalised_field_name='ADDRESS_norm'):\n self.log.info('Start parsing address data...')\n\n data = self._normalize_input_data(data, normalised_field_name=normalised_field_name)\n\n addresses = data[normalised_field_name].values\n self.log.info('{} addresses to parse...'.format(len(addresses)))\n\n # temp data storage lists\n organisation = []\n department = []\n sub_building = []\n building_name = []\n building_number = []\n street = []\n locality = []\n town = []\n postcode = []\n\n # loop over addresses and use the probabilistic parser to tag the address components - should avoid a loop\n for address in tqdm(addresses):\n parsed = parser.tag(address.upper())\n possible_postcode = self._extract_postcode(address) # regular expression extraction\n\n # if both parsers found postcode then check that they are the same\n if parsed.get('Postcode', None) is not None and possible_postcode is not None:\n if parsed['Postcode'] != possible_postcode:\n # not the same, use possible_postcode\n parsed['Postcode'] = possible_postcode\n\n # if the probabilistic parser did not find postcode but regular expression did, then use that\n if parsed.get('Postcode', None) is None and possible_postcode is not None:\n parsed['Postcode'] = possible_postcode\n\n if parsed.get('Postcode', None) is not None:\n # check that there is space, if not then add if the parsed postcode is long enough to contain a complete\n # postcode. Some users have partial postcodes to which one should not add a space.\n if ' ' not in parsed['Postcode'] and len(parsed['Postcode']) > 4:\n in_code = parsed['Postcode'][-3:]\n out_code = parsed['Postcode'].replace(in_code, '')\n parsed['Postcode'] = out_code + ' ' + in_code\n\n # change to all capitals\n parsed['Postcode'] = parsed['Postcode'].upper()\n\n # if Hackney etc. 
in StreetName then remove and move to locality if town name contains London\n # Probabilistic parser should see more cases with london localities, parsed incorrectly at the mo\n if parsed.get('StreetName', None) is not None and parsed.get('TownName', None) is not None:\n if 'LONDON' in parsed['TownName']:\n parsed = self._fix_london_boroughs(parsed, os.path.join(self.currentDirectory, '../../data/'))\n\n # sometimes building number gets placed at building name, take it and add to building name\n if parsed.get('BuildingNumber', None) is None and parsed.get('BuildingName', None) is not None:\n tmp = parsed['BuildingName'].split(' ')\n if len(tmp) > 1:\n try:\n _ = int(tmp[0])\n parsed['BuildingNumber'] = tmp[0]\n except ValueError:\n pass\n\n # some addresses contain place CO place, where the CO is not part of the actual name - remove these\n # same is true for IN e.g. Road Marton IN Cleveland\n if parsed.get('Locality', None) is not None:\n if parsed['Locality'].strip().endswith(' CO'):\n parsed['Locality'] = parsed['Locality'].replace(' CO', '')\n if parsed['Locality'].strip().endswith(' IN'):\n parsed['Locality'] = parsed['Locality'].replace(' IN', '')\n\n # parser sometimes places house to organisation name, while it is likelier that it should be subBuilding\n if parsed.get('OrganisationName') == 'HOUSE' and parsed.get('SubBuildingName', None) is None:\n parsed['SubBuildingName'] = parsed.get('OrganisationName')\n\n # store the parsed information to separate lists\n organisation.append(parsed.get('OrganisationName', None))\n department.append(parsed.get('DepartmentName', None))\n sub_building.append(parsed.get('SubBuildingName', None))\n building_name.append(parsed.get('BuildingName', None))\n building_number.append(parsed.get('BuildingNumber', None))\n street.append(parsed.get('StreetName', None))\n locality.append(parsed.get('Locality', None))\n town.append(parsed.get('TownName', None))\n postcode.append(parsed.get('Postcode', None))\n\n # add the parsed information to the dataframe\n data['OrganisationName'] = organisation\n data['DepartmentName'] = department\n data['SubBuildingName'] = sub_building\n data['BuildingName'] = building_name\n data['BuildingNumber'] = building_number\n data['StreetName'] = street\n data['Locality'] = locality\n data['TownName'] = town\n data['Postcode'] = postcode\n data['PAOText'] = data['BuildingName'].copy()\n data['SAOText'] = data['SubBuildingName'].copy()\n\n data = self._parser_postprocessing(data)\n\n return data", "def parse_input(loc, fname, fix_acronyms = True):\n\n papers = dict()\n counter = 0\n annotations = []\n relations = []\n for i, line in enumerate(read_file(fname, loc)):\n if not line:\n papers[pmid] = Paper(pmid, title, abstract, annotations,\n relations, fix_acronyms = fix_acronyms)\n\n counter = -1\n annotations = []\n relations = []\n elif counter < 2:\n vals = line.split('|')\n assert len(vals) == 3, \"Bad format for line {}\".format(i+1)\n assert vals[1] == [\"t\", \"a\"][counter]\n\n if counter == 0:\n pmid = int(vals[0])\n title = vals[2]\n else:\n assert pmid == int(vals[0])\n abstract = vals[2]\n else:\n vals = line.split('\\t')\n assert pmid == int(vals[0])\n if vals[1] == \"CID\":\n relations.append((OntologyID(vals[2]), OntologyID(vals[3])))\n else:\n # an annotation\n if len(vals) == 5: # no identifier was assigned\n vals.append(\"-1\")\n\n assert 6 <= len(vals) <= 7, \"Error on line {0}\".format(i+1)\n annotations.append(Annotation(vals[5], vals[4], vals[3], vals[1], vals[2]))\n\n counter += 1\n\n return papers", "def 
write_household_to_file(write_file, hh_line, person_lines):\n write_file.write(hh_line)\n for person_line in person_lines:\n write_file.write(person_line)", "def parse_occupation(self):\n pass", "def prob6(filename=\"fake_contacts.txt\"):\n #Read in the data\n with open(filename, 'r') as my_file:\n contact_data = my_file.readlines()\n \n #Create regular expressions to find the correct information\n name_pat = re.compile(r\"([A-Z][a-zA-Z]* (?:[A-Z]\\. )?[A-Z][a-zA-Z]*)\")\n birth_pat = re.compile(r\"([\\d]{1,2}/[\\d]{1,2}/[\\d]{2,4})\")\n email_pat = re.compile(r\"(\\S*@\\S*)\")\n phone_pat = re.compile(r\"[0-9\\(\\)-]{12,}\")\n \n #Create changes for all of the birthdays\n b1change = re.compile(r\"^([\\d]/[\\d]{1,2}/[\\d]{2,4})$\")\n b2change = re.compile(r\"^([\\d]{2})/([\\d]/[\\d]{2,4})$\")\n b3change = re.compile(r\"^([\\d]{2}/[\\d]{2})/([\\d]{2})$\")\n \n #Create changes for all the phone numbers\n p1change = re.compile(r\"([\\d]{3})(?:[-\\)]{1,2})\")\n p2change = re.compile(r\"[\\d]{4}\")\n my_dic = {}\n \n #Iterate through the data\n for i in contact_data:\n name = name_pat.findall(i)[0]\n \n #Get and change the birthday if it exists\n if not birth_pat.findall(i):\n birthday = None\n else:\n birthday = birth_pat.findall(i)[0]\n birthday = b1change.sub(r\"0\\1\", birthday)\n birthday = b2change.sub(r\"\\1/0\\2\", birthday)\n birthday = b3change.sub(r\"\\1/20\\2\", birthday)\n \n #Get and save the email as an attribute\n if not email_pat.findall(i):\n email = None\n else:\n email = email_pat.findall(i)[0]\n \n #Get and change the phone numbers into the proper order\n if not phone_pat.findall(i):\n phone = None\n else:\n phone = phone_pat.findall(i)[0]\n first_two = p1change.findall(phone)\n last = p2change.findall(phone)\n phone = \"(\" + first_two[0] + \")\" + first_two[1] + \"-\" + last[0]\n \n #Define the dictionary \n my_dic[name]= {\"birthday\": birthday, \"email\": email, \"phone\": phone}\n \n #Return the dictionary\n return(my_dic)\n\n raise NotImplementedError(\"Problem 6 Incomplete\")", "def anonymise_name_txt(pt_txt, path_to_doc, xml=False):\n not_pt_names = ['Age', 'age', 'DOB', 'Grand', 'Round', 'Telemetry', 'Meeting', 'Name', 'Name:', ':', 'NAME'\n 'AED', 'Rx', 'XX', 'xx', 'Antecedent', 'Hx', 'telemetry', 'meeting', 'summary', 'ED',\n 'Namexx', 'NamexxAge', 'Hxxx', 'Hx', 'xxx', 'Video', 'video', 'QSA', 'QSB', 'QSC', 'QSD',\n 'Early', 'Development', 'Date', 'TELEMETRY', 'MEETING', 'QS', 'Aged', 'FH', 'No',\n 'Discharge', 'DISCHARGE', 'Summary', 'SUMMARY', 'Family', 'FAMILY', 'No', 'FC', 'ITU'\n 'Referred', 'JD', 'Dr', 'DATE'\n ]\n regex_notnames = re.compile(r\"(Hosp).?\")\n regex_notnames_2 = re.compile(r\"\\s*:\\s*\")\n NLA = r\"(?!Grand|Round|Telemetry|TELEMETRY|MEETING|Meeting|Date|Course|Current|Attacks|Previous|EEG|Imaging|Onset|Risk|Factors|xx|AED|Rx|Hxxx)\"\n # NLB = r\"(?<![QSABCDEMCVP])\" # negative lookbehind\n # NLB = r\"(?<!Telemetry)\"\n\n\n if xml:\n # new regex to find names for DOCX - files that don't have names may catch incorrect names\n # some have Surname|SURNAME, Firstname - but must ensure name is there i.e. 
xml=TRUE\n try: \n name_pattern = r\"%s([A-Z]{1,}[a-z]*,?\\s[A-Z]{1}[a-zA-Z]*)\"%(NLA) #(?(?=regex)then|else)\n # negative lookahead to exclude these words\n # Surname|SURNAME, Firstname\n name_list = name_pattern_regex(name_pattern, pt_txt)\n \n if name_list[0].strip() not in not_pt_names and not regex_notnames.search(name_list[0]) and not regex_notnames_2.search(name_list[0]):\n surname = name_list[0].strip(',')\n surname = surname.strip()\n # clean up any name that picked up suffixes due to xml reader\n surname = re.sub(r\"QSA|QSB|QSC|QSD|Telemetry\", \"\", surname)\n \n else: raise AttributeError\n if name_list[-1].strip() not in not_pt_names and not regex_notnames.search(name_list[-1]) and not regex_notnames_2.search(name_list[-1]):\n firstname = name_list[-1]\n firstname = firstname.strip()\n # clean up any name that picked up suffixes due to xml reader\n firstname = re.sub(r\"QSA|QSB|QSC|QSD|Telemetry\", \"\", firstname)\n \n else: raise AttributeError\n\n \n\n pt_txt_sfnamefilter, names = pt_txt_replace (firstname, surname, pt_txt)\n\n return pt_txt_sfnamefilter, names \n \n except AttributeError:\n raise AttributeError\n # Firstname|FIRSTNAME SURNAME \n # try:\n # name_pattern = r\"([A-Z]{1}[a-z]+)|([A-Z]{1,}),?\\s?\\t?([A-Z]+)\" \n # name_list = name_pattern_regex(name_pattern, pt_txt)\n # if name_list[0] not in not_pt_names and not regex_notnames.search(name_list[0]) and not regex_notnames_2.search(name_list[0]):\n # firstname = name_list[0]\n # elif name_list[1] not in not_pt_names and not regex_notnames.search(name_list[1]) and not regex_notnames_2.search(name_list[1]):\n # firstname = name_list[1]\n # else: raise AttributeError\n\n # if name_list[-1] not in not_pt_names and not regex_notnames.search(name_list[-1]) and not regex_notnames_2.search(name_list[-1]):\n # surname = name_list[-1]\n # else: raise AttributeError\n\n # surname2 = re.sub(r\"QSA|QSB|QSC|Telemetry|\\d\", \"\", surname)\n # surname2 = surname2.strip()\n # firstname2 = re.sub(r\"QSA|QSB|QSC|Telemetry|\\d\", \"\", firstname)\n # firstname2 = firstname2.strip()\n \n # pt_txt_sfnamefilter, names = pt_txt_replace (firstname2, surname2, pt_txt)\n\n # return pt_txt_sfnamefilter, names\n \n # except IndexError:\n # raise AttributeError\n\n\n else: # if not xml\n try:\n name_pattern = r\"((Name|NAME)[\\s\\t:]*[\\s\\t]*)([a-zA-Z]+\\s+[a-zA-Z]+[\\s-]?[a-zA-Z]+)\" \n name_list = name_pattern_regex(name_pattern, pt_txt)\n\n if name_list[0].strip() not in not_pt_names and not regex_notnames.search(name_list[0]) and not regex_notnames_2.search(name_list[0]):\n firstname = name_list[0]\n elif name_list[1].strip() not in not_pt_names and not regex_notnames.search(name_list[1]) and not regex_notnames_2.search(name_list[1]):\n firstname = name_list[1]\n else:\n firstname = name_list[2]\n if name_list[-1].strip() not in not_pt_names and not regex_notnames.search(name_list[-1]) and not regex_notnames_2.search(name_list[-1]):\n surname = name_list[-1]\n else:\n surname = name_list[name_list.index(firstname) + 1] # add 1 to the index of the list from firstname\n\n pt_txt_sfnamefilter, names = pt_txt_replace(firstname, surname, pt_txt)\n \n return pt_txt_sfnamefilter, names\n\n except IndexError:\n name_list = name_pattern_regex(name_pattern, pt_txt, IndError=True)\n if name_list[0].strip() not in not_pt_names:\n firstname = name_list[0]\n else: raise AttributeError\n if name_list[1].strip() not in not_pt_names:\n surname = name_list[1]\n else: raise AttributeError\n pt_txt_sfnamefilter, names = pt_txt_replace(firstname, surname, 
pt_txt)\n\n return pt_txt_sfnamefilter, names\n\n except AttributeError: # maybe name is Ali O'Marvasti\n try:\n name_pattern = r\"Name[\\s\\t:]+\\w+\\s+[A-Z’]+[\\s-]?\\w+\"\n name_list = name_pattern_regex(name_pattern, pt_txt)\n firstname = name_list[1]\n\n if name_list[-1].strip() not in not_pt_names and not regex_notnames.search(name_list[-1]):\n surname = name_list[-1]\n elif name_list[2].strip() not in not_pt_names and not regex_notnames.search(name_list[2]):\n surname = name_list[2]\n else: raise AttributeError\n\n pt_txt_sfnamefilter, names = pt_txt_replace(firstname, surname, pt_txt) \n\n return pt_txt_sfnamefilter, names\n\n except AttributeError: # some docx have \"surname: SMITH firstname: Harry ...\"\n name_pattern = r\"surname[\\s\\t:\\n]*[a-z]+\\s\"\n name_list = name_pattern_regex(name_pattern, pt_txt.lower())\n\n if name_list[-1].strip() not in not_pt_names and not regex_notnames.search(name_list[-1]):\n surname = name_list[-1]\n else:\n print(\"check this surname out in file {}\",format(path_to_doc))\n raise AttributeError\n\n name_pattern2 = r\"(?<!r)(name)[\\s\\t:\\n]*[a-z]+\\s\" # negative look behind\n name_list2 = name_pattern_regex(name_pattern, pt_txt.lower())\n\n if name_list2[-1].strip() not in not_pt_names and not regex_notnames.search(name_list2[-1]):\n firstname = name_list2[-1]\n else:\n print(\"check this firstname out in file {}\",format(path_to_doc))\n raise AttributeError\n\n pt_txt_sfnamefilter, names = pt_txt_replace(firstname, surname, pt_txt) \n # print (\"\\nused firstname and surname as specified by the file contents {}\".format(path_to_doc))\n # print (\"firstname is {} and surname is {}\".format(firstname, surname))\n return pt_txt_sfnamefilter, names", "def prepare_pan_data(pan_data_text):\n\n #nitializing data variable\n name = ''\n fname = ''\n dob = ''\n pan_no = []\n pan_number = ''\n pan_detail_list = []\n\n # Clearing out the unwanted and emty space\n lines = pan_data_text.split('\\n')\n for line in lines:\n s = line.strip()\n s = line.replace('\\n','')\n s = s.rstrip()\n s = s.lstrip()\n pan_detail_list.append(s)\n pan_detail_list = list(filter(None, pan_detail_list))\n\n\n #Searching for PAN string\n pan_string = '(Pormanam|Number|umber|Account|ccount|count|Permanent|\\\n ermanent|manent|wumm)$'\n line_no = -1\n for wordline in pan_detail_list:\n word_lst = wordline.split( )\n if ([word for word in word_lst if re.search(pan_string, word)]):\n lineno = pan_detail_list.index(wordline)\n pan_no = pan_detail_list[lineno+1:]\n pan_detail_list.remove(pan_detail_list[lineno])\n break\n if pan_no:\n try:\n pan_number = validation.validate_pan_no(pan_no[0])\n pan_detail_list.remove(pan_number)\n except:\n pan_number = ''\n else:\n pan_number = ''\n\n #Get PAN number using RegEx pattern\n if pan_number == '':\n pan_reg_exp = re.compile(r'^[A-Z0-9]{5}[A-Z0-9]{4}[A-Z]$')\n for wordline in pan_detail_list:\n if pan_reg_exp.findall(wordline):\n pan_number = validation.validate_pan_no(wordline)\n pan_detail_list.remove(wordline)\n\n\n #Searching GOV text string and remove them\n gov_string = '(INCOMETAXDEPARWENT @|mcommx|INCOME|TAX|GOW|GOVT|GOVERNMENT|\\\n OVERNMENT|VERNMENT|DEPARTMENT|EPARTMENT|PARTMENT|ARTMENT|INDIA|NDIA)$'\n for wordline in pan_detail_list:\n word_lst = wordline.split( )\n if ([word for word in word_lst if re.search(gov_string, word)]):\n pan_detail_list.remove(wordline)\n\n #Remove smallcase elements form list\n pan_detail_list = [ele for ele in pan_detail_list if not any(c for c in ele if c.islower())]\n\n\n #Searching DOB using 
RegEx pattern\n date_reg_exp = re.compile(r'\\d{2}[-/]\\d{2}[-/]\\d{4}')\n for wordline in pan_detail_list:\n if date_reg_exp.findall(wordline):\n dob = wordline\n pan_detail_list.remove(wordline)\n\n\n #Get name & father name and validate them\n try:\n name = validation.validate_name(pan_detail_list[0])\n except:\n name = ''\n\n try:\n fname =validation.validate_father_name( pan_detail_list[1])\n except:\n fname = ''\n\n data = {\n 'Name' : name,\n 'Father name' : fname,\n 'Date of birth' : dob,\n 'PAN number' : pan_number,\n }\n\n return data", "def from_xml(self, filename):\n # #print(os.getcwd())\n root = ET.parse(filename).getroot()\n for node in root:\n if node.tag == \"persoon\":\n p = Person(node.attrib[\"naam\"], id_=node.attrib[\"id\"])\n for i in node:\n if i.tag == \"head\":\n self.head = p\n elif i.tag == \"geb\":\n p.birth = i.text\n elif i.tag == \"stf\":\n p.dead = i.text\n elif node.tag == \"familie\":\n parents = []\n children = []\n div = False\n for i in node:\n if i.tag in (\"ouder\", \"kind\"):\n try:\n p = Person.all_[i.text]\n except KeyError:\n p = Person(\"ERROR\", id_=i.text)\n if i.tag == \"ouder\":\n parents.append(p)\n else:\n children.append(p)\n elif i.tag == \"divorsed\":\n div = True\n self.families.append(Family(parents, children, div))", "def make_pid_segment(forename,\n surname,\n dob,\n sex,\n address,\n patient_id_tuple_list=[]):\n # -------------------------------------------------------------------------\n # Patient identification (PID)\n # -------------------------------------------------------------------------\n # http://www.corepointhealth.com/resource-center/hl7-resources/hl7-pid-segment # noqa\n # http://www.hl7.org/documentcenter/public/wg/conf/Msgadt.pdf (s5.4.8)\n\n # ID numbers...\n # http://www.cdc.gov/vaccines/programs/iis/technical-guidance/downloads/hl7guide-1-4-2012-08.pdf # noqa\n\n segment_id = u\"PID\"\n set_id = \"\"\n\n # External ID\n patient_external_id = \"\"\n # ... this one is deprecated\n # http://www.j4jayant.com/articles/hl7/16-patient-id\n\n # Internal ID\n internal_id_element_list = []\n for i in range(len(patient_id_tuple_list)):\n if not patient_id_tuple_list[i].id:\n continue\n id = patient_id_tuple_list[i].id\n check_digit = get_mod11_checkdigit(id)\n check_digit_scheme = \"M11\" # Mod 11 algorithm\n type_id = patient_id_tuple_list[i].id_type\n assigning_authority = patient_id_tuple_list[i].assigning_authority\n internal_id_element = hl7.Field(COMPONENT_SEPARATOR, [\n id,\n check_digit,\n check_digit_scheme,\n assigning_authority,\n type_id\n ])\n internal_id_element_list.append(internal_id_element)\n patient_internal_id = hl7.Field(REPETITION_SEPARATOR,\n internal_id_element_list)\n\n # Alternate ID\n alternate_patient_id = \"\"\n # ... this one is deprecated\n # http://www.j4jayant.com/articles/hl7/16-patient-id\n\n patient_name = hl7.Field(COMPONENT_SEPARATOR, [\n forename, # surname\n surname, # forename\n \"\", # middle initial/name\n \"\", # suffix (e.g. Jr, III)\n \"\", # prefix (e.g. Dr)\n \"\", # degree (e.g. 
MD)\n ])\n mothers_maiden_name = \"\"\n date_of_birth = cc_dt.format_datetime(dob, DATEFORMAT.HL7_DATE)\n alias = \"\"\n race = \"\"\n country_code = \"\"\n home_phone_number = \"\"\n business_phone_number = \"\"\n language = \"\"\n marital_status = \"\"\n religion = \"\"\n account_number = \"\"\n social_security_number = \"\"\n drivers_license_number = \"\"\n mother_identifier = \"\"\n ethnic_group = \"\"\n birthplace = \"\"\n birth_order = \"\"\n citizenship = \"\"\n veterans_military_status = \"\"\n\n fields = [\n segment_id,\n set_id, # PID.1\n patient_external_id, # PID.2\n patient_internal_id, # known as \"PID-3\" or \"PID.3\"\n alternate_patient_id, # PID.4\n patient_name,\n mothers_maiden_name,\n date_of_birth,\n sex,\n alias,\n race,\n address,\n country_code,\n home_phone_number,\n business_phone_number,\n language,\n marital_status,\n religion,\n account_number,\n social_security_number,\n drivers_license_number,\n mother_identifier,\n ethnic_group,\n birthplace,\n birth_order,\n citizenship,\n veterans_military_status,\n ]\n segment = hl7.Segment(FIELD_SEPARATOR, fields)\n return segment" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
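The parsers in these rows all thread their results through a small nac record, initialised as [None, [], None] and indexed by the ORGNAME, ORGADDRESS and COUNTRY constants. The constants' values never appear in the snippets themselves; the sketch below assumes the indices 0, 1 and 2 implied by the "# name, address, country" comment, and the organisation name and address lines are made-up illustrative values, not data from any row.

ORGNAME, ORGADDRESS, COUNTRY = 0, 1, 2  # assumed values, consistent with [None, [], None]

def new_nac():
    # empty name / address / country record as used by the whois parsers above
    return [None, [], None]

nac = new_nac()
nac[ORGNAME] = "Example Networks Ltd"                  # single string
nac[ORGADDRESS] = ["1 Example Street", "Amsterdam"]    # list of address lines
nac[COUNTRY] = "NL"                                    # country abbreviation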
parse whois text and extract org. name, org. address, country abbreviation
def parse(self): nac = [None, [], None] # name, address, country self.translate_to_los() if self.check_simple_org_format(): org_name = self.parse_arin_simple_org() nac[ORGNAME] = org_name else: ref_ser = self.find_referral_server() if ref_ser: server_name, port_number = ref_ser # raw_whois = self.receive_raw_whois(ip_address, server_name, port_number) whois_parser = self._manager.create_parser(self._ip_address, server_name, port_number) whois_parser.receive_raw_whois() nac = whois_parser.parse() else: self.parse_arin_org(nac) return nac
[ "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def run_whois(ip):\n who = IPWhois(ip).lookup(inc_raw=True)\n\n # The IPWhois package doesn't give us the name information, so we parse the raw information.\n raw_whois = who['raw']\n\n whois_lines = raw_whois.splitlines()\n\n # for now, just naively grab lines that might relate to people. Works well for APNIC whois lookups.\n name_lines = list(filter(lambda l: l.startswith(\"person:\"), whois_lines))\n\n # Alternatively, use the method below to try to catch more\n # match_indicators = re.compile('|'.join([\"person\", \"name\"]), re.IGNORECASE)\n # name_lines = list(filter(lambda l: match_indicators.search(l), whois_lines))\n\n name = \"unknown\"\n # Lines are 'label: info', so discard label and strip whitespace. Only take first.\n if len(name_lines) > 0:\n name = name_lines[0].rpartition(':')[2].lstrip()\n return name", "def whois(self):\n print(whois(self.src).decode(\"utf8\", \"ignore\"))", "def get_whois_data(domain, whois_servers):\n # remove http and www\n domain = domain.replace('http://', '')\n domain = domain.replace('www.', '')\n\n # get the extension , .com , .org , .edu\n if \".\" in domain:\n tld = get_tld(domain)\n print \"Domain is: \" + domain + \", Tld is \" + tld\n print type(tld)\n print tld + \" \" + str(len(tld))\n # if \".\" not in tld: #means TLD like com,net,org\n if tld == \"de\":\n msg = perform_cmd_whois(domain)\n else:\n if tld in whois_servers:\n whois = whois_servers[tld]\n else:\n whois = 'whois.internic.net'\n # TODO: add the none supported tlds( like tr) to the configuration file\n if \"tr\" is tld: # .tr tld doesnt work with whois requests TODO: check why tr tld not working with whois requests\n return \"\";\n msg = perform_whois(whois, domain,0)\n\n else: # no TLD in the url, not a valid url\n msg = \"\" # Return the reply\n return msg", "def _get_whois(self, address):\n req = urllib.request.Request('https://rdap.org/ip/' + address, headers={'Accept': 'application/rdap+json', 'User-Agent': 'Mozilla/5.0'})\n try:\n resp = urllib.request.urlopen(req)\n result = resp.read().decode()\n except HTTPError as e:\n return {'Error': 'HTTPError ' + str(e.code)}\n except URLError as e:\n return {'Error': 'URLError ' + str(e)}\n except Exception as e:\n return {'Error': str(e)}\n return json.loads(result)", "def _lookup_org(self):\n url = \"https://whois.arin.net/ui/query.do\"\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json'}\n ip = socket.gethostbyname(self.domain)\n data = {'xslt': 'https://localhost:8080/whoisrws-servlet/arin.xsl',\n 'flushCache': False,\n 'queryinput': ip,\n 'whoisSubmitButton': '+'}\n res = requests.post(url, headers=headers, data=data, verify=False, proxies=self.proxies, timeout=10)\n return res.json().get('ns4:pft').get('org').get('handle').get('$')", "def parse_person(self, s, nac):\n org_name = 
self.find_first_item(s, ('person',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find person in Person section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Person section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Person section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac", "def _parse_one_ipwhois(self, ip: str, jcontent: dict,\n reason) -> IPWhoisData:\n res: IPWhoisData = None\n try:\n if not isinstance(jcontent, dict):\n return res\n\n handle = jcontent.get(\"handle\")\n ip_ver = jcontent.get(\"ipVersion\")\n allocate_type = jcontent.get(\"type\")\n netname = jcontent.get(\"name\")\n country_code = jcontent.get(\"country\")\n if country_code is None:\n # 整理因为修改了mmdb的数据库,所以会返回组织和运营商\n geo, org, isp = self._dbip.get_ip_mmdbinfo(1, ip)\n country_code = geo._country_code\n\n raw: str = json.dumps(jcontent)\n md5 = helper_crypto.get_md5_from_str(raw)\n\n # construct obj\n res = IPWhoisData(reason, md5, raw, handle, allocate_type, netname,\n country_code, ip_ver)\n\n # last_modified\n jevents = jcontent.get(\"events\")\n if not jevents is None and len(jevents) > 0:\n for je in jevents:\n if je.__contains__(\"eventAction\") and \\\n je.__contains__(\"eventDate\"):\n jea = je[\"eventAction\"]\n jval = je[\"eventDate\"]\n if jea == \"last changed\":\n res.last_modified = jval\n elif jea == \"registration\":\n res.applicable_from = jval\n else:\n self._logger.warn(\n \"Unknown eventAction for ipwhois: ip={}, action={}, val={}\"\n .format(ip, jea, jval))\n\n # remarks\n jremarks = jcontent.get(\"remarks\")\n if not jremarks is None and len(jremarks) > 0:\n remarks = ''\n for jr in jremarks:\n jdes = jr.get(\"description\")\n if jdes is None or len(jdes) < 1:\n continue\n for jd in jdes:\n remarks += (jd + \"\\r\\n\")\n if not remarks is None and remarks != \"\":\n res.remarks = remarks\n\n # cidrs\n jcidrs = jcontent.get(\"cidr0_cidrs\")\n if not jcidrs is None and len(jcidrs) > 0:\n for jc in jcidrs:\n k = None\n if jc.__contains__(\"v4prefix\"):\n k = jc['v4prefix']\n elif jc.__contains__(\"v6prefix\"):\n k = jc['v6prefix']\n v = jc.get(\"length\")\n if v is None:\n continue\n res.set_cidrs(\"{}/{}\".format(k, v))\n\n # entities\n jentity = jcontent.get(\"entities\")\n if not jentity is None and len(jentity) > 0:\n for jen in jentity:\n en = self._parse_entity(ip, jen)\n if en is None:\n continue\n res.set_entity(en)\n\n except Exception:\n self._logger.debug(\n \"Parse one ipwhois error: ip:{}, error: {}\".format(\n ip, traceback.format_exc()))\n return res", "def whois_lookup(domain):\n try:\n result = whois(domain)\n except socket.error:\n log.info(Fore.YELLOW + '[!] 
Unable to perform a whois lookup' + Fore.RESET)\n\n attrs = result._regex or vars(result).get('_regex')\n for attr in attrs:\n value = result.__getattr__(attr)\n if isinstance(value, list):\n whois_record[attr] = []\n log.info('[+] ' + attr + ':')\n for item in value:\n item = unicode(item).encode('utf-8')\n whois_record[attr].append(item)\n log.info(LOG_FORMAT.format('', item))\n else:\n whois_record[attr] = value\n log.info(LOG_FORMAT.format(attr + ':', value))", "def cleanreg(r):\n\tprint \"Domain Name: %s\\n\" % r[0]\n\tprint \"Transfer Status: %s\\n\" % r[1].split(',')[0][3:-1]\n\tif r[2] != None:\n\t\tprint \"Creation Date: %s\\n\" % r[2][1:-2]\n\telse:\n\t\tpass\n\tprint \"Date Updated: %s\\n\" % r[3]\n\tprint \"Registrar Name: %s\\n\" % r[4]\n\tprint \"Registrar URL: %s\\n\" % r[5]\n\tprint \"Registrar Whois Server: %s\\n\" % r[6]\n\tprint \"DNSSEC: %s\\n\" % r[7]\n\t# if r[8] != None:\n\t# \tprint \"Registered Organization: %s\\n\" % r[8].split(',')[1][3:-2]\n\t# else:\n\t# \tpass\n\tprint \"Registered Address: %s\\n\" % r[8]\n\tprint \"Registered City: %s\\n\" % r[9]\n\tprint \"Registered State: %s\\n\" % r[10]\n\tprint \"Registered Zip: %s\\n\" % r[11]\n\t# print \"Expiration Date: %s\\n\" % r[12].split('u')[1][1:-3]\n\tprint \"Nameserver 1: %s\\n\" % r[13]\n\tprint \"Nameserver 2: %s\\n\" % r[14]\n\tif r[15] != None:\n\t\tprint \"Nameserver 3: %s\\n\" % r[15][0:-1]\n\telse: \n\t\tpass\n\tprint \"Organization: %s\\n\" % r[16]\n\tprint \"Email 1: %s\\n\" % r[17][0][3:-1]\n\tprint \"Email 2: %s\\n\" % r[17][1][3:-2]", "def __convert_whois_record(self, response):\n record = {}\n\n #TODO: look at different subclasses of WhoisEntry and better handle\n # missing attributes\n #TODO: process entire lists if they exist\n if response.registrar:\n record['registrar'] = response.registrar[0]\n try:\n if response.whois_server:\n record['whois_server'] = response.whois_server[0]\n except KeyError:\n # Some registrants (for example, .uk) don't return whois_server\n pass\n if response.domain_name:\n record['domain_name'] = response.domain_name[0]\n try:\n if response.referral_url:\n record['referral_url'] = response.referral_url[0]\n except KeyError:\n # Some registrants (for example, .uk) don't return referral_url\n pass\n\n #These list comprehensions get rid of empty strings that the parser sometimes adds to the lists\n if response.status:\n record['status'] = [x.replace(' ', '_') for x in response.status if len(x.strip())]\n try:\n if response.emails:\n record['registrar_contacts'] = [x for x in response.emails if len(x.strip())]\n except KeyError:\n # Some registrants (for example, .uk) don't return emails\n pass\n if response.name_servers:\n record['name_servers'] = [x for x in response.name_servers if len(x.strip())]\n\n #these dates can be datetimes or arrays of datetimes, not sure why\n if response.creation_date:\n if response.creation_date is list:\n record['creation_date'] = self.__get_xml_date_fmt(whois.parser.cast_date(response.creation_date[0]))\n else:\n record['creation_date'] = self.__get_xml_date_fmt(response.creation_date.timetuple())\n\n if response.updated_date:\n if response.updated_date is list:\n record['updated_date'] = self.__get_xml_date_fmt(whois.parser.cast_date(response.updated_date[0]))\n else:\n record['updated_date'] = self.__get_xml_date_fmt(response.updated_date.timetuple())\n\n if response.expiration_date:\n if response.expiration_date is list:\n record['expiration_date'] = self.__get_xml_date_fmt(whois.parser.cast_date(response.expiration_date[0]))\n else:\n 
record['expiration_date'] = self.__get_xml_date_fmt(response.expiration_date.timetuple())\n return record", "def parseaddr(self, string):\n username, emailaddr = email.utils.parseaddr(string)\n try:\n if chardet.detect(username)['encoding'] == 'ascii':\n # parse username decoded like '=?gbk?Q?=CF=E0=C6=AC?='\n dh = email.Header.decode_header(email.Header.Header(username))\n username = dh[0][0]\n if dh[0][1] and dh[0][1] != 'utf-8':\n username = username.decode(dh[0][1]).encode('utf-8')\n except Exception, e:\n print e\n return [emailaddr, None]\n return [emailaddr, username]", "def _parse_location(self, item):\n if item['location']:\n address = item['location'] + ' Chicago, IL'\n else:\n address = None\n return {'address': address, 'name': '', 'neighborhood': ''}", "def extract_domain(line):\n if curr_zone_type == 1: return line.split()[0]\n else: return line.split()[0].split('.')[-3]", "def __create_whois_object(self, domain):\n if not domain:\n return None\n\n if(self.__verbose_output):\n sys.stderr.write(\"** creating Whois object for: %s\\n\" % domain)\n\n if self.http_whois:\n record = self.__get_whois_record_http(domain)\n else:\n record = self.__get_whois_record(domain)\n\n if not record:\n return None\n\n whois = WhoisEntry()\n\n record['status'] = ['OK' if status == 'ACTIVE' else status for status in record['status']]\n\n #Only build registrar info objects if we have the relevant info\n if (record['registrar'] or record['whois_server'] or\n record['registrar_address'] or record['referral_url'] or\n record['registrar_contacts']):\n registrar = WhoisRegistrar()\n registrar.name = String(record.get('registrar'))\n registrar.address = String(record.get('registrar_address'))\n registrar.whois_server = URI(record.get('whois_server'))\n registrar.referral_url = URI(record.get('referral_url'))\n\n contacts = WhoisContacts()\n for email in record['registrar_contacts']:\n contact = WhoisContact()\n contact.contact_type = 'ADMIN'\n contact.name = String(record.get('registrar'))\n contact.email_address = EmailAddress(email)\n\n contacts.append(contact)\n registrar.contacts = contacts\n\n whois.registrar_info = registrar\n\n whois.domain_name = self.__create_domain_name_object(record.get('domain_name'))\n\n nservers = WhoisNameservers()\n for url in record.get('name_servers', []):\n nservers.append(self.__create_url_object(url))\n if nservers:\n whois.nameservers = nservers\n\n status = WhoisStatuses()\n for s in record.get('status', []):\n status.append(WhoisStatus(s))\n if status:\n whois.status = status\n\n whois.updated_date = DateTime(record.get('updated_date'))\n whois.creation_date = DateTime(record.get('creation_date'))\n whois.expiration_date = DateTime(record.get('expiration_date'))\n\n return whois", "def parse_hostname(self, data):\n #AccessFJWAN-SRS# show run sysname\n #--- Fri Jun 8 18:31:11 2018 ---\n #AccessFJWAN-SRS\n return data.splitlines()[-1]", "def handle_whois(self, args):\n if not args:\n self.error(IRC.ERR_NEEDMOREPARAMS)\n return\n self.error(IRC.ERR_UNKNOWNCOMMAND)", "def parsed_metadata(self, msg):\n ips = msg.get_header_ips()\n result = []\n for ipaddr in ips:\n country = self.get_country(ipaddr)\n result.append(str(country))\n if result:\n result = \" \".join(result)\n msg.headers[\"X-Relay-Countries\"].append(result)\n self.ctxt.log.debug(\"X-Relay-Countries: '%s'\", result)\n msg.plugin_tags[\"RELAYCOUNTRY\"] = result", "def _parse_one_ipwhois_history(self, ip: str, jw: dict,\n reason) -> IPWhoisData:\n res: IPWhoisData = None\n try:\n if not isinstance(jw, 
dict):\n return res\n\n # required fields\n applicableFrom = jw.get('applicableFrom')\n applicableUntil = jw.get('applicableUntil')\n\n jcontent: dict = jw.get(\"content\")\n if jcontent is None:\n self._logger.error(\n \"Parse one ipwhois filed not found: content, ip:{}\".format(\n ip))\n return res\n\n res = self._parse_one_ipwhois(ip, jcontent, reason)\n if res is None: return res\n if res.applicable_from is None and not applicableFrom is None:\n res.applicable_from = applicableFrom\n if res.applicable_until is None and not applicableUntil is None:\n res.applicable_until = applicableUntil\n\n except Exception:\n self._logger.debug(\n \"Parse one ipwhois error: ip:{}, error: {}\".format(\n ip, traceback.format_exc()))\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
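The document above parses ARIN output that some receive_raw_whois call has already fetched; that call's implementation is not part of this row. For context, whois is a plain-text protocol on TCP port 43, and a minimal fetch could look like the sketch below. The server name whois.arin.net, the timeout and the query formatting are illustrative assumptions, not the repository's actual code.

import socket

def fetch_raw_whois(query, server="whois.arin.net", port=43, timeout=10.0):
    # send one whois query over TCP port 43 and return the raw response text
    with socket.create_connection((server, port), timeout=timeout) as sock:
        sock.sendall((query + "\r\n").encode("ascii"))
        chunks = []
        while True:
            data = sock.recv(4096)
            if not data:
                break
            chunks.append(data)
    return b"".join(chunks).decode("utf-8", errors="replace")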
check if a ReferralServer section exists. That means the ip_address is used by another organization
def find_referral_server(self): s = self.find_first_section(('ReferralServer',)) if s: server = (s[0][2]).lstrip('/') port = int(s[0][3]) return server, port else: return None
[ "def check_reverse_lookup():\n try:\n host_name = socket.gethostname().lower()\n host_ip = socket.gethostbyname(host_name)\n host_fqdn = socket.getfqdn().lower()\n fqdn_ip = socket.gethostbyname(host_fqdn)\n return host_ip == fqdn_ip\n except socket.error:\n pass\n return False", "def ip_known(self, ip: str) -> bool:\n\t\treturn ip in self.__ip", "def check_dns(self):\n try:\n httplib.HTTPConnection(self.domain).request(\"GET\", \"/\")\n return True\n except gaierror:\n return False", "def is_ldap_upc_site(self):", "def have_any_address():\n if config.addresses():\n return True\n return False", "def not_already_known_consul_neighbour(ipv6_address):\n log.info(\n \"Checking if the consul agent already knows {}\".format(ipv6_address)\n )\n check_already_known = \"consul members | grep -v left | \" \\\n \"grep {}\".format(ipv6_address)\n return not check_nonzero_exit(check_already_known)", "def _is_satisfied(self, what) -> bool:\n try:\n ip_addr = ipaddress.ip_address(what)\n net = ipaddress.ip_network(self.value)\n except ValueError:\n return False\n return ip_addr in net", "def _check_referer_header(self, request, audience):\n check_referer = self.check_referer\n if check_referer is None:\n check_referer = (request.environ[\"wsgi.url_scheme\"] == \"https\")\n if check_referer:\n if request.referer is None:\n return False\n referer = urljoin(request.host_url, request.referer)\n if not check_url_origin(audience, referer):\n return False\n return True", "def __check_registered(self, source_address):\n if self.is_root:\n if self.stream.get_node_by_server(source_address[0], source_address[1]):\n if self.stream.get_node_by_server(source_address[0], source_address[1]).is_register():\n return True", "def check_reverse_dns_lookup(lookup_dict, dict_to_modify):\n for server_name in lookup_dict.keys():\n host_ip = lookup_dict[server_name][\"IP Address\"]\n if host_ip is not None:\n try:\n hostname = socket.gethostbyaddr(host_ip)\n DictionaryHandling.add_to_dictionary(dict_to_modify, server_name, \"PTR Record\", hostname[0])\n except socket.herror:\n DictionaryHandling.add_to_dictionary(dict_to_modify, server_name, \"PTR Record\", None)\n else:\n DictionaryHandling.add_to_dictionary(dict_to_modify, server_name, \"PTR Record\", None)", "def verify_routing_ip_exist(device, protocol, destination_address,\n max_time=60, check_interval=10):\n timeout = Timeout(max_time, check_interval)\n while timeout.iterate():\n out = None\n try:\n out = device.parse('show route protocol {protocol}'.format(\n protocol=protocol))\n except SchemaEmptyParserError:\n timeout.sleep()\n continue\n \n rt_list = Dq(out).get_values(\"rt\")\n\n for rt_dict in rt_list:\n rt_destination_ = Dq(rt_dict).get_values(\"rt-destination\", 0)\n if rt_destination_.startswith(destination_address):\n return True \n timeout.sleep()\n return False", "def is_known(self, remote_id):\n return hex(remote_id) in self.remote_list", "def site_exist(self):\n return self.site_count != 0", "def check_access(self, uid, host, port):\n pass", "def internal(self):\n return self.remote == site_config.params.INTERNAL_REMOTE", "def test_get_addr_info(self):\n\n actual = self.dns_lookup.get_addr_info(\"one.one.one.one\")\n\n self.assertIsInstance(actual, list)\n self.assertNotEqual([], actual)", "def isEndPointExistNotUsedButAGoodReference( self, epname):\n\t\t#######################################################\n\t\t#\tCheck to see if the given EndPoint exists.\n\t\t#######################################################\n\t\tmyargs\t\t\t= array( 
['specialEndpoints'], java.lang.String )\n\t\tendpointAttrs\t= self.configService.getAttributes( self.configService.session, self.rootObjectName, myargs, False )\n\t\t#endpointAttrs\t= self.configService.getAttributes( self.configService.session, self.rootObjectName, None, False )\n\t\t#self.debug( __name__ + \".isEndPointExist(): endpointAttrs=\" + str( endpointAttrs ) + \"\\n\" )\n\t\tself.debug( __name__ + \".isEndPointExist(): endpointAttrs type=\" + str( type( endpointAttrs ) ) + \"\\n\" )\n\t\tfor endpointAttr in endpointAttrs:\n\t\t\t#self.debug( __name__ + \".isEndPointExist(): endpointAttr=\" + str( endpointAttr ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): endpointAttr type=\" + str( type( endpointAttr ) ) + \"\\n\" )\n\t\t\tattrName = endpointAttr.getName()\n\t\t\tspecialEndPointAttrs= endpointAttr.getValue()\n\t\t\tself.debug( __name__ + \".isEndPointExist(): attrName=\" + str( attrName ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): attrName type=\" + str( type( attrName ) ) + \"\\n\" )\n\t\t\t#self.debug( __name__ + \".isEndPointExist(): specialEndPointAttrs=\" + str( specialEndPointAttrs ) + \"\\n\" )\n\t\t\tself.debug( __name__ + \".isEndPointExist(): specialEndPointAttrs type=\" + str( type( specialEndPointAttrs ) ) + \"\\n\" )\n\t\t\tif isinstance( specialEndPointAttrs, java.util.ArrayList ):\n\t\t\t\tfor namedEndPoint in specialEndPointAttrs:\n\t\t\t\t\t#self.debug( __name__ + \".isEndPointExist(): namedEndPoint=\" + str( namedEndPoint ) + \"\\n\" )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): namedEndPoint type=\" + str( type( namedEndPoint ) ) + \"\\n\" )\n\t\t\t\t\tepArgs = array( ['endPointName'], java.lang.String )\n\t\t\t\t\tnameAttrs\t= self.configService.getAttributes( self.configService.session, namedEndPoint, epArgs, False )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): nameAttrs=\" + str( nameAttrs ) + \"\\n\" )\n\t\t\t\t\tself.debug( __name__ + \".isEndPointExist(): nameAttrs type=\" + str( type( nameAttrs ) ) + \"\\n\" )\n\t\t\t\t\tepName = self.configService.configServiceHelper.getAttributeValue( nameAttrs, 'endPointName' )\n\t\t\t\t\tif epName == epname:\n\t\t\t\t\t\treturn True\n\t\t\t\t#Endfor\n\t\t\t#Endif\n\t\t#Endfor\n\t\treturn False", "def __check_neighbour(self, address):\n print(\"neighbour checked!\")\n if self.stream.get_node_by_server(address[0], address[1]):\n if not (self.stream.get_node_by_server(address[0], address[1]).is_register()):\n return True\n\n pass", "def determine_if_server_url_exists(self, testcase=None):\n\n self.log.debug(\"Determining if server url exists in VIM.properties ...\")\n result = {'successful': False, 'exists': False}\n\n try:\n # open VIM.properties\n path = self.properties_path\n f = open(path, 'r')\n # check each line for server URL field\n for line in f:\n if \"VIM.url:\".lower() in line.lower():\n self.log.trace(\"Server URL found.\")\n result['exists'] = True\n f.close()\n if not result['exists']:\n self.log.trace(\"Server URL not found.\")\n\n self.log.trace(\"Determined if server url exists.\")\n result['successful'] = True\n except BaseException, e:\n self.handle_exception(e, operation=\"determine if server url exists\")\n\n # return\n if testcase is not None: testcase.processing = result['successful']\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
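The find_referral_server document above takes the host from token 2 of the matched line, strips its leading slashes, and reads the port from token 3. That indexing is consistent with a colon-split of a typical ARIN line such as "ReferralServer: whois://whois.ripe.net:43"; the sketch below shows that split in isolation. The example line and the split-on-colon tokenisation are assumptions about the surrounding tokenizer, which is not shown in this row.

line = "ReferralServer: whois://whois.ripe.net:43"   # example ARIN whois line (assumed format)
tokens = [t.strip() for t in line.split(":")]
# tokens == ['ReferralServer', 'whois', '//whois.ripe.net', '43']
server = tokens[2].lstrip("/")   # 'whois.ripe.net'
port = int(tokens[3])            # 43
print(server, port)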
parse whois text and extract org. name, org. address, country abbreviation
def parse(self): nac = [None, [], None] # name, address, country self.translate_to_los() if self.check_simple_org_format(): org_name = self.parse_simple_org() nac[ORGNAME] = org_name else: inetnum_sec = self.find_first_section(('inetnum',)) if inetnum_sec: self.check_inetnum(inetnum_sec) else: raise UnknownWhoisFormat('Inetnum section was not found') #looking for address role_sec = self.find_first_section(('role',)) if role_sec: self.parse_role(role_sec, nac) else: person_sec = self.find_first_section(('person',)) if person_sec: self.parse_person(person_sec, nac) else: raise UnknownWhoisFormat('Role and Person sections were not found') return nac
[ "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n\n if self.check_simple_org_format():\n org_name = self.parse_arin_simple_org()\n nac[ORGNAME] = org_name\n else:\n ref_ser = self.find_referral_server()\n if ref_ser:\n server_name, port_number = ref_ser\n # raw_whois = self.receive_raw_whois(ip_address, server_name, port_number)\n whois_parser = self._manager.create_parser(self._ip_address, server_name, port_number)\n whois_parser.receive_raw_whois()\n nac = whois_parser.parse()\n else:\n self.parse_arin_org(nac)\n return nac", "def run_whois(ip):\n who = IPWhois(ip).lookup(inc_raw=True)\n\n # The IPWhois package doesn't give us the name information, so we parse the raw information.\n raw_whois = who['raw']\n\n whois_lines = raw_whois.splitlines()\n\n # for now, just naively grab lines that might relate to people. Works well for APNIC whois lookups.\n name_lines = list(filter(lambda l: l.startswith(\"person:\"), whois_lines))\n\n # Alternatively, use the method below to try to catch more\n # match_indicators = re.compile('|'.join([\"person\", \"name\"]), re.IGNORECASE)\n # name_lines = list(filter(lambda l: match_indicators.search(l), whois_lines))\n\n name = \"unknown\"\n # Lines are 'label: info', so discard label and strip whitespace. Only take first.\n if len(name_lines) > 0:\n name = name_lines[0].rpartition(':')[2].lstrip()\n return name", "def whois(self):\n print(whois(self.src).decode(\"utf8\", \"ignore\"))", "def get_whois_data(domain, whois_servers):\n # remove http and www\n domain = domain.replace('http://', '')\n domain = domain.replace('www.', '')\n\n # get the extension , .com , .org , .edu\n if \".\" in domain:\n tld = get_tld(domain)\n print \"Domain is: \" + domain + \", Tld is \" + tld\n print type(tld)\n print tld + \" \" + str(len(tld))\n # if \".\" not in tld: #means TLD like com,net,org\n if tld == \"de\":\n msg = perform_cmd_whois(domain)\n else:\n if tld in whois_servers:\n whois = whois_servers[tld]\n else:\n whois = 'whois.internic.net'\n # TODO: add the none supported tlds( like tr) to the configuration file\n if \"tr\" is tld: # .tr tld doesnt work with whois requests TODO: check why tr tld not working with whois requests\n return \"\";\n msg = perform_whois(whois, domain,0)\n\n else: # no TLD in the url, not a valid url\n msg = \"\" # Return the reply\n return msg", "def _get_whois(self, address):\n req = urllib.request.Request('https://rdap.org/ip/' + address, headers={'Accept': 'application/rdap+json', 'User-Agent': 'Mozilla/5.0'})\n try:\n resp = urllib.request.urlopen(req)\n result = resp.read().decode()\n except HTTPError as e:\n return {'Error': 'HTTPError ' + str(e.code)}\n except URLError as e:\n return {'Error': 'URLError ' + str(e)}\n except Exception as e:\n return {'Error': str(e)}\n return json.loads(result)", "def _lookup_org(self):\n url = \"https://whois.arin.net/ui/query.do\"\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json'}\n ip = socket.gethostbyname(self.domain)\n data = {'xslt': 'https://localhost:8080/whoisrws-servlet/arin.xsl',\n 'flushCache': False,\n 'queryinput': ip,\n 'whoisSubmitButton': '+'}\n res = requests.post(url, headers=headers, data=data, verify=False, proxies=self.proxies, timeout=10)\n return res.json().get('ns4:pft').get('org').get('handle').get('$')", "def parse_person(self, s, nac):\n org_name = self.find_first_item(s, ('person',))\n if org_name is None:\n raise UnknownWhoisFormat('Can not find person in Person 
section')\n\n address = self.find_all_items(s, ('address',))\n if len(address) == 0:\n raise UnknownWhoisFormat('Can not find address in Person section')\n\n country = self.find_first_item(s, ('country',))\n if country is None:\n raise UnknownWhoisFormat('Can not find country in Person section')\n\n nac[ORGNAME] = org_name\n nac[ORGADDRESS] = address\n nac[COUNTRY] = country\n return nac", "def _parse_one_ipwhois(self, ip: str, jcontent: dict,\n reason) -> IPWhoisData:\n res: IPWhoisData = None\n try:\n if not isinstance(jcontent, dict):\n return res\n\n handle = jcontent.get(\"handle\")\n ip_ver = jcontent.get(\"ipVersion\")\n allocate_type = jcontent.get(\"type\")\n netname = jcontent.get(\"name\")\n country_code = jcontent.get(\"country\")\n if country_code is None:\n # 整理因为修改了mmdb的数据库,所以会返回组织和运营商\n geo, org, isp = self._dbip.get_ip_mmdbinfo(1, ip)\n country_code = geo._country_code\n\n raw: str = json.dumps(jcontent)\n md5 = helper_crypto.get_md5_from_str(raw)\n\n # construct obj\n res = IPWhoisData(reason, md5, raw, handle, allocate_type, netname,\n country_code, ip_ver)\n\n # last_modified\n jevents = jcontent.get(\"events\")\n if not jevents is None and len(jevents) > 0:\n for je in jevents:\n if je.__contains__(\"eventAction\") and \\\n je.__contains__(\"eventDate\"):\n jea = je[\"eventAction\"]\n jval = je[\"eventDate\"]\n if jea == \"last changed\":\n res.last_modified = jval\n elif jea == \"registration\":\n res.applicable_from = jval\n else:\n self._logger.warn(\n \"Unknown eventAction for ipwhois: ip={}, action={}, val={}\"\n .format(ip, jea, jval))\n\n # remarks\n jremarks = jcontent.get(\"remarks\")\n if not jremarks is None and len(jremarks) > 0:\n remarks = ''\n for jr in jremarks:\n jdes = jr.get(\"description\")\n if jdes is None or len(jdes) < 1:\n continue\n for jd in jdes:\n remarks += (jd + \"\\r\\n\")\n if not remarks is None and remarks != \"\":\n res.remarks = remarks\n\n # cidrs\n jcidrs = jcontent.get(\"cidr0_cidrs\")\n if not jcidrs is None and len(jcidrs) > 0:\n for jc in jcidrs:\n k = None\n if jc.__contains__(\"v4prefix\"):\n k = jc['v4prefix']\n elif jc.__contains__(\"v6prefix\"):\n k = jc['v6prefix']\n v = jc.get(\"length\")\n if v is None:\n continue\n res.set_cidrs(\"{}/{}\".format(k, v))\n\n # entities\n jentity = jcontent.get(\"entities\")\n if not jentity is None and len(jentity) > 0:\n for jen in jentity:\n en = self._parse_entity(ip, jen)\n if en is None:\n continue\n res.set_entity(en)\n\n except Exception:\n self._logger.debug(\n \"Parse one ipwhois error: ip:{}, error: {}\".format(\n ip, traceback.format_exc()))\n return res", "def whois_lookup(domain):\n try:\n result = whois(domain)\n except socket.error:\n log.info(Fore.YELLOW + '[!] 
Unable to perform a whois lookup' + Fore.RESET)\n\n attrs = result._regex or vars(result).get('_regex')\n for attr in attrs:\n value = result.__getattr__(attr)\n if isinstance(value, list):\n whois_record[attr] = []\n log.info('[+] ' + attr + ':')\n for item in value:\n item = unicode(item).encode('utf-8')\n whois_record[attr].append(item)\n log.info(LOG_FORMAT.format('', item))\n else:\n whois_record[attr] = value\n log.info(LOG_FORMAT.format(attr + ':', value))", "def cleanreg(r):\n\tprint \"Domain Name: %s\\n\" % r[0]\n\tprint \"Transfer Status: %s\\n\" % r[1].split(',')[0][3:-1]\n\tif r[2] != None:\n\t\tprint \"Creation Date: %s\\n\" % r[2][1:-2]\n\telse:\n\t\tpass\n\tprint \"Date Updated: %s\\n\" % r[3]\n\tprint \"Registrar Name: %s\\n\" % r[4]\n\tprint \"Registrar URL: %s\\n\" % r[5]\n\tprint \"Registrar Whois Server: %s\\n\" % r[6]\n\tprint \"DNSSEC: %s\\n\" % r[7]\n\t# if r[8] != None:\n\t# \tprint \"Registered Organization: %s\\n\" % r[8].split(',')[1][3:-2]\n\t# else:\n\t# \tpass\n\tprint \"Registered Address: %s\\n\" % r[8]\n\tprint \"Registered City: %s\\n\" % r[9]\n\tprint \"Registered State: %s\\n\" % r[10]\n\tprint \"Registered Zip: %s\\n\" % r[11]\n\t# print \"Expiration Date: %s\\n\" % r[12].split('u')[1][1:-3]\n\tprint \"Nameserver 1: %s\\n\" % r[13]\n\tprint \"Nameserver 2: %s\\n\" % r[14]\n\tif r[15] != None:\n\t\tprint \"Nameserver 3: %s\\n\" % r[15][0:-1]\n\telse: \n\t\tpass\n\tprint \"Organization: %s\\n\" % r[16]\n\tprint \"Email 1: %s\\n\" % r[17][0][3:-1]\n\tprint \"Email 2: %s\\n\" % r[17][1][3:-2]", "def __convert_whois_record(self, response):\n record = {}\n\n #TODO: look at different subclasses of WhoisEntry and better handle\n # missing attributes\n #TODO: process entire lists if they exist\n if response.registrar:\n record['registrar'] = response.registrar[0]\n try:\n if response.whois_server:\n record['whois_server'] = response.whois_server[0]\n except KeyError:\n # Some registrants (for example, .uk) don't return whois_server\n pass\n if response.domain_name:\n record['domain_name'] = response.domain_name[0]\n try:\n if response.referral_url:\n record['referral_url'] = response.referral_url[0]\n except KeyError:\n # Some registrants (for example, .uk) don't return referral_url\n pass\n\n #These list comprehensions get rid of empty strings that the parser sometimes adds to the lists\n if response.status:\n record['status'] = [x.replace(' ', '_') for x in response.status if len(x.strip())]\n try:\n if response.emails:\n record['registrar_contacts'] = [x for x in response.emails if len(x.strip())]\n except KeyError:\n # Some registrants (for example, .uk) don't return emails\n pass\n if response.name_servers:\n record['name_servers'] = [x for x in response.name_servers if len(x.strip())]\n\n #these dates can be datetimes or arrays of datetimes, not sure why\n if response.creation_date:\n if response.creation_date is list:\n record['creation_date'] = self.__get_xml_date_fmt(whois.parser.cast_date(response.creation_date[0]))\n else:\n record['creation_date'] = self.__get_xml_date_fmt(response.creation_date.timetuple())\n\n if response.updated_date:\n if response.updated_date is list:\n record['updated_date'] = self.__get_xml_date_fmt(whois.parser.cast_date(response.updated_date[0]))\n else:\n record['updated_date'] = self.__get_xml_date_fmt(response.updated_date.timetuple())\n\n if response.expiration_date:\n if response.expiration_date is list:\n record['expiration_date'] = self.__get_xml_date_fmt(whois.parser.cast_date(response.expiration_date[0]))\n else:\n 
record['expiration_date'] = self.__get_xml_date_fmt(response.expiration_date.timetuple())\n return record", "def parseaddr(self, string):\n username, emailaddr = email.utils.parseaddr(string)\n try:\n if chardet.detect(username)['encoding'] == 'ascii':\n # parse username decoded like '=?gbk?Q?=CF=E0=C6=AC?='\n dh = email.Header.decode_header(email.Header.Header(username))\n username = dh[0][0]\n if dh[0][1] and dh[0][1] != 'utf-8':\n username = username.decode(dh[0][1]).encode('utf-8')\n except Exception, e:\n print e\n return [emailaddr, None]\n return [emailaddr, username]", "def _parse_location(self, item):\n if item['location']:\n address = item['location'] + ' Chicago, IL'\n else:\n address = None\n return {'address': address, 'name': '', 'neighborhood': ''}", "def extract_domain(line):\n if curr_zone_type == 1: return line.split()[0]\n else: return line.split()[0].split('.')[-3]", "def __create_whois_object(self, domain):\n if not domain:\n return None\n\n if(self.__verbose_output):\n sys.stderr.write(\"** creating Whois object for: %s\\n\" % domain)\n\n if self.http_whois:\n record = self.__get_whois_record_http(domain)\n else:\n record = self.__get_whois_record(domain)\n\n if not record:\n return None\n\n whois = WhoisEntry()\n\n record['status'] = ['OK' if status == 'ACTIVE' else status for status in record['status']]\n\n #Only build registrar info objects if we have the relevant info\n if (record['registrar'] or record['whois_server'] or\n record['registrar_address'] or record['referral_url'] or\n record['registrar_contacts']):\n registrar = WhoisRegistrar()\n registrar.name = String(record.get('registrar'))\n registrar.address = String(record.get('registrar_address'))\n registrar.whois_server = URI(record.get('whois_server'))\n registrar.referral_url = URI(record.get('referral_url'))\n\n contacts = WhoisContacts()\n for email in record['registrar_contacts']:\n contact = WhoisContact()\n contact.contact_type = 'ADMIN'\n contact.name = String(record.get('registrar'))\n contact.email_address = EmailAddress(email)\n\n contacts.append(contact)\n registrar.contacts = contacts\n\n whois.registrar_info = registrar\n\n whois.domain_name = self.__create_domain_name_object(record.get('domain_name'))\n\n nservers = WhoisNameservers()\n for url in record.get('name_servers', []):\n nservers.append(self.__create_url_object(url))\n if nservers:\n whois.nameservers = nservers\n\n status = WhoisStatuses()\n for s in record.get('status', []):\n status.append(WhoisStatus(s))\n if status:\n whois.status = status\n\n whois.updated_date = DateTime(record.get('updated_date'))\n whois.creation_date = DateTime(record.get('creation_date'))\n whois.expiration_date = DateTime(record.get('expiration_date'))\n\n return whois", "def parse_hostname(self, data):\n #AccessFJWAN-SRS# show run sysname\n #--- Fri Jun 8 18:31:11 2018 ---\n #AccessFJWAN-SRS\n return data.splitlines()[-1]", "def handle_whois(self, args):\n if not args:\n self.error(IRC.ERR_NEEDMOREPARAMS)\n return\n self.error(IRC.ERR_UNKNOWNCOMMAND)", "def parsed_metadata(self, msg):\n ips = msg.get_header_ips()\n result = []\n for ipaddr in ips:\n country = self.get_country(ipaddr)\n result.append(str(country))\n if result:\n result = \" \".join(result)\n msg.headers[\"X-Relay-Countries\"].append(result)\n self.ctxt.log.debug(\"X-Relay-Countries: '%s'\", result)\n msg.plugin_tags[\"RELAYCOUNTRY\"] = result", "def _parse_one_ipwhois_history(self, ip: str, jw: dict,\n reason) -> IPWhoisData:\n res: IPWhoisData = None\n try:\n if not isinstance(jw, 
dict):\n return res\n\n # required fields\n applicableFrom = jw.get('applicableFrom')\n applicableUntil = jw.get('applicableUntil')\n\n jcontent: dict = jw.get(\"content\")\n if jcontent is None:\n self._logger.error(\n \"Parse one ipwhois filed not found: content, ip:{}\".format(\n ip))\n return res\n\n res = self._parse_one_ipwhois(ip, jcontent, reason)\n if res is None: return res\n if res.applicable_from is None and not applicableFrom is None:\n res.applicable_from = applicableFrom\n if res.applicable_until is None and not applicableUntil is None:\n res.applicable_until = applicableUntil\n\n except Exception:\n self._logger.debug(\n \"Parse one ipwhois error: ip:{}, error: {}\".format(\n ip, traceback.format_exc()))\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extracts nac info from role section
def parse_role(self, s, nac):
        org_name = self.find_first_item(s, ('role',))
        if org_name is None:
            raise UnknownWhoisFormat('Can not find role in Role section')

        address = self.find_all_items(s, ('address',))
        if len(address) == 0:
            raise UnknownWhoisFormat('Can not find address in Role section')

        country = self.find_first_item(s, ('country',))
        if country is None:
            raise UnknownWhoisFormat('Can not find country in Role section')

        nac[ORGNAME] = org_name
        nac[ORGADDRESS] = address
        nac[COUNTRY] = country
        return nac
[ "def parse_role_sec(self, role_section, nac):\n org_name_lst = self.find_all_items(role_section, ('role',))\n if len(org_name_lst) == 0:\n self._messanger.send_message(\"Can't find organisation name in role section\")\n else:\n nac[ORGNAME] = org_name_lst[0]\n\n org_address_lst = self.find_all_items(role_section, ('address',))\n if len(org_address_lst) == 0:\n self._messanger.send_message(\"Can't find organisation address in role section\")\n else:\n nac[ORGADDRESS] = org_address_lst", "async def roleinfo(self, ctx, *, role: str):\n\n role = await helpers.role_by_substring(ctx, role)\n\n embed = discord.Embed(title=role.name)\n embed.colour = role.color\n embed.description = f\"{role.id} | Members: {len(role.members)}\"\n embed.add_field(name=\"Color\", value=f\"{role.color}\", inline=False)\n\n if role.permissions.administrator:\n embed.add_field(name=\"Administrator\", value=True)\n\n else:\n paginator = commands.Paginator(prefix=\"\", suffix=\"\")\n\n for permission, value in role.permissions:\n if value:\n paginator.add_line(str(permission).capitalize().replace(\"_\", \" \"))\n\n for page in paginator.pages:\n embed.add_field(name=\"Permissions\", value=page)\n\n await ctx.send(embed=embed)", "def get_role(row):\n role = row[6]\n\n # Normalize roles Lead Link and Rep Link, as they contain the circle name as well\n if \"Lead Link\" in role:\n role = \"Lead Link\"\n\n if \"Rep Link\" in role:\n role = \"Rep Link\"\n\n return role", "async def roleinfo(self, ctx, role: discord.Role):\n embed = discord.Embed(title=f\"Role Information for {role.name}\", colour=role.colour.value or 0x2F3136)\n \n embed.add_field(name=\"ID\", value=role.id)\n embed.add_field(name=\"Members\", value=len(role.members))\n embed.add_field(\n name=\"Position\",\n value=f\"{len(ctx.guild.roles) - role.position}/{len(ctx.guild.roles)}\",\n )\n embed.add_field(\n name=\"Created at\",\n value=f\"{discord.utils.format_dt(role.created_at, 'F')} ({discord.utils.format_dt(role.created_at, 'R')})\",\n )\n embed.add_field(\n name=\"Role Color\",\n value=f\"INT: {role.color.value}\\nHEX: {hex(role.colour.value)[2:].zfill(6)}\\nRGB: rgb{role.color.to_rgb()}\",\n )\n embed.add_field(name=\"Mentionable\", value=\"Yes\" if role.mentionable else \"No\")\n embed.add_field(name=\"Displayed Separately?\", value=\"Yes\" if role.hoist else \"No\")\n \n await ctx.send(embed=embed)", "def role(self):\n if self.case_status == 'adoption':\n return ''\n return self.user_role", "async def role_info(self, ctx: Context, *roles: Union[Role, str]) -> None:\n parsed_roles = []\n failed_roles = []\n\n for role_name in roles:\n if isinstance(role_name, Role):\n # Role conversion has already succeeded\n parsed_roles.append(role_name)\n continue\n\n role = utils.find(\n lambda r: r.name.lower() == role_name.lower(), ctx.guild.roles)\n\n if not role:\n failed_roles.append(role_name)\n continue\n\n parsed_roles.append(role)\n\n if failed_roles:\n msg = f\"{Emojis.cross_mark} I could not convert the following role names to a role: \\n-\"\n msg += \"\\n-\".join(failed_roles)\n await ctx.send(msg)\n\n for role in parsed_roles:\n h, s, v = colorsys.rgb_to_hsv(*role.colour.to_rgb())\n\n embed = Embed(\n title=f\"{role.name} info\",\n colour=role.colour,\n )\n embed.add_field(name=\"ID\", value=role.id, inline=True)\n embed.add_field(name=\"Colour (RGB)\",\n value=f\"#{role.colour.value:0>6x}\", inline=True)\n embed.add_field(name=\"Colour (HSV)\",\n value=f\"{h:.2f} {s:.2f} {v}\", inline=True)\n embed.add_field(name=\"Member count\", value=len(\n 
role.members), inline=True)\n embed.add_field(name=\"Position\", value=role.position)\n embed.add_field(name=\"Permission code\",\n value=role.permissions.value, inline=True)\n\n await ctx.send(embed=embed)", "def node_roles(node):\n return \"_\".join(sorted(node[\"roles\"]))", "def test_get_role_associates(self):\n pass", "def _get_role_arn(self):\n if self.stack.cloudformation_service_role:\n return {\"RoleARN\": self.stack.cloudformation_service_role}\n else:\n return {}", "def parse_org_sec(self, org_section, nac):\n org_name_lst = self.find_all_items(org_section, ('org-name',))\n if len(org_name_lst) == 0:\n self._messanger.send_message(\"Can't find organisation name in organisation section\")\n else:\n nac[ORGNAME] = org_name_lst[0]\n\n org_address_lst = self.find_all_items(org_section, ('address',))\n if len(org_address_lst) == 0:\n self._messanger.send_message(\"Can't find organisation address in organisation section\")\n else:\n nac[ORGADDRESS] = org_address_lst", "def test_read_role(self):\n pass", "def rfc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n anchor = ''\n anchorindex = text.find('#')\n if anchorindex > 0:\n text, anchor = text[:anchorindex], text[anchorindex:]\n try:\n rfcnum = int(text)\n except ValueError:\n msg = inliner.reporter.error('invalid PEP number %s' % text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum\n sn = nodes.strong('RFC ' + text, 'RFC ' + text)\n rn = nodes.reference('', '', internal=False, refuri=ref + anchor,\n classes=[name])\n rn += sn\n return [rn], []", "def roleSummary(self, kvDict):\n\n role = Role.query.filter_by(**kvDict).all()\n\n if not role:\n print('No existing role found matching values.')\n return\n if len(role) > 1:\n print(f'{len(role)} roles found matching that criteria.')\n return\n role = role[0]\n\n # list User Permissions and sources\n permNames = role.allPermissionsRoles()[0]\n rolePerms = list(permNames)\n parentRoles = role.parents\n inheritedRoles = list(role.allPermissionsRoles()[1])\n\n if role.name in inheritedRoles:\n inheritedRoles.remove(role.name)\n\n for parentRole in parentRoles:\n if parentRole.name in inheritedRoles:\n inheritedRoles.remove(parentRole.name)\n\n for i, perm in enumerate(rolePerms):\n rolePerms[i] = Permission.query.filter_by(name=perm).first()\n for i, subRole in enumerate(inheritedRoles):\n inheritedRoles[i] = Role.query.filter_by(name=subRole).first()\n\n print(f'{role} Summary:')\n print('=========================')\n print('Inherited Role Tree:')\n print('- - - - - -')\n for parentRole in parentRoles:\n depth = 0\n print(parentRole)\n if parentRole.parents:\n self._get_subRole(parentRole.parents, depth + 1)\n if not parentRoles:\n print('No assigned roles')\n print('- - - - -')\n print('')\n\n print('Permissions / Permission Sources:')\n print('- - - - -')\n\n for perm in rolePerms:\n print(f'{perm.name}: ', end='')\n print('\\t\\t', end='')\n if perm in role.permissions:\n print(f' *Explicit,', end='')\n for parentRole in parentRoles:\n if perm in parentRole.permissions:\n print(f' {parentRole.name},', end='')\n for subParentRole in inheritedRoles:\n if perm in subParentRole.permissions:\n print(f' ({subParentRole.name}),', end='')\n print('')\n print('- - - - -')\n print('\\n')\n self._printPermissionAccessibleRoutes(permNames)\n print('\\n')\n self._printBlockedRoutes(permNames)\n print('\\n')\n 
self._printTemplateAccess(self._templateAccessSummary(permNames))\n print('\\n')", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def readPersona_role(file,output):\n personas = []\n num_of_conversation = 0\n persona_tmp = []\n current_role = \"\"\n with open(file,'r') as f:\n for line in f.readlines():\n line = line.split()\n if line[0] == '1':\n num_of_conversation += 1\n current_role = \"new\"\n if line[2] == \"persona:\":\n role = line[1]\n if (role != current_role) & (current_role != \"\"):\n personas.append(\" \".join(persona_tmp) + '\\n')\n persona_tmp = []\n persona_tmp.append(\" \".join(line[3:]))\n current_role = role\n\n with open(output,'w') as op:\n op.writelines(personas)", "def get_role2(self):\r\n \r\n return self.obj_dict['role2']", "def get_role1(self):\r\n \r\n return self.obj_dict['role1']", "def test_getroles(self):\n pass", "def _roles(self, association):\n return self._assoc2roles[association]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extracts nac info from person section
def parse_person(self, s, nac):
        org_name = self.find_first_item(s, ('person',))
        if org_name is None:
            raise UnknownWhoisFormat('Can not find person in Person section')

        address = self.find_all_items(s, ('address',))
        if len(address) == 0:
            raise UnknownWhoisFormat('Can not find address in Person section')

        country = self.find_first_item(s, ('country',))
        if country is None:
            raise UnknownWhoisFormat('Can not find country in Person section')

        nac[ORGNAME] = org_name
        nac[ORGADDRESS] = address
        nac[COUNTRY] = country
        return nac
[ "def parse_person_sec(self, person_section, nac):\n person_name = self.find_first_item(person_section, ('person',))\n\n if person_name is None:\n self._messanger.send_message(\"Can't find name in person section\")\n else:\n nac[ORGNAME] = person_name\n\n address_lst = self.find_all_items(person_section, ('address',))\n if len(address_lst) == 0:\n self._messanger.send_message(\"Can't find address in person section\")\n else:\n nac[ORGADDRESS] = address_lst", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def parse_org_sec(self, org_section, nac):\n org_name_lst = self.find_all_items(org_section, ('org-name',))\n if len(org_name_lst) == 0:\n self._messanger.send_message(\"Can't find organisation name in organisation section\")\n else:\n nac[ORGNAME] = org_name_lst[0]\n\n org_address_lst = self.find_all_items(org_section, ('address',))\n if len(org_address_lst) == 0:\n self._messanger.send_message(\"Can't find organisation address in organisation section\")\n else:\n nac[ORGADDRESS] = org_address_lst", "def orcid_author_get_parser(orcid):\n \n out_file = \"data/orcid_author_get.xml\"\n fout = open(out_file, \"w\")\n print(ORCID.orcid_author_get(orcid, kind=\"xml\").encode('utf-8'), file=fout)\n fout.close()\n \n tree = ET.parse(out_file)\n root_element = tree.getroot()\n ns = '{http://www.orcid.org/ns/orcid}'\n \n author = {'othernames': [], 'urls': [], 'identifiers': []}\n \n for child1 in root_element:\n if(child1.tag == ns + 'orcid-profile'):\n for child2 in child1:\n if(child2.tag == ns + 'orcid-identifier'):\n for child3 in child2:\n if(child3.tag == ns + 'path'):\n author['orcid'] = child3.text\n elif(child2.tag == ns + 'orcid-bio'):\n for child3 in child2:\n if(child3.tag == ns + 'personal-details'):\n for child4 in child3:\n if(child4.tag == ns + 'given-names'):\n author['firstname'] = child4.text\n elif(child4.tag == ns + 'family-name'):\n author['lastname'] = child4.text\n elif(child4.tag == ns + 'other-names'):\n for child5 in child4:\n if(child5.tag == ns + 'other-name'):\n author['othernames'].append(child5.text)\n elif(child3.tag == ns + 'researcher-urls'):\n for child4 in child3:\n if(child4.tag == ns + 'researcher-url'):\n for child5 in child4:\n if(child5.tag == ns + 'url'):\n author['urls'].append(child5.text)\n elif(child3.tag == ns + 'contact-details'):\n for child4 in child3:\n if(child4.tag == ns + 'email'):\n author['email'] = child4.text\n elif(child3.tag == ns + 'external-identifiers'):\n for child4 in child3:\n if(child4.tag == ns + 'external-identifier'):\n identifier = {}\n for child5 in child4:\n if(child5.tag == ns + 'external-id-common-name'):\n key = None\n if(child5.text == 'ResearcherID'):\n key = 'ResearcherID'\n elif(child5.text == 'Scopus Author ID'):\n key = 'ScopusID'\n elif(child5.tag == ns + 'external-id-reference'):\n value = child5.text\n if key is not None:\n identifier[key] = value\n 
author['identifiers'].append(identifier)\n \n return author", "def personparsing(page, thread_ident, profile):\n try: # Handle empty webdl failure\n soup = bs4.BeautifulSoup(page.text, 'lxml')\n except AttributeError:\n return profile\n e = profile\n\n \"\"\"VCF parsing subsection, kills early if vcf parse fails\"\"\"\n vcfregex = re.compile(r\"\\.vcf\")\n vcf_parent = soup.find_all('a', {'class': 'link download'}, href=True)\n for potential_link in vcf_parent:\n pot_link = potential_link['href']\n if vcfregex.findall(pot_link):\n e['VCard'] = pot_link.replace('.', '', 2)\n else:\n e['Bio'] = pot_link.replace('.', '', 2)\n try:\n vcf_link = e['VCard']\n to_add = vcfmuncher(vcf_link, thread_ident, e['Full Name'])\n if not to_add:\n print('[Error-{} vcfmuncher]: VCF could not be downloaded/parsed'.format(thread_ident))\n return profile\n else:\n e.update(to_add)\n except KeyError:\n print('[Error-{} personparser]: VCF element could not be located'.format(thread_ident))\n return profile\n\n # \"\"\"Page parsing subsection, expand/comment out as needed\"\"\"\n # def pythonicparser(title, bs4):\n # spec_parent = soup.find(bs4)\n # if spec_parent:\n # spec_el = spec_parent.find_all('li')\n # combined_spec = ''\n # for el in spec_el:\n # if el.get_text:\n # spec = el.get_text()\n # combined_spec += spec + ', '\n # e[str(title)] = combined_spec\n #\n # pythonicparser('Specialities', \"'div', {'id': MasterPage_ctl00_ContentPlaceHolder1_divAreasOfSpecialization\")\n #\n # experience_parents = soup.find_all('span', {'style': 'font-size: 8pt; font-weight: bold;'})\n # for el in experience_parents:\n # if el.get_text() == 'Years of Experience':\n # outer_el = el.parent\n # exp = outer_el.text.replace('Years of Experience', '')\n # e['Experience'] = exp.strip()\n # else:\n # continue\n\n return e", "def parse_positions_person(line):\n dictionary = dict()\n dictionary[\"RECTYPE\"] = line[0:1]\n dictionary[\"YEAR\"] = line[1:5]\n dictionary[\"DATANUM\"] = line[5:7]\n dictionary[\"SERIAL\"] = line[7:15]\n dictionary[\"PERNUM\"] = line[15:19]\n dictionary[\"PERWT\"] = line[19:29]\n dictionary[\"SLWT\"] = line[29:39]\n dictionary[\"SLREC\"] = line[39:40]\n dictionary[\"RESPONDT\"] = line[40:41]\n dictionary[\"FAMUNIT\"] = line[41:43]\n dictionary[\"FAMSIZE\"] = line[43:45]\n dictionary[\"SUBFAM\"] = line[45:46]\n dictionary[\"SFTYPE\"] = line[46:47]\n dictionary[\"SFRELATE\"] = line[47:48]\n dictionary[\"MOMLOC\"] = line[48:50]\n dictionary[\"STEPMOM\"] = line[50:51]\n dictionary[\"MOMRULE_HIST\"] = line[51:52]\n dictionary[\"POPLOC\"] = line[52:54]\n dictionary[\"STEPPOP\"] = line[54:55]\n dictionary[\"POPRULE_HIST\"] = line[55:56]\n dictionary[\"SPLOC\"] = line[56:58]\n dictionary[\"SPRULE_HIST\"] = line[58:59]\n dictionary[\"NCHILD\"] = line[59:60]\n dictionary[\"NCHLT5\"] = line[60:61]\n dictionary[\"NSIBS\"] = line[61:62]\n dictionary[\"ELDCH\"] = line[62:64]\n dictionary[\"YNGCH\"] = line[64:66]\n dictionary[\"RELATE\"] = line[66:68]\n dictionary[\"RELATED\"] = line[68:72]\n dictionary[\"SEX\"] = line[72:73]\n dictionary[\"AGE\"] = line[73:76]\n dictionary[\"AGEMONTH\"] = line[76:78]\n dictionary[\"MARST\"] = line[78:79]\n dictionary[\"MARRNO\"] = line[79:80]\n dictionary[\"AGEMARR\"] = line[80:82]\n dictionary[\"CHBORN\"] = line[82:84]\n dictionary[\"RACE\"] = line[84:85]\n dictionary[\"RACED\"] = line[85:88]\n dictionary[\"HISPAN\"] = line[88:89]\n dictionary[\"HISPAND\"] = line[89:92]\n dictionary[\"BPL\"] = line[92:95]\n dictionary[\"BPLD\"] = line[95:100]\n dictionary[\"MBPL\"] = line[100:103]\n 
dictionary[\"MBPLD\"] = line[103:108]\n dictionary[\"FBPL\"] = line[108:111]\n dictionary[\"FBPLD\"] = line[111:116]\n dictionary[\"NATIVITY\"] = line[116:117]\n dictionary[\"CITIZEN\"] = line[117:118]\n dictionary[\"MTONGUE\"] = line[118:120]\n dictionary[\"MTONGUED\"] = line[120:124]\n dictionary[\"SPANNAME\"] = line[124:125]\n dictionary[\"HISPRULE\"] = line[125:126]\n dictionary[\"SCHOOL\"] = line[126:127]\n dictionary[\"HIGRADE\"] = line[127:129]\n dictionary[\"HIGRADED\"] = line[129:132]\n dictionary[\"EDUC\"] = line[132:134]\n dictionary[\"EDUCD\"] = line[134:137]\n dictionary[\"EMPSTAT\"] = line[137:138]\n dictionary[\"EMPSTATD\"] = line[138:140]\n dictionary[\"LABFORCE\"] = line[140:141]\n dictionary[\"OCC\"] = line[141:145]\n dictionary[\"OCC1950\"] = line[145:148]\n dictionary[\"IND\"] = line[148:152]\n dictionary[\"IND1950\"] = line[152:155]\n dictionary[\"CLASSWKR\"] = line[155:156]\n dictionary[\"CLASSWKRD\"] = line[156:158]\n dictionary[\"WKSWORK1\"] = line[158:160]\n dictionary[\"WKSWORK2\"] = line[160:161]\n dictionary[\"HRSWORK1\"] = line[161:163]\n dictionary[\"HRSWORK2\"] = line[163:164]\n dictionary[\"DURUNEMP\"] = line[164:167]\n dictionary[\"UOCC\"] = line[167:170]\n dictionary[\"UOCC95\"] = line[170:173]\n dictionary[\"UIND\"] = line[173:176]\n dictionary[\"UCLASSWK\"] = line[176:177]\n dictionary[\"INCWAGE\"] = line[177:183]\n dictionary[\"INCNONWG\"] = line[183:184]\n dictionary[\"OCCSCORE\"] = line[184:186]\n dictionary[\"SEI\"] = line[186:188]\n dictionary[\"PRESGL\"] = line[188:191]\n dictionary[\"ERSCOR50\"] = line[191:195]\n dictionary[\"EDSCOR50\"] = line[195:199]\n dictionary[\"NPBOSS50\"] = line[199:203]\n dictionary[\"MIGRATE5\"] = line[203:204]\n dictionary[\"MIGRATE5D\"] = line[204:206]\n dictionary[\"MIGPLAC5\"] = line[206:209]\n dictionary[\"MIGMET5\"] = line[209:213]\n dictionary[\"MIGTYPE5\"] = line[213:214]\n dictionary[\"MIGCITY5\"] = line[214:218]\n dictionary[\"MIGSEA5\"] = line[218:221]\n dictionary[\"SAMEPLAC\"] = line[221:222]\n dictionary[\"SAMESEA5\"] = line[222:223]\n dictionary[\"MIGCOUNTY\"] = line[223:227]\n dictionary[\"VETSTAT\"] = line[227:228]\n dictionary[\"VETSTATD\"] = line[228:230]\n dictionary[\"VET1940\"] = line[230:231]\n dictionary[\"VETWWI\"] = line[231:232]\n dictionary[\"VETPER\"] = line[232:233]\n dictionary[\"VETCHILD\"] = line[233:234]\n dictionary[\"HISTID\"] = line[234:270]\n dictionary[\"SURSIM\"] = line[270:272]\n dictionary[\"SSENROLL\"] = line[272:273]\n return dictionary", "def parse_personal(mdb):\n # Parse Id\n try:\n personalid = mdb.find(\"id\").get_text()\n except Exception:\n personalid = None\n # Parse First Name\n try:\n firstname = mdb.find(\"vorname\").get_text()\n except Exception:\n firstname = None\n # Parse Last Name\n try:\n lastname = mdb.find(\"nachname\").get_text()\n except Exception:\n lastname = None\n # Parse Academic Title\n try:\n acad = mdb.find(\"akad_titel\").get_text()\n if acad == \"\":\n acad = None\n except Exception:\n acad = None\n # Parse Year of Birth\n try:\n birthyear = mdb.find(\"geburtsdatum\").get_text()\n except Exception:\n birthyear = None\n # Parse Place of Birth\n try:\n birthplace = mdb.find(\"geburtsort\").get_text()\n except Exception:\n birthplace = None\n # Parse Year of Death\n try:\n death = mdb.find(\"sterbedatum\").get_text()\n if death == \"\":\n death = None\n except Exception:\n death = None\n # Parse Gender\n try:\n gender = mdb.find(\"geschlecht\").get_text()\n except Exception:\n gender = None\n # Parse Party\n try:\n party = 
mdb.find(\"partei_kurz\").get_text()\n except Exception:\n party = None\n # Parse Occupation\n try:\n occupation = mdb.find(\"beruf\").get_text()\n if occupation == \"\":\n occupation = None\n else:\n occupation = occupation.split(\", \")\n occupation = \";\".join(occupation)\n except Exception:\n occupation = None\n # Parse Parliamentary Periods\n try:\n period = mdb.find_all(\"wp\")\n period = [x.get_text() for x in period]\n period = \";\".join(period)\n except Exception:\n period = None\n # Parse Vita\n try:\n vita = mdb.find(\"vita_kurz\").get_text()\n except Exception:\n vita = None\n \n # Collect to Dict\n personal_dict = {\n \"ID\" : personalid,\n \"FirstName\" : firstname,\n \"LastName\" : lastname,\n \"Name\" : \" \".join([firstname, lastname]),\n \"AcademicTitle\" : acad,\n \"BirthYear\" : birthyear,\n \"BirthPlace\" : birthplace,\n \"DeathYear\" : death,\n \"Gender\" : gender,\n \"Party\" : party,\n \"Occupation\" : occupation,\n \"Period\" : period,\n \"Vita\" : vita\n }\n \n return(personal_dict)", "def convert_citation_text_lines_to_info(text):\n lines = text.strip().split(\"\\n\")\n info = {\n \"_citation_id\": lines[0].strip(),\n }\n found = False\n other = []\n\n if lines[-1].strip().startswith(\">\") and len(lines) >= 2:\n # [N] > varname\n info[\"_pyref\"] = lines[-1][1:].strip()\n info[\"_work_type\"] = \"Ref\"\n found = True\n other = lines[1:-1]\n elif lines[-1].strip().startswith(\"http\") and len(lines) >= 3:\n # [N] WebName http://...\n info[\"title\"] = lines[1].strip()\n info[\"url\"] = lines[-1].strip()\n info[\"_work_type\"] = \"Site\"\n found = True\n other = lines[2:-1]\n elif len(lines) >= 5 and lines[-1].strip().isnumeric():\n # [N] author name place other year\n info[\"author\"] = lines[1].strip()\n info[\"title\"] = lines[2].strip()\n split = lines[3].strip().split(\"=\")\n if len(split) > 1:\n info[split[0]] = \"=\".join(split[1:])\n else:\n info[\"booktitle\"] = lines[3].strip()\n info[\"year\"] = int(lines[-1].strip())\n info[\"_work_type\"] = \"Work\"\n found = True\n other = lines[4:-1]\n if found:\n for num, line in zip(range(1, 10000), other):\n line = line.strip()\n split = line.split(\"=\")\n if len(split) > 1:\n info[split[0]] = \"=\".join(split[1:])\n else:\n info[\"_other{}\".format(num)] = line\n return info\n \n return \"Incomplete\"", "def extract_prior_acc_conference_data(self, debug):\r\n year = accolade = first_name = last_name = college = None\r\n with open('../conference_data/ACC.txt', 'r') as f:\r\n for line in f:\r\n if self.has_date(line):\r\n year = line.strip()\r\n elif 'Team' in line:\r\n accolade = line.strip()\r\n else:\r\n cleaned_line = re.sub('\\\\.{2,}', ' ', line).strip()\r\n print(cleaned_line)\r\n first_name = cleaned_line.split()[1].strip()\r\n last_name = ' '.join(cleaned_line.split()[2:-1]).strip()\r\n college = cleaned_line.split()[-1].strip()\r\n college = self.__convert_to_full_college_name(college)\r\n\r\n if debug:\r\n self.__print_conference_data_debug_message([year, first_name, last_name, accolade, college],\r\n [year, first_name, last_name, accolade, college])\r\n self.__append_conference_datum(year, first_name, last_name, accolade, college)", "def ner_transform_person_tag(ner):\n\n if ner == 'B-per-giv':\n return 'B-PERSON'\n elif ner == 'B-per-mid':\n return 'B-PERSON'\n elif ner == 'B-per-nam':\n return 'I-PERSON'\n elif ner == 'B-per-fam':\n return 'I-PERSON'\n elif ner == 'I-per-fam':\n return 'I-PERSON'\n elif ner == 'B-per-ini':\n return 'I-PERSON'\n return ner", "def 
_process_person_associations(self):\n\n if 'personAssociations' not in self.item:\n return\n \n self.data['contributors'] = []\n\n file_data = file_read_lines('user_ids_match')\n\n for item in self.item['personAssociations']:\n\n self.sub_data = {}\n self._get_contributor_name(item)\n\n self._add_subdata(item, 'uuid', ['person', 'uuid'])\n self._add_subdata(item, 'externalId', ['person', 'externalId'])\n self._add_subdata(item, 'authorCollaboratorName', ['authorCollaboration', 'names', 0, 'value']) \n self._add_subdata(item, 'personRole', ['personRoles', 0, 'value']) \n self._add_subdata(item, 'organisationalUnit', ['organisationalUnits', 0, 'names', 0, 'value'])\n self._add_subdata(item, 'type_p', ['externalPerson', 'types', 0, 'value'])\n self._add_subdata(item, 'uuid', ['externalPerson', 'uuid'])\n \n # Checks if the record owner is available in user_ids_match.txt\n person_external_id = get_value(item, ['person', 'externalId'])\n owner = self.general_functions.get_userid_from_list_by_externalid(person_external_id, file_data)\n \n if owner and int(owner) not in self.data['owners']:\n self.data['owners'].append(int(owner))\n\n # ORCID\n self._process_contributor_orcid()\n\n self.data['contributors'].append(self.sub_data)", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n\n if self.check_simple_org_format():\n org_name = self.parse_arin_simple_org()\n nac[ORGNAME] = org_name\n else:\n ref_ser = self.find_referral_server()\n if ref_ser:\n server_name, port_number = ref_ser\n # raw_whois = self.receive_raw_whois(ip_address, server_name, port_number)\n whois_parser = self._manager.create_parser(self._ip_address, server_name, port_number)\n whois_parser.receive_raw_whois()\n nac = whois_parser.parse()\n else:\n self.parse_arin_org(nac)\n return nac", "def _parse_author_details(self, bz_issue, at_name_bz):\n if 'email' in bz_issue[at_name_bz]:\n return self._get_people(bz_issue[at_name_bz]['name'], bz_issue[at_name_bz]['email'],\n bz_issue[at_name_bz]['real_name'])\n else:\n return self._get_people(bz_issue[at_name_bz]['name'])", "def extract_prior_stanford_conference_data(self, debug):\r\n with open('../conference_data/Stanford.txt', 'r') as f:\r\n for line in f:\r\n college = 'Stanford University'\r\n year = line.split()[0].strip()\r\n first_name = line.split()[1].strip()\r\n last_name = line.split()[2].strip()\r\n accolade = ' '.join(line.split()[-2:]).strip()\r\n if debug:\r\n self.__print_conference_data_debug_message([year, first_name, last_name, accolade, college],\r\n [year, first_name, last_name, accolade, college])\r\n\r\n self.__append_conference_datum(year, first_name, last_name, accolade, college)", "def _getAuthorInfo(self):\n author_page={}\n try:\n date_str = stripHtml(self.soup.find('div','fieldset').renderContents())\n date_str = date_str[date_str.find(':')+1:].strip()\n date_str = re.sub(\"(\\d+)(st|nd|rd|th)\",r'\\1',date_str)\n author_page [ 'edate_author_member_since' ] = datetime.strftime(datetime.\\\n strptime(date_str,\"%b %d, %Y\"), \"%Y-%m-%dT%H:%M:%SZ\")\n except:\n log.info(self.log_msg( 'join date not found' ) )\n try:\n post_str =stripHtml(self.soup.find('fieldset','fieldset').findNext\\\n ('td').renderContents())\n post_match= re.match('Total Posts:\\s*(\\d+)\\s*\\((.*?)\\s*posts per day\\)'\\\n ,post_str)\n if post_match:\n author_page[ 'ei_author_reviews_count' ] = post_match.group(1)\n author_page[ 'ef_author_reviews_per_day'] = float( post_match.group(2) )\n else:\n log.info(self.log_msg( 'no match is found for 
total post') )\n except:\n log.exception(self.log_msg( 'post info is not found') )\n\n add_info = { 'et_author_location': 'Location',\n 'et_author_dob':'Date of Birth',\n 'et_author_interests': 'Interests',\n 'et_author_occupation': 'Occupation',\n 'et_author_favorite_quote':'Favorite Quote',\n 'et_author_guilty_pleasure':'Guilty Pleasure',\n 'et_author_favorite_music':'Favorite Music',\n 'et_author_favorite_books':'Favorite Books',\n 'et_author_favorite_television_programs':'Favorite Television Programs',\n 'et_author_gender':'Gender',\n 'et_author_about' :'About Me',\n 'et_author_favorite_movies':'Favorite Movies'\n }\n for each in add_info.keys():\n try:\n temp_str = stripHtml(self.soup.find('strong',text = add_info\\\n [each]).findPrevious('td').renderContents())\n author_page [ each ] = temp_str[temp_str.find(':')+1:].strip()\n except:\n log.info('info not found for %s'%each)\n try:\n temp_str = stripHtml( self.soup.find('td',text='Contact Info').parent.\\\n findNext('td','panelsurround').findNext('td','panelsurround')\\\n .find('td').renderContents() )\n author_page [ 'et_author_homepage' ] = temp_str[temp_str.rfind(':')+1:].strip()\n except:\n log.info(self.log_msg('Author Home page is not found'))\n try:\n author_page [ 'et_author_instant_messaging' ] = stripHtml( self.soup\\\n .find('legend',text='Instant Messaging').findNext('tr')\\\n .renderContents() )\n except:\n log.info( self.log_msg( 'Author instant message is not found' ) )\n return author_page", "def extract_lncrna_only(input_file, output_file):\n\n ids = []\n for entry in entries:\n type = re.findall(\"^ENSG\\d+\\.\\d+:(.+)\", entry[3])\n # if the type exists\n if len(type) != 0:\n splits = type[0].split(\",\")\n # and if there is only 1 entry\n if len(splits) == 1:\n # and that entry is lncRNA\n if splits[0] == \"lncRNA\":\n ids.append(entry[1])\n with open(output_file, \"w\") as outfile:\n outfile.write(\"{0}\\n\".format(\"\\t\".join(sorted(ids))))", "def convert_to_person_line_delimited(person):\n person_fields = ['RECTYPE', 'YEAR', 'DATANUM', 'SERIAL', 'PERNUM', 'PERWT',\n 'SLWT', 'SLREC', 'RESPONDT', 'FAMUNIT', 'FAMSIZE', 'SUBFAM',\n 'SFTYPE', 'SFRELATE', 'MOMLOC', 'STEPMOM', 'MOMRULE_HIST',\n 'POPLOC', 'STEPPOP', 'POPRULE_HIST', 'SPLOC', 'SPRULE_HIST',\n 'NCHILD', 'NCHLT5', 'NSIBS', 'ELDCH', 'YNGCH', 'RELATE',\n 'RELATED', 'SEX', 'AGE', 'AGEMONTH', 'MARST', 'MARRNO',\n 'AGEMARR', 'CHBORN', 'RACE', 'RACED', 'HISPAN', 'HISPAND',\n 'BPL', 'BPLD', 'MBPL', 'MBPLD', 'FBPL', 'FBPLD', 'NATIVITY',\n 'CITIZEN', 'MTONGUE', 'MTONGUED', 'SPANNAME', 'HISPRULE',\n 'SCHOOL', 'HIGRADE', 'HIGRADED', 'EDUC', 'EDUCD', 'EMPSTAT',\n 'EMPSTATD', 'LABFORCE', 'OCC', 'OCC1950', 'IND', 'IND1950',\n 'CLASSWKR', 'CLASSWKRD', 'WKSWORK1', 'WKSWORK2', 'HRSWORK1',\n 'HRSWORK2', 'DURUNEMP', 'UOCC', 'UOCC95', 'UIND', 'UCLASSWK',\n 'INCWAGE', 'INCNONWG', 'OCCSCORE', 'SEI', 'PRESGL', 'ERSCOR50',\n 'EDSCOR50', 'NPBOSS50', 'MIGRATE5', 'MIGRATE5D', 'MIGPLAC5',\n 'MIGMET5', 'MIGTYPE5', 'MIGCITY5', 'MIGSEA5', 'SAMEPLAC',\n 'SAMESEA5', 'MIGCOUNTY', 'VETSTAT', 'VETSTATD', 'VET1940',\n 'VETWWI', 'VETPER', 'VETCHILD', 'HISTID', 'SURSIM', 'SSENROLL']\n\n line_list = []\n for field in person_fields:\n line_list.append(person[field])\n\n # append a new line at the end\n # line_list.append(\"\\n\")\n\n line = '|'.join(line_list)\n line = line + \"\\n\"\n return line", "def get_isni_bio(existing, author):\n auth_isni = re.sub(r\"\\D\", \"\", str(author.isni))\n if len(existing) == 0:\n return \"\"\n for value in existing:\n if hasattr(value, \"bio\") and auth_isni 
== re.sub(r\"\\D\", \"\", str(value.isni)):\n return mark_safe(f\"Author of <em>{value.bio}</em>\")\n\n return \"\"", "def parse_HTML_to_person(mid, html):\n page = bs(html, 'html.parser')\n person = {}\n person.update({\"mid\": mid})\n label = \"\"\n data = \"\"\n divs = page.find_all(\"div\")\n for div in divs:\n class_attr = div.get('class')\n if class_attr is not None:\n if class_attr[0] == \"imod-profile-field-label\":\n label = div.string\n elif class_attr[0] == \"imod-profile-field-data\":\n data = div.string\n person.update({label: data})\n label = \"\"\n data = \"\"\n return person" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that indicates whether log has any traces of warnings.
def FoundWarnings(self):
        return len(self.WarningLines()) > 0
[ "def has_warnings(self):\n for item in self._content:\n attrs = item[1]\n if attrs.get('warning',False):\n return True\n return False", "def HasExtractionWarnings(self):\n return self._store.HasExtractionWarnings()", "def can_log(self):\n return # boolean", "def should_save_traces():\n return _save_options_context.save_traces", "def indicate_warn():\n print_right('[' + c.yellow('WARN') + ']', 7)", "def has_tracebackhide(self) -> bool:\n return self.get_attribute_value(ATTRIBUTE_MARKER_TRACEBACKHIDE) == \"1\"", "def should_log_traceback(e):\n # TODO: Change to subclass and check instance variable flag\n return e.__class__.__name__ not in (\"UserDoesNotExist\", \"NoSeasonDataError\")", "def contains_warning(result: SerializedTestResult, warnings: FrozenSet[str]) -> bool:\n return any(_warn(check, warnings) for check in result.checks)", "def can_lookup_logs(self):\n return # boolean", "def can_log(self):\n if self._header:\n return self.header & self.CAN_LOG_MASK == self.CAN_LOG_MASK", "def deprecation_errors(self) -> Optional[bool]:\n return self._deprecation_errors", "def _warn(self, message):\n if self.print_warnings:\n print message", "def _filter_stderr_messages(self, record):\n if self.logging_options['stderr_on_errors'] and self.errors_raised: return True\n return False", "def activate_warnings(cls):\n cls.warnings = True\n # lib_config.warnings = True", "def test_diagnostics_disabled(coresys):\n coresys.config.diagnostics = False\n assert filter_data(coresys, SAMPLE_EVENT, {}) is None", "def __bool__(self) :\n\t\treturn bool(self.log)", "def warnings(self):\n # type: () -> List[str]\n return self._get_property('warnings')", "def properties_logging(self):\n return any(prop.activate_tracing for prop in self.all_properties)", "def show_warnings(self):\n\n for warning in self.__warnings:\n print('>', warning)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns unique list of compiler and linker warning lines.
def WarningLines(self):
        if self.__lines_with_warnings is None:
            warnings = set()
            for line in self.log_content.splitlines():
                match = IbOutputParser.WARNING_LINE_MATCHER.match(line)
                if match:
                    warnings.add(line)
            self.__lines_with_warnings = list(warnings)
        return self.__lines_with_warnings
[ "def _import_warnings(self):\n warnings = (\n r\"Warning: BMDL computation is at best imprecise for these data\",\n r\"THE MODEL HAS PROBABLY NOT CONVERGED!!!\",\n \"THIS USUALLY MEANS THE MODEL HAS NOT CONVERGED!\",\n r\"BMR value is not in the range of the mean function\",\n r\"BMD = 100\\*\\(maximum dose\\)\",\n r\"BMDL computation failed\\.\",\n \"Warning: optimum may not have been found. Bad completion code in Optimization routine.\",\n \"Warning: Likelihood for fitted model larger than the Likelihood for model A3.\",\n )\n self.output[\"warnings\"] = []\n for warning in warnings:\n m = re.search(warning, self.output_text)\n if m:\n self.output[\"warnings\"].append(m.group())", "def get_all_warnings(self):\n if self.state == Check.State.NOT_RUN:\n return []\n dep_warns = [set(dependency.get_all_warnings()) for dependency in self._dependencies]\n return list(set.union(set(self._warnings), *dep_warns))", "def warnings(self):\n # type: () -> List[str]\n return self._get_property('warnings')", "def diagnostics(self):\n return []", "def ProcessGlobalSuppresions(lines):\n for line in lines:\n if _SEARCH_C_FILE.search(line):\n for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:\n _global_error_suppressions[category] = True", "def print_warnings(self):\n for warn in self.warning:\n print(warn)", "def unused_import_line_numbers(\n messages: Iterable[pyflakes.messages.Message],\n) -> Iterator[int]:\n for message in messages:\n if isinstance(message, pyflakes.messages.UnusedImport):\n yield message.lineno", "def get_warnings(self):\r\n warnings = ctypes.c_int32()\r\n self.check(self._dll.PH_GetWarnings(self._deviceID, ctypes.byref(warnings)))\r\n return warnings.value", "def generate_warnings_for_unused(self) -> List[SQLBaseError]:\n return [\n SQLUnusedNoQaWarning(\n line_no=ignore.line_no,\n line_pos=ignore.line_pos,\n description=f\"Unused noqa: {ignore.raw_str!r}\",\n )\n for ignore in self._ignore_list\n if not ignore.used\n ]", "def FoundWarnings(self):\n return len(self.WarningLines()) > 0", "def unused_variable_line_numbers(\n messages: Iterable[pyflakes.messages.Message],\n) -> Iterator[int]:\n for message in messages:\n if isinstance(message, pyflakes.messages.UnusedVariable):\n yield message.lineno", "def show_warnings(self):\n\n for warning in self.__warnings:\n print('>', warning)", "def get_warnings(self):\n if self.scheduler:\n return self.scheduler.get_workplace_warnings ( )\n else:\n raise Exception('Razpored ne obstaja')", "def collect_warning_messages_from_report(html_file):\n with open(html_file, \"r\") as htmlf:\n soup = BeautifulSoup(htmlf.read(), \"html.parser\")\n\n warnings_text = []\n for div in soup.findAll(\"div\", {\"class\": \"output_stderr\"}):\n # we collect the text in the <pre> tags after the standard error,\n # and split the lines; we only keep the lines that contain 'Warning:'\n for pre in div.findAll(\"pre\"):\n warnings_msgs = pre.text.splitlines()\n warnings_msgs = [msg for msg in warnings_msgs if \"warning\" in msg]\n warnings_text.extend(warnings_msgs)\n\n return warnings_text", "def phenix_separate_output(cmd_result):\n try:\n from phenix_regression.command_line import find_errors_and_warnings\n except ImportError:\n return []\n out = cmd_result.stdout_lines\n err = cmd_result.stderr_lines\n bad_lines = []\n for i, line in enumerate(out+err):\n if find_errors_and_warnings.is_error_or_warning(line, line.lower()):\n bad_lines.append(line)\n return bad_lines", "def full_library_check():\n ret = {}\n ret[_ERRORS] = []\n ret[_WARNINGS] = []\n d = 
check_library_unlock_dates()\n for e in d[_ERRORS]:\n ret[_ERRORS].append(str(e))\n for w in d[_WARNINGS]:\n ret[_WARNINGS].append(str(w))\n for w in check_library_urls():\n ret[_WARNINGS].append(str(w))\n for e in action_dependency.check_unreachable_library_actions():\n ret[_ERRORS].append(str(e))\n for w in action_dependency.check_false_unlock_library_actions():\n ret[_WARNINGS].append(str(w))\n return ret", "def activate_warnings(cls):\n cls.warnings = True\n # lib_config.warnings = True", "def non_comment_lines(self):\n return [_ for _ in self.stripped_whole_lines() if not _.startswith(\"#\")]", "def Warnings(doc):\n\twarnings = doc.GetWarnings()\n\twarningMessages =[w.GetDescriptionText() for w in warnings]\n\tcountOfWarnings = CountFrequency(warningMessages)\n\tallwarnings = [{\"Description\": key, \"Count\": value} for key, value in countOfWarnings.items()]\n\tif len(allwarnings) < 1:\n\t\tallwarnings.append({\"Description\": \"\", \"Count\": 0} )\n\treturn allwarnings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts text IB compilation log and creates HTML log with nice error links.
def ErrorLogHtml(self):
        content = []
        self._ResetState()
        for line in self.log_content.splitlines():
            content.append('%s\n' % self._ProcessLine(line))
        return self._HtmlHeader() + ''.join(content) + '</body></html>'
[ "def compile_log_result(self):\n file = self.source_file[:-4] + \".log\"\n log.info(\"[COMPILE LOG] log file is here: {}\".format(file))\n \n log_line = 'Warning: No build log. Check console for errors.'\n\n try:\n with open(file, encoding=\"utf16\", mode=\"r\") as f:\n for line in f:\n if len(line) > 2:\n log_line = \"[COMPILE LOG] \" + re.sub(r\"\\n|\\r\", \"\", line)\n log.info(log_line)\n\n except OSError:\n log.warning(\"Log file not found.\")\n\n # Write last log line to status\n self.window.active_view().set_status('mql_comp_build_log', log_line)", "def get_error_log(self):\n\n response = self._connection.execute(\n 'GET',\n self._get_link('log_link'),\n accept='application/abapgit.adt.repo.object.v2+xml'\n )\n\n log = ElementTree.fromstring(response.text)\n\n def text(element, key):\n return element.find(f'{ABAPGIT_OBJECT_XML_SPACE}{key}').text\n\n return '\\n'.join(\n f'{text(obj, \"msgType\")}: {text(obj, \"type\")} {text(obj, \"name\")}: {text(obj, \"msgText\")}'\n for obj in log.findall(f'{ABAPGIT_OBJECT_XML_SPACE}abapObject')\n if text(obj, 'msgType') != 'S')", "def write_text_browser_error(self, error):\n \n self.textbrowser.append(\"<font color=red>>> ERROR:</font> <font size=4 color=red> %s</font>\" % error)", "def render_report_results(course, import_log):\n\n course_key = u'{}'.format(\n course.location.course_key.to_deprecated_string()\n )\n\n jinja_environment = Environment()\n with open(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'templates',\n 'report.j2'\n )\n ) as template_file:\n template = jinja_environment.from_string(template_file.read())\n UTF8Writer = codecs.getwriter('utf8')\n sys.stdout = UTF8Writer(sys.stdout)\n print(template.render({'course': course, 'course_key': course_key}))\n\n print('\\033[0;33mPossible issues in course:\\033[0m')\n print('==========================')\n for line in import_log.split('\\n'):\n split_msg = line.split('|')\n if split_msg[0] in ('WARNING', 'ERROR', 'CRITICAL'):\n print(split_msg[1])", "def build_log(self):\r\n\r\n encoded_lines = [line.encode('utf-8') for line in self.log_lines]\r\n return \"\\n\".join(encoded_lines)", "async def error_to_text(req: Request, res: Response, exc: HTTPError):\n res.status_code = exc.status_code\n text = exc.title\n if exc.detail:\n text += f\"\\n{exc.detail}\"\n res.text = text", "def log_ocr_error(error):\n print_error(error)", "async def error_to_html(req: Request, res: Response, exc: HTTPError):\n res.status_code = exc.status_code\n html = f\"<h1>{exc.title}</h1>\"\n if exc.detail:\n html += f\"\\n<p>{exc.detail}</p>\"\n res.html = html", "def html2text(self, text):\n try:\n import html2text\n print(html2text.html2text(text))\n except ImportError:\n print(text)", "def _parse_text(self, message):\n message_split = message.split('\\n')\n title = message_split[2]\n log_url = message_split[-1]\n text = message_split[3]\n header = message_split[0].split(' - ')\n header_formatted = '{} - [Watchdog Log]({})'.format(header[0], header[1])\n text_formatted = '{}: ***{}***'.format(text.split(':', 1)[0], text.split(':', 1)[1])\n\n return title, log_url, '{}\\n\\n{}'.format(header_formatted, text_formatted)", "def _parse_html_error(content):\n\n msg = None\n if not content:\n return msg\n\n from xml.dom.minidom import Document, parse\n dom = parse(cStringIO.StringIO(content))\n msg = \"\"\n\n paragraphs = []\n if not isinstance(dom, Document):\n # Assume the output was the message.\n msg = content\n else:\n paragraphs = dom.getElementsByTagName(\"p\")\n\n # XXX this is specific 
to the depot server's current\n # error output style.\n for p in paragraphs:\n for c in p.childNodes:\n if c.nodeType == c.TEXT_NODE:\n value = c.nodeValue\n if value is not None:\n msg += (\"\\n{0}\".format(value))\n\n return msg", "def get_error_body(self):\n return (\n '<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\\n'\n '<title>%(code)s</title>\\n'\n '<h1>%(name)s</h1>\\n'\n ) % {\n 'code': self.code,\n 'name': self.name\n }", "def set_text(self, edit_token, project=None, text=None):\n # this will process the errors, even if the view is closed\n if project is not None:\n if not project.errors.failure:\n self.text = self.create_message()[1] + \\\n project.errors.text\n self.line_to_pos = project.errors.line_to_pos\n self.line_to_file = project.errors.line_to_file\n else:\n self.text = \"\\n\\n\\n%s\" % project.errors.failure\n self.line_to_pos = {}\n self.line_to_file = {}\n elif text is not None:\n self.text = text\n self.line_to_pos = {}\n self.line_to_file = {}\n super(Error, self).set_text(edit_token, self.text)", "def error(self, text):\n self.message('ERROR', text, color='red')", "def RenderChangelogHTML(self, tweak_data):\n element = \"\"\n try:\n for version in tweak_data['changelog'][::-1]:\n element += DepictionGenerator.ChangelogEntry(self, version['version'], version['changes'])\n return element\n except Exception:\n return \"This package has no changelog.\"", "def add_log(self, text):\n if type(text) is list:\n for each in text:\n print(f'LOG: {each}')\n self.info_win.insert(tk.END, f'$ {each}\\n')\n else:\n print(f'LOG: {text}')\n self.info_win.insert(tk.END, f'$ {text}\\n')", "def log_error(page,disp_only=False):\n\ttry:\n\t\tif not disp_only:\n\t\t\tprint 'Logging error on ' + page.title()\n\t\t\ttry:\n\t\t\t\tf1 = open(logfile, 'r')\n\t\t\t\told = f1.read()\n\t\t\t\tf1.close()\n\t\t\texcept IOError:\n\t\t\t\told = \"\"\n\t\t\tnew = old + '\\n' + page.title(asLink=True)\n\t\t\tf2 = open(logfile, 'w')\n\t\t\tf2.write(new)\n\t\t\tf2.close()\n\t\telse:\n\t\t\ttry:\n\t\t\t\tf1 = open(logfile, 'r')\n\t\t\t\ttxt = f1.read()\n\t\t\t\tf1.close()\n\t\t\texcept IOError:\n\t\t\t\ttxt = \"Log is non-existent.\"\t\n\t\t\tprint txt\n\texcept UnicodeEncodeError:\n\t\treturn #fail silently\n\texcept UnicodeDecodeError:\n\t\treturn #fail silently", "def formatAsLink(self, text, url):\n for c in self.badChars:\n text = text.replace(c, '')\n return '[' + text + '](' + url + ')'", "def processPhoenixSubmitLog(itr):\n # Consume the iterator\n for line in itr:\n line = line.strip()\n log.info(line)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a hash with project names that appeared as failed in the log with number of errors for that project.
def FailedProjects(self):
        if self.__failed_projects is None:
            self.__failed_projects = {}
            for line in self.log_content.splitlines():
                match = IbOutputParser.ERROR_MATCHER.match(line)
                if match and int(match.group(2)) > 0:
                    self.__failed_projects[match.group(1)] = int(match.group(2))
        return self.__failed_projects
[ "def get_errors(self, queue_id):\n try:\n errorlog = self._get_stderr_path(queue_id)\n except ValueError, e:\n errors = str(e)\n else:\n if os.path.exists(errorlog):\n err_f = open(errorlog, 'r')\n errors = err_f.read()\n err_f.close()\n \n\terrors += \"\\nReturned exit_status %d\"%self._check_job_return_status(queue_id) \n\n return errors", "def check_projects(config: Configuration, username: str) -> Dict[str, Any]:\n gigantum_root = config.app_workdir\n project_paths = glob.glob(f'{gigantum_root}/{username}/*/labbooks/*')\n inventory = InventoryManager()\n t0 = time.time()\n errors: Dict[str, Any] = {'errors': {}}\n for project_path in project_paths:\n try:\n # Try to load the labbook, and it's important fields.\n labbook = inventory.load_labbook_from_directory(project_path)\n _ = labbook.creation_date, labbook.modified_on, labbook.data\n except Exception as e:\n logger.error(e)\n errors['errors'][project_path.replace(gigantum_root, '')] = str(e)\n tfin = time.time()\n errors['_collectionTimeSec'] = float(f'{tfin - t0:.2f}')\n return errors", "def _GetFailedTestNames(self):\n return set(r.test_name for r in self._test_results if r.failed)", "def num_failed(self):\n return self._test_stats.failed", "def errors(self):\n return [thread.err for thread in self._threads]", "def get_fails():\n failed_downloads = []\n with open('failed_downloads.log','r') as logfile:\n for line in logfile:\n failed_downloads.append(line)\n\n print(\"{} bad files\".format(len(failed_downloads)))\n return set(failed_downloads)", "def get_failed(dfg=None):\n results = dict()\n results['data'] = list()\n\n jobs = models.Job.query.filter(\n models.Job.name.contains('DFG-%s' % dfg),\n (models.Job.last_build_result.like(\"FAILURE\") |\n models.Job.last_build_result.like(\"ABORTED\")))\n results = job_lib.construct_jobs_dictionary(jobs)\n\n return jsonify(results)", "def get_num_error_failures(self):\n return self.fails", "def getFailCount(self):\n num = 0\n for tc in self:\n if tc.result == TestStatus.Fail:\n num += 1\n return num", "def failed_analysis_workflow_count(self):\n return sum([wf_body['status'] in ('Failed', 'Aborted')\n for uuid, info in self.iter_bundles('primary')\n for wf_id, wf_body in info['analysis_workflows'].items()])", "def get_unique_issues(self) -> collections.Counter[str]:\n return collections.Counter(\n str(record[\"setup\"][\"git\"][\"issue_id\"]) for record in self.data\n )", "def _get_failed_builds(self):\n # CONSIDER: Omit archived projects?\n builds = []\n build_types = self._get_resource(self._BUILD_TYPES_RESOURCE)\n if self._COUNT_ATTRIBUTE in build_types and build_types[self._COUNT_ATTRIBUTE] > 0:\n for build_type in build_types[self._BUILD_TYPE_ATTRIBUTE]:\n build_type_id = build_type[self._ID_ATTRIBUTE]\n build_type_resource = self._BUILD_TYPE_RESOURCE_TEMPLATE.format(build_type_id=build_type_id)\n failed_builds = self._get_resource(build_type_resource)\n if self._COUNT_ATTRIBUTE in failed_builds and failed_builds[self._COUNT_ATTRIBUTE] > 0:\n builds.extend(failed_builds[self._BUILD_ATTRIBUTE])\n return builds", "def failedJobCount(workflowSpecId = None):\n try:\n return selectFailureCount(workflowSpecId)\n except StandardError, ex:\n msg = \"Error querying ProdMon DB Tables:\\n\"\n msg += str(ex)\n raise RuntimeError, msg", "def get_errors(self):\n result = []\n for error in self.errors:\n result.append(os.path.basename(error[0]) +\n ':\\n ' + str(error[1]) + '\\n')\n return result", "def get_cohort_error_messages(self):\n return self._get_cohort_messages(\"errors\")", "def 
_get_errors_msg(self):\n return (\"\\n\\nThere were some errors during soft_assert:\\n {}\\n\".format(\n string_utils.convert_list_to_str(self.__errors)))", "def findErrors():\n #Compile the regex \n m = re.compile(r\".*/.*\")\n\n #Create an array\n store_error_data = []\n\n #Create the dictionary\n error_dict = {}\n\n #Get file\n url = \"http://icarus.cs.weber.edu/~hvalle/cs3030/data/error.log.test\"\n \n #Find the errors and put them in the dictionary\n with urlopen(url) as errors:\n for find_error in errors:\n store_error_data.append(find_error.decode(\"utf-8\"))\n \n #Match the errors\n for lines in store_error_data:\n line_errors = lines.split()\n for words in line_errors:\n match_line = m.match(words)\n if match_line:\n \n #If there is a match increment the count\n if match_line.group() in error_dict:\n error_dict[match_line.group()] += 1\n else:\n error_dict[match_line.group()] = 1\n break \n \n #Print the errors\n print(\"***** Top 25 errors *****\")\n sorted_error_dict = sorted(error_dict, key=error_dict.get, reverse=True) \n for i in sorted_error_dict:\n print(error_dict[i], i)", "def get_error_summary(self, service_names):\n configs = {\n service_name: load_service_mconfig_as_json(service_name)\n for service_name in service_names\n }\n res = {\n service_name: Errors(\n log_level=configs[service_name].get('logLevel', 'INFO'),\n error_count=0,\n )\n for service_name in service_names\n }\n\n syslog_path = '/var/log/syslog'\n if not os.access(syslog_path, os.R_OK):\n raise PermissionError(\n 'syslog is not readable. '\n 'Try `sudo chmod a+r {}`. '\n 'Or execute the command with sudo '\n 'permissions: `venvsudo`'.format(syslog_path),\n )\n with open(syslog_path, 'r', encoding='utf-8', errors='ignore') as f:\n for line in f:\n for service_name in service_names:\n if service_name not in line:\n continue\n # Reset the counter for restart/start\n if 'Starting {}...'.format(service_name) in line:\n res[service_name].error_count = 0\n elif 'ERROR' in line:\n res[service_name].error_count += 1\n return res", "def failure_count(self) -> int:\n return pulumi.get(self, \"failure_count\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes project output lines
def _ProcessProjectOutputLine(self, match):
    project_id = int(match.group(1))
    if not project_id in self.__project_outputs:
        self.__project_outputs[project_id] = []
    self.__project_outputs[project_id].append(match.group(2))
    self.__project_outputs[project_id].append('\n')
[ "def __processOutputLine(self, line):\n if line[0] in \"ACIMR?!\" and line[1] == \" \":\n status, path = line.strip().split(\" \", 1)\n self.__generateItem(status, path)", "def _worker_output(self, line):\n line = line.replace('\\n', '')\n self._view.add_to_log(line)", "def handle_stdout(self, line):\n pass", "def run(self):\n\n codelines = defaultdict(lambda: 0)\n non_matches = 0\n\n # rewind log file in case other sections are walking the lines\n self.mloginfo.logfileOpen.seek(0, 0)\n\n # get log file information\n lfinfo = LogFile(self.mloginfo.logfileOpen)\n if lfinfo.start and lfinfo.end:\n progress_start = self.mloginfo._datetime_to_epoch(lfinfo.start)\n progress_total = self.mloginfo._datetime_to_epoch(lfinfo.end) - progress_start\n else:\n self.progress_bar_enabled = False\n\n for i, line in enumerate(self.mloginfo.logfileOpen):\n cl = self.log2code(line)\n\n # update progress bar every 1000 lines\n if self.progress_bar_enabled and (i % 1000 == 0):\n ll = LogLine(line)\n if ll.datetime:\n progress_curr = self.mloginfo._datetime_to_epoch(ll.datetime)\n self.mloginfo.update_progress(float(progress_curr-progress_start) / progress_total)\n\n if cl:\n codelines[cl.pattern] += 1\n else:\n ll = LogLine(line)\n if ll.operation:\n # skip operations (command, insert, update, delete, query, getmore)\n continue\n if not ll.thread:\n # skip the lines that don't have a thread name (usually map/reduce or assertions)\n continue\n if len(ll.split_tokens) - ll._thread_offset <= 1:\n # skip empty log messages (after thread name)\n continue\n if \"warning: log line attempted\" in ll.line_str and \"over max size\" in ll.line_str:\n # skip lines that are too long\n continue\n\n # everything else is a real non-match\n non_matches += 1\n if self.mloginfo.args['verbose']:\n print \"couldn't match:\", line,\n\n # clear progress bar again\n self.mloginfo.update_progress(1.0)\n\n if self.mloginfo.args['verbose']: \n print\n\n for cl in sorted(codelines, key=lambda x: codelines[x], reverse=True):\n print \"%8i\"%codelines[cl], \" \", \" ... \".join(cl)\n\n print\n if non_matches > 0:\n print \"distinct couldn't match %i lines\"%non_matches\n if not self.mloginfo.args['verbose']:\n print \"to show non-matched lines, run with --verbose.\"", "def parse_each_output(self, tool_obj, outcsv):\n return 0", "def updateLineParsing(self):\n self.titleLine = self.parseLine(self.getTitleLine())\n self.outputLines = [self.parseLine(line) for line in\n self.getOutputLines(False)]\n if self.origOutputLines:\n self.origOutputLines = [self.parseLine(line) for line in\n self.getOutputLines(True)]", "def process_lines(self, lines, file):\n return lines", "def parse_output(self, output):\n pass", "def finalizeCodeOutput(self,command,output,workingDir):\n output = 'Dpl_INSTANT.outp-0'\n #print (command)\n #print (workingDir)\n #print (output) \n #outfile = os.path.join(workingDir,output+'.o')\n splitWorkDir = workingDir.split('/')\n pertNumber = splitWorkDir[-1]\n outputobj=phisicsdata.phisicsdata(output, workingDir)\n return \"keff\"+str(pertNumber).strip()\n #if outputobj.hasAtLeastMinorData(): outputobj.writeCSV(os.path.join(workingDir,output+'.csv'))\n #else: raise IOError('Relap5 output file '+ command.split('-o')[0].split('-i')[-1].strip()+'.o' + ' does not contain any minor edits. 
It might be crashed!')", "def parse_output(self):\n\n with open_compat(self.tmp_file, 'r') as f:\n output = read_compat(f).splitlines()\n self.clean_tmp_file()\n\n error = None\n header_lines = []\n if self.debug:\n section = 'General'\n last_section = None\n for line in output:\n if section == 'General':\n if self.skippable_line(line):\n continue\n\n # Skip blank lines\n if line.strip() == '':\n continue\n\n # Error lines\n if line[0:5] == 'wget:':\n error = line[5:].strip()\n if line[0:7] == 'failed:':\n error = line[7:].strip()\n\n if line == '---request begin---':\n section = 'Write'\n continue\n elif line == '---request end---':\n section = 'General'\n continue\n elif line == '---response begin---':\n section = 'Read'\n continue\n elif line == '---response end---':\n section = 'General'\n continue\n\n if section != last_section:\n console_write(u\"Wget HTTP Debug %s\" % section, True)\n\n if section == 'Read':\n header_lines.append(line)\n\n console_write(u' ' + line)\n last_section = section\n\n else:\n for line in output:\n if self.skippable_line(line):\n continue\n\n # Check the resolving and connecting to lines for errors\n if re.match('(Resolving |Connecting to )', line):\n failed_match = re.search(' failed: (.*)$', line)\n if failed_match:\n error = failed_match.group(1).strip()\n\n # Error lines\n if line[0:5] == 'wget:':\n error = line[5:].strip()\n if line[0:7] == 'failed:':\n error = line[7:].strip()\n\n if line[0:2] == ' ':\n header_lines.append(line.lstrip())\n\n if error:\n raise NonHttpError(error)\n\n return self.parse_headers(header_lines)", "def processLines(self, lines):\n\n for line in lines:\n if len(line) == 0:\n continue\n\n if line[-1] == \"\\r\":\n line = line[:-1]\n\n # Automatically make P10 protocols have their lines parsed\n # differently\n lineobj = IRCLine(line, self.protocol.p10)\n\n #debug output\n if self.config[\"etc\"][\"debug\"]:\n self.log(line, \"<<<\")\n\n if lineobj.verb == \"ERROR\":\n #If ERROR is sent, it's already fatal.\n raise IOError\n\n #Handle server commands\n try:\n for impl in self.s2scommands[lineobj.verb]:\n try:\n impl(cod, lineobj)\n except KeyError as e:\n continue\n except Exception as e:\n if not self.config[\"etc\"][\"production\"]:\n self.servicesLog(\"%s %s %s\" %(type(e), e.message, lineobj))\n traceback.print_exc(file=sys.stdout)\n continue\n except KeyError:\n pass", "def __manage_output(self, check_results: Classes.CheckResults):\n color = \"\" if self.__folder else check_results.color\n output = self.__manage_check_results(check_results, color)\n if not self.__folder:\n # Printing the message to the screen.\n print(output)\n else:\n # Adding the file name and it's content to the dictionary.\n self.__files[check_results.headline] = COLOR_MANAGER.remove_colors(output)", "def prepareProcess(name, cmdline, cwd=None, lineBasedOutput=True, ignoreOutput=False):", "def do_proj(self, line):\n # lazy, should make general 'extract date' function\n n = int(line.split()[0])-1\n #fmt = '%H%M %m/%d/%y'\n #date = datetime.strptime(' '.join(line.split()[1:]), fmt)\n date = datetime.now() + timedelta(days=float(line.split()[1]))\n self.d.makeProject(n, date)", "def process_file(input_filename, lines):\n\n\t# TODO: this function should be made into two functions. 
One tha processes\n\t# the file and generates all of the data structures and one that calls all\n\t# of the backend specific functions that outputs the code.\n\n\t# open the output files\n#\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\" or g.OUTPUT == \"afl\" or g.OUTPUT == \"ocr\":\n#\t\tg.header_file_name = \"pil.h\"\n#\telif g.OUTPUT == \"swarm\":\n#\t\tg.header_file_name = \"pil.swh\"\n#\telse:\n#\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\tg.header_file_name = \"pil.h\"\n\n\tg.h_file = open(g.header_file_name, \"w\")\n\tg.h_file.write(\"#ifndef PIL_H\\n\")\n\tg.h_file.write(\"#define PIL_H\\n\")\n\tg.h_file.write(\"\\n\")\n\n\tg.h_file.write(\"#include <stdint.h>\\n\")\n\tg.h_file.write(\"\\n\")\n\n\tg.h_file.write(\"#ifdef PIL2OCR\\n\")\n\tg.h_file.write(\"#include \\\"ocr.h\\\"\\n\")\n\tg.h_file.write(\"typedef ocrGuid_t guid_t;\\n\")\n\tg.h_file.write(\"#else\\n\")\n\tg.h_file.write(\"#define NULL_GUID NULL\\n\")\n\tg.h_file.write(\"typedef void* guid_t;\\n\")\n\tg.h_file.write(\"#endif // PIL2OCR\\n\")\n\tg.h_file.write(\"\\n\")\n\n\tg.h_file.write(\"typedef struct {\\n\")\n\tg.h_file.write(\"\\tguid_t guid;\\n\")\n\tg.h_file.write(\"\\tvoid *ptr;\\n\")\n\tg.h_file.write(\"} gpp_t;\\n\")\n\tg.h_file.write(\"\\n\")\n\n#\tg.h_file.write(\"struct _pil_communication_buffers {\\n\")\n#\tg.h_file.write(\"\\tvoid *ptr;\\n\")\n#\tg.h_file.write(\"\\tint volatile full;\\n\")\n#\tg.h_file.write(\"\\tsize_t size;\\n\")\n#\tg.h_file.write(\"};\\n\")\n#\tg.h_file.write(\"struct _pil_communication_buffers **_pil_send_buf;\\n\")\n#\tg.h_file.write(\"\\n\")\n\n\t# data structure to store nodes we encounter in so that we can process them\n\t# all together later\n\tnodes = []\n\n\t# 1) print the header\n\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n\t\tpil2c.process_header()\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.process_header(input_filename)\n\telif g.OUTPUT == \"afl\":\n\t\tpil2afl.process_header()\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.process_header()\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\t# 2) process the file\n\tlineno = -1\n\twhile (lineno < len(lines)-1):\n\n\t\tlineno += 1\n\t\tl = lines[lineno]\n\n\t\t#line = re.split('\\s+', l)\n\t\tl = strip(l)\n\n\t\t# the line is empty\n\t\t#e = re.match('\\B', l)\n\t\t#if e:\n\t\tif l == '':\n\t\t\tprint l\n\t\t\tcontinue\n\n\t\t# the line is a comment\n\t\tc = re.match('#(.*)', l)\n\t\t# c.group(1) - the comment text\n\t\tif c:\n\t\t\td = re.match('#ifdef(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#ifndef(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#endif(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#else(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#include(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#undef(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#define(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\telse:\n\t\t\t\tprint \"//\" + c.group(1)\n\t\t\tcontinue\n\n\t\tc = re.match('//(.*)', l)\n\t\t# c.group(1) - the comment text\n\t\tif c:\n\t\t\tprint \"//\" + c.group(1)\n\t\t\tcontinue\n\n\t\t# the line is a C style block comment on a single line\n\t\t# TODO: still don't account for multi-line block comments\n\t\tc = re.match('/\\*(.*)\\*/', l)\n\t\t# c.group(1) - the comment text\n\t\tif c:\n\t\t\tprint \"/*\" + c.group(1) + \"*/\"\n\t\t\tcontinue\n\n\t\t# TODO: make a function to handle this\n\t\t# 
the line is a variable\n\t\tv = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\s*(=\\s*(NULL)\\s*){0,1});', l) # NULL initialization\n\t\t#v = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\s*);', l)\n\t\t# v.group(1) - the whole statement\n\t\t# v.group(2) - the variable type\n\t\t# v.group(3) - the variable modifier\n\t\t# v.group(4) - the variable name\n\t\t# v.group(5) - the assignment\n\t\t# v.group(6) - the variable initialization\n\t\tva = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\(\\*(\\w+)\\))\\[(\\w+)\\]s*);', l)\n\t\t# va.group(1) - the whole statement\n\t\t# va.group(2) - the variable type\n\t\t# va.group(3) - the variable modifier\n\t\t# va.group(4) - the variable name as (*name)\n\t\t# va.group(5) - the variable name\n\t\t# va.group(6) - the variable size\n\n\t\tvas1 = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\[(\\w+)\\]s*);', l)\n\t\t# va.group(1) - the whole statement\n\t\t# va.group(2) - the variable type\n\t\t# va.group(3) - the variable modifier\n\t\t# va.group(4) - the variable name\n\t\t# va.group(5) - the variable size\n\n\t\tvas2 = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\[(\\w+)\\]\\[(\\w+)\\]s*);', l)\n\t\t# va.group(1) - the whole statement\n\t\t# va.group(2) - the variable type\n\t\t# va.group(3) - the variable modifier\n\t\t# va.group(4) - the variable name\n\t\t# va.group(5) - the variable size\n\t\t# va.group(6) - the variable size\n\n\t\tif v:\n\t\t\tvar_type = v.group(2)\n\t\t\tvar_modifier = v.group(3)\n\t\t\tvar_name = v.group(4)\n\t\t\tvar_init = v.group(6)\n\t\t\tg.variables[var_name] = var_type\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tif var_init:\n\t\t\t\t#warning(\"Caught a NULL initialized pointer that won't be NULL initialized: '\" + var_name + \"'\")\n\t\t\t\tg.initial_values[var_name] = var_init\n\t\t\tcontinue\n\t\tif va:\n\t\t\tvar_type = va.group(2)\n\t\t\tvar_modifier = va.group(3)\n\t\t\tvar_sname = va.group(4)\n\t\t\tvar_name = va.group(5)\n\t\t\tvar_size = va.group(6)\n\t\t\tg.variables[var_name] = var_type\n\t\t\tg.arrays[var_name] = var_size\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tcontinue\n\t\tif vas1:\n\t\t\tvar_type = vas1.group(2)\n\t\t\tvar_modifier = vas1.group(3)\n\t\t\tvar_name = vas1.group(4)\n\t\t\tvar_sizex = vas1.group(5)\n\t\t\tdebug(4, \"VAS1 match: \" + var_name + \"\\n\")\n\t\t\tg.variables[var_name] = var_type\n\t\t\tg.arrays[var_name] = [var_sizex]\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tcontinue\n\t\tif vas2:\n\t\t\tvar_type = vas2.group(2)\n\t\t\tvar_modifier = vas2.group(3)\n\t\t\tvar_name = vas2.group(4)\n\t\t\tvar_sizex = vas2.group(5)\n\t\t\tvar_sizey = vas2.group(6)\n\t\t\tdebug(4, \"VAS2 match: \" + var_name + \"\\n\")\n\t\t\tg.variables[var_name] = var_type\n\t\t\tg.arrays[var_name] = [var_sizex, var_sizey]\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tcontinue\n\n\t\t# the line is a function declaration\n\t\tf = re.match('void\\s+\\w+\\s*\\(', l)\n\t\tif f:\n\t\t\t#debug(3, v.group(0))\n\t\t\tlineno = process_function(lines, lineno)\n\t\t\tcontinue\n\n\t\tpn = re.match('_pil_node\\s+(\\w+).*', l)\n\t\tif pn:\n\t\t\tlabel = pn.group(1);\n\t\t\tlineno = process_function(lines, lineno)\n\t\t\tcontinue\n\n\t\tpnl = re.match('_pil_nodelet\\s+(\\w+).*', l)\n\t\tif pnl:\n\t\t\tlabel = pnl.group(1);\n\t\t\tlineno = process_function(lines, lineno)\n\t\t\tcontinue\n\n\t\t# the line is a pil_send\n\t\ts = re.match(g.NW_SEND_RE, l)\n\t\tif 
s:\n\t\t\tg.nw_calls.append(process_nw_call(lines, lineno))\n\t\t\tcontinue\n\n\t\t# the line is a pil_send\n\t\tr = re.match(g.NW_RECV_RE, l)\n\t\tif r:\n\t\t\tg.nw_calls.append(process_nw_call(lines, lineno))\n\t\t\tcontinue\n\n\t\t# the line is a node\n\t\tm = re.match(g.MAP_RE, l)\n\t\tif m:\n\t\t\t# add the node to the nodes list for later processing\n\t\t\tnodes.append(process_node(lines, lineno))\n\t\t\tcontinue\n\n\t\t# if we have made it this far, the line is invalid\n\t\twarning(\"invalid line: \" + l)\n\n\t# 3) create the global data structure\n\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n\t\tpil2c.process_variables()\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.process_variables()\n\telif g.OUTPUT == \"afl\":\n\t\tpil2afl.process_variables()\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.process_variables()\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\t# 4) now that the globals are available, we can output pil_main and the body functions\n\tfor prototype in g.prototypes:\n\t\tprint prototype\n\n#\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n#\t\tpil2c.print_main_func()\n#\t\tpil2c.print_funcs()\n#\telif g.OUTPUT == \"swarm\":\n#\t\tpil2swarm.print_main_func()\n#\t\tpil2swarm.print_funcs()\n#\telif g.OUTPUT == \"afl\":\n#\t\tpil2afl.print_main_func()\n#\t\tpil2afl.print_funcs()\n#\telif g.OUTPUT == \"ocr\":\n#\t\tpil2ocr.print_main_func()\n#\t\tpil2ocr.print_funcs()\n#\telse:\n#\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\t# 5) process all of the nodes\n\tsplit_nodes = handle_nodes(nodes)\n\n\t# 6) output pil_enter()\n\tprocess_pil_enter(split_nodes)\n\n\t# 7) print the main function\n\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n\t\tpil2c.process_main()\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.process_main()\n\telif g.OUTPUT == \"afl\":\n\t\tpil2afl.process_main()\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.process_main(split_nodes)\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\tg.h_file.write(\"#endif // PIL_H\\n\")\n\tg.h_file.close()", "def processPhoenixSubmitLog(itr):\n # Consume the iterator\n for line in itr:\n line = line.strip()\n log.info(line)", "def compile_log_result(self):\n file = self.source_file[:-4] + \".log\"\n log.info(\"[COMPILE LOG] log file is here: {}\".format(file))\n \n log_line = 'Warning: No build log. 
Check console for errors.'\n\n try:\n with open(file, encoding=\"utf16\", mode=\"r\") as f:\n for line in f:\n if len(line) > 2:\n log_line = \"[COMPILE LOG] \" + re.sub(r\"\\n|\\r\", \"\", line)\n log.info(log_line)\n\n except OSError:\n log.warning(\"Log file not found.\")\n\n # Write last log line to status\n self.window.active_view().set_status('mql_comp_build_log', log_line)", "def execute(lines, targets='all'):", "def main(git_log):\n df = pd.read_csv(git_log, sep = '|', names = ['commit', 'message', 'author', 'email'])\n df['area'] = df['message'].apply(define_area)\n df['message'] = df['message'].apply(delete_prefix)\n\n # Split commits by areas\n core = df[df['area']==Area.core.value]\n tests = df[df['area']==Area.tests.value]\n build = df[df['area']==Area.build.value]\n apps = df[df['area']==Area.apps.value]\n docs = df[df['area']==Area.docs.value]\n other = df[df['area'].isna()]\n\n # Define individual contributors\n contributors = df.groupby(['author', 'email'])\n contributors = list(contributors.groups.keys())\n\n with open('release-notes.md', 'w') as f:\n f.write('# Release Notes\\n')\n\n f.write('\\n## API / ABI / Integration Changes\\n')\n f.write('\\n**API/ABI version: 1.x.**\\n')\n\n f.write('\\n## New Features and Improvements\\n')\n f.write('\\n## Important Bug Fixes\\n')\n f.write('\\n## Build\\n')\n f.write('\\n## Documentation\\n')\n\n f.write('\\n## Contributors\\n')\n for name, email in contributors:\n f.write(f'\\n{name} <{email}>')\n f.write('\\n')\n\n f.write('\\n## Changelog\\n')\n f.write('\\n<details><summary>Click to expand/collapse</summary>')\n f.write('\\n<p>')\n f.write('\\n')\n\n if not core.empty:\n f.write('\\n### Core Functionality')\n write_into_changelog(core, f)\n\n if not tests.empty:\n f.write('\\n### Unit Tests')\n write_into_changelog(tests, f)\n\n if not build.empty:\n f.write('\\n### Build Scripts (CMake, etc.)')\n write_into_changelog(build, f)\n\n if not apps.empty:\n f.write('\\n### Sample Applications')\n write_into_changelog(apps, f)\n\n if not docs.empty:\n f.write('\\n### Documentation')\n write_into_changelog(docs, f)\n\n if not other.empty:\n f.write('\\n### Other')\n write_into_changelog(other, f)\n\n f.write('\\n</p>')\n f.write('\\n</details>')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Using an unzipped, json package file with full urls, downloads a .deb package. Uses the 'Filename' key to download the .deb package
def download_dpkg(package_files, packages, workspace_name, versionsfile):
    package_to_rule_map = {}
    package_to_version_map = {}
    package_file_to_metadata = {}
    for pkg_name in set(packages.split(",")):
        pkg = {}
        for package_file in package_files.split(","):
            if package_file not in package_file_to_metadata:
                with open(package_file, 'rb') as f:
                    data = f.read()
                    package_file_to_metadata[package_file] = json.loads(data.decode('utf-8'))
            metadata = package_file_to_metadata[package_file]
            if (pkg_name in metadata and
                    (not VERSION_KEY in pkg or compare_versions(metadata[pkg_name][VERSION_KEY], pkg[VERSION_KEY]) > 0)):
                pkg = metadata[pkg_name]
        if (not pkg):
            raise Exception("Package: %s not found in any of the sources" % pkg_name)
        else:
            out_file = os.path.join("file", util.encode_package_name(pkg_name))
            download_and_save(pkg[FILENAME_KEY], out_file)
            package_to_rule_map[pkg_name] = util.package_to_rule(workspace_name, pkg_name)
            package_to_version_map[pkg_name] = pkg[VERSION_KEY]
            actual_checksum = util.sha256_checksum(out_file)
            expected_checksum = pkg[SHA256_KEY]
            if actual_checksum != expected_checksum:
                raise Exception("Wrong checksum for package %s %s (%s). Expected: %s, Actual: %s" %(pkg_name, os.getcwd() + "/" + out_file, pkg[FILENAME_KEY], expected_checksum, actual_checksum))
    with open(PACKAGE_MAP_FILE_NAME, 'w', encoding="utf-8") as f:
        f.write("packages = " + json.dumps(package_to_rule_map))
        f.write("\nversions = " + json.dumps(package_to_version_map))
    if versionsfile:
        with open(versionsfile, 'w', encoding="utf-8") as f:
            f.write(json.dumps(package_to_version_map, sort_keys=True, indent=4, separators=(',', ': ')))
            f.write('\n')
[ "def get_dpkg(name, release, dir):\n\n debian_repo = 'http://ftp.es.debian.org/debian/'\n sources_url = debian_repo + 'dists/' + release + '/source/Sources.gz'\n sources_file = os.path.join(dir, 'Sources.gz')\n urllib.request.urlretrieve(sources_url, sources_file)\n pkg_data = get_dpkg_data(sources_file, name)\n for file in pkg_data['components']:\n file_url = debian_repo + pkg_data['directory'] + \"/\" + file\n file_path = os.path.join(dir, file)\n logging.info (\"Downloading {} from {}\".format(file, file_url))\n urllib.request.urlretrieve(file_url, file_path)\n return os.path.join(dir, pkg_data['dsc'])", "def download_and_unpack(self, download_dir):\n pass", "def download_package_list(mirror_url, distro, arch, snapshot, sha256, packages_url, package_prefix):\n\n if bool(packages_url) != bool(package_prefix):\n raise Exception(\"packages_url and package_prefix must be specified or skipped at the same time.\")\n\n if (not packages_url) and (not mirror_url or not snapshot or not distro or not arch):\n raise Exception(\"If packages_url is not specified, all of mirror_url, snapshot, \"\n \"distro and arch must be specified.\")\n\n url = packages_url\n if not url:\n url = \"%s/debian/%s/dists/%s/main/binary-%s/Packages.xz\" % (\n mirror_url,\n snapshot,\n distro,\n arch\n )\n\n\n packages_copy = url.split('/')[-1]\n download_and_save(url, packages_copy)\n actual_sha256 = util.sha256_checksum(packages_copy)\n if sha256 != actual_sha256:\n raise Exception(\"sha256 of %s don't match: Expected: %s, Actual:%s\" %(packages_copy, sha256, actual_sha256))\n if packages_copy.endswith(\".gz\"):\n with gzip.open(packages_copy, 'rb') as f:\n data = f.read()\n else:\n with lzma.open(\"Packages.xz\", 'rb') as f:\n data = f.read()\n metadata = parse_package_metadata(data, mirror_url, snapshot, package_prefix)\n with open(PACKAGES_FILE_NAME, 'w', encoding=\"utf-8\") as f:\n json.dump(metadata, f)", "def download_package_details(input_file: IO[str], out_dir: str):\n os.makedirs(out_dir, exist_ok=True)\n\n package_iterator = map(lambda l: l.strip(), input_file)\n for packages in grouper(package_iterator, BULK_SIZE):\n for package, meta_data in bulk_fetch_details(packages).items():\n filename = '{}.json'.format(package)\n path = os.path.join(out_dir, filename)\n with open(path, 'w') as output_file:\n json.dump(meta_data, output_file, indent=2)\n time.sleep(DELAY)", "def download_from_dbox(url,out_file):\n\n response = urllib.request.urlopen(url)\n compressed_file = io.BytesIO(response.read())\n decompressed_file = gzip.GzipFile(fileobj=compressed_file)\n\n with open(out_file, 'wb') as outfile:\n outfile.write(decompressed_file.read())\n\n print(\"Download \" + out_file[:-4] + \" from dropbox succeed.\")", "def verified_download_full(package_list: tuple) -> dict:\n apt_url = metadata.get_apt_url()\n try:\n verified_info = download_and_verify_package_list(apt_url)\n return {package_name: download_package(package_name, verified_info) for package_name in package_list}\n except urllib.error.HTTPError:\n command.fail(\"unable to access apt branch\",\n \"do you have an apt branch at %s?\" % apt_url)", "def _download( self ):\n self._system.download_file(\"https://github.com/mastbaum/avalanche/tarball/\" + self._tar_name)", "def download_package_dict(package_name):\n\n global download_count, not_found_count\n\n download_count += 1\n if download_count % 1000 == 0:\n logger.info('Downloaded %s packages (%s not found)', download_count, not_found_count)\n\n response = session.get('https://pypi.python.org/pypi/%s/json' % 
package_name)\n if response.status_code == requests.codes.ok:\n return response.json()\n elif response.status_code == requests.codes.not_found:\n not_found_count += 1\n else:\n logger.warning('Unexpected error code for package %s: %s', package_name, response.status_code)", "def download_url():", "def download_wordlist_http(filedown):\n\n mkdir_if_not_exists(\"dictionaries\")\n\n # List of files to download:\n arguments = {\n 1: (\n \"Moby\",\n (\n \"mhyph.tar.gz\",\n \"mlang.tar.gz\",\n \"moby.tar.gz\",\n \"mpos.tar.gz\",\n \"mpron.tar.gz\",\n \"mthes.tar.gz\",\n \"mwords.tar.gz\",\n ),\n ),\n 2: (\"afrikaans\", (\"afr_dbf.zip\",)),\n 3: (\"american\", (\"dic-0294.tar.gz\",)),\n 4: (\"aussie\", (\"oz.gz\",)),\n 5: (\"chinese\", (\"chinese.gz\",)),\n 6: (\n \"computer\",\n (\n \"Domains.gz\",\n \"Dosref.gz\",\n \"Ftpsites.gz\",\n \"Jargon.gz\",\n \"common-passwords.txt.gz\",\n \"etc-hosts.gz\",\n \"foldoc.gz\",\n \"language-list.gz\",\n \"unix.gz\",\n ),\n ),\n 7: (\"croatian\", (\"croatian.gz\",)),\n 8: (\"czech\", (\"czech-wordlist-ascii-cstug-novak.gz\",)),\n 9: (\"danish\", (\"danish.words.gz\", \"dansk.zip\")),\n 10: (\n \"databases\",\n (\"acronyms.gz\", \"att800.gz\", \"computer-companies.gz\", \"world_heritage.gz\"),\n ),\n 11: (\n \"dictionaries\",\n (\n \"Antworth.gz\",\n \"CRL.words.gz\",\n \"Roget.words.gz\",\n \"Unabr.dict.gz\",\n \"Unix.dict.gz\",\n \"englex-dict.gz\",\n \"knuth_britsh.gz\",\n \"knuth_words.gz\",\n \"pocket-dic.gz\",\n \"shakesp-glossary.gz\",\n \"special.eng.gz\",\n \"words-english.gz\",\n ),\n ),\n 12: (\"dutch\", (\"words.dutch.gz\",)),\n 13: (\n \"finnish\",\n (\"finnish.gz\", \"firstnames.finnish.gz\", \"words.finnish.FAQ.gz\"),\n ),\n 14: (\"french\", (\"dico.gz\",)),\n 15: (\"german\", (\"deutsch.dic.gz\", \"germanl.gz\", \"words.german.gz\")),\n 16: (\"hindi\", (\"hindu-names.gz\",)),\n 17: (\"hungarian\", (\"hungarian.gz\",)),\n 18: (\"italian\", (\"words.italian.gz\",)),\n 19: (\"japanese\", (\"words.japanese.gz\",)),\n 20: (\"latin\", (\"wordlist.aug.gz\",)),\n 21: (\n \"literature\",\n (\n \"LCarrol.gz\",\n \"Paradise.Lost.gz\",\n \"aeneid.gz\",\n \"arthur.gz\",\n \"cartoon.gz\",\n \"cartoons-olivier.gz\",\n \"charlemagne.gz\",\n \"fable.gz\",\n \"iliad.gz\",\n \"myths-legends.gz\",\n \"odyssey.gz\",\n \"sf.gz\",\n \"shakespeare.gz\",\n \"tolkien.words.gz\",\n ),\n ),\n 22: (\"movieTV\", (\"Movies.gz\", \"Python.gz\", \"Trek.gz\")),\n 23: (\n \"music\",\n (\n \"music-classical.gz\",\n \"music-country.gz\",\n \"music-jazz.gz\",\n \"music-other.gz\",\n \"music-rock.gz\",\n \"music-shows.gz\",\n \"rock-groups.gz\",\n ),\n ),\n 24: (\n \"names\",\n (\n \"ASSurnames.gz\",\n \"Congress.gz\",\n \"Family-Names.gz\",\n \"Given-Names.gz\",\n \"actor-givenname.gz\",\n \"actor-surname.gz\",\n \"cis-givenname.gz\",\n \"cis-surname.gz\",\n \"crl-names.gz\",\n \"famous.gz\",\n \"fast-names.gz\",\n \"female-names-kantr.gz\",\n \"female-names.gz\",\n \"givennames-ol.gz\",\n \"male-names-kantr.gz\",\n \"male-names.gz\",\n \"movie-characters.gz\",\n \"names.french.gz\",\n \"names.hp.gz\",\n \"other-names.gz\",\n \"shakesp-names.gz\",\n \"surnames-ol.gz\",\n \"surnames.finnish.gz\",\n \"usenet-names.gz\",\n ),\n ),\n 25: (\n \"net\",\n (\n \"hosts-txt.gz\",\n \"inet-machines.gz\",\n \"usenet-loginids.gz\",\n \"usenet-machines.gz\",\n \"uunet-sites.gz\",\n ),\n ),\n 26: (\"norwegian\", (\"words.norwegian.gz\",)),\n 27: (\n \"places\",\n (\n \"Colleges.gz\",\n \"US-counties.gz\",\n \"World.factbook.gz\",\n \"Zipcodes.gz\",\n \"places.gz\",\n ),\n ),\n 28: (\"polish\", 
(\"words.polish.gz\",)),\n 29: (\n \"random\",\n (\n \"Ethnologue.gz\",\n \"abbr.gz\",\n \"chars.gz\",\n \"dogs.gz\",\n \"drugs.gz\",\n \"junk.gz\",\n \"numbers.gz\",\n \"phrases.gz\",\n \"sports.gz\",\n \"statistics.gz\",\n ),\n ),\n 30: (\"religion\", (\"Koran.gz\", \"kjbible.gz\", \"norse.gz\")),\n 31: (\"russian\", (\"russian.lst.gz\", \"russian_words.koi8.gz\")),\n 32: (\n \"science\",\n (\n \"Acr-diagnosis.gz\",\n \"Algae.gz\",\n \"Bacteria.gz\",\n \"Fungi.gz\",\n \"Microalgae.gz\",\n \"Viruses.gz\",\n \"asteroids.gz\",\n \"biology.gz\",\n \"tech.gz\",\n ),\n ),\n 33: (\"spanish\", (\"words.spanish.gz\",)),\n 34: (\"swahili\", (\"swahili.gz\",)),\n 35: (\"swedish\", (\"words.swedish.gz\",)),\n 36: (\"turkish\", (\"turkish.dict.gz\",)),\n 37: (\"yiddish\", (\"yiddish.gz\",)),\n }\n\n # download the files\n\n intfiledown = int(filedown)\n\n if intfiledown in arguments:\n\n dire = \"dictionaries/\" + arguments[intfiledown][0] + \"/\"\n mkdir_if_not_exists(dire)\n files_to_download = arguments[intfiledown][1]\n\n for fi in files_to_download:\n url = CONFIG[\"global\"][\"dicturl\"] + arguments[intfiledown][0] + \"/\" + fi\n tgt = dire + fi\n download_http(url, tgt)\n\n print(\"[+] files saved to \" + dire)\n\n else:\n print(\"[-] leaving.\")", "def do_package_download(mc, args):\n\n def download_to_fh(package_id, fh):\n fh.write(mc.packages.download(package_id))\n\n try:\n if args.filename:\n with open(args.filename, 'wb') as fh:\n download_to_fh(args.id, fh)\n print(\"Package downloaded to %s\" % args.filename)\n elif not sys.stdout.isatty():\n download_to_fh(args.id, sys.stdout)\n else:\n msg = ('No stdout redirection or local file specified for '\n 'downloaded package. Please specify a local file to save '\n 'downloaded package or redirect output to another source.')\n raise exceptions.CommandError(msg)\n except common_exceptions.HTTPNotFound:\n raise exceptions.CommandError(\"Package %s not found\" % args.id)", "def download_data_packages(repos=REPOS):\n for repo_name, package_url in repos.items():\n try:\n os.mkdir(repo_name)\n except FileExistsError:\n pass\n repo_dir = Path(repo_name)\n repo_file = repo_dir / \"datapackage.json\"\n r = requests.get(package_url)\n\n with open(repo_file, \"w\") as f:\n f.write(r.text)", "def download():\n # Download the zip\n target = 'https://github.com/downloads/banterability/pluggablemaps-congressionaldistricts/cd99_110_shp.zip'\n destination = os.path.join(data_dir, 'cd99_110_shp.zip')\n urllib.urlretrieve(target, destination)\n # Unzip it\n fh = open(destination, 'rb')\n zfile = zipfile.ZipFile(fh)\n for name in zfile.namelist():\n path = os.path.join(data_dir, name)\n out = open(path, 'wb')\n out.write(zfile.read(name))\n out.close()\n fh.close()", "def _download(self):\n self._system.download(\"ftp://root.cern.ch/root/\" + self._tar_name)", "def download_and_unzip_data(\n url = \"https://storage.googleapis.com/simpeg/bookpurnong/bookpurnong_inversion.tar.gz\"\n):\n # download the data\n downloads = Utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory", "def download_file(self, file_name, url):\n\n destination_dir = os.path.join(self.root, 'RawData')\n download_file(destination_dir, file_name, url)", "def run_downloader(machine, output, urls, force):\n\n global WORKDIR\n\n out = {'2.discover': [], '3.download': [], '4.check': []}\n lslpp_file = os.path.join(WORKDIR, 
'lslpp_{}.txt'.format(machine))\n for url in urls:\n protocol, srv, rep, name = re.search(r'^(.*?)://(.*?)/(.*)/(.*)$', url).groups()\n logging.debug('{}: protocol={}, srv={}, rep={}, name={}'\n .format(machine, protocol, srv, rep, name))\n if '.epkg.Z' in name:\n ################################\n # URL as an efix file\n ################################\n logging.debug('{}: treat url as an epkg file'.format(machine))\n out['2.discover'].extend(name)\n\n # download epkg file\n epkg = os.path.abspath(os.path.join(WORKDIR, name))\n if download(url, epkg):\n out['3.download'].extend(epkg)\n\n # check prerequisite\n if check_prereq(epkg, lslpp_file, machine, force):\n out['4.check'].extend(epkg)\n\n elif '.tar' in name:\n ################################\n # URL as a tar file\n ################################\n logging.debug('{}: treat url as a tar file'.format(machine))\n dst = os.path.abspath(os.path.join(WORKDIR, name))\n\n # download and open tar file\n download(url, dst)\n tar = tarfile.open(dst, 'r')\n\n # find all epkg in tar file\n epkgs = [epkg for epkg in tar.getnames() if re.search(r'(\\b[\\w.-]+.epkg.Z\\b)$', epkg)]\n out['2.discover'].extend(epkgs)\n logging.debug('{}: found {} epkg.Z file in tar file'.format(machine, len(epkgs)))\n\n # extract epkg\n tar_dir = os.path.join(WORKDIR, 'tardir')\n if not os.path.exists(tar_dir):\n os.makedirs(tar_dir)\n for epkg in epkgs:\n try:\n tar.extract(epkg, tar_dir)\n except Exception as exc:\n logging.warn('EXCEPTION {}'.format(exc))\n increase_fs(tar_dir)\n tar.extract(epkg, tar_dir)\n epkgs = [os.path.abspath(os.path.join(tar_dir, epkg)) for epkg in epkgs]\n out['3.download'].extend(epkgs)\n\n # check prerequisite\n epkgs = [epkg for epkg in epkgs if check_prereq(epkg, lslpp_file, machine, force)]\n out['4.check'].extend(epkgs)\n else:\n ################################\n # URL as a Directory\n ################################\n logging.debug('{}: treat url as a directory'.format(machine))\n response = urllib.urlopen(url, context=ssl._create_unverified_context())\n\n # find all epkg in html body\n epkgs = [epkg for epkg in re.findall(r'(\\b[\\w.-]+.epkg.Z\\b)', response.read())]\n epkgs = list(set(epkgs))\n out['2.discover'].extend(epkgs)\n logging.debug('{}: found {} epkg.Z file in html body'.format(machine, len(epkgs)))\n\n # download epkg\n epkgs = [os.path.abspath(os.path.join(WORKDIR, epkg)) for epkg in epkgs\n if download(os.path.join(url, epkg), os.path.abspath(os.path.join(WORKDIR, epkg)))]\n out['3.download'].extend(epkgs)\n\n # check prerequisite\n epkgs = [epkg for epkg in epkgs if check_prereq(epkg, lslpp_file, machine, force)]\n out['4.check'].extend(epkgs)\n output.update(out)", "def download_and_unzip_package(package):\n\n # TODO: Will this work for all downloads? 
Are they all .whl files?\n # TODO: In the future, analyze dependencies too.\n\n if os.path.exists(\"pkg-source\"):\n shutil.rmtree(\"pkg-source\")\n os.mkdir(\"pkg-source\")\n\n # Download from pip and place in pkg-source directory\n subprocess.check_call(\n [\n sys.executable,\n \"-m\",\n \"pip\",\n \"-q\",\n \"download\",\n \"--no-dependencies\",\n \"--destination-directory\",\n \"pkg-source/.\",\n package,\n ]\n )\n\n # Identify and unzip any .whl files\n file_list = glob.glob(\"pkg-source/*.whl\")\n for file in file_list:\n with zipfile.ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(\"pkg-source\")", "def download_file(out_dir, file_name, url):\n\n if not os.path.exists(out_dir):\n print('Creating new directory {}'.format(out_dir))\n os.mkdir(out_dir)\n\n print('Downloading data from {}'.format(url))\n\n file_handle, _ = urlretrieve(url)\n\n split_name = os.path.splitext(file_name)\n if split_name[1] == '.zip':\n zip_ref = zipfile.ZipFile(file_handle, 'r')\n zip_ref.extractall(os.path.join(out_dir, split_name[0]))\n elif split_name[1] == '.gz':\n tar_ref = tarfile.open(file_handle, 'r:gz')\n tar_ref.extractall(os.path.join(out_dir, split_name[0]))\n else:\n os.rename(file_handle, os.path.join(out_dir, file_name))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads a debian package list, expands the relative urls, and saves the metadata as a json file. A debian package list is a (xz|gzip)ipped, newline delimited, colon separated file with metadata about all the packages available in that repository. Multiline keys are indented with spaces.
def download_package_list(mirror_url, distro, arch, snapshot, sha256, packages_url, package_prefix):
    if bool(packages_url) != bool(package_prefix):
        raise Exception("packages_url and package_prefix must be specified or skipped at the same time.")

    if (not packages_url) and (not mirror_url or not snapshot or not distro or not arch):
        raise Exception("If packages_url is not specified, all of mirror_url, snapshot, "
                        "distro and arch must be specified.")

    url = packages_url
    if not url:
        url = "%s/debian/%s/dists/%s/main/binary-%s/Packages.xz" % (
            mirror_url,
            snapshot,
            distro,
            arch
        )

    packages_copy = url.split('/')[-1]
    download_and_save(url, packages_copy)
    actual_sha256 = util.sha256_checksum(packages_copy)
    if sha256 != actual_sha256:
        raise Exception("sha256 of %s don't match: Expected: %s, Actual:%s" %(packages_copy, sha256, actual_sha256))
    if packages_copy.endswith(".gz"):
        with gzip.open(packages_copy, 'rb') as f:
            data = f.read()
    else:
        with lzma.open("Packages.xz", 'rb') as f:
            data = f.read()
    metadata = parse_package_metadata(data, mirror_url, snapshot, package_prefix)
    with open(PACKAGES_FILE_NAME, 'w', encoding="utf-8") as f:
        json.dump(metadata, f)
[ "def download_package_details(input_file: IO[str], out_dir: str):\n os.makedirs(out_dir, exist_ok=True)\n\n package_iterator = map(lambda l: l.strip(), input_file)\n for packages in grouper(package_iterator, BULK_SIZE):\n for package, meta_data in bulk_fetch_details(packages).items():\n filename = '{}.json'.format(package)\n path = os.path.join(out_dir, filename)\n with open(path, 'w') as output_file:\n json.dump(meta_data, output_file, indent=2)\n time.sleep(DELAY)", "def update_package_list():\n log_helper = logging_helper.logging_helper.Logger()\n data_collector = sysinfo_ops.DataCollect()\n\n # Determine architecture and proper repository\n config = manage_config.read_config_file()\n base_url = config.get('DefaultRepo', 'base_repo')\n curated_url = base_url + '/' + 'curated.xml.gz'\n local_path = '/tmp/curated.xml.gz'\n local_file = 'curated.txt'\n\n # Download and decompress the curated list\n # todo: this needs to return 'False' on timeout and give a json status of 'fail'\n shell_ops.run_command('timeout 5 wget %s -O %s' % (curated_url, local_path))\n data_ops.uncompress(local_path, local_file)\n build_package_database()\n\n # Remove tar file after use\n try:\n os.remove(local_path)\n except: # todo: This needs to throw an error. Try 'except (OSError, IOError):'\n pass\n\n # From the UI if json == null then the response failed (timed out)\n response = ({\n 'status': 'success'\n })\n response = json.dumps(response)\n log_helper.logger.debug(\"Finished updating package list: '%s'\" % response)\n return response", "def download_dpkg(package_files, packages, workspace_name, versionsfile):\n package_to_rule_map = {}\n package_to_version_map = {}\n package_file_to_metadata = {}\n for pkg_name in set(packages.split(\",\")):\n pkg = {}\n for package_file in package_files.split(\",\"):\n if package_file not in package_file_to_metadata:\n with open(package_file, 'rb') as f:\n data = f.read()\n package_file_to_metadata[package_file] = json.loads(data.decode('utf-8'))\n metadata = package_file_to_metadata[package_file]\n if (pkg_name in metadata and\n (not VERSION_KEY in pkg or compare_versions(metadata[pkg_name][VERSION_KEY], pkg[VERSION_KEY]) > 0)):\n pkg = metadata[pkg_name]\n if (not pkg):\n raise Exception(\"Package: %s not found in any of the sources\" % pkg_name)\n else:\n out_file = os.path.join(\"file\", util.encode_package_name(pkg_name))\n download_and_save(pkg[FILENAME_KEY], out_file)\n package_to_rule_map[pkg_name] = util.package_to_rule(workspace_name, pkg_name)\n package_to_version_map[pkg_name] = pkg[VERSION_KEY]\n actual_checksum = util.sha256_checksum(out_file)\n expected_checksum = pkg[SHA256_KEY]\n if actual_checksum != expected_checksum:\n raise Exception(\"Wrong checksum for package %s %s (%s). 
Expected: %s, Actual: %s\" %(pkg_name, os.getcwd() + \"/\" + out_file, pkg[FILENAME_KEY], expected_checksum, actual_checksum))\n with open(PACKAGE_MAP_FILE_NAME, 'w', encoding=\"utf-8\") as f:\n f.write(\"packages = \" + json.dumps(package_to_rule_map))\n f.write(\"\\nversions = \" + json.dumps(package_to_version_map))\n if versionsfile:\n with open(versionsfile, 'w', encoding=\"utf-8\") as f:\n f.write(json.dumps(package_to_version_map, sort_keys=True, indent=4, separators=(',', ': ')))\n f.write('\\n')", "def download_data_packages(repos=REPOS):\n for repo_name, package_url in repos.items():\n try:\n os.mkdir(repo_name)\n except FileExistsError:\n pass\n repo_dir = Path(repo_name)\n repo_file = repo_dir / \"datapackage.json\"\n r = requests.get(package_url)\n\n with open(repo_file, \"w\") as f:\n f.write(r.text)", "def get_data_offline():\n global constructed_packages_list_new\n constructed_packages_list_new = []\n curated_packages = []\n\n log_helper = logging_helper.logging_helper.Logger()\n log_helper.logger.debug(\"Starting Build...\")\n\n # -------------------------------------------------\n # ------------- Step 1: Gather info ---------------\n # -------------------------------------------------\n\n # Get the latest installed packages list\n if sysinfo_ops.os_type == 'ubuntu':\n my_list, my_dict = manage_package_deb.get_installed_packages_deb()\n else:\n my_list, my_dict = get_installed_packages_new()\n\n # Get the info for curated packages\n try:\n file_path = os.path.dirname(os.path.realpath(__file__))\n my_file = codecs.open(file_path + '/' + 'curated.txt', 'r')\n curated_packages = json.loads(my_file.read()) # list of json\n my_file.close()\n except Exception as e:\n log_helper.logger.error('Read curated.txt failed with ' + str(e))\n\n # Create a list of dict for curated packages, this can be used later..... dict key checking is\n # more efficient (due to hash table) than linear loop search\n for pc in curated_packages:\n try:\n pc['curated'] = True\n pc['group'] = \"\"\n if pc['name'] in my_list:\n pc['installed'] = True\n pc['version'] = my_dict[pc['name']]\n else:\n pc['installed'] = False\n pc['version'] = \"\"\n except Exception as e:\n log_helper.logger.error(str(e) + ' for ' + pc['name'])\n continue\n return json.dumps(curated_packages)", "def verified_download_full(package_list: tuple) -> dict:\n apt_url = metadata.get_apt_url()\n try:\n verified_info = download_and_verify_package_list(apt_url)\n return {package_name: download_package(package_name, verified_info) for package_name in package_list}\n except urllib.error.HTTPError:\n command.fail(\"unable to access apt branch\",\n \"do you have an apt branch at %s?\" % apt_url)", "def print_package_urls():\n for software in Config.package_list:\n print software['url']", "def pkg_info_json(folder=None):\r\n # ---- Checks\r\n if not folder:\r\n folder = sys.prefix + \"\\\\conda-meta\"\r\n folder = Path(folder)\r\n if not folder.is_dir():\r\n print(\"\\nInvalid path... 
{}\".format(folder))\r\n return\r\n files = list(folder.glob(\"*.json\"))\r\n if not files:\r\n print(\"{} doesn't have any json files\".format(folder))\r\n return\r\n #\r\n # --- Package, Filename, Dependencies\r\n packages = []\r\n m0 = m1 = m2 = 0\r\n for f in files:\r\n ret = parse_json(f, key=\"depends\") # ---- look at dependencies only\r\n nme = str(f.name).rsplit(\"-\", 2)[0] # ---- split off the last two\r\n if len(ret) == 1:\r\n ret = ret[0]\r\n elif len(ret) > 1:\r\n srted = sorted(ret)\r\n ret = \"; \".join([i for i in srted if \"py\" not in i]) # `; ` used\r\n else:\r\n ret = \"None\"\r\n m0 = max(m0, len(nme))\r\n m1 = max(m1, len(str(f.name)))\r\n m2 = max(m2, len(ret))\r\n packages.append((nme, f.name, ret))\r\n dt1 = [(\"Package\", \"<U{}\".format(m0)), (\"Filename\", \"<U{}\".format(m1)),\r\n (\"Dependencies\", \"<U{}\".format(m2))]\r\n packages = np.asarray(packages, dtype=dt1)\r\n #\r\n # ---- Dependency, Counts\r\n z = []\r\n for dep in packages['Dependencies']:\r\n if dep not in (\"\", \" \"):\r\n z += dep.split(\"; \") # split on `; ` delimiter\r\n z = np.asarray(z)\r\n uniq, idx, cnts = np.unique(z, return_index=True, return_counts=True)\r\n uniq2 = [[u, u.split(\" \")[0]][\" \" in u] for u in uniq if u != \"\"]\r\n m0 = max(np.char.str_len(uniq2))\r\n m1 = np.max(np.char.str_len(uniq2)) + 5\r\n dt2 = [(\"Full_name\", \"<U{}\".format(m0)), (\"Counts\", \"i8\"),\r\n (\"Simple_name\", \"<U{}\".format(m1))]\r\n dep_counts = np.asarray(list(zip(uniq, cnts, uniq2)), dtype=dt2)\r\n #\r\n # ---- Package, Required_by\r\n required_by = []\r\n names = packages['Package']\r\n depends = packages['Dependencies']\r\n max_len = 0\r\n for nme in names:\r\n if nme in ('py', 'python'):\r\n required_by.append([nme, \"many\"])\r\n continue\r\n w = names[[nme in i for i in depends]]\r\n if np.size(w) > 0:\r\n v = w.tolist()\r\n v0 = \"; \".join([i.split(\"; \")[0] for i in v])\r\n max_len = max(max_len, len(v0))\r\n required_by.append([nme, v0])\r\n else:\r\n required_by.append([nme, \"None\"])\r\n r_dt = \"<U{}\".format(max_len)\r\n dt = np.dtype([('Package', '<U30'), ('Required_by', r_dt)])\r\n required_by = uts(np.asarray(required_by), dtype=dt)\r\n return packages, dep_counts, required_by", "def download_package_dict(package_name):\n\n global download_count, not_found_count\n\n download_count += 1\n if download_count % 1000 == 0:\n logger.info('Downloaded %s packages (%s not found)', download_count, not_found_count)\n\n response = session.get('https://pypi.python.org/pypi/%s/json' % package_name)\n if response.status_code == requests.codes.ok:\n return response.json()\n elif response.status_code == requests.codes.not_found:\n not_found_count += 1\n else:\n logger.warning('Unexpected error code for package %s: %s', package_name, response.status_code)", "def create_download_command_list(downloadlist_filename, runjson_input):\n with open(downloadlist_filename, 'w') as f:\n for category in [\"Input_files_data\", \"Secondary_files_data\"]:\n for inkey, v in getattr(runjson_input, category).items():\n if v.mount: # do not download if it will be mounted\n continue\n if inkey.startswith('file://'):\n target = inkey.replace('file://', '')\n print(\"key %s will be downloaded to target %s\" % (v.path, inkey))\n run_on_nested_arrays2(v.path, target, add_download_cmd, data_bucket=v.dir_,\n profile=v.profile, f=f, unzip=v.unzip)\n else:\n target_template = INPUT_DIR + \"/%s\"\n if not v.rename or len(flatten(v.rename)) == 0:\n rename = create_dim(v.path, empty=True)\n else:\n rename = v.rename\n 
run_on_nested_arrays2(v.path, rename, add_download_cmd, data_bucket=v.dir_,\n profile=v.profile, f=f, unzip=v.unzip, target_template=target_template)", "def parse_package(string_package):\n res = {'dsc': string_package}\n current_field = None\n for line in string_package.splitlines():\n if not line.startswith(' '):\n field, contents = line.split(\":\", 1)\n current_field = field\n contents = contents.strip()\n if field in [\"Package\", \"Source\"]:\n res[\"Source\"] = contents\n # Single line string\n elif field in [\"Format\", \"Version\", \"Maintainer\",\n \"Homepage\", \"Vcs-Browser\", \"Testsuite\",\n \"Standards-Version\", \"Section\", \"Priority\",\n \"Directory\"]:\n res[field] = contents\n elif field in [\"Vcs-Arch\", \"Vcs-Bzr\", \"Vcs-Cvs\", \"Vcs-Darcs\",\n \"Vcs-Git\", \"Vcs-Hg\", \"Vcs-Mtn\", \"Vcs-Svn\"]:\n res[field] = contents\n res[\"Vcs\"] = contents\n # Single line comma separated\n elif field in [\"Binary\", \"Uploaders\", \"Testsuite-Triggers\",\n \"Build-Depends\", \"Build-Depends-Indep\",\n \"Build-Depends-Arch\", \"Build-Conflicts\",\n \"Build-Conflicts-Indep\", \"Build-Conflicts-Arch\"]:\n res[field] = list(map(lambda x: x.strip(),\n contents.split(\",\")))\n # Single line space separated\n elif field in [\"Architecture\"]:\n res[field] = list(map(lambda x: x.strip(),\n contents.split(\" \")))\n elif field in [\"Dgit\"]:\n res[field] = contents.split(\" \")[0]\n elif field in [\"Package-List\", \"Checksums-Sha1\",\n \"Checksums-Sha256\", \"Files\"]:\n res[field] = []\n else:\n contents = line.strip()\n if current_field == \"Package-List\":\n contents = contents.split(\" \")\n contents = {\n \"name\": contents[0],\n \"type\": contents[1],\n \"section\": contents[2],\n \"priority\": contents[3],\n \"arch\": [] if len(contents) < 5 else\n contents[4].split(\"=\")[1].split(\",\")}\n res[current_field].append(contents)\n elif current_field in [\"Files\", \"Checksums-Sha1\",\n \"Checksums-Sha256\"]:\n contents = contents.split(\" \")\n contents = {\n \"checksum\": contents[0],\n \"size\": contents[1],\n \"filename\": contents[2]}\n res[current_field].append(contents)\n return res", "def download_wordlist_http(filedown):\n\n mkdir_if_not_exists(\"dictionaries\")\n\n # List of files to download:\n arguments = {\n 1: (\n \"Moby\",\n (\n \"mhyph.tar.gz\",\n \"mlang.tar.gz\",\n \"moby.tar.gz\",\n \"mpos.tar.gz\",\n \"mpron.tar.gz\",\n \"mthes.tar.gz\",\n \"mwords.tar.gz\",\n ),\n ),\n 2: (\"afrikaans\", (\"afr_dbf.zip\",)),\n 3: (\"american\", (\"dic-0294.tar.gz\",)),\n 4: (\"aussie\", (\"oz.gz\",)),\n 5: (\"chinese\", (\"chinese.gz\",)),\n 6: (\n \"computer\",\n (\n \"Domains.gz\",\n \"Dosref.gz\",\n \"Ftpsites.gz\",\n \"Jargon.gz\",\n \"common-passwords.txt.gz\",\n \"etc-hosts.gz\",\n \"foldoc.gz\",\n \"language-list.gz\",\n \"unix.gz\",\n ),\n ),\n 7: (\"croatian\", (\"croatian.gz\",)),\n 8: (\"czech\", (\"czech-wordlist-ascii-cstug-novak.gz\",)),\n 9: (\"danish\", (\"danish.words.gz\", \"dansk.zip\")),\n 10: (\n \"databases\",\n (\"acronyms.gz\", \"att800.gz\", \"computer-companies.gz\", \"world_heritage.gz\"),\n ),\n 11: (\n \"dictionaries\",\n (\n \"Antworth.gz\",\n \"CRL.words.gz\",\n \"Roget.words.gz\",\n \"Unabr.dict.gz\",\n \"Unix.dict.gz\",\n \"englex-dict.gz\",\n \"knuth_britsh.gz\",\n \"knuth_words.gz\",\n \"pocket-dic.gz\",\n \"shakesp-glossary.gz\",\n \"special.eng.gz\",\n \"words-english.gz\",\n ),\n ),\n 12: (\"dutch\", (\"words.dutch.gz\",)),\n 13: (\n \"finnish\",\n (\"finnish.gz\", \"firstnames.finnish.gz\", \"words.finnish.FAQ.gz\"),\n ),\n 14: 
(\"french\", (\"dico.gz\",)),\n 15: (\"german\", (\"deutsch.dic.gz\", \"germanl.gz\", \"words.german.gz\")),\n 16: (\"hindi\", (\"hindu-names.gz\",)),\n 17: (\"hungarian\", (\"hungarian.gz\",)),\n 18: (\"italian\", (\"words.italian.gz\",)),\n 19: (\"japanese\", (\"words.japanese.gz\",)),\n 20: (\"latin\", (\"wordlist.aug.gz\",)),\n 21: (\n \"literature\",\n (\n \"LCarrol.gz\",\n \"Paradise.Lost.gz\",\n \"aeneid.gz\",\n \"arthur.gz\",\n \"cartoon.gz\",\n \"cartoons-olivier.gz\",\n \"charlemagne.gz\",\n \"fable.gz\",\n \"iliad.gz\",\n \"myths-legends.gz\",\n \"odyssey.gz\",\n \"sf.gz\",\n \"shakespeare.gz\",\n \"tolkien.words.gz\",\n ),\n ),\n 22: (\"movieTV\", (\"Movies.gz\", \"Python.gz\", \"Trek.gz\")),\n 23: (\n \"music\",\n (\n \"music-classical.gz\",\n \"music-country.gz\",\n \"music-jazz.gz\",\n \"music-other.gz\",\n \"music-rock.gz\",\n \"music-shows.gz\",\n \"rock-groups.gz\",\n ),\n ),\n 24: (\n \"names\",\n (\n \"ASSurnames.gz\",\n \"Congress.gz\",\n \"Family-Names.gz\",\n \"Given-Names.gz\",\n \"actor-givenname.gz\",\n \"actor-surname.gz\",\n \"cis-givenname.gz\",\n \"cis-surname.gz\",\n \"crl-names.gz\",\n \"famous.gz\",\n \"fast-names.gz\",\n \"female-names-kantr.gz\",\n \"female-names.gz\",\n \"givennames-ol.gz\",\n \"male-names-kantr.gz\",\n \"male-names.gz\",\n \"movie-characters.gz\",\n \"names.french.gz\",\n \"names.hp.gz\",\n \"other-names.gz\",\n \"shakesp-names.gz\",\n \"surnames-ol.gz\",\n \"surnames.finnish.gz\",\n \"usenet-names.gz\",\n ),\n ),\n 25: (\n \"net\",\n (\n \"hosts-txt.gz\",\n \"inet-machines.gz\",\n \"usenet-loginids.gz\",\n \"usenet-machines.gz\",\n \"uunet-sites.gz\",\n ),\n ),\n 26: (\"norwegian\", (\"words.norwegian.gz\",)),\n 27: (\n \"places\",\n (\n \"Colleges.gz\",\n \"US-counties.gz\",\n \"World.factbook.gz\",\n \"Zipcodes.gz\",\n \"places.gz\",\n ),\n ),\n 28: (\"polish\", (\"words.polish.gz\",)),\n 29: (\n \"random\",\n (\n \"Ethnologue.gz\",\n \"abbr.gz\",\n \"chars.gz\",\n \"dogs.gz\",\n \"drugs.gz\",\n \"junk.gz\",\n \"numbers.gz\",\n \"phrases.gz\",\n \"sports.gz\",\n \"statistics.gz\",\n ),\n ),\n 30: (\"religion\", (\"Koran.gz\", \"kjbible.gz\", \"norse.gz\")),\n 31: (\"russian\", (\"russian.lst.gz\", \"russian_words.koi8.gz\")),\n 32: (\n \"science\",\n (\n \"Acr-diagnosis.gz\",\n \"Algae.gz\",\n \"Bacteria.gz\",\n \"Fungi.gz\",\n \"Microalgae.gz\",\n \"Viruses.gz\",\n \"asteroids.gz\",\n \"biology.gz\",\n \"tech.gz\",\n ),\n ),\n 33: (\"spanish\", (\"words.spanish.gz\",)),\n 34: (\"swahili\", (\"swahili.gz\",)),\n 35: (\"swedish\", (\"words.swedish.gz\",)),\n 36: (\"turkish\", (\"turkish.dict.gz\",)),\n 37: (\"yiddish\", (\"yiddish.gz\",)),\n }\n\n # download the files\n\n intfiledown = int(filedown)\n\n if intfiledown in arguments:\n\n dire = \"dictionaries/\" + arguments[intfiledown][0] + \"/\"\n mkdir_if_not_exists(dire)\n files_to_download = arguments[intfiledown][1]\n\n for fi in files_to_download:\n url = CONFIG[\"global\"][\"dicturl\"] + arguments[intfiledown][0] + \"/\" + fi\n tgt = dire + fi\n download_http(url, tgt)\n\n print(\"[+] files saved to \" + dire)\n\n else:\n print(\"[-] leaving.\")", "def dataset_description_file(BIDS_DIR, XNAT, project):\n\n BIDSVERSION = \"1.0.1\"\n dataset_description = dict()\n dataset_description['BIDSVersion'] = BIDSVERSION\n dataset_description['Name'] = project\n dataset_description['DatasetDOI'] = XNAT.host\n project_info = XNAT.select('/project/' + project).get()\n project_info = ET.fromstring(project_info)\n PI_element = project_info.findall('{http://nrg.wustl.edu/xnat}PI')\n if 
len(PI_element) > 0:\n dataset_description['Author'] = PI_element[0][1].text, PI_element[0][0].text\n else:\n dataset_description['Author'] = \"No Author defined on XNAT\"\n dd_file = os.path.join(BIDS_DIR, project)\n if not os.path.exists(dd_file):\n os.makedirs(dd_file)\n with open(os.path.join(dd_file, 'dataset_description.json'), 'w+') as f:\n json.dump(dataset_description, f, indent=2)", "def _get_cluster_package_list(serve_dir: str=SERVE_DIR, package_list_base_dir: str=PACKAGE_LIST_DIR) -> str:\n latest_filename = os.path.join(SERVE_DIR, 'cluster-package-list.latest')\n if not os.path.exists(latest_filename):\n err_msg = 'Unable to find {}'.format(latest_filename)\n log.error(err_msg)\n log.error('You must run genconf.py before attempting Deploy.')\n raise ExecuteException(err_msg)\n\n with open(latest_filename) as f:\n latest_id = f.read().strip()\n\n package_list_filename = os.path.join(package_list_base_dir, '{}.package_list.json'.format(latest_id))\n if not os.path.exists(package_list_filename):\n err_msg = 'Unable to find {}'.format(package_list_filename)\n log.error(err_msg)\n log.error('You must run genconf.py before attempting Deploy.')\n raise ExecuteException(err_msg)\n\n return package_list_filename", "def bintray_descriptor_json(bintray_repository_name: str,\n bintray_subject: str,\n version: str,\n revision: str,\n version_tag: str,\n package_path: str,\n config: PackageConfig) -> str:\n package_dir = os.path.dirname(package_path)\n package_filename = os.path.basename(package_path)\n include_pattern = '%s/(%s)' % (package_dir, package_filename,)\n\n descriptor = {\n \"package\": {\n \"name\": config.name(),\n \"repo\": bintray_repository_name,\n \"subject\": bintray_subject,\n \"desc\": config.summary(),\n \"website_url\": config.url(),\n \"vcs_url\": config.git_url(),\n \"github_use_tag_release_notes\": True,\n \"github_release_notes_file\": config.changelog_file(),\n \"licenses\": [\n config.license(),\n ],\n \"labels\": config.tags(),\n \"public_download_numbers\": True,\n \"public_stats\": True,\n },\n\n \"version\": {\n \"name\": '%s-%s' % (version, revision,),\n \"desc\": \"%s (%s)\" % (version, revision,),\n \"released\": datetime.datetime.today().strftime('%Y-%m-%d'),\n \"vcs_tag\": version_tag,\n \"gpgSign\": True,\n },\n\n \"files\": [\n {\n \"includePattern\": include_pattern,\n \"uploadPattern\": \"$1\",\n \"matrixParams\": {\n \"override\": 1,\n\n # Used for .deb files only\n \"deb_distribution\": 'stable',\n \"deb_component\": 'main',\n \"deb_architecture\": 'all',\n }\n }\n ],\n \"publish\": True,\n }\n return json.dumps(descriptor)", "def processUrls(fp, urls, rootElement):\n fp.write('{\"%s\":[\\n' % rootElement)\n allObjects = [] \n for url in urls:\n print \"Processing %s\" % url\n obj = parseUrl(url)\n if obj != None:\n if obj.identifier != None:\n allObjects.append(obj) \n else:\n print \"Missing identifier in %s. 
Ignore\" % url\n # sort by issued date, starting with newest dates\n allObjects = sorted(allObjects, key=getIssuedDate, reverse=True)\n # iterate through objects and indicate whether it is the latest or not\n identifiers = [] \n for obj in allObjects:\n if (obj.identifier is not None and obj.identifier not in identifiers):\n identifiers.append(obj.identifier) \n obj.isLatest=True\n else:\n print 'The extension or vocabulary with URL %s issued %s is deprecated or superseded by one in production' % (obj.url, obj.issued)\n # write each object to the JSON file\n first = True;\n for obj in allObjects:\n if (not first):\n fp.write(',\\n')\n json.dump(obj.__dict__, fp, default=json_serial)\n first = False;\n fp.write('\\n]}')\n return allObjects", "def package_jsons():\n rootdir = path.dirname(path.dirname(path.realpath(__file__)))\n for root, dirs, files in os.walk(rootdir):\n if 'node_modules' in dirs:\n dirs.remove('node_modules')\n\n if 'package.json' in files:\n yield path.join(root, 'package.json')", "def update_datapackage(self):\n\n if self.source == \"git\":\n file_content = _get_data_from_url(self.metadata_uri)\n\n if not file_content.status_code == 200:\n file_error_msg = \"Could not fetch remote file: {}; {}\".format(\n self.url, file_content.status_code\n )\n click.ClickException(file_error_msg)\n # file_content = json.dumps([{\"url\": self.url, \"error\": file_error_msg}])\n else:\n file_content = file_content.json() # .decode(self.decode)\n elif self.source == \"s3\":\n raise NotImplementedError(\n \"Directly get dataherb.json from S3 is not yet implemented.\"\n )\n\n self.datapackage_meta = file_content\n\n self.herb_meta_json[\"datapackage\"] = self.datapackage_meta\n\n self.datapackage = Package(self.datapackage_meta)\n\n return self.datapackage", "def list_package(bucket, package):\n\n # figure out key name from package and release requested and what's\n # available in the bucket...\n pkg_name = None if package is None else package.project_name\n package_releases = []\n for key in bucket.get_all_keys():\n if package is None or key.name.startswith(\"{}/\".format(pkg_name)):\n package_base, _, pkg_full_name = key.name.partition(\"/\")\n if not pkg_full_name:\n continue\n if package is None:\n if package_base not in package_releases:\n package_releases.append(package_base)\n elif pkg_name == safe_name(package_base):\n key_pkg = parse_package_file(pkg_full_name, package)\n for spec in package.specs:\n if not spec[0](key_pkg.specs[0][1], spec[1]):\n break\n else:\n package_releases.append(pkg_full_name)\n\n if package is None:\n package_releases.sort()\n print(\"\\n\".join(package_releases))\n else:\n print_versioned(package_releases, package)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get config of the component with the given index in the pipeline.
def component_config_from_pipeline(
    index: int,
    pipeline: List[Dict[Text, Any]],
    defaults: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
    try:
        c = pipeline[index]
        return override_defaults(defaults, c)
    except IndexError:
        raise_warning(
            f"Tried to get configuration value for component "
            f"number {index} which is not part of your pipeline. "
            f"Returning `defaults`."
        )
        return override_defaults(defaults, {})
[ "def get(self, idx: int) -> ConfigEntity:\n return self._configs[idx]", "def index_config(self) -> Optional[pulumi.Input['FieldIndexConfigArgs']]:\n return pulumi.get(self, \"index_config\")", "def get_compartment_by_index(net_index: int, comp_index: int) -> CompartmentData:\n return _translate_compartment(_controller.get_compartment_by_index(net_index, comp_index))", "def get_parameter(self, index):\n result = None\n if index < len(self.paramorder):\n key = self.paramorder[index]\n if key in self._parameters:\n result = self._parameters[key]\n\n return result", "def get_conf(self, comp, conf_name):\r\n for cfg in comp.configuration_sets[0].configuration_data:\r\n if cfg.name == conf_name:\r\n return cfg.data\r\n return None", "def calc_config(index):\n ids = np.unique(index['ID'])\n config = {}\n for id in ids:\n if id not in [21, 24, 26]:\n continue\n inds = index['ID'] == id\n _config = index['config'][inds]\n _beams_cy = index['beams_cy'][inds]\n # Check that these variables are consistent\n if not isuniform(_config):\n raise Exception(\"config are not identical for id: 0x{:X}.\"\n .format(id))\n if not isuniform(_beams_cy):\n raise Exception(\"beams_cy are not identical for id: 0x{:X}.\"\n .format(id))\n # Now that we've confirmed they are the same:\n config[id] = headconfig_int2dict(_config[0])\n config[id].update(beams_cy_int2dict(_beams_cy[0], id))\n config[id]['_config'] = _config[0]\n config[id]['_beams_cy'] = _beams_cy[0]\n config[id].pop('cy')\n return config", "def get_compartment_of_node(net_index: int, node_index: int) -> int:\n return _controller.get_compartment_of_node(net_index, node_index)", "def get_config(self, key: str):\n query_configs = self.request['url']['query']\n body_configs = self.request['body'].get('nboost', {})\n request_configs = {**query_configs, **body_configs}\n cli_config = self.cli_configs[key]\n config = request_configs.get(key, cli_config)\n return type(cli_config)(config)", "def get_mysql_config(name=None, index=None):\n if not name and not index:\n return MYSQL_SERVERS[0].client_config.copy()\n\n if name:\n for server in MYSQL_SERVERS:\n if server.name == name:\n return server.client_config.copy()\n elif index:\n return MYSQL_SERVERS[index].client_config.copy()\n\n return None", "def get(self, index: 'int const') -> \"int32_t\":\n return _coin.SoGLColorIndexElement_get(self, index)", "def get_config(self):\n return self.ag_config", "def __getitem__(self, index):\n if isinstance(index, six.string_types):\n # find the index of the port with the given name\n index = self.keys().index(index)\n return list.__getitem__(self, index)", "def _split_component_id_and_config(\n component_index: int, component_configuration_json: Dict\n ) -> ComponentId:\n # author, name, version, type are mandatory fields\n missing_fields = {\"public_id\", \"type\"}.difference(\n component_configuration_json.keys()\n )\n if len(missing_fields) > 0:\n raise ValueError(\n f\"There are missing fields in component id {component_index + 1}: {missing_fields}.\"\n )\n public_id_str = component_configuration_json.pop(\"public_id\")\n component_type = ComponentType(component_configuration_json.pop(\"type\"))\n component_public_id = PublicId.from_str(public_id_str)\n component_id = ComponentId(component_type, component_public_id)\n return component_id", "def get_instance(self, index):\n return self.instances[index]", "def get_by_index(self, index):\n return self.tile_list[index]", "async def config():\n with http_error_handling():\n return pipeline.config.as_dict()", "def 
pick_config(self, issuer):\n return self.client_configs[issuer]", "def get_conf(self, name='global'):\n return self.cluster_configuration_manager.get_object(name)", "def __getitem__(self, index):\n return self._renderers[index]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test module bthe_b.py by downloading bthe_b.csv and testing shape of extracted data has 100 rows and 8 columns
def test_bthe_b():
    test_path = tempfile.mkdtemp()
    x_train, metadata = bthe_b(test_path)
    try:
        assert x_train.shape == (100, 8)
    except:
        shutil.rmtree(test_path)
        raise()
[ "def test_hunt_csv_2():\n ints = rng.integers(0, 100, 8)\n breaks = rng.choice((\"\\r\", \"\\n\", \"\\r\\n\"), 3)\n block = (\n f\"alpha{ints[0]},{ints[1]}{breaks[0]}\"\n f\"{ints[2]},{ints[3]}{breaks[1]}{ints[4]},{ints[5]}{breaks[2]}\"\n f\"{ints[6]},{ints[7]}beta\"\n )\n table = hunt_csv(re.compile(r\"(?<=alpha).*(?=beta)\", re.M + re.S), block)\n table = list([list(map(int, line)) for line in table])\n assert list(map(sum, table)) == list(map(sum, chunked(ints, 2)))", "def example_bed():\n yield pd.read_csv(\"tests/test_data/encoding_test.bed\", sep=\"\\t\", header=None)", "def load_bc_dataset():\n # import ipdb\n # ipdb.set_trace()\n f_train = \"data/Training_Data.txt\"\n f_test = \"data/Testing_Data.txt\"\n h_train, data_train = load_dataset(f_train)\n h_test, data_test = load_dataset(f_test)\n assert h_train == h_test, \"training data file header: {}\\\n is not equal to testing file header: {}\".format(h_train, h_test)\n n_col = len(h_train)\n assert data_train.shape[1] == n_col & data_test.shape[1] == n_col,\\\n \"training data feature num: {} should equal testing data feature num:\\\n {}\".format(data_train.shape[1], data_test.shape[1])\n # index_train = data_train[:, 0]\n # index_test = data_test[:, 0]\n X_train = data_train[:, 1:-1]\n X_test = data_test[:, 1:-1]\n y_train = data_train[:, -1]\n y_test = data_test[:, -1]\n\n # index = np.concatenate((index_train, index_test))\n X = np.vstack((X_train, X_test))\n y = np.concatenate((y_train, y_test)).astype(np.int)\n assert y.sum() == 115\n return X, y", "def load_boston():\n module_path = dirname(__file__)\n\n data_file_name = join(module_path, 'data', 'boston_house_prices.csv')\n with open(data_file_name) as f:\n data_file = csv.reader(f)\n temp = next(data_file)\n n_samples = int(temp[0])\n n_features = int(temp[1])\n data = np.empty((n_samples, n_features))\n target = np.empty((n_samples,))\n temp = next(data_file)\n feature_names = np.array(temp)\n\n for i, d in enumerate(data_file):\n data[i] = np.asarray(d[:-1], dtype=np.float64)\n target[i] = np.asarray(d[-1], dtype=np.float64)\n\n return Bunch(data=data,\n target=target)", "def test_humann2_split_tables_tsv(self):\n\n input_file=cfg.multi_sample_genefamilies_biom\n\n # create a temp directory\n temp_directory=utils.create_temp_folder(\"split_tables_biom\")\n\n # split the file\n utils.run_command([\"humann2_split_table\",\"--input\", input_file,\n \"--output\",temp_directory,\"--verbose\"])\n\n # test the split files are as expected\n for file in cfg.multi_sample_split_files_biom:\n self.assertTrue(utils.check_output(file,temp_directory))\n\n # remove the temp folder\n utils.remove_temp_folder(temp_directory)", "def test_read_bond_fields_HTM(self):\n filename = os.path.join(get_current_path(), 'samples', 'CLM BAL 2017-07-27.xls')\n wb = open_workbook(filename=filename)\n ws = wb.sheet_by_name('Portfolio Val.')\n row = 55 # the bond section starts at A56\n\n fields, n = read_bond_fields(ws, row)\n self.assertEqual(n, 4)\n self.assertEqual(len(fields), 15)\n\n f = ''\n for s in fields:\n f = f + s + ', '\n\n # check the fields are read correctly\n self.assertEqual(f, \n 'par_amount, is_listed, listed_location, fx_on_trade_day, coupon_rate, coupon_start_date, maturity_date, average_cost, amortized_cost, book_cost, interest_bought, amortized_value, accrued_interest, amortized_gain_loss, fx_gain_loss, ')", "def test_get_Bed(self):\r\n coordinates_input_file = \"InputTestFilesSection3/TrainingSets/other_active_region/HepG2/input_data/\" \\\r\n \"infile_coordinates.bed\"\r\n 
input_file = \"InputTestFilesSection3/TrainingSets/other_active_region/HepG2/input_data/infile_expression.txt\"\r\n output_file = \"InputTestFilesSection3/Geuv_MGProbes_testbed_outfile.bed\"\r\n x = WeightFeatures.getBed(coordinates_input_file, input_file, output_file)\r\n expected_outfile = \"InputTestFilesSection3/Geuv_MGProbes_expectbed_outfile.bed\"\r\n assert compare_files(output_file, expected_outfile)\r\n return", "def test_bs(self):\n result = StringIO()\n infile = open('test/data/bs.csv')\n # the content of bs.xml is validated against the xsd schema via xmllint,\n # an opensource command-line tools\n expecting_file = open('test/data/bs.xml', 'r')\n expecting = expecting_file.read()\n expecting_file.close()\n csvobj2xmlobj(infile, result, 'BeneficiarySummary')\n infile.close()\n self.assertEqual(result.getvalue(), expecting)", "def test_parse_b1(self):\n with open(PROCESS01_B1, 'rb') as fp:\n info = parse_jpg(fp)\n file_length = fp.tell()\n\n # Order by offset\n keys = sorted(info.keys(), key=lambda x: int(x.split('@')[1]))\n\n tempfile = NamedTemporaryFile()\n with open(tempfile.name, 'w') as tfile:\n tfile.write(\n \"Parser output for ' testb1.jpg', May 8, 1994, \"\n \"WBP and JLM (IBM)\\n\"\n )\n # Data\n for key in keys:\n writer = WRITERS[key[:3]]\n (marker, fill_bytes, data) = info[key]\n marker = '{:04x}'.format(marker)\n (name, offset) = key.split('@')\n if fill_bytes:\n marker = 'ff' * fill_bytes + marker\n tfile.write(' {:>2} FIL bytes\\n'.format(fill_bytes))\n writer(tfile, offset, marker, name, data)\n\n # End\n tfile.write(\n '{} markers found in {} bytes of compressed data\\n'\n .format(len(keys), file_length)\n )\n tfile.seek(0)\n\n with open(tempfile.name, 'r') as tfile:\n with open(PROCESS01_B1_REF, 'r', encoding='utf-8', errors='ignore') as rfile:\n for out, ref in zip(tfile, rfile):\n assert ref == out", "def importPLINKDATA(self, bfile):\n filename = bfile + '.bim'\n self.SNPs = pd.read_table(\n bfile+'.bim', sep=None, names=['CHR', 'RSID', 'Cm', 'POS', 'ALT', 'REF'], engine='python')\n self.Samples = pd.read_table(bfile+'.fam', sep=None,\n names=['FID', 'IID', 'PID', 'MID', 'Sex', 'Pheno'], engine='python')\n self.nSNPs = self.SNPs.shape[0]\n self.nSamples = self.Samples.shape[0]\n filename = bfile + '.bed'\n num_bytes = math.ceil(self.nSamples / 4.0)\n GENO = np.fromfile(filename, dtype=np.uint8, count=-1)\n GENO = GENO[3:]\n self.GENO = np.reshape(GENO, (num_bytes, - 1), order='F')", "def read_haha2019_file(file_loc, header=True, test=False, encoding=\"utf-8\"):\n \n with open(file_loc, \"r\", encoding=encoding) as f:\n r = csv.reader(f)\n \n if header:\n next(r) #pop the header row\n \n documents=[]\n for row in r:\n \n if test:\n id_num, tweet = row\n #no ratings included in test format. 
Default to Nones\n bin_label, num_ratings, num_1s, num_2s, num_3s, num_4s, num_5s, avg_rating = None, None, None, None, None, None, None, None\n \n else:\n id_num, tweet, bin_label, num_ratings, num_1s, num_2s, num_3s, num_4s, num_5s, avg_rating = row\n \n bin_label = int(bin_label)\n \n if avg_rating:\n if avg_rating == \"NULL\":\n avg_rating = 0.0\n else:\n avg_rating = float(avg_rating)\n \n documents.append((id_num, tweet, bin_label, avg_rating))\n \n return documents", "def test_basic_example_g_booleans(self):\n test_file = os.path.join(DATA_DIR, 'TypedCsvReader.example.g.csv')\n csv_file = open(test_file)\n test_file = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')\n num_lines = 0\n for row in test_file:\n num_lines += 1\n self.assertTrue(row['a'])\n self.assertFalse(row['b'])\n self.assertEqual(4, num_lines)\n csv_file.close()", "def testOsteosarcomaAblationDataset(self):\n csvName = \"csvs/cyclin_dataset.csv\"\n if \"keiser\" in hostname:\n DATA_DIR = \"/srv/nas/mk1/users/dwong/tifs/\" #where the raw images are located\n else:\n DATA_DIR = \"/data1/wongd/tifs/\"\n dataset = OsteosarcomaAblationDataset(csvName, DATA_DIR, thresh_percent=1.0) #full ablation dataset - all channel 0 input pixels should be fully ablated and set to 0 value\n generator = data.DataLoader(dataset, sampler = SubsetRandomSampler(list(range(0, len(dataset)))))\n i = 0\n ## iterate over a random subset of our data to test \n for names, local_batch, local_labels in generator:\n ## make sure data range is bounded correctly\n self.assertTrue(0 <= torch.max(local_batch) <= 255)\n ## make sure inputs and labels are correctly shaped\n self.assertEqual(tuple(local_batch.shape), (1, 1, 1104, 1104))\n self.assertEqual(tuple(local_labels.shape), (1, 1104, 1104))\n ## make sure all of input is ablated\n self.assertEqual(np.count_nonzero(local_batch.cpu().numpy()), 0)\n i += 1\n if i > sample_size:\n break", "def test_parse_biom(self):\n correct_biom = ([[ 1., 0., 1., 6.], [ 0., 5., 1., 10.], [ 4., 7., 9., 8.]], [u'OTU0', u'OTU1', u'OTU2', u'OTU3'], [u'ID0', u'ID1', u'ID2'])\n\n parsed_biom = fizzy.parse_biom(self.biom_file_handle)\n self.assertEqual(parsed_biom, correct_biom)", "def test_bids():\n test_path = tempfile.mkdtemp()\n x_train, metadata = bids(test_path)\n try:\n assert x_train.shape == (126, 12)\n except:\n shutil.rmtree(test_path)\n raise()", "def read_tble_bho(file_tble_bho):\r\n df_tble_bho = pd.read_excel(file_tble_bho)\r\n\r\n # required cols\r\n cols = ['cotrecho','cobacia',\r\n 'nucomptrec','nuareacont','nuareamont',\r\n 'nutrjus','dedominial','nustrahler',\r\n 'nuordemcda','cocursodag','cocdadesag','nudistbact',\r\n 'nunivotto',\r\n ]\r\n\r\n df_tble_bho = df_tble_bho[cols]\r\n\r\n # apply dtypes\r\n bho_dtypes = {\r\n 'fid':pd.Int64Dtype(),\r\n 'drn_pk':int,\r\n 'cotrecho':int,\r\n 'noorigem':int,\r\n 'nodestino':int,\r\n 'cocursodag':str,\r\n 'cobacia':str,\r\n 'nucomptrec':float,\r\n 'nudistbact':float,\r\n 'nudistcdag':float,\r\n 'nuareacont':float,\r\n 'nuareamont':float,\r\n 'nogenerico':str,\r\n 'noligacao':str,\r\n 'noespecif':str,\r\n 'noriocomp':str,\r\n 'nooriginal':str,\r\n 'cocdadesag':str,\r\n 'nutrjus':pd.Int32Dtype(),\r\n 'nudistbacc':float,\r\n 'nuareabacc':float,\r\n 'nuordemcda':pd.Int32Dtype(),\r\n 'nucompcda':float,\r\n 'nunivotto':int,\r\n 'nunivotcda':pd.Int32Dtype(),\r\n 'nustrahler':pd.Int32Dtype(),\r\n 'dedominial':str,\r\n 'dsversao':str,\r\n 'cobacia_50k':str,\r\n 'lat':float,\r\n 'lon':float,\r\n }\r\n hmap = {k:v for k,v in bho_dtypes.items() if k in 
df_tble_bho.columns}\r\n df_tble_bho = df_tble_bho.astype(hmap)\r\n\r\n return df_tble_bho", "def test_parse_b2(self):\n with open(PROCESS01_B2, 'rb') as fp:\n info = parse_jpg(fp)\n file_length = fp.tell()\n\n # Order by offset\n keys = sorted(info.keys(), key=lambda x: int(x.split('@')[1]))\n\n tempfile = NamedTemporaryFile()\n with open(tempfile.name, 'w') as tfile:\n tfile.write(\n \"Parsed version of ' testb2 hdr p', May 8, 1994, \"\n \"WBP and JLM (IBM)\\n\"\n )\n # Data\n for key in keys:\n writer = WRITERS[key[:3]]\n (marker, fill_bytes, data) = info[key]\n marker = '{:04x}'.format(marker)\n (name, offset) = key.split('@')\n if fill_bytes:\n marker = 'ff' * fill_bytes + marker\n tfile.write(' {:>2} FIL bytes\\n'.format(fill_bytes))\n writer(tfile, offset, marker, name, data)\n\n # End\n tfile.write(\n '{} markers found in {} bytes of compressed data\\n'\n .format(len(keys), file_length)\n )\n tfile.seek(0)\n\n with open(tempfile.name, 'r') as tfile:\n with open(PROCESS01_B2_REF, 'r', encoding='utf-8', errors='ignore') as rfile:\n for out, ref in zip(tfile, rfile):\n assert ref == out", "def test_aggcsv_instantiate():", "def b_file(tmpdir):\n b_file = tmpdir.join('b_file.csv') # pylint: disable=redefined-outer-name\n b_rows = [\n [\"Stephen\", \"Tyler\", \"7452 Terrace 'At the Plaza' road\", \"SomeTown\", \"SD\", \"91234\"],\n [\"Joan 'the bone'\", \"Anne\", \"9th, at Terrace plc\", \"Desert City\", \"CO\", \"00123\"],\n ]\n\n with open(b_file.strpath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerows(b_rows)\n\n return b_file" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch a dict of all departments and their numbers.
def _get_department_numbers_and_names() -> Dict[str, str]:
    response = CLIENT.get("https://vitemadose.gitlab.io/vitemadose/departements.json")
    response.raise_for_status()
    return {dep["code_departement"]: dep["nom_departement"] for dep in response.json()}
[ "def create_departments_dict():\n\n departmentsJsonResult = get_deps()\n departmentsDict = dict()\n for row in departmentsJsonResult:\n departmentsDict[row[\"id\"]] = (row[\"name\"], row[\"code\"])\n return departmentsDict", "def api_all_dep():\n deps =[{\"department\": elem.name} for elem in Department.query.all()]\n return jsonify(deps)", "def department_list():\n depts = Department.query.all()\n return render_template(\"depts.html\", depts=depts)", "def api_show_dep(_id):\n deps = Department.query.get(_id)\n number = Employee.query.filter_by(department=deps.name).count()\n salary = 0\n for elem in Employee.query.filter_by(department=deps.name):\n salary += elem.salary\n try:\n salary /= number\n except ZeroDivisionError:\n return \"There is no employees in this department\"\n return {\"Department\": deps.name, \"Average_salary\": round(salary, 2), \"Number_of_employees\": number}", "def get_departments_record(departments_qs, applications, admissions):\n departmental_records = {}\n for department in departments_qs:\n departmental_records[department.name] = {\n 'applications_count': applications.filter(department_choice=department).count(),\n 'admission_count': admissions.filter(choosen_department=department).count(),\n 'migrated_from_count': admissions.filter(department_choice=department,\n migration_status__icontains='from').count(),\n 'migrated_to_count': admissions.filter(choosen_department=department,\n migration_status__icontains='from').count(),\n 'missed': applications.filter(department_choice=department, \n rejected=True, admitted=False,\n paid=False).count(),\n }\n return departmental_records", "def get_departamentos(session):\n\n departamentos = []\n d = pq(get_pagina(session))\n\n for option in d(\"[name='id_departamento'] option\"):\n val = pq(option).val()\n if val != '-1':\n departamentos.append(val)\n\n return departamentos", "def getDeparting(start_date, end_date):\n cursor = conn.cursor()\n query = \"\"\"SELECT departure_odate, MIN(total_usd)\n FROM flights\n WHERE date(departure_odate) BETWEEN date('{0}') and date('{1}')\n GROUP BY departure_odate\n ORDER BY departure_odate;\n \"\"\".format(start_date, end_date)\n cursor.execute(query)\n data = cursor.fetchall()\n updateJSON(data, '')", "def domain_collections(self): \n domains = dict()\n domains_query = \"\"\"select pk1, batch_uid from domain\"\"\" \n domain_results = self.send_query(domains_query, True) \n for pk1, batch_uid in domain_results:\n domain = str(batch_uid)\n domains[domain] = dict()\n domains[domain][\"datatype\"] = \"domain_collections\"\n domains[domain][\"domain_id\"] = domain \n domains[domain][\"courses\"] = self.domain_collection_queries(pk1, \"domain_course_coll\")\n domains[domain][\"organizations\"] = self.domain_collection_queries(pk1, \"domain_organization_coll\")\n domains[domain][\"users\"] = self.domain_collection_queries(pk1, \"domain_user_coll\")\n domains[domain][\"enrollments\"] = self.domain_enrollments(pk1)\n \n return domains", "def get_dirigentnamen(self):\n query = \"\"\"\n SELECT \n entity_id, field_dirigent_naam_value\n FROM\n field_data_field_dirigent_naam\n \"\"\"\n self.cur.execute(query)\n res = self.cur.fetchall()\n dirigent_dict = {}\n for rec in res:\n dirigent_dict[rec['entity_id']] = rec['field_dirigent_naam_value']\n return dirigent_dict", "def get_department_children(self, department):\n data = []\n department_data = \\\n {\n 'name': department.name,\n 'type': 'department',\n 'id': department.id,\n 'className': 'o_hr_organization_chart_department',\n 'manager_name': 
department.manager_id.name,\n 'manager_title': get_position(department.manager_id),\n 'manager_image': get_image(department.manager_id),\n }\n employee_children = self.get_employee_data(department)\n if employee_children:\n data += employee_children\n department_children = self.env['hr.department']. \\\n search([('parent_id', '=', department.id)])\n for child in department_children:\n sub_children = self.env['hr.department']. \\\n search([('parent_id', '=', child.id)])\n if not sub_children:\n employee_children = self.get_employee_data(child)\n data.append({\n 'name': child.name,\n 'type': 'department',\n 'id': child.id,\n 'className': 'o_hr_organization_chart_department',\n 'manager_name': child.manager_id.name,\n 'manager_title': get_position(child.manager_id),\n 'manager_image': get_image(child.manager_id),\n 'children': employee_children\n })\n else:\n data.append(self.get_department_children(child))\n if department_children or employee_children:\n department_data['children'] = data\n return department_data", "def load_departures(url,departure_list):\n resp = requests.get(url).json()\n departure_list.extend(resp[\"results\"])\n \n #recursivelly loads the list\n if resp[\"next\"] is None:\n return departure_list\n else:\n return load_departures(resp[\"next\"], departure_list)", "def get_uitvoerders(self):\n query = \"\"\"\n SELECT \n entity_id, field_uitvoerders_value\n FROM\n field_data_field_uitvoerders\n \"\"\"\n self.cur.execute(query)\n res = self.cur.fetchall()\n uitvoerders_dict = {}\n for rec in res:\n uitvoerders_dict[rec['entity_id']] = rec['field_uitvoerders_value']\n return uitvoerders_dict", "def getDepotData(self):\n depotData = list(BusDepot.objects.all().values())\n return depotData", "def dnb_response():\n return {\n 'results': [\n {\n 'address_line_1': '10 Fake Drive',\n 'address_line_2': '',\n 'address_postcode': 'AB0 1CD',\n 'address_town': 'London',\n 'address_county': '',\n 'address_country': 'GB',\n 'registered_address_line_1': '11 Fake Drive',\n 'registered_address_line_2': '',\n 'registered_address_postcode': 'AB0 2CD',\n 'registered_address_town': 'London',\n 'registered_address_county': '',\n 'registered_address_country': 'GB',\n 'domain': 'foo.com',\n 'duns_number': '123456789',\n 'primary_name': 'FOO BICYCLES LIMITED',\n 'trading_names': [],\n 'registration_numbers': [\n {\n 'registration_number': '012345',\n 'registration_type': 'uk_companies_house_number',\n },\n ],\n 'global_ultimate_duns_number': '987654321',\n },\n ],\n }", "async def ticket_data(self):\n cursor = await self.get_data(\n \"SELECT `Num_Tickets` FROM `ticket_data` ORDER BY Date DESC \")\n values = []\n for f in cursor:\n values.append(int(str(f)[1]))\n\n data = {}\n if values[0]:\n data['last_hour'] = values[0]\n data['overall_tph'] = int(sum(values) / len(values))\n else:\n data['last_hour'] = \"n/a\"\n\n if values[1]:\n data['hour_difference'] = values[0] - values[1]\n else:\n data['hour_difference'] = 'More Data Needed'\n\n if values[23]:\n data['prev_difference'] = values[0] - values[23]\n else:\n data['prev_difference'] = 'More Data Needed'\n\n if values[23]:\n data['day_avg'] = int(sum(values[0:23]) / 24)\n else:\n data['day_avg'] = 'More Data Needed'\n\n return data", "def get_employees(self):\n\t\tprint(\"\\n----------Department {}'s Employees----------\".format(self.name))\n\t\tfor each in self.employees:\n\t\t\tprint(\"\\n{} {}'s information:\".format(each.first_name, each.last_name))\n\t\t\tfor key,value in each.__dict__.items():\n\t\t\t\tif '__' in key:\n\t\t\t\t\tkey = 
key.split('__')[1]\n\t\t\t\tprint(\" {} has a value of {}\".format(key.replace(\"_\",\" \"), value))\n\t\treturn self.employees", "def get_employee_data(self, department):\n employee_data = []\n domain = [\n ('department_id', '=', department.id),\n ]\n if department.manager_id:\n domain += [\n '|', ('parent_id', '=', False),\n ('parent_id', '=', department.manager_id.id),\n ('parent_id.department_id', '!=', department.id),\n ]\n else:\n domain += [\n '|', ('parent_id', '=', False),\n ('parent_id.department_id', '!=', department.id),\n ]\n employees = self.env['hr.employee'].search(domain)\n for employee in employees:\n children = self.get_employee_children(employee)\n employee_data.append(children)\n return employee_data", "def get_department_list(seed_page):\n soup = BeautifulSoup(seed_page)\n # Example tag:\n # <input name=\"subject\" type=\"checkbox\" value=\"COS\">\n dept_tags = soup('input', {\"name\": \"subject\"})\n departments = map(lambda t: t.attrs['value'], dept_tags)\n return departments", "def get_job_info(self) -> Dict[str, JobInfo]:\n qparam = {DSORT_UUID: [self._dsort_id]}\n return self._client.request_deserialize(\n HTTP_METHOD_GET,\n path=URL_PATH_DSORT,\n res_model=Dict[str, JobInfo],\n params=qparam,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve appointments for all queried departments.
def retrieve_all_suitable_appointments() -> Dict[str, List[AppointmentMatch]]:
    all_appointments = {}
    for department in DEPARTMENTS:
        entry = f"{DEPARTMENTS_TABLE[department]} ({department})"
        all_appointments[entry] = find_centers_for_department(department)
    return all_appointments
[ "def populate_appointments(endpoint, doctor):\n date = timezone.now().strftime('%Y-%m-%d')\n\n appointments = endpoint.list({'doctor': doctor.id, 'date': date})\n for appointment_data in appointments:\n patient = Patient.objects.get(id=appointment_data['patient'])\n\n # simplify/clean statuses for project purposes\n status = appointment_data['status']\n if status not in ('Checked In', 'In Session', \n 'Complete', 'Cancelled'):\n status = ''\n\n\n data = {\n 'doctor': doctor,\n 'patient': patient,\n 'scheduled_time': appointment_data['scheduled_time'],\n 'duration': appointment_data['duration'],\n 'office': appointment_data['office'],\n 'exam_room': appointment_data['exam_room'],\n 'status': status,\n 'reason': appointment_data['reason']\n }\n\n appointment, created = Appointment.objects.update_or_create(\n defaults=data, pk=appointment_data['id'])", "def appointment_db_query(self):\n \n appointments = []\n sql = \"\"\"\n SELECT appt.id AS appt_id, appt.customer_id AS cust_id,\n DATE_FORMAT(appt.startDate, '%W %m/%d/%Y %l:%i %p') AS appt_lt,\n CONVERT_TZ(DATE_SUB(appt.startDate, INTERVAL 2 HOUR), pref.value, 'UTC') AS reminder_utc,\n comp.id AS comp_id, comp.name AS comp_name, comp.domain AS comp_domain, pref.value AS comp_timezone,\n (SELECT value from preferences AS p WHERE name='receipt header' AND p.company_id=comp.id) AS comp_address,\n cust.email AS cust_email, cust.firstname AS cust_fname, cust.surname AS cust_lname, cust.phone_num as cust_phone\n FROM appointments AS appt\n INNER JOIN companies as comp ON comp.id=appt.company_id\n INNER JOIN customers as cust ON cust.id=appt.customer_id\n INNER JOIN preferences AS pref ON pref.company_id=comp.id\n WHERE (DATE(appt.startDate)=DATE(CONVERT_TZ(CURDATE(), 'UTC', pref.value)) \n OR DATE(appt.startDate)=DATE(CONVERT_TZ(DATE_ADD(CURDATE(), INTERVAL 1 DAY), 'UTC', pref.value)))\n AND pref.name='timezone';\n \"\"\"\n db = self.db_connect()\n appointments_db_query = self.db_execute_query(db, sql)\n appointments = appointments_db_query\n return appointments", "def get_appointments(self):\n if self.is_admin():\n return Appointment.objects\n\n elif self.is_doctor():\n return Appointment.objects.filter(doctor=self)\n\n return Appointment.objects.filter(patient=self)", "def appointments(self):\n appointments = []\n if self.show == 'forms':\n appointments = [self.appointment]\n else:\n # or filter appointments for the current membership categories\n # schedule_group__membership_form\n codes = []\n for category in self.membership_form_category:\n codes.extend(MembershipForm.objects.codes_for_category(membership_form_category=category))\n appointments = Appointment.objects.filter(\n registered_subject=self.registered_subject,\n visit_definition__code__in=codes).order_by(\n 'visit_definition__time_point', 'visit_instance', 'appt_datetime')\n return appointments", "def list_appointments(request, for_date: date, current_user_id=1):\n\n if request.method != 'GET':\n return HttpResponse(status=405)\n\n query_set = BookingService.get_appointments_for_range(current_user_id, for_date, timedelta(days=1) + for_date)\n return JsonResponse(status=200, data={\"appointments\": [model_to_dict(model) for model in query_set]})", "def emp_appointment_queries():\n if not hasattr(g, 'emp_appoint_queries'):\n g.emp_appoint_queries = EmployeeAppointmentQueries()\n return g.emp_appoint_queries", "def get(self):\n return render_template(\"appointments.html\",\n apps=get_db().get_all_apps())", "def get(self, request):\n user = self.request.user\n\n if user.is_staff:\n 
appointments = Appointment.objects.all()\n else:\n appointments = Appointment.objects.filter(client=user)\n\n serializer = AppointmentSerializer(appointments, many=True)\n return Response(serializer.data)", "def get_appointments(doc_id: int, cur) -> json:\n return cur.execute(\n \"SELECT appointment FROM Doctors where UID = ?;\", (doc_id,)\n ).fetchone()[0]", "def list_appointments(\n self,\n booking_business_id, # type: str\n orderby=None, # type: Optional[List[Union[str, \"models.Enum13\"]]]\n select=None, # type: Optional[List[Union[str, \"models.Enum14\"]]]\n expand=None, # type: Optional[List[str]]\n **kwargs # type: Any\n ):\n # type: (...) -> Iterable[\"models.CollectionOfBookingAppointment\"]\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.CollectionOfBookingAppointment\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n def prepare_request(next_link=None):\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n if not next_link:\n # Construct URL\n url = self.list_appointments.metadata['url'] # type: ignore\n path_format_arguments = {\n 'bookingBusiness-id': self._serialize.url(\"booking_business_id\", booking_business_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if self._config.top is not None:\n query_parameters['$top'] = self._serialize.query(\"self._config.top\", self._config.top, 'int', minimum=0)\n if self._config.skip is not None:\n query_parameters['$skip'] = self._serialize.query(\"self._config.skip\", self._config.skip, 'int', minimum=0)\n if self._config.search is not None:\n query_parameters['$search'] = self._serialize.query(\"self._config.search\", self._config.search, 'str')\n if self._config.filter is not None:\n query_parameters['$filter'] = self._serialize.query(\"self._config.filter\", self._config.filter, 'str')\n if self._config.count is not None:\n query_parameters['$count'] = self._serialize.query(\"self._config.count\", self._config.count, 'bool')\n if orderby is not None:\n query_parameters['$orderby'] = self._serialize.query(\"orderby\", orderby, '[str]', div=',')\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n request = self._client.get(url, query_parameters, header_parameters)\n else:\n url = next_link\n query_parameters = {} # type: Dict[str, Any]\n request = self._client.get(url, query_parameters, header_parameters)\n return request\n\n def extract_data(pipeline_response):\n deserialized = self._deserialize('CollectionOfBookingAppointment', pipeline_response)\n list_of_elem = deserialized.value\n if cls:\n list_of_elem = cls(list_of_elem)\n return deserialized.odata_next_link or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n error = self._deserialize(models.OdataError, response)\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n return pipeline_response\n\n return ItemPaged(\n get_next, extract_data\n )", "def my_appts():\n\n\tform = MyAppointmentSearch()\n\tappointments = Appointment.query.filter_by(date=str(datetime.date.today())).all()\n\n\tif form.validate_on_submit():\n\t\tselection = form.filter_menu.data\n\t\tif selection == '2':\n\t\t\tcurrent_date = date_tool.get_current_date()\n\t\t\tendof_week = date_tool.get_endof_week()\n\t\t\tappointments = db.session.query(Appointment).filter(Appointment.date.between(current_date, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t endof_week)).order_by(Appointment.date.asc())\n\t\telif selection == '3':\n\t\t\tstart_date, end_date = date_tool.getcurrent_beginend_ofmonth()\n\t\t\tappointments = db.session.query(Appointment).filter(Appointment.date.between(start_date, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t end_date)).order_by(Appointment.date.asc())\n\t\telse:\n\t\t\treturn redirect(url_for('my_appts'))\n\n\treturn render_template('my_appts.html', form=form, appointments=appointments, pretty_date=pretty_date)", "def get_all_appd():\n return list(appd_coll.find())", "def get_appointments(\n self,\n booking_business_id, # type: str\n booking_appointment_id, # type: str\n select=None, # type: Optional[List[Union[str, \"models.Enum15\"]]]\n expand=None, # type: Optional[List[str]]\n **kwargs # type: Any\n ):\n # type: (...) -> \"models.MicrosoftGraphBookingAppointment\"\n cls = kwargs.pop('cls', None) # type: ClsType[\"models.MicrosoftGraphBookingAppointment\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n accept = \"application/json\"\n\n # Construct URL\n url = self.get_appointments.metadata['url'] # type: ignore\n path_format_arguments = {\n 'bookingBusiness-id': self._serialize.url(\"booking_business_id\", booking_business_id, 'str'),\n 'bookingAppointment-id': self._serialize.url(\"booking_appointment_id\", booking_appointment_id, 'str'),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n if select is not None:\n query_parameters['$select'] = self._serialize.query(\"select\", select, '[str]', div=',')\n if expand is not None:\n query_parameters['$expand'] = self._serialize.query(\"expand\", expand, '[str]', div=',')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(models.OdataError, response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('MicrosoftGraphBookingAppointment', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def search_appt():\n\n\tform = SearchAppointmentForm()\n\tappt_results = db.session.query(Appointment).order_by(Appointment.id.desc()).limit(15)\n\n\tif form.validate_on_submit():\n\t\tselection = form.search_by.data\n\t\t\n\t\tif selection == '1':\n\t\t\tchars = '!@#$%^&*()_-+|\\\\}]{[;:/?.>,<`~='\n\t\t\tstate 
= True\n\t\t\tuser_input = form.search_field.data\n\n\t\t\twhile state:\n\t\t\t\tfor char in user_input:\n\t\t\t\t\tif char in chars or char.isalpha():\n\t\t\t\t\t\tstate = False\n\t\t\t\tbreak\n\n\t\t\tif state:\n\t\t\t\traw_results = Appointment.query.filter_by(marketer_id=int(form.search_field.data)).all()\n\t\t\t\tappt_results = list(reversed(raw_results))\n\t\t\telse:\n\t\t\t\tappt_results = db.session.query(Appointment).order_by(Appointment.id.desc()).limit(15)\n\t\t\t\tflash('Please enter only integers')\n\t\telif selection == '2':\n\t\t\tappt_results = Appointment.query.filter_by(client_first=form.search_field.data).all()\n\t\telif selection == '3':\n\t\t\tappt_results = Appointment.query.filter_by(client_last=form.search_field.data).all()\n\t\telif selection == '4':\n\t\t\tappt_results = Appointment.query.filter_by(date=form.search_field.data).all()\n\n\treturn render_template('search_appt.html', results=appt_results, form=form, pretty_date=pretty_date)", "def get_all():\n logger.debug('List of employees was returned')\n return Employee.query.all()", "def get_departments_record(departments_qs, applications, admissions):\n departmental_records = {}\n for department in departments_qs:\n departmental_records[department.name] = {\n 'applications_count': applications.filter(department_choice=department).count(),\n 'admission_count': admissions.filter(choosen_department=department).count(),\n 'migrated_from_count': admissions.filter(department_choice=department,\n migration_status__icontains='from').count(),\n 'migrated_to_count': admissions.filter(choosen_department=department,\n migration_status__icontains='from').count(),\n 'missed': applications.filter(department_choice=department, \n rejected=True, admitted=False,\n paid=False).count(),\n }\n return departmental_records", "def AddAppointments( self, Appointments ):\n\t\tfor App in Appointments:\n\t\t\tevent = Event()\n\t\t\tif App.has_key( 'Class' ): \n\t\t\t\tevent.add('summary', App['Subject']+\" - \"+App['Class'])\n\t\t\telse:\n\t\t\t\tevent.add('summary', App['Subject'])\n\t\t\tevent.add('dtstart', App['Hours'][0])\n\t\t\tevent.add('dtend', App['Hours'][1])\n\t\t\t\n\t\t\tif App.has_key( 'Location' ): event.add( 'location', App['Location'] )\n\t\t\t\n\t\t\tself.cal.add_component(event)\n\t\t\t# print \"Event added\", App", "def get_all():\n departments = Department.query.order_by(Department.id).all()\n salaries = db.session.query(func.avg(Employee.salary).label('average'),\n Employee.department_id).group_by(Employee.department_id).all()\n logger.debug('List of department was returned')\n return departments, salaries", "def prepare_appointments(self, using):\n self.pre_prepare_appointments(using)\n from edc.subject.appointment_helper.classes import AppointmentHelper\n if 'registered_subject' in dir(self):\n registered_subject = self.registered_subject\n else:\n registered_subject = RegisteredSubject.objects.get(subject_identifier=self.subject_identifier)\n try:\n visit_definitions = self.get_visit_definitions_from_instance()\n except AttributeError:\n visit_definitions = None\n AppointmentHelper().create_all(\n registered_subject,\n self.__class__.__name__.lower(),\n using=using,\n source='BaseAppointmentMixin',\n visit_definitions=visit_definitions)\n self.post_prepare_appointments(using)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Abstract method for locking a pokemon on a move
def add_lock(self, pokemon, move):
    pass
[ "def remove_lock(self, pokemon, move):\n pass", "def move(self, action):\n tile_type, from_pile, to_stack, nbr_to_move = action\n\n # Check for errors\n if self.winner is not None:\n raise Exception(\"Game already won\")\n #elif pile < 0 or pile >= len(self.piles):\n # raise Exception(\"Invalid pile\")\n #elif count < 1 or count > self.piles[pile]:\n # raise Exception(\"Invalid number of objects\")\n\n # get the tiles from the factory\n nbr_tiles, penalty = self.factory.remove_tiles_from_pile(from_pile, tile_type)\n\n if to_stack == penalty_stack_row_idx:\n # these tiles are going straight to penalty\n self.players[self.current_player_idx].add_tiles_to_penalty(nbr_tiles, tile_type)\n else:\n # put the tiles on the floor\n self.players[self.current_player_idx].move_tiles_to_row(nbr_tiles, tile_type, to_stack)\n\n if penalty == 1:\n self.players[self.current_player_idx].add_penalty_tile_to_penalty_stack()\n\n # check if the round is over\n if self.factory.get_tile_count_in_piles() == 0:\n # score this round and setup the next round \n # if the game is over, determine the winner\n if self.process_end_round():\n self.set_winner()\n # the end of round method also sets the next player\n else:\n # check if the player just did something which will end the game soon\n if not self.is_last_round:\n self.is_last_round = self.players[self.current_player_idx].has_a_completed_row()\n # pass the baton to the next player\n self.switch_player()\n\n \n\n # Update pile\n #self.piles[pile] -= count\n #self.switch_player()\n\n # Check for a winner\n #if all(pile == 0 for pile in self.piles):\n # self.winner = self.player", "def lock_piece(self):\n for p in self.current_piece:\n self.blocks.add(p)\n self.notify(PieceLocked(self.current_piece.copy()))\n self.check_rows()\n #todo: trigger game loss if this piece lies completely in the obstructed zone\n self.generate_active_piece(self.queue.next())\n self.may_hold = True", "def make_a_move(self, game):\n raise NotImplementedError(\"not implemented in CheckersBot abstract class\")", "def on_trap_check(self, pokemon):\n return False", "def test_move_lazyplayer(self):\n b = cs.Board()\n p = cs.LazyPlayer(b)\n p.move()\n\n assert p.player_position > 0\n assert p.player_position not in b.chutes.keys() and b.ladders.keys()\n\n position1 = p.player_position\n p.move()\n assert p.player_position != position1", "def before_locking_locked(actor, x, y, ctxt) :\n raise AbortAction(ctxt.world[WrongKeyMessages(x, y)], actor=actor)", "def mustlock(self):\n pass", "def on_move_hit(self, user, move, battle):", "def POST_lock(self, thing):\n if thing.archived_slow:\n return abort(400, \"Bad Request\")\n VNotInTimeout().run(action_name=\"lock\", target=thing)\n thing.locked = True\n thing._commit()\n\n ModAction.create(thing.subreddit_slow, c.user, target=thing,\n action='lock')\n \n \n # NEEDS ADD\n # TIMER\n #LINK TO BUMP TEXT", "def onLockNode(self, objects, opts):\n pass", "def lock(self,time=None):\n #print self.allowed,self.locked\n if self.allowed and not self.locked:\n if time is None:\n time = pf.GUI.drawwait\n if time > 0:\n pf.debug('STARTING TIMER')\n self.locked = True\n self.timer = threading.Timer(time,self.release)\n self.timer.start()", "def process_move(self, move, player):\n try: \n self.moves[player.name][self.sticks] = move\n except KeyError:\n self.moves[player.name] = {}\n self.moves[player.name][self.sticks] = move\n self.sticks -= move", "def request_player_move(self, newpos):\n pos = self.player.location.slot\n j, i = newpos\n j0, i0 = 
self.player.location.slot\n if self.maze.blocktype_at(i, j)['walkable']:\n self.move_player(newpos)\n elif self.maze.blocktype_at(i0, j)['walkable']:\n newpos[1] = i0\n self.move_player(newpos)\n elif self.maze.blocktype_at(i, j0)['walkable']:\n newpos[0] = j0\n self.move_player(newpos)\n self.norm_light = None", "def attack(self, other_pokemon):\r\n damage = 0\r\n # Check to make sure the pokemon isn't knocked out.\r\n if self.is_knocked_out == True:\r\n print(f\"{self.name} can't attack because it is knocked out!\")\r\n # If the attacking pokemon has an advantage over the other pokemon, then \r\n # it deals damage equal to twice the attacking pokemon's level.\r\n elif (self.type == \"Fire\" and other_pokemon.type == \"Grass\") or \\\r\n (self.type == \"Water\" and other_pokemon.type == \"Fire\") or \\\r\n (self.type == \"Grass\" and other_pokemon.type == \"Water\"):\r\n damage += 2 * self.level\r\n print(f\"{self.name} attacked {other_pokemon.name} for {damage} damage.\")\r\n print(\"It's super effective!\")\r\n other_pokemon.lose_health(damage)\r\n # If the attacking pokemon has a disadvantange, then it deals damage \r\n # equal to half the attacking pokemon level.\r\n elif (self.type == \"Grass\" and other_pokemon.type == \"Fire\") or \\\r\n (self.type == \"Fire\" and other_pokemon.type == \"Water\") or \\\r\n (self.type == \"Water\" and other_pokemon.type == \"Grass\"):\r\n damage += round(0.5 * self.level) \r\n print(f\"{self.name} attacked {other_pokemon.name} for {damage} damage.\")\r\n print(\"It's not very effective...\")\r\n other_pokemon.lose_health(damage)\r\n # If the attacking pokemon has neither advantange or disadvantage, then it \r\n # deals damage equal to its level to the other pokemon. \r\n else:\r\n damage += self.level\r\n print(f\"{self.name} attacked {other_pokemon.name} for {damage} damage.\")\r\n other_pokemon.lose_health(damage)", "def shift_players_pokemon(self, action: ShiftActionModel) -> None:\n\n self._player.flash()\n self._pokemon.do(Delay(1.5) + CallFunc(self._add_pkmn))\n self._dialog.set_text(I18n().get(\"BATTLE.GO_POKEMON\").format(action.pokemon.nickname))\n\n self.remove(self._moves)\n self._moves = MovesLayer(action.pokemon)\n self.add(self._moves)\n\n self.do(Delay(2) + CallFunc(self.show_actions))", "def opponentMove(self, move):\n\t\tpass", "def player_move(game, user_action):\n player = game.player\n directions = ['north', 'up', 'south', 'down', 'east',\n 'left', 'west', 'right', ]\n action = next((word for word in user_action if word in directions), '')\n player_direction = {'north': 'up', 'up': 'up', 'south': 'down',\n 'down': 'down', 'east': 'right',\n 'right': 'right', 'west': 'left',\n 'left': 'left', }\n if action in player_direction:\n cardinal = player_direction[action]\n lock_check(game, game.rooms[player.position[\n 'location']].paths[cardinal])\n else:\n print(\"Invalid direction\")\n return True", "def on_after_move_damage(self, battle, pokemon, damage, move, foe):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Abstract method for removing a lock from pokemon's moves
def remove_lock(self, pokemon, move):
    pass
[ "def add_lock(self, pokemon, move):\n pass", "def remove_item(game):\n player = game.player\n item = game.rooms[player.position['next location']].door['unlock']\n player.inventory.remove(item)", "def unmove(self):\n self.insert(None, self.moves.pop())\n self.legal_moves = self.generate_legal_moves()\n self.x_turn = not self.x_turn", "def process_move(self, move, player):\n try: \n self.moves[player.name][self.sticks] = move\n except KeyError:\n self.moves[player.name] = {}\n self.moves[player.name][self.sticks] = move\n self.sticks -= move", "def remove_piece(s, (x,y)):\n\t\ts.matrix[x][y].occupant = None", "def remove_reserve_piece(self):\n self._reserve_pieces -= 1", "def _removeTurnFromCache(turn):\n if turn in self.orders:\n del self.orders[turn]", "def remove(self, piece):\n assert piece.position is not None\n self.state.pitch.board[piece.position.y][piece.position.x] = None\n piece.position = None", "def drop_piece(self):\r\n self.last_update = time.time()\r\n self.pieces[0].x, self.pieces[0].y = self.pieces[3].x, self.pieces[3].y", "def __drop_piece(data, row, col, piece):\r\n data.game_board.drop_piece(row, col, piece)\r\n data.turn += 1\r\n data.turn %= 2\r\n if data.game_board.winning_move(piece, row, col):\r\n data.game_over = True\r\n data.winner = piece", "def __delitem__(self, rnum):\n del self._pieces[rnum]", "def remove_player(self):\n if self.num_player > 0:\n self.num_player -= 1\n self.available_place += 1\n self.update_full_status()\n self.save()", "def delete_by_user(self, user, *args, **kwargs):\r\n if not self.can_unlock(user):\r\n raise LockError(_(\"User '%(user)s' is not allowed to remove \"\r\n \"lock '%(lock)s'\") % { \"user\" : user, \"lock\" : self})\r\n return super(Lock, self).delete(*args, **kwargs)", "def remove_player(self, player_shot: Name):\n del self.players[player_shot]\n for name, player in self.players.items():\n player.remove_player(player_shot)", "def preRemovePlayer(game):\n from .verb import ExitVerb\n\n x = game.me.location\n while isinstance(game.me.location, Thing):\n ExitVerb().verbFunc(game)", "def clean_up_player(self):\n #LOGGER.debug('Clean up player')\n STATUS_PLAYING = boardgame.utils.Room.STATUS_PLAYING\n STATUS_WAITING = boardgame.utils.Room.STATUS_WAITING \n keys = self._player_list.keys()\n for key in keys: \n _player = self._player_list[key]\n if _player:\n current_time = time.time()\n playerstatus = _player.get_player_status()\n delta_time = current_time - playerstatus.active_time\n if (delta_time > MAX_IDLE_TIME_GAME) and (delta_time < MAX_IDLE_TIME_CONNECTION):\n current_room_id = playerstatus.current_room\n if current_room_id:\n room = self._game.get_room(current_room_id)\n if not room:\n LOGGER.error(' '.join(['This player',str(_player),'hold record',current_room_id,'which is not existed.']))\n continue\n if (room.get_playing_status() == STATUS_PLAYING) and (room.get_current_player() == playerstatus.username): \n room.part(_player)\n elif (room.get_playing_status() == STATUS_WAITING) and (playerstatus.status == STATUS_UNREADY):\n room.part(_player) \n if delta_time > MAX_IDLE_TIME_CONNECTION:\n current_room_id = playerstatus.current_room\n if current_room_id:\n room = self._game.get_room(current_room_id)\n room.part(_player) \n del self._player_list[key]\n LOGGER.debug('Player '+ playerstatus.username +' has quit the game.')", "def delete_row(self):\n def drop_down(board, num):\n for i in range(num - 1, 0, -1):\n for j in range(tetris_blocks.COLUMNS):\n board[j][i+1] = board[j][i]\n board[j][i] = None\n\n def 
move_up_remaining_rows(occupied, num):\n for n, i in enumerate(occupied):\n if i[1] < num:\n occupied[n] = [i[0], i[1]+1]\n\n for row in set(j for i, j in self._occupied):\n if all(self._board[i][row] == 2 for i in range(10)):\n for i in range(tetris_blocks.COLUMNS):\n self._board[i][row] = None\n self._occupied.remove([i, row])\n drop_down(self._board, row)\n move_up_remaining_rows(self._occupied, row)\n self._count += 1", "def _slot_remove(self, key, frame):\n\n frame.pop(key)", "def remove_piece(self, piece_id):\n pieces = self.get_pieces()\n del pieces[piece_id]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convenience method that will get a top level atdf section, making sure it is singular
def _getTopSection(self, name):
    section = self.query(name)
    assert len(section) == 1
    return section[0]
[ "def get_section(section):", "def test_get_section_path():\n sp = iniconf.get_section_path(c['sec1'])\n errmsg = \"Section path is not as expected!\"\n assert sp == ['sec1'], errmsg\n sp = iniconf.get_section_path(c['sec1']['sec2'])\n assert sp == ['sec1', 'sec2'], errmsg\n sp = iniconf.get_section_path(c['sec1']['sec2']['sec3'])\n assert sp == ['sec1', 'sec2', 'sec3'], errmsg", "def get_section(section_index):\n return lp_start_end_data[section_index]", "def get_kegg_section(k_record, sname, whole_section=False):\n \n in_section = False\n section = []\n \n for line in k_record.splitlines():\n if line.startswith(sname):\n in_section = True\n section.append(line)\n elif in_section and line.startswith(' '):\n section.append(line)\n elif in_section and not line.startswith(' '):\n break\n\n if whole_section:\n sectionlines = section\n else:\n sectionlines = [line[12:] for line in section]\n return '\\n'.join(sectionlines)", "def is_bare_section(self, title):\n return (title != mp_level01_titles[0] and self.level == 0)", "def test_section(self):\n site = self.get_site()\n cat = pywikibot.Category(site, 'Category:Foo#bar')\n self.assertEqual(cat.section(), 'bar')\n cat2 = pywikibot.Category(site, 'Category:Foo')\n self.assertIsNone(cat2.section())", "def getSection(self):\n return self.getSegment().getSectionAtAddress(self.getEntryPoint())", "def getSection(self,index):\n addr = HopperLowLevel.getSectionAddress(self.__internal_segment_addr__, index)\n if addr == 0:\n return None\n return Section(addr)", "def find_first_section(self, section_name):\n assert isinstance(section_name, tuple) or isinstance(section_name, list)\n\n for s in self._los:\n if self.list_le(section_name, s[0]):\n return s\n\n return None", "def get_section(entry: LogEntry) -> str:\n section = entry.request.split('/')[:2]\n return '/'.join(section)", "def section(c32, name):\n\n entries = documents.entries\n\n if 'document' == name:\n return c32.template('2.16.840.1.113883.3.88.11.32.1')\n if 'allergies' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.102')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.2')\n\n el.entries = entries\n return el\n if 'demographics' == name:\n return c32.template('2.16.840.1.113883.3.88.11.32.1')\n if 'encounters' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.127')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.3')\n\n el.entries = entries\n return el\n if 'immunizations' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.117')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.6')\n\n el.entries = entries\n return el\n if 'results' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.122')\n el.entries = entries\n return el\n if 'medications' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.112')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.8')\n\n el.entries = entries\n return el\n if 'problems' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.103')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.11')\n\n el.entries = entries\n return el\n if 'procedures' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.108')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.12')\n\n el.entries = entries\n return el\n if 'vitals' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.119')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.16')\n\n el.entries = entries\n return el\n\n\n return None", "def 
subsection(self, uid, sid):\n\n self.cur.execute(\n \"SELECT Text FROM sections WHERE article = ? AND name = (SELECT name FROM sections WHERE id = ?)\",\n [uid, sid],\n )\n return \" \".join([x[0] for x in self.cur.fetchall()])", "def identify_section(self, envvar, guess=False):\n sec = None\n # looks if envvar exactly in list of sections\n for tsec, envs in reversed(sorted(self.environ.items())):\n if envvar in envs:\n sec = tsec\n break\n\n # guess based on if envvar starts with section name\n if guess and envvar.startswith(tsec):\n sec = tsec\n break\n\n # return section name if found\n if sec:\n return sec\n\n # if still no identified or guessed section, guess with rapidfuzz\n if guess:\n if rapidfuzz:\n envlist = sorted(self.to_dict().keys())\n sec_guess = rapidfuzz.process.extractOne(envvar, envlist, score_cutoff=70)\n if sec_guess:\n return self.identify_section(sec_guess[0])\n else:\n log.warning('rapidfuzz package is not installed. Cannot make a guess.')\n return sec", "def substance(self):\r\n if self.substance_ is None:\r\n desc = self.SPLDescriptor[\"substance-main\"]\r\n nodes = self.section().xpath(desc[\"xpath\"], namespaces=self.NAMESPACES)\r\n if len(nodes) != 1:\r\n raise SPLDocumentError(\"Main substance element must be present and unique\")\r\n self.substance_ = nodes[0]\r\n return self.substance_", "def get_section(soup, attrs={}, name='div', all=False):\n if all == False:\n if isinstance(attrs, dict):\n return soup.find(name=name, attrs=attrs)\n else:\n tag = soup\n for ss in attrs:\n tag = tag.find(name=name, attrs=ss)\n return tag\n else:\n if isinstance(attrs, dict):\n return soup.findAll(name=name, attrs=attrs)\n else: # not sure how to handle this, so I'm forcing exit\n print(\"haven't coded this yet\")\n return None", "def parse_sections(self):\n heading_tags = (\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\")\n current_section = EpubPageSection(self)\n current_section.bind_to_parent(None)\n for elem in self.page_content_parsed.find(\".//body\").iterdescendants():\n if elem.tag in heading_tags:\n heading_text = \" \".join([t.strip() for t in elem.itertext()])\n heading_level = int(elem.tag[1])\n if current_section.title is None and not current_section.has_text_before_title:\n current_section.title = heading_text\n current_section.title_level = heading_level\n else:\n new_section = EpubPageSection(self, heading_text)\n new_section.title = heading_text\n new_section.title_level = heading_level\n if current_section.title is None:\n new_section.bind_to_parent(None)\n elif new_section.title_level > current_section.title_level:\n new_section.bind_to_parent(current_section)\n elif new_section.title_level == current_section.title_level:\n new_section.bind_to_parent(current_section.parent_section)\n else:\n parent = current_section.find_ancestor_with_title_level_less_than(\n new_section.title_level\n )\n new_section.bind_to_parent(parent)\n current_section = new_section\n else:\n if (not current_section.has_text_before_title\n and current_section.title is None\n and elem.text is not None\n and elem.text.strip()\n ):\n current_section.has_text_before_title = True\n if ([e for e in elem.iterancestors() if\n (e in current_section.content_elements) or # skip children of elements already included to current section\n (e.tag in heading_tags)] == [] # skip children of heading tag, as they are part of the title\n ):\n current_section.content_elements.append(elem)", "def _parse_section(self, section):\n if '/' in section:\n return section.split('/')\n else:\n return ['main', 
section]", "def __getitem__(self, section_name):\n return self._toml[section_name]", "def find_section_text(lines, section, go_to_end=False, section2=\"\"):\n if len(lines) == 0:\n return \"\"\n n = 0\n for line in lines:\n line_mod = line.replace(\" \", \"\")\n if line_mod.startswith(\"==%s\" % section) \\\n or (section2 != \"\" and line_mod.startswith(\"==%s\" % section2)):\n # Section started\n n += 1\n doc = \"\"\n # collect the documents till next section or the end \n newline = lines[n]\n while (go_to_end or not newline.strip().startswith('==')) \\\n and not newline.strip().startswith('[[Category'):\n doc += newline + '\\n'\n n += 1\n if n < len(lines):\n newline = lines[n]\n else:\n break\n return doc\n n += 1\n \n return \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute the command ceph orch ls.
def ls(self: OrchProtocol, config: Optional[Dict] = None) -> Tuple: cmd = ["ceph", "orch"] if config and config.get("base_cmd_args"): cmd.append(config_dict_to_string(config["base_cmd_args"])) cmd.append("ls") if config and config.get("args"): args = config.get("args") cmd.append(config_dict_to_string(args)) return self.shell(args=cmd)
[ "def ls():\n\tdata_socket = pack_and_send('ls')\n\tdata = recv(data_socket).decode('utf-8')\n\tshut(data_socket)\n\tstatus = _SOCK.recv(1)\n\tif not status or status == b'F':\n\t\t_log(\"Directory listing failed.\")\n\telif status == b'S':\n\t\t_log(data[:-1])\n\telse:\n\t\t_err_log(\"Unexpected status: {}\".format(status))", "def do_ls(self, args):\n\n if self.__is_open():\n try:\n files = list(self.fe.ls(add_details=True))\n files.sort(key=self.__sort_files)\n\n if self.fe.pwd() != \"/\":\n files = [(\"..\", \"D\")] + files\n\n print(\"\\nRemote files in '%s':\\n\" % self.fe.pwd())\n\n for elem, type in files:\n if type == 'D':\n print(\" <dir> %s\" % elem)\n else:\n print(\" <file/empty_dir> %s\" % elem)\n\n print(\"\")\n\n except IOError as e:\n self.__error(str(e))\n except Exception as e:\n print(e)", "def help_ls(self):\n print(help_msg.cmds['ls'])", "def do_showVolumes(self, filer):\n\t\tcommand = 'ssh -qn admin@%s vol show' % self.filer\n\t\tproc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n\t\tp_stdout = proc.communicate()[0]\n\t\tprint p_stdout", "async def list(self, ctx):\n\t\thidden = await self.config.hidden()\n\t\tif not hidden:\n\t\t\treturn await ctx.send('There are currently no hidden commands.')\n\t\tmsg = '```\\n'\n\t\tfor command in hidden:\n\t\t\tmsg += command + '\\n'\n\t\tmsg += '```'\n\t\tawait ctx.send(msg)", "def get_command(self, ctx, cmd_name):\n if cmd_name == \"ls\":\n cmd_name = \"list\"\n return click.Group.get_command(self, ctx, cmd_name)", "def shell(lista):", "def test_cli_list(self):\n with TemporaryDirectory() as directory:\n touch(os.path.join(directory, \"foo.gpg\"))\n touch(os.path.join(directory, \"foo/bar.gpg\"))\n touch(os.path.join(directory, \"Also with spaces.gpg\"))\n returncode, output = run_cli(main, \"--password-store=%s\" % directory, \"--list\")\n assert returncode == 0\n entries = output.splitlines()\n assert \"foo\" in entries\n assert \"foo/bar\" in entries\n assert \"Also with spaces\" in entries", "def get_file_listing(hosts, files):\n ls_command = \"/usr/bin/ls -la {}\".format(convert_string(files, \" \"))\n command = get_clush_command(hosts, args=\"-S -v\", command=ls_command, command_sudo=True)\n result = run_command(command, verbose=False, raise_exception=False)\n return result", "def openocdCmd(self, command_list, board):\n\n board_cfg = self.getBoardConfigName(board)\n\n args = ['openocd', '-s', self.ocd_script_dir,\n '-f', board_cfg]\n for cmd in command_list:\n args.append('-c')\n args.append(cmd)\n args.append('-c')\n args.append('shutdown')\n sp.call(args)", "def app_shell():\n\n require('hosts')\n\n run(\"invoke shell\")", "def list(ctx):\n \"\"\"been added as volume metadata or block drives as well as drives that have not been added and are available.\"\"\"\n\n \n\n cli_utils.establish_connection(ctx)\n \n\n \n\n ctx.logger.info(\"\"\": \"\"\"+\"\"\";\"\"\"+\"\")\n try:\n _ListDrivesResult = ctx.element.list_drives()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n if ctx.json:\n print(simplejson.dumps(simplejson.loads(_ListDrivesResult), indent=4))\n return\n else:\n cli_utils.print_result(_ListDrivesResult, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def get_ls(debug_command, workdir):\n\n items = debug_command.split(' ')\n # cmd = items[0]\n options = ' '.join(items[1:])\n path = options.split(' ')[-1] if ' ' in options else options\n if 
path.startswith('-'):\n path = '.'\n finalpath = os.path.join(workdir, path)\n debug_command = debug_command.replace(path, finalpath)\n\n _, stdout, _ = execute(debug_command)\n logger.debug(\"%s:\\n\\n%s\\n\\n\", debug_command, stdout)\n\n return stdout", "def gsuite_chromeos_device_list_command(client: Client, args: Dict[str, str]) -> CommandResults: # pragma: no cover\n client.set_authorized_http(scopes=COMMAND_SCOPES.get('CHROMEOS_DEVICES_LIST', []))\n query_params = chromeos_device_list_create_query_parameters(projection=args.get('projection', 'full'),\n query=args.get('query', ''),\n include_child_org_units=argToBoolean(args.get(\n 'include_child_org_units', False)),\n order_by=args.get('order_by', ''),\n sort_order=args.get('sort_order', ''),\n org_unit_path=args.get('org_unit_path', ''),\n )\n pagination_args = prepare_pagination_arguments(page_token=args.get('page_token', ''),\n page_size=arg_to_number(args.get('page_size', '')),\n limit=arg_to_number(args.get('limit', '')))\n mutual_pagination_args = assign_params(\n request_by_device_type=chromeos_device_list_request,\n client=client,\n customer_id=args.get('customer_id', ''),\n response_devices_list_key=ChromeOSDevicesConfig.response_devices_list_key,\n query_params=query_params,\n )\n try:\n markdown = ''\n if 'limit' in pagination_args:\n pagination_result = device_list_automatic_pagination(**mutual_pagination_args, **pagination_args)\n else:\n pagination_result = device_list_manual_pagination(**mutual_pagination_args, **pagination_args)\n context_data: list[dict] = pagination_result.get('data', [{}])\n raw_response: list = pagination_result.get('raw_response', [])\n next_page_token: str = pagination_result.get('next_page_token', '')\n if not context_data:\n markdown = 'No results were found with the respected arguments'\n\n else:\n human_readable = devices_to_human_readable(\n devices_data=context_data,\n keys=['serialNumber', 'annotatedUser', 'model', 'osVersion', 'status', 'deviceId'],\n keys_mapping={'annotatedUser': 'User Name', 'osVersion': 'OS'})\n num_of_devices = len(context_data)\n markdown = tableToMarkdown(ChromeOSDevicesConfig.table_title, human_readable,\n metadata=f'{num_of_devices} {\"results\" if num_of_devices != 1 else \"result\"} found')\n outputs: Dict[str, Any] = {}\n if context_data:\n outputs[(f'{ChromeOSDevicesConfig.outputs_prefix}.'\n 'ChromeOSListObjects(val.resourceId && val.resourceId == obj.resourceId)')] = context_data\n\n if next_page_token:\n markdown += f'### Next Page Token:\\n{next_page_token}'\n outputs[f'{ChromeOSDevicesConfig.outputs_prefix}.PageToken(val.NextPageToken)'] = {'NextPageToken': next_page_token}\n\n command_results = CommandResults(\n readable_output=markdown,\n outputs=outputs,\n raw_response=raw_response,\n )\n return command_results\n except DemistoException as e:\n error_message = str(e)\n if ('INVALID_OU_ID' in error_message):\n raise DemistoException(MESSAGES.get('INVALID_ORG_UNIT_PATH', ''))\n raise DemistoException(error_message)", "def phone_ssh_cmd(self, cmd):\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(self.phone_info['ipAddress'],username=\"admin\",key_filename=self.hq_rsa_path)\n\n logger.info(\"Running ssh cmd: \\\"%s\\\" on phone %s\" % (cmd, self.phone_info['ipAddress']))\n stdin, stdout, stderr = self.ssh.exec_command(cmd, get_pty=True)\n result = stdout.readlines()\n \n if self.ssh:\n self.ssh.close()\n return result", "def test_list():\n\n cli = filesystem(apiVersion, account_name, 
dns_suffix, key)\n resp = cli.list()\n fileSystems = resp.json()\n print(dumps(fileSystems,indent=4))\n assert fileSystems['filesystems']", "def cli_cosmosdb_mongocluster_list(client,\r\n resource_group_name=None):\r\n\r\n if resource_group_name is None:\r\n return client.list()\r\n\r\n return client.list_by_resource_group(resource_group_name)", "def do_list_cmd(args, conn):\n\n # file status in data_migration table\n if args.status:\n status_query(args, conn)\n return\n\n cursor = conn.cursor()\n if args.max:\n # latest run (max runnr) for an experiemt in *file* table\n cursor.execute(\"\"\"SELECT 'Highest run# ' || max(run) FROM file WHERE exper_id = %s\"\"\", (args.expid,))\n else:\n # all runs for an experiemt in *file* table\n cursor.execute(\"\"\"SELECT * FROM file WHERE exper_id = %s\"\"\", (args.expid,))\n\n rows = cursor.fetchall()\n for row in rows:\n print \" | \".join(map(str,row))", "def read_fdisk():\n\n from subprocess import Popen, PIPE\n\n p = Popen([\"sudo\", \"fdisk\", \"-l\"], stdout=PIPE, stderr=PIPE)\n LL,e = p.communicate()\n for L in LL.splitlines():\n yield L" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if last sync was ok and happened recently, else False.
def is_notification_sync_ok(self) -> bool: return ( self.notifications_last_update_ok is True and self.is_notification_sync_fresh )
[ "def _should_sync(self, data, last_sync):\n\n # definitely sync if we haven't synced before\n if not last_sync or not last_sync.date:\n return True\n\n # check if any items have been modified since last sync\n for data_item in data:\n # >= used because if they are the same second, who knows\n # which actually happened first\n if not data_item.last_modified or data_item.last_modified >= last_sync.date:\n return True\n\n return False", "def was_modified_since_last_sync(self):\n info = self.get_sync_info()\n if not info:\n return None\n if self.size != info[\"s\"]:\n return True\n if self.mtime > info[\"m\"]:\n return True\n return False", "def has_sync_errors(self):\n return self.sync_errors.qsize() > 0", "def check_new_sync(self):\n if self.__sync_halted:\n print ('A \"git rv sync\" was previously halted in branch %r. Please '\n 'execute the command:\\n\\tgit rv sync --continue\\n'\n 'instead.' % (self.__branch,))\n self.state = self.FINISHED\n else:\n head_commit = utils.get_head_commit(current_branch=self.__branch)\n if head_commit != self.__last_commit:\n print UNEXPORTED_CHANGES_BLOCK_SYNC\n self.state = self.FINISHED\n else:\n self.state = self.FETCH_REMOTE\n self.advance()", "def are_all_syncs_ok(self) -> bool:\n return (\n self.is_structure_sync_ok\n and self.is_notification_sync_ok\n and self.is_forwarding_sync_ok\n and self.is_assets_sync_ok\n )", "def needs_refresh(self):\n\n if not self.selected_repository or not self.meta_root:\n # Nowhere to obtain metadata from; this should rarely\n # occur except during publisher initialization.\n return False\n\n lc = self.last_refreshed\n if not lc:\n # There is no record of when the publisher metadata was\n # last refreshed, so assume it should be refreshed now.\n return True\n\n ts_now = time.time()\n ts_last = calendar.timegm(lc.utctimetuple())\n\n rs = self.selected_repository.refresh_seconds\n if not rs:\n # There is no indicator of how often often publisher\n # metadata should be refreshed, so assume it should be\n # now.\n return True\n\n if (ts_now - ts_last) >= rs:\n # The number of seconds that has elapsed since the\n # publisher metadata was last refreshed exceeds or\n # equals the specified interval.\n return True\n\n return False", "def finished(self):\n return (self._curr_date >= self._to_date) and not self._buffer", "def is_syncing(self) -> bool:\n return self._tendermint.syncing()", "def needs_update(self):\n if not self.last_update:\n return True\n now = datetime.now()\n if self.last_update + timedelta(minutes=self.update_interval) < now:\n return True\n else:\n return False", "def _sync_required(self):\n try:\n # Get the time at which entities in the region were updated.\n # If the times match, then ML2 is in sync with EOS. 
Otherwise\n # perform a complete sync.\n if not self._force_sync and self._region_in_sync():\n LOG.info(_LI('OpenStack and EOS are in sync!'))\n return False\n except arista_exc.AristaRpcError:\n LOG.warning(EOS_UNREACHABLE_MSG)\n # Force an update incase of an error.\n self._force_sync = True\n return True", "def _should_backup(self, now, checksum):\n with shelve.open(str(self.last_file)) as last:\n last_checksum = last[\"checksum\"] if \"checksum\" in last else None\n last_access_time = last[\"time\"] if \"time\" in last else None\n if last_checksum is None and last_access_time is None:\n return True\n\n is_old = (now - last_access_time).total_seconds() >= self.frequency\n is_outdated = checksum != last_checksum\n return is_old and is_outdated", "def needs_sync(self):\n changes = ChangedEntityLocale.objects.filter(entity__resource__project=self)\n return changes.exists() or self.unsynced_locales", "def need_update(self):\n if self.sha: # If not set - connecting and get hash\n return self.__sha != self.__repo['sha']\n return False", "def conversation_is_finished(self) -> bool:\n return not self.last_response.need_more_info", "def _is_uptodate(self, binding):\n assert self.magento_record\n if not self.magento_record.get('updated_at'):\n return # no update date on Magento, always import it.\n if not binding:\n return # it does not exist so it should not be skipped\n sync = binding.sync_date\n if not sync:\n return\n from_string = fields.Datetime.from_string\n sync_date = from_string(sync)\n magento_date = from_string(self.magento_record['updated_at'])\n # if the last synchronization date is greater than the last\n # update in magento, we skip the import.\n # Important: at the beginning of the exporters flows, we have to\n # check if the magento_date is more recent than the sync_date\n # and if so, schedule a new import. If we don't do that, we'll\n # miss changes done in Magento\n return magento_date < sync_date", "def check_modified(self):\n return bool(self._modified)", "def check_update(self):\n\n if time.time() - self._last_update_check >= self.frametime:\n # A framerate occurs! Check if it was too long ago\n if time.time() - self._last_update_check >= self._reset_timeout:\n # Reset it\n self._last_update_check = time.time()\n else:\n self._last_update_check += self.frametime\n return True\n return False", "def should_post_update(stdout, now, last_packet):\n packet_interval = MIN_PACKET_INTERNAL if stdout else MAX_PACKET_INTERVAL\n return len(stdout) >= MAX_CHUNK_SIZE or (now - last_packet) > packet_interval", "def has_finished_provenance(self):\n return len(self._pending) < self._pending_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if there have been no errors and the last syncing occurred within the allotted time for all sync categories.
def are_all_syncs_ok(self) -> bool: return ( self.is_structure_sync_ok and self.is_notification_sync_ok and self.is_forwarding_sync_ok and self.is_assets_sync_ok )
[ "def _should_sync(self, data, last_sync):\n\n # definitely sync if we haven't synced before\n if not last_sync or not last_sync.date:\n return True\n\n # check if any items have been modified since last sync\n for data_item in data:\n # >= used because if they are the same second, who knows\n # which actually happened first\n if not data_item.last_modified or data_item.last_modified >= last_sync.date:\n return True\n\n return False", "def has_sync_errors(self):\n return self.sync_errors.qsize() > 0", "def check_times(self):\r\n if self.in_time and self.out_time and not (self.in_time == self.out_time):\r\n return False\r\n return True", "def is_notification_sync_ok(self) -> bool:\n return (\n self.notifications_last_update_ok is True\n and self.is_notification_sync_fresh\n )", "def needs_sync(self):\n changes = ChangedEntityLocale.objects.filter(entity__resource__project=self)\n return changes.exists() or self.unsynced_locales", "def _sync_required(self):\n try:\n # Get the time at which entities in the region were updated.\n # If the times match, then ML2 is in sync with EOS. Otherwise\n # perform a complete sync.\n if not self._force_sync and self._region_in_sync():\n LOG.info(_LI('OpenStack and EOS are in sync!'))\n return False\n except arista_exc.AristaRpcError:\n LOG.warning(EOS_UNREACHABLE_MSG)\n # Force an update incase of an error.\n self._force_sync = True\n return True", "def check_timestamps(self):\n times = [update.timestamp for update in self.current_group()]\n time_range = max(times) - min(times)\n if time_range >= SYNC_ACCURACY_S:\n raise TimingException(\n \"ERROR: timestamps of readings in group #{} differed by {:.4f} s.\"\n .format(self.current_seq_no, time_range))", "def has_been_done_recently(self) -> bool:\n task_actions = self.taskaction_set.all()\n count = self.taskaction_set.count()\n if count == 0:\n return False\n else:\n now_str = str(timezone.now())\n year_now = now_str[0:4] # TODO replace this with timezone values - i have an example in blogs I believe\n month_now = now_str[5:7] # TODO replace this with timezone values - i have an example in blogs I believe\n day_now = now_str[8:10] # TODO replace this with timezone values - i have an example in blogs I believe\n get_todays_taskactions = self.taskaction_set.filter(time_checked__year=year_now,\n time_checked__month=month_now,\n time_checked__day=day_now)\n gtt_count = get_todays_taskactions.count()\n if gtt_count > 0:\n return True\n else:\n return False", "def check_out_time(self):\r\n if self.in_time and self.out_time and (self.out_time <= self.in_time):\r\n return False\r\n return True", "def check_for_conflicts(cls, timeslot):\n\t\tqs = Vacation.objects.filter(healer=timeslot.healer).filter_by_datetime(timeslot.start)\n\n\t\tfor vacation in qs:\n\t\t\tif timeslot.is_single():\n\t\t\t\tif not (timeslot.end<=vacation.start or timeslot.start>=vacation.end):\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tvacation_dates = vacation.get_dates()\n\t\t\t\tfor vacation_date in vacation_dates:\n\t\t\t\t\tif timeslot.is_rule_conflict(vacation_date, repeating=False):\n\t\t\t\t\t\treturn True\n\n\t\treturn False", "def is_time_syncronized(self):\n return self.request('time_facade.IsSynchronized')", "def is_synced(self):\n if self.is_empty:\n return False\n if not np.all(self.good_traces(include_box=True)):\n return False\n side = np.clip(self.traceid, -1, 1)\n if len(side) == 0:\n return False\n return side[0] == -1 and side.size % 2 == 0 and np.all(side[1:] + side[:-1] == 0)", "def successful(self):\n return not 
np.isnan(self.time_points.interaction)", "def check_in_time(self):\r\n if self.out_time and not self.in_time:\r\n return False\r\n return True", "def needs_refresh(self):\n\n if not self.selected_repository or not self.meta_root:\n # Nowhere to obtain metadata from; this should rarely\n # occur except during publisher initialization.\n return False\n\n lc = self.last_refreshed\n if not lc:\n # There is no record of when the publisher metadata was\n # last refreshed, so assume it should be refreshed now.\n return True\n\n ts_now = time.time()\n ts_last = calendar.timegm(lc.utctimetuple())\n\n rs = self.selected_repository.refresh_seconds\n if not rs:\n # There is no indicator of how often often publisher\n # metadata should be refreshed, so assume it should be\n # now.\n return True\n\n if (ts_now - ts_last) >= rs:\n # The number of seconds that has elapsed since the\n # publisher metadata was last refreshed exceeds or\n # equals the specified interval.\n return True\n\n return False", "def is_syncing(self) -> bool:\n return self._tendermint.syncing()", "def was_modified_since_last_sync(self):\n info = self.get_sync_info()\n if not info:\n return None\n if self.size != info[\"s\"]:\n return True\n if self.mtime > info[\"m\"]:\n return True\n return False", "def check_added(prev_mol: Chem.Mol, curr_mol: Chem.Mol) -> bool:\n prev_count = prev_mol.GetNumAtoms() + prev_mol.GetNumBonds()\n curr_count = curr_mol.GetNumAtoms() + curr_mol.GetNumBonds()\n return prev_count < curr_count", "def can_update_time_periods(self):\n return # boolean" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add character to this owner. Raises ValueError when character does not belong to owner's corporation.
def add_character( self, character_ownership: CharacterOwnership ) -> "OwnerCharacter": if ( character_ownership.character.corporation_id != self.corporation.corporation_id ): raise ValueError( f"Character {character_ownership.character} does not belong " "to owner corporation." ) obj, _ = self.characters.get_or_create(character_ownership=character_ownership) return obj
[ "def add_character(self, character, position, name='', symbol='', ):\n if name == '':\n name = character.name\n if symbol == '':\n symbol = character.name.strip()[0].lower()\n self.atlas[name] = position\n self.people[name] = character\n self.symbols[name] = symbol", "def addChar(self, *args):\r\n return _osgDB.Field_addChar(self, *args)", "def add(self, character: 'Character') -> None:\n if not self._p1:\n self._p1 = character\n self._p2 = character.enemy\n if len(self.flag) != len(self._content):\n self._content.append(character)\n elif self.flag[0]:\n if self.peek() != character:\n self.flag.append(False)\n self._content.append(character)\n return\n count = 0\n for i in range(len(self._content)):\n if self._content[i] == character and self.flag[i]:\n count += 1\n if count >= 2:\n self.flag.append(False)\n else:\n self.flag.append(True)\n self._content.append(character)", "async def add(self, character):\n if self._is_started:\n raise BattleAlreadyStartedException(\"The battle has already started\")\n if await self.has_character(character.get_name()):\n raise AlreadyInBattleException(\"player is already in this battle\")\n self._characters.append(character)", "def add_character(self, number):\n from evennia.utils import create\n setattr(self, \"account%s\" % number,\n create.create_account(\"TestAccount%s\" % number, email=\"test@test.com\", password=\"testpassword\",\n typeclass=self.account_typeclass))\n setattr(self, \"char%s\" % number,\n create.create_object(self.character_typeclass, key=\"Char%s\" % number,\n location=self.room1, home=self.room1))", "def add_character(name, picture, list_of_songs):\n\tname = name.lower()\n\tchar = Character(name, picture, list_of_songs)\n\tchar.save()", "def add_character(self, character, pos=None):\n if not isinstance(character, Character):\n raise ValueError(f\"character must be of type Character.\")\n if pos is None:\n idxs = np.where(self._grid == 0)\n x = np.random.choice(idxs[0])\n y = np.random.choice(idxs[1])\n pos = (x, y)\n self._grid[pos] = 1\n self._grid_map[character.id] = pos\n self._icon_map[character.id] = character.icon", "def func(self):\n\n # making sure caller is really an account\n self.character = None\n if utils.inherits_from(self.caller, \"evennia.objects.objects.Object\"):\n # An object of some type is calling. Convert to account.\n self.character = self.caller\n if hasattr(self.caller, \"account\"):\n self.caller = self.caller.account\n\n if not self.args:\n self.caller.msg(\"Usage: create <character name>\")\n return\n charname = self.args.strip()\n old_char = managers.objects.get_objs_with_key_and_typeclass(charname, CHARACTER_TYPECLASS)\n if old_char:\n self.caller.msg(\"Character |c%s|n already exists.\" % charname)\n return\n # create the character\n\n new_character = create_object(CHARACTER_TYPECLASS, key=charname)\n if not new_character:\n self.caller.msg(\n \"|rThe Character couldn't be created. This is a bug. 
Please contact an admin.\"\n )\n return\n # make sure to lock the character to only be puppeted by this account\n new_character.locks.add(\n \"puppet:id(%i) or pid(%i) or perm(Developer) or pperm(Developer)\"\n % (new_character.id, self.caller.id)\n )\n\n # save dbref\n avail_chars = self.caller.db._character_dbrefs\n if avail_chars:\n avail_chars.append(new_character.id)\n else:\n avail_chars = [new_character.id]\n self.caller.db._character_dbrefs = avail_chars\n self.caller.msg(\"|gThe character |c%s|g was successfully created!\" % charname)", "def setup_character_and_account(self, character, account, num=\"\"):\n from world.dominion.setup_utils import setup_dom_for_player, setup_assets\n # the attributes that are for 1 don't have a number\n if num == 1:\n num = \"\"\n num = str(num)\n setattr(self, 'dompc%s' % num, setup_dom_for_player(account))\n setattr(self, \"assetowner%s\" % num, setup_assets(getattr(self, \"dompc%s\" % num), 0))\n setattr(self, \"roster_entry%s\" % num,\n self.active_roster.entries.create(player=getattr(self, \"account%s\" % num),\n character=getattr(self, \"char%s\" % num)))", "def add_entity(self, character):\n\t\tnew_entity = AIEntity(character)\n\t\tnew_entity.create()\n\n\t\tself.entities += [new_entity]", "def addChar( self ):\n\t\tself.lexeme.append( self.nextChar )\n\t\tself.lexeme2.append( self.nextChar )", "def func(self):\r\n\r\n # making sure caller is really a player\r\n self.character = None\r\n if utils.inherits_from(self.caller, \"src.objects.objects.Object\"):\r\n # An object of some type is calling. Convert to player.\r\n #print self.caller, self.caller.__class__\r\n self.character = self.caller\r\n if hasattr(self.caller, \"player\"):\r\n self.caller = self.caller.player\r\n\r\n if not self.args:\r\n self.caller.msg(\"Usage: create <character name>\")\r\n return\r\n charname = self.args.strip()\r\n old_char = managers.objects.get_objs_with_key_and_typeclass(charname, CHARACTER_TYPECLASS)\r\n if old_char:\r\n self.caller.msg(\"Character {c%s{n already exists.\" % charname)\r\n return\r\n # create the character\r\n\r\n new_character = create_object(CHARACTER_TYPECLASS, key=charname)\r\n if not new_character:\r\n self.caller.msg(\"{rThe Character couldn't be created. This is a bug. 
Please contact an admin.\")\r\n return\r\n # make sure to lock the character to only be puppeted by this player\r\n new_character.locks.add(\"puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)\" %\r\n (new_character.id, self.caller.id))\r\n\r\n # save dbref\r\n avail_chars = self.caller.db._character_dbrefs\r\n if avail_chars:\r\n avail_chars.append(new_character.id)\r\n else:\r\n avail_chars = [new_character.id]\r\n self.caller.db._character_dbrefs = avail_chars\r\n self.caller.msg(\"{gThe Character {c%s{g was successfully created!\" % charname)", "def set_owner_character(character_id: int) -> Tuple[User, Owner]:\n my_user, character_ownership = create_user_from_evecharacter(\n character_id,\n permissions=[\"structures.add_structure_owner\"],\n scopes=Owner.get_esi_scopes(),\n )\n my_character = my_user.profile.main_character\n my_owner = Owner.objects.get(\n corporation__corporation_id=my_character.corporation_id\n )\n my_owner.characters.create(character_ownership=character_ownership)\n return my_user, my_owner", "def add_child(self, character, child_node):\n if not self.has_child(character):\n # Add given character and child node to this node's children\n self.children[ord(character)-97] = child_node\n else:\n raise ValueError(f'Child exists for character {character!r}')", "def set_character(self, new_character):\n self.character = new_character", "def AddChar(self,char):\n if not(ischar(char)):\n print >> sys.stderr, \"Error: Tried to add non-string or string with more than one character using StringStat.AddChar.\"\n exit()\n \n newchar=self.ParseChar(char)\n \n if not(newchar):\n #In this case char was not one of the characters\n #being accepted. Don't add it to the queue.\n return None\n \n strlist=self.queue.Add(newchar)\n\n if not(strlist):\n #The queue is not up to n characters yet, so don't\n #add/increment this string in self.data.\n return newchar\n\n #Convert strlist from a list of chars into a string\n strng=\"\"\n for ch in strlist:\n if not(ischar(ch)):\n print >> sys.stderr, \"Error: Non char got into StringStat.queue.\"\n exit()\n strng+=ch\n\n #Now can add/increment this string in self.data\n self.Incr(strng)\n return newchar", "def push_char(self, char, x, y):\n if char is not None:\n char = self.charset.apply_mapping(char)\n if self.modes.insert:\n self.insert_chars(char=char)\n else:\n self.set_char(char)\n\n self.set_term_cursor(x, y)", "def add(self):\n self.token += self.char\n return None", "def accumulate_punctuator(self, char):\n raise RuntimeError(\"Not implemented\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Count of valid owner characters.
def characters_count(self) -> int: return self.characters.count()
[ "def get_nbr_of_characters():\n\tchars = Character.objects()\n\treturn len(chars)", "def n_wrong_characters(self):\n return self._n_characters_with_status(STATUS_WRONG)", "def count_number_of_characters(text):\r\n return len(text)", "def n_untouched_characters(self):\n return len([x for x in self.actions if not x])", "def number_of_emojies(self, text):\r\n counter = 0\r\n for character in text:\r\n if character in UNICODE_EMOJI:\r\n counter += 1\r\n return counter", "def charCount(self, aggregated=False):\n pass", "def test_count_name_chars(self):\n counter = model.Counter.get_unfinished_or_create('haiti', 'person')\n counter.put()\n self.to_delete.append(counter)\n\n counter.increment(u'arbitrary \\xef characters \\u5e73 here')\n counter.put() # without encode_count_name, this threw an exception", "def count_chars(self, text):\r\n return len(text) - text.count(\" \")", "def check_chars(text):\n\n lenght = len(text)\n return lenght", "def weird_char_count(chars):\n if type(chars) == str:\n chars = chars.decode('utf-8')\n\n chars = re.sub(r'[0-9a-zA-Z;:<>=+!@#$%^&*()_\\'\"|./ ]', '_', chars)\n count = (chars.count('_') + 1) / 2 # take the ceil\n chars = chars.replace('_', '')\n return count + len(chars)", "def characterCount(items):\n\treturn [len(i) for i in items]", "def __len__(self):\n return(len(self.nucleotides))", "def characters (file_name):\n\n file_contents = read_file (file_name)\n count_chars = 0\n for line in file_contents:\n count_chars += len (line)\n return count_chars", "def count_o(value):\n return value.count('o')", "def num_humans(self):\n return len( self._human_list )", "def letter_counter(s):\n upper_case_letters = [c for c in s if c.isupper()]\n lower_case_letters = [c for c in s if c.islower()]\n return len(upper_case_letters),len(lower_case_letters)", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 134)", "def test_bolg_perma_character_count(self):\n\t\ttest_bolg = create( \n\t\t\ttitle='Roberto Duran, Marvin Hagler, Sugar Ray Leonard, and Thomas Hearns', \n\t\t\tperma='', \n\t\t\texcerpt=testbolg_excerpt, \n\t\t\tbody=testbolg_body,\n\t\t\ttags=testbolg_tags)\n\n\t\tchar_length = len(test_bolg['perma'])\n\t\tself.assertEqual(char_length, 50 )", "def test_positive_count_non_ascii_chars(test_file_with_non_ascii):\n assert count_non_ascii_chars(test_file_with_non_ascii) == 4" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Notify admin and users about an error with the owner characters.
def notify_error( error: str, character: CharacterOwnership = None, level="warning" ) -> None: message_id = f"{__title__}-Owner-fetch_token-{self.pk}" title = f"{__title__}: Failed to fetch token for {self}" error = f"{error} Please add a new character to restore service level." if character and character.character_ownership: notify_throttled( message_id=message_id, user=character.character_ownership.user, title=title, message=error, level=level, ) title = f"FYI: {title}" notify_admins_throttled( message_id=message_id, title=title, message=error, level=level, timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT, )
[ "def on_register_error_dm_command(self, event):\n if event.author.id in bot.config.exception_dms:\n api_loop(\n event.channel.send_message,\n f\"You're already registered :ok_hand:\",\n )\n else:\n config = bot.get_config()\n if \"exception_dms\" not in config:\n config[\"exception_dms\"] = []\n config[\"exception_dms\"].append(event.author.id)\n bot.overwrite_config(config)\n bot.config.exception_dms.append(event.author.id)\n api_loop(\n event.channel.send_message,\n f\":thumbsup:\",\n )", "def error(update, context): #в случае вознекновения ошибки она выводится в logger (здесь в поток вывода)\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)", "async def error(self, user, msg):\n self.logger.warning(\"ERRROR: {}\".format(msg))\n r = {\"command\": \"ERROR\", \"args\": [msg]}\n await self.send_obj(user, r)", "def error(update, context):\r\n logger.warning('Update \"%s\" caused error \"%s\"', update, error)", "def notify_error(self, error):\n\n if not self.save:\n return\n\n reversion.set_user(self.ticket_user)\n\n now = datetime.datetime.now(datetime.timezone.utc)\n notified = self.ixlan.ixf_ixp_import_error_notified\n self.ixlan.ixf_ixp_import_error\n\n if notified:\n diff = (now - notified).total_seconds() / 3600\n if diff < settings.IXF_PARSE_ERROR_NOTIFICATION_PERIOD:\n return\n\n self.ixlan.ixf_ixp_import_error_notified = now\n self.ixlan.ixf_ixp_import_error = error\n self.ixlan.save()\n\n ixf_member_data = IXFMemberData(ixlan=self.ixlan, asn=0)\n\n subject = (\n f\"Could not process IX-F Data - {self.ixlan.ix.name} ({self.ixlan.ix.id})\"\n )\n template = loader.get_template(\"email/notify-ixf-source-error.txt\")\n message = template.render(\n {\"error\": error, \"dt\": now, \"instance\": ixf_member_data}\n )\n\n # AC does not want ticket here as per #794\n # self._ticket(ixf_member_data, subject, message)\n\n if ixf_member_data.ix_contacts:\n self._email(\n subject, message, ixf_member_data.ix_contacts, ix=ixf_member_data.ix\n )", "def error(self, message):\n pass", "async def cog_command_error(self, ctx: Context, error: Exception):\n\n logging.error(f\"{type(error).__name__}:{error}\") # Log the error in the console\n\n if isinstance(error, commands.CheckFailure):\n await ctx.send(embed=ErrorEmbed(\"Channel is not NSFW\"))\n\n elif isinstance(error, commands.CommandInvokeError):\n await ctx.send(\n embed=ErrorEmbed(str(error)[str(error).find('[')+1::])\n )\n else:\n await ctx.send(embed=ErrorEmbed(str(error)), delete_after=10)", "def error_print():\n print(\"ERROR: Invalid Entry!\")", "def on_error(self, e):\n pass", "async def cog_error(self, ctx: commands.Context, error: Exception):\n error = getattr(error, \"original\", error)\n if isinstance(error, commands.ExtensionNotFound):\n await ctx.send(\"That extension wasn't found. 
Are you sure you're my dev?\")\n return await ctx.message.add_reaction(self.bot.emoji[False])\n else:\n print(2)\n await self.bot._exception_handle(error)", "def error(self, text):\n self.message('ERROR', text, color='red')", "def on_usage_error(self, error: str):\n raise UsageError(error, self, self.command)", "def test_fail_update_other_user_message(self):\n pass", "def oob_error(oobhandler, session, errmsg, *args, **kwargs):\r\n session.msg(oob=(\"send\", {\"ERROR\": errmsg}))", "def check_error_notification(letter_guessed, old_letters_guessed):\r\n # Check if guess is more than single char, and alphabet letters.\r\n if len(letter_guessed) > 1 and letter_guessed.isalpha():\r\n error_notification = \"X\\nOnly one letter is allowed\"\r\n # Check if guess is more than single char, and not alphabet letters.\r\n elif len(letter_guessed) > 1 and not letter_guessed.isalpha():\r\n error_notification = \"X\\nOnly one alphabet A-Z letter is \" \\\r\n \"allowed\"\r\n # Check if guess is a single char, and not alphabet letter.\r\n elif len(letter_guessed) == 1 and not letter_guessed.isalpha():\r\n error_notification = \"X\\nOnly type alphabet A-Z letter is \" \\\r\n \"allowed\"\r\n # Check if guess exist in the already guessed letters' list.\r\n elif letter_guessed in old_letters_guessed:\r\n error_notification = \"X\\nThis letter was guessed already\"\r\n else:\r\n error_notification = \"Un-resolved letter\"\r\n return error_notification", "def test_nonexistent_admin_username():\n expect_error(invite_user, InputError, \"a\", \"b\", 1)", "def _notify_error(self, message):\n if not self._arguments.ignore:\n raise Exception(message)\n print(termcolor.colored(\"ERROR: {}\".format(message), 'red'))", "def crash():\r\n message_display('You Died Sucka')", "def onError(self, conn, exc):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates all structures from ESI.
def update_structures_esi(self, user: User = None): self.structures_last_update_ok = None self.structures_last_update_at = now() self.save() token = self.fetch_token() is_ok = self._fetch_upwell_structures(token) if STRUCTURES_FEATURE_CUSTOMS_OFFICES: is_ok &= self._fetch_custom_offices(token) if STRUCTURES_FEATURE_STARBASES: is_ok &= self._fetch_starbases(token) if is_ok: self.structures_last_update_ok = True self.save() if user: self._send_report_to_user( topic="structures", topic_count=self.structures.count(), user=user )
[ "def _fetch_upwell_structures(self, token: Token) -> bool:\n from .eveuniverse import EsiNameLocalization\n\n corporation_id = self.corporation.corporation_id\n structures = list()\n try:\n # fetch all structures incl. localizations for services\n structures_w_lang = esi_fetch_with_localization(\n esi_path=\"Corporation.get_corporations_corporation_id_structures\",\n args={\"corporation_id\": corporation_id},\n token=token,\n languages=EsiNameLocalization.ESI_LANGUAGES,\n has_pages=True,\n )\n except OSError as ex:\n message_id = (\n f\"{__title__}-fetch_upwell_structures-{self.pk}-{type(ex).__name__}\"\n )\n title = f\"{__title__}: Failed to update upwell structures for {self}\"\n message = (\n f\"{self}: Failed to update upwell structures \"\n f\"from ESI for due to: {ex}\"\n )\n logger.exception(message)\n notify_admins_throttled(\n message_id=message_id,\n title=title,\n message=message,\n level=\"danger\",\n timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT,\n )\n return False\n\n is_ok = True\n # reduce data\n structures = self._compress_services_localization(\n structures_w_lang, EveUniverse.ESI_DEFAULT_LANGUAGE\n )\n\n # fetch additional information for structures\n if not structures:\n logger.info(\"%s: No Upwell structures retrieved from ESI\", self)\n else:\n logger.info(\n \"%s: Fetching additional infos for %d Upwell structures from ESI\",\n self,\n len(structures),\n )\n for structure in structures:\n try:\n structure_info = esi_fetch(\n \"Universe.get_universe_structures_structure_id\",\n args={\"structure_id\": structure[\"structure_id\"]},\n token=token,\n )\n structure[\"name\"] = Structure.extract_name_from_esi_respose(\n structure_info[\"name\"]\n )\n structure[\"position\"] = structure_info[\"position\"]\n except OSError as ex:\n message_id = (\n f\"{__title__}-fetch_upwell_structures-details-\"\n f\"{self.pk}-{type(ex).__name__}\"\n )\n title = (\n f\"{__title__}: Failed to update details for \"\n f\"structure from {self}\"\n )\n message = (\n f\"{self}: Failed to update details for structure \"\n f\"with ID {structure['structure_id']} from ESI due to: {ex}\"\n )\n logger.warning(message, exc_info=True)\n notify_admins_throttled(\n message_id=message_id,\n title=title,\n message=message,\n level=\"warning\",\n timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT,\n )\n structure[\"name\"] = \"(no data)\"\n is_ok = False\n\n logger.info(\n \"%s: Storing updates for %d upwell structures\",\n self,\n len(structures),\n )\n for structure in structures:\n Structure.objects.update_or_create_from_dict(structure, self)\n\n if STRUCTURES_DEVELOPER_MODE:\n self._store_raw_data(\"structures\", structures, corporation_id)\n\n self._remove_structures_not_returned_from_esi(\n structures_qs=self.structures.filter_upwell_structures(),\n new_structures=structures,\n )\n return is_ok", "def update_all_esi(self) -> int:\n logger.info(\n \"%s: Updating %d objects from from ESI...\",\n self.model.__name__,\n self.count(),\n )\n count_updated = 0\n for eve_obj in self.all().order_by(\"last_updated\"):\n try:\n self.update_or_create_esi(eve_obj.id)\n count_updated += 1\n except HTTPError:\n logger.exception(\"Update interrupted by exception\")\n\n return count_updated", "def update(self):\n # ic()\n # self.update_scans()\n self.update_data()", "def refresh(self):\n self._parse_oem_attributes()", "def _update(self):\n self._update_assets()\n self._update_funds()", "def do_update_all(self, **kwargs):\n _hardware_id = kwargs['hardware_id']\n _error_code = 0\n _msg = ''\n\n for _node in 
self.do_select_children(_hardware_id).all_nodes()[1:]:\n try:\n _error_code, _debug_msg = self.do_update(_node.identifier)\n _msg = _msg + _debug_msg + '\\n'\n except AttributeError:\n _error_code = 1\n _msg = (\n \"RAMSTK ERROR: One or more records in the HazOps table \"\n \"for Hardware ID {0:d} did not \"\n \"update.\").format(_hardware_id)\n except NodeIDAbsentError:\n pass\n\n if _error_code == 0:\n _msg = (\"RAMSTK SUCCESS: Updating all records in the HazOps table \"\n \"for Hardware ID {0:d}.\").format(_hardware_id)\n\n return _error_code, _msg", "def all_structs(self):\n if self.cluster:\n for e in self.cluster.structs:\n yield e\n for e in self.idl.structs:\n yield e", "def update_everything(self, amounts_storages, amounts_fluxes):\n self.update_all_fluxes(amounts_fluxes)\n self.update_all_storages(amounts_storages)", "def em_update(self):\n raise NotImplementedError", "def update(s):\n s.getPlaneState()\n s.horizon()\n s.FPM()\n s.instruments()", "def update_asset_esi(self, user: User = None):\n self.assets_last_update_ok = None\n self.assets_last_update_at = now()\n self.save()\n\n token = self.fetch_token()\n structure_ids = {x.id for x in Structure.objects.filter(owner=self)}\n try:\n OwnerAsset.objects.update_or_create_for_structures_esi(\n structure_ids, self.corporation.corporation_id, token\n )\n except OSError as ex:\n message_id = f\"{__title__}-fetch_assets-{self.pk}-{type(ex).__name__}\"\n title = f\"{__title__}: Failed to update assets for {self}\"\n message = f\"{self}: Failed to update assets from ESI due to {ex}\"\n logger.warning(message, exc_info=True)\n notify_admins_throttled(\n message_id=message_id,\n title=title,\n message=message,\n level=\"warning\",\n timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT,\n )\n raise ex\n else:\n self.assets_last_update_ok = True\n self.save()\n\n if user:\n self._send_report_to_user(\n topic=\"assets\", topic_count=self.structures.count(), user=user\n )", "def update_or_create_esi(self, structure_id: int, token: Token) -> tuple:\n from .models import Owner\n\n log_prefix = make_log_prefix(self, structure_id)\n logger.info(\"%s: Trying to fetch structure from ESI\", log_prefix)\n try:\n if token is None:\n raise ValueError(\"Can not fetch structure without token\")\n\n structure_info = esi_fetch(\n esi_path=\"Universe.get_universe_structures_structure_id\",\n args={\"structure_id\": structure_id},\n token=token,\n )\n structure = {\n \"structure_id\": structure_id,\n \"name\": self.model.extract_name_from_esi_respose(\n structure_info[\"name\"]\n ),\n \"position\": structure_info[\"position\"],\n \"type_id\": structure_info[\"type_id\"],\n \"system_id\": structure_info[\"solar_system_id\"],\n }\n owner = Owner.objects.get(\n corporation__corporation_id=structure_info[\"corporation_id\"]\n )\n obj, created = self.update_or_create_from_dict(\n structure=structure, owner=owner\n )\n\n except Exception as ex:\n logger.warn(\"%s: Failed to load structure\", log_prefix)\n raise ex\n\n return obj, created", "def do_update_all(self, **kwargs): # pylint: disable=unused-argument\n _error_code = 0\n _msg = ''\n\n for _node in self.tree.all_nodes():\n try:\n _error_code, _debug_msg = self.do_update(_node.identifier)\n\n _msg = _msg + _debug_msg + '\\n'\n\n except AttributeError:\n _error_code = 1\n _msg = (\"RAMSTK ERROR: One or more records in the function \"\n \"table did not update.\")\n\n if _error_code == 0:\n _msg = (\"RAMSTK SUCCESS: Updating all records in the function \"\n \"table.\")\n\n return _error_code, _msg", "def 
update_extractions_from_esi(self):\n logger.info(\"%s: Fetching extractions from ESI...\", self)\n extractions = (\n esi.client.Industry.get_corporation_corporation_id_mining_extractions(\n corporation_id=self.corporation.corporation_id,\n token=self.fetch_token().valid_access_token(),\n ).results()\n )\n logger.info(\"%s: Received %d extractions from ESI.\", self, len(extractions))\n extractions_by_refinery = defaultdict(list)\n for row in extractions:\n extractions_by_refinery[row[\"structure_id\"]].append(row)\n new_extractions_count = 0\n for refinery_id, refinery_extractions in extractions_by_refinery.items():\n try:\n refinery = self.refineries.get(pk=refinery_id)\n except Refinery.DoesNotExist:\n continue\n new_extractions_count += refinery.create_extractions_from_esi_response(\n refinery_extractions\n )\n refinery.cancel_started_extractions_missing_from_list(\n [row[\"extraction_start_time\"] for row in refinery_extractions]\n )\n if new_extractions_count:\n logger.info(\"%s: Created %d new extractions.\", self, new_extractions_count)", "def update_all(self):\n\n # get all rows from table\n data = self.engine.fetch_all_like_entry(JoinSongArtist())\n\n # update each\n for row in data: self.update(row)", "def update_dict(self, results):\n\n # {'Volumes':\n # [\n # {'Attachments':\n # [\n # {'AttachTime': datetime.datetime(2020, 3, 16, 20, 0, 35, tzinfo=tzutc()),\n # 'Device': '/dev/sda1',\n # 'InstanceId': 'i-0765529fec90ba56b',\n # 'State': 'attached',\n # 'VolumeId': 'vol-09db404935694e941',\n # 'DeleteOnTermination': True}\n # ],\n # 'AvailabilityZone': 'us-east-2c',\n # 'CreateTime': datetime.datetime(2020, 3, 16, 20, 0, 35, 257000, tzinfo=tzutc()),\n # 'Encrypted': False,\n # 'Size': 8,\n # 'SnapshotId': 'snap-085c8383cc8833286',\n # 'State': 'in-use',\n # 'VolumeId': 'vol-09db404935694e941',\n # 'Iops': 100,\n # 'Tags':\n # [{'Key': 'Name',\n # 'Value': 'xin-vol-3'}],\n # 'VolumeType': 'gp2'},\n # {...}\n # ]\n # }\n\n\n if results is None:\n return None\n # elif type(elements) == list:\n # _elements = elements\n # else:\n # _elements = [elements]\n d = []\n\n elements = results['Volumes']\n #print(type(elements))\n for entry in elements:\n #print(\"entry\", entry)\n #print(type(entry))\n try:\n for item in entry['Tags']:\n if item['Key'] == 'Name':\n volume_name = item['Value']\n else:\n volume_name =\" \"\n except:\n pass\n if \"cm\" not in entry:\n entry['cm'] = {}\n\n entry[\"cm\"].update({\n \"cloud\": self.cloud,\n \"kind\": \"volume\",\n \"name\": volume_name,\n \"region\": entry[\"AvailabilityZone\"], # for aws region = AvailabilityZone\n })\n\n# entry[\"cm\"][\"created\"] = str(DateTime.now())\n\n d.append(entry)\n return d", "def _update(self):\n for key in self.keys:\n # self.show.<key> = namedtuple\n setattr(self._show\n , key\n , self._db[key]\n ) \n # self.pull.<key> = dict\n setattr(self._pull,\n key,\n self.db[key]._asdict() \n )", "def Update_All_Analysis():\r\n conn = connect_db()\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM stock;')\r\n stocks = cur.fetchall()\r\n\r\n for stock in stocks:\r\n Add_Analysis(conn, cur, stock[0])\r\n cur.close()\r\n print('Update all analysis success')", "async def refresh_inventories(self) -> None:\n self.refresh_event.clear()\n await self.symbol_get_event.wait()\n log.debug(\"Refreshing documentation inventory...\")\n self.inventory_scheduler.cancel_all()\n\n self.base_urls.clear()\n self.doc_symbols.clear()\n self.renamed_symbols.clear()\n await self.item_fetcher.clear()\n\n coros = [\n 
self.update_or_reschedule_inventory(\n package[\"package\"], package[\"base_url\"], package[\"inventory_url\"]\n ) for package in await self.bot.api_client.get(\"bot/documentation-links\")\n ]\n await asyncio.gather(*coros)\n log.debug(\"Finished inventory refresh.\")\n self.refresh_event.set()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove structures no longer returned from ESI.
def _remove_structures_not_returned_from_esi( self, structures_qs: models.QuerySet, new_structures: list ): ids_local = {x.id for x in structures_qs} ids_from_esi = {x["structure_id"] for x in new_structures} ids_to_remove = ids_local - ids_from_esi if len(ids_to_remove) > 0: structures_qs.filter(id__in=ids_to_remove).delete() logger.info( "Removed %d structures which apparently no longer exist.", len(ids_to_remove), )
[ "def _removePreviouslyExtractedVessels(self):\n removeNodesFromMRMLScene([self._vesselVolumeNode, self._vesselModelNode])", "def remove_destroyed_entities (entities):\n entities_to_remove = []\n\n # Adding the entities to remove in the list\n for entity in entities:\n if entities[entity]['type'] != 'peak' and entities[entity]['type'] != 'hub':\n structure_points = entities[entity]['structure_points']\n if structure_points <= 0:\n entities_to_remove.append(entity)\n\n # Removing the entities in the list, from entities dict\n for entity in entities_to_remove:\n del entities[entity]\n\n return entities", "def cleanup(event, roblist):\n\n newevent = eformat.write.FullEventFragment()\n newevent.copy_header(event)\n for rob in event:\n if rob.source_id().code() not in roblist:\n logging.info('Removing ROB %s from event %d (not at hit list)' % \\\n (rob.source_id(), event.lvl1_id()))\n continue\n else:\n newrob = eformat.write.ROBFragment(rob)\n newevent.append(newrob)\n\n # return modified event\n # ---------------------\n return newevent.readonly()", "def get(self):\n # self.hts.multiEvents[\"STOCK\"].exit()\n self.hts.remove_real()\n self.hts.clear_market_state(\"{0:05}\".format(1))\n print(\"REMOVE ALL REAL\")", "def filter_rm_ipaddrs(results):\n for row in results:\n del(row[\"ipaddr\"])\n return results", "def removeElement(self):", "def forget(self):\n earliest_episode_len = self.buffer_episode_lens.pop(0) # number of elements to remove\n del self.buffer[:earliest_episode_len] # remove the first n elements", "def remove_destructed_objects():\n for ob in simulate.obj_list_destruct:\n simulate.destruct2(ob)\n simulate.obj_list_destruct.remove(ob)", "def unregister_baremetal(self):\n list_data = self.request[api.DATA][api.DATA]['ids']\n resp_dict = {}\n for id in list_data:\n temp = self._unregister_from_eon(id)\n resp_dict[id] = temp\n return resp_dict", "def strip_data_for_partner_release(results):\n stripped = []\n results = filter_mod_ipaddrs(results)\n for row in results:\n try:\n stripped_row = filter_rm_non_public(row)\n stripped_row = filter_rm_section_time(stripped_row)\n stripped_row = filter_mod_datestamps(stripped_row)\n stripped_row = filter_rm_misc(stripped_row)\n stripped_row = filter_rm_emailaddr(stripped_row)\n stripped_row = filter_rm_coppa(stripped_row)\n except AttributeError:\n continue\n if stripped_row:\n stripped.append(stripped_row)\n else:\n continue\n return stripped", "def remove_all_data(self):\n self._entries = []", "def prune_old_events(events, now):\n for event in events: # for each event\n try:\n end_time = dateutil.parser.parse(event['end']['dateTime']).date()\n except KeyError:\n end_time = dateutil.parser.parse(event['end']['date']).date()\n if end_time < now.date(): # and the event is currently occuring\n logger.info('removing event {0}: in the past'.format(event[u'id']))\n events.remove(event)\n return events", "def clear_data():\n try:\n db.all_requests.remove()\n return {\"msg\": \"complete\"}\n except:\n return {\"msg\": \"error\"}", "def remove_all():\n del Customer.data[:]\n Customer.index = 0\n return Customer.data", "def _delexpired( self ):\n\t\tkeys = self._data.keys()\n\t\t#print \"Cleaning expired files, {} to process.\".format( len( keys ) ),\n\t\tfor f in set( keys ):\n\t\t\tif self._expired( f ):\n\t\t\t\tdel( self._data[f] )", "def __remove_einfo_object__(einfo) :\n\n global __data__, __enclave_name_map__\n\n try :\n # we use the old information because the name may change\n # in the new enclave (though the enclave_id will not)\n 
old_einfo = __data__[einfo.enclave_id]\n __data__.pop(einfo.enclave_id, None)\n __enclave_name_map__.pop(old_einfo.name, None)\n except :\n pass", "def cleanupOldUnusedVols(verbose):\n _, dvol = getOldUnusedVols(False)\n for k, v in dvol.items():\n resp = EC2C.delete_volume(\n DryRun = DRY,\n VolumeId = k\n )\n if verbose:\n print(\"Volume with id: \"+k+\" deleted\")\n print(\"Delete \"+str(len(lvol.keys()))+\" volumes\")", "def _remove_hetatm(self, chain):\n for residue in chain.copy():\n if residue.id[0].strip() and residue.resname not in AminoAcidThreeToOne.__members__:\n chain.detach_child(residue.id)", "def drop_volatile(data):\n clean = copy.copy(data)\n\n for field in [\"id\", \"createdAt\", \"updatedAt\", \"createdBy\", \"updatedBy\"]:\n clean.pop(field, None)\n\n return clean" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch Upwell structures from ESI for self. Return True if successful, else False.
def _fetch_upwell_structures(self, token: Token) -> bool: from .eveuniverse import EsiNameLocalization corporation_id = self.corporation.corporation_id structures = list() try: # fetch all structures incl. localizations for services structures_w_lang = esi_fetch_with_localization( esi_path="Corporation.get_corporations_corporation_id_structures", args={"corporation_id": corporation_id}, token=token, languages=EsiNameLocalization.ESI_LANGUAGES, has_pages=True, ) except OSError as ex: message_id = ( f"{__title__}-fetch_upwell_structures-{self.pk}-{type(ex).__name__}" ) title = f"{__title__}: Failed to update upwell structures for {self}" message = ( f"{self}: Failed to update upwell structures " f"from ESI for due to: {ex}" ) logger.exception(message) notify_admins_throttled( message_id=message_id, title=title, message=message, level="danger", timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT, ) return False is_ok = True # reduce data structures = self._compress_services_localization( structures_w_lang, EveUniverse.ESI_DEFAULT_LANGUAGE ) # fetch additional information for structures if not structures: logger.info("%s: No Upwell structures retrieved from ESI", self) else: logger.info( "%s: Fetching additional infos for %d Upwell structures from ESI", self, len(structures), ) for structure in structures: try: structure_info = esi_fetch( "Universe.get_universe_structures_structure_id", args={"structure_id": structure["structure_id"]}, token=token, ) structure["name"] = Structure.extract_name_from_esi_respose( structure_info["name"] ) structure["position"] = structure_info["position"] except OSError as ex: message_id = ( f"{__title__}-fetch_upwell_structures-details-" f"{self.pk}-{type(ex).__name__}" ) title = ( f"{__title__}: Failed to update details for " f"structure from {self}" ) message = ( f"{self}: Failed to update details for structure " f"with ID {structure['structure_id']} from ESI due to: {ex}" ) logger.warning(message, exc_info=True) notify_admins_throttled( message_id=message_id, title=title, message=message, level="warning", timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT, ) structure["name"] = "(no data)" is_ok = False logger.info( "%s: Storing updates for %d upwell structures", self, len(structures), ) for structure in structures: Structure.objects.update_or_create_from_dict(structure, self) if STRUCTURES_DEVELOPER_MODE: self._store_raw_data("structures", structures, corporation_id) self._remove_structures_not_returned_from_esi( structures_qs=self.structures.filter_upwell_structures(), new_structures=structures, ) return is_ok
[ "def get_available_structures( self ):\n _check_type(self)\n return _get_available(self, \"structure_\")", "def update_structures_esi(self, user: User = None):\n self.structures_last_update_ok = None\n self.structures_last_update_at = now()\n self.save()\n token = self.fetch_token()\n\n is_ok = self._fetch_upwell_structures(token)\n if STRUCTURES_FEATURE_CUSTOMS_OFFICES:\n is_ok &= self._fetch_custom_offices(token)\n if STRUCTURES_FEATURE_STARBASES:\n is_ok &= self._fetch_starbases(token)\n\n if is_ok:\n self.structures_last_update_ok = True\n self.save()\n if user:\n self._send_report_to_user(\n topic=\"structures\", topic_count=self.structures.count(), user=user\n )", "def fetchSpecial(self):\n\t\t#msg = [0xb5, 0x62, 0x01, 0x03, 0x10,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x6D] #Status poll?\n\t\t#msg = [0xb5, 0x62, 0x01, 0x02, 0x1c,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1F, 0xA6] #Posllh poll?\n\t\tmsg = [0xb5, 0x62, 0x06, 0x01, 0x03, 0x00, 0x01, 0x02, 0x01, 0x0e, 0x47]\n\t\tbuffer = self.bus.xfer2(msg)\n\t\tfor byte in buffer:\n\t\t\tself.scan_ubx(byte)\n\t\t\tif(self.mess_queue.empty() != True):\n\t\t\t\tdata = self.parse_ubx()\n\t\t\t\tif (data != None):\n\t\t\t\t\tif (self.debug == True):\n\t\t\t\t\t\tprint(data)\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn data\n\t\treturn None", "def fetch_data(self):\r\n print(\"Fetching Data from USGS Water Services API\")\r\n self.response = requests.get(self.complete_url)\r\n self.response.raise_for_status()", "def fetch(cls):\n cls.fetch_bus_stops()\n cls.fetch_bus_routes()\n cls.fetch_bus_timetables()", "def check_structure_exists(structure, all_structures):\n\n structure = structure.copy() # Copy the structure in order to preserve the original\n all_structures = all_structures.copy()\n\n # Generate a list containing all the chain IDs of the input structure\n chain_ids_structure = tuple(sorted([x.id[1] for x in structure.get_chains()]))\n\n for each_structure in all_structures:\n\n # Chain IDs of another structure\n chain_ids_each_structure = tuple(sorted([x.id[1] for x in each_structure.get_chains()]))\n\n # Checks wether both structures have the same chain IDs\n if chain_ids_structure == chain_ids_each_structure:\n\n # Selects a chain of the input structure\n chain_str = list(structure.get_chains())[0]\n id_str = chain_str.id[1]\n\n # Searches for chains with the same ID\n for chain_each_str in each_structure.get_chains():\n\n id_each_str = chain_each_str.id[1]\n\n # If a chain with the same name is found, superimpose both chains\n if id_str == id_each_str:\n\n RMSD, structure = superimpose(chain_str, chain_each_str, moving=structure)\n\n # If the superimposition yields a low RMSD (<3A) they are the same chain\n if RMSD > 3.0:\n continue\n\n partners = set()\n\n # Creates a list of partners\n for searching_partner in each_structure.get_chains():\n partner_found = False\n\n for possible_partner in structure.get_chains():\n\n if partner_found is True:\n break\n\n if possible_partner.id[1] == searching_partner.id[1] and possible_partner not in partners:\n\n RMSD = superimpose(searching_partner, possible_partner)\n\n if RMSD <= 3.0:\n partners.add(possible_partner)\n partner_found = True\n\n # If all chains have a partner returns True\n if len(partners) == len(list(each_structure.get_chains())):\n return True \n\n # If not all chains match returns 
False\n return False", "async def load_eu_data():\n eu_url = \"https://sourceforge.net/projects/miuimix/rss?path=/\"\n async with ClientSession() as session:\n stable = eT.fromstring(await fetch(session, f'{eu_url}/weekly'))\n weekly = eT.fromstring(await fetch(session, f'{eu_url}/stable'))\n stable_links = [i.find('link').text for i in stable[0].findall('item')]\n weekly_links = [i.find('link').text for i in weekly[0].findall('item')]\n return [*stable_links, *weekly_links]", "def test_multi_pending_success(self):\n\n def handle(event):\n yield 0xFF00, self.query\n yield 0xFF01, self.query\n yield 0xFF00, self.query\n yield 0x0000, self.query\n yield 0xA700, None\n\n handlers = [(evt.EVT_C_FIND, handle)]\n\n self.ae = ae = AE()\n ae.add_supported_context(UnifiedProcedureStepPull)\n ae.add_requested_context(UnifiedProcedureStepPull, ExplicitVRLittleEndian)\n scp = ae.start_server((\"localhost\", 11112), block=False, evt_handlers=handlers)\n\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n result = assoc.send_c_find(self.query, query_model=UnifiedProcedureStepPull)\n status, identifier = next(result)\n assert status.Status == 0xFF00\n assert identifier == self.query\n status, identifier = next(result)\n assert status.Status == 0xFF01\n assert identifier == self.query\n status, identifier = next(result)\n assert status.Status == 0xFF00\n assert identifier == self.query\n status, identifier = next(result)\n assert status.Status == 0x0000\n assert identifier is None\n with pytest.raises(StopIteration):\n next(result)\n\n assoc.release()\n assert assoc.is_released\n scp.shutdown()", "def download(self):\n\n from shutil import copyfileobj\n from requests import get\n\n req = get(Settings.online_dir_structure, stream=True)\n\n if req.status_code == 200:\n with open(self.structure, 'wb') as file:\n req.raw.decode_content = True\n copyfileobj(req.raw, file)\n del req\n\n return True\n return False", "def getAssemblyinfo(speciesName):\n\n#---------------Create e-search URL & send request to API-----------------------\n base_url = \"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/\"\n \n url = base_url + \"esearch.fcgi?db=assembly&term=(%s[All Fields])&usehistory=y&api_key=f1e800ad255b055a691c7cf57a576fe4da08\" % speciesName # creates e-search url\n\n api_request = requests.get(url) #sends request to api\n \n # grab the response content \n xml_content = api_request.content \n \n # parse with beautiful soup \n soup = BeautifulSoup(xml_content, 'xml')\n#--------------Get Query Key & Web Environments from xml------------------------ \n query_str = soup.find('QueryKey') #finds query key tag from xml\n \n querykey = str(query_str) #converts result to string variable\n \n querykey_num = querykey[10:len(querykey)-11] #parses out query key from string\n \n web_env_str = soup.find('WebEnv') #finds web environment tag from xml\n \n web_env = str(web_env_str) #converts result to string variable\n \n web_env_num = web_env[8:len(web_env)-9] #parses out web environment from string\n \n#-----------------Create e-summary URL and send request to API------------------\n summary_url = base_url + \"esummary.fcgi?db=assembly&query_key=%s&WebEnv=%s&api_key=f1e800ad255b055a691c7cf57a576fe4da08\" % (querykey_num, web_env_num)\n \n api_request_summary = requests.get(summary_url) #sends request to api\n \n # grab the response content \n xml_content_summary = api_request_summary.content\n \n # parse with beautiful soup \n soup_summary = 
BeautifulSoup(xml_content_summary, 'xml')\n#------------Gets desired information from Assembly database--------------------\n accession_str = soup_summary.find('AssemblyAccession') #finds Assembly accession number tag from xml\n \n accession = str(accession_str) #converts result to string variable\n \n accession_num = accession[19:len(accession)-20] #parses out accession number from string\n \n bioproject_str = soup_summary.find('BioprojectAccn') #finds bioproject tag from xml\n \n bioproject = str(bioproject_str) #converts result to string variable\n \n bioproject_num = bioproject[16:len(bioproject)-17] #parses out bioproject number from string\n \n pubdate_str = soup_summary.find('AsmReleaseDate_GenBank') #finds Assembly publication date tag from xml\n \n pubdate = str(pubdate_str) #converts result to string variable\n \n pubdate_num = pubdate[24:len(pubdate)-37] #parses out assembly publication date from string\n \n return accession_num, bioproject_num, pubdate_num", "def _fetch(self) -> Iterator: # pragma: no cover\n raise NotImplementedError", "def uniprotAPICall(protein_name):\n # API call to UniRef DB\n base_url = \"http://www.uniprot.org/uniprot/\"\n extension = \".xml\"\n my_response = requests.get(base_url + protein_name + extension)\n \n # For successful API call, response code will be 200 (OK)\n if not my_response.ok:\n print \"UniProt node not found: \" + str(protein_name) \n return\n\n # get root of the XML response\n root = ET.fromstring(my_response.content)\n rep_member = root.find('{http://uniprot.org/uniprot}entry')\n\n # set up dict to put in info\n member_dict = {}\n\n # Add any properties that have type - id pairings\n for prop in rep_member.iter():\n if 'type' in prop.attrib and 'id' in prop.attrib:\n member_dict[prop.attrib['type'].replace(\" \", \"_\")] = prop.attrib['id']\n # else:\n # member_dict[prop.attrib['type'].replace(\n # \" \", \"_\")] = prop.attrib['id']\n \n # Get protein accession. Ex: Q8KM74\n member_dict['UniProtKB_accession'] = rep_member.find('{http://uniprot.org/uniprot}accession').text\n member_dict['id'] = member_dict['UniProtKB_accession']\n\n # Get specific protein accession. 
Ex: Q8KM74_METTR\n member_dict['UniProtKB_ID'] = rep_member.find('{http://uniprot.org/uniprot}name').text\n\n # Get source organism\n member_dict['source_organism'] = rep_member.find('{http://uniprot.org/uniprot}organism').find('{http://uniprot.org/uniprot}name').text\n\n # Get protein existance: http://www.uniprot.org/help/protein_existence\n member_dict['protein_existence'] = rep_member.find('{http://uniprot.org/uniprot}proteinExistence').attrib['type'] if 'type' in rep_member.find('{http://uniprot.org/uniprot}proteinExistence').attrib else None\n \n # Get protein length\n member_dict['length'] = int(rep_member.find('{http://uniprot.org/uniprot}sequence').attrib['length']) if 'length' in rep_member.find('{http://uniprot.org/uniprot}sequence').attrib else None\n\n #print member_dict\n #name = UniProtKB_accession, UniProtKB_ID (has the _1343), UniProtKB_accession, id = UniProtKB_ID, length, protein_name, source_organism, NCBI_taxonomy, UniParc_ID, Pfam,Supfam\n\n return ClustNode(member_dict)", "def fetch_and_insert_eeg_files(self, derivatives=False):\n\n inserted_eegs = []\n # load the Physiological object that will be used to insert the\n # physiological data into the database\n physiological = Physiological(self.db, self.verbose)\n\n # TODO if derivatives, grep the source file as well as the input file ID???\n\n # grep the raw files\n eeg_files = self.bids_layout.get(\n subject = self.bids_sub_id,\n session = self.bids_ses_id,\n scope = 'derivatives' if derivatives else 'raw',\n # datatype = self.bids_modality,\n suffix = self.bids_modality,\n extension = ['set', 'edf', 'vhdr', 'vmrk', 'eeg', 'bdf']\n )\n\n # return if no eeg_file was found\n if not eeg_files:\n return None\n\n for eeg_file in eeg_files:\n eegjson_file = self.bids_layout.get_nearest(\n eeg_file.path,\n return_type = 'tuple',\n strict=False,\n extension = 'json',\n suffix = self.bids_modality,\n all_ = False,\n full_search = False,\n )\n\n fdt_file = self.bids_layout.get_nearest(\n eeg_file.path,\n return_type = 'tuple',\n strict=False,\n extension = 'fdt',\n all_ = False,\n full_search = False,\n )\n\n # read the json file if it exists\n eeg_file_data = {}\n eegjson_file_path = None\n if eegjson_file:\n with open(eegjson_file.path) as data_file:\n eeg_file_data = json.load(data_file)\n # copy the JSON file to the LORIS BIDS import directory\n eegjson_file_path = self.copy_file_to_loris_bids_dir(\n eegjson_file.path, derivatives\n )\n eeg_file_data['eegjson_file'] = eegjson_file_path\n json_blake2 = utilities.compute_blake2b_hash(eegjson_file.path)\n eeg_file_data['physiological_json_file_blake2b_hash'] = json_blake2\n\n # greps the file type from the ImagingFileTypes table\n file_type = physiological.determine_file_type(eeg_file.path)\n\n # grep the output type from the physiological_output_type table\n output_type = 'derivative' if derivatives else 'raw'\n output_type_id = self.db.grep_id_from_lookup_table(\n id_field_name = 'PhysiologicalOutputTypeID',\n table_name = 'physiological_output_type',\n where_field_name = 'OutputTypeName',\n where_value = output_type,\n insert_if_not_found = False\n )\n\n # get the acquisition date of the EEG file or the age at the time of the EEG recording\n eeg_acq_time = None\n if self.scans_file:\n scan_info = ScansTSV(self.scans_file, eeg_file.path, self.verbose)\n eeg_acq_time = scan_info.get_acquisition_time()\n eeg_file_data['age_at_scan'] = scan_info.get_age_at_scan()\n\n # copy the scans.tsv file to the LORIS BIDS import directory\n scans_path = 
scan_info.copy_scans_tsv_file_to_loris_bids_dir(\n self.bids_sub_id, self.loris_bids_root_dir, self.data_dir\n )\n\n eeg_file_data['scans_tsv_file'] = scans_path\n scans_blake2 = utilities.compute_blake2b_hash(self.scans_file)\n eeg_file_data['physiological_scans_tsv_file_bake2hash'] = scans_blake2\n\n # if file type is set and fdt file exists, append fdt path to the\n # eeg_file_data dictionary\n fdt_file_path = None\n if file_type == 'set' and fdt_file:\n # copy the fdt file to the LORIS BIDS import directory\n fdt_file_path = self.copy_file_to_loris_bids_dir(\n fdt_file.path, derivatives\n )\n\n eeg_file_data['fdt_file'] = fdt_file_path\n fdt_blake2 = utilities.compute_blake2b_hash(fdt_file.path)\n eeg_file_data['physiological_fdt_file_blake2b_hash'] = fdt_blake2\n\n # append the blake2b to the eeg_file_data dictionary\n blake2 = utilities.compute_blake2b_hash(eeg_file.path)\n eeg_file_data['physiological_file_blake2b_hash'] = blake2\n\n # check that the file using blake2b is not already inserted before\n # inserting it\n result = physiological.grep_file_id_from_hash(blake2)\n physio_file_id = result['PhysiologicalFileID'] if result else None\n eeg_path = result['FilePath'] if result else None\n\n if not physio_file_id:\n # grep the modality ID from physiological_modality table\n modality_id = self.db.grep_id_from_lookup_table(\n id_field_name='PhysiologicalModalityID',\n table_name='physiological_modality',\n where_field_name='PhysiologicalModality',\n where_value=self.bids_modality,\n insert_if_not_found=False\n )\n\n # copy the eeg_file to the LORIS BIDS import directory\n eeg_path = self.copy_file_to_loris_bids_dir(\n eeg_file.path, derivatives\n )\n\n # insert the file along with its information into\n # physiological_file and physiological_parameter_file tables\n eeg_file_info = {\n 'FileType': file_type,\n 'FilePath': eeg_path,\n 'SessionID': self.session_id,\n 'AcquisitionTime': eeg_acq_time,\n 'InsertedByUser': getpass.getuser(),\n 'PhysiologicalOutputTypeID': output_type_id,\n 'PhysiologicalModalityID': modality_id\n }\n physio_file_id = physiological.insert_physiological_file(\n eeg_file_info, eeg_file_data\n )\n \n # if the EEG file was a set file, then update the filename for the .set\n # and .fdt files in the .set file so it can find the proper file for\n # visualization and analyses\n file_paths_updated = file_type != 'set'\n if not file_paths_updated:\n set_full_path = os.path.join(self.data_dir, eeg_path)\n fdt_full_path = eeg_file_data['fdt_file'] if 'fdt_file' in eeg_file_data.keys() else None\n\n if fdt_full_path:\n fdt_full_path = os.path.join(self.data_dir, eeg_file_data['fdt_file'])\n file_paths_updated = utilities.update_set_file_path_info(set_full_path, fdt_full_path)\n\n if file_paths_updated:\n inserted_eegs.append({\n 'file_id': physio_file_id,\n 'file_path': eeg_path,\n 'eegjson_file_path': eegjson_file_path,\n 'fdt_file_path': fdt_file_path,\n 'original_file_data': eeg_file,\n })\n\n return inserted_eegs", "def test_fetch_reads_no_upas(self):\n file_util = self._get_file_util()\n with self.assertRaises(ValueError) as cm:\n file_util.fetch_reads_files(None)\n exception = cm.exception\n self.assertIn(\"reads_upas must be a list of UPAs\", str(exception))\n\n with self.assertRaises(ValueError) as cm:\n file_util.fetch_reads_files([])\n exception = cm.exception\n self.assertIn(\"reads_upas must contain at least one UPA\", str(exception))", "def harvest(self):\n self.setupdirs()\n self.updateHarvestRequest()\n self.setUpCrosswalk()\n self.data = None\n 
self.__until = datetime.fromtimestamp(self.startUpTime, timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')\n self.__metadataPrefix = self.harvestInfo['provider_type']\n try:\n self.__set = self.harvestInfo['oai_set']\n except KeyError:\n pass\n try:\n if self.harvestInfo['advanced_harvest_mode'] == 'INCREMENTAL':\n if self.harvestInfo['last_harvest_run_date'] != '':\n self.__from = self.harvestInfo['last_harvest_run_date']\n else:\n self.identifyRequest()\n while self.firstCall or self.__resumptionToken != \"\":\n time.sleep(0.1)\n self.getHarvestData()\n self.storeHarvestData()\n self.runCrossWalk()\n self.postHarvestData()\n self.finishHarvest()\n except Exception as e:\n self.logger.logMessage(\"ERROR RECEIVING OAI DATA, resumptionToken:%s\" % self.__resumptionToken, \"ERROR\")\n self.handleExceptions(e)", "def is_complete(self) -> bool:\n blocks = [block for block in self.blocks if block.status is not Block.Retrieved]\n return len(blocks) == 0", "def _fetch_starbases(self, token: Token) -> bool:\n\n structures = list()\n corporation_id = self.corporation.corporation_id\n try:\n starbases = esi_fetch(\n \"Corporation.get_corporations_corporation_id_starbases\",\n args={\"corporation_id\": corporation_id},\n token=token,\n has_pages=True,\n )\n if not starbases:\n logger.info(\"%s: No starbases retrieved from ESI\", self)\n else:\n names = self._fetch_starbases_names(corporation_id, starbases, token)\n for starbase in starbases:\n starbase[\"fuel_expires\"] = self._calc_starbase_fuel_expires(\n corporation_id, starbase, token\n )\n # convert starbases to structures\n for starbase in starbases:\n if starbase[\"starbase_id\"] in names:\n name = names[starbase[\"starbase_id\"]]\n else:\n name = \"Starbase\"\n structure = {\n \"structure_id\": starbase[\"starbase_id\"],\n \"type_id\": starbase[\"type_id\"],\n \"corporation_id\": corporation_id,\n \"name\": name,\n \"system_id\": starbase[\"system_id\"],\n }\n if \"state\" in starbase:\n structure[\"state\"] = starbase[\"state\"]\n\n if \"moon_id\" in starbase:\n structure[\"moon_id\"] = starbase[\"moon_id\"]\n\n if \"fuel_expires\" in starbase:\n structure[\"fuel_expires\"] = starbase[\"fuel_expires\"]\n\n if \"reinforced_until\" in starbase:\n structure[\"state_timer_end\"] = starbase[\"reinforced_until\"]\n\n if \"unanchors_at\" in starbase:\n structure[\"unanchors_at\"] = starbase[\"unanchors_at\"]\n\n structures.append(structure)\n\n logger.info(\n \"%s: Storing updates for %d starbases\", self, len(structures)\n )\n for structure in structures:\n Structure.objects.update_or_create_from_dict(structure, self)\n\n if STRUCTURES_DEVELOPER_MODE:\n self._store_raw_data(\"starbases\", structures, corporation_id)\n\n except OSError as ex:\n message_id = f\"{__title__}-_fetch_starbases-{self.pk}-{type(ex).__name__}\"\n title = f\"{__title__}: Failed to fetch starbases for {self}\"\n message = f\"{self}: Failed to fetch starbases from ESI due to {ex}\"\n logger.exception(message)\n notify_admins_throttled(\n message_id=message_id,\n title=title,\n message=message,\n level=\"danger\",\n timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT,\n )\n return False\n\n self._remove_structures_not_returned_from_esi(\n structures_qs=self.structures.filter_starbases(),\n new_structures=structures,\n )\n return True", "def find_uavs_close(self):\n\n # Avoid immediate exchange\n if self.model.steps <= 20:\n return\n\n # Scan for UAVs\n other_uavs = self.sensor.scan_for_uavs(self.pos)\n # If there are other UAVs ...\n if len(other_uavs) is not 0:\n # ... 
exchange perceived_world_grids with them\n for other_uav in other_uavs:\n self.communication_module.exchange_grid_with(other_uav)", "def _get_structure_makeup(self, instance_id):\n if instance_id not in self._struct_cache:\n if not self._target_is_connected:\n if not self.forward_open():\n self._status = (10, \"Target did not connected. get_tag_list will not be executed.\")\n logger.warning(self._status)\n raise DataError(\"Target did not connected. get_tag_list will not be executed.\")\n\n message_request = [\n pack_uint(self._get_sequence()),\n bytes([TAG_SERVICES_REQUEST['Get Attributes']]),\n bytes([3]), # Request Path ( 20 6B 25 00 Instance )\n CLASS_ID[\"8-bit\"], # Class id = 20 from spec 0x20\n CLASS_CODE[\"Template Object\"], # Logical segment: Template Object 0x6C\n INSTANCE_ID[\"16-bit\"], # Instance Segment: 16 Bit instance 0x25\n b'\\x00',\n pack_uint(instance_id),\n pack_uint(4), # Number of attributes\n pack_uint(4), # Template Object Definition Size UDINT\n pack_uint(5), # Template Structure Size UDINT\n pack_uint(2), # Template Member Count UINT\n pack_uint(1) # Structure Handle We can use this to read and write UINT\n ]\n\n if self.send_unit_data(\n build_common_packet_format(DATA_ITEM['Connected'],\n b''.join(message_request), ADDRESS_ITEM['Connection Based'],\n addr_data=self._target_cid, )) is None:\n raise DataError(\"send_unit_data returned not valid data\")\n self._struct_cache[instance_id] = self._buffer\n\n return self._struct_cache[instance_id]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
compress service names localizations for each structure. We are assuming that services are returned from ESI in the same order for each language.
def _compress_services_localization( structures_w_lang: dict, default_lang: str ) -> list: structures_services = Owner._collect_services_with_localizations( structures_w_lang, default_lang ) structures = Owner._condense_services_localizations_into_structures( structures_w_lang, default_lang, structures_services ) return structures
[ "def _collect_services_with_localizations(structures_w_lang, default_lang):\n structures_services = dict()\n for lang, structures in structures_w_lang.items():\n if lang != default_lang:\n for structure in structures:\n if \"services\" in structure and structure[\"services\"]:\n structure_id = structure[\"structure_id\"]\n if structure_id not in structures_services:\n structures_services[structure_id] = dict()\n structures_services[structure_id][lang] = list()\n for service in structure[\"services\"]:\n structures_services[structure_id][lang].append(\n service[\"name\"]\n )\n return structures_services", "def _condense_services_localizations_into_structures(\n structures_w_lang, default_lang, structures_services\n ):\n structures = structures_w_lang[default_lang]\n for structure in structures:\n if \"services\" in structure and structure[\"services\"]:\n structure_id = structure[\"structure_id\"]\n for lang in structures_w_lang.keys():\n if (\n lang != default_lang\n and lang in structures_services[structure_id]\n ):\n for service, name_loc in zip(\n structure[\"services\"],\n structures_services[structure_id][lang],\n ):\n service[\"name_\" + lang] = name_loc\n return structures", "def CustomServiceNames(self) -> ServiceNameCollection:", "def _get_services_container_names(self):\n services = {}\n sellables = set(os.listdir(Two1Composer.SERVICES_DIR)).intersection(\n set(Two1Composer.GRID_SERVICES))\n for service in sellables:\n services[service] = \"sell_\" + service\n return services", "def normalize_keystone_services(services):\n ret = []\n for service in services:\n service_type = service.get('type', service.get('service_type'))\n new_service = {\n 'id': service['id'],\n 'name': service['name'],\n 'description': service.get('description', None),\n 'type': service_type,\n 'service_type': service_type,\n }\n ret.append(new_service)\n return ret", "def compress_tags(tags: List[str]) -> List[str]:\n return list(map(TagsProcessor.compress_tag, tags))", "def strings_to_services(\n strings: list[str], string_to_service: dict[str, VacuumEntityFeature]\n) -> VacuumEntityFeature:\n services = VacuumEntityFeature(0)\n for string in strings:\n services |= string_to_service[string]\n return services", "def demangle_names(mangled_names):\n demangled_names = {}\n\n for mangled_name in mangled_names :\n\n demangled_name = subprocess.run([\"c++filt\", mangled_name], stdout=subprocess.PIPE).stdout.decode(\"utf-8\")\n demangled_names[mangled_name] = demangled_name\n \n return demangled_names", "def add_euconc_to_short_names(short_name_dict):\n long_names = list(short_name_dict.keys())\n for long_name in long_names:\n sndict = short_name_dict[long_name]\n match = re.search(r'(.+)_AIR_CONCENTRATION', long_name)\n if match:\n eu_long_name = match.group(1)+'_EULERIAN_CONCENTRATION'\n short_name_dict[eu_long_name] = sndict\n tot_long_name = match.group(1)+'_CONCENTRATION'\n short_name_dict[tot_long_name] = sndict\n\n return short_name_dict", "def service_completer(ctx, args, incomplete) -> List:\n result = [(str(svc.uid)) for svc in service_registry.values()]\n result.extend(svc.name for svc in service_registry.values())\n return result", "def split_canonical_names(nm_result: Dict):\n split_layer_infos = []\n for layer in nm_result[\"layer_info\"]:\n if \",\" in layer[\"canonical_name\"]:\n for sub_layer_name in layer[\"canonical_name\"].split(\",\"):\n sub_layer_info = deepcopy(layer)\n sub_layer_info[\"meta_canonical_name\"] = layer[\"canonical_name\"]\n sub_layer_info[\"canonical_name\"] = 
sub_layer_name\n split_layer_infos.append(sub_layer_info)\n else:\n layer[\"meta_canonical_name\"] = None\n split_layer_infos.append(layer)\n nm_result[\"layer_info\"] = split_layer_infos", "def services_to_strings(\n services: VacuumEntityFeature,\n service_to_string: dict[VacuumEntityFeature, str],\n) -> list[str]:\n return [\n service_to_string[service]\n for service in service_to_string\n if service & services\n ]", "def canonical_names(dex_names):\n return [ canonical_name(d) for d in dex_names ]", "def __format_names(self):\n self.formats = list(map(lower_and_add_dot, self.formats))\n self.lop = list(filter(lambda x: x[-4:].lower() in self.formats, self.lop))", "def vrt2lists_fi():\n corpus_folder = os.path.join('data', 'corpora', 'wikipedia-fi-2017-src',\n 'wikipedia-fi-2017-src')\n corpus = []\n tag_corpus = []\n files = list(os.walk(corpus_folder))[0][2]\n for file in files:\n with open(os.path.join(corpus_folder, file), encoding='utf8') as f:\n data = f.read().split('</sentence>')\n for sent in data:\n sentence = []\n tag_sentence = []\n items = [element.split('\\t') for element in sent.split('\\n')]\n for item in items:\n if len(item) == 10:\n word = item[1]\n tag = item[3]\n #sentence.append((word, tag))\n sentence.append(word)\n tag_sentence.append(tag)\n if len(sentence) > 1 and len(sentence) == len(tag_sentence):\n corpus.append(sentence)\n tag_corpus.append(tag_sentence)\n\n \n # Save the corpora\n with open(os.path.join('data','corpora','Wikipedia_fi_2017.pkl'), 'wb') as f:\n pickle.dump(corpus, f, 4)\n \n with open(os.path.join('data','corpora','Wikipedia_fi_2017_pos.pkl'), 'wb') as f:\n pickle.dump(tag_corpus, f, 4)\n\n #with open(os.path.join('data','corpora','Wikipedia_fi_2017_words_tags.pkl'), 'wb') as f:\n #pickle.dump(corpus, f, 4)", "def test_translate_services_several_one_inexistent(self):\n self.cmd.services = {'OTH': 'Other', '': '', 'SRV': 'Service'}\n string = 'OTH, SRV , SRV1'\n self.assertEqual(self.cmd.translate_services(string), 'Other, Service, SRV1')", "def get_services(self):\r\n services_dict = {}\r\n for service in self.services:\r\n service_chars = {}\r\n service_chars['characteristics'] = service.get_characteristics()\r\n services_dict[service.get_uuid()] = service_chars\r\n\r\n return services_dict", "def services_all(ctx):\n ctx.run(KUBERNETES_GET_SERVICES_ALL_CMD)", "def service_templates(self):\n return set(\n [t for t in self.list_templates() if '$service/' in t]\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
collect services with name localizations for all structures
def _collect_services_with_localizations(structures_w_lang, default_lang): structures_services = dict() for lang, structures in structures_w_lang.items(): if lang != default_lang: for structure in structures: if "services" in structure and structure["services"]: structure_id = structure["structure_id"] if structure_id not in structures_services: structures_services[structure_id] = dict() structures_services[structure_id][lang] = list() for service in structure["services"]: structures_services[structure_id][lang].append( service["name"] ) return structures_services
[ "def _condense_services_localizations_into_structures(\n structures_w_lang, default_lang, structures_services\n ):\n structures = structures_w_lang[default_lang]\n for structure in structures:\n if \"services\" in structure and structure[\"services\"]:\n structure_id = structure[\"structure_id\"]\n for lang in structures_w_lang.keys():\n if (\n lang != default_lang\n and lang in structures_services[structure_id]\n ):\n for service, name_loc in zip(\n structure[\"services\"],\n structures_services[structure_id][lang],\n ):\n service[\"name_\" + lang] = name_loc\n return structures", "def _compress_services_localization(\n structures_w_lang: dict, default_lang: str\n ) -> list:\n structures_services = Owner._collect_services_with_localizations(\n structures_w_lang, default_lang\n )\n structures = Owner._condense_services_localizations_into_structures(\n structures_w_lang, default_lang, structures_services\n )\n return structures", "def _all_services(type_, *args, **kwargs):\n return all_srvs[type_]", "def CustomServiceNames(self) -> ServiceNameCollection:", "def _get_services_container_names(self):\n services = {}\n sellables = set(os.listdir(Two1Composer.SERVICES_DIR)).intersection(\n set(Two1Composer.GRID_SERVICES))\n for service in sellables:\n services[service] = \"sell_\" + service\n return services", "def services_all(ctx):\n ctx.run(KUBERNETES_GET_SERVICES_ALL_CMD)", "def parse_services(self):\n #Client\n for item in self.client_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.client_services_list.append(service) \n\n #Server\n for item in self.server_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.server_services_list.append(service)", "def get_all_local_services(self):\n return self._services", "def service_completer(ctx, args, incomplete) -> List:\n result = [(str(svc.uid)) for svc in service_registry.values()]\n result.extend(svc.name for svc in service_registry.values())\n return result", "def get_all_services():\n global tts_srv\n tts_srv = QI_SESSION.service(\"ALTextToSpeech\")\n\n global al_srv\n al_srv = QI_SESSION.service(\"ALAutonomousLife\")\n\n global ba_srv\n ba_srv = QI_SESSION.service(\"ALBasicAwareness\")\n\n global ab_srv\n ab_srv = QI_SESSION.service(\"ALAutonomousBlinking\")\n\n global motion_srv\n motion_srv = QI_SESSION.service(\"ALMotion\")\n\n global video_srv\n video_srv = QI_SESSION.service(\"ALVideoDevice\")\n\n global tablet_srv\n tablet_srv = QI_SESSION.service(\"ALTabletService\")\n\n global as_srv\n as_srv = QI_SESSION.service(\"ALAnimatedSpeech\")\n\n global ap_srv\n ap_srv = QI_SESSION.service(\"ALAnimationPlayer\")\n\n global posture_srv\n posture_srv = QI_SESSION.service(\"ALRobotPosture\")\n\n global ar_srv\n ar_srv = QI_SESSION.service(\"ALAudioRecorder\")\n\n global ad_srv\n ad_srv = QI_SESSION.service(\"ALAudioDevice\")\n\n global fd_srv\n fd_srv = QI_SESSION.service(\"ALFaceDetection\")\n\n global mem_srv\n mem_srv = QI_SESSION.service(\"ALMemory\")\n\n global lm_srv\n lm_srv = QI_SESSION.service(\"ALListeningMovement\")\n\n global sm_srv\n sm_srv = QI_SESSION.service(\"ALSpeakingMovement\")\n\n global audio_player\n audio_player = QI_SESSION.service(\"ALAudioPlayer\")\n\n global led_srv\n led_srv = QI_SESSION.service(\"ALLeds\")", "def __collect_names():\n for c in constants.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in u_funs.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c 
in vars.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in lsts.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in funs.keys():\n if not all_names.__contains__(c):\n all_names.append(c)\n for c in commands.keys():\n if not all_names.__contains__(c):\n all_names.append(c)", "def getSpanNames(self, service_name):\n pass", "def __register_general_subservices(self, service):\n self.logger.debug(\"registering (sub)service %s\" % service.name)\n with self.services_lock:\n self.services[service.name] = service\n for subservice in service.subservices:\n self.__register_general_subservices(subservice)", "def test_load_local_data__services(self):\n services = {'Abbr1': 'Service 1', 'Abbr2': 'Service 2'}\n languages = {'Lang1': 'Language 1', 'Lang2': 'Language 2', 'Lang3': 'Language 3'}\n self.cmd.languages = languages\n self.cmd.services = services\n self.cmd.save_data()\n self.cmd.languages = None\n self.cmd.services['Abbr3'] = 'Service 3'\n self.cmd.load_local_data()\n self.assertTrue(len(self.cmd.services) == 3)\n self.assertTrue('Abbr3' in self.cmd.services)\n self.assertTrue(len(self.cmd.languages) == 3)", "def service_templates(self):\n return set(\n [t for t in self.list_templates() if '$service/' in t]\n )", "def service_names(self):\n return self.services.keys()", "def services(\n self,\n ) -> google.protobuf.internal.containers.MessageMap[\n builtins.str, global___GapicMetadata.ServiceForTransport\n ]:", "def user_translator_for_locales(self):\n locales = []\n\n for group in self.groups.all():\n locale = group.translated_locales.first()\n if locale:\n locales.append(locale)\n\n return locales", "def _extract_services(plugins):\n services = []\n for plugin in filter(_has_services, plugins):\n services.extend(plugin.services)\n return services" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add corresponding service name localizations to a structure's services
def _condense_services_localizations_into_structures( structures_w_lang, default_lang, structures_services ): structures = structures_w_lang[default_lang] for structure in structures: if "services" in structure and structure["services"]: structure_id = structure["structure_id"] for lang in structures_w_lang.keys(): if ( lang != default_lang and lang in structures_services[structure_id] ): for service, name_loc in zip( structure["services"], structures_services[structure_id][lang], ): service["name_" + lang] = name_loc return structures
[ "def _collect_services_with_localizations(structures_w_lang, default_lang):\n structures_services = dict()\n for lang, structures in structures_w_lang.items():\n if lang != default_lang:\n for structure in structures:\n if \"services\" in structure and structure[\"services\"]:\n structure_id = structure[\"structure_id\"]\n if structure_id not in structures_services:\n structures_services[structure_id] = dict()\n structures_services[structure_id][lang] = list()\n for service in structure[\"services\"]:\n structures_services[structure_id][lang].append(\n service[\"name\"]\n )\n return structures_services", "def CustomServiceNames(self) -> ServiceNameCollection:", "def _compress_services_localization(\n structures_w_lang: dict, default_lang: str\n ) -> list:\n structures_services = Owner._collect_services_with_localizations(\n structures_w_lang, default_lang\n )\n structures = Owner._condense_services_localizations_into_structures(\n structures_w_lang, default_lang, structures_services\n )\n return structures", "def _add_services(self):\n this_service = {'name': 'designate'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'memcached', 'location': 'cs:memcached'},\n {'name': 'designate-bind'},\n {'name': 'neutron-api'}\n ]\n\n use_source = [\n 'percona-cluster',\n 'rabbitmq-server',\n ]\n\n no_origin = [\n 'designate-bind',\n 'memcached',\n ]\n\n super(DesignateBasicDeployment, self)._add_services(this_service,\n other_services,\n use_source,\n no_origin)", "def _process_service_name(self):\n self.infos.service_name = self._bind_data(self.configuration['service']['name'])\n self.infos.green_infos.stack['Parameters']['ServiceName']['Default'] = self.infos.service_name\n self.infos.init_infos.stack['Parameters']['ServiceName']['Default'] = self.infos.service_name\n self._log_information(key='Service', value=self.infos.service_name, ljust=18)", "def add_service(self, unique=False):\n try:\n ZCONF.register_service(self.info, allow_name_change=not unique)\n except:\n print('Collision')", "def __register_general_subservices(self, service):\n self.logger.debug(\"registering (sub)service %s\" % service.name)\n with self.services_lock:\n self.services[service.name] = service\n for subservice in service.subservices:\n self.__register_general_subservices(subservice)", "def add_service(self, generic_handler):\n self._services.add(generic_handler.service_name)", "def _get_services_container_names(self):\n services = {}\n sellables = set(os.listdir(Two1Composer.SERVICES_DIR)).intersection(\n set(Two1Composer.GRID_SERVICES))\n for service in sellables:\n services[service] = \"sell_\" + service\n return services", "def setGlobalTranslationService(service):\n global translationService\n old_service = translationService\n translationService = service\n return old_service", "def addService(self, zeroconf, type, name):\r\n info = zeroconf.getServiceInfo(type, name)\r\n print(\"Service %s added, service info: %s\" % (name, info)) \r\n self._services[name] = info\r\n self._updateList()", "def push_service_into_list(self, name, service):\n self.services[name] = service\n if service.required:\n self.services['required'].append(name)\n else:\n self.services['optional'].append(name)\n self.logger.debug('added %s to the service list', name)\n self.logger.log(9, self.services[name].__dict__)", "def test_insert_lang_serv__service(self):\n service = ['Abbr1', 'Service 1']\n self.cmd.insert_lang_serv(Service(), 
service)\n obj = Service.objects.get(abbr__exact='Abbr1')\n self.assertTrue(obj.name == 'Service 1')", "def add_service(self, service_type, name, url):\n self.services = [post for post in self.services if post[2] != url] + [(service_type, name, url)]\n with open(os.path.join(self.working_directory, self.file_name), \"w\") as file:\n json.dump(self.services, file, indent = 4)\n return \"Ok\"", "def parse_services(self):\n #Client\n for item in self.client_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.client_services_list.append(service) \n\n #Server\n for item in self.server_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.server_services_list.append(service)", "def write_services(dir: str, updated: str, data: dict) -> None:\n name_size = max(len(x.get('name')) for x in data.values()) + 2\n port_protocol_size = max(len(x) for x in data) + 2\n aliases_size = max(len(' '.join(x.get('aliases'))) for x in data.values()) + 2\n with open(f'{dir}/services', 'w') as f:\n f.write(\n '# Network services, Internet style (/etc/services)\\n'\n '# See also http://www.iana.org/assignments/service-names-port-numbers\\n'\n '#\\n'\n f'# last updated {updated}\\n')\n for record, info in data.items():\n name = info.get('name', '')\n aliases = ' '.join(info.get('aliases', []))\n description = info.get('description')\n f.write(\n f'{name: <{name_size}}'\n f'{record: <{port_protocol_size}}'\n f'{aliases: <{aliases_size}}'\n f'{description}\\n')\n f.flush()\n os.fsync(f.fileno())", "def test_load_local_data__services(self):\n services = {'Abbr1': 'Service 1', 'Abbr2': 'Service 2'}\n languages = {'Lang1': 'Language 1', 'Lang2': 'Language 2', 'Lang3': 'Language 3'}\n self.cmd.languages = languages\n self.cmd.services = services\n self.cmd.save_data()\n self.cmd.languages = None\n self.cmd.services['Abbr3'] = 'Service 3'\n self.cmd.load_local_data()\n self.assertTrue(len(self.cmd.services) == 3)\n self.assertTrue('Abbr3' in self.cmd.services)\n self.assertTrue(len(self.cmd.languages) == 3)", "def gatts_register_services(\n self, services_definition: tuple[_Service, ...], /\n ) -> tuple[tuple[memoryview, ...], ...]:", "def test_translate_services_several_one_inexistent(self):\n self.cmd.services = {'OTH': 'Other', '': '', 'SRV': 'Service'}\n string = 'OTH, SRV , SRV1'\n self.assertEqual(self.cmd.translate_services(string), 'Other, Service, SRV1')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch custom offices from ESI for this owner. Return True when successful, else False.
def _fetch_custom_offices(self, token: Token) -> bool: corporation_id = self.corporation.corporation_id structures = dict() try: pocos = esi_fetch( "Planetary_Interaction.get_corporations_corporation_id_customs_offices", args={"corporation_id": corporation_id}, token=token, has_pages=True, ) if not pocos: logger.info("%s: No custom offices retrieved from ESI", self) else: pocos_2 = {row["office_id"]: row for row in pocos} office_ids = list(pocos_2.keys()) positions = self._fetch_locations_for_pocos( corporation_id, office_ids, token ) names = self._fetch_names_for_pocos(corporation_id, office_ids, token) # making sure we have all solar systems loaded # incl. their planets for later name matching for solar_system_id in {int(x["system_id"]) for x in pocos}: EveSolarSystem.objects.get_or_create_esi(solar_system_id) # compile pocos into structures list for office_id, poco in pocos_2.items(): planet_name = names.get(office_id, "") if planet_name: try: eve_planet = EvePlanet.objects.get(name=planet_name) except EvePlanet.DoesNotExist: name = "" planet_id = None else: planet_id = eve_planet.id name = eve_planet.eve_type.name_localized_for_language( STRUCTURES_DEFAULT_LANGUAGE ) else: name = None planet_id = None reinforce_exit_start = datetime( year=2000, month=1, day=1, hour=poco["reinforce_exit_start"] ) reinforce_hour = reinforce_exit_start + timedelta(hours=1) structure = { "structure_id": office_id, "type_id": constants.EVE_TYPE_ID_POCO, "corporation_id": corporation_id, "name": name if name else "", "system_id": poco["system_id"], "reinforce_hour": reinforce_hour.hour, "state": Structure.State.UNKNOWN, } if planet_id: structure["planet_id"] = planet_id if office_id in positions: structure["position"] = positions[office_id] structures[office_id] = structure logger.info( "%s: Storing updates for %d customs offices", self, len(structure) ) for office_id, structure in structures.items(): structure_obj, _ = Structure.objects.update_or_create_from_dict( structure, self ) try: poco = pocos_2[office_id] except KeyError: logger.warning( "%s: No details found for this POCO: %d", self, office_id ) else: standing_level = PocoDetails.StandingLevel.from_esi( poco.get("standing_level") ) PocoDetails.objects.update_or_create( structure=structure_obj, defaults={ "alliance_tax_rate": poco.get("alliance_tax_rate"), "allow_access_with_standings": poco.get( "allow_access_with_standings" ), "allow_alliance_access": poco.get( "allow_alliance_access" ), "bad_standing_tax_rate": poco.get( "bad_standing_tax_rate" ), "corporation_tax_rate": poco.get( "corporation_tax_rate" ), "excellent_standing_tax_rate": poco.get( "excellent_standing_tax_rate" ), "good_standing_tax_rate": poco.get( "good_standing_tax_rate" ), "neutral_standing_tax_rate": poco.get( "neutral_standing_tax_rate" ), "reinforce_exit_end": poco.get("reinforce_exit_end"), "reinforce_exit_start": poco.get( "reinforce_exit_start" ), "standing_level": standing_level, "terrible_standing_tax_rate": poco.get( "terrible_standing_tax_rate" ), }, ) if STRUCTURES_DEVELOPER_MODE: self._store_raw_data("customs_offices", structures, corporation_id) except OSError as ex: message_id = ( f"{__title__}-_fetch_customs_offices-{self.pk}-{type(ex).__name__}" ) title = f"{__title__}: Failed to update custom offices for {self}" message = f"{self}: Failed to update custom offices from ESI due to: {ex}" logger.exception(message) notify_admins_throttled( message_id=message_id, title=title, message=message, level="danger", timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT, ) return False 
self._remove_structures_not_returned_from_esi( structures_qs=self.structures.filter_customs_offices(), new_structures=structures.values(), ) return True
[ "def test_corp_party_offices(session): # pylint: disable=unused-argument\n offices = CorpParty.get_offices_held_by_corp_party_id(1)\n assert len(offices) == 2", "def office_get_all(self):\n\n return self.offices", "def test_get_all_offices(self):\n with self.app_context():\n response = self.app.get(\"/api/v1/offices\")\n self.assertEqual(response.status_code, 200)", "def test_get_all_offices(self):\n self.client().post('/api/v1/office', data=self.add_office,\n content_type='application/json')\n response = self.client().get('/api/v1/office',\n content_type='application/json')\n self.assertEqual(response.status_code, 200)", "def _get_offers(self):\n try:\n offers = requests.get(\"{base_url}{offers}\".format(base_url=self.AWS_PRICE_BASE_URL,\n offers=self.AWS_REGION_OFFERS))\n\n if offers.status_code == 200:\n response = json.loads(offers.text)\n regions = response.get('regions')\n if regions:\n offer = regions.get(self.region).get(self.AWS_OFFERS_KEY)\n offers_url = \"{base_url}{offer}\".format(base_url=self.AWS_PRICE_BASE_URL,\n offer=offer)\n data = requests.get(offers_url)\n if data.status_code == 200:\n return data.text\n else:\n return json.dumps(dict())\n return False\n except requests.ConnectionError as exp:\n print(exp)\n exit(1)", "def employee_offices(user, structure=None):\n if not user: return None\n oe = OrganizationalStructureOfficeEmployee.objects.filter(employee=user)\n if structure:\n oe = oe.filter(office__organizational_structure=structure)\n return oe", "def organisation_endpoints(ods_code):\n\n return jsonify(sample_data.endpoint_data)", "def is_ca_external(self, obj_dict):\n return (self.endpoint == objects.get_singular(\n objects.CUSTOM_ATTRIBUTES) and\n obj_dict[\"definition_type\"] in objects.ALL_SINGULAR_DISABLED_OBJS)", "def _extract_availability(self, response):\n web_only_query = queries[\"extract_availability_web_only\"]\n store_only_query = queries[\"extract_availability_store_only\"]\n add_to_cart_query = queries[\"extract_availability_add_to_cart\"]\n\n #check for web only == not available in store\n if response.css(web_only_query):\n availability = self._set_availability(True, False)\n\n #check for store only == available in store only\n elif response.css(store_only_query):\n availability = self._set_availability(False, True)\n\n #check for add to cart == available online and in store\n elif response.css(add_to_cart_query):\n availability = self._set_availability(True, True)\n\n #not available at either (assumed)\n else:\n availability = self._set_availability(False, False)\n\n return availability", "def supports_vault_lookup(self):\n return # boolean", "def is_company_officer(self):\n is_officer = False\n if self.user.is_staff and self.user.is_superuser:\n is_officer = True\n else:\n from htk.admintools.utils import get_company_officers_id_email_map\n officers_map = get_company_officers_id_email_map()\n officer_email = officers_map.get(self.user.id)\n if officer_email:\n is_officer = self.has_email(officer_email)\n return is_officer", "def getAvailableKnownInStoreItems(self,store_id):\n cursor = self.mydb.cursor()\n query = \"SELECT sku,Price,location FROM Walmart{} WHERE availability=1 and price!=-1\".format(store_id)\n cursor.execute(query)\n result = cursor.fetchall()\n cursor.close()\n return result", "def getCloudEvents(self):\n # send a request with authorization id for this customer\n try:\n dbg.prn(dbg.BKP,\"getCloudEvents-->\")\n response = False\n cfg = pxp._cfgGet()\n if(not cfg):\n return False #the encoder is not initalized yet most likely - 
nothing to do here\n response = pu.io.url(c.cloud+\"ajEvents/ajax\",params={\"v0\":cfg[1],\"v4\":cfg[2]})\n if(not response):\n return False #cloud not available\n data = json.loads(response)\n if('success' in data and data['success'] and 'entries' in data):\n # entries contains a list of past events. convert them into hid:{....} format (dictionary)\n events = {}\n for evt in data['entries']:\n events[evt['hid']]=evt\n return events\n dbg.prn(dbg.BKP|dbg.ERR,\"bkpmgr.getCloudEvents: corrupt response \", data)\n return False #response was corrupt\n except Exception as e:\n dbg.prn(dbg.BKP|dbg.ERR,\"[---]bkpmgr.getCloudEvents: \", e, sys.exc_info()[-1].tb_lineno, response)\n return False", "def can_lookup_calendars(self):\n return # boolean", "def getAffiliations(entity):", "def widevine_eula():\n if cdm_from_repo():\n cdm_version = latest_widevine_available_from_repo().get('version')\n cdm_os = config.WIDEVINE_OS_MAP[system_os()]\n cdm_arch = config.WIDEVINE_ARCH_MAP_REPO[arch()]\n else: # Grab the license from the x86 files\n log(0, 'Acquiring Widevine EULA from x86 files.')\n cdm_version = latest_widevine_version(eula=True)\n cdm_os = 'mac'\n cdm_arch = 'x64'\n\n url = config.WIDEVINE_DOWNLOAD_URL.format(version=cdm_version, os=cdm_os, arch=cdm_arch)\n downloaded = http_download(url, message=localize(30025), background=True) # Acquiring EULA\n if not downloaded:\n return False\n\n from zipfile import ZipFile\n with ZipFile(compat_path(store('download_path'))) as archive:\n with archive.open(config.WIDEVINE_LICENSE_FILE) as file_obj:\n eula = file_obj.read().decode().strip().replace('\\n', ' ')\n\n return yesno_dialog(localize(30026), eula, nolabel=localize(30028), yeslabel=localize(30027)) # Widevine CDM EULA", "def fetch_office_employees(self, office_code):\n self._connect()\n\n query = \"SELECT employeeNumber, firstName, lastName FROM employees\\\n WHERE officeCode = \" + str(office_code)\n\n rows = self._mysql.query(query)\n\n employees = []\n\n for row in rows:\n tmp = basic_employee.BasicEmployee(row[0], row[1], row[2])\n employees.append(tmp)\n\n self._close()\n\n return employees", "def _ems_available(self):\n if CFG.get_safe('container.exchange.auto_register', False) and self.use_ems:\n # ok now make sure it's in the directory\n exchange_service = get_service_registry().is_service_available('exchange_management')\n if exchange_service:\n return True\n\n return False", "def test_get_asset_ips_and_enrich_offense_addresses__no_enrich(self):\n offense = deepcopy(RAW_RESPONSES[\"qradar-update-offense\"])\n src_adrs = {254: '8.8.8.8'}\n dst_adrs = {4: '1.2.3.4'}\n expected = {'8.8.8.8', '1.2.3.4'}\n actual = get_asset_ips_and_enrich_offense_addresses(\n offense, src_adrs, dst_adrs, skip_enrichment=True)\n assert offense == RAW_RESPONSES[\"qradar-update-offense\"]\n assert expected == actual" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract name of planet from asset name for a customs office.
def _extract_planet_name(text: str) -> str: reg_ex = re.compile(r"Customs Office \((.+)\)") matches = reg_ex.match(text) return matches.group(1) if matches else ""
[ "def get_plant_name(self):\n if not self.plant_name:\n self.plant_name = self._search('botanische naam')\n return self.plant_name", "def extractFromTitle(title):\n # remove trailing period\n period_idx = title.rfind('.')\n if period_idx>0 and period_idx>len(title)-5:\n title = title[:period_idx]\n\n # store value of name\n name = ''\n words = title.split()\n\n # if title has less than 5 words, then the title is the name of the tool\n if len(words) < 5:\n return title\n\n # the word(s) before the colon is the name\n colon_idx = title.rfind(':')\n if colon_idx>0:\n return title[:colon_idx]\n\n # a version of the title with no unicode\n noUniTitle = re.sub(r'[^\\x00-\\x7F]+',' ', title)\n\n # the word(s) before the different versions of dashes is the name\n oneDash_idx = noUniTitle.find(' - ')\n if oneDash_idx>0:\n return noUniTitle[:oneDash_idx]\n\n longDash_idx = title.find('–')\n if longDash_idx>0:\n return title[:longDash_idx]\n\n medDash_idx = title.find('—')\n if medDash_idx>0:\n return title[:medDash_idx]\n\n doubleDash_idx = title.find('--')\n if doubleDash_idx>0:\n return title[:doubleDash_idx]\n\n\n # the word(s) in parentheses is the name\n paren_idx = title.find('(')\n if paren_idx > 0:\n end_paren_idx = title.find(')')\n return title[paren_idx+1:end_paren_idx]\n\n # the word(s) following the word 'with' is the name\n with_idx = title.rfind('with')\n comma_idx = title.find(',')\n if with_idx > 0 and comma_idx < 0:\n with_name = title[with_idx+len('with '):].strip()\n if len(with_name.split()) < 3:\n return with_name\n\n # the word(s) before the comma is the name\n if comma_idx > 0 and title.count(',')==1:\n return title[:comma_idx]\n\n # the word(s) following the word 'using' is the name\n using_idx = title.find('using')\n if using_idx>0:\n using_name = title[using_idx+len('using'):].strip()\n if len(using_name.split()) < 2:\n return using_name\n\n # looks at the first word\n # if the word has a mix of upper and lower case letters, it is a name\n first = words[0]\n if words[0]=='The' or words[0]=='A':\n first = words[1]\n\n if first.isupper():\n return first\n else:\n numUpper = 0\n changes = 0\n isUpper = first[0].isupper()\n for i in range(1, len(first)):\n if isUpper:\n numUpper+=1\n\n if not isUpper==first[i].isupper():\n changes+=1\n isUpper = first[i].isupper()\n\n if changes > 1 or isUpper>2:\n return first\n\n return name", "def getName(self, productBox):\n name = productBox.css(\"p.product-card-name::text\").get().strip()\n #remove collab \"x\" if required\n if name[:2] == \"x \":\n name = name[2:]\n return name", "def plantname_from_filename(f):\n f = Path(f)\n f = f.with_suffix('')\n name = f.name\n splits = name.split(\" - \")\n if len(splits) == 2:\n plantname = splits[0]\n el = splits[1]\n elif len(splits) == 3:\n plantname = splits[1]\n el = splits[2]\n return plantname, el", "def projection_name(self) -> str:\n return self.__data[\"name\"]", "def find_pokemon_name(self, text):\n return self.__pkmMan.find_pokemon_name(text)", "def laptop_name(self, name_locater):\n return self.soup.select(name_locater)", "def test_get_by_name(self):\n c = Client()\n url = '/sw/planet/?filter{name}=%s' % self.planet3.name\n\n # Get Data\n result = c.get(url)\n planet_list = json.loads(result.content)['planets']\n # Total 1 planets\n self.assertTrue(isinstance(planet_list, list))\n self.assertEqual(len(planet_list), 1)\n self.assertTrue(isinstance(planet_list[0], dict))\n\n planet = planet_list[0]\n self.assertEqual(planet['id'], self.planet3.id)\n self.assertEqual(planet['name'], 
self.planet3.name)\n self.assertEqual(planet['climate'], self.planet3.climate)\n self.assertEqual(planet['terrain'], self.planet3.terrain)\n self.assertEqual(planet['screenings'], self.planet3.get_screenings())", "def _pmf_doc_name(doc):\n return 'proceedings-{number}-{slug}'.format(\n number=doc.factory_parent.meeting.number,\n slug=xslugify(doc.factory_parent.type.slug).replace(\"_\", \"-\")[:128]\n )", "def get_city_name(data):\n return data[\"name\"]", "def show_name(pokemon):\n poke = pokepy.V2Client().get_pokemon(pokemon)\n name = poke.name\n return name", "def test_naming(self):\n tems = \"{=name}\"\n tem = Paulatemplate(tems, \"nametest\")\n self.assertEqual(tem.name, \"nametest\")", "def _extract_service_name(self, response: TextResponse) -> str:\n name = response.css(\".biz-page-title::text\").extract_first()\n if not name:\n self.log(\"Cannot find the name of the service: \" + response.url, logging.ERROR)\n return \"\"\n else:\n return name.strip()", "def teamname_only(string):\n return string.split(\" \")[0]", "def getObject(name):\n byname = ['sun', 'moon', 'mercury', 'venus', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune', 'pluto',\n 'ariel', 'callisto', 'deimos', 'dione', 'enceladus', 'europa', 'ganymede', 'hyperion', 'iapetus',\n 'io', 'mimas', 'miranda', 'oberon', 'phobos', 'rhea', 'tethys', 'titan', 'titania', 'umbriel']\n if name.strip().lower() in byname:\n obj = ephem.__dict__[name.title()]()\n return EphemPos(obj)", "def name(self):\n return self.solv_dict['name']", "def test_strip_grid_from_name_basic(self):\n result = _strip_grid_from_name(\"atm_grid\")\n self.assertEqual(result, \"atm\")", "def name(self):\n return self.point_def.name", "def municipality(self):\n return self.generator.parse(\"{{city}} kommune\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch starbases from ESI for this owner. Return True when successful, else False.
def _fetch_starbases(self, token: Token) -> bool: structures = list() corporation_id = self.corporation.corporation_id try: starbases = esi_fetch( "Corporation.get_corporations_corporation_id_starbases", args={"corporation_id": corporation_id}, token=token, has_pages=True, ) if not starbases: logger.info("%s: No starbases retrieved from ESI", self) else: names = self._fetch_starbases_names(corporation_id, starbases, token) for starbase in starbases: starbase["fuel_expires"] = self._calc_starbase_fuel_expires( corporation_id, starbase, token ) # convert starbases to structures for starbase in starbases: if starbase["starbase_id"] in names: name = names[starbase["starbase_id"]] else: name = "Starbase" structure = { "structure_id": starbase["starbase_id"], "type_id": starbase["type_id"], "corporation_id": corporation_id, "name": name, "system_id": starbase["system_id"], } if "state" in starbase: structure["state"] = starbase["state"] if "moon_id" in starbase: structure["moon_id"] = starbase["moon_id"] if "fuel_expires" in starbase: structure["fuel_expires"] = starbase["fuel_expires"] if "reinforced_until" in starbase: structure["state_timer_end"] = starbase["reinforced_until"] if "unanchors_at" in starbase: structure["unanchors_at"] = starbase["unanchors_at"] structures.append(structure) logger.info( "%s: Storing updates for %d starbases", self, len(structures) ) for structure in structures: Structure.objects.update_or_create_from_dict(structure, self) if STRUCTURES_DEVELOPER_MODE: self._store_raw_data("starbases", structures, corporation_id) except OSError as ex: message_id = f"{__title__}-_fetch_starbases-{self.pk}-{type(ex).__name__}" title = f"{__title__}: Failed to fetch starbases for {self}" message = f"{self}: Failed to fetch starbases from ESI due to {ex}" logger.exception(message) notify_admins_throttled( message_id=message_id, title=title, message=message, level="danger", timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT, ) return False self._remove_structures_not_returned_from_esi( structures_qs=self.structures.filter_starbases(), new_structures=structures, ) return True
[ "def repository_is_starred(user, repository):\n try:\n (Star.select().where(Star.repository == repository.id, Star.user == user.id).get())\n return True\n except Star.DoesNotExist:\n return False", "def query_balances(\n self,\n requested_save_data: bool = False,\n timestamp: Timestamp = None,\n ignore_cache: bool = False,\n ) -> Dict[str, Any]:\n log.info('query_balances called', requested_save_data=requested_save_data)\n\n balances = {}\n problem_free = True\n for _, exchange in self.exchange_manager.connected_exchanges.items():\n exchange_balances, _ = exchange.query_balances(ignore_cache=ignore_cache)\n # If we got an error, disregard that exchange but make sure we don't save data\n if not isinstance(exchange_balances, dict):\n problem_free = False\n else:\n balances[exchange.name] = exchange_balances\n\n try:\n blockchain_result = self.chain_manager.query_balances(\n blockchain=None,\n ignore_cache=ignore_cache,\n )\n balances['blockchain'] = {\n asset: balance.to_dict() for asset, balance in blockchain_result.totals.items()\n }\n except (RemoteError, EthSyncError) as e:\n problem_free = False\n log.error(f'Querying blockchain balances failed due to: {str(e)}')\n\n result = self.query_fiat_balances()\n if result != {}:\n balances['banks'] = result\n balances = account_for_manually_tracked_balances(db=self.data.db, balances=balances)\n\n combined = combine_stat_dicts([v for k, v in balances.items()])\n total_usd_per_location = [(k, dict_get_sumof(v, 'usd_value')) for k, v in balances.items()]\n\n # calculate net usd value\n net_usd = FVal(0)\n for _, v in combined.items():\n net_usd += FVal(v['usd_value'])\n\n stats: Dict[str, Any] = {\n 'location': {\n },\n 'net_usd': net_usd,\n }\n for entry in total_usd_per_location:\n name = entry[0]\n total = entry[1]\n if net_usd != FVal(0):\n percentage = (total / net_usd).to_percentage()\n else:\n percentage = '0%'\n stats['location'][name] = {\n 'usd_value': total,\n 'percentage_of_net_value': percentage,\n }\n\n for k, v in combined.items():\n if net_usd != FVal(0):\n percentage = (v['usd_value'] / net_usd).to_percentage()\n else:\n percentage = '0%'\n combined[k]['percentage_of_net_value'] = percentage\n\n result_dict = merge_dicts(combined, stats)\n\n allowed_to_save = requested_save_data or self.data.should_save_balances()\n\n if problem_free and allowed_to_save:\n if not timestamp:\n timestamp = Timestamp(int(time.time()))\n self.data.save_balances_data(data=result_dict, timestamp=timestamp)\n log.debug('query_balances data saved')\n else:\n log.debug(\n 'query_balances data not saved',\n allowed_to_save=allowed_to_save,\n problem_free=problem_free,\n )\n\n # After adding it to the saved file we can overlay additional data that\n # is not required to be saved in the history file\n try:\n details = self.accountant.events.details\n for asset, (tax_free_amount, average_buy_value) in details.items():\n if asset not in result_dict:\n continue\n\n result_dict[asset]['tax_free_amount'] = tax_free_amount\n result_dict[asset]['average_buy_value'] = average_buy_value\n\n current_price = result_dict[asset]['usd_value'] / result_dict[asset]['amount']\n if average_buy_value != FVal(0):\n result_dict[asset]['percent_change'] = (\n ((current_price - average_buy_value) / average_buy_value) * 100\n )\n else:\n result_dict[asset]['percent_change'] = 'INF'\n\n except AttributeError:\n pass\n\n return result_dict", "def test_is_starred_requires_an_owner(self):\n self.instance.is_starred(None, \"repo\")\n\n assert self.session.get.called is False", "def 
handle_starmap(self, message, address):\n # TODO validate response\n soma_remote_id = message.data['soma_id']\n remote_starmap = message.data['starmap']\n\n log_starmap = \"\\n\".join([\"- <{} {}>\".format(\n orb_info['type'], orb_id[:6]) for orb_id, orb_info in remote_starmap.iteritems()])\n\n self.logger.info(\"Scanning starmap of {} orbs from {}\\n{}\".format(\n len(remote_starmap), self.source_format(address), log_starmap))\n\n # Get or create copy of remote Soma's starmap\n local_starmap = Starmap.query.get(soma_remote_id)\n if local_starmap is None:\n local_starmap = Starmap(soma_remote_id)\n db.session.add(local_starmap)\n\n request_objects = list() # list of objects to be downloaded\n for orb_id, orb_info in remote_starmap.iteritems():\n orb_type = orb_info['type']\n orb_modifed = iso8601.parse_date(orb_info['modified'])\n orb_creator = orb_info['creator']\n\n # Create Orb if the object has not been seen before\n orb_local = Orb.query.get(orb_id)\n if orb_local is None:\n orb_local = Orb(orb_type, orb_id, orb_modifed, orb_creator)\n db.session.add(orb_local)\n\n # Request corresponding object if this object is not yet in\n # our own starmap (greedy downloading)\n #if not orb_local in self.starmap.index:\n\n # As the above doesnt work yet (*bug*), check directly\n if (orb_type == 'Star' and Star.query.get(orb_id) is None) or (orb_type == \"Persona\" and Persona.query.get(orb_id) is None):\n request_objects.append((orb_type, orb_id, address))\n # Also download if the remote version is newer\n # elif orb_modifed > orb_local.modified:\n # request_objects.append((orb_type, orb_id, address))\n\n # Add to local copy of the remote starmap to keep track of\n # who already has the Orb\n if orb_local not in local_starmap.index:\n local_starmap.index.append(orb_local)\n db.session.add(local_starmap)\n db.session.commit()\n\n # Spawn requests\n for orb_type, orb_id, address in request_objects:\n self.message_pool.spawn(self.request_object, orb_type, orb_id, address)", "def _find_shares(self, paths):\n if not paths:\n paths = [os.getcwd()]\n\n for path in paths:\n for dirname in os.listdir(path):\n candidate = os.path.join(path, dirname)\n if os.path.exists(os.path.join(candidate, Manifest.filename)):\n yield candidate", "def any_share(self):\n return next(iter(self._shares), None)", "def can_a_star_complete(self):\n path, cost = self.AStarSearch((self.x, self.y), self.world.exitcell)\n return path is not None and cost is not None", "def is_complete(self) -> bool:\n blocks = [block for block in self.blocks if block.status is not Block.Retrieved]\n return len(blocks) == 0", "def core_star_systems(self):\n return set((self.star_systems[u] for u in self.core_star_system_uuids))", "def test_get_all_shares(response):\n res = get_all_shares(response)\n assert len(res) == 8 # site gives 8 different image links", "def test_is_starred(self):\n self.instance.is_starred(\"username\", \"repository\")\n\n self.session.get.assert_called_once_with(\n url_for(\"user/starred/username/repository\")\n )", "def is_there_star(self, star):\r\n if star in self.stars:\r\n return True\r\n return False", "def is_covering(self):\n return all(atom_index in self.sub_atoms_mapped for atom_index in (self.sub.atoms.keys()))", "async def get_rigs_data(self):\n return await self.request(\"GET\", \"/main/api/v2/mining/rigs2\")", "def xmlrpc_get_images(self, info, owner_userid, glob, limit, offset):\n\t\treturn self.get_images(owner_userid, info['userid'], glob, limit, offset)", "def iter_owners(self, path_iter):\n\n\t\t\tif not 
isinstance(path_iter, list):\n\t\t\t\tpath_iter = list(path_iter)\n\t\t\towners_cache = self._populate()\n\t\t\tvardb = self._vardb\n\t\t\troot = vardb._eroot\n\t\t\thash_pkg = owners_cache._hash_pkg\n\t\t\thash_str = owners_cache._hash_str\n\t\t\tbase_names = self._vardb._aux_cache[\"owners\"][\"base_names\"]\n\n\t\t\tdblink_cache = {}\n\n\t\t\tdef dblink(cpv):\n\t\t\t\tx = dblink_cache.get(cpv)\n\t\t\t\tif x is None:\n\t\t\t\t\tif len(dblink_cache) > 20:\n\t\t\t\t\t\t# Ensure that we don't run out of memory.\n\t\t\t\t\t\traise StopIteration()\n\t\t\t\t\tx = self._vardb._dblink(cpv)\n\t\t\t\t\tdblink_cache[cpv] = x\n\t\t\t\treturn x\n\n\t\t\twhile path_iter:\n\n\t\t\t\tpath = path_iter.pop()\n\t\t\t\tis_basename = os.sep != path[:1]\n\t\t\t\tif is_basename:\n\t\t\t\t\tname = path\n\t\t\t\telse:\n\t\t\t\t\tname = os.path.basename(path.rstrip(os.path.sep))\n\n\t\t\t\tif not name:\n\t\t\t\t\tcontinue\n\n\t\t\t\tname_hash = hash_str(name)\n\t\t\t\tpkgs = base_names.get(name_hash)\n\t\t\t\towners = []\n\t\t\t\tif pkgs is not None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfor hash_value in pkgs:\n\t\t\t\t\t\t\tif not isinstance(hash_value, tuple) or \\\n\t\t\t\t\t\t\t\tlen(hash_value) != 3:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tcpv, counter, mtime = hash_value\n\t\t\t\t\t\t\tif not isinstance(cpv, basestring):\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tcurrent_hash = hash_pkg(cpv)\n\t\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tif current_hash != hash_value:\n\t\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t\tif is_basename:\n\t\t\t\t\t\t\t\tfor p in dblink(cpv).getcontents():\n\t\t\t\t\t\t\t\t\tif os.path.basename(p) == name:\n\t\t\t\t\t\t\t\t\t\towners.append((cpv, p[len(root):]))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif dblink(cpv).isowner(path):\n\t\t\t\t\t\t\t\t\towners.append((cpv, path))\n\n\t\t\t\t\texcept StopIteration:\n\t\t\t\t\t\tpath_iter.append(path)\n\t\t\t\t\t\tdel owners[:]\n\t\t\t\t\t\tdblink_cache.clear()\n\t\t\t\t\t\tgc.collect()\n\t\t\t\t\t\tfor x in self._iter_owners_low_mem(path_iter):\n\t\t\t\t\t\t\tyield x\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor cpv, p in owners:\n\t\t\t\t\t\t\tyield (dblink(cpv), p)", "def fetch_roi_synapses(server, uuid, synapses_instance, rois, fetch_labels=False, return_partners=False, processes=16):\n # Late imports to avoid circular imports in dvid/__init__\n from neuclease.dvid import fetch_combined_roi_volume, determine_point_rois, fetch_labels_batched, fetch_mapping, fetch_mappings\n\n assert rois, \"No rois provided, result would be empty. Is that what you meant?\"\n\n if isinstance(rois, str):\n rois = [rois]\n\n # Determine name of the segmentation instance that's\n # associated with the given synapses instance.\n syn_info = fetch_instance_info(server, uuid, synapses_instance)\n seg_instance = syn_info[\"Base\"][\"Syncs\"][0]\n\n logger.info(f\"Fetching mask for ROIs: {rois}\")\n # Fetch the ROI as a low-res array (scale 5, i.e. 
32-px resolution)\n roi_vol_s5, roi_box_s5, overlapping_pairs = fetch_combined_roi_volume(server, uuid, rois)\n\n if len(overlapping_pairs) > 0:\n logger.warning(\"Some ROIs overlapped and are thus not completely represented in the output:\\n\"\n f\"{overlapping_pairs}\")\n\n # Convert to full-res box\n roi_box = (2**5) * roi_box_s5\n\n # fetch_synapses_in_batches() requires a box that is 64-px-aligned\n roi_box = round_box(roi_box, 64, 'out')\n\n logger.info(\"Fetching synapse points\")\n # points_df is a DataFrame with columns for [z,y,x]\n points_df, partners_df = fetch_synapses_in_batches(server, uuid, synapses_instance, roi_box, processes=processes)\n\n # Append a 'roi_name' column to points_df\n logger.info(\"Labeling ROI for each point\")\n points_df = points_df.reset_index()\n determine_point_rois(server, uuid, rois, points_df, roi_vol_s5, roi_box_s5)\n points_df = points_df.set_index(points_df.columns[0])\n\n logger.info(\"Discarding points that don't overlap with the roi\")\n rois = {*rois}\n points_df = points_df.query('roi in @rois').copy()\n\n columns = ['z', 'y', 'x', 'kind', 'conf', 'roi_label', 'roi']\n\n if fetch_labels:\n logger.info(\"Fetching supervoxel under each point\")\n svs = fetch_labels_batched(server, uuid, seg_instance,\n points_df[['z', 'y', 'x']].values,\n supervoxels=True,\n processes=processes)\n\n with Timer(\"Mapping supervoxels to bodies\", logger):\n # Arbitrary heuristic for whether to do the\n # body-lookups on DVID or on the client.\n if len(svs) < 100_000:\n bodies = fetch_mapping(server, uuid, seg_instance, svs)\n else:\n mapping = fetch_mappings(server, uuid, seg_instance)\n mapper = LabelMapper(mapping.index.values, mapping.values)\n bodies = mapper.apply(svs, True)\n\n points_df['sv'] = svs\n points_df['body'] = bodies\n columns += ['body', 'sv']\n\n if return_partners:\n # Filter\n # partners_df = partners_df.query('post_id in @points_df.index and pre_id in @points_df.index').copy()\n\n # Faster filter (via merge)\n partners_df = partners_df.merge(points_df[[]], 'inner', left_on='pre_id', right_index=True)\n partners_df = partners_df.merge(points_df[[]], 'inner', left_on='post_id', right_index=True)\n return points_df[columns], partners_df\n else:\n return points_df[columns]", "def is_root_credentials(self):\n username = self.ask_amazon_for_username()\n try:\n self.connection.get_all_access_keys(username)\n except boto.exception.BotoServerError as error:\n if error.status == 404 and error.code == \"NoSuchEntity\":\n if username in self.ask_amazon_for_account_aliases():\n return True\n else:\n raise\n return False", "def list_smb_shares(mnode):\n g.log.info(\"List all SMB Shares\")\n smb_shares_list = []\n cmd = \"smbclient -L localhost\"\n ret, out, _ = g.run(mnode, cmd)\n if ret != 0:\n g.log.error(\"Failed to find the SMB Shares\")\n return smb_shares_list\n else:\n out = out.splitlines()\n for line in out:\n if 'gluster-' in line:\n smb_shares_list.append(line.split(\" \")[0].strip())\n\n return smb_shares_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch notifications for this owner from ESI and process them.
def fetch_notifications_esi(self, user: User = None) -> None: notifications_count_all = 0 self.notifications_last_update_ok = None self.notifications_last_update_at = now() self.save() token = self.fetch_token(rotate_characters=True) try: notifications = self._fetch_notifications_from_esi(token) except OSError as ex: message_id = ( f"{__title__}-fetch_notifications-{self.pk}-{type(ex).__name__}" ) title = f"{__title__}: Failed to update notifications for {self}" message = f"{self}: Failed to update notifications from ESI due to {ex}" logger.exception(message) notify_admins_throttled( message_id=message_id, title=title, message=message, level="danger", timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT, ) self.notifications_last_update_ok = False self.save() raise ex else: notifications_count_new = self._store_notifications(notifications) self._process_moon_notifications() if notifications_count_new > 0: logger.info( "%s: Received %d new notifications from ESI", self, notifications_count_new, ) self._process_timers_for_notifications(token) notifications_count_all += notifications_count_new else: logger.info("%s: No new notifications received from ESI", self) self.notifications_last_update_ok = True self.save() if user: self._send_report_to_user( topic="notifications", topic_count=notifications_count_all, user=user, )
[ "def notifications(self):\r\n from .._impl.notification import Notification\r\n result = []\r\n url = \"%s/community/users/%s/notifications\" % (self._portal.resturl, self._user_id)\r\n params = {\"f\" : \"json\"}\r\n ns = self._portal.con.get(url, params)\r\n if \"notifications\" in ns:\r\n for n in ns[\"notifications\"]:\r\n result.append(Notification(url=\"%s/%s\" % (url, n['id']),\r\n user=self,\r\n data=n,\r\n initialize=False)\r\n )\r\n del n\r\n return result\r\n return result", "def _notify_for_ob(cls): # pylint: disable=too-many-locals\n unpaid_status = (\n InvoiceStatus.SETTLEMENT_SCHEDULED.value, InvoiceStatus.PARTIAL.value, InvoiceStatus.CREATED.value)\n notification_date = datetime.today() - timedelta(days=current_app.config.get('NOTIFY_AFTER_DAYS'))\n # Get distinct accounts with pending invoices for that exact day\n notification_pending_accounts = db.session.query(InvoiceModel.payment_account_id).distinct().filter(and_(\n InvoiceModel.invoice_status_code.in_(unpaid_status),\n InvoiceModel.payment_method_code == PaymentMethod.ONLINE_BANKING.value,\n # cast is used to get the exact match stripping the timestamp from date\n cast(InvoiceModel.created_on, Date) == notification_date.date()\n )).all()\n current_app.logger.debug(f'Found {len(notification_pending_accounts)} invoices to notify admins.')\n for payment_account in notification_pending_accounts:\n try:\n payment_account_id = payment_account[0]\n total = db.session.query(func.sum(InvoiceModel.total).label('total')).filter(and_(\n InvoiceModel.invoice_status_code.in_(unpaid_status),\n InvoiceModel.payment_account_id == payment_account_id,\n InvoiceModel.payment_method_code == PaymentMethod.ONLINE_BANKING.value\n )).group_by(InvoiceModel.payment_account_id).all()\n pay_account: PaymentAccountModel = \\\n PaymentAccountModel.find_by_id(payment_account_id)\n\n cfs_account = CfsAccountModel.find_effective_by_account_id(payment_account_id)\n\n # emit account mailer event\n addition_params_to_mailer = {'transactionAmount': float(total[0][0]),\n 'cfsAccountId': cfs_account.cfs_account,\n 'authAccountId': pay_account.auth_account_id,\n }\n mailer.publish_mailer_events('ob.outstandingInvoice', pay_account, addition_params_to_mailer)\n except Exception as e: # NOQA # pylint: disable=broad-except\n capture_message(f'Error on notifying mailer OB Pending invoice: account id={pay_account.id}, '\n f'auth account : {pay_account.auth_account_id}, ERROR : {str(e)}', level='error')\n current_app.logger.error(e)", "def get_notifications(self, context):\n module_context.init()\n LOG.info(\"Received RPC GET NOTIFICATIONS \")\n events = self.sc.get_stashed_events()\n notifications = []\n for event in events:\n notification = event.data\n msg = (\"Notification Data: %r\" % notification)\n notifications.append(notification)\n LOG.info(msg)\n return notifications", "def _fetch_moon_notifications_from_esi(self) -> dict:\n logger.info(\"%s: Fetching notifications from ESI...\", self)\n all_notifications = (\n esi.client.Character.get_characters_character_id_notifications(\n character_id=self.character_ownership.character.character_id,\n token=self.fetch_token().valid_access_token(),\n ).results()\n )\n moon_notifications = [\n notif\n for notif in all_notifications\n if notif[\"type\"] in NotificationType.all_moon_mining\n ]\n return moon_notifications", "def deliver_pending_notifications():\n while not notifications.empty():\n msg = notifications.get()\n msg.deliver()", "def notify(cls, ar, owner, user, message):\n fltkw = gfk2lookup(cls.owner, 
owner)\n qs = cls.objects.filter(\n user=user, seen__isnull=True, **fltkw)\n if not qs.exists():\n # create a notification object and send email\n obj = cls(user=user, owner=owner, message=message)\n obj.full_clean()\n obj.save()\n obj.send_email(ar)", "def list_notifications(self):\r\n return self._notification_manager.list()", "def _store_notifications(self, notifications: list) -> int:\n # identify new notifications\n existing_notification_ids = set(\n self.notifications.values_list(\"notification_id\", flat=True)\n )\n new_notifications = [\n obj\n for obj in notifications\n if obj[\"notification_id\"] not in existing_notification_ids\n ]\n # create new notif objects\n sender_type_map = {\n \"character\": EveEntity.CATEGORY_CHARACTER,\n \"corporation\": EveEntity.CATEGORY_CORPORATION,\n \"alliance\": EveEntity.CATEGORY_ALLIANCE,\n }\n new_notification_objects = list()\n for notification in new_notifications:\n known_sender_type = sender_type_map.get(notification[\"sender_type\"])\n if known_sender_type:\n sender, _ = EveEntity.objects.get_or_create_esi(\n id=notification[\"sender_id\"]\n )\n else:\n sender = None\n text = notification[\"text\"] if \"text\" in notification else None\n is_read = notification[\"is_read\"] if \"is_read\" in notification else None\n new_notification_objects.append(\n Notification(\n notification_id=notification[\"notification_id\"],\n owner=self,\n created=now(),\n details=yaml.safe_load(text),\n is_read=is_read,\n last_updated=now(),\n # at least one type has a trailing white space\n # which we need to remove\n notif_type=notification[\"type\"].strip(),\n sender=sender,\n timestamp=notification[\"timestamp\"],\n )\n )\n\n Notification.objects.bulk_create(new_notification_objects)\n if len(new_notification_objects) > 0:\n logger.info(\n \"%s: Received %d new notifications from ESI\",\n self,\n len(new_notification_objects),\n )\n else:\n logger.info(\"%s: No new notifications received from ESI\", self)\n return len(new_notification_objects)", "async def notification_list(self, context):\n if self.db == None:\n await self.start() # Initiate DB, because it's not initialized yet\n\n notifications = self.get_notifications(context.message.author.id)\n if not notifications:\n return await self.bot.send_message(context.message.author, 'You have no notifications at this time.')\n else:\n notifications_list_str = ''\n for notification in notifications.values():\n time_until = notification['notification_time'] - int(datetime.now().timestamp()) # Time until notification\n notifications_list_str += '%s %s in %s\\n' % (notification['uid'], notification['notification_message'], self.get_time_string(time_until))\n return await self.bot.send_message(context.message.author, notifications_list_str) # Full list of notifications\n return", "def processNotifications(self, notifications):\n aggregator = service.IService(self.store).getServiceNamed('aggregator')\n aggregator.processNotifications(self.handle, notifications)", "def serialized_notifications(self):\n unread_count = self.notifications.unread().count()\n count = settings.NOTIFICATIONS_MAX_COUNT\n notifications = []\n\n if unread_count > count:\n count = unread_count\n\n for notification in self.notifications.prefetch_related(\n \"actor\", \"target\", \"action_object\"\n )[:count]:\n actor = None\n is_comment = False\n\n if hasattr(notification.actor, \"slug\"):\n if \"new string\" in notification.verb:\n actor = {\n \"anchor\": notification.actor.name,\n \"url\": reverse(\n \"pontoon.translate.locale.agnostic\",\n 
kwargs={\n \"slug\": notification.actor.slug,\n \"part\": \"all-resources\",\n },\n )\n + \"?status=missing,pretranslated\",\n }\n else:\n actor = {\n \"anchor\": notification.actor.name,\n \"url\": reverse(\n \"pontoon.projects.project\",\n kwargs={\"slug\": notification.actor.slug},\n ),\n }\n elif hasattr(notification.actor, \"email\"):\n actor = {\n \"anchor\": notification.actor.name_or_email,\n \"url\": reverse(\n \"pontoon.contributors.contributor.username\",\n kwargs={\"username\": notification.actor.username},\n ),\n }\n\n target = None\n if notification.target:\n t = notification.target\n # New string or Manual notification\n if hasattr(t, \"slug\"):\n target = {\n \"anchor\": t.name,\n \"url\": reverse(\n \"pontoon.projects.project\",\n kwargs={\"slug\": t.slug},\n ),\n }\n\n # Comment notifications\n elif hasattr(t, \"resource\"):\n is_comment = True\n target = {\n \"anchor\": t.resource.project.name,\n \"url\": reverse(\n \"pontoon.translate\",\n kwargs={\n \"locale\": notification.action_object.code,\n \"project\": t.resource.project.slug,\n \"resource\": t.resource.path,\n },\n )\n + f\"?string={t.pk}\",\n }\n\n notifications.append(\n {\n \"id\": notification.id,\n \"level\": notification.level,\n \"unread\": notification.unread,\n \"description\": {\n \"content\": notification.description,\n \"is_comment\": is_comment,\n },\n \"verb\": notification.verb,\n \"date\": notification.timestamp.strftime(\"%b %d, %Y %H:%M\"),\n \"date_iso\": notification.timestamp.isoformat(),\n \"actor\": actor,\n \"target\": target,\n }\n )\n\n return {\n \"has_unread\": unread_count > 0,\n \"notifications\": notifications,\n \"unread_count\": str(self.unread_notifications_display),\n }", "def send_notifications():\n # Send any email related tasks\n for note in EmailNotification.objects.select_related('event', 'account').filter(sent=False):\n note.send()", "def notifications():\n\n db = get_db_read_replica()\n min_block_number = request.args.get(\"min_block_number\", type=int)\n max_block_number = request.args.get(\"max_block_number\", type=int)\n\n track_ids_to_owner = []\n try:\n track_ids_str_list = request.args.getlist(\"track_id\")\n track_ids_to_owner = [int(y) for y in track_ids_str_list]\n except Exception as e:\n logger.error(f\"Failed to retrieve track list {e}\")\n\n # Max block number is not explicitly required (yet)\n if not min_block_number and min_block_number != 0:\n return api_helpers.error_response({\"msg\": \"Missing min block number\"}, 400)\n\n if not max_block_number:\n max_block_number = min_block_number + max_block_diff\n elif (max_block_number - min_block_number) > max_block_diff:\n max_block_number = min_block_number + max_block_diff\n\n with db.scoped_session() as session:\n current_block_query = session.query(Block).filter_by(is_current=True)\n current_block_query_results = current_block_query.all()\n current_block = current_block_query_results[0]\n current_max_block_num = current_block.number\n if current_max_block_num < max_block_number:\n max_block_number = current_max_block_num\n\n notification_metadata = {\n \"min_block_number\": min_block_number,\n \"max_block_number\": max_block_number,\n }\n\n # Retrieve milestones statistics\n milestone_info = {}\n\n # Cache owner info for network entities and pass in w/results\n owner_info = {const.tracks: {}, const.albums: {}, const.playlists: {}}\n\n # List of notifications generated from current protocol state\n notifications_unsorted = []\n with db.scoped_session() as session:\n #\n # Query relevant follow information\n 
#\n follow_query = session.query(Follow)\n\n # Impose min block number restriction\n follow_query = follow_query.filter(\n Follow.is_current == True,\n Follow.is_delete == False,\n Follow.blocknumber > min_block_number,\n Follow.blocknumber <= max_block_number,\n )\n\n follow_results = follow_query.all()\n # Used to retrieve follower counts for this window\n followed_users = []\n # Represents all follow notifications\n follow_notifications = []\n for entry in follow_results:\n follow_notif = {\n const.notification_type: const.notification_type_follow,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.follower_user_id,\n const.notification_metadata: {\n const.notification_follower_id: entry.follower_user_id,\n const.notification_followee_id: entry.followee_user_id,\n },\n }\n follow_notifications.append(follow_notif)\n # Add every user who gained a new follower\n followed_users.append(entry.followee_user_id)\n\n # Query count for any user w/new followers\n follower_counts = get_follower_count_dict(\n session, followed_users, max_block_number\n )\n milestone_info[\"follower_counts\"] = follower_counts\n\n notifications_unsorted.extend(follow_notifications)\n\n #\n # Query relevant favorite information\n #\n favorites_query = session.query(Save)\n favorites_query = favorites_query.filter(\n Save.is_current == True,\n Save.is_delete == False,\n Save.blocknumber > min_block_number,\n Save.blocknumber <= max_block_number,\n )\n favorite_results = favorites_query.all()\n\n # ID lists to query count aggregates\n favorited_track_ids = []\n favorited_album_ids = []\n favorited_playlist_ids = []\n\n # List of favorite notifications\n favorite_notifications = []\n favorite_remix_tracks = []\n\n for entry in favorite_results:\n favorite_notif = {\n const.notification_type: const.notification_type_favorite,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.user_id,\n }\n save_type = entry.save_type\n save_item_id = entry.save_item_id\n metadata = {\n const.notification_entity_type: save_type,\n const.notification_entity_id: save_item_id,\n }\n\n # NOTE if deleted, the favorite can still exist\n # TODO: Can we aggregate all owner queries and perform at once...?\n if save_type == SaveType.track:\n owner_id = get_owner_id(session, \"track\", save_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n favorited_track_ids.append(save_item_id)\n owner_info[const.tracks][save_item_id] = owner_id\n\n favorite_remix_tracks.append(\n {\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n \"user_id\": entry.user_id,\n \"item_owner_id\": owner_id,\n \"item_id\": save_item_id,\n }\n )\n\n elif save_type == SaveType.album:\n owner_id = get_owner_id(session, \"album\", save_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n favorited_album_ids.append(save_item_id)\n owner_info[const.albums][save_item_id] = owner_id\n\n elif save_type == SaveType.playlist:\n owner_id = get_owner_id(session, \"playlist\", save_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n favorited_playlist_ids.append(save_item_id)\n owner_info[const.playlists][save_item_id] = owner_id\n\n favorite_notif[const.notification_metadata] = metadata\n 
favorite_notifications.append(favorite_notif)\n notifications_unsorted.extend(favorite_notifications)\n\n track_favorite_dict = {}\n album_favorite_dict = {}\n playlist_favorite_dict = {}\n\n if favorited_track_ids:\n track_favorite_counts = get_save_counts(\n session,\n False,\n False,\n favorited_track_ids,\n [SaveType.track],\n max_block_number,\n )\n track_favorite_dict = dict(track_favorite_counts)\n\n favorite_remix_notifications = get_cosign_remix_notifications(\n session, max_block_number, favorite_remix_tracks\n )\n notifications_unsorted.extend(favorite_remix_notifications)\n\n if favorited_album_ids:\n album_favorite_counts = get_save_counts(\n session,\n False,\n False,\n favorited_album_ids,\n [SaveType.album],\n max_block_number,\n )\n album_favorite_dict = dict(album_favorite_counts)\n\n if favorited_playlist_ids:\n playlist_favorite_counts = get_save_counts(\n session,\n False,\n False,\n favorited_playlist_ids,\n [SaveType.playlist],\n max_block_number,\n )\n playlist_favorite_dict = dict(playlist_favorite_counts)\n\n milestone_info[const.notification_favorite_counts] = {}\n milestone_info[const.notification_favorite_counts][\n const.tracks\n ] = track_favorite_dict\n milestone_info[const.notification_favorite_counts][\n const.albums\n ] = album_favorite_dict\n milestone_info[const.notification_favorite_counts][\n const.playlists\n ] = playlist_favorite_dict\n\n #\n # Query relevant repost information\n #\n repost_query = session.query(Repost)\n repost_query = repost_query.filter(\n Repost.is_current == True,\n Repost.is_delete == False,\n Repost.blocknumber > min_block_number,\n Repost.blocknumber <= max_block_number,\n )\n repost_results = repost_query.all()\n\n # ID lists to query counts\n reposted_track_ids = []\n reposted_album_ids = []\n reposted_playlist_ids = []\n\n # List of repost notifications\n repost_notifications = []\n\n # List of repost notifications\n repost_remix_notifications = []\n repost_remix_tracks = []\n\n for entry in repost_results:\n repost_notif = {\n const.notification_type: const.notification_type_repost,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.user_id,\n }\n repost_type = entry.repost_type\n repost_item_id = entry.repost_item_id\n metadata = {\n const.notification_entity_type: repost_type,\n const.notification_entity_id: repost_item_id,\n }\n if repost_type == RepostType.track:\n owner_id = get_owner_id(session, \"track\", repost_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n reposted_track_ids.append(repost_item_id)\n owner_info[const.tracks][repost_item_id] = owner_id\n repost_remix_tracks.append(\n {\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n \"user_id\": entry.user_id,\n \"item_owner_id\": owner_id,\n \"item_id\": repost_item_id,\n }\n )\n\n elif repost_type == RepostType.album:\n owner_id = get_owner_id(session, \"album\", repost_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n reposted_album_ids.append(repost_item_id)\n owner_info[const.albums][repost_item_id] = owner_id\n\n elif repost_type == RepostType.playlist:\n owner_id = get_owner_id(session, \"playlist\", repost_item_id)\n if not owner_id:\n continue\n metadata[const.notification_entity_owner_id] = owner_id\n reposted_playlist_ids.append(repost_item_id)\n owner_info[const.playlists][repost_item_id] = owner_id\n\n 
repost_notif[const.notification_metadata] = metadata\n repost_notifications.append(repost_notif)\n\n # Append repost notifications\n notifications_unsorted.extend(repost_notifications)\n\n track_repost_count_dict = {}\n album_repost_count_dict = {}\n playlist_repost_count_dict = {}\n\n # Aggregate repost counts for relevant fields\n # Used to notify users of entity-specific milestones\n if reposted_track_ids:\n track_repost_counts = get_repost_counts(\n session,\n False,\n False,\n reposted_track_ids,\n [RepostType.track],\n max_block_number,\n )\n track_repost_count_dict = dict(track_repost_counts)\n\n repost_remix_notifications = get_cosign_remix_notifications(\n session, max_block_number, repost_remix_tracks\n )\n notifications_unsorted.extend(repost_remix_notifications)\n\n if reposted_album_ids:\n album_repost_counts = get_repost_counts(\n session,\n False,\n False,\n reposted_album_ids,\n [RepostType.album],\n max_block_number,\n )\n album_repost_count_dict = dict(album_repost_counts)\n\n if reposted_playlist_ids:\n playlist_repost_counts = get_repost_counts(\n session,\n False,\n False,\n reposted_playlist_ids,\n [RepostType.playlist],\n max_block_number,\n )\n playlist_repost_count_dict = dict(playlist_repost_counts)\n\n milestone_info[const.notification_repost_counts] = {}\n milestone_info[const.notification_repost_counts][\n const.tracks\n ] = track_repost_count_dict\n milestone_info[const.notification_repost_counts][\n const.albums\n ] = album_repost_count_dict\n milestone_info[const.notification_repost_counts][\n const.playlists\n ] = playlist_repost_count_dict\n\n # Query relevant created entity notification - tracks/albums/playlists\n created_notifications = []\n\n #\n # Query relevant created tracks for remix information\n #\n remix_created_notifications = []\n\n # Aggregate track notifs\n tracks_query = session.query(Track)\n # TODO: Is it valid to use Track.is_current here? 
Might not be the right info...\n tracks_query = tracks_query.filter(\n Track.is_unlisted == False,\n Track.is_delete == False,\n Track.stem_of == None,\n Track.blocknumber > min_block_number,\n Track.blocknumber <= max_block_number,\n )\n tracks_query = tracks_query.filter(Track.created_at == Track.updated_at)\n track_results = tracks_query.all()\n for entry in track_results:\n track_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n },\n }\n created_notifications.append(track_notif)\n\n if entry.remix_of:\n # Add notification to remix track owner\n parent_remix_tracks = [\n t[\"parent_track_id\"] for t in entry.remix_of[\"tracks\"]\n ]\n remix_track_parents = (\n session.query(Track.owner_id, Track.track_id)\n .filter(\n Track.track_id.in_(parent_remix_tracks),\n Track.is_unlisted == False,\n Track.is_delete == False,\n Track.is_current == True,\n )\n .all()\n )\n for remix_track_parent in remix_track_parents:\n [\n remix_track_parent_owner,\n remix_track_parent_id,\n ] = remix_track_parent\n remix_notif = {\n const.notification_type: const.notification_type_remix_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n const.notification_remix_parent_track_user_id: remix_track_parent_owner,\n const.notification_remix_parent_track_id: remix_track_parent_id,\n },\n }\n remix_created_notifications.append(remix_notif)\n\n # Handle track update notifications\n # TODO: Consider switching blocknumber for updated at?\n updated_tracks_query = session.query(Track)\n updated_tracks_query = updated_tracks_query.filter(\n Track.is_unlisted == False,\n Track.stem_of == None,\n Track.created_at != Track.updated_at,\n Track.blocknumber > min_block_number,\n Track.blocknumber <= max_block_number,\n )\n updated_tracks = updated_tracks_query.all()\n for entry in updated_tracks:\n prev_entry_query = (\n session.query(Track)\n .filter(\n Track.track_id == entry.track_id,\n Track.blocknumber < entry.blocknumber,\n )\n .order_by(desc(Track.blocknumber))\n )\n # Previous unlisted entry indicates transition to public, triggering a notification\n prev_entry = prev_entry_query.first()\n\n # Tracks that were unlisted and turned to public\n if prev_entry.is_unlisted == True:\n track_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n },\n }\n created_notifications.append(track_notif)\n\n # Tracks that were not remixes and turned into remixes\n if not prev_entry.remix_of and entry.remix_of:\n # Add 
notification to remix track owner\n parent_remix_tracks = [\n t[\"parent_track_id\"] for t in entry.remix_of[\"tracks\"]\n ]\n remix_track_parents = (\n session.query(Track.owner_id, Track.track_id)\n .filter(\n Track.track_id.in_(parent_remix_tracks),\n Track.is_unlisted == False,\n Track.is_delete == False,\n Track.is_current == True,\n )\n .all()\n )\n for remix_track_parent in remix_track_parents:\n [\n remix_track_parent_owner,\n remix_track_parent_id,\n ] = remix_track_parent\n remix_notif = {\n const.notification_type: const.notification_type_remix_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.owner_id,\n # TODO: is entity owner id necessary for tracks?\n const.notification_metadata: {\n const.notification_entity_type: \"track\",\n const.notification_entity_id: entry.track_id,\n const.notification_entity_owner_id: entry.owner_id,\n const.notification_remix_parent_track_user_id: remix_track_parent_owner,\n const.notification_remix_parent_track_id: remix_track_parent_id,\n },\n }\n remix_created_notifications.append(remix_notif)\n\n notifications_unsorted.extend(remix_created_notifications)\n\n # Aggregate playlist/album notifs\n collection_query = session.query(Playlist)\n # TODO: Is it valid to use is_current here? Might not be the right info...\n collection_query = collection_query.filter(\n Playlist.is_delete == False,\n Playlist.is_private == False,\n Playlist.blocknumber > min_block_number,\n Playlist.blocknumber <= max_block_number,\n )\n collection_query = collection_query.filter(\n Playlist.created_at == Playlist.updated_at\n )\n collection_results = collection_query.all()\n\n for entry in collection_results:\n collection_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.playlist_owner_id,\n }\n metadata = {\n const.notification_entity_id: entry.playlist_id,\n const.notification_entity_owner_id: entry.playlist_owner_id,\n const.notification_collection_content: entry.playlist_contents,\n }\n\n if entry.is_album:\n metadata[const.notification_entity_type] = \"album\"\n else:\n metadata[const.notification_entity_type] = \"playlist\"\n collection_notif[const.notification_metadata] = metadata\n created_notifications.append(collection_notif)\n\n # Playlists that were private and turned to public aka 'published'\n # TODO: Consider switching blocknumber for updated at?\n publish_playlists_query = session.query(Playlist)\n publish_playlists_query = publish_playlists_query.filter(\n Playlist.is_private == False,\n Playlist.created_at != Playlist.updated_at,\n Playlist.blocknumber > min_block_number,\n Playlist.blocknumber <= max_block_number,\n )\n publish_playlist_results = publish_playlists_query.all()\n for entry in publish_playlist_results:\n prev_entry_query = (\n session.query(Playlist)\n .filter(\n Playlist.playlist_id == entry.playlist_id,\n Playlist.blocknumber < entry.blocknumber,\n )\n .order_by(desc(Playlist.blocknumber))\n )\n # Previous private entry indicates transition to public, triggering a notification\n prev_entry = prev_entry_query.first()\n if prev_entry.is_private == True:\n publish_playlist_notif = {\n const.notification_type: const.notification_type_create,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: 
entry.playlist_owner_id,\n }\n metadata = {\n const.notification_entity_id: entry.playlist_id,\n const.notification_entity_owner_id: entry.playlist_owner_id,\n const.notification_collection_content: entry.playlist_contents,\n const.notification_entity_type: \"playlist\",\n }\n publish_playlist_notif[const.notification_metadata] = metadata\n created_notifications.append(publish_playlist_notif)\n\n notifications_unsorted.extend(created_notifications)\n\n # Get additional owner info as requested for listen counts\n tracks_owner_query = session.query(Track).filter(\n Track.is_current == True, Track.track_id.in_(track_ids_to_owner)\n )\n track_owner_results = tracks_owner_query.all()\n for entry in track_owner_results:\n owner = entry.owner_id\n track_id = entry.track_id\n owner_info[const.tracks][track_id] = owner\n\n # Get playlist updates\n today = date.today()\n thirty_days_ago = today - timedelta(days=30)\n thirty_days_ago_time = datetime(\n thirty_days_ago.year, thirty_days_ago.month, thirty_days_ago.day, 0, 0, 0\n )\n playlist_update_query = session.query(Playlist)\n playlist_update_query = playlist_update_query.filter(\n Playlist.is_current == True,\n Playlist.is_delete == False,\n Playlist.last_added_to >= thirty_days_ago_time,\n Playlist.blocknumber > min_block_number,\n Playlist.blocknumber <= max_block_number,\n )\n\n playlist_update_results = playlist_update_query.all()\n\n # Represents all playlist update notifications\n playlist_update_notifications = []\n playlist_update_notifs_by_playlist_id = {}\n for entry in playlist_update_results:\n playlist_update_notifs_by_playlist_id[entry.playlist_id] = {\n const.notification_type: const.notification_type_playlist_update,\n const.notification_blocknumber: entry.blocknumber,\n const.notification_timestamp: entry.created_at,\n const.notification_initiator: entry.playlist_owner_id,\n const.notification_metadata: {\n const.notification_entity_id: entry.playlist_id,\n const.notification_entity_type: \"playlist\",\n const.notification_playlist_update_timestamp: entry.last_added_to,\n },\n }\n\n # get all favorited playlists\n # playlists may have been favorited outside the blocknumber bounds\n # e.g. before the min_block_number\n playlist_favorites_query = session.query(Save)\n playlist_favorites_query = playlist_favorites_query.filter(\n Save.is_current == True,\n Save.is_delete == False,\n Save.save_type == SaveType.playlist,\n )\n playlist_favorites_results = playlist_favorites_query.all()\n\n # dictionary of playlist id => users that favorited said playlist\n # e.g. { playlist1: [user1, user2, ...], ... 
}\n # we need this dictionary to know which users need to be notified of a playlist update\n users_that_favorited_playlists_dict = ft.reduce(\n lambda accumulator, current: accumulator.update(\n {\n current.save_item_id: accumulator[current.save_item_id]\n + [current.user_id]\n if current.save_item_id in accumulator\n else [current.user_id]\n }\n )\n or accumulator,\n playlist_favorites_results,\n {},\n )\n\n for playlist_id in users_that_favorited_playlists_dict:\n if playlist_id not in playlist_update_notifs_by_playlist_id:\n continue\n playlist_update_notif = playlist_update_notifs_by_playlist_id[playlist_id]\n playlist_update_notif[const.notification_metadata].update(\n {\n const.notification_playlist_update_users: users_that_favorited_playlists_dict[\n playlist_id\n ]\n }\n )\n playlist_update_notifications.append(playlist_update_notif)\n\n notifications_unsorted.extend(playlist_update_notifications)\n\n # Final sort - TODO: can we sort by timestamp?\n sorted_notifications = sorted(\n notifications_unsorted,\n key=lambda i: i[const.notification_blocknumber],\n reverse=False,\n )\n\n return api_helpers.success_response(\n {\n \"notifications\": sorted_notifications,\n \"info\": notification_metadata,\n \"milestones\": milestone_info,\n \"owners\": owner_info,\n }\n )", "def get_for_user(cls, user):\n notifications = cls.objects.filter(user = user)\n notifications = notifications.order_by('-created')\n notifications = notifications.prefetch_related('notification')\n\n return notifications", "def list_notifications(request):\n notifications = Notification.objects.filter(\n receiving_user=request.user)\n data = NotificationModelSerializer(notifications, many=True).data\n return Response(data, status=status.HTTP_200_OK)", "def notification_trigger(self):\n self.today = self.entry_date.strftime(\"%Y-%m-%d\")\n #finding notify items\n self.df_notify = self.df_user.loc[self.df_user[\"notify (days)\"] <= self.today] \n self.name_notify = list(self.df_notify[\"title\"])\n #EXPIRED THINGS\n self.df_exp_dead = self.df_user.loc[self.df_user[\"expiration (days)\"] < self.today]\n self.names_expired = list(self.df_exp_dead[\"title\"])\n #NOTIFY ITEMS\n self.list_notify_notexpired = [x for x in self.name_notify if x not in self.names_expired]\n\n self.result.config(text=\"EXPIRES SOON:\")\n self.result3.config(text=\", \".join(self.list_notify_notexpired))\n self.result4.config(text=\"EXPIRED ITEMS: \"+\", \".join(self.names_expired))", "def _store_notifications(self, notifications: list) -> int:\n # identify new notifications\n existing_notification_ids = set(\n self.notifications.values_list(\"notification_id\", flat=True)\n )\n new_notifications = [\n obj\n for obj in notifications\n if obj[\"notification_id\"] not in existing_notification_ids\n ]\n # create new notif objects\n new_notification_objects = list()\n for notification in new_notifications:\n sender_type = EveEntity.Category.from_esi_name(notification[\"sender_type\"])\n if sender_type != EveEntity.Category.OTHER:\n sender, _ = EveEntity.objects.get_or_create_esi(\n eve_entity_id=notification[\"sender_id\"]\n )\n else:\n sender, _ = EveEntity.objects.get_or_create(\n id=notification[\"sender_id\"],\n defaults={\"category\": sender_type},\n )\n text = notification[\"text\"] if \"text\" in notification else None\n is_read = notification[\"is_read\"] if \"is_read\" in notification else None\n new_notification_objects.append(\n Notification(\n notification_id=notification[\"notification_id\"],\n owner=self,\n sender=sender,\n 
timestamp=notification[\"timestamp\"],\n # at least one type has a trailing white space\n # which we need to remove\n notif_type=notification[\"type\"].strip(),\n text=text,\n is_read=is_read,\n last_updated=now(),\n created=now(),\n )\n )\n\n Notification.objects.bulk_create(new_notification_objects)\n return len(new_notification_objects)", "def test_get_self_notifications(self):\n pass", "def active_notifications(self) -> dict:\n for _ in self._update():\n pass\n return self._active_notifications" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stores new notifications in the database. Returns the number of newly created objects.
def _store_notifications(self, notifications: list) -> int: # identify new notifications existing_notification_ids = set( self.notifications.values_list("notification_id", flat=True) ) new_notifications = [ obj for obj in notifications if obj["notification_id"] not in existing_notification_ids ] # create new notif objects new_notification_objects = list() for notification in new_notifications: sender_type = EveEntity.Category.from_esi_name(notification["sender_type"]) if sender_type != EveEntity.Category.OTHER: sender, _ = EveEntity.objects.get_or_create_esi( eve_entity_id=notification["sender_id"] ) else: sender, _ = EveEntity.objects.get_or_create( id=notification["sender_id"], defaults={"category": sender_type}, ) text = notification["text"] if "text" in notification else None is_read = notification["is_read"] if "is_read" in notification else None new_notification_objects.append( Notification( notification_id=notification["notification_id"], owner=self, sender=sender, timestamp=notification["timestamp"], # at least one type has a trailing white space # which we need to remove notif_type=notification["type"].strip(), text=text, is_read=is_read, last_updated=now(), created=now(), ) ) Notification.objects.bulk_create(new_notification_objects) return len(new_notification_objects)
[ "def _store_notifications(self, notifications: list) -> int:\n # identify new notifications\n existing_notification_ids = set(\n self.notifications.values_list(\"notification_id\", flat=True)\n )\n new_notifications = [\n obj\n for obj in notifications\n if obj[\"notification_id\"] not in existing_notification_ids\n ]\n # create new notif objects\n sender_type_map = {\n \"character\": EveEntity.CATEGORY_CHARACTER,\n \"corporation\": EveEntity.CATEGORY_CORPORATION,\n \"alliance\": EveEntity.CATEGORY_ALLIANCE,\n }\n new_notification_objects = list()\n for notification in new_notifications:\n known_sender_type = sender_type_map.get(notification[\"sender_type\"])\n if known_sender_type:\n sender, _ = EveEntity.objects.get_or_create_esi(\n id=notification[\"sender_id\"]\n )\n else:\n sender = None\n text = notification[\"text\"] if \"text\" in notification else None\n is_read = notification[\"is_read\"] if \"is_read\" in notification else None\n new_notification_objects.append(\n Notification(\n notification_id=notification[\"notification_id\"],\n owner=self,\n created=now(),\n details=yaml.safe_load(text),\n is_read=is_read,\n last_updated=now(),\n # at least one type has a trailing white space\n # which we need to remove\n notif_type=notification[\"type\"].strip(),\n sender=sender,\n timestamp=notification[\"timestamp\"],\n )\n )\n\n Notification.objects.bulk_create(new_notification_objects)\n if len(new_notification_objects) > 0:\n logger.info(\n \"%s: Received %d new notifications from ESI\",\n self,\n len(new_notification_objects),\n )\n else:\n logger.info(\"%s: No new notifications received from ESI\", self)\n return len(new_notification_objects)", "def test_notification_when_book_created(self):\n init_notification_count = Notification.objects.count()\n Book.objects.create(name='Hepta',\n author='X',\n library=Library.objects.all()[0])\n self.assertEqual(init_notification_count + 2,\n Notification.objects.count())", "def create_new_notification(args):\n\n notifications_db, cursor = connect_db()\n\n cursor.execute('create table if not exists notifications '\n '(n integer, type integer, notes text)')\n\n cursor.execute('select count(*) from notifications')\n\n notification_number = cursor.fetchone()[0] + 1\n\n try:\n notification_type = TYPE_TO_INDEX[args[0]]\n except KeyError:\n print '[ERROR] Not supported type, see \"<script> u\" for possible'\\\n ' types'\n return\n\n notification_body = ' '.join(args[1:])\n\n cursor.execute(\n 'insert into notifications(n, type, notes) values (?, ?, ?)',\n (notification_number, notification_type, notification_body))\n\n notifications_db.commit()\n notifications_db.close()", "def test_creates_in_app_notification_successfully(self):\n\n notification = self.fetch_all_notifications(token=self.user_token)\n\n self.assertEqual(notification.status_code, status.HTTP_200_OK)\n\n self.assertTrue(notification.data[\"count\"] == 1)\n\n follow = self.follow_user(self.control_username, self.user_token)\n\n self.assertEqual(follow.status_code, status.HTTP_200_OK)\n\n article = self.create_article(token=self.control_token)\n\n self.assertEqual(article.status_code, status.HTTP_201_CREATED)\n\n notification = self.fetch_all_notifications(token=self.user_token)\n\n self.assertEqual(notification.status_code, status.HTTP_200_OK)\n\n self.assertTrue(notification.data[\"count\"] == 2)", "def test_many_notifications() -> None:\n tester = Notifications()\n for _ in range(100):\n tester.add(Notification(\"test\", timeout=60))\n assert len(tester) == 100", "def 
count_changes(self):\n count = self.favourites.filter(deleted=False).count()\n self.favourite_count = count\n self.save()", "def createNotification(profileIds, title, text, type, id=None, courseId=None):\n for profileId in profileIds:\n urlsafeId = profileId.urlsafe()\n if memcache.get('notif' + urlsafeId) is None:\n memcache.add('notif' + urlsafeId, 0, NOTIFICATION_TIME)\n else:\n memcache.incr('notif' + urlsafeId)\n timeStamp = datetime.datetime.now() + datetime.timedelta(hours=5, minutes=30)\n newNotification = Notification(type=type, id=id, title=title, text=text,\n profileIdList=profileIds, timeStamp=timeStamp,\n courseId=courseId)\n newNotification.put()", "def fristNotifications(self):\n\n if(self._meds ==[]):\n print(\"meds was not set in noti generator\")\n\n notiID = 0\n for medItem in self._meds:\n itemDict= vars(medItem)\n cnt = float(itemDict['qty'])\n\n if cnt == 0:\n new_noti = NotiGenerator.generateEmptyNotification(medName=itemDict['name'],\n medID=itemDict['medID'],\n medShelf=itemDict['shelf'],\n notiID=notiID)\n self._notifications.append(new_noti)\n #delete this medicine too.\n\n notiID += 1\n\n expDate = itemDict['expDate']\n expDate = datetime.datetime.strptime(expDate, \"%Y-%m-%d\").date()\n today = datetime.datetime.now().date()\n\n if today >= expDate:\n new_noti = NotiGenerator.generateExpiredNotification(medName=itemDict['name'],\n medID=itemDict['medID'],\n medShelf=itemDict['shelf'],\n notiID=notiID)\n self._notifications.append(new_noti)\n notiID += 1\n\n print(notiID)\n return self._notifications", "def check_database_counts(self):\n # test select 100 tasks / taskevents from setUp()\n q = self.get_count('task_id', 'Task')\n self.assertTrue(q == MAX_ADD)\n q = self.get_count('id', 'TaskEvent')\n self.assertTrue(q == MAX_ADD)\n\n # checking tasks\n query = self.select('select * from Task where task_id = 10')\n self.assertTrue(len(query) == 1)\n\n # check all tasks are stopped\n q = self.get_count_where('id', 'TaskEvent', 'is_started', 0)\n query = self.select('select count(id) from TaskEvent where is_started = 0')\n self.assertTrue(query[0][0] == MAX_ADD)\n\n # check task does not exist\n query = self.select('select count(task_id) from Task where task_id = 420 and user = \"Name 420\"')\n self.assertTrue(query[0][0] == 0)\n\n # now add the task\n new_task = self.add_task(420, 'MANTASK', 'Name 420')", "def get_number_of_notifications(cursor) -> int:\n\n try:\n cursor.execute(\n \"\"\"\n SELECT count(*) \n FROM Notificaciones\n \"\"\"\n )\n rows = cursor.fetchall()\n except:\n raise NameError(\n \"El sistema de notificaciones no pudo ser inicializado\")\n\n numero_de_notificaciones = rows[0][0]\n\n return numero_de_notificaciones", "def created(*args, **kwargs):\n return ManagerNotificationWrapper(ACTIONS.created, *args, **kwargs)", "def notify_for_new_package(sender, instance, created, **kwargs):\n if created and instance.user.gcmdevice_set.exists():\n from notifications.models import GCMMessage\n GCMMessage.objects.create(\n user=instance.user,\n title=\"You've been enrolled.\",\n message=\"Welcome to {0}\".format(instance.category.title),\n deliver_on=timezone.now(),\n obj=instance,\n priority=GCMMessage.HIGH\n )", "def test_model_create_resource(self):\n # Act\n self.semester.save()\n # Assert\n self.new_count = Semester.objects.count()\n self.assertEquals(\n self.old_count + 1, self.new_count, 'The Semester was not created'\n )", "def _report_created(self):\n statsreporter.stats().incr('new_task_created_' + self.task_name)", "def 
insert_notification_list_db(self, jsonData, recover_by, session):\n\n # NOTE: The notification item 'endTime' may have a NULL value.\n # reference : The Notification Spec for RecoveryController.\n # JSON decoder perform null -> None translation\n try:\n if not jsonData.get(\"endTime\"):\n j_endTime = None\n else:\n j_endTime = datetime.datetime.strptime(\n jsonData.get(\"endTime\"), '%Y%m%d%H%M%S')\n # update and deleted :not yet\n create_at = datetime.datetime.now()\n update_at = None\n delete_at = None\n deleted = 0\n # progress 0:not yet\n progress = 0\n # From /etc/hosts\n # NOTE: Hosts hostname suffix is\n # undetermined(\"_data_line\",\"_control_line\")\n iscsi_ip = None\n controle_ip = socket.gethostbyname(jsonData.get(\"hostname\"))\n recover_to = None\n if recover_by == 0:\n recover_to = self._get_reserve_node_from_reserve_list_db(\n jsonData.get(\"cluster_port\"),\n jsonData.get(\"hostname\"),\n session)\n # If reserve node is None, set progress 3.\n if recover_to is None:\n progress = 3\n\n def strp_time(u_time):\n \"\"\"\n Convert unicode time with format '%Y%m%d%H%M%S' to\n datetime format.\n \"\"\"\n try:\n d = datetime.datetime.strptime(u_time, '%Y%m%d%H%M%S')\n\n except (ValueError, TypeError) as e:\n LOG.warning(e)\n d = None\n\n return d\n\n notification_time = strp_time(jsonData.get(\"time\"))\n notification_startTime = strp_time(jsonData.get(\"startTime\"))\n except Exception as e:\n\n error_type, error_value, traceback_ = sys.exc_info()\n tb_list = traceback.format_tb(traceback_)\n LOG.error(error_type)\n LOG.error(error_value)\n for tb in tb_list:\n LOG.error(tb)\n\n LOG.error(e.message)\n\n raise e\n # Todo: (sampath) correct the exceptions catching\n # Insert to notification_list DB.\n\n try:\n msg = \"Do add_notification_list.\"\n LOG.info(msg)\n result = dbapi.add_notification_list(\n session,\n create_at=create_at,\n update_at=update_at,\n delete_at=delete_at,\n deleted=deleted,\n notification_id=jsonData.get(\"id\"),\n notification_type=jsonData.get(\"type\"),\n notification_regionID=jsonData.get(\"regionID\"),\n notification_hostname=jsonData.get(\"hostname\"),\n notification_uuid=jsonData.get(\"uuid\"),\n notification_time=notification_time,\n notification_eventID=jsonData.get(\"eventID\"),\n notification_eventType=jsonData.get(\"eventType\"),\n notification_detail=jsonData.get(\"detail\"),\n notification_startTime=notification_startTime,\n notification_endTime=j_endTime,\n notification_tzname=jsonData.get(\"tzname\"),\n notification_daylight=jsonData.get(\"daylight\"),\n notification_cluster_port=jsonData.get(\"cluster_port\"),\n progress=progress,\n recover_by=recover_by,\n iscsi_ip=iscsi_ip,\n controle_ip=controle_ip,\n recover_to=recover_to\n )\n msg = \"Succeeded in add_notification_list. \" \\\n + \"Return_value = \" + str(result)\n LOG.info(msg)\n\n msg = \"Do get_all_reserve_list_by_hostname_not_deleted.\"\n LOG.info(msg)\n cnt = dbapi.get_all_reserve_list_by_hostname_not_deleted(\n session,\n jsonData.get(\"hostname\")\n )\n msg = \"Succeeded in get_all_reserve_list_by_hostname_not_deleted. 
\" \\\n + \"Return_value = \" + str(cnt)\n LOG.info(msg)\n\n if len(cnt) > 0:\n msg = \"Do update_reserve_list_by_hostname_as_deleted.\"\n LOG.info(msg)\n dbapi.update_reserve_list_by_hostname_as_deleted(\n session,\n jsonData.get(\"hostname\"),\n datetime.datetime.now()\n )\n msg = \"Succeeded in \" \\\n + \"update_reserve_list_by_hostname_as_deleted.\"\n LOG.info(msg)\n\n ret_dic = {\n \"create_at\": create_at,\n \"update_at\": update_at,\n \"delete_at\": delete_at,\n \"deleted\": deleted,\n \"notification_id\": jsonData.get(\"id\"),\n \"notification_type\": jsonData.get(\"type\"),\n \"notification_regionID\": jsonData.get(\"regionID\"),\n \"notification_hostname\": jsonData.get(\"hostname\"),\n \"notification_uuid\": jsonData.get(\"uuid\"),\n \"notification_time\": jsonData.get(\"time\"),\n \"notification_eventID\": jsonData.get(\"eventID\"),\n \"notification_eventType\": jsonData.get(\"eventType\"),\n \"notification_detail\": jsonData.get(\"detail\"),\n \"notification_startTime\": jsonData.get(\"startTime\"),\n \"notification_endTime\": j_endTime,\n \"notification_tzname\": jsonData.get(\"tzname\"),\n \"notification_daylight\": jsonData.get(\"daylight\"),\n \"notification_cluster_port\": jsonData.get(\"cluster_port\"),\n \"progress\": progress,\n \"recover_by\": recover_by,\n \"iscsi_ip\": iscsi_ip,\n \"controle_ip\": controle_ip,\n \"recover_to\": recover_to\n }\n\n return ret_dic\n\n except Exception as e:\n\n error_type, error_value, traceback_ = sys.exc_info()\n tb_list = traceback.format_tb(traceback_)\n LOG.error(error_type)\n LOG.error(error_value)\n for tb in tb_list:\n LOG.error(tb)\n\n LOG.error(e.message)\n\n raise e", "def record(self, entries):\n count = None\n for ent in entries:\n count = db.insert_one({\n 'txhash': ent[0],\n 'start': ent[1],\n 'end': ent[2],\n })\n return count", "def save(self, request):\n n_errors = 0\n created, fetched = Counter(), Counter()\n\n try:\n reader = decode_assignments(self.cleaned_data[\"assignments\"])\n for obj, was_created in parse_assignments(reader):\n if isinstance(obj, Exception):\n n_errors += 1\n messages.add_message(request, messages.WARNING, str(obj))\n continue\n\n if was_created:\n created[obj.__class__.__name__] += 1\n else:\n fetched[obj.__class__.__name__] += 1\n except Exception as e:\n n_errors += 1\n messages.add_message(\n request, messages.WARNING,\n f\"processing ended prematurely: {e}\"\n )\n\n msg = (\n f\"created {sum(created.values())} objects \"\n f\"(fetched {sum(fetched.values())} objects, {n_errors} errors)\"\n )\n messages.add_message(request, messages.SUCCESS, msg)", "def test_model_can_store_data(self):\n Note.objects.create(note_text=\"Test\")\n data_in_model = Note.objects.all().count()\n self.assertEqual(data_in_model, 2)", "def test_check_notification_entries(self):\n # Create some notification entries\n\n self.assertEqual(NotificationEntry.objects.count(), 0)\n\n NotificationEntry.notify('test.notification', 1)\n\n self.assertEqual(NotificationEntry.objects.count(), 1)\n\n delta = timedelta(days=1)\n\n self.assertFalse(NotificationEntry.check_recent('test.notification', 2, delta))\n self.assertFalse(NotificationEntry.check_recent('test.notification2', 1, delta))\n\n self.assertTrue(NotificationEntry.check_recent('test.notification', 1, delta))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
processes notifications for timers if any
def _process_timers_for_notifications(self, token: Token): if STRUCTURES_ADD_TIMERS: cutoff_dt_for_stale = now() - timedelta( hours=STRUCTURES_HOURS_UNTIL_STALE_NOTIFICATION ) notifications = ( Notification.objects.filter(owner=self) .filter(notif_type__in=NotificationType.relevant_for_timerboard) .exclude(is_timer_added=True) .filter(timestamp__gte=cutoff_dt_for_stale) .select_related("owner", "sender") .order_by("timestamp") ) if notifications.exists(): if not token: token = self.fetch_token() for notification in notifications: notification.process_for_timerboard(token)
[ "def handle_notifies(self):\n assert threading.current_thread() == self.MAIN_THREAD\n try:\n while True:\n evt_type, evt = self.notifies.get_nowait()\n\n for o in self.observers[evt_type]:\n self.__invoke_observer(o, evt_type, evt)\n\n except QueueEmpty:\n pass", "def reliable_schedule_notifications(self):\n pass", "def reliable_time_period_notifications(self):\n pass", "def timer_cron (self):\n\n # filter through each users timers via timer_filter().\n for nick, timers in self.timers.iteritems():\n # set a member variable for time_filter() to reference.\n self.current_nick = nick\n\n # filter through timers.\n self.timers[nick] = filter(self.timer_filter, timers)\n\n # commit the data structure to memory.\n self.bot.memory_remember(\"timers\", self.timers)", "def __notify_handle(self, listeners):\n timestamp = time.time()\n for listener in listeners:\n try:\n listener.handleReceived(timestamp)\n\n except Exception as ex:\n print(\"Something went wrong: \", ex)", "def process_timer(ctx, key, time):\n raise Exception('process_timer not implemented')", "def run(self):\n # Run all timers and remove those who won't trigger again.\n self._timers = [timer for timer in self._timers if timer.run()]", "def alarm_notify(alarm_output, alarm_date, alarm_time):\r\n\r\n one_alarm = []\r\n #Text To Speech Notifications\r\n engine = pyttsx3.init()\r\n engine.say(alarm_output)\r\n engine.runAndWait()\r\n\r\n #appending Notifation Details to List\r\n one_alarm.append(alarm_date)\r\n one_alarm.append(alarm_time)\r\n one_alarm.append(alarm_output)\r\n notified_alarms.append(one_alarm)\r\n\r\n #Removing Notified Alarm From Schedule\r\n for every_list in alarm_schedule:\r\n if every_list == one_alarm:\r\n alarm_schedule.remove(every_list)", "def processNotifications(self, notifications):\n aggregator = service.IService(self.store).getServiceNamed('aggregator')\n aggregator.processNotifications(self.handle, notifications)", "def _process_notification(self, notification):\n event_type = notification.get('event_type')\n\n # NOTE(zykes): Only bother to actually do processing if there's any\n # matching events, skips logging of things like compute.exists etc.\n if event_type in self._get_handler_event_types():\n for handler in self.handlers:\n self._process_notification_for_handler(handler, notification)", "def process_timeout(self):\n self.timers.process_timeout()", "def set_timers(user_id, user):\n\n\tif hasattr(user, 'timer'):\n\t\tuser.timer_processes = []\n\n\t\tprint 'setting timer'\n\n\t\tprint user.timer\n\t\t#=====[ Start new process for each timer ]=====\n\t\tfor idx, time in enumerate(user.timer):\n\n\t\t\tif idx == len(user.timer) - 1:\n\t\t\t\tp = Process(target=send_warning, args=(user_id, FINAL_TIMING_WARNING, time,))\n\t\t\telse:\n\t\t\t\tp = Process(target=send_warning, args=(user_id, TIMING_WARNING, time,))\n\t\t\t#=====[ Keep reference to process and start it ]=====\n\t\t\tp.start()\n\t\t\tuser.timer_processes.append(p.pid)\n\n\n\t\tut.update(user_id, user)", "def reliable_event_notifications(self):\n pass", "def notify(info):\n __notifier.notify(info)", "def run(self):\n processes = self.scheduler.getProcessEvents()\n for p in processes:\n me = self.memManager.handleProcess(p)\n if(me):\n self.notifyObservers(me)", "def loop(self):\n if self.notifier.check_events():\n # read notified events and enqeue them\n self.notifier.read_events()\n\n # process the queue of events\n self.notifier.process_events()", "def start_timers(self) -> List[Timer]:", "def set_time_based_notification(domain_class, 
state, roles, time):", "def _nfvi_periodic_timer_event():\n while True:\n timer_id = (yield)\n DLOG.verbose(\"NFVI periodic timer called, timer_id=%s.\" % timer_id)\n\n host_table = tables.tables_get_host_table()\n for host in list(host_table.values()):\n host.periodic_timer()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update all assets from ESI related to active structure for this owner.
def update_asset_esi(self, user: User = None): self.assets_last_update_ok = None self.assets_last_update_at = now() self.save() token = self.fetch_token() structure_ids = {x.id for x in Structure.objects.filter(owner=self)} try: OwnerAsset.objects.update_or_create_for_structures_esi( structure_ids, self.corporation.corporation_id, token ) except OSError as ex: message_id = f"{__title__}-fetch_assets-{self.pk}-{type(ex).__name__}" title = f"{__title__}: Failed to update assets for {self}" message = f"{self}: Failed to update assets from ESI due to {ex}" logger.warning(message, exc_info=True) notify_admins_throttled( message_id=message_id, title=title, message=message, level="warning", timeout=STRUCTURES_NOTIFY_THROTTLED_TIMEOUT, ) raise ex else: self.assets_last_update_ok = True self.save() if user: self._send_report_to_user( topic="assets", topic_count=self.structures.count(), user=user )
[ "def update_assets(self):\n assets = merge_assets([script.used_assets for script in self.paths_to_scripts.values()])\n for asset in assets:\n asset.supply(self.path)", "def _update(self):\n self._update_assets()\n self._update_funds()", "def update_inplace(self):\n for resource_name in self.all_custom_ami_resources():\n ami = self.resources[resource_name]\n self.load_latest_ami_name_pattern(ami)\n self.update_ami(resource_name, ami)", "def update_all_esi(self) -> int:\n logger.info(\n \"%s: Updating %d objects from from ESI...\",\n self.model.__name__,\n self.count(),\n )\n count_updated = 0\n for eve_obj in self.all().order_by(\"last_updated\"):\n try:\n self.update_or_create_esi(eve_obj.id)\n count_updated += 1\n except HTTPError:\n logger.exception(\"Update interrupted by exception\")\n\n return count_updated", "def update_everything(self, amounts_storages, amounts_fluxes):\n self.update_all_fluxes(amounts_fluxes)\n self.update_all_storages(amounts_storages)", "def update_structures_esi(self, user: User = None):\n self.structures_last_update_ok = None\n self.structures_last_update_at = now()\n self.save()\n token = self.fetch_token()\n\n is_ok = self._fetch_upwell_structures(token)\n if STRUCTURES_FEATURE_CUSTOMS_OFFICES:\n is_ok &= self._fetch_custom_offices(token)\n if STRUCTURES_FEATURE_STARBASES:\n is_ok &= self._fetch_starbases(token)\n\n if is_ok:\n self.structures_last_update_ok = True\n self.save()\n if user:\n self._send_report_to_user(\n topic=\"structures\", topic_count=self.structures.count(), user=user\n )", "def update_objects(self):\n\t\tself.update_projectiles()", "def update(self):\n # ic()\n # self.update_scans()\n self.update_data()", "def update_accounts(self):\n\n # Clear tree\n self.tree.delete(*self.tree.get_children())\n\n # Get all acount names\n names = [x[\"name\"] for x in accounts.get_all()]\n\n # Fill tree\n for name in names:\n item = self.tree.insert(\"\", \"end\", text=name, open=False,\n values=[\"\" for x in self.VAR])\n\n self._resize_tree()", "def update(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"object.update\", self._object._eco_id)\r\n p2e._app.Exec(arg_str)", "def refresh(self):\n self._parse_oem_attributes()", "def _update_ownership(self, checkout_state_dir: Path, uid: int, gid: int) -> None:\n metadata_path = checkout_state_dir / \"local\" / \"metadata.table\"\n inode_metadata_mod.update_ownership(metadata_path, uid, gid)", "def update_all_storages(self, amounts):\n for storage, amount in zip(self.storages, amounts):\n storage.update_storage(amount)", "def update_oam_config(self, context):\n return self.call(context, self.make_msg('update_oam_config'))", "def update_projectiles(self):\n\t\t\n\t\tfor p in self.active_projectiles:\n\t\t\tp.update()", "def update_compact_files(self, ):\n for file_path, updates in self._updates.items():\n if os.path.exists(file_path):\n with open_temp_copy(file_path, binary=True) as instream, open(file_path, 'wb') as outstream:\n updated_events = self._updated_compact_events(\n yaml.parse(instream),\n updates\n )\n \n yaml.emit(updated_events, outstream)\n else:\n with open(file_path, 'wb') as outstream:\n yaml.emit(self._fresh_content_events(updates.items()), outstream)", "def update(self, *args, **kwargs):\n\n if args or kwargs:\n files = self.search_files(*args, **kwargs)\n else:\n files = self.files\n\n for db_file in files:\n db_file.update()", "def update_asset_value(self) -> None:\n base = \"BALN\"\n quote = \"bnUSD\"\n dex_score = self._dex_score.get()\n oracle_address = 
self._oracle.get()\n try:\n dex = self.create_interface_score(dex_score, DexInterface)\n oracle = self.create_interface_score(oracle_address, OracleInterface)\n price = dex.getBalnPrice()\n priceData = oracle.get_reference_data('USD', 'ICX')\n self._last_price.set(priceData['rate'] * price // EXA)\n self._price_update_time.set(self.now())\n self.OraclePrice(base + quote, self._oracle_name.get(), dex_score, price)\n except BaseException as e:\n revert(f'{base + quote}, {self._oracle_name.get()}, {dex_score}, Exception: {e}')", "def update_robots(self):\n for robot_item in self.get_robot_graphics_items():\n robot_item.updateAll()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the IntersectionOverUnion metric for the given ground truth and predicted labels.
def evaluate(ground_truth_labels: type_alias.TensorLike, predicted_labels: type_alias.TensorLike, grid_size: int = 1, name: str = "intersection_over_union_evaluate") -> tf.Tensor: with tf.name_scope(name): ground_truth_labels = tf.convert_to_tensor(value=ground_truth_labels) predicted_labels = tf.convert_to_tensor(value=predicted_labels) shape.compare_batch_dimensions( tensors=(ground_truth_labels, predicted_labels), tensor_names=("ground_truth_labels", "predicted_labels"), last_axes=-grid_size, broadcast_compatible=True) ground_truth_labels = asserts.assert_binary(ground_truth_labels) predicted_labels = asserts.assert_binary(predicted_labels) sum_ground_truth = tf.math.reduce_sum( input_tensor=ground_truth_labels, axis=list(range(-grid_size, 0))) sum_predictions = tf.math.reduce_sum( input_tensor=predicted_labels, axis=list(range(-grid_size, 0))) intersection = tf.math.reduce_sum( input_tensor=ground_truth_labels * predicted_labels, axis=list(range(-grid_size, 0))) union = sum_ground_truth + sum_predictions - intersection return tf.where( tf.math.equal(union, 0), tf.ones_like(union), intersection / union)
[ "def IoU(detection1, detection2):\n\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(detection1[0], detection2[0])\n yA = max(detection1[1], detection2[1])\n xB = min(detection1[2], detection2[2])\n yB = min(detection1[3], detection2[3])\n\n # area of intersection\n interArea = max(0, xB - xA) * max(0, yB - yA)\n\n # compute the area of both the prediction and ground-truth rectangles\n boxAArea = (detection1[3] - detection1[1]) * (detection1[2] - detection1[0])\n boxBArea = (detection2[3] - detection2[1]) * (detection2[2] - detection2[0])\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n # return the intersection over union value\n return iou", "def iou_score(pred_cls, true_cls, nclass=7, drop=drop):\n intersect_ = []\n union_ = []\n for i in range(nclass):\n if i not in drop:\n intersect = ((pred_cls == i) + (true_cls == i)).eq(2).sum().item()\n union = ((pred_cls == i) + (true_cls == i)).ge(1).sum().item()\n intersect_.append(intersect)\n union_.append(union)\n return np.array(intersect_), np.array(union_)", "def compute(self, predict, target, **kwargs):\n dims = tuple(range(predict.ndimension())[1:])\n intersect = (predict * target).sum(dims)\n union = (predict + target - predict * target).sum(dims)\n result = 1.0 - intersect / eps_denom(union)\n return result", "def intersection_over_union(heatmap1: np.ndarray, heatmap2: np.ndarray) -> float:\n intersection = np.bitwise_and(heatmap1, heatmap2)\n union = np.bitwise_or(heatmap1, heatmap2)\n\n count_inter = float(np.count_nonzero(intersection))\n count_union = float(np.count_nonzero(union))\n\n iou = count_inter / count_union\n\n return iou", "def iou(masks_true, masks_pred):\n if masks_true.shape[1:] != masks_pred.shape[1:]:\n raise ValueError('Predicted masks have wrong shape!')\n n_true_masks, height, width = masks_true.shape\n n_pred_masks = masks_pred.shape[0]\n m_true = masks_true.copy().reshape(n_true_masks, height * width).T\n m_pred = masks_pred.copy().reshape(n_pred_masks, height * width)\n numerator = np.dot(m_pred, m_true)\n denominator = m_pred.sum(1).reshape(-1, 1) + m_true.sum(0).reshape(1, -1)\n\n return numerator / (denominator - numerator)", "def get_intersections_and_cardinalities(\n references: List[np.ndarray],\n predictions: List[np.ndarray],\n labels: List[LabelEntity],\n) -> Tuple[NumberPerLabel, NumberPerLabel]:\n\n # TODO [Soobee] : Add score for background label and align the calculation method with validation\n all_intersections: NumberPerLabel = {label: 0 for label in labels}\n all_intersections[None] = 0\n all_cardinalities: NumberPerLabel = {label: 0 for label in labels}\n all_cardinalities[None] = 0\n for reference, prediction in zip(references, predictions):\n intersection = np.where(reference == prediction, reference, 0)\n all_intersections[None] += np.count_nonzero(intersection)\n all_cardinalities[None] += np.count_nonzero(reference) + np.count_nonzero(prediction)\n for i, label in enumerate(labels):\n label_num = i + 1\n all_intersections[label] += np.count_nonzero(intersection == label_num)\n reference_area = np.count_nonzero(reference == label_num)\n prediction_area = np.count_nonzero(prediction == label_num)\n all_cardinalities[label] += reference_area + prediction_area\n return all_intersections, all_cardinalities", "def calculate_area(pred, label, num_classes, ignore_index=255):\n if 
len(pred.shape) == 2:\n pred = pred[np.newaxis, :, :]\n if len(label.shape) == 2:\n label = label[np.newaxis, :, :]\n if not pred.shape == label.shape:\n raise ValueError('Shape of `pred` and `label should be equal, '\n 'but there are {} and {}.'.format(\n pred.shape, label.shape))\n\n # Delete ignore_index\n mask = label != ignore_index\n pred = pred + 1\n label = label + 1\n pred = pred * mask\n label = label * mask\n\n pred = np.eye(num_classes + 1)[pred] # F.one_hot(pred, num_classes + 1)\n label = np.eye(num_classes + 1)[label] # F.one_hot(pred, num_classes + 1)\n pred = pred[:, :, :, 1:]\n label = label[:, :, :, 1:]\n\n pred_area = []\n label_area = []\n intersect_area = []\n\n for i in range(num_classes):\n pred_i = pred[:, :, :, i]\n label_i = label[:, :, :, i]\n pred_area_i = np.sum(pred_i)\n label_area_i = np.sum(label_i)\n intersect_area_i = np.sum(pred_i * label_i)\n pred_area.append(pred_area_i)\n label_area.append(label_area_i)\n intersect_area.append(intersect_area_i)\n pred_area = np.array(pred_area)\n label_area = np.array(label_area)\n intersect_area = np.array(intersect_area)\n return intersect_area, pred_area, label_area", "def IoU_pt(y_pred, y_true):\n smooth = 1.\n y_pred_sig = F.sigmoid(y_pred)\n num = y_true.size(0) # Number of batches\n x = y_pred_sig.view(num, -1).float() # Flatten\n y = y_true.view(num, -1).float()\n intersection = torch.sum(x * y)\n score = (intersection + smooth) / (torch.sum(x) + torch.sum(y) - intersection + smooth)\n out = torch.sum(score)\n print(\"iou {}\".format(out))\n return out", "def compute_accuracy(predictions, labels):\n return labels[predictions.ravel() < 0.5].mean()\n # return np.mean(labels==(predictions.ravel() > 0.5))", "def compute_overlap_metrics(pred_npy, target_npy, metrics=None):\n if metrics is None:\n metrics = ['dice']\n\n for metric in metrics:\n if metric not in {'jaccard', 'dice', 'volume_similarity', 'false_negative', 'false_positive'}:\n raise ValueError('Does not exist the {} metric'.format(metric))\n\n pred = sitk.GetImageFromArray(pred_npy)\n target = sitk.GetImageFromArray(target_npy)\n overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n overlap_measures_filter.Execute(target, pred)\n\n overlap_results = dict()\n for metric in metrics:\n if metric == 'jaccard':\n overlap_results['jaccard'] = overlap_measures_filter.GetJaccardCoefficient()\n elif metric == 'dice':\n overlap_results['dice'] = overlap_measures_filter.GetDiceCoefficient()\n elif metric == 'volume_similarity':\n overlap_results['volume_similarity'] = overlap_measures_filter.GetVolumeSimilarity()\n elif metric == 'false_negative':\n overlap_results['false_negative'] = overlap_measures_filter.GetFalseNegativeError()\n elif metric == 'false_positive':\n overlap_results['false_positive'] = overlap_measures_filter.GetFalsePositiveError()\n\n return overlap_results", "def overlap_images(gtimage, predimage):\n\n\n gtimage=(numpy.array(gtimage)>127)*1\n predimage=(numpy.array(predimage)>127)*1\n\n intersec = numpy.bitwise_and(gtimage, predimage)\n intersec_val = float(numpy.sum(intersec))\n\n union = numpy.bitwise_or(gtimage, predimage)\n\n union_val = float(numpy.sum(union))\n\n if union_val == 0:\n return 0\n else:\n if float(intersec_val / union_val)>0.5:\n return 1\n else:\n return 0", "def calc_accuracy(labels, predicted_labels):\n num_obs = len(labels)\n accuracy = sum(predicted_labels==labels)/num_obs\n return accuracy", "def _subset_accuracy_update(\n preds: Tensor,\n target: Tensor,\n threshold: float,\n top_k: Optional[int],\n) -> 
Tuple[Tensor, Tensor]:\n\n preds, target = _input_squeeze(preds, target)\n preds, target, mode = _input_format_classification(\n preds, target, threshold=threshold, top_k=top_k\n )\n\n if mode == DataType.MULTILABEL and top_k:\n raise ValueError(\n \"You can not use the `top_k` parameter to calculate accuracy for multi-label inputs.\"\n )\n\n if mode == DataType.MULTILABEL:\n correct = (preds == target).all(dim=1).sum()\n total = tensor(target.shape[0])\n elif mode == DataType.MULTICLASS:\n correct = (preds * target).sum()\n total = target.sum()\n elif mode == DataType.MULTIDIM_MULTICLASS:\n sample_correct = (preds * target).sum(dim=(1, 2))\n correct = (sample_correct == target.shape[2]).sum()\n total = tensor(target.shape[0])\n else:\n correct, total = tensor(0), tensor(0)\n\n return correct, total", "def forward(self, y_true_clf, y_pred_clf, y_true_reg, y_pred_reg, training_mask):\n\n # Classification loss\n # clf_loss = self._cross_entropy_loss(y_true_clf, y_pred_clf)\n clf_loss = self._dice_coefficient(y_true_clf, y_pred_clf, training_mask) * 0.01\n\n # Regression loss\n \n # 1. IoU loss\n # split the regression map by channel. Each channel has shape 1 * img_size/4 * img_size/4\n top_gt, right_gt, bottom_gt, left_gt, theta_gt = torch.split(\n y_true_reg, split_size_or_sections=1, dim=1\n )\n top_pred, right_pred, bottom_pred, left_pred, theta_pred = torch.split(\n y_pred_reg, split_size_or_sections=1, dim=1\n )\n\n # Per pixel area calculation for corresponding bbox\n # sum of left + right will give width and sum of top + bottom will give height for each pixel in bbox\n # and multiplication of height and width is area \n area_gt = (top_gt + bottom_gt) * (right_gt + left_gt)\n area_pred = (top_pred + bottom_pred) * (right_pred + left_pred)\n\n # Now calc. area of intersection height and width and then area of intersection\n w_int = torch.min(right_gt, right_pred) + torch.min(left_gt, left_pred)\n h_int = torch.min(top_gt, top_pred) + torch.min(bottom_gt, bottom_pred)\n\n # Area of intersection between gt and prediction\n area_int = w_int * h_int\n\n # From simple set theory\n area_union = area_gt + area_pred - area_int\n\n iou_loss = -torch.log((area_int+1) / (1+area_union)) # +1 is to prevent 0 as log(0) = -inf.\n angle_loss = 1 - torch.cos(theta_pred - theta_gt)\n\n # Regression loss. It consists of IoU loss + bbox rotation angle loss\n regression_loss = iou_loss + self.config[\"fots_hyperparameters\"][\"lam_theta\"] * angle_loss\n\n # For regression loss, only consider the loss for the pixels where the ground truth\n # bboxes are present.\n regression_loss = torch.mean(regression_loss * y_true_clf * training_mask)\n\n # Merge the reg loss and clf loss using hyperparameter lambda reg. 
which\n # keeps balance between two losses\n detection_loss = clf_loss + self.config[\"fots_hyperparameters\"][\"lam_reg\"] * regression_loss\n\n return detection_loss", "def multi_label_acc(predict: torch.Tensor, label: torch.Tensor):\n assert predict.shape == label.shape\n label_num = torch.sum(label, dim=-1)\n acc = 0\n for i in range(predict.size(0)):\n _, predict_ind = torch.topk(predict[i, :], int(label_num[i]))\n right_num = torch.sum(label[i][predict_ind])\n acc += (right_num / label_num[i]).item()\n acc /= predict.size(0)\n return acc", "def accuracy(predictions, labels):\n return 100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]", "def _intersect_and_union(\n segmap_list: T.List[T.Array], mask_list: T.List[T.Array], num_classes: int, ignore_index: int\n) -> T.Dict[str, T.Array]:\n\n zero_array = np.zeros(num_classes, dtype=np.float)\n total_area_dict = {\n \"segmap\": zero_array.copy(),\n \"mask\": zero_array.copy(),\n \"intersect\": zero_array.copy(),\n \"union\": zero_array.copy(),\n }\n\n for segmap, mask in zip(segmap_list, mask_list):\n\n bool_array = mask != ignore_index\n segmap = segmap[bool_array]\n mask = mask[bool_array]\n intersect = segmap[segmap == mask]\n\n bins = np.arange(num_classes + 1)\n segmap_area, _ = np.histogram(segmap, bins=bins)\n mask_area, _ = np.histogram(mask, bins=bins)\n intersect_area, _ = np.histogram(intersect, bins=bins)\n union_area = segmap_area + mask_area - intersect_area\n\n total_area_dict[\"segmap\"] += segmap_area\n total_area_dict[\"mask\"] += mask_area\n total_area_dict[\"intersect\"] += intersect_area\n total_area_dict[\"union\"] += union_area\n\n return total_area_dict", "def compute_occurrences(gold_labels, predictions, none_label):\n occurrences = []\n if len(gold_labels) > 0:\n if len(predictions) > 0:\n for gold in gold_labels:\n if gold in predictions:\n # for handling true positive\n occurrences.append([gold, gold])\n else:\n # for handling false negatives\n occurrences.append([gold, none_label])\n\n # for handling false positives\n for pred in predictions:\n if pred not in gold_labels:\n occurrences.append([none_label, pred])\n else:\n for gold in gold_labels:\n occurrences.append([gold, none_label])\n else:\n pass\n\n return occurrences", "def get_IoU(predictionImage, labelImage):\n prediction = np.array(predictionImage)\n label = np.array(labelImage)\n\n TP = ((prediction == 1) & (label == 1)).sum()\n FP = ((prediction == 1) & (label != 1)).sum()\n FN = ((prediction != 1) & (label == 1)).sum()\n TN = ((prediction != 1) & (label != 1)).sum()\n\n if TP > 0 or FN > 0 or FP > 0: \n IoU_human = TP / (TP + FN + FP)\n else:\n IoU_human = 1\n if TN > 0 or FP > 0 or FN > 0:\n IoU_bg = TN / (TN + FP + FN)\n else:\n IoU_bg = 1\n\n IoU = (IoU_human + IoU_bg) / 2\n #IoU = ((0.5 * IoU_human) + (1.5 * IoU_bg)) / 2\n\n return IoU" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create list of tuples representing specified number of posts to make
def createPosts(self, numPosts): allAuthors = self.makeNames(numPosts) allTitles = self.makeTitles(numPosts) postDetails, totalsDict = PostMaker.makePostLengths(numPosts) allSkateParagraphs = self.getSkateParagraphs(totalsDict[PostMaker.skateType]) allWikihowLines = self.getWikihowLines(totalsDict[PostMaker.wikiType]) madePosts = [] wikiCounter = 0 skateCounter = 0 for index, post in enumerate(postDetails): if post[0] == PostMaker.wikiType: body = " ".join(allWikihowLines[wikiCounter : wikiCounter + post[1]]) madePosts.append((allAuthors[index], allTitles[index], body)) wikiCounter += post[1] else: body = "".join( allSkateParagraphs[skateCounter : skateCounter + post[1]] ).strip() madePosts.append((allAuthors[index], allTitles[index], body)) skateCounter += post[1] return madePosts
[ "def get_posts(off, cnt):\r\n\tposts = mc.get('posts')\r\n\tif(posts == None):\r\n\t\tcursor = db_execute('SELECT * FROM news INNER JOIN users ON news.creator_id = users.id ORDER BY created DESC')\r\n\t\tposts = cursor.fetchall()\r\n\t\tmc.set('posts', posts)\r\n\treturn posts[off:off+cnt]", "def create_test_posts(self):\n self.test_posts = []\n for i in range(N_TEST_USERS):\n self.test_posts += [Post.objects.create(author=self.users[i], text=POSTS[i])]", "def extract_tumblr_posts(client, nb_requests, search_query, before, delta_limit): \n \n posts = []\n for i in range(nb_requests):\n tagged = client.tagged(search_query, filter='text', before=before)\n for elt in tagged:\n timestamp = elt['timestamp']\n if (abs(timestamp - before) < delta_limit):\n before = timestamp\n\n current_post = []\n current_post.append(elt['id'])\n current_post.append(elt['post_url'])\n elt_type = elt['type']\n current_post.append(elt_type)\n current_post.append(timestamp)\n current_post.append(elt['date'])\n current_post.append(elt['tags'])\n current_post.append(elt['liked'])\n current_post.append(elt['note_count'])\n\n if (elt_type == 'photo'):\n # Only take the first image\n current_post.append(elt['photos'][0]['original_size']['url'])\n current_post.append(elt['caption'].replace('\\n',' ').replace('\\r',' '))\n current_post.append(search_query)\n posts.append(current_post)\n elif (elt_type == 'text'):\n current_post.append(np.nan)\n current_post.append(elt['body'].replace('\\n',' ').replace('\\r',' '))\n current_post.append(search_query)\n posts.append(current_post)\n return posts", "def get_latest_posts(self):\n result = []\n count = self.post_set.all().count()\n all = self.post_set.all().order_by('-id')\n if count > 0:\n result.append(all[count-1])\n if count > 3:\n all = all[0:3:-1] # TODO: FIX THIS TO HAVE FIRST ENTRY\n elif count == 3:\n all = all[0:2:-1]\n elif count == 2:\n all = all[0:1:-1]\n else:\n all = []\n for post in all:\n result.append(post)\n return result", "def build_goal_post_msgs(goalposts):\n # Create an empty list of goalposts\n message_list = []\n # Iterate over all goalpost candidates\n for goalpost in goalposts:\n # Create a empty post message\n post_msg = GoalPostInImage()\n post_msg.width = goalpost.get_width()\n if goalpost.get_rating() is not None:\n post_msg.confidence = goalpost.get_rating()\n post_msg.foot_point.x = goalpost.get_center_x()\n post_msg.foot_point.y = goalpost.get_lower_right_y()\n post_msg.top_point.x = goalpost.get_center_x()\n post_msg.top_point.y = goalpost.get_upper_left_y()\n message_list.append(post_msg)\n return message_list", "def getPostIds(self) -> list:\n maxcount = 80\n try:\n postIds1 = requests.get(\n f\"{self.graphUrl}?fields=posts&{self.accessToken}\", verify=False\n )\n if (\n json.load(StringIO(postIds1.headers[\"x-business-use-case-usage\"]))[\n self.pageId\n ][0][\"call_count\"]\n > maxcount\n ):\n time.sleep(2)\n postIds2 = requests.get(\n f\"{postIds1.json()['posts']['paging']['next']}\", verify=False\n )\n postIds3 = requests.get(\n f\"{postIds2.json()['paging']['next']}\", verify=False\n )\n postIds = (\n postIds1.json()[\"posts\"][\"data\"]\n + postIds2.json()[\"data\"]\n + postIds3.json()[\"data\"]\n )\n ids = [post[\"id\"].split(\"_\")[1] for post in postIds]\n return ids\n except Exception as e:\n print(e)", "def getPosts(self):\n with open(self.dataFile,'r') as f:\n head=[f.next() for x in xrange(5)]\n contents = [line.strip() for line in f][:-1]\n records=[contents[x:x+self.LINES_PER_RECORD] for x in xrange(0, len(contents), 
self.LINES_PER_RECORD)]\n posts=[]\n for r in records:\n post = postEdmunds()\n post.populate(r)\n post.threadID=self.ID\n posts.append(post)\n return posts", "def generate_posts(self) -> None:\n\n for i in range(len(self)):\n self[i].generate_posts(\n api=self.api,\n max_posts=self.max_post_per_user\n )", "def _create_pets(self, count: int) -> list:\n pet_collection = []\n for _ in range(count):\n pet = PetFactory()\n pet.create()\n pet_collection.append(pet)\n return pet_collection", "def take(n, collection):\n return [item for item, _ in zip(collection, range(n))]", "def get_posts_by_creator(off, cnt, user):\r\n\tposts = None\r\n\tif(posts == None):\r\n\t\tcursor = db_execute('SELECT * FROM news INNER JOIN users ON news.creator_id = users.id WHERE user = %s ORDER BY created DESC', user)\r\n\t\tposts = cursor.fetchall()\r\n\treturn posts[off:off+cnt]", "def create_ngrams(self, tokens):\n return [tuple(tokens[i:i+self.N]) for i in range(0, len(tokens)-self.N+1)]", "def jobpost_recent_posts(limit=5):\n return list(JobPost.objects.published()[:limit])", "def _split_posts(self, thread_num, html, time_grabbed, board_images_path):\n # Split poage into posts\n fragments = thread_parsers.split_thread_into_posts(html)\n for fragment in fragments:\n # Parse post\n new_post = WarosuPost(thread_num=thread_num, board_images_path=board_images_path, html=fragment, time_grabbed=time_grabbed)\n if new_post.num:\n self.posts[new_post.num] = new_post\n else:\n logging.error(u'New post did not have \"num\", did not store it!')\n return", "def create_booster_pack(reactions):\n booster_pack = []\n if len(reactions) >= 9:\n booster_pack = reactions[:9]\n elif 5 <= len(reactions) < 9:\n booster_pack = reactions[:5]\n\n for i, post in enumerate(booster_pack):\n booster_pack[i] = post['id']\n\n print(f'Post IDs above threshold:', end=\" \")\n print(booster_pack)\n\n return booster_pack", "def build_list(length):\n return build_list_with_step(length, 1)", "def get_data(subs, n_posts=1):\n conn, curs = conn_curs() # this one connects to allan\n curs.execute(\"SELECT Distinct(subreddit) FROM posts\")\n x = [i[0] for i in curs.fetchall()]\n for i in subs:\n if i not in x:\n print(i)\n sub = reddit.subreddit(i)\n hot = sub.hot(limit=n_posts)\n for post in hot:\n text = f\"{post.title} {post.selftext}\".replace(\"'\", \"\")\n which_sub = str(post.subreddit)[:20]\n insert_post(text, which_sub)\n print('uploaded')\n print('Finished sub')\n return", "async def list_posts_as_msg(ctx, posts, post_limit, sort_type) -> None:\n msg_output = \"\"\n for i, post in enumerate(posts):\n msg_output += \"{0} post: {1}: {2}\\n\".format(sort_type, str(i + 1), post.title)\n\n await ctx.send(msg_output)", "def generate_articles_list(number) -> str:\n conn = sqlite3.connect(gv.DB_PATH)\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT title, datetime FROM articles \n WHERE datetime < date('now')\n ORDER BY datetime DESC\n LIMIT (?)\"\"\",(number,))\n t = \"\"\n for elt in cur:\n t += f\"=> article?datetime={elt[1]}&title={elt[0]} {elt[0]}\\n\"\n return t" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create specified amount of post titles from random quote API
def makeTitles(self, number): quoteUrl = "https://fireflyquotes.p.rapidapi.com/quotes/random" headers = { "x-rapidapi-key": self.apiKey, "x-rapidapi-host": "fireflyquotes.p.rapidapi.com", } titles = [] for _ in range(number): response = requests.get(quoteUrl, headers=headers) response.raise_for_status() title = response.json()["body"]["Quote"] titles.append(title) return titles
[ "def get_random_quote():\n global tweet_counter\n tag_wheel = ['inspire', 'management', 'life',\n 'love', 'art', 'students', 'funny']\n if tweet_counter == 0:\n week_order = random.sample(range(len(tag_wheel)), len(tag_wheel))\n today_theme = week_order[tweet_counter]\n url = \"https://quotes.rest/qod?category={}\".format(tag_wheel[today_theme])\n response = requests.get(url)\n if response.status_code == 200:\n quotes = response.json()['contents']['quotes'][0]\n if int(quotes['length']) <= 200:\n tweet_counter += 1\n if (tweet_counter == 7):\n tweet_counter = 0\n return quotes\n return None", "def random_title():\n q = \"\"\" SELECT ArticleID\n FROM ArticleInformation\n ORDER BY RANDOM()\n LIMIT 1000 \"\"\"\n\n curr.execute(q)\n\n titles = [t[0] for t in curr.fetchall()]\n\n return titles", "def create_test_posts(self):\n self.test_posts = []\n for i in range(N_TEST_USERS):\n self.test_posts += [Post.objects.create(author=self.users[i], text=POSTS[i])]", "def create_tweet():\n quotes = get_random_quote()\n while quotes is None:\n quotes = get_random_quote()\n download_image(quotes['background'])\n message = \"\"\"{}\n\n\"{}\" -- {}\"\"\".format(quotes['title'], quotes['quote'], quotes['author'])\n for tags in quotes['tags']:\n tags = tags.title()\n tags = tags.replace(\"-\", \"\")\n message = message + \" #\" + tags\n message += \" #TheySaidSo\"\n return message", "def createPosts(self, numPosts):\n allAuthors = self.makeNames(numPosts)\n allTitles = self.makeTitles(numPosts)\n postDetails, totalsDict = PostMaker.makePostLengths(numPosts)\n allSkateParagraphs = self.getSkateParagraphs(totalsDict[PostMaker.skateType])\n allWikihowLines = self.getWikihowLines(totalsDict[PostMaker.wikiType])\n\n madePosts = []\n wikiCounter = 0\n skateCounter = 0\n\n for index, post in enumerate(postDetails):\n if post[0] == PostMaker.wikiType:\n body = \" \".join(allWikihowLines[wikiCounter : wikiCounter + post[1]])\n madePosts.append((allAuthors[index], allTitles[index], body))\n wikiCounter += post[1]\n else:\n body = \"\".join(\n allSkateParagraphs[skateCounter : skateCounter + post[1]]\n ).strip()\n madePosts.append((allAuthors[index], allTitles[index], body))\n skateCounter += post[1]\n\n return madePosts", "def random_titles(n, m=None):\n q = \"\"\" SELECT articleID, group_concat(redirect) kwd\n FROM OriginalKeywords NATURAL JOIN KeywordForms\n WHERE redirect IN\n (SELECT redirect\n FROM OriginalKeywords NATURAL JOIN KeywordForms\n GROUP BY redirect\n HAVING count(articleID) >= 5)\n AND redirect != 'NULL'\n GROUP BY articleID\"\"\"\n\n\n titles = curr.execute(q).fetchall()\n\n\n test_titles = set()\n for i in range(n):\n r = random.randint(0, len(titles) - 1)\n title = titles[r]\n titles.remove(title)\n # Ignore keywords in the training set (since thats what \n # we are trying to predict)\n test_titles.add(title[0])\n\n return test_titles", "def get_random_page_title(num_pages=1):\r\n # Parameters for fetching a random page\r\n # only grabs articles\r\n params = {\r\n \"action\": \"query\",\r\n \"format\": \"json\",\r\n \"list\": \"random\",\r\n \"rnnamespace\": 0,\r\n \"rnlimit\": num_pages\r\n }\r\n data = SESSION.get(url=URL, params=params).json()\r\n random_pages = data[\"query\"][\"random\"]\r\n titles = [random_pages[i]['title'] for i in range(len(random_pages))]\r\n return titles", "def post_several_articles(self, repeat):\n for n in range(repeat):\n self.client.post(self.articles_url, self.article, format='json')", "def post_quote():\n\n api_url = 
\"http://api.theysaidso.com/qod.json?category={}\"\n categories = ['inspire', 'life']\n try:\n response = requests.get(api_url.format(random.choice(categories)))\n quote = response.json()['contents']['quotes'][0]\n text = '{}'.format(quote['quote'])\n bot.speak(text, \"#inspiration\", quote['author'])\n\n except requests.RequestException:\n bot.speak('I am having an uninspired day. Hope you do better!', \"#inspiration\")", "def generate_random_entries(self, number_of_entries):\n counter = 1\n for i in range(number_of_entries):\n self.mongo_db_service.add_entry(\n {\n 'id': counter,\n 'is_modified': False,\n 'status': random.randint(1, 1000000),\n 'data': Utils.generate_random_string(length=random.randint(8, 15)),\n 'timestamp': int(time.time())\n })\n counter += 1", "def make_tweet():\n\trandom_quote = random.choice(quotes)\n\trandom_quote += \" #seuss\" #Adding in hashtags\n\treturn random_quote", "def generate_posts(self) -> None:\n\n for i in range(len(self)):\n self[i].generate_posts(\n api=self.api,\n max_posts=self.max_post_per_user\n )", "def create_stock_data(request, mkm_sandbox):\n test_articles = {\"article\": []}\n for i in range(1, 1001):\n test_articles[\"article\"].append(\n {\"idProduct\": i, \"idLanguage\": 1, \"comments\": \"test product\", \"count\": 1, \"price\": 4, \"condition\": \"EX\"}\n )\n r = mkm_sandbox.stock_management.post_stock(data=test_articles)\n\n def teardown():\n articles_to_delete = {\"article\": []}\n for a in r.json()[\"inserted\"]:\n articles_to_delete[\"article\"].append(\n {\"idArticle\": a[\"idArticle\"][\"idArticle\"], \"count\": a[\"idArticle\"][\"count\"]}\n )\n mkm_sandbox.stock_management.delete_stock(data=articles_to_delete)\n\n request.addfinalizer(teardown)\n\n return", "def gen_random_sample(engine,\n n_posts=params.subreddit_cluster_params['n_posts'],\n sample_id=params.subreddit_cluster_params['sample_id'],\n ):\n\n print('Generating random sample of {} posts per subreddit...'.format(n_posts))\n sample_filename = 'post_ids_{}_{}.pkl'.format(n_posts,\n sample_id,\n )\n\n try:\n post_ids = pd.read_pickle(sample_filename)\n\n except (OSError, IOError, FileNotFoundError) as e:\n # Downloads list of subreddits with n+ posts per year\n sql = (\"SELECT \"\n \"display_name as subreddit, \"\n \"url as sub_url, \"\n \"post_count \"\n \"FROM all_subreddits \"\n \"WHERE post_count >= {}\".format(n_posts))\n print('Downloading subreddits with at least {} submissions per year from database...'.format(n_posts))\n subs = pd.read_sql(sql,\n engine,\n )\n subs.set_index('subreddit', inplace=True)\n print('{} subreddits in sample dataset'.format(len(subs)))\n print(subs)\n\n # Retrieves IDs for all post in each subreddit\n all_post_ids = load_posts_from_pkl()\n # all_post_ids = all_post_ids[['subreddit', 'fullname']]\n\n # Deletes those posts made in subreddits with less than n posts in the last year\n all_post_ids = all_post_ids.loc[all_post_ids['subreddit'].isin(subs.index)]\n\n # Randomly select n posts per subreddit\n print('Randomly sampling {} submissions per subreddit...'.format(n_posts))\n post_ids = all_post_ids.groupby('subreddit').apply(lambda x: x.sample(n=n_posts,\n random_state=sample_id,\n ))\n post_ids = post_ids.join(subs['sub_url'], how='left')\n post_ids = post_ids[['subreddit', 'sub_url', 'fullname']]\n post_ids.index = post_ids.index.droplevel(1)\n post_ids.drop('subreddit',\n axis=1,\n inplace=True,\n )\n post_ids.to_pickle(sample_filename)\n print(post_ids)\n print('{} submissions in sample dataset'.format(len(post_ids)))\n\n return 
post_ids", "def get_random_title_template(sentiment=\"\", style=\"long\"):\n\n session = create_db_session()\n news_title = session.query(Newsroom_headline). \\\n filter_by(sentiment=sentiment, style=style)\n\n record = news_title[math.floor(random.random() * news_title.count())]\n session.close()\n return record", "def get_articles(rep):\n\n article_url = 'http://webhose.io/filterWebContent?token=' + \\\n WEBHOSE_API_KEY + \\\n '&format=json&sort=crawled&q=%22' + rep.firstname + \\\n '%20' + rep.lastname + \\\n '%22%20language%3Aenglish%20site_type' + \\\n '%3Anews%20thread.country%3AUS'\n articles_response = requests.request('GET', article_url,\n headers=headers)\n articles = articles_response.json()['posts']\n\n for i in range(0, 3):\n if len(articles) == i:\n break\n\n curArticle = articles[i]\n if len(curArticle['text']) > 200:\n curArticle['text'] = curArticle['text'][:200] + '...'\n rep.articles.append(build_article(rep, curArticle))", "async def send_post_as_msg(ctx, posts, post_limit=1) -> None:\n for i, post in enumerate(posts):\n if i == post_limit - 1:\n title = emoji_utils.pastify_string(post.title)\n await ctx.send(title)\n if post.selftext:\n if len(post.selftext) < 2000:\n await ctx.send(post.selftext)\n else:\n for msg in range(0, len(post.selftext), 1500):\n await ctx.send(post.selftext[msg : msg + 1500])\n await ctx.send(\"sauce: \" + post.url)", "def quote(data,length_quote):\n new_string = '\"'\n num_words = 0\n x = random.choice(list(data.keys()))\n while num_words < length_quote:\n if num_words > 0:\n new_string += ' '\n #print(new_string)\n next_word = random.choice(data[x])\n new_string = new_string + next_word\n x = next_word\n num_words = num_words + 1\n new_string += '.\"'\n #print(new_string)\n return new_string", "def get_quote():\n random_number = random.randint(0, len(QUOTES) - 1)\n random_quote = QUOTES[random_number]\n return random_quote" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the citation(s) for this tool
def citation(**kwargs): print_citation()
[ "def citations(self):\n\n # concatenate all blocks citations\n citations = []\n for block in self.blocks:\n citations += block.citations\n\n # remove duplicates\n citations = list(set(citations))\n citation_dict = {}\n\n for name in citations:\n if name[0] == \"@\":\n citation_dict[name] = name\n else:\n citation_dict[name] = default_citations[name]\n\n tex_citep = \", \".join(\n [\n f\"{name} \\citep{{{name}}}\"\n for name in citation_dict.keys()\n if name not in [\"prose\", \"astropy\"]\n ]\n )\n tex_citep += \" and astropy \\citep{astropy}\"\n tex = (\n f\"This research made use of \\\\textsf{{prose}} \\citep{{prose}} and its dependencies ({tex_citep}).\"\n \"\"\n )\n\n return tex, \"\\n\\n\".join(citation_dict.values())", "def display(self):\r\n\r\n bookinfo = '\"{}, written by {}\"'.format(self.title, self.author)\r\n print bookinfo", "def citation():\n\n cite = (\"To cite OSMnx, use:\\n\\n\"\n \"Boeing, G. 2017. OSMnx: New Methods for Acquiring, Constructing, Analyzing, \"\n \"and Visualizing Complex Street Networks. Computers, Environment and Urban \"\n \"Systems, 65(126-139). https://doi.org/10.1016/j.compenvurbsys.2017.05.004\"\n \"\\n\\n\"\n \"BibTeX entry for LaTeX users:\\n\\n\"\n\n \"@article{boeing_osmnx_2017,\\n\"\n \" title = {{OSMnx}: {New} {Methods} for {Acquiring}, {Constructing}, {Analyzing}, and {Visualizing} {Complex} {Street} {Networks}},\\n\"\n \" volume = {65},\\n\"\n \" doi = {10.1016/j.compenvurbsys.2017.05.004},\\n\"\n \" number = {126-139},\\n\"\n \" journal = {Computers, Environment and Urban Systems},\\n\"\n \" author = {Boeing, Geoff},\\n\"\n \" year = {2017}\\n\"\n \"}\")\n\n print(cite)", "def how_to_cite():\n print(\"If you use the API to generate results, please cite our manuscript describing the API - Lindgren et al. 2021, PMID:33560848, https://pubs.acs.org/doi/10.1021/acs.jproteome.0c00919\")\n print('\\n')\n print(\"For instructions on how to cite a specific dataset, please call its how_to_cite method, e.g. 
cptac.Endometrial().how_to_cite()\")", "def citation(self):\n return romannet_citation + few_citation + few_software_citation", "def printComposition(self):\r\n self.findComposition()\r\n for kmer in self.kmerComp:\r\n print(kmer)", "def printInfo(self):\r\n\r\n about = \"Student name is {0}, {1}, and {2} is taking {3}.\".format(\r\n self.lastName, self.firstName, self.pronoun, len(self._courseList))\r\n\r\n print(about)", "def citation(self) -> str:\n return self.collection.extra_fields.get(CITATION)", "def do_showLicence(self, arg):\n\t\tprint(self.license)", "def display(self):\r\n print(\"\\nCop name : \", self.cop_name)\r\n print(\"Cop age : \", self.cop_age)\r\n print(\"Cop work experience : \", self.work_exp)\r\n print(\"Cop designation : \", self.designation)", "def citation(self) -> str:\n return self.item.properties.get(CITATION)", "def get_citation_as_bibtex(self, newline=\"<br/>\", use_hyperlinks=True):\n bibtex = \"\"\n\n if self.pub_venue_type is self.JOURNAL or\\\n self.pub_venue_type is self.ARTICLE:\n bibtex += \"@article{\"\n else:\n bibtex += \"@inproceedings{\"\n\n\n bibtex += self.get_bibtex_id() + newline\n\n # start author block\n bibtex += \" author = {\"\n\n author_idx = 0\n num_authors = self.authors.count()\n for author in self.authors.all():\n citation_name = author.get_citation_name(full_name=True)\n bibtex += citation_name\n\n if (author_idx + 1) < num_authors:\n bibtex += \" and \"\n\n author_idx += 1\n bibtex += \"}\" + newline\n # end author block\n\n bibtex += \" title={{{}}},{}\".format(self.title, newline)\n bibtex += \" booktitle={{{}}},{}\".format(self.book_title, newline)\n bibtex += \" booktitleshort={{{}}},{}\".format(self.book_title_short, newline)\n\n if self.series:\n bibtex += \" series = {\" + self.series + \"},\"\n\n bibtex += \" year={{{}}},{}\".format(self.date.year, newline)\n\n if self.isbn:\n bibtex += \" isbn={{{}}},{}\".format(self.isbn, newline)\n\n if self.geo_location:\n bibtex += \" location={{{}}},{}\".format(self.geo_location, newline)\n\n if self.page_num_start and self.page_num_end:\n bibtex += \" pages={{{}--{}}},{}\".format(self.page_num_start, self.page_num_end, newline)\n\n if self.num_pages:\n bibtex += \" numpages={{{}}},{}\".format(self.num_pages, newline)\n\n if self.doi:\n if use_hyperlinks:\n bibtex += \" doi={{<a href='{}'>{}</a>}},{}\".format(self.doi, self.doi, newline)\n else:\n bibtex += \" doi={{{}}},{}\".format(self.doi, newline)\n\n if self.official_url:\n if use_hyperlinks:\n bibtex += \" url={{<a href='{}'>{}</a>}},{}\".format(self.official_url, self.official_url, newline)\n else:\n bibtex += \" url={{{}}},{}\".format(self.official_url, newline)\n\n if self.acmid:\n bibtex += \" acmid={{{}}},{}\".format(self.acmid, newline)\n\n if self.publisher:\n bibtex += \" publisher={{{}}},{}\".format(self.publisher, newline)\n\n bibtex += \"}\"\n return bibtex", "def getCitation(self, identifier):\n\n catalog = getToolByName(self.context, 'portal_catalog')\n results = catalog(\n portal_type='Citation', \n pmr2_citations=identifier,\n )\n return results", "def printCancers():\r\n \r\n for i in range(len(cancerNames)):\r\n print(i,cancerNames[i])", "def print_report():\n print_days_percent_errors()\n print \"\"\n print_popular_authors()\n print \"\"\n print_popular_articles()\n print \"\"", "def get_citation_as_html(self):\n citation = \"\"\n author_idx = 0\n num_authors = self.authors.count()\n for author in self.authors.all():\n citation += author.get_citation_name(full_name=False)\n\n if (author_idx + 1) < 
num_authors:\n citation += \", \"\n else:\n citation += \" \"\n\n author_idx += 1\n\n citation += \"({}). \".format(self.date.year)\n citation += self.title + \". \"\n citation += \"<i>{}</i>. \".format(self.book_title_short)\n\n if self.official_url:\n citation += \"<a href={}>{}</a>\".format(self.official_url, self.official_url)\n\n return citation", "def list_() -> None:\n available_sources = [\n \"Wikipedia (wiki) [with different locales]\",\n \"Accadde Oggi (accadde)\",\n ]\n print(\"\\nAvailable sources:\\n\")\n\n for source in available_sources:\n print(f\" • {source}\")", "def print_desc(self):\n print(self.description)\n return", "def _print_paper_details(paper: Paper, highlights: List[str], show_abstract: bool, show_extra_info: bool): # pragma: no cover\n\n print(f'{Fore.GREEN}{Style.BRIGHT}Title:{Style.NORMAL} {paper.title}')\n print(f'{Fore.GREEN}{Style.BRIGHT}Authors:{Style.NORMAL} {\" | \".join(paper.authors)}')\n if len(paper.keywords) > 0:\n print(f'{Fore.GREEN}{Style.BRIGHT}Keywords:{Style.NORMAL} {\", \".join(paper.keywords)}')\n print(f'{Fore.GREEN}{Style.BRIGHT}Publication date:{Style.NORMAL} {paper.publication_date.strftime(\"%Y-%m-%d\")}')\n\n print('\\n')\n\n if show_abstract:\n abstract = paper.abstract\n for term in highlights:\n abstract = re.sub(r'({0}+)'.format(term), Fore.YELLOW + Style.BRIGHT +\n r'\\1' + Fore.RESET + Style.NORMAL, abstract, flags=re.IGNORECASE)\n print(abstract)\n\n print('\\n')\n\n if show_extra_info:\n if paper.comments is not None:\n print(f'{Style.BRIGHT}Paper comments:{Style.NORMAL} {paper.comments}')\n if paper.citations is not None:\n print(f'{Style.BRIGHT}Paper citations:{Style.NORMAL} {paper.citations}')\n if paper.number_of_pages is not None:\n print(f'{Style.BRIGHT}Paper number of pages:{Style.NORMAL} {paper.number_of_pages}')\n if paper.doi is not None:\n print(f'{Style.BRIGHT}Paper DOI:{Style.NORMAL} {paper.doi}')\n if paper.databases is not None:\n print(f'{Style.BRIGHT}Paper found in:{Style.NORMAL} {\", \".join(paper.databases)}')\n if len(paper.urls) > 0:\n print(f'{Style.BRIGHT}Paper URL:{Style.NORMAL} {list(paper.urls)[0]}')\n\n if paper.publication is not None:\n print(f'{Style.BRIGHT}Publication name:{Style.NORMAL} {paper.publication.title}')\n print(f'{Style.BRIGHT}Publication is potentially predatory:{Style.NORMAL} {paper.publication.is_potentially_predatory}')\n if paper.publication.category is not None:\n print(f'{Style.BRIGHT}Publication category:{Style.NORMAL} {paper.publication.category}')\n if len(paper.publication.subject_areas) > 0:\n print(f'{Style.BRIGHT}Publication areas:{Style.NORMAL} {\", \".join(paper.publication.subject_areas)}')\n if paper.publication.isbn is not None:\n print(f'{Style.BRIGHT}Publication ISBN:{Style.NORMAL} {paper.publication.isbn}')\n if paper.publication.issn is not None:\n print(f'{Style.BRIGHT}Publication ISSN:{Style.NORMAL} {paper.publication.issn}')\n if paper.publication.publisher is not None:\n print(f'{Style.BRIGHT}Publication publisher:{Style.NORMAL} {paper.publication.publisher}')\n if paper.publication.cite_score is not None:\n print(f'{Style.BRIGHT}Publication Cite Score:{Style.NORMAL} {paper.publication.cite_score}')\n if paper.publication.sjr is not None:\n print(f'{Style.BRIGHT}Publication SJR:{Style.NORMAL} {paper.publication.sjr}')\n if paper.publication.snip is not None:\n print(f'{Style.BRIGHT}Publication SNIP:{Style.NORMAL} {paper.publication.snip}')\n\n print('\\n')\n\n if paper.selected is not None:\n\n print(f'{Fore.BLUE}{Style.BRIGHT}Selected: 
{Style.NORMAL}{\"Yes\" if paper.selected else \"No\"}')\n \n if paper.categories is not None and len(paper.categories.items()) > 0:\n categories_string = ' | '.join([f'{k}: {\", \".join(v)}' for k, v in paper.categories.items() if len(v) > 0])\n print(f'{Fore.BLUE}{Style.BRIGHT}Categories: {Style.NORMAL}{categories_string}')\n\n print('\\n')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs a unique name for a directory in ./output using current time and script arguments
def make_output_dir_name(args): prefix = datetime.now().strftime('%Y%m%d-%H%M') dir_name = f'./output/{prefix}_epochs={args.epochs}_lr={args.lr}' dir_name += '_with-pretrained-backbone' if args.pretrained_backbone else '_no-pretrained-backbone' if args.no_geometry_loss: dir_name += '_no-geometry-loss' if args.resume: # Extract date prefix from checkpoint path: # e.g. 20210320-1439 in output/20210320-1439_epochs=1_lr=0.005/checkpoint.pth dir_name += f'_resume={str(args.resume.parent.name).split("_")[0]}' return dir_name
[ "def default_output_dir():\n now = datetime.datetime.now()\n ##output_dir = \"{}-{}-{}.{}-{}-{}.{}\".format(now.year, now.month, now.day, now.hour, now.minute, now.second, now.microsecond)\n output_dir = \"{}-{}-{}.{}-{}-{}\".format(now.year, now.month, now.day, now.hour, now.minute, now.second)\n logger.debug('Generated output directory \"{}\"'.format(output_dir))\n \n return output_dir", "def generate_file_name():\n import datetime\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n filename = \"game saved at {}\".format(now)\n return filename", "def create_pathname(self, output_path):\n self.generate_name()\n\n return os.path.join(output_path, self.name)", "def gen_filename() -> str:\n return str(datetime.timestamp(datetime.now())).replace(\".\", \"\")", "def get_build_dir_name(self) -> str:\n return f\"{self.launch_time}-{self.name}\"", "def create_name(base_folder, name):\n now = datetime.now()\n date_time = now.strftime(\"%m-%d-%y-%H-%M-%S\") + f\"-{name}\"\n base_folder += date_time\n return base_folder", "def build_filenamebase(args):\n\n # transform arguments args to a dictionary\n args_dict=vars(args)\n # contruct the filename with the simulation parameters:\n\n filename=''\n for key, val in sorted(args_dict.items()):\n if key != 'silent' and key != 'freediffusion' and key != 'Nf' and key != 'log':\n filename = filename + key + str(val)+'_'\n if args.freediffusion:\n filename = filename + 'freediffusion_'\n return filename", "def create_file_name():\n # This generates a name that is between 3 to 63 chars long\n return str(uuid.uuid4())", "def create_output_directory(args):\n if args.testrun:\n output_folder = args.outputpath+datetime.datetime.now().strftime('%Y_%m_%d_%Hh%M')+\"_TEST/\"\n output_log_path = \"OutPut_Logs/\" + datetime.datetime.now().strftime('%Y_%m_%d_%Hh%M') + \"_TEST\"\n print \"Name of output dir: \", output_folder\n else:\n os.system('xrdfs root://cmseos.fnal.gov/ mkdir ' + args.outputpath + args.outputdir)\n output_folder = args.outputpath+args.outputdir+\"/\"+datetime.datetime.now().strftime('%Y_%m_%d_%Hh%M')\n output_log_path = \"OutPut_Logs/\" + args.outputdir+\"/\"+ datetime.datetime.now().strftime('%Y_%m_%d_%Hh%M')\n print \"Name of output dir: \", output_folder\n\n # create directory in eos for output files\n os.system('xrdfs root://cmseos.fnal.gov/ mkdir ' + output_folder)\n # create directory in pwd for log files\n os.system('mkdir -p ' + output_log_path + \"/Logs\")\n return output_folder, output_log_path", "def generate_default_name():\n return \"{}{}\".format(os.getpid(), str(time.time()).replace(\".\", \"\"))", "def make_save_dir(self):\n\n today = datetime.now()\n path = \"./Output/\" + today.strftime('%H_%M_%S_%d_%m_%Y')\n os.mkdir(path)\n\n return path", "def name_file(self, output_filename):\n return self.output_path / output_filename", "def _get_output_file_name(self):\n datetime_suffix = datetime.now().strftime('%Y%m%d_%H%M%S')\n\n # Only select the non-empty strings from the file name parts\n output_file_name = '_'.join([a for a in\n [self.output_file_name_prefix, self.output_file_name,\n self.output_file_name_suffix, datetime_suffix] if a\n ])\n\n return f\"{output_file_name}{self._get_output_file_extension()}\"", "def make_directory(countyName, date):\n \n #pathRoot is taken from the config_all file\n \n outdirectory = pathRoot + '/tweetsPDsentiment/output/' + countyName + '/tweetStreams/' + date\n return outdirectory", "def format_name(root_dir, time_str, room, ext):\n rootdir = root_dir\n dir_format = 
'{root}/{date}/{group}'\n tempdir = '{root}/active'.format(root=rootdir)\n name_format = '{date} Showroom - {handle} {time}{count}.{ext}'\n\n # count = 0\n # count_str = '_{:02d}'\n\n destdir = dir_format.format(root=rootdir, date=time_str[:10], group=room.group)\n\n os.makedirs('{}/logs'.format(destdir), exist_ok=True)\n\n _date, _time = time_str.split(' ')\n short_date = _date[2:].replace('-', '')\n\n outfile = name_format.format(date=short_date, handle=room.handle,\n time=_time.replace(':', ''), count='', ext=ext)\n\n return tempdir, destdir, outfile", "def _make_output_file_path_unique(self, run_name: str, op_name: str,\n output_file: str) -> str:\n if not output_file.startswith(\"/tmp/\"):\n return output_file\n return f'{self._pipeline_root}/{run_name}/{op_name.lower()}/{output_file[len(\"/tmp/\"):]}'", "def get_output_basename(self):\n cumf_base_name = self.options[\"full_task_name\"]\n cumf_base_name = re.sub(r\"[() ]\", r\"_\", cumf_base_name)\n if cumf_base_name.endswith(\"_\"):\n cumf_base_name = cumf_base_name[:-1]\n return \"ana.\" + cumf_base_name", "def _generate_log_path(self):\n file_name = self.if_name + \"_\" + \\\n datetime.today().strftime(\"%Y%m%d_%H%M%S\")\n return os.path.join(self.log_dir, file_name)", "def set_output_dir(self, inputfile):\r\n\t\tprint('******* Output Directory *******')\r\n\t\tif not os.path.exists(inputfile.DirOutput):\r\n\t\t\tos.mkdir(inputfile.DirOutput)\r\n\t\t\tprint(\"Directory \", inputfile.DirOutput, \" Created \")\r\n\t\telse:\r\n\t\t\tprint(\"Directory \", inputfile.DirOutput, \" already exists\")\r\n\t\t\r\n\t\toutput_dir_nc = inputfile.DirOutput+'/TimeFrames'\r\n\t\t\r\n\t\tif not os.path.exists(output_dir_nc):\r\n\t\t\tos.mkdir(output_dir_nc)\r\n\t\t\tprint(\"Directory \", output_dir_nc, \" Created \")\r\n\t\telse:\r\n\t\t\tprint(\"Directory \", output_dir_nc, \" already exists\")\r\n\t\t\r\n\t\t# Output filenames\r\n\t\tself.fnameTS_avg = inputfile.DirOutput+'/' + inputfile.Mname + '_avg'\r\n\t\tself.fnameTS_OF = inputfile.DirOutput+'/' + inputfile.Mname + '_OF_'\r\n\t\tself.fnameTS_UZ = inputfile.DirOutput+'/' + inputfile.Mname + '_UZ_'\r\n\t\tself.fnameTS_GW = inputfile.DirOutput+'/' + inputfile.Mname + '_GW_'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints current configuration to a file in the output directory
def print_config_file(output_dir, args):
    with open(os.path.join(output_dir, 'config.cfg'), 'w') as f:
        for k, v in vars(args).items():
            f.write(f'{k}={v}\n')
        f.write(f'device={get_device()}')
[ "def print_config_file():\r\n print(CONFIG_FILE_CONTENT, end=\"\")", "def _display_config_file(self):\n print(f'\\n{ProctorConfig.config_file}:')\n with open(ProctorConfig.config_file) as f:\n print(f.read())", "def cc_print_cmd(yaml_file):\n data = yaml.safe_load(yaml_file)\n with open(cc.config_template_fn(), \"r\") as f:\n config_t = jinja2.Template(f.read())\n sys.stdout.write(config_t.render(data))", "def print_config(kubesplit_config: KubesplitConfig):\n io_config = kubesplit_config.io_config\n print(\n \"[kubesplit(\"\n + __version__\n + \")] Processing: input=\"\n + io_config.input_display_name\n + \", output_dir=\"\n + io_config.output_dir\n + \", clean_output_dir=\"\n + str(kubesplit_config.clean_output_dir)\n + \", prefix_resource_files=\"\n + str(kubesplit_config.prefix_resource_files)\n + \", typ=\"\n + kubesplit_config.yamkix_config.parsing_mode\n + \", explicit_start=\"\n + str(kubesplit_config.yamkix_config.explicit_start)\n + \", explicit_end=\"\n + str(kubesplit_config.yamkix_config.explicit_end)\n + \", default_flow_style=\"\n + str(kubesplit_config.yamkix_config.default_flow_style)\n + \", quotes_preserved=\"\n + str(kubesplit_config.yamkix_config.quotes_preserved)\n + \", dash_inwards=\"\n + str(kubesplit_config.yamkix_config.dash_inwards)\n + \", spaces_before_comment=\"\n + str(kubesplit_config.yamkix_config.spaces_before_comment)\n + \", show_version=\"\n + str(kubesplit_config.version),\n file=sys.stderr,\n )", "def output(self):\n self.ensure_output_path()\n dst = os.path.join(self.output_path, self.filename)\n self.feed.atom_file(dst, pretty=True)", "def _dump_configuration():\n # The config file is expected to be YAML, but it should still be able\n # to read a json file\n ctx.logger.info('Dumping configuration from the inputs...')\n config = ctx.instance.runtime_properties['config']\n with open(CONFIG_PATH, 'w') as f:\n json.dump(config, f)", "def generate(self, target_dir: Optional[str]):\n for config_file in self.config_files:\n config_file.write(target_dir)", "def print_config(config):\n log.debug('options: \\n' + yaml.dump(config.__dict__, explicit_start=True, explicit_end=True,\n default_flow_style=False))", "def view_conf() -> None:\n print(Config.get_conf())", "def render_cfg(self, filename=None):\n if filename is not None:\n f = file(filename, 'w')\n f.write(self._render_cfg())\n f.close()\n return\n return self._render_cfg()", "def save(self):\n with open(\"config.py\", 'w') as configfile:\n self.config.write(configfile)\n pass\n pass", "def print_config(config):\n print('#'*50)\n [print(f'# {key}: {value}') for key, value in config.items()]\n print('#'*50)", "def set_output_file( self, filename ):\n self.__out_file = filename\n self.__dry_run = False", "def set_output_file(self, filename):\n self.__out_file = filename\n self.__dry_run = False", "def _write_default_cfg_file(self, cfg_path):\n with open(cfg_path, 'wt') as cfg_file:\n cfg_file.write('[main]\\n')\n cfg_file.write('par_file_age=30 # units are days\\n')", "def writeRedConf():\n global GlobalConfig\n if isinstance(GlobalConfig['redConfDir'],str) or isinstance(GlobalConfig['redConfDir'],unicode):\n if os.path.isdir(GlobalConfig['redConfDir']):\n config = MyConfig()\n config.add_section('settings')\n config.set('settings','confdir',GlobalConfig['redConfDir'])\n ConfigFile=codecs.open(redirectConfPath(),encoding='utf-8',mode='w')\n config.write(ConfigFile)\n ConfigFile.close()", "def _write_gin_configs(output_file):\n config_str = gin.operative_config_str()\n logging.info('=' * 80)\n 
logging.info('Gin configs\\n%s', config_str)\n logging.info('=' * 80)\n with tf.gfile.GFile(output_file, 'w') as f:\n f.write(config_str)", "def print_settings():\n click.echo(r\"-------------------------------------\")\n click.echo(r\"app_conf_dir = %s\" % app_conf_dir)\n click.echo(r\"git_work_dir = %s\" % git_work_dir)\n click.echo(r\"collectstatic_cmd = %s\" % collectstatic_cmd)\n click.echo(r\"heroku_api_token = %s\" % heroku_api_token)\n click.echo(r\"-------------------------------------\")", "def write_results(self, resultFile=None) -> str:\n currentPath = os.getcwd()\n\n if not resultFile:\n resultFile = self.get_default_result_file_name()\n\n with open(resultFile, 'w') as f:\n self.print_configuration_parameters(f)\n self.print_backtest_results(f)\n\n if self.outputTrades:\n self.print_trades(f)\n\n filePath = os.path.join(os.getcwd(), resultFile)\n\n os.chdir(currentPath)\n return filePath" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a torch.device of type 'cuda' if available, else of type 'cpu'
def get_device() -> torch.device:
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
[ "def get_device():\n return torch.device('cuda' if torch.cuda.is_available() else 'cpu')", "def resolve_device(device = None) -> torch.device:\n if device is None or device == 'gpu':\n device = 'cuda'\n if isinstance(device, str):\n device = torch.device(device)\n if not torch.cuda.is_available() and device.type == 'cuda':\n device = torch.device('cpu')\n print('No cuda devices were available. The model runs on CPU')\n return device", "def _init_device(self, cuda_device: torch.device = torch.device('cpu')):\n if self.config.cuda_enabled() and torch.cuda.is_available():\n return torch.device(cuda_device)\n else:\n # Force usage of CPU\n torch.cuda.is_available = lambda: False\n return torch.device(\"cpu\")", "def get_device(gpu_list=None):\n if gpu_list is None:\n gpu_list = list(range(torch.cuda.device_count()))\n elif not gpu_list:\n return torch.device('cpu'), False\n return torch.device('cuda:{}'.format(gpu_list[0])), True", "def maybe_cuda(t):\n if torch.cuda.is_available():\n return t.cuda()\n return t", "def cuda(self: T, device: Optional[int] = None) -> T:\n return self.to(torch.device(f\"cuda:{device}\" if device is not None else \"cuda\"))", "def get_device(gpus=None):\n if not gpus:\n parallel = False\n device = torch.device(\"cpu\")\n return parallel, device\n if len(gpus) > 1:\n parallel = True\n device = torch.device(\"cpu\")\n else:\n parallel = False\n device = torch.device(gpus[0])\n return parallel, device", "def device(use_gpu=False):\n if use_gpu:\n return ops.device(\"/gpu:0\")\n return ops.device(\"/cpu:0\")", "def get_device_from_fit_dictionary(X: Dict[str, Any]) -> torch.device:\n if not torch.cuda.is_available():\n return torch.device(\"cpu\")\n\n return torch.device(X.get(\"device\", \"cpu\"))", "def cuda(tensor):\n\n return tensor.to(args.device)", "def get_device(tensor_or_module, default=None):\n if hasattr(tensor_or_module, \"device\"):\n return tensor_or_module.device\n elif hasattr(tensor_or_module, \"parameters\"):\n return next(tensor_or_module.parameters()).device\n elif default is None:\n raise TypeError(f\"Don't know how to get device of {type(tensor_or_module)} object\")\n else:\n return torch.device(default)", "def _is_cuda_available():\n dev = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n assert dev == torch.device(\"cuda\")", "def setup_device(\n model: nn.Module, target_devices: List[int]\n) -> Tuple[torch.device, List[int]]:\n available_devices = list(range(torch.cuda.device_count()))\n\n if not available_devices:\n log.warning(\n \"There's no GPU available on this machine. Training will be performed on CPU.\"\n )\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n if not target_devices:\n log.info(\"No GPU selected. Training will be performed on CPU.\")\n device = torch.device(\"cpu\")\n model = model.to(device)\n return model, device\n\n max_target_gpu = max(target_devices)\n max_available_gpu = max(available_devices)\n\n if max_target_gpu > max_available_gpu:\n msg = (\n f\"Configuration requests GPU #{max_target_gpu} but only {max_available_gpu} \"\n \"available. 
Check the configuration and try again.\"\n )\n log.critical(msg)\n raise Exception(msg)\n\n log.info(f\"Using devices {target_devices} of available devices {available_devices}\")\n device = torch.device(f\"cuda:{target_devices[0]}\")\n if len(target_devices) > 1:\n model = nn.DataParallel(model, device_ids=target_devices)\n else:\n model = model.to(device)\n return model, device", "def cuda(self, device=None):\n if torch.cuda.is_available():\n self.is_cuda = True\n self.device = device\n self.network_module.cuda(device)\n print(\"Moving \", self.name, \" to GPU\")\n else:\n print(\"CUDA is unavailable\")\n return self", "def set_gpu(gpu, enable_benchmark=True):\n if len(str(gpu)) > 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu\n parallel = True\n device = torch.device(\"cuda:{}\".format(','.join([str(a) for a in range(len(gpu.split(',')))])))\n print(\"Devices being used:\", device)\n else:\n parallel = False\n device = torch.device(\"cuda:{}\".format(gpu))\n print(\"Device being used:\", device)\n torch.backends.cudnn.benchmark = enable_benchmark\n return device, parallel", "def get_module_device(module: nn.Module) -> torch.device:\n return next(module.parameters()).device", "def device_by_cpu(self, cpu, type=CS_DEVTYPE_CORE):\n for d in self.devices:\n if d.cpu_number == cpu and d.type == type:\n return d\n return None", "def set_device_id(device_id: Optional[Union[int, str]] = None) -> str:\r\n if device_id is not None and device_id != \"cpu\" and torch.cuda.is_available():\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(device_id)\r\n return \"cuda\"\r\n else:\r\n return \"cpu\"", "def place(tensor, device=-1):\n\n if device < 0:\n return tensor.cpu()\n else:\n return tensor.cuda(device)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for reading CLIMOD2 csv data into pandas dataframe. Missing data values are converted to NaN. Trace values are converted to zero.
def read_climod2(path):
    df = pd.read_csv(path, index_col=0, header=0, na_values=['m', 'M'],
                     parse_dates=True, skipinitialspace=True)
    # Get list of columns read
    # cols = list(df.columns.values)
    # Replace 'T' values with 0.0, for now. (T = trace amount)
    df = df.replace('T', 0.0)
    # Make sure all columns are suitable dtype (note, may want to change
    # so that specific cols have dtype best suited for them...)
    df = df.convert_objects(convert_numeric=True)
    # Return the data read from path as a pandas dataframe.
    return df
[ "def prepare_df(args):\r\n path, column, replace_commas, reverse = args\r\n data = pd.read_csv(path)\r\n data = data[[column]]\r\n if reverse:\r\n data.index = data.index[::-1]\r\n data = data.iloc[::-1]\r\n if replace_commas:\r\n data[column] = data[column].apply(lambda x: float(x.replace(',', '')))\r\n else:\r\n data[column] = data[column].apply(lambda x: float(x))\r\n return data", "def pines_log_reader(path):\n try:\n df = pd.read_csv(path)\n except:\n print('Something is wrong with {}, inspect!'.format(path.name))\n breakpoint()\n\n # Remove trailing/leading spaces from column names\n df.columns = df.columns.str.lstrip()\n df.columns = df.columns.str.rstrip()\n\n # Remove our header comment idicator in the first column if it's there.\n if '#' in df.columns[0]:\n df.rename(\n columns={df.columns[0]: df.columns[0].replace('#', '')}, inplace=True)\n\n # Remove trailing and leading spaces from log entries.\n for key in df.keys():\n try:\n df[key] = df[key].str.strip()\n except:\n continue\n\n return df", "def _to_dataframe(self, raw):\n\n # if data is already a DataFrame, do nothing.\n if isinstance(raw, pd.DataFrame):\n return raw\n\n output = pd.read_csv(raw)\n\n return output", "def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")", "def parse(file_name):\n return pd.read_csv(file_name, na_values='---')", "def from_csv(cls, filepath_or_buffer): \n records = pd.read_csv(filepath_or_buffer)\n\n return cls(records)\n\n # ------------------------------------------------------------------\n # Old implementation kept for future use:\n\n # # Read the data from the csv file, assuming the third column of the\n # # file represents timestamp and parsing it as a datetime.\n # records = pd.read_csv(\n # filepath,\n # index_col=[0, 1],\n # header=[0, 1], \n # parse_dates=[2]\n # )\n\n # # Convert the index's 'offset' level to TimedeltaIndex.\n # records.index = records.index.set_levels(\n # pd.TimedeltaIndex(data.index.get_level_values('offset')),\n # level='offset')\n\n # # Fix column level values, an artifact of blank level values in a\n # # .csv file.\n # fields = data.columns.get_level_values('field')\n\n # #srcs = data.columns.get_level_values('source').str.replace('Un.*', 'device')\n # srcs = data.columns.get_level_values('elev_source').str.replace('Un.*', 'device')\n \n # col_tups = [(field, src) for field, src in zip(fields, srcs)]\n # data.columns = pandas.MultiIndex.from_tuples(col_tups,\n # names=['field', 'source'])\n # data['time', 'device'] = \\\n # (data['timestamp', 'device'] \\\n # - data['timestamp', 'device'].iloc[0]).dt.total_seconds()\n\n # ------------------------------------------------------------------", "def load_csv():\n\ttry:\n\t\tdf = pd.read_csv(DATASET_CSV_PATH)\n\texcept:\n\t\tprint('Error reading %s. Make sure file exists or try to regenerate it using generate_csv() method.')\n\t\tdf = pd.DataFrame()\n\n\treturn df", "def readTruBlu(csvfile):\n sep = ','\n header = 0\n skiprows = 16 #this is somewhat weak, number of lines could change over time??\n\t# Definitely weak. 
Probably an automated read to csv header would be better\n index_col = 3\n #names = ['ID','Name','Address','Time of Acquisition','Elapsed(Sec)','Level(PSI)','Temperature (\\'C)','Battery Voltage(Volt)','Supply Voltage(Volt)','Scan No','blank']\n parse_dates = True\n #skip_footer = 1\n #print(csvfile)\n #df = read_csv(csvfile, sep=sep, names=names, skiprows=skiprows, index_col=index_col, parse_dates=parse_dates)\n \n try:\n if os.stat(csvfile).st_size > 0:\n df = read_csv(csvfile, sep=sep, skiprows=skiprows, header=header, index_col=index_col, parse_dates=parse_dates)\n return df\n else:\n print((csvfile + \" is empty\"))\n except OSError:\n print((csvfile + \" does not exist\"))", "def csv_to_dataframe(csv):\n data = pd.read_csv(csv,thousands='.', decimal=',', index_col=[0])\n return data", "def read_demand_dataframe():\n \n # Point to where you've stored the CSV file on your local machine\n \"remember read demandv1.2 file attached as modified datset \"\n \"original demand file wil not work \"\n desktop = os.path.join(os.path.expanduser('~'),\"Desktop\")\n filepath = os.path.join(desktop,\"Demandv1.2.csv\")\n \n \n\n dataframe = pd.read_csv(filepath, sep=\",\",names=['Month DD Raised','No. of FTE Request Raised','SkillList','Location'], header=1)\n\n \n\n\n return dataframe", "def read_data(datapath):\n data = pd.read_csv(datapath, index_col='ID')\n ####Deleting cols. with only 0 or only 1 values: \n df = (data.sum (axis = 0) != 0) & (data.sum (axis = 0) != len(data)) \n data = data[df[df].index]\n return data", "def read_dataset(filepath):\r\n data_frame = pd.read_csv(filepath, dtype={'date': str})\r\n # checks for inconsistent or missing data and imputes it\r\n data_frame = check_data_validity(data_frame)\r\n return data_frame", "def import_csv(file_path):\n #data_frame = csv_import_adapter.import_dataframe_from_path(\n data_frame=pd.read_csv(\n os.path.join(file_path), sep=\";\")\n data_frame[\"time:timestamp\"] = data_frame[\"time:timestamp\"].apply(lambda x:\n datetime.strptime(x, '%d-%m-%Y:%H.%M'))\n if 'time:complete' in data_frame.columns:\n data_frame[\"time:complete\"] = pd.to_datetime(data_frame[\"time:complete\"], format='%d-%m-%Y:%H.%M')\n data_frame[\"Activity\"] = data_frame[\"concept:name\"]\n parameters = {constants.PARAMETER_CONSTANT_CASEID_KEY: \"concept:name\",\n constants.PARAMETER_CONSTANT_ACTIVITY_KEY: \"activity\",\n constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: \"time:timestamp\"}\n csv_log = conversion_factory.apply(data_frame, parameters=parameters)\n print(\"Import of csv successful,with {0} traces in total\".format(len(csv_log)))\n return csv_log", "def convert_to_df(path):\n return pd.read_csv(path, sep='\\t')", "def load_tick_sample() -> pd.DataFrame:\r\n\r\n devadarsh.track('load_tick_sample')\r\n\r\n project_path = os.path.dirname(__file__)\r\n tick_df = pd.read_csv(os.path.join(project_path, 'data/tick_data.csv'), index_col=0, parse_dates=[0])\r\n\r\n return tick_df", "def read_csv_as_str(*args, **kwargs):\n kwargs['dtype'] = str\n kwargs['na_filter'] = False\n return pd.read_csv(*args, **kwargs)", "def csv_to_pd(csvfname):\n csvfile = open(csvfname)\n\n line = 'test'\n counter = 0\n while line != 'Data starts here.\\n':\n line = csvfile.readline()\n counter = counter + 1\n\n data = pd.read_csv(csvfname, skiprows=counter)\n data.sort_values(['Track_ID', 'Frame'], ascending=[1, 1])\n\n return data", "def __process_nasa_co2_data(file):\n with open(file, 'r') as fin:\n all_lines = fin.readlines()\n\n header_lines = np.array([1 for x in all_lines if 
x.startswith('#')]).sum()\n\n co2_data = pd.read_csv(file, skiprows=header_lines, header=None,\n delim_whitespace=True)\n co2_data[co2_data == -99.99] = np.nan\n\n co2_data.columns = ['Year', 'Month', 'Year Fraction', 'Average', 'Interpolated',\n 'Trend', 'N Days']\n\n co2_data.set_index(['Year', 'Month'], inplace=True)\n new_idx = [datetime(x[0], x[1], 1) for x in co2_data.index]\n co2_data.index = new_idx\n co2_data.index.name = 'Date'\n\n return co2_data", "def make_dataframe(csv):\n try:\n dataframe = pd.read_table(csv, sep=\"\\s+|,\", engine=\"python\")\n except:\n error(\"{} does not exist or cannot be read\".format(csv),\n continue_exec=False)\n return dataframe" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is called when somebody on the elevator chooses a floor. This could happen at any time, whether or not the elevator is moving. Any floor could be requested at any time.
def on_floor_selected(self, floor):
    if not self.valid_floor(floor):
        return
    direction_to_floor = self.direction_to(floor)
    if direction_to_floor is None:
        self.log("missed the boat")
        return
    # Check the other queue for duplicates
    other_direction = self.other_direction(direction_to_floor)
    if self.orders[other_direction]:
        _floor = self.orders[other_direction][0].floor
        if _floor == floor:
            # Serve that, but not this floor request (line 485)
            return
    if self.bounded_direction:
        self.log("floor selected. bounded direction detected. direction to floor %d: %s" %
                 (floor, self.direction_str(direction_to_floor)))
        if direction_to_floor == self.bounded_direction:
            self.current_direction = self.bounded_direction
            self.bounded_direction = None
        else:
            self.log("floor selection ignored. Mismatch between bounded direction and direction to floor selected")
            # self.bounded_direction = None
            return
    if self.current_direction and self.current_direction != direction_to_floor:
        # Set it to wait for requests to move to the other direction
        other_direction = self.other_direction(self.current_direction)
        self.current_direction = other_direction
        self.log("""\
floor selection ignored.
floor selected: %d
Direction to floor: %s.
Must wait for requests to move to the other direction""" % (floor, self.direction_str(direction_to_floor)))
        # Clear for the next call
        if self.callbacks.current_floor == self.destination_floor:
            self.log("Clear for the next call")
            # Reverse again
            other_direction = self.other_direction(other_direction)
            if self.orders[other_direction] and self.orders[other_direction][0].floor == self.callbacks.current_floor:
                self.orders[other_direction].pop(0)
            self.current_direction = None
        return
    self.index(direction_to_floor, floor)
    # sort the list so closer floors are attended first
    # self.orders[direction_to_floor].sort()
    self.sort(direction_to_floor)
    if self.current_direction is None:
        self.current_direction = direction_to_floor
    self.destination_floor = self.orders[self.current_direction][0].floor
    self.log("on floor selected")
[ "def on_floor_selected(self, floor):\n # Check if oppsite\n has_request = filter(lambda request: request[\"floor\"] == floor, self.queue)\n if self.is_counter(floor) or len(has_request) > 0:\n return\n\n if floor > self.callbacks.current_floor:\n self.last_direction = UP\n elif floor < self.callbacks.current_floor:\n self.last_direction = DOWN\n else:\n return\n \n self.queue.insert(0,{ \"floor\": floor, \"direction\": 0 })", "def go_to_floor(self, target_floor):\n # assume 2 seconds to move from one floor to the next, but 1 extra second to start and 3 extra seconds to stop\n if self.door_open: # We don't want an elevator moving with open doors\n self.close_door()\n original_floor = self.current_floor\n while (self.current_floor != target_floor):\n # Going up!\n if (self.current_floor < target_floor):\n self.going_up = True\n if (self.current_floor == original_floor):\n self.do(1, \"Starting elevator\")\n self.do(2, \"Moving from floor %d to %d\" % (self.current_floor, self.current_floor + 1))\n self.current_floor += 1\n if (self.current_floor == target_floor):\n self.do(3, \"Stopping elevator\")\n else:\n target_floor = self.next_floor_up()\n # Going down!\n elif (self.current_floor > target_floor):\n self.going_up = False\n if (self.current_floor == original_floor):\n self.do(1, \"Starting elevator\")\n self.do(2, \"Moving from floor %d to %d\" % (self.current_floor, self.current_floor - 1))\n self.current_floor -= 1\n if (self.current_floor == target_floor):\n self.do(3, \"Stopping elevator\")\n else:\n target_floor = self.next_floor_down()\n self.go_to[target_floor] = False", "def chooseElevator(self, floor:int, direction:Direction) -> list[str]:\n candidates = candidates1 = candidates2 = candidates3= []\n chosen_id = None\n for id, e in self._elevators.items():\n if floor == e._cur_floor and e._cur_direction == direction and (e._actions and e._actions[0].act==ElevatorStatus.PARK):\n return [id]\n if floor <= e._cur_floor and e._cur_direction == Direction.DOWN and direction == Direction.DOWN:\n candidates1.append(id)\n elif floor >= e._cur_floor and e._cur_direction == Direction.UP and direction == Direction.UP:\n candidates1.append(id)\n if e.idle():\n candidates2.append(id)\n elif direction != e._cur_direction:\n candidates3.append(id)\n if candidates1:\n candidates += sorted(candidates1, key=lambda id: abs(self._elevators[id]._cur_floor - floor), reverse=True)\n if candidates2:\n candidates += sorted(candidates2, key=lambda id: abs(self._elevators[id]._cur_floor - floor), reverse=True)\n if candidates3:\n candidates += sorted(candidates3, key=lambda id: abs(self._elevators[id]._cur_floor - floor), reverse=True)\n if not candidates1 and not candidates2 and not candidates3:\n candidates = sorted([id for id in self._elevators.keys()], key=lambda id: abs(self._elevators[id]._cur_floor - floor), reverse=True)\n return candidates", "def next_action(self):\n\n if len(self.to_visit) > 0:\n call = self.to_visit[0]\n if call.floor < self.current_floor:\n self.__move_down()\n self.to_visit.sort(key=lambda x: x.floor, reverse=True)\n elif call.floor > self.current_floor:\n self.__move_up()\n self.to_visit.sort(key=lambda x: x.floor)\n else:\n self.__open_door()\n if call.type == \"E\":\n try:\n cal = self.__get_input_for_call_d()\n self.receive_call(cal)\n except ValueError as e:\n print(str(e))\n print('-' * 20)\n self.to_visit.pop(0)\n self.to_visit = [x for x in self.to_visit if x.type == \"D\" and x.floor == self.current_floor]\n self.__close_door()\n else:\n if self.current_floor != 0:\n 
self.__move_down()\n if self.current_floor == 0:\n self.status = StatusEnum.HOLD.value\n self.direction = DirectionEnum.UP.value\n print(\"Elevator {}: current Position -> {}\".format(self.id_elv, self.current_floor))\n print(\"Elevator {}: status -> {}, direction -> {}\".format(self.id_elv, self.status, self.direction))", "def step(self):\r\n self.__schedule_elevators()\r\n for elevator_id, elevator in enumerate(self.elevators):\r\n elevator.step()\r\n\r\n if elevator.get_current_floor() == elevator.get_target_floor():\r\n #print(\"Elevator : \" + str(elevator_id) + \" Door Opens \")\r\n #print(\"Elevator : \" + str(elevator_id) + \" Door Closes \")\r\n self.elevator_queues[elevator_id].popleft()\r\n self.elevators[elevator_id].set_target_floor(None)", "def __schedule_elevators(self):\r\n self.__process_pickup_requests()\r\n for elevator_id, elevator in enumerate(self.elevators):\r\n if len(self.elevator_queues[elevator_id]) > 0:\r\n first_element = self.elevator_queues[elevator_id][0]\r\n elevator.set_target_floor(first_element)", "def __open_door(self):\n\n self.status = StatusEnum.STOP.value\n print(\"Elevator {}: current Position -> {}\".format(self.id_elv, self.current_floor))\n print(\"Elevator {}: status -> {}, direction -> {}\".format(self.id_elv, self.status, self.direction))\n print(\"Elevator {}: open door \".format(self.id_elv))", "def elevator_stop(self, elevator, floor, direction):\n\n # If the person is waiting for the elevator and it arrives on their floor,\n # board the elevator if it is going the right way\n if floor == self.floor and self.waiting_state == -2:\n if (direction == 1) == (self.floor < self.destination):\n elevator.add_rider(self)\n self.waiting_state = -1\n self.floor = -1\n\n # If the person is on the elevator, get off if it is on the right floor\n if self.waiting_state == -1 and floor == self.destination:\n elevator.remove_rider(self)\n self.waiting_state = 0", "def pickup_request(self, pickup_floor : int , direction:[-1,1]):\r\n\r\n assert pickup_floor <= self.highest_floor, \"pickup floor should be lesser than highest floor\"\r\n assert pickup_floor >= self.lowest_floor, \"pickup floor should be greater than lowest floor\"\r\n\r\n if pickup_floor == self.highest_floor:\r\n assert direction != 1, \" Cannot go from highest floor to above\"\r\n elif pickup_floor == self.lowest_floor:\r\n assert direction != -1, \" Cannot go from lowest floor to below\"\r\n\r\n\r\n self.pickup_requests.append((pickup_floor, direction ))", "def empty_closest_floor(self, elevator: Elevator,\n waiting: Dict[int, List[Person]],\n max_floor: int) -> int:\n closest_floor = elevator.get_floor()\n floors_to_check = self.floor_check(elevator, max_floor)\n for floor in floors_to_check:\n if len(waiting[floor]) > 0:\n closest_floor = floor\n break\n return closest_floor", "def __init__(self, floors, name=\"\"):\n if name != \"\":\n self.name = name\n else:\n self.name = \"Elevator\"\n self.current_floor = 1\n self.top_floor = floors\n self.busy = False # if the elevator is busy\n self.door_open = False # if the door is open\n self.going_up = True # if the elevator is (or was last) going up\n self.go_to = dict() # status of floors to be visited by elevator\n for x in range(floors):\n self.go_to[x + 1] = False", "def get_next_floor(self, passenger_list):\n self.next_floor = passenger_list[0].desired_floor", "def pick_floor(self, time):\n \n if is_morning(time):\n f = floor_distrs[self.type]['morning']\n return f()\n elif is_afternoon(time):\n f = floor_distrs[self.type]['afternoon']\n 
return f()\n elif is_evening(time):\n f = floor_distrs[self.type]['evening']\n return f()", "def move_elevator(floor: int, control: str) -> int:\n if control == '(':\n return floor + 1\n if control == ')':\n return floor - 1\n raise ValueError('Unexpected control input: {}'.format(control))", "def run_elevator_simulation(self, sim_size, capacity, walking = False):\n elevator = Elevator(capacity)\n self.spawn_passengers(sim_size)\n \n if walking == True:\n # people take 3x as long to walk, as the elevator takes\n # to move a single floor.\n for i, p in enumerate(self.waiting_list):\n distance = abs(p.desired_floor - p.starting_floor)\n if distance <= 4:\n p.waittime = distance * 3 \n self.finished_passengers.append(self.waiting_list.pop(i))\n \n \n while len(self.waiting_list) != 0 or len(elevator.passenger_list) != 0:\n #uncomment print statements below to track elevator movement\n# print(\"# of waiting: \", len(self.waiting_list))\n# print(\"# of passengers: \", len(elevator.passenger_list))\n# print(\"# of finished: \", len(self.finished_passengers))\n# print(\"I'm on floor %d.\" %elevator.current_floor)\n# print(\"-------------------\")\n# for p in self.waiting_list:\n# print (p.starting_floor, p.desired_floor)\n\n \n if elevator.moving == False:\n # time steps are added whenever the elevator stops to unload\n # big elevators take longer\n if elevator.max_capacity >= 15:\n self.update_time(elevator, 5)\n else:\n self.update_time(elevator, 3)\n # determines who's waiting on the current floor & adds them\n # to the elevator/drops off current passengers\n waiting_list_on_current_floor = [p for p in self.waiting_list \n if p.starting_floor == elevator.current_floor]\n self.unload_elevator(elevator, waiting_list_on_current_floor)\n# print(\"Currently holding %d passengers.\" %elevator.current_capacity)\n \n \n if len(elevator.passenger_list) == 0 and len(self.waiting_list) != 0:\n # if there's no more passengers in the elevator, pick up new ones\n # move elevator 1 floor\n elevator.get_next_passenger(self.waiting_list)\n elevator.move()\n self.update_time(elevator, 1) \n \n if len(elevator.passenger_list) != 0:\n elevator.get_next_floor(elevator.passenger_list)\n elevator.move()\n self.update_time(elevator, 1)\n \n \n# print (\"I have moved all passengers!\")\n for p in self.finished_passengers:\n self.total_runtime += p.waittime\n \n# print (\"Average runtime is %d units.\" %(self.total_runtime/sim_size))\n return (self.total_runtime/sim_size)", "def get_floor_and_target(self,elevator_id : int) -> (int,int):\r\n\r\n assert 0 <= elevator_id <= len(self.elevators)-1, \"Elevator of this id not in the system\"\r\n\r\n elevator = self.elevators[elevator_id]\r\n floor = elevator.get_current_floor()\r\n target_floor = elevator.get_target_floor()\r\n\r\n return floor, target_floor", "def _add_elevator(self, nb_floors=10, start_floor=1):\n \n elevator_frame = tk.Frame(self.root)\n elevator_frame.pack(side=tk.LEFT)\n \n elevator_text = tk.Label(elevator_frame, text=\" Elevator (current\\nposition in red):\\n\")\n elevator_text.pack()\n \n self.floors ={}\n for i in range(nb_floors, 0, -1):\n self.floors[i] = tk.Label(elevator_frame, text=\"%i\"%i, width=5, height=2, borderwidth=2, \n relief=\"groove\", bg=\"white\")\n self.floors[i].pack()\n \n status_box = tk.Frame(elevator_frame, bd=1, pady=10)\n status_box.pack(expand=True, fill=tk.X)\n status_text = tk.Label(status_box, text=\"Status:\")\n status_text.pack(side=tk.LEFT)\n self.status = tk.Label(status_box, text=\"Still\")\n 
self.status.pack(side=tk.LEFT)\n \n # We start with the first floor\n self.current_floor = start_floor\n self.go_to(start_floor)", "def unload_elevator(self, elevator, waiting_list_on_current_floor):\n #p is used to represent passengers in all code\n for i, p in enumerate(elevator.passenger_list):\n if p.desired_floor == elevator.current_floor:\n self.finished_passengers.append(elevator.passenger_list.pop(i))\n elevator.current_capacity -= 1\n \n entered_elevator = []\n for p in waiting_list_on_current_floor:\n if elevator.current_capacity < elevator.max_capacity:\n elevator.passenger_list.append(p)\n entered_elevator.append(p)\n elevator.current_capacity += 1\n \n self.waiting_list = [p for p in self.waiting_list if p not in entered_elevator]\n self.moving = True", "def test_resolve_rooms_id_floor_by_floor_easyroom(self):\n\n floor = self.db_building[\"dxf\"][\"floors\"][0]\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n floor,\n \"easyroom\"\n )\n\n self.assertEqual(floor[\"rooms\"][\"R003\"], self.final_rooms[\"R003\"])\n\n\n floor = self.db_building[\"dxf\"][\"floors\"][1]\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n floor,\n \"easyroom\"\n )\n\n self.assertEqual(floor[\"rooms\"][\"R023\"], self.final_rooms[\"R023\"])\n self.assertTrue(\"R022\" not in floor[\"rooms\"])\n self.assertTrue(\"R002\" not in floor[\"rooms\"])", "def closest_target_floor(self, elevator: Elevator, max_floor: int) -> int:\n closest_floor = elevator.get_floor()\n closest_floors = self.floor_check(elevator, max_floor)\n passenger_floors = []\n for passenger in elevator.get_passengers():\n passenger_floors.append(passenger.get_target_floor())\n for floor in closest_floors:\n if floor in passenger_floors:\n closest_floor = floor\n break\n return closest_floor" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the words to the frequency counts.
def add_to_freq(self, words, index):
    for word in words:
        count = 0
        if (word in self.stop_words):
            continue
        if (self.frequencies[index].has_key(word)):
            count = self.frequencies[index][word] + 1
        else:
            count = 1
        self.frequencies[index][word] = count
[ "def update_word_counts(word_counts):\n\tfor word, count in word_counts:\n\t\tredis_wcloud_cli.zadd(WORD_CLOUD_SET,word,count)", "def add_count(self, word, count=1):\n # word_count = self.histogram.get(word, 0) + count #if word is in words_histogram's keys, count will increment, else equal 1\n # self.histogram[word] = word_count\n if self.frequency(word) > 0: #if word exist already\n self[word] += count\n else: #if new word\n self[word] = count\n self.unique_words_count += 1\n self.words_count += count", "def set_freq(self):\n for site, tags in self.words_by_site.items():\n self.word_frequency[site] = defaultdict(int)\n words = tags.split(\" \")\n for word in words:\n # Save words containing no punctuation characters.\n match = [char in word for char in string.punctuation]\n if all(m is False for m in match) and len(word) > 3:\n self.word_frequency[site][word] += 1\n dump_as_json(self.word_frequency, self.freqs_file_path)\n return self.word_frequency", "def add_count(self, word, count=1):\n \n if word in self[0]:\n self[1] += count\n self.tokens += count\n else:\n self.append(word, count)\n self.tokens += count\n self.types += 1", "def word_frequency(text: str):\n # get individual words\n tokenized = text.split()\n\n # count the frequency\n word_counter = collections.Counter(tokenized)\n #frequencies = list(collections.Counter(tokenized).items())\n\n return word_counter", "def init_freq_vector(self):\n count = {}\n for word in self.words:\n if word in list(count.keys()):\n count[word] += 1\n else:\n count[word] = 0\n\n return [(k, v) for k, v in count.items()]", "def reducer(words, frequency):\n for word in words:\n frequency[word] += 1\n\n return frequency", "def get_word_frequencies(self):\n words = Counter()\n for ctree in self.get_ctrees():\n if 'word' in ctree.results:\n for word in ctree.results['word']['frequencies']:\n words.update({word['word'], int(word['count'])})\n return words", "def accumulate_counts(words, total=Counter()):\n assert isinstance(total, Counter)\n theList = {}\n# iterate through words and increment the total associated with the word\n for word in words:\n if word in total:\n total[word] +=1\n else:\n total[word] = 1\n return total", "def count(self, word):\n self.nWords += 1", "def _add_word(self,word):\n word=word.lower() \n if word not in stop_words:\n steamed_word = self._stem(word) \n self.all_words.setdefault(steamed_word,0)\n self.all_words[steamed_word] += 1", "def add_word(self,word,d):\n w=word.lower() \n # if w not in stop_words:\n # ws=stemmer.stem(w,0,len(w)-1)\n ws = w\n d.setdefault(ws,0)\n d[ws] += 1", "def __count_words(self) -> None:\n self.n_words = len(self.data.split())", "def frequencies(word_list):\n\n word_freqs = {}\n # iterate through the list of words\n for w in word_list:\n # the word has already been found\n if w in word_freqs:\n word_freqs[w] += 1\n # the word has not yet already been found\n else:\n word_freqs[w] = 1\n return word_freqs", "def get_word_counts(docs):\n pass", "def word_frequency(text: str):\n # get individual words\n tokenized = text.split()\n\n # count the frequency\n word_counter = collections.Counter(tokenized)\n\n return word_counter", "def count_words(cleaned_corpus):\n unique_words = set(cleaned_corpus.split())\n word_frequency = {}\n for word in unique_words:\n word = word.lower()\n count = cleaned_corpus.count(word)\n word_frequency[word] = count\n return(word_frequency)", "def get_freq(words):\r\n dic = {words[i]:0 for i in range(len(words))}\r\n for i in range(len(words)):\r\n dic[words[i]] = 
dic[words[i]] + 1\r\n return dic", "def _count_frequencies(self, tokens: list) -> dict:\n frequencies = defaultdict(lambda: 0)\n\n for token in tokens:\n frequencies[token] += 1\n\n return frequencies" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parses the xml files and extracts words from descriptions.
def read_file(self, filename):
    tree = ET.parse(filename)
    root = tree.getroot()
    for child in root:
        docDesc = ''
        if (child.tag == 'Description'):
            docDesc = clean(child.text)
        words = docDesc.lower().split()
        self.add_to_freq(words, 0)
        words = list(set(words))
        self.add_to_freq(words, 1)
[ "def english_xml_parser(language,shorts,infile,outfile):\n language_capitalized = language[0].upper() + language[1:] # for matching in text\n tree = ET.parse(infile)\n root = tree.getroot() \n for child in root:\n for child2 in child:\n if child.tag == '{http://www.mediawiki.org/xml/export-0.10/}page':\n for grandchild in child2: # the \"page\" part of the xml file\n if grandchild.tag == \"{http://www.mediawiki.org/xml/export-0.10/}title\":\n title = grandchild.text # this is the title of the entry -> the word, this happens in the first iteration\n grandchild.clear()\n elif grandchild.tag == \"{http://www.mediawiki.org/xml/export-0.10/}revision\":\n for element in grandchild.findall(\"{http://www.mediawiki.org/xml/export-0.10/}text\"): # this is the case for every iteration after the first one\n # we are talking about the text-part containing the languages, not the one containing information on flection\n text_wiki = element.text\n if text_wiki: # when there is any text in this part of the tree\n for textbit in text_wiki.split('----'): \n if \"==\" + language_capitalized + \"==\" in textbit: # find the section for the current language (English Spanish German)\n if \"===Etymology\" not in textbit: # when there is no etymology\n for cat in textbit.split(\"\\n==\"): # find the different categories in this subtree\n if title != None:\n write_file(language,shorts,title,outfile,cat)\n else:\n segments = textbit.split('===Etymology') # find etymology\n for segment in segments: # for each part of the etymology\n if segment.startswith(\"===\\n\"): # find the categories\n for cat in segment.split(\"\\n==\"): # for each category\n if title != None:\n write_file(language,shorts,title,outfile,cat)\n elif re.match(\"\\s*\\d+===\",segment): # find other kinds of categories\n for cat in segment.split(\"\\n==\"): # for each category\n if title != None:\n write_file(language,shorts,title,outfile,cat)\n else:\n element.clear()\n else:\n grandchild.clear()", "def parse(self):\n\t\tif not os.path.isdir(self.path_to_input):\n\t\t\traise FileNotFoundError(\"ERROR: no such folder: \" + self.path_to_input)\n\n\t\tfiles = glob.glob(self.path_to_input + \"/*.txt\")\n\t\tcombined_words = []\n\t\tfor file_ in files:\n\t\t\twith codecs.open(file_, encoding=\"utf8\") as f:\n\t\t\t\tword_list = f.read().split()\n\t\t\t\tcombined_words.extend(word_list)\n\n\t\tself.content = \" \".join(combined_words)", "def get_description_data(xml_file):\n soup = bs4.BeautifulSoup(xml_file, 'lxml')\n descs = soup.find_all('description')\n for desc in descs:\n desc_data = str(desc.string)\n # if '.com' in desc_data:\n desc_arr = \"\"\n desc_arr.append(desc_data)", "def _parse_document(self, filename):\n\n print 'Parsing %s ' % filename\n self.__current_file = filename\n\n root_element = self._get_root_element_from_xml(filename)\n # Parse the metadata element block and store in new document\n document = self._process_metadata_and_create_document(root_element)\n if document is not None:\n if self.verbose:\n self._print_metadata(document)\n\n # Parse and store the location elements\n locations = self._process_locations(root_element, document)\n from django.db.models import Count\n if self.verbose:\n print '\\tLocations mentions'.upper()\n for location in LocationMention.objects.filter(document=document).values('text').annotate(total=Count('text')) :\n print '\\t- {0} {1}'.format(location['text'], location['total'])\n print '\\tLocations ignored'.upper()\n print '\\t- ',self.__ignored_locations\n print ''\n return", "def 
german_xml_parser(language,shorts,infile,outfile):\n tree = ET.parse(infile)\n root = tree.getroot()\n plurals = set()\n entries = dict()\n for child in root:\n for child2 in child:\n # find page\n if child2.tag == \"{http://www.mediawiki.org/xml/export-0.10/}page\":\n for child3 in child2:\n # find word/title\n if child3.tag == \"{http://www.mediawiki.org/xml/export-0.10/}title\":\n if child3.text != None:\n word = child3.text # this is the title of the entry -> the word, this happens in the first iteration\n if language == \"German\" and word[-1] == \"s\" or word[-1] == \"n\" or word[-2:] == \"en\" or word[-1] == \"e\" or word[-2:] == \"er\": # when the word contains a typical ending for regular plural\n plurals.add(word) # add word into set of all words that could be a regular plural of some other word\n # this will be iterated over later on to see which words need to be added to the dictionary\n elif child3.tag == \"{http://www.mediawiki.org/xml/export-0.10/}revision\":\n for child4 in child3:\n if child4.tag == \"{http://www.mediawiki.org/xml/export-0.10/}text\":\n text_wiki = child4.text\n if text_wiki:\n # add textbit to dictionary\n for textbit in text_wiki.split(\"---\"):\n if language == \"german\" and \"Substantiv|Deutsch\" in textbit: # find the German-section for nouns \n entries[word] = {}\n entries[word][\"textbit\"] = textbit\n elif language == \"spanish\" and \"Substantiv|Spanisch\" in textbit:\n entries[word] = {}\n entries[word][\"textbit\"] = textbit\n elif language == \"english\" and \"Substantiv|Englisch\" in textbit:\n entries[word] = {}\n entries[word][\"textbit\"] = textbit\n for title in entries:\n if \"textbit\" in entries[title].keys():\n write_dict(entries,title,language)\n del entries[title][\"textbit\"]\n # find words that look like a plural of another word, but are not actually\n if language == \"german\":\n word = plurals.pop()\n for item in entries: # title\n if word[:-1] == item or word[:-2] == item: # if the word looks like a pluralversion of the title\n for subitem in entries[item]: # i\n if word not in entries[item][subitem][\"plural\"]: # when the word is not already noted as a possible plural\n entries[item][subitem][\"plural\"].append(word)\n # write the file\n for el in entries:\n outfile.write(\"title: \" + str(el) + \"\\n\")\n for el2 in entries[el]:\n if \"flection\" in entries[el][el2] and entries[el][el2][\"flection\"] != set():\n outfile.write(\"\\tinflection: \" + str(entries[el][el2][\"flection\"]) + \"\\n\")\n if \"gender\" in entries[el][el2]:\n outfile.write(\"\\tgender: \" + str(entries[el][el2][\"gender\"]) + \"\\n\")\n if \"plural\" in entries[el][el2]:\n outfile.write(\"\\tplural: \" + str(entries[el][el2][\"plural\"]) + \"\\n\")\n if \"senses\" in entries[el][el2]:\n for number in entries[el][el2][\"senses\"]:\n outfile.write(\"\\t\\tsense\" + str(number) + \": \" + str(entries[el][el2][\"senses\"][number]) + \"\\n\")\n if \"examples\" in entries[el][el2] and number in entries[el][el2][\"examples\"]:\n outfile.write(\"\\t\\t\\texample(s)\" + str(number) + \": \" + str(entries[el][el2][\"examples\"][number]) + \"\\n\")\n outfile.write(\"\\n\")", "def perform_extraction(dumpdir:str, outputdir:str, logger:logging.Logger) -> None:\n\n\tparagraphs_file_name = \"paragraphs.txt\"\n\tfulltexts_dir_name = \"fulltexts\"\n\tknowledgebase_file_name = \"incomlete-kb.txt\" # incomplete kb\n\tkb_file_name = \"knowledgebase.txt\" # complete\n\n\n\tparagraphs_file = open(os.path.join(outputdir, paragraphs_file_name), \"w\")\n\tfulltexts_dir = 
os.path.join(outputdir, fulltexts_dir_name)\n\tif not os.path.exists(fulltexts_dir):\n\t\tos.makedirs(fulltexts_dir)\n\tknowledgebase_file = open(os.path.join(outputdir, knowledgebase_file_name), \"w\")\n\n\n\tlogger.info(\"==== Performing extraction ====\")\n\n\t# variables for logging and progress tracking, begin with 'log_'\n\tlog_totalpagecount = 0\n\tlog_pagechunk = 10000 # display info after processing this many pages\n\n\n\t### WARNING!: iterates over overy file in every subdirectory of 'dumpdir'! \n\t### Make sure no other subdirectories or files are in there\n\tfor subdir in [os.path.join(dumpdir,node) for node in os.listdir(dumpdir) if os.path.isdir(os.path.join(dumpdir, node))]:\n\t\tfor file_path in [os.path.join(subdir, node) for node in os.listdir(subdir) if os.path.isfile(os.path.join(subdir, node))]:\n\t\t\thtml_file = open(file_path, \"r\")\n\t\t\twhile True:\n\t\t\t\tline = html_file.readline()\n\t\t\t\t\n\t\t\t\t# Found beginning of a wiki page\n\t\t\t\tif line.strip().startswith('<doc'): # process page\n\t\t\t\t\tdoc_lines = []\n\t\t\t\t\tdoc_lines.append(line.strip())\n\t\t\t\t\t\n\t\t\t\t\t# Read all lines of wikipage ( <doc ...> * </doc> )\n\t\t\t\t\twhile True:\n\t\t\t\t\t\tnext_line = html_file.readline().strip()\n\t\t\t\t\t\tif not next_line == '': # discard blank lines\n\t\t\t\t\t\t\tdoc_lines.append(next_line)\n\t\t\t\t\t\tif next_line == \"</doc>\":\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t# join by newline (which separates paragraphs in the html file) - \n\t\t\t\t\t# reading separate lines and then str.join() is faster than gradual concatenation)\n\t\t\t\t\tdoc_text = \"\\n\".join(doc_lines)\n\t\t\t\t\t# Html text of wiki page (from <doc ...> to </doc>) is in doc_text. Now convert into plain text and extract info\n\t\t\t\t\tpage_title, page_uri, page_id, page_first_paragraph, page_fulltext = extract_page_info(doc_text) \n\n\t\t\t\t\tif ' (rozcestník)' in page_title or page_title.lower() == 'hlavní strana':\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tpage_title = re.sub(r' \\([^\\)]*\\)$', '', page_title)\n\n\t\t\t\t\t# write data to specific files:\n\t\t\t\t\tparagraphs_file.write(page_uri + '\\t' + page_first_paragraph + '\\n')\n\n\t\t\t\t\t\n\t\t\t\t\t# replace '/' in the #title with %2F - its URL escape - because '/' is forbidden in filenames\n\t\t\t\t\tescaped_page_title = re.sub(r'/', r'%2F', page_title) \n\t\t\t\t\ttemp_filename = \"wp_\" + escaped_page_title # filename: wp_ (as wikipage) + page title\n\t\t\t\t\ttemp_dir = os.path.join(fulltexts_dir, \"d_\" + get_dir_name_fulltexts(escaped_page_title)) # dirname - use first two letters of the page title\n\t\t\t\t\tif not os.path.exists(temp_dir):\n\t\t\t\t\t\tos.makedirs(temp_dir)\n\n\t\t\t\t\ttemp_fulltext_file = open(os.path.join(temp_dir, temp_filename + '.txt'), \"w\")\n\t\t\t\t\ttemp_fulltext_file.write(page_fulltext) \n\t\t\t\t\ttemp_fulltext_file.close()\n\t\t\t\t\n\t\t\t\t\tentity_line = \"{}\\t{}\\t{}\\t{}\".format(page_id, page_uri, page_title, page_first_paragraph)\n\t\t\t\t\tknowledgebase_file.write(entity_line + '\\n')\n\n\t\t\t\t\tlog_totalpagecount += 1\n\t\t\t\t\t# logging\n\t\t\t\t\tif log_totalpagecount % log_pagechunk == 0:\n\t\t\t\t\t\tlogger.info(\"Processed {} pages\".format(log_totalpagecount))\n\n\n\t\t\t\telif line == \"\": # end of file reached\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t# Close opened files:\n\tparagraphs_file.close()\n\tknowledgebase_file.close()\n\n\tlogger.info(\"==== Extraction complete : Pages processed: {} ====\".format(log_totalpagecount))", "def 
processFiles(fileList):\n all_text = []\n for iFile in fileList:\n with gzip.open(iFile) as f:\n tree = etree.fromstring(f.read())\n text = tree.xpath(\"//DOC[@type='story']/TEXT/P/text()\")\n text = [p for p in text if p]\n all_text = all_text + text\n print(*all_text, sep = '\\n')", "def parse(self, filename_xml: str) -> Iterable[Dict]:\n\n self.logger.info(\n f\"Parsing MedlinePlus Health Topic XML file '{filename_xml}'\"\n )\n\n # Open the XML file.\n file_xml = self.open_xml_file(filename_xml=filename_xml)\n\n # Retrieve an iterable that yields `<health-topic>` XML elements from\n # the XML file.\n elements = self.generate_xml_elements(\n file_xml=file_xml, element_tag=\"health-topic\"\n )\n\n # Iterate over the `<health-topic>` elements and yield dictionaries with\n # the parsed data.\n for element in elements:\n health_topic = self.parse_health_topic(element)\n\n # Guard against empty documents.\n if not health_topic:\n continue\n\n yield health_topic", "def director(cv):\n cur = {}\n\n i = 0\n for (xmlFolder, xmlFiles) in self.getXML():\n console(f\"Start folder {xmlFolder}:\")\n\n cur[FOLDER] = cv.node(FOLDER)\n cv.feature(cur[FOLDER], folder=xmlFolder)\n\n for xmlFile in xmlFiles:\n i += 1\n console(f\"\\r{i:>4} {xmlFile:<50}\", newline=False)\n\n cur[FILE] = cv.node(FILE)\n cv.feature(cur[FILE], file=xmlFile.removesuffix(\".xml\"))\n\n with open(f\"{xmlPath}/{xmlFolder}/{xmlFile}\", encoding=\"utf8\") as fh:\n text = fh.read()\n text = transformFunc(text)\n tree = etree.parse(text, parser)\n root = tree.getroot()\n cur[XNEST] = []\n cur[TNEST] = []\n walkNode(cv, cur, root)\n\n addSlot(cv, cur, None)\n cv.terminate(cur[FILE])\n\n console(\"\")\n console(f\"End folder {xmlFolder}\")\n cv.terminate(cur[FOLDER])\n\n console(\"\")\n\n for fName in featureMeta:\n if not cv.occurs(fName):\n cv.meta(fName)\n for fName in cv.features():\n if fName not in featureMeta:\n cv.meta(\n fName,\n description=f\"this is XML attribute {fName}\",\n valueType=\"str\",\n )\n\n if verbose == 1:\n console(\"source reading done\")\n return True", "def _get_word_list():\n with open(static_path(NOUN_FILE)) as file:\n nouns = file.readlines()\n\n with open(static_path(ADJECTIVE_FILE)) as file:\n adjectives = file.readlines()\n\n return nouns, adjectives", "def load_xml(self):\n try:\n self.root = XMLReader(self.path).root\n\n #for sign in self.root.findall('./signs/sign'):\n # self.load_sign_xml(sign)\n\n for block in self.root.findall('./blocks/block'):\n self.load_block_xml(block)\n\n # load replacments etc...\n except Exception, e:\n log.exception('error loading buildfile')", "def detect_negation(xml_file_path, xml_out, cuewords):\n\n # Open txt file with cuewords\n cuewords = open(CUEWORDS_DATA_PATH+cuewords, 'r', encoding='utf8')\n\n # Empty list for collecting\n cueword_list = []\n\n # Read words from file into list\n for word in cuewords.readlines():\n word = word.strip()\n cueword_list.insert(0, word)\n\n # Go through all files in xml_file_path directory\n for file in os.listdir(xml_file_path):\n\n # Set path to file\n file = xml_file_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open Files\n chapter_input = open(file, 'r', encoding='utf8')\n\n # Create Same Filename in Tagged Folder\n chapter_output = open(xml_out+os.path.split(file)[-1], 'w', encoding='utf8')\n\n # Console log\n print('Writing Negation frames from: ' + chapter_input.name + ' to output file: ' + chapter_output.name)\n\n # Process xml input file with 
BeautifulSoup\n chapter_input = BeautifulSoup(chapter_input, 'xml')\n\n def detect_splitwords():\n \"\"\" This function is a collection of functions for detecting splitwords only,\n such as: un-erwarterer, außer-ordentlich, zweifel-los etc.\n It is called from within the main loop and it consists of 5 basic rules.\n \"\"\"\n\n # SPLITWORD RULES\n\n # RULE 1: splitwords starting with 'un'\n # Exceptions 'un' ADJA: unerwarterer, unglücklichen, unerschütterlichen\n # Exceptions 'un' ADJD: ungewöhnlicher\n if t_word[:2] == 'un' and (t_pos in UN_AUS_RULES_POS_TAGS):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])\n\n # RULE 2: splitwords with 'außerordentlich'\n if t_word[:15] == 'außerordentlich' and (t_pos in UN_AUS_RULES_POS_TAGS):\n create_splitword_tags(t_word[:5], t_word[5:])\n create_negation_frame()\n create_splitword_target(t_word[:5])\n create_splitword_focus(t_word[5:])\n create_splitword_negated(t_word[5:])\n create_splitword_scope(t_word[5:])\n\n # RULE 3: splitwords ending with 'los'\n # Exceptions: Some Focus Exceptions: 'zweifellos ADJD', 'ratlos ADJD'\n if t_word[-3:] == 'los':\n create_splitword_tags(t_word[:-3], t_word[-3:])\n create_negation_frame()\n create_splitword_target(t_word[-3:])\n create_splitword_focus(t_word[:-3])\n create_splitword_negated(t_word[:-3])\n create_splitword_scope(t_word[:-3])\n\n # RULE 4: splitwords ending with 'lose', or 'frei'\n if t_word[-4:] == 'lose' or t_word[-4:] == 'frei':\n create_splitword_tags(t_word[:-4], t_word[-4:])\n create_negation_frame()\n create_splitword_target(t_word[-4:])\n create_splitword_focus(t_word[:-4])\n create_splitword_negated(t_word[:-4])\n create_splitword_scope(t_word[:-4])\n\n # RULE 5: splitwords ending with 'loser|s|n'\n if t_word[-5:-1] == 'lose':\n create_splitword_tags(t_word[:-5], t_word[-5:])\n create_negation_frame()\n create_splitword_target(t_word[-5:])\n create_splitword_focus(t_word[:-5])\n create_splitword_negated(t_word[:-5])\n create_splitword_scope(t_word[:-5])\n\n def guess_splitwords():\n \"\"\" This function tries to guess splitwords starting with un-\n and having ADJD or ADJA pos tags\n \"\"\"\n\n if t_word[:2] == 'un' and (t_pos == 'ADJD' or t_pos == 'ADJA'):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])\n\n\n def detect_cuewords():\n \"\"\" Collection of functions for detecting other cuewords,\n such as: ni-emals, kein-er, kein, etc.\n It is called from within the main loop and it consists of multiple rules.\n \"\"\"\n\n # cuewords\n\n if t_word[:2] == 'ni':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'kein':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'nein':\n create_negation_frame()\n create_target_focus_scope()\n\n\n def guess_cuewords():\n \"\"\" This function tries to guess splitwords starting with\n ni-\n \"\"\"\n\n if t_word[:3] == 'nie':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:3] == 'nic':\n create_negation_frame()\n create_target_focus_scope()\n\n\n def create_splitword_tags(wordpart_1, wordpart_2):\n \"\"\"\n Function for creating splitword tags.\n\n Args:\n wordpart_1 (str): First part of the splitword\n wordpart_2 (str): Second part of 
the splitword\n\n Returns:\n xml tags\n <splitword idref=\"TOKEN-ID\">\n <part id=\"TOKEN-ID_s0\" word=\"wordpart_1\"/>\n <part id=\"TOKEN-ID_s1\" word=\"wordpart_2\"/>\n </splitword>\n\n Example:\n create_splitword_tags('zweifel','los')\n or\n word = \"zweifellos\"\n create_splitword_tags(word[:-3], [:-3])\n \"\"\"\n\n # Create new <splitwords> tag\n if not sentence.sem.find('splitwords'):\n splitwords = chapter_input.new_tag('splitwords')\n sentence.sem.insert(2, splitwords)\n else:\n splitwords = sentence.sem.find('splitwords')\n\n # Create new <splitword> tag within <splitwords>\n splitword = chapter_input.new_tag('splitword', idref=t_id)\n splitwords.append(splitword)\n\n # Create sub tags <part> 1\n part1 = chapter_input.new_tag('part', word=wordpart_1, id=t_id+'_s0')\n splitword.insert(0, part1)\n\n # Create sub tags <part> 2\n part2 = chapter_input.new_tag('part', word=wordpart_2, id=t_id+'_s1')\n splitword.insert(1, part2)\n\n\n def create_negation_frame():\n \"\"\"\n Function for creating a Negation frame.\n It looks for a <frames> tag within <sem> and creates a new one if not found.\n Within it creates a <frame name=\"Negation\"> tag.\n Each new frame is set on the last index so other functions can find it easily.\n\n Returns:\n xml tag\n <frame id=\"SENTENCE-ID_FRAME-ID\" name=\"Negation\">\n \"\"\"\n\n\n # Create <frames>\n if not sentence.sem.find('frames'):\n frames = chapter_input.new_tag('frames')\n sentence.sem.insert(3, frames)\n else:\n frames = sentence.sem.find('frames')\n\n frame = chapter_input.new_tag('frame')\n frame['name'] = NEGATION_FRAME_NAME\n frames.append(frame)\n\n def count_frames():\n \"\"\" Returns the count of all Negation Frames \"\"\"\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)\n\n frame['id'] = s_id+'_f'+str(count_frames())\n\n\n def create_splitword_target(word_part):\n \"\"\"\n Function for creating a splitword target.\n\n Args:\n word_part (str): Target part of the negated slpitword\n\n Returns:\n xml tag\n <target>\n <fenode idref=\"SPLITWORDPART-ID\" is_split=\"yes\"/>\n </target>\n\n Example:\n create_splitword_target('los')\n \"\"\"\n\n split_word = sentence.sem.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', {'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create <target>\n target = chapter_input.new_tag('target')\n last_frame.insert(0, target)\n\n # Create target <fenode>\n target_fenode = chapter_input.new_tag('fenode')\n target_fenode['idref'] = wordpart_idref.get('id')\n target_fenode['is_split'] = 'yes'\n target.insert(0, target_fenode)\n\n\n def create_splitword_focus(word_part):\n \"\"\"\n Function for creating a splitword focus.\n\n Args:\n word_part (str): Focus part of the negated splitword\n\n Returns:\n xml tag\n <fe id=\"SENTENCE-ID_FE-ID\" name=\"Focus\">\n <fenode idref=\"SPLITWORDPART-ID\" is_split=\"yes\"/>\n </fe>\n\n Example:\n create_splitword_focus('zweifel')\n \"\"\"\n\n split_word = sentence.sem.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', {'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create focus\n focus = chapter_input.new_tag('fe')\n focus['name'] = FOCUS_TAG_NAME\n focus['id'] = last_frame.get('id')+'_e1'\n last_frame.insert(1, focus)\n\n # Create focus <fenode>\n focus_fenode = 
chapter_input.new_tag('fenode')\n focus_fenode['idref'] = wordpart_idref.get('id')\n focus_fenode['is_split'] = 'yes'\n focus.insert(0, focus_fenode)\n\n def create_splitword_negated(word_part):\n \"\"\"\n Function for creating the negated part of a splitword.\n\n Args:\n word_part (str): Negated part of the splitword\n\n Returns:\n xml tag\n <fe id=\"SENTENCE-ID_FE-ID\" name=\"Negated\">\n <fenode idref=\"SPLITWORDPART-ID\" is_split=\"yes\"/>\n </fe>\n\n Example:\n create_splitword_negated('zweifel')\n \"\"\"\n\n split_word = sentence.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', {'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create negated\n negated = chapter_input.new_tag('fe')\n negated['name'] = NEGATED_TAG_NAME\n negated['id'] = last_frame.get('id')+'_e2'\n last_frame.insert(2, negated)\n\n # Create negated <fenode>\n negated_fenode = chapter_input.new_tag('fenode')\n negated_fenode['idref'] = wordpart_idref.get('id')\n negated_fenode['is_split'] = 'yes'\n negated.insert(0, negated_fenode)\n\n def create_splitword_scope(word_part):\n \"\"\"\n Function for creating the scope part of a splitword.\n\n Args:\n word_part (str): Scope part of the splitword\n\n Returns:\n xml tag\n <fe id=\"SENTENCE-ID_FE-ID\" name=\"Negated\">\n <fenode idref=\"SPLITWORDPART-ID\" is_split=\"yes\"/>\n </fe>\n\n Example:\n create_splitword_scope('zweifel')\n \"\"\"\n\n split_word = sentence.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', {'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create scope\n scope = chapter_input.new_tag('fe')\n scope['name'] = SCOPE_TAG_NAME\n scope['id'] = last_frame.get('id')+'_e3'\n last_frame.insert(3, scope)\n\n # Create scope <fenode>\n scope_fenode = chapter_input.new_tag('fenode')\n scope_fenode['idref'] = wordpart_idref.get('id')\n scope_fenode['is_split'] = 'yes'\n scope.insert(0, scope_fenode)\n\n\n def create_target_focus_scope():\n \"\"\"\n Function for creating target focus and scope, for other cuewords.\n\n Returns:\n Full xml frame tag\n <frame id=\"SENTENCE-ID_FRAME-ID\" name=\"Negation\">\n <target>\n <fenode idref=\"WORD-ID\"/>\n </target>\n <fe id=\"67_f1_e1\" name=\"Focus\">\n <fenode idref=\"WORD-ID\"/>\n </fe>\n <fe id=\"67_f1_e1\" name=\"Negated\">\n <fenode idref=\"WORD-ID\"/>\n </fe>\n <fe id=\"67_f1_e3\" name=\"Scope\">\n <fenode idref=\"WORD-ID\"/>\n </fe>\n </frame>\n\n Example:\n create_target_focus_scope()\n \"\"\"\n\n # Create <target>\n target = chapter_input.new_tag('target')\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n last_frame.insert(0, target)\n\n # Create focus\n focus = chapter_input.new_tag('fe')\n focus['name'] = FOCUS_TAG_NAME\n focus['id'] = last_frame.get('id')+'_e1'\n last_frame.insert(1, focus)\n\n # Create negated\n negated = chapter_input.new_tag('fe')\n negated['name'] = NEGATED_TAG_NAME\n negated['id'] = last_frame.get('id')+'_e2'\n last_frame.insert(2, negated)\n\n # Create scope\n scope = chapter_input.new_tag('fe')\n scope['name'] = SCOPE_TAG_NAME\n scope['id'] = last_frame.get('id')+'_e3'\n last_frame.append(scope)\n\n\n def create_target_fenode():\n \"\"\"\n Function for creating target fenode\n \"\"\"\n # Create target <fenode>\n target_fenode = chapter_input.new_tag('fenode')\n target_fenode['idref'] = t_id\n target.insert(0, target_fenode)\n\n def create_focus_fenode(t_id):\n 
\"\"\"\n Function for creating target fenode\n\n Args:\n t_id (str): Terminal ID\n \"\"\"\n # Create focus <fenode>\n focus_fenode = chapter_input.new_tag('fenode')\n focus_fenode['idref'] = t_id\n focus.insert(0, focus_fenode)\n\n def create_negated_fenode(t_id):\n \"\"\"\n Function for creating negated fenode\n\n Args:\n t_id (str): Terminal ID\n \"\"\"\n # Create focus <fenode>\n negated_fenode = chapter_input.new_tag('fenode')\n negated_fenode['idref'] = t_id\n negated.insert(0, negated_fenode)\n\n def create_scope_fenode(t_id):\n \"\"\"\n Function for creating scope fenode\n\n Args:\n t_id (str): Terminal ID\n \"\"\"\n # Create scope <fenode>\n scope_fenode = chapter_input.new_tag('fenode')\n scope_fenode['idref'] = t_id\n scope.append(scope_fenode)\n\n\n # Run Target Function and mark cueword\n create_target_fenode()\n\n # Find previous and next siblings of the cueword within a sentence\n prev_siblings = sentence.find('t', id=t_id).find_previous_siblings('t')\n next_siblings = sentence.find('t', id=t_id).find_next_siblings('t')\n\n # Mark scope for terminals left of the cueword\n for p_s in prev_siblings:\n\n # Break scope if POS in SCOPE_START_FENODE\n if p_s.get('pos') in SCOPE_START_FENODE:\n break\n\n # Create scope <fenode>\n create_scope_fenode(p_s.get('id'))\n\n\n # Mark scope for terminals right of the cueword\n for n_s in next_siblings:\n\n # End Scope if pos in SCOPE_END_FENODE\n if n_s.get('pos') in SCOPE_END_FENODE or n_s.get('lemma') in SCOPE_END_LEMMA:\n break\n\n # Continue Scope for exceptions\n if n_s.get('pos') in SCOPE_BREAKING_FENODE[0]:\n ns_next = n_s.find_next_sibling('t')\n if ns_next.get('pos') in SCOPE_CONTINUE_FENODE:\n continue\n elif ns_next.get('pos') not in SCOPE_CONTINUE_FENODE:\n break\n\n # Create scope <fenode>\n create_scope_fenode(n_s.get('id'))\n\n\n # Find negated for word nicht right of the cueword\n for n_s in next_siblings:\n if t_word == 'nicht':\n if n_s.get('pos') in NICHT_NEGATED_RULES:\n create_negated_fenode(n_s.get('id'))\n break\n\n # Find negated for word nicht left of the cueword\n for p_s in prev_siblings:\n if t_word == 'nicht':\n if p_s.get('pos') in NICHT_NEGATED_RULES and not negated.find('fenode'):\n create_negated_fenode(p_s.get('id'))\n break\n\n # Find focus for terminals right of the cueword\n for n_s in next_siblings:\n\n # RULE 1: nicht PTKNEG\n if t_word == 'nicht' and t_pos == 'PTKNEG':\n if n_s.get('pos') in NICHT_RULES and not focus.find('fenode'):\n create_focus_fenode(n_s.get('id'))\n break\n\n if t_word == 'nein':\n continue\n\n elif n_s.get('pos') in FOCUS_LEMMA_RULES and not focus.find('fenode'):\n create_focus_fenode(n_s.get('id'))\n\n # RULE 2: kein\n if t_word[:4] == 'kein' and t_pos == 'PIAT':\n if n_s.get('pos') in NICHT_RULES and not focus.find('fenode'):\n create_focus_fenode(n_s.get('id'))\n break\n\n elif n_s.get('pos') in FOCUS_LEMMA_RULES and not focus.find('fenode'):\n create_focus_fenode(n_s.get('id'))\n\n # Find focus for 'nichts' right of the cueword\n for n_s in next_siblings:\n if t_word == 'nichts' and t_pos in NICHTS_RULES:\n if n_s.get('pos') in NICHTS_FOCUS_RULES and not focus.find('fenode'):\n create_focus_fenode(n_s.get('id'))\n\n # Find focus and target for terminals left of the cueword\n for p_s in prev_siblings:\n\n # RULE 1: nicht PTKNEG for previous siblings\n if t_word == 'nicht' and t_pos == 'PTKNEG':\n if p_s.get('pos') in NICHT_PREV_RULES and not focus.find('fenode'):\n create_focus_fenode(p_s.get('id'))\n break\n\n elif t_word == 'nicht' and not focus.find('fenode'):\n 
create_focus_fenode(t_id)\n\n if p_s.get('pos') in FOCUS_LEMMA_RULES:\n pass\n\n if t_word == 'nichts' and t_pos == 'NN':\n create_focus_fenode(t_id)\n\n\n ###########\n # The Loop\n for sentence in chapter_input.find_all('s'):\n\n for terminal in sentence.find_all('t'):\n\n # collect terminal word in lowercase\n t_word = terminal.get('word').lower()\n\n # collect terminal IDs\n t_id = terminal.get('id')\n\n # Collect terminal POS tags\n t_pos = terminal.get('pos')\n\n # collect sentence IDs\n s_id = sentence.get('id')\n\n if t_word in cueword_list:\n detect_splitwords()\n detect_cuewords()\n\n elif t_word not in cueword_list:\n guess_splitwords()\n guess_cuewords()\n\n chapter_output.write(chapter_input.prettify())\n print('Done!')\n chapter_output.close()", "def read_doc(self,docfile):\n\t\tstemmer=FrenchStemmer()\n\t\tflux=open(docfile)\n\t\tline=flux.readline()\n\t\tposition=0\n\t\ttitle=True\n\t\tfirst=True\n\t\twhile line != '':\n\t\t liste=line.split()\n\t\t if title==True and len(liste)>0: #remplir le dictionnaire du titre\n\t\t self.full_title = line\n\t\t title=False\n\t\t for each in liste:\n\t\t each=each.lower()\n\t\t if '\\'' in each:\n\t\t\tstrings=self.splitAccent(each)\n\t\t\tstrings[0]+='\\''\n\t\t\tself.nb_word+=len(strings)\n\t\t\tfor word in strings:\n\t\t\t word= stemmer.stem(word.decode('iso-8859-1') )\n\t\t\t if word not in self.word2pos_list_title:\n\t\t\t self.word2pos_list_title[word]=[]\n\t\t\t self.word2pos_list_title[word].append(position)\n\t\t\t position+=1\n\t\t else:\n\t\t\tself.nb_word+=1\n\t\t\teach=stemmer.stem(each.decode('iso-8859-1'))\n\t\t\tif each not in self.word2pos_list_title:\n\t\t\t self.word2pos_list_title[each]=[]\n\t\t\tself.word2pos_list_title[each].append(position)\n\t\t\tposition+=1\n\t\t line=flux.readline()\n\t\t liste=line.split()\n\t\t if first==True and title==False and liste!=[]: #pour remplir le dictionnaire du premier paragraphe\n\t\t first=False\n\t\t for each in liste:\n\t\t\teach=each.lower()\n\t\t\tif '\\'' in each:\n\t\t\t strings=self.splitAccent(each)\n\t\t\t strings[0]+='\\''\n\t\t\t self.nb_word+=len(strings)\n\t\t\t for word in strings:\n\t\t\t word= stemmer.stem(word.decode('iso-8859-1') )\n\t\t\t if word not in self.word2pos_list_first:\n\t\t\t self.word2pos_list_first[word]=[]\n\t\t\t self.word2pos_list_first[word].append(position)\n\t\t\t position+=1\n\t\t\telse:\n\t\t\t self.nb_word+=1\n\t\t\t each=stemmer.stem(each.decode('iso-8859-1'))\n\t\t\t if each not in self.word2pos_list_first:\n\t\t\t self.word2pos_list_first[each]=[]\n\t\t\t self.word2pos_list_first[each].append(position)\n\t\t\t position+=1\n\t\t line=flux.readline()\n\t\t liste=line.split()\n\t\t if first==False and title==False and liste!=[]: #pour remplir le dictionnaire du corps de texte\n\t\t for each in liste:\n\t\t each=each.lower()\n\t\t if '\\'' in each:\n\t\t\tstrings=self.splitAccent(each)\n\t\t\tstrings[0]+='\\''\n\t\t\tself.nb_word+=len(strings)\n\t\t\tfor word in strings:\n\t\t\t word= stemmer.stem(word.decode('iso-8859-1') )\n\t\t\t if word not in self.word2pos_list_body:\n\t\t\t self.word2pos_list_body[word]=[]\n\t\t\t self.word2pos_list_body[word].append(position)\n\t\t\t position+=1\n\t\t else:\n\t\t\tself.nb_word+=1\n\t\t\teach=stemmer.stem(each.decode('iso-8859-1'))\n\t\t\tif each not in self.word2pos_list_body:\n\t\t\t self.word2pos_list_body[each]=[]\n\t\t\t self.word2pos_list_body[each].append(position)\n\t\t\telse:\n\t\t\t self.word2pos_list_body[each].append(position)\n\t\t\tposition+=1\n\t\t line=flux.readline()\n\t\t#print 
self.word2pos_list_title\n\t\t#print self.word2pos_list_first\n\t\t#print self.word2pos_list_body", "def read_xml_files(files, label = \"male\"):\n tweets = []\n for file in tqdm(files):\n path = 'Data/pan17/en/'+file+'.xml'\n tree = ET.parse(path)\n root = tree.getroot()\n texts = []\n for child in root.iter('documents'):\n for child2 in child.iter('document'):\n texts.append(child2.text)\n tweets.extend(texts[40:50])\n content = {'tweets': tweets,\n 'labels': label}\n df = pd.DataFrame(content) \n \n return df", "def Extraction (self):\n with open(self.corpus, 'r') as f:\n line = True\n while line:\n line = f.readline()\n if TAG_START_PAGE in line:\n line = f.readline()\n if ':' not in line:\n #valid page\n word = line[line.index(TAG_START_TITLE) + len(TAG_START_TITLE):line.index(TAG_END_TITLE)] \n #loop until found start tag\n while TAG_START_TRAD not in line and TAG_END_PAGE not in line:\n line = f.readline ()\n# print (line)\n if TAG_END_PAGE in line:\n continue\n #Now start extracting traductions\n while line.strip() != '':\n if line.startswith(TAG_START_LANG) and TAG_END_LANG in line:\n lang = line[len(TAG_START_LANG):line.index(TAG_END_LANG)]\n if '|' in lang:\n lang = lang[:lang.index('|')]\n #first hyper filter\n line = re.sub(HYPER_FILTER,']]',line)\n #traductions extraction\n trad = [t[2:-2] for l in line.split(',') for t in re.findall(PATTERN_TRAD, l) if len(t.split()) > 0]\n #fine filter\n traductions = []\n for t in trad: \n if t.startswith('[['):\n t = t[2:]\n if ']]' in t:\n while ']]' in t and '[[' in t:\n traductions.append(t[:t.index(']]')])\n t = t[t.index('[[')+2:]\n if ']]' in t:\n traductions.append(t[:t.index(']]')])\n elif '[[' in t:\n traductions.append(t[t.index('[[')+2:])\n else:\n traductions.append(t)\n else:\n traductions.append(t) \n #clear non-traductions\n for t in traductions:\n for exclude in self.exclude_Tags :\n if exclude in t:\n traductions.remove(t)\n break\n print (word, self.lang, lang, traductions)\n with open(self.csv, 'a') as csv:\n for t in traductions:\n if len(t.strip()) > 0:\n line = ''.join([self.lang, SEP_CSV, word, SEP_CSV, lang, SEP_CSV, t]) + '\\n'\n csv.write (line)\n line = f.readline ()\n continue", "def _parseXML(self):\n\n results = {}\n\n # If all augments files are missing, return a default record\n for xmlFile in self.xmlFiles:\n if os.path.exists(xmlFile):\n break\n else:\n results[\"Default\"] = AugmentRecord(\n \"Default\",\n enabled=True,\n enabledForCalendaring=True,\n enabledForAddressBooks=True,\n enabledForLogin=True,\n )\n\n # Compare previously seen modification time and size of each\n # xml file. 
If all are unchanged, skip.\n if self._shouldReparse(self.xmlFiles):\n for xmlFile in self.xmlFiles:\n if os.path.exists(xmlFile):\n # Creating a parser does the parse\n XMLAugmentsParser(xmlFile, results)\n newModTime = os.path.getmtime(xmlFile)\n newSize = os.path.getsize(xmlFile)\n self.xmlFileStats[xmlFile] = (newModTime, newSize)\n\n return results", "def parse_article_xml(self, article_xml_filenames):\n \n articles = []\n \n for article_xml_filename in article_xml_filenames:\n \n article = self.create_article()\n article.parse_article_file(article_xml_filename)\n if(self.logger):\n log_info = \"Parsed \" + article.doi_url\n self.admin_email_content += \"\\n\" + log_info\n self.logger.info(log_info)\n # Add article object to the object list\n articles.append(article)\n \n # Add article to the DOI to file name map\n self.xml_file_to_doi_map[article.doi] = article_xml_filename\n \n return articles", "def parsed_words(self):\n try:\n lemmatized_words = self.get_lemmatized_words()\n important_words = self.important_words\n \n final_parsed_words =[]\n for i in range(len(important_words)):\n if(important_words[i][1][0]==\"R\"):\n final_parsed_words.append(self.adv_to_adj(important_words[i][0]))\n else:\n final_parsed_words.append(lemmatized_words[i])\n return final_parsed_words\n except Exception as e:\n print(e)", "def _parse(self, filenames):\n\n doxygen_config = \"\"\"\n INPUT = %(filenames)s\n INPUT_ENCODING = UTF-8\n RECURSIVE = NO\n QUIET = YES\n WARNINGS = NO\n WARN_IF_UNDOCUMENTED = NO\n\n GENERATE_HTML = NO\n GENERATE_LATEX = NO\n GENERATE_XML = YES\n XML_OUTPUT = xml\n XML_PROGRAMLISTING = NO\n\n DOXYFILE_ENCODING = UTF-8\n PROJECT_NAME = \"My Project\"\n # relative or absolute\n OUTPUT_DIRECTORY = %(output_dir)s\n CREATE_SUBDIRS = NO\n OUTPUT_LANGUAGE = English\n BRIEF_MEMBER_DESC = NO\n REPEAT_BRIEF = YES\n EXTRACT_PRIVATE = YES\n EXTRACT_LOCAL_METHODS = YES\n EXTRACT_ANON_NSPACES = YES\n EXTRACT_STATIC = YES\n EXTRACT_PACKAGE = YES\n HIDE_UNDOC_MEMBERS = YES\n HIDE_UNDOC_CLASSES = YES\n HIDE_IN_BODY_DOCS = NO\n CASE_SENSE_NAMES = NO\n HIDE_SCOPE_NAMES = NO\n SHOW_INCLUDE_FILES = YES\n SORT_MEMBER_DOCS = NO\n SORT_BRIEF_DOCS = NO\n SORT_MEMBERS_CTORS_1ST = NO\n SORT_GROUP_NAMES = NO\n SORT_BY_SCOPE_NAME = NO\n STRICT_PROTO_MATCHING = NO\n GENERATE_TODOLIST = NO\n GENERATE_TESTLIST = NO\n GENERATE_BUGLIST = NO\n GENERATE_DEPRECATEDLIST= NO\n\n ENABLE_PREPROCESSING = YES\n MACRO_EXPANSION = YES\n # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will\n # remove all refrences to function-like macros that are alone on a line, have an\n # all uppercase name, and do not end with a semicolon. 
Such function macros are\n # typically used for boiler-plate code, and will confuse the parser if not\n # removed.\n # The default value is: YES.\n # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.\n SKIP_FUNCTION_MACROS = YES\n \"\"\"\n\n with tempfile.TemporaryDirectory() as xml_dir:\n self.xml_dir = os.path.join(xml_dir, \"xml\")\n # create config file\n doxygen_config %= {\n \"filenames\": \" \".join(filenames),\n \"output_dir\": xml_dir\n }\n conf_filename = os.path.join(xml_dir, \"config_file.doxygen\")\n self.push_stack(\"creating doxygen config\")\n try:\n with open(conf_filename, \"w\", encoding=\"utf-8\") as f:\n f.write(doxygen_config)\n except IOError as e:\n self.error(str(e))\n self.pop_stack()\n # create doxygen output\n self.push_stack(\"doxygen call\")\n try:\n subprocess.call(args=[\"doxygen\", conf_filename])\n except subprocess.CalledProcessError as e:\n self.error(str(e))\n self.pop_stack()\n\n # parse doxygen output\n self.push_stack(\"reading doxygen output\")\n try:\n for root, dirs, files in os.walk(xml_dir):\n for f in files:\n #print(f)\n if f.startswith(\"group__\") and f.endswith(\".xml\"):\n group_name = f[7:f.index(\".xml\")]\n if group_name in self.group_names:\n self._parse_doxy_xml(os.path.join(root, f))\n except IOError as e:\n self.error(str(e))\n #except BaseException as e:\n # self.error(e.__class__.__name__ + \":\" + str(e))\n\n self.pop_stack()\n\n self.filenames = filenames" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each outgoing packet, we add a feature that indicates the total number of packets before it in the sequence. We also record the number of incoming packets between outgoing packets; this is supposed to indicate burst patterns. We only go up to 300 outgoing packets and pad after that.
def get_packet_ordering(trace, features): # Number of packets before it in the sequence count = 0 for i, val in enumerate(trace): if val[1] > 0: count += 1 features.append(i) if count == 300: break # Pad for i in range(count, 300): features.append(-1) # Number of incoming packets between outgoing packets count = 0 prevloc = 0 for i, val in enumerate(trace): if val[1] > 0: count += 1 features.append(i - prevloc) prevloc = i if count == 300: break # Pad for i in range(count, 300): features.append(-1)
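A minimal usage sketch of the layout produced above (illustrative only: the toy trace and the +1 outgoing / -1 incoming convention are assumptions, not part of this record):

# Hypothetical 4-packet trace; +1 is assumed to mark an outgoing packet, -1 an incoming one.
toy_trace = [(0.00, 1), (0.01, -1), (0.02, -1), (0.03, 1)]
features = []
get_packet_ordering(toy_trace, features)

# Slots 0-299: index of each outgoing packet in the trace, padded with -1.
# Slots 300-599: index gap since the previous outgoing packet, padded with -1.
print(features[:2], features[300:302], len(features))  # [0, 3] [0, 3] 600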
[ "def concentraction_packets(trace, features):\n features_added = 0\n for i in range(0, len(trace), 30):\n if i == 3000: # span_length * max_spans (30 * 100)\n break\n\n count = 0\n try:\n for j in range(30):\n if trace[i + j][1] > 0:\n count += 1\n except IndexError:\n pass\n\n features.append(count)\n features_added += 1\n\n # Pad\n for i in range(0, 100 - features_added):\n features.append(0)", "def _print_packet_num_being_sent(packet_count, n_packets):\n log.debug(\"send SDP packet with missing sequence numbers: {} of {}\",\n packet_count + 1, n_packets)", "def acked_packets (self):\n if not self.tcp.ACK: return None\n return self.buddy_flow.next_seqs.get(self.tcp.ack)", "def n_dropped_fixed_route_packets(self):\n return self._register_values[\n constants.ROUTER_REGISTER_REGISTERS.DUMP_FR.value]", "def counts_udp_icmpv6(self):\n self.icmpv6_number = 0\n self.udp_number = 0\n for packet in self.filtered_list:\n if (58,'ICMPv6') in packet[4]:\n self.icmpv6_number += 1\n if (17,'UDP') in packet[4]:\n self.udp_number += 1", "def addGapsToHMMSeqs(self):\n for seq in self.records:\n seq.seq.insertAllGaps(self.total_gaps)", "def n_dropped_multicast_packets(self):\n return self._register_values[\n constants.ROUTER_REGISTER_REGISTERS.DUMP_MC.value]", "def n_dropped_peer_to_peer_packets(self):\n return self._register_values[\n constants.ROUTER_REGISTER_REGISTERS.DUMP_PP.value]", "def _generateDelayStats(self):\n\n def computeDelay(messages):\n data = messages[['datetime', 'sender']]\n delay = np.insert(data['datetime'].iloc[1:].values -data['datetime'].iloc[:-1].values, 0, 0)\n data['delay'] = delay\n return data\n\n #sender of current message must be different from sender previous message\n def removeConsecutiveMessages(messages):\n return messages[messages['sender'] != messages['sender'].shift()]\n\n res = removeConsecutiveMessages(computeDelay(self.df))\n res = res.groupby('sender').sum()\n return res", "def test_flow_control_decreases_with_padded_data(self, frame_factory):\n c = h2.connection.H2Connection(client_side=False)\n c.receive_data(frame_factory.preamble())\n f1 = frame_factory.build_headers_frame(self.example_request_headers)\n f2 = frame_factory.build_data_frame(b'some data', padding_len=10)\n\n c.receive_data(f1.serialize() + f2.serialize())\n\n remaining_length = (\n self.DEFAULT_FLOW_WINDOW - len(b'some data') - 10 - 1\n )\n assert (c.remote_flow_control_window(1) == remaining_length)", "def padded_count(self):\n c = 0\n for pkt in self.packets:\n if pkt.type[\"padded\"]:\n c += 1\n return c", "def make_packet(packet_num):", "def updatePacketsCount(self, packet_id):\n if self.last_id == -1:\n self.last_id = packet_id\n self.packets_dropped = 0\n return\n # ID loops every 101 packets\n if packet_id > self.last_id:\n self.packets_dropped = packet_id - self.last_id - 1\n else:\n self.packets_dropped = packet_id + 101 - self.last_id - 1\n self.last_id = packet_id\n if self.packets_dropped > 0:\n print(\"Warning: dropped \" + str(self.packets_dropped) + \" packets.\")", "def get_captured_packet_count(self, tool=None, **kwargs):\n pass", "def _pad(self, messages, seq_lengths):\n _, max_len = messages.shape[0], messages.shape[1]\n\n mask = torch.arange(max_len, device=self.device).expand(len(seq_lengths), max_len) < seq_lengths.unsqueeze(1)\n\n if self.training:\n mask = mask.type(dtype=messages.dtype)\n messages = messages * mask.unsqueeze(2)\n\n # give full probability (1) to eos tag (used as padding in this case)\n messages[:, :, self.sender.eos_id] += (mask == 
0).type(dtype=messages.dtype)\n else:\n # fill in the rest of message with eos\n messages = messages.masked_fill_(mask == 0, self.sender.eos_id)\n\n return messages", "def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):\n # print(framewise_output.size(), frames_num)\n pad = framewise_output[:, -1:, :].repeat(\n 1, frames_num - framewise_output.shape[1], 1)\n \"\"\"tensor for padding\"\"\"\n\n output = torch.cat((framewise_output, pad), dim=1)\n \"\"\"(batch_size, frames_num, classes_num)\"\"\"\n\n return output", "def add(self, sequence):\n for d, padded_sequence in enumerate(self.__padded_sequences(sequence)):\n for e, element in enumerate(padded_sequence):\n if e == 0 or not isinstance(element, Markov.Endpoint):\n subseq = tuple(padded_sequence[e+1 : e+1+self.order])\n self.chains[d][element][subseq] += 1", "def recruit_bps_macrophage(self):\n # TODO - is this the best way to use perfusion? Is also included in the events function.\n\n r = np.random.random() * self.totals[TOTAL_PERFUSION]\n\n running_total = 0\n for node in self.node_list_bps:\n running_total += node.perfusion\n if running_total > r:\n node.update(MACROPHAGE_REGULAR, 1)\n return", "def _print_packets(self):\n controller = self.get_controller()\n print \"PENDING PACKETS\"\n for p in controller.get_pending_packets():\n print \" - \" + str(p.get_pos()) + \" \" + str(p)\n print \"BUFFER PACKETS\"\n buf = controller.get_buffer()\n for p in buf:\n print \" [\" + str(buf.index(p)) + \"] \" + str(p.get_pos()) + \" \" + str(p)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This measure is supposed to indicate where the outgoing packets are concentrated. We divide the trace up into non-overlapping spans of 30 packets and add the number of outgoing packets in each span as a feature. We only keep a maximum of 100 spans and pad with zeros after that.
def concentraction_packets(trace, features): features_added = 0 for i in range(0, len(trace), 30): if i == 3000: # span_length * max_spans (30 * 100) break count = 0 try: for j in range(30): if trace[i + j][1] > 0: count += 1 except IndexError: pass features.append(count) features_added += 1 # Pad for i in range(0, 100 - features_added): features.append(0)
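A small worked example of the span counts (illustrative only; the toy trace and the +1 outgoing convention are assumptions):

# Hypothetical 35-packet trace; +1 is assumed to mean outgoing.
toy_trace = [(0.01 * i, 1 if i % 2 == 0 else -1) for i in range(35)]
features = []
concentraction_packets(toy_trace, features)

# Span 0 (packets 0-29) holds 15 outgoing packets, span 1 (packets 30-34) holds 3,
# and the remaining 98 spans are padded with 0, giving 100 features in total.
print(features[0], features[1], len(features))  # 15 3 100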
[ "def analyze_trace(trace, target_number_of_friends, target=0):\n\n ## ADD CODE HERE\n \"\"\"\n print(\"no.rounds:\", len(trace))\n print(\"no:\", target_number_of_friends)\n print(\"target:\", target)\n print(\"trace1\",trace[1])\n print(\"trace10\",trace[1][0])\n print(\"trace101\",trace[1][0][1])\n print(\"no.senders\",len(trace[1][0][:]))\n \"\"\"\n #create a counter object using Python's built-in functions\n countlist = Counter()\n #for each round of communication:\n for i in range(0,len(trace)):\n #if the target sender id is present in the senders list then:\n if (target in trace[i][0][:]):\n #count the occurance of each unique receiver id and concatenate into an overall id-frequency object list\n countlist += Counter(trace[i][1][:])\n\n #print(\"clist:\", countlist)\n\n #print(\"mostcommon:\", countlist.most_common(target_number_of_friends))\n #print(\"mostcommon:\", (countlist.most_common(target_number_of_friends)[0][0]))\n\n #initialise empty list\n friends = []\n #until you obtain the same number of receivers as the number of friends the target has:\n for i in range(0, target_number_of_friends):\n #keep appending the most common (highest frequency) receiver id in a descending order from most to least likely 'friend'\n friends.append((countlist.most_common(target_number_of_friends)[i][0]))\n #print(\"friends list:\", friends)\n #return list of most likely friends the target has been sending messages to\n return friends", "def ntraces(self):\n meas_list = self.scpi.query_meas_number_list(self.active_channel)\n return 0 if meas_list is None else len(meas_list)", "def test_span_processor_dropped_spans(self):\n span_processor = datadog.DatadogExportSpanProcessor(\n self.exporter, max_trace_size=128\n )\n tracer_provider = trace.TracerProvider()\n tracer_provider.add_span_processor(span_processor)\n tracer = tracer_provider.get_tracer(__name__)\n\n with tracer.start_as_current_span(\"root\"):\n for _ in range(127):\n with tracer.start_span(\"foo\"):\n pass\n with self.assertLogs(level=logging.WARNING):\n with tracer.start_span(\"one-too-many\"):\n pass\n\n self.assertTrue(span_processor.force_flush())\n datadog_spans = get_spans(tracer, self.exporter)\n self.assertEqual(len(datadog_spans), 128)\n tracer_provider.shutdown()", "def eastwest_traffic_probe(label, average_period, history_sample_count):\n\n server_facing_interface_query = \\\n ('node(\"system\", name=\"system\", system_id=not_none(), role=\"leaf\").'\n 'out(\"hosted_interfaces\").'\n 'node(\"interface\", name=\"iface\", if_name=not_none()).'\n 'out(\"link\").'\n 'node(\"link\", link_type=\"ethernet\").'\n 'in_(\"link\").'\n 'node(\"interface\").'\n 'in_(\"hosted_interfaces\").'\n 'node(\"system\", system_type=\"server\")')\n\n external_router_facing_interface_query = \\\n ('node(\"system\", name=\"system\", system_id=not_none()).'\n 'out(\"hosted_interfaces\").'\n 'node(\"interface\", name=\"iface\", if_name=not_none()).'\n 'out(\"link\").'\n 'node(\"link\", link_type=\"ethernet\", role=\"to_external_router\")')\n\n payload = {\n 'label': label,\n 'processors': [\n {'name': 'leaf server traffic counters',\n 'type': 'if_counter',\n 'inputs': {},\n 'outputs': {'out': 'server_traffic_counters'},\n 'properties': {\n 'system_id': 'system.system_id',\n 'interface': 'iface.if_name',\n 'counter_type': 'rx_bytes',\n 'graph_query': server_facing_interface_query,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n\n {'name': 'server traffic average',\n 'type': 'periodic_average',\n 'inputs': {'in': 
'server_traffic_counters'},\n 'outputs': {'out': 'server_traffic_avg'},\n 'properties': {\n 'period': average_period,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n\n {'name': 'external router south-north link traffic',\n 'type': 'if_counter',\n 'inputs': {},\n 'outputs': {'out': 'ext_router_interface_traffic'},\n 'properties': {\n 'system_id': 'system.system_id',\n 'interface': 'iface.if_name',\n 'counter_type': 'tx_bytes',\n 'graph_query': external_router_facing_interface_query,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n {'name': 'external router south-north links traffic average',\n 'type': 'periodic_average',\n 'inputs': {'in': 'ext_router_interface_traffic'},\n 'outputs': {'out': 'ext_router_interface_traffic_avg'},\n 'properties': {\n 'period': average_period,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n\n {'name': 'total server traffic',\n 'type': 'sum',\n 'inputs': {'in': 'server_traffic_avg'},\n 'outputs': {'out': 'total_server_traffic'},\n 'properties': {\n 'group_by': []\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n {'name': 'server generated traffic average',\n 'type': 'periodic_average',\n 'inputs': {'in': 'total_server_traffic'},\n 'outputs': {'out': 'total_server_generated_traffic_average'},\n 'properties': {\n 'period': average_period,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n {'name': 'total server traffic history',\n 'type': 'accumulate',\n 'inputs': {'in': 'total_server_generated_traffic_average'},\n 'outputs': {'out': 'total_server_traffic_history'},\n 'properties': {\n 'total_duration': 0,\n 'max_samples': history_sample_count,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n\n {'name': 'south-north traffic',\n 'type': 'sum',\n 'inputs': {'in': 'ext_router_interface_traffic_avg'},\n 'outputs': {'out': 'total_outgoing_traffic'},\n 'properties': {\n 'group_by': []\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n {'name': 'outgoing_traffic_average',\n 'type': 'periodic_average',\n 'inputs': {'in': 'total_outgoing_traffic'},\n 'outputs': {'out': 'total_outgoing_traffic_average'},\n 'properties': {\n 'period': average_period,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n {'name': 'south-north traffic history',\n 'type': 'accumulate',\n 'inputs': {'in': 'total_outgoing_traffic_average'},\n 'outputs': {'out': 'total_outgoing_traffic_timeseries'},\n 'properties': {\n 'total_duration': 0,\n 'max_samples': history_sample_count,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n\n },\n\n {'name': 'east-west traffic',\n 'type': 'subtract',\n 'inputs': {'minuend': 'total_server_generated_traffic_average',\n 'subtrahend': 'total_outgoing_traffic_average'},\n 'outputs': {'out': 'eastwest_traffic'},\n 'properties': {},\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n },\n {'name': 'east-west traffic history',\n 'type': 'accumulate',\n 'inputs': {'in': 'eastwest_traffic'},\n 'outputs': {'out': 'eastwest_traffic_history'},\n 'properties': {\n 'total_duration': 0,\n 'max_samples': history_sample_count,\n },\n 'stages': [{'name': 'out',\n 'units': 'Bps'}],\n\n },\n ],\n }\n\n return payload", "def calculate_segment_nframes(path, segment_len):\n\n wave_reader, wave_params = read_audio(path)\n window_nframes = int(wave_params.framerate * 0.01) # every window last 0.01 second\n segment_nframes = int(wave_params.framerate * segment_len)\n\n # switch every window by 0.01 second\n # save the frame index of middle of the window to frame_list\n # save maximum value of 
the window to max_list\n frame = 0\n frame_list, max_list = [], []\n while True:\n if frame >= wave_params.nframes:\n break\n fragment = wave_reader.readframes(window_nframes)\n frame_list.append(min(int(frame + window_nframes / 2),\n wave_params.nframes))\n max_list.append(audioop.max(fragment, wave_params.sampwidth))\n frame += window_nframes\n wave_reader.close()\n\n # calculate the threshold by 30 percentile\n max_list_sorted = sorted(max_list)\n threshold = max_list_sorted[int(len(max_list_sorted) * 30. / 100)]\n\n # calculate how many previous windows have maximum values smaller than threshold\n continuous = 0\n continuous_list = []\n for max_val in max_list:\n if max_val < threshold:\n continuous += 1\n else:\n continuous = 0\n continuous_list.append(continuous)\n\n # find frame numbers of breakpoints\n breakpoint_frame_list = []\n while True:\n frame_min = frame_list[0]\n frame_max = frame_min + segment_nframes - window_nframes\n if frame_list[-1] <= frame_max:\n break\n\n for index, frame in enumerate(frame_list):\n if frame > frame_max:\n continuous_max_value = max(continuous_list[:index])\n continuous_max_index = continuous_list.index(continuous_max_value)\n for i in range(continuous_max_index + 1):\n continuous_list[i] = 0\n\n continuous_max_index = int(continuous_max_index - (continuous_max_value - 1) / 2)\n breakpoint_frame_list.append(frame_list[continuous_max_index])\n frame_list = frame_list[continuous_max_index + 1:]\n continuous_list = continuous_list[continuous_max_index + 1:]\n break\n\n # remove too close breakpoints\n i = 1\n while True:\n if len(breakpoint_frame_list) < 2 or i >= len(breakpoint_frame_list):\n break\n if i == 1:\n if breakpoint_frame_list[i] < segment_nframes:\n del breakpoint_frame_list[0]\n else:\n i += 1\n else:\n if breakpoint_frame_list[i] - breakpoint_frame_list[i - 2] < segment_nframes:\n del breakpoint_frame_list[i - 1]\n else:\n i += 1\n\n # calculate nframes_list\n segment_nframes_list = []\n if len(breakpoint_frame_list) > 0:\n segment_nframes_list.append(breakpoint_frame_list[0])\n for i in range(1, len(breakpoint_frame_list)):\n segment_nframes_list.append(breakpoint_frame_list[i] - breakpoint_frame_list[i - 1])\n if len(breakpoint_frame_list) == 0 or breakpoint_frame_list[-1] < wave_params.nframes:\n segment_nframes_list.append(segment_nframes)\n return segment_nframes_list", "def test_total_neighbors(st: SpaceTime):\n # This is actually only true if the space_time is large enough. 
WHen it is small enough one node may be two different neighors reducing the total number of neighbors.\n for n in events(st):\n assert len(n.neighbors) >= 4", "def test_dropout_pos_20_8(self):\n print('TEST: dropout pos 20 window=8')\n samplefreq, stereo_array, mon_array, filename = audio_dropout_detector.read_wav(os.path.join(AUDIO_FILES,\n POS_INFILES[1]))\n print('{0} sampling frequency of {1}/sec\\n'.format(filename, samplefreq))\n\n window, threshold = 8, 10\n contains_dropouts, plot_start, plot_end = audio_dropout_detector.analyzer(mon_array, window=window,\n threshold=threshold)\n # If it did contain dropouts, let's plot the problem area for subjective analysis\n assert contains_dropouts, 'Error: {0} was reported NOT to have dropouts with a window of: {1} and threshold of {2}'.format(\n filename, window, threshold)\n if DEBUG: # The plotting is slow so let's only plot in unittest when debugging\n if contains_dropouts:\n print('The file contains dropouts: {0}\\nplot_area: {1}-{2}'.format(contains_dropouts, plot_start, plot_end))\n audio_dropout_detector.plot_problem_area(stereo_array, filename, plot_start, plot_end)", "def get_network_usage(self):\n f = open(self.paths['TEMP_FOLDER_PATH'] + 'bwm.log').readlines()\n network_samples = []\n for line in f:\n if line.split(';')[1] == 'eth0': #Hard code eth0.\n network_samples.append(8 * float(line.split(';')[4]))\n if not network_samples:\n self.write_to_log('>> ERROR!! Network file is empty.\\n')\n self.s.sendto('>> ERROR!! Network file is empty.', self.addr)\n return False \n self.network_repetitions.append(numpy.mean(network_samples))\n self.network_max.append(max(network_samples))\n \n return True", "def get_captured_packet_count(self, tool=None, **kwargs):\n pass", "def nudge_traces(self, trace_cen):\n # Check input\n if self.par['max_nudge'] is not None and self.par['max_nudge'] <= 0:\n # Nothing to do\n return trace_cen\n # Check vector size\n if trace_cen.shape[0] != self.nspec:\n msgs.error('Traces have incorrect length.')\n _buffer = self.par['det_buffer']\n if _buffer < 0:\n msgs.warn('Buffer must be greater than 0; ignoring.')\n _buffer = 0\n\n if self.par['max_nudge'] is not None:\n msgs.info('Nudging traces, by at most {0} pixel(s)'.format(self.par['max_nudge'])\n + ', to be no closer than {0} pixel(s) from the detector edge.'.format(_buffer))\n\n # NOTE: Should never happen, but this makes a compromise if a\n # trace crosses both the left and right spatial edge of the\n # detector.\n offset = np.clip(_buffer - np.amin(trace_cen, axis=0), 0, self.par['max_nudge']) \\\n + np.clip(self.nspat - 1 - _buffer - np.amax(trace_cen, axis=0),\n None if self.par['max_nudge'] is None else -self.par['max_nudge'], 0)\n\n if np.all(np.logical_not(np.absolute(offset) > 0)):\n # No offsets necessary\n return trace_cen\n\n # TODO: This nudging can actually make the slit length\n # negative. This isn't handled in this method because it's only\n # meant to nudge the traces away from the detector edge,\n # independent of what edge it is or its counterpart. 
For now,\n # check_synced handles what to do if the slit length is\n # negative.\n\n # Offset and return the traces\n return trace_cen + offset[None,:]", "def print_missing_packets(source_file_name, report_file_name, max, start_at_line): \n # Field indexes metadata\n global idx_packet_no\n global idx_hash\n global idx_src_ip\n global idx_dst_ip\n global idx_ip_ident\n global idx_ip_len\n global idx_l4_proto\n global idx_tcp_src_prt\n global idx_tcp_dst_prt\n global idx_tcp_seq\n global idx_tcp_ack\n global idx_tcp_flgs\n global idx_udp_src_prt\n global idx_udp_dst_prt\n\n global arg_show_packets\n global arg_limit_list\n # TODO:\n # - Seq=... , Ack=..., Win=..., Len=...\n print(\"Finding missing packets...\")\n # Create report file\n if os.path.exists(report_file_name):\n os.remove(report_file_name)\n L3TrafficFile = open(report_file_name, 'w')\n # Write Header in report file\n L3TrafficFile.write(\"# Created with Axle Counter , Build \" + buildno+\"\\n\")\n L3TrafficFile.write(\"# Source file: \" +source_file_name+\"\\n\")\n # \n if (start_at_line>0):\n L3TrafficFile.write(\"Starting at Packet \" + str(start_at_line)+ \" (first common packet)\\n\")\n print(\"Starting at Packet \" + str(start_at_line)+ \" (first common packet)\\n\")\n L3TrafficFile.write(\"k\\n\")\n # Counter for reported lines\n i=0\n listsize=0\n # Iterate through all packets in input file 1\n for record in capture_metadata1:\n # Check, if already max lines are reported -> exit\n if listsize>=arg_limit_list:\n break\n # Check, if packet from input file 1 is also in inout file 2 (hash value)\n if find_checksum(record[idx_hash])==0:\n # If so, write report line\n reportline='{:6s}'.format(str(record[idx_packet_no]).rjust(5))\n reportline=reportline+'{:15s}'.format(str(record[idx_src_ip]))+\" -> \"+'{:15s}'.format(str(record[idx_dst_ip]))\n reportline=reportline+' '+'{:5s}'.format(str(record[idx_l4_proto]))\n # Write UDP information\n if record[idx_l4_proto]==\"UDP\":\n reportline=reportline+' '+'{:5s}'.format(str(record[idx_udp_src_prt]))+\" -> \"+'{:5s}'.format(str(record[idx_udp_dst_prt]))\n # Write TCP information\n if record[6]==\"TCP\":\n reportline=reportline+' '+'{:5s}'.format(str(record[idx_tcp_src_prt]))+\" -> \"+'{:5s}'.format(str(record[idx_tcp_src_prt]))\n # TCP flag more verbose (one character to three characters)\n reportline=reportline+' ['+expand_tcp_flags(str(record[idx_tcp_flgs]))+'] ' \n reportline=reportline+\" (IP Len=\"+(str(record[idx_ip_len]))+\" Byte)\"\n # Note the offset\n if i>=start_at_line:\n L3TrafficFile.write(str(reportline)+'\\n')\n listsize=listsize+1\n if arg_show_packets==True:\n print(reportline)\n i=i+1\n L3TrafficFile.close()\n return 0", "def flow_stats(self, device):\r\n for flow_direction in self.device_flows[device]: # flow = flow_tuple i.e. 
(ip_src, ip_dst, )\r\n for flow in self.device_flows[device][flow_direction]:\r\n flow_size = 0\r\n self.device_flow_stats[device][flow_direction][flow] = []\r\n self.device_flow_stats[device][flow_direction][flow].append(self.device_flows[device][flow_direction][flow][0][\"direction\"])\r\n flow_duration = None\r\n if len(self.device_flows[device][flow_direction][flow]) > 1:\r\n first_pkt_ts = self.device_flows[device][flow_direction][flow][0]['relative_timestamp']\r\n last_pkt_ts = self.device_flows[device][flow_direction][flow][-1]['relative_timestamp']\r\n flow_duration = abs(last_pkt_ts.total_seconds() - first_pkt_ts.total_seconds())\r\n else:\r\n flow_duration = abs(self.device_flows[device][flow_direction][flow][0]['relative_timestamp'].total_seconds())\r\n self.device_flow_stats[device][flow_direction][flow].append(flow_duration)\r\n # pkt_count = 0\r\n # print(flow, \"duration:\", flow_duration)\r\n for packet in self.device_flows[device][flow_direction][flow]:\r\n if packet[\"protocol\"] == \"TCP\":\r\n flow_size += packet[\"tcp_data\"][\"payload_len\"]\r\n elif packet[\"protocol\"] == \"UDP\":\r\n flow_size += packet[\"udp_data\"][\"payload_len\"]\r\n elif packet[\"protocol\"] == \"ICMP\":\r\n flow_size += packet[\"icmp_data\"][\"payload_len\"]\r\n else:\r\n flow_size += packet['payload_len']\r\n # pkt_count += 1\r\n self.device_flow_stats[device][flow_direction][flow].append(flow_size)\r\n # self.device_flow_stats[device][flow_direction][flow].append(pkt_count)\r", "def _detect_bout(source, window=10, threshold=0.08, bout_dis=80,\n bout_dur=300, show_flag=False, debug_flag=False) -> list:\n\n # calculate sd for window\n n = len(source)\n n_source = np.reshape(source[:n//window*window], (n//window, window))\n sd_source = np.std(n_source, axis=1)\n windowid = np.arange(len(sd_source))\n\n boutid = windowid[np.where(sd_source > threshold)]\n if (debug_flag): print(boutid)\n bout_list = []\n\n if (len(boutid) > 0):\n # detect continous bout (inter distance 100 windows)\n n_boutid = np.zeros(len(boutid)+2)\n n_boutid[0] = -1000\n n_boutid[-1] = boutid[-1] + 1000\n n_boutid[1:-1] = boutid\n ii = [i for i in range(len(n_boutid)-1) if (n_boutid[i+1] - n_boutid[i]) > bout_dis]\n last_window = n_boutid[ii]\n ii = [i for i in range(1, len(n_boutid)) if (n_boutid[i] - n_boutid[i-1]) > bout_dis]\n first_window = n_boutid[ii]\n\n for i in range(len(first_window)-1):\n if (last_window[i+1] - first_window[i] > bout_dur):\n bout_list.append((first_window[i], last_window[i+1]))\n if (debug_flag): print(bout_list)\n\n # show in time series\n if show_flag and (n < 5000):\n f = figure(width=950, height=200, y_range=[min(sd_source), max(sd_source)],\n title='standard deviation in window size {}, interdistance {}'.format(window, window*bout_dis))\n f.line(windowid, sd_source, color='navy')\n f.circle(boutid, sd_source[boutid], size=7, color='red', alpha=0.5)\n for i in range(len(bout_list)):\n bouts_start = Span(location=bout_list[i][0], dimension='height', line_color='green',\n line_dash='dashed', line_width=1.5)\n f.add_layout(bouts_start)\n bouts_stop = Span(location=bout_list[i][1], dimension='height', line_color='blue',\n line_dash='dashed', line_width=1.5)\n f.add_layout(bouts_stop)\n\n show(f)\n\n for i in range(len(bout_list)):\n bout_list[i] = (bout_list[i][0]*window, bout_list[i][1]*window)\n\n return bout_list", "def compute_link_utilization_over_time(link_byte_counts):\n def find_matching_iface_stats(byte_count, source_id, destination_id):\n matching_stats = [d_i for d_i in byte_count\n 
if d_i[\"sourceSwitchId\"] == source_id and\n d_i[\"destinationSwitchId\"] == destination_id]\n if len(matching_stats) != 1:\n raise ValueError(\"Unexpected results in find_matching_iface_stats. \\\n Found %d matching iface_stats\" % len(matching_stats))\n return matching_stats[0]\n\n def compute_tx_rate(count_in_bytes):\n return (count_in_bytes * 8) / 10.0**7\n\n pp.pprint(len(link_byte_counts[0])) \n # First compute the delta between the iface_stats in time_period t_i and the iface_stats\n # in time period t_{i+1}.\n # tx_rate_t: (source_id x destination_id) -> link_utilization_in_time_period_t forall. t\n tx_rate_t = []\n for t_0, t_1 in zip(link_byte_counts, link_byte_counts[1:]):\n byte_count_delta_t = defaultdict(float)\n for iface_stats in t_0:\n source_id = iface_stats[\"sourceSwitchId\"]\n destination_id = iface_stats[\"destinationSwitchId\"]\n t_0_count = iface_stats[\"bytesSent\"] + iface_stats[\"bytesReceived\"]\n try:\n t_1_stats = find_matching_iface_stats(t_1, source_id, destination_id)\n t_1_count = t_1_stats[\"bytesSent\"] + t_1_stats[\"bytesReceived\"]\n except ValueError:\n t_1_count = t_0_count\n\n count_delta = t_1_count - t_0_count\n link_key = compute_link_key(source_id, \n destination_id)\n byte_count_delta_t[link_key] += count_delta\n\n tx_rate_t.append({the_link_key: compute_tx_rate(byte_count_t) \n for the_link_key, byte_count_t in byte_count_delta_t.items()})\n return tx_rate_t", "def test_packet_in_rate(self):\n valve = self.valves_manager.valves[self.DP_ID]\n now = self.mock_time(10)\n for _ in range(valve.dp.ignore_learn_ins * 2 + 1):\n if valve.rate_limit_packet_ins(now):\n return\n self.fail(\"packet in rate limit not triggered\")", "def segment_threshold(eventfile,segment_length,demod,tbin_size,threshold,PI1,PI2,t1,t2):\n if demod != True and demod != False:\n raise ValueError(\"demod should either be True or False!\")\n\n dat_files = presto_dat(eventfile,segment_length,demod,PI1,PI2,t1,t2)\n rebin_t = np.arange(segment_length+1)*1 #1-second bins\n passed_threshold = []\n print('Now finding the number of segments that can be used...')\n for i in tqdm(range(len(dat_files))):\n dat_file_data = np.fromfile(dat_files[i],dtype='<f',count=-1)\n data_t = np.arange(len(dat_file_data))*tbin_size\n rebin_sum,rebin_edges,rebin_trunc = stats.binned_statistic(data_t,dat_file_data,statistic='sum',bins=rebin_t)\n #print(str(pathlib.Path(dat_files[i]).name),len(rebin_sum[rebin_sum>0])/len(rebin_sum)*100)\n #print(len(rebin_sum[rebin_sum>0]),len(rebin_sum))\n if len(rebin_sum[rebin_sum>0])/len(rebin_sum)*100 >= threshold:\n passed_threshold.append(i)\n\n print('Will use ' + str(len(passed_threshold)) + ' out of ' + str(len(dat_files)) + ' segments.')\n\n return np.array(passed_threshold), len(passed_threshold)", "def calc_ply_drops(self, inner_step):\r\n n_ply_drops = np.zeros((self.reduced.n_panels,), dtype='int16')\r\n for index, panel in enumerate(self.reduced.panels):\r\n n_ply_drops[index] = self.reduced.n_plies_per_group[inner_step] \\\r\n - panel.n_plies_per_group[inner_step]\r\n return n_ply_drops", "def test_num_unused_buffers(self):\n verifier = MetricVerifier(self.impalad_test_service)\n verifier.verify_num_unused_buffers()", "def get_tick_damages(report, version, start, end):\n # Set up initial options to count ticks\n options = {\n 'start': start,\n 'end': end + 60000, # 60s is the longest dot\n 'filter': \"\"\"\n source.type=\"player\" and\n ability.id not in (1000493, 1000819, 1000820, 1001203, 1000821, 1000140, 1001195, 1001291, 1001221)\n and (\n (\n 
type=\"applydebuff\" or type=\"refreshdebuff\" or type=\"removedebuff\"\n ) or (\n isTick=\"true\" and\n type=\"damage\" and\n target.disposition=\"enemy\" and\n ability.name!=\"Combined DoTs\"\n ) or (\n (\n type=\"applybuff\" or type=\"refreshbuff\" or type=\"removebuff\"\n ) and (\n ability.id=1000190 or ability.id=1000749 or ability.id=1000501 or\n ability.id=1001205 or ability.id=1002706\n )\n ) or (\n type=\"damage\" and ability.id=799\n )\n )\n \"\"\"\n # Filter explanation:\n # 1. source.type is player because tether doesn't affect pets or npcs\n # 2. exclude non-dot debuff events like foe req that spam event log to minimize requests\n # 3. include debuff events\n # 4. include individual dot ticks on enemy\n # 5. include only buffs corresponding to ground effect dots\n # (shadow flare, salted earth, doton, flamethrower, slipstream)\n # 6. include radiant shield damage\n }\n\n tick_data = fflogs_api('events/summary', report, options)\n\n # Active debuff window. These will be the debuffs whose damage will count, because they\n # were applied within the tether window. List of tuples (sourceID, abilityID)\n active_debuffs = []\n\n # These will be how much tick damage was applied by a source, only counting\n # debuffs applied during the window\n tick_damage = {}\n\n # Wildfire instances. These get special handling afterwards, for stormblood logs\n wildfires = {}\n\n for event in tick_data['events']:\n # Fix rare issue where full source is reported instead of just sourceID\n if 'sourceID' not in event and 'source' in event and 'id' in event['source']:\n event['sourceID'] = event['source']['id']\n\n action = (event['sourceID'], event['ability']['guid'])\n\n # Record wildfires but skip processing for now. Only for stormblood logs\n if event['ability']['guid'] == 1000861 and version < 20:\n if event['sourceID'] in wildfires:\n wildfire = wildfires[event['sourceID']]\n else:\n wildfire = {}\n\n if event['type'] == 'applydebuff':\n if 'start' not in wildfire:\n wildfire['start'] = event['timestamp']\n elif event['type'] == 'removedebuff':\n if 'end' not in wildfire:\n # Effective WF duration is 9.25\n wildfire['end'] = event['timestamp'] - 750\n elif event['type'] == 'damage':\n if 'damage' not in wildfire:\n wildfire['damage'] = event['amount']\n\n wildfire['target'] = event['targetID']\n\n wildfires[event['sourceID']] = wildfire\n continue\n\n # Debuff applications inside window\n if event['type'] in ['applydebuff', 'refreshdebuff', 'applybuff', 'refreshbuff'] and event['timestamp'] < end:\n # Add to active if not present\n if action not in active_debuffs:\n active_debuffs.append(action)\n\n # Debuff applications outside window\n elif event['type'] in ['applydebuff', 'refreshdebuff', 'applybuff', 'refreshbuff'] and event['timestamp'] > end:\n # Remove from active if present\n if action in active_debuffs:\n active_debuffs.remove(action)\n\n # Debuff fades don't have to be removed. 
Wildfire (ShB) will occasionally\n # log its tick damage after the fade event, so faded debuffs that deal\n # damage should still be included as implicitly belonging to the last application\n\n # Damage tick\n elif event['type'] == 'damage':\n # If this is radiant shield, add to the supportID\n if action[1] == 799 and event['timestamp'] < end:\n if event['supportID'] in tick_damage:\n tick_damage[event['supportID']] += event['amount']\n else:\n tick_damage[event['supportID']] = event['amount']\n\n # Add damage only if it's from a snapshotted debuff\n elif action in active_debuffs:\n if event['sourceID'] in tick_damage:\n tick_damage[event['sourceID']] += event['amount']\n else:\n tick_damage[event['sourceID']] = event['amount']\n\n # Wildfire handling. This part is hard\n # There will be no wildfires for shadowbringers logs, since they are handled\n # as a normal DoT tick.\n for source, wildfire in wildfires.items():\n # If wildfire never went off, set to 0 damage\n if 'damage' not in wildfire:\n wildfire['damage'] = 0\n\n # If entirely within the window, just add the real value\n if ('start' in wildfire and\n 'end' in wildfire and\n wildfire['start'] > start and\n wildfire['end'] < end):\n if source in tick_damage:\n tick_damage[source] += wildfire['damage']\n else:\n tick_damage[source] = wildfire['damage']\n\n # If it started after the window, ignore it\n elif 'start' in wildfire and wildfire['start'] > end:\n pass\n\n # If it's only partially in the window, calculate how much damage tether would've affected\n # Shoutout to [Odin] Lynn Nuvestrahl for explaining wildfire mechanics to me\n elif 'end' in wildfire:\n # If wildfire started before dragon sight, the start will be tether start\n if 'start' not in wildfire:\n wildfire['start'] = start\n # If wildfire ended after dragon sight, the end will be tether end\n if wildfire['end'] > end:\n wildfire['end'] = end\n\n # Set up query for applicable mch damage\n options['start'] = wildfire['start']\n options['end'] = wildfire['end']\n\n # Only damage on the WF target by the player, not the turret\n options['filter'] = 'source.type!=\"pet\"'\n options['filter'] += ' and source.id=' + str(source)\n options['filter'] += ' and target.id=' + str(wildfire['target'])\n\n wildfire_data = fflogs_api('tables/damage-done', report, options)\n\n # If there's 0 damage there won't be any entries\n if not len(wildfire_data['entries']):\n pass\n\n # Filter is strict enough that we can just use the number directly\n elif source in tick_damage:\n tick_damage[source] += int(0.25 * wildfire_data['entries'][0]['total'])\n else:\n tick_damage[source] = int(0.25 * wildfire_data['entries'][0]['total'])\n\n return tick_damage" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract all of the features for the kNN model in the [kNN.py](../attacks/kNN.py) file. `trace` is a trace of loading a web page in the following format `[(time, incoming)]`, where outgoing is 1 and incoming is -1.
def extract_kNN_features(trace): features = [] get_transmission_size_features(trace, features) get_packet_ordering(trace, features) concentraction_packets(trace, features) bursts(trace, features) first_20_packets(trace, features) return features
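A hedged call sketch (the toy trace, the direction convention, and the availability of the remaining kNN.py helpers are all assumptions):

# Assumes extract_kNN_features and the helpers it calls (get_transmission_size_features,
# bursts, first_20_packets, plus the two functions shown in the records above) are in scope.
toy_trace = [(0.00, 1), (0.02, -1), (0.05, -1), (0.07, 1), (0.09, -1)]
feature_vector = extract_kNN_features(toy_trace)
print(len(feature_vector))  # one fixed-length feature row per page load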
[ "def get_traces(sampler, nthin):\n # load every nthin'th sample from the walkers and reshape to\n # final dimensions\n traces = sampler.chain[:, ::nthin, :].reshape(-1, sampler.dim).copy()\n # convert from sample space to meaningfull space\n traces[:, [1, 4, 5]] = np.exp(traces[:, [1, 4, 5]])\n return traces", "def process_track(filename):\n track = Track.from_gpx(filename)[0]\n track.compute_metrics()\n\n for segment in track.segments:\n features = extract_features_2(segment.points)\n return features", "def extract_libffm_features(input_lines, has_label=True):\n labels = []\n features = []\n impression_ids = []\n\n start_index = 1 if has_label else 0\n\n for _ in input_lines:\n line = _.strip()\n if not line:\n continue\n tmp = line.strip().split('%')\n if len(tmp) == 2:\n impression_ids.append(tmp[1].strip())\n else:\n impression_ids.append('none')\n\n line = tmp[0]\n cols = line.strip().split(' ')\n label = float(cols[0].strip()) if has_label else 0\n #if label > 0:\n # label = 1\n #else:\n # label = 0\n cur_feature_list = []\n\n for word in cols[start_index:]:\n if not word.strip():\n continue\n tokens = word.strip().split(':')\n cur_feature_list.append( \\\n [int(tokens[0]) -1, \\\n int(tokens[1]) -1, \\\n float(tokens[2])])\n features.append(cur_feature_list)\n labels.append(label)\n\n result = {}\n result['labels'] = labels\n result['features'] = features\n result['impression_ids'] = impression_ids\n return result", "def get_features(self, parser_state):\n # STUDENT\n # hint: parser_state.stack_peek_n\n stack_top = parser_state.stack_peek_n(1)[0]\n _, _, embed_stack_top = stack_top\n #print(\"embed_top: \", embed_stack_top)\n _, _, embed_buffer_top = parser_state.input_buffer_peek_n(1)[0]\n # _, _, embed_buffer_top = stack_top\n _, _, embed_buffer_second = parser_state.input_buffer_peek_n(2)[1]\n #emded_model = neural_net.VanillaWordEmbedding(test_word_to_ix, TEST_EMBEDDING_DIM)\n #embed_top = \n return [embed_stack_top, embed_buffer_top, embed_buffer_second]\n\n # END STUDENT", "def extract_features(self, dataset, cut_layer, epsilon=None):\n self.model.set_train(False)\n\n tic = time.perf_counter()\n\n feature_dataset = []\n\n for inputs, targets in dataset:\n inputs = mindspore.Tensor(inputs)\n targets = mindspore.Tensor(targets)\n\n logits = self.model.forward_to(inputs, cut_layer)\n\n if epsilon is not None:\n logits = logits.asnumpy()\n logits = unary_encoding.encode(logits)\n logits = unary_encoding.randomize(logits, epsilon)\n logits = mindspore.Tensor(logits.astype('float32'))\n\n feature_dataset.append((logits, targets))\n\n toc = time.perf_counter()\n logging.info(\"[Client #%d] Features extracted from %s examples.\",\n self.client_id, len(feature_dataset))\n logging.info(\"[Client #{}] Time used: {:.2f} seconds.\".format(\n self.client_id, toc - tic))\n\n return feature_dataset", "def recover_parts(cf: CacheFile) -> Tuple[List[Annotations], List[List[TraceData]]]:\n with read_file(cf.fname) as f:\n events, traces = [], []\n for origin in f.keys():\n yml = dict()\n yml[\"origin\"] = origin\n yml[\"attrs\"] = parse_traceattrs(f[origin].attrs)\n\n trace_attrs = []\n trace_data = []\n for idx in f[origin][\"traces\"]:\n dset = f[origin][\"traces\"][idx]\n dset.id.refresh() # load fresh from file\n trace_attrs.append(parse_traceattrs(dset.attrs))\n trace_data.append(parse_tracedata(dset))\n yml[\"traces\"] = trace_attrs\n events.append(yml)\n traces.append(trace_data)\n return events, traces", "def read_trace_data(filename):\n\n global current_max_cpu\n global sample_num, 
last_sec_cpu, last_usec_cpu, start_time\n\n try:\n data = open(filename, 'r').read()\n except:\n print('Error opening ', filename)\n sys.exit(2)\n\n for line in data.splitlines():\n search_obj = \\\n re.search(r'(^(.*?)\\[)((\\d+)[^\\]])(.*?)(\\d+)([.])(\\d+)(.*?core_busy=)(\\d+)(.*?scaled=)(\\d+)(.*?from=)(\\d+)(.*?to=)(\\d+)(.*?mperf=)(\\d+)(.*?aperf=)(\\d+)(.*?tsc=)(\\d+)(.*?freq=)(\\d+)'\n , line)\n\n if search_obj:\n cpu = search_obj.group(3)\n cpu_int = int(cpu)\n cpu = str(cpu_int)\n\n time_pre_dec = search_obj.group(6)\n time_post_dec = search_obj.group(8)\n core_busy = search_obj.group(10)\n scaled = search_obj.group(12)\n _from = search_obj.group(14)\n _to = search_obj.group(16)\n mperf = search_obj.group(18)\n aperf = search_obj.group(20)\n tsc = search_obj.group(22)\n freq = search_obj.group(24)\n common_comm = search_obj.group(2).replace(' ', '')\n\n # Not all kernel versions have io_boost field\n io_boost = '0'\n search_obj = re.search(r'.*?io_boost=(\\d+)', line)\n if search_obj:\n io_boost = search_obj.group(1)\n\n if sample_num == 0 :\n start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)\n sample_num += 1\n\n if last_sec_cpu[cpu_int] == 0 :\n last_sec_cpu[cpu_int] = time_pre_dec\n last_usec_cpu[cpu_int] = time_post_dec\n else :\n duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))\n duration_ms = Decimal(duration_us) / Decimal(1000)\n last_sec_cpu[cpu_int] = time_pre_dec\n last_usec_cpu[cpu_int] = time_post_dec\n elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time\n load = Decimal(int(mperf)*100)/ Decimal(tsc)\n freq_ghz = Decimal(freq)/Decimal(1000000)\n# Sanity check calculation, typically anomalies indicate missed samples\n# However, check for 0 (should never occur)\n tsc_ghz = Decimal(0)\n if duration_ms != Decimal(0) :\n tsc_ghz = Decimal(tsc)/duration_ms/Decimal(1000000)\n store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz)\n\n if cpu_int > current_max_cpu:\n current_max_cpu = cpu_int\n# End of for each trace line loop\n# Now seperate the main overall csv file into per CPU csv files.\n split_csv()", "def train(self, features):", "def extract_features_pos(passage,LM,w2v_model,d2v_model,google_model,label,nlp):\n # a passage is a consecutive set of lines without a blank line in between. we extract features with these pairs \n # of lines as prev and next lines. they're a more coherent unit. The passages is obtained by methods above, \n # namely, splitting the training file by '\\n\\n'\n line_list=passage.split('\\n')\n line_list=[i for i in line_list if i!='']\n if len(line_list)<=1:\n return []\n features=['loglik_norm','d2v_dist','w2v_dist','rhyme_prev','rhyme_current','len_prev','len_cur','label']\n pos_feature_vec=[]\n for i in range(1,len(line_list)):\n #extract features from the current and prev line\n prev=line_list[i-1]\n current=line_list[i]\n features=feature_extractor(current,prev,LM,w2v_model,d2v_model,google_model,label,nlp)\n pos_feature_vec.append(features)\n return np.array(pos_feature_vec)", "def traces(self):\r\n\t\treturn [b'TR'+i for i in b'ABCDEF' if self.query(b'DISP:TRAC:STAT? 
TR'+i)]", "def fetch_features():\n sp = get_client()\n\n raw_data = sys.stdin.read()\n tracks = jsonpickle.decode(raw_data)\n\n # get track features\n from span.tasks.features import get_audio_features\n\n features = get_audio_features(sp, tracks)\n\n # export data\n sys.stdout.write(jsonpickle.encode(features))", "def get_ts_features_to_preprocess(self):", "def get_traces(model):\n if isinstance(model, pm.MCMC):\n m = model\n else:\n m = model.mc\n\n nodes = list(m.stochastics)\n\n names = [node.__name__ for node in nodes]\n dtype = [(name, np.float) for name in names]\n traces = np.empty(nodes[0].trace().shape[0], dtype=dtype)\n\n # Store traces in one array\n for name, node in zip(names, nodes):\n traces[name] = node.trace()[:]\n\n return traces", "def test_train(self):\n trace.train(10)", "def _find_aten_nodes_in_forward_pass(trace: Union[torch.jit.TopLevelTracedModule, torch.jit.TracedModule]) \\\n -> List[torch._C.Node]:\n # pylint: disable=protected-access\n nodes = []\n try:\n nodes = [node for node in trace.graph.nodes() if \"aten::\" in node.kind() and\n ConnectedGraph._parse_op_type(node) not in ConnectedGraph.passthrough_graph_nodes]\n except RuntimeError:\n pass\n return nodes", "def get_features(self, detection):\n features = np.zeros((1, 0), dtype=np.float32)\n if 'bdif' in self.label_config[\"features\"]:\n features = np.append(features, detection.bbox)\n features = np.append(features, detection.bbox)\n if 'bbox' in self.label_config[\"features\"]:\n features = np.append(features, detection.bbox)\n if 'brel' in self.label_config[\"features\"]:\n cam_id, _ = self.data_provider.cam_and_time(detection)\n imsize = self.data_provider.cam_size[cam_id]\n tmp = detection.bbox\n tmp[0, 0] /= imsize[0]\n tmp[0, 2] /= imsize[0]\n tmp[0, 1] /= imsize[1]\n tmp[0, 3] /= imsize[1]\n features = np.append(features, tmp)\n if 'conf' in self.label_config[\"features\"]:\n features = np.append(features, detection.confidence)\n\n social_cnt = 0\n if 'soc1' in self.label_config[\"features\"]:\n social_cnt = 1\n if 'soc3' in self.label_config[\"features\"]:\n social_cnt = 3\n if 'soc5' in self.label_config[\"features\"]:\n social_cnt = 5\n\n dens = np.zeros((1, 3), dtype=np.flot32)\n\n if social_cnt > 0 or 'dens' in self.label_config[\"features\"]:\n if detection not in self.social.keys():\n self.social[detection] = np.zeros((1, 3 * social_cnt))\n if detection.time not in self.det_by_time.keys():\n pass\n else:\n neighbours = np.asarray(\n self.det_by_time[detection.time])\n if len(neighbours) == 0:\n pass\n else:\n dx = neighbours[:, 0] - \\\n detection.bbox[0, 0] - \\\n detection.bbox[0, 2] * 0.5\n dy = neighbours[:, 1] - \\\n detection.bbox[0, 1] - \\\n detection.bbox[0, 3] * 0.5\n dd = dx**2 + dy**2\n if 'dens' in self.label_config[\"features\"]:\n dds = sorted(list(dd.reshape((-1,))))\n if len(dds) < 20:\n dds += [0] * (20 - len(dds))\n dens[0, 0] = dds[0]\n dens[0, 1] = dds[4]\n dens[0, 2] = dds[19]\n\n for rep in range(min(len(neighbours), social_cnt)):\n who = np.argmin(dd)\n self.social[detection][0, 3*rep:3*rep+3] =\\\n np.asarray([dx[who],\n dy[who],\n neighbours[who, -1]])\n dd[who] = 1e10\n\n features = np.append(features, self.social[detection])\n\n if 'dens' in self.label_config[\"features\"]:\n features = np.append(features, dens)\n\n if 'intp' in self.label_config[\"features\"]:\n if not hasattr(detection, \"interpolated\"):\n detection.interpolated = False\n features = np.append(features,\n np.asarray([detection.interpolated]))\n if 'appr' in self.label_config[\"features\"]:\n 
features = np.append(features, self.app_feat(detection))\n\n detection.features = features", "def feature_peek(self):\n print self.train_feature.head()\n print self.test_feature.head()", "def extract_features(imgs, config=DetectionConfig()):\n\tprint(\"extracting features from\", len(imgs), \"images\")\n\t# Create a list to append feature vectors to\n\tfeatures = []\n\t# Iterate through the list of images\n\tfor img in imgs:\n\t\tfeatures.append(extract_feature(img, config=config))\n\t# Return list of feature vectors\n\tprint(\"done extracting features from\", len(imgs), \"images\")\n\treturn features", "def feature_info(self):\n print self.train_feature.info()\n print self.test_feature.info()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor defaultClassName proposed name for the new class (string) defaultFile proposed name for the source file (string) defaultPath default path for the new file (string) parent parent widget of the dialog (QWidget)
def __init__(self, defaultClassName, defaultFile, defaultPath, parent=None):
    super(NewDialogClassDialog, self).__init__(parent)
    self.setupUi(self)
    self.pathnamePicker.setMode(E5PathPickerModes.DirectoryMode)
    self.okButton = self.buttonBox.button(QDialogButtonBox.Ok)
    self.okButton.setEnabled(False)
    self.classnameEdit.setText(defaultClassName)
    self.filenameEdit.setText(defaultFile)
    self.pathnamePicker.setText(defaultPath)
    msh = self.minimumSizeHint()
    self.resize(max(self.width(), msh.width()), msh.height())
[ "def __init__(self, currentPath, mode, parent=None):\n super(LfConvertDataDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.newProjectPicker.setMode(E5PathPickerModes.DirectoryMode)\n \n self.__defaults = getDefaults()\n self.__currentPath = Utilities.toNativeSeparators(currentPath)\n \n self.currentProjectLabel.setPath(currentPath)\n self.newProjectPicker.setText(os.path.dirname(currentPath))\n \n self.lfFileSizeSpinBox.setValue(self.__defaults[\"minsize\"])\n self.lfFilePatternsEdit.setText(\" \".join(self.__defaults[\"pattern\"]))\n \n if mode == 'normal':\n self.lfFileSizeSpinBox.setEnabled(False)\n self.lfFilePatternsEdit.setEnabled(False)\n \n msh = self.minimumSizeHint()\n self.resize(max(self.width(), msh.width()), msh.height())", "def newConnectomeFile(self, parent):\n\t\tFormWindow(parent)", "def choose_file(self):\n dirname = self.get_default_directory(self.filename.text())\n filename = getOpenFileName(self, 'Open File', dirname)\n if os.path.exists(filename): # avoids problems if <Cancel> was selected\n dirname = os.path.dirname(filename)\n self.filename.setText(str(filename))\n self.set_default_directory(dirname)", "def __init__(self, filename, old=None, new=None):\n self.filename = filename\n self.old = old\n self.new = new", "def __init__(self, vcs, parent=None):\n super(HgNewProjectOptionsDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.vcsProjectDirPicker.setMode(E5PathPickerModes.DirectoryMode)\n \n self.__vcs = vcs\n \n vcsUrlHistory = self.__vcs.getPlugin().getPreferences(\n \"RepositoryUrlHistory\")\n self.vcsUrlPicker.setMode(E5PathPickerModes.DirectoryMode)\n self.vcsUrlPicker.setInsertPolicy(QComboBox.InsertAtTop)\n self.vcsUrlPicker.setSizeAdjustPolicy(\n QComboBox.AdjustToMinimumContentsLength)\n self.vcsUrlPicker.setPathsList(vcsUrlHistory)\n self.vcsUrlClearHistoryButton.setIcon(\n UI.PixmapCache.getIcon(\"editDelete.png\"))\n self.vcsUrlPicker.setText(\"\")\n \n ipath = (\n Preferences.getMultiProject(\"Workspace\") or\n Utilities.getHomeDir()\n )\n self.__initPaths = [\n Utilities.fromNativeSeparators(ipath),\n Utilities.fromNativeSeparators(ipath) + \"/\",\n ]\n self.vcsProjectDirPicker.setText(self.__initPaths[0])\n \n self.lfNoteLabel.setVisible(\n self.__vcs.isExtensionActive(\"largefiles\"))\n self.largeCheckBox.setVisible(\n self.__vcs.isExtensionActive(\"largefiles\"))\n \n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)\n \n msh = self.minimumSizeHint()\n self.resize(max(self.width(), msh.width()), msh.height())", "def set_default_filename(cls, default_filename: str) -> None:\n if default_filename:\n filename_dict = {'default_filename': default_filename}\n cls.__save(filename_dict)", "def parentFileName(*args, **kwargs):\n \n pass", "def __init__(self, tipo='text', fichero='agenda.txt'):\n self.tipo = tipo\n self.fichero = os.path.join(os.getcwd(), fichero)\n if not os.path.exists(self.fichero):\n print(f'Fichero {self.fichero} no existe, se creará uno nuevo.')\n creado = self._create_file()\n if creado:\n print('Fichero creado satisfactoriamente.')\n else:\n print('Ocurrió un error al crear el fichero.')", "def __init__(self, filename):\n self.PICKLE_NAME = filename", "def createEditor(self, parent, option, index):\n pathToFileName = \"\"\n if QT_VERSION_STR[0] == '4':\n pathToFileName = QFileDialog.getOpenFileName(None, \"Open\")\n elif QT_VERSION_STR[0] == '5':\n pathToFileName, temp = QFileDialog.getOpenFileName(None, \"Open\")\n pathToFileName = str(pathToFileName) # QString ==> str\n if 
len(pathToFileName):\n index.model().setData(index, pathToFileName, Qt.EditRole)\n index.model().dataChanged.emit(index, index) # Tell model to update cell display.\n return None", "def __init__(self, parent,width,height,background,pos_x,pos_y,title,title_y,\n filename=None,font_size=18):\n\n Dialog.__init__(self, parent,width,height,background,pos_x,pos_y,title,\n title_y,filename,font_size)", "def __init__(self, ui_directory, py_directory=None):\r\n self.ui_directory = ui_directory if isdir(ui_directory) else None\r\n if py_directory is None:\r\n self.py_directory = ui_directory\r\n else:\r\n self.py_directory = py_directory if isdir(py_directory) else None", "def __view_filename_default(self):\n module = self.__module__.split('.')\n class_filename = module[-1] + '.py'\n module_dir_name = module[2:-1]\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n view_filename = reduce(os.path.join, \n [base_dir] + module_dir_name \\\n + UI_DIR_NAME + [class_filename])\n return view_filename", "def __init__(self):\n self.basename = self.basename or self.__class__.__name__.lower()\n self.set_fields()", "def _get_input_file_name(self):\n file_path = QFileDialog.getOpenFileName(parent=self)\n if file_path[0] != '':\n self._input_edit.setText(file_path[0])\n self._output_edit.setText(self._default_path)\n return", "def __init__(self, directory_to_create):\n\n super().__init__()\n self.directory_to_create = directory_to_create", "def InitFileMenuElements(self):\n \n # MenuBar -> File -> New\n self.newAct = QAction(\n QIcon(RelativePath('asset',\n 'image',\n 'menubar',\n 'file',\n 'new.png')),\n '&New',\n self.parent)\n self.newAct.setShortcut('Ctrl+N')\n self.newAct.triggered.connect(self.parent.NewAct)\n # MenuBar -> File -> Open ...\n self.openAct = QAction(\n QIcon(RelativePath('asset',\n 'image',\n 'menubar',\n 'file',\n 'open.png')),\n '&Open ...',\n self.parent)\n self.openAct.setShortcut('Ctrl+O')\n self.openAct.triggered.connect(self.parent.OpenAct)\n # MenuBar -> File -> Save\n self.saveAct = QAction(\n QIcon(RelativePath('asset',\n 'image',\n 'menubar',\n 'file',\n 'save.png')),\n '&Save',\n self.parent)\n self.saveAct.setShortcut('Ctrl+S')\n self.saveAct.triggered.connect(self.parent.SaveAct)\n # MenuBar -> File -> Save all\n self.saveAllAct = QAction('&Save all', self.parent)\n self.saveAllAct.setShortcut('Ctrl+Alt+S')\n self.saveAllAct.triggered.connect(self.parent.SaveAllAct)\n # MenuBar -> File -> Save as\n self.saveAsAct = QAction('&Save as ...', self.parent)\n self.saveAsAct.setShortcut('Ctrl+Shift+S')\n self.saveAsAct.triggered.connect(self.parent.SaveAsAct)\n # MenuBar -> File -> Close\n self.closeAct = QAction(\n QIcon(RelativePath('asset',\n 'image',\n 'menubar',\n 'file', \n 'close.png')),\n '&Close',\n self.parent)\n # No shortcut\n self.closeAct.triggered.connect(self.parent.CloseAct)\n # MenuBar -> File -> Close all\n self.closeAllAct = QAction('&Close all', self.parent)\n self.closeAllAct.setShortcut('Ctrl+Shift+W')\n self.closeAllAct.triggered.connect(self.parent.CloseAllAct)\n # MenuBar -> File -> Quit\n self.quitAct = QAction(\n QIcon(RelativePath('asset', \n 'image',\n 'menubar',\n 'file',\n 'quit.png')),\n '&Quit',\n self.parent)\n self.quitAct.setShortcut('Ctrl+Q')\n self.quitAct.triggered.connect(self.parent.QuitAct)", "def __init__(self, file=os.path.dirname(__file__) + '/defaults.txt'):\n self.__compatible_formats = ('.jpg', '.png')\n self.__name_set = False\n self.__loo = []\n self.__defaults = {}\n\n def str_to_bool(s):\n \"\"\"\n 
str_to_bool(s)\n\n Returns false if the given string is \"false\" (non case-sensitive)\n or if the string is empty returns True otherwise.\n \"\"\"\n return s and not s.lower() == 'false'\n\n # Read default options\n try:\n with open(file, 'r') as f:\n for line in f:\n ls = line.split()\n if ls[0] == 'formats':\n self.__defaults['formats'] = ls[1:]\n else:\n self.__defaults[ls[0]] = ls[1]\n\n # unpack dictionary into the appropriate attributes\n self.input_dir = os.getcwd()\n self.lop = os.listdir(self.input_dir)\n self.name = os.path.basename(os.getcwd()) + '.tex'\n self.verbose = str_to_bool(self.__defaults['verbose'])\n self.cleanup = str_to_bool(self.__defaults['cleanup'])\n self.sort = str_to_bool(self.__defaults['sort'])\n self.resize = float(self.__defaults['resize'])\n self.quality = int(self.__defaults['quality'])\n self.angle = float(self.__defaults['angle'])\n self.formats = self.__defaults['formats']\n self.__format_names()\n\n except IndexError:\n raise IndexError('Defaults file missing a value for ' + str(ls[0]))\n except FileNotFoundError:\n raise FileNotFoundError('File {} not found'.format(file))\n except ValueError:\n raise ValueError('Wrong data type for one of the options in the defaults file')", "def __setInputFilepath(self):\r\n\r\n\r\n input_filepath = tkFileDialog.askopenfilename(title=\"Select a file\")\r\n self.__input_filepath.setEntryText(input_filepath)\r\n self.__presetHeaderDefine()\r\n self.__presetArrayName()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Private slot to set the enable state of the OK button.
def __enableOkButton(self): self.okButton.setEnabled( self.classnameEdit.text() != "" and self.filenameEdit.text() != "" and self.pathnamePicker.text() != "")
[ "def __updateOK(self):\n enabled = True\n if self.noneButton.isChecked():\n enabled = False\n elif self.idButton.isChecked():\n enabled = self.idEdit.text() != \"\"\n elif self.tagButton.isChecked():\n enabled = self.tagCombo.currentText() != \"\"\n elif self.branchButton.isChecked():\n enabled = self.branchCombo.currentText() != \"\"\n elif self.bookmarkButton.isChecked():\n enabled = self.bookmarkCombo.currentText() != \"\"\n \n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(enabled)", "def enable(self, on):\n self.setEnabled(on) #qt", "def _set_isOKButtonVisible(self, *args) -> \"bool\" :\n return _core.Command__set_isOKButtonVisible(self, *args)", "def set_yes_ACK(self):\n self.ACK =\"YES\"", "def onCheck(self):\r\n\r\n if self.accept_var.get() == 1:\r\n self.next_button.config(state=NORMAL)\r\n else:\r\n self.next_button.config(state=DISABLED)", "def test_is_enabled(self):\n button = self.dlg.by(class_name=\"Button\",\n name=\"OK\").find()\n self.assertEqual(button.is_enabled(), True)", "def dialogAccept(self):\n self.startSetup()\n self.enableSetup()", "def onChecked(self):\n if self.sender().objectName() == \"-b\":\n if self.thresholdEdit.isEnabled():\n self.thresholdEdit.setDisabled(True)\n else:\n self.thresholdEdit.setDisabled(False)\n elif self.sender().objectName() == \"-a\":\n if self.taxamapEdit.isEnabled():\n self.taxamapEdit.setDisabled(True)\n else:\n self.taxamapEdit.setDisabled(False)\n elif self.sender().objectName() == \"-s\":\n if self.sNetEdit.isEnabled():\n self.sNetEdit.setDisabled(True)\n else:\n self.sNetEdit.setDisabled(False)\n elif self.sender().objectName() == \"-n\":\n if self.nNetRetEdit.isEnabled():\n self.nNetRetEdit.setDisabled(True)\n else:\n self.nNetRetEdit.setDisabled(False)\n elif self.sender().objectName() == \"-h\":\n if self.hybridEdit.isEnabled():\n self.hybridEdit.setDisabled(True)\n else:\n self.hybridEdit.setDisabled(False)\n elif self.sender().objectName() == \"-w\":\n if self.wetOpEdit.isEnabled():\n self.wetOpEdit.setDisabled(True)\n else:\n self.wetOpEdit.setDisabled(False)\n elif self.sender().objectName() == \"-x\":\n if self.numRunEdit.isEnabled():\n self.numRunEdit.setDisabled(True)\n else:\n self.numRunEdit.setDisabled(False)\n elif self.sender().objectName() == \"-m\":\n if self.nNetExamEdit.isEnabled():\n self.nNetExamEdit.setDisabled(True)\n else:\n self.nNetExamEdit.setDisabled(False)\n elif self.sender().objectName() == \"-md\":\n if self.maxDiaEdit.isEnabled():\n self.maxDiaEdit.setDisabled(True)\n else:\n self.maxDiaEdit.setDisabled(False)\n elif self.sender().objectName() == \"-rd\":\n if self.retDiaEdit.isEnabled():\n self.retDiaEdit.setDisabled(True)\n else:\n self.retDiaEdit.setDisabled(False)\n elif self.sender().objectName() == \"-f\":\n if self.maxFEdit.isEnabled():\n self.maxFEdit.setDisabled(True)\n else:\n self.maxFEdit.setDisabled(False)\n elif self.sender().objectName() == \"-p\":\n if self.stopCriterionEdit.isEnabled():\n self.stopCriterionEdit.setDisabled(True)\n else:\n self.stopCriterionEdit.setDisabled(False)\n elif self.sender().objectName() == \"-r\":\n if self.maxRoundEdit.isEnabled():\n self.maxRoundEdit.setDisabled(True)\n else:\n self.maxRoundEdit.setDisabled(False)\n elif self.sender().objectName() == \"-t\":\n if self.maxTryPerBrEdit.isEnabled():\n self.maxTryPerBrEdit.setDisabled(True)\n else:\n self.maxTryPerBrEdit.setDisabled(False)\n elif self.sender().objectName() == \"-i\":\n if self.improveThresEdit.isEnabled():\n self.improveThresEdit.setDisabled(True)\n else:\n 
self.improveThresEdit.setDisabled(False)\n elif self.sender().objectName() == \"-l\":\n if self.maxBlEdit.isEnabled():\n self.maxBlEdit.setDisabled(True)\n else:\n self.maxBlEdit.setDisabled(False)\n elif self.sender().objectName() == \"-pl\":\n if self.numProcEdit.isEnabled():\n self.numProcEdit.setDisabled(True)\n else:\n self.numProcEdit.setDisabled(False)\n elif self.sender().objectName() == \"resultOutputFile\":\n if self.fileDestEdit.isEnabled():\n self.fileDestEdit.setDisabled(True)\n self.fileDestBtn.setDisabled(True)\n else:\n self.fileDestEdit.setDisabled(False)\n self.fileDestBtn.setDisabled(False)\n else:\n pass", "def ok_trigger(self):\n self.set_config()\n self.okWindow.setWindowTitle(\"+1 Love Acquired!\")\n self.okWindow.exec_()\n\n QtCore.QCoreApplication.instance().quit()", "def updateActionEnablement(self, isShowing: bool) -> None:\n ...", "def enableactions(self):\n pass", "def click_yes_button(self):\n return self", "def handle_clicked_yes(self):\n if self.on_yes is not None:\n self.on_yes(self)", "def enableOrDisableCreateButton(self):\n #self.InitializeButton.enabled = self.inputFiducialsNodeSelector.currentNode() is not None\n pass", "def on_noneButton_toggled(self, checked):\n self.__updateOK()", "def on_startEditButton_clicked(self, checked):\n if checked:\n self.sendEditButton.setEnabled(True)\n self.cancelEditButton.setEnabled(True)\n self.shareButton.setEnabled(False)\n self.startEditButton.setEnabled(False)\n \n self.startEdit.emit()", "def _get_isOKButtonVisible(self) -> \"bool\" :\n return _core.Command__get_isOKButtonVisible(self)", "def set_open_enabled(self, enable):\n # ic()\n self.actionSaveDataAs.setEnabled(enable)\n self.paramMenu.setEnabled(enable)\n self.actionOpenFolder.setEnabled(enable)\n self.actionNewFile.setEnabled(enable)\n # self.ui.listScans.setEnabled(enable)", "def __enableItemButtons(self, enabled):\n self.notify.debug(\"__enableItemButtons %d\" % enabled)\n\n if enabled:\n buttonState = DGG.NORMAL\n else:\n buttonState = DGG.DISABLED\n \n # Also control paging between pickers to prevent exceptions if you\n # switch pickers while waiting for an AI response.\n if hasattr(self, 'inAtticButton'):\n self.inAtticButton['state'] = buttonState\n if hasattr(self, 'inRoomButton'):\n self.inRoomButton['state'] = buttonState\n if hasattr(self, 'inTrashButton'):\n self.inTrashButton['state'] = buttonState\n \n # Update the enabled state of all panels.\n pickers = [self.atticPicker,\n self.inRoomPicker,\n self.inTrashPicker\n ]\n \n for picker in pickers:\n if picker:\n for panel in picker['items']:\n if not panel.isEmpty():\n panel.enable(enabled)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Private slot called, when the text of the classname edit has changed. text changed text (string)
def on_classnameEdit_textChanged(self, text): self.__enableOkButton()
[ "def text_changed(self):\n self.default = False\n self.emit(SIGNAL('text_changed_at(QString,int)'),\n self.filename, self.editor.get_position('cursor'))", "def on_text_edited(self, event):\n event.Skip()\n self.shell_obj._field_text_edited()", "def onTextChange(self, event):\r\n self.textChangeFlag = True;", "def _update_text(self, text):\n self.blockSignals(True)\n\n cursor_position = self.cursorPosition()\n self.setText(text)\n self.setCursorPosition(cursor_position)\n\n self.blockSignals(False)\n\n self.updated.emit(text)", "def setTextAsUpdated(self):\r\n self.textChangeFlag = False", "def _ui_shell_text_changed(self):\n text = self.text\n\n #\n # a Search, eg '/DnsParse_'\n #\n\n if self.is_search(text):\n self._execute_search(text)\n self._highlight_search()\n return\n\n # not a search query clear any lingering filters for it\n else:\n self._table_model.filter_string(\"\")\n\n #\n # a Jump, eg '0x804010a' or 'sub_1400016F0'\n #\n\n if self.is_jump(text) and self._table_view:\n self._line_label.setText(\"Jump\")\n self._highlight_jump()\n return\n\n #\n # a Composition, eg '(A | B) - C'\n #\n\n self._execute_composition(text)\n self._highlight_composition()\n self._ui_hint_coverage_refresh()", "def textCtrlDescription_TextChange(self, event):\n self.SelectedItem.description = event.GetString()", "def on_filenameEdit_textChanged(self, text):\n self.__enableOkButton()", "def change_text(self):\r\n self.webView.update_by_content(self.editor.toPlainText())", "def on_edited_e( self, cell, path, new_text, model ):\n #print \"Change '%s' to '%s'\" % (model[path][1], new_text)\n model[path][1] = new_text\n #return", "def Edit_text(self):\r\n current = self.project_tree.currentItem()\r\n try:\r\n try:\r\n self.setTexttree_window.close()\r\n except:\r\n pass\r\n try:\r\n self.setExportDic_window.close()\r\n except:\r\n pass\r\n if len(self.project_tree.selectedItems()) == 1:\r\n if current.text(1) != \"\":\r\n self.setTexttree_window = tree_set_text_window()\r\n self.setTexttree_window.signal_edit_text.connect(self.edit_tree_text)\r\n self.setTexttree_window.signal_edit_text1.connect(self.edit_tree_text_conduct)\r\n else:\r\n self.setExportDic_window = tree_Edit_dic_window(current.text(0))\r\n self.setExportDic_window.signal_edit_text.connect(self.edit_dic_name)\r\n else:\r\n QtWidgets.QMessageBox.warning(self, 'error', 'Please choose in Projectbox')\r\n except:\r\n ...", "def on_delayed_text_changed(self):\n self.sync_text()", "def setText( self, newmsg ):\n self.msg.setText( newmsg )\n return", "def setText(self, textState):\r\n oldInsertionPoint = self.control.GetInsertionPoint()\r\n self.control.SetValue(textState)\r\n self.control.SetInsertionPoint(oldInsertionPoint) \r\n self.textChangeFlag = False", "def updateText(self):\n self.errorFlag = False\n try:\n self.setText(self.field.editorText(self.node))\n except ValueError as err:\n if len(err.args) >= 2:\n self.setText(err.args[1])\n else:\n self.setText(self.node.data.get(self.field.name, ''))\n self.errorFlag = True\n if self.field.useRichText:\n self.doc.setHtml(self.text())\n else:\n self.doc.setPlainText(self.text())", "def update_textLine_check(self, obj, text, attr, button_name, check_func):\n \n # be sure we have a type to compare against for setting the text\n global g_LineEdittype\n if g_LineEdittype == None: g_LineEdittype = type(QtGui.QLineEdit())\n\n rtext = str(text) # in case of QString\n\n if check_func(rtext, button_name, 1, self, 1):\n self.set_uvar(attr, rtext)\n if type(obj) == g_LineEdittype: obj.setText(text)\n 
else: print '** update_textLine_check: not a LineEdit type'\n return 1\n else:\n # error, reset to previous attribute\n # obj.clear()\n obj.setText(self.uvars.val(attr))\n obj.setFocus()\n return 0", "def textFormatEdited(self):\n self.item.format = str(self.textFormatLine.text())\n \n logger = logging.getLogger(__name__+\".TextSettingsDialog\")\n logger.debug(\"Text format changed for '%s' to '%s'\", self.item.title, self.item.format)", "def edit_tree_text_conduct(self, text):\r\n current = self.project_tree.currentItem()\r\n try:\r\n current.setText(3, text)\r\n except:\r\n ...", "def filtername_changed_slot(self, new_name):\n self.filterframe.fltr.setName(new_name)\n self.filterframe.ui.filtername_label.setText(new_name) \n self.filterframe.ui.filtername_label.update()\n self.filterframe.adjustSize()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Private slot called, when the text of the filename edit has changed. text changed text (string)
def on_filenameEdit_textChanged(self, text): self.__enableOkButton()
[ "def text_changed(self):\n self.default = False\n self.emit(SIGNAL('text_changed_at(QString,int)'),\n self.filename, self.editor.get_position('cursor'))", "def on_text_edited(self, event):\n event.Skip()\n self.shell_obj._field_text_edited()", "def on_edited_e( self, cell, path, new_text, model ):\n #print \"Change '%s' to '%s'\" % (model[path][1], new_text)\n model[path][1] = new_text\n #return", "def onTextChange(self, event):\r\n self.textChangeFlag = True;", "def __onFileChanged(self, path):\n content, encoding = self.editor.readFile(\n path, encoding=self.editor.fileEncoding)\n if content == self.editor.toPlainText():\n logger.debug(\"FileWatcherMode: Internal change, skipping\")\n return\n self.__changeWaiting = True\n if self.editor.hasFocus() and self.__flgNotify:\n self.__notifyChange()", "def current_editor_changed(self, filename):\r\n if filename is None:\r\n filename = translations.TR_NEW_DOCUMENT\r\n self.emit(SIGNAL(\"currentEditorChanged(QString)\"), filename)", "def Edit_text(self):\r\n current = self.project_tree.currentItem()\r\n try:\r\n try:\r\n self.setTexttree_window.close()\r\n except:\r\n pass\r\n try:\r\n self.setExportDic_window.close()\r\n except:\r\n pass\r\n if len(self.project_tree.selectedItems()) == 1:\r\n if current.text(1) != \"\":\r\n self.setTexttree_window = tree_set_text_window()\r\n self.setTexttree_window.signal_edit_text.connect(self.edit_tree_text)\r\n self.setTexttree_window.signal_edit_text1.connect(self.edit_tree_text_conduct)\r\n else:\r\n self.setExportDic_window = tree_Edit_dic_window(current.text(0))\r\n self.setExportDic_window.signal_edit_text.connect(self.edit_dic_name)\r\n else:\r\n QtWidgets.QMessageBox.warning(self, 'error', 'Please choose in Projectbox')\r\n except:\r\n ...", "def _update_text(self, text):\n self.blockSignals(True)\n\n cursor_position = self.cursorPosition()\n self.setText(text)\n self.setCursorPosition(cursor_position)\n\n self.blockSignals(False)\n\n self.updated.emit(text)", "def on_classnameEdit_textChanged(self, text):\n self.__enableOkButton()", "def _onEdit(self, text):\n\n self._session['outputdir'] = text", "def textCtrlDescription_TextChange(self, event):\n self.SelectedItem.description = event.GetString()", "def files_changed(self):\n # TODO: implement", "def _ui_shell_text_changed(self):\n text = self.text\n\n #\n # a Search, eg '/DnsParse_'\n #\n\n if self.is_search(text):\n self._execute_search(text)\n self._highlight_search()\n return\n\n # not a search query clear any lingering filters for it\n else:\n self._table_model.filter_string(\"\")\n\n #\n # a Jump, eg '0x804010a' or 'sub_1400016F0'\n #\n\n if self.is_jump(text) and self._table_view:\n self._line_label.setText(\"Jump\")\n self._highlight_jump()\n return\n\n #\n # a Composition, eg '(A | B) - C'\n #\n\n self._execute_composition(text)\n self._highlight_composition()\n self._ui_hint_coverage_refresh()", "def change_text(self):\r\n self.webView.update_by_content(self.editor.toPlainText())", "def on_modified(self, event):\n\n filepath = event.src_path\n filename = os.path.basename(filepath)\n print(\"ファイル {} が変更されました。\".format(filename))", "def recent_files_combo_box_index_changed(self, path):\n current_index = self.recent_files_comboBox.currentIndex()\n path = self.recent_files_comboBox.itemData(current_index)\n self.find_from_path_lineEdit.setText(path)\n self.find_from_path_pushButton_clicked()", "def _currentTabChanged(self, fileName):\r\n self.currentTabChanged.emit(fileName)", "def reset_new_name(self, event=None):\n\n selected_file = 
self._selected_file.get()\n base, ext = os.path.splitext(os.path.basename(selected_file))\n\n # Reset the displayed basename\n self._new_name.set(base)\n\n # Set the focus on the filename entry box and select all text\n self.focus_filename_entry()", "def setTextAsUpdated(self):\r\n self.textChangeFlag = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Public method to retrieve the data entered into the dialog. tuple giving the classname (string) and the file name (string)
def getData(self): return ( self.classnameEdit.text(), os.path.join(self.pathnamePicker.text(), self.filenameEdit.text()) )
[ "def __init__(self,parent, engname, onames, exts=['dat'], ext_descrips = ['data'],ext_multi=[True]):\n wx.Dialog.__init__(self,parent,-1,title='Export multiple objects to file',\n style=wx.DEFAULT_DIALOG_STYLE| wx.RESIZE_BORDER,\n size=(720,420))\n\n #get references to required core tools.\n app = wx.GetApp()\n self.console = app.toolmgr.get_tool('Console')\n\n #attributes\n self.engname = engname\n self.onames = onames\n self.opt_dialog = None\n self.filepaths = []\n self.options = {} #dict of ext string: options dialog\n\n #get extensions\n self.ext_descrips = ext_descrips\n self.exts = exts\n self.ext_multi = ext_multi\n self.multi = True #return multiple filepaths\n\n #controls\n self._InitControls()", "def _get_filename(self) -> \"std::string\" :\n return _core.FileDialog__get_filename(self)", "def get_from_form_data(self, data, files, name):\n return self.field.widget.value_from_datadict(data, files, name)", "def askfil(self, event):\n fil = filedialog.askopenfilename(\n initialdir=\"C:\\\"\",\n title=\"Select data output directory:\",\n filetypes=[(\"CSV files\", \"*.csv\")]\n )\n\n if not fil == \"\":\n event.widget.delete(0, tk.END)\n event.widget.insert(0, fil)\n event.widget.after(75, event.widget.xview_moveto, 1)\n title = fil.split(\"/\")[-1].replace('_', ' ')[:-4] # ignore extension\n event.widget.after(75, lambda: self.title.delete(0, tk.END))\n event.widget.after(75, lambda: self.title.insert(0, title))\n # NOTE: on use of after\n # https://stackoverflow.com/questions/29334544/", "def getData(self):\n patterns = self.lfFilePatternsEdit.text().split()\n if set(patterns) == set(self.__defaults[\"pattern\"]):\n patterns = []\n \n return (\n self.newProjectPicker.text(),\n self.lfFileSizeSpinBox.value(),\n patterns,\n )", "def file2Browse(): #Description line\n\n filePath = tkinter.filedialog.askopenfile(filetypes =[(\"All Files\", \"*\")])\n file2Path.set(filePath.name) #Set the value of the File 2 Entry widget to the path to the file you just selected", "def dialog_handler_cb(self, item, data) -> None:\n # Dialog box initialization event\n if item == KDialogInitEvent:\n vs.SetItemText(self.dialog, self.kWidgetID_fileName, self.parameters.excelFileName)\n # vs.SetItemText(self.dialog, self.kWidgetID_imageFolderName, self.settings.imageFolderName)\n\n vs.ShowItem(self.dialog, self.kWidgetID_excelSheetNameLabel, False)\n vs.ShowItem(self.dialog, self.kWidgetID_excelSheetName, False)\n self.show_parameters(False)\n\n vs.EnableItem(self.dialog, self.kWidgetID_importButton, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importNewCount, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importUpdatedCount, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importDeletedCount, False)\n\n elif item == self.kWidgetID_fileName:\n self.parameters.excelFileName = vs.GetItemText(self.dialog, self.kWidgetID_fileName)\n\n elif item == self.kWidgetID_fileBrowseButton:\n result, self.parameters.excelFileName = vs.GetFileN(\"Open Excel file\", \"\", \"xlsm\")\n if result:\n vs.SetItemText(self.dialog, self.kWidgetID_fileName, self.parameters.excelFileName)\n\n elif item == self.kWidgetID_excelSheetName:\n new_excel_sheet_name = vs.GetChoiceText(self.dialog, self.kWidgetID_excelSheetName, data)\n if self.parameters.excelSheetName != new_excel_sheet_name:\n self.parameters.excelSheetName = new_excel_sheet_name\n self.show_parameters(False)\n if data != 0:\n self.show_parameters(True)\n\n elif item == self.kWidgetID_withImageSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withImage, 
data == 0)\n self.parameters.withImageSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withImageSelector, data)\n elif item == self.kWidgetID_withImage:\n self.parameters.pictureParameters.withImage = \"{}\".format(data != 0)\n # elif item == self.kWidgetID_imageFolderName:\n # self.settings.imageFolderName = vs.GetItemText(\n # self.dialog, self.kWidgetID_imageFolderName)\n # elif item == self.kWidgetID_imageFolderBrowseButton:\n # result, self.settings.imageFolderName = vs.GetFolder(\"Select the images folder\")\n # if result == 0:\n # vs.SetItemText(self.dialog, self.kWidgetID_imageFolderName, self.settings.imageFolderName)\n elif item == self.kWidgetID_imageTextureSelector:\n self.parameters.imageTextureSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withImageSelector, data)\n elif item == self.kWidgetID_imageWidthSelector:\n self.parameters.imageWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imageWidthSelector, data)\n elif item == self.kWidgetID_imageHeightSelector:\n self.parameters.imageHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imageHeightSelector, data)\n elif item == self.kWidgetID_imagePositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_imagePosition, data == 0)\n self.parameters.imagePositionSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imagePositionSelector, data)\n elif item == self.kWidgetID_imagePosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_imagePosition, 3)\n if valid:\n self.parameters.pictureParameters.imagePosition = str(value)\n elif item == self.kWidgetID_withFrameSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withFrame, data == 0)\n self.parameters.withFrameSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withFrameSelector, data)\n elif item == self.kWidgetID_withFrame:\n self.parameters.pictureParameters.withFrame = \"{}\".format(data != 0)\n elif item == self.kWidgetID_frameWidthSelector:\n self.parameters.frameWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameWidthSelector, data)\n elif item == self.kWidgetID_frameHeightSelector:\n self.parameters.frameHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameHeightSelector, data)\n elif item == self.kWidgetID_frameThicknessSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameThickness, data == 0)\n self.parameters.frameThicknessSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameThicknessSelector, data)\n elif item == self.kWidgetID_frameThickness:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameThickness, 3)\n if valid:\n self.parameters.pictureParameters.frameThickness = str(value)\n elif item == self.kWidgetID_frameDepthSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepth, data == 0)\n self.parameters.frameDepthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameDepthSelector, data)\n elif item == self.kWidgetID_frameDepth:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameDepth, 3)\n if valid:\n self.parameters.pictureParameters.frameDepth = str(value)\n elif item == self.kWidgetID_frameClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameClass, data == 0)\n self.parameters.frameClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameClassSelector, data)\n elif item == self.kWidgetID_frameClass:\n index, self.parameters.pictureParameters.frameClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_frameClass, 0)\n elif item == self.kWidgetID_frameTextureScaleSelector:\n 
vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScale, data == 0)\n self.parameters.frameTextureScaleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameTextureScaleSelector, data)\n elif item == self.kWidgetID_frameTextureScale:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameTextureScale, 1)\n if valid:\n self.parameters.pictureParameters.frameTextureScale = str(value)\n elif item == self.kWidgetID_frameTextureRotationSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotation, data == 0)\n self.parameters.frameTextureRotationSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameTextureRotationSelector, data)\n elif item == self.kWidgetID_frameTextureRotation:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameTextureRotation, 1)\n if valid:\n self.parameters.pictureParameters.frameTextureRotation = str(value)\n elif item == self.kWidgetID_withMatboardSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withMatboard, data == 0)\n self.parameters.withMatboardSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withMatboardSelector, data)\n elif item == self.kWidgetID_withMatboard:\n self.parameters.pictureParameters.withMatboard = \"{}\".format(data != 0)\n elif item == self.kWidgetID_matboardPositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPosition, data == 0)\n self.parameters.matboardPositionSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardPositionSelector, data)\n elif item == self.kWidgetID_windowWidthSelector:\n self.parameters.windowWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_windowWidthSelector, data)\n elif item == self.kWidgetID_windowHeightSelector:\n self.parameters.windowHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_windowHeightSelector, data)\n elif item == self.kWidgetID_matboardPosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardPosition, 3)\n if valid:\n self.parameters.pictureParameters.matboardPosition = str(value)\n elif item == self.kWidgetID_matboardClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClass, data == 0)\n self.parameters.matboardClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardClassSelector, data)\n elif item == self.kWidgetID_matboardClass:\n index, self.parameters.pictureParameters.matboardClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_matboardClass, 0)\n elif item == self.kWidgetID_matboardTextureScaleSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScale, data == 0)\n self.parameters.matboardTextureScaleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardTextureScaleSelector, data)\n elif item == self.kWidgetID_matboardTextureScale:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardTextureScale, 1)\n if valid:\n self.parameters.pictureParameters.matboardTextureScale = str(value)\n elif item == self.kWidgetID_matboardTextureRotatSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotat, data == 0)\n self.parameters.matboardTextureRotatSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardTextureRotatSelector, data)\n elif item == self.kWidgetID_matboardTextureRotat:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardTextureRotat, 1)\n if valid:\n self.parameters.pictureParameters.matboardTextureRotat = str(value)\n elif item == self.kWidgetID_withGlassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withGlass, data == 0)\n 
self.parameters.withGlassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withGlassSelector, data)\n elif item == self.kWidgetID_withGlass:\n self.parameters.pictureParameters.withGlass = \"{}\".format(data != 0)\n elif item == self.kWidgetID_glassPositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_glassPosition, data == 0)\n self.parameters.glassPositionSelector = vs.GetChoiceText(\n self.dialog, self.kWidgetID_glassPositionSelector, data)\n elif item == self.kWidgetID_glassPosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_glassPosition, 3)\n if valid:\n self.parameters.pictureParameters.glassPosition = str(value)\n elif item == self.kWidgetID_glassClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_glassClass, data == 0)\n self.parameters.glassClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_glassClassSelector, data)\n elif item == self.kWidgetID_glassClass:\n index, self.parameters.pictureParameters.glassClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_glassClass, 0)\n elif item == self.kWidgetID_excelCriteriaSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_excelCriteriaValue, data != 0)\n new_excel_criteria_selector = vs.GetChoiceText(self.dialog, self.kWidgetID_excelCriteriaSelector, data)\n if new_excel_criteria_selector != self.parameters.excelCriteriaSelector:\n self.parameters.excelCriteriaSelector = new_excel_criteria_selector\n self.update_criteria_values(False)\n if data != 0:\n self.update_criteria_values(True)\n else:\n index = vs.GetChoiceIndex(self.dialog, self.kWidgetID_excelCriteriaValue, self.parameters.excelCriteriaValue)\n if index == -1:\n vs.SelectChoice(self.dialog, self.kWidgetID_excelCriteriaValue, 0, True)\n self.parameters.excelCriteriaValue = \"Select a value ...\"\n else:\n vs.SelectChoice(self.dialog, self.kWidgetID_excelCriteriaValue, index, True)\n elif item == self.kWidgetID_excelCriteriaValue:\n self.parameters.excelCriteriaValue = vs.GetChoiceText(self.dialog, self.kWidgetID_excelCriteriaValue, data)\n elif item == self.kWidgetID_symbolCreateSymbol:\n self.parameters.symbolCreateSymbol = \"{}\".format(data != 0)\n selector_index = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_symbolFolderSelector, 0)\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolderSelector, data)\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolder, selector_index == 0 and data == 1)\n elif item == self.kWidgetID_symbolFolderSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolder, data == 0)\n self.parameters.symbolFolderSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_symbolFolderSelector, data)\n elif item == self.kWidgetID_classAssignPictureClass:\n self.parameters.classAssignPictureClass = \"{}\".format(data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClassSelector, data == 1)\n selector_index = vs.GetPopUpChoiceIndex(self.dialog, self.kWidgetID_classPictureClassSelector, self.parameters.classClassPictureSelector)\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClass, selector_index == 0 and data != 0)\n elif item == self.kWidgetID_classPictureClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClass, data == 0)\n self.parameters.classClassPictureSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_classPictureClassSelector, data)\n elif item == self.kWidgetID_classPictureClass:\n index, self.parameters.pictureParameters.pictureClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_classPictureClass, 0)\n elif item == 
self.kWidgetID_classCreateMissingClasses:\n self.parameters.createMissingClasses = \"{}\".format(data == 1)\n elif item == self.kWidgetID_metaImportMetadata:\n self.parameters.metaImportMetadata = \"{}\".format(data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkTitleSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorNameSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkCreationDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkMediaSelector, data == 1)\n # vs.EnableItem(self.dialog, self.kWidgetID_metaTypeSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaRoomLocationSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkSourceSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaRegistrationNumberSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorBirthCountrySelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorBirthDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorDeathDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaDesignNotesSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaExhibitionMediaSelector, data == 1)\n elif item == self.kWidgetID_metaArtworkTitleSelector:\n self.parameters.metaArtworkTitleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkTitleSelector, data)\n elif item == self.kWidgetID_metaAuthorNameSelector:\n self.parameters.metaAuthorNameSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorNameSelector, data)\n elif item == self.kWidgetID_metaArtworkCreationDateSelector:\n self.parameters.metaArtworkCreationDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkCreationDateSelector, data)\n elif item == self.kWidgetID_metaArtworkMediaSelector:\n self.parameters.metaArtworkMediaSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkMediaSelector, data)\n # elif item == self.kWidgetID_metaTypeSelector:\n # self.parameters.metaTypeSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaTypeSelector, data)\n elif item == self.kWidgetID_metaRoomLocationSelector:\n self.parameters.metaRoomLocationSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaRoomLocationSelector, data)\n elif item == self.kWidgetID_metaArtworkSourceSelector:\n self.parameters.metaArtworkSourceSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkSourceSelector, data)\n elif item == self.kWidgetID_metaRegistrationNumberSelector:\n self.parameters.metaRegistrationNumberSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaRegistrationNumberSelector, data)\n elif item == self.kWidgetID_metaAuthorBirthCountrySelector:\n self.parameters.metaAuthorBirthCountrySelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorBirthCountrySelector, data)\n elif item == self.kWidgetID_metaAuthorBirthDateSelector:\n self.parameters.metaAuthorBirthDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorBirthDateSelector, data)\n elif item == self.kWidgetID_metaAuthorDeathDateSelector:\n self.parameters.metaAuthorDeathDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorDeathDateSelector, data)\n elif item == self.kWidgetID_metaDesignNotesSelector:\n self.parameters.metaDesignNotesSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaDesignNotesSelector, data)\n elif item == self.kWidgetID_metaExhibitionMediaSelector:\n 
self.parameters.metaExhibitionMediaSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaExhibitionMediaSelector, data)\n elif item == self.kWidgetID_importIgnoreErrors:\n self.parameters.importIgnoreErrors = \"{}\".format(data != 0)\n vs.ShowItem(self.dialog, self.kWidgetID_importErrorCount, data == 0)\n elif item == self.kWidgetID_importIgnoreExisting:\n self.parameters.importIgnoreExisting = \"{}\".format(data != 0)\n elif item == self.kWidgetID_importIgnoreUnmodified:\n self.parameters.importIgnoreUnmodified = \"{}\".format(data != 0)\n elif item == self.kWidgetID_importButton:\n self.import_pictures()\n vs.SetItemText(self.dialog, self.kWidgetID_importNewCount, \"New Pictures: {}\".format(self.importNewCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importUpdatedCount, \"Updated Pictures: {}\".format(self.importUpdatedCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importDeletedCount, \"Deleted Pictures: {}\".format(self.importDeletedCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importErrorCount, \"Error Pictures: {}\".format(self.importErrorCount))\n\n # This section handles the following cases:\n # - The Dialog is initializing\n # - The name of the workbook file has changed\n if item == self.kWidgetID_fileName or item == self.kWidgetID_fileBrowseButton or item == KDialogInitEvent:\n self.set_workbook()\n\n # The image selection has changed\n if item == self.kWidgetID_withImageSelector or item == self.kWidgetID_withImage or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withImageSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withImage) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_imageWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageTextureLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageTextureSelector, state)\n\n # The frame selection has changed\n if item == self.kWidgetID_withFrameSelector or item == self.kWidgetID_withFrame or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withFrameSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withFrame) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_frameWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThicknessLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThicknessSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThickness, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepth, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClass, state)\n 
vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScaleLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScaleSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScale, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotationLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotationSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotation, state)\n\n # The matboard selection has changed\n if item == self.kWidgetID_withMatboardSelector or item == self.kWidgetID_withMatboard or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withMatboardSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withMatboard) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_windowWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClass, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScaleLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScaleSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScale, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotatLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotatSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotat, state)\n\n # The glass selection has changed\n if item == self.kWidgetID_withGlassSelector or item == self.kWidgetID_withGlass or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withGlassSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withGlass) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_glassPositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassPositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassPosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClass, state)\n\n # After the event has been handled, update some of the import validity settings accordingly\n self.parameters.imageValid = ((self.parameters.withImageSelector == \"-- Manual\" and self.parameters.pictureParameters.withImage == \"True\") or\n self.parameters.withImageSelector != \"-- Manual\") and \\\n (self.parameters.imageTextureSelector != \"-- Select column ...\") and \\\n (self.parameters.imageWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.imageHeightSelector != \"-- Select column ...\")\n\n self.parameters.frameValid = ((self.parameters.withFrameSelector == \"-- Manual\" and self.parameters.pictureParameters.withFrame == \"True\") or\n self.parameters.withFrameSelector != \"-- Manual\") and \\\n (self.parameters.frameWidthSelector != \"-- Select column ...\") and \\\n 
(self.parameters.frameHeightSelector != \"-- Select column ...\")\n\n self.parameters.matboardValid = ((self.parameters.withMatboardSelector == \"-- Manual\" and self.parameters.pictureParameters.withMatboard == \"True\") or\n self.parameters.withMatboardSelector != \"-- Manual\") and \\\n (self.parameters.windowWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.windowHeightSelector != \"-- Select column ...\")\n\n self.parameters.glassValid = ((self.parameters.withGlassSelector == \"-- Manual\" and\n self.parameters.pictureParameters.withGlass == \"True\") or self.parameters.withGlassSelector != \"-- Manual\")\n\n self.parameters.criteriaValid = \\\n (self.parameters.excelCriteriaSelector != \"-- Select column ...\" and self.parameters.excelCriteriaValue != \"Select a value ...\")\n\n self.parameters.importValid = (self.parameters.imageValid or self.parameters.frameValid) and self.parameters.criteriaValid\n\n vs.EnableItem(self.dialog, self.kWidgetID_importButton, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importNewCount, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importUpdatedCount, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importDeletedCount, self.parameters.importValid)", "def file1Browse(): #Description line\n\n filePath = tkinter.filedialog.askopenfile(filetypes =[(\"All Files\", \"*\")])\n file1Path.set(filePath.name) #Set the value of the File 1 Entry widget to the path to the file you just selected", "def __setInputFilepath(self):\r\n\r\n\r\n input_filepath = tkFileDialog.askopenfilename(title=\"Select a file\")\r\n self.__input_filepath.setEntryText(input_filepath)\r\n self.__presetHeaderDefine()\r\n self.__presetArrayName()", "def pickOrderFile(self):\n order = self.openFileNameDialog()\n self.label_orderFile.setText(order)\n self.orderFile = order\n return order", "def browse_csv(inst):\n try:\n from tkinter import filedialog as fd\n except ImportError as err:\n msgbox.showerror(\"Error\",f\"Error loading module : {err}\")\n else:\n inst.temp_path=\"\"\n inst.filepath=fd.askopenfilename(title=\"Select .csv file\",initialdir=inst.fDir,filetypes=[(\"CSV files\",\".csv\")])\n global copy_path\n copy_path=inst.filepath\n if inst.filepath:\n inst.temp_path=copy.deepcopy(inst.filepath)\n inst.entry2.configure(state=\"active\")\n inst.entry2.delete(0,tk.END)\n inst.entry2.insert(0,inst.temp_path)\n inst.entry2.configure(state=\"readonly\")\n inst.entry3.configure(state=\"active\")\n inst.entry3.delete(0,tk.END)\n inst.excel_sheet_name.set(None)\n inst.entry3_sheet_name.delete(0,tk.END)\n inst.entry4_username.delete(0,tk.END)\n inst.entry4_password.delete(0,tk.END)\n inst.entry4_database.delete(0,tk.END)\n inst.entry4_table.delete(0,tk.END)\n else:\n inst.entry2.configure(state=\"active\")\n inst.entry2.delete(0,tk.END)\n inst.entry2.insert(0,inst.temp_path)\n inst.entry2.configure(state=\"readonly\")\n inst.entry3.configure(state=\"active\")\n inst.entry3.delete(0,tk.END)\n inst.excel_sheet_name.set(None)\n inst.entry3_sheet_name.delete(0,tk.END)\n inst.entry4_username.delete(0,tk.END)\n inst.entry4_password.delete(0,tk.END)\n inst.entry4_database.delete(0,tk.END)\n inst.entry4_table.delete(0,tk.END)", "def make_filedialog_widgets(self):\n\n file_frame = tk.Frame(self.root)\n file_frame.grid(row=3, column=0, sticky='ew', **self.frame_padding)\n file_frame.columnconfigure(0, weight=3)\n file_frame.columnconfigure(1, weight=1)\n self.home_dir = str(pathlib.Path.home())\n 
self.file_name = tk.StringVar(self.root, value=self.home_dir)\n\n self.file_label = tk.Entry(\n file_frame, textvariable=self.file_name, font=self.font, width=35)\n self.file_label.config(state=\"disabled\")\n self.file_label.grid(row=0, column=0, ipady=self.entry_ipady)\n\n self.choose_button = tk.Button(file_frame, text=\"Choose file\",\n font=self.font, command=self.choose_file)\n self.choose_button.grid(row=0, column=1)", "def get_data_loader(self, CurWindow):\n if \"Select PATH with data\" in CurWindow.dataloader_list.currentText():\n self.data_loader_path = QFileDialog.getExistingDirectory(self, \"Select your trainingdata path\", os.path.expanduser('~'))\n elif \"Select FILE with data\" in CurWindow.dataloader_list.currentText():\n self.data_loader_path = QFileDialog.getOpenFileName(self, \"Select your data loader script\", os.path.expanduser('~'), 'CSV(*.csv);; Python(*.py)')[0]\n\n if \".csv\" in self.data_loader_path:\n print(\"CSV file selected\")\n self.CSVDataloaderWindow()\n else:\n print(\"No CSV file\")\n self.set_data_loader_label(CurWindow)", "def selection(self):\n\n response = self.dialog.run()\n\n if response == Gtk.ResponseType.ACCEPT:\n if self.multiple:\n selection = self.dialog.get_filenames()\n else:\n selection = self.dialog.get_filename()\n else:\n selection = \"\"\n\n return selection", "def open(self):\n self.ui.textLog.clear()\n self.filename = QtWidgets.QFileDialog.getOpenFileName(\n filter=\"Instance Document (*.XML *.XBRL)\"\n )[0]\n if self.filename != \"\":\n self.status.setText(self.filename)\n else:\n self.reset_status()", "async def new():\n dialogue = tk.AsyncToplevel(manager)\n dialogue.title(self.cur_locale.menu.fileselect.new)\n dialogue.protocol(\"WM_DELETE_WINDOW\", nothing)\n filename = tk.AsyncEntry(dialogue)\n filename.pack()\n\n async def cb():\n if filename.get() != len(filename.get()) * \".\":\n for i in r'\\/:*?\"<>|':\n if i in filename.get():\n button.config(\n text=self.cur_locale.menu.fileselect.button.invalid\n )\n break\n else:\n manager.file = manager.dir / filename.get()\n await manager.destroy()\n else:\n button.config(\n text=self.cur_locale.menu.fileselect.button.special\n )\n\n # Confirm button\n button = tk.AsyncButton(\n dialogue,\n text=self.cur_locale.menu.fileselect.button.default,\n callback=cb,\n )\n button.pack(fill=tk.X)\n\n # Cancel button\n tk.AsyncButton(\n dialogue,\n text=self.cur_locale.menu.fileselect.button.cancel,\n callback=dialogue.destroy,\n ).pack(fill=tk.X)\n await manager.wait_window(dialogue)", "def browse_cpd_file(self):\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item from the index\n selected_item = idx.model().itemFromIndex(idx)\n # get the parent of the selected item\n parent = selected_item.parent()\n # set the file filter\n file_filter = \"Excel (*.xls *.xlsx)\"\n # get the file path from the selected item\n file_path = os.path.split(str(idx.data()))[0]\n file_path = os.path.join(file_path,\"\")\n # dialog for open file\n file_path = os.path.join(file_path, \"\")\n new_file_path = QtWidgets.QFileDialog.getOpenFileName(caption=\"Choose a CPD results file ...\",\n directory=file_path, filter=file_filter)[0]\n # update the model\n if len(str(new_file_path)) > 0:\n new_file_path = QtCore.QDir.toNativeSeparators(str(new_file_path))\n new_file_parts = os.path.split(str(new_file_path))\n parent.child(selected_item.row(), 1).setText(new_file_parts[1])", "def read_input(self, input_cls, filename, **kwargs):\n input_inst = input_cls()\n 
input_inst.read_input(filename)\n return input_inst.get_data()", "def __init__(self, defaultClassName, defaultFile, defaultPath,\n parent=None):\n super(NewDialogClassDialog, self).__init__(parent)\n self.setupUi(self)\n \n self.pathnamePicker.setMode(E5PathPickerModes.DirectoryMode)\n \n self.okButton = self.buttonBox.button(QDialogButtonBox.Ok)\n self.okButton.setEnabled(False)\n \n self.classnameEdit.setText(defaultClassName)\n self.filenameEdit.setText(defaultFile)\n self.pathnamePicker.setText(defaultPath)\n \n msh = self.minimumSizeHint()\n self.resize(max(self.width(), msh.width()), msh.height())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a flag referring to the number of repetitions in the poker group
def _get_same_number_flag(self): flag = 0 for i in range(5): for j in range(i+1, 5): if self.poker_group[i].num == self.poker_group[j].num: flag += 1 return flag
[ "def is_repetition(actions):\n if len(actions) < 9:\n return False\n for i in range(3, round(len(actions) / 3), 3):\n if actions[-3 * i:].count(actions[-i:]) >= 3:\n return True\n return False", "def test_repetition(token):\n \n parsed = []\n i = 0\n tmp = ''\n contains_repetiton = False\n while i < len(token):\n if i+1 < len(token) and token[i] == token[i+1] and i+2 < len(token) and token[i] == token[i+2]:\n contains_repetiton = True\n if tmp :\n parsed.append((tmp,False))\n c = token[i]\n i = i+3\n while i < len(token) and token[i] == c:\n i += 1\n parsed.append((c,True))\n tmp = ''\n else:\n tmp += token[i]\n i += 1\n if tmp :\n parsed.append((tmp, False))\n if contains_repetiton :\n return parsed\n else:\n return False", "def check(self, card_hits):\n comparison_list = [x and y for x, y in zip(card_hits, self.mask)] #Pairs the 'card_hits' with the prize pattern mask, then proceeds to create a boolean list indicating if each hit is present in the prize pattern \n return comparison_list == self.mask #if the comparison_list is equal to the prize pattern, the card_list follows the corresponding pattern\n #raise NotImplementedError(\">>> your code goes here <<<\")", "def find_repeat_error(self, add=True):\n # In case of repetition of error\n if add is True:\n self.repeat_problem += 1\n\n # In case of no repetition action, set counter to 0\n if add is not True:\n self.repeat_problem = 0\n self.print_obs = True", "def checkForWin(self, player):\n for p in self.getPlayers():\n if p.get_name() == player:\n if p.how_many_captured() >= 6:\n return True", "def is_repeating_playlist(self):\n previous_songs = {self.name: None}\n repeating_flag = False\n\n next_song = self.next\n while next_song is not None:\n if next_song.name in previous_songs.keys():\n repeating_flag = True\n break\n else:\n previous_songs[next_song.name] = None\n print(previous_songs)\n next_song = next_song.next\n return repeating_flag", "def getnumplayed(self):\n if self.verbose:\n print(self.name, 'Getting the number of cards played')\n if self.log is not None:\n self.log.write(self.name + ' Getting the number of cards played\\n')\n return self.numplayed", "def prob_estimation(n):\n truecount = 0\n for i in range(n):\n test = gen_rand_23()\n if has_duplicates(test):\n truecount += 1\n return truecount", "def is_repeated(self):\n raise NotImplementedError()", "def hasGroupsSizeX(self, deck):\r\n def gcd( a, b ):\r\n while b:\r\n a, b = b, a % b\r\n return a\r\n \r\n def GCD( nums ):\r\n res = nums[0]\r\n for i in range(1, len(nums)):\r\n res = gcd( res, nums[i])\r\n return res\r\n \r\n \r\n d = {}\r\n for c in deck:\r\n d[c] = d.get( c, 0 ) + 1\r\n values = list( d.values() ) \r\n r = GCD( values )\r\n if r == 1:\r\n return False\r\n else:\r\n return True", "def repeats(self):\n if self._repeats is None:\n for c in self.conditions:\n if c.repeats:\n self._repeats = True\n break\n else:\n self._repeats = False \n\n return self._repeats", "def isHacktoberfestCompleted(countOfPR):\n\n if (countOfPR < 4):\n print(\"You have incomplete PR's, let me do it for you\")\n while(countOfPR < 4):\n countOfPR = makePR(countOfPR)\n time.sleep(2)\n print(\"\\nYou have successfully completed 4 PR's :)\")\n return True\n return False", "def has_twopair(self):\n \n ranks = [ ]\n c=0\n for card in self.cards:\n ranks.append(card.rank)\n for i in ranks:\n if ranks.count(i) == 2:\n c+=1\n if c == 4:\n return True\n return False", "def repeat_ties(self) -> bool:\n return self._repeat_ties", "def repeat_ties(self) -> typing.Optional[bool]:\n return 
self._repeat_ties", "def randomsevendivision(somelist):\n marker = False\n num = random.choice(somelist)\n print(num)\n if num % 7 == 0:\n marker = True\n else:\n marker = False\n\n return marker", "def completed_by_players(self):\n finished_players = 0\n for player in self.players:\n if len(player.guess) == len(self.sequence):\n finished_players += 1\n return finished_players == len(self.players)", "def check(self, card):\n\n if not self.hand:\n self.state = -1\n self.game.report(self.idx, self.state, None)\n return -1\n\n res = [c for c in self.hand if c['num'] == card['num']]\n if len(res) > 0:\n self.pending = res\n self.state = 1\n return 1\n\n res = [c for c in self.hand if c['suit'] == card['suit']]\n if len(res) > 0:\n self.pending = res\n self.state = 1\n return 1\n\n return 0", "def timetrial_tiebreaker(tt):\n\n return (\n tt.result != '', # Done is better\n tt.state != 'initial', # Initiated is better\n tt.created_time, # Created later is better\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given source context can be a possible match, judging only by the files of both contexts.
def isFileSuitable(self, src_ctx): return src_ctx.file in self.files or self.isLinkerOptimizationCandidate(src_ctx)
[ "def _check_files_match(left, right, base_dir):\n with open(os.path.join(base_dir, left), 'r') as left_file:\n with open(os.path.join(base_dir, right), 'r') as right_file:\n result = (left_file.read() == right_file.read())\n\n return result", "def isValidContext(self, context: docking.ActionContext) -> bool:\n ...", "def agrees(x, y, identsy, mode):\n if mode not in [\"loose\", \"strict\"]:\n raise Exception(\"Must set a mode to either loose or strict!\")\n # If the two CONS SUBS have the same source and target\n elif x == y:\n return True\n # Check if they agree with inferred relations found via IDENT chains\n elif mode == \"loose\":\n # Get inferred sources and targets\n source_idsy, target_idsy = infer_con_sub(y.get_source(), y.get_target(), identsy)\n\n # Check the intersection of both sets of inferred arguments, to infer agreement\n if x.get_source().id_num in source_idsy and x.get_target().id_num in target_idsy:\n return True\n else:\n return False\n else:\n return False", "def _entries_have_same_source(\n entry1: Entry, entry2: Entry, opened_files: Set[str]) -> bool:\n if entry1.source == entry2.source:\n return True\n\n opened_files.add(entry1.source)\n opened_files.add(entry2.source)\n return filecmp.cmp(entry1.source, entry2.source)", "def _contextMatch(self, pattern, string, sets):\n\n # pattern is rule left or right context\n # string is itape\n\n if len(pattern) == 0:\n # rule context is none, so match\n return True\n elif len(string) == 0:\n # rule context is not none and itape context is none\n return False\n elif len(pattern) > 1 and pattern[1] == \"*\":\n r = self._contextMatch(pattern[2:], string, sets)\n tmp = pattern[2:]\n tmp.insert(0, pattern[0])\n s = self._contextMatch(tmp, string, sets)\n t = self._itemMatch(pattern[0], string[0], sets) and self._contextMatch(pattern, string[1:], sets)\n return (r or s or t)\n elif len(pattern) > 1 and pattern[1] == \"+\":\n r = self._itemMatch(pattern[0], string[0], sets) \n tmp = pattern[2:]\n tmp.insert(0, \"*\")\n tmp.insert(0, pattern[0])\n s = self._contextMatch(tmp, string[1:], sets)\n return (r and s)\n elif self._itemMatch(pattern[0], string[0], sets):\n return self._contextMatch(pattern[1:], string[1:], sets)\n else:\n return False", "def validate(context, permissive=True):\n # Must match at least 1 schema\n matches = classify_context(context, permissive=permissive)\n if len(matches) > 0:\n return True\n else:\n if permissive:\n return False\n else:\n raise ValueError(\"Unable to validate context\")", "def check_files():\n global font_source\n # We need to perform a directory traversal to find .ttf .otf or .sfd.\n # Any one will do. 
[We are so kind arn't we]\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if font_reg_exp.search(filename):\n if font_sfd_regx.search(filename):\n font_source = True\n return 0\n return -1 # No need to go into sub directories\n return -1", "def src_matches(self, ref):\n return bool(C.git_refspec_src_matches(self._refspec, to_bytes(ref)))", "def is_relevant_sourcekey(self, sourcekey: str) -> bool:\n ...", "def _check_match(self, file_desc) -> bool:\n\n python_version = file_desc.get(\"python_version\", \"\")\n if python_version in DEPRECATED_PYTHON:\n return True\n\n # source dist: never filter out\n pt = file_desc.get(\"packagetype\")\n if pt == \"sdist\":\n return False\n\n # Windows installer\n if pt in self._packagetypes:\n return True\n\n fn = file_desc[\"filename\"]\n for i in self._patterns:\n if i in fn:\n return True\n\n return False", "def verify_blocks(rst_file, source_files, source_dict):\n\n for block_type, source_type in source_dict.items():\n # Extract code blocks from rst file.\n blocks = get_blocks(rst_file, block_type)\n for line, block in blocks:\n # Check if block is in the list of files of correct type.\n block_in_source(line, block, [sf for sf in source_files\\\n if path.splitext(sf)[-1] == source_type])", "def _is_source(file_path):\n _, ext = os.path.splitext(file_path)\n return ext in KEEP", "def is_context_spec(mapping):\n return is_context(mapping) or (isinstance(mapping, str) and bool(PIPELINE_CONTEXT_RE.match(mapping)))", "def issourcefile(fname):\n return hasextension(fname) and True not in [fname.endswith(x) for x in ['.o','.exe','.a'] ]", "def _check_overlapping_sources(self, targets_by_source):\n overlapping_sources = set()\n for s in targets_by_source:\n if len(targets_by_source[s]) > 1:\n overlapping_sources.add(s)\n self.context.log.error(\n \"Error: source file %s included in multiple targets %s\" % (s, targets_by_source[s]))", "def is_target(src_file, src_file_extensions):\n return any(src_file.endswith(x) for x in src_file_extensions)", "def _dir_filter(self, node, *args, **kwargs):\n try:\n node_filters = self.settings.include_paths\n if not isinstance(node_filters, list):\n node_filters = [node_filters]\n node_filters = [self.site.content.node_from_relative_path(f)\n for f in node_filters]\n except AttributeError:\n node_filters = None\n result = any(node.source == f.source or\n node.source.is_descendant_of(f.source)\n for f in node_filters if f) \\\n if node_filters else True\n return result", "def svn_fs_check_related(id1: \"svn_fs_id_t\", id2: \"svn_fs_id_t\") -> \"svn_boolean_t\":\n return _fs.svn_fs_check_related(id1, id2)", "def _agrees(x, y, identsy):\n # If the two relations have the same source and target\n if x == y:\n return True\n # Check if they agree with inferred relations found via IDENT chains\n else:\n # Get inferred sources and targets\n head_idsy, tail_idsy = _infer_structural_rel(y.get_head(), y.get_tail(), identsy)\n\n # Check the intersection of both sets of inferred arguments, to infer agreement\n if x.get_head().id_doc_num in head_idsy and set([t.id_doc_num for t in x.get_tail()]).issubset(tail_idsy):\n return True\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given source context can be a possible match for a linker optimized version of our binary function.
def isLinkerOptimizationCandidate(self, src_ctx): raise NotImplementedError("Subclasses should implement this!")
[ "def isFileSuitable(self, src_ctx):\n return src_ctx.file in self.files or self.isLinkerOptimizationCandidate(src_ctx)", "def test_is_source_need_build_return_true(self, mock_load, mock_isfile):\n mock_load.return_value = None, _CC_NAME_TO_MODULE_INFO\n mod_info = native_module_info.NativeModuleInfo()\n mock_isfile.return_value = False\n self.assertTrue(mod_info._is_source_need_build(\n _CC_NAME_TO_MODULE_INFO['multiarch']))", "def detect(source):\r\n source = source.replace(' ', '')\r\n if re.search(r'eval\\(function\\(h,u,n,t,e,r', source):\r\n return True\r\n else:\r\n return False", "def test_is_source_need_build_return_false(self, mock_load, mock_isfile):\n mock_load.return_value = None, _CC_NAME_TO_MODULE_INFO\n mod_info = native_module_info.NativeModuleInfo()\n self.assertFalse(mod_info._is_source_need_build(\n _CC_NAME_TO_MODULE_INFO['multiarch1']))\n mock_isfile.return_value = True\n self.assertFalse(mod_info._is_source_need_build(\n _CC_NAME_TO_MODULE_INFO['multiarch']))", "def is_relevant_sourcekey(self, sourcekey: str) -> bool:\n ...", "def main():\n check_if_point_source()", "def matches(source: t.Any) -> t.Callable[[t.Any], bool]:\n return lambda obj: pyd.is_match(obj, source)", "def is_statically_linked(self, func, address=None):\n # Find a function with the given name.\n for config_func in self.json.get('functions', []):\n if config_func['name'] == func:\n break\n else: # nobreak\n raise AssertionError('no such function: {}'.format(func))\n\n if config_func['fncType'] != 'staticallyLinked':\n return False\n\n if address is not None and _string_to_int(config_func.get('startAddr')) != address:\n return False\n\n return True", "def has_ldflags(argv):\n link_flags = set(('-ldflags', '-linkmode', '-extld', '-extldflags'))\n if set(argv) & link_flags:\n return True\n for arg in argv:\n if arg.startswith('-ldflags=') or arg.startswith('-linkmode='):\n return True\n return False", "def is_source_op(self, op):\n op_handler = self._op_handler_dict[op.type]\n return op_handler.is_source_op", "def is_referenced(target):", "def is_valid_compiler(self, _): # pylint:disable=no-self-use\n return True", "def is_referenced_in_insn(root_insn):\n for insn in get_inlined_insns(root_insn):\n if insn.kind == ir.PHI:\n for orig_bb, _ in insn.pairs:\n if bb == orig_bb:\n return True\n else:\n return False", "def is_source_obj(self, obj):\n try:\n if os.path.basename(inspect.getfile(obj)).startswith(FILE_PREFIX):\n return True\n except TypeError:\n pass\n\n return False", "def check_func (self, func,\r\n headers=None, include_dirs=None,\r\n libraries=None, library_dirs=None,\r\n decl=0, call=0):\r\n\r\n self._check_compiler()\r\n body = []\r\n if decl:\r\n body.append(\"int %s ();\" % func)\r\n body.append(\"int main () {\")\r\n if call:\r\n body.append(\" %s();\" % func)\r\n else:\r\n body.append(\" %s;\" % func)\r\n body.append(\"}\")\r\n body = string.join(body, \"\\n\") + \"\\n\"\r\n\r\n return self.try_link(body, headers, include_dirs,\r\n libraries, library_dirs)", "def has_build_target(atest_module_info, rel_path):\n return any(\n is_source_under_relative_path(mod_path, rel_path)\n for mod_path in atest_module_info.path_to_module_info)", "def is_function_egen(fnc):\n return fnc in ROUNDS\\\n or fnc in SHA3 \\\n or fnc in ESTREAM\\\n or fnc in BLOCK", "def is_binary_executable(self, filename):\n filename = self._root_dir + '/' + filename\n (sourcetype, filetype) = self.get_filetype(filename)\n if sourcetype:\n if (\"ELF\" in filetype and \"rror\" not in filetype):\n return True\n else:\n 
elf_pattern = \"\\x7fELF\".encode()\n bin_head = FileUtil(filename).getdata('rb', 4)\n if (elf_pattern == bin_head[0:4] and\n FileUtil(filename).isexecutable()):\n return True\n return False", "def CheckUseIntelCompiled(myflags: Dict[str, Any]) -> bool:\n if myflags['hpcc_use_intel_compiled_hpl']:\n return myflags['hpcc_math_library'] == HPCC_MATH_LIBRARY_MKL\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize all storage arrays based on # of stars and targets
def initializeStorageArrays(self): self.DRM = [] OS = self.OpticalSystem SU = self.SimulatedUniverse allModes = OS.observingModes num_char_modes = len( list(filter(lambda mode: "spec" in mode["inst"]["name"], allModes)) ) self.fullSpectra = np.zeros((num_char_modes, SU.nPlans), dtype=int) self.partialSpectra = np.zeros((num_char_modes, SU.nPlans), dtype=int) self.propagTimes = np.zeros(self.TargetList.nStars) * u.d self.lastObsTimes = np.zeros(self.TargetList.nStars) * u.d self.starVisits = np.zeros( self.TargetList.nStars, dtype=int ) # contains the number of times each star was visited self.starRevisit = np.array([]) self.starExtended = np.array([], dtype=int) self.lastDetected = np.empty((self.TargetList.nStars, 4), dtype=object)
[ "def initializeStorageArrays(self):\r\n\r\n self.DRM = []\r\n self.fullSpectra = np.zeros(self.SimulatedUniverse.nPlans, dtype=int)\r\n self.partialSpectra = np.zeros(self.SimulatedUniverse.nPlans, dtype=int)\r\n self.propagTimes = np.zeros(self.TargetList.nStars)*u.d\r\n self.lastObsTimes = np.zeros(self.TargetList.nStars)*u.d\r\n self.starVisits = np.zeros(self.TargetList.nStars, dtype=int)#contains the number of times each star was visited\r\n self.starRevisit = np.array([])\r\n self.starExtended = np.array([], dtype=int)\r\n self.lastDetected = np.empty((self.TargetList.nStars, 4), dtype=object)", "def _set_arrays_alloc(self):\n\n if not self.v_inplace:\n self.v = np.zeros(self.n)\n\n if not self.e_inplace:\n self.e = np.zeros(self.n)", "def _init_starting_arrays(self):\n\t\tself.Xf = []\n\t\tself.X = self.data['values'].values.tolist()\n\t\tself._init_level_array()\n\t\tself._init_trend_array()\n\t\tself._init_seasons_array()", "def init_everything_for_multiple_trials(self, ):\n # Produces scrambled experimental data set\n self._ev = self.exp\n self._ev = self.scramble_exp_data(self._ev)\n\n # Finds a spatial PDF for the background, based on the experimental\n # Sin Declination distribution\n bckg_spline_space = self.create_space_bkg_pdf(self._ev)\n self.bckg_spline_space = bckg_spline_space\n\n # Assigns a weight to each source_path, equal to 1/(r^2) for distance r\n self.sources['weight_distance'] = self.sources['distance']**(-2.)\n\n # If accounting for energy, produces Energy PDFs\n if self.UseEnergy is True:\n print('Initialising Energy PDFs')\n self.generate_spline_dict_for_all_gamma(self.exp, self.mc)\n self.generate_bkg_weight_dict_for_all_gamma(self._ev)\n\n # If using time, calculates Time weights for the source_path\n if self.UseTime is True:\n self.compute_source_weights_time()\n self.init_random_generator_pdf()", "def init_galaxy(self):\n\n self.logger.info('Pre-processing for galaxies started.')\n if self.params['gal_type'] == 0:\n # Analytic profile - sersic disk\n # Read distribution of sizes (fwhm, converted to scale radius)\n\n fits = fio.FITS(self.params['gal_sample'])[-1]\n pind_list = np.ones(fits.read_header()['NAXIS2']).astype(bool) # storage list for original index of photometry catalog\n for filter in filter_flux_dict.keys(): # Loop over filters\n mag_dist = fits.read(columns=filter_flux_dict[filter]) # magnitudes\n pind_list = pind_list&(mag_dist<99)&(mag_dist>0) # remove bad mags\n\n size_dist = fits.read(columns='fwhm')\n size_dist = self.fwhm_to_hlr(size_dist)\n pind_list = pind_list&(size_dist*2.*0.06/wfirst.pixel_scale<16) # remove large objects to maintain 32x32 stamps\n pind_list = np.where(pind_list)[0]\n self.obj_list = []\n self.pind_list = []\n for i in range(self.params['gal_n_use']):\n # Create unique object list of length gal_n_use, each with unique size.\n ind = pind_list[int(self.gal_rng()*len(pind_list))]\n self.pind_list.append(ind)\n self.obj_list.append(galsim.Sersic(self.params['disk_n'], half_light_radius=1.*size_dist[ind]))\n else:\n pass # cosmos gal not guaranteed to work. 
uncomment at own risk \n # # Cosmos real or parametric objects\n # if self.params['gal_type'] == 1:\n # use_real = False\n # gtype = 'parametric'\n # else:\n # use_real = True\n # gtype = 'real'\n\n # # Load cosmos catalog\n # cat = galsim.COSMOSCatalog(self.params['cat_name'], dir=self.params['cat_dir'], use_real=use_real)\n # self.logger.info('Read in %d galaxies from catalog'%cat.nobjects)\n\n # rand_ind = []\n # for i in range(self.params['gal_n_use']):\n # # Select unique cosmos index list with length gal_n_use.\n # rand_ind.append(int(self.gal_rng()*cat.nobjects))\n # # Make object list of unique cosmos galaxies\n # self.obj_list = cat.makeGalaxy(rand_ind, chromatic=True, gal_type=gtype)\n\n if isinstance(self.params['gal_dist'],string_types):\n # Provided an ra,dec catalog of object positions.\n radec_file = fio.FITS(self.params['gal_dist'])[-1].read()\n self.radec = []\n self.gind_list = []\n for i in range(self.n_gal):\n # Select a random ra,dec position n_gal times.\n self.gind_list.append(i) # Save link to unique object index\n # Allows removal of duplicates - doesn't matter for postage stamp sims?\n self.radec.append(galsim.CelestialCoord(radec_file['ra'][i]*galsim.degrees,radec_file['dec'][i]*galsim.degrees))\n else:\n raise ParamError('Bad gal_dist filename.')\n\n self.logger.debug('Pre-processing for galaxies completed.')\n\n return radec_file['ra'][self.gind_list],radec_file['dec'][self.gind_list]", "def initialize(self):\n F = len(self.inputs[0])\n min_val = np.min(self.inputs)\n max_val = np.max(self.inputs)\n \n np.random.seed(1)\n if self.init=='random':\n # create 3D array storing initial models\n self.M = np.random.uniform(min_val, max_val, size=(self.J*self.K, F))\n self.M = np.array(self.M)", "def _initial_target_setup(self):\n # Targets\n self.target = []\n n_targets = self.config['simulation']['n_targets']\n for target in self.config['simulation']['target_building_id']:\n info = {}\n info['target_id'] = target\n info['probability_goals'] = 1 / n_targets\n info['progress_goals'] = 0\n info['probability_goals_indoor'] = 1 / n_targets\n info['progress_goals_indoor'] = 0\n info['defence_perimeter'] = 0\n\n building_info = self.building_info(target)\n info['position'] = building_info['position']\n info['perimeter'] = building_info['perimeter']\n info['area'] = building_info['area']\n info['n_floors'] = building_info['n_floors']\n info['n_defence_perimeter'] = building_info['perimeter'] / (\n self.config['ugv']['defense_radius'] * 2)\n\n self.target.append(info)", "def __init__(self):\n self.array = None\n self.target = None", "def __init__(self):\n log.debug(\"Initialized an empty SensorTransformations class.\")\n\n self.d415_rgb = np.array([])\n self.d415_depth = np.array([])\n self.d435_rgb = np.array([])\n self.d435_depth = np.array([])\n self.ps_rgb = np.array([])\n self.ps_depth = np.array([])\n self.cham_rgb = np.array([])", "def setup_mult_dirs(self):\n # setup dirs to hold the original and multiplier model input quantities\n set_dirs = []\n# if len(self.pp_props) > 0 or len(self.zone_props) > 0 or \\\n# len(self.grid_props) > 0:\n if self.pp_props is not None or \\\n self.zone_props is not None or \\\n self.grid_props is not None or\\\n self.const_props is not None:\n set_dirs.append(self.arr_org)\n set_dirs.append(self.arr_mlt)\n # if len(self.bc_props) > 0:\n if self.bc_props is not None:\n set_dirs.append(self.bc_org)\n for d in set_dirs:\n d = os.path.join(self.m.model_ws,d)\n self.log(\"setting up '{0}' dir\".format(d))\n if os.path.exists(d):\n if 
self.remove_existing:\n shutil.rmtree(d)\n else:\n raise Exception(\"dir '{0}' already exists\".\n format(d))\n os.mkdir(d)\n self.log(\"setting up '{0}' dir\".format(d))", "def initialize(self, arms: List[str]):\n pass", "def __init__(self,sizes):\r\n\r\n self.numberLayers = len(sizes)\r\n \r\n #Initialization of weights and biases\r\n self.biases = [np.random.randn(y,1) for y in sizes[1:]]\r\n self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1], sizes[1:])]", "def __init__(self, initial_demands = []):\n self.explicitly_demanded = set()\n self.nodes = {}\n self.provided = set()\n self.parent_ptrs = {}\n for demand in initial_demands:\n self.add_new_demand(demand)", "def __init__(self, sizes):\n self.num_layers = len(sizes)\n self.sizes = sizes\n self.biases = [np.random.randn(y,1) for y in sizes[1:]]\n self.weights = [np.random.randn(y, x) \n for x, y in zip(sizes[:-1], sizes[1:])]", "def _initialize_splits(self):\n # There is one set of split weighs for each layer of each tree; thus the matrices.\n self.split_weight = []\n self.split_bias = []\n self.split_strength = []\n for depth in range(self.max_depth):\n self.split_weight += [_zeros(2 ** depth, self.num_predictors, self.num_trees)]\n self.split_bias += [_zeros(2 ** depth, 1, self.num_trees)]\n self.split_strength += [_zeros(2 ** depth, 1, self.num_trees)]", "def _initialize_cache(self):\n np = self.dest.get_number_of_particles()\n self.particle_cache = [ LongArray() for i in range(np) ]", "def initialize(self, particles):\n self.particles = particles\n self.arrays = particles.arrays\n\n # setup the cell manager\n self._set_dirty()\n self._compute_block_size()\n self._setup_cell_manager()", "def reset_all(self):\n self.reset_memory()\n self.reset_traces()\n self.reset_tags()\n\n self.prev_obs = np.zeros(self.nx_inst)\n self.prev_qa = 0\n self.prev_max = 0.", "def setup_after_space_initialization(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Choose next telescope target based on star completeness and integration time.
def choose_next_target(self, old_sInd, sInds, slewTimes, t_dets): Comp = self.Completeness TL = self.TargetList TK = self.TimeKeeping # reshape sInds sInds = np.array(sInds, ndmin=1) # 1/ Choose next telescope target comps = Comp.completeness_update( TL, sInds, self.starVisits[sInds], TK.currentTimeNorm.copy() ) # add weight for star revisits ind_rev = [] if self.starRevisit.size != 0: dt_rev = self.starRevisit[:, 1] * u.day - TK.currentTimeNorm.copy() ind_rev = [ int(x) for x in self.starRevisit[dt_rev < 0 * u.d, 0] if x in sInds ] f2_uv = np.where( (self.starVisits[sInds] > 0) & (self.starVisits[sInds] < self.nVisitsMax), self.starVisits[sInds], 0, ) * (1 - (np.in1d(sInds, ind_rev, invert=True))) # f3_uv = np.where( # (self.sInd_detcounts[sInds] > 0) # & (self.sInd_detcounts[sInds] < self.max_successful_dets), # self.sInd_detcounts[sInds], # 0, # ) * (1 - (np.in1d(sInds, ind_rev, invert=True))) # L = TL.L[sInds] l_extreme = max( [ np.abs(np.log10(np.min(TL.L[sInds]))), np.abs(np.log10(np.max(TL.L[sInds]))), ] ) if l_extreme == 0.0: l_weight = 1 else: l_weight = 1 - np.abs(np.log10(TL.L[sInds]) / l_extreme) ** self.lum_exp t_weight = t_dets / np.max(t_dets) weights = ( (comps + self.revisit_weight * f2_uv / float(self.nVisitsMax)) / t_weight ) * l_weight # weights = ((comps + self.revisit_weight*f3_uv/float(self.max_successful_dets) # *f2_uv/float(self.nVisitsMax))/t_weight)*l_weight sInd = np.random.choice(sInds[weights == max(weights)]) return sInd, slewTimes[sInd]
[ "def next_target(self, old_sInd, mode):\r\n OS = self.OpticalSystem\r\n ZL = self.ZodiacalLight\r\n Comp = self.Completeness\r\n TL = self.TargetList\r\n Obs = self.Observatory\r\n TK = self.TimeKeeping\r\n \r\n # create DRM\r\n DRM = {}\r\n \r\n # allocate settling time + overhead time\r\n tmpCurrentTimeAbs = TK.currentTimeAbs.copy() + Obs.settlingTime + mode['syst']['ohTime']\r\n tmpCurrentTimeNorm = TK.currentTimeNorm.copy() + Obs.settlingTime + mode['syst']['ohTime']\r\n\r\n\r\n # look for available targets\r\n # 1. initialize arrays\r\n slewTimes = np.zeros(TL.nStars)*u.d\r\n fZs = np.zeros(TL.nStars)/u.arcsec**2.\r\n dV = np.zeros(TL.nStars)*u.m/u.s\r\n intTimes = np.zeros(TL.nStars)*u.d\r\n obsTimes = np.zeros([2,TL.nStars])*u.d\r\n sInds = np.arange(TL.nStars)\r\n \r\n # 2. find spacecraft orbital START positions (if occulter, positions \r\n # differ for each star) and filter out unavailable targets \r\n sd = None\r\n if OS.haveOcculter == True:\r\n sd = Obs.star_angularSep(TL, old_sInd, sInds, tmpCurrentTimeAbs)\r\n obsTimes = Obs.calculate_observableTimes(TL,sInds,tmpCurrentTimeAbs,self.koMap,self.koTimes,mode)\r\n slewTimes = Obs.calculate_slewTimes(TL, old_sInd, sInds, sd, obsTimes, tmpCurrentTimeAbs) \r\n \r\n # 2.1 filter out totTimes > integration cutoff\r\n if len(sInds.tolist()) > 0:\r\n sInds = np.intersect1d(self.intTimeFilterInds, sInds)\r\n\r\n # start times, including slew times\r\n startTimes = tmpCurrentTimeAbs.copy() + slewTimes\r\n startTimesNorm = tmpCurrentTimeNorm.copy() + slewTimes\r\n\r\n # 2.5 Filter stars not observable at startTimes\r\n try:\r\n koTimeInd = np.where(np.round(startTimes[0].value)-self.koTimes.value==0)[0][0] # find indice where koTime is startTime[0]\r\n #wherever koMap is 1, the target is observable\r\n sInds = sInds[np.where(np.transpose(self.koMap)[koTimeInd].astype(bool)[sInds])[0]]# filters inds by koMap #verified against v1.35\r\n except:#If there are no target stars to observe \r\n sInds = np.asarray([],dtype=int)\r\n \r\n # 3. 
filter out all previously (more-)visited targets, unless in \r\n if len(sInds.tolist()) > 0:\r\n sInds = self.revisitFilter(sInds, tmpCurrentTimeNorm)\r\n\r\n # 4.1 calculate integration times for ALL preselected targets\r\n maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife = TK.get_ObsDetectionMaxIntTime(Obs, mode)\r\n maxIntTime = min(maxIntTimeOBendTime, maxIntTimeExoplanetObsTime, maxIntTimeMissionLife)#Maximum intTime allowed\r\n\r\n if len(sInds.tolist()) > 0:\r\n if OS.haveOcculter == True and old_sInd is not None:\r\n sInds,slewTimes[sInds],intTimes[sInds],dV[sInds] = self.refineOcculterSlews( old_sInd, sInds, slewTimes, obsTimes, sd, mode) \r\n endTimes = tmpCurrentTimeAbs.copy() + intTimes + slewTimes\r\n else: \r\n intTimes[sInds] = self.calc_targ_intTime(sInds, startTimes[sInds], mode)\r\n sInds = sInds[np.where(intTimes[sInds] <= maxIntTime)] # Filters targets exceeding end of OB\r\n endTimes = startTimes + intTimes\r\n \r\n if maxIntTime.value <= 0:\r\n sInds = np.asarray([],dtype=int)\r\n\r\n # 5.1 TODO Add filter to filter out stars entering and exiting keepout between startTimes and endTimes\r\n \r\n # 5.2 find spacecraft orbital END positions (for each candidate target), \r\n # and filter out unavailable targets\r\n if len(sInds.tolist()) > 0 and Obs.checkKeepoutEnd:\r\n try: # endTimes may exist past koTimes so we have an exception to hand this case\r\n tmpIndsbool = list()\r\n for i in np.arange(len(sInds)):\r\n koTimeInd = np.where(np.round(endTimes[sInds[i]].value)-self.koTimes.value==0)[0][0] # find indice where koTime is endTime[0]\r\n tmpIndsbool.append(self.koMap[sInds[i]][koTimeInd].astype(bool)) #Is star observable at time ind\r\n sInds = sInds[tmpIndsbool]\r\n del tmpIndsbool\r\n except:\r\n sInds = np.asarray([],dtype=int)\r\n \r\n # 6. choose best target from remaining\r\n if len(sInds.tolist()) > 0:\r\n # choose sInd of next target\r\n sInd, waitTime = self.choose_next_target(old_sInd, sInds, slewTimes, intTimes[sInds])\r\n \r\n if sInd == None and waitTime is not None:#Should Choose Next Target decide there are no stars it wishes to observe at this time.\r\n self.vprint('There are no stars Choose Next Target would like to Observe. 
Waiting %dd'%waitTime.value)\r\n return DRM, None, None, waitTime\r\n elif sInd == None and waitTime == None:\r\n self.vprint('There are no stars Choose Next Target would like to Observe and waitTime is None')\r\n return DRM, None, None, waitTime\r\n # store selected star integration time\r\n intTime = intTimes[sInd]\r\n \r\n # if no observable target, advanceTime to next Observable Target\r\n else:\r\n self.vprint('No Observable Targets at currentTimeNorm= ' + str(TK.currentTimeNorm.copy()))\r\n return DRM, None, None, None\r\n \r\n # update visited list for selected star\r\n self.starVisits[sInd] += 1\r\n # store normalized start time for future completeness update\r\n self.lastObsTimes[sInd] = startTimesNorm[sInd]\r\n \r\n # populate DRM with occulter related values\r\n if OS.haveOcculter == True:\r\n DRM = Obs.log_occulterResults(DRM,slewTimes[sInd],sInd,sd[sInd],dV[sInd])\r\n return DRM, sInd, intTime, slewTimes[sInd]\r\n \r\n return DRM, sInd, intTime, waitTime", "def technology_target_init(m, per, tech):\n start = 2000 if per == m.PERIODS.first() else m.PERIODS.prev(per)\n end = per\n target = sum(\n mw\n for (tyear, ttech, mw) in technology_targets\n if ttech == tech and start < tyear and tyear <= end\n )\n return target", "def next_step(bot_position, target_position, graph):\n return graph.a_star(bot_position, target_position)[-1]", "def getNextRobotLocation(): #@NoSelf", "def target_temperature_step(self):# -> Optional[float]:\r\n return 1", "def find_target(self):\n\n # Logic if the path was not provided\n if self.tracking == -1:\n\n #check if current target the robot has just arrived at is the goal\n if self.astar.check_for_goal(self.robot_target_node) == 1:\n arrived = 1\n\n else:\n\n # analyze neighbor nodes and add the lowest cost to open list\n self.astar.analyze_neighbors(self.robot_target_node)\n\n # retrive result form neighbor analysis and set target state\n self.robot_target_node = heapq.heappop(self.astar.open_list)\n self.astar.closed_list.append(self.robot_target_node)\n self.target_state = [self.robot_target_node.x_pos_w,\n self.robot_target_node.y_pos_w]\n\n self.grid.ax.plot(self.robot_target_node.x_pos_w,\n self.robot_target_node.y_pos_w,\n 'ro', markersize=1, zorder=6)\n arrived = 0\n\n # Logic if the path was provided\n else:\n self.tracking += 1\n\n if self.tracking >= len(self.path):\n arrived = 1\n\n else:\n arrived = 0\n self.target_state = [self.path[self.tracking].x_pos_w,\n self.path[self.tracking].y_pos_w]\n return arrived", "def find_path(self, max_step = 2):\n print('start:',np.around(self.atom_chosen, decimals=2), 'goal',np.around(self.design_chosen,decimals=2))\n if np.linalg.norm(self.atom_chosen - self.design_chosen)< self.safe_radius_nm:\n print('direct step, RRT not used')\n return self.design_chosen, [self.design_chosen, self.atom_chosen]\n rrt = RRT(\n start=self.atom_chosen, goal=self.design_chosen, rand_area=[-2, 15],\n obstacle_list=self.obstacle_list, expand_dis= max_step, path_resolution=1)\n path_len = np.inf\n min_path = None\n for _ in range(20):\n path = rrt.planning(animation=False)\n if path is not None:\n if len(path)<path_len:\n min_path = path\n path_len = len(path)\n else:\n break\n\n if min_path is None:\n print('Cannot find path')\n return None, None\n next_target = np.array(min_path[-2])\n return next_target, min_path", "def getSolutionAtNextTime(self):", "def first_gt(target: int) -> int:\n\n s = AdjacentSpiral()\n s.run(target)\n return s.last_value", "def get_next_step(self, target_location, bg_set):\n shortest_path = 
self.get_path_to(target_location, bg_set)\n\n # Shortest path provides the nodes to traverse in order, so the next node is the best next step\n # If the path is of length 1, the player is starting at the target node, so the function\n # Returns None as the next step. Use an exception here instead of if statement\n # for lower comparison overhead\n try:\n adjacent_market = shortest_path[1]\n except (IndexError, TypeError):\n adjacent_market = None\n\n return adjacent_market", "def test_choose_next_trial_from_queue(\n self,\n default_asha_state_and_search_method: Tuple[searcher.SearcherState, ASHADSATSearchMethod],\n ) -> None:\n searcher_state, search_method = default_asha_state_and_search_method\n search_method.trial_tracker.queue.clear()\n hparams, search_data = search_method.get_random_hparams_and_search_data(1)\n # Create an arbitrary counter to differentiate hparams and avoid the duplicate check in\n # `queue_and_register_trial`.\n arbitrary = 0\n\n # Create a curr_rung = 0 lineage\n trial = None\n hparams = copy.deepcopy(hparams)\n hparams[\"_arbitrary\"] = arbitrary\n arbitrary += 1\n trial = search_method.trial_tracker.create_trial(\n hparams=hparams, search_data=copy.deepcopy(search_data), parent_trial=trial\n )\n search_method.trial_tracker.queue_and_register_trial(trial)\n assert trial.searcher_metric_name\n search_method.trial_tracker.update_trial_metric(trial, {trial.searcher_metric_name: 0.0})\n\n # Create several curr_rung = 1 lineages of varying lengths\n for num_in_lineage in range(1, 3):\n trial = None\n for _ in range(num_in_lineage):\n hparams = copy.deepcopy(hparams)\n hparams[\"_arbitrary\"] = arbitrary\n arbitrary += 1\n search_data = copy.deepcopy(search_data)\n search_data.curr_rung = 1\n trial = search_method.trial_tracker.create_trial(\n hparams=hparams, search_data=search_data, parent_trial=trial\n )\n search_method.trial_tracker.queue_and_register_trial(trial)\n assert trial.searcher_metric_name\n search_method.trial_tracker.update_trial_metric(\n trial, {trial.searcher_metric_name: 0.0}\n )\n\n # Get the next trial:\n next_trial = search_method.choose_next_trial_from_queue()\n assert next_trial.search_data\n assert isinstance(next_trial.search_data, ASHADSATSearchData)\n assert next_trial.search_data.curr_rung == 1\n assert next_trial.num_completed_trials_in_lineage == num_in_lineage", "def get_next_match():\n pass", "def movetarget(self):\n x, y = self.target[0], self.target[1]\n neigh = [(nx, ny) for nx in [x - 1, x, x + 1] for ny in [y - 1, y, y + 1] if (nx, ny) != (x, y) if\n (nx, ny) in self.cells]\n nextstep = neigh[randint(0, len(neigh) - 1)]\n self.target = nextstep", "def construct_optimized_traj(self, initial_state, desired_state, objects, walls):\n best_traj = []\n best_traj_cost = self.LARGE_NUMBER\n\n start_time = time.perf_counter()\n end_time = start_time + self.PLAN_TIME_BUDGET\n MAX_TRYS = 10\n cnt = 0\n\n while (time.perf_counter() < end_time and cnt < MAX_TRYS):\n end_time_per_trial = min(end_time, time.perf_counter() + (self.PLAN_TIME_BUDGET/2))\n traj, traj_dist = self.construct_traj(initial_state, desired_state, objects, walls, end_time_per_trial)\n if(traj_dist == self.LARGE_NUMBER):\n cnt+= 0.5\n print(\"NO PATHS FOUND (Generate Optimized Trajectory)\")\n if(traj_dist < best_traj_cost): \n cnt+=1\n best_traj = traj \n best_traj_cost = traj_dist \n # print(\"tries\", cnt)\n\n return best_traj, best_traj_cost", "def next_target(self):\n next_search_list = self.search_list if len(self.search_list) else self.searching_list\n return 
next(iter(next_search_list))", "def Enforce_Technology_Target_rule(m, per, tech):\n\n # get target, including any capacity specified in the predetermined builds,\n # so the target will be additional to those\n target = (\n m.technology_target[per, tech]\n + m.gen_tech_predetermined_cap_dict[tech, per]\n )\n\n # convert target to closest integral number of units\n # (some of the targets are based on nominal unit sizes rather than actual max output)\n if m.gen_tech_unit_size_dict[tech] > 0.0:\n target = (\n round(target / m.gen_tech_unit_size_dict[tech])\n * m.gen_tech_unit_size_dict[tech]\n )\n\n if tech == \"LoadShiftBattery\":\n # special treatment for batteries, which are not a standard technology\n if hasattr(m, \"BuildBattery\"):\n # note: BuildBattery is in MWh, so we convert to MW\n build = (\n sum(m.BuildBattery[z, per] for z in m.LOAD_ZONES)\n / m.battery_min_discharge_time\n )\n else:\n build = 0\n else:\n build = sum(\n m.BuildGen[g, per]\n for g in m.GENERATION_PROJECTS\n if m.gen_tech[g] == tech and (g, per) in m.GEN_BLD_YRS\n )\n\n if type(build) is int and build == 0:\n # no matching projects found\n if target == 0:\n return Constraint.Skip\n else:\n print(\n \"WARNING: target was set for {} in {}, but no matching projects are available. \"\n \"Model will be infeasible.\".format(tech, per)\n )\n return Constraint.Infeasible\n elif psip and (\n m.options.psip_relax_after is None or per <= m.options.psip_relax_after\n ):\n return build == target\n elif m.options.psip_minimal_renewables and tech in m.RENEWABLE_TECHNOLOGIES:\n # only build the specified amount of renewables, no more\n return build == target\n else:\n # treat the target as a lower bound\n return build >= target", "def nextStep():\n traci.simulationStep()", "def _nextSlave(self, builder, slaves):\n request = builder.current_builder_request\n target_name = request.properties.getProperty('target-slave')\n\n if target_name:\n # See if we have the requested slave.\n for slave_builder in slaves:\n if slave_builder.slave.slavename == target_name:\n return slave_builder\n\n for slave_builder in slaves:\n if slave_builder.slave.canStartBuild():\n return slave_builder\n\n return random.choice(slaves)", "def get_target(self, weights={'observable': 1.0, 'moon_separation': 1.0}):\n\n # Make sure we have some targets\n self.read_target_list()\n\n self.logger.info('Evaluating candidate targets')\n\n merits = []\n\n chosen_target = None\n\n for target in self.list_of_targets:\n self.logger.debug('Target: {}'.format(target.name))\n observable = False\n target_merit = 0.0\n for term in weights.keys():\n (merit_value, observable) = self.get_merit_value(term, target)\n\n if merit_value and observable:\n target_merit += weights[term] * merit_value\n self.logger.debug('\\tTarget merit: {}'.format(target_merit))\n self.logger.debug(\"\\tTarget priority: {}\".format(target.priority))\n else:\n self.logger.debug('\\t Vetoing...')\n break\n\n if observable:\n merits.append((target.priority * target_merit, target))\n\n self.logger.debug('Target {} with priority {} has merit of {}'.format(\n target.name, target.priority, merit_value))\n if len(merits) > 0:\n self.logger.debug(merits)\n chosen = sorted(merits, key=lambda x: x[0])[-1][1]\n self.logger.info('Chosen target is {} with priority {}'.format(\n chosen.name, chosen.priority))\n chosen_target = chosen\n\n return chosen_target" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the area of the triangle whose two sides are ab and ac
def area_triangle_cross(ab, ac): return .5 * np.sqrt(np.sum(np.cross(ab, ac)**2, axis=1))
[ "def get_ab_area(self):\n\t\treturn la.norm(cross(self.a, self.b))", "def get_ab_area(self):\n\t\treturn la.norm(cross(self.a, self.b))/2", "def triangle_area(A, B, C):\n area = abs((A.x*(B.y-C.y) + B.x*(C.y-A.y) + C.x*(A.y-B.y)) / 2)\n return area", "def get_ac_area(self):\n\t\treturn la.norm(cross(self.a, self.c))", "def triangle_area(verts):\n a, b, c = verts[0], verts[1], verts[2]\n return (a.x*(b.y-c.y) + b.x*(c.y-a.y) + c.x*(a.y-b.y)) / 2.0", "def triangle_area(b,h):\n return 0.5*b*h", "def triangle_area(b, h):\n return(0.5 * b * h)", "def TriangleToArea(nodes):\n a = distancebetweenpoints(nodes[0], nodes[1])\n b = distancebetweenpoints(nodes[1], nodes[2])\n c = distancebetweenpoints(nodes[0], nodes[2])\n s = (a + b + c) / 2\n return numpy.sqrt(s * (s - a) * (s - b) * (s - c))", "def area( self ):\n\n return self.__sideAB * self.__sideDA * math.sin(math.radians(self.__angleA))", "def equilateral_triangle_area(base, height):\n return 0.5 * base * height", "def area_from_3_pts(a, b, c):\n return 0.5 * (b - a).cross(c - a).length", "def equilateral_triangle_area_alternative_method(side):\n return (math.sqrt(3.0)/4.0)*side*side", "def get_ah_area(self):\n\t\treturn la.norm(cross(self.a, self.h))", "def polygon_area(vertices: \"list[(float, float)]\") -> float:\n n = len(vertices)\n a = 0.0\n for i in range(n):\n j = (i + 1) % n\n a += abs(vertices[i][0] * vertices[j][1]-vertices[j][0] * vertices[i][1])\n result = a / 2.0\n return result", "def AreaForShape(shape):\n total_A = 0\n\n for polygon in SplitIntoPolygons(shape):\n cx, cy, A = CenterOfMass(polygon)\n total_A += A\n\n return total_A", "def trapezoid_area(height, side1, side2):\n return height/2.0*(side1+side2)", "def polygon_area(vertices):\n nvtcs = len(vertices)\n area = 0.0\n for ii in range(nvtcs):\n jj = (ii + 1) % nvtcs\n area += vertices[ii][0] * vertices[jj][1]\n area -= vertices[jj][0] * vertices[ii][1]\n area = abs(area) / 2.0\n return area", "def trapezoid_area(a,b,h):\n return 0.5*(a+b)*h", "def area_of_triangle_with(self, input_vector):\n return Decimal('0.5')*self.area_of_parallelogram_with(input_vector)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Split a path to a list.
def split(path, lst=None): empty = ("/", "\\", "") if lst is None: lst = [] if path in empty: return lst new_path, base = os.path.split(path) if base in empty: return [new_path] + lst lst.insert(0, base) return ComparePaths.split(new_path, lst)
[ "def split(path):\n return os.sep.split(path)", "def split_path(self, path):\n\n return path.split('/')", "def _split_all(path):\n result = []\n a = path\n old_a = None\n while a != old_a:\n (old_a, (a, b)) = a, posixpath.split(a)\n\n if a or b:\n result.insert(0, b or \"/\")\n\n return result", "def _path_to_list(path):\n if isinstance(path, six.string_types):\n return util.tokenize(path)\n elif isinstance(path, list):\n return path\n else:\n raise ValueError()", "def recursive_split (a_path):\n if not len(a_path):\n return [[],[]]\n if a_path == \"/\" or a_path == \"//\":\n return [ [a_path] , [a_path]]\n sub_path_part, sub_dir_part = os.path.split(a_path)\n if sub_dir_part:\n sub_path_parts, sub_dir_parts = recursive_split (sub_path_part)\n return [ [a_path] + sub_path_parts,\n [sub_dir_part] + sub_dir_parts]\n else:\n return [ [] , [\"/\"]]", "def split(path):\n # Algorithm copied from https://github.com/python/cpython/blob/3.11/Lib/posixpath.py#L100\n path = _fspath(path)\n sep = b'/' if isinstance(path, bytes) else '/'\n i = path.rfind(sep) + 1\n head, tail = path[:i], path[i:]\n if head and head != sep * len(head): head = head.rstrip(sep)\n return head, tail", "def split_field_path(path):\n if not path:\n return []\n\n elements = []\n want_dot = False\n\n for element in _tokenize_field_path(path):\n if want_dot:\n if element != \".\":\n raise ValueError(\"Invalid path: {}\".format(path))\n else:\n want_dot = False\n else:\n if element == \".\":\n raise ValueError(\"Invalid path: {}\".format(path))\n elements.append(element)\n want_dot = True\n\n if not want_dot or not elements:\n raise ValueError(\"Invalid path: {}\".format(path))\n\n return elements", "def _path_split(path):\n\n # Initialize the state\n start = None\n slash = True\n\n # Walk through the path\n for idx, char in enumerate(path):\n if char == '/':\n if not slash:\n # We hit the next slash, so yield the path element and\n # reset\n yield path[start:idx]\n start = None\n\n # Ignore repeated slashes\n slash = True\n elif start is None:\n # Found the start of a path element\n start = idx\n slash = False\n\n if start is not None:\n # Make sure to yield the last element\n yield path[start:]", "def testSplitPath(self):\n path_spec = fake_path_spec.FakePathSpec(location='/')\n\n test_file_system = TestFileSystem(self._resolver_context, path_spec)\n\n expected_path_segments = ['test1', 'test2', 'test3']\n\n path_segments = test_file_system.SplitPath('/test1/test2/test3')\n self.assertEqual(path_segments, expected_path_segments)\n\n path_segments = test_file_system.SplitPath('/test1/test2/test3/')\n self.assertEqual(path_segments, expected_path_segments)\n\n path_segments = test_file_system.SplitPath('/test1///test2/test3')\n self.assertEqual(path_segments, expected_path_segments)", "def split_path(cls, node_path):\n i = node_path.rfind(\"/\")\n if i == 0:\n return \"/\", node_path[1:]\n else:\n return node_path[:i], node_path[i + 1:]", "def splitdrive(path):\n path = _fspath(path)\n return path[:0], path", "def splitLVPath(path):\n import re\n match=None\n for pattern in LogicalVolume.LVPATH_SPLIT_PATTERNS:\n match=re.match(pattern, path)\n if match and match.group(1) != \"mapper\":\n return match.groups()\n raise LogicalVolume.LVMInvalidLVPathException(\"Path %s is not a valid LVM Path\" %(path))", "def _parse_split(self, path):\n prefix = ''\n\n if not path.endswith(os.sep):\n prefix = basename(path)\n path = dirname(path)\n\n if not isdir(path):\n return (None, None)\n\n return (path, prefix)", "def split(l, sep):\n res = 
[[]]\n for el in l:\n if el == sep:\n res.append([])\n else:\n res[-1].append(el)\n return res", "def _path_components(self):\r\n return self.path.lstrip('/').split('/')", "def split(path_to_split):\n idx1 = 0\n idx2 = 1\n complete = False\n while not complete:\n complete = True\n while idx2 < len(path_to_split):\n point1 = path_to_split[idx1]\n point2 = path_to_split[idx2]\n x1 = point1[0]\n y1 = point1[1]\n x2 = point2[0]\n y2 = point2[1]\n dist = math.hypot(x2 - x1, y2 - y1)\n if dist > 0.000003:\n complete = False\n new_x = (x1 + x2)/2\n new_y = (y1 + y2)/2\n new_point = [new_x, new_y]\n path_to_split.insert(idx2, new_point)\n idx1 += 1\n idx2 += 1\n return path_to_split", "def split(hdfs_path, user=None):\n # Use a helper class to compile URL_PATTERN once and for all\n return _HdfsPathSplitter.split(hdfs_path, user or common.DEFAULT_USER)", "def cut(self, path):\n # Cut the path in segments\n segments = ['/']\n tmp = '/'\n for item in path.split('/'):\n if item:\n tmp += item + '/'\n segments.append(tmp)\n\n return segments", "def splitDAGPath(path):\n if not isinstance(path, str):\n raise ValueError(\"string type expected as path argument, got %s\"%type(path))\n \n namespace = None\n n = path.find(\":\")\n if n!=-1:\n namespace = path[:n]\n path = path[n+1:]\n return namespace, path.split(\"|\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get size as a string with appropriate unit.
def get_size(self): units = ("B", "KB", "MB", "GB", "TB") for i, unit in enumerate(units): high = 10**(i*3) if self.size < high*1000: return f"{round(self.size/high, 3)} {unit}"
[ "def get_display_size(size):\n return \"{} ({}) ({})\".format(\n size, bytes_to_human(size, binary=True),\n bytes_to_human(size, binary=False))", "def size_unit(self) -> str:\n return pulumi.get(self, \"size_unit\")", "def size_string(size):\n try:\n return 'x'.join(map(str, size))\n except TypeError:\n return str(size)", "def format_size(size):\n if abs(size) < 1000:\n return str(size) + 'B'\n\n for unit in ('k', 'M', 'G'):\n size /= 1000\n if abs(size) < 1000:\n return SIZE_FORMAT.format(size, unit)\n\n return SIZE_FORMAT.format(size / 1000, 'T')", "def formatSize(self):\n return format_size(self.getSize())", "def size_human(size):\r\n\r\n if size:\r\n _abbrevs = [\r\n (1<<50L, 'P'),\r\n (1<<40L, 'T'),\r\n (1<<30L, 'G'),\r\n (1<<20L, 'M'),\r\n (1<<10L, 'k'),\r\n (1, 'bytes')]\r\n\r\n for factor, suffix in _abbrevs:\r\n if size > factor:\r\n break\r\n if factor == 1:\r\n return \"%d %s\" % (size, suffix)\r\n else:\r\n return \"%.3f%s\" % (float(size)/float(factor), suffix)", "def nice_size(size):\n words = [ 'bytes', 'Kb', 'Mb', 'Gb' ]\n try:\n size = float( size )\n except:\n return '??? bytes'\n for ind, word in enumerate(words):\n step = 1024 ** (ind + 1)\n if step > size:\n size = size / float(1024 ** ind)\n out = \"%.1f %s\" % (size, word)\n return out\n return '??? bytes'", "def size_converter(_bytes: int) -> str:\n KB = _bytes / float(1 << 10)\n MB = _bytes / float(1 << 20)\n GB = _bytes / float(1 << 30)\n\n if GB > 1:\n return f\"{round(GB, 2):,} GB\"\n elif MB > 1:\n return f\"{round(MB, 2):,} MB\"\n\n return f\"{round(KB, 2):,} KB\"", "def size_to_kb_mb_string(data_size: int, as_additional_info: bool = False) -> str:\n\tif data_size < 1024:\n\t\tas_additional_info = False\n\t\tdynamic = f'{data_size} bytes'\n\telif data_size < 1048576:\n\t\tdynamic = f'{data_size / 1024:0.1f} kB'\n\telse:\n\t\tdynamic = f'{data_size / 1048576:0.1f} MB'\n\n\tif as_additional_info:\n\t\treturn f'{data_size} bytes ({dynamic})'\n\telse:\n\t\treturn dynamic", "def human_readable_file_size(size_in_bytes):\n return size(size_in_bytes, system=alternative)", "def human_file_size(size):\n suffixes = ' kMGTPEH'\n if size == 0:\n num_scale = 0\n else:\n num_scale = int(math.floor(math.log(size) / math.log(1000)))\n if num_scale > 7:\n suffix = '?'\n else:\n suffix = suffixes[num_scale]\n num_scale = int(math.pow(1000, num_scale))\n value = size / num_scale\n str_value = str(value)\n if str_value[2] == '.':\n str_value = str_value[:2]\n else:\n str_value = str_value[:3]\n return \"{0:>3s}{1}\".format(str_value, suffix)", "def fileSizeAsMb(cls, size):\n for q in [\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\"]:\n if size < 1024 or q == \"TB\":\n s = str(round(size, 1))\n if s.endswith(\".0\"):\n s = s[:-2]\n return \"%s %s\" % (s, q)\n size = size / 1024.0\n # not reachable\n return `size`", "def pretty_size(size_in_bytes=0, measure=None):\n\n # Force size_in_bytes to be an integer\n size_in_bytes = size_in_bytes or 0\n\n # Map out the math required to re-calculate bytes into human-readable formats.\n pretty_size_map = {\n\n # Do not round.\n \"B\": size_in_bytes,\n\n # Round to nearest whole number.\n \"KB\": round(size_in_bytes / 1000.0, 0),\n \"KiB\": round(size_in_bytes / 1024.0, 0),\n\n # Round to one decimal place.\n \"MB\": round(size_in_bytes / 1000.0 / 1000.0, 1),\n \"MiB\": round(size_in_bytes / 1024.0 / 1024.0, 1),\n\n # Round to two decimal places.\n \"GB\": round(size_in_bytes / 1000.0 / 1000.0 / 1000.0, 2),\n \"GiB\": round(size_in_bytes / 1024.0 / 1024.0 / 1024.0, 2)\n }\n\n # If measure was 
specified, format and return. This is usually used when calling\n # this function recursively, but can be called manually.\n if measure:\n return f'{pretty_size_map[measure]} {measure}'\n elif pretty_size_map['GiB'] > 1:\n return pretty_size(size_in_bytes, 'GiB')\n elif pretty_size_map['MiB'] > 1:\n return pretty_size(size_in_bytes, 'MiB')\n elif pretty_size_map['KiB'] > 1:\n return pretty_size(size_in_bytes, 'KiB')\n else:\n return f'{size_in_bytes:,.0f} B'", "def _convert_size(input_size):\n if input_size == 0:\n return '0B'\n\n size_name = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(input_size, 1024)))\n power = math.pow(1024, i)\n size = round(input_size / power, 2)\n return '%s %s' % (size, size_name[i])", "def generate_human_readable_size(byte_size: int) -> str:\n size_measurement_units = (('KiB', 1024), ('MiB', 1024**2), ('GiB', 1024**3), ('TiB', 1024**4))\n suffix = None\n divisor = None\n for u, m in size_measurement_units:\n if byte_size >= m:\n suffix = u\n divisor = m\n\n if suffix and divisor:\n return f'{round(byte_size / divisor, 1)} {suffix}'\n return f'{byte_size}B'\n # return f'{round(byte_size/divisor, 1)} {suffix}'", "def _readable_memory_size(weight_memory_size):\n units = [\"Byte\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\"]\n scale = 1024\n for unit in units:\n if weight_memory_size / scale < 1:\n return \"{:.2f} {}\".format(weight_memory_size, unit)\n else:\n weight_memory_size /= scale\n return \"{:.2f} {}\".format(weight_memory_size, units[-1])", "def fmt_binary_size(size):\n units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB']\n\n unit = 0\n for unit in range(0, len(units)):\n if size < 1024:\n break\n size /= 1024.0\n\n size = int(math.ceil(size))\n\n return f'{size} {units[unit]}'", "def _pixel_size_to_str(self, pixel_size: Union[float, tuple, list] = None):\n\n def _res_to_str(res):\n return f\"{abs(res):.2f}m\".replace(\".\", \"-\")\n\n if pixel_size:\n if isinstance(pixel_size, (tuple, list)):\n res_x = _res_to_str(pixel_size[0])\n res_y = _res_to_str(pixel_size[1])\n if res_x == res_y:\n res_str = res_x\n else:\n res_str = f\"{res_x}_{res_y}\"\n else:\n res_str = _res_to_str(pixel_size)\n else:\n res_str = _res_to_str(self.pixel_size)\n\n return res_str", "def si_size(b):\n UNITS = ('B', 'KB', 'MB', 'GB', 'TB', 'PB')\n index = 0\n while b >= 1024 and index < len(UNITS) - 1:\n b /= 1024.0\n index += 1\n return \"%.1f %s\" % (b, UNITS[index])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the bio of this AccountItemUpdate.
def bio(self) -> str: return self._bio
[ "def __str__(self):\n return self.bio", "def item_description(self, item):\n return item.summary", "def description(self):\n return self.data_hash['activity']['description']", "def description(self):\n return self._book_dict[\"description\"]", "def get_isni_bio(existing, author):\n auth_isni = re.sub(r\"\\D\", \"\", str(author.isni))\n if len(existing) == 0:\n return \"\"\n for value in existing:\n if hasattr(value, \"bio\") and auth_isni == re.sub(r\"\\D\", \"\", str(value.isni)):\n return mark_safe(f\"Author of <em>{value.bio}</em>\")\n\n return \"\"", "def screen_name_with_bio(self):\n accounts_for_pro_or_anti_followers={}\n for json_file in self.data: # going through the list and picking out the json_file\n #adding the desciption of the bio with the screen name to the dictionary screen_name_with_desciption\n accounts_for_pro_or_anti_followers[json_file[\"user\"][\"screen_name\"]]=json_file[\"user\"][\"description\"]\n return(accounts_for_pro_or_anti_followers)# returns dictionary with screen name and bio", "def notes(self) -> str:\n return super(Account, self).get_slot_value('notes')", "def get_account(self):\n return self.fetch_data(\"account\")", "def description(self):\r\n return self._agent_properties.get('AgentProperties', {}).get('userDescription')", "def bundle_info(self):\n return self._bundle_info", "def user_info(self):\n return self.__user_info", "def citation(self) -> str:\n return self.item.properties.get(CITATION)", "def attribute_info(self):\n return self._attribute_info", "async def bio(self, ctx, args=None, language=\"en\"):\n user_or_username = await self.get_username(ctx, args)\n await self._generate_view(ctx, user_or_username, \"bio\", language)", "def __get_account_details(self):\n account = self.get_random_record_with_valid_attribute(\n 'accounts', 'account_type', ['Firm', 'Client', 'Counterparty']\n )\n account_id = account['account_id']\n account_type = account['account_type']\n return account_id, account_type", "def custom_profile_attributes(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"custom_profile_attributes\")", "def get_iam_user_info(self):\n from boto.iam.connection import IAMConnection\n iac = IAMConnection(self.options['aws_access_key'],self.options['aws_secret_key'])\n return iac.get_user()['get_user_response']['get_user_result']['user']", "def account(self):\n return self.__account", "def get_acc_info(self):\n acc_info=self.connection.get_accounts()\n self.db.update_account_info_table(self.account_id, acc_info)", "def avatar(self) -> str:\n return self.bot.user.avatar_url_as(format='png')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the bio of this AccountItemUpdate.
def bio(self, bio: str): self._bio = bio
[ "def italic_bi(self, italic_bi):\n self._italic_bi = italic_bi", "def load_or_update_bio( conn, journo_id, bio, default_approval=False ):\n\n assert( 'kind' in bio and bio['kind']!='' )\n c = conn.cursor()\n c.execute( \"SELECT * FROM journo_bio WHERE journo_id=%s AND kind=%s\", journo_id, bio['kind'] )\n old_bios = c.fetchall()\n\n # each journo can only have one bio of a kind\n assert( len( old_bios ) in (0,1) )\n\n if len(old_bios) > 0:\n # update existing bio, keeping the approval status\n c.execute( \"UPDATE journo_bio SET bio=%s,srcurl=%s WHERE journo_id=%s AND kind=%s\", bio['bio'].encode('utf-8'), bio['srcurl'], journo_id, bio['kind'] )\n\n else:\n # create new bio entry\n c.execute( \"INSERT INTO journo_bio (journo_id, bio, kind,srcurl, approved ) VALUES (%s,%s,%s,%s,%s)\", journo_id, bio['bio'].encode('utf-8'), bio['kind'], bio['srcurl'], default_approval )", "def update_account_meta(self, user, account, domain, meta, replace=False):\n return", "def set_biomes(self, new_biomes):\n self._biomes = None\n\n for index, chunk in enumerate(self.chunks):\n z_start, z_end, x_start, x_end = calculate_chunk_bounds(index)\n\n chunk_biomes = chunk.get_biomes()\n if chunk_biomes is None:\n continue\n\n nbt_type = type(chunk.nbt_data.root[\"Level\"][\"Biomes\"])\n # Getting the type here is necessary because some older chunks\n # use ByteArray for the biomes, but newer chunks use IntArray.\n # Unfortunately, even \"converted\" worlds still sometimes have\n # chunks with old-style data structures in them.\n\n chunk.nbt_data.root[\"Level\"][\"Biomes\"] = nbt_type(\n new_biomes[z_start:z_end, x_start:x_end].flatten()\n )", "def setUserIsBanned(self, account, email, reason=None):\n EquipmentACLInfo.setBanned(account, self, email, reason)\n self._updateCalendarPermissions(account)", "def edit_bio(request):\n users_profile = UserProfiles.objects.get(user=request.user)\n if request.user != users_profile.user:\n raise Http404(\"You do not have permission\")\n if request.method != 'POST':\n form = BiographyForm(instance=users_profile)\n else:\n form = BiographyForm(instance=users_profile, data=request.POST)\n if form.is_valid():\n form.save()\n print(\"execute this line in bio\")\n return HttpResponseRedirect(reverse('users:my_profile'))\n\n context = {'form': form}\n return render(request, 'users/edit_bio.html', context)", "def setInformation(self, account, acl, equipment, info):\n info = to_string(info)\n\n if not info:\n return\n\n acl.assertIsAdministrator(account)\n\n item = equipment._getFromDB()\n item.constraints.booking_info = info\n item.put()\n\n self.booking_info = info", "def setItalic(self, isItalic):\n\t\tself._isItalic = isItalic", "def set_account_information(self, user_id, req):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n UPDATE Users\n SET \n username = ?,\n email = ?,\n fName = ?,\n lName = ?,\n streetAddress = ?,\n city = ?,\n state = ?,\n postCode = ?\n WHERE\n id = ?\n \"\"\",\n (\n req['username'],\n req['email'],\n req['fName'],\n req['lName'],\n req['streetAddress'],\n req['city'],\n req['state'],\n req['postCode'],\n user_id\n )\n )\n self.db.commit()\n except sqlite3.Error as e:\n log.error(e)\n raise Exception", "def _update_fields(self, anki_note: Note) -> None:\n anki_note['Question'] = self.question\n anki_note['Answer'] = self.answer\n self._base_update(anki_note)", "def test_user_has_profile_with_bio(self):\n this_user = self.users[0]\n this_user.profile.bio = 'I take beautiful pictures. 
You should hire me.'\n this_user.save()\n self.assertTrue(self.users[0].profile.bio == 'I take beautiful pictures. You should hire me.')", "async def avatar(self, ctx, image: str):\n try:\n with open(os.path.join(self.bot.base_dir, image), \"rb\") as avatar:\n f = avatar.read()\n image_bytes = bytearray(f)\n await self.bot.user.edit(avatar=image_bytes)\n except Exception as e:\n await ctx.send(\"Failed to change avatar\")\n print(e)", "async def newavatar(self, ctx):\n await self.change_avatar(ctx)", "async def __edit_bot_avatar(self, ctx: Context, url: str):\n\n async with self.bot.session.get(url) as response:\n image_bytes = await response.read()\n await ctx.bot.user.edit(avatar=image_bytes)\n await asyncio.sleep(2)\n await ctx.send(f\"<:me:589614537775382552>\"\n f\"<:and:589614537867657235>\"\n f\"<:the:589614537309945878>\"\n f\"<:boys:589614537490300940>\"\n f\" | new bot avatar is \\n{self.bot.user.avatar_url}\")", "def edit_user(self, user: User, attribute: dict[str, Any]) -> None:\n\t\tpass", "def update_user(\n self,\n user_id,\n bio=\"\",\n first_name=\"\",\n last_name=\"\",\n profile_pic=\"\",\n new_password=\"\",\n new_email=\"\",\n ):\n new_info = {}\n if bio:\n new_info[\"bio\"] = bio\n if first_name:\n new_info[\"first_name\"] = first_name\n if last_name:\n new_info[\"last_name\"] = last_name\n if profile_pic:\n new_info[\"profile_pic\"] = profile_pic\n if new_password:\n new_info[\"new_password\"] = new_password\n if new_email:\n new_info[\"new_email\"] = new_email\n\n self.client.Users.users.update_one(\n {\"user_id\": user_id}, {\"$set\": new_info}, upsert=True\n )\n return new_info", "def set_base_val(self, item: DTOBase) -> None:\n self.base_item.data = item", "async def update(self, account: MetatraderAccountUpdateDto):", "def setAssetUser(self, assetName, area, userName, seq = ''):\n\t\tassetId = self.getAssetIdFromName(assetName, area, seq )\n\t\tuserId = self.getUserIdFromName( userName )\n\t\tif userId:\n\t\t\tcon = lite.connect(self.dataBaseFile)\n\t\t\twith con:\n\t\t\t\tcur = con.cursor()\n\t\t\t\tcur.execute(\"UPDATE Assets SET UserId = %i WHERE Id = %i\"%(userId, assetId))", "def test_base_attributes_modified(self):\n user = BonitaUser(username=u'myusername', password=u'mypassword')\n user._uuid = 'myuuid'\n user.clear()\n\n # Prepare response of MockedServer\n url = '/identityAPI/updateUserByUUID'\n code = 200\n user_xml = build_bonita_user_xml(uuid='myuuid', password='mypassword', username='other_usernames')\n BonitaServer.set_response_list([[url, code, user_xml]])\n\n # Modify some base attributes\n user.last_name = u'last_name'\n user.title = u'Doctor'\n user.username = u'other_username'\n user.first_name = u'first_name'\n user.job_title = u'job_title'\n\n user._update()\n\n assert user.is_modified is False\n assert user.last_name == u'last_name'\n assert user.title == u'Doctor'\n assert user.username == u'other_username'\n assert user.first_name == u'first_name'\n assert user.job_title == u'job_title'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
search method let the AP to search the nearby devices and get the mac address return to system.py
def search(self,num): while True: if num ==1: device_address = None time.sleep(3) # Sleep three seconds nearby_devices = bluetooth.discover_devices() for mac_address in nearby_devices: device_address = mac_address break if device_address is not None: data = [{'mac_address': device_address, 'type': 4}] break else: print("Please open your bluetooth!") return data
[ "def search(self,user_name, device_name):\n while True:\n device_address = None\n print(\"Searching for device..\")\n time.sleep(2) #Sleep 2 seconds \n nearby_devices = bluetooth.discover_devices()\n\n for mac_address in nearby_devices:\n if device_name == bluetooth.lookup_name(mac_address, timeout=5):\n device_address = mac_address\n break\n if device_address is not None:\n print(\"Hi {}! Your phone ({}) has the MAC address: {}\".format(user_name, device_name, device_address))\n return {\"mac_address\": device_address, \"email\": user_name}\n else:\n print(\"Could not find target device nearby...\")\n time.sleep(3)\n return {}", "def search_mac_addresses(cls):\n gm = GetMacAddresses()\n data = gm.run()\n return data", "def scan_devices(self):\n self._update_info()\n _LOGGER.debug(\"Keenetic last update results %s\", self.last_results)\n return [device.mac for device in self.last_results]", "def arp_scan(self):\r\n if self.router_mac and self.router_mac == GLOBAL_MAC:\r\n self.init()\r\n\r\n self.generate_ips()\r\n scan_result = arping(f\"{self.router_ip}/24\", verbose=0, timeout=1)\r\n clean_result = [(i[1].psrc, i[1].src) for i in scan_result[0]]\r\n\r\n self.devices_appender(clean_result)", "def find_devices(controller):\n pysicl.gpib_timeout(500)\n for addr in range(1,31):\n print addr\n if addr != 21:\n status = dev_status(controller+str(addr))\n print addr,status\n if status > -1:\n print addr,\":\",status\n pysicl.gpib_timeout(10000)", "def network_search(self):\n id_min = self.id_search_min.value()\n id_max = self.id_search_max.value()\n search_result = mixcell.search(id_min,id_max,self.baudrates_search_list)\n if search_result == mixcell.PORT_ERROR:\n self.port_error_message()\n elif search_result == mixcell.BAUDRATE_ERROR:\n self.baudrate_error_message()\n elif len(search_result) == 0:\n self.no_servos_found_message()\n else:\n self.table_organize(search_result)", "def get_devices():\n print(\"Scanning for available devices.\")\n nearby_devices = bluetooth.discover_devices()\n out = {}\n for bdaddr in nearby_devices:\n name = bluetooth.lookup_name(bdaddr)\n out[name] = bdaddr\n if out is not None:\n print(\"Found the following devices:\")\n print_devices(out)\n print(\"\")\n else:\n print(\"Found no devices.\")\n return(out)", "def discover_atag():\r\n # return format: [b'ONE xxxx-xxxx-xxxx_xx-xx-xxx-xxx (ST)',\r\n # ('xxx.xxx.x.x', xxxx)]\r\n # sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) # UDP\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n sock.settimeout(30)\r\n sock.bind((\"\", 11000))\r\n try:\r\n while True:\r\n result = sock.recvfrom(37)\r\n host_ip = result[1][0]\r\n device_id = result[0].decode().split()[1]\r\n return host_ip, device_id\r\n except socket.timeout:\r\n return False\r\n except Exception as err:\r\n raise RequestError(err)", "def arpScan():\n print(\"____________________________________________________\\n\")\n print(\"ARP Scan\\n\")\n autoMode=input(\"Would you like to automatically detect IP subnet? 
[Y/N]: \")\n if autoMode==\"y\" or autoMode==\"Y\": targetRange=findIP()\n else: targetRange=input(\"Please enter a target range (for example: 192.168.159.1/24): \") #Will change to input\n arpRequests=ARP(pdst=targetRange) #create packet for broadcast\n broadcastMAC = Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n packet = broadcastMAC/arpRequests #Joins the request & broadcast mac - required for arp\n\n result = srp(packet, timeout=3, verbose=0)[0]#Sends packets & records result\n discovered = []\n \n #Gathers all responses\n for null, response in result:\n macVendor=macToMan(response.hwsrc).strip(\"\\n\")\n discovered.append([response.psrc,response.hwsrc,macVendor[9:]])\n\n #Displays to user\n currentTime=str(datetime.datetime.now())\n print(\"Devices Found:\")\n print(\"IP\" + \" \"*15+\"MAC\"+\" \"*15+\"MAC Vendor\")\n for i in range(len(discovered)):\n print(\"{:16} {:16} {}\".format(discovered[i][0],discovered[i][1],discovered[i][2]))\n ARPdb(discovered[i][0],discovered[i][1],discovered[i][2],currentTime)", "def _FindMacAddresses(self):\n for item in self._GetDataTypeItems('network'):\n if 'hardware' in item:\n if 'Ethernet' in item and 'MAC Address' in item['Ethernet']:\n intf_mac = item['Ethernet']['MAC Address']\n intf_name = item.get('interface', None)\n\n if item['hardware'] == 'Ethernet':\n intf_type = 'ethernet'\n elif item['hardware'] == 'AirPort':\n intf_type = 'airport'\n elif item['hardware'] == 'FireWire':\n intf_type = 'firewire'\n else:\n intf_type = None\n\n if intf_type is not None:\n self._profile['%s_mac' % intf_type] = intf_mac\n if intf_name is not None:\n self._profile['interface_%s' % intf_name] = intf_type", "def find_phones():\r\n sock_sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock_sender.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\r\n\r\n # IP, Port tuple representing the phone\r\n phone = None\r\n rejected_hosts = []\r\n\r\n broadcast_addrs = _get_broadcast_addrs()\r\n if not broadcast_addrs:\r\n print('There was a problem running the phone finder. 
You will have to configure manually.')\r\n return None\r\n\r\n print('Ready to search for phones.')\r\n manual = input('Press Enter when the app is open on your phone, or type \"m\" to skip to manual configuration.\\n')\r\n manual = manual.lower()\r\n if manual.lower() == 'm':\r\n return None\r\n\r\n for port in range(PORT_MIN, PORT_MAX+1):\r\n count = 0\r\n\r\n # Search more on the earlier ports which are much more likely to be the right one\r\n #if port == PORT_MIN:\r\n # tries = 4\r\n #else:\r\n # tries = 2\r\n tries = 2\r\n\r\n print('Searching on port ' + str(port), end=\"\")\r\n while not phone and count < tries:\r\n count += 1\r\n print('.', end='')\r\n stdout.flush()\r\n\r\n # Send on ALL the interfaces (required by Windows!)\r\n for broadcast_addr in broadcast_addrs:\r\n #print('\\nbroadcasting on ' + broadcast_addr + ' to ' + str(port))\r\n discover_bytes = bytes(DISCOVER_REQUEST, ENCODING)\r\n sock_sender.sendto(discover_bytes, (broadcast_addr, port))\r\n\r\n sock_recvr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n sock_recvr.bind(('', port))\r\n\r\n # Wait for phone to respond\r\n # I don't know what an appropriate timeout for this would be - shorter is better but how short\r\n # is too short?\r\n ready = select([sock_recvr], [], [sock_sender, sock_recvr], 0.25)\r\n if ready[0]:\r\n # Buffsize must match ConnectionInitThread.BUFFSIZE\r\n data, other_host = sock_recvr.recvfrom(256)\r\n data = data.decode(ENCODING).rstrip(' \\0')\r\n if not data.startswith(DISCOVER_CONFIRM):\r\n print('Received a strange response: ' + data)\r\n continue\r\n\r\n # Skip over rejected hosts\r\n if not other_host[0] in rejected_hosts:\r\n print()\r\n print('Got a response from ' + str(other_host))\r\n # The first line of the response is a confirm, the second is phone info, the third is port#\r\n # Print out the phone info received, and get the user to confirm\r\n print('Phone info: ' + data.splitlines()[1])\r\n confirm = input('Is this your phone? y/N: ')\r\n if confirm.lower() == 'y':\r\n # Get the port the TCP Socket is listening for from the third line of the request\r\n tcp_port_str = data.splitlines()[2]\r\n # Convert to an int\r\n tcp_port = port_str_to_int(tcp_port_str)\r\n if not tcp_port:\r\n # Cannot recover from this; it's a server bug. 
Manual config only workaround.\r\n print('Received invalid port from phone; cannot continue.'.format(tcp_port_str))\r\n return None\r\n\r\n return other_host[0], tcp_port\r\n else:\r\n rejected_hosts.append(other_host[0])\r\n\r\n if ready[2]:\r\n print('There was an error selecting ' + ready[2])\r\n\r\n sock_recvr.close()\r\n\r\n print()\r\n\r\n return None", "def test_ap_wps_ssdp_msearch(dev, apdev):\n ap_uuid = \"27ea801a-9e5c-4e73-bd82-f89cbcd10d7e\"\n add_ssdp_ap(apdev[0]['ifname'], ap_uuid)\n\n msg = '\\r\\n'.join([\n 'M-SEARCH * HTTP/1.1',\n 'Host: 239.255.255.250:1900',\n 'Mx: 1',\n 'Man: \"ssdp:discover\"',\n 'St: urn:schemas-wifialliance-org:device:WFADevice:1',\n '', ''])\n ssdp_send(msg)\n\n msg = '\\r\\n'.join([\n 'M-SEARCH * HTTP/1.1',\n 'host:\\t239.255.255.250:1900\\t\\t\\t\\t \\t\\t',\n 'mx: \\t1\\t\\t ',\n 'man: \\t \\t \"ssdp:discover\" ',\n 'st: urn:schemas-wifialliance-org:device:WFADevice:1\\t\\t',\n '', ''])\n ssdp_send(msg)\n\n ssdp_send_msearch(\"ssdp:all\")\n ssdp_send_msearch(\"upnp:rootdevice\")\n ssdp_send_msearch(\"uuid:\" + ap_uuid)\n ssdp_send_msearch(\"urn:schemas-wifialliance-org:service:WFAWLANConfig:1\")\n ssdp_send_msearch(\"urn:schemas-wifialliance-org:device:WFADevice:1\");\n\n msg = '\\r\\n'.join([\n 'M-SEARCH * HTTP/1.1',\n 'HOST:\\t239.255.255.250:1900',\n 'MAN: \"ssdp:discover\"',\n 'MX: 130',\n 'ST: urn:schemas-wifialliance-org:device:WFADevice:1',\n '', ''])\n ssdp_send(msg, no_recv=True)", "def find_device_in_ipam(ip, devices, logger):\n logger.debug('%s - Getting the device from the devices of NSoT.', ip)\n for device in devices:\n if 'attributes' in device:\n if 'address' in device['attributes']:\n if device['attributes']['address'] == ip:\n return device", "def devices_appender(self, scan_result):\r\n self.devices = []\r\n unique = []\r\n\r\n # Sort by last part of ip xxx.xxx.x.y\r\n scan_result = sorted(\r\n scan_result,\r\n key=lambda i:int(i[0].split('.')[-1])\r\n )\r\n \r\n for ip, mac in scan_result:\r\n mac = good_mac(mac)\r\n\r\n # Store gateway\r\n if ip == self.router_ip:\r\n self.router_mac = mac\r\n continue\r\n \r\n # Skip me or duplicated devices\r\n if ip == self.my_ip or mac in unique:\r\n continue\r\n \r\n # update same device with new ip\r\n if self.old_ips.get(mac, ip) != ip:\r\n self.old_ips[mac] = ip\r\n unique.append(mac)\r\n\r\n self.devices.append(\r\n {\r\n 'ip': ip,\r\n 'mac': good_mac(mac),\r\n 'vendor': get_vendor(mac),\r\n 'type': 'User',\r\n 'admin': False\r\n }\r\n )\r\n \r\n # Remove device with old ip\r\n for device in self.devices[:]:\r\n mac, ip = device['mac'], device['ip']\r\n if self.old_ips.get(mac, ip) != ip:\r\n self.devices.remove(device)\r\n \r\n # Re-create devices old ips dict\r\n self.old_ips = {d['mac']: d['ip'] for d in self.devices}\r\n\r\n self.add_me()\r\n self.add_router()\r\n\r\n # Clear arp cache to avoid duplicates next time\r\n if unique:\r\n self.flush_arp()", "def scan(self):\n for addr in range(127):\n # Skip I2C addresses which are reserved.\n if addr <= 7 or addr >= 120:\n continue\n if self.ping(addr):\n self._log.debug('Detected device at address 0x{0:02x}.'.format(addr))", "def scan_candidate_wifi(self):\n cmd = self.select_cmd('search_wifi_list')\n\n # scan wifi list\n self.is_inter_up = False\n while True:\n try:\n with Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) as proc:\n output = proc.communicate(input=(WifiHandler.sudo_password+'\\n').encode())\n except SubprocessError as e:\n print(e)\n self.error_and_return('Improper Popen object opened')\n return\n \n # wlan0 
interface is already opened\n if output[1] == b'':\n self.is_inter_up = True\n break\n # wlan0 interface is closed or resource busy\n elif output[0] == b'':\n if self.is_interface_off(output[1]):\n print('interface off')\n return\n else:\n print('resource busy')\n pass\n time.sleep(0.01)\n\n # parsing all ssid list\n ssid_cnt = 0\n tmp_scanned_wifi_info = dict()\n tmp_known_host = []\n for each_line in output[0].decode('utf-8').split('\\n'):\n tmp_each_info = []\n if each_line.find('BSS') != -1 and each_line.find(WifiHandler.interface_name) != -1:\n if ssid_cnt != 0 and len(tmp_scanned_wifi_info.get(ssid_cnt)) == 2:\n tmp_scanned_wifi_info[ssid_cnt].append(\"FREE\")\n ssid_cnt += 1\n tmp_scanned_wifi_info[ssid_cnt] = []\n elif each_line.find('signal') != -1:\n tmp_scanned_wifi_info[ssid_cnt].append(int(float(each_line.split(' ')[1])))\n elif each_line.find('SSID:') != -1:\n tmp_ssid = each_line.split(' ')[1]\n if tmp_ssid != '' and tmp_ssid.find('x00') == -1:\n is_known_host = self.is_known_host(tmp_ssid)\n tmp_scanned_wifi_info[ssid_cnt].append(tmp_ssid)\n tmp_scanned_wifi_info[ssid_cnt].append(is_known_host)\n elif each_line.find('RSN') != -1:\n tmp_scanned_wifi_info[ssid_cnt].append('PSK')\n # Sort out the duplicate value and generate json format \n df_scanned_wifi_info = pd.DataFrame(data=tmp_scanned_wifi_info.values(),\n columns=['SIGNAL', 'SSID', 'KNOWN_HOST', 'PSK'])[['SSID', 'PSK', 'SIGNAL', 'KNOWN_HOST']]\n df_tmp_psk = df_scanned_wifi_info[['SSID', 'PSK', 'KNOWN_HOST']].drop_duplicates()\n df_tmp_signal = df_scanned_wifi_info.groupby('SSID').SIGNAL.min().reset_index(name = \"SIGNAL\")\n wifi_info = pd.merge(df_tmp_psk, df_tmp_signal, how=\"inner\", on=\"SSID\").sort_values(by=['SIGNAL']).to_dict('records')\n \n return wifi_info", "def lookup_host_mac(self, mac):\n msg = pypureomapi.OmapiMessage.open(\"host\")\n msg.obj.append((\"hardware-address\", pypureomapi.pack_mac(mac)))\n response = self.query_server(msg)\n if response.opcode != pypureomapi.OMAPI_OP_UPDATE:\n raise pypureomapi.OmapiErrorNotFound()\n try:\n return pypureomapi.unpack_ip(dict(response.obj)[\"ip-address\"])\n except KeyError: # ip-address\n raise pypureomapi.OmapiErrorNotFound()", "def main():\n\n scanner = Scanner()\n mac_addresses = scanner.scan()\n\n if not mac_addresses:\n print(\"No switchbot found.\")\n return\n\n print(\"Switchbot mac addresses:\")\n for mac in mac_addresses:\n print(mac)", "def query_devices():\n return devices.find()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an example db handle for testing. Returns None if helper packages not present.
def example_handle(): # TODO: parameterize this assert have_sqlalchemy db_engine = sqlalchemy.engine.create_engine( r"postgresql://johnmount@localhost/johnmount" ) db_handle = PostgreSQLModel().db_handle(conn=db_engine, db_engine=db_engine) db_handle.db_model.prepare_connection(db_handle.conn) return db_handle
[ "def getDb():\n return psycopg2.connect(\"dbname='snippets'\")", "def create_testdata_db(self):\n\n try:\n dsn = CommandlineTool.get_input_option('yoda-db-testdata-dsn')\n force = CommandlineTool.get_input_option('force')\n if (not dsn):\n dsn = self._mh.ext_cfg['Yoda']['db_testdata_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)\n db = DBO(dsn)._dbo_driver\n db._parse_dsn(dsn)\n\n result = True\n if (not db.database_exists() or force):\n if (force):\n dmsg(self._mh._trn.msg('yoda_remove_testdata_db', dsn))\n db.remove_database()\n\n print(self._mh._trn.msg('yoda_create_testdata_db', dsn))\n db.connect()\n dbdir = os.path.join(self._mh.ext_cfg['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR), 'db_testdata')\n script = file_get_contents(\n os.path.join(dbdir, 'db_struct.sql'))\n db._cursor.executescript(script)\n script = file_get_contents(os.path.join(dbdir, 'db_data.sql'))\n db._cursor.executescript(script)\n print(self._mh._trn.msg('yoda_testdata_db_created'))\n else:\n print(self._mh._trn.msg('yoda_testdata_db_exists', dsn))\n result = False\n\n return result\n except Error as ex:\n print(self._mh._trn.msg('yoda_testdata_db_error', ex))\n return False", "def get_db():\n db = getattr(g, '_database', None)\n if db is None:\n with app.app_context():\n if app.config.get('TESTING'):\n db = g._database = sqlite3.connect(app.config['DATABASE'])\n db.row_factory = sqlite3.Row\n db.execute('PRAGMA foreign_keys = ON')\n elif app.config['PRODUCTION']:\n components = urlparse.urlparse(os.environ['DATABASE_URL'])\n db = g._database = psycopg2.connect(\n database=components.path[1:],\n user=components.username,\n password=components.password,\n host=components.hostname\n )\n else:\n db = g._database = psycopg2.connect(\n 'dbname={0} user={1} password={2}'.format(\n app.config['DATABASE'], app.config['USER'],\n app.config['PASSWORD']))\n return db", "def get_database():\r\n dbpath = \"/\".join(__file__.split('/')[:-1] + ['samples.db'])\r\n return shelve.open(dbpath,protocol=2,writeback=True)", "def getDefaultDB():\n\n return _defaultDB", "def get_db_info():\n db_info = db_specification()\n\n server = db_info.get(\"server\")\n port = db_info.get(\"port\")\n repository = db_info.get(\"repository\")\n\n connection = test_db_connection(server, port, repository)\n if connection == False:\n return get_db_info()\n else:\n return db_info", "def get_real_test_db_connection():\n\ttry:\n\t\tmongoengine.connect(settings._MONGODB_NAME, host= settings._MONGODB_DATABASE_HOST)\n\t\tconnect_db = mongoengine.connection.get_connection()\n\t\treturn connect_db\n\texcept Exception as e:\n\t\tlogging.debug( '%s (%s)' % (e.message, type(e)))", "def database(request):\r\n\r\n if os.getenv('WAREHOUSE_DATABASE_URL'):\r\n # Assume that the database was externally created\r\n url = os.getenv('WAREHOUSE_DATABASE_URL')\r\n else:\r\n # (Drop and) create the warehouse_unittest database with UTF-8 encoding\r\n # (in case the default encoding was changed from UTF-8)\r\n subprocess.call(['dropdb', 'warehouse_unittest'])\r\n subprocess.check_call(['createdb', '-E', 'UTF8', 'warehouse_unittest'])\r\n url = 'postgresql:///warehouse_unittest'\r\n\r\n engine = create_engine(url, poolclass=AssertionPool)\r\n\r\n request.addfinalizer(engine.dispose)\r\n\r\n if not os.getenv('WAREHOUSE_DATABASE_URL'):\r\n request.addfinalizer(\r\n lambda: subprocess.call(['dropdb', 'warehouse_unittest'])\r\n )\r\n\r\n # Connect to the database and create the necessary extensions\r\n engine.execute('CREATE EXTENSION IF NOT EXISTS \"citext\"')\r\n 
engine.execute('CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\"')\r\n\r\n # Have Alembic create the schema\r\n alembic_cfg = alembic.config.Config()\r\n alembic_cfg.set_main_option(\r\n \"script_location\",\r\n \"warehouse:migrations\",\r\n )\r\n alembic_cfg.set_main_option(\"url\", url)\r\n alembic.command.upgrade(alembic_cfg, \"head\")\r\n\r\n return engine", "def setup_database(config=None):\n if config is None:\n # TODO: How could we support orion.core.config.storage.database as well?\n config = orion.core.config.database.to_dict()\n\n db_opts = config\n dbtype = db_opts.pop(\"type\")\n\n log.debug(\"Creating %s database client with args: %s\", dbtype, db_opts)\n\n return database_factory.create(dbtype, **db_opts)", "def database():\n client = MongoClient(username=\"user\", password=\"pass\", authSource=\"orion_test\")\n database = client.orion_test\n yield database\n client.close()", "def db(self):\n zodb = self.extensions.get('zodb', None)\n if zodb is not None:\n return zodb.db\n raise RuntimeError('Nothing Set!')", "def setup_db(request):\n def fin():\n os.remove('test.sql')\n request.addfinalizer(fin)\n\n cfg = load_test_yaml()\n sql = db.Adapter(cfg['engine'])\n\n for process, directives in cfg['process'].items():\n if not 'pk' in directives:\n directives['pk'] = '_id'\n if directives['action'] == 'store':\n sql.declare(directives['tablename'], directives['pk'], directives['schema'])\n return sql", "def testdatabase_factory(request):\n # Inline imports so this plugin can work if you don't have sqlalchemy and don't need this fixture\n from sqlalchemy import create_engine, event\n from sqlalchemy.engine.url import make_url\n from sqlalchemy.orm import sessionmaker, scoped_session\n\n def _testdatabase_factory(base, database_url, setup_global_test_fixtures=None):\n \"\"\"\n Params:\n - base: Python module that contains Session, Base\n - database_url: The main DATABASE_URL\n - setup_global_test_fixtures: Callback to set up global fixtures\n \"\"\"\n\n def testdatabase():\n \"\"\"\n Set up session-wide test database\n Returns a dictionary, db_params, with engine and connection\n \"\"\"\n reset_db = request.config.getoption(\"--reset-db\")\n Base = base.Base\n\n db_params = {}\n engine = base.engine\n\n # Using the original DATABASE_URL, make a new TEST_DATABASE_URL for the test database\n # (same server but just different db name)\n url_parsed = make_url(database_url)\n\n # Just get the \"base\" Postgres URL without a database name\n BASE_DB_SERVER_URL = 'postgres://{}:{}@{}:{}'.format(\n url_parsed.username, url_parsed.password_original, url_parsed.host, url_parsed.port or 5432\n )\n # Assume there is a basic database called Postgres (we always have to connect to some database)\n DB_SERVER_URL = '{}/postgres'.format(BASE_DB_SERVER_URL)\n\n # Construct a separate URL for the test db (same server but separate test database name)\n test_db_name = '{}_test'.format(url_parsed.database)\n TEST_DB_URL = '{}/{}'.format(BASE_DB_SERVER_URL, test_db_name)\n\n temp_engine = create_engine(DB_SERVER_URL)\n temp_conn = temp_engine.connect() # todo add code to wait for Postgres to be running\n temp_conn.execute('commit') # end the already open transaction\n\n check_existing_db_query = (\n \"SELECT datname FROM pg_catalog.pg_database WHERE lower(datname) = lower('{}')\".format(\n test_db_name\n )\n )\n res = temp_conn.execute(check_existing_db_query)\n existing_database_found = False\n tables_exist = None\n\n if len(list(res)) > 0:\n existing_database_found = True\n print('Test database already 
exists')\n else:\n temp_conn.execute('create database {}'.format(test_db_name))\n print('Creating test database:', test_db_name)\n tables_exist = False\n\n temp_conn.close()\n\n # Clear tables to reset schema, if requested\n if existing_database_found and reset_db:\n # Do dropdb instead of Base.metadata.drop_all_engine() because sometimes cascade-deletes fails\n # with FKs, if we are removing a table / changing the schema a lot\n print('Dropping/creating test database')\n subprocess.run('dropdb {}'.format(test_db_name), shell=True, check=True)\n subprocess.run('createdb {}'.format(test_db_name), shell=True, check=True)\n tables_exist = False\n\n base.initialize_database(TEST_DB_URL)\n engine = base.engine\n connection = engine.connect()\n\n if tables_exist is None:\n # Find out if tables exist\n num_tables = list(\n connection.execute(\"select count(*) from information_schema.tables where table_schema='public'\")\n )[0][0]\n tables_exist = num_tables > 0\n\n # Recreate sessions to bind to this new connection\n session_factory = sessionmaker(bind=connection)\n Session = scoped_session(session_factory)\n base.Session = Session\n\n Base.metadata.create_all(engine)\n print('Created all tables')\n\n if tables_exist is False and setup_global_test_fixtures is not None:\n # Set up global fixture if it's first time making DB (easier doing this vs trying to set up\n # another nested transaction\n setup_global_test_fixtures()\n\n session = Session\n\n db_params['engine'] = engine\n db_params['connection'] = connection\n db_params['session'] = session\n\n # Allow for nested transactions inside tests\n @event.listens_for(session, \"after_transaction_end\")\n def restart_savepoint(session, transaction):\n if transaction.nested and not transaction._parent.nested:\n session.expire_all()\n session.begin_nested()\n\n @event.listens_for(session, 'before_commit')\n def check_before_commit(session):\n if not current_test_settings['db_allowed']:\n raise Exception(\"Test tried to access the database without declaring 'db' fixture\")\n\n yield db_params\n\n connection.close()\n\n return testdatabase()\n\n return _testdatabase_factory", "def test_init(self):\n # Assert that initialisation works correctly\n database_helper = DatabaseHelper(db_name=test_db_name)\n assert database_helper\n\n # Assert the connection to the internal database has been made\n assert database_helper._db_connection", "def db(self):\n return self._db or DEFAULT_DB_ALIAS", "def postgres():\n utils.create_db()\n try:\n yield utils.get_postgres_dsn()\n finally:\n utils.drop_db()", "def test_postgres_connector():\n connector = PostgresConnector()\n db_connector = DBConnector(connector)\n\n connection = db_connector.connect_to_db('source/bbdd/db_info.ini')\n\n assert connection is not None", "def db():", "def create_test_db(self):\n self.engine = sqlalchemy.create_engine(\"sqlite:///:memory:\")\n self.slave = self.engine\n self.metadata = Metadata()\n self.create_db()\n self.reset_db()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load emoji codes from the JSON file. This function tweaks some emojis to avoid Sphinx warnings when generating
def load_emoji_codes(): fname = resource_filename(__name__, 'codes.json') with open(fname, encoding='utf-8') as fp: codes = json.load(fp) # Avoid unexpected warnings warning_keys = [] for key, value in codes.items(): if value.startswith("*"): warning_keys.append(key) for key in warning_keys: codes[key] = "\\" + codes[key] return codes
[ "def load_file(self, file):\n with open(file, \"r\", encoding=\"utf-8\") as fileobj:\n data = json.load(fileobj)\n\n index = dict()\n pattern = re.compile(r\"<(a?):([a-zA-Z0-9\\_]+):([0-9]+)>$\")\n\n for key, value in data.items():\n if not isinstance(key, str):\n t = type(key).__name__\n raise TypeError(f\"Bad type {t!r} for emoji key (must be str)!\")\n \n if not isinstance(value, str):\n t = type(key).__name__\n raise TypeError(f\"Bad type {t!r} for value of emoji {key!r} (must be str)!\")\n\n match = pattern.match(value)\n\n if match:\n anim = bool(match.group(1))\n name = match.group(2)\n id = int(match.group(3))\n partial = discord.PartialEmoji(animated=anim, name=name, id=id)\n else:\n partial = discord.PartialEmoji(animated=False, name=value, id=None)\n\n index[key] = partial\n\n self.objects.update(index)", "def download(cls):\n emojis = json.loads(urllib.request.urlopen(cls.URL).read())\n emoji_names = {}\n for emoji in emojis:\n utf8 = unified_to_unicode(emoji[\"unified\"])\n for name in emoji[\"short_names\"]:\n emoji_names[name] = utf8\n\n with open(cls.JSON_PATH, \"w\") as f:\n json.dump(emoji_names, f, sort_keys=True, indent=2)", "def test__parse_custom_emojis():\n emojis = {\n BUILTIN_EMOJIS['heart'],\n Emoji.precreate(202301010080, name = 'haru', animated = True),\n BUILTIN_EMOJIS['knife'],\n Emoji.precreate(202301010081, name = 'kuroi'),\n }\n text = ' '.join([emoji.as_emoji for emoji in emojis] * 2)\n \n expected_output = {emoji for emoji in emojis if emoji.is_custom_emoji()}\n \n parsed_emojis = parse_custom_emojis(text)\n vampytest.assert_eq(expected_output, parsed_emojis)", "def make_emoji_dict(self):\n emoji_dict = {}\n for line in self.emoji_full_filepath.rstrip('\\n').split('\\n'):\n (emoji, description) = line.strip().split('\\t')[0:2]\n emoji_dict[emoji] = description\n\n return emoji_dict", "def getEmojis(self):\n def f(emojis_json):\n emojis = []\n for e in emojis_json:\n try:\n emojis.append(e['emoji'])\n except KeyError:\n pass # No Emoji found.\n return emojis\n return self.cache.get(EMOJI_API, f, fallback=[])", "async def change_custom_emojis(self, attachments: List[discord.Attachment]) -> None:\n try:\n async with self.session.get(attachments[0].url) as infile:\n data = yaml.safe_load(await infile.read())\n except yaml.error.YAMLError as exc:\n raise InvalidFileError(\"Error Parsing the YAML\") from exc\n # new_dict = {}\n for team in TEAMS:\n TEAMS[team][\"emoji\"] = data[team][0] if data[team][0] is not None else data[\"Other\"][0]\n team_data = json.dumps(TEAMS, indent=4, sort_keys=True, separators=(\",\", \" : \"))\n constants_string = (\n f'BASE_URL = \"{BASE_URL}\"\\n'\n f'HEADSHOT_URL = \"{HEADSHOT_URL}\"\\n'\n f'CONTENT_URL = \"{CONTENT_URL}\"\\n'\n f\"CONFIG_ID = {CONFIG_ID}\\n\"\n f\"TEAMS = {team_data}\\n\"\n )\n path = Path(__file__).parent / \"new-constants.py\"\n constants_string = constants_string.replace(\"true\", \"True\").replace(\"false\", \"False\")\n with path.open(\"w\") as outfile:\n outfile.write(constants_string)", "def _reassign_emoji(cmap_ops):\n\n cmap_ops.phase('reassign emoji')\n\n color_only_emoji = set(unicode_data.get_presentation_default_emoji())\n color_only_emoji.remove(0x1f004) # mahjong tile red dragon\n color_only_emoji.remove(0x1f0cf) # playing card black joker\n # remove emoji with a variation selector that allows a text presentation\n # include proposed variants from 2016/08/23\n color_only_emoji -= unicode_data.get_unicode_emoji_variants(\n 'proposed_extra')\n\n all_emoji = unicode_data.get_emoji()\n 
cmap_ops.create_script('Zsye')\n cmap_ops.add_all(all_emoji, 'Zsye')\n\n cmap_ops.remove_all_from_all(color_only_emoji, ['Zsym', 'SYM2'])", "def replace_emojis(df):\n emoji_dictionary = {'\\xe2\\x9d\\xa4\\xef\\xb8\\x8f': str(u'<3'),\n '\\xf0\\x9f\\x91\\xa8': str(u':3'),\n '\\xf0\\x9f\\x92\\x94': str(u'</3'),\n '\\xf0\\x9f\\x98\\x82': str(u\":')\"),\n '\\xf0\\x9f\\x98\\x83': str(u':)'),\n '\\xf0\\x9f\\x98\\x84': str(u':D'),\n '\\xf0\\x9f\\x98\\x87': str(u'o:)'),\n '\\xf0\\x9f\\x98\\x89': str(u';)'),\n '\\xf0\\x9f\\x98\\x8d': str(u':*'),\n '\\xf0\\x9f\\x98\\x8e': str(u'8)'),\n '\\xf0\\x9f\\x98\\x90': str(u':|'),\n '\\xf0\\x9f\\x98\\x92': str(u':$'),\n '\\xf0\\x9f\\x98\\x95': str(u':/'),\n '\\xf0\\x9f\\x98\\x97': str(u':*'),\n '\\xf0\\x9f\\x98\\x98': str(u':*'),\n '\\xf0\\x9f\\x98\\x99': str(u':*'),\n '\\xf0\\x9f\\x98\\x9a': str(u':*'),\n '\\xf0\\x9f\\x98\\x9b': str(u':p'),\n '\\xf0\\x9f\\x98\\x9c': str(u';d'),\n '\\xf0\\x9f\\x98\\x9d': str(u'x-p'),\n '\\xf0\\x9f\\x98\\x9e': str(u\":'(\"),\n '\\xf0\\x9f\\x98\\xa0': str(u'>:('),\n '\\xf0\\x9f\\x98\\xa1': str(u':@'),\n '\\xf0\\x9f\\x98\\xa2': str(u\":'(\"),\n '\\xf0\\x9f\\x98\\xa5': str(u\":'(\"),\n '\\xf0\\x9f\\x98\\xa6': str(u':('),\n '\\xf0\\x9f\\x98\\xae': str(u':o')}\n\n df['message'] = df['message'].replace(emoji_dictionary)\n\n return df", "def load_emoji_embeddings(self):\n emoji_embeddings_path = \"./data/word2vec/emoji2vec.bin\"\n print(\"Loading Emoji Embeddings\")\n model = models.KeyedVectors.load_word2vec_format(emoji_embeddings_path, binary=True)\n return model", "def trans_emoji(text):\n def _emoji(matched):\n hex = matched.group(1)\n return ('\\\\U%08x' % int(hex, 16)).decode('unicode-escape').encode('utf-8')\n\n replace_t = re.sub(Constant.REGEX_EMOJI, _emoji, text)\n return replace_t", "def remove_emoji(text):", "async def replace(self, ctx, emoji):\n await ctx.message.channel.send(f\"Reply emoji changed to {emoji}\")\n if '<' in emoji:\n emoji = emoji.split(':')[1]\n with open(\"emoji\", \"wb\") as f:\n f.write(emoji.encode('utf-8'))", "def get_emoji(i):\n if i < 0 or i >= len(map_id_to_emoji):\n raise KeyError('Invalid Emoji ID')\n return map_id_to_emoji[i]", "def normalize_emoji(text):\n # Translate textual smilies to color emoji.\n text = re.sub(TEXT_TO_EMOJI_PATTERN, text_to_emoji_callback, text)\n # Translate hollow smilies to color emoji.\n text = re.sub(WHITE_TO_EMOJI_PATTERN, white_to_emoji_callback, text)\n # Translate text macros to color emoji.\n return emoji.emojize(text, use_aliases=True)", "def _emoji_pua_set():\n return lint_config.parse_int_ranges('FE4E5-FE4EE FE82C FE82E-FE837')", "def replace_emoji(status):\n return EMOJI.get(Status(status), '')", "def parse_custom_emojis(text):\n if text is None:\n return set()\n \n return {*_iter_parse_custom_emojis(text)}", "async def unicode_emoji(self, ctx: Context, emoji: str) -> None:\n\n await ctx.send(f\"`{emoji.encode('unicode-escape').decode('ASCII')}`\")", "def parse_emoji(text):\n parsed = EMOJI_RP.fullmatch(text)\n if (parsed is not None):\n animated, name, emoji_id = parsed.groups()\n animated = (animated is not None)\n emoji_id = int(emoji_id)\n return Emoji._create_partial(emoji_id, name, animated)\n \n try:\n return UNICODE_TO_EMOJI[text]\n except KeyError:\n pass\n \n if text.startswith(':') and text.endswith(':') and not text.endswith(VARIATION_SELECTOR_16_POSTFIX_WITH_COLON):\n try:\n return BUILTIN_EMOJIS[text[1:-1]]\n except KeyError:\n pass\n \n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show all the pets.
def show_pets(): pets = Pet.query.all() return render_template("pet-list.html", pets=pets)
[ "def list_pets():\n pets = Pet.query.all()\n return render_template('list.html', pets=pets)", "def display_pets_list():\n\n pets = Pet.query.all()\n\n return render_template('pet_listing.html',\n pets=pets)", "def show_pets(self):\r\n print(\"The owner of these pets : \" + self.owner)\r\n for each in self.name:\r\n print(each)", "def showAllEats():\n\n eats = session.query(Eats).all()\n return render_template('alleats.html', eats=eats,\n login_session=login_session)", "def showAllItems():\n\n items = readAllItems()\n return render_template('show_all_items.html', items=items)", "def all_pets(request):\n pets = Pet.objects.filter(removed__isnull=True)\n types = request.GET.get('type')\n if types:\n where = None\n for typ in types.split(','):\n if where:\n where |= Q(type__icontains=typ)\n else:\n where = Q(type__icontains=typ)\n pets = pets.filter(where)\n\n format = request.GET.get('format', \"\")\n if format.lower() == \"json\":\n return json_response([model_to_dict(p) for p in pets])\n \n attrs = {\n 'pets': pets,\n }\n return render(request, 'list.html', attrs)", "def list_pets(self, **kwargs):\n return self.make_request(\"/pets\", params=kwargs)", "def show_inventory(self):\n for i in self.inventory:\n self.show_car(i)", "def show_home_page():\n pets = Pet.query.all()\n \n return render_template('home.html', pets = pets)", "def showAllPilots(self):\n data_list = self.__logic.showAllPilots()\n self.__data_printer.printAllEmps(data_list)", "def show_all_movies():\n\n movies = crud.get_movies()\n\n return render_template('all_movies.html', movies = movies)", "def view_all_prices():\n get_all = SHEET.worksheet(\"price\").get_all_records()\n for price in get_all:\n print_all_price(price)\n back_to_menu()", "def show_product(self):\n return self.baskets", "def plants_list():\n return render_template('plants_list.html', plants=plants.find())", "def display_all_tickets(self):\n\n self.model.get_all_tickets()\n pass", "def plants_list():\n\n # Database call to retrieve *all* plants from the Mongo database's `plants` collection.\n plants_data = plants_collection.find({})\n\n context = {\n 'plants': plants_data,\n }\n return render_template('plants_list.html', **context)", "def showAllLocs():\n\n locations = session.query(Locations).all()\n return render_template('locations.html',\n locations=locations, login_session=login_session)", "def show_all_employees():\n\n logger.debug('Function show_all_employees(). Routed to /employees')\n titles = ['Name', 'Birthday', 'In Department']\n employees = es.get_all()\n\n logger.info('Get list of employees, length = %i', len(employees))\n return render_template('employees.html',\n title='Employees',\n table_title='List of Employees',\n headers=titles,\n employees=employees)", "def show_all_models():\n query_set = KModel.objects.all()\n return query_set_to_html(query_set)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add pet form; handle adding and display of form.
def show_and_handle_new_pet_form(): form = AddPetForm() if form.validate_on_submit(): name = form.name.data species = form.species.data img = form.img.data or None age = form.age.data notes = form.notes.data new_pet = Pet(name=name, species=species, img=img, age=age, notes=notes) db.session.add(new_pet) db.session.commit() flash(f"Added {name} to pet list") return redirect("/") else: return render_template( "add-pet-form.html", form=form)
[ "def display_add_pet_form():\n form = AddPetForm()\n\n if form.validate_on_submit():\n pet = Pet(\n name=form.name.data,\n species=form.species.data,\n photo_url=form.photo_url.data,\n age=form.age.data,\n notes=form.notes.data)\n db.session.add(pet)\n db.session.commit()\n flash(f\"Added new pet: {pet.name}\")\n return redirect(\"/\")\n else:\n return render_template(\"add_pet.html\", form=form)", "def add_a_pet():\n form = AddPetForm()\n\n if form.validate_on_submit():\n \"\"\"when submitting form\"\"\"\n name = form.name.data\n species = form.species.data\n photo_url = form.photo_url.data\n age = form.age.data\n notes = form.notes.data\n\n # create an add pet submit\n pet = Pet(name=name, species=species, photo_url=photo_url, age=age, notes=notes)\n db.session.add(pet)\n db.session.commit()\n return redirect('/')\n else:\n \"\"\"showing form\"\"\"\n return render_template('add-pet.html', form=form)", "def add_pet():\n form = AddPetForm()\n if form.validate_on_submit():\n new_pet = Pet(\n name = form.name.data,\n species = form.species.data,\n photo_url = form.photo_url.data,\n age = form.age.data,\n notes = form.notes.data,\n available = form.available.data)\n db.session.add(new_pet)\n db.session.commit()\n return redirect('/')\n \n else:\n return render_template('add_pet.html', form=form)", "def show_pet_details(id):\n pet = Pet.query.get_or_404(id)\n form = AddPet(obj=pet)\n if form.validate_on_submit(): \n pet.name = form.name.data\n pet.species = form.species.data\n pet.notes = form.notes.data\n pet.photo_url = form.photo_url.data\n pet.available = form.available.data\n db.session.commit()\n return redirect('/')\n else: \n return render_template('pet_details.html', pet=pet, form=form)", "def display_pet_details_and_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n print(\"*!*!*!*!*! 
IT WORKED !*!!\"*10)\n pet.photo_url=form.photo_url.data\n pet.notes=form.notes.data\n pet.available=form.available.data\n db.session.commit()\n flash(f\"Edited pet: {pet.name}\")\n return redirect(f\"/{pet_id}\")\n else:\n return render_template(\"edit_pet.html\", form=form, pet=pet)", "def show_add_form():\n return render_template(\"add_user.html\")", "def new_tag_form():\n\n return render_template(\"new-tag.html\")", "def add_pet(self, pet):", "def show_create_tag_form():\n\n\n return render_template(\"/add-tag.html\")", "def show_tag_form():\n\n return render_template('add_tag.html')", "def student_add_form():\n\n return render_template(\"add_student.html\")", "def addListing(self):\n listing_id = self.request.matchdict.get('listing_id', None)\n\n if listing_id:\n listing = self.jobs_lib.getListingById(listing_id)\n if listing.user_id != self.request.authenticated_userid:\n return HTTPForbidden()\n else:\n listing = self.jobs_lib.newListing()\n\n user = self.jobs_lib.getUserById(self.request.authenticated_userid)\n\n myform = Form(self.getListingForm(user), buttons=('Post Add',))\n\n if self.request.method == 'POST':\n check_csrf_token(self.request)\n\n controls = self.request.POST.items() # get the form controls\n\n try:\n appstruct = myform.validate(controls) # call validate\n except deform.ValidationFailure as e: # catch the exception\n return {'form':e.render()} # re-render the form with an exception\n\n self.jobs_lib.createUpdateListing(user, listing, **appstruct['listing'])\n\n if listing.user.email_validated is False:\n self.sendValidationEmail(listing.user)\n\n return HTTPFound(location=self.request.route_path('showlisting',\n listing_id=listing.listing_id,\n listing_title=listing.safeTitle()))\n else:\n appstruct = dict(listing=dict(csrf_token=self.request.session.get_csrf_token()))\n if user.company_id:\n appstruct['listing']['company'] = user.company.name\n\n if listing_id:\n # Display the edit form with pre-existing values\n columns = self.jobs_lib.getColumns(listing)\n for key in columns:\n appstruct['listing'][key] = getattr(listing, key)\n\n return dict(form=myform.render(appstruct))", "def show_add_entry_form(request):\n return render(request, 'phones/add_entry.html', {\n 'form': forms.EntryForm()\n })", "def add(what):\n message = None\n if request.method == \"POST\":\n\n if what == 'person':\n check_person_id = bank.add_persons(request.form)\n if check_person_id is False:\n message = \"Error: id {i} already exist\".format(i=request.form[\"id\"])\n else:\n message = \"{name} has been added\".format(name=request.form[\"name\"])\n print(request.form)\n\n else:\n bank.add_acoounts(request.form)\n message = \"a new {acc} has been added\".format(acc=request.form[\"type\"])\n print(request.form)\n\n bank.save_data()\n\n return render_template(\n \"add.html\",\n what=what,\n account_types=[{\"id_\": \"Account\"}, {\"id_\": \"SavingsAccount\"}],\n message=message\n )", "def do_creation_switches(self):\n form = self.caller.db.petition_form\n if \"submit\" in self.switches:\n if not form:\n raise self.PetitionCommandError(\"You must create a form first.\")\n form = PetitionForm(form, owner=self.caller.dompc)\n if not form.is_valid():\n raise self.PetitionCommandError(form.display_errors())\n petition = form.save()\n self.msg(\"Successfully created petition %s.\" % petition.id)\n self.caller.attributes.remove(\"petition_form\")\n if petition.organization is not None:\n members = Member.objects.filter(\n organization=petition.organization, deguilded=False\n )\n targets = (\n 
PetitionSettings.objects.all()\n .exclude(ignored_organizations=petition.organization)\n .exclude(inform=False)\n .filter(owner__memberships__in=members)\n )\n targets = [\n ob\n for ob in targets\n if petition.organization.access(ob.owner, \"view_petition\")\n ]\n for target in targets:\n target.owner.player.msg(\n \"{wA new petition was posted by %s to %s.{n\"\n % (petition.owner, petition.organization)\n )\n target.owner.player.inform(\n \"{wA new petition was posted by %s to %s.{n|/|/%s\"\n % (petition.owner, petition.organization, petition.display()),\n category=\"Petition\",\n append=True,\n )\n else:\n targets = (\n PetitionSettings.objects.all()\n .exclude(inform=False)\n .exclude(ignore_general=True)\n )\n for target in targets:\n target.owner.player.msg(\n \"{wA new petition was posted by %s{n\" % petition.owner\n )\n target.owner.player.inform(\n \"{wA new petition was posted by %s{n|/|/%s\"\n % (petition.owner, petition.display()),\n category=\"Petition\",\n append=True,\n )\n else:\n if \"create\" in self.switches:\n if form:\n self.display_petition_form()\n raise self.PetitionCommandError(\n \"You already are creating a petition.\"\n )\n self.caller.db.petition_form = {\n \"topic\": self.lhs or None,\n \"description\": self.rhs,\n }\n elif form is None:\n raise self.PetitionCommandError(\"You must use /create first.\")\n elif \"topic\" in self.switches:\n form[\"topic\"] = self.args\n elif \"desc\" in self.switches:\n form[\"description\"] = self.args\n elif \"org\" in self.switches:\n from world.dominion.models import Organization\n\n if not self.args:\n form[\"organization\"] = None\n else:\n try:\n form[\"organization\"] = Organization.objects.get(\n name__iexact=self.args\n ).id\n except (Organization.DoesNotExist, ValueError, TypeError):\n raise self.PetitionCommandError(\"No organization by that name.\")\n elif \"cancel\" in self.switches:\n self.caller.attributes.remove(\"petition_form\")\n self.msg(\"Petition form cancelled.\")\n self.display_petition_form()", "def add_form(request, athlete_id, year, month, day, template = 'athletelog/competition_form.html'):\n year, month, day = int(year), int(month), int(day)\n date = datetime.date(year, month, day)\n athlete = models.Athlete.objects.get(person__user__username=athlete_id)\n competition_data = {'day': date, 'event': '50 m', 'place': '', 'result': ''}\n return display_form(request, 'add', athlete, date, competition_data, add_submit, template)", "def addForm(self, name, form):\n # XXX should check that name is a legal PDF name\n if self.inObject != \"form\":\n self.inForm()\n self.Reference(form, xObjectName(name))\n self.inObject = None", "def create_pet(self, pet):\n return self.make_request('/pets', pet, method='POST')", "def edit_pet_details(pet_id):\n\n pet = Pet.query.get_or_404(pet_id)\n\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.img = form.img.data or None\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n\n flash(f\"Successfully edited the details for {pet.name}\")\n\n return redirect(f\"/{pet.id}\")\n\n else:\n return render_template(\"pet-details.html\", pet=pet, form=form)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Edit pet details form; handle editing or displaying a form
def edit_pet_details(pet_id): pet = Pet.query.get_or_404(pet_id) form = EditPetForm(obj=pet) if form.validate_on_submit(): pet.img = form.img.data or None pet.notes = form.notes.data pet.available = form.available.data db.session.commit() flash(f"Successfully edited the details for {pet.name}") return redirect(f"/{pet.id}") else: return render_template("pet-details.html", pet=pet, form=form)
[ "def display_pet_details_and_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = EditPetForm(obj=pet)\n if form.validate_on_submit():\n print(\"*!*!*!*!*! IT WORKED !*!!\"*10)\n pet.photo_url=form.photo_url.data\n pet.notes=form.notes.data\n pet.available=form.available.data\n db.session.commit()\n flash(f\"Edited pet: {pet.name}\")\n return redirect(f\"/{pet_id}\")\n else:\n return render_template(\"edit_pet.html\", form=form, pet=pet)", "def show_pet_details(id):\n pet = Pet.query.get_or_404(id)\n form = AddPet(obj=pet)\n if form.validate_on_submit(): \n pet.name = form.name.data\n pet.species = form.species.data\n pet.notes = form.notes.data\n pet.photo_url = form.photo_url.data\n pet.available = form.available.data\n db.session.commit()\n return redirect('/')\n else: \n return render_template('pet_details.html', pet=pet, form=form)", "def show_edit_post_form(id):\n post = Post.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_edit.html\" , post=post , tags=tags)", "def post_edit_form(post_id):\n posts = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n return render_template(\"/edit-post.html\", posts=posts, tags=tags)", "def show_and_handle_new_pet_form():\n\n form = AddPetForm()\n\n if form.validate_on_submit():\n name = form.name.data\n species = form.species.data\n img = form.img.data or None\n age = form.age.data\n notes = form.notes.data\n\n\n new_pet = Pet(name=name,\n species=species,\n img=img,\n age=age,\n notes=notes)\n\n db.session.add(new_pet)\n db.session.commit()\n\n flash(f\"Added {name} to pet list\")\n\n return redirect(\"/\")\n\n else:\n return render_template(\n \"add-pet-form.html\", form=form)", "def display_add_pet_form():\n form = AddPetForm()\n\n if form.validate_on_submit():\n pet = Pet(\n name=form.name.data,\n species=form.species.data,\n photo_url=form.photo_url.data,\n age=form.age.data,\n notes=form.notes.data)\n db.session.add(pet)\n db.session.commit()\n flash(f\"Added new pet: {pet.name}\")\n return redirect(\"/\")\n else:\n return render_template(\"add_pet.html\", form=form)", "def show_edit_tag_form(id):\n tag = Tag.query.get_or_404(id)\n return render_template(\"tag_edit.html\" , tag=tag)", "def edit(request):\n if request.method == 'POST':\n form = VMEditForm(request.POST)\n if form.is_valid():\n VM_id = form.cleaned_data['VM_id']\n flavor_id = form.cleaned_data['flavor_id']\t\n\t\t\tapi.editVM(VM_id, flavor_id)\n\t\t\treturn HttpResponseRedirect('/project_space/manage')\n else:\n\t\treturn HttpResponseRedirect('/project_space/manage')", "def browse_edit():\n # if user is creating a new empty deck, render only the deckname field with newtermfield and newdeffield\n # if user is editing a preexisting deck, render the deckname field, all cards as term, definition, a term/def row for new entries, a \"return to decks\" button, and a \"review this deck\" button\n \n if request.method == 'POST':\n class _BrowseEditForm(BrowseEditForm):\n pass\n\n # print(request.form)\n\n # set decktitle field to the deck's name:\n deckid = request.form['hidden_deckid_field']\n deck = Deck.query.get(deckid)\n setattr(_BrowseEditForm.deckname, 'default', deck.deckname)\n\n browse_edit_form = _BrowseEditForm()\n class _CardForm(CardForm):\n pass\n \n for card in Card.query.filter_by(deck_id=deckid).all():\n _CardForm.term = card.term\n _CardForm.definition = card.definition\n browse_edit_form.cards.append_entry(_CardForm())\n # for empty decks, `cards` should not render.\n if browse_edit_form.validate_on_submit:\n return 
render_template('browse_edit.html', title='Enter a card', form=browse_edit_form)\n return redirect(url_for('browse_edit.html'))", "def editEat(eat_id):\n edited_eat = session.query(Eats).filter_by(id=eat_id).one()\n if login_session['user_id'] != edited_eat.user_id:\n flash(\"Sorry, you do not have permissions to edit this item\")\n return redirect(url_for('showAllEats'))\n form = newEatForm()\n avail_locs = [(loc.id, loc.name) for loc in session.query(Locations).all()]\n form.location.choices = avail_locs\n if request.method == 'POST':\n if form.name.data:\n edited_eat.name = form.name.data\n if form.description.data:\n edited_eat.description = form.description.data\n if form.pic_url.data:\n edited_eat.pic_url = form.pic_url.data\n if form.location.data:\n edited_eat.loc_id = form.location.data\n session.add(edited_eat)\n session.commit()\n flash('%s was edited!' % edited_eat.name)\n return redirect(url_for('showAllEats'))\n else:\n return render_template('editeat.html', eat=edited_eat,\n form=form, login_session=login_session)", "def service_edit_view(request, service_pk):\n\tif request.method == 'POST':\t\n\t\tservice = get_object_or_404(Service, pk = service_pk)\n\t\tform = bundle_forms.CreateServiceForm(request.POST, instance = service, prefix='editservice')\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tmessages.success(request, 'Se han guardado los cambios en el servicio con exito')\n\t\t\treturn HttpResponseRedirect('/services/'+str(service.id))\n\n\telse:\n\t\tservice = get_object_or_404(Service, pk = service_pk)\n\t\tform = bundle_forms.CreateServiceForm(instance = service ,prefix='editservice')\n\t\treturn render(request, 'bundles_app/edit_service.html', {'form':form} )", "def show_pet_info(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n\n return render_template('pet-details.html', pet=pet)", "def edit(id):\n catalogs = Catalog.get_all()\n item = Item.find_by_id(id)\n return render_template('items/edit.html', item=item, catalogs=catalogs)", "def edit(self):\n todo = Todo.find(self.request.param('id'))\n context = {\n 'todo': todo\n }\n return view('todo/edit', context)", "def edit_form(request, athlete_id, year, month, day, competition_id, template = 'athletelog/competition_form.html'):\n year, month, day = int(year), int(month), int(day)\n date = datetime.date(year, month, day)\n athlete = models.Athlete.objects.get(person__user__username=athlete_id)\n\n competition = get_object_or_404(models.Competition, pk=competition_id)\n competition_data = {'id': competition_id, 'day': date, 'event': competition.event.name,\n 'event_info': competition.event_info, 'result': competition.result,\n 'place': competition.place, 'note': competition.note}\n return display_form(request, 'edit', athlete, date, competition_data, edit_submit, template)", "def edit_tag_form(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n return render_template(\"edit-tag.html\", tag=tag)", "def edit_isp(isp_id):\n isp = db_session.query(ISP).filter_by(id=isp_id).one()\n\n if request.method == \"POST\":\n if request.form[\"choice\"] == \"edit\":\n isp.name = request.form[\"name\"]\n db_session.add(isp)\n db_session.commit()\n flash(\"ISP Successfully Edited.\")\n return redirect(url_for(\"show_isps\"))\n else:\n return render_template(\"edit_isp.html\", isp=isp, title=\"Edit ISP\")", "def edit_book(book_id):\n book_record = find_book(book_id)\n\n return render_template(\"edit_book.html\", book=book_record)", "def edit(organisation_id, practice_id):\n practice = Practice().get(organisation_id=organisation_id, 
practice_id=practice_id)\n people = Person().list(organisation_id=organisation_id)\n form = PracticeForm()\n if people:\n form.head.choices += [(person[\"id\"], person[\"name\"]) for person in people]\n\n if form.validate_on_submit():\n changed_practice = Practice().edit(\n organisation_id=organisation_id,\n practice_id=practice_id,\n name=form.name.data,\n head_id=form.head.data,\n cost_centre=form.cost_centre.data,\n )\n flash(\n \"Your changes to <a href='{}' class='alert-link'>{}</a> have been saved.\".format(\n url_for(\n \"practice.view\",\n organisation_id=organisation_id,\n practice_id=practice_id,\n ),\n changed_practice[\"name\"],\n ),\n \"success\",\n )\n return redirect(url_for(\"practice.list\", organisation_id=organisation_id))\n elif request.method == \"GET\":\n form.name.data = practice[\"name\"]\n if practice[\"head\"]:\n form.head.data = practice[\"head\"][\"id\"]\n form.cost_centre.data = practice[\"cost_centre\"]\n\n return render_template(\n \"edit_practice.html\",\n title=f\"Edit {practice['name']}\",\n form=form,\n practice=practice,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if "node" is of the given "node type", otherwise False.
def is_node_of_type(node, node_type):
    # type: (nt.DagNode, str) -> bool
    return mc.nodeType(str(node)) == node_type
[ "def is_type(node_name, node_type):\n\n if not maya.cmds.objExists(node_name):\n return False\n if maya.cmds.objectType(node_name) != node_type:\n return False\n return True", "def isPyNode(node):\r\n if re.search('pymel', str(node.__class__)):\r\n return 1\r\n else:\r\n return 0", "def is_node_a_state(node: dict) -> bool:\n try:\n if node['y:GenericNode']['@configuration'] == \"com.yworks.entityRelationship.big_entity\":\n return True\n except KeyError:\n logging.warning(\"%s node is incorrect\" % node['id'])\n return False\n return False", "def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.ScXMLObject_isOfType(self, type)", "def is_in_tree(self, type_to_search):\n return self.get_sub_tree(type_to_search) is not None", "def node_exists(config_node):\n return isinstance(config_node, ConfigNode)", "def IsNode(self, *args):\n return _snap.TNEANet_IsNode(self, *args)", "def IsNode(self, *args):\n return _snap.TUNGraph_IsNode(self, *args)", "def type_match(types: List) -> Callable[[torch.fx.Node], bool]:\n if not isinstance(types, list):\n types = [types]\n\n def fn(node):\n node_module = get_module_from_node(node)\n for t in types:\n if isinstance(node_module, t):\n return True\n return False\n\n return fn", "def IsNode(self, *args):\n return _snap.PUNGraph_IsNode(self, *args)", "def is_type(self, type_name):\n return self.property == '<http://linkedspending.aksw.org/ontology/'+type_name+'>'", "def is_node_a_choice(node: dict) -> bool:\n try:\n if node['y:GenericNode']['@configuration'] == \"com.yworks.bpmn.Gateway.withShadow\":\n return True\n except KeyError:\n logging.warning(\"%s node is incorrect\" % node['id'])\n return False\n return False", "def IsNode(self, *args):\n return _snap.TNEGraph_IsNode(self, *args)", "def IsNode(self, *args):\n return _snap.TNGraph_IsNode(self, *args)", "def IsNode(self, *args):\n return _snap.TBPGraph_IsNode(self, *args)", "def node_is_object(config_node):\n return isinstance(config_node, ConfigJSONObject)", "def IsNode(self, *args):\n return _snap.PNEANet_IsNode(self, *args)", "def isOfType(self, type: 'SoType') -> \"SbBool\":\n return _coin.SoField_isOfType(self, type)", "def IsNode(self, *args):\n return _snap.PNGraph_IsNode(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggle the visibility of the defined cameras' "clipping planes" manipulators.
def camera_manip_clipping_toggle(cameras, enable=True):
    # type: (Iterable[nt.Camera], bool) -> None
    # sets the visibility of the camera component manipulator for "clipping planes"
    # ["cycling index", "center of interest", "pivot", "clipping planes", "unused"]
    if enable:
        manipulators_state = [False, False, False, True, False]
    else:
        manipulators_state = [False, False, False, False, False]
    for cam in cameras:
        mc.renderManip(str(cam), e=True, camera=manipulators_state)
[ "def show_cam_clip_planes():\n for camera in cmds.ls(type=\"camera\"):\n cmds.setAttr(camera + \".nearClipPlane\", channelBox=True)\n cmds.setAttr(camera + \".farClipPlane\", channelBox=True)\n\n # select the perspCamera\n if cmds.objExists(\"persp\"):\n cmds.select(\"persp\")", "def viewClipPlane(camera, nearClipPlane=\"string\", farClipPlane=\"string\", surfacesOnly=bool, autoClipPlane=bool):\n pass", "def perCameraVisibility(camera=\"string\", remove=bool, removeAll=bool, exclusive=bool, removeCamera=bool, hide=bool):\n pass", "def set_cameras_clip_plane(cameras, near, far):\n # type: (Iterable[nt.Camera], float, float) -> None\n for cam in cameras: # type: nt.Camera\n cam.setNearClipPlane(near)\n cam.setFarClipPlane(far)", "def showPlane(self, flag): \n\t\tif flag:\n\t\t\tself.boxWidget.On()\n\t\telse:\n\t\t\tself.boxWidget.Off()", "def setNearFarClippingPlanes(*args, **kwargs):\n \n pass", "def change_default_clip_plane():\n import pymel.core as pm\n\n # methods\n def set_values(widget, near, far):\n \"\"\"Set the near and far clip values.\"\"\"\n cmds.optionVar(floatValue=[\"defaultCameraNearClipValue\", near.value()])\n cmds.optionVar(floatValue=[\"defaultCameraFarClipValue\", far.value()])\n widget.close()\n\n def create_intfield(layout, name):\n \"\"\"Create the near/far label and intfield combo.\"\"\"\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel(name + \" Clip:\")\n label.setMinimumWidth(60)\n field = QtWidgets.QDoubleSpinBox()\n if name == \"Near\":\n field.setMinimum(0.0000000001)\n field.setDecimals(4)\n else:\n field.setMaximum(10000000000)\n field.setDecimals(0)\n field.setValue(\n cmds.optionVar(query=\"defaultCamera{}ClipValue\".format(name))\n )\n hbox.addWidget(label)\n hbox.addWidget(field)\n hbox.setStretch(1, 1)\n layout.addLayout(hbox)\n return field\n\n # widgets\n maya = pm.toQtWindow(\"MayaWindow\")\n widget = QtWidgets.QDialog(maya)\n widget.setWindowTitle(\"Set default camera clipPlane\")\n widget.setMinimumWidth(300)\n\n layout = QtWidgets.QVBoxLayout(widget)\n layout.setContentsMargins(2, 2, 2, 2)\n layout.setSpacing(2)\n\n near_field = create_intfield(layout, \"Near\")\n far_field = create_intfield(layout, \"Far\")\n\n btn_hbox = QtWidgets.QHBoxLayout()\n ok_btn = QtWidgets.QPushButton(\"OK\")\n cl_btn = QtWidgets.QPushButton(\"Cancel\")\n btn_hbox.addWidget(ok_btn)\n btn_hbox.addWidget(cl_btn)\n layout.addLayout(btn_hbox)\n\n # signals\n ok_btn.clicked.connect(partial(set_values, widget, near_field, far_field))\n cl_btn.clicked.connect(widget.close)\n widget.show()", "def SetClipPlanes(self, *args):\n return _Graphic3d.Graphic3d_GraphicDriver_SetClipPlanes(self, *args)", "def toggle_visibility(self):\n\n if self.actor.GetVisibility():\n self.actor.VisibilityOff()\n\n else:\n self.actor.VisibilityOn()", "def set_default_clip_plane():\n values = [\n cmds.optionVar(query=\"defaultCameraNearClipValue\"),\n cmds.optionVar(query=\"defaultCameraFarClipValue\"),\n ]\n for camera in cmds.ls(type=\"camera\"):\n for attr in [\"nearClipPlane\", \"farClipPlane\"]:\n plug = \"{}.{}\".format(camera, attr)\n value = values[0] if \"near\" in attr else values[-1]\n if cmds.getAttr(plug, settable=True):\n cmds.setAttr(plug, value)\n\n show_cam_clip_planes()", "def setMaskPlaneVisibility(name, show=True):\n\n global _maskPlaneVisibility\n try:\n type(_maskPlaneVisibility)\n except NameError, e:\n _maskPlaneVisibility = {}\n\n if isinstance(name, dict):\n for k in name.keys():\n setMaskPlaneVisibility(k, name[k])\n return\n\n _maskPlaneVisibility[name] = show", 
"def SetClipPlanes(self, *args):\n return _Graphic3d.Graphic3d_Structure_SetClipPlanes(self, *args)", "def toggle_snap_to_surface_normal():\r\n pass", "def set_show_floor(visibility=True):\n for a in bpy.data.window_managers[0].windows[0].screen.areas:\n if a.type == \"VIEW_3D\":\n for space in a.spaces:\n if space.type == \"VIEW_3D\":\n space.show_floor = visibility", "def toggle_frontpanel(self, bools, text=None):\n self.Keithley6221.display_enabled = bools", "def toggleMultiBeamPlot(self):\n if self.mb_dock.isVisible(): self.mb_dock.hide()\n else: self.mb_dock.show()", "def force_visible_armature(self, armature_object): \n logging.debug(\"Turn the armature visibility ON\") \n if armature_object.hide == True:\n armature_object.hide = False\n for n in range(len(armature_object.layers)):\n armature_object.layers[n] = True", "def hide(objects, allObjects=bool, returnHidden=bool, invertComponents=bool, clearSelection=bool, testVisibility=bool):\n pass", "def _make_all_layers_visible():\r\n\r\n for i in range(len(bpy.context.scene.layers)):\r\n bpy.context.scene.layers[i] = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From the sequence of nodes, return nodes that are of a "Camera Type".
def resolve_cameras(nodes):
    # type: (Iterable[nt.DagNode]) -> Generator[nt.Camera]
    for node in nodes:
        if is_node_of_type(node, "transform"):
            for cam in node.listRelatives(type="camera"):
                yield cam
        elif is_node_of_type(node, "camera"):
            yield node
[ "def cameraNode(self):\n # update transform with current camera parameters - only default view for now\n viewNode = self.threeDView().mrmlViewNode()\n cameraNodes = slicer.util.getNodes('vtkMRMLCameraNode*')\n for cameraNode in cameraNodes.values():\n if cameraNode.GetActiveTag() == viewNode.GetID():\n return cameraNode\n return None", "def get_node_types(nodes, return_shape_type=True):\n\n from tpDcc.dccs.maya.core import shape\n\n nodes = python.force_list(nodes)\n\n found_type = dict()\n\n for n in nodes:\n node_type = maya.cmds.nodeType(n)\n if node_type == 'transform':\n if return_shape_type:\n shapes = shape.get_shapes(n)\n if shapes:\n node_type = maya.cmds.nodeType(shapes[0])\n if node_type not in found_type:\n found_type[node_type] = list()\n\n found_type[node_type].append(n)\n\n return found_type", "def _get_cameraType(self) -> \"adsk::core::CameraTypes\" :\n return _core.Camera__get_cameraType(self)", "def filter_node_list(self, node_list, node_type_filter_list):\n\n #node_list_filtered\n node_list_filtered = []\n\n #iterate and append\n for node in node_list:\n\n #match\n if (node.type().name() in node_type_filter_list):\n\n #append\n node_list_filtered.append(node)\n\n\n #return\n return node_list_filtered", "def type_match(types: List) -> Callable[[torch.fx.Node], bool]:\n if not isinstance(types, list):\n types = [types]\n\n def fn(node):\n node_module = get_module_from_node(node)\n for t in types:\n if isinstance(node_module, t):\n return True\n return False\n\n return fn", "def list_cameras(self):\n res = []\n camera_names = cmds.listCameras()\n for camera_name in camera_names:\n camera_shape = cmds.listRelatives(\n camera_name, type=\"camera\", s=True\n )[0]\n res.append((camera_name, camera_shape))\n return res", "def get_oneview_nodes(ironic_nodes):\n types = SUPPORTED_DRIVERS + SUPPORTED_HARDWARE_TYPES\n return [i for i in ironic_nodes if i.driver in types]", "def list_cameras(cls):\n return [cam.getTransform() for cam in pm.ls(type=\"camera\") if \"cam_\" in cam.name()]", "def get_nodes(self, role, is_role, nodes=None):\n if role not in self.VALID_ROLES:\n return []\n nodes = nodes or self.nodes\n return [node for node in nodes if node.is_role(role) == is_role]", "def nodesOfKind(sequence,kind,result):\n\tposition_base=sequence.getPosition()\n\tfor k in kind:\n\t\tnodes=sequence.getNodesOfType(k)\n\t\tif debug: print sequence.getId(),\":\",k,nodes\n\t\tfor n in nodes:\n\t\t\tn.setPosition(n.getPosition()+position_base)\n\t\t\tnodeToElement(n,result)\n\t\t\tn.setPosition(n.getPosition()-position_base)\n\ttry:\n\t\tsubsequences=sequence.getSequences()\n\texcept:\n\t\treturn \n\telse:\n\t\tfor s in subsequences:\n\t\t\tnodesOfKind(s,kind,result)", "def parse_cameras(cameras):\n itercams = iter(cameras)\n next(itercams) # don't care about the first line\n fmt = next(itercams) # second line contains format\n sep_type = \"Type weg\" in fmt #is road type separate from nr?\n\n cams = []\n for line in itercams:\n fields = line.split(',')\n if sep_type:\n fields[2:4] = fields[2]+fields[3], #cat road type and number\n if len(fields) == 6:\n fields[3:5] = [fields[3][1:]+\".\"+fields[4][:-1]] #fuse kilometer mark\n\n fields[-1] = fields[-1].rstrip('\\n')\n cams.append(tuple(fields))\n\n del(cams[-9:])\n return tuple(cams)", "def nodeTypeNameMatches(node, matchtype):\n return True", "def nodeTypeNameComponentsMatch(node, matchtype):\n return True", "def test_get_nodes_by_type(self):\n test_template = Template('{% load waffle_tags %}{% switch \"x\" %}{{ a }}{% else %}{{ b }}{% 
endswitch %}')\n children = test_template.nodelist.get_nodes_by_type(VariableNode)\n self.assertEqual(len(children), 2)", "def getFaces(faceType, nodeTags):", "def __nodeMatches(node, matchtype, parmlist, basetypematch=False):\n return True", "def get_by_type(self, types: list, additional_filter: str = None, attribute_values: dict = None,\n\t\t\t\t\tinclude_terminating: bool = False):\n\t\tself.logger.info('Loading nodes of type %s with filter %s and values %s', types, additional_filter,\n\t\t\t\t\t\t attribute_values)\n\n\t\tfilter = ''\n\t\tif not include_terminating:\n\t\t\tfilter = 'and ItemStatus <> :terminating and ItemStatus <> :removing'\n\n\t\tif additional_filter is None and attribute_values is not None:\n\t\t\traise RuntimeError('Filter is not set but attribute values are given.')\n\t\telif additional_filter is not None and attribute_values is None:\n\t\t\traise RuntimeError('Filter is set but no attribute values are given.')\n\t\telif additional_filter is not None and attribute_values is not None:\n\t\t\tfilter = filter + ' and (' + additional_filter + ')'\n\t\telif attribute_values is None:\n\t\t\tattribute_values = { }\n\n\t\tif not include_terminating:\n\t\t\tattribute_values.update({ ':terminating': 'terminating' })\n\t\t\tattribute_values.update({ ':removing': 'removing' })\n\n\t\tparts = []\n\t\tfor index, node_type in enumerate(types):\n\t\t\tattribute_values.update({ ':node_type' + str(index): node_type })\n\t\t\tparts.append('ItemType = :node_type' + str(index))\n\t\texpression = '(' + ' or '.join(parts) + ') ' + filter\n\n\t\titems = self.client.scan(expression, attribute_values)\n\n\t\tnodes = []\n\t\tfor item in items:\n\t\t\tnode = Node(item.pop('EC2InstanceId'), item.pop('ItemType'))\n\t\t\tnode.set_status(item.pop('ItemStatus'))\n\t\t\tfor k, v in item.items():\n\t\t\t\tnode.set_property(k, v)\n\t\t\tnodes.append(node)\n\n\t\treturn nodes", "def get_all(cls, connection):\n resp = connection._get('/1/tenants/%s/networkCameras' % connection.tenant.tenant_id)\n netcams = resp.json()['data']\n return [CogniacNetworkCamera(connection, netcam) for netcam in netcams]", "def _getNodes(self,typ):\n return odict([(k,n) for k,n in self._nodes.items() if isinstance(n, typ)])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set defined cameras clip plane values.
def set_cameras_clip_plane(cameras, near, far):
    # type: (Iterable[nt.Camera], float, float) -> None
    for cam in cameras:  # type: nt.Camera
        cam.setNearClipPlane(near)
        cam.setFarClipPlane(far)
[ "def set_default_clip_plane():\n values = [\n cmds.optionVar(query=\"defaultCameraNearClipValue\"),\n cmds.optionVar(query=\"defaultCameraFarClipValue\"),\n ]\n for camera in cmds.ls(type=\"camera\"):\n for attr in [\"nearClipPlane\", \"farClipPlane\"]:\n plug = \"{}.{}\".format(camera, attr)\n value = values[0] if \"near\" in attr else values[-1]\n if cmds.getAttr(plug, settable=True):\n cmds.setAttr(plug, value)\n\n show_cam_clip_planes()", "def set_values(widget, near, far):\n cmds.optionVar(floatValue=[\"defaultCameraNearClipValue\", near.value()])\n cmds.optionVar(floatValue=[\"defaultCameraFarClipValue\", far.value()])\n widget.close()", "def change_default_clip_plane():\n import pymel.core as pm\n\n # methods\n def set_values(widget, near, far):\n \"\"\"Set the near and far clip values.\"\"\"\n cmds.optionVar(floatValue=[\"defaultCameraNearClipValue\", near.value()])\n cmds.optionVar(floatValue=[\"defaultCameraFarClipValue\", far.value()])\n widget.close()\n\n def create_intfield(layout, name):\n \"\"\"Create the near/far label and intfield combo.\"\"\"\n hbox = QtWidgets.QHBoxLayout()\n label = QtWidgets.QLabel(name + \" Clip:\")\n label.setMinimumWidth(60)\n field = QtWidgets.QDoubleSpinBox()\n if name == \"Near\":\n field.setMinimum(0.0000000001)\n field.setDecimals(4)\n else:\n field.setMaximum(10000000000)\n field.setDecimals(0)\n field.setValue(\n cmds.optionVar(query=\"defaultCamera{}ClipValue\".format(name))\n )\n hbox.addWidget(label)\n hbox.addWidget(field)\n hbox.setStretch(1, 1)\n layout.addLayout(hbox)\n return field\n\n # widgets\n maya = pm.toQtWindow(\"MayaWindow\")\n widget = QtWidgets.QDialog(maya)\n widget.setWindowTitle(\"Set default camera clipPlane\")\n widget.setMinimumWidth(300)\n\n layout = QtWidgets.QVBoxLayout(widget)\n layout.setContentsMargins(2, 2, 2, 2)\n layout.setSpacing(2)\n\n near_field = create_intfield(layout, \"Near\")\n far_field = create_intfield(layout, \"Far\")\n\n btn_hbox = QtWidgets.QHBoxLayout()\n ok_btn = QtWidgets.QPushButton(\"OK\")\n cl_btn = QtWidgets.QPushButton(\"Cancel\")\n btn_hbox.addWidget(ok_btn)\n btn_hbox.addWidget(cl_btn)\n layout.addLayout(btn_hbox)\n\n # signals\n ok_btn.clicked.connect(partial(set_values, widget, near_field, far_field))\n cl_btn.clicked.connect(widget.close)\n widget.show()", "def setNearFarClippingPlanes(*args, **kwargs):\n \n pass", "def SetClipPlanes(self, *args):\n return _Graphic3d.Graphic3d_Structure_SetClipPlanes(self, *args)", "def camera_setting_init():\n bpy.data.cameras['Camera'].clip_start = g_depth_clip_start\n bpy.data.cameras['Camera'].clip_end = g_depth_clip_end\n bpy.data.objects['Camera'].rotation_mode = g_rotation_mode", "def SetClipPlanes(self, *args):\n return _Graphic3d.Graphic3d_GraphicDriver_SetClipPlanes(self, *args)", "def show_cam_clip_planes():\n for camera in cmds.ls(type=\"camera\"):\n cmds.setAttr(camera + \".nearClipPlane\", channelBox=True)\n cmds.setAttr(camera + \".farClipPlane\", channelBox=True)\n\n # select the perspCamera\n if cmds.objExists(\"persp\"):\n cmds.select(\"persp\")", "def viewClipPlane(camera, nearClipPlane=\"string\", farClipPlane=\"string\", surfacesOnly=bool, autoClipPlane=bool):\n pass", "def setValue(self, box: 'SbBox3f', planenormal: 'SbVec3f', draggerscalefactor: 'float') -> \"void\":\n return _coin.SoClipPlaneManip_setValue(self, box, planenormal, draggerscalefactor)", "def setClipPlane(self, point=(0., 0., 0.), normal=(0., 0., 0.)):\n self._clipPlane = ClippingPlane(point, normal)", "def SetClipRegion(self, p_int=..., p_int=..., p_int=..., 
p_int=..., p_int=..., p_int=...):\n ...", "def setPlane(*args, **kwargs):\n \n pass", "def change_clip_plane_position(self, clip_name, position):\n return self.change_property(clip_name, \"Location\", position)", "def setCVPositions(*args, **kwargs):\n \n pass", "def clip(self, plane: 'SbPlane') -> \"void\":\n return _coin.SbClip_clip(self, plane)", "def setNearPlaneValue(self, value: 'float') -> \"void\":\n return _coin.SoRenderManager_setNearPlaneValue(self, value)", "def SetPlane(self, p_float, p_float_1, p_float_2, p_float_3):\n ...", "def clim_set(self, parameter, value):\n if isinstance(value, tuple):\n cmin, cmax = value\n self._run_code(f'self.img_2D.setLevels(({cmin}, {cmax}))')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Destroy a child widget of the specified parent widget.
def destroy_child_widget(parent, child_name):
    # type: (QWidget, str) -> None
    for widget in parent.children():  # type: QWidget
        if widget.objectName() == child_name:
            log.info('Closing previous instance of "%s"' % child_name)
            widget.close()
            widget.deleteLater()
[ "def del_parent(self):\n self.parent = None", "def child_removed(self, child):\n if isinstance(child, QtContainer):\n self.widget().setPageWidget(self.page_widget())", "def child_removed(self, child):\n if isinstance(child, WxSplitItem):\n widget = child.widget()\n self.widget().DetachWindow(widget)\n widget.Hide()\n self.size_hint_updated()", "def destroy_children(self, widget) -> None:\r\n for children in widget.winfo_children():\r\n children.destroy()", "def remove(self):\n\n parent = self.parent\n if parent is not None:\n parent._children.remove(self)\n parent.layout_changed()\n self._parent = None", "def child_removed(self, child):\n super(WxActionGroup, self).child_removed(child)\n if isinstance(child, WxAction) and child.widget is not None:\n self.widget.RemoveAction(child.widget)\n parent = self.parent()\n if parent is not None:\n parent.widget.RemoveAction(child.widget)", "def destroy(self):\n StockParamWidgets._destroy_widget(self._label)\n StockParamWidgets._destroy_widget(self._enable_widget)\n StockParamWidgets._destroy_widget(self._sonic_param_widget)\n StockParamWidgets._destroy_widget(self._instrument_widget)", "def child_removed(self, child):\n super(WxPage, self).child_removed(child)\n if isinstance(child, WxContainer):\n self.widget.SetPageWidget(self.page_widget())", "def destroy(self):\n self.is_destroyed = True\n self.destroyed()\n self.unobserve()\n for child in self._children:\n child.destroy()\n del self._children\n parent = self._parent\n if parent is not None:\n if parent.is_destroyed:\n self._parent = None\n else:\n self.set_parent(None)", "def removeChild(self, *args) -> \"void\":\n return _coin.SoVRMLParent_removeChild(self, *args)", "def closing_widget(self):\n pass", "def remove_child(self, index: int) -> 'DOMLayout':\n element = self._children[index]\n del self._children[index]\n del self._children_positions[index]\n element.remove_observer(DOMEventType.RESIZE, self._on_child_resize)\n element.remove_global_observer(self._on_child_event)\n\n self._rerender(resize=(True, True))\n return self", "def DetachFromParent(self):\n self.DetachFromDocument()\n self.parent = None", "def remove_shellwidget(self, shellwidget):\n\n self.get_widget().remove_shellwidget(shellwidget)", "def DeleteChild(self, child):\n if self.IsEmpty():\n raise XMLUnknownChild(child.xmlname)\n factoryName = self._FindFactory(child.__class__)\n if factoryName:\n factory = getattr(self, factoryName)\n if isinstance(factory, MethodType):\n deleteFactory = getattr(self, \"Delete_\" + factoryName)\n deleteFactory(child)\n elif isinstance(factory, NoneType):\n raise XMLUnknownChild(child.xmlname)\n elif isinstance(factory, ListType):\n match = False\n for i in xrange(len(factory)):\n if factory[i] is child:\n child.DetachFromDocument()\n child.parent = None\n del factory[i]\n match = True\n break\n if not match:\n raise XMLUnknownChild(child.xmlname)\n elif factory is child:\n # Single allowable child is replaced with None\n child.DetachFromDocument()\n child.parent = None\n setattr(self, factoryName, None)\n else:\n raise TypeError\n else:\n match = False\n for i in xrange(len(self._children)):\n if self._children[i] is child:\n child.DetachFromDocument()\n child.parent = None\n del self._children[i]\n match = True\n break\n if not match:\n raise XMLUnknownChild(child.xmlname)", "def childWindowClosing(self, window):\n\n self.childWindows.remove(window)", "def remove_widget(self, label: str) -> PlotBuilder:\n self.widgets[label][1].remove()\n del self.widgets[label]\n\n return self", "def 
_del_control(self, ctl):\n self._parent._del_control(ctl)", "def remove(self):\r\n self.child = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator to inject the function docstring into its returned object's tooltip. Assumes that the returned object is of type QtWidgets.QWidget.
def set_return_widget_tooltip_from_docstring(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        widget = func(*args, **kwargs)
        tooltip = func.func_doc  # type: QtWidgets.QWidget
        widget.setToolTip(tooltip)
        return widget
    return wrapper
[ "def build_tooltip(function):\n # Get the docstring for the \"function\" argument by using inspect\n docstring = inspect.getdoc(function)\n border = '#' * 28\n return '{}\\n{}\\n{}'.format(border, docstring, border)", "def add_snippets(func):\n func.__doc__ = inspect.getdoc(func)\n if func.__doc__:\n func.__doc__ %= {key: value.strip() for key, value in snippets.items()}\n return func", "def wrapper(func: F) -> F:\n return _add_text_to_function_docstring_after_summary(\n func=func,\n text=text,\n )", "def _add_docstring(func):\n\n func.__doc__ = \\\n \"\"\"Add arguments of {} command to a subparser.\n\n Args:\n -----\n subparser_action: a argparse._SubParsersAction (obtained by\n `parser.add_subparsers`).\n\n Returns:\n --------\n subparser_action: The same object of the input. Returning it just for\n our convenience.\n subparser: the subparser added into the parser.\n \"\"\".format(func.__name__)\n\n return func", "def _get_tooltip(self, *args):\n if self.__get_tooltip_func is not None:\n return self.__get_tooltip_func(*args)\n return 'Tooltip'", "def docstring(self):\n docs = []\n for key, func in self.items():\n sig = getattr(key, 'sig', '')\n doc = func.__doc__ or ''\n docs.append(f'{func.__name__}{sig}\\n {doc}')\n return '\\n\\n'.join(docs)", "def api_doc(**kwds) -> Callable:\r\n\r\n def wrapper(func: Callable):\r\n d = kwds.pop(\"description\", func.__doc__ or \"\")\r\n kwds[\"description\"] = d\r\n func = doc(**kwds)(func)\r\n return func\r\n\r\n return wrapper", "def add_sample_code(func, sample_code):\n func.__doc__ = func.__doc__ + sample_code", "def get_base_docstring(self):", "def _add_text_below_function_docstring_argument(\n func: F,\n argument_name: str,\n text: str,\n) -> F:\n existing_docstring = func.__doc__ if func.__doc__ else \"\"\n\n func.__doc__ = _add_text_below_string_docstring_argument(\n docstring=existing_docstring, argument_name=argument_name, text=text\n )\n\n return func", "def copy_replace_short_description(\n other: _Func,\n style: DocstringStyle = DocstringStyle.AUTO,\n rendering_style: RenderingStyle = RenderingStyle.COMPACT,\n):\n\n def wrapper(func: _Func) -> _Func:\n this_doc = parse(func.__doc__ or \"\", style=style)\n other_doc = parse(other.__doc__ or \"\", style=style)\n\n new_doc = copy.deepcopy(other_doc)\n new_doc.short_description = this_doc.short_description\n\n func.__doc__ = compose(new_doc, rendering_style=rendering_style, style=style)\n return func\n\n return wrapper", "def docs(f, *args, **kwargs):\n\n print(('Documentation for %s%s%s:' % (Fore.CYAN, _get_scope(f, args), Fore.RESET)))\n print((inspect.getdoc(f)))\n\n return f(*args, **kwargs)", "def test_function_docstrings(func):\n check_docstring(obj=func)", "def create_tooltip(self, name, lineno):\r\n doc = self.docstrings.get(lineno, None)\r\n if doc is None:\r\n doc = ''\r\n else:\r\n doc = '\\n' + doc\r\n tooltip = name + doc\r\n return tooltip", "def construct_function_popup(self, function_name, example, url):\n text = '<b><a href=\"%s\"><u>%s</u></a></b> (Intrinsic function)<br><br>' % (url, function_name)\n text += '<b>Example:</b><br>'\n text += ' %s' % example\n\n return text", "def appender(defaultdocs, passed_to=None):\r\n\r\n def _doc(func):\r\n params = inspect.signature(func).parameters\r\n params = [param.name for param in params.values()]\r\n msg = '\\n**kwargs : passed to `%s`'\r\n params = ''.join([textwrap.dedent(defaultdocs\r\n .get(param, msg % passed_to)) for param in params])\r\n func.__doc__ += '\\n\\nParameters\\n' + 10 * '=' + params\r\n return func\r\n\r\n 
return _doc", "def test_review_func_docstrings(self):\n for func in self.review_f:\n self.assertIsNot(func[1].__doc__, None,\n \"{:s} method needs a docstring\".format(func[0]))\n self.assertTrue(len(func[1].__doc__) >= 1,\n \"{:s} method needs a docstring\".format(func[0]))", "def doc(self):\n doc = ''\n if self.set_func:\n doc = self.set_func.__doc__\n if not doc and self.declared_func:\n doc = self.declared_func.__doc__\n return doc or ''", "def has_docstring(func):\n return func.__doc__ is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot boxes on an image
def plot_boxes(img=None, boxes=None, normalized=True, labels=None, linewidth=1.5,
               box_color='g', font_color='w', facecolor=None, fontsize=16, title=None):
    # fig, ax = plt.subplots(1, figsize=(fig_size, fig_size))
    fig, ax = plt.subplots(1)
    if title:
        ax.set_title(title, fontsize=20, color=font_color)
    if facecolor:
        ax.set_facecolor(facecolor)
    if img is not None:
        if tf.is_tensor(img):
            img = img.numpy()
        ax.imshow(img)
    else:
        assert boxes is not None, "Boxes must not be None if img is None"
        ax.axis('auto')
    if boxes is not None:
        if tf.is_tensor(boxes):
            boxes = boxes.numpy()  # sometimes useful to plot anchor boxes even without an image
        else:
            assert isinstance(boxes, (list, np.ndarray)), "Bounding boxes must be a tensor, list, or numpy array"
            assert normalized==False, "normalized must be False if no img is passed"
            if img is None:
                ax.set_xlim([np.min(boxes[:,0])-1, np.max(boxes[:,2])+1])
                ax.set_ylim([np.min(boxes[:,1])-1, np.max(boxes[:,3])+1])
        boxes = boxes.tolist()
        for bbox in boxes:
            if normalized:
                bbox = transform_bbox(bbox, img.shape[1], img.shape[0], normalized=True)
            else:
                bbox = transform_bbox(bbox, normalized=False)
            rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],
                                     linewidth=linewidth, edgecolor=box_color, fill=False)
            ax.add_patch(rect)
    if labels is not None:
        for caption in labels:
            ax.set_title(caption, color=font_color, fontsize=fontsize)
    plt.tight_layout()
    plt.show()
[ "def plot(self,image=None,figsize=(20,20)):\n all_labels=list(self.all_boxes.keys())\n if not image:\n image=self.original_image.copy()\n\n for k in all_labels:\n box_k=self.all_boxes[k]\n h=self.height[k]\n w=self.width[k]\n\n for pt in box_k:\n cv2.rectangle(image, (pt[0][0],pt[0][1]), (pt[1][0],pt[1][1]), (0,255,255), 2)\n plt.figure(figsize=figsize)\n plt.imshow(image)\n plt.show()", "def visualize(self, img, boxes, labels, figsize=(15,15)):\n fig,ax = plt.subplots(figsize=figsize)\n\n if isinstance(img, torch.Tensor):\n img = img.numpy().squeeze().transpose((1,2,0))\n # Display the image\n ax.imshow(img)\n\n # Create a Rectangle patch\n for box, label in zip(boxes, labels):\n color = np.random.rand(3,)\n x,y,w,h = box\n rect = patches.Rectangle((x,y),w,h,linewidth=2,edgecolor = color,facecolor='none')\n plt.text(x, y-3, self.labels[label], color = color, fontsize=20)\n # Add the patch to the Axes\n ax.add_patch(rect)\n plt.show()", "def draw_box(img, boxes):\n box = ImageDraw.Draw(img)\n for i in range(boxes.shape[0]):\n data = list(boxes[i])\n shape = [data[0], data[1], data[2], data[3]]\n box.rectangle(shape, outline =\"#02d5fa\", width=3)\n return img", "def box_faces(img):\n k=face_detect(img)\n\n fig,ax = plt.subplots()\n ax.imshow(img)\n\n for i in range(len(k)):\n lst = numsfromrect(k[i])\n ax.add_patch(patches.Rectangle( (lst[0],lst[1]), lst[2]-lst[0], lst[3]-lst[1], fill=False))", "def plot_bbs(image, bboxes, size = 2, name = 'image', shape = (500, 500), xy = (10, 10)):\n mn_val = np.mean(image)\n if mn_val >= 127.5:\n color = (0, 0, 0)\n else:\n color = (255, 255, 255)\n if bboxes:\n im_copy = bboxes.draw_on_image(image, color = color, size = size, copy = True);\n else:\n im_copy = image.copy()\n cv_imshow(im_copy, shape = shape, xy = xy, name = name)", "def plot(image, classified_boxes, window_size):\n fig1 = plt.figure(dpi=400)\n ax1 = fig1.add_subplot(1,1,1) \n ax1.imshow(image, cmap=plt.cm.gray)\n ax1.axis('off')\n for box in classified_boxes:\n x_min, y_min, x_max, y_max = box[0]-.5, box[1]-.5, box[0]+window_size[0]-.5, box[1]+window_size[1]-.5\n prediction, predict_score = box[2], box[3]\n ax1.text(x_min, y_min-3, \"%s %d%%\" % (prediction, predict_score*100), color=\"red\", fontsize=3)\n x = [x_max, x_max, x_min, x_min, x_max]\n y = [y_max, y_min, y_min, y_max, y_max]\n line, = ax1.plot(x,y,color=\"red\")\n line.set_linewidth(.5)\n fig1.savefig(\"classification.png\")\n plt.show()\n return", "def draw_box(image, markers, thickness=1):\n\n cv2.line(image,(markers[0]), (markers[1]),(0, 50, 255), thickness)\n cv2.line(image,(markers[0]), (markers[2]),(0, 50, 255), thickness)\n cv2.line(image,(markers[3]), (markers[1]),(0, 50, 255), thickness)\n cv2.line(image,(markers[3]), (markers[2]),(0, 50, 255), thickness)\n\n return image\n raise NotImplementedError", "def plot_image_with_bboxes(image_id,\r\n images_folder_path=Path('data/raw/train/'),\r\n target_folder_path=Path('data/interim/train/')):\r\n fig = plt.figure(figsize=(10, 10))\r\n ax = fig.add_subplot(111)\r\n\r\n im = Image.open(images_folder_path / (image_id + '.jpg'))\r\n\r\n ax.imshow(im)\r\n\r\n bbox_list = get_bbox_for_image(image_id)\r\n\r\n for bbox in bbox_list:\r\n add_bbox_to_axis(ax, bbox)\r\n\r\n fig.savefig(target_folder_path / (image_id + '_bbox.jpg'))\r\n\r\n return", "def image_drawbox(self, images, boxes, labels, score):\n num_boxes = len(boxes)\n if num_boxes > 10:\n num_boxes = 10\n for i in range(num_boxes):\n img_temp = scipy.misc.toimage(images[i])\n draw = ImageDraw.Draw(img_temp)\n for 
box_idx in range(len(boxes[i])):\n draw.rectangle(boxes[i][box_idx].tolist(), outline=(255,0,0))\n draw.text((int(boxes[i][box_idx][0]),int(boxes[i][box_idx][1])),\\\n str(float(score[i][box_idx])), (255,255,255))\n# pdb.set_trace()\n# img_temp.save('tesorboard_image_test.png')\n img_temp = scipy.misc.fromimage(img_temp)\n img_temp = np.transpose(img_temp, (2,0,1))\n images[i] = img_temp\n return images", "def draw_boxes(img, bounding_boxes, color=(0, 0, 255), thickness=6):\n image_with_boxes = np.copy(img)\n\n # draw each bounding box on your image copy using cv2.rectangle()\n for corner_1, corner_2 in bounding_boxes:\n cv2.rectangle(image_with_boxes, corner_1, corner_2, color, thickness)\n\n return image_with_boxes", "def plot_unlabeled_on_image(img, unlabeld_boxes, color=[255, 0, 0]):\n cop = copy.copy(img)\n cop = LabeledImagesMaker.draw_boxes(cop, unlabeld_boxes, color=color)\n LabeledImagesMaker.plot(cop)\n return cop", "def draw_boxes(boxes, img, model_size, crop_rect, color=(255, 255, 255), debug=False):\n\n retimg = img.copy()\n [xmin, xmax] = crop_rect[0]\n [ymin, ymax] = crop_rect[1]\n crop_w = xmax - xmin\n crop_h = ymax - ymin\n [model_h, model_w] = model_size\n [img_h, img_w] = img.shape[0:2]\n\n for box in boxes:\n # only show if prediction is in CLASSES_TO_SHOW\n if box.cn not in CLASSES_TO_SHOW:\n if debug: print(\"[INFO] detected class\", box.cn)\n continue\n\n label = '{} {:.2f}'.format(box.cn, box.prob)\n\n # convert bounding box to coordinates\n left = (box.x - box.w / 2)\n right = (box.x + box.w / 2)\n top = (box.y - box.h / 2)\n bottom = (box.y + box.h / 2)\n\n # scale up boxes to cropped image size\n left *= crop_w / model_w\n right *= crop_w / model_w\n top *= crop_h / model_h\n bottom *= crop_h / model_h\n\n # shift boxes from cropped to original image\n left += xmin\n right += xmin\n top += ymin\n bottom += ymin\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(img_h, np.floor(bottom + 0.5).astype('int32'))\n right = min(img_w, np.floor(right + 0.5).astype('int32'))\n\n # draw rectangle\n cv2.rectangle(retimg, (left, top), (right, bottom), color=color, thickness=2, lineType=cv2.LINE_AA)\n\n # write label\n fontface = cv2.FONT_HERSHEY_SIMPLEX\n fontscale = 0.5\n fontthickness = 1\n textsize, _ = cv2.getTextSize(label, fontface, fontscale, fontthickness)\n cv2.putText(retimg, label, (left + 2, top + textsize[1] + 2),\n fontface, fontScale=fontscale, color=color,\n thickness=fontthickness, lineType=cv2.LINE_AA)\n\n return retimg", "def test_basic_pil(self):\n img = Image.new('RGB', (25, 25))\n imgdraw = ImageDraw.Draw(img)\n res = bbb.draw_boxes(img.copy(), [self.anno], (255, 0, 0))\n imgdraw.line([(1, 5), (11, 5), (11, 20), (1, 20), (1, 5)], (255, 0, 0), 3)\n\n self.assertEqual(list(img.getdata()), list(res.getdata()))", "def show_boxes(self, image, boxes, box_classes, box_scores, file_name):\n for i, box in enumerate(boxes):\n x, y, w, h = int(box[0]), int(box[1]), int(box[2]), int(box[3])\n label = self.class_names[box_classes[i]] + '{:.2f}'.format(\n box_scores[i])\n rect = cv2.rectangle(image, (x, y), (w, h),\n 255, 2, cv2.LINE_AA)\n image = cv2.putText(rect, label, (x-5, y-5),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5, (0, 0, 255), 1)\n cv2.imshow(file_name, image)\n if not os.path.isdir('./detections/'):\n os.mkdir('./detections/')\n if cv2.waitKey() == 115:\n cv2.imwrite('./detections/{}'.format(file_name), image)\n cv2.destroyAllWindows()", "def overlay_boxes(self, image, predictions):\n 
labels = predictions.get_field('labels')\n boxes = predictions.bbox\n colors = self.compute_colors_for_labels(labels).tolist()\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(image, tuple(top_left), tuple(bottom_right), tuple(color), 1)\n\n return image", "def visualize_bbox(img, bbox, color=BOX_COLOR, thickness=2):\n# x_min, y_min, x_max, y_max = list(map(int, bbox))\n x_min, y_min, x_max, y_max = list(map(round, bbox))\n\n img = cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=BOX_COLOR, thickness=thickness)\n return img", "def draw_boxes(record, im_save_dir, judge=True):\n image = cv2.imread(record['index'])\n h, w = image.shape[:2]\n fig = plt.figure(figsize=(w / 96, h / 96))\n ax = fig.add_subplot(1, 1, 1)\n\n prediction = record['prediction'][0]\n coords = prediction['position'] # ((x, y), width, height)\n ax.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='cyan', linewidth=2))\n\n display_coords = np.array(coords[0]) + [3, -10] # align rectangle box, top-left\n display_txt = '{!s}: {:.3f}'.format(prediction['class'], prediction['score'])\n ax.text(*display_coords, display_txt, bbox={'facecolor': 'cyan', 'alpha': 0.4})\n\n if judge:\n judge_coords = display_coords + [coords[1] - 20, 0] # align rectangle box, top-right\n is_right = int(prediction['class'] == record['label'])\n judge_txt = [{'symbol': '×', 'color': 'red'}, {'symbol': '√', 'color': 'green'}][is_right]\n ax.text(*judge_coords, judge_txt['symbol'], bbox={'facecolor': judge_txt['color'], 'alpha': 0.7})\n\n extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n plt.axis('off')\n plt.imshow(image)\n im_save_name = im_save_dir + record['index'].split('/')[-1]\n plt.savefig(im_save_name, bbox_inches=extent)\n plt.clf()", "def show_grids(img, bounding_boxes, facial_landmarks=[], step=1):\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n\n for b in bounding_boxes:\n draw.rectangle([(b[0], b[1]), (b[2], b[3])],\n outline = 'white')\n\n inx = 0\n for pp in facial_landmarks:\n p = pp.reshape(2,5).T\n p = p.tolist()\n mouth_center = [(p[3][0] + p[4][0]) / 2, (p[3][1] + p[4][1]) / 2]\n eye_center = [(p[0][0] + p[1][0]) / 2, (p[0][1] + p[1][1]) / 2]\n p6 = [(p[2][0] - mouth_center[0])/4 + mouth_center[0],\n (p[2][1] - mouth_center[1])/4 + mouth_center[1]]\n p9 = [p[3][0] - (p[4][0]-p[3][0])/3, p[3][1] - (p[4][1]-p[3][1])/3]\n p10 = [p[4][0] + (p[4][0]-p[3][0])/3, p[4][1] + (p[4][1]-p[3][1])/3]\n p11 = [mouth_center[0] - (eye_center[0] - mouth_center[0]) / 2,\n mouth_center[1] - (eye_center[1] - mouth_center[1]) / 2]\n p12 = [(eye_center[0] -mouth_center[0])/4 + eye_center[0],\n (eye_center[1] - mouth_center[1])/4 + eye_center[1]]\n p13 = [(p[0][0] + p[3][0])/2, (p[0][1] + p[3][1])/2]\n p14 = [(p[1][0] + p[4][0])/2, (p[1][1] + p[4][1])/2]\n\n\n p.append(p6)\n p.append([p[0][0]-3/8*(p[1][0]-p[0][0]), 3/2*p[0][1]-1/2*p[1][1]]) \n p.append([p[1][0]+3/8*(p[1][0]-p[0][0]), 3/2*p[1][1]-1/2*p[0][1]])\n p.append(p9)\n p.append(p10)\n p.append(p11) \n p.append(p12)\n p.append(p13)\n p.append(p14)\n\n\n #for i in range(12):\n # draw.ellipse([\n # (p[i][0]-2.0,p[i][1]-2.0),\n # (p[i][0]+2.0,p[i][1]+2.0)\n # ],outline='white',fill='white')\n\n #draw.ellipse(\n # [(p[1][0]-30.0, p[1][1]-30.0),\n # (p[1][0]+30.0, p[1][1]+30.0)],\n # outline=(136,232,232),\n # width=5\n #)\n\n draw.line(\n ((p[6][0], p[6][1]),\n (p[0][0], p[0][1]),\n (p[12][0], p[12][1]),\n (p[5][0], p[5][1]),\n 
(p[13][0],p[13][1]),\n (p[1][0], p[1][1]),\n (p[7][0], p[7][1])),\n fill=(136,232,232),\n width=1\n )\n\n draw.line(\n ((p[11][0], p[11][1]),\n (p[7][0], p[7][1]),\n (p[9][0], p[9][1]),\n (p[10][0], p[10][1]),\n (p[8][0], p[8][1]),\n (p[6][0], p[6][1]),\n (p[11][0], p[11][1])),\n fill=(136,232,232),\n width=1\n )\n\n draw.line(\n ((p[11][0], p[11][1]),\n (p[1][0], p[1][1]),\n (p[2][0], p[2][1]),\n (p[5][0], p[5][1]),\n (p[4][0], p[4][1]),\n (p[10][0], p[10][1]),\n (p[3][0], p[3][1]),\n (p[5][0], p[5][1]),\n (p[2][0], p[2][1]),\n (p[0][0], p[0][1]),\n (p[11][0], p[11][1])),\n fill=(136,232,232),\n width=1\n )\n\n return img_copy", "def blobber(img):\n\tblobs = blob_dog(img, min_sigma=20, threshold=.1)\n\tblobs[:, 2] = blobs[:, 2] * sqrt(2)\n\tfig, ax = plt.subplots()\n\tax.imshow(img, cmap=\"gray\")\n\tfor blob in blobs:\n\t\ty, x, r = blob\n\t\tc = plt.Circle((x, y), r, color=\"0.75\", linewidth=2, fill=False)\n\t\tax.add_patch(c)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a function to convert metric values.

Tries to return an ``int`` type when the value has no decimal part, otherwise returns a ``float`` type.

The returned function receives a number, ``value``. If ``to_metric`` is defined, only the converted ``value`` is returned; otherwise it returns a tuple with the converted ``value`` and its metric type, for example ``(10, 'km')``.

Arguments
=========
``metric``
    Metric type sent to the function. By default the metre type is used.
def format_metric_factory(metric=METER, to_metric=None, round_to=None):
    get_number = METRIC_TYPES.get
    number = get_number(metric)
    if number is None:
        raise ValueError('Invalid metric type: %s' % metric)

    get_type = METRIC_NUMBERS.get
    if to_metric:
        to_number = get_number(to_metric)
        if to_number is None:
            raise ValueError('Invalid metric type: %s' % to_metric)
        elif to_number == number:
            method = lambda value: value
        else:
            if to_number < number:
                count_number = -1
                calc_method = lambda num: num * 10
            else:
                count_number = 1
                calc_method = lambda num: num / 10

            def method(value):
                check_number = number
                while True:
                    key = get_type(check_number + count_number)
                    check_number = get_number(key)
                    value = calc_method(value)
                    if key == to_metric:
                        break
                return value

        def replacer(value):
            value = float(value)
            value = method(value)
            value = float(value)
            if value.is_integer():
                value = int(value)
            elif round_to is not None:
                value = round(value, round_to)
                if value.is_integer():
                    value = int(value)
            return value
    else:
        options = 1, lambda num: num / 10
        options_reverse = -1, lambda num: num * 10

        def method(value):
            result_key = metric
            check_number = number
            if value < 1:
                count_number, calc_method = options_reverse
            else:
                count_number, calc_method = options
            while True:
                if 1 <= value <= 9:
                    break
                key = get_type(check_number + count_number)
                if not key:
                    break
                check_number = METRIC_TYPES[key]
                result_key = key
                value = calc_method(value)
            return value, result_key

        def replacer(value):
            value = float(value)
            value, key = method(value)
            if value.is_integer():
                value = int(value)
            elif round_to is not None:
                value = round(value, round_to)
                if value.is_integer():
                    value = int(value)
            return value, key

    return replacer
[ "def convert_metric(self, metric):\n tpot_metrics = { # dict mapping metric_str to the str used by TPOT:\n 'accuracy': 'accuracy',\n 'f1': 'f1',\n 'log_loss': 'neg_log_loss',\n 'roc_auc': 'roc_auc',\n 'balanced_accuracy': 'balanced_accuracy',\n 'precision': 'precision',\n 'recall': 'recall',\n 'mean_squared_error': 'neg_mean_squared_error',\n 'median_absolute_error': 'neg_median_absolute_error',\n 'mean_absolute_error': 'neg_mean_absolute_error',\n 'r2': 'r2',\n }\n if metric in tpot_metrics:\n return tpot_metrics[metric]\n else:\n warnings.warn(\"Unknown metric will not be used by TPOT: %s\" % metric)\n return None", "def metric_to_string(value, metric=METER, to_metric=None, round_to=None):\n return metric_to_string_factory(metric, to_metric, round_to)(value)", "def get_metric_value(metric, data):\n\n if len(metric) > 1:\n return get_metric_value(metric[1:], data[metric[0]])\n\n return data[metric[0]]", "def get_metric_function(metric):\n metric_function = PREDEFINED_METRICS.get(metric)\n\n if not metric_function:\n metric_function = pickle.loads(base64.b64decode(metric))\n\n return metric_function", "def get_metric(self, x, y, metric: str):\n\n if metric == 'score' or metric == 'accuracy':\n return self.score(x, y)\n elif metric == 'precision':\n y_pred = self.predict(x)\n from sklearn.metrics import precision_score\n return precision_score(y, y_pred)\n elif metric == 'recall':\n y_pred = self.predict(x)\n from sklearn.metrics import recall_score\n return recall_score(y, y_pred)\n else:\n print(\"Not supported.\")", "def _templatize_metric_fn(self, metric_fn):\n\n if tf.executing_eagerly():\n return metric_fn\n\n def _metric_fn(*args, **kwargs):\n \"\"\"The wrapping function to be returned.\"\"\"\n\n # We can only be passed in either a dict or a list of tensors.\n args = args if args else kwargs\n metrics = call_eval_metrics((metric_fn, args))\n if not self._use_tpu:\n return metrics\n\n logging.log_first_n(logging.INFO,\n \"Writing eval metrics to variables for TPU\", 1)\n wrapped_metrics = {}\n for i, key in enumerate(sorted(metrics)):\n tensor, op = metrics[key]\n # key cannot be in var name since it may contain illegal chars.\n var = tf_compat.v1.get_variable(\n \"metric_{}\".format(i),\n shape=tensor.shape,\n dtype=tensor.dtype,\n trainable=False,\n initializer=tf_compat.v1.zeros_initializer(),\n collections=[tf_compat.v1.GraphKeys.LOCAL_VARIABLES])\n if isinstance(op, tf.Operation) or op.shape != tensor.shape:\n with tf.control_dependencies([op]):\n op = var.assign(tensor)\n metric = (var, var.assign(op))\n wrapped_metrics[key] = metric\n return wrapped_metrics\n\n return tf_compat.v1.make_template(\"metric_fn_template\", _metric_fn)", "def convert(value, from_, to, date):\n\trate = get_rate_as_at(date, from_, to)\n\tconverted_value = flt(value) / (rate or 1)\n\treturn converted_value", "def _convert(value, conversion_function):\n if conversion_function is not None:\n value = conversion_function(value)\n return value", "def val_metric(self, key, metric=None):\n actual_key = \"val_\" + key\n return self.metric(key, val_metric=metric, mode=\"val\")", "def find_values(results, metric): # pragma: no cover\n has_valid_value, value, _ = parse_metric.parse_chartjson_metric(\n results, metric.as_pair())\n if has_valid_value:\n return True, value\n\n # TODO(eakuefner): Get rid of this fallback when bisect uses ToT Telemetry.\n has_valid_value, value, _ = parse_metric.parse_chartjson_metric(\n results, metric.as_pair(Metric.OLD_STYLE_DELIMITER))\n if has_valid_value:\n return True, 
value\n\n # If we still haven't found a valid value, it's possible that the metric was\n # specified as interaction-chart/trace or interaction-chart/interaction-chart,\n # and the chartjson chart names use @@ as the separator between interaction\n # and chart names.\n if Metric.OLD_STYLE_DELIMITER not in metric.chart_name:\n return False, [] # Give up; no results found.\n interaction, chart = metric.chart_name.split(Metric.OLD_STYLE_DELIMITER, 1)\n metric.interaction_record_name = interaction\n metric.chart_name = chart\n has_valid_value, value, _ = parse_metric.parse_chartjson_metric(\n results, metric.as_pair())\n return has_valid_value, value", "def convert(self, value, param, ctx):\n if isinstance(value, str):\n if value.lower() == 'none':\n return None\n else:\n try:\n value = float(value)\n except ValueError:\n pass\n return value\n else:\n self.fail('Cannot recognize str or float type: {} {}'\n .format(value, type(value)), param, ctx)", "def convert(self, value):\n good_type = isinstance(value, self.type)\n if good_type:\n return value\n\n types = self.type\n if callable(types):\n types = [types]\n\n for function in types:\n try:\n converted = function(value)\n except ValueError:\n continue\n else:\n return converted\n\n # Convertion impossible\n raise BadDataType(\"the data {} ({}) is not of the right type; \" \\\n \"expected {}, got {} ({})\".format(self.name, self.description,\n self.type, repr(value), type(value)))", "def retrieve_metric(self, metric, func=None):\n if func:\n self._modify_state(func)\n result = {}\n for obj in ['download', 'playback']:\n result[obj] = self.managed_objects[obj].__dict__[metric]\n return result", "def _convert_value(value: Any, feature: feature_lib.FeatureConnector) -> Any:\n if isinstance(value, lazy_imports_lib.lazy_imports.PIL_Image.Image):\n buffer = io.BytesIO()\n value.save(fp=buffer, format=_IMAGE_ENCODING_FORMAT)\n return buffer.getvalue()\n elif isinstance(value, datetime.datetime):\n return int(value.timestamp())\n elif isinstance(feature, feature_lib.Sequence):\n if isinstance(value, list):\n return value\n else:\n return [value]\n elif isinstance(feature, feature_lib.Scalar):\n if value is not None:\n return value\n elif feature.dtype == tf.string:\n return \"\"\n elif feature.dtype.is_integer:\n return 0\n elif feature.dtype.is_bool:\n return False\n elif feature.dtype.is_floating:\n return 0.0\n raise ValueError(f\"Could not get default value for {feature}\")\n raise ValueError(f\"Type {type(value)} of value {value} \"\n f\"for feature {type(feature)} is not supported.\")", "def convert(rates, value, from_string, to_string):\n rate = conversion_control_structure(rates, from_string, to_string)\n if rate is None:\n pass\n else:\n return round((rate * value), 2)", "def value_converter(func=None, *, name=None, convert_default=None, convert_default_filter=lambda s: True):\n if convert_default is not None:\n warnings.warn(\"The convert_default parameter of value_converter is deprecated. 
\"\n \"Direct users to use clize.Parameter.cli_default() instead.\",\n DeprecationWarning,\n stacklevel=2)\n def decorate(func):\n info = {\n 'name': util.name_type2cli(func) if name is None else name,\n 'convert_default': convert_default,\n 'convert_default_filter': convert_default_filter,\n }\n try:\n func._clize__value_converter = info\n return func\n except (TypeError, AttributeError):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n _wrapper._clize__value_converter = info\n return _wrapper\n if func is not None:\n return decorate(func)\n return decorate", "def expand_metric_label_value(\n env, label_name, metric_exporter_name, item_value, resource_obj,\n uri2resource, metric_values=None):\n\n def uri2resource_func(uri):\n return uri2resource[uri]\n\n def uris2resources_func(uris):\n return [uri2resource[uri] for uri in uris]\n\n try:\n func = env.compile_expression(item_value)\n except jinja2.TemplateSyntaxError as exc:\n logprint(logging.WARNING, PRINT_V,\n \"Ignoring label '{}' on metric with exporter name '{}' due to \"\n \"syntax error in the Jinja2 expression in its value: {}\".\n format(label_name, metric_exporter_name, exc))\n return None\n try:\n value = func(\n resource_obj=resource_obj,\n metric_values=metric_values,\n uri2resource=uri2resource_func,\n uris2resources=uris2resources_func)\n # pylint: disable=broad-exception-caught,broad-except\n except Exception as exc:\n logprint(logging.WARNING, PRINT_V,\n \"Ignoring label '{}' on metric with exporter name '{}' due to \"\n \"error in rendering the Jinja2 expression in its value: \"\n \"{}: {}\".\n format(label_name, metric_exporter_name,\n exc.__class__.__name__, exc))\n return None\n return str(value)", "def get_metric(metric, problem_type=None, metric_type=None) -> Scorer:\n\n if metric is not None and isinstance(metric, str):\n if metric == \"soft_log_loss\":\n if problem_type == QUANTILE:\n raise ValueError(f\"{metric_type}={metric} can not be used for quantile problems\")\n from .softclass_metrics import soft_log_loss\n\n return soft_log_loss\n if problem_type is not None:\n if problem_type not in METRICS:\n raise ValueError(f\"Invalid problem_type '{problem_type}'. Valid problem types: {list(METRICS.keys())}\")\n if metric not in METRICS[problem_type]:\n valid_problem_types = _get_valid_metric_problem_types(metric)\n if valid_problem_types:\n raise ValueError(\n f\"{metric_type}='{metric}' is not a valid metric for problem_type='{problem_type}'. Valid problem_types for this metric: {valid_problem_types}\"\n )\n else:\n raise ValueError(\n f\"Unknown metric '{metric}'. 
\" f\"Valid metrics for problem_type='{problem_type}':\\n\" f\"{list(METRICS[problem_type].keys())}\"\n )\n return METRICS[problem_type][metric]\n for pt in METRICS:\n if metric in METRICS[pt]:\n return METRICS[pt][metric]\n all_available_metrics = dict()\n for pt in METRICS:\n all_available_metrics[pt] = list(METRICS[pt].keys())\n all_available_metrics[SOFTCLASS] = [\"soft_log_loss\"]\n\n raise ValueError(\n f\"{metric_type}='{metric}' is an unknown metric, all available metrics by problem_type are:\\n\"\n f\"{json.dumps(all_available_metrics, indent=2)}\\n\"\n f\"You can also refer to \"\n f\"autogluon.core.metrics to see how to define your own {metric_type} function\"\n )\n else:\n return metric", "def fetch_metric_stats(self, metric: Dict) -> List[Dict]:\n\n _cfg = self.config.get\n\n COMPARATORS = {\n 'Average': mean,\n 'Maximum': max,\n 'Minimum': min,\n }\n\n params = metric.copy()\n\n # Setting up default metric parameters if unspecified for current metric.\n for k, v in _cfg('default_metric_values', {}).items():\n if k not in metric:\n params[k] = v\n logger.debug(f\"Set the default {v} for {k} in metric query {metric}.\")\n\n assert len(params['Statistics']) == 1, \"Complex statistics aggregation is not yet supported\"\n\n duration = int(params.pop('MetricAggregationTimeSlice'))\n\n params['EndTime'] = datetime.datetime.now()\n params['StartTime'] = params['EndTime'] - datetime.timedelta(seconds=duration)\n\n logger.debug(f\"Query to CloudWatch `get_metric_statistics`: {params}\")\n result = self.cloudwatch_client.get_metric_statistics(**params)\n\n comparator_name = params['Statistics'][0]\n comparator = COMPARATORS[comparator_name]\n\n return comparator(x[comparator_name] for x in result.get('Datapoints', list()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert metric value to ``str`` string.
def metric_to_string(value, metric=METER, to_metric=None, round_to=None): return metric_to_string_factory(metric, to_metric, round_to)(value)
[ "def metrics_to_str(metrics, prefix=\"\"):\n my_str = \", \".join([\"%s: %.3f\" % (k, v) for k, v in metrics.items()])\n if prefix:\n my_str = prefix + \" \" + my_str\n return my_str", "def format_constant(self, value):\n return str(value)", "def c_str(value):\n return str(value)", "def _kv_to_str(self, value):\n if isinstance(value, str):\n return value\n elif isinstance(value, bool):\n return str(value).lower()\n elif isinstance(value, Number):\n return str(value)\n else:\n # don't coerce unrecognized types, TypeError will be raised later\n return value", "def to_str(cls, value):\n return cls.encode.get(value, cls.encode.get(cls.DEFAULT, None))", "def get_string(value):\n # TODO(wilhuff): Actually use the SBData API to get this.\n # Get the summary as a C literal and parse it (for now). Using the SBData\n # API would allow this to directly read the string contents.\n summary = value.GetSummary()\n return ast.literal_eval(summary)", "def get_text(self):\n # FIXME Finish comments\n if self.value is None:\n result_str = str(self.value)\n else:\n format_str = '%%.%dg' % self.precision\n result_str = '['\n result_str += ', '.join([format_str % f for f in self.value])\n result_str += ']'\n return result_str", "def value2str(self, value=NODEFAULT, current=0):\n if current:\n value = self._value\n if value is NODEFAULT:\n return str(value)\n else:\n return self._value2str(value)", "def __str__(self):\r\n return str(self.value) + ' ' + self.units", "def value_to_message(self, value):\n return value", "def _fmt_value(x):\n if precision is not None and isinstance(x, Number):\n return str(round(x, precision))\n else:\n return str(x)", "def field_to_str(value):\n if not value:\n field_value = \"\"\n else:\n try:\n # could be a key or a Reference object, eg\n # <models.UserInfo object at 0x94bed32057743898>\n field_value = str(value.key().id_or_name())\n except:\n field_value = str(value)\n return field_value", "def to_string(self, *_):\n return str(self.constant_coefficient)", "def __str__(self):\n return (f' The value of the node is {self.val}')", "def repr_value(self, value):\n return self.num_format(value)", "def __str__(G):\n return G.value", "def valueToString(node):\n \n if node.type in (\"number\", \"string\", \"false\", \"true\", \"regexp\", \"null\"):\n return compressor.compress(node)\n elif node.type in nodeTypeToDocType:\n if node.type == \"plus\":\n return detectPlusType(node)\n elif node.type in (\"new\", \"new_with_args\", \"dot\"):\n return detectObjectType(node)\n else:\n return nodeTypeToDocType[node.type]\n else:\n return \"Other\"", "def pprint_value_string(self, value):\n unit = '' if self.unit is None else ' ' + self.unit\n value = self.pprint_value(value)\n return title_format.format(name=self.name, val=value, unit=unit)", "def value_str(obj):\n if obj is None:\n return 'NOT_SET'\n else:\n return '{} ({})'.format(obj, obj.__class__.__name__)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given data packed into a string, reverse bytes for a given word length and return the byteflipped string
def _flip(self, dataStr, numBytes): out = "" for i in xrange(len(dataStr)/numBytes): l = list(dataStr[numBytes*i:numBytes*(i+1)]) l.reverse() out += (''.join(l)) return out
[ "def swapbytes(data):\n return data[::-1]", "def reverseByteOrder(self, data):\n # Courtesy Vishal Sapre\n byteCount = len(hex(data)[2:].replace('L','')[::2])\n val = 0\n for i in range(byteCount):\n val = (val << 8) | (data & 0xff)\n data >>= 8\n return val", "def swapbytesinbits(data):\n if len(data) % 8 != 0:\n raise ValueError(f\"little-endianness is only defined if data length {len(data)} is multiple of 8\")\n return b\"\".join(data[i:i+8] for i in reversed(range(0,len(data),8)))", "def reverseHex (data):\n\n b = bytearray (binascii.unhexlify (data))\n b.reverse ()\n\n return binascii.hexlify (b)", "def reverse_cipher_slice(message):\n return message[::-1]", "def _reverse_byte(self, byte):\r\n reversed = 0x00\r\n for i in range(8):\r\n bit = (byte & (0b1<<i))>>i\r\n reversed |= bit << (7-i)\r\n return reversed", "def get_reverse_endian(bytes_array):\n hex_str = bytes_array.hex()\n hex_list = [\"\".join(i) for i in zip(hex_str[::2], hex_str[1::2])]\n hex_list.reverse()\n return bytes.fromhex(\"\".join(hex_list))", "def getworkByteswap (data):\n\n data = bytearray (data)\n assert len (data) % 4 == 0\n for i in range (0, len (data), 4):\n data[i], data[i + 3] = data[i + 3], data[i]\n data[i + 1], data[i + 2] = data[i + 2], data[i + 1]\n\n return data", "def decode(self, data_string):\r\n\r\n if type(data_string) is not bytes:\r\n raise ValueError('Must pass bytes to decode')\r\n\r\n # Obtain and remove the number of padding bits stored in the\r\n # first byte.\r\n padding_length = data_string[0]\r\n data_string = data_string[1:]\r\n\r\n # If the padding bit is set to 0xff the message is not encoded.\r\n if padding_length == 0xff:\r\n return data_string\r\n\r\n # Convert ascii string into binary string\r\n binary_string = ''\r\n for byte in data_string:\r\n binary_string += '{0:08b}'.format(byte)[::-1]\r\n\r\n # Remove padding bits from the end\r\n binary_string = binary_string[:len(binary_string) - padding_length]\r\n\r\n # Match binary to entries in the huffman tree\r\n decoded_string = b'';\r\n tree_node = self.huffman_tree\r\n\r\n for bit in binary_string:\r\n if bit in tree_node:\r\n tree_node = tree_node[bit]\r\n else:\r\n decoded_string += bytes([tree_node['asc']])\r\n tree_node = self.huffman_tree[bit]\r\n\r\n decoded_string += bytes([tree_node['asc']])\r\n\r\n return decoded_string", "def byte_reverse (bits, byte_size=8):\n if len(bits)%byte_size != 0:\n raise Exception(\"Cannot reverse byte-endianness of non-byte-aligned bitstream\")\n byte_len = len(bits)/byte_size\n for byte in range(0,byte_len/2):\n pos1 = byte*byte_size\n pos2 = (byte_len-1-byte)*byte_size\n bits[pos1:pos1+byte_size], bits[pos2:pos2+byte_size] = \\\n bits[pos2:pos2+byte_size], bits[pos1:pos1+byte_size]", "def get_reverse_bits(bytes_array):\n num_bytes = len(bytes_array)\n formatstring = \"{0:0%db}\" % (num_bytes * 8)\n bit_str = formatstring.format(int.from_bytes(bytes_array, byteorder='big'))\n return int(bit_str[::-1], 2).to_bytes(num_bytes, byteorder='big')", "def change_endianness(x):\n\n # If there is an odd number of elements, we make it even by adding a 0\n if (len(x) % 2) == 1:\n x += \"0\"\n y = x.decode('hex')\n z = y[::-1]\n return z.encode('hex')", "def word_flipper(our_string):\n\n word_list = our_string.split(\" \")\n\n for idx in range(len(word_list)):\n word_list[idx] = word_list[idx][::-1] # [index1:index2:step]\n\n return \" \".join(word_list)", "def _binary_table_byte_swap(data):\n orig_dtype = data.dtype\n\n names = []\n formats = []\n offsets = []\n\n to_swap = []\n\n if sys.byteorder 
== \"little\":\n swap_types = (\"<\", \"=\")\n else:\n swap_types = (\"<\",)\n\n for idx, name in enumerate(orig_dtype.names):\n field = _get_recarray_field(data, idx)\n\n field_dtype, field_offset = orig_dtype.fields[name]\n names.append(name)\n formats.append(field_dtype)\n offsets.append(field_offset)\n\n if isinstance(field, chararray.chararray):\n continue\n\n # only swap unswapped\n # must use field_dtype.base here since for multi-element dtypes,\n # the .str with be '|V<N>' where <N> is the total bytes per element\n if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:\n to_swap.append(field)\n # Override the dtype for this field in the new record dtype with\n # the byteswapped version\n formats[-1] = field_dtype.newbyteorder()\n\n # deal with var length table\n recformat = data.columns._recformats[idx]\n if isinstance(recformat, _FormatP):\n coldata = data.field(idx)\n for c in coldata:\n if (\n not isinstance(c, chararray.chararray)\n and c.itemsize > 1\n and c.dtype.str[0] in swap_types\n ):\n to_swap.append(c)\n\n for arr in reversed(to_swap):\n arr.byteswap(True)\n\n data.dtype = np.dtype({\"names\": names, \"formats\": formats, \"offsets\": offsets})\n\n yield data\n\n for arr in to_swap:\n arr.byteswap(True)\n\n data.dtype = orig_dtype", "def unpack_utf8_string(data: bytes, length_byte_size=2) -> Tuple[str, int]:\n array_bytes, consumed = DecodeUtils.unpack_byte_array(data, length_byte_size)\n return array_bytes.decode(\"utf-8\"), consumed", "def reverse_str() -> None:\n r = requests.post(\"http://challenge.code2040.org/api/reverse\",\n data={'token': token})\n\n if (type(r.text) is str): # Making sure it is a string\n reverse_str = str(r.text[::-1])\n\n r = requests.post(\"http://challenge.code2040.org/api/reverse/validate\",\n data={'token': token, 'string': reverse_str})\n print(r.status_code, r.reason)", "def reverse_bytes(hexstrinput):\n\n try:\n hexstrinput = hexlify_(unhexlify_(hexstrinput))\n test2 = int(hexstrinput,16)\n test2 = \"\"\n except:\n raise TypeError(\"Input must be hex\")\n assert not len(hexstrinput) % 2\n output = str(\"\")\n for i in range(int(len(hexstrinput) // 2)):\n j = i*2\n if j == 0:\n output = output + hexstrinput[-1*(j+2):]\n else:\n output = output + hexstrinput[-1*(j+2):-1*(j)]\n return str(output)", "def get_reverse_nibbles(bytes_array):\n return bytes.fromhex(bytes_array.hex()[::-1])", "def get_data_from_word(self, word):\n return struct.pack('<H', word)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list, use the input port type to create a string representing the data
def _listToString(self, listData): portType = self._sink.port_type if portType == _BULKIO__POA.dataChar: string = ''.join(listData) elif portType == _BULKIO__POA.dataOctet: string = ''.join(listData) elif portType == _BULKIO__POA.dataShort: string = struct.pack(str(len(listData)) + 'h', *listData) elif portType == _BULKIO__POA.dataUshort: string = struct.pack(str(len(listData)) + 'H', *listData) elif portType == _BULKIO__POA.dataLong: string = struct.pack(str(len(listData)) + 'i', *listData) elif portType == _BULKIO__POA.dataUlong: string = struct.pack(str(len(listData)) + 'I', *listData) elif portType == _BULKIO__POA.dataFloat: string = struct.pack(str(len(listData)) + 'f', *listData) elif portType == _BULKIO__POA.dataLongLong: string = struct.pack(str(len(listData)) + 'q', *listData) elif portType == _BULKIO__POA.dataUlongLong: string = struct.pack(str(len(listData)) + 'Q', *listData) elif portType == _BULKIO__POA.dataDouble: string = struct.pack(str(len(listData)) + 'd', *listData) elif portType == _BULKIO__POA.dataString: string = listData[0] elif portType == _BULKIO__POA.dataXml: pass elif portType == _BULKIO__POA.dataFile: pass else: log.error("Invalid data type") string = None return string
[ "def _make_portlist(self, ports, sep=','):\n\n if self.target['ports']:\n self.ports = sep.join([p[0] for p in self.target['ports']])\n else:\n newports = sep.join([str(p) for p in ports])\n\n return newports", "def _build_ports(ports):\n if ports:\n return \"(\" + \" and \".join(\"udp port {}\".format(x) for x in ports) + \")\"\n return \"\"", "def _translate_port_range(ports: List[str]) -> str:\n _ports = []\n for port in ports:\n if \"-\" in port:\n (start, end) = port.split(\"-\")\n _ports.append(\"%d:%d\" % (int(start), int(end)))\n else:\n _ports.append(\"%d\" % int(port))\n return \",\".join(_ports)", "def serialize(ports):\n data = []\n for p in ports:\n d = {}\n d[\"PortAddr\"] = p.portaddr\n d[\"Protocol\"] = p.protocol\n d[\"Enabled\"] = p.enabled\n d[\"UseTLS\"] = p.usetls\n data.append(d)\n return data", "def etherType(data: list):\n HexEType = \"\".join(data)\n strType = \"INCONNU\"\n estIPV4 = False\n if HexEType.lower() == \"0800\":\n strType = \"IPV4\"\n estIPV4 = True\n elif HexEType.lower() == \"0806\":\n strType = \"ARP REQUEST/RESPONSE\"\n elif HexEType.lower() == \"86dd\":\n strType = \"IPV6\"\n\n return f\"Type Ethernet :\\t\\t{strType} (0x{HexEType})\", estIPV4", "def formatIP(l):\n return \".\".join([str(int(i,16)) for i in l])", "def _2str(unit):\n if not isinstance(unit,list):\n raise TypeError('Input required as <list>!')\n return ' '.join([str(u) for u in unit])", "def port2string(self, port):\r\n return chr((port & 0xff00) >> 8)+ chr(port & 0x00ff)", "def _format_ports(self, pre_selected_ports):\n ports = pre_selected_ports.copy()\n\n for comp, port_list in ports.items():\n if len(port_list) == 1:\n ports[comp] = port_list[0]\n elif len(port_list) == 0:\n # Nothing is selected, meaning it will be randomly selected.\n ports[comp] = \"random\"\n elif comp == \"worker_ports\":\n min_port = port_list[0]\n max_port = port_list[len(port_list) - 1]\n if len(port_list) < 50:\n port_range_str = str(port_list)\n else:\n port_range_str = f\"from {min_port} to {max_port}\"\n ports[comp] = f\"{len(port_list)} ports {port_range_str}\"\n return ports", "def _make_awk_from_typed_list(neighbors_list, builder):\n for neighs in neighbors_list:\n # Creates a new line (\"list\") in the builder\n # # with builder.list(): # Numba incompatible\n builder.begin_list()\n for j in neighs:\n builder.integer(j) # Appends as int64, unavoidably...\n builder.end_list()", "def _encode_list(l,buff):\n buff.append(b'l')\n for i in l:\n _encode(i,buff)\n buff.append(b'e')", "def list_to_string(input_list, seperator):\n output = input_list[0]\n for item in input_list[1:]:\n output = string_concatenator(output, item, seperator)\n return output", "def bitlist_to_string(data: List[int]) -> ByteString:\n result = []\n pos = 0\n c = 0\n while pos < len(data):\n c += data[pos] << (7 - (pos % 8))\n if (pos % 8) == 7:\n result.append(c)\n c = 0\n pos += 1\n\n return bytes(result)", "def parse_port_list(host_list, portList):\n\thost_port_map = []\n\tfor host in host_list:\n\t\tfor port in portList:\n\t\t\t# host-port tuple:\n\t\t\thost_port_map.append((host, port))\n\treturn host_port_map", "def list_to_string(the_list):\n return \"[\" + \", \".join(str(x) for x in the_list) + \"]\"", "def _make_node_str_list(l):\n char_ps = [c_char_p(_mpv_coax_proptype(e, str)) for e in l]\n node_list = MpvNodeList(\n num=len(l),\n keys=None,\n values=(MpvNode * len(l))(*[MpvNode(\n format=MpvFormat.STRING,\n val=MpvNodeUnion(string=p))\n for p in char_ps]))\n node = MpvNode(\n format=MpvFormat.NODE_ARRAY,\n 
val=MpvNodeUnion(list=pointer(node_list)))\n return char_ps, node_list, node, cast(pointer(node), c_void_p)", "def format_strings_for_cmd(input_list):\n return \"['\" + \"', '\".join(input_list) + \"']\"", "def StringConverter(org_list, seperator=''):\n return seperator.join(org_list)", "def build_list(list_file, ports):\n regex = re.compile(r\"^(https?:\\/\\/)?.+?(:[0-9]{0,5})?$\")\n scan_set = set()\n lines = [line.rstrip() for line in list_file.readlines()]\n for line in lines:\n line = re.match(regex, line)\n if not line:\n pass\n elif line[1] and line[2]: # protocol and port\n scan_set.add(line[0])\n elif line[1] and not line[2]: # protocol no port\n print('Protocol no port')\n if line[1] == 'https://':\n scan_set.add(line[0])\n else:\n for port in ports:\n # Convert http://example.com:443 to https://example.com:443\n if str(port) == '443':\n uri = line[0].replace('http://', 'https://') + ':' + str(port)\n scan_set.add(uri)\n else:\n uri = line[0] + ':' + str(port)\n scan_set.add(uri)\n print(scan_set)\n exit()\n\n elif not line[1] and line[2]: # no protocol but port\n if line[2] == ':443':\n uri = 'https://' + line[0]\n else:\n uri = 'http://' + line[0]\n scan_set.add(uri)\n elif not line[1] and not line[2]: # neither protocol nor port\n for port in ports:\n if str(port) == '443':\n uri = 'https://' + line[0] + ':' + str(port)\n else:\n uri = 'http://' + line[0] + ':' + str(port)\n scan_set.add(uri)\n return scan_set" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open the data and/or server sockets based on the current properties
def _openSocket(self): log.info("Connection Type: " + str(self.connection_type)) log.info("IP Address: " + self.ip_address) log.info("Port: " + str(self.port)) if self.connection_type == "server": self._dataSocket = None self._serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: self._serverSocket.bind(("localhost", self.port)) except Exception, e: log.error("Unable to bind socket: " + str(e)) return self._serverSocket.listen(1) elif self.connection_type == "client": self._dataSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._dataSocket.connect((self.ip_address, self.port)) self._serverSocket = None else: log.error("Invalid connection type: " + self.connection_type) self._dataSocket = None self._serverSocket = None
[ "def open(self):\n self.socket.connect(self.addr)\n logger.info(\"%s socket connected to %s\", self.name, self.addr)", "def open_connection(self):\n logging.debug(\"Creating socket connection to host: {0}, port: {1}\".format(\n self.hostname, self.port))\n try:\n self._sock = socket.create_connection((self.hostname, self.port),10)\n except socket.error:\n logging.exception(\"Unable to connect to Munin host {0}, port: {1}\".format(\n self.hostname, self.port))\n sys.exit(1)\n\n self._conn = self._sock.makefile()\n self.hello_string = self._readline()", "def open(self,host='',port=1314,nostart=False):\n\t\n from subprocess import STDOUT, Popen\n\t\n\t sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t try:\n\t \tsock.connect((host,port))\n\t except socket.error:\n\t \tif nostart:\n\t \t\traise socket.error\n\t \telse:\n self.festival_pid = Popen([\"festival\", \"--server\"]).pid \n\t\t \tatexit.register(self._kill_server)\n\t\t \tfor t in xrange(20):\n\t\t \t\ttry:\n\t\t \t\t\ttime.sleep(.25)\n\t\t \t\t\tsock.connect((host,port))\n\t\t \t\texcept socket.error:\n\t\t \t\t\tpass\n\t\t \t\telse:\n\t\t \t\t\tbreak\n\t\t \telse:\n\t\t \t\traise socket.error\n\t\t\n\t self.sock = sock\n return sock", "def create(self):\r\n\t\tif self.type == 'i': # Internet socket\r\n\t\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\t\tself.sock.bind((self.host, self.port))\r\n\t\t\tutil.logMessage('Bound to TCP socket on port %d ' % self.port)\r\n\t\telse:\r\n\t\t\tif os.path.exists(self.file):\r\n\t\t\t\t# if socket already exists, remove it. This prevents errors when the socket is corrupt after a crash.\r\n\t\t\t\tos.remove(self.file)\r\n\t\t\tself.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\r\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\t\tself.sock.bind(self.file) # Bind BEERSOCKET\r\n\t\t\t# set all permissions for socket\r\n\t\t\tos.chmod(self.file, 0777)", "def open(self, settings): \r\n self._open_settings = dict(settings)\r\n\r\n # update the local copies of settings\r\n if 'read_terminator' in settings:\r\n self._read_terminator = settings['read_terminator']\r\n\r\n if 'cmd_terminator' in settings:\r\n self._cmd_terminator = settings['cmd_terminator']\r\n\r\n if 'cmd_timeout' in settings:\r\n self._cmd_timeout = settings['cmd_timeout']\r\n\r\n return True", "def test_parameters_socket(self):\n dir = os.path.dirname(os.path.abspath(__file__))\n socket = \"/tmp/a_socket_file\"\n launcher = FirenadoLauncher(dir=dir, socket=socket)\n self.assertEqual(dir, launcher.dir)\n self.assertEqual(socket, launcher.socket)", "def openSocket():\n host, port, pw, nick, channel = getSettings()\n s = socket.socket()\n s.connect((host, port))\n sysMessage(s, \"PASS \" + pw)\n sysMessage(s, \"NICK \" + nick)\n sysMessage(s, \"JOIN #\" + channel)\n return s", "def __init__(self, cfg):\r\n\r\n\t\tself.type = 'f' # default to file socket\r\n\t\tself.file = None\r\n\t\tself.host = 'localhost'\r\n\t\tself.port = None\r\n\t\tself.sock = 0\r\n\r\n\t\tisWindows = sys.platform.startswith('win')\r\n\t\tuseInternetSocket = bool(cfg.get('useInternetSocket', isWindows))\r\n\t\tif useInternetSocket:\r\n\t\t\tself.port = cfg.get('socketPort', 6332)\r\n\t\t\tself.type = 'i'\r\n\t\telse:\r\n\t\t\tself.file = util.addSlash(cfg['scriptPath']) + 'BEERSOCKET'", "def socket(self, mode=3, stream=None, heartbeat=None):\r\n # build connection url\r\n if stream is None:\r\n url = 
make_uri(self.server.uri, '/%s/channel' % self.pid,\r\n mode=mode)\r\n else:\r\n url = make_uri(self.server.uri, '/%s/channel/%s' % (self.pid,\r\n stream), mode=mode)\r\n url = \"ws%s\" % url.split(\"http\", 1)[1]\r\n\r\n # build connection options\r\n options = {}\r\n if heartbeat and heartbeat is not None:\r\n options['heartbeat'] = heartbeat\r\n\r\n # eventually add sll options\r\n if is_ssl(url):\r\n options['ssl_options'] = parse_ssl_options(self.server.options)\r\n\r\n return IOChannel(self.server.loop, url, mode=mode,\r\n api_key=self.server.api_key, **options)", "def socket_open(self):\n log.info(\"Creating UDP socket %s:%d for communication with the client\",\n self.receiverIP, self.receiverPort)\n\n try:\n self.receiverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.receiverSocket.bind((self.receiverIP, self.receiverPort))\n except Exception as e:\n log.error(\"Could not create UDP socket for communication with the client!\")\n log.debug(e)\n traceback.print_exc()", "def open(self, connection_timeout=None, cmd=Sl4aCommand.INIT):\n if connection_timeout:\n timeout_time = time.time() + connection_timeout\n else:\n timeout_time = sys.maxsize\n self._counter = self._id_counter()\n while True:\n try:\n self.conn = socket.create_connection(\n (self.addr, self.port), max(1, timeout_time - time.time()))\n self.conn.settimeout(self._SOCKET_TIMEOUT)\n break\n except (socket.timeout):\n logging.exception(\"Failed to create socket connection!\")\n raise\n except (socket.error, IOError):\n # TODO: optimize to only forgive some errors here\n # error values are OS-specific so this will require\n # additional tuning to fail faster\n if time.time() + 1 >= timeout_time:\n logging.exception(\"Failed to create socket connection!\")\n raise\n time.sleep(1)\n\n self.client = self.conn.makefile(mode=\"brw\")\n\n resp = self._cmd(cmd, self.uid)\n if not resp:\n raise Sl4aProtocolError(\n Sl4aProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n result = json.loads(str(resp, encoding=\"utf8\"))\n if result['status']:\n self.uid = result['uid']\n else:\n self.uid = UNKNOWN_UID", "def _setup_socketio(self) -> None:", "def _connect(self):\n\n wrapper_headers, wrapper_body = self._create_wrapper_request()\n\n self.wrapper_user = self._get_wrapper_user(wrapper_headers)\n self.wrapper_key = self._get_wrapper_key(wrapper_body)\n\n self.websocket = self._get_websocket()\n\n return self.init()", "def _open(self):\n # Make sure we close any previous transport open to this usb device.\n port_path = tuple(self.port_path)\n with self._HANDLE_CACHE_LOCK:\n old_transport = self._HANDLE_CACHE.get(port_path)\n if old_transport is not None:\n old_transport.Close()\n\n self._read_endpoint = None\n self._write_endpoint = None\n\n for endpoint in self._setting.iterEndpoints():\n address = endpoint.getAddress()\n if address & usb1.USB_ENDPOINT_DIR_MASK: # pylint: disable=no-member\n self._read_endpoint = address\n self._max_read_packet_len = endpoint.getMaxPacketSize()\n else:\n self._write_endpoint = address\n\n assert self._read_endpoint is not None\n assert self._write_endpoint is not None\n\n transport = self._device.open()\n iface_number = self._setting.getNumber()\n try:\n if (platform.system() != 'Windows' and transport.kernelDriverActive(iface_number)):\n transport.detachKernelDriver(iface_number)\n except usb1.USBErrorNotFound: # pylint: disable=no-member\n warnings.warn('Kernel driver not found for interface: %s.', iface_number)\n transport.claimInterface(iface_number)\n self._transport = transport\n 
self._interface_number = iface_number\n\n with self._HANDLE_CACHE_LOCK:\n self._HANDLE_CACHE[port_path] = self\n # When this object is deleted, make sure it's closed.\n weakref.ref(self, self.close)", "def __init__(self):\n self.files_handler = None\n self.client = BaseClient()\n # connects to server and starts session\n self.client.connect(SERVER_IP, PORT)\n self.session = self.client.get_session()\n # sends payload's properties\n self.session.send_text(JSON_TEXT)\n # receive's payload's status\n response = self.session.receive().get_data()\n print (response)\n response = json.loads(response)\n is_active = response['active']\n if is_active:\n # continues active payload\n self.handle_active_payload(response)\n else:\n self.handle_inactive_payload(response)", "def _open(self):\n res = None\n if self._isopen:\n return True\n\n if self.hosts:\n saved_simple_error = None\n saved_gssapi_error = None\n for server in self.hosts:\n proto = 'ldaps' if SSL(self.ldap['ssl']) == SSL.USESSL else 'ldap'\n port = 636 if SSL(self.ldap['ssl']) == SSL.USESSL else 389\n uri = f\"{proto}://{server}:{port}\"\n try:\n self._handle = ldap.initialize(uri)\n except Exception as e:\n self.logger.debug(f'Failed to initialize ldap connection to [{uri}]: ({e}). Moving to next server.')\n continue\n\n res = None\n ldap.protocol_version = ldap.VERSION3\n ldap.set_option(ldap.OPT_REFERRALS, 0)\n ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, 10.0)\n\n if SSL(self.ldap['ssl']) != SSL.NOSSL:\n ldap.set_option(ldap.OPT_X_TLS_ALLOW, 1)\n ldap.set_option(\n ldap.OPT_X_TLS_CACERTFILE,\n f\"/etc/certificates/{self.ldap['certificate']['cert_name']}.crt\"\n )\n ldap.set_option(\n ldap.OPT_X_TLS_REQUIRE_CERT,\n ldap.OPT_X_TLS_ALLOW\n )\n\n if SSL(self.ldap['ssl']) == SSL.USETLS:\n try:\n self._handle.start_tls_s()\n\n except ldap.LDAPError as e:\n self.logger.debug('Encountered error initializing start_tls: %s', e)\n saved_simple_error = e\n continue\n\n if self.ldap['anonbind']:\n try:\n res = self._handle._handle.simple_bind_s()\n break\n except Exception as e:\n saved_simple_error = e\n self.logger.debug('Anonymous bind failed: %s' % e)\n continue\n\n if self.ldap['kerberos_principal']:\n try:\n self._handle.sasl_gssapi_bind_s()\n res = True\n break\n except Exception as e:\n saved_gssapi_error = e\n self.logger.debug(f'SASL GSSAPI bind failed: {e}. 
Attempting simple bind')\n\n try:\n res = self._handle.simple_bind_s(self.ldap['binddn'], self.ldap['bindpw'])\n break\n except Exception as e:\n self.logger.debug(f'Failed to bind to [{uri}] using [{self.ldap[\"binddn\"]}]: {e}')\n saved_simple_error = e\n continue\n\n if res:\n self._isopen = True\n elif saved_gssapi_error:\n raise CallError(str(saved_gssapi_error))\n elif saved_simple_error:\n raise CallError(str(saved_simple_error))\n\n return (self._isopen is True)", "def openSocket(self, device=None):\n # Hard-coded socket port needs to match the one in DefaultVehicleHal\n remotePortNumber = 33452\n extraArgs = '' if device is None else '-s %s' % device\n adbCmd = '/home/himinds/Android/android-pie-compile/out/host/linux-x86/bin/adb %s forward tcp:0 tcp:%d' % (\n extraArgs, remotePortNumber)\n adbResp = subprocess.check_output(adbCmd, shell=True)[0:-1]\n localPortNumber = int(adbResp)\n #localPortNumber = 44567\n #adbCmd1 = 'qemu-system-x86_64 -m 2048 -boot d -enable-kvm -smp 3 -net nic -net user,hostfwd=tcp::4444-:5555,hostfwd=tcp::%d-:33452 -hda /home/himinds/Android/android-image-x86_64/android-oreo.img -cdrom /home/himinds/Android/android-x86-oreo/out/target/product/x86_64android_x86_64.iso' % (extraArgs, localPortNumber)\n #adbResp1 = subprocess.check_output(adbCmd1, shell=True)[0:-1]\n print('Connecting local port %s to remote port %s on %s' %\n (localPortNumber, remotePortNumber,\n 'default device' if device is None else 'device %s' % device))\n # Open the socket and connect\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect(('localhost', localPortNumber))\n print(\"hello\")", "def open(self, addr):\n if (self.connected is False):\n uri = \"ws://\"+addr+\":81\"\n self.ws = websocket.create_connection(uri,sockopt=((socket.SOL_SOCKET, socket.SO_REUSEADDR,1),\n (socket.IPPROTO_TCP, socket.TCP_NODELAY,1),))\n self.ws.settimeout(self.default_recv_timeout)\n self.ipAddr = addr\n self.connected = True", "def tcp_request(self):\n sock = socket(AF_INET, SOCK_STREAM)\n sock.settimeout(TIMEOUT)\n\n try:\n sock.connect((self.host, self.port))\n self.opened_tcp = \"+\"\n\n try:\n data = sock.recv(512).decode()\n\n except timeout:\n # It is not a post protocol because there is no greeting.\n # It may be HTTP.\n sock.send(\"GET / HTTP/1.1{0}{0}\".format(linesep).encode())\n\n try:\n data = sock.recv(512).decode()\n if data.startswith(\"HTTP\"):\n self.protocol = \"HTTP\"\n except timeout:\n # This is not a protocol from the list.\n return\n\n else:\n # It may be a post server.\n if data.startswith(\"220\"):\n # Mail-server is connected to electrical power station.\n data = data.lower()\n if data.find(\"smtp\") > 0:\n self.protocol = \"SMTP\"\n elif data.find(\"ftp\") > 0:\n self.protocol = \"FTP\"\n elif data.startswith(\"+OK\"):\n self.protocol = \"POP3\"\n\n # TCP is closed in following cases.\n except timeout:\n self.opened_tcp = \"-\"\n except error:\n debug(\"Can't get information about TCP on port: %s.\", self.port)\n self.opened_tcp = \"-\"\n finally:\n sock.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The thread function for collecting data from the sink and pushing it to the socket
def _pushThread(self): self.settingsAcquired = False self.threadExited = False while not self._exitThread: if self._dataSocket == None: if self.connection_type == "server": if self._serverSocket == None: self._openSocket() log.debug("Waiting for client connection") (self._dataSocket, clientAddress) = self._serverSocket.accept() log.debug("Got client connection: " + str(clientAddress)) else: self._openSocket() time.sleep(0.1) continue if not self._sink: log.warn("No connections to NetworkSink") time.sleep(1.0) continue (retval, timestamps) = self._sink.retrieveData() if not retval or len(retval) == 0: time.sleep(0.1) continue data = self._formatData(retval) data=self.leftover+data self.leftover = "" # If the byte swap value is 1, then # use the size of the data if self.byte_swap == 1: portType = self._sink.port_type if portType == _BULKIO__POA.dataChar: byteSwap = 1 elif portType == _BULKIO__POA.dataOctet: byteSwap = 1 elif portType == _BULKIO__POA.dataShort: byteSwap = 2 elif portType == _BULKIO__POA.dataUshort: byteSwap = 2 elif portType == _BULKIO__POA.dataLong: byteSwap = 4 elif portType == _BULKIO__POA.dataUlong: byteSwap = 4 elif portType == _BULKIO__POA.dataFloat: byteSwap = 4 elif portType == _BULKIO__POA.dataLongLong: byteSwap = 8 elif portType == _BULKIO__POA.dataUlongLong: byteSwap = 8 elif portType == _BULKIO__POA.dataDouble: byteSwap = 8 elif portType == _BULKIO__POA.dataString: byteSwap = 1 elif portType == _BULKIO__POA.dataXml: pass elif portType == _BULKIO__POA.dataFile: pass else: byteSwap = 0 if byteSwap != 0: data = self._flip(data, byteSwap) elif self.byte_swap > 1: beforedata = copy.copy(data) data = self._flip(data, self.byte_swap) if len(data) < len(beforedata): self.leftover = str(beforedata[len(data):]) self._pushToSocket(data)
[ "async def writer_worker(self):\n try:\n while True:\n data = await self.inbound_queue.get()\n print('SOCKET > ', data)\n self.writer.write(data.encode())\n await self.writer.drain()\n finally:\n self.writer = None", "async def reader_worker(self):\n try:\n while True:\n data = await self.reader.readline()\n print('SOCKET <', data)\n for queue in self.outbound_queues:\n await queue.put(data.decode())\n finally:\n self.reader = None", "def __client_thread(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((self._host, self._port))\n data_raw = b''\n while True: # main loop\n while len(data_raw) < 44:\n data_raw += s.recv(1024)\n div = len(data_raw) // 44\n data_used = data_raw[(div - 1) * 44:div * 44]\n data_raw = data_raw[div * 44:]\n self.__lock.acquire()\n self.__data = data_used\n self.__lock.release()", "def main():\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((HOST, PORT))\n\n p = pyaudio.PyAudio()\n in_stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True, frames_per_buffer=BUFFER_SIZE_SEND)\n out_stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=BUFFER_SIZE_SEND)\n\n\n get_event = threading.Event()\n get_event.set()\n\n send_event = threading.Event()\n send_event.set()\n\n\n get_thread = threading.Thread(target=get_data, args=(sock, in_stream, get_event))\n get_thread.daemon = True\n send_thread = threading.Thread(target=send_data, args=(sock, out_stream, send_event))\n send_thread.daemon = True\n\n get_thread.start()\n send_thread.start()\n\n return get_event, send_event, get_thread, send_thread", "def _data_handler(self, msg):\n\n if len(msg) != 2:\n self.logger.info('skipping malformed message: %s' % str(msg))\n else:\n\n # When a message arrives, increase the corresponding received_count\n in_id = msg[0]\n out_id, data = msgpack.unpackb(msg[1])\n self.logger.info('recv from %s: %s' % (in_id, data))\n # Increase the appropriate count in recv_counts by 1\n self.recv_counts[(in_id,out_id)] += 1\n self.data_to_route.append((in_id, out_id, data))\n # When data with source/destination IDs corresponding to\n # every entry in the routing table has been received upto\n # current time step, deliver the data in the buffer:\n #if all((c for c in self.recv_counts.values())):\n if all(self.recv_counts.values()):\n self.logger.info('recv from all modules')\n for in_id, out_id, data in self.data_to_route:\n self.logger.info('sent to %s: %s' % (out_id, data))\n\n # Route to the destination ID and send the source ID\n # along with the data:\n self.sock_data.send_multipart([out_id,\n msgpack.packb((in_id, data))])\n\n # Reset the incoming data buffer\n self.data_to_route = []\n # Decrease all values in recv_counts to indicate that an\n # execution time_step has been succesfully completed\n for k in self.recv_counts.iterkeys(): self.recv_counts[k]-=1\n self.logger.info('----------------------')", "def collect_data(self):\n while(self.is_streaming):\n self.skipped_bytes = 0\n self.read_serial_binary()", "def collector(self):\n\n\t\tlogger = getlogger(\"Collector\")\n\t\tlogger.info(\"Starting collector.\")\n\n\t\t# Set up new poll context and initialize it\n\t\tlogger.info(\"Setting up poll context\")\n\t\tconf = CONF['collector']\n\t\tcontext = PollContext((conf['c_addr'], conf.as_int('c_port')))\n\t\tcontext.initialize()\n\n\t\tsteps = self.collect_interval\n\n\t\t# Main loop\n\t\tlogger.info(\"Collector is running\")\n\t\twhile self.running:\n\n\t\t\t# Wait for next 
event\n\t\t\ttry:\n\t\t\t\tevents = context.wait(1000)\n\t\t\texcept Exception, e:\n\t\t\t\tlogger.error(str(e))\n\n\t\t\tfor event, data, sock in events:\n\n\t\t\t\t# No register event coming\n\t\t\t\tif event == \"TIMEOUT\":\n\t\t\t\t\tif steps > 0:\n\t\t\t\t\t\tsteps -= 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.debug(\"Send DATA event to each one of workers\")\n\t\t\t\t\t\tfor key, worker in self.worker_set.items():\n\t\t\t\t\t\t\t# send DATA to each worker\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tworker['fd'].send(\"DATA\")\n\t\t\t\t\t\t\t\tlogger.debug(\"send to %s\"%key)\n\n\t\t\t\t\t\t\t# remove disconnected worker from worker set\n\t\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\t\tif e.errno == errno.EBADF:\n\t\t\t\t\t\t\t\t\tdel self.worker_set[key]\n\t\t\t\t\t\t\t\t\tlogger.info(\"The connection to worker %s is closed.\" % key)\n\n\t\t\t\t\t\tsteps = self.collect_interval\n\n\t\t\t\t\tbreak\n\n\t\t\t\t# New worker coming\n\t\t\t\tif event == \"REGISTER\":\n\t\t\t\t\tip = sock.getpeername()[0]\n\n\t\t\t\t\t# initialize coming worker\n\t\t\t\t\tself.worker_set[ip] = {\n\t\t\t\t\t\t'fd': sock, \n\t\t\t\t\t\t'addr': eval(data),\n\t\t\t\t\t\t'agents': 0}\n\n\t\t\t\t\tlogger.info(\"Worker %s has registered, address on %s\" % (ip, data))\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Metric data coming\n\t\t\t\tif event == \"DATA\":\n\n\t\t\t\t\t# insert data into database\n\t\t\t\t\tcount = self.db.insert_metric(data)\n\t\t\t\t\tlogger.info(\"store metrics : %d\" % count)\n\n\t\tlogger.info(threading.currentThread().getName() + \" is closing\")", "def process(self):\n while self._process_rcv_data():\n pass", "def _pushToSocket(self,\n data):\n if self._dataSocket != None:\n dataSent = 0\n dataToSend = len(data)\n \n while dataSent != dataToSend:\n dataSentTemp = self._dataSocket.send(data[dataSent:])\n\n if dataSentTemp == -1:\n log.error(\"Error with socket send\")\n break\n elif dataSentTemp == 0:\n log.debug(\"Connection closed by remote host\")\n self._dataSocket.shutdown(socket.SHUT_RDWR)\n self._dataSocket.close()\n self._dataSocket = None\n else:\n dataSent += dataSentTemp", "def _inner():\n self._running = True\n while self._running:\n try:\n data = self.input.read(None)\n if not data:\n self.stop()\n self.done.send(True)\n self.output.write(data)\n greenthread.sleep(IO_THREAD_SLEEP_TIME)\n except Exception as exc:\n self.stop()\n LOG.exception(exc)\n self.done.send_exception(exc)", "def _process_data_events(self):\n self.channel.basic_consume(self._on_response, no_ack=True,\n queue=self.callback_queue)\n while True:\n with self.internal_lock:\n self.connection.process_data_events()\n time.sleep(0.1)", "def start_consuming(self):", "def run(self):\n message_buffer = b\"\"\n while True:\n try:\n message_buffer += self.read()\n except DataSourceError as e:\n LOG.warn(\"Can't read from data source -- stopping: %s\", e)\n break\n\n while True:\n message, message_buffer, byte_count = self._parse_message(\n message_buffer)\n if message is None:\n break\n if not hasattr(message, '__iter__') or not (\n ('name' in message and 'value' in message) or (\n 'id' in message and 'data' in message)):\n self.corrupted_messages += 1\n break\n\n self.bytes_received += byte_count\n if self.callback is not None:\n self.callback(message)", "def _push_next_dataset(self):\n if self._sent_idx < len(self._dataset):\n url = self._dataset[self._sent_idx]\n else:\n return\n # push to worker asynchronously\n async_ret = self._worker_pool.apply_async(\n self._worker_fn, (url, self._dataset_fn, self._sampler_fn))\n # data buffer 
stores the async result\n self._data_buffer[self._sent_idx] = async_ret\n self._sent_idx += 1", "def worker():\r\n unprocessed=bytes()\r\n while True:\r\n try:\r\n chunk = self.socket.recv(2048)\r\n if len(chunk)==0: \r\n break\r\n else: \r\n unprocessed+=chunk \r\n result = self._parseData(unprocessed)\r\n #_parse data will return how many bytes was parse or -1 on error\r\n #we trim the pendingData buffer from the left using result as the index\r\n #if result == 0 it means no data was parsed and it will stay in unprocessed buffer until more data has arrived\r\n if result < 0:\r\n sys.stderr.write(\"TcpSocketAdapter._parseData error %d\"%result)\r\n break\r\n elif result > 0:\r\n unprocessed=unprocessed[result:]\r\n except (ConnectionAbortedError, OSError):\r\n break\r\n print(\"socket worker shutting down\")", "def __read(self):\n\n # Create buffer for receiving fragmented data.\n receive_buffer = dict()\n\n # Poll UDP socket and publish data.\n while not self.__stop_event.is_set():\n\n # Wait for a data event in the socket.\n events = self.__poller.poll(READ_TIMEOUT)\n if events and events[0][1] & select.POLLIN:\n\n # Read multiple packets from the socket.\n socket_data = list()\n while True:\n try:\n socket_data.append(self.__socket.recvfrom(MTU_MAX))\n except:\n break\n\n # Remarshal and issue data to callbacks.\n self.__remarshal(socket_data, receive_buffer)\n\n else:\n continue\n\n # Close socket on exiting thread.\n self.__socket.close()", "def dataReceived(self, returned_data):\n server_queue.put(returned_data)", "def _socket_read_thread_proc(self):\n def _win_select():\n while len(self._socket_dict) == 0:\n time.sleep(.001)\n if not self._is_running:\n return []\n return self._socket_selector.select()\n\n def _nix_select():\n return self._socket_selector.select()\n\n if sys.platform == 'win32':\n select_func = _win_select\n else:\n select_func = _nix_select\n\n buffer = bytearray(8192)\n buff_view = memoryview(buffer)\n buff_view[0:2] = CMD_DATA_PACKET\n while self._is_running:\n # TODO: On windows I can't do this when no sockets are registered\n events = select_func()\n for key, event in events:\n if event & selectors.EVENT_READ:\n try:\n bytes_read = key.fileobj.recv_into(buff_view[6:])\n except EOFError:\n # This socket has been closed, disconnect it\n self.disconnect_socket(key.data)\n continue\n length = bytes_read + 6 # add header to length\n len_bytes = pack('>H', length)\n id_bytes = pack('>H', key.data)\n buff_view[2:4] = len_bytes\n buff_view[4:6] = id_bytes\n try:\n self._handle.bulkWrite(self._out_endpoint, buff_view[:length])\n except usb1.USBError as err:\n eprint(\"Error writing data: %s\" % err)", "def socket_2_queue():\n global queue\n while True:\n # establish a connection\n clientsocket, addr = serversocket.accept()\n try:\n data = clientsocket.recv(8192)\n except:\n print('exception during reading from socket')\n continue\n jdata = json.loads(data)\n jdata['clientsocket'] = clientsocket\n with queue_semaphore:\n queue.append(jdata)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push data to the current data socket, handling short writes as necessary
def _pushToSocket(self, data): if self._dataSocket != None: dataSent = 0 dataToSend = len(data) while dataSent != dataToSend: dataSentTemp = self._dataSocket.send(data[dataSent:]) if dataSentTemp == -1: log.error("Error with socket send") break elif dataSentTemp == 0: log.debug("Connection closed by remote host") self._dataSocket.shutdown(socket.SHUT_RDWR) self._dataSocket.close() self._dataSocket = None else: dataSent += dataSentTemp
[ "def write_and_send(self, data):\r\n self.__my_socket.send_(data)\r\n self.recev()", "def push(self, data):\r\n\r\n self.outbuffer += data", "def send_data(self, proto_id, data):\n for p in self.socks5_factory.client.server_dict.keys():\n bytes_sent = 0\n while bytes_sent < len(data):\n chunk_data = data[bytes_sent:bytes_sent + 4096]\n seq_id = self.get_seq_id() # attach sequence number for each data chunk in one protocol\n self.socks5_factory.send_buffer[proto_id][seq_id] = chunk_data # update send buffer\n print \"send to server: \", proto_id, seq_id, self.socks5_factory.send_buffer\n packet = self.socks5_factory.client.create_message(proto_id, seq_id, chunk_data)\n self.socks5_factory.client.endpoint.send(p.address, packet)\n bytes_sent += 4096", "def send(self, data) -> None:\n\n pickle_data = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n\n self.__sock.send(pickle_data)\n self.__sock.send(Socket.SOCK_DATA_END)", "def send(self, clientsocket, data):\n print(\"### send -- start\") #debug\n print(data) #debug\n print(\"### send -- end\") #debug\n # creates a stream of bytes\n serialized_data = pickle.dumps(data)\n while True:\n try:\n # data sent to the client\n clientsocket.sendall(serialized_data)\n # # check acknowledge\n # try:\n # if not receive_ack(clientsocket, n):\n # continue\n except socket.timeout:\n continue\n else:\n break", "def writeSomeData(self, data):\n # Limit length of buffer to try to send, because some OSes are too\n # stupid to do so themselves (ahem windows)\n limitedData = lazyByteSlice(data, 0, self.SEND_LIMIT)\n\n try:\n return untilConcludes(self.socket.send, limitedData)\n except socket.error as se:\n if se.args[0] in (EWOULDBLOCK, ENOBUFS):\n return 0\n else:\n return main.CONNECTION_LOST", "def send_data_to_socket(self):\r\n if not self.connected:\r\n self.throw_exception(message='disconnected')\r\n\r\n if not self._outgoing_buffer:\r\n return 0\r\n\r\n while True:\r\n try:\r\n bytes_sent = self.gearman_socket.send(self._outgoing_buffer)\r\n except ssl.SSLError as e:\r\n if e.errno == ssl.SSL_ERROR_WANT_READ:\r\n continue\r\n elif e.errno == ssl.SSL_ERROR_WANT_WRITE:\r\n continue\r\n else:\r\n self.throw_exception(exception=e)\r\n except socket.error, socket_exception:\r\n self.throw_exception(exception=socket_exception)\r\n\r\n if bytes_sent == 0:\r\n self.throw_exception(message='remote disconnected')\r\n break\r\n\r\n self._outgoing_buffer = self._outgoing_buffer[bytes_sent:]\r\n return len(self._outgoing_buffer)", "def send( self, data: JSONData ) -> None:\n\n self.sock.sendall( self.encode( data ) )\n self.sock.shutdown( socket.SHUT_WR ) # Signal end of message", "def __send_bytes(self, data):\n self.socket.sendall(data)", "def protocol_send(self, data, sock):", "def _send(self, data, newline=\"\\r\\n\", sock=None):\n self.outbuff.append(data+newline)\n for msg in self.outbuff:\n if self.verbose:\n print(\"<<< \"+msg)\n self.sock.send((msg+newline).encode(\"utf-8\"))", "def sock_send(self, data):\n\n self.sock.send(data)", "def _pushThread(self):\n self.settingsAcquired = False\n self.threadExited = False\n\n while not self._exitThread:\n if self._dataSocket == None:\n if self.connection_type == \"server\":\n if self._serverSocket == None:\n self._openSocket()\n \n log.debug(\"Waiting for client connection\")\n (self._dataSocket, clientAddress) = self._serverSocket.accept()\n log.debug(\"Got client connection: \" + str(clientAddress))\n else:\n self._openSocket()\n\n time.sleep(0.1)\n continue\n\n if not self._sink:\n log.warn(\"No connections to 
NetworkSink\")\n time.sleep(1.0)\n continue\n\n (retval, timestamps) = self._sink.retrieveData()\n\n if not retval or len(retval) == 0:\n time.sleep(0.1)\n continue\n data = self._formatData(retval)\n data=self.leftover+data\n self.leftover = \"\"\n\n # If the byte swap value is 1, then\n # use the size of the data\n if self.byte_swap == 1:\n portType = self._sink.port_type\n\n if portType == _BULKIO__POA.dataChar:\n byteSwap = 1\n elif portType == _BULKIO__POA.dataOctet:\n byteSwap = 1\n elif portType == _BULKIO__POA.dataShort:\n byteSwap = 2\n elif portType == _BULKIO__POA.dataUshort:\n byteSwap = 2\n elif portType == _BULKIO__POA.dataLong:\n byteSwap = 4\n elif portType == _BULKIO__POA.dataUlong:\n byteSwap = 4\n elif portType == _BULKIO__POA.dataFloat:\n byteSwap = 4\n elif portType == _BULKIO__POA.dataLongLong:\n byteSwap = 8\n elif portType == _BULKIO__POA.dataUlongLong:\n byteSwap = 8\n elif portType == _BULKIO__POA.dataDouble:\n byteSwap = 8\n elif portType == _BULKIO__POA.dataString: \n byteSwap = 1\n elif portType == _BULKIO__POA.dataXml:\n pass\n elif portType == _BULKIO__POA.dataFile:\n pass\n else:\n byteSwap = 0\n \n if byteSwap != 0:\n data = self._flip(data, byteSwap)\n\n elif self.byte_swap > 1:\n beforedata = copy.copy(data)\n data = self._flip(data, self.byte_swap)\n if len(data) < len(beforedata):\n self.leftover = str(beforedata[len(data):])\n\n self._pushToSocket(data)", "def socket_send(self):\n if not self.send_ready():\n warnings.warn('socket_send() called on empty buffer',\n RuntimeWarning, 2)\n return 0\n ready_bytes = bytes(''.join(self.send_buffer))\n self.send_buffer = array.array('c')\n\n def send(send_bytes):\n \"\"\"\n throws x84.bbs.exception.Disconnected on sock.send err\n \"\"\"\n try:\n return self.sock.send(send_bytes)\n except socket.error as err:\n if err[0] == 11:\n warnings.warn('%s: %s (bandwidth exceed)' % (\n self.addrport(), err[1],), RuntimeWarning, 2)\n else:\n raise Disconnected(\n 'socket send %d: %s' % (err[0], err[1],))\n\n sent = send(ready_bytes)\n if sent < len(ready_bytes):\n # re-buffer data that could not be pushed to socket;\n self.send_buffer.fromstring(ready_bytes[sent:])\n else:\n # When a process has completed sending data to an NVT printer\n # and has no queued input from the NVT keyboard for further\n # processing (i.e., when a process at one end of a TELNET\n # connection cannot proceed without input from the other end),\n # the process must transmit the TELNET Go Ahead (GA) command.\n if (not self.input_ready()\n and self.check_local_option(SGA) is False\n and not self._check_reply_pending(SGA)):\n sent += send(bytes(''.join((IAC, GA))))\n return sent", "async def writer_worker(self):\n try:\n while True:\n data = await self.inbound_queue.get()\n print('SOCKET > ', data)\n self.writer.write(data.encode())\n await self.writer.drain()\n finally:\n self.writer = None", "def send(self, data):\n self._socket.sendall(data)", "def _writeSomeData(self, data):\n sent = self.transport._originalWriteSomeData(data)\n self.dataSentEvent(sent)\n return sent", "def send(self, buf):", "def handle_read(self):\n\t\tself.data_buffer.append(self.recv(8192))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subscribe to updates for a specific symbol and field. The callback will be called as 'await callback(symbol, field, value, timestamp)' whenever an update is received.
async def subscribe(self, symbol, field, callback): async with self.__lock: # Connect the websocket if necessary if self.__websocket is None: await self.__connect() # Send the subscribe message if we're not already subscribed if symbol not in self.__subscriptions: msg = {"op": "subscribe", "args": [f"instrument:{symbol}"]} await self.__websocket.send(json.dumps(msg)) # Add the subscriber to the dict of subscriptions self.__subscriptions.setdefault(symbol, {}).setdefault(field, []).append(callback) # Call the callback with the latest data data = self.__data.get(symbol, {}) if field in data: (value, timestamp) = data[field] await callback(symbol, field, value, timestamp)
[ "def _listen_callback(_, key, value, __):\n print(\"{!r} updated: {!r}\".format(key, value))", "async def slots_updates_subscribe(self) -> None:\n req = SlotsUpdatesSubscribe()\n await self.send_data(req)", "def on_update(self, callback):\n self._update_callback.add(callback)", "def subscribe_to_ticker(self, symbol, callback):\n symbol = utils.order_symbol(symbol)\n id_ = \"_\".join([\"ticker\", symbol])\n data = {\n 'event': 'subscribe',\n 'channel': 'ticker',\n 'symbol': symbol,\n }\n payload = json.dumps(data, ensure_ascii=False).encode('utf8')\n return self._start_socket(id_, payload, callback)", "def on_update(self, func):\n self._on_update = func\n return func", "def subscribe_to_market_data(self, symbol: str) -> Coroutine:\n return self._websocketClient.subscribe_to_market_data(self._account.id, symbol)", "def registerShipUpdateCallback(self, callback):\n\n\t\tself.__shipUpdateCallbacks.append(callback)", "def receiveUpdate(self, info=None):\n pass", "def subscribe(self, callback):\n self.change_callbacks.append(callback)\n return self", "def on_change(self, callback, *args, **kwargs):\n self._var.trace(\"w\", lambda *_: callback(*args, **kwargs))", "def add_update_callback(self, cb):\n def callback(err):\n cb()\n\n ncb = lib.UPDATE_CALLBACK(callback)\n\n assert self.raw_ptr is not None\n\n token = lib.srl__consul__add_update_callback(self.raw_ptr, ncb)\n\n self.update_callbacks[token] = ncb\n\n return token", "def update_frequency(self, update_frequency):\n\n self._update_frequency = update_frequency", "def subscribe_to_candles(self, symbol, timeframe, callback):\n\n valid_tfs = ['1m', '5m', '15m', '30m', '1h', '3h', '6h', '12h', '1D',\n '7D', '14D', '1M']\n if timeframe:\n if timeframe not in valid_tfs:\n raise ValueError(\"timeframe must be any of %s\" % valid_tfs)\n else:\n timeframe = '1m'\n identifier = ('candles', symbol, timeframe)\n id_ = \"_\".join(identifier)\n symbol = utils.order_symbol(symbol)\n key = 'trade:' + timeframe + ':' + symbol\n data = {\n 'event': 'subscribe',\n 'channel': 'candles',\n 'key': key,\n }\n payload = json.dumps(data, ensure_ascii=False).encode('utf8')\n return self._start_socket(id_, payload, callback)", "def symbolChanged(self, symbol: ghidra.program.model.symbol.Symbol, type: int, addr: ghidra.program.model.address.Address, affectedObj: object, oldValue: object, newValue: object) -> None:\n ...", "def connect(self, key: str, func: Callable[[Any], None], init: bool) -> None:\n self._entries[key].changed.connect(func)\n if init:\n func(getattr(self, key))", "def handle_update(self, data):\n if self.skip_update(data):\n logger.debug('skipping update data', data)\n return\n if data['id'] in self.index:\n self.index[data['id']] = data\n\n for callback in self.callbacks.get('update') or set():\n callback(data)", "async def on_channel_update(self, before, after):", "def addForceUpdateCallback(*args, **kwargs):\n \n pass", "def setChangedCallback(self, *args) -> \"void\":\n return _coin.SoSensorManager_setChangedCallback(self, *args)", "def subscribe(channel: str, callback: Callable[..., Any]) -> None:\n _get().subscribe(channel, callback)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }