Columns:
  query      string  (lengths 9 to 9.05k)
  document   string  (lengths 10 to 222k)
  negatives  list    (19 to 20 items)
  metadata   dict
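Each row below pairs a natural-language query with one positive code document and a list of hard-negative snippets. Assuming this preview comes from a Hugging Face datasets-compatible source, a minimal sketch for loading and iterating rows of this shape could look like the following; the dataset identifier is a placeholder, not the real name.

# Minimal sketch, assuming a Hugging Face datasets-compatible source.
# "org/code-retrieval-triplets" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")

for row in ds:
    query = row["query"]          # natural-language description of the code
    document = row["document"]    # the positive (matching) code snippet
    negatives = row["negatives"]  # 19-20 hard-negative code snippets
    objective = row["metadata"]["objective"]  # e.g. {"triplet": [["query", "document", "negatives"]]}
    print(query[:60], "->", len(negatives), "negatives")
    break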
send_approved_withdrawals_to_paypal_wallets fetches all processed and approved withdrawals and schedules them for sending to the client's PayPal wallet address
def send_approved_withdrawals_to_paypal_wallets(self) -> Optional[List[Future]]:
    try:
        wallet_transactions: List[WalletTransactionsModel] = WalletTransactionsModel.query(
            WalletTransactionsModel.is_verified == True, WalletTransactionsModel.is_settled == False).fetch_async().get_result()
        print('approved withdrawals running')

        return [self.do_send_to_client_paypal(transaction=transaction) for transaction in wallet_transactions
                if transaction.transaction_type == 'withdrawal']
    except RetryError as e:
        # TODO Log this error
        return None
[ "def sync_all_pending_payments(db):\n payins = db.all(\"\"\"\n SELECT pi.*\n FROM payins pi\n JOIN payin_transfers pt ON pt.payin = pi.id\n JOIN exchange_routes r ON r.id = pi.route\n WHERE pt.status = 'pending'\n AND r.network = 'paypal'\n \"\"\")\n print(\"Syncing %i pending PayPal payments...\" % len(payins))\n for payin in payins:\n sync_payment(db, payin)\n sleep(0.2)", "def add_approved_deposits_to_wallet(self) -> Optional[List[Future]]:\n try:\n wallet_transactions: List[WalletTransactionsModel] = WalletTransactionsModel.query(\n WalletTransactionsModel.is_verified == True, WalletTransactionsModel.is_settled == False).fetch_async().get_result()\n print(\"approved deposits running\")\n return [self.do_send_to_client_wallet(transaction=transaction) for transaction in wallet_transactions\n if transaction.transaction_type == 'deposit']\n except RetryError as e:\n # TODO log this errors\n return None", "def _maybe_query_withdrawals(self) -> Optional[list[gevent.Greenlet]]:\n eth2 = self.chains_aggregator.get_module('eth2')\n if eth2 is None:\n return None\n\n if eth2.withdrawals_query_lock.locked():\n return None # already running\n\n now = ts_now()\n with self.database.conn.read_ctx() as cursor:\n result = self.database.get_used_query_range(cursor, LAST_WITHDRAWALS_QUERY_TS)\n if result is not None and now - result[1] <= DAY_IN_SECONDS:\n return None\n\n task_name = 'Periodically query ethereum withdrawals'\n log.debug(f'Scheduling task to {task_name}')\n return [self.greenlet_manager.spawn_and_track(\n after_seconds=None,\n task_name=task_name,\n exception_is_error=True,\n method=eth2.query_services_for_validator_withdrawals,\n to_ts=now,\n )]", "def withdrawals(self, request, pk=None):\n user = get_object_or_404(User, pk=pk)\n self.check_object_permissions(request, user)\n \n withdrawal_data = request.data.copy()\n\n try:\n amount = float(withdrawal_data.get('amount')) * -1\n withdrawal_data['amount'] = amount\n # negate amount so it's subtracted from user's available balance before saving the instance\n\n available = user.balance.first().available_balance\n\n if available + amount < 0:\n error_message = {'detail': 'You cannot withdraw more than your available balance'}\n return Response(error_message, status=status.HTTP_403_FORBIDDEN)\n\n except ValueError:\n pass\n\n serializer = BankTransferSerializer(data=withdrawal_data)\n\n if serializer.is_valid():\n serializer.save(owner=user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def _notify_for_ob(cls): # pylint: disable=too-many-locals\n unpaid_status = (\n InvoiceStatus.SETTLEMENT_SCHEDULED.value, InvoiceStatus.PARTIAL.value, InvoiceStatus.CREATED.value)\n notification_date = datetime.today() - timedelta(days=current_app.config.get('NOTIFY_AFTER_DAYS'))\n # Get distinct accounts with pending invoices for that exact day\n notification_pending_accounts = db.session.query(InvoiceModel.payment_account_id).distinct().filter(and_(\n InvoiceModel.invoice_status_code.in_(unpaid_status),\n InvoiceModel.payment_method_code == PaymentMethod.ONLINE_BANKING.value,\n # cast is used to get the exact match stripping the timestamp from date\n cast(InvoiceModel.created_on, Date) == notification_date.date()\n )).all()\n current_app.logger.debug(f'Found {len(notification_pending_accounts)} invoices to notify admins.')\n for payment_account in notification_pending_accounts:\n try:\n payment_account_id = payment_account[0]\n total = 
db.session.query(func.sum(InvoiceModel.total).label('total')).filter(and_(\n InvoiceModel.invoice_status_code.in_(unpaid_status),\n InvoiceModel.payment_account_id == payment_account_id,\n InvoiceModel.payment_method_code == PaymentMethod.ONLINE_BANKING.value\n )).group_by(InvoiceModel.payment_account_id).all()\n pay_account: PaymentAccountModel = \\\n PaymentAccountModel.find_by_id(payment_account_id)\n\n cfs_account = CfsAccountModel.find_effective_by_account_id(payment_account_id)\n\n # emit account mailer event\n addition_params_to_mailer = {'transactionAmount': float(total[0][0]),\n 'cfsAccountId': cfs_account.cfs_account,\n 'authAccountId': pay_account.auth_account_id,\n }\n mailer.publish_mailer_events('ob.outstandingInvoice', pay_account, addition_params_to_mailer)\n except Exception as e: # NOQA # pylint: disable=broad-except\n capture_message(f'Error on notifying mailer OB Pending invoice: account id={pay_account.id}, '\n f'auth account : {pay_account.auth_account_id}, ERROR : {str(e)}', level='error')\n current_app.logger.error(e)", "def suspend_customers_services():\n service = get_service_instance()\n config = service.config\n now = timezone.now()\n invoicing_config = InvoicingConfig.objects.all()[0]\n connection = mail.get_connection()\n try:\n connection.open()\n except:\n logger.error(u\"Connexion error\", exc_info=True)\n count, total_amount = 0, 0\n deadline = now - timedelta(days=invoicing_config.tolerance)\n invoice_qs = Invoice.objects.filter(due_date__lte=deadline, status=Invoice.OVERDUE)\n logger.debug(\"%d invoice(s) candidate for service suspension.\" % invoice_qs.count())\n for invoice in invoice_qs:\n subscription = invoice.subscription\n if subscription.plan.raw_monthly_cost == 0:\n continue\n invoice.status = Invoice.EXCEEDED\n invoice.save()\n count += 1\n total_amount += invoice.amount\n try:\n subscription.is_active = False\n subscription.save()\n except:\n logger.error(\"Error while processing subscription %s\" % str(subscription), exc_info=True)\n continue\n member = subscription.service.member\n add_event(service, SERVICE_SUSPENDED_EVENT, member=member, object_id=invoice.id)\n logger.debug(\"Event posted to %s's Console\" % member.username)\n subject, message, sms_text = get_service_suspension_message(invoice)\n if member.email:\n invoice_url = service.url + reverse('billing:invoice_detail', args=(invoice.id,))\n html_content = get_mail_content(subject, message, template_name='billing/mails/notice.html',\n extra_context={'invoice_url': invoice_url, 'cta': _(\"Pay now\")})\n # Sender is simulated as being no-reply@company_name_slug.com to avoid the mail\n # to be delivered to Spams because of origin check.\n sender = '%s <no-reply@%s>' % (config.company_name, service.domain)\n msg = EmailMessage(subject, html_content, sender, [member.email])\n msg.content_subtype = \"html\"\n logger.debug(\"Sending mail to %s\" % member.email)\n try:\n if msg.send():\n logger.debug(\"Mail sent to %s\" % member.email)\n else:\n logger.error(u\"Notice of suspension for Invoice #%s not sent to %s\" % (invoice.number, member.email), exc_info=True)\n except:\n logger.error(u\"Connexion error on Invoice #%s to %s\" % (invoice.number, member.email), exc_info=True)\n try:\n connection.close()\n finally:\n pass", "def hunt(self, trials=10000, sleep_time=0.1):\n num_runs = 0\n pre_arbitrage_assets = self.load_arbitrage_assets()\n time.sleep(sleep_time)\n while(num_runs < trials):\n try:\n self.update_orderbook()\n except ConnectionError as e:\n print(e + \"will suspend bot for 10 
seconds\")\n time.sleep(10)\n continue\n #Search for inefficiency\n orderbook_btc = self.orderbook_btc_eth(self.orderbook)\n orderbook_eth = self.orderbook_eth_btc(self.orderbook)\n if(orderbook_btc[0][1] - (self.fee * orderbook_btc[0][1]) > self.bit_rate['btc_one'] and\n orderbook_eth[0][1] - (self.fee * orderbook_eth[0][1]) > float(self.bit_rate['askPrice'])): \n #print('found' + orderbook_btc[0][0] + orderbook_eth[0][0] + str(num_runs))\n num_runs += 1\n purchase = []\n for k in self.orderbook:\n if(list(k.keys())[0] == orderbook_btc[0][0]):\n purchase.insert(0, k)\n if(list(k.keys())[0] == orderbook_eth[0][0]):\n purchase.insert(1, k)\n btc_limit = binance_config.btc_trade_limit\n while(btc_limit > 0.001):\n if(self.determine_feasibility(orderbook_btc[0][0], orderbook_eth[0][0], purchase, btc_limit) is True):\n self.execute_trade(orderbook_btc[0][0], orderbook_eth[0][0], purchase, btc_limit)\n break\n else:\n btc_limit = btc_limit - 0.001\n num_runs += 1\n if(num_runs % 100 == 0):\n print(str(num_runs))\n post_arbitrage_assets = self.load_arbitrage_assets()\n \n #Print results\n time_delta = datetime.datetime.now().replace(microsecond=0) - pre_arbitrage_assets['datetime'] \n print('Initial: BTC:', pre_arbitrage_assets['BTC'],'ETH:', pre_arbitrage_assets['ETH'], 'BNB:', pre_arbitrage_assets['BNB'])\n print('After__: BTC:', post_arbitrage_assets['BTC'],'ETH:', post_arbitrage_assets['ETH'], 'BNB:', post_arbitrage_assets['BNB'])\n print('Diff___: BTC:', float(post_arbitrage_assets['BTC'])-float(pre_arbitrage_assets['BTC']),\n 'ETH:', float(post_arbitrage_assets['ETH'])-float(pre_arbitrage_assets['ETH']),\n 'BNB:', float(post_arbitrage_assets['BNB'])-float(pre_arbitrage_assets['BNB']),\n 'TIME:', divmod(time_delta.total_seconds(), 60))", "def get_transfer_status():\n\n dwolla_transfers = Transaction.objects.filter(status=TransactionStatus.PENDING)\n for trans_obj in dwolla_transfers:\n if trans_obj.has_dwolla_transfer:\n\n seller = trans_obj.deal.seller\n buyer = trans_obj.deal.buyer\n\n # update funding source for buyer\n try:\n retrieve_funding_sources(buyer)\n sleep(1) # control rate limit\n except Exception as e:\n pass\n\n # update balance for seller\n balance = retrieve_balance(seller.funding_id)\n seller.update(funding_balance=balance)\n sleep(1)\n\n # update transfer status\n status = retrieve_transfer(trans_obj.dwolla_transaction_id)\n sleep(1) # control rate limit\n\n if status is not None:\n trans_obj.update(status=status)", "def test_get_withdrawals(self):\n pass", "async def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n request = {\n 'token_side': 'WITHDRAW',\n }\n return await self.fetch_deposits_withdrawals(code, since, limit, self.extend(request, params))", "def test_paypal_notify_url_for_withdrawal_with_refunded(self, mock_postback):\n mock_postback.return_value = b\"VERIFIED\"\n entry = baker.make(Entry, status='selected_confirmed', withdrawn=True)\n pptrans = create_entry_paypal_transaction(\n entry.user, entry, 'withdrawal'\n )\n pptrans.transaction_id = \"test_trans_id\"\n pptrans.save()\n\n self.assertFalse(PayPalIPN.objects.exists())\n params = dict(IPN_POST_PARAMS)\n params.update(\n {\n 'custom': b('withdrawal {}'.format(entry.id)),\n 'invoice': b(pptrans.invoice_id),\n 'payment_status': b'Refunded'\n }\n )\n self.paypal_post(params)\n entry.refresh_from_db()\n self.assertFalse(entry.withdrawal_fee_paid)\n self.assertTrue(entry.withdrawn) # still withdrawn\n\n 
self.assertEqual(len(mail.outbox), 1,)\n\n # emails sent to support\n self.assertEqual(mail.outbox[0].to, [settings.SUPPORT_EMAIL])", "def check_for_updated_balance(self, snowflake):\n transaction_list = rpc.listtransactions(snowflake, 100)\n for tx in transaction_list:\n if tx[\"category\"] != \"receive\":\n continue\n if tx.get('generated') is True:\n continue\n txid = tx[\"txid\"]\n amount = tx[\"amount\"]\n confirmations = tx[\"confirmations\"]\n address = tx[\"address\"]\n deposit_status = self.get_transaction_status_by_txid(txid)\n user = self.get_user_by_address(address)\n\n # This address isn't a part of any user's account\n if not user:\n continue\n\n snowflake_cur = user[\"snowflake_pk\"]\n\n if deposit_status == \"DOESNT_EXIST\" and confirmations >= MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_to_balance(snowflake_cur, amount)\n self.add_deposit(snowflake_cur, amount, txid, 'CONFIRMED')\n elif deposit_status == \"DOESNT_EXIST\" and confirmations < MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_deposit(snowflake_cur, amount,\n txid, 'UNCONFIRMED')\n self.add_to_balance_unconfirmed(snowflake_cur, amount)\n elif deposit_status == \"UNCONFIRMED\" and confirmations >= MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_to_balance(snowflake_cur, amount)\n self.remove_from_balance_unconfirmed(snowflake_cur, amount)\n self.confirm_deposit(txid)", "def confirm(self):\n\n self.confirmed = True\n\n # open bookings are marked as denied during completion\n # and the booking costs are copied over permanently (so they can't\n # change anymore)\n b = object_session(self).query(Booking)\n b = b.filter(Booking.period_id == self.id)\n b = b.options(joinedload(Booking.occasion))\n b = b.options(\n defer(Booking.group_code),\n defer(Booking.attendee_id),\n defer(Booking.priority),\n defer(Booking.username),\n )\n\n for booking in b:\n if booking.state == 'open':\n booking.state = 'denied'\n\n booking.cost = booking.occasion.total_cost", "def suspend_subscriptions():\n ikwen_service = get_service_instance()\n now = datetime.now()\n connection = mail.get_connection()\n try:\n connection.open()\n except:\n logger.error(u\"Connexion error\", exc_info=True)\n\n for invoicing_config in InvoicingConfig.objects.all():\n service = invoicing_config.service\n if service.status != Service.ACTIVE:\n continue\n config = service.basic_config\n db = service.database\n add_database(db)\n deadline = now - timedelta(days=invoicing_config.tolerance)\n invoice_qs = Invoice.objects.using(db).select_related('subscription')\\\n .filter(due_date__lte=deadline, status=Invoice.OVERDUE)\n count, total_amount = 0, 0\n for invoice in invoice_qs:\n due_date = invoice.due_date\n due_datetime = datetime(due_date.year, due_date.month, due_date.day)\n diff = now - due_datetime\n subscription = invoice.subscription\n tolerance = subscription.tolerance\n if diff.days < tolerance:\n continue\n invoice.status = Invoice.EXCEEDED\n invoice.save()\n count += 1\n total_amount += invoice.amount\n subscription.status = Subscription.SUSPENDED\n subscription.save()\n member = subscription.member\n add_event(service, SERVICE_SUSPENDED_EVENT, member=member, object_id=invoice.id)\n subject, message, sms_text = get_service_suspension_message(invoice)\n balance, update = Balance.objects.using(WALLETS_DB_ALIAS).get_or_create(service_id=service.id)\n if member.email:\n if 0 < balance.mail_count < LOW_MAIL_LIMIT:\n notify_for_low_messaging_credit(service, balance)\n if balance.mail_count <= 0 and not getattr(settings, 'UNIT_TESTING', False):\n 
notify_for_empty_messaging_credit(service, balance)\n else:\n invoice_url = service.url + reverse('billing:invoice_detail', args=(invoice.id,))\n html_content = get_mail_content(subject, message, service=service, template_name='billing/mails/notice.html',\n extra_context={'member_name': member.first_name, 'invoice': invoice,\n 'invoice_url': invoice_url, 'cta': _(\"Pay now\"),\n 'currency': config.currency_symbol})\n # Sender is simulated as being no-reply@company_name_slug.com to avoid the mail\n # to be delivered to Spams because of origin check.\n sender = '%s <no-reply@%s>' % (config.company_name, service.domain)\n msg = XEmailMessage(subject, html_content, sender, [member.email])\n msg.service = service\n msg.content_subtype = \"html\"\n try:\n with transaction.atomic(using=WALLETS_DB_ALIAS):\n if msg.send():\n balance.mail_count -= 1\n balance.save()\n else:\n logger.error(u\"Notice of suspension for Invoice #%s not sent to %s\" % (invoice.number, member.email), exc_info=True)\n except:\n print \"Sending mail to %s failed\" % member.email\n logger.error(u\"Connexion error on Invoice #%s to %s\" % (invoice.number, member.email), exc_info=True)\n\n if sms_text and member.phone:\n if 0 < balance.sms_count < LOW_SMS_LIMIT:\n notify_for_low_messaging_credit(service, balance)\n if balance.sms_count <= 0 and not getattr(settings, 'UNIT_TESTING', False):\n notify_for_empty_messaging_credit(service, balance)\n continue\n try:\n with transaction.atomic(using=WALLETS_DB_ALIAS):\n balance.sms_count -= 1\n balance.save()\n phone = member.phone if len(member.phone) > 9 else '237' + member.phone\n send_sms(phone, sms_text, fail_silently=False)\n except:\n logger.error(\n u\"SMS overdue notice for invoice #%s not sent to %s\" % (invoice.number, member.phone),\n exc_info=True)\n\n if count > 0:\n report = SendingReport.objects.using(db).create(count=count, total_amount=total_amount)\n sudo_group = Group.objects.using(db).get(name=SUDO)\n add_event(ikwen_service, SUSPENSION_NOTICES_SENT_EVENT, group_id=sudo_group.id, object_id=report.id)\n\n try:\n connection.close()\n except:\n pass", "def reset_all_budget_renewal_needs():\n budgets = Budget.objects.filter(is_monthly=True)\n\n for budget in budgets:\n if settings.DEBUG:\n reset_google_ads_campaign(budget.id)\n else:\n reset_google_ads_campaign.delay(budget.id)\n\n return 'reset_all_budget_renewal_needs'", "def do_send_to_client_paypal(self, transaction: WalletTransactionsModel) -> Future:\n # TODO use paypal SDK to send transactions to paypal here\n # TODO then update transaction to reflect that transaction was sent\n # NOTE: Could also listen to an _ipn to find out if transaction succeeded on paypal side\n wallet_instance: WalletModel = WalletModel.query(\n WalletModel.organization_id == transaction.organization_id, WalletModel.uid == transaction.uid).get_async().get_result()\n\n if wallet_instance.is_verified:\n paypal_address = wallet_instance.paypal_address\n amount_to_send: AmountMixin = transaction.amount\n # TODO send amount to paypal using paypal address from wallet and amount from transactions\n transaction.is_settled = True\n tran_key: Optional[ndb.Key] = transaction.put_async(retries=self._max_retries,\n timeout=self._max_timeout).get_result()\n yield bool(tran_key)\n yield False", "def update_campaigns_in_budgets(self):\n budgets = Budget.objects.all()\n\n for budget in budgets:\n if settings.DEBUG:\n update_budget_campaigns(budget.id)\n else:\n update_budget_campaigns.delay(budget.id)\n\n return 'update_campaigns_in_budgets'", "async def 
fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n return await self.fetch_deposits_withdrawals(code, since, limit, self.extend({'type': 'withdraw'}, params))", "def calculate_payoff_times(self):\n with self.database.transaction():\n current_id = 0\n for Bo in constants.initial_balance_range():\n for r in constants.interest_rate_range():\n for p in constants.monthly_payment_range():\n print(\"Calculating for initial balance {0}, rate {1}, monthly payment {2}\".format(Bo, r, p))\n t = time_until_zero_balance(r, Bo, p)\n if t is not None:\n database.create_point(current_id, Bo, r, p, t)\n current_id += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
do_send_to_client_wallet sends the deposit to the client's wallet
def do_send_to_client_wallet(self, transaction: WalletTransactionsModel) -> Future:
    # requesting the user wallet
    wallet_instance: WalletModel = WalletModel.query(
        WalletModel.organization_id == transaction.organization_id, WalletModel.uid == transaction.uid).get_async().get_result()

    is_currency_valid: bool = wallet_instance.available_funds.currency == transaction.amount.currency
    if isinstance(wallet_instance, WalletModel) and is_currency_valid:
        wallet_instance.available_funds.amount_cents += transaction.amount.amount_cents
        key: Optional[ndb.Key] = wallet_instance.put_async(
            retries=self._max_retries, timeout=self._max_timeout).get_result()
        if bool(key):
            transaction.is_settled = True
            tran_key: Optional[ndb.Key] = transaction.put_async(retries=self._max_retries,
                                                                timeout=self._max_timeout).get_result()
            yield bool(tran_key)
    yield False
[ "def deposit(self, amount):\n self.wallet += amount", "async def deposit(self, ctx, amount):\n data = await BonfideCoin(self.bot).get(ctx.guild.id, ctx.author.id)\n if data is None:\n await self.add_to_db(ctx.guild.id, ctx.author.id)\n\n data = await BonfideCoin(self.bot).get(ctx.guild.id, ctx.author.id)\n\n # return error if balance is 0\n if data.get(\"wallet\") == 0:\n return await ctx.send(\"You don't have sufficient balance to deposit.\")\n\n query = \"\"\"UPDATE bonafidecoin SET bank = $3 + bank, wallet = wallet - $3 WHERE guild_id = $1 AND user_id\n = $2\"\"\"\n if amount.lower() == \"all\":\n await self.bot.db.execute(\n query, ctx.guild.id, ctx.author.id, data.get(\"wallet\")\n )\n return await ctx.send(\n f\"Successfully deposited <:coin:853891390537465858> **{data.get('wallet')}**.\"\n )\n\n else:\n try:\n # check if money being deposited is equal to or less than wallet\n if 0 < data.get(\"wallet\") >= int(amount):\n await self.bot.db.execute(\n query, ctx.guild.id, ctx.author.id, int(amount)\n )\n return await ctx.send(\n f\"Successfully deposited <:coin:853891390537465858> **{amount}**.\"\n )\n return await ctx.send(\"You don't have sufficient balance to deposit.\")\n except ValueError:\n return await ctx.send(\"Enter a valid amount.\")", "def transfer_wallet(self, currency, amount, wallet_from, wallet_to):\n body = {\n 'currency': currency,\n 'amount': str(amount),\n 'walletfrom': wallet_from,\n 'walletto': wallet_to,\n }\n return self.auth_req('v1/transfer', body)", "async def transfer(self, ctx, amount : int, user : discord.Member):\n try:\n if await self.usercheck('levels', ctx.message.author) is False:\n await self._create_user(ctx.message.author.id)\n except:\n pass\n if amount < 10:\n await ctx.send(\"Minimum send price is $10\")\n return\n if user.bot:\n await ctx.send(\"You can't send credits to bots.\")\n return\n elif user == ctx.message.author:\n await ctx.send(\"You cant send credits to yourself.\")\n return\n else:\n if await self.usercheck('economy', ctx.message.author) is False:\n await ctx.send(\"You don't have a bank account...\")\n return\n elif await self.usercheck('economy', user) is False:\n await ctx.send(f\"{user.name} has no bank account...\")\n return\n else:\n x = await self.execute(f\"SELECT balance FROM economy WHERE userid = {ctx.message.author.id}\", isSelect=True)\n author_balance = int(x[0])\n x = await self.execute(f\"SELECT balance FROM economy WHERE userid = {user.id}\", isSelect=True)\n user_balance = int(x[0])\n if (author_balance - amount) < 0:\n await ctx.send(\"You don't have that much to spend.\")\n return\n else:\n await self.execute(f\"UPDATE economy SET balance = {amount + user_balance} WHERE userid = {user.id}\", commit=True)\n await self.execute(f\"UPDATE economy SET balance = {author_balance - amount} WHERE userid = {ctx.message.author.id}\", commit=True)\n await ctx.send(f\"Send `{amount}` to {user.mention}!\")\n try:\n await user.send(f\"{ctx.message.author.name} has sent you ${amount}.\")\n except:\n pass", "def deposit(self, commitment_service_address, deposit_amount):", "def do_send_to_client_paypal(self, transaction: WalletTransactionsModel) -> Future:\n # TODO use paypal SDK to send transactions to paypal here\n # TODO then update transaction to reflect that transaction was sent\n # NOTE: Could also listen to an _ipn to find out if transaction succeeded on paypal side\n wallet_instance: WalletModel = WalletModel.query(\n WalletModel.organization_id == transaction.organization_id, WalletModel.uid == 
transaction.uid).get_async().get_result()\n\n if wallet_instance.is_verified:\n paypal_address = wallet_instance.paypal_address\n amount_to_send: AmountMixin = transaction.amount\n # TODO send amount to paypal using paypal address from wallet and amount from transactions\n transaction.is_settled = True\n tran_key: Optional[ndb.Key] = transaction.put_async(retries=self._max_retries,\n timeout=self._max_timeout).get_result()\n yield bool(tran_key)\n yield False", "def spv_main_send_transaction(spv):\n balance = spv.request_balance()\n if balance > 10:\n peer_index = random.randint(0, len(spv.peers) - 1)\n chosen_peer = spv.peers[peer_index]\n created_tx = spv.create_transaction(chosen_peer[\"pubkey\"], 10)\n tx_json = created_tx.to_json()\n tx_hash = algo.hash1(tx_json)\n print(f\"SPV {spv.name} sent {tx_hash} to {chosen_peer['pubkey']}\")", "async def deposit(ctx, user: discord.User=None):\n err_embed = discord.Embed(title=\":x:Error:x:\", colour=discord.Colour(0xf44242))\n good_embed = discord.Embed(title=\"Your Tipjar Info\")\n exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()\n tipjar_addr = rpc.getAddresses()['addresses'][0]\n if exists:\n pid = gen_paymentid(exists.address)\n good_embed.description = \"Deposit {} to start tipping! ```transfer 3 {} <amount> -p {}```\".format(config['symbol'], tipjar_addr, pid)\n balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()\n if not balance:\n t = TipJar(pid, ctx.message.author.id, 0)\n session.add(t)\n session.commit()\n await client.send_message(ctx.message.author, embed = good_embed)\n else:\n err_embed.description = \"You haven't registered a wallet!\"\n err_embed.add_field(name=\"Help\", value=\"Use `{}registerwallet <addr>` before trying to tip!\".format(config['prefix']))\n await client.say(embed=err_embed)", "def transfer(self, amount, account):\n \n if ((self.getBalance()) >= amount):\n self.withdraw(amount, \"Transfer to \" + account.name)\n account.deposit(amount, \"Transfer from \" + self.name)\n\n print(\"Transferred $\" + \"{:.2f}\".format(amount) + \" from \" + self.name + \" to \" + account.name)\n\n else:\n print(\"Insufficient Balance\")", "def sendtx(cmd):\n txData = cmd.split(\"sendtx \")[-1]\n if \"{\" in txData:\n txData = json.loads(txData)\n print(\"Sending transaction...\")\n coin.addTx(txData)", "def send_money(self):\n pass", "def test_retrieve_wallet(db, client):\n _path = apiutils.create_wallet_path()\n response = apiutils.post(\n db, client, _path,\n {'name': 'wallet with balance', 'balance': '100.00'}\n )\n assert response.status_code == 201\n w_path = apiutils.get_wallet_path(wallet_pk=1)\n response = apiutils.get(db, client, w_path)\n assert response.status_code == 200\n assert data_test_wallet.validate_wallet(response.json())\n assert response.json()['balance'] == '0.00'", "async def donate(self, ctx, amount: CoinConverter):\n await self.transfer(ctx.author.id, ctx.guild.id, amount)\n await ctx.send(f'\\N{MONEY WITH WINGS} `{ctx.author!s}` > '\n f'`{amount}JC` > `{ctx.guild!s}` \\N{MONEY BAG}')", "async def registerwallet(ctx, address):\n\n address = address.strip()\n err_embed = discord.Embed(title=\":x:Error:x:\", colour=discord.Colour(0xf44242))\n good_embed = discord.Embed(title=\"{}'s Wallet\".format(ctx.message.author.name),colour=discord.Colour(0xD4AF37))\n if address is None:\n err_embed.description = \"Please provide an address\"\n await client.send_message(ctx.message.author, embed = err_embed)\n return\n\n exists = 
session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()\n addr_exists = session.query(Wallet).filter(Wallet.address == address).first()\n if exists:\n good_embed.title = \"Your wallet exists!\".format(exists.address)\n good_embed.description = \"```{}``` use `{}updatewallet <addr>` to change\".format(exists.address, config['prefix'])\n await client.send_message(ctx.message.author, embed = good_embed)\n return\n if addr_exists:\n err_embed.description = \"Address already registered by another user!\"\n await client.send_message(ctx.message.author, embed = err_embed)\n return\n\n elif not exists and len(address) == 99:\n w = Wallet(address, ctx.message.author.id,ctx.message.id)\n session.add(w)\n session.commit()\n good_embed.title = \"Successfully registered your wallet\"\n good_embed.description = \"```{}```\".format(address)\n await client.send_message(ctx.message.author, embed = good_embed)\n\n pid = gen_paymentid(address)\n balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()\n if not balance:\n t = TipJar(pid, ctx.message.author.id, 0)\n session.add(t)\n else:\n balance.paymentid = pid\n session.commit()\n tipjar_addr = rpc.getAddresses()['addresses'][0]\n good_embed.title = \"Your Tipjar Info\"\n good_embed.description = \"Deposit {} to start tipping! ```transfer 3 {} <amount> -p {}```\".format(config['symbol'], tipjar_addr, pid)\n balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()\n await client.send_message(ctx.message.author, embed = good_embed)\n return\n elif len(address) > 99:\n err_embed.description = \"Your wallet must be 99 characeters long, your entry was too long\"\n elif len(address) < 99:\n err_embed.description = \"Your wallet must be 99 characeters long, your entry was too short\"\n await client.say(embed = err_embed)", "async def withdraw(self, **params):\r\n return await self.client_helper(\"withdraw\", **params)", "def transfer(tipper, tippee, amount):\n typecheck(tipper, unicode, tippee, unicode, amount, decimal.Decimal)\n with db.get_connection() as conn:\n cursor = conn.cursor()\n\n # Decrement the tipper's balance.\n # ===============================\n\n DECREMENT = \"\"\"\\\n\n UPDATE participants\n SET balance=(balance - %s)\n WHERE id=%s\n AND pending IS NOT NULL\n RETURNING balance\n\n \"\"\"\n cursor.execute(DECREMENT, (amount, tipper))\n rec = cursor.fetchone()\n assert rec is not None, (tipper, tippee, amount) # sanity check\n if rec['balance'] < 0:\n\n # User is out of money. Bail. 
The transaction will be rolled back \n # by our context manager.\n\n return False\n\n\n # Increment the tippee's *pending* balance.\n # =========================================\n # The pending balance will clear to the balance proper when Payday is \n # done.\n\n INCREMENT = \"\"\"\\\n\n UPDATE participants\n SET pending=(pending + %s)\n WHERE id=%s\n AND pending IS NOT NULL\n RETURNING pending\n\n \"\"\"\n cursor.execute(INCREMENT, (amount, tippee))\n rec = cursor.fetchone()\n assert rec is not None, (tipper, tippee, amount) # sanity check\n\n\n # Record the transfer.\n # ====================\n\n RECORD = \"\"\"\\\n\n INSERT INTO transfers\n (tipper, tippee, amount)\n VALUES (%s, %s, %s)\n\n \"\"\"\n cursor.execute(RECORD, (tipper, tippee, amount))\n\n\n # Record some stats.\n # ==================\n\n STATS = \"\"\"\\\n\n UPDATE paydays \n SET ntransfers = ntransfers + 1\n , transfer_volume = transfer_volume + %s\n WHERE ts_end='1970-01-01T00:00:00+00'::timestamptz\n RETURNING id\n\n \"\"\"\n cursor.execute(STATS, (amount,))\n assert_one_payday(cursor.fetchone())\n\n\n # Success.\n # ========\n \n conn.commit()\n return True", "async def withdraw(self, ctx, amount: int):\n data = await BonfideCoin(self.bot).get(ctx.guild.id, ctx.author.id)\n if data is None:\n await self.add_to_db(ctx.guild.id, ctx.author.id)\n\n data = await BonfideCoin(self.bot).get(ctx.guild.id, ctx.author.id)\n if amount <= data.get(\"bank\"):\n query = \"\"\"UPDATE bonafidecoin SET bank = bank - $3, wallet = wallet + $3 WHERE guild_id = $1 AND user_id\n = $2 \"\"\"\n await self.bot.db.execute(query, ctx.guild.id, ctx.author.id, amount)\n return await ctx.send(\n f\"Successfully withdrawn <:coin:853891390537465858> **{amount}**.\"\n )\n\n return await ctx.send(\"You don't have sufficient balance to withdraw.\")", "def send_txn(self, addr, txn):\n proxy = Proxy(addr)\n d = proxy.callRemote('puttxn', txn, False)\n d.addCallbacks(self.transaction_sent,\n self.transaction_send_error)", "def test_update_wallet_with_balance(db, drf_client):\n w_pk = populate_db_wallet.add_wallet('Wallet')\n _path = apiutils.put_patch_wallet_path(w_pk)\n response = apiutils.patch(\n db, drf_client, _path,\n {'name': 'wallet with balance 2', 'balance': '100.00'}\n )\n assert response.status_code == 200\n w_path = apiutils.get_wallet_path(wallet_pk=w_pk)\n response = apiutils.get(db, drf_client, w_path)\n assert response.status_code == 200\n assert data_test_wallet.validate_wallet(response.json())\n assert response.json()['balance'] == '0.00'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add_approved_deposits_to_wallet fetches all processed deposits which are not yet settled and adds them to the client's wallet
def add_approved_deposits_to_wallet(self) -> Optional[List[Future]]:
    try:
        wallet_transactions: List[WalletTransactionsModel] = WalletTransactionsModel.query(
            WalletTransactionsModel.is_verified == True, WalletTransactionsModel.is_settled == False).fetch_async().get_result()
        print("approved deposits running")
        return [self.do_send_to_client_wallet(transaction=transaction) for transaction in wallet_transactions
                if transaction.transaction_type == 'deposit']
    except RetryError as e:
        # TODO log this errors
        return None
[ "async def fetch_deposits(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n request = {\n 'token_side': 'DEPOSIT',\n }\n return await self.fetch_deposits_withdrawals(code, since, limit, self.extend(request, params))", "def send_approved_withdrawals_to_paypal_wallets(self) -> Optional[List[Future]]:\n try:\n wallet_transactions: List[WalletTransactionsModel] = WalletTransactionsModel.query(\n WalletTransactionsModel.is_verified == True, WalletTransactionsModel.is_settled == False).fetch_async().get_result()\n print('approved withdrawals running')\n\n return [self.do_send_to_client_paypal(transaction=transaction) for transaction in wallet_transactions\n if transaction.transaction_type == 'withdrawal']\n except RetryError as e:\n # TODO Log this error\n return None", "def deposit(self, commitment_service_address, deposit_amount):", "def getUnconfirmedDeposits(self):\n pass", "def test_reject_approved_invoiced_entries(self):\r\n self.login_user(self.superuser)\r\n self.create_entries(timezone.now(), Entry.APPROVED)\r\n self.create_entries(timezone.now(), Entry.INVOICED)\r\n\r\n response = self.client.post(self.url, data=self.data)\r\n\r\n entries = Entry.no_join.filter(status=Entry.UNVERIFIED)\r\n self.assertEquals(entries.count(), 0)", "def check_for_updated_balance(self, snowflake):\n transaction_list = rpc.listtransactions(snowflake, 100)\n for tx in transaction_list:\n if tx[\"category\"] != \"receive\":\n continue\n if tx.get('generated') is True:\n continue\n txid = tx[\"txid\"]\n amount = tx[\"amount\"]\n confirmations = tx[\"confirmations\"]\n address = tx[\"address\"]\n deposit_status = self.get_transaction_status_by_txid(txid)\n user = self.get_user_by_address(address)\n\n # This address isn't a part of any user's account\n if not user:\n continue\n\n snowflake_cur = user[\"snowflake_pk\"]\n\n if deposit_status == \"DOESNT_EXIST\" and confirmations >= MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_to_balance(snowflake_cur, amount)\n self.add_deposit(snowflake_cur, amount, txid, 'CONFIRMED')\n elif deposit_status == \"DOESNT_EXIST\" and confirmations < MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_deposit(snowflake_cur, amount,\n txid, 'UNCONFIRMED')\n self.add_to_balance_unconfirmed(snowflake_cur, amount)\n elif deposit_status == \"UNCONFIRMED\" and confirmations >= MIN_CONFIRMATIONS_FOR_DEPOSIT:\n self.add_to_balance(snowflake_cur, amount)\n self.remove_from_balance_unconfirmed(snowflake_cur, amount)\n self.confirm_deposit(txid)", "async def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n request = {\n 'token_side': 'WITHDRAW',\n }\n return await self.fetch_deposits_withdrawals(code, since, limit, self.extend(request, params))", "def _add_payments(trades, date, propFactor=1):\n\n # Dictionaries to cache the order value per order number in the case of done away trades\n # and per buy and sell in the case of done with trades.\n orderValueDict = {}\n buyValueDict = {}\n sellValueDict = {}\n\n for trade in trades:\n if not(PS_TradeFees.isTakeonTrade(trade)):\n # If stock trade is not part of the allocation process then it needs to have strate fees added to it.\n if trade.Instrument().InsType() in ['Stock', 'ETF']: # and trade.Text1() not in ['Allocation Process']:\n # Calculate strate fees based on the value of the order for Trade Report (done away) trades\n tradeType = trade.EquityTradeType()\n if tradeType == 'Trade Report':\n orderNumber = 
trade.AdditionalInfo().XtpOrderRef()\n if orderNumber:\n if orderNumber in orderValueDict:\n orderValue = orderValueDict[orderNumber]\n else:\n orderValue = _CalculateOrderValue(trade, date, orderNumber)\n orderValueDict[orderNumber] = orderValue\n else:\n orderValue = abs(trade.Premium())\n # Calculate strate fees based on the value of all buys/sells per instrument\n elif tradeType == 'DMA':\n instrumentName = trade.Instrument().Name()\n if trade.Quantity() < 0:\n if instrumentName in sellValueDict:\n orderValue = sellValueDict[instrumentName]\n else:\n orderValue = _CalculateBuySellValue(trade, date, 'Sell')\n sellValueDict[instrumentName] = orderValue\n else:\n if instrumentName in buyValueDict:\n orderValue = buyValueDict[instrumentName]\n else:\n orderValue = _CalculateBuySellValue(trade, date, 'Buy')\n buyValueDict[instrumentName] = orderValue\n else:\n orderValue = abs(trade.Premium())\n\n PS_TradeFees.add_trade_fees(trade, propFactor, orderValue)\n else:\n PS_TradeFees.add_trade_fees(trade, propFactor)", "def request_spend():\n params = request.get_json()\n\n txid = params[\"vault_txid\"]\n self.spend_requests[txid] = params[\"addresses\"]\n self.spend_acceptance[txid] = [None, None, None, None]\n\n return jsonify({\"success\": True}), 201", "def save_pending_deletion_requests(pending_deletion_requests):\n user_ids = [request.user_id for request in pending_deletion_requests]\n pending_deletion_request_models = (\n user_models.PendingDeletionRequestModel.get_multi(\n user_ids, include_deleted=True)\n )\n final_pending_deletion_request_models = []\n for deletion_request_model, deletion_request in python_utils.ZIP(\n pending_deletion_request_models, pending_deletion_requests):\n deletion_request.validate()\n deletion_request_dict = {\n 'email': deletion_request.email,\n 'role': deletion_request.role,\n 'deletion_complete': deletion_request.deletion_complete,\n 'exploration_ids': deletion_request.exploration_ids,\n 'collection_ids': deletion_request.collection_ids,\n 'pseudonymizable_entity_mappings': (\n deletion_request.pseudonymizable_entity_mappings)\n }\n if deletion_request_model is not None:\n deletion_request_model.populate(**deletion_request_dict)\n else:\n deletion_request_dict['id'] = deletion_request.user_id\n deletion_request_model = user_models.PendingDeletionRequestModel(\n **deletion_request_dict\n )\n final_pending_deletion_request_models.append(deletion_request_model)\n\n user_models.PendingDeletionRequestModel.put_multi(\n final_pending_deletion_request_models)", "def test_deposit_updates_balance_immediately(raiden_chain, token_addresses):\n app0, app1 = raiden_chain\n registry_address = app0.raiden.default_registry.address\n token_address = token_addresses[0]\n token_network_identifier = views.get_token_network_identifier_by_token_address(\n views.state_from_app(app0),\n app0.raiden.default_registry.address,\n token_address,\n )\n\n api0 = RaidenAPI(app0.raiden)\n\n old_state = get_channelstate(app0, app1, token_network_identifier)\n api0.set_total_channel_deposit(registry_address, token_address, app1.raiden.address, 210)\n new_state = get_channelstate(app0, app1, token_network_identifier)\n\n assert new_state.our_state.contract_balance == old_state.our_state.contract_balance + 10", "def test_deleting_draft_requests(self):\n\t\tself.review_request.target_people.add(self.user)\n\t\tself.review_request.target_groups.add(self.group)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1)\n\t\tself.review_request.delete()\n\t\tself._check_counters()", "def 
ticket_aged_proposals(self):\n \"\"\"\n if not self.save:\n return\n\n qset = IXFMemberData.objects.filter(\n deskpro_id__isnull=True, requirement_of__isnull=True\n )\n\n # get ticket days period\n ticket_days = EnvironmentSetting.get_setting_value(\n \"IXF_IMPORTER_DAYS_UNTIL_TICKET\"\n )\n\n if ticket_days > 0:\n\n # we adjust the query to only get proposals\n # that are older than the specified period\n\n now = datetime.datetime.now(datetime.timezone.utc)\n max_age = now - datetime.timedelta(days=ticket_days)\n qset = qset.filter(created__lte=max_age)\n\n for ixf_member_data in qset:\n\n action = ixf_member_data.action\n if action == \"delete\":\n action = \"remove\"\n elif action == \"noop\":\n continue\n\n typ = action\n\n # create the ticket\n # and also notify the net and ix with\n # a reference to the ticket in the subject\n\n self.ticket_proposal(\n ixf_member_data, typ, True, True, True, {}, ixf_member_data.action\n )\n \"\"\"\n return", "def test_get_deposits(self):\n pass", "def test_deposit_updates_balance_immediately(raiden_chain, token_addresses):\n app0, app1 = raiden_chain\n registry_address = app0.raiden.default_registry.address\n token_address = token_addresses[0]\n token_network_address = views.get_token_network_address_by_token_address(\n views.state_from_app(app0), app0.raiden.default_registry.address, token_address\n )\n\n api0 = RaidenAPI(app0.raiden)\n\n old_state = get_channelstate(app0, app1, token_network_address)\n api0.set_total_channel_deposit(registry_address, token_address, app1.raiden.address, 210)\n new_state = get_channelstate(app0, app1, token_network_address)\n\n assert new_state.our_state.contract_balance == old_state.our_state.contract_balance + 10", "def get_net_deposits():\n try:\n currency = conf.base if conf.base != 'BTC' else 'XBt'\n if conf.exchange == 'bitmex':\n result = exchange.private_get_user_wallet({'currency': currency})\n return (result['deposited'] - result['withdrawn']) * conf.satoshi_factor\n if conf.exchange == 'kraken':\n net_deposits = 0\n deposits = exchange.fetch_deposits(conf.base)\n for deposit in deposits:\n net_deposits += deposit['amount']\n ledgers = exchange.private_post_ledgers({'asset': currency, 'type': 'withdrawal'})['result']['ledger']\n for withdrawal_id in ledgers:\n net_deposits += float(ledgers[withdrawal_id]['amount'])\n return net_deposits\n log.error(\"get_net_deposit() not yet implemented for %s\", conf.exchange)\n return None\n\n except (ccxt.ExchangeError, ccxt.AuthenticationError, ccxt.ExchangeNotAvailable, ccxt.RequestTimeout) as error:\n log.error('Got an error %s %s, retrying in about 5 seconds...', type(error).__name__, str(error.args))\n sleep_for(4, 6)\n return get_net_deposits()", "async def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n return await self.fetch_deposits_withdrawals(code, since, limit, self.extend({'type': 'withdraw'}, params))", "def lockout_budgets(self, selected_budget, budget_list):\n pass", "def distribute_deprecated(self, radius = 5):\n\t\t\n\t\tself.is_processing = True\n\t\tself.save()\n\t\tenough_points = True \n\t\t\n\t\tt = TxtTemplates()\n\n\t\t#print \"Sending out offers\"\n\n\t\t# 70 percent of old customers, 30 percent of new\n\t\tmax_offers = self.max_offers\n\t\texisting_num = int(round(0.7*max_offers))\n\n\t\tmerchant = self.merchant\n\t\t\n\t\tnearby_customers = self.merchant.get_customers_within_miles(radius)\n\t\tfans = merchant.fans.filter(pk__in=nearby_customers, 
verified_phone=0).exclude(active=False).exclude(verified=False).order_by('?').values_list('pk', flat=True)\n\t\tantifans = merchant.antifans.all().values_list('pk', flat=True)\n\t\t# TODO: geographically filter\n\t\t\n\t\tnonfans = Customer.objects.filter(pk__in=nearby_customers, verified_phone=0).exclude(active=False).exclude(verified=False).exclude(pk__in=fans).exclude(pk__in=antifans).values_list('pk',flat=True)\n\t\t#nonfans = Customer.objects.exclude(active=False).exclude(verified=False).exclude(pk__in=fans).exclude(pk__in=antifans).filter(zipcode=merchant.zipcode).values_list('pk', flat=True)\n\n\t\tprint \"Num fans:\",fans.count()\n\t\tprint \"Num nonfans:\",nonfans.count()\n\t\tfan_target = set(list(fans))\n\t\tnonfan_target = set(list(nonfans))\t\n\t\ttarget = fan_target | nonfan_target\n\t\tif len(target) > max_offers:\n\t\t\ttarget_list = random.sample(target, max_offers)\n\t\telse:\n\t\t\ttarget_list = list(target)\n\n\t\tfrom worldbank.models import Transaction\n\n\t\tallowed_number =int( self.merchant.balance/abs(Transaction.points_table[\"MOD\"]))\n\t\t#print \"balance=\" ,self.merchant.balance\n\t\t#print \"allowed_number\", allowed_number\n\t\tif allowed_number == 0:\n\t\t\t# check if there's enough balance\n\t\t\tenough_points = False\n\n\t\tif len(target_list) > allowed_number:\n\t\t\ttarget_list = random.sample(target_list, allowed_number)\n\t\tsentto = self.gen_offer_codes(Customer.objects.filter(pk__in=target_list))\t\n\t\t#print \"count=\" , self.offercode_set.all().count()\n\t\tfor o in self.offercode_set.all():\n\t\t\toffer_msg = t.render(TxtTemplates.templates[\"CUSTOMER\"][\"OFFER_RECEIVED\"],{ \"merchant\":self.merchant.business_name, \"title\":self.title, \"code\":o.code })\t\t\n\t\t\tsms_notify(o.customer.phone, offer_msg, SMS_DEBUG)\n\t\t\ttransaction = Transaction.objects.create(time_stamp=datetime.now(),\n\t\t\t\t\t\t\toffer = self,\n\t\t\t\t\t\t\toffercode = o,\n\t\t\t\t\t\t\tdst = self.merchant,\n\t\t\t\t\t\t\tttype = \"MOD\")\n\t\t\ttransaction.execute()\n\n\t\tself.num_init_sentto =sentto\n\t\tself.is_processing = False\n\t\tself.expired_time = self.starting_time + timedelta(minutes=self.duration)\n\t\tself.save()\n\n\t\n\n\t\t\n\t\tif enough_points: \n\t\t\t# number of people sent to, it can be 0 \n\t\t\treturn self.num_init_sentto\n\t\telse:\n\t\t\t# not enough points to send to\n\t\t\treturn -2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a random meme.
def meme_rand():
    img = None
    quote = None
    img = random.choice(imgs)
    quote = random.choice(quotes)
    path = meme.make_meme(img, quote.body, quote.author)
    return render_template('meme.html', path=path)
[ "def meme_rand():\n\n img = random.choice(imgs)\n quote = random.choice(quotes)\n print(f'This are the {quote} in the file')\n path = meme.make_meme(system_path, img, quote.body, quote.author)\n return render_template('meme.html', path=path)", "def random_gen(self):\n\t\ttypes = [\"Normal\", \"Robot\", \"Ninja\", \"Fire\", \"Water\", \"Dinosaur\", \"Earth\", \"Sound\", \"Wind\", \"Darkness\", \"Light\", \"Plasma\", \"Solar\", \"Lunar\", \"Meme\", \"Magic\"]\n\t\tself._name_gen()\n\t\tself.speed = random.randint(1, 6) # All ranges here are balanced using eyeballs and hopes. And wishes.\n\t\tself.attk_pw = random.randint(0, 5)\n\t\tself.attk_type = random.choice(['physical', 'emotional'])\n\t\tself.moveType = random.choice(types)\n\t\tif self.attk_type == 'emotional':\n\t\t\tself.fp = random.randint(1, 5)", "def random():\n return Note(random.randrange(12))", "def randmess(self):\n if self.dorandmess:\n a = int(random.random()*1000)\n b = int(random.random()*1000)\n if a == b:\n self.msg(randmessage)", "def rand():\r\n global rand_seed\r\n rand_seed = (MULTIPLIER * rand_seed + INCREMENT)\r\n return (rand_seed >> 16) & 0x7FFF", "def d12():\n\treturn random.randint(1, 12)", "def random():\n return Scale(Note.random(), Mode.random())", "def random_number():\n return random.getrandbits(32)", "def get_random_number():\n\n return random.randint(0, 100000)", "def gen_random(self, field_name, random):\r\n ...", "def generateRandomWorkTime(self):\n assert self.workTime == 0\n self.workTime = self.randomGenerator.generate()\n printHandler(\"W\",self.name,\"worktime\",self.workTime)", "def random_data():\n return binascii.b2a_hex(os.urandom(31)).decode('utf-8')", "def generate_random_monster(self, y, x):\n return entities.CorporateZombie(y, x)", "async def memegen(self, ctx, name=\"\", *fields):\n if len(fields) == 0:\n return await ctx.send(\"Controleer je argumenten.\")\n\n # Get the meme info that corresponds to this name\n result: memes.Meme = memes.getMeme(name)\n\n # No meme found\n if result is None:\n return await ctx.send(\"Deze meme staat niet in de database.\")\n\n # Convert to list to support item assignment\n fields = list(fields)\n\n generated = generate(result, fields)\n\n # If the request was successful, remove the message calling it\n if generated[\"success\"]:\n await self.utilsCog.removeMessage(ctx.message)\n\n # Send the meme's url or the error message\n await ctx.send(generated[\"message\"])", "def sing_random(self):\n\n if random() <= self.luck:\n print(\"The bar appreciates your creativity! Fun +10\")\n person.fun(self, 10)\n elif random() >= (1 - self.luck):\n print(\"You lose self-confidence and run off stage. Fun -10\")\n person.fun(self, -10)\n else:\n print(\"You sang a random song. Fun +5\")\n person.fun(self, 5)", "def gen_date():\r\n return random.randint(DAY1, TODAY)", "def random():\n return PrivateKey(secrets.token_bytes(32))", "def random_texture(n=100):\n m = Microstructure(name='random_texture')\n for i in range(n):\n m.grains.append(Grain(i + 1, Orientation.random()))\n return m", "def generate(cls):\n account_id = random.randint(0, 10)\n amount = random.randint(0, 20000)\n auction_id = random.randint(0, 20)\n time_unit = random.randint(0, 100)\n return cls(account_id=account_id, amount=amount, auction_id=auction_id, time_unit=time_unit)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rebuild a list of coordinates from the original collection of shapes and the points obtained from the square process
def get_shapes_from_new_points(self, original_shapes, new_points):
    unik_points = list(self.point_shapes)
    new_s = []
    for l in original_shapes:
        #coords, is_poly = (l[0].coords, False) if l.geom_type == 'MultiLineString' else (l.exterior.coords, True)
        coords = get_coords(l)
        is_poly = True if self.geom_type == 'Polygon' else False
        size = len(coords)
        new_s.append(np.zeros((size, 2)))
    for idx_p, p in enumerate(unik_points):
        index_of_lines = self.point_shapes[p]  # point_lines_idx[p]
        #print(index_of_lines)
        for idx_l in index_of_lines:
            r = self.get_rank_point_in_shape(idx_p, idx_l)
            #print(idx_l, r, new_points[idx_p])
            new_s[idx_l][r] = np.array(new_points[idx_p])
            if r == 0 and is_poly:
                new_s[idx_l][-1] = np.array(new_points[idx_p])
    return new_s
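The helper get_coords is called above but not shown. A minimal sketch of what it might look like, assuming Shapely geometries and inferring the intent from the commented-out line, is below; this is a guess at the missing helper, not the author's implementation.

# Hedged sketch of the missing get_coords helper, assuming Shapely geometries.
# Polygon -> exterior ring coordinates; MultiLineString -> first component's
# coordinates (mirroring the commented-out l[0].coords); otherwise .coords.
def get_coords(geom):
    if geom.geom_type == 'Polygon':
        return list(geom.exterior.coords)
    if geom.geom_type == 'MultiLineString':
        return list(geom.geoms[0].coords)
    return list(geom.coords)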
[ "def generate_all_shape_coord(self):\r\n for rotation in range(self.max_rotations):\r\n self.shape_coord.append(list(self.generate_shape_coord(rotation)))", "def __returnCoordinatesReshaped(self, newShape):\n returnCoordinates = self.gridContainer['gridCoord']\n returnCoordinates.shape = newShape\n\n return returnCoordinates", "def test_processed_points_calculation(self):\n\n assert self.test_shape.processed_points == [\n (49.937460888595446, 2.5, \"circle\"),\n (43.300748759659555, 25.000903120744287, \"circle\"),\n (27.1320420790315, 41.99824154201773, \"straight\"),\n (77.154447582418, 128.6358861991937, \"circle\"),\n (129.90375269002172, 75.00010024693078, \"circle\"),\n (149.97916521970643, 2.5, \"straight\"),\n (49.937460888595446, 2.5, \"circle\"),\n ]", "def make(self):\n\n self._make_tree()\n self.mask = np.zeros(self.x.shape)\n\n # Get coordinates from shapefile for each polygon\n for shp in self.shapefile:\n poly = shape(shp['geometry'])\n\n if isinstance(poly, Polygon):\n xx, yy, coords = self._gridify(poly)\n self._update_mask(xx, yy, coords)\n elif isinstance(poly, MultiPolygon):\n # Loop over each piece of the polygon and add it to mask\n for part in poly:\n xx, yy, coords = self._gridify(part)\n self._update_mask(xx, yy, coords)\n else:\n raise TypeError('Unknown shape type {}'.format(type(poly).__name__))\n\n # Make sure double-counted points are handled\n self.mask[self.mask > 1] = 1", "def _GenPoints(self):\n if len(self) == 1:\n self._points = [self]\n self._pointsPositions = [self.GetPosition()]\n return self._points\n else:\n res = []\n children = self.GetChildren()\n # children.sort(lambda x,y:cmp(len(y),len(x)))\n children.sort(key=lambda x:len(x), reverse=True)\n for child in children:\n res += child.GetPoints()\n self._points=res\n self._pointsPositions = [x.GetPosition() for x in res]", "def bounding_box(self):\n# first_point and last_point contain UTM coordinates from self.shapes that\n# correspond to top left and bottom right squares in the geographic grid\n first_point = self.shapes[0].points[0]\n last_point = self.shapes[len(self.shapes)-1].points[0]\n\n# The 0th element in each coord pair describes longitude\n west_UTM = first_point[0]\n east_UTM = last_point[0]\n\n# The 1th element in each coord pair describes latitude\n north_UTM = first_point[1]\n south_UTM = last_point[1]\n\n return [(west_UTM, east_UTM, self.west_lon, self.east_lon), (south_UTM, north_UTM, self.south_lat, self.north_lat)]", "def returnGridAsArrayOfCoordinates(cls):", "def pix_coords(\n points:list,\n window:pygs.Window,\n pcsys:dict = pcsys\n ) -> list:\n return [pix_coord(point, window, pcsys) for point in points]", "def expand_around_existing_points(self, num_pts, reso):\n\n new_pts = []\n \n i = numpy.arange(-num_pts * reso, num_pts * reso + reso*0.01, reso)\n for xi in i:\n for yi in i:\n for zi in i:\n vec = numpy.array([xi, yi, zi])\n new_pts.append(self.points + vec)\n self.points = numpy.vstack(new_pts)\n \n self.__unique_points()", "def shape2coords(self, f):\n\t\tn = np.sqrt(self.mu/(self.a**3))\n\t\tE = f2E(f, self.e)\n\t\tx = self.a*(np.cos(E) - self.e)\n\t\ty = self.a*np.sqrt(1 - (self.e**2))*np.sin(E)\n\t\tvx = - self.a*n*np.sin(E)/(1 - self.e*np.cos(E))\n\t\tvy = self.a*n*np.cos(E)*np.sqrt(1 - (self.e**2))/(1 - self.e*np.cos(E))\n\t\tR = np.array([x,y,0])\n\t\tV = np.array([vx,vy,0])\n\t\treturn R, V", "def pick_points_on_shape(self):\r\n a = self.a \r\n N = 81 # number of vertices\r\n t = np.linspace(-4,4,N)\r\n verts = np.zeros((N,2))\r\n verts[:,0] = 
a*(np.abs(t))**3 - 1.0\r\n verts[:,1] = t\r\n return t, verts", "def create_points_for_rectangle(x, y, width, height):\r\n points = []\r\n points.append(x)\r\n points.append(y)\r\n points.append(x)\r\n points.append(y + height)\r\n points.append(x + width)\r\n points.append(y + height)\r\n points.append(x + width)\r\n points.append(y)\r\n points.append(x)\r\n points.append(y)\r\n \r\n return points", "def rect_to_squares(selectionX0, selectionY0, selectionX1, selectionY1, limitX, limitY, minSize):\n\n minX = min(selectionX0, selectionX1)\n maxX = max(selectionX0, selectionX1)\n minY = min(selectionY0, selectionY1)\n maxY = max(selectionY0, selectionY1)\n centroidX = (minX + maxX)/2\n centroidY = (minY + maxY)/2\n\n diffX = maxX - minX\n diffY = maxY - minY\n diffMin = min(diffX, diffY)\n diffMax = max(diffX, diffY)\n if (diffMin < 1): # must be at least 1 pixel in each dimension to avoid div by zero\n return []\n\n aspectRatio = diffX/diffY\n flip = False\n if (aspectRatio < 1): # vertical rectangle\n flip = True\n aspectRatio = 1./aspectRatio\n # print(\"Rect: \" + str((minX, minY, maxX, maxY, diffX, diffY, flip, aspectRatio)))\n\n # number of squares is simply the rounded aspect ratio with contraint of minimimum size\n numSquares = max(round(aspectRatio), 1)\n if (diffMax/numSquares < minSize):\n numSquares = max(math.floor(diffMax/minSize), 1)\n\n offset = diffMax/numSquares\n squareSize = max(diffMax/numSquares, minSize)\n squareSize = squareSize * 1.1 # give them 10% overlap\n squareCoords = []\n for i in range(numSquares):\n squareCentroidX = centroidX\n squareCentroidY = centroidY\n\n if (flip):\n squareCentroidY += offset*i - offset*(numSquares-1)/2\n else:\n squareCentroidX += offset*i - offset*(numSquares-1)/2\n\n sx0 = int(max(squareCentroidX - squareSize/2, 0))\n sy0 = int(max(squareCentroidY - squareSize/2, 0))\n sx1 = int(min(squareCentroidX + squareSize/2, limitX))\n sy1 = int(min(squareCentroidY + squareSize/2, limitY))\n # print(\"Square: \", (sx0, sy0, sx1, sy1))\n squareCoords.append((sx0, sy0, sx1, sy1))\n\n return squareCoords", "def get_coordinates(self):\n return (copy.deepcopy(self.coordinates))", "def compute_all_points(self):\n\n # allocate enough storage for all points\n self.points = np.zeros((self.n_points, self.dim))\n\n # initiate a counter for the number of already counted points\n num_included_points = 0\n\n # loop over all subspaces of the SG\n for i in range(self.nSubspaces):\n # traverse the SG in a top-down manner\n current_subspace = self.subspace_list[\n self.index_list4top_down_sparse_grid_traverse[i]\n ]\n\n # copy the points from the subspace into the array of the SG\n self.points[\n num_included_points : num_included_points\n + current_subspace.n_points,\n :,\n ] = current_subspace.points\n\n # increase the counter accordingly\n num_included_points += current_subspace.n_points", "def transform(self, points):\n scaled = [ ]\n for pt in points:\n scaled_pt = self.transform_pt(pt)\n scaled.append(scaled_pt)\n return scaled", "def smalllestRectangles(points):\n pass", "def cube2latlon_preprocess(x, y, xi, yi):", "def _extract_svg_coordinates_helper_function_(paths, number_of_samples=30):\n path_coordinates = []\n x_coord = []\n y_coord = []\n\n for idx in paths:\n for jdy in idx:\n for j in range(number_of_samples):\n path_coordinates.append(jdy.point(j / (number_of_samples - 1)))\n\n for k in range(len(path_coordinates)):\n xi = path_coordinates[k].real\n yi = path_coordinates[k].imag\n\n x_coord.append(xi)\n y_coord.append(yi)\n\n return 
list(zip(np.asarray(x_coord), np.asarray(y_coord)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the mutual information between a data column (x) and the labels (y). The data column is a single attribute over all the examples (n x 1). Mutual information is the difference between the entropy BEFORE the split set, and the weighted-average entropy of EACH possible split.
def mutual_information(x, y, weight):
    # INSERT YOUR CODE HERE
    # raise Exception('Function not yet implemented!')
    entY = entropy(y, weight)
    unique_values_x, counts = np.unique(x, return_counts=True)
    probabilities_x = counts / len(x)
    mapping_of_probabilities = zip(probabilities_x, unique_values_x)
    # weighted-average entropy of each possible split
    for prob, unique_value in mapping_of_probabilities:
        entY -= prob * entropy(y[x == unique_value], weight)
    return entY
[ "def mutual_information(x, y, weights, entropy_y=None):\r\n if entropy_y is None:\r\n entropy_y = entropy(y, weights)\r\n partitioned_x = partition(x)\r\n total_values = weighted_length(weights)\r\n entropy_x = 0\r\n for val in partitioned_x.keys():\r\n labels_for_val = [y[i] for i in partitioned_x[val]]\r\n weights_for_val = [weights[i] for i in partitioned_x[val]]\r\n entropy_x += (weighted_length(weights_for_val)/total_values)*entropy(labels_for_val, weights_for_val)\r\n return entropy_y - entropy_x", "def mutual_information(joint_distribution, distribution_var1, distribution_var2):\n len_y = joint_distribution.shape[0]\n len_x = joint_distribution.shape[1]\n entropy = 0\n for y in range(len_y):\n for x in range(len_x):\n if (joint_distribution[y,x] != 0 and\n distribution_var1[x] != 0 and\n distribution_var2[y] != 0):\n entropy += joint_distribution[y,x] * math.log(joint_distribution[y, x]/(distribution_var1[x] * distribution_var2[y]), 2)\n return entropy", "def compute_information_gain(x_train, y_train, split, vocabularies):\r\n index_of_split = vocabularies[split]\r\n left = []\r\n right = []\r\n for i in range(len(y_train)):\r\n if x_train[i, index_of_split] <= 0.5:\r\n left.append(y_train[i])\r\n else:\r\n right.append(y_train[i])\r\n root_entropy = compute_entropy(y_train)\r\n left_entropy = compute_entropy(left)\r\n right_entropy = compute_entropy(right)\r\n left_probability = len(left) / len(y_train)\r\n right_probability = len(right) / len(y_train)\r\n information_gain = root_entropy - left_entropy * left_probability - right_entropy * right_probability\r\n return information_gain", "def mutual_information(self, x, y=None, mi_limits=None, mi_shape=256, binby=[], limits=None, shape=default_shape, sort=False, selection=False, async=False):\n\t\tif y is None:\n\t\t\twaslist, [x,] = vaex.utils.listify(x)\n\t\telse:\n\t\t\twaslist, [x,y] = vaex.utils.listify(x, y)\n\t\t\tx = list(zip(x, y))\n\t\t\tif mi_limits:\n\t\t\t\tmi_limits = [mi_limits]\n\t\t#print(\"x, mi_limits\", x, mi_limits)\n\t\tlimits = self.limits(binby, limits, async=True)\n\t\t#print(\"$\"*80)\n\t\tmi_limits = self.limits(x, mi_limits, async=True)\n\t\t#print(\"@\"*80)\n\n\t\t@delayed\n\t\tdef calculate(counts):\n\t\t\t# TODO: mutual information doesn't take axis arguments, so ugly solution for now\n\t\t\tfullshape = _expand_shape(shape, len(binby))\n\t\t\tout = np.zeros((fullshape), dtype=float)\n\t\t\tif len(fullshape) == 0:\n\t\t\t\tout = vaex.kld.mutual_information(counts)\n\t\t\t\t#print(\"count> \", np.sum(counts))\n\t\t\telif len(fullshape) == 1:\n\t\t\t\tfor i in range(fullshape[0]):\n\t\t\t\t\tout[i] = vaex.kld.mutual_information(counts[...,i])\n\t\t\t\t\t#print(\"counti> \", np.sum(counts[...,i]))\n\t\t\t\t#print(\"countt> \", np.sum(counts))\n\t\t\telif len(fullshape) == 2:\n\t\t\t\tfor i in range(fullshape[0]):\n\t\t\t\t\tfor j in range(fullshape[1]):\n\t\t\t\t\t\tout[i,j] = vaex.kld.mutual_information(counts[...,i,j])\n\t\t\telif len(fullshape) == 3:\n\t\t\t\tfor i in range(fullshape[0]):\n\t\t\t\t\tfor j in range(fullshape[1]):\n\t\t\t\t\t\tfor k in range(fullshape[2]):\n\t\t\t\t\t\t\tout[i,j,k] = vaex.kld.mutual_information(counts[...,i,j,k])\n\t\t\telse:\n\t\t\t\traise ValueError(\"binby with dim > 3 is not yet supported\")\n\t\t\treturn out\n\t\t@delayed\n\t\tdef has_limits(limits, mi_limits):\n\t\t\tif not _issequence(binby):\n\t\t\t\tlimits = [list(limits)]\n\t\t\tvalues = []\n\t\t\tfor expressions, expression_limits in zip(x, mi_limits):\n\t\t\t\t#print(\"mi for\", expressions, 
expression_limits)\n\t\t\t\t#total_shape = _expand_shape(mi_shape, len(expressions)) + _expand_shape(shape, len(binby))\n\t\t\t\ttotal_shape = _expand_shape(mi_shape, len(expressions)) + _expand_shape(shape, len(binby))\n\t\t\t\t#print(\"expressions\", expressions)\n\t\t\t\t#print(\"total_shape\", total_shape)\n\t\t\t\t#print(\"limits\", limits,expression_limits)\n\t\t\t\t#print(\"limits>\", list(limits) + list(expression_limits))\n\t\t\t\tcounts = self.count(binby=list(expressions) + list(binby), limits=list(expression_limits)+list(limits),\n\t\t\t\t\t\t shape=total_shape, async=True, selection=selection)\n\t\t\t\tvalues.append(calculate(counts))\n\t\t\treturn values\n\n\t\t@delayed\n\t\tdef finish(mi_list):\n\t\t\tif sort:\n\t\t\t\tmi_list = np.array(mi_list)\n\t\t\t\tindices = np.argsort(mi_list)[::-1]\n\t\t\t\tsorted_x = list([x[k] for k in indices])\n\t\t\t\treturn mi_list[indices], sorted_x\n\t\t\telse:\n\t\t\t\treturn np.array(vaex.utils.unlistify(waslist, mi_list))\n\t\tvalues = finish(delayed_list(has_limits(limits, mi_limits)))\n\t\treturn self._async(async, values)\n\n\t\tif limits is None:\n\t\t\tlimits_done = Task.fulfilled(self.minmax())\n\t\telse:\n\t\t\tlimits_done = Task.fulfilled(limits)\n\t\tif grid is None:\n\t\t\tif limits is None:\n\t\t\t\thistogram_done = limits_done.then(lambda limits: self.histogram(limits, size=size))\n\t\t\telse:\n\t\t\t\thistogram_done = Task.fulfilled(self.histogram(limits, size=size))\n\t\telse:\n\t\t\thistogram_done = Task.fulfilled(grid)\n\t\tmutual_information_promise = histogram_done.then(vaex.kld.mutual_information)\n\t\treturn mutual_information_promise if self.async else mutual_information_promise.get()", "def mutual_inf(x, y): \n# Calculate marginal distributions\n px = get_marginal(x)\n py = get_marginal(y)\n \n \n joint_x_y = get_joint(x,y)\n# calculate mutual information\n mi = 0\n \n for n_x, x_un in enumerate(np.unique(x)):\n pxi = px[n_x] # p(x)\n \n for n_y, y_un in enumerate(np.unique(y)):\n pyi = py[n_y] # p(y) \n \n joint_i = joint_x_y[x_un][y_un] # P(x,y)\n \n if ((pxi == 0) or (pyi == 0) or (joint_i ==0 )):\n continue\n else:\n mi += joint_i * np.log2(joint_i/(pxi*pyi))\n \n return mi", "def mutual_information_from_output(self, output_vector): \n entropy_o = calc_entropy(output_vector)\n \n cnt_i = len(output_vector)\n return np.log2(cnt_i) - entropy_o/cnt_i", "def mutual_information(self, max_lag):\n\n #number of bins - say ~ 20 pts / bin for joint distribution\n #and that at least 4 bins are required\n N = max(self.X.shape)\n num_bins = max(4.,np.floor(np.sqrt(N/20)))\n num_bins = int(num_bins)\n\n m_score = np.zeros((max_lag))\n\n for jj in range(max_lag):\n lag = jj+1\n\n ts = self.X[0:-lag]\n ts_shift = self.X[lag::]\n\n min_ts = np.min(self.X)\n max_ts = np.max(self.X)+.0001 #needed to bin them up\n\n bins = np.linspace(min_ts,max_ts,num_bins+1)\n\n bin_tracker = np.zeros_like(ts)\n bin_tracker_shift = np.zeros_like(ts_shift)\n\n for ii in range(num_bins):\n\n locs = np.logical_and( ts>=bins[ii], ts<bins[ii+1] )\n bin_tracker[locs] = ii\n\n locs_shift = np.logical_and( ts_shift>=bins[ii], ts_shift<bins[ii+1] )\n bin_tracker_shift[locs_shift]=ii\n\n m_score[jj] = metrics.mutual_info_score(bin_tracker,bin_tracker_shift)\n return m_score", "def calculate_entropy(y):\n y = y.flatten()\n log2 = lambda x: math.log(x) / math.log(2)\n unique_labels = np.unique(y)\n entropy = 0\n for label in unique_labels:\n count = len(y[y == label])\n p = count / float(len(y))\n entropy += -p * log2(p)\n return entropy", "def 
adjusted_mutual_info_score(\n labels_true, labels_pred, *, average_method=\"arithmetic\"\n):\n labels_true, labels_pred = check_clusterings(labels_true, labels_pred)\n n_samples = labels_true.shape[0]\n classes = np.unique(labels_true)\n clusters = np.unique(labels_pred)\n\n # Special limit cases: no clustering since the data is not split.\n # It corresponds to both labellings having zero entropy.\n # This is a perfect match hence return 1.0.\n if (\n classes.shape[0] == clusters.shape[0] == 1\n or classes.shape[0] == clusters.shape[0] == 0\n ):\n return 1.0\n\n contingency = contingency_matrix(labels_true, labels_pred, sparse=True)\n # Calculate the MI for the two clusterings\n mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)\n # Calculate the expected value for the mutual information\n emi = expected_mutual_information(contingency, n_samples)\n # Calculate entropy for each labeling\n h_true, h_pred = entropy(labels_true), entropy(labels_pred)\n normalizer = _generalized_average(h_true, h_pred, average_method)\n denominator = normalizer - emi\n # Avoid 0.0 / 0.0 when expectation equals maximum, i.e a perfect match.\n # normalizer should always be >= emi, but because of floating-point\n # representation, sometimes emi is slightly larger. Correct this\n # by preserving the sign.\n if denominator < 0:\n denominator = min(denominator, -np.finfo(\"float64\").eps)\n else:\n denominator = max(denominator, np.finfo(\"float64\").eps)\n ami = (mi - emi) / denominator\n return ami", "def MutualInformation(hgram):\n\t# Convert bins counts to probability values\n\tpxy = hgram / float(np.sum(hgram))\n\tpx = np.sum(pxy, axis=1) # marginal for x over y\n\tpy = np.sum(pxy, axis=0) # marginal for y over x\n\tpx_py = px[:, None] * py[None, :] # Broadcast to multiply marginals\n\t# Now we can do the calculation using the pxy, px_py 2D arrays\n\tnzs = pxy > 0 # Only non-zero pxy values contribute to the sum\n\treturn np.sum(pxy[nzs] * np.log(pxy[nzs] / px_py[nzs]))", "def information_gain(data, split_name, target_name):\n\n # Split the data into two subsets, based on the median\n cond = data[split_name] <= data[split_name].median()\n left_split = data.loc[cond, target_name]\n right_split = data.loc[~cond, target_name]\n\n # Return information gain\n return (entropy(data[target_name])\n - 1/data.shape[0] * (left_split.shape[0] * entropy(left_split)\n + right_split.shape[0] * entropy(right_split)))", "def mutual_info_metric(df_classified, title, to_drop=None, gen_image=True):\n # remove all the string columns or later calculations will fail\n if to_drop is None:\n to_drop = [ each for each in list(df_classified.columns) if type(each) == str ]\n calc_mutual_info = SelectKBest(score_func=mutual_info_classif, k=1)\n df_classified = df_classified.dropna()\n # normalization of features in each sample\n feature_data = df_classified.drop(to_drop, axis=1).values\n mean_feature_data = np.mean(feature_data, axis=1)\n mean_feature_data = np.expand_dims(mean_feature_data, axis=1)\n feature_data = feature_data - mean_feature_data\n label = df_classified[['covid(label)']].values.ravel()\n\n df_mutual_info = calc_mutual_info.fit(feature_data, label)\n \n plt.figure()\n plt.scatter(\n df_classified.drop(to_drop, axis=1).columns,\n df_mutual_info.scores_,\n alpha=0.3\n )\n plt.xlabel(\"Feature\")\n plt.xticks(rotation=90)\n plt.ylabel(\"mutual_info\")\n plt.title(title)\n plt.tight_layout()\n plt.savefig(\"../graphs/new_b_ii/\" + title + \".png\")\n plt.show()\n \n if gen_image:\n try:\n # show the 
conditional entropy as an image matrix\n length = int(np.sqrt(df_mutual_info.scores_.shape[0]))\n tem = np.reshape(df_mutual_info.scores_, (length, length))\n plt.figure()\n plt.imshow(tem, cmap=plt.cm.gray)\n plt.title(title + \"as image\")\n plt.tight_layout()\n plt.savefig(\"../graphs/new_b_ii/\" + title + \"image.png\")\n plt.show()\n except Exception as error:\n print(f\"Error when trying to generate image for {title}\")\n print(error)\n print()", "def normalized_mutual_info_score(\n labels_true, labels_pred, *, average_method=\"arithmetic\"\n):\n labels_true, labels_pred = check_clusterings(labels_true, labels_pred)\n classes = np.unique(labels_true)\n clusters = np.unique(labels_pred)\n\n # Special limit cases: no clustering since the data is not split.\n # It corresponds to both labellings having zero entropy.\n # This is a perfect match hence return 1.0.\n if (\n classes.shape[0] == clusters.shape[0] == 1\n or classes.shape[0] == clusters.shape[0] == 0\n ):\n return 1.0\n\n contingency = contingency_matrix(labels_true, labels_pred, sparse=True)\n contingency = contingency.astype(np.float64, copy=False)\n # Calculate the MI for the two clusterings\n mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)\n\n # At this point mi = 0 can't be a perfect match (the special case of a single\n # cluster has been dealt with before). Hence, if mi = 0, the nmi must be 0 whatever\n # the normalization.\n if mi == 0:\n return 0.0\n\n # Calculate entropy for each labeling\n h_true, h_pred = entropy(labels_true), entropy(labels_pred)\n\n normalizer = _generalized_average(h_true, h_pred, average_method)\n return mi / normalizer", "def adjusted_mutual_info(y_true: tf.Tensor, y_pred: tf.Tensor) -> float:\n # pylint: disable=unbalanced-tuple-unpacking\n y_pred, y_true = _get_flatten_non_padding_value([y_pred, y_true],\n mask_gen_tensor=y_true)\n return sklearn.metrics.adjusted_mutual_info_score(y_true, y_pred)", "def splitEntropy(self, split, classLabels, featureVals):\n\t\t# note that all data is weighted here\n\t\ttotal = 0.0\n\t\tfor j in featureVals:\n\t\t\t# N_j is the total number of instances that are on branch j\n\t\t\tN_j = sum([x[1] for x in split[j].items()])\n\t\t\tif N_j == 0:\n\t\t\t\tcontinue\n\n\t\t\t# compute entropy of branch j\n\t\t\tentropy = 0.0\n\t\t\tfor i in classLabels:\n\t\t\t\t# p_ij is the probability that an instance taking branch j has class i\n\t\t\t\tp_ij = float(split[j][i]) / N_j\n\t\t\t\tif p_ij != 0:\n\t\t\t\t\t# compute sum of entropy for each \n\t\t\t\t\tentropy += p_ij * log(p_ij)\n\t\t\t\t\t\n\t\t\t# split entropy is entropy of each branch weighted by proportion of data\n\t\t\t# in that branch\n\t\t\ttotal += N_j * entropy\n\t\treturn -total", "def mutual_info_proxy(self):\n h_samples = self.get_hidden(self.v_samples)\n tf_zip = lambda a, b: tf.stack([a, b], 1)\n double_map_fn = lambda map_fn: (lambda ab: map_fn(a[0], a[1]))\n\n expect_delta_energy = tf.map_fn(\n double_map_fn(self.expect_delta_energy),\n tf_zip(self.e_samples, h_samples))\n return T.mean(expect_delta_energies)", "def _knn_mutual_information(\n X: Float2DArray,\n) -> FloatMatrix:\n n_features: PositiveInt\n _, n_features = X.shape\n mi_knn: FloatMatrix = np.empty((n_features, n_features))\n for idx_feature, feature in enumerate(X.T):\n mi_knn[idx_feature] = mutual_info_regression(\n X, feature,\n )\n return mi_knn", "def _score(\n self, split: Callable[[object], bool], X: np.ndarray,\n y: np.ndarray):\n def labeling_entropy(\n labeling: np.ndarray, labels: np.ndarray) -> 
float:\n \"\"\"\n Calculates the entropy of a labeling given the possible\n labels\n \"\"\"\n if labels.shape[0] == 0:\n return 1\n return entropy(\n [\n sum(pd.Index((label,)).get_indexer(labeling[:, 0]) + 1)\n / labeling.shape[0]\n for label in labels\n ],\n base=labels.shape[0])\n\n (_, yl), (_, yr) = self._split_sample(split, X, y)\n if self.classification:\n # When this tree is being used in a classification context\n # then use classification and split entropies with the\n # mutual information to calculate the score\n classes = np.unique(y)\n l_entropy = labeling_entropy(yl, classes)\n r_entropy = labeling_entropy(yr, classes)\n l_probability = yl.shape[0]/y.shape[0]\n r_probability = yr.shape[0]/y.shape[0]\n\n classification_entropy = labeling_entropy(y, classes)\n split_entropy = entropy((l_probability, r_probability), base=2)\n mean_posterior_entropy = (\n l_probability * l_entropy + r_probability * r_entropy)\n mutual_information = (\n classification_entropy - mean_posterior_entropy)\n score = (\n (2 * mutual_information)\n / (classification_entropy + split_entropy))\n else:\n # When this tree is being used in a regression context then\n # use the variance before and after the split is performed\n # to assess the information gain and calculate the score\n total_var = np.var(y)\n l_var = np.var(yl)\n r_var = np.var(yr)\n\n l_proportion = yl.shape[0] / y.shape[0]\n r_proportion = yr.shape[0] / y.shape[0]\n\n score = (\n (total_var - l_proportion * l_var - r_proportion * r_var)\n / total_var)\n\n return score", "def get_information_gain(y, x):\n return get_entropy(y) - get_conditional_entropy(y, x)", "def cal_mutual_info(df, target_var=None, disc_features_only=True):\n df = df.copy()\n\n df_f_type = df.dtypes\n df_f_type = df_f_type.loc[~df_f_type.index.isin([target_var])].copy()\n cols_if_num = df_f_type.apply(lambda x: np.issubdtype(x, np.number))\n discrete_f = ~cols_if_num\n # get all categorical features\n cols_num = cols_if_num[cols_if_num].index.tolist()\n cols_cat = cols_if_num[~cols_if_num].index.tolist()\n\n for col_cat in cols_cat:\n df[col_cat] = df[col_cat].fillna('Missing')\n\n for col_num in cols_num:\n df[col_num] = df[col_num].fillna(df[col_num].mean())\n\n enc = OrdinalEncoder()\n df[cols_cat] = enc.fit_transform(df[cols_cat])\n enc = OrdinalEncoder()\n df.loc[:, target_var] = enc.fit_transform(df[[target_var]])\n\n if not disc_features_only:\n all_features = df_f_type.index.tolist()\n mutual_info = mutual_info_classif(df[all_features], df[target_var].values,\n discrete_features=discrete_f,\n n_neighbors=20,\n random_state=123)\n df_mutual_info = pd.DataFrame(data=zip(all_features, mutual_info), columns=['columns', 'mutual_info'])\n return df_mutual_info\n else:\n\n mutual_info = mutual_info_classif(df[cols_cat], df[target_var].values,\n discrete_features=True)\n df_mutual_info = pd.DataFrame(data=zip(cols_cat, mutual_info), columns=['columns', 'mutual_info'])\n return df_mutual_info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implements the classical ID3 algorithm given training data (x), training labels (y) and an array of attribute-value pairs to consider. This is a recursive algorithm that depends on three termination conditions
1. If the entire set of labels (y) is pure (all y = only 0 or only 1), then return that label
2. If the set of attribute-value pairs is empty (there is nothing to split on), then return the most common value of y (majority label)
3. If the max_depth is reached (pre-pruning bias), then return the most common value of y (majority label)
Otherwise the algorithm selects the next best attribute-value pair using INFORMATION GAIN as the splitting criterion and partitions the data set based on the values of that attribute before the next recursive call to ID3. The tree we learn is a BINARY tree, which means that every node has only two branches. The splitting criterion has to be chosen from among all possible attribute-value pairs. That is, for a problem with two features/attributes x1 (taking values a, b, c) and x2 (taking values d, e), the initial attribute-value pair list is a list of all pairs of
def id3(x, y, weight, attribute_value_pairs=None, depth=0, max_depth=5):
    # INSERT YOUR CODE HERE. NOTE: THIS IS A RECURSIVE FUNCTION.
    # raise Exception('Function not yet implemented!')
    unique_labels, count_unique_labels = np.unique(y, return_counts=True)
    if attribute_value_pairs is None:
        attribute_value_pairs = []
        for i in range(x.shape[1]):
            unique_attributes = partition(x[:, i])
            for each_attribute_from_set in unique_attributes.keys():
                attribute_value_pairs.append((i, each_attribute_from_set))
        attribute_value_pairs = np.array(attribute_value_pairs).astype(int)
    if len(unique_labels) == 1:
        return unique_labels[0]
    if len(attribute_value_pairs) == 0 or depth == max_depth:
        return unique_labels[np.argmax(count_unique_labels)]
    entropy_info = []
    mutual_information_list = []
    for feature_column, value in attribute_value_pairs:
        indices = np.where(x[:, feature_column] == value)[0]
        y_for_feature_single_attribute = y[indices]
        entropy_info_for_feature_single_attribute = entropy(y_for_feature_single_attribute, weight)
        entropy_info.append(entropy_info_for_feature_single_attribute)
        mutual_information_list.append(mutual_information(x[:, feature_column], y, weight))
    # convert it into np array to find the argmax
    mutual_info_array = np.array(mutual_information_list, dtype=float)
    (max_attribute, max_value) = attribute_value_pairs[np.argmax(mutual_info_array)]
    max_attribute_partition = partition(np.array(x[:, max_attribute] == max_value).astype(int))
    attribute_value_pairs = np.delete(attribute_value_pairs, np.argwhere(np.all(attribute_value_pairs == (max_attribute, max_value), axis=1)), 0)
    decision_tree = {}
    for decision_value, indices in max_attribute_partition.items():
        x_new = x[indices]
        y_new = y[indices]
        attribute_decision = bool(decision_value)
        decision_tree[(max_attribute, max_value, attribute_decision)] = id3(
            x_new, y_new, weight,
            attribute_value_pairs=attribute_value_pairs,
            max_depth=max_depth, depth=depth + 1)
    return decision_tree
[ "def id3(x, y, attributes, max_depth, weights: list =None, attribute_values: dict=None, depth: int=0) -> dict:\r\n if len(y) == 0:\r\n raise Exception(\"No data passed\")\r\n if weights is None:\r\n weights = [1/len(y)]*len(y)\r\n if attribute_values is None:\r\n attribute_values = get_attribute_value_pairs(attributes, x)\r\n label_map = partition(y)\r\n if len(label_map.keys()) == 1: # congrats all the examples have same label!\r\n return (label_map.popitem())[0]\r\n\r\n if len(attributes) == 0 or depth == max_depth: # majority voted label is selected.\r\n return majority_label(label_map)\r\n\r\n root = dict()\r\n max_info_gain = -1\r\n max_gain_pair = None\r\n entropy_y = entropy(y, weights)\r\n for attribute in attributes:\r\n for attr_value in attribute_values[attribute]:\r\n attr_column = convert_column_to_dual_values(x[:, attribute], attr_value)\r\n current_gain = mutual_information(attr_column, y, weights, entropy_y)\r\n if current_gain > max_info_gain:\r\n max_info_gain = current_gain\r\n max_gain_pair = (attribute, attr_value)\r\n attribute_values[max_gain_pair[0]].remove(max_gain_pair[1])\r\n subset_x = {True: [], False: []}\r\n subset_y = {True: [], False: []}\r\n subset_weights = {True: [], False: []}\r\n attr_subset_true = list(attributes)\r\n attr_subset_true.remove(max_gain_pair[0])\r\n subset_attr = {True: attr_subset_true, False: list(attributes)}\r\n root_attr = max_gain_pair[0]\r\n root_val = max_gain_pair[1]\r\n for i in range(len(y)):\r\n key = x[i, root_attr] == root_val\r\n subset_x[key].append(x[i])\r\n subset_weights[key].append(weights[i])\r\n subset_y[key].append(y[i])\r\n for key in [True, False]:\r\n root[(root_attr, root_val, key)] = id3(np.asarray(subset_x[key]), np.asarray(subset_y[key]), subset_attr[key],\r\n max_depth, subset_weights[key], copy.deepcopy(attribute_values), depth+1)\r\n root[(max_gain_pair, 'default')] = majority_label(label_map)\r\n return root", "def predict(tree, x, y = []):\n\n\t#conditions of continuous and discrete features\n\tnode_id = 1 #initialize node identifier as first node under the root\n\twhile 1:\n\t\tnodes = tree[node_id]\n\n\t\tif nodes[0][5] == \"c\":\n\t\t\tif x[nodes[0][1]] <= nodes[0][2]:\n\t\t\t\tindex, node_id = 0, nodes[0][0] #set identifier of child node\n\t\t\telse:\n\t\t\t\tindex, node_id = 1, nodes[1][0] #set identifier of child node\n\t\telse:\n\t\t\tif x[nodes[0][1]] in nodes[0][2]:\n\t\t\t\tindex, node_id = 0, nodes[0][0] #set identifier of child node\n\n\t\t\telif x[nodes[1][1]] in nodes[1][2]:\n\t\t\t\tindex, node_id = 1, nodes[1][0] #set identifier of child node\n\n\t\t\telse:\n\t\t\t\t#value is not in left or right branch. 
Get label distributions of left and right child\n\t\t\t\t#sum labels distribution to get parent label distribution\n\t\t\t\tnode_id = str(nodes[0][0]) + \",\" + str(nodes[1][0])\n\t\t\t\tindex, nodes = 0, [[0,0,0,{ k: nodes[0][3].get(k, 0) + nodes[1][3] .get(k, 0) for k in set(nodes[0][3]) | set(nodes[1][3] )}]]\n\t\t\t\t#print node_id, nodes[0][3], y\n\n\t\tif node_id in tree.keys(): #check if tree can be traversed further\n\t\t\tcontinue\n\t\t\n\t\tprediction = max(nodes[index][3], key = nodes[index][3].get)\n\t\tif y == []:\n\t\t\treturn prediction\n\t\t\n\t\tprobs = sorted(zip(nodes[index][3].keys(), np.true_divide(nodes[index][3].values(), np.sum(nodes[index][3].values()))), key = itemgetter(1), reverse = True)\n\t\tif prediction == y:\n\t\t\tmargin = probs[0][1] - probs[1][1] if len(probs) > 1 else 1\n\t\telse:\n\t\t\tmargin = dict(probs).get(y, 0) - probs[0][1]\n\t\treturn node_id, margin", "def _build_tree(self, X, y, current_depth=0):\n\n largest_impurity = 0\n best_criteria = None # Feature index and threshold\n best_sets = None # Subsets of the data\n\n # Check if expansion of y is needed\n if len(np.shape(y)) == 1:\n y = np.expand_dims(y, axis=1)\n\n # Add y as last column of X\n Xy = np.concatenate((X, y), axis=1)\n\n n_samples, n_features = np.shape(X)\n\n if n_samples >= self.min_samples_split and current_depth <= self.max_depth:\n # Calculate the impurity for each feature\n for feature_i in range(n_features):\n # All values of feature_i\n feature_values = np.expand_dims(X[:, feature_i], axis=1)\n unique_values = np.unique(feature_values)\n\n # Iterate through all unique values of feature column i and\n # calculate the impurity\n for threshold in unique_values:\n # Divide X and y depending on if the feature value of X at index feature_i\n # meets the threshold\n Xy1, Xy2 = divide_on_feature(Xy, feature_i, threshold)\n\n if len(Xy1) > 0 and len(Xy2) > 0:\n # Select the y-values of the two sets\n y1 = Xy1[:, n_features:]\n y2 = Xy2[:, n_features:]\n\n # Calculate impurity\n impurity = self._impurity_calculation(y, y1, y2)\n\n # If this threshold resulted in a higher information gain than previously\n # recorded save the threshold value and the feature\n # index\n if impurity > largest_impurity:\n largest_impurity = impurity\n best_criteria = {\"feature_i\": feature_i, \"threshold\": threshold}\n best_sets = {\n \"leftX\": Xy1[:, :n_features], # X of left subtree\n \"lefty\": Xy1[:, n_features:], # y of left subtree\n \"rightX\": Xy2[:, :n_features], # X of right subtree\n \"righty\": Xy2[:, n_features:] # y of right subtree\n }\n\n if largest_impurity > self.min_impurity:\n # Build subtrees for the right and left branches\n true_branch = self._build_tree(best_sets[\"leftX\"], best_sets[\"lefty\"], current_depth + 1)\n false_branch = self._build_tree(best_sets[\"rightX\"], best_sets[\"righty\"], current_depth + 1)\n return DecisionNode(feature_i=best_criteria[\"feature_i\"], threshold=best_criteria[\n \"threshold\"], true_branch=true_branch, false_branch=false_branch)\n\n # We're at leaf => determine value\n leaf_value = self._leaf_value_calculation(y)\n\n return DecisionNode(value=leaf_value)", "def _build_tree(self, X, y, current_depth=0):\n\n largest_impurity = 0\n best_criteria = None # Feature index and threshold\n best_sets = None # Subsets of the data\n\n # Check if expansion of y is needed\n if len(np.shape(y)) == 1:\n y = np.expand_dims(y, axis=1)\n\n # Add y as last column of X\n Xy = np.concatenate((X, y), axis=1)\n\n n_samples, n_features = np.shape(X)\n\n if 
n_samples >= self.min_samples_split and current_depth <= self.max_depth:\n # Calculate the impurity for each feature\n for feature in range(n_features):\n # All values of feature\n feature_values = np.expand_dims(X[:, feature], axis=1)\n unique_values = np.unique(feature_values)\n\n # Iterate through all unique values of feature column i and\n # calculate the impurity\n for threshold in unique_values:\n # Divide X and y depending on if the feature value of X at index feature\n # meets the threshold\n Xy1, Xy2 = divide_on_feature(Xy, feature, threshold)\n\n if len(Xy1) > 0 and len(Xy2) > 0:\n # Select the y-values of the two sets\n y1 = Xy1[:, n_features:]\n y2 = Xy2[:, n_features:]\n\n # Calculate impurity\n impurity = self._impurity_calculation(y, y1, y2)\n\n # If this threshold resulted in a higher information gain than previously\n # recorded save the threshold value and the feature\n # index\n if impurity > largest_impurity:\n largest_impurity = impurity\n best_criteria = {\"feature\": feature, \"threshold\": threshold}\n best_sets = {\n \"leftX\": Xy1[:, :n_features], # X of left subtree\n \"lefty\": Xy1[:, n_features:], # y of left subtree\n \"rightX\": Xy2[:, :n_features], # X of right subtree\n \"righty\": Xy2[:, n_features:] # y of right subtree\n }\n\n if largest_impurity > self.min_impurity:\n # Build subtrees for the right and left branches\n left = self._build_tree(best_sets[\"leftX\"], best_sets[\"lefty\"], current_depth + 1)\n right = self._build_tree(best_sets[\"rightX\"], best_sets[\"righty\"], current_depth + 1)\n return DecisionNode(feature=best_criteria[\"feature\"], threshold=best_criteria[\n \"threshold\"], left=left, right=right)\n\n # We're at leaf => determine value\n leaf_value = self._leaf_value_calculation(y)\n\n return DecisionNode(value=leaf_value)", "def _Build_Tree(self, X, y, current_depth=0):\n\n Largest_Impurity = 0\n \n Best_Criteria = None # Feature index and Threshold\n \n Best_Subsets = None # Subsets of the data\n\n # Check if expansion of y is needed\n if len(np.shape(y)) == 1:\n y = np.expand_dims(y, axis=1)\n\n # Add y as last column of X\n Xy = np.concatenate((X, y), axis=1)\n\n n_samples, n_features = np.shape(X)\n\n if n_samples >= self.Minimum_Samples_Split and current_depth <= self.Max_Depth:\n # Calculate the impurity for each feature\n for Feature_Index in range(n_features):\n # All Values of Feature_Index\n Feature_Values = np.expand_dims(X[:, Feature_Index], axis=1)\n Unique_Values = np.unique(Feature_Values)\n\n # Iterate through all unique Values of feature column i and\n # calculate the impurity\n for Threshold in Unique_Values:\n # Divide X and y depending on if the feature Value of X at index Feature_Index\n # meets the Threshold\n Xy1, Xy2 = divide_on_feature(Xy, Feature_Index, Threshold)\n\n if len(Xy1) > 0 and len(Xy2) > 0:\n # Select the y-Values of the two sets\n y1 = Xy1[:, n_features:]\n y2 = Xy2[:, n_features:]\n\n # Calculate impurity\n impurity = self._Impurity_Calculation(y, y1, y2)\n\n # If this Threshold resulted in a higher information gain than previously\n # recorded save the Threshold Value and the feature\n # index\n if impurity > Largest_Impurity:\n Largest_Impurity = impurity\n Best_Criteria = {\"Feature_Index\": Feature_Index, \"Threshold\": Threshold}\n Best_Subsets = {\n \"leftX\": Xy1[:, :n_features], # X of left subTree\n \"lefty\": Xy1[:, n_features:], # y of left subTree\n \"rightX\": Xy2[:, :n_features], # X of right subTree\n \"righty\": Xy2[:, n_features:] # y of right subTree\n }\n\n if Largest_Impurity 
> self.Minimum_Impurity:\n # Build subTrees for the right and left branches\n Branch_True = self._Build_Tree(Best_Subsets[\"leftX\"], Best_Subsets[\"lefty\"], current_depth + 1)\n Branch_False = self._Build_Tree(Best_Subsets[\"rightX\"], Best_Subsets[\"righty\"], current_depth + 1)\n return DecisionNode(Feature_Index=Best_Criteria[\"Feature_Index\"], Threshold=Best_Criteria[\n \"Threshold\"], Branch_True=Branch_True, Branch_False=Branch_False)\n\n # We're at leaf => determine Value\n leaf_Value = self._Leaf_Value_Calculation(y)\n\n return DecisionNode(Value=leaf_Value)", "def best_attribute(df, attributes, target, splitting_heuristic):\n best_value = 0.0\n best_gain = 0.0\n best_attr = None\n\n for attr in attributes:\n gain = splitting_heuristic(df, attr, target)\n if (gain >= best_gain and attr != target):\n best_gain = gain\n best_attr = attr\n\n return best_attr", "def build(self, X: np.ndarray, y: np.ndarray) -> 'ExtraTree':\n if self.classification:\n self.classes_ = np.unique(y)\n\n def _build(\n X: np.ndarray, y: np.ndarray,\n decision_tree: ExtraTree.DecisionTree):\n # Mark constant attributes\n constant = [\n all(value == sample[0] for value in sample)\n for sample in np.hsplit(X, X.shape[1])]\n\n if self._stop(X, y, constant):\n if self.classification:\n unique, counts = np.unique(y, return_counts=True)\n occurrences = dict(zip(unique, counts))\n for cls in self.classes_:\n if not occurrences.get(cls):\n occurrences[cls] = 0\n prediction = np.asarray([\n occurrences[cls] / y.shape[0]\n for cls in self.classes_])\n else:\n prediction = y.mean()\n leaf = ExtraTree.Leaf(prediction)\n decision_tree.assign(leaf)\n return\n\n # Select K attributes to draw splits from\n individual_probability = 1 / (X.shape[1] - sum(constant))\n if self.max_features == 'auto' or self.max_features is None:\n if self.classification:\n max_features = y.shape[0]\n else:\n max_features = int(np.sqrt(y.shape[0]))\n else:\n max_features = self.max_features\n k_choices = np.random.choice(\n X.shape[1],\n min(max_features, len(constant) - sum(constant)),\n replace=False,\n p=[\n individual_probability if not constant[i] else 0\n for i in range(X.shape[1])\n ])\n K_attributes = X[:, k_choices]\n\n # Draw random splits\n K_splits = {\n self._pick_random_split(\n K_attributes[:, column], k_choices[column])\n for column in range(K_attributes.shape[1])}\n\n # Pick the best split\n split = max(K_splits, key=lambda s: self._score(s, X, y))\n\n # Branching\n node = ExtraTree.Node(split)\n (Xl, yl), (Xr, yr) = self._split_sample(split, X, y)\n _build(Xl, yl, node.left)\n _build(Xr, yr, node.right)\n decision_tree.assign(node)\n\n _build(X, y, self.decision_tree)\n return self", "def __id3(self, dataset: Dataset, parent_dataset: Dataset, depth: int) -> Union[Node, Leaf]:\n\n if self.__max_depth is None or depth < self.__max_depth: # depth limit not reached\n if len(dataset) == 0: # empty dataset\n return Leaf(parent_dataset.most_frequent_label) # most frequent label of the parent\n elif len(dataset.label_space) == 1 or len(dataset.feature_names) == 0:\n # all examples have the same label or there is no features left in the dataset\n return Leaf(dataset.most_frequent_label)\n else:\n mdf: str = dataset.most_discriminatory_feature\n sub_datasets: dict[str, Dataset] = dataset.group_by_feature(mdf)\n node: Node = Node(mdf, dataset.most_frequent_label)\n # for each child, create the corresponding decision subtree\n for feature_value, sub_dataset in sub_datasets.items():\n feature_value: str\n sub_dataset: Dataset\n\n 
child_node: Union[Node, Leaf] = self.__id3(sub_dataset, dataset, depth + 1) # create the subtree\n node.add_child(feature_value, child_node) # add as a child\n return node\n else: # depth limit reached\n return Leaf(dataset.most_frequent_label)", "def classifyG(train, test):\n test_X, test_y = test ## X and y components of test\n test_y = [int(item) for item in test_y] ## convert list of floats to int\n\n col_X = [] # Will be the attribute specified by index\n Dy = [] # The classes whose attributes go into the \"Yes\" tree\n Dn = [] # The calsses whose attributes go into the \"No\" tree\n predict = [] # Will be the list of predicted values\n\n classifier, index, value = bestSplit(train, \"GINI\")\n\n ## Want to get the desired column of specified index from the best split\n for i in range(0, len(test_y)):\n col_X.append(test_X[i][index])\n i=0\n\n for entry in col_X:\n ## actual classifiying done here, done by best split method\n if (entry <= value):\n Dy.append(test_y[i])\n else:\n Dn.append(test_y[i])\n i+=1\n\n Dy_predictor=mode(Dy) ## Getting the mode of the no tree, will predict class\n Dn_predictor=mode(Dn) ## Getting the mode of the yes tree,\" \"\n\n for entry in col_X:\n ## Predicting done here!\n if (entry <= value):\n predict.append(Dy_predictor)\n else:\n predict.append(Dn_predictor)\n return(predict)", "def dataset3Params(X, y, Xval, yval):\n# ====================== YOUR CODE HERE ======================\n# Instructions: Fill in this function to return the optimal C and sigma\n# learning parameters found using the cross validation set.\n# You can use svmPredict to predict the labels on the cross\n# validation set. For example, \n# predictions = svmPredict(model, Xval)\n# will return the predictions on the cross validation set.\n#\n# Note: You can compute the prediction error using \n# mean(double(predictions ~= yval))\n# =========================================================================\n\t# The score() function for a trained SVM takes in\n\t# X and y to test the score on, and the (float)\n\t# value returned is \"Mean accuracy of self.predict(X) wrt. 
y\"\n\n\tCvalues = (0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.)\n\tsigmavalues = Cvalues\n\tbest_pair, best_score = (0, 0), 0\n\n\tfor Cvalue in Cvalues:\n\t\tfor sigmavalue in sigmavalues:\n\t\t\tgamma = np.power(sigmavalue,-2.)\n\t\t\tgaus_svm = svm.SVC(C=Cvalue, kernel='rbf', gamma=gamma)\n\t\t\tgaus_svm.fit( X, y.flatten() )\n\t\t\tthis_score = gaus_svm.score(Xval,yval)\n\t\t\t#print this_score\n\t\t\tprint Cvalue,sigmavalue,this_score\n\t\t\tif this_score > best_score:\n\t\t\t\tbest_score = this_score\n\t\t\t\tbest_pair = (Cvalue, sigmavalue)\n\n\t\t\t\t\t\n\tprint \"Best C, sigma pair is (%f, %f) with a score of %f.\"%(best_pair[0],best_pair[1],best_score)\n\treturn best_pair[0], best_pair[1]", "def select_attribute(data):\n\n # Calculate the total entropy\n p = 0\n n = 0\n for row in data:\n if row[-1]:\n p += 1\n else:\n n += 1\n total_entropy = entropy(p, n)\n\n # Calculate the gain for each attribute and keep track of the index of the\n # max\n max_index = 0\n max_gain = 0\n for index in range(len(data[0]) - 1):\n counts = {}\n for row in data:\n attribute_value = row[index]\n if attribute_value not in counts:\n counts[attribute_value] = [0, 0]\n if row[-1]:\n counts[attribute_value][0] += 1\n else:\n counts[attribute_value][1] += 1\n gain = total_entropy - entropy2(counts.values())\n #print(f\"Index: {index} Gain: {gain}\")\n if gain > max_gain:\n max_gain = gain\n max_index = index\n\n return max_index", "def ib3(X_train, y_train):\n classes = np.unique(y_train)\n \n # Start with the first element.\n x_train_reduced = np.asarray([X_train[0,:]])\n y_train_reduced = np.asarray([y_train[0]])\n acceptable = np.array([0])\n \n lower = lambda p,z,n: (p + (z**2)/(2*n) - z*((p*(1-p)/n + (z**2)/(4*n**2)))**0.5)/(1 + (z**2)/n)\n upper = lambda p,z,n: (p + (z**2)/(2*n) + z*((p*(1-p)/n + (z**2)/(4*n**2)))**0.5)/(1 + (z**2)/n)\n \n for index, (x_instance, y_instance) in enumerate(zip(X_train, y_train)):\n\n best_knn = self.knn\n best_knn.fit(x_train_reduced, y_train_reduced)\n# print(x_train_reduced)\n y_pred_instance = best_knn.predict(np.asarray([x_instance]))\n\n # This part is similar to IB2\n if y_pred_instance != y_instance:\n x_train_reduced = np.vstack([x_train_reduced, x_instance])\n acceptable = np.hstack([acceptable, index])\n \n \n incorrect_class = 0\n correct_class = 0\n \n # Not going on onced got the expected value\n if len(acceptable) > len(y_train)/30: \n break\n \n # This part differ from IB2, just acceptable instance are kept.\n # Count the number of incorrect and correct classification\n for x_instance_reduced in x_train_reduced:\n best_knn = self.knn\n best_knn.fit(x_train_reduced, y_train_reduced)\n y_pred_instance_reduced = best_knn.predict(np.asarray([x_instance_reduced]))\n \n if y_pred_instance_reduced != y_instance:\n incorrect_class += 1\n else:\n correct_class += 1\n \n n = incorrect_class + correct_class\n p = correct_class / n\n \n # For acceptance\n z = 0.9\n lower_bound = lower(p, z, n)\n upper_bound = upper(p, z, n)\n# print(lower_bound, upper_bound, incorrect_class, correct_class)\n if (incorrect_class/n <= lower_bound) or (correct_class/n >= upper_bound):\n acceptable = np.hstack([acceptable, index])\n \n\n \n # For removing\n z = 0.7\n lower_bound = lower(p, z, n)\n upper_bound = upper(p, z, n)\n \n if (incorrect_class/n <= lower_bound) or (correct_class/n >= upper_bound):\n acceptable = np.delete(acceptable, [index], axis=0) \n\n# if p == 1:\n# break\n \n x_train_reduced = X_train[acceptable]\n y_train_reduced = y_train[acceptable]\n indexes_reduced = 
acceptable\n \n return x_train_reduced, y_train_reduced, indexes_reduced", "def find_best_split(self, data, attributes, classes, randomize):\n best_gain = -1 # keep track of the best information gain\n best_attr = -1 # keep train of the feature / value that produced it\n best_thr = -1\n n_features = len(attributes)\n \n if (randomize == False):\n for col in range(n_features): # for each feature\n # unique values in the column\n values = data[:,col] \n min_value = np.min(values)\n max_value = np.max(values)\n for iterr in range(self.pruning_thr):\n thr = min_value + iterr * (max_value - min_value)/(self.pruning_thr+1)\n gain = self.info_gain(data, col, thr, classes) # Calculate the information gain from this split\n if gain > best_gain:\n best_gain, best_attr, best_thr = gain, col, thr\n \n elif (randomize == True):\n # unique values in the column\n rndm_col = np.random.choice(np.array(attributes), replace=False) \n values = data[:,rndm_col]\n min_value = np.min(values)\n max_value = np.max(values)\n for iterr in range(self.pruning_thr):\n thr = min_value + iterr * (max_value - min_value)/(self.pruning_thr+1)\n gain = self.info_gain(data, rndm_col, thr, classes) # Calculate the information gain from this split\n if gain > best_gain:\n best_gain, best_attr, best_thr = gain, rndm_col, thr\n return best_attr, best_thr, best_gain", "def build_tree(data):\n #print(\"Creating node from data...\")\n #pp.pprint(data)\n node = Node()\n\n # Check to see if all the labels are the same, if so we are creating a RESULT\n # node\n result = majority_class(data)\n node.majority = result['majority']\n if result['unanimous']:\n #print(f\"RESULT: {result['majority']}\")\n node.type = 'RESULT'\n return node\n\n # If not we are creating a DECISION node\n node.type = 'DECISION'\n index = select_attribute(data)\n node.index = index\n node.branches = {}\n #print(f\"DECISION: Splitting on index {index}...\")\n groups = split_on_attribute(data, index)\n for attribute_value, group_data in groups.items():\n #print(f\"Creating {attribute_value} node\")\n node.branches[attribute_value] = build_tree(group_data)\n return node", "def compute_leaf(self, y):\r\n\r\n #########################################################################\r\n # TODO: Compute for the resulting value of the leaf node #\r\n #########################################################################\r\n num_samples_per_class = [np.sum(y == i) for i in range(len(np.unique(y)))]\r\n leaf_value = np.argmax(num_samples_per_class)\r\n #########################################################################\r\n # END OF YOUR CODE #\r\n ######################################################################### \r\n return leaf_value", "def _best_split(self, X, y):\n # Need at least two elements to split a node.\n m = y.size\n if m <= 1:\n return None, None\n\n # Count of each class in the current node.\n num_parent = [np.sum(y == c) for c in range(self.n_classes_)]\n\n # Gini of current node.\n best_gini = 1.0 - sum((n / m) ** 2 for n in num_parent)\n best_idx, best_thr = None, None\n\n # Loop through all features.\n for idx in range(self.n_features_):\n # Sort data along selected feature.\n thresholds, classes = zip(*sorted(zip(X[:, idx], y)))\n\n # We could actually split the node according to each feature/threshold pair\n # and count the resulting population for each class in the children, but\n # instead we compute them in an iterative fashion, making this for loop\n # linear rather than quadratic.\n num_left = [0] * self.n_classes_\n num_right = 
num_parent.copy()\n for i in range(1, m): # possible split positions\n c = classes[i - 1]\n num_left[c] += 1\n num_right[c] -= 1\n gini_left = 1.0 - sum(\n (num_left[x] / i) ** 2 for x in range(self.n_classes_)\n )\n gini_right = 1.0 - sum(\n (num_right[x] / (m - i)) ** 2 for x in range(self.n_classes_)\n )\n\n # The Gini impurity of a split is the weighted average of the Gini\n # impurity of the children.\n gini = (i * gini_left + (m - i) * gini_right) / m\n\n # The following condition is to make sure we don't try to split two\n # points with identical values for that feature, as it is impossible\n # (both have to end up on the same side of a split).\n if thresholds[i] == thresholds[i - 1]:\n continue\n\n if gini < best_gini:\n best_gini = gini\n best_idx = idx\n best_thr = (thresholds[i] + thresholds[i - 1]) / 2 # midpoint\n return best_idx, best_thr", "def MajPred( self , y ) :\r\n \r\n import numpy as np\r\n \r\n if y is None or self._L is None : return\r\n # check for indexing mismatch? \r\n try : y[self._L] \r\n except Exception as e :\r\n raise ValueError( 'passed y cannot be indexed by CatTreeNode._L (%s)' % e )\r\n \r\n # search through unique items in y[L], getting counts and majority element. \r\n # implementation differs for lists and for numpy.ndarrays\r\n \r\n self._c = {} # empty dictionary for counts\r\n self._C = 0 \r\n if isinstance( y , list ) : \r\n u = set( y[self._L] )\r\n for i in u :\r\n self._c[i] = y[self._L].count(i)\r\n if self._c[i] > self._C : \r\n self._y = i\r\n self._C = self._c[i]\r\n elif isinstance( y , np.ndarray ) : \r\n u = np.unique( y[self._L] )\r\n for i in u :\r\n self._c[i] = len( np.where( y[self._L] == i ) )\r\n if self._c[i] > self._C : \r\n self._y = i\r\n self._C = self._c[i]\r\n else : \r\n raise ValueError( 'y is not a comprehensible object here (list, numpy.ndarray)' )\r\n \r\n # now, self._y is set as a majority predictor, unique item counts are set in self._c, \r\n # and we can (re)set self._C as the total coverage\r\n self._C = len( self._L )\r\n \r\n # set error for this majority prediction... 
note using np.nditer\r\n self._e = sum( 1 if y[i] != self._y else 0 for i in self._L ) # np.nditer(self._L) )\r\n \r\n # return error count\r\n return self._e", "def check_attributes_celebA(self, attributes, x, y):\n\n orig_attr_score = np.zeros((len(attributes),1))\n adv_attr_score = np.zeros((len(attributes),1))\n for i in range(len(attributes)):\n attr = attributes[i]\n # load json and create model\n json_file_name = \"../../aix360/models/CEM_MAF/simple_{}_model.json\".format(attr)\n json_file = open(json_file_name, 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # load weights into new model\n weight_file_name = \"../../aix360/models/CEM_MAF/simple_{}_weights.h5\".format(attr)\n loaded_model.load_weights(weight_file_name)\n\n orig_attr_score[i] = loaded_model.predict(x)[0]\n adv_attr_score[i] = loaded_model.predict(y)[0]\n\n # pre-determined thresholds for changes in prediction values\n thresh_pos = np.zeros((len(attributes),1))\n thresh_pos[0] = .15\n thresh_pos[1] = .15\n thresh_pos[2] = .15\n thresh_pos[3] = .15\n thresh_pos[4] = .15\n thresh_pos[5] = .15\n thresh_pos[6] = .1\n thresh_pos[7] = .25\n thresh_pos[8] = .1\n thresh_pos[9] = .15\n thresh_pos[10] = .15\n thresh_pos[11] = .15\n\n thresh_neg = np.zeros((len(attributes),1))\n thresh_neg[0] = -.25\n thresh_neg[1] = -.25\n thresh_neg[2] = -.25\n thresh_neg[3] = -.25\n thresh_neg[4] = -.35\n thresh_neg[5] = -.25\n thresh_neg[6] = -.12\n thresh_neg[7] = -.25\n thresh_neg[8] = -.25\n thresh_neg[9] = -.25\n thresh_neg[10] = -.25\n thresh_neg[11] = -.25\n\n changes_abs = adv_attr_score - orig_attr_score\n changes = np.zeros((len(attributes),1))\n res = \"\"\n for i in range(len(attributes)):\n if changes_abs[i] >= thresh_pos[i]:\n changes[i] = 1\n elif changes_abs[i] <= thresh_neg[i]:\n changes[i] = -1\n added = np.where(changes == 1)[0]\n for j in range(len(added)):\n res += \"Added \" + attributes[added[j]] + \",\"\n removed = np.where(changes[i] == -1)[0]\n for j in range(len(removed)):\n res += \"Removed \" + attributes[removed[j]] + \",\"\n return res[:-1]", "def select_tree_model(x_train, y_train, x_validation, y_validation):\r\n depths = [16, 32, 64, 128, 256]\r\n criterias = ['entropy', 'gini']\r\n max_accuracy = -1000\r\n best_tree = None\r\n for criteria in criterias:\r\n for depth in depths:\r\n new_model = DecisionTreeClassifier(criterion=criteria, max_depth=depth, random_state=SEED)\r\n new_model.fit(x_train, y_train)\r\n\r\n accuracy = new_model.score(x_validation, y_validation)\r\n print('Accuracy for depth', depth, \"and criteria\", criteria, \": \", accuracy)\r\n\r\n if accuracy > max_accuracy:\r\n max_accuracy = accuracy\r\n best_tree = new_model\r\n return best_tree", "def _cart(self, labeled_data, gini_split_threshold, depth=0):\r\n labels = [label[1] for label in labeled_data]\r\n gini_threshold = gini_split_threshold * self._max_gini(labels)\r\n if depth >= self.max_depth or self._gini(labels) <= gini_threshold:\r\n counts = Counter(labels)\r\n max_val = max(counts.values())\r\n for key, value in counts.items():\r\n if value == max_val:\r\n classification = key\r\n return Node(len(labeled_data), labeled_data, classification, gini=self._gini(labels))\r\n\r\n lowest_cost = float('inf')\r\n for feature in labeled_data[0][0].keys():\r\n gini_calculations = self._gini_cost(labeled_data, feature)\r\n if gini_calculations[0] < lowest_cost:\r\n lowest_cost, threshold, left_samples, right_samples = gini_calculations\r\n chosen_feature = 
feature\r\n\r\n if self.root is None:\r\n self.root = Node(len(labeled_data), labeled_data, left_samples[0][1], threshold, chosen_feature, self._gini(labels))\r\n node = self.root\r\n else:\r\n node = Node(len(labeled_data), labeled_data, left_samples[0][1], threshold, chosen_feature, self._gini(labels))\r\n\r\n if depth < self.max_depth:\r\n depth += 1\r\n node.left = self._cart(left_samples, gini_split_threshold, depth)\r\n node.right = self._cart(right_samples, gini_split_threshold, depth)\r\n\r\n return node" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Predicts the classification label for a single example x using tree by recursively descending the tree until a label/leaf node is reached. Returns the predicted label of x according to tree
def predict_example(x, tree):
    # INSERT YOUR CODE HERE. NOTE: THIS IS A RECURSIVE FUNCTION.
    # raise Exception('Function not yet implemented!')
    for attribute_keys, sub_tree in tree.items():
        attribute = attribute_keys[0]
        value = attribute_keys[1]
        decision = attribute_keys[2]
        if decision == (x[attribute] == value):
            if type(sub_tree) is dict:
                label = predict_example(x, sub_tree)
            else:
                label = sub_tree
            return label
[ "def predict(tree, x, y = []):\n\n\t#conditions of continuous and discrete features\n\tnode_id = 1 #initialize node identifier as first node under the root\n\twhile 1:\n\t\tnodes = tree[node_id]\n\n\t\tif nodes[0][5] == \"c\":\n\t\t\tif x[nodes[0][1]] <= nodes[0][2]:\n\t\t\t\tindex, node_id = 0, nodes[0][0] #set identifier of child node\n\t\t\telse:\n\t\t\t\tindex, node_id = 1, nodes[1][0] #set identifier of child node\n\t\telse:\n\t\t\tif x[nodes[0][1]] in nodes[0][2]:\n\t\t\t\tindex, node_id = 0, nodes[0][0] #set identifier of child node\n\n\t\t\telif x[nodes[1][1]] in nodes[1][2]:\n\t\t\t\tindex, node_id = 1, nodes[1][0] #set identifier of child node\n\n\t\t\telse:\n\t\t\t\t#value is not in left or right branch. Get label distributions of left and right child\n\t\t\t\t#sum labels distribution to get parent label distribution\n\t\t\t\tnode_id = str(nodes[0][0]) + \",\" + str(nodes[1][0])\n\t\t\t\tindex, nodes = 0, [[0,0,0,{ k: nodes[0][3].get(k, 0) + nodes[1][3] .get(k, 0) for k in set(nodes[0][3]) | set(nodes[1][3] )}]]\n\t\t\t\t#print node_id, nodes[0][3], y\n\n\t\tif node_id in tree.keys(): #check if tree can be traversed further\n\t\t\tcontinue\n\t\t\n\t\tprediction = max(nodes[index][3], key = nodes[index][3].get)\n\t\tif y == []:\n\t\t\treturn prediction\n\t\t\n\t\tprobs = sorted(zip(nodes[index][3].keys(), np.true_divide(nodes[index][3].values(), np.sum(nodes[index][3].values()))), key = itemgetter(1), reverse = True)\n\t\tif prediction == y:\n\t\t\tmargin = probs[0][1] - probs[1][1] if len(probs) > 1 else 1\n\t\telse:\n\t\t\tmargin = dict(probs).get(y, 0) - probs[0][1]\n\t\treturn node_id, margin", "def predict(self, example):\n curr = self.root\n while not curr.is_leaf:\n # get the feature with the index of the current attribute in this node\n feature_value = example[self.total_attributes.index(curr.attr)]\n # go to that feature node and continue scanning the tree\n curr = curr.children[feature_value]\n return curr.classification", "def predict(self, test_example):\r\n\r\n probs = self.features[0].get_probs(test_example[0])\r\n for i, feature in enumerate(test_example):\r\n probs *= self.features[i].get_probs(feature)\r\n total_examples = sum(self.total)\r\n probs *= self.total\r\n return CLASS_LABELS[np.argmax(probs)]", "def predict(self, features):\n leafNode = self.root.find(features)\n return leafNode.getmostProbableLabel()", "def predict(self,newdata):\n root=self.root\n while not self.is_leaf(root):\n x=newdata[root.var]\n if root.dtype!=\"Continuous\":\n if x in root.cut:\n root=root.get_left\n else:\n root=root.get_right\n else:\n if x<=root.cut:\n root=root.get_left\n else:\n root=root.get_right\n return root.pred", "def predict(self, X):\n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X, dtype=np.unicode_)\n\n self.labeled_transactions_ = list()\n y_pred = list()\n\n # TODO evaluate parallelization here\n for X_row in X:\n tr = Transaction(X_row, self._item_to_item_id)\n\n # match against level 1\n matching_rules = _get_matching_rules(tr, self.lvl1_rules_, self.max_matching)\n\n # if level 1 was not used, match against level 2\n if not matching_rules:\n matching_rules = _get_matching_rules(tr, self.lvl2_rules_, self.max_matching)\n if matching_rules:\n tr.used_level = 2\n else:\n tr.used_level = 1\n\n if not matching_rules:\n y_pred.append(self.unlabeled_class_)\n else:\n y_pred.append(self._get_class_label(matching_rules))\n tr.matched_rules = matching_rules\n\n self.labeled_transactions_.append(tr) # 
keep track of labeled transaction\n\n y_pred = [self._ystr_to_orig[label] for label in y_pred]\n return np.array(y_pred)", "def classify(self, d):\n\t\treturn self.labels[d.features[self.root]+1]", "def predict_label(image):\n \n img = load_img(\n image, target_size=(SHAPE[0, SHAPE[1])\n )\n\n img_array = img_to_array(img)\n img_array = np.expand_dims(img_array, 0) # Create a batch\n\n # get the weights for each class\n predictions = model.predict(img_array)\n \n # get the confidence score for the prediction\n score = tf.nn.softmax(predictions[0])\n\n # get the label for the predicted clas : 0/1\n # depending on which class has the higher score\n label = np.argmax(score)\n\n # generating class name for the label\n if label == 1 : cls = 'signature'\n else : cls = 'no_signature'\n \n return label", "def predict(self, instance, tree, y):\n # Returns the class of a given instance.\n # Raise a ValueError if the class is not trained.\n if isinstance(self.tree, dict):\n for attr in instance.keys():\n if attr in tree.keys():\n try:\n prediction = tree[attr][instance[attr]]\n except:\n return y.mode()[0]\n if isinstance(prediction, dict):\n prediction = DecisionTree.predict(self, instance, prediction, y)\n return prediction\n else:\n return prediction\n else:\n raise ValueError('ID3 untrained')", "def predict(self, x_set):\n def classify(x):\n # Pick top-voted label among the k nearest neighbors.\n label_votes = self.knn_label_votes(x)\n return max(label_votes, key=label_votes.get)\n\n return np.array(list(map(classify, x_set)))", "def predict(self, X):\n probabilities = self.predict_probability(X)\n\n def classForProbability(probability):\n if probability > 0.5:\n return self.classOneLabel\n return self.classZeroLabel\n\n return numpy.array([\n classForProbability(p) for p in probabilities\n ])", "def __predict_label(self, label_probs):\n def driver(prob):\n candidate = np.argmax(prob)\n if candidate == 0 and prob[0] > self.model_paras['threshold_positive']:\n return 0\n elif candidate == 2 and prob[2] > self.model_paras['threshold_negative']:\n return 2\n else:\n return 1\n\n labels = list(map(driver, label_probs))\n return labels", "def predict(self,unlabeled):\r\n y_pred = unlabeled['label']\r\n if(self.main_transformer!=None):\r\n X,y = self.main_transformer.transform(unlabeled)\r\n y_pred = self.model_main.predict(X)\r\n pred_probs = self.model_main.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.alpha):\r\n y_pred[i] = 'Unsorted'\r\n unsorted = unlabeled.loc[y_pred == 'Unsorted']\r\n if(self.small_transformer!=None and len(unsorted)!=0):\r\n X,y = self.small_transformer.transform(unsorted)\r\n y = self.model_small.predict(X)\r\n pred_probs = self.model_small.predict_proba(X)\r\n for i,probs in enumerate(pred_probs):\r\n if(max(probs)<self.beta):\r\n y[i] = 'Unsorted'\r\n y_pred[y_pred=='Unsorted'] = y\r\n return y_pred", "def classify(self, classificationData):\n node = self.root\n if node.isleaf != True:\n while node.isleaf == False:\n if node.attr != None:\n node = node.children[classificationData[node.attr]]\n if node == None:\n return None\n return node.value\n else:\n return node.value", "def compute_leaf(self, y):\r\n\r\n #########################################################################\r\n # TODO: Compute for the resulting value of the leaf node #\r\n #########################################################################\r\n num_samples_per_class = [np.sum(y == i) for i in range(len(np.unique(y)))]\r\n leaf_value = 
np.argmax(num_samples_per_class)\r\n #########################################################################\r\n # END OF YOUR CODE #\r\n ######################################################################### \r\n return leaf_value", "def gradient_boosting_predict(X, trees, y_mean, nu=0.1):\n ### BEGIN SOLUTION\n \n ### END SOLUTION\n return y_hat", "def make_leaf(self, data):\n labels = data[:,-1].tolist()\n node = DecisionNode()\n node.label = max(set(labels), key=labels.count)\n return node", "def classify1(self,X):\n prediction = self.classify.predict(X)\n \n return prediction", "def label(tile,tileNet,ClassCoordinates,raster):\r\n tile=extractTile(raster,tile)\r\n labelVector=tileNet.encode(tile)\r\n labelVector=labelVector.detach().numpy()\r\n label=ClassCoordinates.knn.predict(labelVector)\r\n return(label)", "def predict(self, X):\n return predicted_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the average error between the true labels (y_true) and the predicted labels (y_pred). Returns the error = (1/n) sum(y_true != y_pred)
def compute_error(y_true, y_pred):
    # INSERT YOUR CODE HERE
    sum = 0
    n = len(y_true)
    for x in range(n):
        if y_true[x] != y_pred[x]:
            sum = sum + 1
    err = sum / n
    return err
[ "def mean_squared_error(ys_true, ys_pred):\n return 1 / ys_true.shape[0] * np.sum(np.power(ys_true - ys_pred, 2))", "def error_squared(true_labels, predicted_labels):", "def mean_squared_error(y_true, y_pred):\n mse = np.mean(np.power(y_true-y_pred,2))\n return mse", "def relative_mean_squared_error(y_true, y_pred):\n diff_squared = np.abs(y_true-y_pred)**2\n return np.mean(diff_squared/(y_true**2))", "def mean_absolute_error(y, y_pred):\n\n return absolute_error(y, y_pred) / len(y)", "def cal_classificationerror(y, y_pred):\n return 1-accuracy(y,y_pred)", "def accuracy(y_true, y_pred):\n return torch.mean(y_pred.eq(y_true).float())", "def RMSE(y_true, y_pred):\n return math.sqrt(mean_squared_error(y_true, y_pred))", "def average_precision_score(y, y_pred):\n pass", "def mse(y_true, y_predicted):\n return ((y_true - y_predicted) ** 2).mean(axis=0)", "def mean_square_error(y_true, y_pred):\n # return mean_squared_error(y_true, y_pred) # use sklean default function\n y_true = np.asarray(y_true)\n y_pred = np.asarray(y_pred)\n mse = ((y_true - y_pred) ** 2).mean(axis=0)\n return float(mse)", "def angle_error(y_true, y_pred):\n diff = angle_difference(K.argmax(y_true, 1), K.argmax(y_pred, 1))\n return K.mean(K.cast(K.abs(diff), K.floatx()))", "def fail_recall(y_true, y_pred):\n result = []\n for x in xrange(len(y_true)):\n if y_true[x] < 5.5:\n print 'real/pred', y_true[x], '/', y_pred[x]\n res = y_pred[x] < 5.5\n result.append(res)\n print result\n try:\n error = (((float)(sum(result))) / len(result))\n print error\n except ZeroDivisionError:\n return None, 0\n return error, len(result)", "def error_rate(predictions, labels):\n return 100*(1- np.float32(np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))) / predictions.shape[0])", "def mae(y_true, y_pred):\n\n return K.mean(K.abs(K.sum(K.cast(K.arange(0, 71), dtype='float32') * y_pred, axis=1) -\n K.sum(K.cast(K.arange(0, 71), dtype='float32') * y_true, axis=1)), axis=-1)", "def compute_error_and_bias(y_true,y_pred,a):\n\n error = np.mean(np.not_equal(y_true,y_pred))\n alpha_1 = np.sum(np.logical_and(y_pred==1,np.logical_and(y_true==1,a==0)))/ float(np.sum(np.logical_and(y_true==1,a==0)))\n beta_1 = np.sum(np.logical_and(y_pred == 1, np.logical_and(y_true == 1, a == 1))) / float(np.sum(\n np.logical_and(y_true == 1, a == 1)))\n alpha_2 = np.sum(np.logical_and(y_pred == 1, np.logical_and(y_true == -1, a == 0))) / float(np.sum(\n np.logical_and(y_true == -1, a == 0)))\n beta_2 = np.sum(np.logical_and(y_pred == 1, np.logical_and(y_true == -1, a == 1))) / float(np.sum(\n np.logical_and(y_true == -1, a == 1)))\n biasY1 = np.abs(alpha_1-beta_1)\n biasYm1 = np.abs(alpha_2-beta_2)\n\n return error,biasY1,biasYm1", "def relative_mean_absolute_error(y_true, y_pred):\n diff = np.abs(y_true-y_pred)\n return np.mean(diff/y_true)", "def error_knearest(ypred, ytest):\n return sum(ypred!=ytest) / len(ytest)", "def balanced_accuracy_score(y_true, y_pred):\n C = confusion_matrix(y_true, y_pred)\n with np.errstate(divide='ignore', invalid='ignore'):\n per_class = np.diag(C) / C.sum(axis=1)\n if np.any(np.isnan(per_class)):\n per_class = per_class[~np.isnan(per_class)]\n score = np.mean(per_class)\n return score", "def calculate_mean_average_precision(y_gold, y_pred, n=3):\n\n y_actual = copy.deepcopy(y_gold)\n y_hat = copy.deepcopy(y_pred)\n\n \n y_actual = y_actual.tolist()\n y_hat = y_hat.tolist()\n\n y_hat_len = len(y_hat)\n\n assert (len(y_actual) == len(y_hat))\n\n total_ave_precision = 0.0\n num_classes = len(y_hat[0])\n\n pos_y_hat_len = 0\n for 
i in range(y_hat_len):\n\n relevant_answers = 1\n pos_y_hat_len += 1\n\n ave_precision = 0\n predicted_answers = 0\n correct_answers = 0\n for j in range (n):\n predicted_answers += 1\n if (y_actual[i] == y_hat[i][j]):\n correct_answers += 1\n ave_precision += float(correct_answers) / float(predicted_answers)\n\n ave_precision = ave_precision / float(relevant_answers)\n total_ave_precision += ave_precision\n\n mean_average_precision = float(total_ave_precision) / float(pos_y_hat_len)\n return mean_average_precision" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function checks if the user in the current session is allowed to update an item associated with the given user_id_to_check
def user_is_authorized_to_update_item(user_id_to_check):
    if "authorized_user_id" in login_session:
        if int(user_id_to_check) == int(login_session["authorized_user_id"]):
            lib.debug_print("authorized")
            return True
    lib.debug_print("not authorized")
    return False
[ "def test_func(self):\n profile_id = self.get_object().id\n if self.request.user.id == profile_id:\n # then we can allow updating\n return True\n return False", "def admin_chk(user_id):\n if user_id == str(137974469896437760) or user_id == str(148798654688395265): return True\n else: return False", "def check_user(self, user):\n if not user:\n return False\n\n if self.staff_required:\n return False\n\n if user in self.whitelist:\n return True\n else:\n return False\n\n # hash = hashlib.md5(str(user.id).encode('utf-8'))[-4:]\n # return int(hash, 16) % 100 < self.percent", "def can_be_edited_by(self, user):\n return user.is_active and (user.pk == self.user.pk or\n self.can_be_moderated_by(user))", "def itemBelongsToOtherUser(item):\r\n user = getUserInfo(item.user_id)\r\n return login_session['email'] != user.email", "def check_is_admin(self, user_id):\n ids = TeamAdmin.get_user_ids(self.id)\n return int(user_id) in ids", "def userCanAffordItemObj(self, user : basedUser.BasedUser, item : gameItem.GameItem) -> bool:\n return user.credits >= item.getValue()", "def can_edit(expend_id, user_id):\n\n query = 'SELECT can_edit FROM user_expend WHERE user_id=%s AND expend_id=%s;'\n\n args = (user_id, expend_id)\n res = Expend._make_select(query, args)[0]['can_edit']\n return bool(res)", "def can_reject( self, item, user ):\n return user.has_perm( self.permission )", "def test_check_user_entitlement_item(self):\n pass", "def check_permission(self, user):\n return user.has_perms(self.permissions_required)", "def has_edit_rights(self, user: User):\n if user.is_moderator():\n return True\n\n if self.activity_id is not None:\n return user.supervises_activity(self.activity_id)\n if self.event_id is not None:\n return self.event.has_edit_rights(user)\n return self.user_id == user.id", "def checklist_update(user, user_id, checklist_id):\n\n checklist_fields = checklist_schema.load(request.json)\n\n checklists = Checklist.query.filter_by(id=checklist_id)\n\n if checklists[0].owner_id != user.id:\n return abort(401, description=\"You do not have permission to update this checklist.\")\n\n if checklists.count() != 1:\n return abort(404, description=\"Checklist not found.\")\n\n checklists.update(checklist_fields)\n db.session.commit()\n\n return jsonify(checklist_schema.dump(checklists[0]))", "def check_user_perm(self, user, doc, write = False):\n\n try:\n perm = self.user_perms.get(doc = doc, user = user)\n if not write:\n return True\n else:\n return perm.write and write\n except ObjectDoesNotExist:\n return False", "def restricted(func):\n\t@wraps(func)\n\tdef wrapped(update, context, *args, **kwargs):\n\t\tuser_id = update.effective_user.id\n\t\tif user_id not in LIST_OF_ADMINS:\n\t\t\tprint(\"Unauthorized access denied for {}.\".format(user_id))\n\t\t\treturn\n\t\treturn func(update, context, *args, **kwargs)\n\treturn wrapped", "def is_allowed_update_for(self, instance):\n return self._is_allowed_for(instance, 'update')", "def can_edit(self, user):\n # so far only the owner can edit\n try:\n if user.cc3_profile.pk == self.created_by.pk:\n return True\n except CC3Profile.DoesNotExist:\n pass\n return False", "def validate_access(tribe_id, user):\n\n tribe_id = int(tribe_id)\n\n if tribe_id in [t.team.tribe_id for t in user.teams]:\n return True\n\n if tribe_id in user.editing_ids():\n return True\n\n return False", "def is_mutable_by(self, user):\n return ((self.submitter == user or\n user.has_perm('reviews.can_edit_reviewrequest',\n self.local_site)) and\n not is_site_read_only_for(user))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
On GET, returns the "delete profile" page to confirm that the user really wants to delete their profile. On POST, deletes the user profile. Note that all of the user's ads will be automatically deleted as well, because this is how the database is set up (there is an automatic delete cascade for user>ad objects).
def delete_user_profile():
    user = flask_login.current_user
    delete_user_profile_form = FlaskForm()
    if request.method == 'POST' and delete_user_profile_form.validate_on_submit():
        if not user_is_authorized_to_update_item(user.id):
            flash("You are not authorized to update this page")
            return redirect(url_for("index"))
        user_email = user.email
        user_id = user.id
        database.set_user_authenticated_status(user, False)
        flask_login.logout_user()
        database.delete_user(database.get_user_by_unicode_id(user_id))
        flash("User account for " + user_email + " and all ads for this account were deleted.")
        return redirect(url_for("index"))
    return render_template("delete_user_profile.html", user=user,
                           delete_user_profile_form=delete_user_profile_form,
                           page_info=get_page_info())
[ "def delete_profile(request):\n if request.method == 'POST':\n form = RemoveUser(request.POST)\n\n if form.is_valid():\n request.user.delete()\n auth.logout(request)\n messages.success(request, 'You have successfully deleted your account')\n return render(request, 'index.html')\n else:\n messages.error(request, \"Not able to delete account!\")\n else:\n form = RemoveUser()\n arg = {'form': form}\n return render(request, 'Profile/delete_profile.html', arg)", "def delete_profile(self):\n pass", "def delete_user_account():\n\n # get user\n user = g.user\n\n # delete post\n user.status = User.STATUS_DELETED\n user.status_changed_at = datetime.now()\n\n # delete user profile\n if user.profile:\n user.profile.status = UserProfile.STATUS_DELETED\n user.profile.status_changed_at = datetime.now()\n\n db.session.commit()\n\n # response\n return '', 204", "def accountRemoval(request):\n if 'remove_profile' in request.POST:\n # messages.info(request, 'You clicked button 1')\n pass\n elif 'remove_all' in request.POST:\n # messages.info(request, 'You clicked button 2')\n instance = get_user(request)\n instance.delete()\n return render(request, 'account/logged_out.html')\n \n return render(request, 'account/account_remove.html')", "def delete_user_on_profile_delete(sender, instance, **kwargs):\n \n instance.user.delete()", "def _delete_flow(self, user, assertion):\n client = mozillian_client(user['email'], assertion)\n uniq_id = user['uniq_id']\n\n r = client.get(reverse('phonebook.edit_profile'))\n doc = pq(r.content)\n\n # Make sure there's a link to a confirm deletion page, and nothing\n # pointing directly to the delete URL.\n eq_(reverse('confirm_delete'), doc('#delete-profile').attr('href'),\n 'We see a link to a confirmation page.')\n self.assertFalse(any((reverse('phonebook.delete_profile') in el.action)\n for el in doc('#main form')),\n \"We don't see a form posting to the account delete URL.\")\n\n # Follow the link to the deletion confirmation page.\n r = client.get(doc('#delete-profile').attr('href'))\n\n # Test that we can go back (i.e. 
cancel account deletion).\n doc = pq(r.content)\n eq_(reverse('phonebook.edit_profile'),\n doc('#cancel-action').attr('href'))\n\n # Test that account deletion works.\n delete_url = doc('#delete-action').closest('form').attr('action')\n r = client.post(delete_url, {'unique_id': uniq_id}, follow=True)\n eq_(200, r.status_code)\n self.assertFalse(_logged_in_html(r))\n\n client = test.Client()\n client.get(reverse('home'))\n data = dict(assertion=assertion, mode='register')\n r = client.post(reverse('browserid_login'), data, follow=True)\n\n self.assertFalse(_logged_in_html(r))", "def delete_profile_for_user(sender, instance=None, **kwargs):\n if instance:\n user_profile = UserProfile.objects.get(user=instance)\n user_profile.delete()", "def delete_request(request):\n \n data = request.data\n user = Profile.objects.get(username=data['username'])\n req_deleted = delete_follow_request(request.user,user)\n \n if req_deleted:\n if is_following(user.user,request.user):\n return JsonResponse({'success':True,'Follows':True},safe=False)\n return JsonResponse({'success':True,'Follows':False},safe=False)\n \n return JsonResponse({'success':False},safe=False)", "def test_superuser_can_delete_any_profile(self):\n SUPERUSER = 0\n self.client.login(\n username=self.users[SUPERUSER].get('username'),\n password=self.users[SUPERUSER].get('password')\n )\n for user in User.objects.all():\n if user.username == self.users[SUPERUSER].get('username'): # skips to delete its profile but keeps its ID\n superuser_ID = user.pk\n continue\n response = self.client.delete('/1.0/users/{0}/'.format(user.pk))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n # superuser deletes its profile\n response = self.client.delete('/1.0/users/{0}/'.format(superuser_ID))\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def delete_user_profile(user_profile_id):\n\n # get user_profile\n user_profile = UserProfile.query.get(user_profile_id)\n if user_profile is None:\n abort(404)\n\n # delete user_profile\n db.session.delete(user_profile)\n db.session.commit()\n\n # response\n return '', 204", "def test_user_can_delete_its_profile_and_cannot_other_profiles(self):\n NO_SUPERUSER = 1\n self.client.login(\n username=self.users[NO_SUPERUSER].get('username'),\n password=self.users[NO_SUPERUSER].get('password')\n )\n for user in User.objects.all():\n response = self.client.delete('/1.0/users/{0}/'.format(user.pk))\n if user.username == self.users[NO_SUPERUSER].get('username'):\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n else:\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete(self, customer_id: int, profile_id: int) -> int:\n response = self.base_delete_request(\n f\"{self.base_url}/{customer_id}/social-profiles/{profile_id}\"\n )\n\n return self.process_result_with_status_code(response, 204)", "def can_be_deleted(self, userprofile):\n return self.can_be_edited(userprofile)", "def user_post_delete_handler(sender, **kwargs):\n total = User.objects.all().count()\n write_point('default.django.auth.user.delete', value=1)\n write_point('default.django.auth.user.count', value=total)", "def test_delete_profile(self):\n self.profile.save_profile()\n profile2 = Profile(profile_photo ='test_profile_photo2',bio = 'test_bio2')\n profile2.save_profile()\n self.profile.delete_profile()\n all_profiles = Profile.objects.all()\n self.assertTrue(len(all_profiles)==1)", "def delete_post(request, pk=None):\n post = get_object_or_404(Post, pk=pk)\n if 
request.user.is_authenticated():\n if post.user.id == request.user.id:\n SomeModel.objects.filter(id=id).delete()\n else:\n return render(request,'404.html')\n\n return redirect(\"deletedfeedback.html\")", "def delete_user():\n if session.get('user_id'):\n response = nb_session.delete(\n f'https://{nation_slug}.nationbuilder.com/api/v1/people/{session[\"user_id\"]}',\n )\n session.pop('user_id')\n session.clear()\n\n return redirect(url_for('people'))", "def delete_posting():\n\n\tcurrent_user = session.get(\"user_id\")\n\tuser = User.query.filter(User.user_id == current_user).first()\n\tpost = Post.query.filter(db.and_(Post.user_email == user.email,\n\t\tPost.active == True)).all()\n\n\treturn render_template('delete_posting.html', post=post)", "def delete_dog_wanted_ad(request, pk):\n user = request.user\n\n if user.is_superuser:\n wanted_ad_to_delete = get_object_or_404(ProductWanted, pk=pk)\n wanted_ad_to_delete.delete()\n\n \n messages.success(request, \"Wanted Ad successfully deleted\")\n return redirect('wanted_dogs')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns JSON for a single ad
def get_ad_json(ad_id):
    ad = database.get_ad_by_id(ad_id)
    if ad:
        return jsonify(database.ad_to_dict(ad, serialize=True))
    return jsonify({})
[ "def get(self):\n rs = Con.getad_info()\n return jsonify({'result': rs})", "def show_ads(template_name, user_id=None):\n ads_html = list()\n search_filtering_parameters = get_search_filtering_parameters_from_request(request)\n if user_id:\n search_filtering_parameters[\"user_id\"] = user_id\n\n ads, total_number_of_ads, min_ad_idx_displayed, max_ad_idx_displayed = \\\n database.get_ads_to_display(**search_filtering_parameters)\n\n if total_number_of_ads > 0:\n for ad in ads:\n ads_html.append(render_template(template_name, ad=database.ad_to_dict(ad)))\n\n ads_data = dict()\n ads_data[\"ads_html\"] = ads_html\n ads_data[\"total_number_of_ads\"] = str(total_number_of_ads)\n ads_data[\"min_ad_idx_displayed\"] = str(min_ad_idx_displayed)\n ads_data[\"max_ad_idx_displayed\"] = str(max_ad_idx_displayed)\n\n return jsonify(ads_data)", "def get_city_ads_json(city_id):\n ads = database.get_ads_by_city(city_id)\n list_of_ads_dictionaries = [database.ad_to_dict(ad, serialize=True) for ad in ads]\n return jsonify(list_of_ads_dictionaries)", "def json(self):\n return {\n \"id\": self.id,\n \"string_id\": self.string_id,\n \"upvotes\": self.upvotes,\n \"downvotes\": self.downvotes,\n \"karma\": self.karma,\n \"created\": self.created,\n }", "def GET(self, id_):\n web.header('Content-Type', 'application/json')\n try:\n band = self.catalog[id_]\n return transform.band_to_json(band)\n except KeyError:\n return web.notfound('Band {} not found'.format(id_))", "def render_ad_by_id(ad_id, fallback='True'):\n try:\n ad_id = int(ad_id)\n except:\n\tad_id = 0\n\n try:\n ad = BannerAd.current.get(id=ad_id)\n except BannerAd.DoesNotExist:\n ad = None\n\n if not ad:\n ad = None\n if fallback == 'True':\n return render_adsense(type)\n else:\n return ''\n \n code = ''\n if not ad.code:\n code = '<img src=\"%s\" alt=\"%s\" />' % (ad.image.url, ad.name)\n if ad.url:\n code = ''.join(['<a href=\"%s\">' % ad.url, code, '</a>'])\n else: \n code = ad.code\n code = ''.join(['<div class=\"ad ad_%s\">' % ad.ad_type, code, '</div>'])\n \n return code", "def serialize(ag):\n #turn adventureGame object into string\n return json.dumps(ag.data)", "def get_jsonld(self):\n response = self._dataverse.accept_jsonld().make_request(self._uri)\n self._jsonld = response.json()\n return self._jsonld", "def asset_details(self):\n if self.asset_details_json:\n return json.loads(self.asset_details_json)\n else:\n return {}", "def render_json(request, addon, stats):\n response = http.HttpResponse(mimetype='text/json')\n\n # XXX: Subclass DjangoJSONEncoder to handle generators.\n if isinstance(stats, GeneratorType):\n stats = list(stats)\n\n # Django's encoder supports date and datetime.\n fudge_headers(response, stats)\n simplejson.dump(stats, response, cls=DjangoJSONEncoder)\n return response", "def azure_sd_json(self) -> str:\n return pulumi.get(self, \"azure_sd_json\")", "def sense_details_json(request, **kwargs):\r\n import json\r\n from .models import Sense\r\n senseid = int(kwargs.get('senseid'))\r\n try:\r\n sense = Sense.objects.get(id=senseid)\r\n except Sense.DoesNotExist:\r\n response = {'lemma': '?', 'definition': 'undefined', 'link': ''}\r\n else:\r\n if sense.definition:\r\n defn = '\"' + sense.definition + '\"'\r\n else:\r\n defn = '[undefined]'\r\n response = {'lemma': '%s (%s)' % (sense.lemma, sense.wordclass_readable()),\r\n 'definition': defn, 'link': sense.oed_url(),}\r\n return HttpResponse(json.dumps(response), content_type='text/plain')", "def test_ad_valid_create(self):\n\n token = AccessToken.for_user(self.user)\n 
self.client.credentials(HTTP_AUTHORIZATION=\"Bearer \" + str(token))\n url = reverse(\"ad-create\")\n data = {\n \"title\": \"Test title\",\n \"description\": \"Test description\",\n \"price\": \"100\",\n }\n response = self.client.post(url, data, format=\"json\")\n\n # Checks if the ad is created\n self.assertTrue(Ad.objects.filter(id=response.data.get(\"id\")).exists())\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # Checks if the owner is the one that created the ad\n self.assertEqual(response.data.get(\"owner\").get(\"email\"), \"test@test.org\")", "def get_athlete(self):\n REQUEST_URL = self.API_BASE + 'athlete'\n r = requests.get(REQUEST_URL, \n headers=self.hd)\n return dict(r.json())", "def show_single_lesson_json(lesson_id):\n \n lesson = crud.get_lesson_by_id(lesson_id)\n lesson_dict = lesson.as_dict()\n\n if not lesson_dict.get('imgUrl'):\n lesson_dict['imgUrl'] = '/static/img/unimpressed.jpg'\n \n comp_data = []\n\n for comp in lesson.comps:\n comp_data.append(comp.as_dict())\n\n return {\"lesson\": lesson_dict, \"comps\": comp_data}", "def obtenerDatosIdJSON(self, idadi):\n\n # SQL\n consulta1 = \"\"\"\n SELECT \n ARRAY_TO_JSON(ARRAY_AGG(ROW_TO_JSON(data)))\n FROM (\n SELECT adi.add_id idadicional, \n p.per_nombres||' '||p.per_apellidos persona,\n adi.nac_id idnacionalidad,\n nac.nac_des nacionalidad, \n adi.add_lugarnac lugarnacimiento, \n adi.san_id idtiposangre,\n san.san_des sangre,\n adi.add_alergias alergias,\n adi.add_capacdife capacidades,\n adi.add_foto foto,\n adi.add_estado estado\n FROM \n membresia.admision_adicionales adi\n LEFT JOIN referenciales.personas p ON \n adi.add_id = p.per_id\n LEFT JOIN referenciales.nacionalidad nac ON\n adi.nac_id = nac.nac_id\n LEFT JOIN referenciales.sangre san ON\n adi.san_id = san.san_id\n WHERE \n adi.add_id = %s AND adi.add_estado <> false) data;\n \"\"\"\n\n try:\n\n conexion = Conexion()\n con = conexion.getConexion()\n cur = con.cursor()\n cur.execute(consulta1, (idadi, ))\n\n return cur.fetchone()\n\n except con.Error as e:\n print(e.pgerror)\n return False\n finally:\n if con is not None:\n cur.close()\n con.close()", "def showCatalogJSON():\n\n items = session.query(Item).order_by(Item_id.desc())\n return jsonify(Item=[i.serialize for i in items])", "def render_adsense(type):\n if type == 'banner':\n code = \"\"\"\n <script type=\"text/javascript\"><!--\n google_ad_client = \"pub-5361914556213729\";\n google_ad_slot = \"1625200313\";\n google_ad_width = 468;\n google_ad_height = 60;\n //-->\n </script>\n <script type=\"text/javascript\"\n src=\"http://pagead2.googlesyndication.com/pagead/show_ads.js\">\n </script>\n \"\"\"\n elif type == 'shortvert':\n code = \"\"\"\n <script type=\"text/javascript\"><!--\n google_ad_client = \"pub-5361914556213729\";\n google_ad_slot = \"8697309618\";\n google_ad_width = 120;\n google_ad_height = 240;\n //-->\n </script>\n <script type=\"text/javascript\"\n src=\"http://pagead2.googlesyndication.com/pagead/show_ads.js\">\n </script>\n \"\"\"\n elif type == 'vertical':\n code = \"\"\"\n <script type=\"text/javascript\"><!--\n google_ad_client = \"pub-5361914556213729\";\n google_ad_slot = \"9446223050\";\n google_ad_width = 120;\n google_ad_height = 600;\n //-->\n </script>\n <script type=\"text/javascript\"\n src=\"http://pagead2.googlesyndication.com/pagead/show_ads.js\">\n </script>\n \"\"\"\n else:\n return ''\n \n return '<div class=\"ad ad_%s\">%s</div>' % (type, code)", "def json( self ):\n\t\treturn self.acls" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns JSON for all ads in a city (as per city_id)
def get_city_ads_json(city_id):
    ads = database.get_ads_by_city(city_id)
    list_of_ads_dictionaries = [database.ad_to_dict(ad, serialize=True) for ad in ads]
    return jsonify(list_of_ads_dictionaries)
[ "def get_all_city_facts(request, city_id):\n try:\n city_facts = CityFact.objects.filter(city=city_id)\n except CityFact.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = CityFactSerializer(city_facts, many=True)\n return Response(serializer.data)", "def city_grab(city_id):\n cities = models.storage.all('City')\n for key in cities.keys():\n c, p, id = key.partition('.')\n if id == city_id:\n return (jsonify(cities.get(key).to_dict()))\n abort(404)", "def place_list(city_id):\n city = storage.get(\"City\", city_id)\n if not city:\n abort(404)\n place_list = [place.to_dict() for place in city.places]\n return jsonify(place_list)", "def get_all_city_images(request, city_id):\n try:\n city_images = CityImage.objects.filter(city=city_id)\n except CityImage.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = CityImageSerializer(city_images, many=True)\n return Response(serializer.data)", "def city(city_id):\n city = storage.get('City', city_id)\n if city:\n return (jsonify(city.to_dict()), 200)\n abort(404)", "def cities(\n district_id: typing.Union[str, int],\n) -> typing.List:\n # parse input\n district_id = int(district_id)\n # fetch\n cs = b2c.json(\n '/services/Address/getCityListAsJson',\n params={\"id\": district_id}\n )\n # parse output\n cs = map(\n lambda c: {\n 'district_id': district_id,\n 'city_id': int(c['id']),\n 'name': c['name']\n },\n cs\n )\n cs = sorted(cs, key=lambda k: k['city_id'])\n return list(cs)", "def aqdevice_city(request, city): \n try:\n if request.method == 'GET':\n aqdevices = AQDevice.objects.filter(city=city)\n serializer = AQDeviceSerializer(aqdevices, many=True)\n return Response(serializer.data) \n except:\n return Response(status=404)", "def get_city(city_id):\n city_obj = storage.get(\"City\", city_id)\n if city_obj is None:\n abort(404)\n return jsonify(city_obj.to_dict())", "def generateCities(self):\n citiesDict = {}\n for k in CITIES_TEMPLATE:\n citiesDict[k] = City(k, CITIES_TEMPLATE[k][\"connections\"], CITIES_TEMPLATE[k][\"colour\"])\n return citiesDict", "def get_doctors():\n all_doctors = schema.Doctor.query.all()\n result = schema.doctors_schema.dump(all_doctors)\n return jsonify(result.data)", "def get(self, city=\"São Paulo\"):\n c = CovidCasesRaw()\n resp = c.list_city_cases(city)\n\n return resp", "def _get_city_names() -> List[str]:\n with open(CITIES_JSON_FILE_PATH, \"r\") as read_file:\n return [city['city'] for city in json.loads(read_file.read())]", "def activities_all():\n data = get_json_data()\n return json.dumps(data)", "def get_all_city_schemas(self):\n return cities_database", "def build_city_list():\n city_rank_url = 'https://www.businessinsider.com/us-news-best-places-to-live-in-america-2016-3'\n response_text = make_url_after_check_cache(city_rank_url, CACHE_DICT)\n soup = BeautifulSoup(response_text, 'html.parser')\n found_list = soup.find_all(class_=\"slide-layout clearfix\")\n in_city_list = []\n for city_result in found_list:\n title = city_result.find(\"h2\", class_=\"slide-title-text\").text\n first_split = title.split('.')\n second_split = ''.join(first_split[1:]).split(',')\n city = City(first_split[0].strip(),\n second_split[0].strip(),\n second_split[-1].strip(),\n city_result.find('p').text)\n in_city_list.append(city)\n return in_city_list", "def list_cities():\n states = storage.all(State).values()\n return render_template('8-cities_by_states.html', states=states)", "def 
getAllPincodesWithCity(context=None):\n if context.get(\"error\") is True:\n return {\n \"statusCode\": 4001,\n \"statusMessage\": context.get(\"error_response\", \"\")\n }\n token = context.get(\"token\")\n #url is build for client api to request for pincodes serving \"Cash\"\n url = 'https://%s%s?token=%s' %\\\n (settings.APP_API_URL, settings.POSTAL_CODE_API_CASH_CITY_ALL,\n token)\n pin_response = requests.get(url,\n headers={\"content-type\":\n \"application/x-www-form-urlencoded\"},\n verify=False)\n pin_dict = json.loads(pin_response.content)\n pin_list = list()\n if pin_dict.get(\"result\", None):\n pin_list = pin_dict[\"result\"]\n return {\n \"statusCode\": 200,\n \"statusMessage\": \"Success\",\n \"response\": pin_list\n }", "def cities(self):\n return [city for city in models.storage.all(City)\n if City.state_id == self.id]", "def get_data():\n city = request.args.get('city', None)\n progress_status = request.args.get('progress_status', None)\n\n # We recover the investment data depending on the filter\n if city and progress_status:\n investments = models.Investment.query.filter(and_(models.Investment.ville==city, models.Investment.etat_d_avancement==progress_status)).all()\n elif city:\n investments = models.Investment.query.filter(models.Investment.ville==city).all()\n elif progress_status: \n investments = models.Investment.query.filter(models.Investment.etat_d_avancement==progress_status).all()\n else: \n investments = models.Investment.query.all()\n\n result = response.json_response(json.dumps(investments, cls=alchemy_json_encoder.AlchemyEncoder))\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the ads_data data structure to be displayed in the list of ads. Filtering parameters are extracted from the request.
def show_ads(template_name, user_id=None):
    ads_html = list()
    search_filtering_parameters = get_search_filtering_parameters_from_request(request)
    if user_id:
        search_filtering_parameters["user_id"] = user_id

    ads, total_number_of_ads, min_ad_idx_displayed, max_ad_idx_displayed = \
        database.get_ads_to_display(**search_filtering_parameters)

    if total_number_of_ads > 0:
        for ad in ads:
            ads_html.append(render_template(template_name, ad=database.ad_to_dict(ad)))

    ads_data = dict()
    ads_data["ads_html"] = ads_html
    ads_data["total_number_of_ads"] = str(total_number_of_ads)
    ads_data["min_ad_idx_displayed"] = str(min_ad_idx_displayed)
    ads_data["max_ad_idx_displayed"] = str(max_ad_idx_displayed)

    return jsonify(ads_data)
[ "def getAds(self):\n \n ids = [id for id in self.db]\n \n results = etree.Element(\"results\")\n for id in ids:\n if (id.find(\"_design\") != -1):\n continue\n \n result = etree.Element(\"result\")\n data = self.db[id]\n result.set(\"title\", data[\"title\"])\n result.set(\"content\", data[\"content\"])\n result.set(\"href\", data[\"href\"])\n results.append(result)\n\n return results", "def get_all_ads(self):\n resp = self.session.get('https://www.kijiji.ca/m-my-ads.html')\n user_id=get_token(resp.text, 'userId')\n my_ads_url = 'https://www.kijiji.ca/j-get-my-ads.json?_=1&currentOffset=0&isPromoting=false&show=ACTIVE&user={}'.format(user_id)\n my_ads_page = self.session.get(my_ads_url)\n my_ads_tree = json.loads(my_ads_page.text)\n ad_ids = [entry['id'] for entry in my_ads_tree['myAdEntries']]\n ad_names = [entry['title'] for entry in my_ads_tree['myAdEntries']]\n return zip(ad_names, ad_ids)", "def get_ads(self, ad_account_id, campaign_id, ad_group_id, query_parameters=None):\n return self.get_iterator(\n f\"/v5/ad_accounts/{ad_account_id}/ads\"\n f\"?campaign_ids={campaign_id}&ad_group_ids={ad_group_id}\",\n query_parameters,\n )", "def dh_adviser_search(request, access_token, adviser_name):\n advisers = []\n url = settings.DATA_HUB_ADVISER_SEARCH_URL\n params = {\"autocomplete\": adviser_name}\n\n response = dh_request(request, access_token, \"GET\", url, {}, params=params)\n if not response.ok:\n return advisers, response.json()\n\n advisers = [\n {\n \"datahub_id\": adviser[\"id\"],\n \"name\": adviser[\"first_name\"],\n \"is_active\": adviser[\"is_active\"],\n }\n for adviser in response.json()[\"results\"]\n ]\n\n return advisers, None", "def _data(self):\n query = {\n 'object_provides': ISubscribableSection.__identifier__,\n 'sort_on' : 'sortable_title',\n }\n results = self.catalog.unrestrictedSearchResults(**query)\n all_requests = []\n for brain in results:\n section = brain.getObject()\n if getSecurityManager().checkPermission(\n permissions.ManagePortal, section):\n manager = SubscriptionsManager(section)\n requests = manager.getRequests()\n for r in requests:\n r['title'] = brain.Title\n r['description'] = brain.Description\n r['url'] = brain.getURL()\n all_requests.append(r)\n\n if all_requests:\n all_requests.sort(self._compareRequestDates)\n\n return all_requests", "def ad_list(request, web_id):\n\ttry:\n\t\tsite = Website.objects.get(pk=web_id)\n\t\tad_list = Adspace.objects.filter(website=site)\n\texcept Adspace.DoesNotExist:\n\t\traise Http404(\"Website does not exist\")\n\treturn render(request, 'ad_list.html', {'ad_list': ad_list})", "def list(request):\n\tlatest_ad_list = Adspace.objects.all()\n\tcontext = {'latest_ad_list': latest_ad_list}\n\t# TODO: FILTER BY PREFERENCES\n\treturn render(request, 'list.html', context)", "def get(self):\n rs = Con.getad_info()\n return jsonify({'result': rs})", "def get_dataset(self):\n return Adviser.objects.values(\n 'id',\n 'date_joined',\n 'first_name',\n 'last_login',\n 'last_name',\n 'telephone_number',\n 'contact_email',\n 'dit_team_id',\n 'is_active',\n 'sso_email_user_id',\n )", "def get_records(self, context, domain_id, criterion=None):", "def get_deals():\n deals = []\n\n url = 'https://www.dailyoffers.nl/solr/products/select/?facet.field=catId&facet.field=mainCatId&facet.field=shoId&wt=json&rows=10000000&facet=true&q=(active%3A1+AND+productType%3Adailydeal)+OR+(productType%3Aextradeal+AND+order_2%3A[1+TO+100000]+AND+startdate%3A[*+TO+NOW]+AND+enddate%3A[NOW+TO+*])&json.nl=map' # noqa: E501\n\n response = get(url)\n 
json_resp = json.loads(response.text)\n\n for item in json_resp['response']['docs']:\n title = item['title']\n price = item['newprice']\n shipping = item['shippingcosts']\n url = 'https://www.dailyoffers.nl{}'.format(item['productUrl'])\n deal = {'title': title, 'price': '{} ({} shipping)'.format(price, shipping), 'link': url}\n deals.append(deal)\n\n return deals", "def make_ad_dict(self, ads):\n ret = []\n for ad in ads:\n ret.append({'type': 'LoginDomainConfigV600', \"directoryBindingType\": \"USER_ACCOUNT\", 'name': ad['name'],\n 'authProtocol': 'AD', 'orgUnits': [],\n 'userNamingAttribute': 'UID', 'baseDN': ad['baseDN'],\n 'credential': {'userName': ad['userName'], 'password': ad['password']},\n 'directoryServers': [{'type': 'LoginDomainDirectoryServerInfoDto',\n 'directoryServerSSLPortNumber': ad['directoryServerSSLPortNumber'],\n 'directoryServerIpAddress': ad['directoryServerIpAddress'],\n 'directoryServerCertificateStatus': '',\n 'serverStatus': '',\n 'directoryServerCertificateBase64Data': ad[\n 'directoryServerCertificateBase64Data']}]})\n return ret", "def for_ads(self):\n return self.active().exclude(ad__isnull=True).distinct()", "def getAgencyBookingData(filter_on, travel_agency, label1, label2):\n df_list = []\n for i in range(0, countfile('data/pig_data/ARIMADataIATA/part')):\n result_file = 'data/pig_data/ARIMADataIATA/part-v001-o000-r-0000' + str(i)\n output_file = 'agent_data' + str(i) + '.csv'\n getfile(result_file, output_file)\n dataframe = filterFile(output_file, filter_on, travel_agency, label1, label2)\n df_list.append(dataframe)\n return pd.concat(df_list)", "def get_data():\n city = request.args.get('city', None)\n progress_status = request.args.get('progress_status', None)\n\n # We recover the investment data depending on the filter\n if city and progress_status:\n investments = models.Investment.query.filter(and_(models.Investment.ville==city, models.Investment.etat_d_avancement==progress_status)).all()\n elif city:\n investments = models.Investment.query.filter(models.Investment.ville==city).all()\n elif progress_status: \n investments = models.Investment.query.filter(models.Investment.etat_d_avancement==progress_status).all()\n else: \n investments = models.Investment.query.all()\n\n result = response.json_response(json.dumps(investments, cls=alchemy_json_encoder.AlchemyEncoder))\n\n return result", "def my_ads(request):\n\n liste = Ad.objects.filter(author=request.user, is_deleted=False).order_by('-pk')\n\n return render(request, 'polyclassifiedads/myads/list.html', {'liste': liste})", "def extract_ad_campaigns(\n client: Client,\n start_date: datetime,\n end_date: datetime,\n stat_granularity: str,\n include_current_day_stats: bool,\n):\n start_date_start_of_day = as_start_of_day(start_date)\n end_date_end_of_day = as_end_of_day(end_date)\n\n if include_current_day_stats and stat_granularity not in [\"total\", \"daily\"]:\n raise ValueError(\n \"include_current_day_stats can be set to True only when stat_granularity \"\n + \"is either 'total' or 'daily'\"\n )\n\n formatted_start_date = start_date_start_of_day.strftime(SKLIK_API_DATETIME_FMT)\n formatted_end_date = end_date_end_of_day.strftime(SKLIK_API_DATETIME_FMT)\n\n create_report_display_options = {\"statGranularity\": stat_granularity}\n if include_current_day_stats is not None:\n create_report_display_options[\n \"includeCurrentDayStats\"\n ] = include_current_day_stats\n\n create_report_data = client.call(\n \"campaigns.createReport\",\n [\n # restrictionFilter\n {\"dateFrom\": formatted_start_date, 
\"dateTo\": formatted_end_date},\n # displayOptions\n create_report_display_options,\n ],\n )\n\n if \"reportId\" not in create_report_data:\n raise ValueError(\"Could not create a new report\")\n report_id = create_report_data[\"reportId\"]\n\n # fetch report\n campaigns = _extract_paginated(\n client,\n \"campaigns.readReport\",\n [\n # reportId\n report_id,\n # displayOptions\n {\"displayColumns\": AD_CAMPAIGN_REPORT_COLUMNS},\n ],\n response_data_list_key=\"report\",\n )\n\n # campaigns.stats is an array, flatten by duplication\n campaigns = [\n # merge\n dict(**record, **recordStat)\n for record in campaigns\n for recordStat in record.get(\"stats\", [])\n ]\n\n for record in campaigns:\n # remove nested .stats\n del record[\"stats\"]\n # convert \".date\" to string if present\n if \"date\" in record:\n record[\"date\"] = str(record[\"date\"])\n\n return campaigns", "def get_audience_insights_data(\n self,\n page_source: str,\n ) -> list:\n audience_insights_data = []\n\n left_column_data = page_source.cssselect('div.demographics div.vertical-bar-panel')\n for data in left_column_data:\n categories = data.cssselect('div.vertical-bar-panel-header h3')\n details = data.cssselect('div.vertical-bar-chart-legend div.vertical-bar-label')\n # check data\n if (\n (len(categories) != 1) or\n (len(details) == 0)\n ):\n continue\n\n # get data\n category = categories[0].text.strip()\n for detail in details:\n labels = detail.cssselect('h6')\n numbers = detail.cssselect('h4')\n if (\n (len(labels) != 1) or\n (len(numbers) != 1)\n ):\n return []\n\n label = labels[0].text.strip()\n number = numbers[0].text.strip()\n number = round((float(number.replace('%', '').replace('< ', '')) / 100.0), 2)\n audience_insights_data.append(\n {\n 'twitter_id': self.tw_id,\n 'category': category,\n 'label': label,\n 'percent': number,\n 'target_date': self.target_month,\n 'created_at': self.today,\n }\n )\n\n right_column_data = page_source.cssselect('div.demographics div.top-n-panel')\n for data in right_column_data:\n categories = data.cssselect('div.top-n-panel-header h3')\n details = data.cssselect('div.top-n-panel-table table tbody tr')\n # check data\n if (\n (len(categories) != 1) or\n (len(details) == 0)\n ):\n continue\n\n # get data\n category = categories[0].text.strip()\n for detail in details:\n labels = detail.cssselect('td.top-n-panel-name span')\n numbers = detail.cssselect('td.statistic-cell span')\n if (\n (len(labels) != 1) or\n (len(numbers) != 1)\n ):\n return []\n\n label = labels[0].text.strip()\n number = numbers[0].text.strip()\n number = round((float(number.replace('%', '').replace('< ', '')) / 100.0), 2)\n audience_insights_data.append(\n {\n 'twitter_id': self.tw_id,\n 'category': category,\n 'label': label,\n 'percent': number,\n 'target_date': self.target_month,\n 'created_at': self.today,\n }\n )\n\n return audience_insights_data", "def get_articleData(analytics, s_dt, e_dt, token = None):\n response = analytics.reports().batchGet(\n body={\n 'reportRequests': [\n {\n 'viewId': VIEW_ID,\n 'pageToken': token,\n 'pageSize': 100000,\n 'samplingLevel': 'LARGE',\n 'dateRanges': [{'startDate': s_dt, 'endDate': e_dt}],\n 'segments':[{'segmentId': '<SEGMENT_ID>'}],\n 'metrics': [{'expression': 'ga:uniquePageviews'},\n {'expression': 'ga:users'},\n {'expression': 'ga:sessions'},],\n 'dimensions': [{'name': 'ga:country'},\n {'name': 'ga:hostname'},\n {'name': 'ga:pagePath'},\n {'name': 'ga:pageTitle'},\n {'name': 'ga:yearMonth'},\n {'name': 'ga:previousPagePath'},\n {'name': 'ga:dimension1'},\n 
{'name': 'ga:segment'}]\n }]\n }\n ).execute()\n \n # Check for 'nextPageToken'\n try:\n if response['reports'][0]['nextPageToken']:\n token = response['reports'][0]['nextPageToken']\n except KeyError:\n pass\n\n aRows = response['reports'][0]['data']['rows']\n global aDAllRows\n aDAllRows.extend(aRows)\n \n # recursive function\n try:\n if response['reports'][0]['nextPageToken']:\n get_articleData(analytics, s_dt, e_dt, token)\n except KeyError:\n pass\n\n return aDAllRows" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Debug helper function prints fields and values of request form
def print_request_form(input_request):
    f = input_request.form
    for key in f.keys():
        for value in f.getlist(key):
            print key, ":", value
[ "def debug_task(self):\n print('Request: {0!r}'.format(self.request))", "def print_html_form ():\n print(\"content-type: text/html\\n\")\n print(HTML_TEMPLATE % {'SCRIPT_NAME':os.environ['SCRIPT_NAME']})", "def show_parameters(self):\n for p in self.parameters:\n print p", "def get_parameters_form():", "def log_output(self):\n\t\tpretty_output = json.dumps(self.nested_params, sort_keys=False, indent=4, separators=(',', ': '))\n\t\tprint(pretty_output)", "def get_form_details(form):\r\n details = {}\r\n try:\r\n action = form.attrs.get(\"action\").lower()\r\n except:\r\n action = None\r\n method = form.attrs.get(\"method\", \"get\").lower()\r\n inputs = []\r\n for input_tag in form.find_all(\"input\"):\r\n input_type = input_tag.attrs.get(\"type\", \"text\")\r\n input_name = input_tag.attrs.get(\"name\")\r\n input_value = input_tag.attrs.get(\"value\", \"\")\r\n inputs.append({\"type\": input_type, \"name\": input_name, \"value\": input_value})\r\n details[\"action\"] = action\r\n details[\"method\"] = method\r\n details[\"inputs\"] = inputs\r\n return details", "def show_vars(self):\n def _show(name, value):\n print('\\n> _show(%s):\\n' % name)\n print(value)\n\n _show('self.config', self.config)\n _show('self.datastore', self.datastore)\n _show('self.metadata', self.metadata)\n _show('self.output', self.output)\n _show('self.output_format', self.output_format)\n _show('self.datatable', self.datatable)\n _show('self.dataframe', self.dataframe)", "def logDetails(self):\n for k,v in self._parser.getDetailsDict().items():\n self._log.debug(\"> %11s : %s\" % (k, str(v)[:50]))", "def printParams(self):\n\t\tprint(\"Total size:\", self.totalSize)\n\t\tprint(\"Packet info:\\n\", \"\\n\".join([str(i) for i in self.packetInfo]))\n\t\tprint(\"sizeLst:\", self.sizeLst)\n\t\tprint(\"idIx:\", self.idIx)\n\t\tprint(\"nonDataIx\", self.nonDataIx)\n\t\tprint(\"nonDataMask\", self.nonDataMask)\n\t\tprint(\"packetCheck\", self.packetCheck)\n\t\tprint(\"dataIx\", self.dataIx)\n\t\tprint(\"packetTypes\", self.packetTypes)\n\t\tprint(\"dataFormat\", self.dataFormat)", "def render(self, request):\n IP = request.getClientIP()\n html = \"\"\n html += \"<html>Hello, world!</html><br><br>\"\n html += \"Keys are...<br>\"\n for key in request.args.keys():\n html += \"%s \" % key\n html += \"<br>uri = %s<br>\" % request.uri\n html += \"<br>method = %s<br>\" % request.method\n html += \"<br>path = %s<br>\" % request.path\n \n field_value = request.args.get('Field', '')\n html += \"<br>Field = %s<br>\" % field_value\n html += \"<br>ClientIP = %s<br>\" % IP\n button_val = request.args.get('name_submit','') \n html += \"<br>button_val = %s<br>\" % button_val\n form = \"\"\"\n <FORM ACTION=\".\" METHOD=\"POST\" ENCTYPE=\"application/x-www-form-urlencoded\">\n<P>Test input: <INPUT TYPE=\"TEXT\" NAME=\"Field\" SIZE=\"25\"><BR>\n<INPUT TYPE=\"SUBMIT\" NAME=\"name_submit\" VALUE=\"Submit\">\n</FORM>\n \"\"\"\n return html + form", "def print_parameters(self):\n\n print(\"**********************************\")\n print(\"* Parameters\\n*\")\n print(\"* Simulation time: {}s\".format(self.total_simtime/1000.0))\n print(\"* Simintervall: {}\".format(SIM_INTERVAL))\n print(\"* Timestep: {}\".format(TIMESTEP))\n\n print(\"* \")\n self.model.print_structure()\n print(\"**********************************\\n\\n\")", "def _do_log_request(self, request):\n if self.log_request_attrs:\n cookies = getattr(request, \"_cookies\", {})\n headers = getattr(request, \"headers\", {})\n lattrs = \", \".join(self.log_request_attrs).format(\n 
url=request.url,\n body_size=len(request.body or \"\"),\n method=request.method,\n headers=self._clean_headers(headers=headers),\n cookies=self._clean_headers(headers=cookies),\n )\n self.LOG.debug(f\"REQUEST ATTRS: {lattrs}\")\n\n if self.LOG_REQUEST_BODY:\n self.LOG.debug(\n self.log_body(body=request.body, body_type=\"REQUEST\", src=request),\n )", "def print_debug(self):\n print('\\n'.join(self.debug_buffer))", "def param2form(self, dico, verbose=DEBUG):\n myform = {} # a dico to handle widgets in the form\n for vb in self.form.children:\n myform[vb.description] = vb\n keys = myform.keys() # keys of form\n # then propagate\n for k,v in dico.items():\n k = k.replace('_',' ')\n if k not in keys:\n if verbose:\n print ('key not in form:',k)\n else:\n myform[k].value = v", "def pretty_print_request_json(request):\n log_api.info(\n \"{}\\n{}\\n\\n{}\\n\\n{}\\n\".format(\n \"-----------Request----------->\",\n request.method + \" \" + request.url,\n \"\\n\".join(\"{}: {}\".format(k, v) for k, v in request.headers.items()),\n json.dumps(json.loads(request.body), indent=4),\n )\n )", "def print_options(cls):\n print(\"Available basic options:\\n\")\n for option in cls.allowed_options:\n input_type = cls.allowed_options[option][1]\n input_structure = cls.required_inputs[input_type]\n print(\"{:s}\".format(option))\n print(\"-\" * len(option))\n for k, v in input_structure.items():\n print(f\" {k} ({v.__name__})\")\n print()", "def log_parameters(self):\n logging.info(\"### TICKER = %s \" % self.ticker)\n logging.info(\"### STRIKE = %f \" % self.strike_price)\n logging.info(\"### DIVIDEND = %f \" % self.dividend)\n logging.info(\"### VOLATILITY = %f \" % self.volatility)\n logging.info(\"### TIME TO MATURITY = %f \" % self.time_to_maturity)\n logging.info(\"### RISK FREE RATE = %f \" % self.risk_free_rate)\n logging.info(\"### SPOT PRICE = %f \" % self.spot_price)", "def get_defined_fields(self):\n if self.action in ['retrieve', 'update']:\n # For detail and edit api form fields used while submitting\n # determination should be used.\n determination = self.get_object()\n return determination.form_fields\n submission = self.get_submission_object()\n return get_fields_for_stage(submission)", "def show_request(request: http.Request):\n return {\n 'method': request.method,\n 'url': request.url,\n 'headers': dict(request.headers),\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates ad object from passed form and saves it to the database
def update_ad_from_form_info(ad, form):
    ad.text = form["ad_text"]
    ad.price_cents = int((float(form["ad_price"]) * 100))
    ad.contact_email = form["contact_email"]
    # default to email for now
    ad.primary_contact = ad.contact_email
    ad.contact_name = str(form["contact_name"])
    ad.contact_phone = str(form["contact_phone"])
    ad.city_id = int(form["select_city"])
    ad.sub_category_id = int(form["sub-category-selected"])
    ad.title = form["ad_title"]
    database.update_ad(ad)
[ "def _edit(request, id, Form, secret_key=None):\n\n if not secret_key and not request.user.pk:\n raise Http404\n\n try:\n object = Ad.objects.get(pk=id, is_deleted=False)\n\n if not request.user.is_staff and ((not secret_key and (object.author != request.user or object.secret_key)) or (secret_key and (object.secret_key != secret_key or object.author))):\n raise Http404\n except:\n object = Ad(offline_date=datetime.date.today() + datetime.timedelta(days=30))\n\n if not secret_key:\n object.author = request.user\n else:\n object.secret_key = secret_key\n\n if request.method == 'POST': # If the form has been submitted...\n form = Form(request.POST, instance=object)\n\n tags = request.POST.get('tags')\n\n file_key = request.POST['file_key']\n\n if form.is_valid(): # If the form is valid\n was_a_new_object = not form.instance.pk\n object = form.save()\n\n object.tags.clear()\n\n for t in tags.split(','):\n if t.strip():\n tag, __ = AdTag.objects.get_or_create(tag=t.strip())\n object.tags.add(tag)\n\n for file_pk in request.session.get('pca_files_%s' % (file_key,), []):\n photo = AdPhoto.objects.get(pk=file_pk)\n photo.ad = object\n photo.save()\n\n for photo in object.adphoto_set.all():\n if photo.pk not in request.session.get('pca_files_%s' % (file_key,), []):\n os.unlink(photo.file.path)\n photo.delete()\n\n # Clean up session\n del request.session['pca_files_%s' % (file_key,)]\n\n messages.success(request, _('The ad has been saved !'))\n\n if object.is_validated:\n object.is_validated = False\n object.save()\n\n messages.warning(request, _('The ad has been put offline until validation !'))\n\n send_templated_mail(_('AGEPoly\\'s classified ads: Ad ID %s need to be validated again') % (object.id,), settings.POLYCLASSIFIEDADS_EMAIL_FROM, [settings.POLYCLASSIFIEDADS_EMAIL_MANAGERS], 'ad_to_revalidate', {'ad': object, 'site': get_current_site(request)})\n else:\n\n send_templated_mail(_('AGEPoly\\'s classified ads: New ad with ID %s need to be validated') % (object.id,), settings.POLYCLASSIFIEDADS_EMAIL_FROM, [settings.POLYCLASSIFIEDADS_EMAIL_MANAGERS], 'new_ad_to_validate', {'ad': object, 'site': get_current_site(request)})\n\n if secret_key and was_a_new_object:\n send_templated_mail(_('AGEPoly\\'s classified ads: New ad (%s)') % (object.title,), settings.POLYCLASSIFIEDADS_EMAIL_FROM, [object.contact_email], 'external_ad', {'ad': object, 'site': get_current_site(request)})\n\n if secret_key:\n r = redirect('polyclassifiedads.views.external_show', id=object.pk)\n\n key, val = r._headers['location']\n\n r._headers['location'] = (key, '%s?secret_key=%s' % (val, secret_key,)) # Todo: Hackpourri\n\n return r\n return redirect('polyclassifiedads.views.show', id=object.pk)\n else:\n form = Form(instance=object)\n\n tags = ','.join([tag.tag for tag in object.tags.all()]) if object.pk else ''\n\n file_key = str(uuid.uuid4())\n\n request.session['pca_files_%s' % (file_key,)] = [f.pk for f in object.adphoto_set.all()] if object.pk else []\n\n files = [AdPhoto.objects.get(pk=pk) for pk in request.session['pca_files_%s' % (file_key,)]]\n\n date_format = form.fields['offline_date'].widget.format.replace('%Y', 'yyyy').replace('%m', 'mm').replace('%d', 'dd')\n\n return render(request, 'polyclassifiedads/myads/edit.html', {'form': form, 'date_format': date_format, 'tags': tags, 'secret_key': secret_key, 'file_key': file_key, 'files': files})", "def update_deal(id):\n if request.method == 'POST':\n name = request.form['post_name']\n producer = request.form['post_producer']\n new_price = 
request.form['post_price_new']\n old_price = request.form['post_price_old']\n category = request.form['post_category']\n link = request.form['post_link']\n update_deal_data(id, name, producer, new_price, old_price, link, category)\n return redirect(url_for(\"show_deal\", id=id))", "def update(self, obj, id):", "def update(self):\n\n self.fields = self.getOverriderFields()\n z3c.form.form.EditForm.update(self)", "def update_or_create(self, ad_data: dict, search_phrase: str) -> None:\n sp_obj = SPManager().get_or_create(search_phrase)\n ad_obj, created = Ad.objects.update_or_create(\n id=ad_data['id'],\n defaults=ad_data,\n )\n sp_obj.ads.add(ad_obj)", "def form_valid(self, form):\n self.object = form.save(\n author=ReviewAuthor.objects.get(user=self.request.user),\n book=Book.objects.get(id=self.kwargs['pk']))\n return HttpResponseRedirect(self.get_success_url())", "def update_regatta(regatta_id, form):\n regatta = get_regatta_from_regatta_id(regatta_id)\n # Udpate each field in regatta:\n regatta.name = form['name']\n # Sqlite apparently outputs strings for date fields\n # so need to convert to Python datetime for datebase type\n regatta.date = datetime.datetime.strptime(\n form['date'], '%Y-%m-%d')\n regatta.season_id = form['season_id']\n regatta.weblink = form['weblink']\n regatta.description = form['description']\n session.add(regatta)\n session.commit()\n return", "def form_valid(self, form, ingredient_form, instruction_form):\n\n self.object = form.save()\n ingredient_form.instance = self.object\n ingredient_form.save()\n instruction_form.instance = self.object\n instruction_form.save()\n return HttpResponseRedirect(\n reverse('mealplanner:recipe-detail', kwargs={'pk': self.object.pk})\n )", "def update_instance(self, instance: Model, field: Field, value: Any):", "def update (self, anObject):\n self.server.sql (\"\"\"update Department\n set deptCode = %s,\n name = %s,\n managerID = %s\n where departmentID = %d\"\"\" \\\n % ( \\\n self.sqlInt (anObject.deptCode),\n self.sqlString (anObject.name),\n self.sqlInt (anObject.managerID),\n anObject.departmentID\n ))", "def update(self, instance, validated_data):\n instance.focus = validated_data.get('focus', instance.focus)\n instance.abbrev = validated_data.get('abbrev', instance.abbrev)\n instance.slug = validated_data.get('slug', instance.slug)\n instance.is_active = validated_data.get('is_active', instance.is_active)\n\n instance.save()\n return instance", "def form_valid(self, form, ingredient_form, instruction_form):\n\n self.object = form.save(commit=False)\n self.object.author = self.request.user\n self.object.save()\n ingredient_form.instance = self.object\n ingredient_form.save()\n instruction_form.instance = self.object\n instruction_form.save()\n return HttpResponseRedirect(\n reverse('mealplanner:recipe-detail', kwargs={'pk': self.object.pk})\n )", "def update(self, instance, validated_data):\n instance.category = validated_data.get('category', instance.category)\n instance.abbrev = validated_data.get('abbrev', instance.abbrev)\n instance.slug = validated_data.get('slug', instance.slug)\n instance.color = validated_data.get('color', instance.color)\n instance.description = validated_data.get('description', instance.description)\n instance.is_active = validated_data.get('is_active', instance.is_active)\n\n instance.save()\n return instance", "def form_valid(self, form, other_form):\n\n self.object = form.save()\n\n ## Set pointer to master record and save the other object\n self.other_object = other_form.save(commit=False)\n 
self.other_object.pk = self.object.pk\n self.other_object.save()\n\n return HttpResponseRedirect(self.get_success_url())", "def _update_fields(self):\n if self.id:\n self._update_doc_info()\n self._set_current_status()", "def test_update_model_with_form(self):\n pass", "def save_model(self, request, obj, form, change):\n obj.save(request=request)", "def test_update_dog_with_form(self):\n pass", "def save(self, commit=True, should_refresh=True, *args, **kwargs):\n instance = self.instance\n if instance is None or instance.id is None:\n # in case there is not an instance, create a new one\n instance = super(AbstractModelInstanceUpdateForm, self).save(commit=commit, *args, **kwargs)\n return instance\n for field in self._save_fields:\n if field in self.cleaned_data:\n value = self.cleaned_data[field]\n instance.__setattr__(field, value)\n if commit:\n instance.save(update_fields=self._save_fields)\n if should_refresh:\n from htk.utils.general import refresh\n instance = refresh(instance)\n return instance" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Debug helper function prints all fields of current login_session
def print_current_session(printing_on = options.DEBUG_PRINT_ON):
    if not printing_on:
        return
    print "current login_session: "
    for i in login_session:
        print str(i) + " : " + str(login_session[i])
[ "def info(self):\n print \"---SESSION DETAILS---\"\n print \"URL\",self.session.get_full_url()\n print \"HEADERS\",self.session.header_items()\n print \"METHOD\",self.session.get_method()\n print \"DATA\",self.session.get_data()\n print \"TYPE\",self.session.get_type()\n print \"SELECTOR\",self.session.get_selector()\n print \"---------------------\"", "def show_session():\n\n return dict(session)", "def logDetails(self):\n for k,v in self._parser.getDetailsDict().items():\n self._log.debug(\"> %11s : %s\" % (k, str(v)[:50]))", "def show_vars(self):\n def _show(name, value):\n print('\\n> _show(%s):\\n' % name)\n print(value)\n\n _show('self.config', self.config)\n _show('self.datastore', self.datastore)\n _show('self.metadata', self.metadata)\n _show('self.output', self.output)\n _show('self.output_format', self.output_format)\n _show('self.datatable', self.datatable)\n _show('self.dataframe', self.dataframe)", "def print_list(auth_list):\n print '*' * 15 + str(len(auth_list)) + ' passwords found!' + '*' * 15\n for auth_info in auth_list:\n print 'Link : ' + auth_info['link']\n print 'User name : ' + auth_info['username']\n print 'Password : ' + auth_info['password']\n print '*' * 30", "def print_info():\n info_dict = Framework.get_info()\n for actor in info_dict:\n print(actor, info_dict[actor])", "def print_debug(self):\n print('\\n'.join(self.debug_buffer))", "def displayLogin(self):\n self.l1.setVisible(False)\n self.l2.setVisible(False)\n self.l3.setVisible(False)\n self.logl1.setVisible(True)\n self.adminl1.setVisible(False)\n\n self.adminUsername.setVisible(True)\n self.adminPassword.setVisible(True)\n self.log.setVisible(True)\n\n self.lRecharge.setVisible(False)\n self.bRecharge.setVisible(False)\n self.moneyBox.setVisible(False)\n\n self.username.setVisible(False)\n self.name.setVisible(False)\n self.surname.setVisible(False)\n self.bCreateAccount.setVisible(False)\n\n self.lAddDevice.setVisible(False)\n self.username2.setVisible(False)\n self.bAddDevice.setVisible(False)", "def showDetails(self):\n for k,v in self._parser.getDetailsDict().items():\n print \"%11s : %s\" % (k, str(v)[:60])", "def get_login_info(self):\n username = raw_input(\"Username: \")\n password = getpass.getpass(\"Password:\")\n return (username, password)", "def show_status(self):\n if self._logged_in:\n print(f'{self._name} is logged in')\n else:\n print(f'{self._name} is NOT logged in')", "def log_output(self):\n\t\tpretty_output = json.dumps(self.nested_params, sort_keys=False, indent=4, separators=(',', ': '))\n\t\tprint(pretty_output)", "def print_student_info(self):\n print(\"ID: %s\" % self.__ID)\n print(\"Name: %s, %s\" % (self.__last_name, self.__first_name))", "def print(self):\n print(\"----- KVClient Info -----\")\n print(\"client id: %d\" % self.get_id())\n print(\"data:\")\n for name, data in self._data_store.items():\n print(name)\n print(data)\n print(\"-------------------------\")", "def describe_user(self):\n description = f\"First name : {self.first_name.title()}\\n\"\n description += f\"Last name : {self.last_name.title()}\\n\"\n description += f\"Age : {self.age}\\n\"\n description += f\"Heigth : {self.heigth}\\n\"\n description += f\"Weight : {self.weight}\\n\"\n description += f\"Login attempts : {self.login_attempts}\\n\"\n print(description)", "def show_session():\n if CONFIGURATION.DEBUG:\n try:\n return '%s' % (session['user'])\n except KeyError:\n return 'Not logged in'\n else:\n redirect(url_for('.root'))", "def pprint(self, tag=None):\n if tag is not None:\n if tag not in 
self.db:\n return\n idx = self.db.index(tag)\n tags = self.db[idx:idx+1]\n else:\n tags = self.db\n d = {0: \"OUT\", 1: \"IN\"}\n for tag, sessionobj in tags.iteritems():\n print(\"TAG: {0}\".format(tag))\n for callid, ifaceids in sessionobj.callids.iteritems():\n print(\" CALL-ID: {0}\".format(callid))\n for ifaceid, infos in ifaceids.iteritems():\n print(\" IFACEID: {0}\".format(ifaceid))\n for i, info in enumerate(infos):\n print(\" {0:>3}: {1}\".format(d[i], info))\n print(\" SUM: {0}\\n\".format(self.db[tag]))", "def debug_values(self):\n\t\treturn \"HANDSHAKE\" + \\\n\t\t\t\"\\n\\tRAW\" + \\\n\t\t\t\"\\n\\t\\tpstrlen (bytes = {}): {}\".format(\n\t\t\t\tlen(self.pstrlen), format_hex_output(self.pstrlen)) + \\\n\t\t\t\"\\n\\t\\tpstr (bytes = {}): {}\".format(\n\t\t\t\tlen(self.pstr), format_hex_output(self.pstr)) + \\\n\t\t\t\"\\n\\t\\treserved (bytes = {}): {}\".format(\n\t\t\t\tlen(self.reserved), format_hex_output(self.reserved)) + \\\n\t\t\t\"\\n\\t\\tinfo_hash (bytes = {}): {}\".format(\n\t\t\t\tlen(self.info_hash),format_hex_output(self.info_hash)) + \\\n\t\t\t\"\\n\\t\\tpeer_id (bytes = {}): {}\".format(\n\t\t\t\tlen(self.peer_id), format_hex_output(self.peer_id))", "def print_state(self):\n print(self.board.get_other_player_name(self.board.current_player.name) + \n \" player action: \" + self.last_command.strip()) \n print(self.board)\n self.print_metadata()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs application in deployed configuration on Heroku
def run_heroku():
    app.secret_key = "secret key"  # used to sign sessions, need to change it to a properly generated key in production

    if not create_database.table_exists("user"):
        create_database.connect_to_db_and_populate_initial_data()
        populate_database.populate_application_test_data()

    #app.debug = True
    #app.run(port=5000)
    port = int(os.environ.get("PORT", 33507))
    app.run(host="0.0.0.0", port=port)
[ "def run_web_prod():\n\n _execvp([\n \"gunicorn\", \"--config\", \"python:src.gunicorn_cfg\", \"src.wsgi:app\"\n ])", "def deploy():\n\n puts(blue(\"Deploying to Heroku.\"))\n local('git push heroku HEAD:master')\n\n # Run collectstatic on Heroku with the staging environment\n set_staging = 'DJANGO_SETTINGS_MODULE=coursereviews.settings.staging'\n collectstatic_cmd = 'python manage.py collectstatic --noinput'\n local('heroku run {0} {1}'.format(set_staging, collectstatic_cmd))\n\n puts(blue('Checking for migrations.'))\n migrations = local('heroku run python manage.py showmigrations --plan', capture=True)\n if any(filter(lambda m: '[X]' not in m, migrations.split('\\n'))):\n local('heroku maintenance:on')\n local('heroku run python manage.py migrate')\n local('heroku maintenance:off')\n else:\n puts(blue('Nothing to migrate.'))\n\n local('heroku ps')", "def run():\n LOG.info('initiating app...')\n app.run(host=current_app.config['HOST'],\n port=current_app.config['PORT'], debug=current_app.config['DEBUG'])", "def deploy_webapp():\r\n # require('settings', provided_by=[production, staging])\r\n # require('branch', provided_by=[stable, master, branch])\r\n\r\n # with settings(warn_only=True):\r\n # maintenance_up()\r\n\r\n upload_and_explode_code_bundle()\r\n # Apply requirements.txt, if it exists\r\n # _install_pip_requirements()\r\n \r\n # Restart the web server with the latest code\r\n stop_webserver()\r\n symlink_current_release()\r\n # maintenance_down()\r\n start_webserver()", "def deploy_eis_app():", "def main() -> None:\n\n # Import config from ENV\n host = os.getenv(\"pastrami_host\", \"0.0.0.0\")\n port = int(os.getenv(\"pastrami_port\", \"8080\"))\n debug = os.getenv(\"pastrami_uvicorn_debug\", None) is not None\n\n if debug:\n reload = True\n log_level = \"debug\"\n else:\n reload = False\n log_level = \"warning\"\n\n # Launch webapp through uvicorn\n uvicorn.run(\n \"pastrami:create_app\",\n host=host,\n port=port,\n log_level=log_level,\n reload=reload,\n factory=True,\n server_header=False,\n proxy_headers=True,\n )", "def heroku_setup():\n try:\n # If not already a heroku app directory\n if subprocess.Popen(['heroku', 'ps'], stderr=subprocess.PIPE).wait():\n subprocess.check_call(['heroku', 'create'])\n except OSError as e:\n if e.errno == errno.ENOENT:\n print 'Error: heroku is not installed. Please install and setup the Heroku CLI and then rerun this. 
Exiting...'\n sys.exit(1)\n else:\n pass\n\n config_vars = []\n for pass_var in passwords:\n config_vars.append(pass_var + '=' + passwords[pass_var])\n subprocess.check_output(['heroku', 'config:set'] + config_vars)\n subprocess.check_call(['git', 'push', 'heroku', 'master'])\n subprocess.check_call(['heroku', 'ps:scale', 'worker=1'])", "def is_heroku():\n return 'FOREMAN_WORKER_NAME' in os.environ", "def deploy():\n local('appcfg.py update src', capture=False)", "def deploy():\n code_dir = '/srv/http/web0263/mentoki_live/mentoki'\n with cd(code_dir):\n run(\"git pull\")\n run(\"touch app.wsgi\")", "def start_webserver():\n with cd('%(path)s/releases/current/%(app_name)s' % env):\n run('gunicorn_django --config %(path)s/releases/current/config/gunicorn.conf' % env)", "def main():\n application = webapp.WSGIApplication(ROUTES, debug=True)\n run_wsgi_app(application)", "def deploy_app():\r\n upload_and_explode_code_bundle()\r\n symlink_current_release()", "def deploy_configurations():\r\n deploy_app_configurations()\r\n # Serve the media folder if necessary\r\n # _create_media_link()\r\n restart_webserver()", "def deploy():\n require('settings', provided_by=[production, staging])\n\n render()\n _gzip_www()\n _deploy_to_s3()", "def serve():\n if flask_app.config[\"WSGI_SERVER\"] == \"flask\":\n flask_app.run(debug=True)\n elif flask_app.config[\"WSGI_SERVER\"] == \"gunicorn\":\n options = {\n \"bind\": \"%s:%s\"\n % (flask_app.config[\"HOST\"], str(flask_app.config[\"PORT\"])),\n \"workers\": flask_app.config[\"WORKERS\"],\n \"accesslog\": \"-\",\n \"errorlog\": \"-\",\n }\n WSGIApplication(flask_app, options).run()", "def run_debug():\n app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0 # debug option to be sure updates are applied right away\n app.debug = True\n app.secret_key = \"secret key\" # used to sign sessions, need to change it to a properly generated key in production\n\n if options.LOCAL_HOST == options.LOCAL_HOST_WINDOWS:\n app.run(port=5000)\n elif options.LOCAL_HOST == options.LOCAL_HOST_VM:\n app.run(host='0.0.0.0', port=5000)\n else:\n app.run(port=5000)", "def web():\n from mephisto.client.full.server import app\n\n app.run(debug=False)", "def main():\r\n app.run(host='0.0.0.0')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs application with debug on, normally to be used for local debugging and development
def run_debug():
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0  # debug option to be sure updates are applied right away
    app.debug = True
    app.secret_key = "secret key"  # used to sign sessions, need to change it to a properly generated key in production

    if options.LOCAL_HOST == options.LOCAL_HOST_WINDOWS:
        app.run(port=5000)
    elif options.LOCAL_HOST == options.LOCAL_HOST_VM:
        app.run(host='0.0.0.0', port=5000)
    else:
        app.run(port=5000)
[ "def set_debug_on():\n global _debug\n _debug = True\n print 'Debug on.'", "def debug_cli():", "def on_runDebugMenuItem_activate(self,*args):\n self.run_mode = \"Debug\"\n self.set_run_menu(running=True,status=\"Debugging...\",debug=True)\n self._ui.interpreter = piedit.interpreter.Interpreter()\n self._ui.interpreter.debug.DEBUG = True\n self._ui.interpreter.run_program(pixels=self._ui.pixels,width=self._ui.width,height=self._ui.height,start=False)\n self._ui.highlight_pixel(0,0)", "def debug(program: Program, settings: Settings) -> None:\n debugger = Debugger(program, settings)\n Shell(debugger, settings).loop()", "def set_debug(cls, on=True):\n cls.debug = on", "def devserver():\n setup_logging('devserver')\n app = create_app(parse_options())\n log_messages(app, OPTIONS['--port'])\n app.run(host = '0.0.0.0', port = int(OPTIONS['--port']))", "def setDebug(debug: bool = False) -> None:\n if debug:\n debugFlag = True\n else:\n debugFlag = False", "def setTFEDebugFlag(debug: bool = False) -> None:\n global __TFE_DEBUG__\n if debug is True:\n logger.info(\"Running in DEBUG mode\")\n\n __TFE_DEBUG__ = debug", "def isDebug():\n return isLocal() or getMelangeVersion() == 'devvin'", "def test001_jumpscale_debug(self):\n\n # if we want to change debug value:\n # hrd = j.core.hrd.get('/opt/jumpscale7/hrd/system/system.hrd'); hrd.set('debug','0')\n self.lg('%s STARTED' % self._testID)\n\n self.lg('start new application')\n j.application.start('jsshell')\n\n self.lg('check if jumpscale\\'s debug mode is off, should succeed ')\n self.assertEqual(j.application.debug, False)\n\n self.lg('%s ENDED' % self._testID)", "def run():\n LOG.info('initiating app...')\n app.run(host=current_app.config['HOST'],\n port=current_app.config['PORT'], debug=current_app.config['DEBUG'])", "def toggle_debug():\n global DEBUG\n if DEBUG:\n DEBUG = False\n print(\"debug disabled\")\n else:\n DEBUG = True\n print(\"debug enabled\")", "def startProgram(debug=False):\n console_ui = UIConsoleFactory().initUI(debug)\n\n GameUseCase(console_ui)", "def build_and_debug(main_name):\n\n # STEP 1.0 get main name\n if main_name is None:\n GNATemulator.__error_exit(msg=\"Main not specified.\")\n return\n\n # STEP 1.5 Build it\n\n try:\n yield GNATemulator.build(main_name)\n except RuntimeError:\n # Build error, we stop there\n return\n\n binary = GPS.File(main_name).executable_path.path\n # STEP 2 Switch to the \"Debug\" perspective To have GNATemu console in\n # the debugger perspective.\n\n GPS.MDI.load_perspective(\"Debug\")\n\n # STEP 2 load with Emulator\n debug_port = GPS.Project.root().get_attribute_as_string(\n package=\"Emulator\", attribute=\"Debug_Port\")\n\n # TODO: remove this fall-back once GNATemulator supports the\n # new 'Debug_Port' attribute (Fabien's task)\n if debug_port == \"\":\n debug_port = \"1234\"\n\n yield GNATemulator.run_gnatemu([\"--freeze-on-startup\",\n \"--gdb=%s\" % debug_port,\n binary])\n\n log(\"... done.\")\n\n # STEP 3 launch the debugger\n try:\n debugger_promise = promises.DebuggerWrapper(\n GPS.File(binary),\n remote_target=\"localhost:\" + debug_port,\n remote_protocol=\"remote\")\n except Exception:\n GNATemulator.__error_exit(\"Could not initialize the debugger.\")\n return\n\n # block execution until debugger is free\n r3 = yield debugger_promise.wait_and_send(block=True)\n if not r3:\n GNATemulator.__error_exit(\"Could not initialize the debugger.\")\n return\n\n log(\"... 
done.\")", "def setDebugMode(self, debugMode): \n self.debug = debugMode", "def set_debug_off():\n global _debug\n _debug = False\n print 'Debug off.'", "async def setup_debug_environment(bot):\n _setup_debug_environment(bot)\n if not bot.use_verbose_output:\n for handler in logger.handlers:\n if handler.get_name() == 'jb_log_stream':\n handler.setLevel(logging.INFO)", "def setDebugMode(self, debug):\n return _core.CGPkronSum_setDebugMode(self, debug)", "def setDebug(cls, isDebug):\n \n cls.isDebug = isDebug" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function reads all xml files in the given directory path, tokenizes the tweets, and also preprocesses the tweets
def read_user_tweets(dir_path):
    tweet_dict = {}
    words = []
    tokenize_dict = {}
    user_tweets = ""
    i = 0
    cachedStopWords = stopwords.words("english")
    # print(cachedStopWords)  # print stop words
    # loop over the user files
    for filename in os.listdir(dir_path):
        # skip files that are not xml
        if filename.endswith(".xml"):
            dom = ElementTree.parse(dir_path+filename)
            tweets = dom.find('documents')
            # loop over the tweets of one user
            for tweet in tweets:
                # concatenate tweets of one user by new line
                user_tweets = user_tweets + "\n" + (tweet.text).lower()
            # remove punctuation and numbers
            user_tweets = re.sub(r'[^\w\s]', '', user_tweets)
            user_tweets = re.sub(r'[0-9]', '', user_tweets)
            # cut '.xml' from file name to get user value as the same as in txt file
            filename = filename[:-4]
            # lowercase the text
            tweet_dict[filename] = user_tweets.lower()
            # tokenize user tweets
            tokenize = word_tokenize(user_tweets)
            tokenize = [word for word in tokenize if not (word.startswith('http') or word.startswith('amp') or word.startswith('xx'))]
            tokenize_dict[filename] = tokenize
            i += 1
            if i % 100 == 0:
                print(i)
            words += [word for word in tokenize_dict[filename] if word not in cachedStopWords]
            user_tweets = ""

    return tweet_dict, words
[ "def gettweets(self, path):\n #tweet_folder = 'tweets'\n tweet_folder = 'tweets_analyze'\n tweet_folder1 = 'tweets'\n for (root, dirs, files) in os.walk(path):\n if \"content\" in root and \"nytimes\" not in root:\n for f in files:\n idstr = f.split('_')[0]\n if not os.path.exists(root+'/../'+tweet_folder):\n os.mkdir(root+'/../'+tweet_folder)\n os.mkdir(root+'/../'+tweet_folder1)\n f1 = open(root+'/'+f, 'r')\n lines = f1.readlines()\n p = root+'/../'+tweet_folder+'/'\n p_objs = root+'/../'+tweet_folder1+'/'\n self.genrelatedtweets(idstr, p, p_objs, lines)\n f1.close()", "def parse_all_tweets(directory='./data/trump_tweet_data_archive/', output='data/raw_tweets.txt'):\n for filename in sorted(os.listdir(directory)):\n if filename.endswith('.json'):\n read_tweets(directory+filename, output)", "def read_xml_files(files, label = \"male\"):\n tweets = []\n for file in tqdm(files):\n path = 'Data/pan17/en/'+file+'.xml'\n tree = ET.parse(path)\n root = tree.getroot()\n texts = []\n for child in root.iter('documents'):\n for child2 in child.iter('document'):\n texts.append(child2.text)\n tweets.extend(texts[40:50])\n content = {'tweets': tweets,\n 'labels': label}\n df = pd.DataFrame(content) \n \n return df", "def processData(json_file, rts=10, start_at=None):\n #Twitter Creds\n# twitter_app_auth = {\n# 'consumer_key': '',\n# 'consumer_secret': '',\n# 'access_token': '',\n# 'access_token_secret': ''\n# }\n\n # API setup\n auth = tweepy.OAuthHandler(twitter_app_auth['consumer_key'], twitter_app_auth['consumer_secret'])\n auth.set_access_token(twitter_app_auth['access_token'], twitter_app_auth['access_token_secret'])\n api = tweepy.API(auth)\n\n # Carmen setup\n resolver = carmen.get_resolver()\n resolver.load_locations()\n\n # File setup \n file_directory = json_file\n json_data=open(file_directory).read()\n users = json.loads(json_data)\n\n if start_at:\n start_indx = [users.index(user) for user in users if user['username'] == start_at]\n users = users[start_indx[0]:]\n\n # Mashape Key for botometer\n mashape_key = 'TonZ1SlGz7mshDB8TSdsbjQebLgHp16UAtojsnSFkac2fxpBTa'\n\n # Filter for twitter profiles in the US - just do 20 profiles by default\n twitter_profiles = []\n all_recent_tweets = []\n usa_usernames = []\n counter = 0\n for tweet in users:\n try:\n if tweet['username'] not in usa_usernames:\n profile = api.get_user(tweet['username'], wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n recent_tweets = api.user_timeline(tweet['username'], count=rts, max_id=int(tweet['marker_tweet_id'])-1)\n if recent_tweets:\n recent_tweet = recent_tweets[0]\n location = resolver.resolve_tweet(recent_tweet._json)\n else:\n location = None\n if location:\n if location[1].country == 'United States':\n print 'processing %s...' % tweet['username']\n print 'recent tweets for %s: %s' % (tweet['username'], len(recent_tweets))\n profile._json['county'] = location[1].county\n profile._json['latitude'] = location[1].latitude\n profile._json['longitude'] = location[1].longitude\n profile = add_gender(profile, recent_tweet)\n # is it a bot?\n bom = None\n while not bom:\n try:\n print 'checking whether or not its a bot...'\n bom = botometer.Botometer(wait_on_ratelimit=True, mashape_key=mashape_key, **twitter_app_auth)\n except Exception as e:\n print 'probably timeout error. 
Waiting 1 minute before trying again...'\n time.sleep(60)\n result = bom.check_account(tweet['username'])\n profile._json['bot_likelihood'] = result['scores']['universal']\n twitter_profiles.append(profile)\n all_recent_tweets.append(recent_tweets)\n usa_usernames.append(tweet['username'])\n counter += 1\n if counter == 100:\n print '100 profiles hit; writing jsons before moving onto the next batch.'\n usa_users = [x for x in users if x['username'] in usa_usernames]\n final_twitter_profiles = [user._json for user in twitter_profiles]\n final_recent_tweets = [status._json for recent_tweets in all_recent_tweets for status in recent_tweets]\n print 'processed %s (%s) profiles.' % (counter, len(usa_users))\n print '%s recent tweets. ' % len(final_recent_tweets)\n write_to_json(final_twitter_profiles, final_recent_tweets, usa_users, json_file)\n twitter_profiles = []\n all_recent_tweets = []\n usa_usernames = []\n counter = 0\n except tweepy.TweepError as e:\n print e.message\n if 'Failed to send request:' in e.reason:\n print \"Time out error caught.\"\n time.sleep(180)\n elif e.message == 'Not authorized.':\n pass\n elif e.message[0]['message'] == 'Rate limit exceeded':\n print 'rate limit exceeded. waiting 15 minutes...'\n time.sleep(60 * 15)\n usa_users = [x for x in users if x['username'] in usa_usernames]\n final_twitter_profiles = [user._json for user in twitter_profiles]\n final_recent_tweets = [status._json for recent_tweets in all_recent_tweets for status in recent_tweets]\n print 'processed %s (%s) profiles.' % (counter, len(usa_users))\n print '%s recent tweets. ' % len(final_recent_tweets)\n return final_twitter_profiles, final_recent_tweets, usa_users", "def tweet_files(dir):\n result = dict()\n sub_dirs = os.listdir(dir)\n for sub_dir_name in sub_dirs:\n sub_dir_name = dir + os.sep + sub_dir_name\n sub_dir = Path(sub_dir_name)\n if (not sub_dir.is_dir()) or sub_dir.name.startswith(\".\"):\n continue\n\n sub_dir_contents = [sub_dir_name + os.sep + f for f in os.listdir(sub_dir_name)]\n user_files = [f for f in sub_dir_contents if is_visible_file(f)]\n if len(user_files) == 0:\n continue\n\n catgy_list = dict()\n result[sub_dir.name] = catgy_list\n\n user_files.sort(reverse=True)\n\n current_user_batch = [user_files.pop()]\n current_user_name = screen_name_from_tweets_file(user_files[0])\n while len(user_files) > 0:\n f = user_files.pop()\n p = Path(f)\n fname = p.name\n\n if fname.startswith(current_user_name):\n current_user_batch.insert(0, f)\n else:\n catgy_list[current_user_name] = current_user_batch\n current_user_name = screen_name_from_tweets_file(f)\n current_user_batch = [f]\n\n catgy_list[current_user_name] = current_user_batch\n\n return result", "def parse(self):\n\t\tif not os.path.isdir(self.path_to_input):\n\t\t\traise FileNotFoundError(\"ERROR: no such folder: \" + self.path_to_input)\n\n\t\tfiles = glob.glob(self.path_to_input + \"/*.txt\")\n\t\tcombined_words = []\n\t\tfor file_ in files:\n\t\t\twith codecs.open(file_, encoding=\"utf8\") as f:\n\t\t\t\tword_list = f.read().split()\n\t\t\t\tcombined_words.extend(word_list)\n\n\t\tself.content = \" \".join(combined_words)", "def readData():\n tweets = json.loads(open('collectTweetResult.json').read()) #read collected tweet data\n tokens = json.loads(open('cleantweet.json').read()) #read cleaned tweet data for sentiment analysis\n return tweets,tokens", "def tweets(self):\n tweet=[] # creating a list to add all of the tweets text to\n for json_file in self.data:\n tweet.append(json_file[\"text\"])# adding the text 
of the tweets to the list\n return tweet # returning the list of tweets so that I can use this function tweets and apply it", "def load_data(path):\n\n\tf = open(path,'r');\n\n\ttweets = [];\n\ttarget = [];\n\tfor line in f :\n\t\tif line != '' and line != '\\n':\n\t\t\tlistLine = line.strip().split('\\t');\n\t\t\t\n\t\t\t#Tokenize tweet\n\t\t\tlistLine[0] = u\" \".join(twokenize_ES.tokenize(listLine[0]))\n\t\t\t\n\t\t\t#Analize tweet\n\t\t\tlistLine[0] = emoticons_ES.analyze_tweet(listLine[0])\n\t\t\t\n\t\t\t#RemovePunctuation\n\t\t\tlistLine[0] = u\" \".join(twokenize_ES.remove_punct(listLine[0]))\n\n\t\t\ttweets.append(listLine[0].strip().split());\n\t\t\tif listLine[1] == 'positive':\n\t\t\t\ttarget.append([1,0,0])\n\t\t\telif listLine[1] == 'negative':\n\t\t\t\ttarget.append([0,0,1])\n\t\t\telse:\n\t\t\t\ttarget.append([0,1,0])\n\n\treturn [tweets,target]", "def processFiles(fileList):\n all_text = []\n for iFile in fileList:\n with gzip.open(iFile) as f:\n tree = etree.fromstring(f.read())\n text = tree.xpath(\"//DOC[@type='story']/TEXT/P/text()\")\n text = [p for p in text if p]\n all_text = all_text + text\n print(*all_text, sep = '\\n')", "def import_dir(path: str) -> List[Document]:\n results: List[Document] = []\n for dirname, _, files in os.walk(path):\n for f in files:\n p = Path(dirname).joinpath(f)\n if p.suffix == \".xml\":\n print(f\"File: {p} with size {p.stat().st_size} bytes\")\n results.append(Parser.parse(p)) # we don't insert them right now, we insert them in chunks later\n return results", "def crawl_twitter_content(options):\n content_file_writer = FileWriter(100000, \"twitter_content\", options.output) \n with open(options.input, \"r\") as input_f:\n for user_name in input_f:\n try:\n user_name = user_name.strip()\n pre_tweets, last_tweet_time = crawl_content_withapi(user_name.strip(), options)\n tweet_list = trans_json_to_tweet(pre_tweets)\n logging.info(\"Get {} Tweets From Api\".format(str(len(tweet_list))))\n write_content_to_file(content_file_writer, tweet_list, user_name)\n\n if options.all and len(tweet_list) >= 3200:\n logging.info(\"Start Crawl Status Not Use Api!\")\n new_tweet_list = crawl_content_noapi(user_name.strip(), last_tweet_time)\n write_content_to_file(content_file_writer, new_tweet_list, user_name)\n logging.info(\"Get {} Tweets From No Api\".format(str(len(new_tweet_list))))\n tweet_list.append(new_tweet_list)\n\n if options.comment:\n status_id_list = get_status_id_list(tweet_list)\n logging.info(\"Start Crawl Comment\" + str(len(status_id_list)))\n crawl_comments(options, user_name.strip(), status_id_list, content_file_writer)\n\n except Exception as e:\n print \"Have Exception %s\" % e\n\n content_file_writer.close()", "def tokenizedText(files, directory):\n tokens =[]\n for filename in files:\n if '.txt' in filename:\n lines = open(directory + '/'+ filename, 'r').read()\n sentences = re.compile(r'(?<=[.!?;])\\s*').split(lines)\n sentences_with_tag = '';\n for sentence in sentences:\n sentences_with_tag += ' START ' + sentence + ' END '\n try:\n tokens += word_tokenize(sentences_with_tag.decode('utf8')) \n except:\n pass\n return tokens", "def parseAll(self):\n\n for file in self.toParse:\n if(file.endswith(\".txt\")):\n self.parse(file)", "def main(directory):\n docs = []\n for entry in entries:\n docs.append(Document(entry, path))\n\n processed = []\n\n print('Processing documents...')\n print()\n for document in docs:\n processed.append(document.pre_process())\n \n processed_counts = termCounts(processed)\n \n with 
open('wordCounts.txt', 'w') as file:\n file.write(json.dumps(processed_counts))\n \n return processed_counts", "def __init__(self,\n api=None,\n search=['None'],\n time_limit=0,\n tweets_limit=0,\n start_time=datetime.datetime.now(),\n start_counter=0,\n file_path='.',\n file_name='tweets_',\n file_extension='.csv',\n file_size_limit=0,\n tweets_in_file=0,\n file_size=0\n ):\n TweetsListener.__init__(self,\n api,\n search,\n time_limit,\n tweets_limit,\n start_time,\n start_counter)\n self.file_path=file_path\n self.file_name=file_name\n self.file_extension=file_extension\n self.tweets_file=self.file_path + self.file_name + self.file_extension\n self.file_size_limit=file_size_limit\n self.tweets_in_file=tweets_in_file\n self.file_size=file_size\n self.file_line_counter=0\n if self.tweets_in_file>0 or self.file_size>0:\n self.split_to_files=True\n self.file_number=0\n else:\n self.split_to_files=False", "def loadParsedTweets(filename):\n try:\n with open(filename) as fl:\n jsonObj = json.load(fl)\n tweets = [tweet[\"text\"] for tweet in jsonObj]\n except (MemoryError, ValueError):\n with open(filename) as fl:\n tweets = [json.loads(line)[\"text\"] for line in fl]\n \n parsedTweets = [nltk.wordpunct_tokenize(tweet) for tweet in tweets]\n\n return parsedTweets", "def read_xml_directory(path):\n files = glob.glob(path + \"*.xml\")\n\n if not files:\n print('No files found for %s' % path)\n\n xmls = []\n for xmlfile in files:\n with open(xmlfile, 'r') as f:\n parsed = etree.parse(f)\n xmls.append(parsed)\n\n return xmls", "def load(self, input_file):\n self.tweets = Tweets(input_file)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function opens the txt file in the dir_path directory and extracts the real gender of each user. Returns a dictionary mapping username to gender
def read_truth_gender(dir_path):
    file = open(dir_path + "truth.txt", "r")
    lines = file.readlines()
    file.close()
    gender_dict = {}
    for line in lines:
        # each line has the form "<user>:::<gender>:::<country>"
        user, gender, country = line.split(":::")
        gender_dict[user] = gender
    return gender_dict
[ "def get_user_data():\n f = open(\"ml-100k/u.user\", 'r')\n users = []\n for line in f.readlines():\n u = line.split(\"|\")\n u[0] = int(u[0])\n u[1] = int(u[1])\n\n gender = 0\n if u[2] == \"M\":\n gender = 1\n\n if u[1] > ageRange[1]:\n ageRange[1] = u[1]\n elif u[1] < ageRange[0]:\n ageRange[0] = u[1]\n\n user = User(u[0], u[1], gender, u[3], u[4], line)\n users.append(user)\n\n for user in users:\n user.ageGenderOccupationValue = calculateAGOValue(user, ageRange)\n return users", "def male_names() -> Dict[str, List[str]]:\n return load_names(ethnicity=\"danish\", gender=\"male\", min_prop_gender=0.5)", "def female_names() -> Dict[str, List[str]]:\n return load_names(ethnicity=\"danish\", gender=\"female\", min_prop_gender=0.5)", "def load_one_gender_data(gender,path):\n\n\tdata = import_artist_files(path+gender+\"/\")\n\tfor dictio in data:\n\t\tdictio['gender'] = gender\n\tdatas = split_train_dev_test(data)\n\treturn datas", "def __read_users(self):\n\n path = os.path.join(self.cwd,'data/users')\n available_users = os.listdir(path)\n if len(available_users)>0:\n for user_id in available_users:\n if user_id == 'README.md':\n continue\n #assuming the user data was stored in JSON format\n with open(os.path.join(path,user_id),'r') as file:\n user_data = json.load(file)\n user = UserProfile(user_data['id'],user_data['name'], user_data['email'], \n user_data['password'], user_data['timeline'])\n user.init_friends(user_data['friends'])\n user.init_my_groups(user_data['my_groups'])\n user.init_joined_groups(user_data['joined_groups'])\n user.init_my_pages(user_data['my_pages'])\n user.init_followed_pages(user_data['followed_pages'])\n user.init_events(user_data['my_events'])\n self.users[user_id.split('.')[0]]=user", "def find_gender(self, ocr_text):\r\n \r\n split_ocr = ocr_text.split('\\n')\r\n #print(\"Split data:\",split_ocr)\r\n #split_ocr = split_ocr.split(' ')\r\n #print(\"Split data:\",split_ocr)\r\n text1=[]\r\n for i in split_ocr:\r\n text1+=i.split(\" \")\r\n \r\n #print(text1)\r\n \r\n #print(\"Split data:\",split_ocr)\r\n \r\n if 'Male' in text1 or 'MALE' in text1:\r\n GENDER = 'Male'\r\n elif 'Female' in text1 or 'FEMALE' in text1:\r\n GENDER = 'Female'\r\n else:\r\n GENDER = 'NAN'\r\n return GENDER", "def _get_gender(self):\n female = ['female', 'actress', 'women']\n male = ['male', 'actor', 'men']\n full_text = self.soup.get_text().lower()\n count_female = full_text.count(' she ') + full_text.count(' her ')\n count_male = full_text.count(' he ') + full_text.count(' his ')\n\n try:\n #Grabs the text in catlinks id\n catlinks = self.soup.find(id='catlinks').text.lower()\n if any(s in catlinks for s in female):\n self.gender = 'F'\n elif any(s in catlinks for s in male):\n self.gender = 'M'\n else:\n try:\n ratio_male = float(count_male) / float(count_female)\n except:\n ratio_male = 1\n if ratio_male > 2:\n self.gender = 'M'\n elif ratio_male < 0.5:\n self.gender = 'F'\n else:\n self.gender = None\n except:\n self.gender = None", "def labelGender(tweet, males, females):\n #name = tweet['user']['name'].lower().split()\n name = tweet.lower().split()\n if len(name) == 0:\n name = ['']\n name = re.findall('\\w+', name[0])\n if len(name) == 0:\n name = ''\n else:\n name = name[0]\n if name in males:\n return 'm'\n tweet['user']['gender'] = 'm'\n elif name in females:\n return 'f'\n tweet['user']['gender'] = 'f'\n else:\n return 'n'\n tweet['user']['gender'] = 'n'\n return tweet", "def read_user_tweets(dir_path):\n tweet_dict = {}\n words = []\n tokenize_dict = {}\n user_tweets = 
\"\"\n i = 0\n cachedStopWords = stopwords.words(\"english\")\n# print(cachedStopWords) #print stop words\n# loop over the user files\n for filename in os.listdir(dir_path):\n #skip files if it's not xml \n if filename.endswith(\".xml\"): \n dom = ElementTree.parse(dir_path+filename) \n tweets = dom.find('documents')\n #loop over tweet of one user \n for tweet in tweets:\n #concantanate tweets of one user by new line \n user_tweets = user_tweets + \"\\n\" + (tweet.text).lower()\n #remove punctiation and numbers\n user_tweets = re.sub(r'[^\\w\\s]','', user_tweets)\n user_tweets = re.sub(r'[0-9]','', user_tweets)\n #cut '.xml' from file name to get user value as the same as in txt file\n filename = filename[:-4]\n #lowercase the text\n tweet_dict[filename] = user_tweets.lower()\n #tokenize user tweets\n tokenize = word_tokenize(user_tweets)\n tokenize = [word for word in tokenize if not (word.startswith('http') or word.startswith('amp') or word.startswith('xx')) ]\n tokenize_dict[filename] = tokenize\n i += 1\n if i % 100 == 0:\n print(i)\n words += [word for word in tokenize_dict[filename] if word not in cachedStopWords]\n user_tweets = \"\"\n \n return tweet_dict, words", "def read_from_file(self, file_name):\n\t\twith open (self.user_folder + file_name, 'r') as file:\n\t\t\tnames_list = file.readlines()\n\t\t\tfor name in names_list:\n\t\t\t\tprint(name.strip())", "def _load_users_stats(self):\n with open(os.path.join(self._datasets_path, 'females.tsv'), 'r', encoding='utf-8') as file:\n for line in file:\n self._females[line.split('\\t')[0]] += 1\n\n with open(os.path.join(self._datasets_path, 'males.tsv'), 'r', encoding='utf-8') as file:\n for line in file:\n self._males[line.split('\\t')[0]] += 1", "def get_gender(dataset = \"all\"):\r\n\treturn process_main(get_status_statistic, \"get_gender\", dataset, \"gender\", tuple())", "def _users_from_dir(self):\n for fp in os.listdir(self._path_to_db):\n if fp.endswith(\".json\"):\n path = self._path_to_db+ \"/\"+fp\n with open(path, \"r\") as f_user:\n user_dict = json.load(f_user)\n user = User(user_dict[\"username\"], user_dict[\"address\"], user_dict[\"message\"], user_dict[\"status\"])\n self._users[user_dict[\"username\"]] = user", "def parse_user(self):\n msg(\"parsing u.user\")\n lines = file('/'.join((self.datadir,\"u.user\"))).read().split('\\n')\n records = [line.split('|') for line in lines if line]\n pairs = [tuple([int(line[0]),\n int(line[1]),\n line[2],\n line[3],\n line[4]])\n for line in records]\n for id, age, gender, occupation, zipcode in pairs:\n self.user_info[id]=(age, gender, occupation, zipcode)\n self.ocp_by_user[occupation].append(id)", "def read_daily_gz_files(path):\n\n # create the save paths\n if path[-1] == \"/\" or path[-1] == \"\\\\\":\n file_name = os.path.split(os.path.split(path)[0])[-1]\n\n else:\n file_name = os.path.split(path)[-1]\n\n save_path_interactions = os.path.join(\"..\", \"data\", file_name + \".csv\")\n save_path_user_ids = os.path.join(\"..\", \"data\", file_name + \"_partial_user_ids.csv\")\n\n # get the file names to be read\n files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\n # read the files one by one\n active = []\n passive = []\n date = []\n user_id = {}\n for file in files:\n\n # read a file\n _file = []\n with gzip.open(os.path.join(path, file), 'r') as f:\n for line in f:\n _file.append(line)\n\n # get the date format\n _date = file.split(\".\")[0].split(\"_\")[0] + \"-\" + file.split(\".\")[0].split(\"_\")[1] + \"-\" + 
file.split(\".\")[0].split(\"_\")[2]\n\n # parse through the file\n for i in _file:\n # dealing with a json\n if str(i)[2] == \"{\":\n json_i = json.loads(i)\n # tweet\n if len(json_i['entities']['user_mentions']) == 0:\n active.append(json_i['user']['screen_name'])\n passive.append(json_i['user']['screen_name'])\n date.append(_date)\n\n user_id[json_i['user']['screen_name']] = json_i['user']['id']\n\n # reply, retweet or mention\n else:\n for entity in json_i['entities']['user_mentions']:\n active.append(json_i['user']['screen_name'])\n passive.append(entity['screen_name'])\n date.append(_date)\n\n user_id[json_i['user']['screen_name']] = json_i['user']['id']\n user_id[entity['screen_name']] = entity['id']\n\n # dealing with a tsv-line\n else:\n # tweet\n if str(i).split(\"\\\\t\")[2].find(\"@\") == -1:\n active.append(str(i).split(\"\\\\t\")[0][2:])\n passive.append(str(i).split(\"\\\\t\")[0][2:])\n date.append(_date)\n\n # reply, retweet or mention\n else:\n for entry in str(i).split(\"\\\\t\")[-3].split():\n try:\n float(entry)\n actual_entries = list(filter(lambda user: user[0] == \"@\", str(i).split(\"\\\\t\")[2].split()))\n\n for _aentry in actual_entries:\n if _aentry != '@' and _aentry != '@ ':\n if str(_aentry).find(\".\") == -1 and str(_aentry).find(\":\") == -1 and str(_aentry).find(\"#\") == -1:\n active.append(str(i).split(\"\\\\t\")[0][2:])\n passive.append(_aentry[1:])\n date.append(_date)\n\n except ValueError:\n active.append(str(i).split(\"\\\\t\")[0][2:])\n passive.append(entry)\n date.append(_date)\n\n for i in np.where(np.array(active) == '')[0][::-1]:\n active.pop(i)\n passive.pop(i)\n\n for i in np.where(np.array(passive) == '')[0][::-1]:\n passive.pop(i)\n active.pop(i)\n\n # save the tweeter interactions csv\n pd.DataFrame(list(zip(active, passive, date))).to_csv(save_path_interactions, header = False, index = False)\n\n # save the partial user ids\n pd.DataFrame.from_dict(user_id, orient = 'index').reset_index().to_csv(save_path_user_ids, header = False, index = False)", "def iter_forgery(self, user):\n\n user_folder = os.path.join(self.path, '{:04d}'.format(user))\n all_files = sorted(os.listdir(user_folder))\n forgery_files = filter(lambda x: ('{:04d}f'.format(user)) in x.lower(), all_files)\n for f in forgery_files:\n full_path = os.path.join(user_folder, f)\n img = imread(full_path, as_gray=True)\n yield img_as_ubyte(img), f", "def add_gender(user, tweet):\n # Gender setup\n gender_dict = {'M': 'Male', 'F': 'Female'}\n\n gender_data = process_tweet(tweet._json)\n if len(gender_data[0]['gender']) == 1:\n if gender_dict[gender_data[0]['gender'][0]['value']]:\n gender = gender_dict[gender_data[0]['gender'][0]['value']]\n else:\n gender = 'Unknown'\n elif len(gender_data[0]['gender']) == 2:\n if gender_data[0]['gender'][0]['prob'] > gender_data[0]['gender'][1]['prob']:\n gender = gender_dict[gender_data[0]['gender'][0]['value']]\n elif gender_data[0]['gender'][0]['prob'] < gender_data[0]['gender'][1]['prob']:\n gender = gender_dict[gender_data[0]['gender'][1]['value']]\n else:\n gender = 'Unknown'\n else:\n gender = 'Unknown'\n user._json['gender'] = gender\n\n return user", "def grab_all_user_profiles(path):\n\n user_profs_df = pd.read_csv(path, sep='\\t')\n return filter_incompletes(user_profs_df)", "def load_users(path):\r\n with io.open(path + 'files/users.json', 'r', encoding='utf8') as f:\r\n list_dict = json.load(f)\r\n\r\n return [User(a['name'], a['city'], a['country'], a['radius'], coords=a['coords']) for a in list_dict]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connects to MCU via serial port, sends bitrate selection packet
def connect(self, serial_port, can_bitrate):
    self.serial = serial.Serial(
        port=str(serial_port),
        baudrate=250000,
        timeout=1
    )
    try:
        # toggle DTR (typically resets the MCU), then wait for it to boot
        self.serial.setDTR(True)
        time.sleep(0.2)
        self.serial.setDTR(False)
        time.sleep(2)
        # bitrate selection packet: a single byte holding the index of the
        # requested bitrate within supported_can_bitrates
        self.serial.write(chr(self.supported_can_bitrates.index(int(can_bitrate))))
    except ValueError:
        raise SerialException("CAN bitrate not supported!")
[ "def send_traffic(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n # print(pack)\n serialport.write(pack)", "def initSerial(self, port, baud, filename):\n if not self.connected:\n try:\n self.s = serial.Serial(port, baud)\n except Exception as e:\n self.stop()\n raise (e)\n else:\n # Wake up grbl\n self.s.write(b\"\\r\\n\\r\\n\")\n # allow grbl to initialize\n time.sleep(2)\n # flush startup from serial\n self.s.flushInput()\n # open startup file\n with open(filename, 'r') as f:\n for line in f:\n # strip EOL chars\n l = line.strip()\n self.sendCommand(l + '\\r')\n print('Connected to GRBL')\n # self.connect_btn.configure(fg='green', text='Connected')\n self.connected = True\n self.__parseParameters()\n\n else:\n self.connected = False\n self.s.close()\n self.s = None\n self.queue = None\n print('Connection closed')\n self.connect_btn.configure(fg='red', text='Connect')", "def send_serial():\n global ComboValues\n global local_filename\n global headers\n global parsedROM\n Button_Refresh['state'] = \"disabled\"\n Button_File['state'] = \"disabled\"\n Button_PGM['state'] = \"disabled\"\n #Check if local file\n if RadioVar.get() == 2:\n P_Bar['maximum'] = os.stat(FilePath_Var.get()).st_size\n fileName.put(FilePath_Var.get())\n fileSize.put(os.stat(FilePath_Var.get()).st_size)\n serPort.put(PGM_Var.get())\n #Check if Web file\n if RadioVar.get() == 1:\n FilePath_Var.set(\"Fetching File from Web\")\n a = parsedROM['VECTREX ROMS']['TITLES'][ComboVar.get()]['FILE NAME']\n local_filename, headers = urllib.request.urlretrieve('http://www.myexjwlife.com/vectrex/'+a)\n P_Bar['maximum'] = int(headers['Content-Length'])\n FilePath_Var.set(\"\")\n fileName.put(local_filename)\n fileSize.put(int(headers['Content-Length']))\n serPort.put(PGM_Var.get())", "def on_connect(self, event):\n\n if not self._process:\n serial_port = self.serial_combobox.GetStringSelection()\n bitrate = self.bitrate_combobox.GetStringSelection()\n\n if serial_port and bitrate:\n try:\n self._queue = multiprocessing.Queue()\n self._process = process.SerialProcess(serial_port, bitrate, self._queue)\n self.connect_button.SetLabel(\"Disconnect\")\n except serial_interface.SerialException, e:\n wx.MessageBox(\"Cannot open serial port! 
\"+e.message, \"Error\", wx.OK | wx.ICON_ERROR)\n else:\n wx.MessageBox(\"Please select serial port and bitrate.\", \"Error\", wx.OK | wx.ICON_ERROR)\n else:\n self._queue.put(\"stop\")\n self.connect_button.SetLabel(\"Connect\")\n self._process = None", "def connect_pump():\n x = serial.Serial(\"/dev/ttyUSB0\", baudrate=9600,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n timeout=1)\n if x.isOpen():\n print \"Connected to Pump \\n\"\n return x", "def connect(self, mach) -> channel.Channel:\n return mach.open_channel(\"picocom\", \"-q\", \"-b\", \"115200\",\n self.console_uart)", "def send_packet():", "def __set_baudrate(self, baud_rate):\n response = subprocess.call([\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"down\"])\n if response != 0:\n print(\"Error: Cannot deactivate '{0}' interface\".format(canSend.can_interface))\n print(response)\n response = subprocess.call(\n [\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"type\", \"can\", \"bitrate\", str(baud_rate)])\n if response != 0:\n print(\"Error: Cannot set {0} baudrate for interface '{1}'\".format(baud_rate, canSend.can_interface))\n print(response)\n response = subprocess.call([\"sudo\", \"ip\", \"link\", \"set\", canSend.can_interface, \"up\"])\n if response != 0:\n print(\"Error: Cannot eactivate '{0}' interface\".format(canSend.can_interface))\n print(response)\n self.__print_actual_baudrate()", "def __init__(self):\n self.serial = serial.Serial()\n \n self.serial.baudrate = 38400 #111111 # Xbee communication link is closer of 111111baud than 115200baud\n # due to his clock and PLL. In strict 115200baud computer configuration\n # frame are regularly lost. \n \n self.serial.port = '/dev/ttyAMA0'\n \n self.serial.timeout = 0.1 # 100ms default timeout for pySerial call", "def connect(self):\n self.serial_connection = serial.Serial(self.serial_name, timeout=self.time_out)\n self.serial_connection.flushInput()", "def connect_serial(self, serial_port:serial.tools.list_ports_common.ListPortInfo):\n if serial_port is None:\n print(\"serial_port is not correct, initial manual serial port selection.\")\n\n while serial_port is None:\n comlist = list_ports.comports()\n id = 0\n for element in comlist:\n if element:\n id = id + 1\n print(\"ID: \" + str(id) + \" -- Portname: \" + str(element) + \"\\n\")\n port = int(input(\"Enter a port number: \"))\n if port - 1 < len(comlist):\n serial_port = comlist[port-1]\n else:\n print(\"Wrong serial port selected, try again\")\n\n\n self.openRS485(serial_port)\n #self.add_device(1, \"Hand Grip/Open\", \"softhand\")\n #self.add_device(2, \"Wrist Flex/Exten\", \"qbmove\")\n #self.add_device(3, \"Wrist Pron/Supi\", \"qbmove\")\n\n for item in self.devices:\n item.activate()\n elif type(serial_port) is serial.tools.list_ports_common.ListPortInfo:\n self.openRS485(serial_port)\n else:\n pass", "def connect(self, device, port, speed = DEFAULT_SPEED):\n # If we are already connect, disconnect\n self.disconnect()\n # Verify the device type\n device = device.lower()\n if not CHIPLIST.has_key(device):\n raise MicrobootException(\"Unrecognised device type '%s'\" % device)\n self.deviceName = device\n # Set up the serial port\n self.serial = serial.Serial(\n port = port,\n baudrate = speed,\n timeout = 0.2 # TODO: Should probably be configurable\n )\n # Get device data and verify\n info = self.getBootInfo()\n if info[0] < CHIPLIST[device][2]:\n self.disconnect()\n raise MicrobootException(\"Bootloader protocol is not supported. 
Wanted %d, got %d.\" % (CHIPLIST[device][2], info[0]))\n if (info[2] <> CHIPLIST[device][0]) or (info[3] <> CHIPLIST[device][1]):\n self.disconnect()\n raise MicrobootException(\"Unexpected processor type - wanted %02X/%02X, got %02X/%02X.\" % (CHIPLIST[device][0], CHIPLIST[device][1], info[1], info[2]))\n # Set up state\n self.deviceName = device\n self.deviceInfo = CHIPLIST[device]\n # Done\n return self.bootInfo", "def transport_serial_send_break_byte(self, port=0):", "def test_sendBytes(self):\n msgBytes = b'ABCDEF'\n self.serialPort.read(100) # clear serial buffer\n self.serialComm.sendBytes(msgBytes)\n time.sleep(0.1)\n self.serialComm.readBytes()\n assert(self.serialComm.radio.getRxBytes() == msgBytes)", "def rx_serial(self, data):\n if self.__print_rx_details:\n if len(data):\n print('rx: ' + hexlify(data))\n\n # This -----------------------------------------------------------------\n # Copy received to the buffer.\n self.__rx_buffer = self.__rx_buffer + bytearray(data)\n\n start = self.__rx_buffer.find(b'\\x7e')\n end = self.__rx_buffer.find(b'\\x7f')\n\n # If not found clear the buffer and wait for it.\n if start < 0:\n self.__rx_buffer = bytearray()\n return\n # If no end not found... wait for more.\n if end < 0:\n return\n\n # Get the frames\n while start < end:\n start = self.__rx_buffer[:end].rfind(b'\\x7e')\n self.buff.add(self.packet_unstuff(self.__rx_buffer[start:end + 1]))\n if self.__print_rx:\n print((\"PACKET!!: \" + hexlify(self.__rx_buffer[start:end + 1])))\n\n self.__rx_buffer = self.__rx_buffer[end + 1:]\n start = self.__rx_buffer.find(b'\\x7e')\n end = self.__rx_buffer.find(b'\\x7f')\n if start < 0:\n self.__rx_buffer = bytearray()\n return\n return", "def serialHandler():\n incomingData = []\n \n with serial.Serial(PORT_NAME, PORT_SPEED, rtscts=PORT_HWFC) as port:\n while True:\n # Read one byte from serial port\n incomingByte = port.read(size=1)\n \n # Echo to terminal. Used for appears typing text\n #port.write(bytearray(incomingByte))\n \n # Parse incoming byte\n \n # If get start of packet byte - receive next byte with overall len of the packet\n if ord(incomingByte) == LB_START_BYTE:\n logging.debug(\"Received start byte 0xAB\")\n \n # Receife start byte. 
Next - length byte\n lenByte = port.read(size=1)\n logging.debug(\"Received length of data = \" + str(lenByte))\n \n # Waiting exaclty length byte chain\n incomingData = port.read(size=ord(lenByte))\n logging.debug(\"Finish receive of the data bytes\")\n \n # Parse data and forming a answer\n answer = parseProtocolData(incomingData)\n \n # Write answer to the port\n port.write(answer)\n \n # Flush serial port buffer to send data physically\n port.flush()\n else:\n # This is no a start byte\n # Think about it\n pass\n # End of parse incoming byte", "def connectToSerial(dev):\n\tinsteon.setPort(IOPort(SerialIOStream(dev)))", "def send_raw_packet(packet, port):", "def connect(self):\n\n self.state[\"state\"] = \"connecting\"\n\n # Creating control and interrupt sockets\n s_ctrl = socket.socket(\n family=socket.AF_BLUETOOTH,\n type=socket.SOCK_SEQPACKET,\n proto=socket.BTPROTO_L2CAP)\n s_itr = socket.socket(\n family=socket.AF_BLUETOOTH,\n type=socket.SOCK_SEQPACKET,\n proto=socket.BTPROTO_L2CAP)\n\n # Setting up HID interrupt/control sockets\n try:\n s_ctrl.bind((self.bt.address, 17))\n s_itr.bind((self.bt.address, 19))\n except OSError:\n s_ctrl.bind((socket.BDADDR_ANY, 17))\n s_itr.bind((socket.BDADDR_ANY, 19))\n\n s_itr.listen(1)\n s_ctrl.listen(1)\n\n self.bt.set_discoverable(True)\n\n ctrl, ctrl_address = s_ctrl.accept()\n itr, itr_address = s_itr.accept()\n\n # Send an empty input report to the Switch to prompt a reply\n self.protocol.process_commands(None)\n msg = self.protocol.get_report()\n itr.sendall(msg)\n\n # Setting interrupt connection as non-blocking.\n # In this case, non-blocking means it throws a \"BlockingIOError\"\n # for sending and receiving, instead of blocking.\n fcntl.fcntl(itr, fcntl.F_SETFL, os.O_NONBLOCK)\n\n # Mainloop\n while True:\n # Attempt to get output from Switch\n try:\n reply = itr.recv(50)\n if self.logger_level <= logging.DEBUG and len(reply) > 40:\n self.logger.debug(format_msg_switch(reply))\n except BlockingIOError:\n reply = None\n\n self.protocol.process_commands(reply)\n msg = self.protocol.get_report()\n\n if self.logger_level <= logging.DEBUG and reply:\n self.logger.debug(format_msg_controller(msg))\n\n try:\n itr.sendall(msg)\n except BlockingIOError:\n continue\n\n # Exit pairing loop when player lights have been set and\n # vibration has been enabled\n if (reply and len(reply) > 45 and\n self.protocol.vibration_enabled and self.protocol.player_number):\n break\n\n # Switch responds to packets slower during pairing\n # Pairing cycle responds optimally on a 15Hz loop\n time.sleep(1/15)\n\n self.slow_input_frequency = True\n self.input.exited_grip_order_menu = False\n\n return itr, ctrl" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes in the learning rate results and makes a plot of the learning rate
def plot_learning_rate(result):
    avg_result = result.mean(0)
    ind = np.arange(len(avg_result)) + 1
    plt.plot(ind, avg_result)
    plt.show()
[ "def learningRate_plot(history):\n learningRates = np.concatenate([epoch.get('lrs', []) for epoch in history])\n plt.plot(learningRates)\n plt.xlabel('Batch no.')\n plt.ylabel('Learning rate')\n plt.title('Learning Rate vs. Batch no.')", "def draw_learning_curve(numbers):\r\n \r\n plt.xlabel('Simulation Epoch')\r\n plt.ylabel('Success Rate')\r\n plt.title('Learning Curve')\r\n plt.grid(True)\r\n\r\n plt.plot(numbers['x'], numbers['success_rate'], 'r', lw=1)\r\n plt.show()", "def learning_curve_per_train_steps(Loss_list):\n print(Loss_list)\n fig = plt.figure()\n plt.title('Learning Curve : Diatom Dataset')\n plt.plot(Loss_list)\n plt.yscale('log')\n plt.xlabel('training_steps')\n plt.ylabel('Loss : Cross Entropy')\n fig.savefig('Learning_curve_plot_diatom_per_training_steps.png')", "def plot_learning(self):\n plt.figure(figsize=(10,7))\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel(\"Fitness\")\n plt.xlabel(\"Iteration\")\n plt.title('Cost function over iterations')\n plt.show()", "def learning_curve(losses: list, names : list):\n for loss in losses:\n plt.plot(loss)\n plt.xscale(\"log\")\n plt.ylim(0, 35000)\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Squared Loss\")\n plt.title(\"Gradient Descent\")\n plt.legend(names)\n plt.savefig(\"learning_curve.png\")\n plt.show()", "def plot_loss(loss_history):\n plt.title('Loss history')\n plt.xlabel('Iteration')\n plt.ylabel('Loss')\n plt.plot(loss_history)\n plt.show()", "def plot_learning_curve(episode_mse, l):\n x, y = zip(*episode_mse)\n\n fig = plt.figure()\n plt.plot(x, y)\n\n fig.suptitle('Episode no. vs Mean Squared Error for lambda = ' + str(l), fontsize=17)\n plt.xlabel(\"Episode no.\")\n plt.ylabel(\"MSE\")\n\n plt.show()", "def plot_learning_curve_per_epochs(train_loss_per_training_steps, val_loss_per_training_steps, steps_per_epoch, total_steps):\n Loss_per_epochs = []\n val_loss_per_epochs = []\n for i in range(0, total_steps, steps_per_epoch):\n Loss_per_epochs.append(train_loss_per_training_steps[i])\n val_loss_per_epochs.append(val_loss_per_training_steps[i])\n\n fig = plt.figure()\n plt.title('Learning Curve : Diatom Dataset')\n plt.plot(Loss_per_epochs, 'b', label='train')\n plt.plot(val_loss_per_epochs, 'g', label='val')\n plt.legend()\n plt.yscale('log')\n plt.xlabel('Epochs')\n plt.ylabel('Loss : Cross Entropy')\n fig.savefig('Learning_curve_plot_diatom_per_epochs.png')", "def visualize(self, timestep, brain_name, log_dir):\n # Check whether any time has passed since the last update.\n if self.time_since_reward_log > 0:\n # Update the lifetime record of the reward.\n self.reward_history.append(float(self.cumulative_reward) / \n (self.time_since_reward_log + 1))\n self.cumulative_reward = 0 \n self.time_since_reward_log = 0\n self.reward_steps.append(timestep)\n\n performance = np.mean(self.reward_history)\n\n # Plot the lifetime record of the reward.\n fig = plt.figure(11111)\n plt.plot(self.reward_steps, self.reward_history, color=tools.COPPER,\n linewidth=2.5)\n plt.gca().set_axis_bgcolor(tools.COPPER_HIGHLIGHT)\n plt.xlabel('Time step')\n plt.ylabel('Average reward')\n plt.title('Reward history for {0}'.format(brain_name))\n fig.show()\n fig.canvas.draw()\n\n # Save a copy of the plot.\n filename = 'reward_history_{0}.png'.format(brain_name)\n pathname = os.path.join(log_dir, filename)\n plt.savefig(pathname, format='png')\n\n # Plot the learned reward value of each feature.\n fig = plt.figure(11112)\n fig.clf()\n for i, value in enumerate(self.reward_by_feature):\n plt.plot([0., 
value], [i,i], color=tools.COPPER, linewidth=5.,\n solid_capstyle='butt')\n plt.plot([0.,0.],[0., self.reward_by_feature.size - 1.], \n color=tools.COPPER_SHADOW, linewidth=1.)\n plt.gca().set_axis_bgcolor(tools.COPPER_HIGHLIGHT)\n plt.gca().set_xlim((-1., 1.))\n plt.gca().set_ylim((-1., self.reward_by_feature.size))\n plt.xlabel('Reward')\n plt.ylabel('Sensor index')\n plt.title('{0} Amygdala'.format(brain_name))\n fig.show()\n fig.canvas.draw()\n\n # Save a copy of the plot.\n filename = 'reward_by_feature_{0}.png'.format(brain_name)\n pathname = os.path.join(log_dir, filename)\n plt.savefig(pathname, format='png')\n \n return performance", "def plot_learning_curves_nn(model):\n history = model.history\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train','val'],loc='upper right')\n plt.show()", "def display_loss_graph(loss_rate):\n fig = plt.figure()\n ax = fig.add_subplot()\n # for key, value in comm_succ.items():\n # ax.plot(value, label = key)\n ax.plot(loss_rate)\n ax.set(title=\"Loss\",\n xlabel=\"Number of rounds\",\n ylabel=\"Loss\")\n # ax.set_ylim([0,100])\n # ax.set_yticks(np.arange(0,100,10))\n ax.legend(loc=\"lower right\")\n plt.show()", "def plot(self):\n import matplotlib.pyplot as plt\n plt.plot(self.lambdas, self.result['beta'] )\n plt.ylabel('Coefficient')\n plt.xlabel('Regularization Parameter')\n plt.suptitle('Regularization Path')\n plt.show()", "def loss_plot(train_loss, test_loss):\n plt.plot(range(len(train_loss)), train_loss, 'b', label='Training loss')\n plt.plot(range(len(train_loss)), test_loss, 'r', label='Test loss')\n plt.title('Training and Test loss')\n plt.xlabel('Epochs ', fontsize=16)\n plt.ylabel('Loss', fontsize=16)\n plt.legend()\n plt.figure()\n plt.show()", "def curve_convergence(self):\n fig, ax = plt.subplots(1, 1, figsize=(20,15)) \n\n title = r'%d iterations ' % max(self.adjoint.iterations)\n title += 'at learning rate $\\gamma = %.1f$' % self.adjoint.lr\n self.subplot_solution_descent(ax, title)\n ax.legend(loc='upper center', ncol=2)\n\n plt.show()\n plt.close()", "def plot_loss(losses):\n plt.figure()\n plt.title(\"Training Loss\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"binary cross-entropy loss\")\n plt.plot(losses)\n plt.show()", "def plot_lr_distributions(predicted_log_lrs, y, savefig=None, show=None):\n plt.figure(figsize=(10, 10), dpi=100)\n points0, points1 = Xy_to_Xn(predicted_log_lrs, y)\n plt.hist(points0, bins=20, alpha=.25, density=True)\n plt.hist(points1, bins=20, alpha=.25, density=True)\n plt.xlabel('10log LR')\n if savefig is not None:\n plt.savefig(savefig)\n plt.close()\n if show or savefig is None:\n plt.show()", "def plot_accuracy(history):\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Accuracy of the Model')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()", "def learning_curves(training_loss,validation_loss,data_train,prediction_train,data_test,prediction_test,progress):\n plt.figure()\n if len(progress) == 3:\n plt.suptitle(f\"Iteration {progress[0]}, model {progress[1]}/{progress[2]}\")\n else:\n plt.suptitle(f\"Iteration {progress[0]}, model {progress[1]}/{progress[2]}, run {progress[3]}/{progress[4]}\")\n plt.subplot(1, 2, 1)\n plt.title('Learning Curves')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.plot(training_loss, \"k-\", label='train')\n plt.plot(validation_loss, \"C0--\", label='val')\n plt.ylim([0,0.2])\n 
plt.legend()\n \n plt.subplot(1, 2, 2)\n plt.scatter(data_train.flatten(),prediction_train.flatten(),c=\"k\")\n plt.scatter(data_test.flatten(),prediction_test.flatten(),c=\"C0\")\n plt.plot([-1,1],[-1,1],\"k--\")\n plt.title('Prediction correletation')\n plt.xlabel('Data')\n plt.ylabel('Prediction')\n plt.xlim([-1,1])\n plt.ylim([-1,1])\n if len(progress) == 3:\n name = os.path.join(\"ann\",f\"model_{progress[0]}_{progress[1]}\")\n else:\n name = os.path.join(\"ann\",f\"model_{trial_id[:8]}_{progress[0]}_{progress[1]}_{progress[3]}\")\n save_figure(name)", "def plot_fit(self):\n plt.scatter(self.data['SCORE'], self.data['RESULT'])\n _max = np.max(self.data['SCORE'])\n _min = np.min(self.data['SCORE'])\n x = np.linspace(_min, _max, 400)\n y = Log_Regress._sigma(self.coeff, x)\n plt.plot(x, y)\n plt.xlabel('Score')\n plt.ylabel('Probability')\n \n \n\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function for plotting the cost to go function in the meta mountain car environment, given the ctg table.
def ctg_table_helper(table, savedir=None):
    # Process the data for plotting
    x_gran, y_gran = table.shape[0], table.shape[1]
    X = np.arange(-1.2, 0.6, 1.8/x_gran)
    Y = np.arange(-0.07, 0.07, 0.14/y_gran)
    X,Y = np.meshgrid(X,Y)
    # Plotting
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X,Y,table, cmap=cm.coolwarm)
    if savedir is not None:
        fig.savefig(savedir,transparent=True)
[ "def abatement_cost_plotter(directory, gwp=34, discount_rate=0, gas_price=0):\n npv, emissions, techs = results_analysis_functions.results_analysis(directory, discount_rate, gas_price)\n files = [f for f in listdir(directory) if isfile(join(directory, f)) if '.json' not in f if 'Frame.p' not in f]\n with open(directory + '/' + files[0], 'rb') as f:\n sample = load(f)\n emissions = np.sum(emissions * sample.time.delta_t * 3600 * 24 / 1e6, axis=1) # metric tonnes\n em_abate = np.zeros([emissions.shape[0] - 1, emissions.shape[1]])\n cost_abate = np.zeros([emissions.shape[0] - 1, emissions.shape[1]])\n for ind in range(em_abate.shape[0]):\n em_abate[ind, :] = emissions[-1, :] - emissions[ind, :]\n cost_abate[ind, :] = -npv['Total'][ind, :]\n abatement_cost = cost_abate / em_abate / gwp\n medianprops = dict(color='k')\n boxprops = dict(linewidth=4)\n boxplot = plt.boxplot(np.transpose(abatement_cost), medianprops=medianprops,\n boxprops=boxprops, patch_artist=True)\n ind = 1\n for bx in boxplot['boxes']:\n bx.set(facecolor=color_set[ind])\n ind += 1\n ax = plt.gca()\n ax.set_xticklabels(techs[:len(techs) - 1])\n ax.set_ylabel('Mitigation cost\\n(USD/metric ton CO$_2$ eq.)')\n ax.set_xlabel('LDAR program')\n plot_fixer()", "def cost_components(dframe=dframe):\n \n labels = ['fringe','unemployment','BaltCorps_fee','StrongCity_fee','stipend']\n colors = ['#eff3ff','#bdd7e7','#6baed6','#3182bd','#08519c']\n values = []\n for label in labels:\n values.append(dframe[label].sum())\n\n text = []\n for label,value in zip(labels,values):\n text.append('{}<br>${:,.0f}'.format(label.capitalize(),value))\n\n fig = {\n 'data':[\n {'labels': labels,\n 'values': values,\n 'marker': {'colors': colors},\n 'name': 'cost components',\n 'hole': .4,\n 'type': 'pie',\n 'text': text,\n 'hoverinfo':'text+percent'\n }],\n 'layout': {\n 'title':'Fellowship Cost Components',\n 'hovermode': 'closest',\n 'paper_bgcolor': '#bdbdbd',\n 'plot_bgcolor': '#bdbdbd',\n 'annotations': [\n {\n 'font':{'size':12,\n 'color':'#636363'},\n 'showarrow':False,\n 'text': '5yr Fees: ${:,.0f}'.format(dframe['BaltCorps_fee'].sum() + dframe['StrongCity_fee'].sum()),\n 'x':1.35,\n 'y':.4},\n {\n 'font':{'size':12,\n 'color':'#636363'},\n 'showarrow':False,\n 'text': '5yr OPCs: ${:,.0f}'.format(dframe['fringe'].sum() + dframe['unemployment'].sum()),\n 'x':1.35,\n 'y':.3},\n {\n 'font':{'size':12,\n 'color':'darkgrey'},\n 'showarrow':False,\n 'text': '<b>Source:</b> Data Provided by Baltimore Corps June 2019:<br>https://github.com/brl1906/fellowship-analysis',\n 'x':.5,\n 'y':-.2},\n\n ]\n }\n }\n\n return fig", "def plot_trajectory_history(tr, select_obj, fn) :\n\n mask = (tr.labels == select_obj)\n\n# fig.clf\n traj = tr.trajectory[:,mask,:]\n data = tr.data[:,mask,:]\n\n zn = tr.coords['zn']\n z = np.interp(traj[:,:,2], np.arange(len(zn)), zn)\n\n times = tr.times/3600.0\n# print np.shape(z)\n\n# plottypes = [\\\n# (\"z\", r\"$z$ m\"), \\\n# (\"t\", r\"time h$^{-1}$\"), \\\n# ]\n #print np.shape(z)\n fig1, axa = plt.subplots(3,2,figsize=(8,10))\n\n for j,v in enumerate([\"w\",\"th\",\"q_vapour\",\"q_cloud_liquid_mass\"]):\n# print (j,v,var(v))\n ax = axa[(j)%2,(j)//2]\n for i in range(np.shape(z)[1]-1) :\n ax.plot(data[:,i,tr.var(v)],z[:,i])\n ax.set_xlabel(tr.variable_list[v],fontsize=16)\n ax.set_ylabel(r\"$z$ m\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n\n ax = axa[2,0]\n for i in range(np.shape(z)[1]-1) :\n piref_z = np.interp(z[:,i],zn,tr.refprof['pi'])\n# print piref_z\n thl = data[:,i,tr.var(\"th\")] - \\\n 
L_over_cp*data[:,i,tr.var(\"q_cloud_liquid_mass\")]/piref_z\n# print thl, data[:,var(\"th\"),i],data[:,var(\"q_vapour\"),i]\n ax.plot(thl,z[:,i])\n ax.set_xlabel(r\"$\\theta_L$ K\",fontsize=16)\n ax.set_ylabel(r\"$z$ m\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n\n ax = axa[2,1]\n for i in range(np.shape(z)[1]-1) :\n qt = data[:,i,tr.var(\"q_vapour\")] + \\\n data[:,i,tr.var(\"q_cloud_liquid_mass\")]\n# print qt,data[:,var(\"q_vapour\"),i],data[:,var(\"q_cloud_liquid_mass\"),i]\n ax.plot( qt,z[:,i])\n ax.set_xlabel(r\"$q_t$ kg/kg\",fontsize=16)\n ax.set_ylabel(r\"$z$ m\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n\n plt.tight_layout()\n fig1.savefig(fn+'_Cloud_traj_%3.3d'%select_obj+'.png')\n\n fig2, axa = plt.subplots(3,2,figsize=(8,10))\n\n for j,v in enumerate([\"w\",\"th\",\"q_vapour\",\"q_cloud_liquid_mass\"]):\n# print (j,v,var(v))\n ax = axa[(j)%2,(j)//2]\n for i in range(np.shape(z)[1]-1) :\n ax.plot(times,data[:,i,tr.var(v)])\n ax.plot(times[tr.ref]*np.ones(2),ax.get_ylim(),'--k')\n ax.set_ylabel(tr.variable_list[v],fontsize=16)\n ax.set_xlabel(r\"time h$^{-1}$\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n\n ax = axa[2,0]\n for i in range(np.shape(z)[1]-1) :\n piref_z = np.interp(z[:,i],zn,tr.refprof['pi'])\n# print piref_z\n thl = data[:,i,tr.var(\"th\")] - \\\n L_over_cp*data[:,i,tr.var(\"q_cloud_liquid_mass\")]/piref_z\n# print thl, data[:,var(\"th\"),i],data[:,var(\"q_vapour\"),i]\n ax.plot(times,thl)\n ax.plot(times[tr.ref]*np.ones(2),ax.get_ylim(),'--k')\n ax.set_ylabel(r\"$\\theta_L$ K\",fontsize=16)\n ax.set_xlabel(r\"time h$^{-1}$\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n\n ax = axa[2,1]\n for i in range(np.shape(z)[1]-1) :\n qt = data[:,i,tr.var(\"q_vapour\")] + \\\n data[:,i,tr.var(\"q_cloud_liquid_mass\")]\n# print qt,data[:,var(\"q_vapour\"),i],data[:,var(\"q_cloud_liquid_mass\"),i]\n ax.plot( times, qt)\n ax.plot(times[tr.ref]*np.ones(2),ax.get_ylim(),'--k')\n ax.set_ylabel(r\"$q_t$ kg/kg\",fontsize=16)\n ax.set_xlabel(r\"time h$^{-1}$\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n\n plt.tight_layout()\n plt.savefig(fn+'_Cloud_traj_%3.3d_time_'%select_obj+'.png')\n\n fig2 = plt.figure(figsize=(10,6))\n\n ax1 = fig2.add_subplot(111, projection='3d')\n\n ax1.set_xlim(tr.coords['xcoord'][0]-10, tr.coords['xcoord'][-1]+10)\n ax1.set_ylim(tr.coords['ycoord'][0]-10, tr.coords['ycoord'][-1]+10)\n ax1.set_zlim(0, tr.coords['zcoord'][-1])\n for it in range(len(traj)):\n ax1.plot(traj[it,:,0],traj[it,:,1],zs=traj[it,:,2], \\\n linestyle='',marker='.')\n ax1.set_title('Cloud %2.2d'%select_obj)\n\n plt.savefig(fn+'_Cloud_traj_pos_%3.3d'%select_obj+'.png')\n\n fig3, ax = plt.subplots(1,1,figsize=(10,6))\n for i in range(np.shape(z)[1]-1) :\n ax.plot(times, z[:,i])\n ax.plot(times[tr.ref]*np.ones(2),ax.get_ylim(),'--k')\n# print(times[tr.ref]*np.ones(2),plt.ylim())\n ax.set_xlabel(r\"time h$^{-1}$\",fontsize=16)\n ax.set_ylabel(r\"$z$ m\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n fig3.savefig(fn+'_Cloud_traj_z_%3.3d'%select_obj+'.png')\n# plt.close(fig2)\n\n ntr = 1\n while True:\n if f\"tracer_rad{ntr}\" in tr.variable_list:\n ntr += 1\n else:\n ntr -= 1\n break\n\n if ntr > 0:\n fig4, axa = plt.subplots(2, ntr,figsize=(ntr*4,12))\n for n in range(1, ntr+1):\n v = f\"tracer_rad{n}\"\n print(f\"Plotting {v}\")\n\n ax = axa[0, n-1]\n for i in range(np.shape(z)[1]-1) :\n ax.plot(data[:,i,tr.var(v)],z[:,i])\n ax.set_xlabel(tr.variable_list[v],fontsize=16)\n ax.set_ylabel(r\"$z$ m\",fontsize=16)\n 
ax.set_title('Cloud %2.2d'%select_obj)\n\n ax = axa[1, n-1]\n for i in range(np.shape(z)[1]-1) :\n ax.plot( times, data[:,i,tr.var(v)])\n ax.plot(times[tr.ref]*np.ones(2),ax.get_ylim(),'--k')\n ax.set_ylabel(tr.variable_list[v],fontsize=16)\n ax.set_xlabel(r\"time h$^{-1}$\",fontsize=16)\n ax.set_title('Cloud %2.2d'%select_obj)\n fig4.savefig(fn+'_Cloud_tracer_z_%3.3d'%select_obj+'.png')\n\n return", "def draw_cost_map(self, cost_map, display_time=0.01):\n cost_map = np.swapaxes(cost_map, 0, 1)\n self.ax.set_xticks(np.arange(cost_map.shape[1]))\n self.ax.set_yticks(np.arange(cost_map.shape[0]))\n plt.grid(True, which='both')\n self.ax.imshow(-cost_map, alpha=0.5, cmap='inferno', origin='lower')", "def plot_gcbias_by_ec(self, infile, pdf, plot_title, metrics, gcdata):\n\n def get_samples_by_ec(metrics):\n \"\"\"\n returns samples that belong to each ec\n \"\"\"\n outdata = {}\n metrics = metrics.groupby(\"experimental_condition\")\n for gc in metrics.groups.keys():\n vals = metrics.get_group(gc)[\"cell_id\"]\n\n outdata[gc] = vals\n return outdata\n\n plt.figure(figsize=(12, 12))\n sns.set(context='talk',\n style='whitegrid',\n font='Helvetica',\n rc={'axes.titlesize': 9,\n 'axes.labelsize': 6,\n 'xtick.labelsize': 6,\n 'ytick.labelsize': 6,\n 'legend.fontsize': 6})\n\n df = self.read_input_data(infile, self.gc_tablename)\n df = df.set_index(\"cell_id\")\n\n samps = get_samples_by_ec(metrics)\n cmap = self.get_cmap(metrics)\n\n # we dont want different alpha on different pages, so calculate it\n # using max\n alpha = self.get_alpha(max([len(v) for v in samps.values()]))\n for ec, samps in samps.items():\n for samp in samps:\n cc = metrics[metrics['cell_id'] == samp][\"cell_call\"].iloc[0]\n plt.plot(range(0, 101), df.loc[samp][\n map(str, range(0, 101))].tolist(), color=cmap[cc], alpha=alpha)\n\n if self.gc_content:\n ax = sns.barplot(x='gc', y='windows', data=gcdata,\n color='#E7B591', ci=None)\n plt.setp(ax.patches, linewidth=0)\n\n # Plot the legend\n patches = [matplotlib.patches.Patch(color='#E7B591',\n label=\"Windows at GC%\")]\n legend1 = plt.legend(handles=patches,\n bbox_to_anchor=(0, 0, 0.15, -0.15))\n plt.gca().add_artist(legend1)\n\n patches = [matplotlib.patches.Patch(color=v, label=k)\n for k, v in cmap.items()]\n plt.legend(\n handles=patches, bbox_to_anchor=(\n 0, 0, 0.6, -0.15), ncol=6)\n\n plt.xlabel('GC% of 100 base windows')\n plt.ylabel('Normalized Coverage')\n plt.title('condition:' + ec)\n ticks = np.arange(0, 100, 10)\n plt.xticks(ticks, map(str, ticks))\n\n plt.ylim((0, 2))\n\n pdf.savefig(bbox_inches='tight', pad_inches=0.4)\n plt.close()", "def plotCostVsIterations(JVals):\n plt.figure()\n # plt.xkcd()\n plt.plot(JVals)\n plt.xlabel('iterations')\n plt.ylabel('cost')\n plt.title('gradient descent performance')\n plt.show()", "def CreateCOMap(lon, lat, field_mt, out_name):\n # plot with cartopy\n fig = plt.figure(figsize=(10,6))\n ax = plt.axes(projection=ccrs.PlateCarree())\n gl = ax.gridlines(draw_labels=True)\n gl.top_labels = False\n gl.right_labels = False\n \n\n # Add some cartopy features to the map\n land_50m = cfeature.NaturalEarthFeature('physical', 'land', '50m') \n ax.add_feature(land_50m, edgecolor='k',linewidth=0.5,facecolor='None',zorder=3) \n \n cs = plt.pcolormesh(lon, lat, field_mt, cmap='rainbow', transform=ccrs.PlateCarree())\n cbaxes = fig.add_axes([0.2, 0.03, 0.6, 0.03]) \n cb = plt.colorbar(cs, cax = cbaxes, orientation = 'horizontal' )\n cb.set_label('Total CO column mixing ratio (ppb)')\n \n # Save the figure\n fig.savefig(out_name, 
bbox_inches='tight')#, dpi=1200)\n plt.cla()\n plt.clf()\n #plt.close()\n return", "def plot_gcbias_by_ec_cc(self, infile, pdf, plot_title, metrics, gcdata):\n\n def get_samples_by_ec_cc(metrics):\n \"\"\"\n returns samples that belong to each ec and cc\n \"\"\"\n outdata = {}\n metrics = metrics.groupby([\"experimental_condition\", \"cell_call\"])\n for gc in metrics.groups.keys():\n vals = metrics.get_group(gc)[\"cell_id\"]\n\n outdata[gc] = vals\n return outdata\n\n sns.set(context='talk',\n style='whitegrid',\n font='Helvetica',\n rc={'axes.titlesize': 9,\n 'axes.labelsize': 6,\n 'xtick.labelsize': 6,\n 'ytick.labelsize': 6,\n 'legend.fontsize': 6})\n\n df = self.read_input_data(infile, self.gc_tablename)\n df = df.set_index(\"cell_id\")\n\n samps = get_samples_by_ec_cc(metrics)\n\n # we dont want different alpha on different pages, so calculate it\n # using max\n alpha = self.get_alpha(max([len(v) for v in samps.values()]))\n for ec, samps in samps.items():\n plt.figure(figsize=(12, 12))\n for samp in samps:\n plt.plot(range(0, 101), df.loc[samp][\n map(str, range(0, 101))].tolist(), color='#2098AE', alpha=alpha)\n\n if self.gc_content:\n ax = sns.barplot(x='gc', y='windows', data=gcdata,\n color='#E7B591', ci=None)\n plt.setp(ax.patches, linewidth=0)\n\n patches = [matplotlib.patches.Patch(color='#E7B591',\n label=\"Windows at GC%\")]\n plt.legend(handles=patches, bbox_to_anchor=(0, 0, 0.5, -0.15))\n\n plt.ylim((0, 2))\n ticks = np.arange(0, 100, 10)\n plt.xticks(ticks, map(str, ticks))\n plt.xlabel('GC% of 100 base windows')\n plt.ylabel('Normalized Coverage')\n plt.title(\n 'condition: %s Cell Call %s' %\n (ec[0], ec[1]))\n pdf.savefig(bbox_inches='tight', pad_inches=0.4)\n plt.close()", "def abatement_cost_plotter_json(directory, gwp=34, discount_rate=0, gas_price=0):\n npv, emissions, techs = results_analysis_functions.results_analysis(directory, discount_rate, gas_price)\n files = [f for f in listdir(directory) if isfile(join(directory, f)) if '.json' in f]\n with open(f\"{directory}/{files[0]}\", 'r') as jfile:\n data = jfile.read()\n sample = json.loads(data)\n emissions = np.sum(emissions * sample['time']['delta_t'] * 3600 * 24 / 1e6, axis=1) # metric tonnes\n em_abate = np.zeros([emissions.shape[0] - 1, emissions.shape[1]])\n cost_abate = np.zeros([emissions.shape[0] - 1, emissions.shape[1]])\n for ind in range(em_abate.shape[0]):\n em_abate[ind, :] = emissions[-1, :] - emissions[ind, :]\n cost_abate[ind, :] = -npv['Total'][ind, :]\n abatement_cost = cost_abate / em_abate / gwp\n medianprops = dict(color='k')\n boxprops = dict(linewidth=4)\n boxplot = plt.boxplot(np.transpose(abatement_cost), medianprops=medianprops,\n boxprops=boxprops, patch_artist=True)\n ind = 1\n for bx in boxplot['boxes']:\n bx.set(facecolor=color_set[ind])\n ind += 1\n ax = plt.gca()\n ax.set_xticklabels(techs[:len(techs) - 1])\n ax.set_ylabel('Mitigation cost\\n(USD/metric ton CO$_2$ eq.)')\n ax.set_xlabel('LDAR program')\n plot_fixer()", "def plot_sql_policty(self):\n bottom_end = int(self.quantity/2)\n top_end = int(self.quantity*2)\n possible_quantities = list(range(bottom_end, top_end))\n \n order_costs = []\n holding_costs = []\n total_costs = []\n \n for q in possible_quantities:\n oc = self.calculate_order_cost(q)\n order_costs.append(oc)\n hc = self.calculate_holding_cost(q)\n holding_costs.append(hc)\n tc = oc + hc\n total_costs.append(tc)\n \n fig, ax = plt.subplots(figsize=(10, 6))\n \n ax.plot(possible_quantities, order_costs, label=\"Order costs\", color=\"red\")\n 
ax.plot(possible_quantities, holding_costs, label=\"Holding costs\", color=\"orange\")\n ax.plot(possible_quantities, total_costs, label=\"Total costs\", color=\"green\")\n ax.grid(color='lightgrey', linestyle='-', linewidth=1)\n ax.set_facecolor(\"white\")\n ax.legend(fontsize=14, loc=4)\n plt.axhline(self.total_cost, linestyle='--', color=\"black\", alpha=0.6)\n plt.axvline(self.quantity, linestyle='--', color=\"black\", alpha=0.6)\n plt.title(\"Cost Development for Inventory Model\", fontsize=16) \n plt.xlabel(\"Quantity\", fontsize=14)\n plt.ylabel(\"Costs\", fontsize=14)\n plt.show()", "def PlotG(cell):\n\n t = np.asarray(cell.record['time']) * .001\n\n gAMPA = np.asarray(cell.record['gAMPA'])\n plt.plot(t, -gAMPA, 'orange', label='gAMPA')\n gGABA = np.asarray(cell.record['gGABA'])\n plt.plot(t, -gGABA, 'b', label='gGABA')\n gNMDA = np.asarray(cell.record['gNMDA'])\n plt.plot(t, -gNMDA, 'r', label='gNMDA')\n plt.ylabel('conductances (nS)')\n plt.xlabel('time (s)')\n plt.legend()", "def plot_recap_vitro_ephy(title_dict, reM, phy_dict, cluster_ids, df_stim, cell_db_ids=None,\n checkerboard=None, fullfield_fl=None, fl_bars=None, chirp_am=None,\n chirp_fm=None, moving_gratings=None, export_path=\"./recap_plot.pdf\"):\n print(\"Generating the recap plot\")\n configure_pyplot_recap()\n\n cond = title_dict[\"condition\"]\n date = title_dict[\"date\"]\n record_name = title_dict[\"record_name\"]\n record_id = title_dict[\"record_id\"]\n\n if cell_db_ids is None:\n cell_db_ids = [-1]*len(cluster_ids)\n\n with PdfPages(export_path) as pp:\n\n #Plotting Cover\n fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2\n gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)\n ax_rem = fig.add_subplot(gs[:10,2:-1])\n reM.plot(ax_rem)\n\n ax_stim_recap = fig.add_subplot(gs[11:16,:])\n plot_stim_recap_table(ax_stim_recap, df_stim)\n suptitle = \" - \".join([cond, date, record_name+\" n°\"+str(record_id)])\n plt.suptitle(suptitle)\n\n pp.savefig()\n plt.close()\n\n for cluster, cell_id in zip(cluster_ids, cell_db_ids):\n reM_cell_idx = reM[\"S_matrix\"][0].attrs[\"cell_map\"][cluster]#np.where(cluster==cluster_ids)[0][0]\n\n fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2\n suptitle = \" - \".join([cond, date, record_name+\" n°\"+str(record_id),\n \"Cluster n°\"+str(cluster), \"Cell id n°\"+str(cell_id)])\n plt.suptitle(suptitle)\n\n mask_cluster = phy_dict[\"spike_clusters\"]==cluster\n cluster_composition = np.unique(phy_dict[\"spike_templates\"][mask_cluster])\n\n gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)\n\n #Template on electrodes\n cell_loc_ax = fig.add_subplot(gs[0:4,0:4])\n plot_spike_template_MEA(cell_loc_ax, cluster_composition, phy_dict[\"templates\"], phy_dict[\"channel_positions\"])\n\n #Autocorrelogram\n autocorr_ax = fig.add_subplot(gs[0:4,5:9])\n plot_autocorrelogram(autocorr_ax, cluster, phy_dict[\"spike_times\"], phy_dict[\"spike_clusters\"],\n bin_ms=.001, sampling_rate=30000, tails=30)\n\n #Spike amplitude across time\n sp_amp_ax = fig.add_subplot(gs[0:4,10:])\n plot_spike_amplitudes(sp_amp_ax, cluster, phy_dict[\"spike_templates\"], phy_dict[\"spike_clusters\"],\n phy_dict[\"spike_times\"], phy_dict[\"amplitudes\"])\n plot_stim_epochs_to_spikes(sp_amp_ax, reM, y_pos=0.6)\n\n #Checkerboard STA\n if checkerboard is not None:\n pval_checker = checkerboard[1][reM_cell_idx]\n pval_checker = np.min(pval_checker[pval_checker!=0])\n inner_grid = 
gridspec.GridSpecFromSubplotSpec(4, 4,\n subplot_spec=gs[5:12,0:12], wspace=.09, hspace=.13)\n plot_2d_sta(checkerboard[0][reM_cell_idx], pval=pval_checker, grid=inner_grid)\n\n #Fullfield flickering STA\n if fullfield_fl is not None:\n pval_fffl = fullfield_fl[1][reM_cell_idx]\n pval_fffl = np.min(pval_fffl[pval_fffl!=0])\n sp_amp_ax = fig.add_subplot(gs[5:12,13:])\n plot_t_sta(sp_amp_ax, fullfield_fl[0][reM_cell_idx], pval=pval_fffl)\n\n #Chirp_FM\n if chirp_fm is not None:\n chirpfm_ax = fig.add_subplot(gs[13:16,:])\n plot_chirp(chirpfm_ax, chirp_fm[0], chirp_fm[1][:,reM_cell_idx], smooth=False)\n chirpfm_ax.set_title(\"Chirp FM\")\n\n #Chirp_AM\n if chirp_am is not None:\n chirpam_ax = fig.add_subplot(gs[17:20,:])\n plot_chirp(chirpam_ax, chirp_am[0], chirp_am[1][:,reM_cell_idx], smooth=False)\n chirpam_ax.set_title(\"Chirp AM\")\n\n #Flickering bars\n if fl_bars is not None:\n pval_bars = fl_bars[1][reM_cell_idx]\n pval_bars = np.min(pval_bars[pval_bars!=0])\n fl_bars_ax = fig.add_subplot(gs[21:,:12])\n plot_fl_bars(fl_bars_ax, fl_bars[0][reM_cell_idx], pval=pval_bars)\n\n #Moving gratings\n if moving_gratings is not None:\n ds_ax = fig.add_subplot(gs[21:,13:], projection=\"polar\")\n plot_ds_wheel(ds_ax, moving_gratings, cell_idx=reM_cell_idx)\n\n pp.savefig()\n plt.close()\n\n print(\"Cell cluster n°\",cluster,\"done\")\n\n sns.set()\n plt.rcdefaults()\n print()", "def test_tables_correctly_plotted():\n table = [ [1,2,3],\n [4,5,6],\n [7,8,9]]\n \n x_labels = [\"a\",\"b\",\"c\"]\n y_labels = [\"d\",\"e\",\"f\"]\n\n show_table(numpy.array(table), x_labels, y_labels)", "def standard_killer_plot():\n Nclimb = 3; Ncruise = 2; Nmission = 1;\n subsList = [get_optimal737_subs(), get_M072_737_subs(), get_D8_eng_wing_subs(), get_D8_no_BLI_subs(), get_optimalD8_subs(), get_optimalD8_subs()]\n configList = ['optimal737', 'M072_737', 'D8_eng_wing', 'D8_no_BLI', 'optimalD8', 'optimalD8']\n fixedBPRList = [True, True, True, True, True, False]\n pRatOptList = [False, False, False, False, False, True]\n mutategpargList = [False, False, False, False, False, False]\n sol = {}; wf = [];\n for i in range(0,6):\n m = Mission(Nclimb, Ncruise, configList[i], Nmission)\n m.cost = m['W_{f_{total}}'].sum()\n substitutions = subsList[i]\n substitutions.update({'R_{req}': 3000.*units('nmi'),\n 'n_{pass}': 180.})\n sol[i] = optimize_aircraft(m, substitutions, fixedBPRList[i], pRatOptList[i], mutategpargList[i])\n wf.append(sol[i]('W_{f_{total}}'))\n\n wing_sens = [sol[i]['sensitivities']['constants']['C_{wing}'] for i in range(0,6)]\n HT_sens = [sol[i]['sensitivities']['constants']['C_{ht}'] for i in range(0,6)]\n VT_sens = [sol[i]['sensitivities']['constants']['C_{VT}'] for i in range(0,6)]\n fuse_sens = [sol[i]['sensitivities']['constants']['C_{fuse}'] for i in range(0,6)]\n engine_sens = [sol[i]['sensitivities']['constants']['C_{engsys}'] for i in range(0,6)]\n lg_sens = [sol[i]['sensitivities']['constants']['C_{lg}'] for i in range(0,6)]\n Mmin_sens = [sol[i]['sensitivities']['constants']['M_{min}'] for i in range(0,6)]\n\n ytest = [mag(wf[i]/wf[0])[0] for i in range(0,6)]\n xtest = [0, 1, 2, 3, 4, 5]\n xlabels = ['Optimized 737-800 M = 0.8', 'Slow to M = 0.72', 'D8 fuselage, Pi tail', 'Rear podded engines', 'Integrated engines, BLI = D8', 'Optimize engine', '2020 Engines']\n\n plt.plot(xtest, ytest, \"o--\")\n plt.plot([0, 1, 2, 3, 4, 5, 6], [1, .88, .81, .82, .67, .66, .63], \"o--\")\n plt.plot([0, 1, 2, 3, 6], [1, .868, .871, .865, .602], \"o--\")\n plt.plot([0, 1, 2, 3, 4, 5, 6], [1, 41129./43843, 
38402./43843, 37180./43843, 32987./43843, 32383./43843, 29753./43843], \"o--\")\n plt.xticks(np.linspace(0,6,7), xlabels, rotation='vertical')\n plt.ylim([0,1.1])\n plt.xlim([-.5, 6.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('$W_{\\mathrm{f}}/W_{\\mathrm{f}_\\mathrm{0}}$', fontsize = 20)\n plt.title('D8 Morphing Chart')\n plt.legend(['SP Model', 'TASOPT', 'NASA', 'Aurora'], loc=3)\n plt.savefig('Morphing_Chart_Figs/D8_standard_morphing_chart.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n xtest = [0, 1, 2, 3, 4, 5]\n\n plt.plot(xtest, wing_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.25])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Wing Weight', fontsize = 20)\n plt.title('Wing Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_wing_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, HT_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.02])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Horizontal Tail Weight', fontsize = 20)\n plt.title('Horizontal Tail Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_HT_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, VT_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.15])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Vertical Tail Weight', fontsize = 20)\n plt.title('Vertical Tail Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_VT_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, fuse_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.55])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Fuselage Weight', fontsize = 20)\n plt.title('Fuselage Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_fuse_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, engine_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.3])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Engine Weight', fontsize = 20)\n plt.title('Engine Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_engine_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, lg_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.07])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Landing Gear Weight', fontsize = 20)\n plt.title('Landing Gear Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_lg_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()", "def marginal_cost_rule(_m, g, y):\r\n\r\n if g in m.G_THERM:\r\n\r\n # Existing generators\r\n if g in m.G_E_THERM:\r\n\r\n # Last year in the dataset for which fuel cost information exists\r\n max_year = max([i[1] for i in self.data.existing_units_dict.keys() if 'FUEL_COST' in i])\r\n\r\n # If 
year in model horizon exceeds max year for which data are available use values for last\r\n # available year\r\n if y > max_year:\r\n # Use final year in dataset to max year\r\n y = max_year\r\n\r\n # Fuel cost\r\n fuel_cost = self.data.existing_units_dict[('FUEL_COST', y)][g]\r\n\r\n # Variable operations and maintenance cost\r\n vom = self.data.existing_units_dict[('PARAMETERS', 'VOM')][g]\r\n\r\n # Heat rate\r\n heat_rate = self.data.existing_units_dict[('PARAMETERS', 'HEAT_RATE')][g]\r\n\r\n # Candidate generators\r\n elif g in m.G_C_THERM:\r\n\r\n # Last year in the dataset for which fuel cost information exists\r\n max_year = max([i[1] for i in self.data.existing_units_dict.keys() if 'FUEL_COST' in i])\r\n\r\n # If year in model horizon exceeds max year for which data are available use values for last\r\n # available year\r\n if y > max_year:\r\n # Use final year in dataset to max year\r\n y = max_year\r\n\r\n # Fuel cost\r\n fuel_cost = self.data.candidate_units_dict[('FUEL_COST', y)][g]\r\n\r\n # Variable operations and maintenance cost\r\n vom = self.data.candidate_units_dict[('PARAMETERS', 'VOM')][g]\r\n\r\n # Heat rate\r\n heat_rate = self.data.candidate_units_dict[('PARAMETERS', 'HEAT_RATE')][g]\r\n\r\n else:\r\n raise Exception(f'Unexpected generator encountered: {g}')\r\n\r\n # Compute marginal cost for thermal units\r\n marginal_cost = float((fuel_cost * heat_rate) + vom)\r\n\r\n elif (g in m.G_E_WIND) or (g in m.G_E_SOLAR) or (g in m.G_E_HYDRO):\r\n # Marginal cost = VOM cost for wind and solar generators\r\n marginal_cost = self.data.existing_units.loc[g, ('PARAMETERS', 'VOM')]\r\n\r\n elif (g in m.G_C_WIND) or (g in m.G_C_SOLAR):\r\n # Marginal cost = VOM cost for wind and solar generators\r\n marginal_cost = self.data.candidate_units.loc[g, ('PARAMETERS', 'VOM')]\r\n\r\n elif g in m.G_STORAGE:\r\n # Assume marginal cost = VOM cost of typical hydro generator (7 $/MWh)\r\n marginal_cost = 7\r\n\r\n else:\r\n raise Exception(f'Unexpected generator: {g}')\r\n\r\n assert marginal_cost >= 0, 'Cannot have negative marginal cost'\r\n\r\n return float(marginal_cost + random.uniform(0, 2))", "def plot_product_map(data, data_type, cust_map = False):\n\n tsne_data = data.reset_index()\n sns.set(font_scale=2,rc={'figure.figsize':(20,15)})\n #sns.set_style(\"whitegrid\")\n\n if data_type == \"instacart\": \n tsne_data[\"cat\"] = tsne_data[\"cat\"].astype('category')\n tsne_data[\"dept\"] = tsne_data[\"dept\"].astype('category')\n \n # can visualise hue by dept or by cat\n graph = sns.scatterplot(x = \"x\", y= \"y\", \n data=tsne_data, hue=\"dept\", s=60) \n \n elif data_type == \"c2v\": # don't give it a particular hue\n graph = sns.scatterplot(x = \"x\", y= \"y\", data=tsne_data, s=60)\n \n elif data_type == \"simulated_c2v_pooled\": \n tsne_data[\"cat_i\"] = tsne_data[\"cat_i\"].astype('category')\n graph = sns.scatterplot(x = \"x\", y= \"y\", data=tsne_data,\n hue = \"cat_i\", s=60)\n else:\n tsne_data[\"c\"] = tsne_data[\"c\"].astype('category')\n graph = sns.scatterplot(x = \"x\", y= \"y\", \n data=tsne_data, hue=\"c\", s=60)\n\n \n # move legend to the upper right next to the figure\n # - see https://matplotlib.org/3.1.1/tutorials/intermediate/legend_guide.html\n graph.legend(loc='upper left', bbox_to_anchor=(1, 1), ncol=1) #ncol=2 for larger things\n plt.show()", "def cost(job_id, sfn=TIBANNA_DEFAULT_STEP_FUNCTION_NAME, update_tsv=False):\n print(API().cost(job_id=job_id, sfn=sfn, update_tsv=update_tsv))", "def plot_trajectories(tjcs, title, dims): \n wm = 
plt.get_current_fig_manager()\n wm.window.wm_geometry(dims)\n plt.title(title)\n plt.axis([0,16,0,12])\n plt.xlabel('X (mts)')\n plt.ylabel('Y (mts)')\n \n if len(tjcs) < 50:\n palette = generate_palette(len(tjcs))\n for n, t in enumerate(tjcs):\n t = np.array(t)\n plt.plot( t[:,0], t[:,1], \"-\", c = palette[n] )\n plt.plot( t[-1,0], t[-1,1], \"o\", markersize = 5.0, c = palette[n] )\n else:\n for t in tjcs:\n t = np.array(t)\n plt.plot(t[:,0], t[:,1], \"-\")", "def plotCon(xx, cc, ccRes, tt, plt_profiles='all',\n locs=[1, 3], colorbar=False, styles=['--', '-'],\n save=False, path=None):\n if path is None:\n if sys.platform == \"darwin\": # folder for linux\n path = '/Users/AmanuelWK/Desktop/'\n elif sys.platform.startswith(\"linux\"): # folder for mac\n path = '/home/amanuelwk/Desktop/'\n\n M = cc[0, :].size # number of profiles\n\n # setting number of profiles to plot\n if plt_profiles is 'all':\n plt_nbr = np.arange(M) # go through all profiles\n else:\n skp = int(M/plt_profiles)\n plt_nbr = np.arange(0, M, skp)\n\n # plotting concentration profiles\n l1s = [] # for sperate legends\n l2s = []\n # mapping profiles to colormap\n lines = np.linspace(0, 1, M)\n colors = [cm.jet(x) for x in lines]\n # Set the colormap and norm\n cmap = cm.jet\n norm = mpl.colors.Normalize(vmin=tt[0]/60, vmax=tt[-1]/60)\n scalarMap = cm.ScalarMappable(norm=norm, cmap=cmap)\n scalarMap.set_array(tt/60) # mapping colors to time in minutes\n\n fig = plt.figure()\n for j in plt_nbr:\n plt.gca().set_xlim(left=xx[0])\n plt.gca().set_xlim(right=xx[-1])\n l1, = plt.plot(xx, cc[:, j], '--', color=colors[j])\n l1s.append([l1])\n if j > 0:\n # plot t=0 profile only for experiment\n # because numerical profiles are computed from this one\n l2, = plt.plot(xx, ccRes[:, j], '-', color=colors[j])\n l2s.append([l2])\n # plotting two legends, for color and linestyle\n plt.legend([l1, l2], [\"Experiment\", \"Numerical\"], loc=locs[0])\n plt.xlabel('z-distance [µm]')\n plt.ylabel('Concentration [µM]')\n # place colorbar in inset in current axis\n fig.tight_layout()\n # TODO: think about position of colorbar\n # inset = inset_axes(plt.gca(), width=\"40%\", height=\"3%\", loc=locs[0])\n cb1 = plt.colorbar(scalarMap, cmap=cmap, norm=norm, orientation='vertical')\n cb1.set_label('Time [min]')\n\n if save:\n plt.savefig(path+'profiles.pdf', bbox_inches='tight')\n else:\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an HL7 string to a dictionary
def hl7_str_to_dict(s, use_long_name=True):
    #s = s.replace("\n", "\r")
    print(s)
    try:
        m = parse_message(s)
        return hl7_message_to_dict(m, use_long_name=use_long_name)
    except ParserError:
        return dict()
[ "def convert_to_dictionary(self, string):\n return json.loads(string)", "def get_dict(string: str) -> Dict[str, int]:\n splited = string[1:-1].split(\", \")\n my_dict = {}\n for i in splited:\n key, value = i.split(\":\")\n if key[0] == \"'\" and key[-1] == \"'\":\n key = key[1:-1]\n if value[0] == \"'\" and value[-1] == \"'\":\n value = value[1:-1]\n my_dict[key] = value\n return my_dict", "def test_simple_str_to_dict(self):\n d = msgmap.str_to_dict('k1:v1 k2:v2 k3:v3')\n self.assertEqual(len(d), 3)\n self.assertEqual(d.get('k1'), 'v1')\n self.assertEqual(d.get('k2'), 'v2')\n self.assertEqual(d.get('k3'), 'v3')", "def parseErlangValue(string):\n try:\n d = erlangValue.parseString(string)\n return convertToDict(d)\n except ParseException, err:\n logging.error(err.line)\n logging.error(\" \"*(err.column-1) + \"^\")\n logging.error(err)\n raise", "def dict_from_str(cls, decoder, src):\n result = {}\n for s in src.split():\n sv = decoder(s)\n if sv not in result:\n result[sv] = s\n return result", "def _registry_data_str_to_dict(key, text, platform, domain):\n text = str(text).replace('RegistryEntry(', '')[:-1]\n items = [item.replace(\"'\", \"\") for item in text.split(', ')]\n\n # Do not reformat items if not requested platform\n if (f\"platform={platform}\" in items\n and (domain is None or _base_domain(key) == domain)):\n pass\n elif platform is None and _base_domain(key) == domain:\n pass\n else:\n return {'platform': 'not_platform_domain'}\n\n items_dict = {}\n for item in items:\n try:\n if instr(item, '=') is False:\n continue\n\n key_value = item.split('=')\n key = key_value[0]\n value = key_value[1]\n if value == 'None':\n items_dict[key] = None\n elif value.isnumeric():\n items_dict[key] = int(value)\n elif value.find('.') and value.split('.')[0].isnumeric() and value.split('.')[1].isnumeric():\n items_dict[key] = float(value)\n elif value.startswith('{'):\n items_dict[key] = eval(value)\n elif value.startswith('['):\n items_dict[key] = eval(value)\n else:\n items_dict[key] = value.replace('xa0', '')\n except:\n pass\n\n return items_dict", "def getkvdict(s):\n return str2kvdict(s, sep='@', dlm='::')", "def _str2dict(self, istring):\n retDict = {}\n if istring == EMPTY_FIELD:\n return retDict\n for feat in istring.split(FEAT_SEP):\n # feature format changed in MATE\n if FEAT_VALUE_SEP_RE.search(feat):\n retDict.update((feat.split(FEAT_VALUE_SEP),))\n else:\n retDict.update([self._new2old(feat)])\n return retDict", "def parseErlangValue(string):\n try:\n d = erlangValue.parseString(string)\n return convertToDict(d)\n except ParseException, err:\n #logging.error(err.line)\n #logging.error(\" \"*(err.column-1) + \"^\")\n #logging.error(err)\n #raise\n return []", "def direct_from_string(text: str) -> dict:\n return PlainTextObject(text=text).to_dict()", "def str2dict(s):\n\n if type(s) not in (str, unicode):\n s = str(s)\n d = {}\n for kv in [[x.strip() for x in i.split('=', 1)] for i in s.split(',')]:\n if (len(kv[0]) > 0) and (len(kv[1]) > 0):\n d[kv[0]] = kv[1]\n return d", "def parse_variable_str_to_dict(variables_str) -> Dict[str, Union[str, int, float, bool]]:\n variables = {}\n\n for var_str in variables_str.split(' '):\n if var_str != '':\n\n var_name = var_str.split('=')[0]\n var_value = var_str.split('=')[1].replace('\"', '')\n\n # Cast to correct type\n if re.match(int_and_float_re, var_value):\n var_value = json.loads(var_value) # todo change this with just normal casting see if makes a difference timewise?\n elif re.match(bool_re, var_value):\n var_value = 
str_to_bool(var_value)\n\n variables[var_name] = var_value\n\n return variables", "def convert(self, s: State) -> Dict[str, np.ndarray]:", "def parse_str_to_dict(self, data: str) -> OrderedDict:\n return xmltodict.parse(data)", "def headerDict(header_string):\n\t# >7244:002ce8 FBpp0236088 gene=FBgn0208790 orthodb8_OG=EOG8MGTH1 orthodb8_level=32281 organism_name=`Drosophila virilis` uniprot_de=`GJ21671`\n\t# Handling awful cases like uniprot_de=`Probable tRNA 2`-O-ribose methyltransferase`\n\theader_string = header_string.replace(\"`-\", \"'-\")\n\tquote_split = header_string.split(\"`\")\n\tdef garble(x):\n\t\treturn x.replace(\" \", \"@*#/*\")\n\tdef degarble(x):\n\t\treturn x.replace(\"@*#/*\", \" \")\n\treform = quote_split[0]\n\txi = 1\n\twhile xi < len(quote_split):\n\t\t# string in quotes\n\t\treform += garble(quote_split[xi])\n\t\t# next string\n\t\treform += quote_split[xi+1]\n\t\txi = xi+2\n\t# Split \n\n\td = {}\n\tfor entry in reform.split():\n\t\tif '=' in entry:\n\t\t\tsp = entry.split('=')\n\t\t\td[sp[0]] = degarble(sp[1])\n\t\n\treturn d", "def stringtodict(liststring):\n return dict([ (item.strip(), 1) for item in liststring.split(\",\")])", "def query_string_to_dict(query_string):\n return dict((k, v[0]) for k, v in urlparse.parse_qs(query_string).iteritems())", "def parse_lexicon_entry(lexicon_entry: str) -> dict:\n functional_labels = {}\n lexicon_entry = lexicon_entry.replace(\"[\", \"\").replace(\"]\", \"\").replace(\";\", \"\")\n labels = lexicon_entry.split()\n for label in labels:\n parts = label.split('=')\n if len(parts) == 2:\n functional_labels[parts[0].strip()] = parts[1].strip()\n return functional_labels", "def pfstring_to_pfdict(self, pfstring):\n pfstring = pfstring.strip('\\0')\n pfptr = stock.ParameterFile()\n pfptr.pfcompile(pfstring)\n pfdict = pfptr.pf2dict()\n return pfdict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an HL7 message to a dictionary
def hl7_message_to_dict(m, use_long_name=True):
    if m.children:
        d = {}
        for c in m.children:
            name = str(c.name).lower()
            if use_long_name:
                name = str(c.long_name).lower() if c.long_name else name
            dictified = hl7_message_to_dict(c, use_long_name=use_long_name)
            if name in d:
                if not isinstance(d[name], list):
                    d[name] = [d[name]]
                d[name].append(dictified)
            else:
                d[name] = dictified
        return d
    else:
        return m.to_er7()
[ "def hl7_str_to_dict(s, use_long_name=True):\n #s = s.replace(\"\\n\", \"\\r\")\n print(s)\n try:\n m = parse_message(s)\n return hl7_message_to_dict(m, use_long_name=use_long_name)\n except ParserError:\n return dict()", "def parse_msg(msg):\n subject = msg.get(\"Subject\")\n return {\n \"subject\": subject,\n \"sender\": msg.get(\"Sender\"),\n \"date\": msg.get(\"Date\"),\n \"size\": len(bytes(msg)),\n }", "def parse_message(message):\n return {\n \"msg\": message.message,\n \"sender\": message.sender.name,\n \"sent_on\": message.sent_on.strftime(\"%b %d %y - %H:%M\"),\n }", "def decode_msg_dict(timestamp, bitlist):\n\n return {'timestamp': timestamp,\n 'msgtype': bitlist.ubits(0,6),\n 'repeat': bitlist.ubits(6,2),\n 'mmsi': bitlist.ubits(8, 30),\n 'status': bitlist.ubits(38, 4),\n 'turn': bitlist.sbits(42, 8),\n 'speed': bitlist.ubits(50, 10),\n 'accuracy': bitlist.ubits(60, 1),\n 'lon': bitlist.sbits(61, 28),\n 'lat': bitlist.sbits(89, 27),\n 'course': bitlist.ubits(116, 12),\n 'heading': bitlist.ubits(128, 9),\n 'second': bitlist.ubits(137, 6),\n 'maneuver': bitlist.ubits(143, 2),\n 'raim': bitlist.ubits(148, 1),\n 'radio': bitlist.ubits(149, 19)}", "def _parse_line(self, line):\n return {'raw_message': line}", "def parse_message(self, message):\n try:\n # first 3x32 bit ints are the track header\n self.frame_num, message = self.unpacker('I', message)\n self.ping_num , message = self.unpacker('I', message)\n self.ping_time, message = self.unpacker('d', message)\n self.num_tracks, message = self.unpacker('I', message)\n\n print 'Frame:', self.frame_num\n print ' - ping:', self.ping_num\n print ' - ping time:', self.ping_time\n print ' - num tracks:', self.num_tracks\n\n for i in range(self.num_tracks):\n target = {}\n target['id'],message = self.unpacker('H', message)\n target['size_sq_m'], message = self.unpacker('f', message)\n target['speed_mps'], message = self.unpacker('f', message)\n target['target_strength'], message = self.unpacker('f', message)\n target['min_range_m'], message = self.unpacker('f', message)\n target['max_range_m'], message = self.unpacker('f', message)\n target['min_bearing_deg'], message = self.unpacker('f', message)\n target['max_bearing_deg'], message = self.unpacker('f', message)\n target['min_elevation_deg'], message = self.unpacker('f', message)\n target['max_elevation_deg'], message = self.unpacker('f', message)\n target['first_detect'], message = self.unpacker('f', message) #time\n target['pings_visible'], message = self.unpacker('H', message)\n target['last_pos_range'], message = self.unpacker('f', message)\n target['last_pos_bearing'], message = self.unpacker('f', message)\n target['last_pos_elevation'], message = self.unpacker('f', message)\n target['last_vel_range'], message = self.unpacker('f', message)\n target['last_vel_bearing'], message = self.unpacker('f', message)\n target['last_vel_elevation'], message = self.unpacker('f', message)\n target['width'], message = self.unpacker('f', message)\n target['length'], message = self.unpacker('f', message)\n target['height'], message = self.unpacker('f', message)\n\n self.targets.append(target)\n\n return True\n except:\n print \"Failed to parse track buffer:\", sys.exc_info()\n return False", "def json_payload_to_dict(message, enc=\"utf-8\"):\n return MessageUtils.json_to_dict(MessageUtils.decode_payload(message, enc))", "def _read_message(data, msg):\n if msg.type in IGNORED_MESSAGES:\n data = _ignore(data, msg)\n elif msg.type == 'time_signature':\n # NOTE: right now we're only handling fours\n if 
msg.numerator == 4 and msg.denominator == 4:\n data = _dict_update(\n data,\n clocks_per_click=msg.clocks_per_click,\n notated_32nd_notes_per_beat=msg.notated_32nd_notes_per_beat)\n else:\n raise TimeSignatureException('not 4/4')\n elif msg.type == 'note_on':\n data = _note_on_update(data, msg)\n elif msg.type == 'note_off':\n data = _note_off_update(data, msg)\n\n return data", "def _load_message(message_filename):\n\n dict = {}\n\n with open(message_filename) as f:\n # Load the json file\n msg = json.load(f)\n\n # Derives the uuid from message_filename\n dict['id'] = message_filename[9:45]\n\n # Get the rest of the data\n dict['to'] = msg['to']\n dict['from'] = msg['from']\n dict['subject'] = msg['subject']\n dict['body'] = msg['body']\n\n # Converts the time string to the correct type and format\n dict['time'] = datetime.strptime(msg['time'], DATE_FORMAT)\n\n return dict", "def _parse_sns_message(self, sns_message):\n splitted_list = sns_message.split(PATTERN_LINESPLITTER)\n # Workaround for when the last parameter is not terminated with\n # the same separator pattern, then a closing quote might remain.\n if splitted_list[-1] != '' and splitted_list[-1][-1] == '\\'':\n # Cut the last character from the last item\n splitted_list[-1] = splitted_list[-1][:-1]\n result_dict = {}\n for line_item in splitted_list:\n line_item = line_item.strip()\n if PATTERN_KEYSPLITTER not in line_item:\n # Unparseable line, do not parse\n continue\n key, value = line_item.split(PATTERN_KEYSPLITTER, 1)\n result_dict[key] = self._cast_type(value)\n return result_dict", "def _convert_data_to_dict(self, data: Any) -> Tuple[str, dict]:\n topic_attribute_name = self.get_topic_attribute_name(data.private_revCode)\n _, topic_name = topic_attribute_name.split(\"_\", maxsplit=1)\n\n data_stream = copy.deepcopy(\n self._template_manager_message[topic_attribute_name]\n )\n data_vars = data.get_vars()\n\n for topic_attribute in data_vars:\n data_stream[topic_attribute][\"value\"] = data_vars[topic_attribute]\n\n payload = (\n data_stream\n if topic_attribute_name in self.periodic_data\n else [\n data_stream,\n ]\n )\n\n data_as_dict = dict(\n csc=self.remote.salinfo.name,\n salindex=self.remote.salinfo.index,\n data=dict(\n [\n (topic_name, payload),\n ],\n ),\n )\n return topic_attribute_name, data_as_dict", "def bs_to_dict(byte_sequence):\n ret_val = collections.defaultdict()\n ret_val['SHA1'] = byte_sequence.sha1\n ret_val['Size'] = byte_sequence.size\n ret_val['Properties'] = props_to_dict(byte_sequence.properties)\n return ret_val", "def _get_state_dict(self):\n state = self._recv_bytes().decode('utf-8')\n if state[:14] == \"END_OF_MESSAGE\":\n return {}, state[15:] == 'True'\n self._conn.send(b\"RECEIVED\")\n state_dict = json.loads(state)\n return state_dict, None", "def process_message(self, msg_txt):\n logging.info(\"Processing message through API AI - msg: {}\".format(msg_txt))\n request = self.api_ai.text_request()\n request.query = msg_txt\n # get json response as bytes and decode it into a string\n resp = request.getresponse().read().decode('utf-8')\n resp = json.loads(resp) # convert string to json dict\n\n return {\n 'contexts': resp['result']['contexts'] if 'contexts' in resp else None,\n 'intent': resp['result']['metadata']['intentName'],\n 'parameters': resp['result']['parameters']\n }", "def getMsgDict(self):\n return self._msgDict or {}", "def _parse_message(self):\n parts = self.sms_message.split(\",\")\n values = {}\n for part in parts:\n key,value = part.split(\"=\",1)\n values[key] = value\n # 
grab the parts we want\n if '@' in values:\n self.message_type = int(values['@'])\n elif 'AT' in values:\n self.message_type = int(values['AT'])\n self.msisdn = values['CN']\n self.serialnumber = values['SN']\n self.signalstrength = int(values['S'])\n self.batterystrength = int(values['B'])\n self.puffcount = int(values['PC'])\n self.ussdresponse = values['U']\n self.medicationcompartment = values['M']\n self.ce = values['CE']\n # datetime format=DDMMYYHHMMSS\n t = values['T']\n (dd,month,yy,hh,minute,ss) = [int(x) for x in (t[0:2],t[2:4],t[4:6],t[6:8],t[8:10],t[10:12])]\n yy += 2000 # Y2K!\n self.timestamp = datetime.datetime(yy,month,dd,hh,minute,ss)", "def parse_msg(self, body):\n result = {}\n doc = minidom.parseString(body)\n root = doc.getElementsByTagName(\"xml\")[0]\n for item in root.childNodes:\n if item.firstChild:\n value = item.firstChild.nodeValue\n else:\n value = item.nodeValue\n result[item.nodeName] = value\n return result", "def parse_twilio(payload: dict) -> dict:\n try:\n answers = payload['twilio']['collected_data']['covid_19_questionary']['answers']\n return {q: answers[q]['answer'] for q in answers.keys()}\n except KeyError:\n return {}", "def convert(self, s: State) -> Dict[str, np.ndarray]:", "def params(self):\n for entry in self.raw['entries']:\n if entry['type'] == 'message':\n return entry['data']\n\n return {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a new argument. Same usage as ArgumentParser.add_argument
def add_argument(self, *args, **kwargs):
    if self.__parser is None:
        raise AttributeError("Already parsed")
    else:
        self.__parser.add_argument(*args, **kwargs)
[ "def add_arg(self, *args, **kwargs) -> argparse.Action:\n return self.parser.add_argument(*args, **kwargs)", "def add_argument(self, argument):\n self.arguments.insert_argument(argument)", "def add_argument_cmd(self, *args, **kwargs):\n pass", "def add_arg(self, name, alias=None, **attributes):\n if name in self._positional_args or name in self._optional_args:\n raise DuplicateArgError(name)\n\n new_arg = _Argument(alias, **attributes)\n\n if name[0] is '-':\n self._optional_args.append(name)\n else:\n self._positional_args.append(name)\n if new_arg.required is True:\n self._required_args.append(name)\n self._args[name] = new_arg", "def attach_argument(self, argument: Argument, parser=None):\n if not parser:\n parser = self.parser\n\n parser.add_argument(*argument.args, **argument.kwargs)", "def add_argument(self, name:str, deco_spec:dict, param_spec:dict) -> str:\n raise NotImplementedError", "def add_cmdline_arg(args, arg, *values):\n if arg not in args:\n args = list(args) + [arg] + list(values)\n return args", "def add_argument(self, arg_text):\n arg_index = len(self.args)\n self.args.append(arg_text)\n self.roles_dict[arg_index] = arg_text # Note: This ignores all internal modifications\n self.template += '{A' + str(arg_index) + '} '", "def is_argparse_add_argument(node):\n return (\n isinstance(node, Expr)\n and isinstance(node.value, Call)\n and isinstance(node.value.func, Attribute)\n and node.value.func.attr == \"add_argument\"\n and isinstance(node.value.func.value, Name)\n and node.value.func.value.id == \"argument_parser\"\n )", "def add_argument(parser):\n parser.add_argument(\"--loglevel\", help=\"Set logging level\",\n choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], default='INFO')\n try:\n parser.add_argument(\"--logfilename\", help=\"output filename for logfile\")\n except argparse.ArgumentError as e:\n pass", "def enterArg(self, ctx: RulesParser.ArgContext):\n self.context.value.args.append(ctx.getText())", "def add_arg_group(parser, title):\n return parser.add_argument_group(f'{title} arguments')", "def insert_add_arg(self, node, key, value):\n if node not in self.args:\n self.args[node] = {}\n if 'add' not in self.args[node]:\n self.args[node]['add'] = {}\n self.args[node]['add'][key] = value", "def add_argument(self, *args, **kwargs):\n if not args or len(args) == 1 and args[0][0] not in self.prefix_chars:\n raise ValueError(\"Positional arguments are not allowed! Defining them could mess up grid running!\")\n return super(AthenaArgumentParser, self).add_argument(*args, **kwargs)", "def add_argument_case(self, new_argument_case: int):\n self.argument_cases_ids.append(new_argument_case)", "def add_arguments(self, parser):\n parser.add_argument('start_index', type=int)", "def addArgs(self, args):\r\n self.args.extend(args)", "def argument(self, argument):\n \n self._argument = argument", "def AddLanugageArg(\n parser,\n required=False,\n help_text='preferred language of contact. Must be a valid ISO 639-1 '\n 'language code.'):\n parser.add_argument('--language', help=help_text, required=required)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Polynomial interpolation in a table.

points   Interpolation table. The x values are in the first column
         (column 0), and by default, the y values are in the second
         column (column 1). The x values must be in ascending order,
         with no duplicates.
x        The value to interpolate.
degree   The degree of the interpolating polynomial.
ycol     The column number of the y values. (0-based.)
regions  Sometimes, you want to divide the range being interpolated
         into several distinct regions, where the interpolated function
         may be discontinuous at the boundaries between regions. To do
         this, supply this argument, which should contain a list (in
         ascending order) of the x-coordinates of the region boundaries.
         When the interpolation runs against a boundary, the algorithm
         will add a copy of the last point before the boundary,
         positioned at the boundary. This helps to control runaway
         extrapolation leading up to the boundary.

Returns the interpolated value.

The method used is Newtonian interpolation. Based on the cernlib routine divdif.
def interpolate (points, x, degree, ycol=1, regions=[], xcol=0):
    if len (points) < 2 or degree < 1:
        raise Exception ('bad args!')
    degree = min (degree, len (points) - 1)

    # START. FIND SUBSCRIPT IX OF X IN ARRAY.
    #ix = _bisect (points, x, lambda x, p, xcol=xcol: cmp (x, p[xcol]))# - 1
    ix = _bisect2 (points, x, xcol)
    ir = _bisect (regions, x)

    # Number of points to try for.
    # Either degree+1 or degree+2, whichever is even,
    # to give the same number of points on each side.
    # If we run up against an edge or a boundary, we'll
    # fall back to using just degree+1 points (or fewer if we can't
    # even get that many).
    # If we end up using degree+2 points, we'll do two interpolations
    # of degree degree and average them.
    npts = degree + 2 - (degree%2)
    l = 0 # noqa: E741
    t = []
    d = []

    # If we run up against the edge of a region boundary,
    # we'll want to add a psuedopoint right at the boundary
    # (copying the point closest to the boundary) instead of the
    # point farthest away from it.
    extralo = 0
    extrahi = 0

    # Starting point index, not considering edges or boundaries.
    ilo = ix - npts // 2

    # Make sure this point is within the array range and has not
    # crossed a region boundary.
    if ilo < 0:
        ilo = 0
        npts = degree+1
    while ilo < len (points) and ir>0 and points[ilo][xcol] < regions[ir-1]:
        ilo += 1
        npts = degree+1
        extralo = 1

    # Same deal for the right hand edge.
    # ihi is one past the last point to use.
    himoved = 0
    ihi = ilo + npts
    if ihi > len (points):
        ihi = len (points)
        npts = degree+1
        himoved = 1
    while ihi > 0 and ir<len(regions) and points[ihi-1][xcol] >= regions[ir]:
        ihi -= 1
        npts = degree+1
        extrahi = 1
        himoved = 1

    lomoved = 0
    ilo = ihi - npts
    if ilo < 0:
        ilo = 0
        lomoved = 1
    while ilo < len (points) and ir>0 and points[ilo][xcol] < regions[ir-1]:
        ilo += 1
        extralo = 1
        lomoved = 1

    npts = ihi - ilo

    t = []
    d = []
    if extralo and points[ilo][xcol] != regions[ir-1]:
        if not himoved:
            ihi -= 1
        else:
            npts += 1
        t.append (regions[ir-1])
        d.append (points[ilo][ycol])

    if extrahi and points[ihi-1][xcol] != regions[ir]:
        if not lomoved:
            ilo += 1
        else:
            npts += 1
        t.append (regions[ir])
        d.append (points[ihi-1][ycol])

    t += [points[i][xcol] for i in range (ilo, ihi)]
    d += [points[i][ycol] for i in range (ilo, ihi)]

    degree = min (degree, npts-1)
    extra = npts != degree+1
    if extra:
        (t[0], t[npts-2]) = (t[npts-2], t[0])
        (d[0], d[npts-2]) = (d[npts-2], d[0])

    # REPLACE D BY THE LEADING DIAGONAL OF A DIVIDED-DIFFERENCE TABLE,
    # SUPPLEMENTED BY AN EXTRA LINE IF *EXTRA* IS TRUE.
    for l in range(0, degree):
        if extra:
            d[degree+1] = (d[degree+1]-d[degree-1]) / (t[degree+1]-t[degree-1-l])
        for i in range (degree, l, -1):
            d[i] = (d[i]-d[i-1]) / (t[i]-t[i-1-l])

    # EVALUATE THE NEWTON INTERPOLATION FORMULA AT X, AVERAGING TWO VALUES
    # OF LAST DIFFERENCE IF *EXTRA* IS TRUE.
    sum = d[degree]
    if extra: sum=0.5*(sum+d[degree+1])
    for j in range (degree-1, -1, -1):
        sum = d[j] + (x - t[j]) * sum
    return sum
[ "def polyvals(x0, y0, x, deg=0):\n import numpy as np\n return np.polyval(np.polyfit(x0, y0, deg), x)", "def polyinterp(points, x_min_bound=None, x_max_bound=None, plot=False):\n no_points = points.shape[0]\n order = np.sum(1 - np.isnan(points[:, 1:3]).astype(\"int\")) - 1\n\n x_min = np.min(points[:, 0])\n x_max = np.max(points[:, 0])\n\n # compute bounds of interpolation area\n if x_min_bound is None:\n x_min_bound = x_min\n if x_max_bound is None:\n x_max_bound = x_max\n\n # explicit formula for quadratic interpolation\n if no_points == 2 and order == 2 and plot is False:\n # Solution to quadratic interpolation is given by:\n # a = -(f1 - f2 - g1(x1 - x2))/(x1 - x2)^2\n # x_min = x1 - g1/(2a)\n # if x1 = 0, then is given by:\n # x_min = - (g1*x2^2)/(2(f2 - f1 - g1*x2))\n\n if points[0, 0] == 0:\n x_sol = (\n -points[0, 2] * points[1, 0] ** 2 / (2 * (points[1, 1] - points[0, 1] - points[0, 2] * points[1, 0]))\n )\n else:\n a = (\n -(points[0, 1] - points[1, 1] - points[0, 2] * (points[0, 0] - points[1, 0]))\n / (points[0, 0] - points[1, 0]) ** 2\n )\n x_sol = points[0, 0] - points[0, 2] / (2 * a)\n\n x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)\n\n # explicit formula for cubic interpolation\n elif no_points == 2 and order == 3 and plot is False:\n # Solution to cubic interpolation is given by:\n # d1 = g1 + g2 - 3((f1 - f2)/(x1 - x2))\n # d2 = sqrt(d1^2 - g1*g2)\n # x_min = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2))\n d1 = points[0, 2] + points[1, 2] - 3 * ((points[0, 1] - points[1, 1]) / (points[0, 0] - points[1, 0]))\n d2 = np.sqrt(d1**2 - points[0, 2] * points[1, 2])\n if np.isreal(d2):\n x_sol = points[1, 0] - (points[1, 0] - points[0, 0]) * (\n (points[1, 2] + d2 - d1) / (points[1, 2] - points[0, 2] + 2 * d2)\n )\n x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)\n else:\n x_sol = (x_max_bound + x_min_bound) / 2\n\n # solve linear system\n else:\n # define linear constraints\n A = np.zeros((0, order + 1))\n b = np.zeros((0, 1))\n\n # add linear constraints on function values\n for i in range(no_points):\n if not np.isnan(points[i, 1]):\n constraint = np.zeros((1, order + 1))\n for j in range(order, -1, -1):\n constraint[0, order - j] = points[i, 0] ** j\n A = np.append(A, constraint, 0)\n b = np.append(b, points[i, 1])\n\n # add linear constraints on gradient values\n for i in range(no_points):\n if not np.isnan(points[i, 2]):\n constraint = np.zeros((1, order + 1))\n for j in range(order):\n constraint[0, j] = (order - j) * points[i, 0] ** (order - j - 1)\n A = np.append(A, constraint, 0)\n b = np.append(b, points[i, 2])\n\n # check if system is solvable\n if A.shape[0] != A.shape[1] or np.linalg.matrix_rank(A) != A.shape[0]:\n x_sol = (x_min_bound + x_max_bound) / 2\n f_min = np.Inf\n else:\n # solve linear system for interpolating polynomial\n coeff = np.linalg.solve(A, b)\n\n # compute critical points\n dcoeff = np.zeros(order)\n for i in range(len(coeff) - 1):\n dcoeff[i] = coeff[i] * (order - i)\n\n crit_pts = np.array([x_min_bound, x_max_bound])\n crit_pts = np.append(crit_pts, points[:, 0])\n\n if not np.isinf(dcoeff).any():\n roots = np.roots(dcoeff)\n crit_pts = np.append(crit_pts, roots)\n\n # test critical points\n f_min = np.Inf\n x_sol = (x_min_bound + x_max_bound) / 2 # defaults to bisection\n for crit_pt in crit_pts:\n if np.isreal(crit_pt) and crit_pt >= x_min_bound and crit_pt <= x_max_bound:\n F_cp = np.polyval(coeff, crit_pt)\n if np.isreal(F_cp) and F_cp < f_min:\n x_sol = np.real(crit_pt)\n f_min = np.real(F_cp)\n\n if 
plot:\n plt.figure()\n x = np.arange(x_min_bound, x_max_bound, (x_max_bound - x_min_bound) / 10000)\n f = np.polyval(coeff, x)\n plt.plot(x, f)\n plt.plot(x_sol, f_min, \"x\")\n\n return x_sol", "def draw_points_and_poly_interval(key_name, X, y, reg_func_interval, degree, count):\n import matplotlib.pyplot as plt\n X_quad = t_l.generate_polynomials(X, degree)\n interval_values = [*map(lambda a:a[1], reg_func_interval.interval_values_quad_)]\n plt.figure(figsize=(16,9))\n plt.title(key_name[:-1-key_name[::-1].find('.')] + '\\'s Interval Regression')\n plt.xlabel('time')\n plt.ylabel(key_name)\n plt.grid(True)\n plt.scatter(X, y, color='red', label='Sample Point', linewidths=1)\n y_func = reg_func_interval.predict(X_quad)\n picture_y_min = np.amin(y_func)\n picture_y_max = np.amax(y_func)\n plt.plot(X, y_func, color='orange', label='degree ' + str(degree), linewidth=3)\n\n for x_interval in interval_values:\n plt.plot([x_interval,x_interval],[picture_y_min,picture_y_max], color='k', linewidth=1.5, linestyle=\"--\")\n plt.legend(loc='upper left')\n # plt.savefig('points_poly_' + str(count) + '.png', dpi=200)\n plt.show()", "def interpolating_poly(n, x, X='x', Y='y'):\n if isinstance(X, str):\n X = symbols(\"%s:%s\" % (X, n))\n\n if isinstance(Y, str):\n Y = symbols(\"%s:%s\" % (Y, n))\n\n coeffs = []\n\n for i in xrange(0, n):\n numer = []\n denom = []\n\n for j in xrange(0, n):\n if i == j:\n continue\n\n numer.append(x - X[j])\n denom.append(X[i] - X[j])\n\n numer = Mul(*numer)\n denom = Mul(*denom)\n\n coeffs.append(numer/denom)\n\n return Add(*[ coeff*y for coeff, y in zip(coeffs, Y) ])", "def eval_interp_bicubic(coeffs, x, y, shape):\n\n row = int(np.floor(y))\n col = int(np.floor(x))\n\n rows = shape[0] - 1\n cols = shape[1] - 1\n\n xval = x % 1.0\n yval = y % 1.0\n\n A = coeffs[col * rows + row, :, :]\n\n # Switch x and y because of the image coord sys\n\n xar = np.array([1.0, xval, xval ** 2, xval ** 3])\n yar = np.array([1.0, yval, yval ** 2, yval ** 3])\n\n #p = yar @ A @ xar\n p = xar @ A @ yar\n\n return p", "def interp_spline(x, xvals, yvals, nochecks=False):\n spl = CubicSpline(xvals, yvals, nochecks=nochecks)\n return spl(x)", "def get_regression(x, y=None, deg=1, **kwargs):\r\n if y is None:\r\n y = x\r\n x = range(len(x))\r\n pfit = np.poly1d(np.polyfit(x, y, deg, **kwargs))\r\n return pfit(x)", "def ridge_method_poly(x, y, L, degree):\n\n X = np.array([x]).T\n y = np.array([y]).T\n\n N = X.shape[0]\n\n if X.shape != y.shape:\n msg = \"\\n\\nArguments <x> and <y> in function <ridge_method_poly> must be of \"\n msg += f\"the same shape. \\n\\n\\tx.shape = {x.shape}\\ny.shape = {y.shape}\"\n raise Exception(msg)\n\n if len(x.shape) != 1:\n msg = \"\\n\\nArguments <x> and <y> in function <ridge_method_poly> must be \"\n msg += f\"one dimensional. \\n\\n\\tx.shape = {x.shape}\\ny.shape = {y.shape}\"\n raise Exception(msg)\n\n try:\n if np.less(L, 0).any():\n msg = \"\\n\\nArgument <L> in function <ridge_method_poly> must be a number \"\n msg += f\" or array of numbers greater than or equal to zero.\"\n raise Exception(msg)\n except ValueError:\n msg = \"\\n\\nArgument <L> in function <ridge_method_poly> must be a \"\n msg += f\"number or array of numbers.\"\n raise Exception(msg)\n\n try:\n if degree == int(degree) and degree > 0:\n degree = int(degree)\n else:\n msg = \"\\n\\nArgument <degree> in function <ridge_method_poly> must be an \"\n msg += f\"integer greater than zero. 
\\ndegree = {degree}\"\n raise Exception(msg)\n except ValueError:\n msg = \"\\n\\nArgument <degree> in function <ridge_method_poly> must be a \"\n msg += f\"number. \\n\\n\\ttype(degree) = {type(degree)}\"\n raise Exception(msg)\n\n X = np.hstack([np.ones_like(X), X])\n M = degree+1\n\n X = np.tile(x, reps = (M,1)).T\n A = np.zeros((N, M))\n\n for n in range(M):\n x = X[:,n]\n A[:,n] = x**n\n\n beta_ridge = np.matmul(A.T, A)\n beta_ridge = np.linalg.inv(beta_ridge + L*np.identity(beta_ridge.shape[0]))\n beta_ridge = np.matmul(beta_ridge, A.T)\n beta_ridge = np.matmul(beta_ridge, y)\n\n return beta_ridge.T[0]", "def polynomial_fit(xi, y, degree):\n z = np.polyfit(xi, y, degree)\n f = np.poly1d(z)\n return f", "def Piecewise_Linear_Interpolation_Function(x,data):\n #print(x)\n if x>data[-1][0]:\n return data[-1][1]\n for i in range(len(data)):\n #print(i,data[i][0])\n if (data[i][0]<=x and data[i+1][0]>=x):\n index=i\n break\n x1=data[index][0]\n y1=data[index][1]\n x2=data[index+1][0]\n y2=data[index+1][1]\n return y1*(x-x2)/(x1-x2)+y2*(x-x1)/(x2-x1)", "def interp(x, y, xint):\n\n # Put this in so that the function accepts integer and float single values\n if not isinstance(y, list):\n y = [y]\n if not isinstance(x, list):\n x = [x]\n\n if not min(x) <= xint <= max(x) and not any(float_is_close(xint, xval) for xval in [min(x), max(x)]):\n print x\n print xint\n raise ValueError(\"Insufficient Data\")\n\n for i, xval in enumerate(x):\n if float_is_close(xval, xint):\n yint = y[i]\n return yint\n\n for i, xp in enumerate(x):\n if xint < xp:\n p2 = (xp, y[i])\n p1 = (x[i-1], y[i-1])\n slope = (p2[1]-p1[1])/(p2[0]-p1[0])\n yint = slope*(xint-p1[0]) + p1[1]\n return yint", "def interp_pts(x_data, baseline_points=(), interp_method='linear', data=None):", "def interp(y, x, xinterp, missing=1e+20):\n import arrayfns\n import numpy.ma as MA\n import numpy as N\n from .where_close import where_close\n\n\n #- Check inputs for possible errors:\n\n if (N.rank(y) != 1) or (N.rank(x) != 1):\n raise ValueError(\"interp: Input(s) not a vector\")\n if N.rank(xinterp) > 1:\n raise ValueError(\"interp: xinterp not a vector or scalar\")\n if x[-1] <= x[0]:\n raise ValueError(\"interp: x not monotonically increasing\")\n\n\n #- Establish constants and define xint, a rank 1 version of\n # xinterp to be used for the rest of the function:\n\n if N.rank(xinterp) == 0:\n xint = N.reshape(xinterp, (1,))\n else:\n xint = xinterp\n\n num_xint = N.size(xint)\n\n\n #- Mask as missing values of xint that are outside of the range\n # of x:\n\n yint_outrange_mask = N.logical_or( N.less(xint, x[0]) \\\n , N.greater(xint, x[-1]) )\n\n\n #- Mask of elements with missing values in y, if there are any\n # missing values in y. 
If xint equals a value in x, missing \n # values mask for that xint is the same as the corresponding \n # value in x; and mask elements in xint which fall in an interval \n # (whose upper bound index is top_idx) where one of the endpoints \n # is missing:\n\n y_miss_mask = where_close(y, missing)\n yint_miss_mask = N.zeros(num_xint)\n\n if MA.maximum(y_miss_mask) == 1:\n\n for i in range(num_xint):\n if yint_outrange_mask[i] == 0:\n x_eq_xint = where_close(x, xint[i])\n if MA.maximum(x_eq_xint) == 1:\n yint_miss_mask[i] = y_miss_mask[N.nonzero(x_eq_xint)]\n else:\n top_idx = N.nonzero(N.greater(x, xint[i]))[0]\n yint_miss_mask[i] = y_miss_mask[top_idx] or \\\n y_miss_mask[top_idx-1]\n\n\n #- Return interpolated values, set to missing values as \n # appropriate, and making a scalar if xinterp is a scalar:\n\n yint = arrayfns.interp(y, x, xint)\n N.putmask( yint, N.logical_or(yint_miss_mask, yint_outrange_mask) \\\n , missing)\n if N.rank(xinterp) == 0: yint = yint[0]\n\n return yint", "def CloughTocher2d_interp(x, y, xref, yref, vals):\n interp = CloughTocher2d_interpolator(xref, yref, vals)\n X, Y = np.meshgrid(x,y)\n return interp((Y, X))", "def polyEval(p, x):\n\tk = len(p)-1 # last valid index\n\tif(k < 0):\n\t\treturn 0\n\ty = p[k]\n\twhile(k > 0):\n\t\tk -= 1\n\t\ty = y*x + p[k]\n\treturn y", "def fit_polynomial(xmin,xmax,x,y,deg=2):\n x_fit= (x > xmin) & (x < xmax)\n # We could make these less correlated by better choice of parameters\n poly=np.polyfit(np.log(x[x_fit]), np.log(y[x_fit]), deg=deg)\n return np.poly1d(poly)", "def interpolate(x, y, x1):\r\n\tfor item in x:\r\n\t\titem = float(item)\r\n\tfor item in y:\r\n\t\titem = float(item)\r\n\tx1 = float(x1)\r\n\t \r\n\ty1 = y[0] + (x1 - x[0]) / (x[1] - x[0]) * (y[1] - y[0])\r\n\t\r\n\treturn y1", "def interpolate(self, x):\n # Cache interpolant to avoid overhead\n if not hasattr(self, \"_interpolant\"):\n self._create_interpolant()\n return self._interpolant(x)", "def fit_poly(data, error_func, degree=4):\n\t# Generates initial guess for polynomial model (all coeffs = 1)\n\tCguess = np.poly1d(np.ones(degree + 1, dtype=np.float32))\n\n\t# Plot initial guess (optional)\n\tx = np.linspace(-5, 5, 21)\n\tplt.plot(x,\n\t\tnp.polyval(Cguess, x),\n\t\t'm--',\n\t\tlinewidth=2.0,\n\t\tlabel='Initial guess')\n\n\t# Call optimizer to minimize error function\n\tresult = opt.minimize(\n\t\tfun=error_func,\n\t\tx0=Cguess,\n\t\targs=(data,),\n\t\tmethod='SLSQP',\n\t\toptions={'disp': True})\n\treturn np.poly1d(result.x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the weighted power spectral density matrix, also called the covariance matrix. The dim parameters let you change the order of the dimensions of the observation and mask, but not every combination is allowed.
def get_power_spectral_density_matrix( observation, mask=None, sensor_dim=-2, source_dim=-2, time_dim=-1, normalize=True, ): # ensure negative dim indexes sensor_dim, source_dim, time_dim = ( d % observation.ndim - observation.ndim for d in (sensor_dim, source_dim, time_dim) ) # ensure observation shape (..., sensors, frames) obs_transpose = [ i for i in range(-observation.ndim, 0) if i not in [sensor_dim, time_dim] ] + [sensor_dim, time_dim] observation = observation.transpose(obs_transpose) if mask is None: psd = np.einsum('...dt,...et->...de', observation, observation.conj()) # normalize psd /= observation.shape[-1] else: # Unfortunately, this function changes `mask`. mask = np.copy(mask) # normalize if mask.dtype == np.bool: mask = np.asfarray(mask) if normalize: mask /= np.maximum( np.sum(mask, axis=time_dim, keepdims=True), 1e-10, ) if mask.ndim + 1 == observation.ndim: mask = np.expand_dims(mask, -2) psd = np.einsum( '...dt,...et->...de', mask * observation, observation.conj(), ) else: # ensure shape (..., sources, frames) mask_transpose = [ i for i in range(-observation.ndim, 0) if i not in [source_dim, time_dim] ] + [source_dim, time_dim] mask = mask.transpose(mask_transpose) psd = np.einsum( '...kt,...dt,...et->...kde', mask, observation, observation.conj() ) if source_dim < -2: # Assume PSD shape (sources, ..., sensors, sensors) is desired psd = np.rollaxis(psd, -3, source_dim % observation.ndim) return psd
[ "def compute_sigma_weights(self, dim: int) -> Tuple[torch.Tensor, torch.Tensor]:", "def compute_sigma_weights(self, dim: int) -> Tuple[torch.Tensor, torch.Tensor]:\n\n lambd = self.compute_lambda(dim=dim)\n\n # Create covariance weights\n weights_c = torch.full(\n size=(2 * dim + 1,),\n fill_value=1.0 / (2.0 * (dim + lambd)),\n dtype=torch.float32,\n )\n weights_c[0] = lambd / (dim + lambd) + (1.0 - self.alpha ** 2 + self.beta)\n\n # Mean weights should be identical, except for the first weight\n weights_m = weights_c.clone()\n weights_m[0] = lambd / (dim + lambd)\n\n return weights_c, weights_m", "def construct_weight_matrix(d, d0):\n nr, nc = d.shape\n assert nr == nc\n w = np.exp(-d / d0)\n identity = np.eye(nr, dtype=bool)\n w[identity] = 0 # Zero diagonal elements to remove self-coupling\n # w /= w.sum(axis=1) # Normalize rows to account for variation in parcel size\n return w", "def cov_matrix_deriv(self, params):\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n\n # in this simple case, the covariance matrix is just a linear sum of each frequency term\n # so the derivative is simple - we multiply by p when we're talking about the log\n return np.stack([c * p for c, p in zip(self.cos_integral, psd)], axis=-1)\n else:\n psd_deriv = self.model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n return np.stack([np.sum([c * p for c, p in zip(self.cos_integral, psd_deriv[:, par])], axis=0) for par in range(psd_deriv.shape[-1])], axis=-1)", "def dimensionless_mass_density(self, scaled_radius, conc):\n return nfw_dimensionless_mass_density(scaled_radius, conc)", "def compute_spectral_density(elevation, sample_dt):\n elevation[np.isnan(elevation)] = 0.\n sample_dt = float(sample_dt)\n nperseg = round(SPECTRUM_WINDOW_SIZE / sample_dt)\n nfft = 2 ** (math.ceil(math.log(nperseg, 2))) # round to next higher power of 2\n return scipy.signal.welch(\n elevation, 1 / sample_dt, window='hann',\n nperseg=nperseg, nfft=nfft, noverlap=nperseg // 2,\n )", "def weight_matrix_disc(radius):\n # initialize matrix\n size = 2*radius + 1\n weights = np.zeros([size, size])\n center = radius\n r_sq = radius**2\n\n # set values in disc to 1\n for i in range(size):\n for j in range(size):\n if (i - center)**2 + (j - center)**2 <= r_sq:\n weights[i][j] = 1\n \n return weights", "def cross_cov_matrix_deriv(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n if self.cpsd_model is None:\n cpsd = np.exp(np.array(\n [params['%sln_cpsd%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])) * self.psdnorm\n else:\n cpsd = self.cpsd_model(params, self.fbins.bin_cent) * self.psdnorm\n\n # likewise for the (phase) lags\n if self.lag_model is None:\n lags = np.array([params['%slag%01d' % (self.prefix, i)].value for i in range(len(self.fbins))])\n else:\n lags = self.lag_model(params, self.fbins.bin_cent)\n\n if self.cpsd_model is None:\n cpsd_derivs = np.stack([(c * np.cos(phi) - s * np.sin(phi)) * p for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n psd_model_deriv = self.cpsd_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n cpsd_derivs = np.stack([np.sum([pd * (c * np.cos(phi) - s * np.sin(phi)) for pd, c, s, phi\n in zip(psd_model_deriv[:, par], self.cos_integral, self.sin_integral, lags)],\n axis=0) for par in range(psd_model_deriv.shape[-1])], axis=-1)\n\n if self.lag_model is None:\n lag_derivs = np.stack([-1 * p * (c * np.sin(phi) + s 
* np.cos(phi)) for p, c, s, phi in zip(cpsd, self.cos_integral, self.sin_integral, lags)], axis=-1)\n else:\n lag_model_deriv = self.lag_model.eval_gradient(params, self.fbins.bin_cent) * self.psdnorm\n lag_derivs = np.stack([np.sum([-1 * phid * p * (c * np.sin(phi) + s * np.cos(phi)) for p, c, s, phi, phid\n in zip(cpsd, self.cos_integral, self.sin_integral, lags, lag_model_deriv[:, par])],\n axis=0) for par in range(lag_model_deriv.shape[-1])], axis=-1)\n\n # this is the stack of (1) the derivatives w.r.t. the cross powers (multiplied by p when we're using the log)\n # and (2) the phases\n return np.concatenate([cpsd_derivs, lag_derivs], axis=-1)", "def _normalized_weights(Wk, Gk, Cm_inv_sq, reduce_rank, nn, sk):\n # np.dot Gk with Cm_inv_sq on left and right\n norm_inv = np.matmul(Gk.transpose(0, 2, 1),\n np.matmul(Cm_inv_sq[np.newaxis], Gk))\n\n # invert this using an eigenvalue decomposition\n norm = _pos_semidef_inv(norm_inv, reduce_rank)\n\n # Reapply source covariance after inversion\n norm *= sk[:, :, np.newaxis]\n norm *= sk[:, np.newaxis, :]\n power = np.matmul(norm, np.matmul(Wk, Gk)) # np.dot for each source\n\n # Determine orientation of max power\n assert power.dtype in (np.float64, np.complex128) # LCMV, DICS\n eig_vals, eig_vecs = np.linalg.eig(power)\n if not np.iscomplexobj(power) and np.iscomplexobj(eig_vecs):\n raise ValueError('The eigenspectrum of the leadfield is '\n 'complex. Consider reducing the rank of the '\n 'leadfield by using reduce_rank=True.')\n idx_max = np.argmax(eig_vals, axis=1)\n max_power_ori = eig_vecs[np.arange(eig_vecs.shape[0]), :, idx_max]\n\n # set the (otherwise arbitrary) sign to match the normal\n sign = np.sign(np.sum(max_power_ori * nn, axis=1, keepdims=True))\n sign[sign == 0] = 1\n max_power_ori *= sign\n\n # Compute the filter in the orientation of max power\n Wk_max = np.matmul(max_power_ori[:, np.newaxis], Wk)[:, 0]\n Gk_max = np.matmul(Gk, max_power_ori[:, :, np.newaxis])\n denom = np.matmul(Gk_max.transpose(0, 2, 1),\n np.matmul(Cm_inv_sq[np.newaxis], Gk_max))[:, 0]\n np.sqrt(denom, out=denom)\n Wk_max /= denom\n # All three entries get the same value from this operation\n Wk[:] = Wk_max[:, np.newaxis]", "def density_mat_fn(circuit, num_wires):\n dev_wires = range(num_wires)\n dev = qml.device(\"default.qubit\", wires=dev_wires)\n zero_state = np.array([0] * len(dev_wires))\n\n @qml.qnode(dev)\n def density_mat(wires_out, *circ_args, basis_state=zero_state, **circ_kwargs):\n qml.BasisState(basis_state, wires=dev_wires)\n circuit(*circ_args, **circ_kwargs)\n return qml.density_matrix(wires=wires_out)\n\n return density_mat", "def apply_window_covariance(zone,Cinv,setk,thin=1,withmask=True,windowkplus=0.2,kpmin=3.e-4, bisp=False, indexkred=None, masktriangle=None):\n if not bisp:\n if check_if_multipoles_k_array(setk):\n setk = setk[:len(setk)/3]\n nkin = len(setk)\n\n \n \n if Cinv.shape[0]/3 != nkin:\n print (Cinv.shape[0]/3, nkin)\n raise Exception('The setk needs to match the array of k for Cinv')\n \n #Put the inverse covariance in shape (l,k,l',k') \n Cinvllp = np.swapaxes(Cinv.reshape((3,nkin,3,nkin)),axis1=1,axis2=2)\n \n # Load window matrices\n Qll = np.load(opa.join(INPATH,'Window_functions/Qll_LightConeHector'+zone+'.npy')) \n setk_or = np.loadtxt(opa.join(INPATH,'Window_functions/kp_LightConeHector'+zone+'.txt')) \n setkp_or = np.loadtxt(opa.join(INPATH,'Window_functions/k_LightConeHector'+zone+'.dat'))\n\n Qll = Qll[:,:,::thin,:]\n Qll_old = 1.*Qll\n setkp_or = setkp_or[::thin]\n \n# if withmask:\n# kpgrid,kgrid = 
np.meshgrid(setkp_or,setk_or,indexing='ij')\n# mask = (kpgrid<kgrid+windowkplus)\n# Qll = np.einsum('lpkn,kn->lpkn',Qll,mask)\n \n \n # the spacing (needed to do the convolution as a sum)\n deltak = setkp_or[1:] - setkp_or[:-1]\n deltak = np.concatenate([[0],deltak])\n Qll_weighted = np.einsum('lpkn,k->lpkn',Qll,deltak)\n\n \n \n # Only keep value of setkp_or in the relevant range\n maskred = ((setkp_or>kpmin)&(setkp_or<setk.max()+windowkplus))\n #print maskred.shape, np.sum(maskred)\n kpred = setkp_or[maskred]\n \n# Qll_weighted_red = Qll_weighted[:,:,maskred,:]\n Qll_weighted_red = Qll_weighted[:,:,maskred,:]\n Qll_out = Qll_weighted_red[:,:,:,indexkred]\n \n # Put the Qll(k) on the same array as Cinv for the matrix multiplication\n# Qll_out = 1.*Qll_weighted_red\n# Qll_out = scipy.interpolate.interp1d(setk_or,Qll_weighted_red,axis=-1)(setk)\n# print maskred, indexkred\n bigW_mask = 1.*Qll_out\n# bigW_mask = bigW_mask[:,:,:,indexkred]\n\n \n nkout = len(kpred)\n\n \n # Cinv convoluted once for the P_model Cinv Pdata term\n Cinvllpw = np.einsum('likp,imnp->lmkn', Cinvllp,Qll_out)\n \n # Cinv convoluted twice for the P_model Cinv Pmodel term\n Cinvllpww = np.einsum('imnk,ilkp->mlnp', Qll_out,Cinvllpw)\n \n # Standard form for the matrices (concatenated multipoles)\n Cinvpw = np.swapaxes(Cinvllpw,axis1=1,axis2=2).reshape((3*nkin,3*nkout)) \n Cinvpww = np.swapaxes(Cinvllpww,axis1=1,axis2=2).reshape((3*nkout,3*nkout))\n if bisp:\n \n \n setk_or = np.loadtxt(opa.join(INPATH,'Window_functions/kp_LightConeHector'+zone+'.txt')) \n setkp_or = np.loadtxt(opa.join(INPATH,'Window_functions/k.dat'))\n setkp_or = setkp_or[::thin]\n nkbisp = len(masktriangle) \n# if withmask:\n# kpgrid,kgrid = np.meshgrid(setkp_or,setk_or,indexing='ij')\n# mask = (kpgrid<kgrid+windowkplus)\n\n\n # Only keep value of setkp_or in the relevant range\n \n maskred = ((setkp_or>kpmin)&(setkp_or<setk.max()+windowkplus))\n kpred = setkp_or[maskred]\n \n \n #Deltak now computed from the masked k's\n deltak = kpred[1:] - kpred[:-1]\n \n# deltak = setkp_or[1:] - setkp_or[:-1]\n deltak = np.concatenate([[0],deltak])\n\n \n # Importing the big window function without 4 indices and thinning it appropriately \n bigW = np.load(opa.join(INPATH,'Window_functions/bigW_LightConeHector'+zone+'.npy'))\n bigW_diet = np.zeros(shape=(bigW.shape[0], (bigW.shape[1]-nkbisp)/thin + nkbisp))\n for i in range(bigW_diet.shape[0]):\n bigWcol = bigW[i,:-nkbisp]\n #Applying weight from thinning process\n# print bigWcol.shape, nkbisp, len(masktriangle)\n\n# bigWcolthin = bigWcol[::thin]*np.concatenate([deltak, deltak, deltak])\n \n bigWcolthin = bigWcol[::thin]\n\n # odd number of entries\n #bigWcolthin = bigWcolthin[:-1]\n \n #Rebuilding bigW with only the non-masked points\n bigW_diet[i,:-nkbisp] = bigWcolthin\n #Keeping the bispectrum terms that aren't thinned out\n bigW_diet[i, -nkbisp:] = bigW[i, -nkbisp:]\n \n bigW = 1.*bigW_diet\n #Masking out bigW for only the observed points. 
Mask is for theory points and then observed points\n theorymask = np.concatenate([maskred, maskred, maskred, masktriangle])\n theorypoints = np.sum(theorymask)\n\n datamask = np.concatenate([indexkred, indexkred, indexkred, masktriangle]) \n datapoints = np.sum(datamask)\n\n matrixmask = np.outer(datamask, theorymask)\n #print datapoints, theorypoints, \n bigW_mask = bigW[matrixmask].reshape((datapoints, theorypoints))\n \n bigW_mask = bigW_mask[:]*np.concatenate([deltak, deltak, deltak, [1]*sum(masktriangle)])\n# print np.concatenate([deltak, deltak, deltak])\n \n #print bigW_mask.shape, bigW.shape, matrixmask.shape\n Cinvllp = 1.*Cinv\n Cinvpw = np.dot(Cinv, bigW_mask)\n Cinvpww = np.dot(bigW_mask.T, Cinvpw)\n \n return kpred,Cinvpw,Cinvpww", "def var_cov_matrix(df, weigths):\n\n sigma = np.cov(np.array(df).T, ddof=0)\n var = (np.array(weigths) * sigma * np.array(weigths).T).sum()\n return var", "def get_mixture_density(mass_fractions, mass_densities):\n return np.sum(mass_fractions * mass_densities)", "def broadcast_weights(weight, data_shape, chan_axis=0):\n\n\n nchan = data_shape[chan_axis]\n\n broadcast = np.ones((nchan, 1))\n return weight[np.newaxis, :] * broadcast", "def portfolio_vol(weights, covmat):\n vol= (weights.T @ covmat @ weights)**0.5\n return vol #Square root because this results the variance so to get standard deviation", "def _compute_3d_power(self, displaced, random):\n attrs = {}\n # add self.attrs\n attrs.update(self.attrs)\n\n delta_d = displaced.compute(mode='complex', Nmesh=self.attrs['Nmesh'])\n delta_s = random.compute(mode='complex', Nmesh=self.attrs['Nmesh'])\n\n delta = delta_d - delta_s\n\n c1 = delta\n c2 = delta\n\n # calculate the 3d power spectrum, slab-by-slab to save memory\n p3d = c1\n for (s0, s1, s2) in zip(p3d.slabs, c1.slabs, c2.slabs):\n s0[...] = s1 * s2.conj()\n\n for i, s0 in zip(p3d.slabs.i, p3d.slabs):\n # clear the zero mode.\n mask = True\n for i1 in i:\n mask = mask & (i1 == 0)\n s0[mask] = 0\n\n # the complex field is dimensionless; power is L^3\n # ref to http://icc.dur.ac.uk/~tt/Lectures/UA/L4/cosmology.pdf\n p3d[...] 
*= self.attrs['BoxSize'].prod()\n\n # get the number of objects (in a safe manner)\n #N1 = c1.attrs.get('N', 0)\n #N2 = c2.attrs.get('N', 0)\n #attrs.update({'N1':N1, 'N2':N2})\n\n # add shotnoise (nonzero only for auto-spectra)\n Pshot = 0.\n if 'shotnoise' in delta_d.attrs:\n Pshot += delta_d.attrs['shotnoise']\n if 'shotnoise' in delta_s.attrs:\n Pshot += delta_s.attrs['shotnoise']\n #if self.first is self.second:\n # if 'shotnoise' in c1.attrs:\n # Pshot = c1.attrs['shotnoise']\n attrs['shotnoise'] = Pshot\n\n\n return p3d, attrs", "def cov_matrix(self, params):\n # if no model is specified, the PSD model is just the PSD value in each frequency bin\n # note the factor of 2 to integrate over the negative frequencies too!\n if self.model is None:\n psd = np.exp(np.array([params[p].value for p in params])) * self.psdnorm\n else:\n psd = self.model(params, self.fbins.bin_cent) * self.psdnorm\n\n cov = np.sum(np.array([p * c for p, c in zip(psd, self.cos_integral)]), axis=0)\n\n return cov", "def dp_mixture(ctx: ProbCtx):\n weights, means = dp(ctx, true_alpha, dims)\n lik = loglikelihood(weights, means, training_data)\n ctx.score_log(lik)\n return weights.tolist(), means.tolist()", "def _clearskypower(y, q, tod_i, doy_i, tod_vec, doy_vec, bw_tod, bw_doy):\n from scipy.stats import vonmises\n from statsmodels.stats.weightstats import DescrStatsW\n\n wts_tod = vonmises.pdf(\n x=tod_i * 2 * np.pi / 24, kappa=bw_tod, loc=tod_vec * 2 * np.pi / 24\n )\n wts_doy = vonmises.pdf(\n x=doy_i * 2 * np.pi / 365.25, kappa=bw_doy, loc=doy_vec * 2 * np.pi / 365.25\n )\n\n wts = wts_doy * wts_tod\n wts = wts / wts.sum()\n\n csp = DescrStatsW(y, weights=wts).quantile(probs=q).values[0]\n\n return csp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates an LCMV beamforming vector.
def get_lcmv_vector(atf_vectors, response_vector, noise_psd_matrix): response_vector = np.asarray(response_vector) # TODO: If it is a list, a list of response_vectors is returned. K, F, D = atf_vectors.shape assert noise_psd_matrix.shape == (F, D, D), noise_psd_matrix.shape Phi_inverse_times_H = np.squeeze(stable_solve( np.broadcast_to(noise_psd_matrix[None, :, :, :], (K, F, D, D)), atf_vectors[:, :, :, None] # k, f, d ), axis=-1) # k, f, d assert Phi_inverse_times_H.shape == (K, F, D), Phi_inverse_times_H.shape H_times_Phi_inverse_times_H = np.einsum( 'k...d,K...d->...kK', atf_vectors.conj(), Phi_inverse_times_H ) # f, k, K response_vector = response_vector[None, :, None].astype(np.complex64) response_vector = np.repeat(response_vector, F, axis=0) temp = stable_solve( H_times_Phi_inverse_times_H, response_vector, # F, K, 1 ) # f, k beamforming_vector = np.einsum( 'k...d,...k->...d', Phi_inverse_times_H, np.squeeze(temp, axis=-1) ) return beamforming_vector
[ "def calculLwmVl(self):\r\n self.lwmVl = sommeEnergetique(self.lrwmVl,self.lmwmVl)", "def glvvec(w):\n if GLOVE_CACHE != None:\n return GLOVE_CACHE[w]\n if w in GLOVE_VOCAB:\n i = GLOVE_VOCAB.index(w)\n return GLOVE_MAT[i]\n else:\n return np.zeros(GLVVEC_LENGTH)", "def v(self):\n return self.velocity + self.dv()", "def find_mic(self, v, vectors=True):\n vecs = np.asarray(v).reshape(-1, 3)\n if any(self.pbc):\n vecs = np.einsum('ji,nj->ni', np.linalg.inv(self.cell), vecs)\n vecs[:,self.pbc] -= np.rint(vecs)[:,self.pbc]\n vecs = np.einsum('ji,nj->ni', self.cell, vecs)\n if vectors:\n return vecs.reshape(np.asarray(v).shape)\n return np.linalg.norm(vecs, axis=-1).reshape(np.asarray(v).shape[:-1])", "def velocity(self):\n if self.vmax > 0:\n mod = VelField(x_0=self.x_0,\n y_0=self.y_0,\n r_eff=self.r_eff,\n ellip=self.ellip,\n theta=self.theta,\n vmax=self.vmax,\n q=self.q)\n result = mod(self.x, self.y)\n else:\n result = np.ones(shape=self.x.shape)\n\n return result", "def v(self):\n return self.centroid_velocity_tangent / np.linalg.norm(\n self.centroid_velocity_tangent\n )", "def _calculate_lll(self, delta: float = 0.75) -> Tuple[np.ndarray, np.ndarray]:\n # Transpose the lattice matrix first so that basis vectors are columns.\n # Makes life easier.\n a = self._matrix.copy().T\n\n b = np.zeros((3, 3)) # Vectors after the Gram-Schmidt process\n u = np.zeros((3, 3)) # Gram-Schmidt coeffieicnts\n m = np.zeros(3) # These are the norm squared of each vec.\n\n b[:, 0] = a[:, 0]\n m[0] = dot(b[:, 0], b[:, 0])\n for i in range(1, 3):\n u[i, 0:i] = dot(a[:, i].T, b[:, 0:i]) / m[0:i]\n b[:, i] = a[:, i] - dot(b[:, 0:i], u[i, 0:i].T)\n m[i] = dot(b[:, i], b[:, i])\n\n k = 2\n\n mapping = np.identity(3, dtype=np.double)\n while k <= 3:\n # Size reduction.\n for i in range(k - 1, 0, -1):\n q = round(u[k - 1, i - 1])\n if q != 0:\n # Reduce the k-th basis vector.\n a[:, k - 1] = a[:, k - 1] - q * a[:, i - 1]\n mapping[:, k - 1] = mapping[:, k - 1] - q * mapping[:, i - 1]\n uu = list(u[i - 1, 0: (i - 1)])\n uu.append(1)\n # Update the GS coefficients.\n u[k - 1, 0:i] = u[k - 1, 0:i] - q * np.array(uu)\n\n # Check the Lovasz condition.\n if dot(b[:, k - 1], b[:, k - 1]) >= (\n delta - abs(u[k - 1, k - 2]) ** 2\n ) * dot(b[:, (k - 2)], b[:, (k - 2)]):\n # Increment k if the Lovasz condition holds.\n k += 1\n else:\n # If the Lovasz condition fails,\n # swap the k-th and (k-1)-th basis vector\n v = a[:, k - 1].copy()\n a[:, k - 1] = a[:, k - 2].copy()\n a[:, k - 2] = v\n\n v_m = mapping[:, k - 1].copy()\n mapping[:, k - 1] = mapping[:, k - 2].copy()\n mapping[:, k - 2] = v_m\n\n # Update the Gram-Schmidt coefficients\n for s in range(k - 1, k + 1):\n u[s - 1, 0: (s - 1)] = (\n dot(a[:, s - 1].T, b[:, 0: (s - 1)]) / m[0: (s - 1)]\n )\n b[:, s - 1] = a[:, s - 1] - dot(\n b[:, 0: (s - 1)], u[s - 1, 0: (s - 1)].T\n )\n m[s - 1] = dot(b[:, s - 1], b[:, s - 1])\n\n if k > 2:\n k -= 1\n else:\n # We have to do p/q, so do lstsq(q.T, p.T).T instead.\n p = dot(a[:, k:3].T, b[:, (k - 2): k])\n q = np.diag(m[(k - 2): k])\n result = np.linalg.lstsq(q.T, p.T, rcond=None)[0].T\n u[k:3, (k - 2): k] = result\n\n return a.T, mapping.T", "def compute_vector(word, model):\n return sum([model.wv.get_vector(x) for x in [word[i:i + 3] for i in range(len(word) - 2)]])", "def vd2lmd(vp, vs, rho):\n lam = rho * (vp ** 2 - 2 * vs ** 2)\n mu = rho * vs ** 2\n rho = rho\n return lam, mu, rho", "def alm2vlm( glm, clm=None ):\n lmax = nlm2lmax(len(glm))\n ret = np.zeros( (lmax+1)**2, dtype=np.complex )\n for l in xrange(0, lmax+1):\n 
ms = np.arange(1,l+1)\n ret[l*l+l] = -glm[l]\n ret[l*l+l+ms] = -glm[ms * (2*lmax+1-ms)/2 + l]\n ret[l*l+l-ms] = -(-1)**ms * np.conj( glm[ms * (2*lmax+1-ms)/2 + l] )\n\n if clm != None:\n assert( len(clm) == len(glm) )\n for l in xrange(0, lmax+1):\n ms = np.arange(1,l+1)\n ret[l*l+l] += -1.j * clm[l]\n ret[l*l+l+ms] += -1.j * clm[ms * (2*lmax+1-ms)/2 + l]\n ret[l*l+l-ms] += -(-1)**ms * 1.j * np.conj( clm[ms * (2*lmax+1-ms)/2 + l] )\n \n return ret", "def get_V_Matrix(p,M): \r\n V = np.zeros([2*M,p+1])\r\n \r\n x = np.linspace(-M,M-1,2*M)\r\n \r\n x = (x+0.5)/M\r\n\r\n for i in range(p+1):\r\n V[:,i] = np.power(x,i)\r\n \r\n return V", "def MassPow(Mvec, MLpar, z):\n\n A = MLpar['A']\n b = MLpar['b']\n L = A * np.array(Mvec)**b*u.Lsun\n return L", "def localVelTriVT(Vx,Vy,Vz,sweep):\n \n Vxl = Vx * np.cos(sweep) - Vz * np.sin(sweep);\n Vyl = Vy;\n Vzl = Vx * np.sin(sweep) + Vz * np.cos(sweep);\n \n return Vxl,Vyl,Vzl;", "def E2V(E):\r\n# for energy in mev returns velocity in m/s\r\n return sqrt(E/5.227e-6)", "def veq(self):\n return self._veq / self._velocity_factor", "def compute_vec2vec_projection(self, u, v):\n return (np.dot(u, v) / np.linalg.norm(v)) * v", "def vectorfield(t,x_,args):\n Pi = numpy.pi\n theta = x_[0]\n v = x_[1]\n g = args[0]\n b = args[1]\n L = args[2]\n m = args[3]\n\n f_ = numpy.zeros((2,))\n f_[0] = v\n f_[1] = -sin(theta)/L*g-b/(L*L)*v/m\n\n return f_", "def base_vectors(self):\n # normalize n\n n = self.direction / (self.direction**2).sum(axis=-1)\n\n # choose two vectors perpendicular to n\n # choice is arbitrary since the coil is symetric about n\n if np.abs(n[0]) == 1:\n l = np.r_[n[2], 0, -n[0]]\n else:\n l = np.r_[0, n[2], -n[1]]\n\n l /= (l**2).sum(axis=-1)\n m = np.cross(n, l)\n return n, l, m", "def V_LJ(mag_r, sp):\n V_rc = 4 * sp.eps * ((sp.sigma / sp.rc) ** 12 - (sp.sigma / sp.rc) ** 6)\n return 4 * sp.eps * ((sp.sigma / mag_r) ** 12 - (sp.sigma / mag_r) ** 6) - \\\n V_rc if mag_r < sp.rc else 0.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Phase correction to reduce distortions due to phase inconsistencies. We need a copy first, because not all elements are touched during the multiplication. Otherwise, the vector would be modified in place.
def phase_correction(vector): # w = W.copy() # F, D = w.shape # for f in range(1, F): # w[f, :] *= np.exp(-1j*np.angle( # np.sum(w[f, :] * w[f-1, :].conj(), axis=-1, keepdims=True))) # return w vector = np.array(vector, copy=True) vector[..., 1:, :] *= np.cumprod( np.exp( 1j * np.angle( np.sum( vector[..., 1:, :].conj() * vector[..., :-1, :], axis=-1, keepdims=True ) ) ), axis=0 ) return vector
[ "def apply_phase(self, a):\n self.state[:, self.n + a] = ((self.state[:, self.n + a]\n + self.state[:, a]) % 2)", "def RestartVector(v, Q):\n m, n = Q.shape\n q0 = numpy.zeros(m)\n for i in xrange(n):\n q0 = q0 + v[i]*Q[:,i]\n \n q0Norm = Vector2Norm(q0)\n q0 = q0/q0Norm\n return q0", "def differentiate(self):\n for i in range(1, len(self.coeff)):\n self.coeff[i - 1] = i * self.coeff[i]\n del self.coeff[-1]\n if len(self.coeff) == 0:\n self.coeff.append(0)", "def __mul__(self, factor):\n\n ret = Vector(self)\n for x in range(len(ret)):\n ret[x] = ret[x] * float(factor)\n return Vector(ret[:3])", "def _multiply_compute_phase(s1, s2):\n # Compute the number of i and -i phases\n has_minus_i = StabilizerState._get_minus_i_mask(s1, s2)\n has_i = StabilizerState._get_i_mask(s1, s2)\n num_i = np.count_nonzero(has_i)\n num_minus_i = np.count_nonzero(has_minus_i)\n has_minus_phase = ((num_i - num_minus_i) % 4) / 2\n return np.logical_xor(np.logical_xor(s1[-1], s2[-1]), has_minus_phase)", "def getOriginalVec(self):\n tmp = self._vec[:] # copy list\n if len(self._vec) == 3:\n if self._matlabFlag:\n if self._vec[2] > 0:\n tmp[2] = self._vec[1] - 1\n else:\n tmp[2] = self._vec[1] + 1\n else:\n tmp[2] = self._vec[1]\n tmp[1] = self._vec[2]\n if tmp[1] == 1:\n tmp.pop(1)\n else:\n if self._matlabFlag:\n tmp[1] -= 1\n if tmp[0] == 1:\n tmp[0] = tmp.pop()\n else:\n if tmp[0] == 0:\n tmp[0] = tmp.pop()\n return tmp", "def phase_spherical_variance():\n pass", "def unit_vector(vector):\n return vector/mag(vector)", "def __mul__(self, a):\n \n if len(a) < len(self.v):\n raise Exception('Length of a is smaller than length of v!')\n elif len(a) > len(self.v):\n differenceav = (len(a) - len(self.v))\n add = np.zeros(differenceav)\n velong = np.insert(self.v,0,add).reshape(len(a),1) # add zeros at the end\n self.v = velong\n else:\n self.v = self.v\n \n gamma = ((np.linalg.norm(self.v))**2)/2\n vvtrans = self.v * np.transpose(self.v)\n H = np.identity((len(a))) - (vvtrans/gamma)\n reflection = np.dot(H,a)\n \n return(reflection)", "def normalize_cart(u):\n return u / np.sqrt(np.sum(u**2))", "def __mod__(self, p): \n if p in (7, 31, 127, 255):\n return MMVector(p, self.data[p]) >> self.shift\n elif p in (3, 15):\n v0 = self % 255\n return MMVector(p, v0)\n elif isintance(p, Integral):\n err = \"Cannot reduce MMVectorCRT object modulo %d\"\n raise ValueError(err % p)\n else:\n err = \"Modulus for reducing MMVectorCRT object must be int\"\n raise TypeError(err)", "def LU_inplace(A):\n m = A.shape[0]\n for k in range(m-1):\n A[k+1:,k] /= A[k,k]\n A[k+1:,k+1:] -= np.outer(A[k+1:,k], A[k,k+1:])\n return A", "def remove_piston(self):\n self.phase -= mean(self.phase)\n return self", "def _mul_ct_sk(self, encrypted):\n phase = encrypted[0]\n\n secret_key_array = self._get_sufficient_sk_power(len(encrypted))\n\n for j in range(1, len(encrypted)):\n for i in range(len(self._coeff_modulus)):\n phase[i] = poly_add_mod(\n poly_mul_mod(\n encrypted[j][i], secret_key_array[j - 1][i], self._coeff_modulus[i]\n ),\n phase[i],\n self._coeff_modulus[i],\n )\n\n return phase", "def scale_vec(vector, initial_space):\n vec_in = np.copy(vector)\n vec_out = (vec_in - initial_space[:, 0]) * 2 / np.diff(initial_space).squeeze() - 1\n\n return vec_out", "def zero_phase_v_q(self):\n M = np.eye(2)\n M[0,1] = -self.phase\n return self.v_q.dot(M)", "def test_flat_invertible_phase_space(self):\n \n E_cm = 5000.0\n \n # Try to run the above for a 2->8.\n my_PS_generator = PS.FlatInvertiblePhasespace(\n [0.]*2, [100. 
+ 10.*i for i in range(8)],\n beam_Es =(E_cm/2., E_cm/2.), beam_types=(0, 0) )\n # Try to run the above for a 2->1. \n # my_PS_generator = FlatInvertiblePhasespace([0.]*2, [5000.0])\n \n random_variables = [random.random() for _ in range(my_PS_generator.nDimPhaseSpace())]\n\n# import time\n# start = time.time()\n# n_loops = 1\n# for _ in range(n_loops):\n momenta, wgt = my_PS_generator.generateKinematics(E_cm, random_variables)\n# end = time.time()\n# misc.sprint('Time per call',(end-start)/float(n_loops))\n #print \"\\n =========================\"\n #print \" || PS generation ||\"\n #print \" =========================\" \n #print \"\\nRandom variables :\\n\",random_variables\n #print \"\\n%s\\n\"%momenta.__str__(n_initial=my_PS_generator.n_initial)\n #print \"Phase-space weight : %.16e\\n\"%wgt,\n \n variables_reconstructed, wgt_reconstructed = \\\n my_PS_generator.invertKinematics(E_cm, momenta)\n\n #print \"\\n =========================\"\n #print \" || Kinematic inversion ||\"\n #print \" =========================\"\n #print \"\\nReconstructed random variables :\\n\",variables_reconstructed\n differences = [abs(variables_reconstructed[i]-random_variables[i]) \n for i in range(len(variables_reconstructed))]\n\n self.assertLess(max(differences[i]/random_variables[i] for i in range(len(differences))), 1.0e-10)\n self.assertLess(abs(wgt-wgt_reconstructed)/abs(wgt), 1.0e-10)\n \n #print \"Reconstructed weight = %.16e\"%wgt_reconstructed\n #if differences:\n # print \"\\nMax. relative diff. in reconstructed variables = %.3e\"%\\\n # max(differences[i]/random_variables[i] for i in range(len(differences)))\n #print \"Rel. diff. in PS weight = %.3e\\n\"%((wgt_reconstructed-wgt)/wgt)", "def phase_unwrap(arg):\n eps = np.pi\n threshold = np.pi*2 - eps\n phase = np.ndarray((len(arg)))\n wrap = 0\n phase[0] = arg[0]\n for j in range(1, len(arg)):\n if arg[j] - arg[j-1] > threshold:\n wrap -= 1\n elif arg[j] - arg[j-1] < -threshold:\n wrap += 1\n phase[j] = arg[j] + np.pi * 2 * wrap\n\n # Remove the linear phase offset\n n = len(phase)\n phase -= np.arange(n) * (phase[-1]-phase[0]) / (n-1) + phase[0]\n return phase", "def boxify(phase):\n return np.sign(phase[:, 1] - phase[:, 1].mean())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a beamforming vector such that the sensor dimension disappears. Although this function may seem simple, it turned out that using it reduced implementation errors in practice quite a bit.
def apply_beamforming_vector(vector, mix): assert vector.shape[-1] < 30, (vector.shape, mix.shape) return np.einsum('...a,...at->...t', vector.conj(), mix)
[ "def action_scaling_vecs(self):\n vel_vec = np.arange(1, self.specs['velocity_limits'][1] + 1, 1)\n\n acc_pos_vec = self.calc_acceleration_from_power(\n vel_vec, self.specs['power_limits'][1])\n acc_neg_vec = self.calc_acceleration_from_power(\n vel_vec, self.specs['power_limits'][0])\n acc_0_vec = self.calc_acceleration_from_power(vel_vec, 0)\n\n acc_pos_vec = np.min([\n acc_pos_vec,\n np.ones(len(acc_pos_vec)) * self.specs['acceleration_limits'][1]\n ],\n axis=0)\n acc_neg_vec = np.max([\n acc_neg_vec,\n np.ones(len(acc_neg_vec)) * self.specs['acceleration_limits'][0]\n ],\n axis=0)\n\n # TODO: Find better solution :)\n # This is kind of a workaround. Roman got the values for 0 from the\n # data, which seems difficult to implement here. So the added 1.0 in\n # acc_pos_vec is handcrafted.\n self.vel_vec = np.append(0, vel_vec)\n self.acc_pos_vec = np.append(1.0, acc_pos_vec)\n self.acc_neg_vec = np.append(0.0, acc_neg_vec)\n self.acc_0_vec = np.append(0.0, acc_0_vec)", "def fix_axis(self, axis, value):\n # Pre-evaluate the fixed axis, adjusting b\n b = self.b[:] - self.A[:, axis] * value\n # Remove that axis from a\n A = numpy.delete(self.A, axis, 1)\n fixed_values = self.fixed_values[:]\n fixed_values[axis] = value\n return QEF(A, b, fixed_values)", "def vector_dim_reduction(target_vector, source_vectors, rows_used):\n\n remove_rows = [x for x in range(\n len(target_vector)) if x not in rows_used]\n\n target_vector = np.delete(target_vector, remove_rows, axis=0)\n source_vectors = np.delete(source_vectors, remove_rows, axis=1)\n\n return target_vector, source_vectors", "def project(v: np.ndarray, w: np.ndarray) -> np.ndarray:\n return np.dot(v, w) * (w / np.linalg.norm(w))", "def _unweight(self):\n matrix = self.matrix.copy()\n matrix[:, :len(self.xdef)] = (matrix[:, :len(self.xdef)] /\n matrix[:, [-1]])\n return matrix", "def antiVectorize(vec, m):\r\n # Correct:\r\n M = np.zeros((m,m))\r\n M[np.tril_indices(m,k=-1)] = vec\r\n M= M.transpose()\r\n M[np.tril_indices(m,k=-1)] = vec\r\n return M", "def scale_vec(vector, initial_space):\n vec_in = np.copy(vector)\n vec_out = (vec_in - initial_space[:, 0]) * 2 / np.diff(initial_space).squeeze() - 1\n\n return vec_out", "def zero(self):\n v = np.zeros(self.get_dimension())\n self.set_vector(v)", "def _remove_dilations(self):\n\n input_shape = tf_shape(self.input)\n in_spatial_shape = input_shape[1:self.spatial_size + 1]\n\n channels_count = input_shape[self.spatial_size + 1]\n # Initialize gather_ind with the range of channels\n # e.g. [0 1]\n gather_ind = tf.range(channels_count, dtype=tf.int64)\n # convert the vector to column vector\n # in the following logic we use column vectors\n gather_ind = tf.expand_dims(gather_ind, 1)\n\n # initilize the output_shape with zeros\n # self.output_shape will contain the shape of the\n # output tensor after the loop below is executed\n self.output_shape = [0] * (self.spatial_size + 2)\n self.output_shape[0] = input_shape[0]\n \"\"\"\n Loop over the input spatial dimensions starting from the\n last (most internal) going up to the first dimension\n\n On every step of the loop calculate the output indices and\n map them to the input indices using `_calc_input_ind`,\n then \"combine\" with the already calculated indices from the\n previous dimensions using cartesian product.\n\n For the following example input:\n\n Input: [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [ 12, 13, 14, 15]]\n\n Kernel: [2, 2]\n Dilations: [2, 2]\n Strides: [1, 1]\n\n these are the steps that will be executed:\n\n 1. 
Initilize gather_ind = [[0]] # we have only 1 channel\n\n 2. Loop step 0 (axis 1):\n filter_size = 3\n output_size = 4\n dim_ind = [[0]\n [2]\n [1]\n [3]]\n\n gather_ind = [[0 0]\n [2 0]\n [1 0]\n [3 0]]\n\n 3. Loop step 1 (axis 0):\n filter_size = 3\n output_size = 4\n dim_ind = [[0]\n [2]\n [1]\n [3]]\n\n gather_ind = [[0 0 0]\n [0 2 0]\n [0 1 0]\n [0 3 0]\n [2 0 0]\n [2 2 0]\n [2 1 0]\n [2 3 0]\n [1 0 0]\n [1 2 0]\n [1 1 0]\n [1 3 0]\n [3 0 0]\n [3 2 0]\n [3 1 0]\n [3 3 0]]\n\n These are the indices used for gather_nd operation to collect\n the values from the input data.\n \"\"\"\n\n for dim in range(self.spatial_size - 1, -1, -1):\n filter_size = (self.kernel_shape[dim] - 1) * \\\n self.dilations[dim] + 1\n output_size = ((\n (in_spatial_shape[dim] - filter_size) // self.strides[dim]) + 1\n ) * self.kernel_shape[dim]\n self.output_shape[dim + 1] = output_size\n\n # initialize the output dimension index with the range of the\n # dimension output size (e.g. 4): [0, 1, 2, 3]\n dim_ind = tf.range(output_size)\n\n # calculate the matching indices in the input data\n # [0, 1, 2, 3] will calculate to [0, 2, 1, 3]\n # from the above example\n dim_ind = self._calc_input_ind(dim_ind, self.kernel_shape[dim],\n self.dilations[dim], self.strides[dim])\n # convert to column vector\n dim_ind = tf.expand_dims(dim_ind, 1)\n\n # \"combine\" current dimension indices with the previous dimensions\n # using cartesian product\n gather_ind = tf_product(dim_ind, gather_ind)\n\n # The result from the above loop for 2D data will be:\n # [[y1, x1, c], [y2, x2, c], ..., [yn, xm, c]] where n is the height,\n # m is the width and c is the channel number.\n\n # set the channels count in the output_shape\n self.output_shape[self.spatial_size + 1] = channels_count\n\n # expand the dimensions to match the input dimensions + 1\n for x in range(self.spatial_size):\n gather_ind = tf.expand_dims(gather_ind, 0)\n # dublicate the indices for every batch\n gather_ind = tf.tile(gather_ind,\n [input_shape[0]] + [1] * (self.spatial_size + 1))\n\n # extract the selected values from the input\n output = tf.gather_nd(self.input, gather_ind, batch_dims=1)\n # reshape the output to the correct shape calculated earlier\n output = tf.reshape(output, self.output_shape)\n\n return output", "def beam2D(coord, params):\n vec = coord[1, :] - coord[0, :]\n nx = vec[0]/np.linalg.norm(vec)\n ny = vec[1]/np.linalg.norm(vec)\n L = np.linalg.norm(vec)\n Q = np.array([\n [nx, -ny, 0, 0, 0, 0],\n [ny, nx, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, nx, -ny, 0],\n [0, 0, 0, ny, nx, 0],\n [0, 0, 0, 0, 0, 1]])\n young, area_moment, area = params[:3]\n bending_stiff = area_moment * young\n ratio = area/area_moment\n stiff_mat = bending_stiff/L**3 * np.array([\n [ratio*L**2, 0, 0, -ratio*L**2, 0, 0],\n [0, 12, 6*L, 0, -12, 6*L],\n [0, 6*L, 4*L**2, 0, -6*L, 2*L**2],\n [-ratio*L**2, 0, 0, ratio*L**2, 0, 0],\n [0, -12, -6*L, 0, 12, -6*L],\n [0, 6*L, 2*L**2, 0, -6*L, 4*L**2]])\n \n if len(params) == 3:\n dens = 1.0\n else:\n dens = params[3:]\n mass = area * L * dens\n mass_mat = mass/420*np.array([\n [140, 0, 0, 70, 0, 0],\n [0, 156, 22*L, 0, 54, -13*L],\n [0, 22*L, 4*L**2, 0, 13*L, -3*L**2],\n [70, 0, 0, 140, 0, 0],\n [0, 54, 13*L, 0, 156, -22*L],\n [0, -13*L, -3*L**2, 0, -22*L, 4*L**2]])\n stiff_mat = Q.T @ stiff_mat @ Q\n mass_mat = Q.T @ mass_mat @ Q\n return stiff_mat, mass_mat", "def TransformVector(self, *args) -> \"itkVariableLengthVectorD\":\n return _itkCompositeTransformPython.itkCompositeTransformD2_TransformVector(self, *args)", "def 
_aligned_vector(vector, vector_ref):\n if dot_vectors(vector, vector_ref) < 0.0:\n return scale_vector(vector, -1.0)\n return vector", "def _transform(self, vector, word):\n for w in word:\n vector = np.dot(vector, self._reflections[w])\n return vector", "def unit_vector(vector):\n return vector/mag(vector)", "def unit_vector(v):\n return v / la.norm(v)", "def __filterVelocity(self):\n pass\n # windowedVelocity = dict()\n # maxTimestamp = max(self.angularVelocityDict.keys())\n # for t in self.angularVelocityDict:\n # index = int(t/step)\n # if index not in windowedVelocity:\n # windowedVelocity[index] = list()\n # windowedVelocity[index].append(self.angularVelocityDict[t])\n # self.angularVelocityWindow = (step, list())\n # angVel = self.angularVelocityWindow[1]\n # for index in windowedVelocity:\n # angVel.append(\n # sum(windowedVelocity[index])/len(windowedVelocity[index]))", "def reflect(self, beam):\n\n # initialize phase contributions\n high_order = np.zeros_like(beam.x)\n quadratic = 0\n linear = 0\n\n # initialize some other arrays\n zi = np.zeros_like(beam.x)\n yi = np.zeros_like(beam.x)\n zi_1d = np.zeros(0)\n yi_1d = np.zeros(0)\n k_ix = 0\n k_iy = 0\n k_iz = 0\n cz = 0\n cy = 0\n\n # actual angle of incidence\n self.total_alpha = self.alpha + self.delta\n\n shapeError2 = np.zeros_like(beam.x)\n\n # check distance to beam focus\n self.projectWidth = np.abs(self.length * (self.alpha + self.delta))\n\n # figure out outgoing k-vector based on incident beam and mirror orientation\n if self.orientation == 0:\n\n # small change to total angle of incidence\n self.total_alpha += -beam.ax\n\n k_ix = -np.sin(self.alpha - beam.ax)\n k_iy = np.sin(beam.ay)\n k_iz = np.sqrt(1 - k_ix ** 2 - k_iy ** 2)\n\n # coordinate mapping for interpolation\n zi = beam.x / np.sin(self.total_alpha)\n zi_1d = zi\n cz = beam.cx / np.sin(self.total_alpha)\n print(cz*1e6)\n yi = beam.y\n yi_1d = yi\n cy = beam.cy\n # beam radius across grating (grating can be long enough that the additional correction is needed\n zEff = beam.zx + (zi_1d - cz) * np.cos(self.total_alpha)\n alphaBeam = -beam.ax - np.arctan((zi_1d - cz) * np.sin(self.total_alpha) / zEff)\n beamz = beam.zx\n\n elif self.orientation == 1:\n\n # small change to total angle of incidence\n self.total_alpha += -beam.ay\n\n k_ix = -np.sin(self.alpha - beam.ay)\n k_iy = -np.sin(beam.ax)\n k_iz = np.sqrt(1 - k_ix ** 2 - k_iy ** 2)\n\n # coordinate mapping for interpolation\n zi = beam.y / np.sin(self.total_alpha)\n zi_1d = zi\n cz = beam.cy / np.sin(self.total_alpha)\n yi = -beam.x\n yi_1d = yi\n cy = -beam.cx\n\n # beam radius across grating (grating can be long enough that the additional correction is needed\n zEff = beam.zy + (zi_1d - cz) * np.cos(self.total_alpha)\n alphaBeam = -beam.ay - np.arctan((zi_1d - cz) * np.sin(self.total_alpha) / zEff)\n beamz = beam.zy\n\n elif self.orientation == 2:\n\n # small change to total angle of incidence\n self.total_alpha += beam.ax\n\n k_ix = -np.sin(self.alpha + beam.ax)\n k_iy = -np.sin(beam.ay)\n k_iz = np.sqrt(1 - k_ix ** 2 - k_iy ** 2)\n\n # coordinate mapping for interpolation\n zi = -beam.x / np.sin(self.total_alpha)\n zi_1d = zi\n cz = -beam.cx / np.sin(self.total_alpha)\n yi = -beam.y\n yi_1d = yi\n cy = -beam.cy\n\n # beam radius across grating (grating can be long enough that the additional correction is needed\n zEff = beam.zx + (zi_1d - cz) * np.cos(self.total_alpha)\n alphaBeam = beam.ax - np.arctan((zi_1d - cz) * np.sin(self.total_alpha) / zEff)\n beamz = beam.zx\n\n elif self.orientation == 3:\n\n # 
small change to total angle of incidence\n self.total_alpha += beam.ay\n\n k_ix = -np.sin(self.alpha + beam.ay)\n k_iy = beam.ax\n k_iz = np.sqrt(1 - k_ix ** 2 - k_iy ** 2)\n\n # coordinate mapping for interpolation\n zi = -beam.y / np.sin(self.total_alpha)\n zi_1d = zi\n cz = -beam.cy / np.sin(self.total_alpha)\n yi = beam.x\n yi_1d = yi\n cy = beam.cx\n\n # beam radius across grating (grating can be long enough that the additional correction is needed\n zEff = beam.zy + (zi_1d - cz) * np.cos(self.total_alpha)\n\n alphaBeam = beam.ay - np.arctan((zi_1d - cz) * np.sin(self.total_alpha) / zEff)\n beamz = beam.zy\n\n k_i = np.array([k_ix, k_iy, k_iz])\n delta_k = self.rotation(k_i)\n\n # mirror shape error interpolation onto beam coordinates (if applicable)\n if self.shapeError is not None:\n # get shape of shape error input\n mirror_shape = np.shape(self.shapeError)\n\n # assume this is the central line shaper error along the long axis if only 1D\n if np.size(mirror_shape) == 1:\n # assume this is the central line and it's the same across the mirror width\n Ms = mirror_shape[0]\n # mirror coordinates (beam coordinates)\n max_zs = self.length / 2\n # mirror coordinates\n zs = np.linspace(-Ms / 2, Ms / 2 - 1, Ms) * max_zs / (Ms / 2 - 1)\n # 1D interpolation onto beam coordinates\n shapeError2 = np.interp(zi_1d - self.dx / np.tan(self.total_alpha), zs, self.shapeError)\n\n # if 2D, assume index 0 corresponds to short axis, index 1 to long axis\n else:\n # shape error array shape\n Ns = mirror_shape[0]\n Ms = mirror_shape[1]\n # mirror coordinates\n max_xs = self.length / 2\n # mirror coordinates\n zs = np.linspace(-Ms / 2, Ms / 2 - 1, Ms) * max_xs / (Ms / 2 - 1)\n\n # 1D interpolation onto beam coordinates (just take central line)\n shapeError2 = np.interp(zi_1d - self.dx / np.tan(self.total_alpha), zs, self.shapeError[int(Ns/2), :])\n\n # figure out aperturing due to mirror's finite size\n z_mask = (np.abs(zi - self.dx / np.tan(self.total_alpha)) < self.length / 2).astype(float)\n\n # calculate effect of ellipse misalignment\n p_misalign = self.calc_misalignment(beam, cz)\n\n # apply benders\n bend_coeff = self.bend(cz)\n\n # sum up coefficients from misalignment and bending\n coeff_total = Util.combine_coeff(p_misalign, bend_coeff)\n\n # offset along mirror z-axis\n offset = cz - self.dx / np.tan(self.total_alpha)\n\n # get coefficients centered about beam center instead of mirror center\n p_recentered = Util.recenter_coeff(coeff_total, offset)\n\n # get polynomial order\n M_poly = np.size(coeff_total) - 1\n\n # calculate contributions to high order error\n total_error = shapeError2 * 1e-9 + Util.polyval_high_order(p_recentered, zi - cz)\n\n # calculate effect on high order phase for glancing incidence mirror\n phase = -total_error * 4 * np.pi * np.sin(self.total_alpha) / beam.lambda0\n\n # add phase to high_order\n high_order += phase\n\n # scaling between mirror z-axis and new beam coordinates\n scale = np.sin(self.total_alpha)\n\n # scale the offset\n offset_scaled = offset * scale\n\n # for low orders change coordinates into reflected coordinates\n p_scaled = Util.poly_change_coords(coeff_total, scale)\n\n # multiply by -2 sin(alpha) to get total path length change\n p_scaled *= -2 * np.sin(self.total_alpha)\n\n # Add normal 2nd order phase to p_scaled\n # p_scaled[-3] += (-1 / (2 * (self.p + cz*np.cos(self.total_alpha)))\n # - 1 / (2 * (self.q - cz * np.cos(self.total_alpha))))\n # the difference between p and beamz is already accounted for in the \"calc_misalignment\" method now,\n # 
so the beam radius of curvature should be completely removed here. For the cases considered so far this\n # gave identical results to previously.\n p_scaled[-3] += (-1 / (2 * (beamz))\n - 1 / (2 * (self.q - cz * np.cos(self.total_alpha))))\n\n\n # account for decentering\n p_scaled = Util.recenter_coeff(p_scaled, offset_scaled)\n\n # now add in normal focusing contribution to phase\n # factor out pi/lambda for quadratic term (so equal to 1/z)\n quadratic += 2 * p_scaled[-3]\n\n # factor out 2pi/lambda for linear term (so equal to change in propagation angle)\n linear += p_scaled[-2]\n\n # now change outgoing beam k-vector based on mirror orientation, and apply quadratic phase\n if self.orientation == 0:\n\n # modify beam's wave attribute by mirror aperture and phase error\n beam.wavex *= z_mask * np.exp(1j * high_order)\n\n # take into account mirror reflection causing beam to invert\n beam.x *= -1\n\n # adjust beam direction relative to properly aligned axis\n beam.rotate_nominal(delta_azimuth=2 * self.alpha)\n delta_ax = -2 * beam.ax + np.arcsin(delta_k[0] / np.cos(self.alpha)) - linear\n # delta_ax = -2*beam.ax + np.arcsin(delta_k[0])\n delta_ay = np.arcsin(delta_k[1])\n beam.rotate_beam(delta_ax=delta_ax, delta_ay=delta_ay)\n\n # adjust beam direction relative to properly aligned axis\n # beam.ax = -beam.ax + np.arcsin(delta_k[0] / np.cos(self.alpha)) - linear\n # beam.ay += np.arcsin(delta_k[1])\n\n # adjust beam quadratic phase\n # beam.zx = 1 / (1 / beam.zx + quadratic)\n new_zx = 1 / (1 / beam.zx + quadratic)\n beam.change_z(new_zx=new_zx)\n\n # adjust beam position due to mirror de-centering\n delta_cx = 2 * self.dx * np.cos(self.total_alpha)\n beam.cx = -beam.cx + delta_cx\n beam.x = beam.x + delta_cx\n\n elif self.orientation == 1:\n\n # modify beam's wave attribute by mirror aperture and phase error\n beam.wavey *= z_mask * np.exp(1j * high_order)\n\n # take into account mirror reflection causing beam to invert\n beam.y *= -1\n\n # adjust beam direction relative to properly aligned axis\n beam.rotate_nominal(delta_elevation=2 * self.alpha)\n delta_ay = -2 * beam.ay + np.arcsin(delta_k[0] / np.cos(self.alpha)) - linear\n # delta_ax = -2*beam.ax + np.arcsin(delta_k[0])\n delta_ax = -np.arcsin(delta_k[1])\n beam.rotate_beam(delta_ax=delta_ax, delta_ay=delta_ay)\n\n # adjust beam direction relative to properly aligned axis\n # beam.ax += -np.arcsin(delta_k[1])\n # beam.ay = -beam.ay + np.arcsin(delta_k[0] / np.cos(self.alpha)) - linear\n\n # adjust beam quadratic phase\n # beam.zy = 1 / (1 / beam.zy + quadratic)\n new_zy = 1 / (1 / beam.zy + quadratic)\n beam.change_z(new_zy=new_zy)\n\n # adjust beam position due to mirror de-centering\n delta_cy = 2 * self.dx * np.cos(self.total_alpha)\n beam.cy = -beam.cy + delta_cy\n beam.y = beam.y + delta_cy\n\n elif self.orientation == 2:\n\n # modify beam's wave attribute by mirror aperture and phase error\n beam.wavex *= z_mask * np.exp(1j * high_order)\n\n # take into account mirror reflection causing beam to invert\n beam.x *= -1\n\n # adjust beam direction relative to properly aligned axis\n beam.rotate_nominal(delta_azimuth=-2 * self.alpha)\n delta_ax = -2 * beam.ax - np.arcsin(delta_k[0] / np.cos(self.alpha)) + linear\n # delta_ax = -2*beam.ax + np.arcsin(delta_k[0])\n delta_ay = -np.arcsin(delta_k[1])\n beam.rotate_beam(delta_ax=delta_ax, delta_ay=delta_ay)\n\n # adjust beam direction relative to properly aligned axis\n # beam.ax = -beam.ax - np.arcsin(delta_k[0] / np.cos(self.alpha)) + linear\n # beam.ay += -np.arcsin(delta_k[1])\n\n 
# adjust beam quadratic phase\n # beam.zx = 1 / (1 / beam.zx + quadratic)\n new_zx = 1 / (1 / beam.zx + quadratic)\n beam.change_z(new_zx=new_zx)\n\n # adjust beam position due to mirror de-centering\n delta_cx = -2 * self.dx * np.cos(self.total_alpha)\n beam.cx = -beam.cx + delta_cx\n beam.x = beam.x + delta_cx\n\n elif self.orientation == 3:\n\n # modify beam's wave attribute by mirror aperture and phase error\n beam.wavey *= z_mask * np.exp(1j * high_order)\n\n # take into account mirror reflection causing beam to invert\n beam.y *= -1\n\n # adjust beam direction relative to properly aligned axis\n beam.rotate_nominal(delta_elevation=-2 * self.alpha)\n delta_ay = -2 * beam.ay - np.arcsin(delta_k[0] / np.cos(self.alpha)) + linear\n # delta_ax = -2*beam.ax + np.arcsin(delta_k[0])\n delta_ax = np.arcsin(delta_k[1])\n beam.rotate_beam(delta_ax=delta_ax, delta_ay=delta_ay)\n\n # adjust beam direction relative to properly aligned axis\n # beam.ax += np.arcsin(delta_k[1])\n # beam.ay = -beam.ay - np.arcsin(delta_k[0] / np.cos(self.alpha)) + linear\n\n # adjust beam quadratic phase\n # beam.zy = 1 / (1 / beam.zy + quadratic)\n new_zy = 1 / (1 / beam.zy + quadratic)\n beam.change_z(new_zy=new_zy)\n\n # adjust beam position due to mirror de-centering\n delta_cy = -2 * self.dx * np.cos(self.total_alpha)\n beam.cy = -beam.cy + delta_cy\n beam.y = beam.y + delta_cy\n\n # plt.figure()\n # plt.plot(np.abs(beam.wavex))\n # plt.figure()\n # plt.plot(np.angle(beam.wavex))\n\n return", "def _normalize_fixed_pos_vel_data(x_inp, normed_min, normed_max, shape, scaling_factor=1.0):\n x_min = 0.344*scaling_factor\n y_min = -0.256*scaling_factor\n z_min = -0.149*scaling_factor\n x_max = 0.856*scaling_factor\n y_max = 0.256*scaling_factor\n z_max = -0.0307*scaling_factor\n\n if len(x_inp.get_shape()) == 2:\n x = (x_inp[:, 0] - x_min) / (x_max - x_min)\n y = (x_inp[:, 1] - y_min) / (y_max - y_min)\n z = (x_inp[:, 2] - z_min) / (z_max - z_min)\n x_normed = tf.stack([x, y, z], axis=1)\n elif len(x_inp.get_shape()) == 3:\n x = (x_inp[:, :, 0] - x_min) / (x_max - x_min)\n y = (x_inp[:, :, 1] - y_min) / (y_max - y_min)\n z = (x_inp[:, :, 2] - z_min) / (z_max - z_min)\n x_normed = tf.stack([x, y, z], axis=2)\n else:\n raise ValueError(\"parsing dataset failed because position samples have unexpected shapes\")\n\n normed_min_tensor = tf.fill(shape, tf.cast(normed_min, x_normed.dtype))\n normed_max_tensor = tf.fill(shape, tf.cast(normed_max, x_normed.dtype))\n\n x_normed = x_normed * (normed_max_tensor - normed_min_tensor) + normed_min_tensor\n\n return x_normed", "def normalise(vect):\n return vect / np.sum(vect)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the MVDR beamforming vector described in [Souden2010MVDR]. The implementation is based on the description in [Erdogan2016MVDR]. The ref_channel is selected based on an SNR estimate. The eps ensures that the SNR estimation for the ref_channel works as long as target_psd_matrix and noise_psd_matrix do not contain inf or nan; zero matrices also work. The default eps is the smallest non-zero value.
def get_mvdr_vector_souden( target_psd_matrix, noise_psd_matrix, ref_channel=None, eps=None, return_ref_channel=False ): assert noise_psd_matrix is not None phi = stable_solve(noise_psd_matrix, target_psd_matrix) lambda_ = np.trace(phi, axis1=-1, axis2=-2)[..., None, None] if eps is None: eps = np.finfo(lambda_.dtype).tiny mat = phi / np.maximum(lambda_.real, eps) if ref_channel is None: ref_channel = get_optimal_reference_channel( mat, target_psd_matrix, noise_psd_matrix, eps=eps) assert np.isscalar(ref_channel), ref_channel beamformer = mat[..., ref_channel] if return_ref_channel: return beamformer, ref_channel else: return beamformer
[ "def get_lcmv_vector_souden(\n target_psd_matrix,\n interference_psd_matrix,\n noise_psd_matrix,\n ref_channel=None,\n eps=None,\n return_ref_channel=False\n):\n raise NotImplementedError(\n 'This is not yet thoroughly tested. It also misses the response vector,'\n 'thus it is unclear, how to select, which speaker to attend to.'\n )\n phi_in = stable_solve(noise_psd_matrix, interference_psd_matrix)\n phi_xn = stable_solve(noise_psd_matrix, target_psd_matrix)\n\n D = phi_in.shape[-1]\n\n # Equation 5, 6\n gamma_in = np.trace(phi_in, axis1=-1, axis2=-2)[..., None, None]\n gamma_xn = np.trace(phi_xn, axis1=-1, axis2=-2)[..., None, None]\n\n # Can be written in a single einsum call, here separate for clarity\n # Equation 11\n gamma = gamma_in * gamma_xn - np.trace(\n np.einsum('...ab,...bc->...ac', phi_in, phi_xn)\n )[..., None, None]\n # Possibly:\n # gamma = gamma_in * gamma_xn - np.einsum('...ab,...ba->...', phi_in, phi_xn)\n\n eye = np.eye(D)[(phi_in.ndim - 2) * [None] + [...]]\n\n # TODO: Should be determined automatically (per speaker)?\n ref_channel = 0\n\n # Equation 51, first fraction\n if eps is None:\n eps = np.finfo(gamma.dtype).tiny\n mat = gamma_in * eye - phi_in / np.maximum(gamma.real, eps)\n\n # Equation 51\n # Faster, when we select the ref_channel before matrix multiplication.\n beamformer = np.einsum('...ab,...bc->...ac', mat, phi_xn)[..., ref_channel]\n # beamformer = np.einsum('...ab,...b->...a', mat, phi_xn[..., ref_channel])\n\n if return_ref_channel:\n return beamformer, ref_channel\n else:\n return beamformer", "def getRMSD(self):\n\n ensemble = self._ensemble\n indices = ensemble._indices\n if indices is None:\n return getRMSD(ensemble._coords,\n ensemble._confs[self._index],\n self.getWeights())[0]\n else:\n return getRMSD(ensemble._coords[indices],\n ensemble._confs[self._index, indices],\n self.getWeights())[0]", "def getRMSD(self):\n\n ensemble = self._ensemble\n index = self._index\n indices = ensemble._indices\n if indices is None:\n return getRMSD(ensemble._coords,\n ensemble._confs[index],\n ensemble._weights[index])\n else:\n return getRMSD(ensemble._coords[indices],\n ensemble._confs[index, indices],\n ensemble._weights[index, indices])", "def vd2kmd(vp, vs, rho):\n lam = rho * (vp ** 2 - 4/3 * vs ** 2)\n mu = rho * vs ** 2\n rho = rho\n return lam, mu, rho", "def kmd2vd(k, mu, rho):\n vp = np.sqrt((k + 4/3 * mu)/ rho)\n vs = np.sqrt(mu / rho)\n rho = rho\n return vp, vs, rho", "def earthVel(mjd):\n\n # All angular values are in radians.\n # g = mean anomaly\n # L = mean longitude, corrected for aberration\n # elong = ecliptic longitude\n # radius = distance to Sun in AU\n # eps = obliquity of ecliptic\n\n # time in days since JD 2451545.0\n tdays = mjd - REFDATE\n\n g_dot = 0.9856003 * DEG_RAD\n L_dot = 0.9856474 * DEG_RAD\n\n eps = (23.439 - 0.0000004 * tdays) * DEG_RAD\n\n g = ((357.528 + 0.9856003 * tdays) % 360.) * DEG_RAD\n L = ((280.461 + 0.9856474 * tdays) % 360.) 
* DEG_RAD\n\n # 1.915 degrees 0.02 degree\n elong = L + 0.033423 * N.sin(g) + 0.000349 * N.sin(2.*g)\n elong_dot = L_dot + \\\n 0.033423 * N.cos(g) * g_dot + \\\n 0.000349 * N.cos(2.*g) * 2.*g_dot\n\n radius = 1.00014 - 0.01671 * N.cos(g) - 0.00014 * N.cos(2.*g)\n radius_dot = 0.01671 * N.sin(g) * g_dot + \\\n 0.00014 * N.sin(2.*g) * 2.*g_dot\n\n x_dot = radius_dot * N.cos(elong) - \\\n radius * N.sin(elong) * elong_dot\n\n y_dot = radius_dot * N.cos(eps) * N.sin(elong) + \\\n radius * N.cos(eps) * N.cos(elong) * elong_dot\n\n z_dot = radius_dot * N.sin(eps) * N.sin(elong) + \\\n radius * N.sin(eps) * N.cos(elong) * elong_dot\n\n # Convert to km/sec with Sun as origin.\n velocity = N.zeros(3, dtype=N.float64)\n velocity[0] = -x_dot * KM_AU / SEC_DAY\n velocity[1] = -y_dot * KM_AU / SEC_DAY\n velocity[2] = -z_dot * KM_AU / SEC_DAY\n\n return velocity", "def solar_PV_model(country_masked_data_T2m,country_masked_data_ssrd,country_mask):\n # reference values, see Evans and Florschuetz, (1977)\n T_ref = 25. \n eff_ref = 0.9 #adapted based on Bett and Thornton (2016)\n beta_ref = 0.0042\n G_ref = 1000.\n \n rel_efficiency_of_pannel = eff_ref*(1 - beta_ref*(country_masked_data_T2m - T_ref))\n capacity_factor_of_pannel = np.nan_to_num(rel_efficiency_of_pannel*\n (country_masked_data_ssrd/G_ref)) \n\n\n spatial_mean_solar_cf = np.zeros([len(capacity_factor_of_pannel)])\n for i in range(0,len(capacity_factor_of_pannel)):\n spatial_mean_solar_cf[i] = np.average(capacity_factor_of_pannel[i,:,:],\n weights=country_mask)\n\n return(spatial_mean_solar_cf)", "def getSubSDM_v(SDM_v):\n\tcount_calls('getSubSDM_v')\n\tnWaves = int(len(SDM_v)**.5)\n\tSDM_s=SDM_v[:]\n\tfor i in range(len(SDM_v)):\n\t\tii = int(i/nWaves)\n\t\tjj = i-ii*nWaves\n\t\tif abs(ii-jj)>1:\n\t\t\tSDM_s[i]=0.\n\treturn SDM_s", "def get_mdf(self):\n if (self.param['quantity'] <> \"mdf\"):\n raise ValueError(\"Expected DP input: *.mdf or *.eps files\");\n\n spectra=[];\n for iqpol in range(self.nqpol): # create MDF for each polarisation\n\n E = np.asarray(self.E,float);\n eps = np.asarray(self.Reps[iqpol],complex)\\\n +1j*np.array(self.Ieps[iqpol],complex);\n \n # DP parameter (for the polarization iqpol)\n p={'iqpol': iqpol};\n p['iqpol'] = iqpol;\n p['q_rc'] = self.q_rc[iqpol];\n p['q_cc'] = self.q_cc[iqpol];\n p['qred_rc'] = self.qred_rc[iqpol];\n p['qred_cc'] = self.qred_cc[iqpol];\n p['G_rc'] = self.G_rc;\n p['G_cc'] = self.G_cc;\n\n # log\n if 'comment' not in self.param:\n self.param['comment'] = \\\n [\"# created with $Id: dp_mdf.py 479 2014-03-10 11:29:40Z hambach $ \\n\",\n \"# -read from file: %s, iqpol: %d\\n\" %(self.param['filename'],iqpol)];\n\n # create MDF\n spectra.append(MDF(self.param,p,E,eps));\n return spectra;", "def getSDM_complex(SDM_v):\n\tcount_calls('getSDM_complex')\n\tnWaves = int(len(SDM_v)**.5)\n\tSDM_c = init_array(nWaves,0.+0.j)\n\tfor i in range(nWaves**2):\n\t\tii = int(i/nWaves)\n\t\tjj = i - ii*nWaves\n\t\tSDM_c[ii][jj] += SDM_v[i]\t# This gets only the j < i entries right\n\t\tSDM_c[jj][ii] += SDM_v[i]*1.j\t#\n\tfor i in range(nWaves):\n\t\tfor j in range(nWaves):\n\t\t\tif i==j: # The diagonal elements are wrong, fis this here\n\t\t\t\tSDM_c[i][j] = SDM_c[i][j].real+0.j\n\t\t\tif j > i: # the j > i entries are set wrong before, fix this here. 
\n\t\t\t\tSDM_c[i][j] = SDM_c[j][i].conjugate()\n\treturn SDM_c", "def deredden(self, ebmv, law='OD94', Rv=3.1):\n from extinctions.extinction import extinction_factor\n\n if hasattr(self, 'zorig'): # Spectrum has been deredshifted\n raise ValueError, \\\n \"Dereddening should be done prior to deredshifting.\"\n\n # Extinction factor (<1)\n ext = extinction_factor(self.x, ebmv, rv=Rv, law=law)\n self.y /= ext\n if self.hasVar:\n self.v /= ext**2\n if self.hasCov:\n self.cov /= ext**2\n\n self.ebmv = ebmv # Mark spectrum as unreddened\n self.rv = Rv\n self.law = law\n\n self.setKey(MWEBMV=(ebmv, \"MW E(B-V) correction applied\"),\n MWRV=(Rv, \"R_V used for MW E(B-V) correction\"),\n MWLAW=(law, \"Extinction law used for MW correction\"))", "def getMcbsdValue(grant):\n return (-13\n if grant['operationParam']['operationFrequencyRange']['highFrequency'] > 3690e6\n else -25) + OFFSET_REF_BW_DB", "def V_LJ(mag_r, sp):\n V_rc = 4 * sp.eps * ((sp.sigma / sp.rc) ** 12 - (sp.sigma / sp.rc) ** 6)\n return 4 * sp.eps * ((sp.sigma / mag_r) ** 12 - (sp.sigma / mag_r) ** 6) - \\\n V_rc if mag_r < sp.rc else 0.0", "def rvs(self) -> Parameter:\n # Sample a parameter vector from the KDE\n val = self.kde.rvs()\n # If any of the parameter values are negative, then resample them \n while val['asymptomatic']<0 or val['secondary_school']<0 or val['primary_school']<0 or val['retail']<0 or val['presymptomatic']<0 or val['symptomatic']<0 or val['work']<0:\n val = self.kde.rvs()\n return val", "def init_vrpotential_diag13():\n global vpotr, iuar, oldcu\n farname = \"vpotrk1.\" + s1.cdrun\n in1.farname[:] = farname\n in1.modesxar = int(min(in1.modesxar,nxh+1))\n# vpotr = store selected fourier modes for radiative vector potential\n vpotr = numpy.empty((2,in1.modesxar),complex_type,'F')\n# open file: updates narrec and possibly iuar\n if (in1.narrec==0):\n mdiag1.dafopenvc1(vpotr,iuar,in1.narrec,farname)\n# oldcu = previous current density with guard cells\n oldcu = numpy.zeros((2,nxe),float_type,'F')\n# spectral analysis\n global mtar, itar, vpkwr, vpksr, vwkr\n if ((in1.ndar==2) or (in1.ndar==3)):\n mtar = int((nloop - 1)/in1.ntar) + 1; itar = 0\n# vpkwr = power spectrum for radiative vector potential\n vpkwr = numpy.empty((2,in1.modesxar,iwr,2),float_type,'F')\n# vpksr = accumulated complex spectrum for radiative vector potential\n vpksr = numpy.zeros((2,4,in1.modesxar,iwr),double_type,'F')\n# vwkr = maximum frequency as a function of k for radiative vector\n# potential\n vwkr = numpy.empty((2,in1.modesxar,2),float_type,'F')\n# create dummy arrays to avoid undefined arguments later\n else:\n vpkwr = numpy.zeros((1,1,1,1),float_type,'F')\n vwkr = numpy.zeros((1,1,1),float_type,'F')", "def dRV(dp):\n from lib.utils import typetest\n import numpy as np\n import lib.constants as const\n from astropy.io import ascii\n import pdb\n typetest('dp',dp,str)\n\n #d=ascii.read(dp+'obs_times',names=['mjd','time','exptime','airmass'])\n d=ascii.read(dp+'obs_times',comment=\"#\")\n #Texp=d['exptime'].astype('float')\n Texp=d['col3'].data#astype('float')\n vorb=v_orb(dp)\n p=phase(dp)\n P=paramget('P',dp)\n i=paramget('inclination',dp)\n typetest('P',P,float)\n typetest('i',i,float)\n\n dRV=vorb*np.cos(2.0*np.pi*p)*2.0*np.pi/(P*const.day)*np.sin(np.radians(i))\n return abs(dRV*Texp)", "def msd(self):\n msd_vec = self.model.msd_vec\n msd = self.mu ** 2 * msd_vec * self._shore_coef\n return msd.sum()", "def make_dense_mass_spectra(peak_locs, peak_intensities, max_peak_loc):\n dense_spectrum = np.zeros(max_peak_loc)\n 
dense_spectrum[peak_locs] = peak_intensities\n\n return dense_spectrum", "def fast_rmsd(self, mdl):\n return _modeller.mod_model_fast_rmsd(self.modpt, mdl.modpt)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In "A Study of the LCMV and MVDR Noise Reduction Filters" Mehrez Souden elaborates an alternative formulation for the LCMV beamformer in the appendix for a rank one interference matrix. Therefore, this algorithm is only valid, when the interference PSD matrix is approximately rank one, or (in other words) only 2 speakers are present in total.
def get_lcmv_vector_souden( target_psd_matrix, interference_psd_matrix, noise_psd_matrix, ref_channel=None, eps=None, return_ref_channel=False ): raise NotImplementedError( 'This is not yet thoroughly tested. It also misses the response vector,' 'thus it is unclear, how to select, which speaker to attend to.' ) phi_in = stable_solve(noise_psd_matrix, interference_psd_matrix) phi_xn = stable_solve(noise_psd_matrix, target_psd_matrix) D = phi_in.shape[-1] # Equation 5, 6 gamma_in = np.trace(phi_in, axis1=-1, axis2=-2)[..., None, None] gamma_xn = np.trace(phi_xn, axis1=-1, axis2=-2)[..., None, None] # Can be written in a single einsum call, here separate for clarity # Equation 11 gamma = gamma_in * gamma_xn - np.trace( np.einsum('...ab,...bc->...ac', phi_in, phi_xn) )[..., None, None] # Possibly: # gamma = gamma_in * gamma_xn - np.einsum('...ab,...ba->...', phi_in, phi_xn) eye = np.eye(D)[(phi_in.ndim - 2) * [None] + [...]] # TODO: Should be determined automatically (per speaker)? ref_channel = 0 # Equation 51, first fraction if eps is None: eps = np.finfo(gamma.dtype).tiny mat = gamma_in * eye - phi_in / np.maximum(gamma.real, eps) # Equation 51 # Faster, when we select the ref_channel before matrix multiplication. beamformer = np.einsum('...ab,...bc->...ac', mat, phi_xn)[..., ref_channel] # beamformer = np.einsum('...ab,...b->...a', mat, phi_xn[..., ref_channel]) if return_ref_channel: return beamformer, ref_channel else: return beamformer
[ "def DP_Pitch_Estimation(f0_candidates,score,nonDPindices,DPindices):\r\n \r\n rows=len(f0_candidates)\r\n cols=len(f0_candidates[0])\r\n pitch = np.zeros((1,rows))\r\n indsmax=np.argmax(score,axis=1)\r\n f0_candidates_dp = np.zeros((rows,cols))\r\n for m in np.arange(0,len(nonDPindices)):\r\n f0_candidates_dp[int(nonDPindices[m])] = f0_candidates[int(nonDPindices[m]),indsmax[int(nonDPindices[m])]]\r\n #print(f0_candidates_dp[int(nonDPindices[m]),:])\r\n for m in np.arange(0,len(DPindices)):\r\n f0_candidates_dp[int(DPindices[m]),:]=f0_candidates[int(DPindices[m]),:]\r\n #print(f0_candidates_dp[int(DPindices[m]),:])\r\n \r\n VuV = np.sign(abs(np.sum(f0_candidates_dp,axis=1)))\r\n boundary = abs(VuV-np.append(VuV[1:,],np.zeros(1)))\r\n boundary_inds = np.where(boundary==1)\r\n \r\n #for m in np.arange(0,len(f0_candidates_dp)):\r\n #print(f0_candidates_dp[m,:])\r\n for i2 in np.arange(0,len(boundary_inds[0]),2):\r\n inds_temp = np.arange(boundary_inds[0][i2]+1,boundary_inds[0][i2+1]+1)\r\n \r\n if len(inds_temp)>1:\r\n x = f0_candidates_dp[inds_temp,:]\r\n rows1=len(x)\r\n cols1=len(x[0])\r\n c=np.zeros((rows1,cols1))\r\n b=np.zeros((rows1,cols1))\r\n out=np.zeros((1,rows1))\r\n temp2=np.zeros((1,cols1))\r\n \r\n for j1 in np.arange(1,rows1):\r\n for j2 in np.arange(0,cols1):\r\n for j3 in np.arange(0,cols1):\r\n temp2[0][j3]=c[j1-1,j3]+np.square(x[j1,j2]-x[j1-1,j3])\r\n c[j1,j2]=np.amin(temp2[0])\r\n b[j1,j2]=np.argmin(temp2[0])\r\n\r\n indd = np.argmin(c[-1,:])\r\n for j in np.arange(len(out[0])-1,-1,-1):\r\n out[0][j]=x[j][int(indd)]\r\n indd=b[j][int(indd)]\r\n pitch[0][inds_temp]=np.matrix.transpose(out[0])\r\n else:\r\n pitch[0][inds_temp]=f0_candidates_dp[inds_temp,indsmax[inds_temp]]\r\n \r\n uvInds = np.where(VuV==0)\r\n for m in np.arange(0,len(uvInds[0])):\r\n pitch[0][uvInds[0][m]]=f0_candidates[uvInds[0][m],indsmax[uvInds[0][m]]]\r\n pitch = np.matrix.transpose(pitch)\r\n \r\n return(pitch)", "def similarity(self, embedded, w, b, center=None):\n N = self.opt.speaker_num\n M = self.opt.utter_num \n P = self.opt.embedding_size\n ##S = opt.segment_num\n '''if self.opt.train_type == 'multi_attention' or self.opt.train_type == 'divide_attention':\n P = self.opt.embedding_size * self.opt.attention_head_num \n else: \n P = self.opt.embedding_size'''\n ##embedded_mean = torch.cat([torch.mean(embedded[i*S:(i+1)*S,:], dim=0, keepdim=True) for i in range(N*M)], dim=0)\n embedded_split = torch.reshape(embedded, (N, M, P))\n \n if center is None:\n center = self.normalize(torch.mean(embedded_split, dim=1)) # [N,P] normalized center vectors eq.(1)\n center_except = self.normalize(torch.reshape(torch.sum(embedded_split, dim=1, keepdim=True)\n - embedded_split, (N*M,P))) # [NM,P] center vectors eq.(8)\n # make similarity matrix eq.(9)\n S = torch.cat(\n [torch.cat([torch.sum(center_except[i*M:(i+1)*M,:]*embedded_split[j,:,:], dim=1, keepdim=True) if i==j\n else torch.sum(center[i:(i+1),:]*embedded_split[j,:,:], dim=1, keepdim=True) for i in range(N)],\n dim=1) for j in range(N)], dim=0)\n else :\n # If center(enrollment) exist, use it.\n S = torch.cat(\n [torch.cat([torch.sum(center[i:(i + 1), :] * embedded_split[j, :, :], dim=1, keepdim=True) for i\n in range(N)], dim=1) for j in range(N)], dim=0)\n \n if self.opt.loss_type.split('_')[1] == 'softmax' or self.opt.loss_type.split('_')[1] == 'contrast':\n S = torch.abs(w)*S + b # rescaling\n \n return S", "def matrix_p1p2(matrix):\r\n #print('Start converting data notation from winner/loser to player_1/player_2')\r\n # define a new matrix for 
trainning and re-arange the information for winner and loser as player 1 and player 2. For each pair, player_1_id < player_2_id.\r\n matrix_n = pd.DataFrame()\r\n \r\n # match information\r\n col_match = ['tourney_name', 'surface', 'draw_size', 'tourney_level', 'tourney_date','year', 'month', 'day', 'day_week',\r\n 'match_num', 'best_of', 'round', 'minutes']\r\n \r\n matrix_n[col_match] = matrix[col_match]\r\n \r\n # columns for winner and loser\r\n \r\n col_w = [item for item in matrix.columns if 'winner' in item] \r\n col_l = [item for item in matrix.columns if 'loser' in item] \r\n \r\n # new columns for player 1 and player 2\r\n col_p1 = [item.replace('winner', 'p1') for item in col_w] \r\n col_p2 = [item.replace('winner', 'p2') for item in col_w] \r\n \r\n # re-arange the columns based on p1 and p2\r\n matrix[['winner_id','loser_id']]=matrix[['winner_id','loser_id']].astype(np.float64)\r\n \r\n matrix_n[col_p1] = matrix.loc[matrix.winner_id<matrix.loser_id,col_w] \r\n matrix_n[col_p2] = matrix.loc[matrix.winner_id>matrix.loser_id,col_w] \r\n \r\n matrix_n['p1_win'] = matrix_n['p1_id'].map(lambda x: 1 if x>0 else 0, na_action = 'ignore').fillna(0)\r\n matrix_n['p2_win'] = matrix_n['p2_id'].map(lambda x: 1 if x>0 else 0, na_action = 'ignore').fillna(0)\r\n \r\n for i in range(len(col_p1)):\r\n matrix_n[col_p1[i]].fillna(matrix[matrix.winner_id>matrix.loser_id][col_l[i]],inplace = True)\r\n matrix_n[col_p2[i]].fillna(matrix[matrix.winner_id<matrix.loser_id][col_l[i]],inplace = True)\r\n \r\n # add information for the number of set won by each player\r\n matrix_n['p1_sets_win'] = 0.0\r\n matrix_n['p2_sets_win'] = 0.0\r\n \r\n for i in range(1,6):\r\n matrix_n['p1_sets_win'] = matrix_n['p1_sets_win'] + 1.0*(matrix_n['p1_set_'+str(i)]>matrix_n['p2_set_'+str(i)])\r\n matrix_n['p2_sets_win'] = matrix_n['p2_sets_win'] + 1.0*(matrix_n['p1_set_'+str(i)]<matrix_n['p2_set_'+str(i)])\r\n \r\n matrix_n[['p1_id','p2_id']].astype(np.int64)\r\n \r\n \r\n #print('Conversion finished')\r\n \r\n return matrix_n", "def Inference_PLMDCA(Jscore, matrix_contacts):\n val,cts = np.unique(matrix_contacts,return_counts = True)\n nbrcontacts = cts[val == 1]\n \n # inverse of the correlation matrix to get the couplings\n # inferred_couplings = np.linalg.inv(mat_corr)\n\n TP = []\n\n # order the 2d array and find the index of the sorted values in the matrix\n index_sorted_array_x, index_sorted_array_y = np.unravel_index(np.argsort(-Jscore, axis=None), Jscore.shape)\n\n\n idx_flip = list(index_sorted_array_x)\n idy_flip = list(index_sorted_array_y)\n\n\n FP = []\n\n TP_coords = []\n all_coords = []\n N = 0 \n number_pairs = []\n \n listFPJij = []\n # §TP_coords = []\n listTPJij = []\n \n \n list_tp = []\n TP = 0\n\n list_tp_fraction_allpairs = []\n\n\n for x, y in zip(idx_flip, idy_flip):\n\n # just look at the elements above the diagonal as symmetric matrix\n # to not count twice each contact\n if y > x:\n\n N = N + 1\n\n number_pairs.append(N)\n\n\n if matrix_contacts[x,y] == 1:\n TP = TP + 1\n if N <= nbrcontacts:\n TP_coords.append([x,y])\n listTPJij.append(Jscore[x,y])\n else:\n\n if N <= nbrcontacts:\n FP.append([x,y])\n listFPJij.append(Jscore[x,y])\n\n\n list_tp.append(TP)\n\n all_coords.append([x,y])\n\n list_tp_fraction_allpairs.append(TP/N)\n\n return list_tp_fraction_allpairs, FP,listFPJij,TP_coords,listTPJij", "def test_NormInvWish():\n\n # Test sample() method:\n mu_0 = np.arange(3.0)\n kappa_0 = 3.0\n Lam_0 = np.eye(3) + 0.01*np.arange(9).reshape(3,3)\n Lam_0 += Lam_0.T # To make symmetric\n nu_0 = 3\n 
prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n arr = prior.sample()\n assert isinstance(arr, np.void)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=1)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (1,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=(1,))\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (1,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=10)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (10,)\n assert arr.dtype == prior.model_dtype\n\n arr = prior.sample(size=(10, 20))\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (10, 20)\n assert arr.dtype == prior.model_dtype\n\n # Test like1() method:\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n x = np.arange(3.0)\n mu = np.arange(3.0)+1.0\n Sig = np.eye(3) + 0.03*np.arange(9).reshape(3, 3)\n Sig += Sig.T\n arr = prior.like1(x, mu, Sig)\n assert isinstance(arr, float)\n\n # If trailing axis of x is not dim 3 (for these prior parameters), should get and AssertionError\n xbad = np.arange(2.0)\n np.testing.assert_raises(AssertionError, prior.like1, xbad, mu, Sig)\n\n # And similar checks for mu and Sig\n mubad = np.arange(4.0)\n np.testing.assert_raises(AssertionError, prior.like1, x, mubad, Sig)\n\n Sigbad = np.eye(2)\n np.testing.assert_raises(AssertionError, prior.like1, x, mu, Sigbad)\n\n # Try some non-trival broadcasts\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior.like1(x, mu, Sig)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n for i, r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i], Sig)\n\n theta = np.zeros((2,), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for i, r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i])\n\n mu = np.empty((3, 4, 3), dtype=float)\n Sig = np.empty((3, 4, 3, 3), dtype=float)\n for i in range(3):\n for j in range(4):\n mu[i, j] = np.arange(3.0)\n Sig[i, j] = np.eye(3)+0.1*i+0.2*j\n arr = prior.like1(x, mu, Sig)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i, j], Sig[i, j])\n\n theta = np.empty((3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i, j])\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior.like1(x, mu[:, np.newaxis, np.newaxis, :], Sig)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior.like1(x, mu[i], Sig[j, k])\n\n theta = np.empty((2, 3, 4), dtype=prior.model_dtype)\n theta['mu'] = (np.arange(6.0).reshape(2, 3))[:, np.newaxis, np.newaxis, :]\n theta['Sig'] = Sig\n arr = prior.like1(x, theta)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior.like1(x, theta[i, j, k])\n\n # Test __call__() method:\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n mu = np.arange(3.0)\n Sig = np.eye(3)\n arr = prior(mu, Sig)\n assert isinstance(arr, float)\n\n theta = np.zeros(1, dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta[0])\n assert isinstance(arr, float)\n assert arr == prior(mu, Sig)\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior(mu, Sig)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior(mu[i], Sig)\n\n theta = np.zeros(2, dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta)\n assert isinstance(arr, np.ndarray)\n assert 
arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior(theta[i])\n\n mu = np.empty((3, 4, 3), dtype=float)\n Sig = np.empty((3, 4, 3, 3), dtype=float)\n for i in range(3):\n for j in range(4):\n mu[i, j] = np.arange(3.0)\n Sig[i, j] = np.eye(3)+0.1*i+0.2*j\n arr = prior(mu, Sig)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior(mu[i, j], Sig[i, j])\n\n theta = np.zeros((3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu\n theta['Sig'] = Sig\n arr = prior(theta)\n for (i, j), r in np.ndenumerate(arr):\n assert r == prior(theta[i, j])\n\n mu = np.arange(6.0).reshape(2, 3)\n arr = prior(mu[:, np.newaxis, np.newaxis, :], Sig)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior(mu[i], Sig[j, k])\n\n theta = np.zeros((2, 3, 4), dtype=prior.model_dtype)\n theta['mu'] = mu[:, np.newaxis, np.newaxis, :]\n theta['Sig'] = Sig\n arr = prior(theta)\n for (i, j, k), r in np.ndenumerate(arr):\n assert r == prior(theta[i, j, k])\n\n # Should _post_params method do any broadcasting?\n\n # Test pred method():\n prior = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)\n x = np.arange(3.0)+1\n arr = prior.pred(x)\n assert isinstance(arr, float)\n\n x = np.arange(6.0).reshape(2, 3)\n arr = prior.pred(x)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2,)\n assert arr.dtype == float\n for i, r in np.ndenumerate(arr):\n assert r == prior.pred(x[i])\n\n x = np.arange(24.0).reshape(2, 4, 3)\n arr = prior.pred(x)\n assert isinstance(arr, np.ndarray)\n assert arr.shape == (2, 4)\n assert arr.dtype == float\n for (i, j), r in np.ndenumerate(arr):\n np.testing.assert_almost_equal(r, prior.pred(x[i, j]))", "def calc_buddi_perturbation_sample_specific(meta_df, X_full, Y_full, sample_interest, scaler, \n encoder_unlab, decoder, batch_size, \n genes_ordered, top_lim=100, use_buddi4=True):\n\n\n tot_simulated_sample = Y_full.shape[1]*len(sample_interest)*100\n\n X_temp = np.copy(X_full)\n\n #####\n # get cell type latent codes\n #####\n # the codes for cell type proportion\n # and tile to repeat for each sample\n sc_props = sc_preprocess.get_single_celltype_prop_matrix(num_samp=100, cell_order=Y_full.columns)\n sc_props = np.tile(sc_props, (len(sample_interest),1))\n\n\n #####\n # get perturbation latent codes\n #####\n\n # get the index to get the perturbation latent codes\n pert_code_idx = np.logical_and(meta_df.isTraining == \"Train\", meta_df.stim == \"STIM\")\n pert_code_idx = np.where(pert_code_idx)[0] \n pert_code_idx = np.random.choice(pert_code_idx, tot_simulated_sample, replace=True)\n\n if use_buddi4:\n z_slack, _, _, z_rot, _, _, z_pert, _, _, z_bulk, _, _ = encoder_unlab.predict(X_temp[pert_code_idx,], batch_size=batch_size)\n else:\n z_slack, _, _, z_rot, _, _, z_pert, _, _ = encoder_unlab.predict(X_temp[pert_code_idx,], batch_size=batch_size)\n\n # get the index to get the UNperturbed latent codes\n unpert_code_idx = np.logical_and(meta_df.isTraining == \"Train\", meta_df.stim == \"CTRL\")\n unpert_code_idx = np.where(unpert_code_idx)[0] \n unpert_code_idx = np.random.choice(unpert_code_idx, tot_simulated_sample, replace=True)\n if use_buddi4:\n _, _, _, _, _, _, z_unpert, _, _, _, _, _ = encoder_unlab.predict(X_temp[unpert_code_idx,], batch_size=batch_size)\n else:\n _, _, _, _, _, _, z_unpert, _, _ = encoder_unlab.predict(X_temp[unpert_code_idx,], batch_size=batch_size)\n\n #####\n # get sample latent codes\n #####\n # get the index to get the sample latent codes\n sample_code_idx = np.logical_and(meta_df.isTraining == \"Train\", \n 
np.isin(meta_df.sample_id, sample_interest))\n sample_code_idx = np.logical_and(sample_code_idx, meta_df.stim == \"CTRL\")\n sample_code_idx = np.where(sample_code_idx)[0] \n sample_code_idx = np.repeat(sample_code_idx, 100)\n\n if use_buddi4:\n _, _, _, z_samples, _, _, _, _, _, _, _, _ = encoder_unlab.predict(X_temp[sample_code_idx,], batch_size=batch_size)\n else:\n _, _, _, z_samples, _, _, _, _, _ = encoder_unlab.predict(X_temp[sample_code_idx,], batch_size=batch_size)\n\n # make the metadata table \n temp_meta_df = meta_df.iloc[sample_code_idx]\n temp_meta_df.isTraining = \"Test\"\n temp_meta_df.cell_prop_type = \"cell_type_specific\"\n\n prop_max = np.copy(sc_props)\n prop_max = np.argmax(prop_max, axis=1)\n prop_max = Y_full.columns[prop_max]\n temp_meta_df.Y_max = prop_max\n\n\n ######\n # now put it all together\n ######\n\n # now concatenate together and add the stim codes to the latent\n if use_buddi4:\n z_concat_perturb = np.hstack([z_slack, sc_props, z_samples, z_pert, z_bulk])\n else:\n z_concat_perturb = np.hstack([z_slack, sc_props, z_samples, z_bulk])\n decoded_0_1 = decoder.predict(z_concat_perturb, batch_size=batch_size)\n decoded_0_1 = scaler.inverse_transform(decoded_0_1)\n\n # now concatenate together and add the stim codes to the latent\n if use_buddi4:\n z_concat_unperturb = np.hstack([z_slack, sc_props, z_samples, z_unpert, z_bulk])\n else:\n z_concat_unperturb = np.hstack([z_slack, sc_props, z_samples, z_bulk])\n\n decoded_0_0 = decoder.predict(z_concat_unperturb, batch_size=batch_size)\n decoded_0_0 = scaler.inverse_transform(decoded_0_0)\n\n ######\n # now get the differential genes\n ######\n\n\n top_genes = {}\n de_genes_all = None\n for curr_cell_type in Y_full.columns:\n\n\n # this is for the \"projected\" expression\n curr_idx = np.where(temp_meta_df.Y_max == curr_cell_type)[0]\n proj_ctrl = decoded_0_0[curr_idx]\n proj_stim = decoded_0_1[curr_idx]\n\n # take the median for normalization\n\n proj_ctrl = np.median(rankdata(proj_ctrl, axis=1), axis=0)\n proj_stim = np.median(rankdata(proj_stim, axis=1), axis=0)\n proj_log2FC = np.abs(proj_stim-proj_ctrl)\n\n # make into DF\n proj_log2FC_df = pd.DataFrame(proj_log2FC, index=genes_ordered)\n\n intersect_proj = proj_log2FC_df.loc[genes_ordered][0]\n top_proj_genes = intersect_proj.index[np.argsort(np.abs(intersect_proj))].tolist()[::-1][0:top_lim]\n\n top_genes[curr_cell_type] = top_proj_genes\n\n\n\n return (temp_meta_df, decoded_0_0, decoded_0_1, top_genes)", "def particlefilter(G, J, U, V, lam, r, y, P_process, P_obs, Np):\n\n\n\tif len(r.shape) < 3:\n\t\tr.unsqueeze_(0) # this is to ensure shape is B x Nr x T\n\t\ty.unsqueeze_(0) # this is to ensure shape is B x Ny x T\n\n\tB, Nr, T \t= r.shape # no. of batches, no. of neurons, no. of time steps\n\tNs, Ny \t\t= J.shape[0], y.shape[0]\t # no. of latent variables, no. 
of inputs\n\tdevice = r.device \n\tdtype = r.dtype \n\n\n\t# Compute the inverse covariance of the proposal distribution q = p(x_t | x_(t-1), r_t)\n\t# Notation: Q for covariance, P for inverse covariance\n\tP_1 \t\t= torch.mm(P_obs, U) # intermediate matrix 1 \n\tP_2 \t\t= torch.mm(U.t(), P_1) # intermediate matrix 2\n\tP_proposal \t= P_process + P_2\n\tP_proposal \t= (P_proposal + P_proposal.t())/2 # make it a perfectly symmetric matrix \n\tQ_proposal \t= P_proposal.inverse()\n\n\t# Define the noise processes\n\tmvn_process = MVN.MultivariateNormal(loc = torch.zeros(Ns,device=device,dtype=dtype), precision_matrix = P_process)\n\tmvn_proposal= MVN.MultivariateNormal(loc = torch.zeros(Ns,device=device,dtype=dtype), precision_matrix = P_proposal)\n\n\t# Generate initial particles X_0 ~ N( pinv(U)r_0, Q_process )\n\t# At the time step zero generate K x Np particles and pick Np particles with highest weights\n\tK \t= 10\n\tr0 \t= r[...,0]\n\tx \t= torch.matmul(torch.pinverse(U),r0.unsqueeze(2)) + mvn_process.rsample(sample_shape=torch.Size([B,K*Np])).permute(0,2,1)\n\n\t# Compute the initial weights of the particles\n\tP_3 \t= torch.mm(torch.pinverse(U).t(), P_process) # intermediate matrix 3\n\tlogWVec = -0.5*( (x*torch.matmul(P_2,x)).sum(dim=1) -2*(r0.unsqueeze(2)*torch.matmul(P_1,x)).sum(dim=1) )\n\tlogWVec += 0.5*( (x*torch.matmul(P_process,x)).sum(dim=1) - 2*(r0.unsqueeze(2)*torch.matmul(P_3,x)).sum(dim=1) )\n\tlog_e \t= torch.max(logWVec,dim=1)[0] \t\t\t# find maximum log weight\n\tlogWVec -= log_e.unsqueeze(1) \t\t \t\t\t# subtract the maximum\n\n\t# retain only the Np best particles\n\tfor b in range(B):\n\t\tidx = torch.argsort(logWVec[b], descending = True)\n\t\tlogWVec[b] = logWVec[b,idx]\n\t\tx[b] = x[b,:,idx]\n\n\tlogWVec, x = logWVec[:,0:Np], x[...,0:Np]\n\n\t# normalized initial weights\n\tWVec = torch.exp(logWVec)\n\tWVec = WVec/torch.sum(WVec,dim=1).unsqueeze(1) \t\n\n\tParticlesAll \t= torch.zeros((B,Ns,Np,T),device=device,dtype=dtype)\n\tParticlesAll[...,0] = x\n\n\t# normalization constant for the weights\n\t# log_nu = 0.5*np.log(np.linalg.det(P_process.data.numpy())) + 0.5*np.log(np.linalg.det(P_obs.data.numpy())) -0.5*np.log(np.linalg.det(P_proposal.data.numpy()))\n\t# log_nu += -0.5*Nr*np.log(2*np.pi)\n\tlog_nu = 0\n\n\t# # Compute log p(r_0) to initialize the observed data log likelihood\n\t# P_4 = torch.mm(P_1, torch.mm(P_2.inverse(),P_1.t()))\n\t# LL = -0.5*(Nr - Ns)*np.log(2*np.pi) + 0.5*np.log(np.linalg.det(P_obs.data.numpy())) - 0.5*np.log(np.linalg.det(P_2.data.numpy()))\n\t# if B == 1:\n\t# LL += -0.5*torch.matmul(r0.unsqueeze(1),torch.matmul(P_obs - P_4,r0.unsqueeze(2))).item()\n\t# else:\n\t# LL += -0.5*torch.matmul(r0.unsqueeze(1),torch.matmul(P_obs - P_4,r0.unsqueeze(2))).squeeze()\n\tLL = 0\n\n\t#savedparticles = []\n\t#savedparticles.append(x[0].data.numpy())\n\n\tfor tt in range(1, T):\n\n\t\t\n\t\t# resample particles based on their weights if sample diversity is low\n\t\tESS = 1/torch.sum(WVec**2,dim=1)\n\n\t\tfor b in range(B):\n\t\t\tif ESS[b] < Np/2 and tt != T-1:\n\t\t\t\tidx = resampleSystematic_torch(WVec[b],Np, device, dtype)\n\t\t\t\tParticlesAll[b] = ParticlesAll[b,:,idx]\n\n\t\tx = ParticlesAll[...,tt-1]\n\n\t\tyt = y[...,tt-1]\n\t\trt = r[...,tt]\n\n\t\tMinvr = torch.matmul(P_obs,rt.unsqueeze(2)) # size B x Nr x 1\n\t\trMinvr = torch.matmul(rt.unsqueeze(1),Minvr) # size B x 1 x 1\n\t\tUMinvr = torch.matmul(U.t(),Minvr) # size B x Ns x 1\n\n\t\tf_tap = TAPnonlinearity(x,yt.unsqueeze(2),G,J,V,lam)\n\n\t\tPinvf_tap = torch.matmul(P_process, 
f_tap)\n\t\tv = Pinvf_tap + UMinvr\n\t\tmu_proposal = torch.matmul(Q_proposal,v) # mean of the proposal distribution\n\n\t\t# sample new particles from proposal distribution\n\t\tParticlesNew= mu_proposal + mvn_proposal.rsample(sample_shape=torch.Size([B,Np])).permute(0,2,1)\n\n\t\t# log of incremental weights\n\t\tlog_alpha = log_nu - 0.5*(rMinvr.squeeze(2) + torch.sum(f_tap*Pinvf_tap - v*mu_proposal,dim=1))\n\n\t\t# update log weights\n\t\tlogWVec = torch.log(WVec) + log_alpha\n\t\tlog_e \t = torch.max(logWVec,dim=1)[0] # find maximum log weight\n\t\tlogWVec -= log_e.unsqueeze(1) \t\t # subtract the maximum\n\n\t\t# unnormalized weights\n\t\tWVec \t = torch.exp(logWVec)\n\n\t\t# update log likelihood\n\t\tLL += torch.log(torch.sum(WVec,dim=1)) + log_e\n\n\t\t# normalize the weights\n\t\tWVec = WVec/torch.sum(WVec,dim=1).unsqueeze(1) \n\n\t\t# append particles\n\t\tParticlesAll[...,tt] = ParticlesNew\n\n\t\t#savedparticles.append(np.copy(ParticlesAll[0,:,:,0:tt+1].data.numpy()))\n\t\t#savedparticles.append(ParticlesNew[0].data.numpy())\n\n\n\txhat = torch.sum(ParticlesAll*WVec.view(B,1,Np,1), dim=2).squeeze(2)\n\n\treturn LL, xhat, ParticlesAll, WVec", "def main():\n #load data needed for ploting topicXtime\n print('loading data...')\n model_name = str(sys.argv[1])\n num_topics = str(sys.argv[2])\n file_name = get_document_topic_distribution(model_name,num_topics)\n topic_matrix = pd.read_csv(file_name,index_col=0)\n print('data loaded!!')\n #transform topic_matrix according to different model\n print('transforming data...')\n get_document_item_vectorize = np.vectorize(get_document_item)\n if (model_name=='Dc_v1'):\n topic_matrix['dealer'] = get_document_item_vectorize(topic_matrix.index,0)\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Dc_v2'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,2)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Dc_v3'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,2)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Dc_v4'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,2)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Tc_v1'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,1)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,2))\n \"\"\"\n #transform 0-based index to 1-based indexing for readability\n increment_topic_dict = {}\n for i in range(len(topic_matrix.columns)):\n increment_topic_dict[str(i)] = str(i+1)\n topic_matrix.rename(columns=increment_topic_dict,inplace=True)\n \"\"\"\n print('data transformed!!')\n print('creating plots...')\n dealer_df_list = list(map(lambda x: get_dealer_by_ID(topic_matrix,x,model_name),list(topic_matrix['dealer'].unique())))\n cpu_cores = multiprocessing.cpu_count() - 1\n pool = multiprocessing.Pool(cpu_cores)\n #pool.map(topicXtime_plotly_parallel,dealer_df_list)\n pool.map(topicXtime_matplotlib,dealer_df_list)\n pool.close()\n 
print('plots created!!')", "def calculate_double_check_radii_mann(df):\n def distance_modulus(d, derr):\n dm = 5. * np.log10(d/10.)\n dmerr = 5. / np.log(10.) * derr/d\n return dm, derr\n\n def eqn1(MKs, MKs_err=None, e=False):\n \"\"\"Equation 1 from Table in Mann et al. 2016\n Erratum.\"\"\"\n R = 1.9515 - 0.3520 * MKs + 0.01680 * MKs**2\n if e==False:\n return R\n elif e==True:\n sig2_R = (0.0289 * R)**2\n sig2_MKs = (0.3520 + 0.01680 * 2 * MKs)**2 * MKs_err**2\n return np.sqrt(sig2_R + sig2_MKs)\n\n def eqn2(MKs, FeH, MKs_err=None, FeH_err=None, e=False):\n \"\"\"Equation 2 from Table in Mann et al. 2016\n Erratum.\"\"\"\n R = (1.9305 - 0.3466 * MKs + 0.01647 * MKs**2) * (1. + 0.04458 * FeH)\n if e==False:\n return R\n elif e==True:\n sig2_R = (0.027 * R)**2\n sig2_MKs = ((0.3466 + 0.01647 * 2 * MKs) * (1. + 0.04458 * FeH) * MKs_err)**2\n sig2_FeH = ((1.9305 - 0.3466 * MKs + 0.01647 * MKs**2) * 0.04458 * FeH_err)**2\n return np.sqrt(sig2_R + sig2_MKs + sig2_FeH)\n\n # calculate the distance from Gaia parallax (no goodness of fit test):\n df[\"parallax\"] = df.parallax_Gaia\n df[\"parallax_error\"] = df.parallax_error_Gaia\n df = calculate_distance_from_parallax(df)\n\n # Calculate distance modulus and convert Ks band magnitude to MKs\n DM, DMerr = distance_modulus(df.distance, df.distance_error)\n df[\"MKs\"] = df.K_2MASS - DM\n df[\"MKs_err\"] = np.sqrt(df.e_K_2MASS**2 + DMerr**2)\n\n #conditions for applying equation 1 or 2:\n condition_eqn1 = ((df.FeH.isnull() | df.e_FeH.isnull()) & (~df.MKs.isnull()) & (df.MKs > 4.6) & (df.MKs < 9.8))\n condition_eqn2 = ((~df.FeH.isnull()) & (~df.e_FeH.isnull()) & (~df.MKs.isnull()) & (df.MKs > 4.6) & (df.MKs < 9.8))\n\n # Init new columns\n df['Rstar_double_check'] = np.nan\n df['e_Rstar_double_check'] = np.nan\n\n #Apply formula whenever conditions apply\n if (condition_eqn1 == True).any():\n df.loc[condition_eqn1,'Rstar_double_check'] = df.loc[condition_eqn1,:].apply(lambda x: eqn1(x.MKs), axis=1)\n df.loc[condition_eqn1,'e_Rstar_double_check'] = df.loc[condition_eqn1,:].apply(lambda x: eqn1(x.MKs, MKs_err=x.MKs_err,\n e=True), axis=1)\n if (condition_eqn2 == True).any():\n df.loc[condition_eqn2,'Rstar_double_check'] = df.loc[condition_eqn2,:].apply(lambda x: eqn2(x.MKs, x.FeH), axis=1)\n df.loc[condition_eqn2,'e_Rstar_double_check'] = df.loc[condition_eqn2,:].apply(lambda x: eqn2(x.MKs, x.FeH,\n MKs_err=x.MKs_err, FeH_err=x.e_FeH,\n e=True), axis=1)\n # delete extra columns\n del df[\"parallax\"]\n del df[\"parallax_error\"]\n del df[\"MKs\"]\n del df[\"MKs_err\"]\n del df[\"distance\"]\n del df[\"distance_error\"]\n\n\n return df", "def calculate_svms(h, syn_x, syn_u, trial_info, trial_time, num_reps = 20, num_reps_stability = 5, \\\n decode_test = False, decode_rule = False, decode_match = False, decode_neuronal_groups = False):\n\n lin_clf = svm.SVC(C=1, kernel='linear', decision_function_shape='ovr', shrinking=False, tol=1e-3)\n\n num_time_steps = len(trial_time)\n decoding_results = {}\n\n # The synaptic efficacy is the product of syn_x and syn_u. 
Will decode sample\n # direction from this value\n syn_efficacy = syn_x*syn_u\n\n if par['trial_type'] == 'DMC':\n\n # Will also calculate the category decoding accuracies, assuming the first half of\n # the sample direction belong to category 1, and the second half belong to category 2\n num_motion_dirs = len(np.unique(trial_info['sample']))\n sample = np.floor(trial_info['sample']/(num_motion_dirs/2)*np.ones_like(trial_info['sample']))\n test = np.floor(trial_info['test']/(num_motion_dirs/2)*np.ones_like(trial_info['sample']))\n rule = trial_info['rule']\n match = np.array(trial_info['match'])\n elif par['trial_type'] == 'dualDMS':\n sample = trial_info['sample']\n rule = trial_info['rule'][:,0] + 2*trial_info['rule'][:,1]\n par['num_rules'] = 4\n match = np.array(trial_info['match'])\n elif par['trial_type'] == 'locationDMS':\n sample = trial_info['sample'][:, 0]\n test = trial_info['test']\n rule = trial_info['rule']\n match = np.array(trial_info['match'])\n elif par['trial_type'] == 'ABBA' or par['trial_type'] == 'ABCA':\n sample = trial_info['sample']\n rule = trial_info['rule']\n test = np.array(trial_info['test'][:,0])\n match = np.array(trial_info['match'][:,0])\n elif par['trial_type'] == 'DMS+DMC':\n # rule 0 is DMS, rule 1 is DMC\n ind_rule = np.where(trial_info['rule']==1)[0]\n num_motion_dirs = len(np.unique(trial_info['sample']))\n sample = np.array(trial_info['sample'])\n test = np.array(trial_info['test'])\n # change DMC sample motion directions into categories\n sample[ind_rule] = np.floor(trial_info['sample'][ind_rule]/(num_motion_dirs/2)*np.ones_like(trial_info['sample'][ind_rule]))\n test[ind_rule] = np.floor(trial_info['test'][ind_rule]/(num_motion_dirs/2)*np.ones_like(trial_info['sample'][ind_rule]))\n rule = trial_info['rule']\n match = np.array(trial_info['match'])\n else:\n sample = np.array(trial_info['sample'])\n rule = np.array(trial_info['rule'])\n match = np.array(trial_info['match'])\n\n if trial_info['test'].ndim == 2:\n test = trial_info['test'][:,0]\n else:\n test = np.array(trial_info['test'])\n\n if len(np.unique(np.array(trial_info['rule']))) == 1 and decode_rule:\n print('Only one unique rule; setting decode rule to False')\n decode_rule = False\n\n\n decoding_results['neuronal_sample_decoding'], decoding_results['synaptic_sample_decoding'], \\\n decoding_results['neuronal_sample_decoding_stability'], decoding_results['synaptic_sample_decoding_stability'] = \\\n svm_wraper(lin_clf, h, syn_efficacy, sample, rule, num_reps, num_reps_stability, trial_time)\n\n to = (par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time'])//par['dt']\n print('Neuronal and synaptic delay period decoding', \\\n np.mean(decoding_results['neuronal_sample_decoding'][0,0,:,to-10:to]), \\\n np.mean(decoding_results['synaptic_sample_decoding'][0,0,:,to-10:to]))\n\n if decode_test:\n decoding_results['neuronal_test_decoding'], decoding_results['synaptic_test_decoding'] ,_ ,_ = \\\n svm_wraper(lin_clf, h, syn_efficacy, test, rule, num_reps, 0, trial_time)\n\n if decode_match:\n decoding_results['neuronal_match_decoding'], decoding_results['synaptic_match_decoding'] ,_ ,_ = \\\n svm_wraper(lin_clf, h, syn_efficacy, match, rule, num_reps, 0, trial_time)\n\n if decode_rule:\n decoding_results['neuronal_rule_decoding'], decoding_results['synaptic_rule_decoding'] ,_ ,_ = \\\n svm_wraper(lin_clf, h, syn_efficacy, trial_info['rule'], np.zeros_like(rule), num_reps, 0, trial_time)\n\n\n if decode_neuronal_groups:\n\n decoding_results['neuronal_sample_decoding_group'] = []\n 
decoding_results['synaptic_sample_decoding_group'] = []\n decoding_results['neuronal_test_decoding_group'] = []\n decoding_results['synaptic_test_decoding_group'] = []\n decoding_results['neuronal_match_decoding_group'] = []\n decoding_results['synaptic_match_decoding_group'] = []\n\n for i in range(4):\n neuronal_decoding, synaptic_decoding, _, _ = \\\n svm_wraper(lin_clf, h[neuron_groups[i],:,:], syn_efficacy[neuron_groups[i],:,:], sample, rule, 20, 0, trial_time)\n decoding_results['neuronal_sample_decoding_group'].append(neuronal_decoding)\n decoding_results['synaptic_sample_decoding_group'].append(synaptic_decoding)\n\n neuronal_decoding, synaptic_decoding, _, _ = \\\n svm_wraper(lin_clf, h[neuron_groups[i],:,:], syn_efficacy[neuron_groups[i],:,:], test, rule, 20, 0, trial_time)\n decoding_results['neuronal_test_decoding_group'].append(neuronal_decoding)\n decoding_results['synaptic_test_decoding_group'].append(synaptic_decoding)\n\n neuronal_decoding, synaptic_decoding, _, _ = \\\n svm_wraper(lin_clf, h[neuron_groups[i],:,:], syn_efficacy[neuron_groups[i],:,:], match, rule, 20, 0, trial_time)\n decoding_results['neuronal_match_decoding_group'].append(neuronal_decoding)\n decoding_results['synaptic_match_decoding_group'].append(synaptic_decoding)\n\n\n return decoding_results", "def correct_mounterr(filtered_signal, filename):\r\n peaks_idx = sc.signal.find_peaks(filtered_signal) #find indices of peaks \r\n peaks = filtered_signal[peaks_idx[0]] #take values at these indices\r\n first_peak = peaks[0] #take value of the first peak\r\n \r\n if first_peak < 240 and first_peak > 0: #if the value of the first peak is lower than 240 and higher than 0 (found by trial and error)\r\n filtered_signal = filtered_signal*-1 #flip the signal\r\n \r\n patientno = int(filename[1:4]) #store patientno, If E001LS.mat; patientno = 1\r\n swapbox_patients = [21, 66, 88, 97, 106] #data still upside down after applying swap conditions, add value to box\r\n if patientno in swapbox_patients:\r\n filtered_signal = filtered_signal*-1 #flip the signal\r\n \r\n return filtered_signal", "def test_performance_difference_lemma_discounted(M):\n\n p = random_dist(M.S, M.A)\n q = random_dist(M.S, M.A)\n\n dp = M.d(p) # Roll-in with p\n Aq = M.Advantage(q) # Roll-out with q\n # Accumulate advantages of p over q.\n z = 1/(1-M.γ) * sum(dp[s] * p[s,:] @ Aq[s,:] for s in range(M.S))\n\n assert np.allclose(M.J(p) - M.J(q), z)\n print('[pd-lemma]', ok)\n\n\n # The PD lemma is just potential-based shaping.\n # See `test_potential_based_shaping` to read about potential-based shaping.\n #\n # Let `ϕ(s) = Vq(s)` where `Vq(s)` is the value function of some policy `q`.\n # The shaped reward is\n #\n # R'(s,a,s') = R(s,a,s') + γ Vq(s') - Vq(s)\n #\n # Now take the expectation over s',\n #\n # E_{s'}[ R'(s,a,s') ]\n # = E_{s'}[ R(s,a,s') + γ Vq(s') - Vq(s) ]\n # = E_{s'}[ R(s,a,s') + γ Vq(s') ] - Vq(s)\n # = Qq(s,a) - Vq(s).\n # = Aq(s, a)\n #\n # We see that the shaped reward function is the advantage of policy `q`.\n\n ϕ = M.V(q)\n M1 = M.copy()\n M1.apply_potential_based_shaping(ϕ)\n\n assert_equal(M1.J(p), M.J(p) - M.J(q), verbose=True)\n\n # Sanity check: q should have no advantive over itself.\n assert abs(M1.J(q)) < 1e-10", "def compute_matrix_scores(ppi_matrix, training_ids, params):\n # building weighting vector \n if not hasattr(params, 'weighting') or params.weighting == \"uniform\":\n weights = np.ones(len(training_ids))\n weights /= np.sum(weights)\n scores = np.dot(ppi_matrix[:, training_ids], weights) \n\n elif 
params.weighting == \"sup\":\n # compute supervised weights\n weights = np.sum(ppi_matrix[training_ids, :][:, training_ids], axis=1)\n \n # normalize \n weights -= np.min(weights)\n weights /= np.sum(weights)\n weights += 1.0 / len(weights)\n weights = weights ** (-1)\n\n weights /= np.sum(weights)\n scores = np.dot(ppi_matrix[:, training_ids], weights) \n\n elif params.weighting == \"mle\":\n #train_pos = training_ids\n #X = ppi_matrix[:, train_pos]\n #N, D = X.shape\n\n #Y = np.zeros(N)\n #Y[train_pos] = 1\n\n #train_neg = get_negatives(Y, params.neg_examples*len(train_pos))\n #train_nodes = np.concatenate((train_pos, train_neg))\n #Y_train = Y[train_nodes]\n #X_train = X[train_nodes, :]\n #model = LogisticRegression(C = 1.0 / params.reg_L2, \n # fit_intercept = False, \n # class_weight = 'balanced')\n #model.fit(X_train, Y_train)\n #weights = np.array(model.coef_).reshape(-1)\n \n #Apply ReLU to Weights\n #weights += np.ones(len(training_ids))\n #weights /= np.sum(weights)\n #scores = np.dot(ppi_matrix[:, training_ids], weights) \n pass\n elif params.weighting == \"pca\":\n logging.error(\"Not Implemented\")\n \n elif params.weighting == \"max\":\n scores = np.max(ppi_matrix[:, training_ids], axis = 1)\n\n else: \n logging.error(\"Weighting scheme not recognized\")\n\n # compute scores \n return scores", "def y_dense_correlator(xpcs_data, mask):\n ind = np.where(mask > 0) # unused pixels are 0\n xpcs_data = xpcs_data[:, ind[0], ind[1]] # (n_tau, n_pix)\n del ind\n ltimes, lenmatr = np.shape(xpcs_data) # n_tau, n_pix\n meanmatr = np.array(np.mean(xpcs_data, axis=1), np.float32) # xpcs_data.sum(axis=-1).sum(axis=-1)/n_pix\n meanmatr.shape = 1, ltimes\n\n if ltimes * lenmatr > 1000 * 512 * 512:\n nn = 16\n newlen = lenmatr // nn\n num = np.dot(np.array(xpcs_data[:,:newlen], np.float32), np.array(xpcs_data[:,:newlen], np.float32).T)\n xpcs_data = xpcs_data[:, newlen:] + 0\n for i in range(1, nn - 1, 1):\n num += np.dot(np.array(xpcs_data[:,:newlen], np.float32), np.array(xpcs_data[:,:newlen], np.float32).T)\n xpcs_data = xpcs_data[:, newlen:] + 0\n num += np.dot(np.array(xpcs_data, np.float32), np.array(xpcs_data, np.float32).T)\n else:\n num = np.dot(np.array(xpcs_data, np.float32), np.array(xpcs_data, np.float32).T)\n\n num /= lenmatr\n denom = np.dot(meanmatr.T, meanmatr)\n del meanmatr\n res = np.zeros((ltimes - 1, 3)) # was ones()\n for i in range(1, ltimes, 1): # was ltimes-1, so res[-1] was always 1 !\n dia_n = np.diag(num, k=i)\n sdia_d = np.diag(denom, k=i)\n res[i - 1, 0] = i\n res[i - 1, 1] = np.sum(dia_n) / np.sum(sdia_d)\n res[i - 1, 2] = np.std(dia_n / sdia_d) / len(sdia_d) ** 0.5\n return res", "def compute_photon_statistics(rs,inv, rs_Cxx, rs_constants ):\n \n [ntimes, nalts] = rs.var_raw_molecular_counts.shape\n ones_array = np.ones((ntimes,nalts))\n Cmm = ones_array * np.transpose(rs_Cxx.Cmm[:nalts])\n Cmc = ones_array * np.transpose(rs_Cxx.Cmc[:nalts])\n Cam = ones_array * rs_Cxx.Cam\n if hasattr(rs_Cxx,'Cam_i2a'):\n Cam_i2a = ones_array * rs_Cxx.Cam_i2a\n Cmm_i2a = ones_array * np.transpose(rs_Cxx.Cmm_i2a[:nalts])\n\n \n cpol_gain = rs_constants['combined_to_cross_pol_gain_ratio']\n \n var_mol = rs.var_raw_molecular_counts\n var_comb= rs.var_raw_combined_hi_counts\n var_cp = rs.var_raw_cross_pol_counts\n if hasattr(rs,'var_raw_molecular_i2a_counts'):\n var_mol_i2a = rs.var_raw_molecular_i2a_counts\n if hasattr(rs,'var_raw_combined_1064_counts'):\n var_combined_1064_counts = rs.var_raw_combined_1064_counts\n\n Scp = rs.cross_pol_counts\n Sc = rs.combined_hi_counts\n Sm = 
rs.molecular_counts\n \n if hasattr(rs,'var_raw_molecular_i2a_counts'):\n var_mol_i2a = rs.var_raw_molecular_i2a_counts\n Sm_i2a = rs.molecular_i2a_counts\n\n # variance of the scatttering ratio\n mol_f = Sm - Cam * Sc\n mol_f_sqrd = mol_f**2\n aero_f = Cmm * Sc - Cmc * Sm\n \n \n SR_i2_std = np.sqrt(\n var_comb*((Cmm/mol_f-Cam*aero_f/mol_f_sqrd)**2\n + (cpol_gain * Scp * Cmm * Cam / mol_f_sqrd)**2) \n +var_mol * ((aero_f/mol_f_sqrd -Cam/mol_f)**2\n + (cpol_gain * Scp * Cmm / mol_f_sqrd)**2)\n +var_cp *(cpol_gain * Cmm/mol_f)**2)\n if hasattr(rs_Cxx,'Cam_i2a') and hasattr(rs,'molecular_i2a_counts'):\n mol_i2a_f = Sm - Cam * Sc\n mol_i2a_f_sqrd = mol_f**2\n aero_i2a_f = Cmm * Sc - Cmc * Sm\n\n SR_i2a_std = np.sqrt(\n var_comb*((Cmm_i2a/mol_f-Cam_i2a*aero_i2a_f/mol_i2a_f_sqrd)**2\n + (cpol_gain * Scp * Cmm_i2a * Cam_i2a / mol_i2a_f_sqrd)**2) \n +var_mol_i2a * ((aero_i2a_f/mol_i2a_f_sqrd -Cam_i2a/mol_i2a_f)**2\n + (cpol_gain * Scp * Cmm_i2a / mol_i2a_f_sqrd)**2)\n +var_cp *(cpol_gain * Cmm_i2a/mol_i2a_f)**2)\n #note SR is computed from average of i2 and i2a determinations\n SR_std =0.5 * np.sqrt(SR_i2_std**2 + SR_i2a_std**2)\n\n else: #if no i2a channel total scattering ratio is computed from i2 channel\n SR_std = SR_i2_std\n \n #Signal to noise ratio of molecular count\n SN_mol = Sm/np.sqrt((Cam/Cmm)**2 * var_comb +1/Cmm**2 * var_mol)\n \n if hasattr(rs_Cxx,'Cam_i2a') and hasattr(rs,'molecular_i2a_counts'):\n SN_i2a_mol = Sm_i2a/np.sqrt((Cam_i2a/Cmm_i2a)**2 * var_comb +1/Cmm_i2a**2 * var_mol_i2a)\n setattr(inv,'SN_i2a_mol',SN_i2a_mol)#FIXME add to another layer somewhere\n #standard deviation of backscatter cross section\n setattr(inv,'std_beta_a_backscat',SR_std * inv.beta_r_backscat)\n #standard deviation of the backscatter ratio\n setattr(inv,'SR_std',SR_std)#FIXME add to mean layer\n #signal-to-noise ratio for beta_a_backscatter\n setattr(inv,'SN_beta_a_backscat', inv.beta_a_backscat / (SR_std * inv.beta_r_backscat))\n #signal-to-noise ratio for the molecular count profile\n setattr(inv,'SN_mol',SN_mol)#FIXME add to mean layer\n return", "def calculate_m(self):\r\n #1. Calculate number of sensitive versus non-sensitive group members\r\n n_sens = len(self.sens_numerical_arr[self.sens_numerical_arr == 0])\r\n n_non_sens = len(self.sens_numerical_arr[self.sens_numerical_arr == 1])\r\n\r\n n_pos_sens = 0\r\n n_pos_non_sens = 0\r\n for i in range(len(self.S_train)):\r\n if self.sens_numerical_arr[i] == 0 and self.y_train[i] == 1:\r\n n_pos_sens = n_pos_sens + 1\r\n if self.sens_numerical_arr[i] == 1 and self.y_train[i] == 1:\r\n n_pos_non_sens = n_pos_non_sens + 1\r\n \r\n r_pos_sens = n_pos_sens / n_sens\r\n r_pos_non_sens = n_pos_non_sens / n_non_sens\r\n e = r_pos_non_sens - r_pos_sens\r\n m = e * ((n_sens * n_non_sens) / (n_sens + n_non_sens))\r\n \r\n self.m = round(m)\r\n return round(m)", "def pitchStrengthAllCandidates(f,L,pc,kernel,J0):\r\n \r\n S = np.zeros((len(pc),len(L[0])))\r\n NL = np.divide(L,np.matlib.repmat(np.sqrt(np.sum(np.multiply(L,L),0)),int(len(L)),1))\r\n #print(len(NL),len(NL[0]))\r\n for j in np.arange(0,len(pc)):\r\n S[j,:]=np.matmul(np.transpose(kernel[0,j+J0]),NL) \r\n return(S)", "def hw_417():\n\t# This is a brain teaser. 
I need to identify a case where the score of an\n\t# optimal local alignment and an optimal global alignment of 2 sequences\n\t# are not identifical, but where all entries in a scoring matrix M are >= 0.\n\t\n\t# The material in the provided link note the problem with two sequences of\n\t# very different length where the smaller strongly corresponds to a small\n\t# local region in the longer can lead to a problem where negative values\n\t# mask the high similarity found if the two regions were removed and compared\n\t# without the rest of each sequence. If the values are NOT negative, it seems\n\t# that this problem might not persist, at last not to the point where local\n\t# alignments need to be computed. I will guess that the answer is true.\n\t\n\tanswer = \"true\"\n\t\n\tprint \"Question 417 Answer:\"\n\tprint answer\n\tprint \"-\"*50\n\tprint \"\\n\"", "def get_mvdr_vector_souden(\n target_psd_matrix,\n noise_psd_matrix,\n ref_channel=None,\n eps=None,\n return_ref_channel=False\n):\n assert noise_psd_matrix is not None\n\n phi = stable_solve(noise_psd_matrix, target_psd_matrix)\n lambda_ = np.trace(phi, axis1=-1, axis2=-2)[..., None, None]\n if eps is None:\n eps = np.finfo(lambda_.dtype).tiny\n mat = phi / np.maximum(lambda_.real, eps)\n \n if ref_channel is None:\n ref_channel = get_optimal_reference_channel(\n mat, target_psd_matrix, noise_psd_matrix, eps=eps)\n\n assert np.isscalar(ref_channel), ref_channel\n beamformer = mat[..., ref_channel]\n\n if return_ref_channel:\n return beamformer, ref_channel\n else:\n return beamformer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an instance of `DecompilationTest` with the given settings.
def create(self, test_settings): return DecompilationTest(self.decomp, test_settings)
[ "def create_drop(config):\r\n _logger.debug(\"initialize test plan\")\r\n \r\n test_plan = Ats3TestPlan(config)\r\n component_parser = acp.Ats3ComponentParser(config)\r\n \r\n for tsrc in config.tsrc_paths:\r\n lst_check_harness = []\r\n _logger.info(\"inspecting tsrc path: %s\" % tsrc)\r\n #checking if there are components without harness\r\n for sub_component in config.tsrc_paths_dict[tsrc]['content'].keys():\r\n _harness_ = config.tsrc_paths_dict[tsrc]['content'][sub_component]['harness']\r\n if _harness_ != \"\":\r\n lst_check_harness.append(_harness_)\r\n\r\n #if component has harness then insert to test set \r\n if len(lst_check_harness) > 0:\r\n component_parser.insert_test_set(test_plan, path(tsrc), config.tsrc_paths_dict)\r\n\r\n test_plan.set_plan_harness()\r\n\r\n\r\n #Checking if any non executable set exists\r\n #if yes, delete the set\r\n tesplan_counter = 0\r\n for plan_sets in test_plan.sets:\r\n tesplan_counter += 1\r\n exe_flag = False\r\n for srcanddst in plan_sets['src_dst']:\r\n _ext = ''\r\n if '.' in srcanddst[0]:\r\n _ext = srcanddst[0].rsplit(\".\")[1]\r\n #the list below are the files which are executable\r\n #if none exists, set is not executable\r\n for mat in [\"dll\", \"ini\", \"cfg\", \"exe\", \"script\"]:\r\n if mat == _ext.lower():\r\n exe_flag = True\r\n break\r\n if exe_flag: \r\n break\r\n\r\n if not exe_flag: #the set does not have executable, deleting the set\r\n _logger.info(plan_sets['component_path'] + ' has no executables so not including in xml')\r\n del test_plan.sets[tesplan_counter - 1]\r\n \r\n if config.ats4_enabled.lower() == 'true':\r\n generator = adg.Ats3TemplateTestDropGenerator()\r\n else:\r\n generator = adg.Ats3TestDropGenerator()\r\n _logger.info(\"generating drop file: %s\" % config.drop_file)\r\n generator.generate(test_plan, output_file=config.drop_file, config_file=config.config_file )", "def get_instance(self, project, parameters):\n\t\t\n\t\tparameters = project.process_node_parameters(\n\t\t\tparameters,\n\t\t\t[\"source\", \"destination\", \"from\", \"to\"],\n\t\t\t{\"replace\": False, \"retry\": 1},\n\t\t\t{\"source\": \"variable_name\", \"destination\": \"variable_name\", \"from\": \"non_empty_string\", \"to\": \"non_empty_string\", \"replace\": \"boolean\", \"retry\": \"integer\"}\n\t\t\t)\n\t\t\n\t\treturn DecompressCommand(project, parameters[\"source\"], parameters[\"destination\"], parameters[\"from\"], parameters[\"to\"], parameters[\"replace\"], parameters[\"retry\"])", "def make_sydent(test_config: Optional[dict] = None) -> Sydent:\n if test_config is None:\n test_config = {}\n\n # Use an in-memory SQLite database. 
Note that the database isn't cleaned up between\n # tests, so by default the same database will be used for each test if changed to be\n # a file on disk.\n test_config.setdefault(\"db\", {}).setdefault(\"db.file\", \":memory:\")\n\n # Specify a server name to avoid warnings.\n general_config = test_config.setdefault(\"general\", {})\n general_config.setdefault(\"server.name\", \":test:\")\n # Specify the default templates.\n general_config.setdefault(\n \"templates.path\",\n os.path.join(os.path.dirname(os.path.dirname(__file__)), \"res\"),\n )\n\n # Specify a signing key.\n test_config.setdefault(\"crypto\", {}).setdefault(\n \"ed25519.signingkey\", \"ed25519 0 FJi1Rnpj3/otydngacrwddFvwz/dTDsBv62uZDN2fZM\"\n )\n\n reactor = ResolvingMemoryReactorClock()\n\n sydent_config = SydentConfig()\n sydent_config.parse_config_dict(test_config)\n\n return Sydent(\n reactor=reactor,\n sydent_config=sydent_config,\n use_tls_for_federation=False,\n )", "def construct_test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(FRAMConnection))\n suite.addTest(unittest.makeSuite(FRAMActions))\n return suite", "def from_scanner_settings(cls, scanner_settings):\n return cls(view_num=scanner_settings.view_num,\n flags=scanner_settings.flags,\n normalize=scanner_settings.normalize)", "def from_test_mode(cls, access_key=None, secret=None):\n instance = cls(access_key, secret)\n instance.slurps_table = Table('test_slurps', connection=instance.connection)\n instance.failed_slurps_table = Table(\n 'test_failed_slurps', connection=instance.connection\n )\n return instance", "def create(cls, test_id):\n\n url = test_id\n name = url.rsplit(\"/\", 1)[1]\n node = DataNode(name)\n self = cls(node)\n\n self._from_file = False\n return self", "def setUp(self):\r\n\r\n self.DUT = Allocation()", "def create_tester():\n return ApiTestRunner(\n TestServiceConfig(),\n [GrpcExplainerFactory,\n GrpcPlaygrounderFactory])", "def generate_test(configs, bench_op, OperatorTestCase, run_backward):\n for config in configs:\n test_attrs = {}\n tags = None\n for attr in config:\n # tags is only used in our benchmark backend to filter tests and\n # it will be removed from config which is then passed to the init function \n # an example of config and atrr is: \n # config: [{'M': 16}, {'N': 16}, {'K': 64}, {'tags': 'short'}]\n # attr: {'tags': 'short'} \n if \"tags\" in attr:\n tags = attr[\"tags\"]\n continue\n test_attrs.update(attr)\n if tags is None:\n raise ValueError(\"Missing tags in configs\")\n op = bench_op()\n op.init(**test_attrs)\n test_name = op.test_name(**test_attrs)\n use_jit = False\n if hasattr(op, \"jit_forward\") and callable(op.jit_forward):\n use_jit = True\n input_config = str(test_attrs)[1:-1].replace('\\'', '')\n test_config = TestConfig(test_name, input_config, tags, use_jit, run_backward)\n if op is not None:\n OperatorTestCase(\n op,\n test_config)", "def factory(cls):\n parser = argparse.ArgumentParser(description=u'Consume metrics from Panoptes and send them to InfluxDB')\n\n parser.add_argument(u'--config',\n help=u'Configuration file to use for the consumer. 
Default: {}'.format(DEFAULT_CONFIG_FILE),\n default=DEFAULT_CONFIG_FILE)\n try:\n # Using parse_known_args is a hack to get the tests to work under nose\n # https://stackoverflow.com/questions/28976912/how-to-use-nosetests-in-python-while-also-passing\n # -accepting-arguments-for-argpar\n args = parser.parse_known_args()\n except Exception as e:\n sys.exit(u'Error parsing command line options or configuration file: {}'.format(repr(e)))\n\n try:\n return cls(args[0].config)\n except Exception as e:\n sys.exit(u'Error trying to instantiate class: {}'.format(repr(e)))", "def create_test_instance(cls):\n obj = cls()\n # choose random parameters\n if random.randrange(2):\n obj.temperature = 0\n else:\n obj.temperature = random.randrange(0, 3)\n if random.randrange(2):\n obj.threshold = 'auto'\n else:\n obj.threshold = random.random() + 0.5\n return obj", "def unit_test(bld, target, sources, uselib_local = None, uselib = None):\n test = bld.new_task_gen( 'cxx', 'program' )\n test.obj_ext = '_2.o'\n test.source = sources\n test.includes = '../include'\n test.target = target\n test.install_path = None\n test.uselib_local = uselib_local + ' SharedUnitTest'\n test.uselib = uselib\n test.unit_test = 1\n return test", "def new_instance_from_config(config):\n\tmas = new_instance()\n\t# Environment\n\tenv = e.new_instance(mas, u.cfg_env_size(config))\n\tm.set_env(mas, env)\n\tenv_capacity_distribs = u.cfg_capacity_distributions(config)\n\tfor distrib in env_capacity_distribs:\n\t e.add_capacity_from_string(env,distrib)\n\t# Agent population\n\tpop = p.new_instance(mas, u.cfg_pop_size(config), u_mod.cfg_walkers_number(config))\n\tm.set_pop(mas, pop)\n\t# Cell rules\n\tcell_rules = u.cfg_cell_rules(config)\n\tfor rule in cell_rules:\n\t m.add_cell_rule_from_string(mas,rule)\n\t# Agent rules\n\tagent_rules = u.cfg_agent_rules(config)\n\tfor rule in agent_rules:\n\t m.add_agent_rule_from_string(mas,rule)\n\t# Walker rules\n\twalker_rules = u_mod.cfg_walker_rules(config)\n\tfor rule in walker_rules:\n\t add_walker_rule_from_string(mas,rule)\n\t# Experiment settings\n\tm.set_max_cycle(mas,u.cfg_max_cycle(config))\n\treturn mas", "def __init__(self,\n config: DeltaConfig = None,\n type_comparators: Dict[Union[Tuple[Type, Type], Type], Callable[[Any, Any], bool]] = None,\n named_comparators: Dict[str, Callable[[Any, Any, str], Any]] = None,\n *excluded_keys: List[Union[str, Pattern]]):\n self._config: DeltaConfig = config or DEFAULT_DELTA_CONFIG\n self.output: Callable[[bool, Any, Any], Any] = get_output(self._config)\n none_unequal_default: bool = self._config.matches(DeltaConfig.NoneUnequalDefault)\n self.typed_equal: Callable[[Any, Any], bool] = with_comparators(type_comparators, none_unequal_default)\n self.named_comparators = named_comparators or {}\n self.keys_excluded: List[Union[str, Pattern]] = list(excluded_keys)", "def test_dynaconf():\n assert settings.TESTING is True", "def _create_test_suite(test_cases: [unittest.TestCase]) -> unittest.TestSuite:\n suite = unittest.TestSuite()\n\n # Add each test case to suite\n for case in test_cases:\n # Load tests of current test case\n case_tests = unittest.defaultTestLoader.loadTestsFromTestCase(case)\n # Add tests to suite\n suite.addTest(case_tests)\n\n return suite", "def __init__(\n self,\n tests: List[Test],\n name: str = None,\n weights=None,\n include_models: List[Model] = None,\n skip_models: List[Model] = None,\n hooks: dict = None,\n optimizer=None,\n ):\n\n self.name = name if name else \"Suite_%d\" % random.randint(0, 1e12)\n if isinstance(tests, 
dict):\n for key, value in tests.items():\n if not isinstance(value, Test):\n setattr(self, key, value)\n tests = [test for test in tests.values() if isinstance(test, Test)]\n self.tests = self.assert_tests(tests)\n self.weights_ = [] if not weights else list(weights)\n self.include_models = include_models if include_models else []\n self.skip_models = skip_models if skip_models else []\n self.hooks = hooks\n super(TestSuite, self).__init__()\n if optimizer:\n self.optimize = MethodType(optimizer, self)", "def debug_trainer(cls, path=\"./config/opts.yaml\", task=\"discrete\"):\n opts = load_opts(path, task)\n trainer = cls(opts)\n trainer.setup()\n return trainer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if GCC is run in the given arguments.
def is_gcc(self, args): # On Windows, GCC is run via `sh.exe windows-gcc-32` instead of `gcc`. if on_windows(): return len(args[0]) > 1 and args[0][1] == 'windows-gcc-32.sh' return args[0][0] == 'gcc'
[ "def checkDragonegg(self):\n if not self.checkDragoneggPlugin():\n return False\n\n pfx = ''\n if os.getenv('LLVM_GCC_PREFIX') is not None:\n pfx = os.getenv('LLVM_GCC_PREFIX')\n\n cc = f'{self.path}{pfx}gcc'\n cxx = f'{self.path}{pfx}g++'\n\n return self.checkCompilers(cc, cxx)", "def has_flag(compiler, flagname):\r\n import tempfile\r\n with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:\r\n f.write('int main (int argc, char **argv) { return 0; }')\r\n try:\r\n compiler.compile([f.name], extra_postargs=[flagname])\r\n except setuptools.distutils.errors.CompileError:\r\n return False\r\n return True", "def check_arguments():\n global nargs, progname\n nargs = len(sys.argv) - 1\n progname = os.path.basename(sys.argv[0])\n flag = True\n if nargs != 0 and N_ARGUMENTS[-1] == '*':\n flag = False\n else:\n for i in N_ARGUMENTS:\n if nargs == i:\n flag = False\n if flag:\n usage()", "def check_compile(self, args, options):\n # modify args in order to be DIR \n # mode being either standalone or madevent\n \n if options['force']:\n self.force = True\n \n if not args:\n args.append('MC')\n return\n \n if len(args) > 1:\n self.help_compile()\n raise self.InvalidCmd, 'Invalid Syntax: Too many argument'\n\n elif len(args) == 1:\n if not args[0] in ['MC', 'FO']:\n raise self.InvalidCmd, '%s is not a valid mode, please use \"FO\" or \"MC\"' % args[0]\n mode = args[0]\n \n # check for incompatible options/modes", "def check_arm_gcc_version():\n if 'output' in ESSENTIAL_BINARIES['arm-none-eabi-gcc']:\n version_number = ESSENTIAL_BINARIES['arm-none-eabi-gcc']['output'].strip()\n cli.log.info('Found arm-none-eabi-gcc version %s', version_number)\n\n return True # Right now all known arm versions are ok", "def can_compile():\n logger = logging.getLogger(\"oa-logger\")\n if \"pypy\" in platform.python_implementation().lower():\n logger.warning(\"Compiler is not available on PyPy\")\n return False\n major, minor, patch = platform.python_version_tuple()\n if int(major) >= 3 and int(minor) < 5:\n logger.warning(\"Compiler is not available on 3.4 or lower.\")\n return False\n # There's not going to be a Python 2.8 so this is safe.\n if int(major) <= 2 and (int(minor) < 7 or int(patch) < 11):\n logger.warning(\"Compiler is not available on 2.7.10 or lower.\")\n return False\n return True", "def check_compiler(options, block=False):\n\n msg = 'In order to be able to run at NLO MadGraph5_aMC@NLO, you need to have ' + \\\n 'gfortran 4.6 or later installed.\\n%s has been detected\\n'+\\\n 'Note that You can still run all MadEvent run without any problem!'\n #first check that gfortran is installed\n if options['fortran_compiler']:\n compiler = options['fortran_compiler']\n elif misc.which('gfortran'):\n compiler = 'gfortran'\n else: \n compiler = ''\n \n if 'gfortran' not in compiler:\n if block:\n raise aMCatNLOError(msg % compiler)\n else:\n logger.warning(msg % compiler)\n else:\n curr_version = misc.get_gfortran_version(compiler)\n if not ''.join(curr_version.split('.')) >= '46':\n if block:\n raise aMCatNLOError(msg % (compiler + ' ' + curr_version))\n else:\n logger.warning(msg % (compiler + ' ' + curr_version))", "def compile(filepath):\n retcode = subprocess.call(\"/usr/bin/g++ \" + filepath, shell=True)\n return retcode == 0", "def CheckUseIntelCompiled(myflags: Dict[str, Any]) -> bool:\n if myflags['hpcc_use_intel_compiled_hpl']:\n return myflags['hpcc_math_library'] == HPCC_MATH_LIBRARY_MKL\n return True", "def quote_cmdline_arg(*args) -> \"bool\":\n return _ida_pro.quote_cmdline_arg(*args)", "def 
_has_arg(opcode):\n return opcode == 'ildc' or opcode == 'jz' or opcode == 'jnz' or opcode == 'jmp'", "def check_args(args):\n if args.edit:\n edit_configs()\n sys.exit(0)\n\n if not any([args.TV, args.Movie, args.Music]):\n print('No media type flag set')\n sys.exit(1)\n\n if len(args.files) == 0:\n print('No files given to tag')\n sys.exit(2)", "def main(arch, *args):\n env = GetEnv(arch)\n popen = subprocess.Popen(args, shell=True, env=env, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n out, _ = popen.communicate()\n for line in out.splitlines():\n if (not line.startswith('Copyright (C) Microsoft Corporation') and\n not line.startswith('Microsoft (R) Macro Assembler') and\n not line.startswith(' Assembling: ') and\n line):\n print(line)\n return popen.returncode", "def compiler_check(cc_name, code_string, link_option=None):\n\n cc_check_src = \"check_test_prog.cpp\"\n cc_check_exec = \"check_test.out\"\n\n with open(cc_check_src, \"w\") as write_src:\n try:\n write_src.write(code_string)\n except IOError:\n print \"Couldn't create test program source file\"\n raise\n\n cc_cmd = [cc_name, cc_check_src, \"-o\", cc_check_exec]\n if link_option is not None:\n cc_cmd.append(link_option)\n\n result = False\n retcode = 1\n\n with open(os.devnull, 'w') as write_null:\n retcode = subprocess.call(cc_cmd,\n stdout = write_null, stderr = write_null)\n\n if retcode == 0:\n print \"Compilation successful, executing\"\n retcode = subprocess.call([\"./\" + cc_check_exec])\n if retcode == 0:\n result = True\n else:\n print \"Program compiled but terminated with an error\"\n else:\n print \"Compilation check failed\"\n\n try:\n if os.path.isfile(cc_check_src):\n os.remove(cc_check_src)\n if os.path.isfile(cc_check_exec):\n os.remove(cc_check_exec)\n except OSError:\n print \"Error deleting temporary program files\"\n pass\n\n return result", "def has_ldflags(argv):\n link_flags = set(('-ldflags', '-linkmode', '-extld', '-extldflags'))\n if set(argv) & link_flags:\n return True\n for arg in argv:\n if arg.startswith('-ldflags=') or arg.startswith('-linkmode='):\n return True\n return False", "def syntax_check_with_clang(clang_comp, clang_cmdline):\n command = [clang_comp] + clang_cmdline\n gomacc = get_gomacc_command()\n if gomacc:\n command.insert(0, gomacc)\n if print_cmdline:\n print('%s\\n' % ' '.join(command))\n p = subprocess.Popen(command)\n p.wait()\n if p.returncode != 0:\n sys.exit(p.returncode)", "def check_env():\n\n # add argv[4] for design\n if(len(sys.argv[1]) < 1 and len(sys.argv[2] < 1 and len(sys.argv[3] < 1))):\n printError()\n exit()", "def commentInArgsPresent(self, args):\n return any(arg[2] != \"\" for arg in args)", "def basic_check_build():\n if \"PYODIDE_PACKAGE_ABI\" in os.environ:\n # The following check won't work in pyodide\n return\n code = textwrap.dedent(\n \"\"\"\\\n #include <stdio.h>\n int main(void) {\n return 0;\n }\n \"\"\")\n compile_test_program(code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the tested method so it succeeds.
def run_method_so_it_succeeds(self): raise NotImplementedError
[ "def _succeed(self):\n print(self.test_case + ': succeeded')\n exit(0)", "def test_case_passed(self):\n self.__set_test_case_result(result='PASSED', message='')", "def perform_checks(self) -> None:", "def run_post_test(self):\n pass", "def test_patch_run(self):\n pass", "def Success(self) -> bool:", "def test_get_checker_result(self):\n pass", "def test_which_fails(self):\n self.assertTrue(False)", "def test_update_checker_result(self):\n pass", "def run_test_routine(method, context_file, should_fail=False):\n print('context_file =', context_file, '\\t method =', method.__name__)\n expected = frozenset(solutions_from_file(context_file))\n found = frozenset(solutions_from_method(method, context_file))\n print('expected =', tuple(map(pprint_concept, expected)))\n print(' found =', tuple(map(pprint_concept, found)))\n if should_fail:\n assert expected != found\n else:\n assert expected == found\n print()", "def test_create_checker_result(self):\n pass", "def test_get_checker_results(self):\n pass", "def test_send_result(self):\n pass", "def test_evaluate(self):\n\t\tpass", "def test_quick_intake_with_tester(self):\n\n Test_Start(\"Testing Quick Intake - With Non-Tester\")\n\n prn_info = False\n\n # We should search for this\n p = get_object_or_404(PatientProfile, patient_id=VALID_PATIENT_ID)\n if prn_info!=False:\n print p\n\n calling_test_function = inspect.getframeinfo(inspect.currentframe().f_back)[2]\n\n if prn_info!=False:\n print \"calling:[\"+calling_test_function+\"]\"\n\n usrname = USERNAME_FOR_TEST\n passwd=PASSWORD_FOR_TEST\n output = []\n post_url = '/intake/quickintake'\n post_parameters = {\n 'first_name':VALID_INPROCESS_FIRSTNAME,\n 'last_name':VALID_INPROCESS_LASTNAME,\n 'last_4_ssn':VALID_INPROCESS_LAST_4_SSN,\n 'reciept_privacy_practices': \"\",\n 'patient_signature':PATIENT_SIGNATURE,\n }\n look_for = \"There are errors\"\n\n result = test_for_200(self, usrname, passwd, output, post_url,post_parameters, look_for, calling_test_function, prn_info )\n\n if result == None:\n Test_Msg(\"Successful Test for Error trapping \"+post_url)\n else:\n Test_Msg(\"Test Failed for Error trapping\"+post_url)\n\n post_parameters = {\n 'first_name':VALID_INPROCESS_FIRSTNAME,\n 'last_name':VALID_INPROCESS_LASTNAME,\n 'last_4_ssn':VALID_INPROCESS_LAST_4_SSN,\n 'reciept_privacy_practices': True,\n 'patient_signature':PATIENT_SIGNATURE,\n }\n\n prn_info = False\n\n look_for = \"Successfully added a new member\"\n\n result = test_for_200(self, usrname, passwd, output, post_url,post_parameters, look_for, calling_test_function, prn_info )\n\n if result == None:\n Test_Msg(\"Successful Test for Quick Intake \"+post_url +\" - \"+VALID_INPROCESS_FIRSTNAME+\" \"+VALID_INPROCESS_LASTNAME+\" (\"+VALID_INPROCESS_LAST_4_SSN+\")\")\n else:\n Test_Msg(\"Test Failed for Quick Intake\"+post_url +\" - \"+VALID_INPROCESS_FIRSTNAME+\" \"+VALID_INPROCESS_LASTNAME+\" (\"+VALID_INPROCESS_LAST_4_SSN+\")\")\n\n\n Test_End()\n\n\n return", "def runf2test(self):\n return self.runtest(self.f2) == \"fail\"", "def test_ProcessChain0300(self):\n self.assertTrue(True)", "def __dry_run_test(self):\n ran_test = False\n if constants.dry_run:\n try:\n self.__dry_run_test_number *= 2\n except AttributeError:\n self.__dry_run_test_number = 1\n num_call = self.__dry_run_test_number\n\n def raise_forbidden():\n import requests\n\n # fake a Forbidden response\n fake_response = requests.Response()\n fake_response.reason = 'DRY-RUN FORBIDDEN TEST'\n fake_response.status_code = 403\n logger.id(logger.info, self,\n 'Dry run: raising 
Forbidden: {reason} ...',\n reason=fake_response.reason,\n )\n raise Forbidden(fake_response)\n\n # base decision off the number of times this method has been called\n if num_call == 1:\n raise_forbidden()\n ran_test = True\n\n else:\n import random\n\n # lower the chance of raising a ratelimit exception as the\n # number of calls increases (100% the first time)\n decision = random.randint(2, num_call)\n if decision == 2:\n # throw a fake rate-limit exception\n err_type = Reddit.RATELIMIT_ERR[0]\n logger.id(logger.info, self,\n 'Dry run: raising {err_type} ...',\n err_type=err_type,\n )\n err = [\n err_type,\n\n 'you are doing that too much.'\n ' try again in {0} minutes.'.format(\n random.randint(1, 4)\n ),\n\n None,\n ]\n raise praw.exceptions.APIException(*err)\n ran_test = True\n\n elif decision == 3:\n raise_forbidden()\n ran_test = True\n\n # pretty sure this isn't needed\n return ran_test", "def execute(self, grades, module_dict, solution_dict):\n module = module_dict[self.module_name]\n function = module.__dict__[self.function_name]\n passing_all = True\n for test, solution in self.test_cases.items():\n grades.add_message(\"Testing {}...\".format(repr(test)))\n\n result = function(test)\n\n if not isinstance(result, bool):\n grades.add_message('FAIL: {}'.format(self.path))\n grades.add_message('\\tReturn type of {} must be '\n 'bool, but it is {}'.format(\n self.function_name, type(result)))\n passing_all = False\n\n if result == solution:\n grades.add_message('PASS: {}'.format(self.path))\n grades.add_message('\\t{} properly classified'.format(\n repr(test)))\n\n else:\n grades.add_message('FAIL: {}'.format(self.path))\n grades.add_message('\\t{} improperly classified'.format(\n repr(test)))\n grades.add_message('\\tstudent result: {}'.format(repr(result)))\n grades.add_message('\\tcorrect result: {}'.format(\n repr(solution)))\n passing_all = False\n return passing_all" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solve a regularized and weighted least squares problem. If weights is a 1D array, it is converted to a 2D array with the weights on the diagonal. If the penalty matrix is not ``None`` and nonzero, there is a closed-form solution. Otherwise the problem can be reduced to a least squares problem.
def solve_regularized_weighted_lstsq( coefs: NDArrayFloat, result: NDArrayFloat, *, weights: Optional[NDArrayFloat] = None, penalty_matrix: Optional[NDArrayFloat] = None, lstsq_method: LstsqMethod = lstsq_svd, ) -> NDArrayFloat: lstsq_method = _get_lstsq_method(lstsq_method) if lstsq_method is not lstsq_cholesky and ( penalty_matrix is None ): # Weighted least squares case if weights is not None: if weights.ndim == 1: weights_chol = np.diag(np.sqrt(weights)) coefs = weights_chol * coefs result = weights_chol * result else: weights_chol = scipy.linalg.cholesky(weights) coefs = weights_chol @ coefs result = weights_chol @ result return lstsq_method(coefs, result) # Cholesky case (always used for the regularized case) if weights is None: left = coefs.T @ coefs right = coefs.T @ result else: left = coefs.T @ weights @ coefs right = coefs.T @ weights @ result if penalty_matrix is not None: left += penalty_matrix return scipy.linalg.solve( # type: ignore[no-any-return] left, right, assume_a="pos", )
[ "def weighted_least_squares(self, spec, weights):\n ww = weights.T # nwave x ntrain\n wx = ww[:, :, None] * self.X # nwave x ntrain x nfeature\n\n b = np.dot(self.X.T, weights * spec).T # nwave x nfeature\n # This is the time suck\n a = np.matmul(self.X.T, wx) # nwave x nfeature x nfeature\n #a = np.dot(self.X.T, wx).transpose(1,0,2)\n return np.linalg.solve(a, b).T", "def solve_weighted(A, b, w):\n \n #- Apply weights\n # nvar = len(w)\n # W = dia_matrix((w, 0), shape=(nvar, nvar))\n # bx = A.T.dot( W.dot(b) )\n # Ax = A.T.dot( W.dot(A) )\n\n b = A.T.dot( w*b )\n A = A.T.dot( (A.T * w).T )\n\n if isinstance(A, scipy.sparse.spmatrix):\n x = scipy.sparse.linalg.spsolve(A, b)\n else:\n x = np.linalg.lstsq(A, b)[0]\n \n return x", "def _fit_regressor_stump(X, y, sample_weight, argsorted_X=None):\n Y = y.flatten()\n\n if sample_weight is None:\n sample_weight = np.ones(shape=(X.shape[0],), dtype='float') / (X.shape[0],)\n else:\n sample_weight /= np.sum(sample_weight)\n\n n_samples, n_dims = X.shape\n if X.dtype in ('float', 'float32'):\n thresholds = np.zeros((n_dims, ), dtype='float')\n else:\n thresholds = np.zeros((n_dims, ), dtype='int')\n coeffs = np.zeros((n_dims, ), dtype='float')\n constants = np.zeros((n_dims, ), dtype='float')\n errors = np.zeros((n_dims, ), dtype='float')\n\n # Iterate over all feature dimensions and train the optimal\n # regression stump for each dimension.\n for dim in six.moves.range(n_dims):\n if argsorted_X is not None:\n data_order = argsorted_X[:, dim]\n else:\n data_order = np.argsort(X[:, dim])\n\n # Sort the weights and labels with argument for this dimension.\n # Time: 25%\n sorted_weights = sample_weight[data_order]\n sorted_output = Y[data_order]\n\n # Cumulative sum of desired output multiplied with weights.\n # Time: 10 %\n Szw = (sorted_weights * sorted_output).cumsum()\n # Cumulative sum of the weights.\n Sw = sorted_weights.cumsum()\n\n # Calculate regression function parameters.\n # Time: 25 %\n b = Szw / Sw\n zz = np.where((1.0 - Sw) < 1e-10)\n Sw[zz] = 0.0\n a = ((Szw[-1] - Szw) / (1 - Sw)) - b\n Sw[zz] = 1.0\n\n # Calculate the weighted square error:\n # Time: 40 %\n e = (sorted_weights * (sorted_output * sorted_output)).sum() - \\\n (2 * a * (Szw[-1] - Szw)) - \\\n (2 * b * Szw[-1]) + \\\n ((a * a) + (2 * a * b)) * (1 - Sw) + \\\n (b * b)\n\n del sorted_weights\n del sorted_output\n del Szw, Sw\n\n min_ind = e.argmin()\n errors[dim] = e[min_ind]\n del e\n coeffs[dim] = a[min_ind]\n del a\n constants[dim] = b[min_ind]\n del b\n\n # Handle floating point data different from integer data when it comes\n # to setting the threshold.\n if X.dtype in ('float', 'float32'):\n if min_ind == (n_samples - 1):\n thresholds[dim] = X[data_order[min_ind], dim] + 0.1\n elif min_ind == 0:\n thresholds[dim] = X[data_order[min_ind], dim] - 0.1\n else:\n thresholds[dim] = (X[data_order[min_ind], dim] +\n X[data_order[min_ind + 1], dim]) / 2\n else:\n if min_ind == (n_samples - 1):\n thresholds[dim] = np.floor(X[data_order[min_ind], dim]) + 1\n elif min_ind == 0:\n thresholds[dim] = np.floor(X[data_order[min_ind], dim]) - 1\n else:\n v1 = int(X[data_order[min_ind], dim])\n v2 = int(X[data_order[min_ind + 1], dim])\n thr = (v1 + v2) / 2\n if np.abs(thr) > (2 ** 31):\n print(\"Threshold for dimension {0} was greater than 32 bit integer!\".format(dim))\n thresholds[dim] = np.int32(thr)\n\n del data_order\n\n best_dim = errors.argmin()\n results = {\n 'best_dim': int(best_dim),\n 'min_value': float(errors[best_dim]),\n 'threshold': float(thresholds[best_dim]),\n 
'coefficient': float(coeffs[best_dim]),\n 'constant': float(constants[best_dim]),\n }\n\n return results", "def objective_log_linear(weights):\n\n # Compute log-linear pooled prob with given weights\n pooling_pooled, pooling_reg_const = log_linear_pooling(P, weights)\n\n # Compute log-linear payoff (Abbas (9)) (here higher is worse)\n kls = np.zeros(nviews)\n pooling_pooled_p = 1.0 * pooling_pooled / np.sum(pooling_pooled)\n for i, qk in enumerate(P):\n qk = 1.0 * qk / np.sum(qk)\n vec = rel_entr(pooling_pooled_p, qk)\n kls[i] = np.sum(vec)\n\n payoff = np.sum(np.dot(kls, weights))\n\n # Introduce constraint sum(weights)=1 through a penalty\n penalty = abs(1 - np.sum(weights))\n goal = payoff + penalty\n return (-goal)", "def least_squares(y, tx):\n \n from helpers_optimization import compute_loss\n \n weights = np.linalg.solve(tx.T.dot(tx), tx.T.dot(y))\n\n loss = compute_loss(y, tx, weights, 'rmse')\n return weights, loss", "def reweight_penalty(j0, epsilon, fxn=None):\n reweight_penalty = _bcs.f90wrap_reweight_penalty(j0=j0, epsilon=epsilon, \\\n fxn=fxn)\n return reweight_penalty", "def objective(weight_strategies):\r\n regularization = np.mean([(sum(weight_strategy)-1)**2 for weight_strategy in weight_strategies])\r\n weight_strategies = np.array(weight_strategies).T\r\n weight_strategies = dict(zip(portfolio.get_strategy_ids(), weight_strategies))\r\n weights = portfolio.get_weight_regime_format()\r\n for strategy_id in portfolio.strategies:\r\n weights[strategy_id] = regime.apply(lambda x: weight_strategies[strategy_id][x])\r\n portfolio.weights(weights)\r\n portfolio.fit()\r\n monthly_return = portfolio.extract_monthly_return().values\r\n return portfolio.sharpe_ratio() - regularization,", "def score(vals, weights):\n score = 0\n sum = 0\n for v in vals:\n try:\n score += weights[v] * vals[v]\n sum += weights[v]\n except:\n aux_w = 1 #By default, the weight is 1 (1 is the lowest possible weight, means lowest \"penalty\")\n score += vals[v] * aux_w\n sum += aux_w\n score /= sum\n return score", "def test_analytic_weighted_nlls(self):\n e = np.array([1, 2, 1, 3, 1])\n self.fitting_problem.data_e = e\n self.cost_func = WeightedNLLSCostFunc(self.fitting_problem)\n self.cost_func.jacobian = self.jacobian\n self.cost_func.hessian = self.hessian\n eval_result, _ = self.cost_func.hes_res(params=self.params)\n actual_hessian = grad2_r_weighted_nlls(\n self.fitting_problem.data_x, e, self.params)\n\n self.assertTrue(np.isclose(actual_hessian, eval_result).all())", "def solve_weighted(A, b, w):\n A, b, w = list(map(np.asarray, (A, b, w)))\n ATw2 = A.T * w ** 2\n return np.linalg.solve(np.dot(ATw2, A), np.dot(ATw2, b))", "def regularize(weights, cost, gradients):\n for n in range(1, conf.LAYERS_NUM):\n weights_without_bias = np.c_[(np.zeros(weights[n - 1].shape[0]),\n weights[n - 1])]\n regularization_offset = conf.REG_CONST * weights_without_bias\n gradients[n - 1] += regularization_offset\n cost += conf.REG_CONST * np.sum(np.multiply(weights[n - 1], weights[n - 1])) / 2\n return cost, gradients", "def fit(self, X, y):\n X, y = check_X_y(X, y)\n\n # TODO:\n # Calculate the optimal weights using the closed-form solution\n # Use only numpy functions. 
Don't forget regularization.\n\n w_opt = None\n # ====== YOUR CODE: ======\n #raise NotImplementedError()\n # ========================\n # check what it sais on the notebook\n\n N = X.shape[0] * 1.0\n bias = np.identity(X.shape[1])\n bias[0][0] = 0\n # the close form solution: calc - inv(X + N * reg_lambda * bias) * X*y\n bias = N * self.reg_lambda * bias\n inv_res = np.linalg.inv(X.T.dot(X) + bias)\n w_opt = inv_res.dot(X.T.dot(y))\n\n self.weights_ = w_opt\n return self", "def regularize(weights: np.matrix) -> int:\n squared_weights = np.square(weights)\n return np.sum(squared_weights)", "def fit_power_law(delay_values, param_values):\n def _solver(params):\n m, c, lambda_ = params\n return param_values - (m * (delay_values + c) ** lambda_)\n min_cost = np.inf\n for _ in range(100):\n params_0 = [np.random.rand() * 100 - 50,\n np.random.rand(),\n np.random.rand() * 4 - 2]\n try:\n result = least_squares(_solver, params_0,\n bounds=([-np.inf, -50, -25],\n [np.inf, 50, 25]))\n except ValueError:\n continue\n if result['cost'] < min_cost:\n best_params, min_cost = result['x'], result['cost']\n try:\n return best_params\n except UnboundLocalError:\n return np.nan, np.nan, np.nan", "def LP_solver(classes):\n t = tuple(sorted(classes))\n if t in memo_knap_approx:\n return NULL_KNAP\n else:\n memo_knap_approx.add(t)\n\n items = [item for cls in classes for item in items_d_d[cls]]\n\n variables = [Bool() for _ in range(len(items))]\n score_variable = Variable()\n\n weight_constraint = sum([item['weight'] * variable for item, variable in zip(items, variables)]) <= MAX_WEIGHT\n cost_constraint = sum([item['cost'] * variable for item, variable in zip(items, variables)]) <= MAX_COST\n score_objective = sum([item['score'] * variable for item, variable in zip(items, variables)]) == score_variable\n constraints = [weight_constraint, cost_constraint, score_objective]\n\n objective = Maximize(score_variable)\n\n prob = Problem(objective, constraints)\n prob.solve()\n\n\n knap = LightweightKnap()\n for i, variable in enumerate(variables):\n if variable.value is not None and round(variable.value) == 1:\n knap.add_item(items[i])\n\n return knap", "def test_weights_direction(self):\n if not self.instance.supports_weights:\n raise SkipTest(f\"{self.instance} does not support weights\")\n\n # for sanity checking: give the largest weight to best rank => should improve\n idx = self.ranks.argmin()\n weights = numpy.ones_like(self.ranks, dtype=float)\n weights[idx] = 2.0\n weighted = self.instance(ranks=self.ranks, num_candidates=self.num_candidates, weights=weights)\n unweighted = self.instance(ranks=self.ranks, num_candidates=self.num_candidates, weights=None)\n if self.instance.increasing: # increasing = larger is better => weighted should be better\n self.assertLessEqual(unweighted, weighted)\n else:\n self.assertLessEqual(weighted, unweighted)", "def numpy_lstsq(partial_semantics, delta_target):\n optimal_weights, residuals, rank, singular_values = np_lstsq(partial_semantics, delta_target, rcond=None)\n return optimal_weights", "def perform_test(alpha1, mu1, mu2, mu3, cost_per_buffer, num_wl_vec, w):\n\n assert num_wl_vec == 2 # The test is only implemented without relaxation\n\n env = examples.simple_reentrant_line_model(alpha1=alpha1, mu1=mu1, mu2=mu2, mu3=mu3,\n cost_per_buffer=cost_per_buffer)\n\n # We define the theoretical workload matrix and compare it to the one we compute in order to\n # find which relaxation we are doing\n # Theoretical workload matrix and load\n workload_mat_theory = np.array([[1. 
/ mu1 + 1. / mu3, 1. / mu3, 1. / mu3],\n [1. / mu2, 1. / mu2, 0.]])\n load_theory = np.array([alpha1 / mu1 + alpha1 / mu3, alpha1 / mu2])\n # Computed workload matrix (sorted by load)\n _, workload_mat, _ = wl.compute_load_workload_matrix(env, num_wl_vec)\n\n # Theoretical vertexes of the \\bar{c} feasible region based on the dim of the relaxation\n vertexes_2d = np.array([[mu3 * cost_per_buffer[2],\n mu2 * cost_per_buffer[0] - cost_per_buffer[2] * mu2\n * (mu3 / mu1 + 1.)],\n [mu1 * (cost_per_buffer[0] - cost_per_buffer[1]),\n mu2 * cost_per_buffer[1] + (mu1 * mu2 / mu3) *\n (cost_per_buffer[1] - cost_per_buffer[0])],\n [mu3 * cost_per_buffer[2],\n mu2 * (cost_per_buffer[1] - cost_per_buffer[2])]])\n # We select which vertexes are feasible based on the env parameters\n if mu1 * (cost_per_buffer[0] - cost_per_buffer[1]) <= mu3 * cost_per_buffer[2]:\n feasible_vertexes = vertexes_2d[[0, 1], :, :]\n else:\n feasible_vertexes = vertexes_2d[[2], :, :]\n # The theoretical \\bar{c} vectors were computed for a specific order of the workload\n # vectors. So we compute sort_by_load_index to be able to reorder the theoretical\n # \\bar{c} components based on the sort made by load\n sort_by_load_index = np.argsort(load_theory)[::-1]\n feasible_vertexes = feasible_vertexes[:, sort_by_load_index, :]\n # Compute the index of the theoretical vertex which satisfy the max\n max_vertex_index = np.argmax(np.dot(w.T, feasible_vertexes).flatten())\n barc_theory = feasible_vertexes[max_vertex_index]\n barc, _, _ = alt_methods_test.compute_dual_effective_cost_cvxpy(w, workload_mat,\n cost_per_buffer, method='cvx.ECOS')\n np.testing.assert_almost_equal(barc, barc_theory, decimal=4)", "def _check_weights(weights, X):\n if not isinstance(weights, (np.ndarray, list)):\n if weights is not None:\n warnings.warn(\"weights should be a list or a numpy array.\")\n weights = np.array([])\n\n weights = np.asanyarray(weights)\n if weights.size > 0:\n dtype = np.complex128 if np.any(np.iscomplex(weights)) else np.float64\n weights = np.asanyarray(weights, dtype=dtype)\n if weights.ndim > 3:\n raise ValueError(\"Weights must be 3D at most\")\n if weights.shape[0] != X.shape[0]:\n raise ValueError(\"Weights should be the same n_times as X.\")\n\n if X.ndim == 2 and weights.ndim == 1:\n weights = weights[:, np.newaxis]\n if X.ndim == 3:\n if weights.ndim == 2:\n weights = weights[:, np.newaxis, :]\n elif weights.ndim == 1:\n weights = weights[:, np.newaxis, np.newaxis]\n\n if weights.shape[-1] != X.shape[-1]:\n weights = np.tile(weights, (1, 1, X.shape[-1]))\n\n if weights.ndim > 1:\n if weights.shape[1] > 1 and weights.shape[1] != X.shape[1]:\n raise ValueError(\"Weights array should have a single column.\")\n\n if np.any(np.abs(weights) > 1.):\n warnings.warn(\"weights should be between 0 and 1.\")\n weights[np.abs(weights) > 1.] = 1.\n\n return weights" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that the required version for the pod is met.
def verify_required_version(self): if self.pod.grow_version is None: return sem_current = semantic_version.Version(self.current_version) spec_required = semantic_version.Spec(self.pod.grow_version) if sem_current not in spec_required: text = 'ERROR! Pod requires Grow SDK version: {}'.format( self.pod.grow_version) logging.error(colors.stylize(text, colors.ERROR)) raise LatestVersionCheckError(text)
[ "def verify(self):\n self.installed_version = Version(VERSION)\n\n return check_version(self.installed_version, self.operator, self.version)", "def test_version_valid():\n assert type(packaging.version.parse(ertai.__version__)) is packaging.version.Version", "def check_version(self, required_version: str) -> None:\n if not check_version(redun.__version__, required_version):\n raise RedunClientError(\n \"redun version {version} does not meet requirement {required_version}\".format(\n version=redun.__version__, required_version=required_version\n )\n )", "def verify_package_version(ctx, config, remote):\n # Do not verify the version if the ceph-deploy task is being used to\n # install ceph. Verifying the ceph installed by ceph-deploy should work,\n # but the qa suites will need reorganized first to run ceph-deploy\n # before the install task.\n # see: http://tracker.ceph.com/issues/11248\n if config.get(\"extras\"):\n log.info(\"Skipping version verification...\")\n return True\n if 'repos' in config and config.get('repos'):\n log.info(\"Skipping version verification because we have custom repos...\")\n return True\n builder = _get_builder_project(ctx, remote, config)\n version = builder.version\n pkg_to_check = builder.project\n installed_ver = packaging.get_package_version(remote, pkg_to_check)\n if installed_ver and version in installed_ver:\n msg = \"The correct {pkg} version {ver} is installed.\".format(\n ver=version,\n pkg=pkg_to_check\n )\n log.info(msg)\n else:\n raise RuntimeError(\n \"{pkg} version {ver} was not installed, found {installed}.\".format(\n ver=version,\n installed=installed_ver,\n pkg=pkg_to_check\n )\n )", "def verify(self):\n installed_version_output = subprocess.check_output(['/usr/bin/xcrun', 'xcodebuild', \"-version\"])\n installed_version_re = re.compile(r\"^Xcode\\s(?P<version_text>[\\d/.]+)\")\n match = installed_version_re.match(installed_version_output)\n if not match:\n raise MissingDependencyError(self, \"Did not find Xcode version in output:\" + installed_version_output)\n version = match.groupdict().get('version_text')\n if not version:\n # The package is not installed at all.\n raise AssertionError(\"No version text found.\")\n self.installed_version = Version(version)\n return check_version(self.installed_version, self.operator, self.version)", "def validate_kind_version():\n # If kind is not installed, this first command will raise an UnexpectedExit\n # exception, and inv will exit at this point making it clear running \"kind\"\n # failed.\n min_version = \"0.9.0\"\n\n try:\n raw = run(\"kind version\", echo=True)\n except Exception as e:\n raise Exit(message=\"Could not determine kind version (is kind installed?)\")\n\n actual_version = re.search(\"v(\\d*\\.\\d*\\.\\d*)\", raw.stdout).group(1)\n delta = semver.compare(actual_version, min_version)\n\n if delta < 0:\n raise Exit(message=\"kind version >= {} required\".format(min_version))", "def verify(self):\n\n try:\n pip_version = subprocess.check_output([\"/usr/bin/env\", \"python\", \"-m\", \"pip\", \"--version\"])\n pip_tokens = pip_version.split()\n assert pip_tokens[0] == \"pip\"\n pip_version = Version(pip_tokens[1])\n\n if pip_version < Version(\"9.0.0\"):\n raise MissingDependencyError(self, \"Version of pip too old.\")\n\n pip_package_config = json.loads(subprocess.check_output([\"/usr/bin/env\",\n \"python\", \"-m\", \"pip\", \"list\", \"--format=json\"]))\n except (subprocess.CalledProcessError, OSError):\n raise MissingDependencyError(self, \"Cannot find pip\")\n\n installed = 
{p['name']: p['version'] for p in pip_package_config} # type: Dict[Text, Text]\n\n package = installed.get(self.package)\n\n if not package:\n # The package is not installed at all.\n raise MissingDependencyError(self, \"not in package list\")\n self.installed_version = Version(package)\n return check_version(self.installed_version, self.operator, self.version)", "def test_version_matches_expected():\n assert __version__ == \"0.1.0\"", "def _check_minimum_version(self):\n if not self.obj_attr_is_set('version'):\n return\n if not self.obj_attr_is_set('binary'):\n return\n minver = self.get_minimum_version(self._context, self.binary)\n if minver > self.version:\n raise exception.ServiceTooOld(thisver=self.version,\n minver=minver)", "def test_requirement_versions():\n request = requests.get(\n \"https://raw.githubusercontent.com/home-assistant/home-assistant/dev/requirements_all.txt\"\n )\n requirements = {}\n for line in request.text.split(\"\\n\"):\n if \"=\" in line and not \"#\" in line:\n package = line.split(\">\")[0].split(\"=\")[0]\n version = line.split(\"=\")[-1]\n requirements[package] = version\n\n with open(MANIFEST_FILE, \"r\") as manifest_file:\n for line in json.loads(manifest_file.read())[\"requirements\"]:\n package = line.split(\">\")[0].split(\"=\")[0]\n version = line.split(\"=\")[-1]\n if package in requirements:\n if version != requirements[package]:\n warnings.warn(\n \"Package has different version from HA, this might casuse problems\"\n )", "def check_version_continuity(self):\n try:\n if not (self.badge.version == self.issuer.version == self.version):\n self.non_component_errors.append([\n 'warning.version',\n \"Components assembled with different specification versions.\"\n + \" Assertion: \" + self.version + \", BadgeClass: \"\n + self.badge.version + \", Issuer: \" + self.issuer.version\n ])\n except (TypeError, AttributeError):\n pass", "def check_version():\n err = \"PaddlePaddle version 1.6 or higher is required, \" \\\n \"or a suitable develop version is satisfied as well. 
\\n\" \\\n \"Please make sure the version is good with your code.\" \\\n\n try:\n fluid.require_version('1.7.0')\n except Exception as e:\n logger.error(err)\n sys.exit(1)", "def test_no_version(self):\n version = VersionedDependency(name='tensorflow')\n self.assertFalse(version.has_min_version())\n self.assertFalse(version.has_max_version())\n self.assertFalse(version.has_versions())", "def _assert_version_equals(self, role: str, expected_version: int) -> None:\n md = Metadata.from_file(os.path.join(self.metadata_dir, f\"{role}.json\"))\n self.assertEqual(md.signed.version, expected_version)", "def check_match(self, **kwargs: Any) -> bool:\n name = safe_name(kwargs['name']).lower()\n if name not in self.safety_db.keys():\n return False\n\n version = kwargs['version']\n try:\n version = Version(version)\n except InvalidVersion: # pragma: no cover\n try:\n version = LegacyVersion(version)\n logger.debug(f'Package {name}=={version} is not a valid PEP 440 version, trying Legacy versioning')\n except InvalidVersion:\n logger.debug(f\"Package {name}=={version} has an invalid version\")\n return False\n\n for requirement in self.safety_db[name]:\n if version in requirement.specifier:\n logger.debug(f\"Safety DB MATCH: Release {name}=={version} matches specifier {requirement.specifier}\")\n return True\n return False", "def outofdate(self):\n if self.device_version and self.bundle_version:\n try:\n return VersionInfo.parse(self.device_version) < VersionInfo.parse(\n self.bundle_version\n )\n except ValueError as ex:\n logger.warning(\"Module '%s' has incorrect semver value.\", self.name)\n logger.warning(ex)\n return True # Assume out of date to try to update.", "def version_checking(self,meta):\n if meta[0] == self._valid_metadata:\n pass\n else:\n raise Exception('Incorrect Metadata format')", "def validate_version(self):\n valid_vers = self.rdb.list_available('product_version')\n if self.opts.oo_version:\n if not self.opts.oo_version in valid_vers:\n self.logger.error('You have specified an invalid version: '\n '%s is not one of: %s' %\n (self.opts.oo_version, ', '.join(valid_vers)))\n self.problem = True\n return False\n return True", "def verify(self):\n installed_version_output = subprocess.check_output(['/usr/bin/xcrun', 'xcodebuild', \"-showsdks\"])\n installed_version_re = re.compile(r\".*-sdk\\s+(?P<sdk_text>\\S+)\")\n\n matches = [installed_version_re.match(l).groupdict()['sdk_text']\n for l in installed_version_output.split('\\n') if installed_version_re.match(l)]\n\n if not matches:\n raise MissingDependencyError(self, \"Did not find Sdk version in output:\" + installed_version_output)\n\n extract_version_names = re.compile(r'(?P<pre>\\D*)(?P<version_text>[\\d+/.]*)(?P<post>.*)')\n\n sdks = [extract_version_names.match(sdk_text).groupdict()\n for sdk_text in matches if extract_version_names.match(sdk_text)]\n\n installed_sdks = collections.defaultdict(list)\n for sdk in sdks:\n name = sdk['pre']\n if sdk.get('post'):\n name += \".\" + sdk.get('post')\n if sdk.get('version_text'):\n version = Version(sdk['version_text'].rstrip('.'))\n else:\n continue\n installed_sdks[name].append(version)\n\n if self.sdk not in installed_sdks.keys():\n raise MissingDependencyError(self, \"{} not found in installed SDKs.\".format(self.sdk))\n\n self.installed_version = installed_sdks[self.sdk]\n\n satisfied = [check_version(s, self.operator, self.version) for s in self.installed_version]\n return any(satisfied)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a set of all known properties, granted or denied.
def known_properties() -> set[str]: return set(session.session.execute( select(Property.name).distinct() ).scalars())
[ "def _get_properties(self, force=False):\r\n return self._portal.get_properties(force)", "def get_registered_properties():\n return _metaschema_properties", "def _permissions():\n return getattr(g, '_request_permissions', {})", "def getProperties(self):\n props = list(self._db.keys())\n return props", "def getProperties(self):\n # type: () -> Dict[str]\n pass", "def permissions(self):\n return [Element.from_href(e) for e in self.granted_element]", "def permissions(self) -> Mapping[str, str]:\n return pulumi.get(self, \"permissions\")", "def getProperties(self):\n return self.properties", "def initial_permissions() -> [[str, str]]:\n return {'admin_all': ['user__Admin', 'resource__All'],\n 'guest_all': ['user__Guest', 'resource__All']}", "def _get_all_permissions() -> List[str]:\n return list(\n chain(\n [e.value for c in PermissionsEnum.__subclasses__() for e in c]\n )\n )", "def get_properties(self):\n return self._properties", "def get_all_ds_privileges_dict(self):\n return [{'datastore_url': auth_data_const.ALL_DS_URL,\n 'allow_create': 1,\n 'max_volume_size': 0,\n 'usage_quota': 0}]", "def granted_elements(self):\n return [Element.from_href(element) for element in self.get('granted_elements')]", "def access_controls(self):\n access = {'all': False, 'roles': [], 'hosts': []}\n for control in self.safeaccesscontrol_set.all():\n if control.all_hosts:\n access['all'] = True\n return access\n else:\n if type(control.acl_object) == Host:\n access['hosts'].append(control.acl_object)\n elif type(control.acl_object) == Role:\n access['roles'].append(control.acl_object)\n return access", "def _get_permissions_to_read_all(self, endpoint: str, context: CRUDBuildContext) -> List[str]:\r\n pass", "def get_all_perm_rules(self):\n return self.get_items(PermissionRule)", "def get_user_permissions(cls, user):\n return set(user.permissions.values_list(\"name\", flat=True))", "def names(self):\n return self.__propNames", "def properties(self):\n return ( Property(x) for x in self.property_codes )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Grants a property to a group.
def grant_property(group: PropertyGroup, name: str) -> Property: group.property_grants[name] = True return group.properties[name]
[ "def create_property_group(name):\n return _create_group(\"property_group\", name=name)", "def create_property(name, property_group, granted):\n new_property = Property(name=name, property_group=property_group,\n granted=granted)\n session.session.add(new_property)\n return property_group, new_property", "def set_group_properties(self,iGroupID,aGroupData):", "def deny_property(group: PropertyGroup, name: str) -> Property:\n group.property_grants[name] = False\n return group.properties[name]", "def get_group_properties(self,iGroupID,aGroupSettings):", "def set_group(self, group: t.Optional[jank.graphics.Group]):", "def test_add_role_to_ldap_group(self):\n pass", "def assign_perm_to_group(instance,name,permissioin):\n\tlogger = logging.getLogger(__name__)\n\n\ttry:\n\t\tgroup = Group.objects.get(name=name)\n\t\tassign_perm(permissioin, group, instance)\n\texcept Exception as e:\n\t\tlogger.exception(e)\n\n\treturn instance", "def update_groups(self, props, **kws):\n new_groups = props.get('group', None)\n if new_groups is not None:\n if isinstance(new_groups, str):\n new_groups = new_groups,\n [self._group.add(g) for g in new_groups]\n if self._family is not None:\n self._group.add(self._family)", "def set_property(self, entity, **kwargs):", "def isProperty(self,uid):\n return( self.id2node[uid].group==\"Property\" )", "def set_group(self, group):\n try:\n supports_group = self.supports_group(group)\n if not supports_group:\n self.get_logger().error(f\"{self.name} does not support {group}!\")\n else:\n self._group = group\n except NotImplementedError:\n self.get_logger().warning(f\"{self.name} does not support restricting on groups!\")", "def agrupar(self, grupo):\n self.grupos[grupo.tipo] = grupo", "def add_property_for_channel(channel, property_name, value):", "def delete_property_group(property_group_id):\n return _delete_group(property_group_id)", "def remove_property(group: PropertyGroup, name: str) -> None:\n if not group.properties.pop(name, None):\n raise ValueError(f\"Group {group.name} doesn't have property {name}\")", "def test_grouping_attribute() -> None:\n g = Grouping()\n assert g._groups == []", "def test_api_v3_groups_enable_put(self):\n pass", "def radius_provider_group_provider_modify(handle, group_name, name, **kwargs):\n mo = radius_provider_group_provider_get(handle, group_name, name,\n caller=\"radius_provider_group_provider_modify\")\n mo.set_prop_multiple(**kwargs)\n handle.set_mo(mo)\n handle.commit()\n return mo" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Denies a property to a group.
def deny_property(group: PropertyGroup, name: str) -> Property: group.property_grants[name] = False return group.properties[name]
[ "def remove_property(group: PropertyGroup, name: str) -> None:\n if not group.properties.pop(name, None):\n raise ValueError(f\"Group {group.name} doesn't have property {name}\")", "def grant_property(group: PropertyGroup, name: str) -> Property:\n group.property_grants[name] = True\n return group.properties[name]", "def test_api_v3_groups_disable_put(self):\n pass", "def set_group_properties(self,iGroupID,aGroupData):", "def set_group(self, group):\n try:\n supports_group = self.supports_group(group)\n if not supports_group:\n self.get_logger().error(f\"{self.name} does not support {group}!\")\n else:\n self._group = group\n except NotImplementedError:\n self.get_logger().warning(f\"{self.name} does not support restricting on groups!\")", "def group_clean(self, group, power=False):\n if group > 5 or group < 1:\n raise ValueError(\"Nozzle group must be in range 1..5\")\n if power:\n group |= 0x10\n self._cmd(\"CH\", \"\\0\" + chr(group))", "def test_incorrect_group_false(self):\n self.assertFalse(core_tags.has_group(self.user, 'dcc_analysts'))", "def test_not_group(self):\n role = BonitaRole('myrole', '', '')\n role.uuid = '1234'\n\n BonitaUser.find_by_role_and_group(role, 123.45)", "def isProperty(self,uid):\n return( self.id2node[uid].group==\"Property\" )", "def test_not_role(self):\n group = BonitaGroup('mygroup', '', '')\n group.uuid = '2345'\n\n BonitaUser.find_by_role_and_group('coucou', group)", "def test_missing_prop(self):\n del self.data['prop']\n self.assertInvalid()", "def create_property_group(name):\n return _create_group(\"property_group\", name=name)", "def remove_property(self, key):", "def set_group(self, group: t.Optional[jank.graphics.Group]):", "def delete_property_group(property_group_id):\n return _delete_group(property_group_id)", "def deny(self, role, operation, resource, assertion=None):\n assert not role or role in self._roles\n assert not resource or resource in self._resources\n self._denied[role, operation, resource] = assertion\n # if self._allowed.get((role, operation, resource)):\n # del self._allowed[role, operation, resource]", "def test_nomatch(self):\n with mock.patch(\"bluebottle.clients.settings\", Mock(spec_set=[])):\n p = TenantProperties()\n with self.assertRaises(AttributeError):\n p.foo == 1\n self.assertFalse(hasattr(p, 'foo'))", "def remove_property(subject, property):\n del subject[property]", "def test_incorrect_group_false(self):\n self.assertFalse(core_tags.has_group(self.user, 'dcc_developers'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes a property association (grant or denial) with a given group.
def remove_property(group: PropertyGroup, name: str) -> None: if not group.properties.pop(name, None): raise ValueError(f"Group {group.name} doesn't have property {name}")
[ "def delete_property_group(property_group_id):\n return _delete_group(property_group_id)", "def remove_group(self, group):\n self.groups.remove(group)", "def delete_property(self, p=None, graph=None):\n if p:\n graph = self._clean_graph(graph) if graph else \"db:schema\"\n p = \"scm:\" + p if p.find(\":\") == -1 else p\n return self.woql_and(WOQLQuery().delete_quad(p, \"v:All\", \"v:Al2\", graph),\n WOQLQuery().delete_quad(\"v:Al3\", \"v:Al4\", p, graph))\n return self", "def disassociate(self, host, group):\n return self._disassoc('groups', host, group)", "def delete_group(self, group):\n path = \"api/groups/{0}\".format(group)\n self._delete(path)", "def remove_property(subject, property):\n del subject[property]", "def delete(self, prop):\n self._reset()\n\n props = self._get_resource_properties()\n try:\n del props[prop.type][prop.name]\n except KeyError:\n pass\n patch = {\n \"metadata\": {\n \"annotations\": {\n \"airspot.krules.dev/props\": yaml.dump(props[PropertyType.DEFAULT], Dumper=yaml.SafeDumper),\n \"airspot.krules.dev/ext_props\": yaml.dump(props[PropertyType.EXTENDED], Dumper=yaml.SafeDumper),\n }\n }\n }\n\n api = self._get_api_client()\n resp = api.session.patch(url=f\"{api.url}{self._resource_path}\",\n headers={\"Content-Type\": \"application/merge-patch+json\"},\n json=patch)\n resp.raise_for_status()", "def delete_property(property_name, connection):\n url_path = CONTACTS_API_SCRIPT_NAME + '/properties/' + property_name\n connection.send_delete_request(url_path)", "def remove_property(self, prop):\n self._properties.remove(prop)\n self._pairs.difference_update((o, prop) for o in self._objects)", "def test_plonegroupOrganizationRemoved_3(self):\n set_registry_organizations([self.contacts[0].UID()]) # unselects the contact\n self.portal['acontent2'].pg_organization = None\n self.portal.restrictedTraverse(\n '{0}/{1}/department2/delete_confirmation'.format(DEFAULT_DIRECTORY_ID, PLONEGROUP_ORG))", "def deny_property(group: PropertyGroup, name: str) -> Property:\n group.property_grants[name] = False\n return group.properties[name]", "def propdel(self, key):\n self.properties[key] = None", "def remove_property(self, key):", "def delete_security_group_rule(rule):\n return IMPL.delete_security_group_rule(rule)", "def test_remove_group(self):\n\t\tself.test_add_group()\n\t\tdraft = ReviewRequestDraft.create(self.review_request)\n\t\tdraft.target_groups.remove(self.group)\n\t\tdraft.target_people = [self.user]\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, total_incoming=1, direct_incoming=0, group_incoming=1, starred_public=1)\n\t\tself.review_request.publish(self.user)\n\t\tself._check_counters(total_outgoing=1, pending_outgoing=1, direct_incoming=1, total_incoming=1, starred_public=1)", "def remove_group_role(request, role, group, domain=None, project=None):\n ksclient = get_admin_ksclient()\n return ksclient.roles.revoke(role=role, group=group, project=project,\n domain=domain)", "def delete_google_proxy_group(\n current_session, gcm, gpg_email, google_proxy_group_from_fence_db, user\n):\n # Google will automatically remove\n # this proxy group from all GBAGs the proxy group is a member of.\n # So we skip doing that here.\n logger.debug(\n \"Attempting to delete Google proxy group with email {}...\".format(gpg_email)\n )\n\n def raise_unavailable(gpg_email):\n raise UnavailableError(\n \"Error: Google unable to delete proxy group {}. 
Aborting\".format(gpg_email)\n )\n\n try:\n r = gcm.delete_group(gpg_email)\n except Exception as e:\n logger.exception(e)\n raise_unavailable(gpg_email)\n\n if r != {}:\n logger.exception(r)\n raise_unavailable(gpg_email)\n\n logger.info(\n \"Google proxy group with email {} successfully removed from Google.\".format(\n gpg_email\n )\n )\n if google_proxy_group_from_fence_db:\n # (else it was google_proxy_group_from_google and there is nothing to delete in Fence db.)\n logger.debug(\"Attempting to clear proxy group records from Fence database...\")\n logger.debug(\n \"Deleting rows in {}...\".format(\n GoogleProxyGroupToGoogleBucketAccessGroup.__tablename__\n )\n )\n gpg_to_gbag = (\n current_session.query(GoogleProxyGroupToGoogleBucketAccessGroup)\n .filter(\n GoogleProxyGroupToGoogleBucketAccessGroup.proxy_group_id\n == google_proxy_group_from_fence_db.id\n )\n .all()\n )\n for row in gpg_to_gbag:\n current_session.delete(row)\n logger.debug(\n \"Deleting rows in {}...\".format(UserGoogleAccountToProxyGroup.__tablename__)\n )\n uga_to_pg = (\n current_session.query(UserGoogleAccountToProxyGroup)\n .filter(\n UserGoogleAccountToProxyGroup.proxy_group_id\n == google_proxy_group_from_fence_db.id\n )\n .all()\n )\n for row in uga_to_pg:\n current_session.delete(row)\n logger.debug(\"Deleting rows in {}...\".format(UserGoogleAccount.__tablename__))\n uga = (\n current_session.query(UserGoogleAccount)\n .filter(UserGoogleAccount.user_id == user.id)\n .all()\n )\n for row in uga:\n current_session.delete(row)\n logger.debug(\"Deleting row in {}...\".format(GoogleProxyGroup.__tablename__))\n current_session.delete(google_proxy_group_from_fence_db)\n current_session.commit()\n logger.info(\n \"Records for Google proxy group {} successfully cleared from Fence \"\n \"database, along with associated user Google accounts.\".format(gpg_email)\n )\n logger.info(\"Done with Google deletions.\")", "def delete_from_all_link_group(self, group):\n self._plm.send_standard(self._address,\n COMMAND_DELETE_FROM_ALL_LINK_GROUP_0X02_NONE,\n group)", "def remove_member_of(\n user: User,\n group: PropertyGroup,\n processor: User,\n during: Interval[DateTimeTz] = t.cast( # noqa: B008\n Interval[DateTimeTz], UnboundedInterval\n ),\n) -> None:\n\n if group.permission_level > processor.permission_level:\n raise PermissionError(\"cannot delete a membership for a group with a\"\n \" higher permission level\")\n\n memberships: list[Membership] = [\n m for m in user.active_memberships(when=during)\n if m.group == group\n ]\n intervals = IntervalSet[DateTimeTz](\n m.active_during.closure for m in memberships\n ).difference(during)\n for m in memberships:\n session.session.delete(m)\n # flush necessary because we otherwise don't have any control\n # over the order of deletion vs. addition\n session.session.flush()\n session.session.add_all(Membership(active_during=i, user=user, group=group) for i in intervals)\n\n message = deferred_gettext(\"Removed from group {group} during {during}.\")\n log_user_event(message=message.format(group=group.name,\n during=during).to_json(),\n user=user, author=processor)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes a user member of a group in a given interval. If the given interval overlaps with an existing membership, this method will join the overlapping intervals together, so that there will be at most one membership for a particular user in a particular group at any given point in time.
def make_member_of(
    user: User,
    group: PropertyGroup,
    processor: User,
    during: Interval[DateTimeTz] = t.cast(  # noqa: B008
        Interval[DateTimeTz], UnboundedInterval
    ),
) -> None:

    if group.permission_level > processor.permission_level:
        raise PermissionError("cannot create a membership for a group with a"
                              " higher permission level")

    memberships: list[Membership] = [
        m for m in user.active_memberships(when=during)
        if m.group == group
    ]
    intervals = IntervalSet[DateTimeTz](
        m.active_during.closure for m in memberships
    ).union(during)
    for m in memberships:
        session.session.delete(m)
    session.session.flush()
    session.session.add_all(Membership(active_during=i, user=user, group=group) for i in intervals)
    message = deferred_gettext("Added to group {group} during {during}.")
    log_user_event(message=message.format(group=group.name,
                                          during=during).to_json(),
                   user=user, author=processor)
[ "async def _set_user_in_group_rooms(\n app: web.Application, user_id: UserID, socket_id: SocketID\n) -> None:\n primary_group, user_groups, all_group = await list_user_groups(app, user_id)\n groups = [primary_group] + user_groups + ([all_group] if bool(all_group) else [])\n\n sio = get_socket_server(app)\n for group in groups:\n sio.enter_room(socket_id, f\"{group['gid']}\")", "async def join_group(self, groupid, userid):\n raise NotImplementedError()", "def create_membership(start_date, end_date, user, group):\n membership = Membership(start_date=start_date, end_date=end_date,\n user=user, group=group)\n session.session.add(membership)\n return membership", "def assign_membership(user_url, organization_url, org_group_id, token):\n\n payload = {\n \"user\": user_url,\n \"organization\": organization_url\n }\n\n headers = {\"Authorization\": \"token {0}\".format(token)}\n response = requests.post(\"https://api.elis.rossum.ai/v1/organization_groups/{0}/memberships\".format(org_group_id), headers=headers, data=payload)\n\n return response", "def groupBookingsByIntervalOfMinutes(self, start_date, end_date, interval=30, **kwargs):\n\n # Interval must be a multiple or a divider of 60. Check it\n # Note that you could need a real value there\n if (interval < 60 and (60/interval)*interval != 60) or \\\n (interval > 60 and (interval/60)*60 != interval):\n raise ValueError, \"Interval must be a multiple or a divider of 60\"\n\n # Interval can't be greater than 3600 minutes (a day)\n if interval > 3600:\n raise ValueError, \"Interval can't be greater than 3600 minutes\"\n\n # Initialize\n btool = getToolByName(self, 'portal_booking')\n default_title = btool.getBookingDefaultTitle()\n group_keys = btool.getIntervalOfMinutesGroupKeys(start_date, end_date, interval)\n booking_groups = {}\n booking_brains = self.getBookingBrains(start_date, end_date, **kwargs)\n\n # Store brains in booking groups\n for brain in booking_brains:\n # Get brain group keys\n brain_start_date = DateTime(brain.start)\n brain_end_date = DateTime(brain.end)\n if start_date.greaterThanEqualTo(brain_start_date):\n brain_start_date = start_date\n if brain_end_date.greaterThanEqualTo(end_date):\n brain_end_date = end_date\n brain_group_keys = btool.getIntervalOfMinutesGroupKeys(brain_start_date, brain_end_date, interval)\n\n # Wrap booking\n booking_info = self._getBookingStructure(brain, default_title)\n\n # Append to booking groups\n for key in brain_group_keys:\n value = booking_info.copy()\n if not booking_groups.has_key(key):\n booking_groups[key] = []\n booking_info['group_by'] = key\n booking_groups[key].append(booking_info)\n\n return group_keys, booking_groups", "def groupIdJoin(groupId):\n group = db.Group.find_one({\"_id\": ObjectId(groupId)})\n user = db.users.find_one({\"_id\": ObjectId(current_user.id)})\n if group is not None:\n if not group['enrolledIds']:\n updatedGroup = db.Group.update_one({'_id': group['_id']}, {\"$set\": {\n \"enrolledIds\": [user['_id']]\n }})\n else:\n updatedGroup = db.Group.update_one({'_id': group['_id']}, {\"$set\": {\n \"enrolledIds\": group['enrolledIds'].append(user['_id'])\n }})\n if not user['enrolledGroups']:\n updatedUser = db.users.update_one({'_id': user['_id']}, {\n \"$set\": {\n \"enrolledGroups\": [group['_id']]\n }\n })\n else:\n updatedUser = db.users.update_one({'_id': group['_id']}, {\n \"$set\": {\n \"enrolledIds\": user['enrolledGroups'].append(group['_id'])\n }\n })\n return jsonify({\"msg\": \"Group successfully joined!\"}), 200\n elif group is None:\n return 
jsonify({\"msg\": \"Group Not Found\"}), 404\n return jsonify({\"msg\": \"something went wrong\"})", "def test_join_group(self):\n resp = self.c.post('/groups/join/', { 'groupid':self.group.pk })\n self.assertEqual(self.user.get_profile().group, self.group)", "def addmembertogroup(self, upn, groupguid):\n\n userguid = self.getupnid(upn)\n self.addguidtogroup(userguid, groupguid)", "def test_join_group(self):\n\t\treplies = JoinHandler.test('talk %s faith' % self.group.groupname)\n\t\tself.assertTrue(replies)\n\t\tself.assertEquals(len(replies),1)\n\t\tself.assertTrue('You are now a member',replies[0])\n\t\tself.assertEquals(self.group.member_set.count(),1)", "def test_user_in_own_group(self):\n token = self.user.token\n self.test_create_group()\n rv = self.get('/group/', token=token)\n self.assertJsonOk(rv, groups=[{'id': 1,\n 'name': 'Test group',\n 'admin': True}])\n return", "def add_user_to_group(self, member, group):\n dn = 'cn=%s,ou=groups,dc=mozilla' % group\n\n modlist = [(ldap.MOD_ADD, b'memberUid', member)]\n self.c.modify_s(dn, modlist)", "def insert_interval_in_list(list_intervals, interval):\n\n merge_left, merge_right = False, False\n for (a, b) in list_intervals:\n if b == interval[0] - 1:\n merge_left = True\n merge_left_pair = (a, b)\n if a == interval[1] + 1:\n merge_right = True\n merge_right_pair = (a, b)\n if merge_left and merge_right:\n list_intervals.remove(merge_left_pair)\n list_intervals.remove(merge_right_pair)\n list_intervals.append((merge_left_pair[0], merge_right_pair[1]))\n elif merge_left:\n list_intervals.remove(merge_left_pair)\n list_intervals.append((merge_left_pair[0], interval[1]))\n elif merge_right:\n list_intervals.remove(merge_right_pair)\n list_intervals.append((interval[0], merge_right_pair[1]))\n else:\n list_intervals.append(interval)", "def add_intersection_with_interval(self, typ, branch_or_cusp, interval,\n with_sign=1):\n x = self.get_intersections_with_interval(interval)\n idx = self._path_idx(typ, branch_or_cusp)\n x[idx] += with_sign", "def interval_merge(data_df, interval_df,interval_column_key):\n\n # parse column names for easier usage\n source_col = interval_column_key['source']\n start_col = interval_column_key['start']\n end_col = interval_column_key['end']\n label_col = interval_column_key['label']\n\n # create piecewise function\n input_domain = np.linspace(np.min(interval_df[start_col]),np.max(interval_df[start_col]))\n assign_interval_label = np.piecewise()\n\n # evaluate a row and return true if val in [start,end)\n row_eval = lambda row,val: (val >=row[start_col])&(val<row[end_col])\n # evalutea all rows of a table, return true or false for each\n table_eval = lambda val: interval_df.apply(row_eval,args=(val,),axis=1)\n # return the label value for the true one\n get_label = lambda val: interval_df[label_col][table_eval(val)].item()\n\n # add the column\n data_df[label_col] = data_df[source_col].apply(get_label)\n\n return data_df", "def test_in_group(self):\n # Building the shared predicate:\n groups = set(ascii_letters)\n shared_predicate = in_group('foo')\n error = 'The current user must belong to the group \"foo\"'\n # Building the test scenarios that will share the predicate above:\n scenarios = []\n for g in groups:\n credentials = {'groups': groups.copy()}\n scenario = {'credentials': credentials, 'error': error}\n scenarios.append(scenario)\n self._share_predicate_among_threads(shared_predicate, scenarios)", "def user_in_user_group(user: User, group: UserGroup) -> bool:\n return user in group.members", "def 
set_availability(member_id, start_time, end_time):\n availability_start_time = start_time.toZone(TimeZone.UTC).asdatetime().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n availability_end_time = (end_time + 1).toZone(TimeZone.UTC).asdatetime().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n set_availability_json = {\"param\":\n {\"TimeRange\":\n {\"StartTime\": availability_start_time,\n \"EndTime\": availability_end_time},\n \"TeacherMemberId\": member_id,\n \"AvailabilityDetail\": [{\n \"TeacherMemberId\": member_id,\n \"StartTime\": availability_start_time,\n \"EndTime\": availability_end_time}]\n }\n }\n response = super_admin_web_request_session.post(AXIS_UPDATE_AVAILABILITY_API_URL, json=set_availability_json)\n response_content = json.loads(response.content)\n\n if response_content[0][\"status\"] != 0: # Status 0 means the availability is set successfully.\n raise Exception(\n \"Failed to set availability, message: [%s], error code: [%s].\" % (\n response_content[0][\"id\"], response_content[0][\"status\"]))", "def test_lc_members_group_created(self):\n # LC members group exists\n self.assertTrue(api.group.get(groupname='nis-members'))\n\n # CP is member of LC Members group\n self.assertIn(\n 'nis-members',\n [group.id for group in api.group.get_groups(username='jsmith')]\n )\n\n # LC Members can:\n self.assertItemsEqual(\n [\n 'Authenticated', # virtual group\n 'Contributor', # add content to their LC\n ],\n api.group.get_roles(\n groupname='nis-members',\n obj=self.portal.lc['nis'],\n )\n )", "async def create_group(self, userid, gameid):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove a user from a group in a given interval. The interval defaults to the unbounded interval, so that the user will be removed from the group at any point in time, removing all memberships in this group retroactively. However, a common use case is terminating a membership by setting ``during=starting_from(now)``.
def remove_member_of(
    user: User,
    group: PropertyGroup,
    processor: User,
    during: Interval[DateTimeTz] = t.cast(  # noqa: B008
        Interval[DateTimeTz], UnboundedInterval
    ),
) -> None:

    if group.permission_level > processor.permission_level:
        raise PermissionError("cannot delete a membership for a group with a"
                              " higher permission level")

    memberships: list[Membership] = [
        m for m in user.active_memberships(when=during)
        if m.group == group
    ]
    intervals = IntervalSet[DateTimeTz](
        m.active_during.closure for m in memberships
    ).difference(during)
    for m in memberships:
        session.session.delete(m)
    # flush necessary because we otherwise don't have any control
    # over the order of deletion vs. addition
    session.session.flush()
    session.session.add_all(Membership(active_during=i, user=user, group=group) for i in intervals)

    message = deferred_gettext("Removed from group {group} during {during}.")
    log_user_event(message=message.format(group=group.name,
                                          during=during).to_json(),
                   user=user, author=processor)
[ "async def leave_group(self, userid):\n raise NotImplementedError()", "def group_disconnect(self, user: User): \n self.groups[user.group_name].disconnect_user(user)", "def remove_uptime(self, end):\n query = f\"DELETE FROM {self._schema}.uptime WHERE time < %s\"\n self.execute(query, [end])", "def remove_member(self, request, group_id, user_id, extra_context=None):\n member = get_object_or_404(User, pk=user_id)\n group = get_object_or_404(self.model, pk=group_id)\n\n if not self.is_admin(request.user, group):\n return http.HttpResponseBadRequest()\n\n if member == request.user:\n url = reverse('usergroups_leave_group', args=(self.slug, group.pk))\n return http.HttpResponseRedirect(url)\n\n extra_context = extra_context or {}\n extra_context.update({\n 'member': member,\n 'member_name': member.get_full_name() or member.username,\n })\n\n if request.method != 'POST':\n return self.confirmation(request, 'remove_member', group,\n extra_context)\n\n group.remove_admin(member)\n group.members.remove(member)\n\n if request.is_ajax():\n data = { 'user_id': member.id }\n return self.json_done(request, 'remove_member_done',\n data, group, extra_context)\n\n url = reverse('usergroups_remove_member_done',\n args=(self.slug, group.pk, member.pk))\n return http.HttpResponseRedirect(url)", "def on_jury_member_delete(sender, instance, **kwargs):\n jm_group = Group.objects.get(name=settings.JURY_MEMBER_GROUP)\n instance.user.groups.remove(jm_group)", "def remove_group(self, auth=None):\n self._require_manager_permission(auth)\n group_id = self._id\n members = list(self.members.values_list('id', flat=True))\n nodes = self.nodes\n\n self.member_group.delete()\n self.manager_group.delete()\n self.delete()\n self.update_search(deleted_id=group_id)\n\n for user in OSFUser.objects.filter(id__in=members):\n for node in nodes:\n node.disconnect_addons(user, auth)\n params = {\n 'group': group_id,\n 'node': node._id,\n }\n self.add_corresponding_node_log(node, NodeLog.GROUP_REMOVED, params, auth)\n project_signals.contributor_removed.send(node, user=user)\n node.update_search()", "def remove_group(self, group):\n self.groups.remove(group)", "def remove_users_before_date(self, ts):\n with self.__access_db() as cur:\n cur.execute(\"DELETE FROM users WHERE time < %s\"\"\", (ts,))", "def remove_from_group(self):\n self.simulator.devices['gates'].remove(self)", "def make_member_of(\n user: User,\n group: PropertyGroup,\n processor: User,\n during: Interval[DateTimeTz] = t.cast( # noqa: B008\n Interval[DateTimeTz], UnboundedInterval\n ),\n) -> None:\n\n if group.permission_level > processor.permission_level:\n raise PermissionError(\"cannot create a membership for a group with a\"\n \" higher permission level\")\n\n memberships: list[Membership] = [\n m for m in user.active_memberships(when=during)\n if m.group == group\n ]\n intervals = IntervalSet[DateTimeTz](\n m.active_during.closure for m in memberships\n ).union(during)\n for m in memberships:\n session.session.delete(m)\n session.session.flush()\n session.session.add_all(Membership(active_during=i, user=user, group=group) for i in intervals)\n message = deferred_gettext(\"Added to group {group} during {during}.\")\n log_user_event(message=message.format(group=group.name,\n during=during).to_json(),\n user=user, author=processor)", "def removememberfromgroup(self, upn, groupguid):\n\n userguid = self.getupnid(upn)\n\n self.removeguidfromgroup(userguid, groupguid)", "def delete_interval(self, interval):\n self.erase_interval_intersections(interval)\n 
self._unused_interval_indices.append(interval.index)\n # deleting explicitly because of possible circular references\n del interval", "def Remove_User(iam,groupname: str,username: str):\n\t\t\t\treturn iam.resource.Group(groupname).remove_user(UserName=username)", "def remove_group(username, group_name, logger, client):\n client.users.remove_from_group(username, group_name)\n logger.info('User `{0}` removed successfully from group '\n '`{1}`'.format(username, group_name))", "def remove_group():\r\n group_input = input(\"| Enter the name of the Group |\")\r\n adgroup.ADGroup.from_dn(group_input).delete()\r\n return \"| Group Removed |\"", "def remove(self, user: Optional[str] = None):\n raise NotImplementedError", "def leave_group(group_id_input):\n user_id = session['login'][1]\n user_usergroup = UserGroup.query.filter_by(user_id = user_id, group_id=group_id_input).one()\n db.session.delete(user_usergroup)\n db.session.commit()\n return redirect('/explore')", "def delete(self):\n udb = bbs.dbproxy.DBProxy('userbase')\n for chk_user in self.members:\n user = udb[chk_user]\n if self.name in user.groups:\n user.group_del (self.name)\n user.save ()\n del bbs.dbproxy.DBProxy('groupbase')[self.name]", "def remove_user(request, id):\n editor = request.user\n group = get_object_or_404(Group, id=id)\n \n if not (editor.is_superuser or editor.has_perm('admin', group)):\n return HttpResponseForbidden('You do not have sufficient privileges')\n \n if request.method != 'POST':\n return HttpResponseNotAllowed('GET')\n\n form = RemoveUserForm(group, request.POST)\n if form.is_valid():\n user = form.cleaned_data['user']\n group.user_set.remove(user)\n user.revoke_all(group)\n \n # signal\n view_remove_user.send(sender=editor, user=user, obj=group)\n \n # return success\n return HttpResponse('1', mimetype='application/json')\n \n # error in form return ajax response\n content = json.dumps(form.errors)\n return HttpResponse(content, mimetype='application/json')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
helper function to get stock data and send to kafka. producer: instance of a kafka producer; symbol: symbol of the stock, string type; returns none
def fetch_price(producer, symbol):
    logger.debug('Start to fetch stock price for %s', symbol)
    try:
        engine = HexunEngine()
        requester = Requester(engine)
        stock_obj = requester.request(symbol)
        # print stock_obj[0].as_dict()
        price = json.dumps(stock_obj[0].as_dict())
        logger.debug('Get stock info %s', price)
        producer.send(topic=topic_name, value=price, timestamp_ms=time.time())
        logger.debug('Sent stock price for %s to kafka', symbol)
    except KafkaTimeoutError as timeout_error:
        logger.warn('Failed to send stock price for %s to kafka, caused by: %s', (symbol, timeout_error))
    except Exception:
        logger.warn('Failed to get stock price for %s', symbol)
[ "def main():\n\n if len(sys.argv) < 2:\n print \"Usage: ./kafka_producer_stock.py stock-topic\"\n sys.exit(1)\n\n if len(sys.argv) >= 3:\n wait_time = float(sys.argv[2])\n else:\n wait_time = 0\n\n # Set up kafka brokers\n ipfile = open(ipfile_path, 'r')\n ips = ipfile.read()[:-1]\n ipfile.close()\n ips = ips.split('\\n')\n\n producer_user = (KafkaProducer(bootstrap_servers=ips, \n value_serializer=lambda v: json.dumps(v).encode('utf-8')))\n\n ticker_list = create_stock_list(ticker_list_path)\n\n # simulate data \n while True:\n cur_time = datetime.datetime.now()\n time_string = str(cur_time.hour).zfill(2) + \":\" + str(cur_time.minute).zfill(2) \\\n + \":\" + str(cur_time.second).zfill(2)\n for ticker_name in ticker_list:\n stock_price = float(random.randint(20, 20000)/10)\n stock_record = {\"ticker\": ticker_name, \"price\": stock_price, \"time\": time_string}\n \n # send the messages to separate topics\n producer_user.send(sys.argv[1], stock_record) \n time.sleep(wait_time)", "def send_to_pubsub_topic(self, stocks):\n pass", "def get_stock_data(x):", "def getStockData():\n stkData = json.loads(flask.request.data)\n if not checkStockData(stkData):\n print(\"getStockData(): Provided stock data was invalid!\");\n return None\n \n return sentData", "async def info(self, ctx, stock: str):\n info = self.db.get_stock(stock)\n if len(info) == 0:\n await ctx.channel.send(embed=self.embed(\"No stock called '%s' found in database.\" % stock))\n return\n rating, maxrating = await self.cf.get_rating(stock), await self.cf.get_best_rating(stock)\n market = 0\n for owner, quantity in info:\n if owner == -1:\n market = quantity\n e = Embed(title=\"Stock info for %s\" % stock, color=Color.dark_blue())\n e.add_field(name=\"Current Value\", value=\"**$%.2f**\" % self.stock_value(rating), inline=False)\n e.add_field(name=\"Max. 
Value\", value=\"$%.2f\" % self.stock_value(maxrating), inline=False)\n e.add_field(name=\"Available Stocks in market\", value=\"%d\" % market, inline=False)\n e.set_footer(text=\"Requested by \"+str(ctx.author), icon_url=ctx.author.avatar_url)\n await ctx.channel.send(embed=e)", "def display_stock():", "def send_market_price_request(ric_name):\n mp_req_json = {\n 'ID': 2,\n 'Key': {\n 'Name': ric_name,\n 'Service': service\n },\n }\n web_socket_app.send(json.dumps(mp_req_json))\n print(\"SENT:\")\n print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))", "def create_stock():\n return {\n \"code\": \"success\",\n \"message\": \"stock created\"\n }", "def get_stock_messages(date, stock):\n with pd.HDFStore(itch_store) as store:\n stock_locate = store.select(\"R\", where=\"stock = stock\").stock_locate.iloc[0]\n target = \"stock_locate = stock_locate\"\n\n data = {}\n # trading messsage types\n messages = [\"A\", \"F\", \"E\", \"C\", \"X\", \"D\", \"U\", \"P\", \"Q\"]\n for m in messages:\n data[m] = (\n store.select(m, where=target)\n .drop(\"stock_locate\", axis=1)\n .assign(type=m)\n )\n\n order_cols = [\"order_reference_number\", \"buy_sell_indicator\", \"shares\", \"price\"]\n orders = pd.concat([data[\"A\"], data[\"F\"]], sort=False, ignore_index=True).loc[\n :, order_cols\n ]\n\n for m in messages[2:-3]:\n data[m] = data[m].merge(orders, how=\"left\")\n\n data[\"U\"] = data[\"U\"].merge(\n orders,\n how=\"left\",\n right_on=\"order_reference_number\",\n left_on=\"original_order_reference_number\",\n suffixes=[\"\", \"_replaced\"],\n )\n\n data[\"Q\"].rename(columns={\"cross_price\": \"price\"}, inplace=True)\n data[\"X\"][\"shares\"] = data[\"X\"][\"cancelled_shares\"]\n data[\"X\"] = data[\"X\"].dropna(subset=[\"price\"])\n\n data = pd.concat([data[m] for m in messages], ignore_index=True, sort=False)\n data[\"date\"] = pd.to_datetime(date, format=\"%m%d%Y\")\n data.timestamp = data[\"date\"].add(data.timestamp)\n data = data[data.printable != 0]\n\n drop_cols = [\n \"tracking_number\",\n \"order_reference_number\",\n \"original_order_reference_number\",\n \"cross_type\",\n \"new_order_reference_number\",\n \"attribution\",\n \"match_number\",\n \"printable\",\n \"date\",\n \"cancelled_shares\",\n ]\n return data.drop(drop_cols, axis=1).sort_values(\"timestamp\").reset_index(drop=True)", "async def mock_candle_producer(state: SharedState, symbol: str):\n\n i = 1\n while not state.stop:\n asyncio.sleep(2)\n raw_candle = fake_candle(i, symbol)\n message = Message(\n time =Pipe.to_datetime(time=raw_candle[\"k\"][\"T\"]),\n symbol =symbol,\n content_type =ContentType.CANDLE_STREAM,\n payload =raw_candle\n )\n await state.queue.put(message)\n next_i = i + 1 if i <= 10 else 1\n i = next_i", "def queueKafka(self, json_data ):\n self.kafka_producer.send(TOPIC_NAME, json_data)", "def checkLatestTrend(stockcode,price):\r\n\tprint DealDatum[stockcode]", "def ontick(self,data):\n ticker = self.ticker( data[\"seccode\"] )\n ticker.classcode = data[\"classcode\"]\n ticker.time = datetime.datetime.now()\n ticker.price = data[\"price\"]\n ticker.volume = 0\n self.tick(ticker)", "async def get_stock_close_data_from_pse(ticker: dict, exchange: dict) -> bool:\n pse_stock_endpoint: str = \"\"\n async with aiohttp.ClientSession() as session:\n async with session.get(pse_stock_endpoint) as response:\n response_data = await response.json()\n # TODO get net volume\n # TODO get sell volume\n # TODO get buy volume\n # If cant obtain data use create task to use eod instead\n # TODO - 
send net , sell and buy for the stock to save to database\n pass\n return True", "def fetch_PRINTS_data(symbol):\r\n pair_split = symbol.split('/') # symbol must be in format XXX/XXX ie. BTC/USD\r\n symbol = pair_split[0] + pair_split[1]\r\n url = f'https://api.kraken.com/0/public/Trades?pair={symbol}'\r\n response = requests.get(url)\r\n if response.status_code == 200: # check to make sure the response from server is good\r\n j = json.loads(response.text)\r\n\r\n result = j['result']\r\n keys = []\r\n for item in result:\r\n keys.append(item)\r\n if keys[0] != 'last':\r\n data = pd.DataFrame(result[keys[0]], columns=['price', 'volume', 'time', 'buysell', 'ordtype', 'misc'])\r\n else:\r\n data = pd.DataFrame(result[keys[1]], columns=['price', 'volume', 'time', 'buysell', 'ordtype', 'misc'])\r\n\r\n data['date'] = pd.to_datetime(data['time'], unit='s')\r\n data['buysell'] = data['buysell'].apply(lambda x: \"buy\" if x == 'b' else \"sell\")\r\n data['ordtype'] = data['ordtype'].apply(lambda x: \"limit\" if x == 'l' else \"market\")\r\n data['dollaramount'] = data['price'].astype(float) * data['volume'].astype(float)\r\n data.drop(columns=['misc'], inplace=True) #drop misc column that is typically blank\r\n\r\n # if we failed to get any data, print an error...otherwise write the file\r\n if data is None:\r\n print(\"Did not return any data from Kraken for this symbol\")\r\n else:\r\n data.to_csv(f'data/Kraken_{symbol}_tradeprints.csv', index=False)\r\n else:\r\n print(\"Did not receieve OK response from Kraken API\")", "def get_stock_details(symbol): \n\n response = requests.get(\"https://api.polygon.io/v1/meta/symbols/\" + symbol + \"/company?&apiKey=\" + POLY_API_KEY)\n response_json = response.json()\n \n return response_json", "def get_stock(symbol, conn):\n\n database_command = f\"SELECT * FROM STOCK WHERE SYMBOL = '{symbol}'\"\n try:\n cursor = conn.execute(database_command)\n r = [dict((cursor.description[i][0], value)\n for i, value in enumerate(row)) for row in cursor.fetchall()]\n except sqlite3.IntegrityError:\n return \"None\"\n\n if r:\n if (r[0]['VALID'] - time.time()) > 0:\n # vraceni pokud existuje a je validni\n return r[0]['JSON']\n else:\n # tady se vola pokud uz existuje ale neni validni\n return call_api_global_quote(symbol, True, conn)\n else:\n # tady vola pokud neexistuje\n return call_api_global_quote(symbol, False, conn)", "def getStockData(stockCode):\r\n import time\r\n HqString = \"http://hq.sinajs.cn/list=\" + stockCode\r\n while True: # if IOError wait 30seconds to retry\r\n try:\r\n hqList = urlopen(HqString).read()\r\n #print \"getting from remote hq.sina,consumes %f seconds.....\" % consume_t\r\n #logger.info(\"Check if the HqString changed!!==> \\n %s\" % hqList)\r\n break\r\n except IOError:\r\n print \"IOError ,sleep 20 second,then fetch again\"\r\n time.sleep(20)\r\n hqList = hqList.split(',')\r\n #print \"====retrieved Hq from sina ,the length ===>\", len(hqList)\r\n if len(hqList) != 33:\r\n #BeepNote(150, 100, 2)\r\n print \"Length Error != 33 Hqlist is invalid!!!!!!!!!!! return 0 \\n\"\r\n print \"Error List contains===>\", hqList\r\n return 0\r\n #todayOpen, yesClose, atTime = hqList[1], hqList[2], hqList[31].split('\"')[0]\r\n #tmpHigh,tmpLow, tmpVol ,tmpMoney = hqList[4], hqList[5], hqList[8] , hqList[9]\r\n #nowPrice = hqList[3]\r\n valList = map(float, hqList[1:30]) + [hqList[30]] + [atTime]\r\n return valList", "def fetch_SPREAD_data(symbol):\r\n pair_split = symbol.split('/') # symbol must be in format XXX/XXX ie. 
BTC/USD\r\n symbol = pair_split[0] + pair_split[1]\r\n url = f'https://api.kraken.com/0/public/Spread?pair={symbol}'\r\n response = requests.get(url)\r\n if response.status_code == 200: # check to make sure the response from server is good\r\n j = json.loads(response.text)\r\n result = j['result']\r\n keys = []\r\n for item in result:\r\n keys.append(item)\r\n if keys[0] != 'last':\r\n data = pd.DataFrame(result[keys[0]], columns=['unix', 'bid', 'ask'])\r\n else:\r\n data = pd.DataFrame(result[keys[1]], columns=['unix', 'bid', 'ask'])\r\n\r\n data['date'] = pd.to_datetime(data['unix'], unit='s')\r\n data['spread'] = data['ask'].astype(float) - data['bid'].astype(float)\r\n\r\n # if we failed to get any data, print an error...otherwise write the file\r\n if data is None:\r\n print(\"Did not return any data from Kraken for this symbol\")\r\n else:\r\n data.to_csv(f'data/Kraken_{symbol}_spreads.csv', index=False)\r\n else:\r\n print(\"Did not receieve OK response from Kraken API\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
takes python code and converts it to a node; the node is then dealt with by node_to_gast
def py_to_gast(python_input):
    input_ast = ''
    try:
        input_ast = ast.parse(python_input)
    except:
        # this will signal to translate that error occurred
        return None
    return py_router.node_to_gast(input_ast)
[ "def handle_nodes(nodes):\n\t# Assumptions: the node() line is all one one line\n\n\tsplit_nodes = []\n\tcurnode = -1\n\tfor m in nodes:\n\t\tsplit_nodes.append({})\n\t\tcurnode += 1\n\n\t\t# TODO: make this a function call or something so i can change the node language more easily\n\t\t# no need to error check this since we already did in process_node\n\t\tma = re.match(g.MAP_RE, m)\n\n\t\tsplit_nodes[curnode][\"label\"] = ma.group(1)\n\t\tsplit_nodes[curnode][\"rank\"] = ma.group(2)\n\t\tsplit_nodes[curnode][\"index\"] = ma.group(3)\n\t\tsplit_nodes[curnode][\"lower\"] = ma.group(4)\n\t\tsplit_nodes[curnode][\"step\"] = ma.group(5)\n\t\tsplit_nodes[curnode][\"upper\"] = ma.group(6)\n\t\tsplit_nodes[curnode][\"cond\"] = ma.group(7)\n\t\tsplit_nodes[curnode][\"pred\"] = ma.group(8)\n\t\tsplit_nodes[curnode][\"targets\"] = ma.group(9)\n\t\tsplit_nodes[curnode][\"func\"] = ma.group(10)\n\t\tsplit_nodes[curnode][\"func_name\"] = ma.group(11)\n\t\tsplit_nodes[curnode][\"in_args\"] = ma.group(12)\n\n\n\t# go through the nodes and one at a time output the code. The multiple\n\t# loops are necessary so that the code is output together for each\n\t# function.\n\t# TODO: some of these loops could be combined together for performance\n\tfor m in split_nodes:\n\t\tg.nodes[m['label']] = {'label': m['label'], 'func_name': m['func_name'], 'index': m['index'], 'rank': m['rank']}\n\t\tg.functions[m['label']] = m['func_name']\n\t\tg.intervals[m['label']] = {'lower': m['lower'], 'step': m['step'], 'upper': m['upper']}\n\t\tg.real_preds[m['label']] = make_targets(m['pred'])\n\t\tg.preds[m['label']] = flatten(g.real_preds[m['label']])\n\t\tg.real_targets[m['label']] = make_targets(m['targets'])\n\t\tg.targets[m['label']] = flatten(g.real_targets[m['label']])\n\t\t#g.targets[m['label']] = flatten(make_targets(m['targets']))\n\t\tg.target_variables[m['label']] = m['cond']\n\tfor n in g.nw_calls:\n\t\tg.real_preds[n['label']] = make_targets(n['preds'])\n\t\tg.real_targets[n['label']] = make_targets(n['succ'])\n\n\t# create the task graph\n\tg.graph[\"0\"] = graph.GraphNode(\"0\", [], [])\n\tfor n in split_nodes:\n\t\tlabel = n['label']\n\t\tnode = graph.GraphNode(label, g.real_preds[label], g.real_targets[label])\n\t\tif \"0\" in node.get_pred():\n\t\t\tg.graph[\"0\"].add_succ(label)\n\t\tg.graph[label] = node\n\tfor n in g.nw_calls:\n\t\tlabel = n['label']\n\t\tnode = graph.GraphNode(label, g.real_preds[label], g.real_targets[label])\n\t\tif \"0\" in node.get_pred():\n\t\t\tnode.output()\n\t\t\terror(\"Cannot start a graph with a network call\")\n\t\tg.graph[label] = node\n\tgraph.compute_dominance(g.graph)\n\tgraph.match_forks_to_joins(g.graph)\n\n#\tfor l in g.graph:\n#\t\tg.graph[l].output()\n\n\tfor m in split_nodes:\n\t\t# store the input args so we can refer to their type later\n\t\tprocess_func_args(m['func_name'], m['in_args'])\n\t\tprocess_input_arguments(m['func_name'], m['in_args'])\n\t\tif m['index'] == 'NULL':\n\t\t\t#warning(\"Caught a NULL loop index variable that will be replaced with '\" + g.INDEX + \"'\")\n\t\t\tm['index'] = g.INDEX\n\t\tg.indices[m['label']] = m['index']\n\n\tfor m in split_nodes:\n\t\thandle_main_node(m['label'], m['lower'], m['step'], m['upper'], m['func_name'])\n\n\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n\t\tpil2c.print_main_func()\n\t\tpil2c.print_funcs()\n\t\tpil2c.handle_nodes(split_nodes)\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.print_main_func()\n\t\tpil2swarm.print_funcs()\n\t\tpil2swarm.handle_nodes(split_nodes)\n\telif g.OUTPUT == 
\"afl\":\n\t\tpil2afl.print_main_func()\n\t\tpil2afl.print_funcs()\n\t\tpil2afl.handle_nodes(split_nodes)\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.print_main_func()\n\t\tpil2ocr.print_funcs()\n\t\tpil2ocr.handle_nodes(split_nodes)\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\treturn split_nodes", "def convert_to_python(ast_source):\r\n return map(codegen.to_source, ast_source)", "def tree(self):\r\n\r\n try:\r\n code = self.code.encode('utf8') + b'\\n'\r\n return compile(code, self.filename, 'exec', ast.PyCF_ONLY_AST)\r\n except SyntaxError:\r\n return None", "def convertToTweakNodePlug(*args, **kwargs):\n \n pass", "def convert(gr, raw_node):\r\n type, value, context, children = raw_node\r\n if children or type in gr.number2symbol:\r\n # If there's exactly one child, return that child instead of\r\n # creating a new node.\r\n if len(children) == 1:\r\n return children[0]\r\n return Node(type, children, context=context)\r\n else:\r\n return Leaf(type, value, context=context)", "def parse_node_codeable(line: str):\n\n stereotypes, tagged_values = list(), list()\n\n name = line.split(\"=\")[0].strip()\n if \"stereotype_instances\" in line:\n stereotype_part = line.split(\"stereotype_instances\")[1].split(\"=\")[1].strip()\n\n if stereotype_part[0] == \"[\":\n stereotypes = [item.strip() for item in stereotype_part.split(\"]\")[0].split(\"[\")[1].split(\",\")]\n else:\n stereotypes = list()\n stereotypes.append(stereotype_part.split(\",\")[0].strip(\")\").strip())\n if \"tagged_values\" in line:\n tagged_values_dict = ast.literal_eval(line.split(\"tagged_values =\")[1].split(\")\")[0].strip())\n\n for tagged_value in tagged_values_dict:\n tagged_values.append(str(tagged_value) + \": \" + str(tagged_values_dict[tagged_value]))\n return name, stereotypes, tagged_values", "def parse_and_print_python(source_code: str) -> None:\n pretty_print_ast(python_ast.parse(source_code))", "def parse(code, path=None):\n\n\tparser = Parser(code, path)\n\tast = parser.run()\n\treturn ast", "def normalize_python(code, remove_docstrings=True):\n try:\n tree = ast.parse(code, \"<<code>>\")\n except SyntaxError:\n return None\n\n if remove_docstrings:\n tree = RemoveDocstring().visit(tree)\n\n return astunparse.unparse(tree)", "def build_ast(self):\n self._ast_node = parse(self.sourcecode)", "def loadCommand(code):\n return LoadPython(ast.parse(code))", "def run_bytecode(code_or_node, filename=None):\r\n if isinstance(code_or_node, Node):\r\n code_or_node = to_ast(code_or_node)\r\n if filename is None:\r\n filename = encode_filename(code_or_node.filename)\r\n if not isinstance(code_or_node, CodeType):\r\n if filename is None:\r\n filename = '<string>'\r\n code_or_node = compile_ast(code_or_node, filename)\r\n namespace = {}\r\n exec code_or_node in namespace\r\n return namespace", "def unastify(tree):\n # CAUTION: in `unastify`, we implement only what we minimally need.\n our_module_globals = globals()\n def lookup_thing(dotted_name):\n if not dotted_name.startswith(\"mcpyrate.quotes\"):\n raise NotImplementedError(f\"Don't know how to look up {repr(dotted_name)}\")\n path = dotted_name.split(\".\")\n if not all(component.isidentifier() for component in path):\n raise NotImplementedError(f\"Dotted name {repr(dotted_name)} contains at least one non-identifier component\")\n if len(path) < 3:\n raise NotImplementedError(f\"Dotted name {repr(dotted_name)} has fewer than two dots (expected 'mcpyrate.quotes.something')\")\n name_of_thing = path[2]\n thing = our_module_globals[name_of_thing]\n if 
len(path) > 3:\n for attrname in path[3:]:\n thing = getattr(thing, attrname)\n return thing\n\n T = type(tree)\n\n if T is ast.Constant:\n return tree.value\n\n # Support machinery for `Call` AST node. This serendipitously supports also\n # *args and **kwargs, because as of Python 3.6 those appear in `args` and\n # `keywords`, and `Starred` needs no special support here.\n elif T is list:\n return [unastify(elt) for elt in tree]\n elif T is ast.keyword:\n return tree.arg, unastify(tree.value)\n\n elif T is ast.List:\n return [unastify(elt) for elt in tree.elts]\n elif T is ast.Tuple:\n return tuple(unastify(elt) for elt in tree.elts)\n elif T is ast.Dict:\n return {unastify(k): unastify(v) for k, v in zip(tree.keys, tree.values)}\n elif T is ast.Set:\n return {unastify(elt) for elt in tree.elts}\n\n elif T is ast.Call:\n dotted_name = unparse(tree.func)\n\n # Drop the run-time part of `q`, if present. This is added by `q` itself,\n # not `astify`, but `unastify` is usually applied to the output of `q`.\n if dotted_name == \"mcpyrate.quotes.splice_ast_literals\": # `q[]`\n body = tree.args[0]\n return unastify(body)\n\n # Even though the unquote operators compile into calls, `unastify`\n # must not apply their run-time parts, because it's running in the\n # wrong context. Those only work properly at run time, and they\n # must run at the use site of `q`, where the user-provided names\n # (where the unquoted data comes from) will be in scope.\n #\n # So we undo what `astify` did, converting the unquote calls back into\n # the corresponding AST markers.\n elif dotted_name == \"mcpyrate.quotes.astify\": # `u[]`\n body = tree.args[0]\n return Unquote(body)\n elif dotted_name == \"mcpyrate.quotes.lift_sourcecode\": # `n[]`\n body, filename = tree.args[0], tree.args[1].value\n return LiftSourcecode(body, filename)\n elif dotted_name == \"mcpyrate.quotes.ast_literal\": # `a[]`\n body, syntax = tree.args[0], tree.args[1].value\n return ASTLiteral(body, syntax)\n elif dotted_name == \"mcpyrate.quotes.ast_list\": # `s[]`\n body = tree.args[0]\n return ASTList(body)\n elif dotted_name == \"mcpyrate.quotes.ast_tuple\": # `t[]`\n body = tree.args[0]\n return ASTTuple(body)\n elif dotted_name == \"mcpyrate.quotes.capture_value\": # `h[]` (run-time value)\n body, name = tree.args[0], tree.args[1].value\n return Capture(body, name)\n elif dotted_name == \"mcpyrate.quotes.lookup_macro\": # `h[]` (macro)\n # `capture_macro` is done and gone by the time we get here.\n # `astify` has generated an `ast.Call` to `lookup_macro`.\n #\n # To make the this work properly even across process boundaries,\n # we cannot simply run the `lookup_macro`. 
It injects the binding\n # once, and then becomes an inert lexical name (pointing to that\n # binding) - so that strategy only works inside the same process.\n #\n # We can't just leave the `lookup_macro` call in the AST, either,\n # since that doesn't make any sense when the tree is later sent\n # to `astify` to compile it again (we don't want another `ast.Call`\n # layer around it).\n #\n # So we need something that triggers `capture_macro` when the\n # result is astified again.\n #\n # Hence, we uncompile the `lookup_macro` into a `Capture` marker.\n #\n # But if the astified tree comes from an earlier run (in another\n # Python process), the original macro name might not be in the\n # expander's bindings any more.\n #\n # So we inject the captured macro into the expander's global\n # bindings table now (by calling `lookup_macro`), and make the\n # uncompiled capture command capture that macro.\n #\n # This does make the rather mild assumption that our input tree\n # will be astified again in the same Python process, in order for\n # the uncompiled capture to succeed when `astify` compiles it.\n key = tree.args[0]\n assert type(key) is ast.Tuple\n assert all(type(elt) is ast.Constant for elt in key.elts)\n name, unique_name, frozen_macro = [elt.value for elt in key.elts]\n uniquename_node = lookup_macro((name, unique_name, frozen_macro))\n return Capture(uniquename_node, name)\n\n else:\n # General case: an astified AST node.\n callee = lookup_thing(dotted_name)\n args = unastify(tree.args)\n kwargs = {k: v for k, v in unastify(tree.keywords)}\n node = callee(*args, **kwargs)\n node = ast.copy_location(node, tree)\n return node\n\n raise TypeError(f\"Don't know how to unastify {unparse_with_fallbacks(tree, debug=True, color=True)}\")", "def compile_ast(space, module, info):\n symbols = symtable.SymtableBuilder(space, module, info)\n return TopLevelCodeGenerator(space, module, symbols, info).assemble()", "def translate_one(node, opt):\n logger = logging.getLogger(__name__)\n\n # e.g. Get_a from user\n value = node.program.parent.kws.get(node.cls+\"_\"+node.name, None)\n\n # e.g. Get from user\n if value is None:\n value = node.program.parent.kws.get(node.cls, None)\n\n if value is None:\n\n backend = node.backend\n if backend == \"TYPE\":\n backend = \"unknown\"\n\n assert \"_\"+backend in matlab2cpp.rules.__dict__, (\n \"No rule {}; ensure your .py file is properly set up.\".format(backend))\n try:\n target = matlab2cpp.rules.__dict__[\"_\"+backend]\n\n except KeyError as err:\n logger.warning(\n \"'%s', File %s. Datatype defined in the .py file, might be wrong.\",\n err.message, node.file)\n raise\n\n specific_name = node.cls + \"_\" + node.name\n\n # e.g. Get_a (reserved typically)\n if specific_name in target.__dict__:\n value = target.__dict__[specific_name]\n\n # e.g. Get (normal behavior)\n elif node.cls in target.__dict__:\n value = target.__dict__[node.cls]\n\n else:\n print(node.program.summary())\n raise KeyError(\n \"Expected to find rule for '%s' in the file '_%s.py. 
Crash with file: %s, on line: %s'\" %\\\n (node.cls, node.backend, node.file, node.line))\n\n\n # let rule create a translation\n if not isinstance(value, (str, list, tuple)):\n #print(node.code)\n #print(\"\\n\\n\")\n value = value(node)\n\n # not quite right format\n if isinstance(value, (matlab2cpp.node.frontend.Node)):\n value = str(value)\n\n elif value is None:\n #print(\"\\n\\nerror:\")\n #print(node.code)\n #print(node.parent.code)\n\n #print(node.parent.parent.code)\n #print(\"\\n\")\n raise ValueError(\n\"missing return in function %s in file %s, Matlab: Crash with file: %s, on line: %s\" %\\\n(node.cls, node.backend, node.file, node.line))\n\n node.ret = repr(value)\n\n # interpolate tuples/lists\n if not isinstance(value, str):\n\n value = list(value)\n children = [\"%(\"+str(i)+\")s\" for i in range(len(node))]\n\n if len(value) == 2:\n value.insert(1, \"\")\n\n value = value[:-1] + [value[-2]] *\\\n (len(children)-len(value)+1) + value[-1:]\n\n if len(children) == 0:\n value = value[0] + value[-1]\n\n elif len(children) == 1:\n value = value[0] + children[0] + value[-1]\n\n else:\n\n out = value[0]\n for i in range(len(children)):\n out += children[i] + value[i+1]\n value = out\n\n # interpolate string\n try:\n value = value % node.properties()\n except:\n\n #print(\"..........\")\n #print(node.code)\n #print(\"----------\")\n #print(\"\\n\\n\")\n raise SyntaxError(\"interpolation in \" + node.backend + \".\" +\\\n node.cls + \" is misbehaving\\n'\" + value + \"'\\n\" +\\\n str(node.prop) + \"\\nCrash with file: \" + str(node.file) + \" , on line: \" + str(node.line) +\\\n \":\\n\" + node.code)\n\n if node.cls in (\"Assign\", \"Assigns\", \"Statement\", \"If\", \"Elif\",\n \"For\", \"Parfor\", \"While\") and node.project.builder.original:\n code_tmp = [\"// \" + line for line in node.code.splitlines()]\n value = \"\\n\".join(code_tmp) + \"\\n\" + value\n value = value.replace(\"%\", \"__percent__\")\n node.str = value", "def transform(self, tree):\r\n if not (isinstance(tree, tuple) or isinstance(tree, list)):\r\n tree = parser.ast2tuple(tree, line_info=1)\r\n return self.compile_node(tree)", "def _annotated_code(stub: str, code: str) -> str:\n context = CodemodContext()\n ApplyTypeAnnotationsVisitor.store_stub_in_context(\n context, libcst.parse_module(stub)\n )\n modified_tree = ApplyTypeAnnotationsVisitor(context).transform_module(\n libcst.parse_module(code)\n )\n return modified_tree.code", "def tree2sig(net,tree):\n## print 'length of tree = ',len(tree)\n if tree == []:\n return ~net.get_True()\n sign = tree[0]\n if type(tree[0]) == str: # tree is a sop\n sig = sop2sig(net,tree[1])\n if tree[0] == '-':\n sig = ~sig\n return sig \n PIs=PIsOf(net)\n assert len(tree[0]) > 2, tree[0]\n if len(tree) == 2: #an XOR\n## print tree[0]\n assert tree[0][1] == 'xor', tree[0][1]\n variable = tree[0][0]\n xsign = tree[0][2]\n sign = tree[0][3]\n ph = 1\n if variable < 0:\n ph = 0\n variable = -(variable+1) # -1->0,-2 -> 1\n variable= PIs[variable]\n if ph == 0:\n variable = ~variable\n N1 = tree2sig(net,tree[1])\n sig = variable ^ N1 # xor with the literal\n if xsign == '-': #add an invertor\n sig = ~sig\n if sign == '-':\n sig = ~sig\n return sig\n variable = PIs[tree[0][0]] #splitting variable\n method = tree[0][1]\n sign = tree[0][2] # '-' means complement was implemented\n assert len(tree) == 3, tree\n N1 = tree2sig(net,tree[1])\n N2 = tree2sig(net,tree[2])\n if method == 'shannon': #N1 = f_1, N2 = f_0\n sig = variable.ite(N1,N2)\n if sign == '-':\n sig = ~sig\n return sig\n elif 
method == '+davio': #N1 = f_0, N2 = f_2\n return N1^(variable&N2)\n elif method == '-davio': #N1 = f_1, N2 = f_2\n return N1^(N2&~variable)\n else:\n assert False, 'ERROR: Not known method'", "def generate_code(tree: ast.Ast) -> Tuple[List[bc.Constant], List[bc.Instruction]]:\n generator = CodeGenerator()\n tree.accept(generator)\n return generator.program.constants, generator.program.code" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads binary feature data from a file and converts each sample into a list.
def load_data_bin(filePath):
    dataFile = open(filePath)
    data = []
    labels = []
    for sample in dataFile:
        fields = sample.strip('\n').split('\t')
        fields = [int(x) for x in fields]
        labels.append(fields[0])
        data.append(fields[1:])
    dataFile.close()
    return data, labels
[ "def generate_data(filename):\r\n filedata = np.genfromtxt(filename, dtype=None, delimiter=\",\")\r\n\r\n features = []\r\n class_list = []\r\n\r\n # For each row, add the last index to the class list, and all other entries to the feature list\r\n for i in filedata:\r\n sample = list(i)\r\n sample.pop(-1)\r\n features.append(sample)\r\n class_list.append(float(i[-1]))\r\n\r\n # Convert the lists to numpy arrays for easier manipulation\r\n features = np.array(features)\r\n class_list = np.array(class_list)\r\n\r\n return features, class_list", "def read_bin_file(bin_fpath):\n with open(bin_fpath, 'rb') as fd:\n bstr = fd.read()\n\n label_byte = 1\n image_byte = HEIGHT * WIDTH * DEPTH\n\n array = np.frombuffer(bstr, dtype=np.uint8).reshape((-1, label_byte + image_byte))\n labels = array[:,:label_byte].flatten().astype(np.int32)\n images = array[:,label_byte:].reshape((-1, DEPTH, HEIGHT, WIDTH)).transpose((0, 2, 3, 1))\n\n return images, labels", "def load_word2vec_binary(fname):\n vocab = []\n vectors = None\n\n with open(fname) as fin:\n header = fin.readline()\n vocab_size, vector_size = map(int, header.split())\n\n vectors = np.empty((vocab_size, vector_size), dtype=np.float)\n binary_len = np.dtype(np.float32).itemsize * vector_size\n for line_no in xrange(vocab_size):\n word = ''\n while True:\n ch = fin.read(1)\n if ch == ' ':\n break\n word += ch\n vocab.append(word.strip())\n\n vector = np.fromstring(fin.read(binary_len), np.float32)\n vectors[line_no] = vector\n return pd.DataFrame(vectors, index=vocab)", "def _load(self) -> List[Tuple[float, bytes]]:\n load_path = self._filepath\n\n packets = []\n with self._fs.open(load_path) as f:\n pcr = dpkt.pcap.Reader(f)\n for ts, buf in pcr:\n packets.append((ts, buf))\n return packets", "def read_training_data():\n data_file = open('../RPCRunner/data/data', 'rb')\n labels_file = open('../RPCRunner/data/labels', 'rb')\n labels = np.loadtxt(labels_file, dtype=np.int8)\n data = np.fromstring(np.array([data_file.read(650) for i in labels]),\n dtype=np.uint8)\n return np.reshape(data, (-1, 650)), labels", "def load_raw_state_data(path_to_file):\n data = np.genfromtxt(path_to_file)\n\n return data", "def load_data(data_file=None):\n\n # Set defaults.\n if data_file is None:\n data_file = 'bin_seq.csv'\n\n bin_array = np.genfromtxt(data_file, delimiter=',')\n\n return(bin_array)", "def _read_binary_matrix(filename):\n with tf.gfile.GFile(filename, \"rb\") as f:\n s = f.read()\n magic = int(np.frombuffer(s, \"int32\", 1))\n ndim = int(np.frombuffer(s, \"int32\", 1, 4))\n eff_dim = max(3, ndim)\n raw_dims = np.frombuffer(s, \"int32\", eff_dim, 8)\n dims = []\n for i in range(0, ndim):\n dims.append(raw_dims[i])\n\n dtype_map = {507333717: \"int8\",\n 507333716: \"int32\",\n 507333713: \"float\",\n 507333715: \"double\"}\n data = np.frombuffer(s, dtype_map[magic], offset=8 + eff_dim * 4)\n data = data.reshape(tuple(dims))\n return data", "def load_data_file():\n data_mat = []\n label_mat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n data_mat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n label_mat.append(int(lineArr[2]))\n return data_mat, label_mat", "def load_dataset (fileName):\n # open(fileName).readline(): '1.000000 0.067732 3.176513'\n # numFeat = 2\n # numFeat = len(open(fileName).readline().split('\\t')) - 1\n fr = open(fileName)\n xArr, yArr = [], []\n\n for line in fr.readlines():\n lineArr = []\n # eg: ['1.000000', '0.067732', '3.176513']\n currentLine = line.strip().split('\\t') 
\n # X = [ [x0, x1], [x0, x1], .... ] (str -> float)\n for i in range(len(currentLine) - 1):\n lineArr.append(float(currentLine[i]))\n \n xArr.append(lineArr)\n # Y = [y1, y2, ... ]\n yArr.append(float(currentLine[-1]))\n\n return xArr, yArr", "def read_vec_features_from_list (list_fn, config_feat, fs_class, config, limit=None):\n\n # Create feature store\n fs = fs_class(config_feat)\n \n # Setup\n precision = config['precision']\n normalize_vec = config['normalize']\n\n # Open and read list file \n list_file = open(list_fn, 'r')\n num = 0\n had_output = False\n for ln in list_file:\n ln = ln.rstrip()\n (ky, vec_fn) = ln.split()\n vec = np.fromfile(vec_fn, precision)\n if normalize_vec:\n nrm = np.linalg.norm(vec, 2)\n if (nrm > 0.0):\n vec /= nrm\n fs.add(ky, {'vector':vec})\n num += 1\n if (num % 1000)==0:\n print '{}K '.format(num/1000), \n sys.stdout.flush()\n had_output = True\n if (limit is not None) and (num>=limit):\n break\n if had_output:\n print\n return fs", "def read_csv(data_file):\n labels_from_csv = []\n features_from_csv = []\n with open(data_file, 'r') as dfile:\n for line in dfile.readlines():\n row = line.strip().split(',')\n labels_from_csv.append(row[0]) \n features_from_csv.append([float(x) for x in row[1:]])\n return features_from_csv, labels_from_csv", "def read_binary_file(filename):\n with open(filename, 'rb') as f:\n data = f.read()\n return data", "def read_wav(file):\n f=wave.open(file,\"r\")\n raw_data=f.readframes(f.getnframes())\n array=np.fromstring(raw_data,np.short)\n array.shape=-1,2\n array=array.T.astype(float)[0]\n samplerate=f.getframerate()\n f.close()\n return feature_normalize(array),samplerate", "def load_data(file: Path) -> List[int]:\n with file.open() as f:\n values = f.read().split('\\n')\n return list(map(int, filter(None, values)))", "def read_features_from_file(filename, desc_dim=132):\n\n print filename\n f = np.loadtxt(filename)\n\n if f.shape[0] == 0:\n f = np.zeros((1, desc_dim))\n print filename\n return f[:, :4], f[:, 4:] # feature locations, descriptors", "def read_data_file(self, file_name):\n\n with open(file_name) as file:\n data_list = []\n line = file.readline()\n while line:\n data_list.append(int(line))\n line = file.readline()\n\n file.close()\n\n self.data = data_list", "def read_file(file_path):\n\n payload_list = list()\n\n if os.path.isfile(file_path):\n print(\"Loading File in: \" + file_path)\n\n with open(file_path, 'rb') as f:\n while True:\n chunk = f.read(32)\n if chunk:\n payload_list.append(chunk)\n else:\n break\n else:\n print(\"ERROR: file does not exist in PATH: \" + file_path)\n\n print(\"Length of the file in chunks: \" + str(len(payload_list)))\n\n return payload_list", "def read_libsvm_file(file_path):\n data = open(file_path).read().strip().split('\\n')\n observations = [data[i].split() for i in range(len(data))]\n\n y = [float(obs[0]) for obs in observations]\n # We add the intercept\n X = [['0:1'] + obs[1:] for obs in observations]\n X = [list(map(lambda x: float(x.split(':')[1]), obs)) for obs in X]\n return X, y" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This endpoint, /events, is called by the datepicker (census/static/datepicker/js/datepicker.js) to retrieve a list of events filtered by the selected date/month.
def get_events(data):
    query_params = data.GET.dict()
    if not query_params:
        # If no payload is passed to the request, simply fetch future approved events
        start_date = datetime.now(timezone(TIMEZONE))
        # TODO: When the user first visits the homepage, all events occurring
        # in the week are fetched. Should this be changed instead to display
        # only events for the current day?
        end_date = datetime.now(timezone(TIMEZONE)) + timedelta(days=7)
        events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,
                                             start_datetime__range=(start_date, end_date))\
            .order_by('start_datetime')
        return HttpResponse(json.dumps(make_events_data_response(events)))

    if 'isMonthly' in query_params and query_params['isMonthly'] == 'true':
        # Fetch events for the whole month
        month = int(query_params['month'])
        # TODO: Ensure that timezone differences are properly accounted for
        # when using the `__month` filter
        events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,
                                             start_datetime__month=month)\
            .order_by('start_datetime')
        return HttpResponse(json.dumps(make_events_data_response(events)))
    else:
        # Fetch events for a selected date
        day = query_params['day']
        month = query_params['month']
        year = query_params['year']
        start_date = datetime.strptime(f"{year}-{month}-{day} 00:00:00", "%Y-%m-%d %H:%M:%S")
        end_date = datetime.strptime(f"{year}-{month}-{day} 23:59:59", "%Y-%m-%d %H:%M:%S")
        current_timezone = timezone(TIMEZONE)
        events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,
                                             start_datetime__range=(current_timezone.localize(start_date),
                                                                    current_timezone.localize(end_date))) \
            .order_by('start_datetime')
        return HttpResponse(json.dumps(make_events_data_response(events)))
[ "def get_events():\n req = request\n start_date = request.args.get(\"start_date\")\n end_date = request.args.get(\"end_date\")\n desc = request.args.get(\"event_desc\")\n sqlx, sqlx_count = DBAccess.bld_query_sql(start_date, end_date, desc)\n \n list_result = DBAccess.get_events(sqlx, sqlx_count)\n if list_result[0] == 'error':\n sj = jsonify({\"events_error\": list_result[1]})\n else:\n sj = jsonify({\"events_details\": list_result[1]})\n return sj", "def get_all_events(request):\n events = Event.objects.all()\n data = serializers.serialize(\"json\", events)\n return HttpResponse(data, content_type=\"application/json\")", "def events(self):\n r = requests.get(self.uri+'events')\n r.raise_for_status()\n return r.json()", "def list_event(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_event\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1EventList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_ajax_month_calendar_and_event_list(self):\n response = self.client.get(\n reverse('calendar:cal_and_list_shift'),\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n self.assertEqual(response['Content-Type'], 'application/json')\n\n data = loads(response.content.decode('utf8'))\n self.assertEqual({}, data['events'])\n self.assertIn('month', data)\n self.assertIn('<table', data['calendar'])\n self.assertIn('month_and_year', data)", "def events_filter():\n #get the incoming parameters\n location = request.args.get(\"location\")\n category = request.args.get(\"category\")\n #get the given page and number of events or set them to default\n page = request.args.get(\"page\", default=1, type=int)\n per_page = request.args.get(\"limit\", default=15, type=int)\n #check which parameter was given and use it to query the database\n if location and category:\n #if both location and category have been given,filter by both\n found_events = Events.filter_events(location, category, page, per_page)\n if 
found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more {} events in {}\".format(category, location)}), 404\n elif location:\n found_events = Events.get_events_by_location(location, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more events in {}\".format(location)}), 404\n elif category:\n found_events = Events.get_events_by_category(category, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more {} events\".format(category)}), 404\n else:\n return jsonify({\"message\" : \"can not search events with the given parameter\"}), 400", "def all_events(request):\n\n events = Event.objects.all()\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n events = events.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n events = events.order_by(sortkey)\n \n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n events = events.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search criteria!\")\n return redirect(reverse('events'))\n \n queries = Q(name__icontains=query) | Q(description__icontains=query)\n events = events.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'events': events,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'events/events.html', context)", "def events_view(request, course_id):\n calendar_id = get_calendar_id_by_course_id(course_id)\n try:\n response = gcal_service.events().list(calendarId=calendar_id,\n pageToken=None).execute()\n events = [{\n \"id\": api_event[\"id\"],\n \"text\": api_event[\"summary\"],\n \"start_date\": from_google_datetime(api_event[\"start\"][\"dateTime\"]),\n \"end_date\": from_google_datetime(api_event[\"end\"][\"dateTime\"]),\n \"readonly\": not has_permission(request.user, api_event)\n } for api_event in response['items']]\n except Exception as e:\n log.exception(e)\n return JsonResponse(data={'errors': e}, status=500, safe=False)\n else:\n return JsonResponse(data=events, status=200, safe=False)", "def test_07_api_can_get_all_events(self):\n response = self.app.get('/api/events', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 2)", "def calendar_all(request, pYear, pMonth):\n lYear = int(pYear)\n lMonth = int(pMonth)\n lEvents = event_filter(Event.objects.all(),lYear,lMonth)\n lCalendar = EventCalendar(lEvents).formatmonth(lYear, lMonth)\n \n dict=make_dict(pYear,pMonth)\n dict['Calendar']=mark_safe(lCalendar)\n return render_to_response('cal/month.html',dict)", "def test_ajax_day_view(self):\n response = self.client.get(\n reverse(\n 'calendar:day_list',\n kwargs={'year': '2015', 
'month': '2', 'day': '2'}\n ),\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n self.assertEqual(response['Content-Type'], 'application/json')\n\n data = loads(response.content.decode('utf8'))\n self.assertEqual([], data['events'])", "def list_events_for_month(self,calendars,year,month,resources=None,event_types=None):\n response = self.api.listeventsformonth(calendars=calendars,\n year=year,\n month=month,\n resources=resources,\n event_types=event_types)\n \n return [EventImage(event) for event in response]", "def search():\n #get the name given\n name = request.args.get('q')\n #get the given page and number of events or set them to default\n page = request.args.get(\"page\", default=1, type=int)\n per_page = request.args.get(\"limit\", default=15, type=int)\n if name:\n found_events = Events.get_events_by_name(name, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more events matching the given name\"}), 404\n return jsonify({\"message\" : \"can not search events, provide event name\"}), 400", "def list_events(self,calendar,start,end,event_types=None,resources=None,ical=False):\n response = self.api.listevents(calendar=calendar,\n start=start.strftime('%Y-%m-%d %H:%M:%S'),\n end=end.strftime('%Y-%m-%d %H:%M:%S'),\n event_types=event_types,\n resources=resources,\n render_ical=unicode(ical))\n \n return response", "def list(self, limit: int=10, sort: Optional[str]='date', direction: Optional[str] ='asc') -> List[MispEvent]:\n url = '/events/index/sort:%s/direction:%s/limit:%d' % (sort, direction, limit)\n raw = self.server.GET(url)\n response = objectify.fromstring(raw)\n events = []\n for evtobj in response.Event:\n events.append(MispEvent.from_xml_object(evtobj))\n return events", "def get_all_events():\n\n events = Event.query.all() # list of objs\n\n events_list = []\n\n for event in events:\n events_list.append(as_dict(event))\n\n return jsonify(events_list)", "def scrape_month(month,year):\n print \"Scraping %02i/%i\"%(month,year)\n url = calendar_url%(month,year)\n req = urllib2.urlopen(url)\n if req.getcode() != 200:\n raise \"Failed to fetch, error %i\"%req.getcode()\n raw = req.read()\n soup = BeautifulSoup(raw)\n caldiv = soup.find('div', {'class':'CalendarContent'})\n days = caldiv.findAll('div', {'class':'CalendarCell'})\n events = []\n for day in days:\n events.extend(events_from_day(day, month, year))\n return events", "def events(request):\n try:\n if request.method == 'GET':\n events_list = Events.retrieve_all()\n if events_list is not []: # not empty list\n node_id = request.GET.get('node_id', '')\n user_id = request.GET.get('user_id', '')\n status = request.GET.get('status', '')\n\n if status is not '' and status not in data_checker.VALID_EVENT_STATUS_LIST:\n raise ValueError('Status ' + status + ' is not valid')\n\n node_search = node_id is not ''\n user_search = user_id is not ''\n status_search = status is not ''\n\n events_search_list = []\n\n if node_search or user_search or status_search: # has parameters to search\n if node_search and user_search and status_search: # search by node, user and status\n for event in events_list:\n if event['node_id'] == node_id and event['user_id'] == user_id and event['status'] == status:\n events_search_list.append(event)\n\n elif node_search and user_search: # search by node and user\n for event in events_list:\n if event['node_id'] == node_id and event['user_id'] == user_id:\n 
events_search_list.append(event)\n\n elif user_search and status_search: # search by user and status\n for event in events_list:\n if event['user_id'] == user_id and event['status'] == status:\n events_search_list.append(event)\n\n elif node_search and status_search: # search by node and status\n for event in events_list:\n if event['node_id'] == node_id and event['status'] == status:\n events_search_list.append(event)\n\n elif user_search: # search only by user\n for event in events_list:\n if event['user_id'] == user_id:\n events_search_list.append(event)\n\n elif node_search: # search only by node\n for event in events_list:\n if event['node_id'] == node_id:\n events_search_list.append(event)\n\n elif status_search: # search only by status\n for event in events_list:\n if event['status'] == status:\n events_search_list.append(event)\n\n resp = {\n 'success': 'true',\n 'data': events_search_list\n }\n\n else: # all without parameters\n resp = {\n 'success': 'true',\n 'data': events_list\n }\n\n else:\n resp = {\n 'success': 'true',\n 'data': events_list\n }\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n data_checker.check_event(data, request.method)\n\n created_event_key = Events.create(data)\n\n if created_event_key:\n print 'create event successful'\n if 'user_id' not in data:\n resp = {\n 'success': 'true',\n 'data': created_event_key\n }\n else:\n all_events_list = Events.retrieve_all()\n resp_events = []\n for event in all_events_list:\n if event['user_id'] == data['user_id'] and event['status'] == data_checker.EVENT_UNREAD:\n resp_events.append(event)\n\n resp = {\n 'success': 'true',\n 'data': resp_events\n }\n else:\n raise RuntimeError('Orchestrate service temporarily unavailable')\n else:\n raise NotImplementedError('Only GET, POST methods are allowed')\n\n return JSONResponse(resp)\n except Exception, e:\n err = {\n 'success': 'false',\n 'data': {},\n 'err_message': str(e)\n }\n\n return JSONResponse(err)", "def find_venue_events(venue_id, page):\n\n if session['startdate']:\n start_date = session['startdate']\n else:\n start_date = None\n\n if session['enddate']:\n end_date = session['enddate']\n else:\n end_date = None\n\n params = {'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'venue.id': venue_id,\n 'datetime_local.gte': start_date,\n 'datetime_local.lte': end_date,\n 'type': 'concert',\n 'per_page': 20,\n 'page': page}\n\n response = requests.get(SG_URL + 'events', params=params)\n\n return response.json()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebases the given procedure and any nested procedures.
def update (self, procedure: Instruction) -> None: self.__rebase_instruction (procedure) for inst in procedure.instructions (): self.update (inst)
[ "def rebase_source_info (procedure: Instruction) -> ProcedureRecord:\n\n rebaser = _LineRebaser ()\n rebaser.update (procedure)\n return ProcedureRecord (procedure=procedure, line_base=rebaser.base ())", "def propositional_skeleton_helper(self, map):\n root = self.root\n\n if is_unary(root):\n for key in map:\n if self.first == map[key]: # first arg already in the map\n return PropositionalFormula(root, PropositionalFormula(key)), map\n\n first, first_map = self.first.propositional_skeleton_helper(map)\n\n return PropositionalFormula(root, first), map\n\n elif is_binary(root):\n for key in map:\n if self.first == map[key]:\n first = PropositionalFormula(key) # first arg already in the map\n break\n else:\n # first arg not in the map yet\n first, first_map = self.first.propositional_skeleton_helper(map)\n\n for key in map:\n if map[key] == self.second:\n second = PropositionalFormula(key) # second arg already in the map\n break\n else:\n # second arg not in the map yet\n second, second_map = self.second.propositional_skeleton_helper(map)\n\n return PropositionalFormula(root, first, second), map\n\n else:\n for key in map:\n if map[key] == self: # formula in map already\n return PropositionalFormula(key), map\n\n var_name = next(fresh_variable_name_generator)\n map[var_name] = self\n return PropositionalFormula(var_name), map", "def _parse_procedure(self, procedure_dict):\r\n raise NotImplementedError()", "def LoopBackrubRef(self):\n movemap=MoveMap()\n ft = self.pose.fold_tree(); ft_o = FoldTree()\n ft_o.assign(ft)\n ft.clear()\n ft, movemap, loops_object=loop_tools.InitializeLoops(self.pose, self.input_class.loops_as_strings, ft, movemap)\n print ft\n print \"Fold Tree Correct? \" + repr(ft.check_fold_tree())\n self.pose.fold_tree(ft)\n rounds=int(rounds)\n if self.score_class.score ==0:\n self.score_class.score = create_self.score_class.score_function_ws_patch('standard', 'self.score_class.score12')\n self.score_class.score.set_weight(chainbreak, 100); #Taking No Chances!\n print loops_object\n ref=LoopMover_Refine_Backrub(loops_object, self.score_class.score)\n\n self.run_protocol(ref)\n ft.assign(ft_o)\n self.pose.fold_tree(ft)\n self.score_class.score.set_weight(chainbreak, 0)", "def createProcedure(self):\n try:\n mycursor = self.mydb.cursor()\n mycursor.execute(\"DROP PROCEDURE IF EXISTS EmpDep;\")\n mycursor.execute(\"CREATE PROCEDURE EmpDep() BEGIN select * from Employees JOIN Department where Department.Dept_id=Employees.Dept_id; END\")\n self.mydb.commit()\n except Exception as e:\n print(\"Error\", e)", "def reapply_recursive(p: Pass):\n # If pass should not reapply, skip\n if p in applied_passes and not p.should_reapply(applied_passes[p]):\n return\n\n # Check dependencies first\n for dep in self._depgraph.predecessors(p):\n yield from reapply_recursive(dep)\n\n yield p", "def rebase_path(\n path: pathlib.Path, root: pathlib.Path, new_root: pathlib.Path\n) -> pathlib.Path:\n return new_root / path.relative_to(root)", "def proc(self, proc=None, owner=None, schema=None):\r\n cur = self.begin()\r\n if proc:\r\n cur.procedurecolumns(schema, owner, proc, None)\r\n else:\r\n cur.procedures(schema, owner, None)\r\n self.commit(cur)\r\n self.display()", "def rebase(info):\n get_segment_name = idaapi.get_segm_name if hasattr(idaapi, 'get_segm_name') else idaapi.get_true_segm_name\n functions, globals = map(utils.fcompose(sorted, list), [database.functions(), internal.netnode.alt.fiter(internal.comment.tagging.node())])\n\n p = ui.Progress()\n p.update(current=0, title=u\"Rebasing 
tagcache...\", min=0, max=sum(len(item) for item in [functions, globals]))\n fcount = gcount = 0\n\n scount = info.size()\n segmap = {info[si].to : info[si]._from for si in range(scount)}\n listable = sorted(segmap)\n six.print_(u\"{:s}.rebase({:#x}, {:#x}) : Rebasing tagcache for {:d} segments.\".format(__name__, segmap[listable[0]], listable[0], scount))\n\n # for each segment\n p.open()\n for si in range(scount):\n seg = idaapi.getseg(info[si].to)\n\n msg = u\"Rebasing tagcache for segment {:d} of {:d}{:s}: {:#x} ({:+#x}) -> {:#x}\".format(1 + si, scount, \" ({:s})\".format(get_segment_name(seg)) if seg else '', info[si]._from, info[si].size, info[si].to)\n p.update(title=msg), six.print_(msg)\n\n # for each function (using target address because ida moved the netnodes for us)\n listable = [ea for ea in functions if info[si].to <= ea < info[si].to + info[si].size]\n for i, offset in __rebase_function(info[si]._from, info[si].to, info[si].size, (item for item in listable)):\n name = database.name(info[si].to + offset)\n text = u\"Relocating function {:d} of {:d}{:s}: {:#x} -> {:#x}\".format(i + fcount, len(functions), \" ({:s})\".format(name) if name else '', info[si]._from + offset, info[si].to + offset)\n p.update(value=sum([fcount, gcount, i]), text=text)\n ui.navigation.procedure(info[si].to + offset)\n fcount += len(listable)\n\n # for each global\n listable = [(ea, count) for ea, count in globals if info[si]._from <= ea < info[si]._from + info[si].size]\n for i, offset in __rebase_globals(info[si]._from, info[si].to, info[si].size, (item for item in listable)):\n name = database.name(info[si].to + offset)\n text = u\"Relocating global {:d} of {:d}{:s}: {:#x} -> {:#x}\".format(i + gcount, len(globals), \" ({:s})\".format(name) if name else '', info[si]._from + offset, info[si].to + offset)\n p.update(value=sum([fcount, gcount, i]), text=text)\n ui.navigation.analyze(info[si].to + offset)\n gcount += len(listable)\n p.close()", "def _recrute_pacbporfs_from_parental_cbg(self,parentcbg,\n create_cache=True,\n ignore_nonexisting_edges=False,verbose=False):\n replacements = {}\n substituted = 0\n\n ####################################################################\n if verbose:\n stw = StopWatch(\"recruteParentalPacbps\")\n print stw.start()\n print \"target:\", self\n print \"source:\", parentcbg\n #################################################################### \n\n for (node1,node2) in self.pairwisecrosscombinations_node():\n # if this edge is not present in the parent, ignore it\n if not parentcbg.has_edge(node1,node2): continue\n # get PacbPORF of the parent\n origpacbporf = parentcbg.get_pacbps_by_nodes(\n\t\t node1=node1,node2=node2)[0]\n curpacbporf = None\n replace_pacbporf = False\n if not self.has_edge(node1,node2):\n if ignore_nonexisting_edges:\n # if ignore_nonexisting_edges -> do not recrute this pacbp\n continue \n else:\n # replace this Pacbporf if it exists and\n\t\t # simultaniously create novel edge\n replace_pacbporf = True\n elif self.has_edge(node1,node2) and not\\\n\t self.get_pacbps_by_nodes(node1=node1,node2=node2):\n replace_pacbporf = True\n else:\n curpacbporf = self.get_pacbps_by_nodes(\n\t\t\tnode1=node1,node2=node2)[0]\n if pacb.comparison.IsIdenticalPacbPORF(\n\t\t origpacbporf,curpacbporf):\n # Pacbporfs are already identical; not relevant to copy\n continue\n if origpacbporf.issuperset(curpacbporf):\n # store to replacements dict\n replacements[(node1,node2)] = curpacbporf\n # remove from the CBG -> replacement in progress\n 
self.remove_pacbp(curpacbporf,node1,node2)\n replace_pacbporf = True\n\n # check if replace_pacbporf is set to True\n if replace_pacbporf:\n ################################################################\n if verbose:\n print stw.lap(), \"REPLACING PacbPORF Source->Target:\"\n print \"T:\", curpacbporf, \"(current)\"\n print \"S:\", origpacbporf\n origpacbporf.print_protein(_linesize=100) \n ################################################################\n newkey = origpacbporf.construct_unique_key(node1,node2)\n self.set_edge_weight( node1, node2, wt=origpacbporf.bitscore )\n self.pacbps[(newkey,node1,node2)] = origpacbporf\n substituted+=1\n\n # check if substitutions have been taken place\n if create_cache and substituted:\n #####################################################################\n if verbose:\n print stw.lap(), \"CREATE_CACHE & substituted PacbPORFS:\",\n print substituted, \"edges:\", len(self.weights)/2,\n print \"pacbps:\", len(self.pacbps)\n ####for k,pacbporf in self.pacbps.iteritems():\n #### print k,\"\\n\",pacbporf\n #####################################################################\n self.clear_cache()\n # check if there is an OMSR upon recreation; in very\n # exceptional cases, OMSR can get lost in this step\n if self.has_overall_minimal_spanning_range():\n self.create_cache()\n self.update_edge_weights_by_minimal_spanning_range()\n else:\n #############################################################\n if verbose:\n print stw.lap(), \"OMSR got lost!\",\n print \"replacements:\", len(replacements)\n for (n1,n2), curpacbporf in replacements.iteritems():\n print \"REP:\", curpacbporf, n1, n2\n #############################################################\n # OMSR got lost! Restore replacements dict and as such\n # restore the original PacbPs one by one (in random order)\n # and quit as soon as an OMSR is restored\n for (node1,node2),curpacbporf in replacements.iteritems():\n newkey = curpacbporf.construct_unique_key(node1,node2)\n tobereplpacbporf = self.get_pacbps_by_nodes(node1=node1,node2=node2)[0]\n # remove from the CBG\n self.remove_pacbp(tobereplpacbporf,node1,node2)\n # and place back the original one\n self.set_edge_weight( node1, node2, wt=curpacbporf.bitscore )\n self.pacbps[(newkey,node1,node2)] = curpacbporf\n substituted-=1\n if self.has_overall_minimal_spanning_range():\n self.create_cache()\n self.update_edge_weights_by_minimal_spanning_range()\n #########################################################\n if verbose:\n print stw.lap(), \"OMSR restored, substitutions:\",\n print substituted \n print \"T:\", self\n ##########################################################\n # break out of the for loop of PacbP replacement\n break\n\n # return number of replaced/added pacbporfs\n return substituted", "def _reconstruction_calls(self, split_mixed_op, split_trace_op):\n from firedrake.assemble import OneFormAssembler\n\n # We always eliminate the velocity block first\n id0, id1 = (self.vidx, self.pidx)\n\n # TODO: When PyOP2 is able to write into mixed dats,\n # the reconstruction expressions can simplify into\n # one clean expression.\n A = Tensor(split_mixed_op[(id0, id0)])\n B = Tensor(split_mixed_op[(id0, id1)])\n C = Tensor(split_mixed_op[(id1, id0)])\n D = Tensor(split_mixed_op[(id1, id1)])\n K_0 = Tensor(split_trace_op[(0, id0)])\n K_1 = Tensor(split_trace_op[(0, id1)])\n\n # Split functions and reconstruct each bit separately\n split_residual = self.broken_residual.subfunctions\n split_sol = self.broken_solution.subfunctions\n g = 
AssembledVector(split_residual[id0])\n f = AssembledVector(split_residual[id1])\n sigma = split_sol[id0]\n u = split_sol[id1]\n lambdar = AssembledVector(self.trace_solution)\n\n M = D - C * A.inv * B\n R = K_1.T - C * A.inv * K_0.T\n u_rec = M.solve(f - C * A.inv * g - R * lambdar,\n decomposition=\"PartialPivLU\")\n self._sub_unknown = OneFormAssembler(u_rec,\n tensor=u,\n form_compiler_parameters=self.ctx.fc_params).assemble\n\n sigma_rec = A.solve(g - B * AssembledVector(u) - K_0.T * lambdar,\n decomposition=\"PartialPivLU\")\n self._elim_unknown = OneFormAssembler(sigma_rec,\n tensor=sigma,\n form_compiler_parameters=self.ctx.fc_params).assemble", "def parse_procedure(procedure):\n\n parsed_procedure = []\n\n for step in procedure.get('order'):\n if step != '':\n if is_911(step):\n current_main_step = extract_911_clauses(step)\n for substep in procedure.get('steps').get(step):\n substep['type'] = '911-conditional-list-item'\n current_main_step['substeps'].append(substep)\n parsed_procedure += [current_main_step]\n elif is_doctor(step):\n current_main_step = extract_doctor_clauses(step)\n for substep in procedure.get('steps').get(step):\n substep['type'] = 'doctor-conditional-list-item'\n current_main_step['substeps'].append(substep)\n parsed_procedure += [current_main_step]\n elif is_list(step):\n current_main_step = {'text': step, 'type': 'list', 'substeps': []}\n for substep in procedure.get('steps').get(step):\n substep['type'] = 'list-item'\n current_main_step['substeps'].append(substep)\n parsed_procedure += [current_main_step]\n else:\n current_main_step = parse_step(step, [], True)\n\n for substep in procedure.get('steps').get(step):\n current_main_step[0]['substeps'] += parse_step(substep.get('text'), substep.get('links'), False)\n parsed_procedure += current_main_step\n\n graph = {}\n for i, p in enumerate(parsed_procedure):\n graph[i] = p\n return graph", "def transform_one_root():\n global tot_block_len, GRAPH, NODE_OPS, tot_block_len, OP_PARENTS_NUM, \\\n priorities, predecessor_count, OP_CHILDREN_NUM, successor_count\n roots = find_roots()\n if len(roots) == 1:\n return roots.pop()\n if len(roots) < 1:\n print(\"ERROR: graph doesn't have any roots\")\n return None\n\n root_op = IRArray(9, None, None, None)\n setattr(root_op, \"line_num\", tot_block_len + 1)\n # print(\"length: %d. 
blcok len %d\" % (len(NODE_OPS), tot_block_len))\n NODE_OPS[tot_block_len + 1] = root_op\n\n new_root = tot_block_len + 1\n # TODO: map the new root to its operation (a NOP) in here\n for root in roots:\n REV_GRAPH[root][new_root] = False\n GRAPH[new_root][root] = False\n # OP_PARENTS_NUM[root] += 1 # PROBABLY DON'T NEED TO DO THIS\n # # increment its parents count\n # OP_CHILDREN_NUM[new_root] += 1\n # # increment its children count\n\n priorities.append(0)\n OP_CHILDREN_NUM.append(0)\n OP_PARENTS_NUM.append(0)\n predecessor_count.append(0)\n successor_count.append(0)\n\n return new_root", "def startSubroutine(self):\n self.subroutine_scope = []", "def _fixupParents(self, aprinter):\n name = aprinter.name\n i = name.rfind(\".\")\n rv = None\n while (i > 0) and not rv:\n substr = name[:i]\n if substr not in self.printerDict:\n self.printerDict[substr] = PlaceHolder(aprinter)\n else:\n obj = self.printerDict[substr]\n if isinstance(obj, Printer):\n rv = obj\n else:\n assert isinstance(obj, PlaceHolder)\n obj.append(aprinter)\n i = name.rfind(\".\", 0, i - 1)\n if not rv:\n rv = self.print_root\n aprinter.parent = rv", "def _create_subtree_aggregation(num, procname):\n first = CoopNode(\"||\", \"coop\")\n first.cooptype = \"par\"\n last = first\n for i in list(range(2, num+1)):\n nl = ProcdefNode(procname, \"procdef\")\n if i == num:\n nr = ProcdefNode(procname, \"procdef\")\n last.right = nr\n else:\n nr = CoopNode(\"||\", \"coop\")\n nr.cooptype = \"par\"\n last.right = nr\n last.left = nl\n last = nr\n return first", "def rebuild():", "def remove_left_recursion(g):\n temp_grammar = copy(g)\n new_grammar = Grammar(start=temp_grammar.start, epsilon=temp_grammar.epsilon, eof=temp_grammar.eof)\n nonterminals = nonterminal_ordering(temp_grammar)\n\n for i in range(0, len(nonterminals)):\n ai = nonterminals[i]\n for j in range(0, i):\n aj = nonterminals[j]\n for p_ai in temp_grammar.productions[ai]:\n # For each production of the form Ai -> Aj y\n if p_ai.body and aj == p_ai.body[0]:\n replaced_productions = [Rule(ai, p_aj.body + p_ai.body[1:]) for p_aj in\n temp_grammar.productions[aj]]\n can_remove_productions = any(map(lambda x: x.is_left_recursive(), replaced_productions))\n # Replace productions only if there were left-recursive ones\n if can_remove_productions:\n temp_grammar.remove_rule(p_ai)\n for p in replaced_productions:\n temp_grammar.add_rule(p)\n\n new_productions = remove_immediate_left_recursion(temp_grammar, ai)\n for p in new_productions:\n new_grammar.add_rule(p)\n\n return __normalize_productions(new_grammar)", "def rebase(self, backing_file_base, backing_file_top):\n utils.execute('qemu-img', 'rebase', '-u', '-b', backing_file_base, backing_file_top, run_as_root=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Modifies an individual instruction so that its line number is relative to the start of its containing procedure. If this is the first instruction of the procedure, then it establishes the base value to be subtracted from subsequent instructions.
def __rebase_instruction (self, inst: Instruction) -> None: locn = inst.locn () if locn is not None: assert isinstance (locn, SourceLocation) if self.__base is None: assert locn.line is not None self.__base = locn.line assert locn.line >= self.__base locn.line -= self.__base
[ "def rebase_source_info (procedure: Instruction) -> ProcedureRecord:\n\n rebaser = _LineRebaser ()\n rebaser.update (procedure)\n return ProcedureRecord (procedure=procedure, line_base=rebaser.base ())", "def adjust_relative_base(state, relative_base_ix):\n state.relative_base += state.intcode[relative_base_ix]", "def instruction_CALL(self, inst):\n\t\tif inst[-2:] == \"00\":\n\t\t\tjmpdest = int(self.regbank[inst[-5:-2]], 16)\n\t\telif inst[-2:] == \"01\":\n\t\t\tval = int(self.regbank[inst[-5:-2]], 16)\n\t\t\tif val > 32767:\n\t\t\t\tval -= 65536\n\t\t\tjmpdest = int(self.regbank[\"pc\"], 16) + val\n\t\telif inst[-2:] == \"10\":\n\t\t\tjmpdest = int(self.nextInstruction(), 16)\n\t\telif inst[-2:] == \"11\":\n\t\t\tval = int(self.nextInstruction(), 16)\n\t\t\tif val > 32767:\n\t\t\t\tval -= 65536\n\t\t\tjmpdest = int(self.regbank[\"pc\"], 16) + val\n\t\tintsp = int(self.regbank[\"sp\"], 16)\n\t\tself.setWord(intsp, inttohex(2*self.pc))\n\t\tself.regbank[\"sp\"] = inttohex(intsp-2)\n\t\tself.pc = int(jmpdest / 2)", "def getInstructionStart(self,address):\n \"\"\"is in the middle of an instruction, Hopper will look back to find the first byte of this instruction.\"\"\"\n return HopperLowLevel.nearestBlock(self.__internal_segment_addr__,address)", "def shift(self, start):\n if self.line > 1:\n self.line += start.line - 1\n else:\n self.line = start.line\n self.column += start.column", "def position_before(self, instr):\n _core.LLVMPositionBuilderBefore(self.ptr, instr.ptr)", "def precmd(self, line):\n for handler in self._precmd_handlers:\n line = handler(line)\n\n # After doing preprocessing, pass it off to the super class(es) for\n # whatever they want to do with it.\n line = pdb.Pdb.precmd(self, line)\n\n return line", "def step(self):\n\n\tif self.stopped: return # Do nothing when the machine is stopped\n\t# 2.3: \"The CI is always incremented prior to fetching an\n\t# instruction for execution...\"\n\tself.CI = comp2( (self.CI + 1) )\n\n\t# Fetch the instruction\n\tinst = self.store[ self.CI & 31]\n\n\t# Decode the line number affected by the instruction, and the\n\t# function number\n\tlineno, funcno = inst & 31, (inst >> 13) & 7\n\n\tassert 0<= funcno <=7\n\tif funcno == 0:\n\t # s,C : JMP : Copy content of Store line to CI\n\t self.CI = self.store[ lineno ]\n\telif funcno == 1:\n\t # c+s,C : JRP : Add content of Store line to CI\n\t self.CI = comp2(self.CI + self.store[ lineno ])\n\telif funcno == 2:\n\t # -s,A : LDN : Copy content of Store line, negated, to accum\n\t self.accum = comp2 (- self.store[ lineno ])\n\telif funcno == 3:\n\t # a,S : STO : Copy content of acc. 
to Store line\n\t self.store[ lineno ] = self.accum\n\telif funcno == 4 or funcno==5:\n\t # a-s,A : SUB : Subtract content of Store line from accum\n\t self.accum = comp2( self.accum - self.store[ lineno ] )\n\telif funcno == 6:\n\t # Test : CMP : Skip next instruction if content of accum\n\t # is negative\n\t if self.accum < 0: self.CI = comp2(self.CI + 1)\n\telif funcno == 7:\n\t # Stop : STOP : Light \"Stop\" neon and halt the machine\n\t self.stopped = 1\n\t\n\t# Assertions to test invariants\n\tassert -pow(2,31) <= self.accum <pow(2,31)\n\tassert -pow(2,31) <= self.store[ lineno ] <pow(2,31)\n\tassert -pow(2,31) <= self.CI <pow(2,31)", "def instruction_RET(self, inst):\n\t\tintsp = int(self.regbank[\"sp\"], 16) + 2\n\t\tself.pc = int(int(self.getWord(intsp), 16) / 2)\n\t\tself.regbank[\"sp\"] = inttohex(intsp)", "def start_subtract_one(ival):\n if not is_insertion(ival):\n ival.start -= 1\n return ival", "def increment_incline(self, by=0.5):\n self.incline += by", "def getStartingAddress(self):\n return HopperLowLevel.getBasicBlockStartingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)", "def jmp(self, offset):\n self.ip += int(offset)", "def __sub__(self, offset):\n return self + -offset", "def run_instruction(self, instr):\n nibs = [(instr[0] & 0xF0) >> 4,\n instr[0] & 0x0F,\n (instr[1] & 0xF0) >> 4,\n instr[1] & 0x0F]\n\n #print(instr.hex(), nibs)\n instr_i = int(instr[0] * 256 + instr[1])\n\n increment_pc = True\n\n\n if instr_i == 0x00E0:\n # 00e0\n # CLS\n self.clear_screen()\n elif instr_i == 0x00EE:\n # 00ee\n # RET\n self.ret()\n elif nibs[0] == 0:\n # 0nnn\n # SYS addr\n pass\n elif nibs[0] == 1:\n # 1nnn\n # JP addr\n # addr is 12-bit\n self.jump(address=instr_i & 0x0FFF)\n increment_pc = False\n elif nibs[0] == 2:\n # 2nnn\n # CALL addr\n self.call(address=instr_i & 0x0FFF)\n increment_pc = False\n elif nibs[0] == 3:\n # 3xbb\n # SE Vx, byte\n self.skip_if_equalv(register=nibs[1], value=instr[1])\n elif nibs[0] == 4:\n # 4xbb\n # SNE Vx, byte\n self.skip_if_not_equalv(register=nibs[1], value=instr[1])\n elif nibs[0] == 5 and nibs[3] == 0:\n # 5xy0\n # SE Vx, Vy\n self.skip_if_equalr(register1=nibs[1], register2=nibs[2])\n elif nibs[0] == 6:\n # 6xkk\n # LD Vx, byte\n self.loadv(register=nibs[1], value=instr[1])\n elif nibs[0] == 7:\n # 7xkk\n # ADD Vx, byte\n self.add(register=nibs[1], value=instr[1])\n elif nibs[0] == 8:\n if nibs[3] == 0:\n # 8xy0\n # LD Vx, Vy\n self.loadr(target_register=nibs[1], source_register=nibs[2])\n elif nibs[3] == 1:\n # 8xy1\n # OR Vx, Vy\n self.orr(register1=nibs[1], register2=nibs[2])\n elif nibs[3] == 2:\n # 8xy2\n # AND Vx, Vy\n self.andr(register1=nibs[1], register2=nibs[2])\n elif nibs[3] == 3:\n # 8xy3\n # XOR Vx, Vy\n self.xorr(register1=nibs[1], register2=nibs[2])\n elif nibs[3] == 4:\n # 8xy4\n # ADD Vx, Vy\n self.addr(register1=nibs[1], register2=nibs[2])\n elif nibs[3] == 5:\n # 8xy5\n # SUB Vx, Vy\n self.subr(register1=nibs[1], register2=nibs[2])\n elif nibs[3] == 6:\n # 8xy6\n # SHR Vx, {Vy}\n self.shift_rightr(register=nibs[1])\n elif nibs[3] == 7:\n # 8xy7\n # SUBN Vx, Vy\n self.subnr(register1=nibs[1], register2=nibs[2])\n elif nibs[3] == 0xE:\n # 8xyE\n # SHL Vx, {Vy}\n self.shift_leftr(register=nibs[1])\n elif nibs[0] == 9 and nibs[3] == 0:\n # 9xy0\n # SNE Vx, Vy\n self.skip_if_not_equalr(register1=nibs[1], register2=nibs[2])\n elif nibs[0] == 0xA:\n # Annn\n # LD I, addr\n self.load_memory_register(address=instr_i & 0x0FFF)\n elif nibs[0] == 0xB:\n # Bnnn\n 
# JP V0, addr\n self.jump_add(address=instr_i & 0x0FFF)\n elif nibs[0] == 0xC:\n # Cxkk\n # RND Vx, byte\n self.rnd_and(register=nibs[1], value=instr[1])\n elif nibs[0] == 0xD:\n # Dxyn\n # DRW Vx, Vy, size\n self.draw_sprite(register1=nibs[1], register2=nibs[2], sprite_size=nibs[3])\n elif nibs[0] == 0xE and instr[1] == 0x9E:\n # Ex9E\n # SKP Vx\n self.skip_if_key_pressed(key_register=nibs[1])\n elif nibs[0] == 0xE and instr[1] == 0xA1:\n # ExA1\n # SKNP Vx\n self.skip_if_key_not_pressed(key_register=nibs[1])\n elif nibs[0] == 0xF:\n if instr[1] == 0x07:\n # Fx07\n # LD Vx, DT\n self.read_delay_timer(register=nibs[1])\n elif instr[1] == 0x0A:\n # Fx0A\n # LD Vx, K\n self.wait_and_load_key(register=nibs[1])\n elif instr[1] == 0x15:\n # Fx15\n # LD DT, Vx\n self.set_delay_timer(register=nibs[1])\n elif instr[1] == 0x18:\n # Fx18\n # LD ST, Vx\n self.set_sound_timer(register=nibs[1])\n elif instr[1] == 0x1E:\n # Fx1E\n # ADD I, Vx\n self.add_to_I(register=nibs[1])\n elif instr[1] == 0x29:\n # Fx29\n # LD F, Vx\n self.set_I_to_digit_sprite(register=nibs[1])\n elif instr[1] == 0x33:\n # Fx33\n # LD B, Vx\n self.set_mem_to_bcd(register=nibs[1])\n elif instr[1] == 0x55:\n # Fx55\n # LD [I], Vx\n self.store_to_mem(register_to=nibs[1])\n elif instr[1] == 0x65:\n # Fx0A\n # LD Vx, [I]\n self.read_mem(register_to=nibs[1])\n else:\n # do nothing - illegal instruction\n print(\"Illegal instruction: {}\".format(instr.hex()))\n\n return increment_pc", "def forward_pc(self):\r\n self.pc += INSTRUCTION_SIZE", "def instruction_JMP(self, inst):\n\t\tif inst[-2:] == \"00\":\n\t\t\tjmpdest = int(self.regbank[inst[-5:-2]], 16)\n\t\telif inst[-2:] == \"01\":\n\t\t\tval = int(self.regbank[inst[-5:-2]], 16)\n\t\t\tif val > 32767:\n\t\t\t\tval -= 65536\n\t\t\tjmpdest = int(self.regbank[\"pc\"], 16) + val\n\t\telif inst[-2:] == \"10\":\n\t\t\tjmpdest = int(self.nextInstruction(), 16)\n\t\telif inst[-2:] == \"11\":\n\t\t\tval = int(self.nextInstruction(), 16)\n\t\t\tif val > 32767:\n\t\t\t\tval -= 65536\n\t\t\tjmpdest = int(self.regbank[\"pc\"], 16) + val\n\t\tif inst[-10] == \"1\":\n\t\t\tself.pc = int(jmpdest / 2)\n\t\telif int(inst[-9:-5], 2) & int(hextobin(self.regbank[\"flags\"])[-16:-12], 2) > 0:\n\t\t\tself.pc = int(jmpdest / 2)", "def single_slope_subtract(file__read, num_points_to_average_beg=50, num_points_to_average_end=50):\n\n # mean of first N points\n avg_y_beg = file__read.iloc[:num_points_to_average_beg].mean()\n\n # mean of last N points\n avg_y_end = file__read.iloc[-num_points_to_average_end:].mean()\n\n # x value for beginning (assume first xval)\n first_xval = file__read.first_valid_index()\n\n # x value for ending (assume last xval)\n last_xval = file__read.last_valid_index()\n\n slope = (avg_y_end - avg_y_beg) / (last_xval - first_xval)\n # y' = mx\n # caveat...only works with monitoring a single mass-- update 7/13/17 appears to have fixed this...\n # y_prime = pd.DataFrame(slope.values * file_read.index.values, columns=file_read.columns)\n\n y_prime = pd.DataFrame(np.matmul(file__read.index.values[:, np.newaxis], np.transpose(slope.values[:, np.newaxis])),\n columns=file__read.columns)\n\n y_prime.index = file__read.index\n\n # first attempt at fix\n # y_prime = slope.values[0]*file_read.index+avg_y_beg\n # ynew = y - m_hat*x\n difference = file__read - y_prime\n difference = difference - difference.iloc[:(num_points_to_average_beg)].mean()\n\n new_file_read = difference\n\n return new_file_read", "def prev_line(rule):\n return shift_line(-1, rule)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rebases the sourceline correspondence for a procedure such that all of the instructions are relative rather than absolute. That is, the original line numbers can be recovered by adding the 'base' line number to the source lines stored in the instruction's location.
def rebase_source_info (procedure: Instruction) -> ProcedureRecord: rebaser = _LineRebaser () rebaser.update (procedure) return ProcedureRecord (procedure=procedure, line_base=rebaser.base ())
[ "def adjust_relative_base(state, relative_base_ix):\n state.relative_base += state.intcode[relative_base_ix]", "def __rebase_instruction (self, inst: Instruction) -> None:\n\n locn = inst.locn ()\n if locn is not None:\n assert isinstance (locn, SourceLocation)\n if self.__base is None:\n assert locn.line is not None\n self.__base = locn.line\n assert locn.line >= self.__base\n locn.line -= self.__base", "def precmd(self, line):\n for handler in self._precmd_handlers:\n line = handler(line)\n\n # After doing preprocessing, pass it off to the super class(es) for\n # whatever they want to do with it.\n line = pdb.Pdb.precmd(self, line)\n\n return line", "def _AddBaselineToBaselineNameToBaselinesMap(baseline):\n baseline_name = dgen_decoder.BaselineName(baseline)\n bases = BASELINE_NAME_TO_BASELINES_MAP.get(baseline_name)\n if bases == None:\n bases = set()\n BASELINE_NAME_TO_BASELINES_MAP[baseline_name] = bases\n bases.add(baseline)", "def discover_line_and_source(self, node, bias, bias_if_present):\n if node.line is None:\n\n # Let's try to find the line number ... look at our parent, then\n # let's find ``node`` in the parent, and look at our preceeding\n # sibling\n parent = node.parent\n while parent is not None:\n lower_bound = parent.line\n if parent.line is not None:\n break\n else:\n parent = parent.parent\n\n # Can we improve on the lower bound? Check our immediate sibling.\n preceding_sibling = node.parent.index(node) - 1\n if preceding_sibling >= 0:\n if node.parent.children[preceding_sibling].line is not None:\n lower_bound = node.parent.children[preceding_sibling].line\n\n lower_bound = max(self.ucomment['last_line'], lower_bound)\n\n # Last resort\n try:\n upper_bound = node.parent.children[preceding_sibling+2].line\n except IndexError:\n upper_bound = None\n\n node.line_lower = lower_bound\n node.line_upper = upper_bound\n\n if lower_bound is None:\n if upper_bound is not None:\n node.line = upper_bound - bias\n else:\n node.line = lower_bound + bias\n else:\n node.line += bias_if_present\n\n # We must have a source file, so that we can calculate the offset later\n if node.source is None:\n if node.parent.source:\n node.source = node.parent.source\n\n # Parent doesn't contain source; sometimes the child node does:\n elif node.parent.source is None:\n if len(node.children) and node.children[0].source is not None:\n node.source = node.children[0].source\n else:\n\n # Last resort: try looking higher and higher up the document\n parent = node.parent\n while parent is not None:\n node.source = parent.source\n if parent.source is not None:\n break\n else:\n parent = parent.parent\n\n # At this point we give up: this node won't be commentable", "def correct_baseline(self, points=250, order=3):\n baseline_trace = self.detect_baseline(points, order)\n new_y = self.y - baseline_trace.y\n new_y -= min(new_y)\n return Trace.from_xy(self.x, new_y)", "def linear_baseline_correction(x, y, pts):\r\n corrected = []\r\n pts = int(pts)\r\n slope = (np.mean(y[-pts:]) - np.mean(y[0:pts]))/(np.mean(x[-pts:]) - np.mean(x[0:pts]))\r\n offset = (np.mean(y[0:pts]) * np.mean(x[-pts:]) - \\\r\n np.mean(y[-pts:]) * np.mean(x[0:pts]))/(np.mean(x[-pts:]) - np.mean(x[0:pts]));\r\n for i, j in enumerate(y):\r\n corrected.append(j-offset-slope*x[i])\r\n \r\n return np.array(corrected)", "def transform_instructions(lines, symbolic_table):\n address_count = 0\n for lnum in range(len(lines)):\n line = lines[lnum]\n field = parse_line(line)\n if field[\"kind\"] == AsmSrcKind.SYM:\n lines[lnum] = 
build_resolved(symbolic_table, field, address_count)\n if field[\"kind\"] != AsmSrcKind.COMMENT:\n address_count += 1", "def BackTrace(self, start_method, deepcmp = False):\n totalpath = []\n tracepath = []\n for i in self.source:\n tracepath = self.BackTraceSingleSource(start_method, i, deepcmp)\n if tracepath:\n totalpath.append(tracepath)\n return totalpath", "def _compute_lineno(cls, table, code):\n for offset, lineno in dis.findlinestarts(code):\n adj_offset = offset + _FIXED_OFFSET\n if adj_offset in table:\n table[adj_offset].lineno = lineno\n # Assign unfilled lineno\n # Start with first bytecode's lineno\n known = code.co_firstlineno\n for inst in table.values():\n if inst.lineno >= 0:\n known = inst.lineno\n else:\n inst.lineno = known\n return table", "def BackTraceSingleSource(self, start_method, src_mtd = None, deepcmp = False):\n #self.d.create_xref()\n queue = []\n tmp_queue =[] # for avoiding duplicate\n ret = []\n paths = []\n tmp_path = []\n \n \n start_key, start_node = self.Method2Node(start_method)\n \n queue.append(start_node)\n tmp_queue.append(start_node)\n \n tmp_path.append(start_node)\n paths.append(tmp_path)\n \n while len(queue)>0:\n try:\n for q in queue:\n pass\n #guo 0329\n #print q.method_name + \"\\n\" \n #print \"***new method node queue*****\" \n tmp_node = queue.pop(0)\n except Exception, e:\n #guo 0329\n #print \"The gvm could not find the method node!\"\n break\n \n if tmp_node != None:\n #print tmp_node.method_name + \"\\n\"\n \n \n if not self.MethodCmp(tmp_node, src_mtd, deepcmp):\n #ret.append(tmp_node)\n methodprenodes = self.GetPreNodes(tmp_node)[:]\n \n #0405 guo: for avoiding node duplicate\n prenodes_tmp = copy.copy(methodprenodes)\n for node in prenodes_tmp:\n if node in tmp_queue:\n methodprenodes.remove(node)\n \n if len(methodprenodes)>0: \n paths = self.PathsWithNewnodes(paths,tmp_node, methodprenodes)\n \n for prenode in methodprenodes:\n if not self.WhiteListCmp(prenode): #whitelist avoid redundancy\n queue.append(prenode)\n tmp_queue.append(prenode)\n else:pass\n else: \n ret.append(self.FindKeynodePath(paths,tmp_node)) \n #ret.append(tmp_node)\n \n #guo 0403 handle abtrary method,need to return all the paths\n if len(queue)==0 and src_mtd[\"method\"]== csdConf.ABTRARY:\n ret = paths \n \n return ret", "def line_origins(origins):\n oiter = iter(origins)\n prev = next(oiter)\n\n for cur in oiter:\n try:\n refs = cur.extra_references\n except AttributeError:\n cur.extra_references = {prev}\n else:\n refs.add(prev)\n prev = cur", "def getStartingAddress(self):\n return HopperLowLevel.getBasicBlockStartingAddress(self.__procedure__.__segment_internal__,self.__procedure__.__procedure_index__,self.__basic_block_index__)", "def add_source_and_line(self, *nodes: List[nodes.Node]):\n location = self.node.source, self.node.line\n for node in nodes:\n node.source, node.line = location\n for child in node.traverse():\n child.source, child.line = location", "def adjust_along_path_to_base(self, leaf, base, up = False):\n count = 0\n start = leaf\n LLs = []\n while start is not base:\n oldstart = start\n start = start.parent\n LLs.append(start.children[oldstart.which_child() ^ 1])\n if up:\n LLs.reverse()\n for i in range(0,len(LLs),2):\n if i == len(LLs) - 1:\n break\n T2 = LLs[i]\n T1 = LLs[i+1]\n if T2 is None:\n print(leaf)\n self.visualize(\"debug.png\")\n if T1 is None:\n print(leaf)\n self.visualize(\"debug.png\")\n if random.uniform(0,1) < self.p and len(T1) + len(T2) < len(T2.parent.children[T2.which_child() ^ 1]):\n self.merge(T1, T2)", 
"def _FixBaselineNameToBaselinesMap():\n for baseline_name in BASELINE_NAME_TO_BASELINES_MAP.keys():\n BASELINE_NAME_TO_BASELINES_MAP[baseline_name] = sorted(\n BASELINE_NAME_TO_BASELINES_MAP[baseline_name])", "def apply_local_fixes(source, options):\r\n def find_ge(a, x):\r\n \"\"\"Find leftmost item greater than or equal to x.\"\"\"\r\n i = bisect.bisect_left(a, x)\r\n if i != len(a):\r\n return i, a[i]\r\n return len(a) - 1, a[-1]\r\n\r\n def find_le(a, x):\r\n \"\"\"Find rightmost value less than or equal to x.\"\"\"\r\n i = bisect.bisect_right(a, x)\r\n if i:\r\n return i - 1, a[i - 1]\r\n return 0, a[0]\r\n\r\n def local_fix(source, start_log, end_log,\r\n start_lines, end_lines, indents, last_line):\r\n \"\"\"apply_global_fixes to the source between start_log and end_log.\r\n\r\n The subsource must be the correct syntax of a complete python program\r\n (but all lines may share an indentation). The subsource's shared indent\r\n is removed, fixes are applied and the indent prepended back. Taking\r\n care to not reindent strings.\r\n\r\n last_line is the strict cut off (options.line_range[1]), so that\r\n lines after last_line are not modified.\r\n\r\n \"\"\"\r\n if end_log < start_log:\r\n return source\r\n\r\n ind = indents[start_log]\r\n indent = _get_indentation(source[start_lines[start_log]])\r\n\r\n sl = slice(start_lines[start_log], end_lines[end_log] + 1)\r\n\r\n subsource = source[sl]\r\n # Remove indent from subsource.\r\n if ind:\r\n for line_no in start_lines[start_log:end_log + 1]:\r\n pos = line_no - start_lines[start_log]\r\n subsource[pos] = subsource[pos][ind:]\r\n\r\n # Fix indentation of subsource.\r\n fixed_subsource = apply_global_fixes(''.join(subsource),\r\n options,\r\n where='local')\r\n fixed_subsource = fixed_subsource.splitlines(True)\r\n\r\n # Add back indent for non multi-line strings lines.\r\n msl = multiline_string_lines(''.join(fixed_subsource),\r\n include_docstrings=False)\r\n for i, line in enumerate(fixed_subsource):\r\n if not i + 1 in msl:\r\n fixed_subsource[i] = indent + line if line != '\\n' else line\r\n\r\n # We make a special case to look at the final line, if it's a multiline\r\n # *and* the cut off is somewhere inside it, we take the fixed\r\n # subset up until last_line, this assumes that the number of lines\r\n # does not change in this multiline line.\r\n changed_lines = len(fixed_subsource)\r\n if (start_lines[end_log] != end_lines[end_log]\r\n and end_lines[end_log] > last_line):\r\n after_end = end_lines[end_log] - last_line\r\n fixed_subsource = (fixed_subsource[:-after_end] +\r\n source[sl][-after_end:])\r\n changed_lines -= after_end\r\n\r\n options.line_range[1] = (options.line_range[0] +\r\n changed_lines - 1)\r\n\r\n return (source[:start_lines[start_log]] +\r\n fixed_subsource +\r\n source[end_lines[end_log] + 1:])\r\n\r\n def is_continued_stmt(line,\r\n continued_stmts=frozenset(['else', 'elif',\r\n 'finally', 'except'])):\r\n return re.split('[ :]', line.strip(), 1)[0] in continued_stmts\r\n\r\n assert options.line_range\r\n start, end = options.line_range\r\n start -= 1\r\n end -= 1\r\n last_line = end # We shouldn't modify lines after this cut-off.\r\n\r\n try:\r\n logical = _find_logical(source)\r\n except (SyntaxError, tokenize.TokenError):\r\n return ''.join(source)\r\n\r\n if not logical[0]:\r\n # Just blank lines, this should imply that it will become '\\n' ?\r\n return apply_global_fixes(source, options)\r\n\r\n start_lines, indents = zip(*logical[0])\r\n end_lines, _ = zip(*logical[1])\r\n\r\n source = 
source.splitlines(True)\r\n\r\n start_log, start = find_ge(start_lines, start)\r\n end_log, end = find_le(start_lines, end)\r\n\r\n # Look behind one line, if it's indented less than current indent\r\n # then we can move to this previous line knowing that its\r\n # indentation level will not be changed.\r\n if (start_log > 0\r\n and indents[start_log - 1] < indents[start_log]\r\n and not is_continued_stmt(source[start_log - 1])):\r\n start_log -= 1\r\n start = start_lines[start_log]\r\n\r\n while start < end:\r\n\r\n if is_continued_stmt(source[start]):\r\n start_log += 1\r\n start = start_lines[start_log]\r\n continue\r\n\r\n ind = indents[start_log]\r\n for t in itertools.takewhile(lambda t: t[1][1] >= ind,\r\n enumerate(logical[0][start_log:])):\r\n n_log, n = start_log + t[0], t[1][0]\r\n # start shares indent up to n.\r\n\r\n if n <= end:\r\n source = local_fix(source, start_log, n_log,\r\n start_lines, end_lines,\r\n indents, last_line)\r\n start_log = n_log if n == end else n_log + 1\r\n start = start_lines[start_log]\r\n continue\r\n\r\n else:\r\n # Look at the line after end and see if allows us to reindent.\r\n after_end_log, after_end = find_ge(start_lines, end + 1)\r\n\r\n if indents[after_end_log] > indents[start_log]:\r\n start_log, start = find_ge(start_lines, start + 1)\r\n continue\r\n\r\n if (indents[after_end_log] == indents[start_log]\r\n and is_continued_stmt(source[after_end])):\r\n # find n, the beginning of the last continued statement\r\n # Apply fix to previous block if there is one.\r\n only_block = True\r\n for n, n_ind in logical[0][start_log:end_log + 1][::-1]:\r\n if n_ind == ind and not is_continued_stmt(source[n]):\r\n n_log = start_lines.index(n)\r\n source = local_fix(source, start_log, n_log - 1,\r\n start_lines, end_lines,\r\n indents, last_line)\r\n start_log = n_log + 1\r\n start = start_lines[start_log]\r\n only_block = False\r\n break\r\n if only_block:\r\n end_log, end = find_le(start_lines, end - 1)\r\n continue\r\n\r\n source = local_fix(source, start_log, end_log,\r\n start_lines, end_lines,\r\n indents, last_line)\r\n break\r\n\r\n return ''.join(source)", "def _relative_path(self,fname,base):\n\t\n\tif os.path.commonprefix([fname,base])!=base:\n\t raise ValueError, \"Unexpected base in file\" + fname\n\n\n\t# Make sure base ends in a slash, or the following will fail\n\tif base[-1] != '/':\n\t base = base + '/'\n\treturn fname.replace(base,'')", "def source_line(self) -> str:\n if not self.__source_line:\n self.__source_line = util.get_line(self.file_path, self.line)\n\n return self.__source_line" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of days in the year.
def NumberDaysYear(year): return 365 + IsLeapYear(year)
[ "def days_in_year(year):\n\n return days_in_year(year)", "def elapsed_days(cls, year):\n months_elapsed = quotient(235 * year - 234, 19)\n parts_elapsed = 12084 + 13753 * months_elapsed\n days = 29 * months_elapsed + quotient(parts_elapsed, 25920)\n return days + 1 if mod(3 * (days + 1), 7) < 3 else days", "def total_days(self):\n\t\t\t\tyear = self.year + calendar.year_offset\n\t\t\t\td_o_year_offset = int(self.day_of_year) - 1\n\t\t\t\treturn datetime.date(year, 1, 1).toordinal() + d_o_year_offset", "def calcNumLeapYearsSinceBaseYear(year):\n return (year - baseYear) / 4", "def countDays (firstYear, lastYear):\n days = 0\n leapYear = 366\n nonleapYear = 365\n # for loop with 2 arguments\n for i in range (firstYear, lastYear+1):\n # pass boolean value function to check condition\n if isLeapYear(i):\n days = days + leapYear\n else:\n days = days + nonleapYear\n # print(days) #temporary value\n return days", "def get_day_of_year(time: datetime) -> int:\n return time.timetuple().tm_yday - 1", "def getYear():", "def find_leap_years(year):", "def mod_year(self) -> int:\n return self._mod_year", "def centuryFromYear(y):\n return 1 + (y-1)//100", "def day_length(doy, yr_days, latitude):\n deg2rad = pi / 180.0\n latr = latitude * deg2rad\n sindec = -sin(23.5 * deg2rad) * cos(2.0 * pi * (doy + 10.0) / yr_days)\n a = sin(latr) * sindec\n b = cos(latr) * cos(asin(sindec))\n dayl = 12.0 * (1.0 + (2.0 / pi) * asin(a / b))\n \n return dayl", "def get_num_days_in_time_period(year, month=None):\n if month:\n return get_num_days_in_month(year, month)\n else:\n days = 0\n for month in range(1, 13):\n days += len(get_ordered_dates_in_month(year, month))\n return days", "def death_year(df):\n try: # If person is deceased\n dod = df.loc['date of death'][0][0]\n dod_year = int(dod[0:4], 10) # Extract death year\n except KeyError: # If person is still alive, returns current year\n date = datetime.date.today()\n dod_year = date.year # Current year\n return dod_year", "def to_years(days):\n return \"{} y {} d\".format(int(days/365), days % 365)", "def get_year():\n return dt.now().year", "def this_year():\n surprising_math = int(2020 * 0 / 20 + 200 / 0.2 * 2 + 20)\n return surprising_math", "def get_start_year(self) -> int:\n return self.start_date.year", "def get_years(det_page):\n div = det_page.find('div', \n class_='nytint-detainee-fullcol')\n matches = time_pattern.findall(div.text)\n return int(matches[0].rstrip(' year'))", "def years_in_existence(self):\n return self.franchise.cosmos.year-self.franchise.founded" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of days in the month. If any of the arguments is missing (month or year) the current month/year is assumed.
def NumberDaysMonth(month = None, year = None): if month is None: m = time.localtime()[1] else: m = month if year is None: y = time.localtime()[0] else: y = year if m == 2: if IsLeapYear(y): return 29 else: return 28 elif m in (1, 3, 5, 7, 8, 10, 12): return 31 else: return 30
[ "def daysOfMonth(year, month):\r\n d = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n if isLeap(year):\r\n d[1] += 1\r\n return d[month-1]", "def get_num_days_in_month(year, month):\n range = calendar.monthrange(year, month)\n return range[1]", "def day_of_month(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"day_of_month\")", "def get_num_days_in_time_period(year, month=None):\n if month:\n return get_num_days_in_month(year, month)\n else:\n days = 0\n for month in range(1, 13):\n days += len(get_ordered_dates_in_month(year, month))\n return days", "def days(month):\n if month == 'September' or month == 'April' or month == 'June' or month == 'November':\n print(\"30\")\n elif month == 'February':\n print(\"28\")\n else:\n print(\"31\")", "def day_of_month(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"day_of_month\")", "def ndays(nmonth=3):\n today0 = datetime.now()\n year3, month3 = (today0.year, today0.month - nmonth) if today0.month - nmonth >= 1 \\\n else (today0.year - 1, today0.month - nmonth + 12)\n date3 = datetime(year3, month3, today0.day)\n ndays = (today0 - date3).days\n\n return ndays", "def test_numdays(months):\n assert months[1].numdays() == 31", "def get_num_days_in_month(self, str_dateval):\n assert len(str_dateval) == 6, \"Dateval needs to have six values\"\n str_yy = str_dateval[2:4]\n str_mm = str_dateval[4:6]\n if(str_yy[0] == '9'):\n # 199x\n yy_val = int(\"19\" + str_yy)\n else:\n yy_val = int(\"20\" + str_yy)\n mm_val = int(str_mm)\n\n date_val = pd.to_datetime(str(yy_val) + str_mm + \"01 12:00:00\")\n str_datetime = str(yy_val) + str_mm + \"01 01:30:00\"\n date_kp_val = pd.to_datetime(str_datetime)\n\n rng_month = monthrange(yy_val, mm_val)\n return rng_month[1], date_val, date_kp_val", "def in_mois((start_day, start_month, end_day, end_month), today=today):\n start_date = date(today.year, start_month, start_day)\n #Afin de prendre en compte un mois à cheval sur deux années\n if(start_month > end_month):\n start_date = date(today.year-1, start_month, start_day)\n end_date = date(today.year, end_month, end_day)\n if today >= start_date and today <= end_date:\n return (today - start_date).days + 1\n\n return 0", "def set_days_in_month(month_picked):\n if month_picked in ['July', 'August']:\n days = 31\n marks = {1: '1', 10: '10', 20: '20', 31: '31'}\n else:\n days = 30\n marks = {1: '1', 10: '10', 20: '20', 30: '30'}\n\n return days, marks", "def diff_month(date1: date, date2: date) -> int:\n return (date1.year - date2.year) * YEAR_MONTHS + date1.month - date2.month", "def test_daycount(months):\n assert months[2].daycount == 31", "def _get_days_in_months(start_date, end_date, n_months, list_yr_mo):\n if n_months == 1:\n days_in_months = np.array([(end_date - start_date).days])\n else:\n days_in_month_1 = ((start_date + MonthEnd()) - start_date).days\n days_in_month_n = (end_date - (end_date - MonthBegin())).days + 1\n days_in_months = [days_in_month_1]\n for month in list_yr_mo[1:-1]:\n Y, m = list(map(int, month.split(\"-\")))\n days_in_months.append(calendar.monthrange(Y, m)[1])\n days_in_months.append(days_in_month_n)\n return np.array(days_in_months)", "def months_passed(self):\n\n return relativedelta(self.start_date, date.today()).months", "def month_number(name):\n return Article.MONTH_NUMBERS[name]", "def month(self):\n return self.__date[\"month\"]", "def get_post_consolidation_days(self):\n\n #sanity check, make sure package is consolidated and date_consolidated available\n if not self.is_consolidated or not 
self.date_consolidated:\n return -1\n\n #get only year month and day\n now = datetime.now().replace(\n hour=0, minute=0, second=0, microsecond=0)\n #get only year month and day\n date_consolidated = self.date_consolidated.replace(\n hour=0, minute=0, second=0, microsecond=0)\n post_consolidation_time = now - date_consolidated\n return post_consolidation_time.days", "def eomonth(y, m):\n year = y\n if m == 12:\n month = 1\n else:\n month = int(m)+1\n given_day = datetime(year, month, 1)\n required_day = given_day - timedelta(days=1)\n return required_day" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The initializer has an optional argument, time, in the time module format, either as seconds since the epoch (Unix time) or as a time tuple. If it is not provided, the date defaults to the current date.
def __init__(self, tm = None):
    if tm is None:
        t = time.localtime()
    else:
        if isinstance(tm, int):
            t = time.localtime(tm)
        else:
            t = tm
    self.year, self.month, self.day = t[:3]
[ "def get_time_initializer(self):\n (_hour, _minute, _seconds,\n _month, _day_of_month, _year,\n gmt_offset, _DAYLIGHT_SAVINGS_ENABLED) = self._get_time()\n date_string = \"20\" + str(_year).zfill(2) + \"-\" + \\\n str(_month).zfill(2) + \"-\" + \\\n str(_day_of_month).zfill(2) + \"T\" + \\\n str(_hour).zfill(2) + \\\n \":\" + str(_minute).zfill(2) + \\\n \":\" + str(_seconds).zfill(2)\n return date_string", "def dateTime(self, args:list):\n\t\t_globals._console.write(str(datetime.datetime.now()))", "def time():\n return datetime.datetime.now()", "def epoch_time(time):\n date_time = datetime.datetime.fromtimestamp(time).strftime('%a, %d %b %Y %H:%M:%S')\n return date_time", "def current_timestamp():\n # return \"%d-%02d-%02dT%02d:%02d:%02dZ\" % utime.localtime()[:6]\n return utime.time()", "def _get_timestamp(self):\n return datetime.datetime.now()", "def get_current_time():\r\n return datetime.now().strftime(\"%B %d, %Y %H:%M\")", "def __init__(\n self, precision=None, system_time_tuple=None, time_zone_offset=None):\n super(Systemtime, self).__init__(\n precision=precision or definitions.PRECISION_1_MILLISECOND,\n time_zone_offset=time_zone_offset)\n self._number_of_seconds = None\n self._day_of_month = None\n self._day_of_week = None\n self._hours = None\n self._milliseconds = None\n self._minutes = None\n self._month = None\n self._seconds = None\n self._year = None\n\n if system_time_tuple:\n if len(system_time_tuple) < 8:\n raise ValueError('Invalid system time tuple 8 elements required.')\n\n if system_time_tuple[0] < 1601 or system_time_tuple[0] > 30827:\n raise ValueError('Year value out of bounds.')\n\n if system_time_tuple[1] not in range(1, 13):\n raise ValueError('Month value out of bounds.')\n\n if system_time_tuple[2] not in range(0, 7):\n raise ValueError('Day of week value out of bounds.')\n\n days_per_month = self._GetDaysPerMonth(\n system_time_tuple[0], system_time_tuple[1])\n if system_time_tuple[3] < 1 or system_time_tuple[3] > days_per_month:\n raise ValueError('Day of month value out of bounds.')\n\n if system_time_tuple[4] not in range(0, 24):\n raise ValueError('Hours value out of bounds.')\n\n if system_time_tuple[5] not in range(0, 60):\n raise ValueError('Minutes value out of bounds.')\n\n # TODO: support a leap second?\n if system_time_tuple[6] not in range(0, 60):\n raise ValueError('Seconds value out of bounds.')\n\n if system_time_tuple[7] < 0 or system_time_tuple[7] > 999:\n raise ValueError('Milliseconds value out of bounds.')\n\n self._day_of_month = system_time_tuple[3]\n self._day_of_week = system_time_tuple[2]\n self._hours = system_time_tuple[4]\n self._milliseconds = system_time_tuple[7]\n self._minutes = system_time_tuple[5]\n self._month = system_time_tuple[1]\n self._seconds = system_time_tuple[6]\n self._year = system_time_tuple[0]\n\n self._number_of_seconds = self._GetNumberOfSecondsFromElements(\n self._year, self._month, self._day_of_month, self._hours,\n self._minutes, self._seconds)", "def __init__(self, tstamp=None):\n if tstamp is None:\n tstamp = self.now\n self.tstamp = tstamp\n self.start()", "def __init__(self, day, month, stuff={}):\n\t\tsuper(LiturgicalDate,self).__init__(day, month)\n\t\tself.stuff = stuff", "def __init__(self, date='now', days=0):\n self.valid = 1\n if isinstance(date, datetime): #trap a datetime\n self.datetime = date\n elif isinstance(date, DATE): #trap a DATE\n self.datetime = date.datetime\n self.valid = date.valid\n elif date == 'now':\n self.datetime = datetime.today()\n elif date:\n try:\n h = mn = s = 0\n 
try: #we cant use safeint from here, so do it the hard way\n z = int(date)\n except:\n z = False\n if z: #integer date a la mysql eg 20071003\n date = str(date) #just to make sure\n y = date[:-4]\n m = date[-4:-2]\n d = date[-2:]\n else: #user input date or date/time\n date = date.strip()\n sd = date.split()\n # print sd\n if len(sd) > 1: # presumably we have time\n try:\n hms = sd[1].split(\":\")\n # print 'hms',hms\n h = int(hms[0])\n # print 'h',h\n if len(hms) >= 2:\n mn = int(hms[1])\n# print 'mn',mn\n if len(hms) >= 3:\n s = int(hms[2])\n# print 's',s\n except:\n # raise\n pass\n date = sd[0]\n date = date.replace('-', '/').replace(\n ' ', '') # sort out minor user input anomalies\n d, m, y = date.split('/')\n #allow for shorthand years\n y = int(y)\n y += (y < 70 and 2000 or ((y < 100 and 1900) or 0))\n self.datetime = datetime(y,\n int(m),\n int(d), int(h), int(mn), int(s))\n except: #return '1/1/00' to indicate a problem\n self.datetime = datetime(1900, 1, 1)\n self.valid = 0\n days = 0 #just in case...\n else: # date is \"\" or None - eg mysql will return None for a blank date - default to invalid\n self.datetime = datetime(1900, 1, 1)\n self.valid = 0\n days = 0 #just in case...\n if days:\n self.datetime = self.datetime + timedelta(days=days)", "def __init__(self, precision=None, time_zone_offset=None, timestamp=None):\n super(OLEAutomationDate, self).__init__(\n precision=precision or definitions.PRECISION_1_MICROSECOND,\n time_zone_offset=time_zone_offset)\n self._timestamp = timestamp", "def _get_current_datetime():\n return datetime.datetime.now()", "def set_system_today(self):\r\n # get path to directory of current file main\r\n # file \"superpy_date.txt\" contains last exported reference_date\r\n path_current_file = os.path.dirname(os.path.realpath(__file__))\r\n if os.path.isfile(f'{path_current_file}/superpy_date.txt'):\r\n datefile_to_open = f'{path_current_file}/superpy_date.txt'\r\n with open(datefile_to_open) as superpy_date:\r\n date_from_file = superpy_date.readline()\r\n self.today = self.proper_date(date_from_file)\r\n # if no such file as superpy_date.txt, notice user + set virtual date\r\n else:\r\n virtual_date = '0001-01-01 00:00:00.000000'\r\n self.today = self.proper_date(virtual_date)\r\n print(\"\"\"No valid date could be found. 
Please look at the user guide\r\n how to provide a system date.\r\n A file called 'superpy_date.txt' should be present.\"\"\")\r\n return self.today", "def getCurrentTime():\n now = datetime.datetime.now()\n return '{}-{}-{}'.format(now.year, now.month, now.day), '{}:{}:{}'.format(now.hour, now.minute, now.second)", "def set_date_and_time_Unix(self, year, month,day,hour,min,sec):\n print '*INFO* Setting date/time to: %04d-%02d-%02d %02d:%02d:%02d' %(int(year),int(month),int(day),int(hour),int(min),int(sec))\n return _set_datetime(year, month,day,hour,min,sec, 'unix')", "def timestamp(self):\n def get_tstp(y, mo, d, h, mi, s):\n ts = time.strptime(str(y) + '-' + str(mo) + '-' + str(d) + 'T' + str(h) + ':' + \\\n str(mi) + ':' + str(s), '%Y-%m-%dT%H:%M:%S')\n return time.mktime(ts)\n y = 1970\n mo = 1\n d = 1\n h = 0\n mi = 0\n s = 0\n # syntacic hack - 'while' stmt is not important, but 'break' makes there goto stmt\n while 1:\n if self._content['year'] is None: break\n y = self._content['year']\n if self._content['month'] is None: break\n mo = self._content['month']\n if self._content['day'] is None: break\n d = self._content['day']\n if self._content['hour'] is None: break\n h = self._content['hour']\n if self._content['minute'] is None: break\n mi = self._content['minute']\n if self._content['second'] is None: break\n s = self._content['second']\n break\n if y < 1970: return 0.0\n return get_tstp(y, mo, d, h, mi, s)", "def currentTime(time, update=bool):\n pass", "def setTime():\n global local_time\n local_time = time.time()", "def get_now():\n return localtime(now())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deep copy of Date objects.
def copy(self):
    ret = Date()
    ret.year, ret.month, ret.day = self.year, self.month, self.day
    return ret
[ "def copy(self) -> \"DataArray\":\n return deepcopy(self)", "def __get_dict_deep_copy(self):\n return copy(dict(self))", "def copy(self):\n return self.__class__( self.first, self.last )", "def copy(self):\n chart = Chart.__new__(Chart)\n chart.date = self.date\n chart.pos = self.pos\n chart.hsys = self.hsys\n chart.objects = self.objects.copy()\n chart.houses = self.houses.copy()\n chart.angles = self.angles.copy()\n return chart", "def test_deepcopy(self):\n copy = deepcopy(Address._restrictions)\n for key in copy:\n Restriction.legacy(copy[key])", "def copied(object, original):", "def dictcopy(self):\n return self.__dict__.copy()", "def copy(self) -> \"Atoms\":\n return deepcopy(self)", "def copy_data(self):\n self._load()\n return copy.deepcopy(self._data)", "def copy(self):\n return FoodList(self.dataframe.copy())", "def __iter__(self):\n return iter(self.__dates__)", "def convert_date_type(self):\r\n # Convert Date from Object to Datetime datatype\r\n self.all_data[self._date] = pd.to_datetime(self.all_data[self._date], dayfirst = True)\r\n\r\n # Set Index\r\n self.all_data = self.all_data.set_index(self._date)", "def copy(self, deep=True):\n series = dict((k, v.copy()) for k, v in self.iteritems())\n return SparseDataFrame(series, index=self.index, columns=self.columns,\n default_fill_value=self.default_fill_value,\n default_kind=self.default_kind)", "def copy(self):\n bin_copy = self.bin.copy()\n obj_copy = [o for o in self.objects]\n next_obj_copy = self.next_object.copy()\n # new_state = State(self.bin, self.objects, self.next_object)\n new_state = State(bin_copy, obj_copy, next_obj_copy)\n return new_state", "def copy(self):\n\n models_copy = [m.copy() for m in self._models]\n return self.__class__(init=models_copy)", "def copy(self):\r\n return oDict(self._thedict)", "def clone(self) -> \"ScXMLDataObj *\":\n return _coin.ScXMLRealDataObj_clone(self)", "def carbon_copy(self):\n return self._carbon_copy", "def create_copy(self):\n print('WARNING: Implementation and testing still in progress!!!!')\n\n new_obj = self.__class__()\n new_obj.data = copy.deepcopy(self.data)\n new_obj.topography = copy.deepcopy(self.topography)\n new_obj.electrode_positions = copy.deepcopy(\n self.electrode_positions)\n\n # what about the log?\n print('WARNING: Journal and log is not copied!')\n\n return new_obj", "def clone(self) -> \"ScXMLDataObj *\":\n return _coin.ScXMLXMLDataObj_clone(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the year day of a date.
def GetYearDay(self):
    ret = self.day
    for month in range(1, self.month):
        ret += NumberDaysMonth(month, self.year)
    return ret
[ "def get_day_of_year(time: datetime) -> int:\n return time.timetuple().tm_yday - 1", "def get_year():\n\ttoday = datetime.today()\n\tif today.month==1 and today.day<=7:\n\t\treturn(today.year+1)\n\telse:\n\t\treturn(today.year)", "def liturgical_year(date):\n if date <= liturgical_year_end(date.year):\n return date.year\n else:\n return date.year + 1", "def get_year():\n return dt.now().year", "def _get_year():\n x = datetime.datetime.now()\n return x.year if x >= datetime.datetime(x.year, 12, 1) else x.year - 1", "def school_year_for_date(date = date.today()):\n query = SchoolYear.all(keys_only=True)\n keys = query.fetch(100)\n years = db.get(keys)\n year = None\n year_list = SchoolYear.filter_results_for_inrange(years, date,\n start_date_extension = 0, end_date_extension = 0)\n if (not year_list):\n year_list = SchoolYear.filter_results_for_inrange(years, date,\n start_date_extension = 30, end_date_extension = 60)\n if year_list:\n year = year_list[0]\n return year", "def getYear():", "def get_start_year(self) -> int:\n return self.start_date.year", "def death_year(df):\n try: # If person is deceased\n dod = df.loc['date of death'][0][0]\n dod_year = int(dod[0:4], 10) # Extract death year\n except KeyError: # If person is still alive, returns current year\n date = datetime.date.today()\n dod_year = date.year # Current year\n return dod_year", "def NumberDaysYear(year):\n return 365 + IsLeapYear(year)", "def get_year_ending(date) -> datetime.date:\n\tdate = getdate(date)\n\tnext_year_start = datetime.date(date.year + 1, 1, 1)\n\treturn add_to_date(next_year_start, days=-1)", "def first_day_of_year(year):\n\n if (year == 0):\n print 'Error: A year value of 0 is not possible'\n raise Exception\n\n elif (year < 0):\n first_day = (year * 365) + int((year - 1) / 4) - 693596\n else: # Positive year\n leap_adj = int ((year + 3) / 4)\n if (year > 1600):\n leap_adj = leap_adj - int((year + 99 - 1600) / 100) + \\\n int((year + 399 - 1600) / 400)\n\n first_day = year * 365 + leap_adj - 693963\n\n if (year > 1582):\n first_day -= 10\n\n return first_day", "def relative_year(month, day):\n t = today()\n # If incoming date is Jan 1st\n if month == 1 and day == 1:\n # and current UTC is Dec 31st,\n # then increment the current year\n if t.month == 12 and t.day == 31:\n return t.year + 1\n return t.year", "def centuryFromYear(y):\n return 1 + (y-1)//100", "def get_year(ax_id):\n modern_ax_id = re.compile(r\"([0-9]{2})([0-9]{2})\\.([0-9]+)\")\n search_modern = re.search(modern_ax_id, ax_id)\n if search_modern:\n year = \"20\" + search_modern[1]\n else:\n old_ax_id = re.compile(r\"([a-zA-Z]+[-]?[a-zA-Z]+)/([0-9]{2})([0-9]+)\")\n search_old = re.search(old_ax_id, ax_id)\n # get century right\n if search_old[2][0] == \"9\":\n year = \"19\" + search_old[2]\n else:\n year = \"20\" + search_old[2]\n return year", "def get_year(ax_id):\n modern_ax_id = re.compile(r'([0-9]{2})([0-9]{2})\\.([0-9]+)')\n search_modern = re.search(modern_ax_id, ax_id)\n if search_modern:\n year = '20' + search_modern[1]\n else:\n old_ax_id = re.compile(r'([a-zA-Z]+[-]?[a-zA-Z]+)/([0-9]{2})([0-9]+)')\n search_old = re.search(old_ax_id, ax_id)\n # get century right\n if search_old[2][0] == \"9\":\n year = '19' + search_old[2]\n else:\n year = '20' + search_old[2]\n return year", "def get_year(year):\n years = factory.get_elem_solo(Year, year)\n return years", "def leapday_day_of_week(year):\n if not isinstance(year, int) or year < 0:\n raise ValueError('Invalid year: {}'.format(year))\n if not is_leap_year(year):\n return None\n # 
Use Gauss' algorithm\n # https://en.wikipedia.org/wiki/Determination_of_the_day_of_the_week\n # since we only ever want Feb 29th, simplify it a bit with jan1st algorithm\n jan1_dow = (1 + 5 * ((year - 1) % 4)\n + 4 * ((year - 1) % 100)\n + 6 * ((year - 1) % 400)) % 7\n return (jan1_dow + 59) % 7 # Feb 29th is 59 days from Jan 1st", "def centuryFromYear(year):\n \n return year // 100 + (0 if year % 100 == 0 else 1)", "def get_school_year(self):\n return SchoolYear.school_year_for_date(self.start_date)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a (signed) number of days to the date.
def __add__(self, n):
    if isinstance(n, int):
        # Calculate julian day number and add n.
        temp = self.ToJDNumber() + n
        # Convert back to date format.
        return DateFromJDNumber(temp)
    else:
        raise TypeError("%s is not an integer." % str(n))
[ "def incr_date(self, num_days=1):\n self.currentDate += timedelta(days=num_days)", "def add_days(self, day, days):\n if days < 0:\n sign = -1\n look_forward = False\n else:\n sign = 1\n look_forward = True\n\n if self.weekends:\n weeklen = 7 - len(self.weekends)\n weeks_add = abs(days) // weeklen * sign\n days_add = abs(days) % weeklen * sign\n else:\n weeks_add = 0\n days_add = days\n\n new_date = day + timedelta(days=weeks_add * 7)\n while days_add:\n # remaining days may or may not include weekends;\n new_date = new_date + timedelta(sign)\n if not self.is_weekend(new_date):\n days_add -= 1\n\n days_add = self.holidays_between(day, new_date) # any holidays?\n if days_add:\n return self.add(new_date, days_add * sign)\n else:\n return self.closest_biz_day(new_date, look_forward)", "def add(self, day, delta):\n\n if isinstance(delta, int):\n delta = timedelta(days=delta)\n\n if isinstance(day, datetime):\n # Add hours only if the given day is a datetime\n day = self.add_seconds(day, delta.seconds)\n\n return self.add_days(day, delta.days)", "def add_gigasecond(date):\r\n return date + timedelta(seconds = 10**9)", "def delta(value, arg):\n return value + timedelta(days=arg)", "def test_add_time_delta_date(self):\n t = datetime.date(2013, 4, 1)\n ret = fleming.add_timedelta(t, datetime.timedelta(days=2))\n self.assertEquals(ret, datetime.date(2013, 4, 3))", "def increment_date(d, amount, units='years'):\n if units == 'years':\n try:\n return d.replace(year = d.year + amount)\n except ValueError:\n return d + (datetime.date(d.year + amount, 1, 1) - datetime.date(d.year, 1, 1))\n elif units == 'months':\n years = 0\n month = d.month + amount\n if month > 12:\n years, month = divmod(month-1, 12)\n month += 1\n try:\n return d.replace(year = d.year + years, month=month)\n except ValueError:\n return d + (datetime.date(d.year + years, month, 1) - datetime.date(d.year, month, 1))\n else:\n multiplier = 1\n if units == 'weeks':\n multiplier = 7\n return d + datetime.timedelta(days=amount*multiplier)", "def add_workdays(my_date: date, workdays: Day) -> date:\n for _ in range(workdays):\n my_date = my_date + timedelta(days=1)\n while my_date.weekday() >= 5:\n my_date = my_date + timedelta(days=1)\n return my_date", "def total_days(self):\n\t\t\t\tyear = self.year + calendar.year_offset\n\t\t\t\td_o_year_offset = int(self.day_of_year) - 1\n\t\t\t\treturn datetime.date(year, 1, 1).toordinal() + d_o_year_offset", "def _dayForward(self):\n\n self.currentDay += 1", "def days(self, days):\n \n self._days = days", "def offset(self, date: datetime.date, days: int) -> datetime.date:\n if not date in self:\n raise ValueError(f\"{date} is not in the calendar\")\n if self.index(date) + days < 0:\n raise IndexError(f\"Out of bounds\")\n if self.index(date) + days >= len(self):\n raise IndexError(f\"Out of bounds\")\n return self[self.index(date) + days]", "def increment_day_range(self):\n self.start = self.start + timedelta(days=7)\n # self.end = self.end + timedelta(days=7)\n self.end = self.start + timedelta(days=3)", "def _date_to_days(d):\n return (d - BASE_DATE).days", "def business_date_operation(date, days):\n ret_date = date + relativedelta(days=days)\n # If weekend (saturday/sunday), add another day so that\n # \"days\" number of business days are incremented\n if ret_date.weekday() in [5,6]:\n if days > 0:\n ret_date = ret_date + relativedelta(days=1)\n elif days < 0:\n ret_date = ret_date + relativedelta(days=-1)\n return ret_date", "def days(n):\n return timedelta(days=n)", "def add_years(d, 
years):\r\n try:\r\n return d.replace(year = d.year + years)\r\n except Exception:\r\n return d + (datetime.date(d.year + years, 1, 1) - datetime.date(d.year, 1, 1))", "def date_add_year(d, years):\n\t try:\n\t return d.replace(year = d.year + years)\n\t except ValueError:\n\t return d + (date(d.year + years, 1, 1) - date(d.year, 1, 1))", "def day(self, new_day_value):\n if not (1 <= new_day_value <= 31):\n raise ActivityValidatorError(\"Day must be an integer in [1, 31]!\\n\")\n self.__date[\"day\"] = new_day_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a COM time directly into the Date format.
def DateFromCOM(t): return Date(int(t))
[ "def convertTime(self,date,time):\n #Creates a datetime from the paramatised date and time\n #Create time object\n timeSplit = [int(i) for i in time.split(':')]\n timeObj = datetime.time(timeSplit[0], timeSplit[1]); #hours, minutes\n #Create date object\n currDate = date\n dateSplit = [int(i) for i in currDate.split('/')]\n dateObj = datetime.date(dateSplit[2], dateSplit[1], dateSplit[0]) #year, day, month\n #combine into datetime object\n dateTime = datetime.datetime.combine(dateObj, timeObj)\n return dateTime", "def parse_datetime(value, datemode):\n if datemode is None:\n # set to modern Excel\n datemode = 1\n date_tuple = xlrd.xldate_as_tuple(value, datemode)\n if date_tuple[:3] == (0, 0, 0):\n # must be time only\n value = datetime.time(*date_tuple[3:])\n elif date_tuple[3:] == (0, 0, 0):\n # must be date only\n value = datetime.date(*date_tuple[:3])\n else:\n value = datetime.datetime(*date_tuple)\n return value", "def convert_to_datetime(line):\n data = line.split(' ')\n line_datetime = data[1].split('T')\n line_date = [int(x) for x in line_datetime[0].split('-')]\n line_time = [int(x) for x in line_datetime[1].split(':')]\n return datetime(line_date[0], line_date[1], line_date[2], line_time[0], line_time[1], line_time[2])", "def Time2Internaldate(date_time):\n if isinstance(date_time, (int, float)):\n dt = datetime.fromtimestamp(date_time,\n timezone.utc).astimezone()\n elif isinstance(date_time, tuple):\n try:\n gmtoff = date_time.tm_gmtoff\n except AttributeError:\n if time.daylight:\n dst = date_time[8]\n if dst == -1:\n dst = time.localtime(time.mktime(date_time))[8]\n gmtoff = -(time.timezone, time.altzone)[dst]\n else:\n gmtoff = -time.timezone\n delta = timedelta(seconds=gmtoff)\n dt = datetime(*date_time[:6], tzinfo=timezone(delta))\n elif isinstance(date_time, datetime):\n if date_time.tzinfo is None:\n raise ValueError(\"date_time must be aware\")\n dt = date_time\n elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('\"','\"'):\n return date_time # Assume in correct format\n else:\n raise ValueError(\"date_time not of a known type\")\n fmt = '\"%d-{}-%Y %H:%M:%S %z\"'.format(Months[dt.month])\n return dt.strftime(fmt)", "def icsConvertData(icsDate):\n # 1998 04 15 T23 59 59\n # print \"converting\",icsDate\n year = int(icsDate[0:4])\n month = int(icsDate[4:6])\n day = int(icsDate[6:8])\n date = datetime.date(year,month,day)\n time=None\n if \"T\" in icsDate: \n time = icsDate[9:13]\n minute = icsDate[11:13]\n hour = icsDate[9:11]\n # Assume offset (this is hacky, not sure if Z is timezone)\n # This should be replaced - only because times came later\n if \"Z\" in icsDate: \n # Increment hour and day according to timezone\n hour=\"1\"*(timezone-len(hour))+hour\n time=hour+minute\n # increment date incase of rollover\n # date+=datetime.timedelta(days=1)\n # date+=datetime.timedelta(hours=2)\n # if \"Z\" in icsDate: date+=datetime.timedelta(hours=1)\n return date,time", "def _stringtime_2_datetime(stringtime):\n\n if stringtime is None:\n return None\n\n elif '/Date(' in stringtime: # oData gives unix time. 
Unix date time in milliseconds from 1.1.1970\n unix_date_time = int(stringtime[6:-2])\n unix_datetime_in_seconds = unix_date_time / 1000 # For some reason they are given in miliseconds\n date = dt.datetime.fromtimestamp(int(unix_datetime_in_seconds))\n\n else:\n date = parse(stringtime)\n\n return date", "def decode_datetime(date: int, time: int) -> datetime:\n year = date // 10000\n month = (date % 10000) // 100\n day = date % 100\n hour = time // 10000\n minute = (time % 10000) // 100\n sec = time % 100\n return datetime(year, month, day, hour, minute, sec)", "def ah_str2date(date_str):\n return dt.datetime.strptime(date_str, \"%d/%m/%Y %H:%M\")", "def test_cast_regular_as_datetime():\n result = datetime_utils.cast_regular_as_datetime(\n \"2019-08-19 05:51:45.694869\")\n assert result.year == 2019\n assert result.month == 8\n assert result.day == 19\n assert result.hour == 5\n assert result.minute == 51\n assert result.second == 45", "def get_time(self,time,date):\n\t\tsts = date[6:] + '-' + date[3:5] + '-' + date[0:2] + ' ' + time[0:12]\n\t\tgmtplus = float(time[18])\n\t\tsjd = Time(sts, format='iso', scale='utc').jd - gmtplus/24.0 # subtract +1 hr\n\t\treturn sjd", "def convertTime(unixTime):\n\treturn datetime.datetime.fromtimestamp(float(unixTime)).strftime('%d-%m-%Y %H:%M:%S')", "def dateConvertor(obj):\n return obj.strftime('%Y-%m-%d %H:%M:%S') if type(obj) is datetime.datetime else obj", "def combine (cls, date, time) :\n if isinstance (date, CAL._DTW_) :\n date = date._body\n if isinstance (time, CAL._DTW_) :\n time = time._body\n return cls (** {cls._kind : datetime.datetime.combine (date, time)})", "def strp_time(u_time):\n try:\n d = datetime.datetime.strptime(u_time, '%Y%m%d%H%M%S')\n\n except (ValueError, TypeError) as e:\n LOG.warning(e)\n d = None\n\n return d", "def strptime(self, value, format):\n return datetime.datetime.strptime(value, format).date()", "def py_date_like_to_net_datetime(datetime_like: tp.Union[datetime, date, str, pd.Period]):\n if isinstance(datetime_like, str):\n datetime_like = dateutil.parser.parse(datetime_like)\n if hasattr(datetime_like, 'hour'):\n time_args = (datetime_like.hour, datetime_like.minute, datetime_like.second)\n else:\n time_args = (0, 0, 0)\n return dotnet.DateTime(datetime_like.year, datetime_like.month, datetime_like.day, *time_args)", "def readDate(self):\n ms = self.stream.read_double() / 1000.0\n tz = self.stream.read_short()\n\n # Timezones are ignored\n d = datetime.datetime.utcfromtimestamp(ms)\n self.context.addObject(d)\n\n return d", "def convert_time_format(otime):\n\n save = []\n prev = 0\n for ent in otime:\n out = Chandra.Time.DateTime(ent).date\n atemp = re.split(':', out)\n\n year = int(atemp[0])\n yday = float(atemp[1])\n hh = float(atemp[2])\n mm = float(atemp[3])\n ss = float(atemp[4])\n\n yday += hh /24.0 + mm / 1440.0 + ss / 86400.0\n\n if prev == 0:\n prev = year\n save.append(yday)\n if mcf.is_leapyear(year):\n base = 366\n else:\n base = 365\n else:\n if year != prev:\n save.append(yday + base)\n else:\n save.append(yday)\n\n return [save, prev]", "def xls_datetime(num):\n return datetime.datetime(1900,1,1)+datetime.timedelta(days=num-2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the retries of this V1beta1PipelineTask.
def retries(self, retries): self._retries = retries
[ "def http_client_retry_max_retries(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"http_client_retry_max_retries\")", "def max_retries(self, max_retries):\n\n self._max_retries = max_retries", "def _retry(self):\n # TODO(dcramer): this needs to handle too-many-retries itself\n assert self.task_id\n\n task = Task.query.filter(\n Task.task_name == self.task_name,\n Task.task_id == self.task_id,\n Task.parent_id == self.parent_id,\n ).first()\n if task and self.max_retries and task.num_retries > self.max_retries:\n date_finished = datetime.utcnow()\n self._update({\n Task.date_finished: date_finished,\n Task.date_modified: date_finished,\n Task.status: Status.finished,\n Task.result: Result.failed,\n })\n db.session.commit()\n\n raise TooManyRetries('%s failed after %d retries' %\n (self.task_name, task.num_retries))\n\n self._update({\n Task.date_modified: datetime.utcnow(),\n Task.status: Status.in_progress,\n Task.num_retries: Task.num_retries + 1,\n })\n\n db.session.commit()\n\n kwargs = self.kwargs.copy()\n kwargs['task_id'] = self.task_id\n kwargs['parent_task_id'] = self.parent_id\n\n retry_number = db.session.query(Task.num_retries).filter(\n Task.task_name == self.task_name,\n Task.task_id == self.task_id,\n Task.parent_id == self.parent_id,\n ).scalar() or 0\n\n retry_countdown = min(BASE_RETRY_COUNTDOWN + (retry_number ** 2), 300)\n\n queue.delay(\n self.task_name,\n kwargs=kwargs,\n countdown=retry_countdown,\n )", "def set_retry(self, re):\n _ldns.ldns_resolver_set_retry(self,re)\n #parameters: ldns_resolver *,uint8_t,\n #retvals: ", "def retryable(self, retryable):\n\n self._retryable = retryable", "def set_retry_times(self, num):\n self._resend_times = num", "def get_num_retries(self):\n pass", "def retry_interval(self) -> Optional[str]:\n return pulumi.get(self, \"retry_interval\")", "def AddMaxRetriesFlag(parser):\n parser.add_argument(\n '--max-retries',\n type=arg_parsers.BoundedInt(lower_bound=0),\n help=(\n 'Number of times a task is allowed to restart in case of '\n 'failure before being failed permanently. This applies per-task, not '\n 'per-job. 
If set to 0, tasks will only run once and never be '\n 'retried on failure.'\n ),\n )", "def retry_policy(self) -> pulumi.Output['outputs.RetryPolicyResponse']:\n return pulumi.get(self, \"retry_policy\")", "def retry_run(task_fn, args, kwargs, options):\n\n assert 'retry_policy' in options,\\\n 'Specify `retry_policy` in `options` to retry task'\n\n log.info(u'Retrying {} with retry_policy={}'.format(\n _func_signature(task_fn), options['retry_policy']))\n\n retry_options = _update_options_for_retry(options)\n\n if not retry_options.get('retry', True):\n log.info(u'Maximum retries reached for {}'.format(\n _func_signature(task_fn)))\n\n run.apply_async(\n kwargs={\n 'task_fn': task_fn,\n 'args': args or tuple(),\n 'kwargs': kwargs or {},\n 'options': options.copy(),\n },\n **retry_options\n )", "def cluster_retry_interval(self, cluster_retry_interval):\n\n self._cluster_retry_interval = cluster_retry_interval", "def RetryTimeout(self):\n if self.force_auto_sync:\n self.get('RetryTimeout')\n return self._RetryTimeout", "def http_client_retry_enabled(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"http_client_retry_enabled\")", "def exponential_retry(self, retries, delay, condition=None):\n return exponential_retry(retries, delay, condition, self)", "def discover_retries(self, discover_retries):\n\n self._discover_retries = discover_retries", "def retry(self, task_id, force=False):\n return", "def cluster_retry_interval_multiplier(self, cluster_retry_interval_multiplier):\n\n self._cluster_retry_interval_multiplier = cluster_retry_interval_multiplier", "def http_client_retry_enabled(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"http_client_retry_enabled\")", "def getRetryCount():\n return opencue.cuebot.Cuebot.getConfig().get('cuebot.exception_retries', 3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the run_after of this V1beta1PipelineTask.
def run_after(self, run_after): self._run_after = run_after
[ "def after(self, func: Action) -> AsyncAction:\n setattr(self, 'run_after', wrap_async(func))\n return self.run_after", "def end_date_after(self, end_date_after):\n\n self._end_date_after = end_date_after", "def onRunTaskCompletedEvent(self, event):\n\n # Ignore all other events\n if event.task != 'intertest':\n return\n\n # Schedule next deployment\n self.goto(ChainedDeploymentPolicy.Deploy)", "def set_last_run(self, last_run: datetime):\n self.updater.dispatcher.bot_data['last_run'] = last_run", "def runner_on_second(self, runner_on_second):\n\n self._runner_on_second = runner_on_second", "def send_after(self, send_after):\n\n self._send_after = send_after", "def post_execute(self, output_path):\r\n self.done = True\r\n if type(self.do_after) is list:\r\n for action in self.do_after:\r\n action(self, output_path)", "def create_date_after(self, create_date_after):\n\n self._create_date_after = create_date_after", "def endof_routine_failure_analysis_date_epoch(\n self, endof_routine_failure_analysis_date_epoch):\n\n self._endof_routine_failure_analysis_date_epoch = endof_routine_failure_analysis_date_epoch", "def _after_execution(self):\n pass", "def testAfterExecute(self):\n l = TestLayer(\"test2\")\n self.assertFalse(l.afterExecuteSet)\n l.afterExecute()\n self.assertTrue(l.afterExecuteSet)", "async def on_thread_update(self, _: disnake.Thread, after: disnake.Thread) -> None:\n await self.init_task\n\n if not after.archived:\n return\n\n if await self.threads_to_bump.contains(after.id):\n await self.unarchive_threads_not_manually_archived([after])", "def delete_version_after(self, delete_version_after):\n\n self._delete_version_after = delete_version_after", "def after_invoke(self, coro: HookT) -> HookT:\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError(\"The post-invoke hook must be a coroutine.\")\n\n self._after_invoke = coro\n return coro", "def after_val_epoch(self,\n runner,\n metrics: Optional[Dict[str, float]] = None) -> None:\n\n if runner.param_schedulers is None:\n return\n\n # avoid counting scheduler._global_step\n # it has counted in after_train_* hook\n if metrics is None:\n return\n\n def step(param_schedulers):\n # check param_schedulers is list and built\n if not is_list_of(param_schedulers, _ParamScheduler):\n return\n\n for scheduler in param_schedulers:\n if (scheduler.by_epoch\n and getattr(scheduler, 'need_val_args', False)):\n scheduler.step(metrics)\n\n if isinstance(runner.param_schedulers, list):\n step(runner.param_schedulers)\n elif isinstance(runner.param_schedulers, dict):\n for param_schedulers in runner.param_schedulers.values():\n step(param_schedulers)\n else:\n raise TypeError(\n 'runner.param_schedulers should be list of ParamScheduler or '\n 'a dict containing list of ParamScheduler, '\n f'but got {runner.param_schedulers}')", "def after_execute(self, f):\n\n self.after_funcs.append(f)\n return f", "def force_run_final(self, run_id: str, history: BoboHistory):\n\n with self._lock:\n self.on_run_final(run_id=run_id,\n history=history,\n notify=False)", "def post_run(callback):\n def f(name, stop_doc):\n if name != 'stop':\n return\n uid = stop_doc['run_start']\n start = run_start_given_uid(uid)\n descriptors = descriptors_by_start(uid)\n # For convenience, I'll rely on the broker to get Events.\n header = db[uid]\n events = get_events(header)\n callback('start', start)\n for d in descriptors:\n callback('descriptor', d)\n for e in events:\n callback('event', e)\n callback('stop', stop_doc)\n return f", "def 
endof_routine_failure_analysis_date(\n self, endof_routine_failure_analysis_date):\n\n self._endof_routine_failure_analysis_date = endof_routine_failure_analysis_date" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the task_ref of this V1beta1PipelineTask.
def task_ref(self, task_ref): self._task_ref = task_ref
[ "def assignTask(self, task, document_id):\n return", "def set_task(self, task):\n self._environment.set_task(task)\n self._assertions()", "def put(self, task):\n self._input_tube.put((task,0))", "def task_uuid_gt(self, task_uuid_gt):\n\n self._task_uuid_gt = task_uuid_gt", "def resolve_task(task):\n try:\n do_update = task.pid is None\n except AttributeError:\n do_update = False\n\n return self.trace.get_task_id(task, update=do_update)", "def run_task(ctx, tasks_file, task_name, fabric_env=None,\n task_properties=None, **kwargs):\n if kwargs.get('hide_output'):\n ctx.logger.debug('`hide_output` input is not '\n 'supported for `run_task` operation')\n func = _get_task(tasks_file, task_name)\n ctx.logger.info('Running task: {0} from {1}'.format(task_name, tasks_file))\n return _run_task(ctx, func, task_properties, fabric_env)", "def __init__(self, tasks=None): # noqa: E501\n self.openapi_types = {\n 'tasks': List[Task]\n }\n\n self.attribute_map = {\n 'tasks': 'tasks'\n }\n\n self._tasks = tasks", "def switch_task(self, task_id):\n self._skuld.cmd(SkuldCmd(name='switch_task',\n args=task_id, block=False))", "def update(self, task):\n\t\tself.tasks.append(db.Text(task))\n\t\treturn self", "def tasks(self, value):\n if self._tasks:\n raise RuntimeError(\"Tasks already set!\")\n self._tasks = value", "def plot_task_activation(self, task: TaskID, **kwargs):\n return self.plot_tasks_activation(tasks=[task], **kwargs)", "def task_uuid_in(self, task_uuid_in):\n\n self._task_uuid_in = task_uuid_in", "def release_task(self, task):\n # TODO: validate if this task was really acquired by us\n assert task\n self._log(\"Remove %s from processing list\" % task.tid)\n self._etcd.delete(self._key(QUEUE_PREFIX, task.tid))\n del self._acquired_tasks[task.tid]", "def from_task(cls, task) -> 'TaskOutput':\n product_attributes = task.get_product_attributes()\n return cls(name=task.name, files=product_attributes)", "def add_task(self, task: Task):\n self.update_from_file(self.storage_path)\n task.task_id = self.next_id\n self.tasks.append(task)\n self.next_id += 1\n self.save_to_file(self.storage_path)\n return task", "def parallel(self, task: Union[MitTask, \"TaskGraph\"]):\n raise TypeError(\"MitEx.parallel forbidden.\")", "def lockTask(self, task):\n \n locked = requests.get(self.client.baseurl\n +task['stepElement'],\n auth = self.client.cred)\n eTag = locked.headers['ETag']\n locked = requests.put(self.client.baseurl\n + task['stepElement'],\n auth = self.client.cred,\n params={'action':'lock',\n 'If-Match':eTag}\n )", "def stop_task(self, task):\n raise NotImplementedError('stop_task')", "def get_task_id(task):\n return task['task_id']['value']", "def schedule(self, task, metadata=None):\n\n if self._noschedule:\n return\n\n task.errorvalue = None\n task.returnvalue = None\n task.metadata = metadata\n\n taskfilename = (str(uuid()) + '.pkl')\n with (self._directory / 'todo' / taskfilename).open('wb') as f:\n dill.dump(task, f)\n self._log('schedule', taskfilename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the task_spec of this V1beta1PipelineTask.
def task_spec(self, task_spec): self._task_spec = task_spec
[ "def set_task(self, task):\n self._environment.set_task(task)\n self._assertions()", "def post_task_spec(self, task_yaml: dict) -> dict:\n return task_yaml", "def build_task_inputs_spec(\n task_spec: pipeline_spec_pb2.PipelineTaskSpec,\n pipeline_params: List[_pipeline_param.PipelineParam],\n tasks_in_current_dag: List[str],\n is_parent_component_root: bool,\n) -> None:\n for param in pipeline_params or []:\n\n param_full_name, subvar_name = _exclude_loop_arguments_variables(param)\n input_name = additional_input_name_for_pipelineparam(param.full_name)\n\n param_name = param.name\n if subvar_name:\n task_spec.inputs.parameters[\n input_name].parameter_expression_selector = (\n 'parseJson(string_value)[\"{}\"]'.format(subvar_name))\n param_name = _for_loop.LoopArguments.remove_loop_item_base_name(\n _exclude_loop_arguments_variables(param_name)[0])\n\n if type_utils.is_parameter_type(param.param_type):\n if param.op_name and dsl_utils.sanitize_task_name(\n param.op_name) in tasks_in_current_dag:\n task_spec.inputs.parameters[\n input_name].task_output_parameter.producer_task = (\n dsl_utils.sanitize_task_name(param.op_name))\n task_spec.inputs.parameters[\n input_name].task_output_parameter.output_parameter_key = (\n param_name)\n else:\n task_spec.inputs.parameters[\n input_name].component_input_parameter = (\n param_full_name if is_parent_component_root else\n additional_input_name_for_pipelineparam(param_full_name)\n )\n else:\n if param.op_name and dsl_utils.sanitize_task_name(\n param.op_name) in tasks_in_current_dag:\n task_spec.inputs.artifacts[\n input_name].task_output_artifact.producer_task = (\n dsl_utils.sanitize_task_name(param.op_name))\n task_spec.inputs.artifacts[\n input_name].task_output_artifact.output_artifact_key = (\n param_name)\n else:\n task_spec.inputs.artifacts[\n input_name].component_input_artifact = (\n param_full_name\n if is_parent_component_root else input_name)", "def update_task_inputs_spec(\n task_spec: pipeline_spec_pb2.PipelineTaskSpec,\n parent_component_inputs: pipeline_spec_pb2.ComponentInputsSpec,\n pipeline_params: List[_pipeline_param.PipelineParam],\n tasks_in_current_dag: List[str],\n input_parameters_in_current_dag: List[str],\n input_artifacts_in_current_dag: List[str],\n) -> None:\n if not hasattr(task_spec, 'inputs'):\n return\n\n for input_name in getattr(task_spec.inputs, 'parameters', []):\n\n if task_spec.inputs.parameters[input_name].WhichOneof(\n 'kind') == 'task_output_parameter' and (\n task_spec.inputs.parameters[input_name]\n .task_output_parameter.producer_task\n not in tasks_in_current_dag):\n\n param = _pipeline_param.PipelineParam(\n name=task_spec.inputs.parameters[input_name]\n .task_output_parameter.output_parameter_key,\n op_name=task_spec.inputs.parameters[input_name]\n .task_output_parameter.producer_task)\n\n component_input_parameter = (\n additional_input_name_for_pipelineparam(param.full_name))\n\n if component_input_parameter in parent_component_inputs.parameters:\n task_spec.inputs.parameters[\n input_name].component_input_parameter = component_input_parameter\n continue\n\n # The input not found in parent's component input definitions\n # This could happen because of loop arguments variables\n param_name, subvar_name = _exclude_loop_arguments_variables(param)\n if subvar_name:\n task_spec.inputs.parameters[\n input_name].parameter_expression_selector = (\n 'parseJson(string_value)[\"{}\"]'.format(subvar_name))\n\n component_input_parameter = (\n additional_input_name_for_pipelineparam(param_name))\n\n assert 
component_input_parameter in parent_component_inputs.parameters, \\\n 'component_input_parameter: {} not found. All inputs: {}'.format(\n component_input_parameter, parent_component_inputs)\n\n task_spec.inputs.parameters[\n input_name].component_input_parameter = component_input_parameter\n\n elif task_spec.inputs.parameters[input_name].WhichOneof(\n 'kind') == 'component_input_parameter':\n\n component_input_parameter = (\n task_spec.inputs.parameters[input_name]\n .component_input_parameter)\n\n if component_input_parameter in parent_component_inputs.parameters:\n continue\n\n if additional_input_name_for_pipelineparam(\n component_input_parameter\n ) in parent_component_inputs.parameters:\n task_spec.inputs.parameters[\n input_name].component_input_parameter = (\n additional_input_name_for_pipelineparam(\n component_input_parameter))\n continue\n\n # The input not found in parent's component input definitions\n # This could happen because of loop arguments variables\n component_input_parameter, subvar_name = _exclude_loop_arguments_variables(\n component_input_parameter)\n\n if subvar_name:\n task_spec.inputs.parameters[\n input_name].parameter_expression_selector = (\n 'parseJson(string_value)[\"{}\"]'.format(subvar_name))\n\n if component_input_parameter not in input_parameters_in_current_dag:\n component_input_parameter = (\n additional_input_name_for_pipelineparam(\n component_input_parameter))\n\n if component_input_parameter not in parent_component_inputs.parameters:\n component_input_parameter = (\n additional_input_name_for_pipelineparam(\n component_input_parameter))\n assert component_input_parameter in parent_component_inputs.parameters, \\\n 'component_input_parameter: {} not found. All inputs: {}'.format(\n component_input_parameter, parent_component_inputs)\n\n task_spec.inputs.parameters[\n input_name].component_input_parameter = component_input_parameter\n\n for input_name in getattr(task_spec.inputs, 'artifacts', []):\n\n if task_spec.inputs.artifacts[input_name].WhichOneof(\n 'kind') == 'task_output_artifact' and (\n task_spec.inputs.artifacts[input_name].task_output_artifact\n .producer_task not in tasks_in_current_dag):\n\n param = _pipeline_param.PipelineParam(\n name=task_spec.inputs.artifacts[input_name].task_output_artifact\n .output_artifact_key,\n op_name=task_spec.inputs.artifacts[input_name]\n .task_output_artifact.producer_task)\n component_input_artifact = (\n additional_input_name_for_pipelineparam(param))\n assert component_input_artifact in parent_component_inputs.artifacts, \\\n 'component_input_artifact: {} not found. All inputs: {}'.format(\n component_input_artifact, parent_component_inputs)\n\n task_spec.inputs.artifacts[\n input_name].component_input_artifact = component_input_artifact\n\n elif task_spec.inputs.artifacts[input_name].WhichOneof(\n 'kind') == 'component_input_artifact':\n\n component_input_artifact = (\n task_spec.inputs.artifacts[input_name].component_input_artifact)\n\n if component_input_artifact not in input_artifacts_in_current_dag:\n component_input_artifact = (\n additional_input_name_for_pipelineparam(\n task_spec.inputs.artifacts[input_name]\n .component_input_artifact))\n assert component_input_artifact in parent_component_inputs.artifacts, \\\n 'component_input_artifact: {} not found. 
All inputs: {}'.format(\n component_input_artifact, parent_component_inputs)\n\n task_spec.inputs.artifacts[\n input_name].component_input_artifact = component_input_artifact", "def put(self, task):\n self._input_tube.put((task,0))", "def parallel(self, task: Union[MitTask, \"TaskGraph\"]):\n raise TypeError(\"MitEx.parallel forbidden.\")", "def run_tracked_task(task_spec):\n # create the context object, and initialize\n if \"context\" in task_spec:\n context = task_spec[\"context\"]\n else:\n cfg = task_spec[\"config_file\"]\n context = create_context(cfg)\n task_spec[\"context\"] = context\n\n task_id = task_spec[\"id\"]\n task_name = task_spec[\"name\"]\n job_name = task_spec[\"job_name\"]\n expt_name = task_spec[\"__tracker_experiment_name\"]\n parent_run_id = task_spec[\"__tracker_run_id\"]\n\n try:\n with start_experiment(context, expt_name, run_id=parent_run_id, nested=True):\n with start_experiment(\n context, expt_name, run_name=f\"{job_name}:{task_name}\", nested=True\n ) as _:\n\n # execute the task\n out = run_task(task_spec)\n tracker.set_tag(\"mlflow.note.content\", out.msg)\n return out\n\n except BaseException:\n msg = f\"Failed to complete task : {task_id} : Unexpected Error\"\n logger.exception(msg)\n tracker.set_tag(\"mlflow.note.content\", msg)\n return TaskStatus(\"Fail\", msg)", "def schedule(self, task, metadata=None):\n\n if self._noschedule:\n return\n\n task.errorvalue = None\n task.returnvalue = None\n task.metadata = metadata\n\n taskfilename = (str(uuid()) + '.pkl')\n with (self._directory / 'todo' / taskfilename).open('wb') as f:\n dill.dump(task, f)\n self._log('schedule', taskfilename)", "def from_task(cls, task) -> 'TaskOutput':\n product_attributes = task.get_product_attributes()\n return cls(name=task.name, files=product_attributes)", "def patch(self, task_name: str, wrap_task=False, wrap_config=False) -> firesim.Task:\n\n t = firesim.TASKS[task_name]\n mocker.patch.dict(t)\n if wrap_task:\n t['task'] = mocker.MagicMock(wraps=t['task'])\n else:\n t['task'] = mocker.MagicMock(spec_set=t['task'])\n\n if t['config']:\n if wrap_config:\n t['config'] = mocker.MagicMock(wraps=t['config'])\n else:\n t['config'] = mocker.MagicMock(spec_set=t['config'])\n\n return t", "def task_uuid_gt(self, task_uuid_gt):\n\n self._task_uuid_gt = task_uuid_gt", "def set_task_type(self, task_type):\n self._task_type = task_type", "def saveAndUnlockTask(self, task, comment = None):\n \n etag = task['ETag']\n stepEl = requests.get(self.client.baseurl\n +task['stepElement'],\n auth = self.client.cred)\n try:\n if comment:\n updatedJson = stepEl.json()\n updatedJson['systemProperties']['comment'] = comment\n self.lockTask(task)\n unlocked = requests.put(stepEl.url, auth = self.client.cred,\n params = {'action':'saveAndUnlock',\n 'If-Match':etag},\n json = updatedJson) \n else: \n unlocked = requests.put(self.client.baseurl\n + task['stepElement'],\n auth = self.client.cred,\n params={'action':'saveAndUnlock',\n 'If-Match':etag})\n \n\n for k, v in self.client.workbaskets.items():\n if task.get('queueName') in v:\n queue = self.getQueue(v.split('/')[-1])\n tasks = self.getTasks(queue)\n for newtask in tasks:\n if newtask['workObjectNumber'] == task['workObjectNumber']:\n task = newtask\n break\n except Exception as e:\n self.abort(task)\n return task", "def __init__(self, tasks=None): # noqa: E501\n self.openapi_types = {\n 'tasks': List[Task]\n }\n\n self.attribute_map = {\n 'tasks': 'tasks'\n }\n\n self._tasks = tasks", "def pop_input_from_task_spec(\n task_spec: 
pipeline_spec_pb2.PipelineTaskSpec,\n input_name: str,\n) -> None:\n task_spec.inputs.artifacts.pop(input_name)\n task_spec.inputs.parameters.pop(input_name)\n\n if task_spec.inputs == pipeline_spec_pb2.TaskInputsSpec():\n task_spec.ClearField('inputs')", "def task_uuid_not(self, task_uuid_not):\n\n self._task_uuid_not = task_uuid_not", "def assignTask(self, task, document_id):\n return", "def set_task_group(self, task_group):\n self._task_group = task_group", "def update(self, task):\n with self.cursor() as cursor:\n cursor.execute(\"UPDATE TASK SET NAME = ?, DESCRIPTION = ?\"\n \"WHERE TASK = ?\",\n (task.name, task.description, task.task_id))", "def update(self, task):\n\t\tself.tasks.append(db.Text(task))\n\t\treturn self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the workspaces of this V1beta1PipelineTask.
def workspaces(self, workspaces): self._workspaces = workspaces
[ "def set_workspace(self, ws):\n if len(ws) == 0:\n self._g.set_workspace(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)\n else:\n if len(ws) == 4:\n self._g.set_workspace(ws[0], ws[1], 0.0, ws[2], ws[3], 0.0)\n else:\n if len(ws) == 6:\n self._g.set_workspace(ws[0], ws[1], ws[2], ws[3], ws[4], ws[5])\n else:\n raise MoveItCommanderException(\n \"Expected 0, 4 or 6 values in list specifying workspace\"\n )", "def set_workspaces(self):\n for fn in os.listdir(paths.wkps):\n fn_observed, ext_observed = os.path.splitext(fn)\n if ext_observed.lower() == sppasWorkspaces.ext:\n # remove path and extension to set the name of the workspace\n wkp_name = os.path.basename(fn_observed)\n # append in the list\n self.__wkps.append(wkp_name)\n logging.debug('Founded workspace {:s}'.format(wkp_name))", "def taskmanagers(self, taskmanagers):\n\n self._taskmanagers = taskmanagers", "def setWorkingSpace(self, space: 'SbMatrix') -> \"void\":\n return _coin.SbSphereProjector_setWorkingSpace(self, space)", "def setWorkingSpace(self, space: 'SbMatrix') -> \"void\":\n return _coin.SbProjector_setWorkingSpace(self, space)", "def getWorkspaces(self) -> List[ghidra.framework.model.Workspace]:\n ...", "def set_workdir(self, workdir):\n\n self.workdir = workdir\n for inpt in self.inputs:\n inpt.workdir = workdir\n\n for outpt in self.outputs:\n outpt.workdir = workdir", "def _put_workspace(key, workspace):\n _WORKSPACES[key] = workspace", "def setWorkingSpace(self, space: 'SbMatrix') -> \"void\":\n return _coin.SbCylinderProjector_setWorkingSpace(self, space)", "def execute_workspaces(command):\n\tclient = boto3.client('workspaces')\n\tresponse = client.describe_workspaces()['Workspaces']\n\n\t# get WorkspacesIds\n\tworkspaceIds = [ {'WorkspaceId': workspace['WorkspaceId']} for workspace in response if workspace['WorkspaceId'] not in WORKSPACES_EXCEPTION ]\n\n\tif command == 'start':\n\t\tresponse = client.start_workspaces(StartWorkspaceRequests=workspaceIds)\n\t\n\telif command == 'stop':\n\t\tresponse = client.stop_workspaces(StopWorkspaceRequests=workspaceIds)", "def _get_workspaces(self) -> \"adsk::core::Ptr< adsk::core::WorkspaceList >\" :\n return _core.Product__get_workspaces(self)", "def create_workspace_with_http_info(self, account_id, **kwargs):\n\n all_params = ['account_id', 'workspace']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_workspace\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'account_id' is set\n if ('account_id' not in params) or (params['account_id'] is None):\n raise ValueError(\"Missing the required parameter `account_id` when calling `create_workspace`\")\n\n\n collection_formats = {}\n\n resource_path = '/v2.1/accounts/{accountId}/workspaces'.replace('{format}', 'json')\n path_params = {}\n if 'account_id' in params:\n path_params['accountId'] = params['account_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'workspace' in params:\n body_params = params['workspace']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'POST',\n 
path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Workspace',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def tasks(self, value):\n if self._tasks:\n raise RuntimeError(\"Tasks already set!\")\n self._tasks = value", "def runMP(cls, taskname, tasks, wprocess, chunksiz, doMP):\n if doMP:\n from multiprocessing import (Process, Queue, cpu_count)\n Q = Queue()\n wloads = cls.getWorkloads(tasks,\n maxProcessces = cpu_count(),\n chunksiz = chunksiz)\n\n # debug(\"workloads '{}' {}: {}\"\n # .format(taskname, len(wloads), map(len,wloads)))\n\n workers = [Process(target = wprocess,\n args = (wl, Q)) for wl in wloads]\n for w in workers: w.start()\n wrs = []\n for _ in workers: wrs.extend(Q.get())\n else:\n wrs = wprocess(tasks, Q = None)\n\n return wrs", "def setWorkspace(self, workspaceName):\n if not self.contextHelper.isAccessibleWorkspaceName(workspaceName):\n raise Exception('Specified workspace not valid for your credentials')\n self.contextHelper.setWorkspace(workspaceName)", "def getWorkloads(cls, tasks, maxProcessces, chunksiz):\n assert len(tasks) >= 1, tasks\n assert maxProcessces >= 1, maxProcessces\n assert chunksiz >= 1, chunksiz\n\n # Determine # of processes\n ntasks = len(tasks)\n nprocesses = int(round(ntasks/float(chunksiz)))\n if nprocesses > maxProcessces:\n nprocesses = maxProcessces\n\n # Determine workloads \n cs = int(round(ntasks/float(nprocesses)))\n wloads = []\n for i in range(nprocesses):\n s = i*cs\n e = s+cs if i < nprocesses-1 else ntasks\n wl = tasks[s:e]\n if wl: # Could be 0, e.g., getWorkloads(range(12),7,1)\n wloads.append(wl)\n\n return wloads", "def setup_batch_file():\n print(f'Setting up {SET_SPACE_FILENAME}')\n batch_file = get_workspace_batch_file_location()\n scripts_root = get_project_root()\n\n scripts_env = f\"SET WORKSPACE_SCRIPTS={scripts_root}\"\n\n with open(batch_file, 'w') as file:\n file.write(scripts_env)", "def replace_workspace_inputs(self,\n w_id: str,\n t_id: str,\n *,\n env_values: List[object] = None,\n values: str = None,\n variablestore: List['WorkspaceVariableRequest'] = None,\n **kwargs\n ) -> DetailedResponse:\n\n if w_id is None:\n raise ValueError('w_id must be provided')\n if t_id is None:\n raise ValueError('t_id must be provided')\n if variablestore is not None:\n variablestore = [convert_model(x) for x in variablestore]\n headers = {}\n sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,\n service_version='V1',\n operation_id='replace_workspace_inputs')\n headers.update(sdk_headers)\n\n data = {\n 'env_values': env_values,\n 'values': values,\n 'variablestore': variablestore\n }\n data = {k: v for (k, v) in data.items() if v is not None}\n data = json.dumps(data)\n headers['content-type'] = 'application/json'\n\n if 'headers' in kwargs:\n headers.update(kwargs.get('headers'))\n headers['Accept'] = 'application/json'\n\n path_param_keys = ['w_id', 't_id']\n path_param_values = self.encode_path_vars(w_id, t_id)\n path_param_dict = dict(zip(path_param_keys, path_param_values))\n url = '/v1/workspaces/{w_id}/template_data/{t_id}/values'.format(**path_param_dict)\n request = self.prepare_request(method='PUT',\n url=url,\n headers=headers,\n data=data)\n\n response = self.send(request)\n return 
response", "def sync(self):\n request = self.workspaces_service.sync(path=self.path)\n return request.execute()", "def WSL(self, wavegens, tables=None):\n debug('GCSCommands.WSL(wavegens=%r, tables=%r)', wavegens, tables)\n wavegens, tables = getitemsvaluestuple(wavegens, tables)\n cmdstr = self.__getcmdstr('WSL', wavegens, tables)\n self.__msgs.send(cmdstr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[Lib] Item acquisition dialog
def talk_m10_34_x15(lot1=_, flag2=_):
    """State 0,1: Item acquisition dialog: Display"""
    SetEventFlag(flag2, 1)
    AwardItem(lot1, 1)
    assert ItemAwardDisplay() != 0
    """State 2: Item acquisition dialog: Wait"""
    assert ItemAwardDisplay() != 1
    """State 3: End state"""
    return 0
[ "def dialog_handler_cb(self, item, data) -> None:\n # Dialog box initialization event\n if item == KDialogInitEvent:\n vs.SetItemText(self.dialog, self.kWidgetID_fileName, self.parameters.excelFileName)\n # vs.SetItemText(self.dialog, self.kWidgetID_imageFolderName, self.settings.imageFolderName)\n\n vs.ShowItem(self.dialog, self.kWidgetID_excelSheetNameLabel, False)\n vs.ShowItem(self.dialog, self.kWidgetID_excelSheetName, False)\n self.show_parameters(False)\n\n vs.EnableItem(self.dialog, self.kWidgetID_importButton, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importNewCount, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importUpdatedCount, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importDeletedCount, False)\n\n elif item == self.kWidgetID_fileName:\n self.parameters.excelFileName = vs.GetItemText(self.dialog, self.kWidgetID_fileName)\n\n elif item == self.kWidgetID_fileBrowseButton:\n result, self.parameters.excelFileName = vs.GetFileN(\"Open Excel file\", \"\", \"xlsm\")\n if result:\n vs.SetItemText(self.dialog, self.kWidgetID_fileName, self.parameters.excelFileName)\n\n elif item == self.kWidgetID_excelSheetName:\n new_excel_sheet_name = vs.GetChoiceText(self.dialog, self.kWidgetID_excelSheetName, data)\n if self.parameters.excelSheetName != new_excel_sheet_name:\n self.parameters.excelSheetName = new_excel_sheet_name\n self.show_parameters(False)\n if data != 0:\n self.show_parameters(True)\n\n elif item == self.kWidgetID_withImageSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withImage, data == 0)\n self.parameters.withImageSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withImageSelector, data)\n elif item == self.kWidgetID_withImage:\n self.parameters.pictureParameters.withImage = \"{}\".format(data != 0)\n # elif item == self.kWidgetID_imageFolderName:\n # self.settings.imageFolderName = vs.GetItemText(\n # self.dialog, self.kWidgetID_imageFolderName)\n # elif item == self.kWidgetID_imageFolderBrowseButton:\n # result, self.settings.imageFolderName = vs.GetFolder(\"Select the images folder\")\n # if result == 0:\n # vs.SetItemText(self.dialog, self.kWidgetID_imageFolderName, self.settings.imageFolderName)\n elif item == self.kWidgetID_imageTextureSelector:\n self.parameters.imageTextureSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withImageSelector, data)\n elif item == self.kWidgetID_imageWidthSelector:\n self.parameters.imageWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imageWidthSelector, data)\n elif item == self.kWidgetID_imageHeightSelector:\n self.parameters.imageHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imageHeightSelector, data)\n elif item == self.kWidgetID_imagePositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_imagePosition, data == 0)\n self.parameters.imagePositionSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imagePositionSelector, data)\n elif item == self.kWidgetID_imagePosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_imagePosition, 3)\n if valid:\n self.parameters.pictureParameters.imagePosition = str(value)\n elif item == self.kWidgetID_withFrameSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withFrame, data == 0)\n self.parameters.withFrameSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withFrameSelector, data)\n elif item == self.kWidgetID_withFrame:\n self.parameters.pictureParameters.withFrame = \"{}\".format(data != 0)\n elif item == self.kWidgetID_frameWidthSelector:\n self.parameters.frameWidthSelector = 
vs.GetChoiceText(self.dialog, self.kWidgetID_frameWidthSelector, data)\n elif item == self.kWidgetID_frameHeightSelector:\n self.parameters.frameHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameHeightSelector, data)\n elif item == self.kWidgetID_frameThicknessSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameThickness, data == 0)\n self.parameters.frameThicknessSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameThicknessSelector, data)\n elif item == self.kWidgetID_frameThickness:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameThickness, 3)\n if valid:\n self.parameters.pictureParameters.frameThickness = str(value)\n elif item == self.kWidgetID_frameDepthSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepth, data == 0)\n self.parameters.frameDepthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameDepthSelector, data)\n elif item == self.kWidgetID_frameDepth:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameDepth, 3)\n if valid:\n self.parameters.pictureParameters.frameDepth = str(value)\n elif item == self.kWidgetID_frameClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameClass, data == 0)\n self.parameters.frameClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameClassSelector, data)\n elif item == self.kWidgetID_frameClass:\n index, self.parameters.pictureParameters.frameClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_frameClass, 0)\n elif item == self.kWidgetID_frameTextureScaleSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScale, data == 0)\n self.parameters.frameTextureScaleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameTextureScaleSelector, data)\n elif item == self.kWidgetID_frameTextureScale:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameTextureScale, 1)\n if valid:\n self.parameters.pictureParameters.frameTextureScale = str(value)\n elif item == self.kWidgetID_frameTextureRotationSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotation, data == 0)\n self.parameters.frameTextureRotationSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameTextureRotationSelector, data)\n elif item == self.kWidgetID_frameTextureRotation:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameTextureRotation, 1)\n if valid:\n self.parameters.pictureParameters.frameTextureRotation = str(value)\n elif item == self.kWidgetID_withMatboardSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withMatboard, data == 0)\n self.parameters.withMatboardSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withMatboardSelector, data)\n elif item == self.kWidgetID_withMatboard:\n self.parameters.pictureParameters.withMatboard = \"{}\".format(data != 0)\n elif item == self.kWidgetID_matboardPositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPosition, data == 0)\n self.parameters.matboardPositionSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardPositionSelector, data)\n elif item == self.kWidgetID_windowWidthSelector:\n self.parameters.windowWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_windowWidthSelector, data)\n elif item == self.kWidgetID_windowHeightSelector:\n self.parameters.windowHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_windowHeightSelector, data)\n elif item == self.kWidgetID_matboardPosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardPosition, 3)\n if valid:\n 
self.parameters.pictureParameters.matboardPosition = str(value)\n elif item == self.kWidgetID_matboardClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClass, data == 0)\n self.parameters.matboardClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardClassSelector, data)\n elif item == self.kWidgetID_matboardClass:\n index, self.parameters.pictureParameters.matboardClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_matboardClass, 0)\n elif item == self.kWidgetID_matboardTextureScaleSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScale, data == 0)\n self.parameters.matboardTextureScaleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardTextureScaleSelector, data)\n elif item == self.kWidgetID_matboardTextureScale:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardTextureScale, 1)\n if valid:\n self.parameters.pictureParameters.matboardTextureScale = str(value)\n elif item == self.kWidgetID_matboardTextureRotatSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotat, data == 0)\n self.parameters.matboardTextureRotatSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardTextureRotatSelector, data)\n elif item == self.kWidgetID_matboardTextureRotat:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardTextureRotat, 1)\n if valid:\n self.parameters.pictureParameters.matboardTextureRotat = str(value)\n elif item == self.kWidgetID_withGlassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withGlass, data == 0)\n self.parameters.withGlassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withGlassSelector, data)\n elif item == self.kWidgetID_withGlass:\n self.parameters.pictureParameters.withGlass = \"{}\".format(data != 0)\n elif item == self.kWidgetID_glassPositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_glassPosition, data == 0)\n self.parameters.glassPositionSelector = vs.GetChoiceText(\n self.dialog, self.kWidgetID_glassPositionSelector, data)\n elif item == self.kWidgetID_glassPosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_glassPosition, 3)\n if valid:\n self.parameters.pictureParameters.glassPosition = str(value)\n elif item == self.kWidgetID_glassClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_glassClass, data == 0)\n self.parameters.glassClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_glassClassSelector, data)\n elif item == self.kWidgetID_glassClass:\n index, self.parameters.pictureParameters.glassClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_glassClass, 0)\n elif item == self.kWidgetID_excelCriteriaSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_excelCriteriaValue, data != 0)\n new_excel_criteria_selector = vs.GetChoiceText(self.dialog, self.kWidgetID_excelCriteriaSelector, data)\n if new_excel_criteria_selector != self.parameters.excelCriteriaSelector:\n self.parameters.excelCriteriaSelector = new_excel_criteria_selector\n self.update_criteria_values(False)\n if data != 0:\n self.update_criteria_values(True)\n else:\n index = vs.GetChoiceIndex(self.dialog, self.kWidgetID_excelCriteriaValue, self.parameters.excelCriteriaValue)\n if index == -1:\n vs.SelectChoice(self.dialog, self.kWidgetID_excelCriteriaValue, 0, True)\n self.parameters.excelCriteriaValue = \"Select a value ...\"\n else:\n vs.SelectChoice(self.dialog, self.kWidgetID_excelCriteriaValue, index, True)\n elif item == self.kWidgetID_excelCriteriaValue:\n self.parameters.excelCriteriaValue = 
vs.GetChoiceText(self.dialog, self.kWidgetID_excelCriteriaValue, data)\n elif item == self.kWidgetID_symbolCreateSymbol:\n self.parameters.symbolCreateSymbol = \"{}\".format(data != 0)\n selector_index = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_symbolFolderSelector, 0)\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolderSelector, data)\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolder, selector_index == 0 and data == 1)\n elif item == self.kWidgetID_symbolFolderSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolder, data == 0)\n self.parameters.symbolFolderSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_symbolFolderSelector, data)\n elif item == self.kWidgetID_classAssignPictureClass:\n self.parameters.classAssignPictureClass = \"{}\".format(data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClassSelector, data == 1)\n selector_index = vs.GetPopUpChoiceIndex(self.dialog, self.kWidgetID_classPictureClassSelector, self.parameters.classClassPictureSelector)\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClass, selector_index == 0 and data != 0)\n elif item == self.kWidgetID_classPictureClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClass, data == 0)\n self.parameters.classClassPictureSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_classPictureClassSelector, data)\n elif item == self.kWidgetID_classPictureClass:\n index, self.parameters.pictureParameters.pictureClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_classPictureClass, 0)\n elif item == self.kWidgetID_classCreateMissingClasses:\n self.parameters.createMissingClasses = \"{}\".format(data == 1)\n elif item == self.kWidgetID_metaImportMetadata:\n self.parameters.metaImportMetadata = \"{}\".format(data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkTitleSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorNameSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkCreationDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkMediaSelector, data == 1)\n # vs.EnableItem(self.dialog, self.kWidgetID_metaTypeSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaRoomLocationSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkSourceSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaRegistrationNumberSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorBirthCountrySelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorBirthDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorDeathDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaDesignNotesSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaExhibitionMediaSelector, data == 1)\n elif item == self.kWidgetID_metaArtworkTitleSelector:\n self.parameters.metaArtworkTitleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkTitleSelector, data)\n elif item == self.kWidgetID_metaAuthorNameSelector:\n self.parameters.metaAuthorNameSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorNameSelector, data)\n elif item == self.kWidgetID_metaArtworkCreationDateSelector:\n self.parameters.metaArtworkCreationDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkCreationDateSelector, data)\n elif item == self.kWidgetID_metaArtworkMediaSelector:\n self.parameters.metaArtworkMediaSelector = vs.GetChoiceText(self.dialog, 
self.kWidgetID_metaArtworkMediaSelector, data)\n # elif item == self.kWidgetID_metaTypeSelector:\n # self.parameters.metaTypeSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaTypeSelector, data)\n elif item == self.kWidgetID_metaRoomLocationSelector:\n self.parameters.metaRoomLocationSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaRoomLocationSelector, data)\n elif item == self.kWidgetID_metaArtworkSourceSelector:\n self.parameters.metaArtworkSourceSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkSourceSelector, data)\n elif item == self.kWidgetID_metaRegistrationNumberSelector:\n self.parameters.metaRegistrationNumberSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaRegistrationNumberSelector, data)\n elif item == self.kWidgetID_metaAuthorBirthCountrySelector:\n self.parameters.metaAuthorBirthCountrySelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorBirthCountrySelector, data)\n elif item == self.kWidgetID_metaAuthorBirthDateSelector:\n self.parameters.metaAuthorBirthDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorBirthDateSelector, data)\n elif item == self.kWidgetID_metaAuthorDeathDateSelector:\n self.parameters.metaAuthorDeathDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorDeathDateSelector, data)\n elif item == self.kWidgetID_metaDesignNotesSelector:\n self.parameters.metaDesignNotesSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaDesignNotesSelector, data)\n elif item == self.kWidgetID_metaExhibitionMediaSelector:\n self.parameters.metaExhibitionMediaSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaExhibitionMediaSelector, data)\n elif item == self.kWidgetID_importIgnoreErrors:\n self.parameters.importIgnoreErrors = \"{}\".format(data != 0)\n vs.ShowItem(self.dialog, self.kWidgetID_importErrorCount, data == 0)\n elif item == self.kWidgetID_importIgnoreExisting:\n self.parameters.importIgnoreExisting = \"{}\".format(data != 0)\n elif item == self.kWidgetID_importIgnoreUnmodified:\n self.parameters.importIgnoreUnmodified = \"{}\".format(data != 0)\n elif item == self.kWidgetID_importButton:\n self.import_pictures()\n vs.SetItemText(self.dialog, self.kWidgetID_importNewCount, \"New Pictures: {}\".format(self.importNewCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importUpdatedCount, \"Updated Pictures: {}\".format(self.importUpdatedCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importDeletedCount, \"Deleted Pictures: {}\".format(self.importDeletedCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importErrorCount, \"Error Pictures: {}\".format(self.importErrorCount))\n\n # This section handles the following cases:\n # - The Dialog is initializing\n # - The name of the workbook file has changed\n if item == self.kWidgetID_fileName or item == self.kWidgetID_fileBrowseButton or item == KDialogInitEvent:\n self.set_workbook()\n\n # The image selection has changed\n if item == self.kWidgetID_withImageSelector or item == self.kWidgetID_withImage or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withImageSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withImage) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_imageWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageHeightSelector, state)\n 
vs.EnableItem(self.dialog, self.kWidgetID_imagePositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageTextureLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageTextureSelector, state)\n\n # The frame selection has changed\n if item == self.kWidgetID_withFrameSelector or item == self.kWidgetID_withFrame or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withFrameSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withFrame) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_frameWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThicknessLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThicknessSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThickness, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepth, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClass, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScaleLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScaleSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScale, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotationLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotationSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotation, state)\n\n # The matboard selection has changed\n if item == self.kWidgetID_withMatboardSelector or item == self.kWidgetID_withMatboard or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withMatboardSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withMatboard) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_windowWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClass, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScaleLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScaleSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScale, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotatLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotatSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotat, 
state)\n\n # The glass selection has changed\n if item == self.kWidgetID_withGlassSelector or item == self.kWidgetID_withGlass or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withGlassSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withGlass) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_glassPositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassPositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassPosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClass, state)\n\n # After the event has been handled, update some of the import validity settings accordingly\n self.parameters.imageValid = ((self.parameters.withImageSelector == \"-- Manual\" and self.parameters.pictureParameters.withImage == \"True\") or\n self.parameters.withImageSelector != \"-- Manual\") and \\\n (self.parameters.imageTextureSelector != \"-- Select column ...\") and \\\n (self.parameters.imageWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.imageHeightSelector != \"-- Select column ...\")\n\n self.parameters.frameValid = ((self.parameters.withFrameSelector == \"-- Manual\" and self.parameters.pictureParameters.withFrame == \"True\") or\n self.parameters.withFrameSelector != \"-- Manual\") and \\\n (self.parameters.frameWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.frameHeightSelector != \"-- Select column ...\")\n\n self.parameters.matboardValid = ((self.parameters.withMatboardSelector == \"-- Manual\" and self.parameters.pictureParameters.withMatboard == \"True\") or\n self.parameters.withMatboardSelector != \"-- Manual\") and \\\n (self.parameters.windowWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.windowHeightSelector != \"-- Select column ...\")\n\n self.parameters.glassValid = ((self.parameters.withGlassSelector == \"-- Manual\" and\n self.parameters.pictureParameters.withGlass == \"True\") or self.parameters.withGlassSelector != \"-- Manual\")\n\n self.parameters.criteriaValid = \\\n (self.parameters.excelCriteriaSelector != \"-- Select column ...\" and self.parameters.excelCriteriaValue != \"Select a value ...\")\n\n self.parameters.importValid = (self.parameters.imageValid or self.parameters.frameValid) and self.parameters.criteriaValid\n\n vs.EnableItem(self.dialog, self.kWidgetID_importButton, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importNewCount, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importUpdatedCount, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importDeletedCount, self.parameters.importValid)", "def item_menu(room, bundle):\n\n clear()\n print(\"***\", bundle, \"***\")\n print(\"Select the item you've donated to the Community Center\")\n print('-' * 55)\n user_selected_item = show_menu(get_names(get_bundle_items(room, bundle)), allow_cancellation=False)\n while user_selected_item is not None:\n bundle_items = get_bundle_items(room, bundle)\n for item in bundle_items:\n if user_selected_item == item.name:\n print(f\"You've donated the {item.name}\")\n time.sleep(1)\n clear()\n print(\"***\", bundle, \"***\")\n print(\"Select the item you've donated to the Community Center\")\n print('-' * 55)\n user_selected_item = show_menu(get_names(get_bundle_items(room, bundle)), 
allow_cancellation=False)\n else:\n room_menu()", "def item_select(self):\r\n layout = [[sg.Text(\"Inventory:\", border_width=0)]]\r\n for i, item in enumerate(self.player.inventory):\r\n layout.append([sg.Button(item, key=i, size=(10, 1), border_width=0)])\r\n layout.append([sg.Button(\"Exit\", key=\"EXIT\", size=(10, 1), button_color=(\"#edf2ce\", \"#444444\"))])\r\n window = sg.Window(\"Inventory Viewer\", layout, size=(200, 500), element_justification='c')\r\n choice = window.read()[0]\r\n window.close()\r\n if choice is None:\r\n return None\r\n if choice == \"EXIT\":\r\n return None\r\n else:\r\n print(choice)\r\n return self.player.inventory[choice]", "def callbackFunc(event): # this function used to get selected item from the combo box and load into oid i/p box\r\n choice = quality_combo.get()\r\n choice = int((choice.strip())[0])\r\n\r\n oid.delete(0,1)\r\n oid.insert(0, choice)", "def littleDialog():\r\n psm = uno.getComponentContext().ServiceManager\r\n dp = psm.createInstance(\"com.sun.star.awt.DialogProvider\")\r\n dlg = dp.createDialog(\"vnd.sun.star.script:Standard.Dialog1?location=application\")\r\n dlg.execute()\r\n return None", "def select_stock_fr_gui(self):\n import pyET_tools.easygui as gui", "def _show_edit_library_dialog(self):\n selected_item = self.library_list_view.selected_item()\n self.library_widget = CreateLibraryDialog(mode=\"update\", parent=self)\n self.library_widget.update.connect(self.update_library)\n if not self.user.library_permission:\n self.library_widget.exec_button.setHidden(True)\n self.library_widget.set_name(selected_item.name)\n self.library_widget.set_type(selected_item.type)\n self.library_widget.set_windows_path(selected_item.windows_path)\n self.library_widget.set_linux_path(selected_item.linux_path)\n self.library_widget.set_mac_path(selected_item.mac_path)\n self.library_widget.set_icon_path(selected_item.icon_path)\n self.library_widget.set_description(selected_item.description)\n self.library_widget.exec_()", "def btn2_on(self):\n\n dialog = NewSupplier(self)\n self.dialogs.append(dialog)\n dialog.show()", "async def take_item(self, item_name: str):", "def popup(self, index, dummy):\n mylogger.debug(\"multi-selector form popup(%d) invoked\",index)\n self.dialog = Selector_Form(index)\n mylogger.debug(\"dialog is type %s\", type(self.dialog))\n self.dialog.setupUi(self.button_text, label_default=\"Port\", cols=2)\n self.dialog.setWindowTitle(\"IF \"+str(index))\n self.dialog.show()\n self.dialog.signal.stateChanged.connect(\n slotgen(index,self.update_selector))\n mylogger.debug(\"multi-selector form popup(%d) completed\",index)", "def btn3_on(self):\n\n dialog = NewOrder(self)\n self.dialogs.append(dialog)\n dialog.show()", "def node_choose_use_item(caller, raw_string, **kwargs):\n text = \"Select the item\"\n action_dict = kwargs[\"action_dict\"]\n\n options = [\n {\n \"desc\": item.get_display_name(caller),\n \"goto\": (\n _step_wizard,\n {**kwargs, **{\"action_dict\": {**action_dict, **{\"item\": item}}}},\n ),\n }\n for item in caller.equipment.get_usable_objects_from_backpack()\n ]\n if not options:\n text = \"There are no usable items in your inventory!\"\n\n options.extend(_get_default_wizard_options(caller, **kwargs))\n return text, options", "def btn5_on(self):\n\n dialog = EmailSupplier(self)\n self.dialogs.append(dialog)\n dialog.show()", "def __Action_editSong__(self):\r\n dialog = dialogSongEdit.SongEditWindow(MpGlobal.Window)\r\n\r\n dialog.initData(self.getSelection())\r\n \r\n dialog.exec_()\r\n \r\n del dialog", "def 
on_book_ok_clicked(self, obj):\n if self.book.item_list:\n BookDialog(self.dbstate, self.uistate,\n self.book, BookOptions)\n else:\n WarningDialog(_('No items'), _('This book has no items.'))\n return\n self.close()", "def _new_item(self, item: \"RegistryDetailsFreespaceMenu.Items\") -> None:\r\n self.callbacks[self.Events.NEW_ITEM](item)", "def open(self) -> None:\n self.selected_item = -1", "def showTextSettingsDialog(self, item):\n TextSettingsDialog(item, parent=self).exec_()", "def pick_up_item(self, key):\n\t\tself.player.begin_pick_up_item()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Number of edges between two node sets. nbunches should be SETS for efficiency. If nbunch1==nbunch2, return double the number of edges.
def n_edges_between(g, nbunch1, nbunch2): n = 0 #nbunch2 = set(nbunch2) if len(nbunch1) > len(nbunch2): nbunch1, nbunch2 = nbunch2, nbunch1 for n1 in nbunch1: for neighbor in g.adj[n1]: if neighbor in nbunch2: n += 1 return n
[ "def numConnectedEdges(*args, **kwargs):\n \n pass", "def testNumberEdges(g1, g2):\n return len(g1.edges) == len(g2.edges)", "def number_of_edges(self) -> int:\n count = 0\n for vertice in self.__graph:\n count += len(self.__graph[vertice])\n return count // 2", "def get_num_edges(graph):\n return sum(cell for row in graph for cell in row) // 2", "def test_the_number_of_edges(self):\n num_vertices = len(self.mst.vertices)\n num_edges = len(self.mst.graph_edges)\n\n self.assertEqual(num_vertices-1, num_edges)", "def number_of_loops_graph(self) -> int:\n\n count = 0\n for vertice in self.__graph:\n count += self.number_of_loops_vertice(vertice)\n\n return count", "def getNumEdges(self): \n return self.__E", "def n_sets(self):\n return self._sets_count()", "def different_edges(graph1, graph2):\n edgesG1 = get_sorted_edges(graph1)\n edgesG2 = get_sorted_edges(graph2)\n count = 0\n\n for edge in edgesG1:\n if edge not in edgesG2:\n count += 1\n\n percentage = count / len(edgesG1)\n\n return percentage", "def check_two_edge_sets_are_identical(edges1, edges2, is_directed):\n if is_directed:\n C1 = Counter(map(frozenset, edges1))\n C2 = Counter(map(frozenset, edges2))\n else:\n C1 = Counter(map(tuple, edges1))\n C2 = Counter(map(tuple, edges2))\n check_counters_agree(C1, C2)", "def n_edges(self):\n if self._prop_vals:\n return self.n_syns\n else:\n return np.count_nonzero(self.nsyn_table)", "def same_edges(graph1, graph2):\n\n edgesG1 = get_sorted_edges(graph1)\n edgesG2 = get_sorted_edges(graph2)\n count = 0\n for edge in edgesG1:\n if edge in edgesG2:\n count += 1\n\n percentage = count / len(edgesG2)\n\n return percentage", "def number_of_nodes(self):\n\t\treturn number_of_nodes(self.network)", "def test_ipv6network_num_addresses(self):\n n = 10**6\n net = ip.IPv6Network('1:2:3:4::/120')\n time1, result1 = timefn(n, lambda: net.num_addresses)\n enet = eip.IPv6Network('1:2:3:4::/120')\n time2, result2 = timefn(n, lambda: enet.num_addresses)\n results = (time1, result1), (time2, result2)\n self.report_6n.report(fn_name(), n, results, net)", "def numSets(self):\n return self.sets", "def NumberOfSets(points):\n\tcount = 0\n\tfor pointset in points:\n\t\tif pointset[0] is not None and pointset[1] is not None:\n\t\t\tcount += 1\n\n\treturn count", "def network_number_nodes(g):\n number_nodes = g.number_of_nodes()\n return number_nodes", "def num_selected_edges(df):\n\n # Count number of edges going from and to defined proteins\n number_of_selected_edges = len(df.index.get_level_values(0))\n\n return number_of_selected_edges", "def link_count(self):\n return int(sum([len(self._net[n].values()) for n in self._net]) / 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For a networkx graph, add all 'nodes' to 'cmtyID'.
def cmtyAddFromList(g, cmtyID, nodes): #print cmtyID, nodes #for n in g.nbunch_iter(nodes): for n in nodes: g.node[n]['cmtys'].add(cmtyID)
[ "def from_nodecmtys(cls, nodecmtys, **kwargs):\n cmtynodes = { }\n for n, c in nodecmtys.iteritems():\n cmtynodes.setdefault(c, set()).add(n)\n return cls.from_dict(cmtynodes=cmtynodes, **kwargs)", "def nodecmtys(self):\n if hasattr(self, '_ncmtys'):\n return self._ncmtys\n nodecmtys = { }\n for c, nodes in self.iteritems():\n for n in nodes:\n nodecmtys.setdefault(n, set())\n nodecmtys[n].add(c)\n self._ncmtys = nodecmtys\n return nodecmtys", "def add_nodes(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"\\n%%%%%%%%%% ADDING NODES %%%%%%%%%%%%%\\n\\n\")\n\t\t\ti = 0\n\t\t\tfor v in self.G.nodes:\n\t\t\t\tf.write('\\t\\\\Vertex[x={}, y={}]{{{}}}\\n'.format(round(self.factor*v.x, 3), round(self.factor*v.y, 3), i))\n\t\t\t\t\n\t\t\t\tself.vtoid[v] = i\t\t\t\t\n\t\t\t\t\n\t\t\t\ti += 1", "def load_networkx_online(self, g, attrnameset='cmtys', type_=None, \n clear=True):\n #from fitz import interact ; interact.interact('x')\n if clear:\n for node, data in g.nodes_iter(data=True):\n #data.pop(attrnameset, None)\n data[attrnameset] = set()\n nodes = g.node\n for cname, cnodes in self.iteritems():\n #print cname, cnodes\n for node in cnodes:\n #g.node[node].setdefault(attrnameset, set()).add(cname)\n g.node[node][attrnameset].add(cname)", "def from_networkx(cls, g):\n nodes = set(g.nodes_iter())\n cmtynodes = { }\n for node, d in g.nodes_iter(data=True):\n for c in pcd.nxutil._iterCmtys(d):\n if c not in cmtynodes: cmtynodes[c] = set()\n cmtynodes[c].add(node)\n return cls.from_dict(cmtynodes=cmtynodes, nodes=nodes)", "def setNodeIds(self, *args):\n return _coin.SoColorPacker_setNodeIds(self, *args)", "def cmtynodes(self, copy=False):\n if not copy:\n return self._cmtynodes\n else:\n cmtynodes = { }\n for c, nodes in self._cmtynodes.iteritems():\n cmtynodes[c] = set(nodes)\n return cmtynodes", "def transfer_node_sets(self):\n self.train_nodes = torch.LongTensor(self.train_nodes).to(self.device)\n self.test_nodes = torch.LongTensor(self.test_nodes).to(self.device)", "def add_edges(self):\n for node_value in self.node_dict.values():\n for prefix_key, prefix_value in self.node_prefix.items():\n if node_value.get_suffix() == prefix_value \\\n and node_value.get_node_sequence() != prefix_key:\n node_value.add_connected_nodes(prefix_key)", "def createNodes(self, node_set: set):\n for n in node_set:\n self.addNode(n)", "def add_connected_nodes(self, conn_node):\n self.connected_nodes.append(conn_node)", "def _set_dependencies_on_new_nodes(self):\n if not self._new_node_ids:\n # There are no new nodes...\n return\n\n # We copy the collection of new node IDs, as the act of setting up\n # the dependencies may cause new nodes to be added in a re-entrant way...\n node_ids = self._new_node_ids.copy()\n self._new_node_ids.clear()\n\n for node_id in node_ids:\n # We check that the node is in the graph. It is possible\n # that is was added and removed before this function got\n # called...\n if node_id in self._nodes:\n node = self._nodes[node_id]\n node.set_dependencies()\n\n # Setting the dependencies may have caused new nodes to be created. If so,\n # they will needs setting up. 
We call this function recursively to set\n # them up...\n self._set_dependencies_on_new_nodes()", "def add_nodes(self, config):\n logger.debug(\"add nodes to config: %s\", config)\n if type(config) is not list:\n raise Exception(\"nodes must be a list\")\n for n in config:\n # expect only id and hostname within node config\n if \"id\" not in n:\n raise Exception(\"'id' field required in each node\")\n if \"hostname\" not in n:\n raise Exception(\"'hostname' field required in each node\")\n node = {\"id\": 0, \"hostname\": \"\"}\n for k in n:\n if k not in node: raise Exception(\"unexpected attribute '%s' for node\" % k)\n node[k] = n[k]\n if type(node[\"id\"]) is not int or node[\"id\"]<1 or node[\"id\"] > ClusterConfig.MAX_NODES:\n raise Exception(\"invalid node id '%s', should be an integer between 1 and %s\" % (\n node[\"id\"], ClusterConfig.MAX_NODES))\n self.nodes[node[\"id\"]] = node\n logger.debug(\"adding node_id %s = %s\", node[\"id\"], node[\"hostname\"])", "def update (self, nodes=[], edges=[]):\n for n in nodes:\n props = \",\".join([\n f\"\"\" s.{k} = \"{v}\" \"\"\"\n for k, v in n.items()\n if not k == \"id\"\n ])\n statement = f\"\"\"MATCH (s {{ id : \"{n['id']}\" }}) SET {props} \"\"\"\n self.exec (statement)\n\n # TODO: determine best way to represent hierarchical node properties.\n # TODO: analogous logic for updating edges.", "def setNodeIds(self, diffuse: 'SbUniqueId const', transp: 'SbUniqueId const') -> \"void\":\n return _coin.SoColorPacker_setNodeIds(self, diffuse, transp)", "def _connect_nodes(self, node_a, node_b):\n self._graph[node_a].append(node_b)\n self._graph[node_b].append(node_a)", "def attach_nodes(nodes, ev_ids):\n global EV\n\n for node in nodes:\n if not node in STATE['nodes']:\n vlog(3, 'skipping not bad node %s' % (node)) \n continue\n \n for ev_id in ev_ids:\n if not ev_id in STATE['nodes'][node]['extraview']:\n STATE['nodes'][node]['extraview'].append(ev_id)\n vlog(3, 'node %s add extraview %s' % (node, ev_id)) \n\n save_state()", "def add_node_attributes(self, attribute_key, attribute_value):\n G_prime = self.__deepcopy__() # create a deepcopy of the bipartite graph\n for node in G_prime.get_left_nodeset().union(G_prime.get_right_nodeset()): # for every node in the graph\n # add the attribute key-value pair to the attributes registry of the node\n node.add_attribute(attribute_key, attribute_value)\n return G_prime # return the modified graph", "def reset_ids(network): \n nodes = network.nodes.copy()\n edges = network.edges.copy()\n to_ids = edges['to_id'].to_numpy()\n from_ids = edges['from_id'].to_numpy()\n new_node_ids = range(len(nodes))\n #creates a dictionary of the node ids and the actual indices\n id_dict = dict(zip(nodes.id,new_node_ids))\n nt = np.copy(to_ids)\n nf = np.copy(from_ids) \n #updates all from and to ids, because many nodes are effected, this\n #is quite optimal approach for large dataframes\n for k,v in id_dict.items():\n nt[to_ids==k] = v\n nf[from_ids==k] = v\n edges.drop(labels=['to_id','from_id'],axis=1,inplace=True)\n edges['from_id'] = nf\n edges['to_id'] = nt\n nodes.drop(labels=['id'],axis=1,inplace=True)\n nodes['id'] = new_node_ids\n edges['id'] = range(len(edges))\n edges.reset_index(drop=True,inplace=True)\n nodes.reset_index(drop=True,inplace=True)\n return Network(edges=edges,nodes=nodes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a binary quadratic model encoding an integer.
def binary_encoding(v: Variable, upper_bound: int) -> BinaryQuadraticModel: # note: the paper above also gives a nice way to handle bounded coefficients # if we want to do that in the future. if upper_bound < 2: raise ValueError("upper_bound must be greater than or equal to 2, " f"received {upper_bound}") upper_bound = math.floor(upper_bound) bqm = BinaryQuadraticModel(Vartype.BINARY) max_pow = math.floor(math.log2(upper_bound)) for exp in range(max_pow): val = 1 << exp bqm.set_linear((v, val), val) else: val = upper_bound - ((1 << max_pow) - 1) bqm.set_linear((v, val, 'msb'), val) return bqm
[ "def _quadratic_form(self):\n K = self.number_field()\n if K.degree() == 2:\n from sage.quadratic_forms.binary_qf import BinaryQF\n gens = self.gens_reduced()\n if len(gens) == 1:\n u, v = K.ring_of_integers().basis()\n alpha, beta = gens[0] * u, gens[0] * v\n else:\n alpha, beta = gens\n if QQ((beta * alpha.galois_conjugate() - alpha * beta.galois_conjugate()) / K.gen()) < 0:\n alpha, beta = beta, alpha\n N = self.norm()\n a = alpha.norm() // N\n b = ZZ(alpha * beta.galois_conjugate() +\n beta * alpha.galois_conjugate()) // N\n c = beta.norm() // N\n return BinaryQF([a, b, c])\n\n raise ValueError(\"not defined for ideals in number fields of degree > 2 over Q.\")", "def model(n):\r\n \r\n return \"\".join([str(i) for i in range(1, n + 1)])", "def quadratic(xx: np.ndarray) -> np.ndarray:\n return xx**2", "def build_bqm(matrix):\n # Set up\n n = len(matrix) # Number of rows/columns in sudoku\n m = int(math.sqrt(n)) # Number of rows/columns in sudoku subsquare\n digits = range(1, n+1)\n\n bqm = dimod.BinaryQuadraticModel({}, {}, 0.0, dimod.SPIN)\n\n # Constraint: Each node can only select one digit\n for row in range(n):\n for col in range(n):\n node_digits = [get_label(row, col, digit) for digit in digits]\n one_digit_bqm = combinations(node_digits, 1)\n bqm.update(one_digit_bqm)\n\n # Constraint: Each row of nodes cannot have duplicate digits\n for row in range(n):\n for digit in digits:\n row_nodes = [get_label(row, col, digit) for col in range(n)]\n row_bqm = combinations(row_nodes, 1)\n bqm.update(row_bqm)\n\n # Constraint: Each column of nodes cannot have duplicate digits\n for col in range(n):\n for digit in digits:\n col_nodes = [get_label(row, col, digit) for row in range(n)]\n col_bqm = combinations(col_nodes, 1)\n bqm.update(col_bqm)\n\n # Constraint: Each sub-square cannot have duplicates\n # Build indices of a basic subsquare\n subsquare_indices = [(row, col) for row in range(m) for col in range(m)]\n\n # Build full sudoku array\n for r_scalar in range(m):\n for c_scalar in range(m):\n for digit in digits:\n # Shifts for moving subsquare inside sudoku matrix\n row_shift = r_scalar * m\n col_shift = c_scalar * m\n\n # Build the labels for a subsquare\n subsquare = [get_label(row + row_shift, col + col_shift, digit)\n for row, col in subsquare_indices]\n subsquare_bqm = combinations(subsquare, 1)\n bqm.update(subsquare_bqm)\n\n # Constraint: Fix known values\n for row, line in enumerate(matrix):\n for col, value in enumerate(line):\n if value > 0:\n # Recall that in the \"Each node can only select one digit\"\n # constraint, for a given cell at row r and column c, we\n # produced 'n' labels. Namely,\n # [\"r,c_1\", \"r,c_2\", ..., \"r,c_(n-1)\", \"r,c_n\"]\n #\n # Due to this same constraint, we can only select one of these\n # 'n' labels (achieved by 'generators.combinations(..)').\n #\n # The 1 below indicates that we are selecting the label\n # produced by 'get_label(row, col, value)'. 
All other labels\n # with the same 'row' and 'col' will be discouraged from being\n # selected.\n bqm.fix_variable(get_label(row, col, value), 1)\n\n return bqm", "def Binomial(n,i):\n product_ = float(1)\n for j in range(1,(n-i)+1,1):\n onepart_ = float((i + j)/j)\n product_ *= float(onepart_)\n return product_", "def quadratic(x, a, b, c):\n \"*** YOUR CODE HERE ***\"\n tt = mul_interval(x, x)\n att_lower = min(a * lower_bound(tt), a* upper_bound(tt))\n att_upper = max(a * lower_bound(tt), a* upper_bound(tt))\n att = interval(att_lower, att_upper)\n\n bt_lower = min(b * lower_bound(x), b * upper_bound(x))\n bt_upper = max(b * lower_bound(x), b * upper_bound(x)) \n bt = interval(bt_lower, bt_upper)\n \n attbt = add_interval(att, bt)\n \n return interval(c + lower_bound(attbt), c + upper_bound(attbt))", "def intToBin(i):\n return (i).to_bytes(2, byteorder=\"little\")", "def initialize_qureg_given_int(a_int, qreg, circuit):\n a_str = binary.get_bitstring_from_int(a_int, len(qreg))\n return initialize_qureg_given_bitstring(a_str, qreg, circuit)", "def number_squared(b):\n return b**b", "def test_to_quadratic_program_d2(self):\n vehicle_routing = VehicleRouting(self.graph, depot=2)\n op = vehicle_routing.to_quadratic_program()\n # Test name\n self.assertEqual(op.name, \"Vehicle routing\")\n # Test variables\n self.assertEqual(op.get_num_vars(), 12)\n for var in op.variables:\n self.assertEqual(var.vartype, VarType.BINARY)\n # Test objective\n obj = op.objective\n self.assertEqual(obj.sense, QuadraticObjective.Sense.MINIMIZE)\n self.assertEqual(obj.constant, 0)\n self.assertDictEqual(\n obj.linear.to_dict(),\n {\n 0: 49.0,\n 1: 36.0,\n 2: 21.0,\n 3: 49.0,\n 4: 65.0,\n 5: 67.0,\n 6: 36.0,\n 7: 65.0,\n 8: 29.0,\n 9: 21.0,\n 10: 67.0,\n 11: 29.0,\n },\n )\n self.assertEqual(obj.quadratic.to_dict(), {})\n # Test constraint\n lin = op.linear_constraints\n self.assertEqual(len(lin), 12)\n c012 = [-1, 0, 2]\n for i in range(3):\n j = c012[i]\n self.assertEqual(lin[i].sense, Constraint.Sense.EQ)\n self.assertEqual(lin[i].rhs, 1)\n self.assertEqual(\n lin[i].linear.to_dict(),\n {3 * (j + 1): 1, 3 * (j + 1) + 1: 1, 3 * (j + 1) + 2: 1},\n )\n self.assertEqual(lin[3].sense, Constraint.Sense.EQ)\n self.assertEqual(lin[3].rhs, 1)\n self.assertEqual(lin[3].linear.to_dict(), {3: 1, 6: 1, 9: 1})\n self.assertEqual(lin[4].sense, Constraint.Sense.EQ)\n self.assertEqual(lin[4].rhs, 1)\n self.assertEqual(lin[4].linear.to_dict(), {0: 1, 7: 1, 10: 1})\n self.assertEqual(lin[5].sense, Constraint.Sense.EQ)\n self.assertEqual(lin[5].rhs, 1)\n self.assertEqual(lin[5].linear.to_dict(), {2: 1, 5: 1, 8: 1})\n self.assertEqual(lin[6].sense, Constraint.Sense.EQ)\n self.assertEqual(lin[6].rhs, 2)\n self.assertEqual(lin[6].linear.to_dict(), {1: 1, 4: 1, 11: 1})\n self.assertEqual(lin[7].sense, Constraint.Sense.EQ)\n self.assertEqual(lin[7].rhs, 2)\n self.assertEqual(lin[7].linear.to_dict(), {6: 1, 7: 1, 8: 1})\n self.assertEqual(lin[8].sense, Constraint.Sense.LE)\n self.assertEqual(lin[8].rhs, 1)\n self.assertEqual(lin[8].linear.to_dict(), {0: 1, 3: 1})\n self.assertEqual(lin[9].sense, Constraint.Sense.LE)\n self.assertEqual(lin[9].rhs, 1)\n self.assertEqual(lin[9].linear.to_dict(), {2: 1, 9: 1})\n self.assertEqual(lin[10].sense, Constraint.Sense.LE)\n self.assertEqual(lin[10].rhs, 1)\n self.assertEqual(lin[10].linear.to_dict(), {5: 1.0, 10: 1.0})\n self.assertEqual(lin[11].sense, Constraint.Sense.LE)\n self.assertEqual(lin[11].rhs, 2)\n self.assertEqual(lin[11].linear.to_dict(), {0: 1, 2: 1, 3: 1, 5: 1, 9: 1, 10: 1})", "def 
rational_quadratic(input_dim, variance=1., lengthscale=1., power=1.):\n part = rational_quadraticpart(input_dim, variance, lengthscale, power)\n return kern(input_dim, [part])", "def quadratic_form(u, Q, v, workers=1, **kwargs):\n with _setup_gulinalg_threads(workers):\n out = _impl.quadratic_form(u, Q, v, **kwargs)\n return out", "def quadratic_vertex(x, y):\n q = _quadratic(x, y)\n return -q.c[1] / (2 * q.c[0])", "def main():\n\n decimal = int(input().strip())\n binary_text = '{:b}'.format(decimal)\n print(str(int(binary_text[::-1], base=2)))", "def biclique(m, n):\n\n # Instantiate a Graph\n pattern = Graph()\n for u in range(m):\n for v in range(m, m + n):\n pattern.add_edge(u, v)\n\n # Return the biclique\n return pattern", "def binary_var_constraints(self):\n constraints = []\n for i in range(1, self.x + 1):\n for j in range(1, self.z + 1):\n equation = f\"\\tbin{i}{j}: \"\n constants = []\n for k in range(1, self.y + 1):\n constants.append(f\"u{i}{k}{j}\")\n equation += \" + \".join(constants)\n equation += \" = 2\"\n constraints.append(equation)\n binary_constraints = \"\\n\".join(constraints)\n binary_constraints += \"\\n\"\n return binary_constraints", "def bin_svd_codelength(A,u,s,v,q):", "def gen_poly(s, n):\n coeff = np.random.randint(0, 10, n - 1)\n poly = [s]\n for a_i in coeff:\n poly.append(a_i * random.randint(0, 100))\n return poly", "def cyclotomic_polynomial(self, n):\n if n <= 0:\n raise ArithmeticError(\"n=%s must be positive\"%n)\n elif n == 1:\n return self.gen() - 1\n else:\n return self(cyclotomic.cyclotomic_coeffs(n), check=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test to assert that the API creates a user successfully
def test_api_can_create_user(self): self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)
[ "def test_endpoint_creates_user(self):\n new_user = {\n \"username\": \"maina\",\n \"password\": \"password123\"\n }\n response = self.client.post('/api/v1/auth/register', data=new_user)\n # status CREATED\n self.assertEqual(response.status_code, 201)", "def test_user_exists(self):\n payload = {\n 'name': 'Test1 Test2',\n 'email': 'test@test.com',\n 'password': 'hello'\n }\n create_user(**payload)\n\n response = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_api_v1_users_post(self):\n pass", "def test_handle_create_account(self):\n\n create_account_data = {'fname': 'Create',\n 'lname': 'Account',\n 'email': 'create@test.test',\n 'username': 'create',\n 'password': 'test',\n 'phone-number': '44444'}\n \n res = self.client.post('/handle-create-account',\n data=create_account_data,\n follow_redirects=True)\n self.assertEqual(User.query.all()[-1], User.query.filter_by(username='create').first())", "def test_create_token_for_user(self):\n payload = {\n \"email\": \"test2gmail.com\",\n \"password\": \"Test1234\"\n }\n create_user(**payload)\n\n # Here, we are generating token to a created user(by passing payload)\n response = self.client.post(TOKEN_URL, payload)\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # And we are checking that there is token in our HTTP POST response\n # If there is token HTTP 200 OK should be sent back in in our response", "def test_create_token_for_user(self):\n payload = {'email': 'test@qdstudio.com', 'password': 'password123'}\n create_user(**payload)\n\n res = self.client.post(OBTAIN_TOKEN_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('access', res.data)\n self.assertIn('refresh', res.data)", "def test_post_user(self):\n response = self.app.post('/api/v1/users')\n self.assertEqual(response.status_code, 400)", "def test_create_account(self):\n\n response = self.client.post(\n '/api/v1/users', data=json.dumps(create_account), content_type='application/json')\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'], 'Account created successfully')\n assert response.status_code == 201", "def test_add_valid_user(mocker):\n password = fake.password()\n response = send_add_user_request(mocker, 1, fake.name(), fake.email(),\n fake.address(), float(fake.latitude()),\n float(fake.longitude()), password,\n password)\n assert response.status_code == 201\n assert response.json()[\"user_id\"] == 1", "def test_registration_of_existing_user(self):\n response = self.client.post(self.register_url, self.register_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.client.post(self.register_url, self.register_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n response = self.client.post(self.register_url, self.register_data, format=\"json\")\n assert response.data[\"errors\"][\"email\"][0] == \"Email address exists\"\n assert response.data[\"errors\"][\"username\"][0] == \"Username already exists\"", "def test_user_registration(self):\n post_data = {'username': 'roy1',\n 'first_name': 'Roy',\n 'last_name': 'Hanley',\n 'email': 'royhanley8@gmail.com',\n 'password': 'small fat gibbon'}\n response = Client().post('/register_new_user/', post_data)\n\n # tests response\n self.assertEqual('user roy1 successfully created' in response.content, True)\n\n # tests if User object has successfully 
been created\n self.assertEqual(User.objects.filter(username='roy1').exists(), True)\n\n # test if UserExtended has successfully been created and correctly linked to User object\n self.assertEqual(UserExtended.objects.filter(user__username='roy1').exists(), True)\n return", "def test_register_user_successful(self):\n with app.test_client() as client:\n request = client.post('/register', data=d, follow_redirects=True)\n self.assertEqual(request.status_code, 200)\n response = request.get_data(as_text=True)\n self.assertIn(\"<li>First Name: John</li>\", response)\n self.assertIn(\"<title>Details for newuser1</title>\", response)\n self.assertEqual(User.query.count(), 1)\n self.assertEqual(session[\"user_id\"], \n User.query.filter_by(username='newuser1').first().username)", "def test_register_new_user(self):\n pass", "def test_user_create_page(self):\n url = reverse(\"admin:user_user_add\")\n response = self.client.get(url)\n\n self.assertEquals(response.status_code, 200)", "def test_user_create_token(self):\n pass", "def test_create_account_using_post(self):\n pass", "def test_create_user_page(self) -> None:\n url = reverse(\"admin:core_user_add\")\n response_ = self.client.get(url)\n\n self.assertEquals(response_.status_code, 200)", "def test_user_add(self):\n\n with app.test_client() as client:\n d = {\"first_name\": \"Lucas\", \"last_name\": \"Paga\", \"image_url\": \"\"}\n resp = client.post('/users/new', data=d, follow_redirects=True)\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn(\"Lucas\", html)\n self.assertIn('id=\"users-list\"', html)", "def test_signup_user_valid(self):\n\n user = User.signup('newusername', 'newuser@gmail.com', 'password123', None)\n\n user.id = 7\n db.session.commit()\n\n self.assertIsInstance(user, User)\n self.assertEqual(user.id, 7)\n self.assertEqual(user.username, 'newusername')\n self.assertEqual(user.email, 'newuser@gmail.com')", "def test_user_create(self):\n # Create a user.\n new_user_email = 'nijntje27@hetkonijntje.nl'\n new_user_password = 'password'\n response = self.client.post(self.user_create_api_url, {'email': new_user_email,\n 'password': new_user_password})\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)\n user_id = response.data['id']\n self.assertEqual(len(mail.outbox), 1)\n\n # Logging in before activation shouldn't work. 
Test this by trying to access the settings page.\n self.client.login(username=new_user_email, password=new_user_password)\n new_user_settings_url = \"{0}{1}\".format(self.user_settings_api_url, user_id)\n response = self.client.get(new_user_settings_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)\n\n # Activate the newly created user.\n activation_key = RegistrationProfile.objects.filter(user__email=new_user_email).get().activation_key\n new_user_activation_url = \"{0}{1}\".format(self.user_activation_api_url, activation_key)\n response = self.client.get(new_user_activation_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # User should be auto-logged in after activation and settings should be able to be updated.\n response = self.client.get(new_user_settings_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(response.data['email'], new_user_email)\n self.assertFalse(response.data['newsletter'])\n\n # Test that the settings can be updated.\n response = self.client.put(new_user_settings_url, json.dumps({'newsletter': True}), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertTrue(response.data['newsletter'])\n\n self.client.logout()\n\n # A second activation of a used activation code shouldn't work.\n response = self.client.get(new_user_activation_url)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n # User should not be logged in after second activation attempt.\n new_user_settings_url = \"{0}{1}\".format(self.user_settings_api_url, user_id)\n response = self.client.get(new_user_settings_url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)\n\n # Test that the email field is required on user create.\n response = self.client.post(self.user_create_api_url, {'password': new_user_password})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)\n self.assertEqual(response.data['email'][0], 'This field is required.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implementing three learning algorithms: backpropagation ("BP") using the entire batch of samples, stochastic backpropagation ("SBP") learning sample by sample, and minibatch backpropagation ("MBBP") learning by batches of smaller size (20).
def learning_algortihms(eta, samples, neural_net, choose_algorithm="BP"): if choose_algorithm == "BP": return backpropagation(eta, samples, neural_net) elif choose_algorithm == "SBP": nn = neural_net for sample in samples: nn = backpropagation(eta, sample, nn) return nn elif choose_algorithm == "MBBP": alphas = [] betas = [] gammas = [] deltas = [] epsilons = [] for sample in samples: outputs = sample.outputs.tolist() if (outputs > [1, 0, 0, 0, 0]) - (outputs < [1, 0, 0, 0, 0]) == 0: alphas.append(sample) elif (outputs > [0, 1, 0, 0, 0]) - (outputs < [0, 1, 0, 0, 0]) == 0: betas.append(sample) elif (outputs > [0, 0, 1, 0, 0]) - (outputs < [0, 0, 1, 0, 0]) == 0: gammas.append(sample) elif (outputs > [0, 0, 0, 1, 0]) - (outputs < [0, 0, 0, 1, 0]) == 0: deltas.append(sample) elif (outputs > [0, 0, 0, 0, 1]) - (outputs < [0, 0, 0, 0, 1]) == 0: epsilons.append(sample) nn = neural_net for i in range(0, 10, 2): # select every other i mini_batch = [] mini_batch.extend([alphas[i], alphas[i+1]]) mini_batch.extend([betas[i], betas[i + 1]]) mini_batch.extend([gammas[i], gammas[i + 1]]) mini_batch.extend([deltas[i], deltas[i + 1]]) mini_batch.extend([epsilons[i], epsilons[i + 1]]) mini_batch = numpy.array(mini_batch) nn = backpropagation(eta, mini_batch, nn) return nn
[ "def batch_backprop(self, alpha, lamb, batch_size):\n # init derivated function\n if self.activation_type==1:\n derivative = lambda a: 1-ny.square(a)\n else:\n derivative = lambda a: a*(1.0-a)\n\n # init deltas\n delta_W = []\n delta_b = []\n\n z = self.data_amount if batch_size > self.data_amount else self.data_amount / batch_size\n for k in range(z):\n\n for i in range(self.number_hidden_layers+1):\n delta_W.append(0)\n delta_b.append(0)\n\n for j in range(batch_size):\n\n i = j + batch_size*k\n\n inp = self.inputs[i]\n target = self.targets[i]\n\n self.calc_activation(inp)\n\n target_rightform = ny.matrix( target ).T\n tmp = self.a[-1] - target_rightform\n\n tmp = ny.multiply(tmp, derivative(self.a[-1]))\n\n self.delta = [tmp]\n\n for i in range(self.number_hidden_layers):\n tmp = (ny.dot(self.weights_layer[-1-i].T, self.delta[i]))\n tmp = ny.multiply(tmp, derivative(self.a[-1-1-i]))\n\n self.delta.append(tmp)\n\n for i in range(len(self.weights_layer)):\n delta_W[i] += (ny.dot(self.delta[-1-i], self.a[i].T))\n delta_b[i] += self.delta[-1-i]\n \n\n for i in range(len(self.weights_layer)):\n self.weights_layer[i] -= alpha*(delta_W[i]/self.data_amount + lamb*self.weights_layer[i])\n self.bias[i] -= alpha * delta_b[i] / self.data_amount", "def full_batch_backprop(self, alpha, lamb):\n if self.activation_type==1:\n derivative = lambda a: 1-ny.square(a)\n else:\n derivative = lambda a: a*(1.0-a)\n\n\n delta_W = []\n delta_b = []\n for i in range(self.number_hidden_layers+1):\n delta_W.append(0)\n delta_b.append(0)\n\n for i in range(self.data_amount):\n\n inp = self.inputs[i]\n target = self.targets[i]\n\n self.calc_activation(inp)\n\n target_rightform = ny.matrix( target ).T\n tmp = self.a[-1] - target_rightform\n\n tmp = ny.multiply(tmp, derivative(self.a[-1]))\n\n self.delta = [tmp]\n\n for i in range(self.number_hidden_layers):\n tmp = (ny.dot(self.weights_layer[-1-i].T, self.delta[i]))\n tmp = ny.multiply(tmp, derivative(self.a[-1-1-i]))\n\n self.delta.append(tmp)\n\n for i in range(len(self.weights_layer)):\n delta_W[i] += (ny.dot(self.delta[-1-i], self.a[i].T))\n delta_b[i] += self.delta[-1-i]\n #self.weights_layer[i] -= alpha*(ny.dot(self.delta[-1-i], self.a[i].T))\n\n #for i in range(len(self.bias)):\n #delta_b[i] += self.delta[-1-i]\n #self.bias[i] -= alpha * self.delta[-1-i]\n\n for i in range(len(self.weights_layer)):\n self.weights_layer[i] -= alpha*(delta_W[i]/self.data_amount + lamb*self.weights_layer[i])\n self.bias[i] -= alpha * delta_b[i] / self.data_amount", "def backprop(x, y, biases, weights, cost, num_layers):\n\t# initial zero list for store gradient of biases and weights\n\tnabla_b = [np.zeros(b.shape) for b in biases]\n\tnabla_w = [np.zeros(w.shape) for w in weights]\n\n\t#print(num_layers)\n\n\t#print(x.shape)\n\t#print(weights[0].shape)\n\t#print(biases[0].shape)\n\n\t### Implement here\n\t# feedforward\n\t# Here you need to store all the activations of all the units\n\t# by feedforward pass\n\t###\n\n\th = []\n\th.append(x)\n\n\tfor i in range((num_layers-1)):\n\t\ta = sigmoid(np.dot(weights[i],h[i]) + biases[i])\n\t\th.append(a)\n\n\t#h1 = sigmoid(np.dot(weights[0],x) + biases[0])\n\t#h2 = sigmoid(np.dot(weights[1],h1) + biases[1])\n\n\t# compute the gradient of error respect to output\n\t# activations[-1] is the list of activations of the output layer\n\n\tdelta = (cost).delta(h[-1], y)\n\n\t### Implement here\n\t# backward pass\n\t# Here you need to implement the backward pass to compute the\n\t# gradient for each weight and bias\n\t###\n\n\tnabla_b[-1] = 
delta*sigmoid_prime(h[-1])\n\tnabla_w[-1] = np.dot(nabla_b[-1],h[-2].transpose())\n\n\t\n\tfor i in range(num_layers-3,-1,-1):\n\t\tnabla_b[i] = np.dot(weights[i+1].transpose(),nabla_b[i+1])*sigmoid_prime(h[i+1])\t\n\t\tnabla_w[i] = np.dot(nabla_b[i],h[i].transpose())\n\t\n\t# for i in range(0,num_layers-1):\n\t# \tnabla_b[-2-i] = np.dot(weights[-2-i+1].transpose(),nabla_b[-2-i+1])*sigmoid_prime(h[-2-i+1])\t\n\t# \tnabla_w[-2-i] = np.dot(nabla_b[-2-i],h[-2-i].transpose())\n\t\n\n\t#nabla_b[0] = np.dot(weights[1].transpose(),nabla_b[1])*sigmoid_prime(h[1])\t\n\t#nabla_w[0] = np.dot(nabla_b[0],h[0].transpose())\n\t#nabla_b[0] = sigmoid_prime(h1)\n\n\n\t#print(weights[1].shape)\n\t#print(nabla_w[0].shape)\n\t#print(nabla_w[1].shape)\t\n\n\t#print(np.dot(nabla_b[1],h1.transpose()).shape)\n\t#print(np.dot(nabla_b[0],x.transpose()).shape)\n\t \n\t\n\n\treturn (nabla_b, nabla_w)", "def train_bw_model(self, update):\n obs, actions, _, obs_next, weights, idxes = self.sample_batch(self.args.k_states)\n batch_size = min(self.args.k_states, len(self.buffer))\n if obs is not None and obs_next is not None:\n # need to get the masks\n # get basic information of network..\n obs = torch.tensor(obs, dtype=torch.float32)\n obs_next = torch.tensor(obs_next, dtype=torch.float32)\n actions = torch.tensor(actions, dtype=torch.int64).unsqueeze(1)\n if self.args.per_weight:\n weights = torch.tensor(weights, dtype=torch.float32).unsqueeze(1)\n max_nlogp = torch.tensor(np.ones((len(idxes), 1)) * self.args.max_nlogp, dtype=torch.float32)\n if self.args.cuda:\n obs = obs.cuda()\n obs_next = obs_next.cuda()\n actions = actions.cuda()\n if self.args.per_weight:\n weights = weights.cuda()\n max_nlogp = max_nlogp.cuda()\n pi = self.bw_actgen(obs_next)\n mu = self.bw_stategen(obs_next, self.indexes_to_one_hot(actions))\n\n if self.args.per_weight:\n # Losses with weightings and entropy regularization\n action_log_probs, dist_entropy = evaluate_actions_sil(pi, actions)\n action_log_probs = -action_log_probs\n clipped_nlogp = torch.min(action_log_probs, max_nlogp)\n action_loss = torch.mean(weights * clipped_nlogp)\n entropy_reg = torch.sum(weights*dist_entropy) / batch_size\n loss_actgen = action_loss - entropy_reg * self.args.entropy_coef\n square_error = ((obs - obs_next - mu)**2).view(batch_size , -1)\n loss_stategen = torch.mean(torch.mean((square_error),1)*weights)\n else:\n # Naive losses without weighting\n criterion1 = torch.nn.NLLLoss()\n criterion2 = nn.MSELoss()\n loss_actgen = criterion1(torch.log(pi), actions.squeeze(1))\n loss_stategen = criterion2(obs-obs_next, mu)\n\n total_loss = loss_actgen + self.args.state_coef*loss_stategen\n self.bw_optimizer.zero_grad()\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.bw_params, self.args.max_grad_norm)\n self.bw_optimizer.step()\n\n #Now updating the priorities in the PER Buffer. 
Use Net Value estimates\n with torch.no_grad():\n value, _ = self.network(obs_next)\n value = torch.clamp(value, min=0)\n self.buffer.update_priorities(idxes, value.squeeze(1).cpu().numpy())\n return loss_actgen, self.args.state_coef*loss_stategen\n else:\n return None, None", "def backprop(self, n, activations, zs, train_y, learning_rate,\n regilarisation, inercia):\n m = train_y.shape[1]\n # m is the size of mini-batch training set\n\n # Collecting dW and dB\n dW = [0] * (len(self.layer_sizes) - 1)\n dB = [0] * (len(self.layer_sizes) - 1)\n # iterating over the layers, where the layers are indexed by j and k.\n # the derivative is a bit differrent for the last layer, so calculating it outside of the loop\n dAk = None\n dZk = activations[-1] - train_y # cross-entropy cost derivative\n for k, Zk, Ak, Aj in reversed(list(zip(\n itertools.count(), zs, activations[1:], activations[:-1]\n ))):\n if dAk is not None: # not the first loop\n dZk = dAk * sigmoid_prime(Zk)\n\n dW[k] = np.dot(dZk, Aj.T) / m # (k_layer_size, m) x (j_layer_size, m).T -> (k_ls, j_ls)\n dB[k] = np.sum(dZk, axis=1, keepdims=1) / m # (k_layer_size, 1)\n\n dAj = np.dot(self.weights[k].T, dZk) # (j_ls, k_ls) x (k_ls, m) -> (j_ls, m)\n # moving to the next layer\n dAk = dAj\n\n # Updating weights and biases\n for k, Mk, Wk, Bk, dWk, dBk, in reversed(list(zip(\n itertools.count(), self.momentums, self.weights, self.biases, dW, dB\n ))):\n # for Vk, Wk, Bk, dWk, dBk in zip(self.velocities, self.weights, self.biases, dW, dB):\n # self.velocities[k] = friction * self.velocities[k] - learning_rate * dW[k]\n # self.weights[k] += self.velocities[k]\n # self.weights[k] -= learning_rate * (regul_param / n * self.weights[k]) # regularization\n # Vk = friction * Vk - learning_rate * dWk\n Mk = Mk * inercia - learning_rate * dWk # calculating new momentum at the moment\n Wk += Mk # updating the weight with the momentum\n Wk -= learning_rate * (regilarisation / n * self.weights[k]) # applying regularization\n Bk -= learning_rate * dBk # updating the biases - simply, without momentum or regularization\n self.momentums[k] = Mk\n self.weights[k] = Wk\n self.biases[k] = Bk", "def belief_propagation(self,input):\n ## PUT CODE HERE ##\n \n # Alpha/beta table will be a numpy 2D array, each row is a sequence,\n # and each column represent the class in each position of sequence\n \n ## LOG-ALPHA\n self.alpha = np.zeros((0,0))\n # Create the first row, with 0 column\n self.alpha = np.vstack((self.alpha, [[]]))\n # Add #number_of_classes columns\n self.alpha = np.hstack((self.alpha, np.zeros([1, self.n_classes])))\n \n # Initialize for alpha1\n for y2 in range(self.n_classes) :\n tmp = self.unary_log_factors_0[0] + self.lateral_weights[:,y2]\n# self.alpha[0][y2] += np.max(tmp) + np.log(np.sum(np.exp(tmp - np.max(tmp))))\n self.alpha[0][y2] += logsumexp(tmp)\n \n alpha_tmp = np.zeros([1, self.n_classes])\n for k in range(1, input.shape[0]-1):\n for y2 in range(self.n_classes):\n tmp = self.unary_log_factors_0[k] + self.lateral_weights[:,y2] + self.alpha[k-1]\n# alpha_tmp[0][y2] = np.max(tmp) + np.log(np.sum(np.exp(tmp - np.max(tmp))))\n alpha_tmp[0][y2] = logsumexp(tmp)\n \n self.alpha = np.vstack((self.alpha, alpha_tmp))\n \n \n ## LOG-BETA\n self.beta = np.zeros((0,0))\n # Create the first row, with 0 column\n self.beta = np.vstack((self.beta, [[]]))\n # Add #number_of_classes columns\n self.beta = np.hstack((self.beta, np.zeros([1, self.n_classes])))\n \n # Initialize for beta1\n for y1 in range(self.n_classes):\n tmp = 
self.unary_log_factors_0[-1] + self.lateral_weights[y1,:]\n# self.beta[0][y1] += np.max(tmp) + np.log(np.sum(np.exp(tmp - np.max(tmp))))\n self.beta[0][y1] += logsumexp(tmp)\n \n beta_tmp = np.zeros((1, self.n_classes)) \n for k in range(input.shape[0]-2, 0, -1):\n for y1 in range(self.n_classes):\n # because beta table is insert from the bottom up, so referring \n # to the previous one is simply the first one up to now\n tmp = self.unary_log_factors_0[k] + self.lateral_weights[y1,:] + self.beta[0]\n# beta_tmp[0][y1] = np.max(tmp) + np.log(np.sum(np.exp(tmp - np.max(tmp)))) \n beta_tmp[0][y1] = logsumexp(tmp)\n \n self.beta = np.vstack((beta_tmp, self.beta))\n \n ## PARTITION FUNCTION LOG-Z(X)\n tmp = self.unary_log_factors_0[-1] + self.alpha[-1]\n# self.Z_alpha = np.exp(np.max(tmp) + np.log(np.sum(np.exp(tmp - np.max(tmp)))))\n self.Z_alpha = logsumexp(tmp)\n \n tmp = self.unary_log_factors_0[0] + self.beta[0]\n# self.Z_beta = np.exp(np.max(tmp) + np.log(np.sum(np.exp(tmp - np.max(tmp)))))\n self.Z_beta = logsumexp(tmp)", "def multi_bw(init, y, X, n, k, family, tol, max_iter, rss_score, gwr_func,\n bw_func, sel_func, multi_bw_min, multi_bw_max, bws_same_times,\n verbose=False):\n if init is None:\n bw = sel_func(bw_func(y, X))\n optim_model = gwr_func(y, X, bw)\n else:\n bw = init\n optim_model = gwr_func(y, X, init)\n bw_gwr = bw\n err = optim_model.resid_response.reshape((-1, 1))\n param = optim_model.params\n\n XB = np.multiply(param, X)\n if rss_score:\n rss = np.sum((err)**2)\n iters = 0\n scores = []\n delta = 1e6\n BWs = []\n bw_stable_counter = 0\n bws = np.empty(k)\n gwr_sel_hist = []\n\n try:\n from tqdm.auto import tqdm #if they have it, let users have a progress bar\n except ImportError:\n\n def tqdm(x, desc=''): #otherwise, just passthrough the range\n return x\n\n for iters in tqdm(range(1, max_iter + 1), desc='Backfitting'):\n new_XB = np.zeros_like(X)\n params = np.zeros_like(X)\n\n for j in range(k):\n temp_y = XB[:, j].reshape((-1, 1))\n temp_y = temp_y + err\n temp_X = X[:, j].reshape((-1, 1))\n bw_class = bw_func(temp_y, temp_X)\n\n if bw_stable_counter >= bws_same_times:\n #If in backfitting, all bws not changing in bws_same_times (default 5) iterations\n bw = bws[j]\n else:\n bw = sel_func(bw_class, multi_bw_min[j], multi_bw_max[j])\n gwr_sel_hist.append(deepcopy(bw_class.sel_hist))\n\n optim_model = gwr_func(temp_y, temp_X, bw)\n err = optim_model.resid_response.reshape((-1, 1))\n param = optim_model.params.reshape((-1, ))\n new_XB[:, j] = optim_model.predy.reshape(-1)\n params[:, j] = param\n bws[j] = bw\n \n #If bws remain the same as from previous iteration\n if (iters > 1) and np.all(BWs[-1] == bws):\n bw_stable_counter += 1\n else:\n bw_stable_counter = 0\n \n num = np.sum((new_XB - XB)**2) / n\n den = np.sum(np.sum(new_XB, axis=1)**2)\n score = (num / den)**0.5\n XB = new_XB\n\n if rss_score:\n predy = np.sum(np.multiply(params, X), axis=1).reshape((-1, 1))\n new_rss = np.sum((y - predy)**2)\n score = np.abs((new_rss - rss) / new_rss)\n rss = new_rss\n scores.append(deepcopy(score))\n delta = score\n BWs.append(deepcopy(bws))\n\n if verbose:\n print(\"Current iteration:\", iters, \",SOC:\", np.round(score, 7))\n print(\"Bandwidths:\", ', '.join([str(bw) for bw in bws]))\n\n if delta < tol:\n break\n\n opt_bws = BWs[-1]\n return (opt_bws, np.array(BWs), np.array(scores), params, err, gwr_sel_hist, bw_gwr)", "def train(self):\r\n hidden_size, output_size, num_epochs = self.params[\"h_size\"], \\\r\n self.params[\"o_size\"], self.params[\"num_epochs\"]\r\n \r\n # 
initialize weights to small random numbers, biases to 0\r\n w1 = np.random.randn(hidden_size, self.X.shape[1])\r\n b1 = np.zeros((hidden_size, 1))\r\n w2 = np.random.randn(output_size, hidden_size)\r\n b2 = np.zeros((output_size, 1))\r\n \r\n for i in range(0, num_epochs):\r\n # do a backprop update\r\n cost, w1, b1, w2, b2 = self.backprop(w1, b1, w2, b2)\r\n \r\n # epoch check and print current cost\r\n if (i % 1 == 0):\r\n print(\"Epoch \", i, \"cost: \", cost)\r\n \r\n self.model = { 'W1': w1, 'b1': b1, 'W2': w2, 'b2': b2}", "def _propagate_bayesian(self, batch_images):\n\t\traise NotImplementedError('bayesian method out of date since compare_output')\n\t\t# #reset activity (important for cases in which no noise is added)\n\t\t# self.hid_neurons_greedy = None\n\t\t# self.hid_neurons_explore = None\n\n\t\t# #compute activation of hidden neurons\n\t\t# hid_activ = ex.propagate_layerwise(batch_images, self.hid_W, SM=False, log_weights=self.log_weights)\n\t\t\n\t\t# #add noise to activation of hidden neurons (exploration)\n\t\t# if self.exploration and self._e >= self.n_epi_crit + self.n_epi_fine:\n\t\t# \tself.hid_neurons_explore = hid_activ + np.random.normal(0, np.std(hid_activ)*self.noise_xplr_hid, np.shape(hid_activ))\n\t\t# \tself.hid_neurons_explore = ex.softmax(self.hid_neurons_explore, t=self.t_hid)\n\n\t\t# #softmax hidden neurons\n\t\t# self.hid_neurons_greedy = ex.softmax(hid_activ, t=self.t_hid)\n\t\t\n\t\t# #set activation values for neurons when no exploration\n\t\t# if self.hid_neurons_explore is None: self.hid_neurons_explore = np.copy(self.hid_neurons_greedy)\n\n\t\t# #compute posteriors of the bayesian decoder in greedy and explorative cases\n\t\t# if self._e >= self.n_epi_crit + self.n_epi_fine:\n\t\t# \tposterior_greedy = bc.bayesian_decoder(self.hid_neurons_greedy, self._pdf_marginals, self._pdf_evidence, self._pdf_labels, self.pdf_method)\n\t\t# \tgreedy = self.classes[np.argmax(posterior_greedy,1)]\n\t\t\t\n\t\t# \tposterior_explore = bc.bayesian_decoder(self.hid_neurons_explore, self._pdf_marginals, self._pdf_evidence, self._pdf_labels, self.pdf_method)\n\t\t# \texplore = self.classes[np.argmax(posterior_explore,1)]\n\t\t# else:\n\t\t# \tposterior_greedy = None\n\t\t# \tgreedy = None\n\t\t# \texplore = None\t\t\n\n\t\t# return greedy, explore, None, posterior_greedy", "def construct_model_for_backpropagation(self):\n\n tf.reset_default_graph() # Reset the graph\n X = tf.placeholder(shape=(self.X_train.shape[0], self.X_train.shape[1]), dtype=tf.float64, name='X')\n y = tf.placeholder(shape=(self.X_train.shape[0], 1), dtype=tf.float64, name='y')\n self.define_weights_for_backpropagation() # Initialize the weights\n a_h = tf.sigmoid(tf.matmul(X, self.w01))\n a_o = tf.sigmoid(tf.matmul(a_h, self.w12))\n loss = tf.reduce_mean(tf.square(a_o - y))\n optimizer = tf.train.GradientDescentOptimizer(self.backpropagation_eta)\n train = optimizer.minimize(loss)\n sess = tf.Session() # Initialize the session\n sess.run(tf.global_variables_initializer()) # Initialize global variables\n\n for epoch in range(self.backpropagation_epochs): # Run for some epcohs\n sess.run(train, feed_dict={X: self.X_train, y: self.y_train})\n self.error_list.append(sess.run(loss, feed_dict={X: self.X_train, y: self.y_train}))\n self.weights01 = sess.run(self.w01) # Update the weights\n self.weights12 = sess.run(self.w12) # Update the weights\n\n print(\"Loss with \", self.hidden_nodes, \" hidden nodes and \", self.backpropagation_epochs, \" epochs = \",\n self.error_list[-1])\n sess.close()", "def 
MiniBatchGD(network, X_train, Y_train, lossfunction, batch_size,\n learning_rate, regularizer, accelerator):\n prev_loss = 0\n while True:\n permut = np.random.permutation(len(Y_train))\n X_train = X_train[permut]\n Y_train = Y_train[permut]\n\n for batch_num in range(len(Y_train) // batch_size):\n start_idx = batch_num * batch_size\n end_idx = min(len(Y_train), batch_num * batch_size + batch_size)\n\n Wgrad = []\n for idx in range(len(network.Wgrad)):\n Wgrad.append(np.zeros(np.shape(network.Wgrad[idx])))\n bias_grad = np.zeros(np.shape(network.bias_grad))\n\n for idx in range(start_idx, end_idx):\n\n network.clear_outputs()\n ypred = network.forward(X_train[idx, None])\n op_gradient = lossfunction.gradient(ypred, Y_train[idx, None])\n network.backward(op_gradient, regularizer)\n\n bias_grad += network.bias_grad\n for idx in range(len(Wgrad)):\n Wgrad[idx] += network.Wgrad[idx]\n\n if accelerator != None:\n weights_update, bias_update = accelerator.calc_update(learning_rate, Wgrad, bias_grad)\n network.update(weights_update = weights_update, bias_update = bias_update)\n else:\n network.update(learning_rate = learning_rate)\n\n total_loss = 0\n network.clear_outputs()\n for idx, x in enumerate(X_train):\n ypred = network.forward(x)\n total_loss += lossfunction.calc_loss(ypred, Y_train[idx, None])\n\n if regularizer != None:\n total_loss += regularizer.calc_loss(network.W)\n\n if abs(prev_loss - total_loss) < 0.01:\n break # stopping condition\n\n elif prev_loss != 0 and total_loss > 3 * prev_loss:\n print('Exploding cost')\n break\n\n print(total_loss)\n prev_loss = total_loss", "def backward_propagation(self, batch_loss, alpha = 0.001, eta = 0.4):\n\t\tif self.optimizer == 'normal':\n\t\t\t## Calculating E(t)\n\t\t\t# E_t_4 = np.multiply(-(self.y - self.output) * (self.output), self.layer3) ##For weights 4\n\t\t\tE_t_3 = np.multiply(-(self.y - self.output) * (self.output), self.layer2) ##For Weights 3\n\t\t\tE_t_2 = np.multiply(-(self.y - self.output) * (self.output), self.layer1) ##For Weights 2\n\t\t\tE_t_1 = np.multiply(-(self.y - self.output) * (self.output), self.input) ##For Weights 1\n\n\t\t\t##Compute d_weights(t)\n\t\t\t# d_weights_t_4 = np.add((-eta * E_t_4), np.multiply(alpha,self.d_weights_prev_t_4))\n\t\t\td_weights_t_3 = np.add((-eta * E_t_3), np.multiply(alpha,self.d_weights_prev_t_3))\n\t\t\td_weights_t_2 =\tnp.add((-eta * E_t_2), np.multiply(alpha,self.d_weights_prev_t_2))\n\t\t\td_weights_t_1 = np.add((-eta * E_t_1), np.multiply(alpha,self.d_weights_prev_t_1))\n\n\t\t\t##Update the Weights using the derived formula\n\t\t\t# self.weights4 = np.add(self.weights4, d_weights_t_4)\n\t\t\tself.weights3 = self.weights3 + d_weights_t_3\n\t\t\tself.weights2 = self.weights2 + d_weights_t_2\n\t\t\tself.weights1 = self.weights1 + d_weights_t_1\n\n\t\t\t# self.weights1 = MinMaxFuzzy.normalise(self.weights1)\n\t\t\t# self.weights2 = MinMaxFuzzy.normalise(self.weights2)\n\t\t\t# self.weights3 = MinMaxFuzzy.normalise(self.weights3)\n\n\t\t\t## Change the Values of d_weight(t-1)\n\t\t\t# d_weights_prev_t_4 = d_weights_t_4\n\t\t\td_weights_prev_t_3 = d_weights_t_3\n\t\t\td_weights_prev_t_2 = d_weights_t_2\n\t\t\td_weights_prev_t_1 = d_weights_t_1\n\n\t\telif self.optimizer == 'sgd':\n\n\t\t\td_weights_t_3 = 2 * (batch_loss) * self.layer2\n\t\t\td_weights_t_2 = 2 * (batch_loss) * np.dot(self.weights3.T,self.layer1)\n\t\t\td_weights_t_1 = 2 * (batch_loss) * np.dot(np.dot(self.weights3,self.weights2).T,self.input)\n\n\t\t\t## Update the Weights\n\t\t\t# self.weights4 = self.weights4 - (alpha 
* d_weights_t_4)\n\t\t\tself.weights3 = np.subtract(self.weights3,(alpha * d_weights_t_3))\n\t\t\tself.weights2 = np.subtract(self.weights2,(alpha * d_weights_t_2))\n\t\t\tself.weights1 = np.subtract(self.weights1,(alpha * d_weights_t_1))", "def train(self, alpha=0.5):\n for i in range(self.data_amount):\n self.backprop(self.inputs[i],self.targets[i],alpha)", "def check_reweighting_by_ML_gb(original, target, original_weights, target_weights=None, n_iterations=1):\n if target_weights is None:\n target_weights = numpy.ones(target.shape[0])\n \n aucs = []\n \n data = numpy.concatenate([original, target])\n labels = numpy.array([0] * original.shape[0] + [1] * target.shape[0])\n W = numpy.concatenate([original_weights, target_weights])\n \n for _ in range(n_iterations):\n Xtr, Xts, Ytr, Yts, Wtr, Wts = train_test_split(data, labels, W, train_size=0.51)\n \n original_w = Wtr[Ytr == 0] \n target_w = Wtr[Ytr == 1] \n\n original_w /= numpy.sum(original_w)\n target_w /= numpy.sum(target_w)\n\n original_tr = Xtr[Ytr == 0]\n target_tr = Xtr[Ytr == 1]\n \n # put target events with negative weights into original samples with -weights\n data_neg = target_tr[target_w < 0]\n weights_neg = -target_w[target_w < 0]\n original_tr = numpy.concatenate((original_tr, data_neg))\n original_w = numpy.concatenate((original_w, weights_neg))\n target_tr = target_tr[target_w >= 0]\n target_w = target_w[target_w >= 0]\n \n Xtr = numpy.concatenate([original_tr, target_tr])\n Ytr = numpy.array([0] * original_tr.shape[0] + [1] * target_tr.shape[0])\n Wtr = numpy.concatenate([original_w, target_w])\n \n clf = GradientBoostingClassifier(n_estimators=200, subsample=0.5, \n min_samples_leaf=100, learning_rate=0.1, \n max_depth=6).fit(Xtr, Ytr, sample_weight=Wtr)\n proba = clf.predict_proba(Xts)[:, 1]\n aucs.append(roc_auc_score(Yts, proba, sample_weight=Wts))\n \n fpr, tpr, _ = roc_curve(Yts, proba, sample_weight=Wts)\n return fpr, tpr, numpy.mean(aucs), numpy.std(aucs)", "def train(self, inputs, targets, learning_rate, n_batches, batch_size):\n \n if len(inputs) != len(targets):\n return ValueError(\"Input and target vectors should have same length.\")\n \n start = time.time()\n print (\"training dummynet using SGD with %d batches of %d elements each.\"%(n_batches, batch_size))\n \n # convert everything to numpy arrays\n inputs = [colvec(x).astype(dummylayer.dtype) for x in inputs]\n targets = [asarray(t, dtype=dummylayer.dtype) for t in targets]\n \n # loop over the mini batches, save total cost for each batch\n costs = []\n training_set = list(zip(inputs, targets))\n for ib in tqdm.tqdm(range(n_batches)):\n \n # allocate space for average of parameters derivatives\n sum_dw, sum_db = [], []\n for lyr in self.layers:\n sum_dw.append(np.zeros(lyr.w.shape, dtype=dummylayer.dtype))\n sum_db.append(np.zeros(lyr.b.shape, dtype=dummylayer.dtype))\n \n # select a random mini-batch TODO: use np random\n batch = random.sample(training_set, batch_size)\n inputs_batch, targets_batch = zip(*batch)\n \n # now derive gradients for each input/label pair, derive cost function \n # wrt all the weigths in the net and sum together parameter gradients\n cost_buff = np.zeros(len(inputs_batch))\n for it in range(len(inputs_batch)):\n \n # forward pass (also compute cost and save it) and backward\n cost_buff[it] = self.cost(inputs_batch[it], targets_batch[it])\n self.backprop(inputs_batch[it], targets_batch[it])\n \n # sum up the gradients\n for il, lyr in enumerate(self.layers):\n sum_dw[il] += lyr.dw\n sum_db[il] += lyr.db\n \n # update parameters 
using average from batch\n for il, lyr in enumerate(self.layers):\n lyr.w = lyr.w - learning_rate*sum_dw[il]/batch_size\n lyr.b = lyr.b - learning_rate*sum_db[il]/batch_size\n \n # compute total cost for this batch\n costs.append(np.sum(cost_buff))\n end = time.time()\n print (\"dummynet has been trained! took %.2e sec\"%(end-start))\n return costs", "def My_Bootstrap(self):\n\ts = len(self.X_test) #200\n t = len(self.X_training) #400\n\tr = np.size(self.X_test,1) #1600\n # Ordinary Least Square method\n if self.method == 'OLS':\n m = np.zeros((self.B,s))\n c = np.zeros((self.B,r))\n for i in range(self.B):\n index = randint(0, t, t)\n X_resample = self.X_training[index]\n z_resample = self.z[index]\n lr = My_Linear_Regression(X_resample, self.X_test, z_resample, self.lambda_)\n lr.My_OLS()\n z_predict = lr.My_Predict(self.X_test, False)\n\t\tcoeff = lr.My_Beta()\n m[i,:] = z_predict\n c[i,:] = coeff\n\n # Ridge regression\n elif self.method == 'Ridge':\n m = np.zeros((self.B,s))\n c = np.zeros((self.B,r))\t\n for i in range(self.B):\n index = randint(0, t, t)\n X_resample = self.X_training[index]\n z_resample = self.z[index]\n lr = My_Linear_Regression(X_resample, self.X_test, z_resample, self.lambda_)\n lr.My_Ridge()\n z_predict = lr.My_Predict(self.X_test, False)\n\t\tcoeff = lr.My_Beta()\n m[i,:] = z_predict\n c[i,:] = coeff\n \n #Lasso regression\n elif self.method == 'Lasso':\n m = np.zeros((self.B,s))\n c = np.zeros((self.B,r))\t\n for i in range(self.B):\n index = randint(0, t, t)\n X_resample = self.X_training[index]\n z_resample = self.z[index]\n lr = My_Linear_Regression(X_resample, self.X_test, z_resample, self.lambda_)\n lr.My_Lasso()\n z_predict = lr.My_Predict(self.X_test, True)\n\t\tcoeff = lr.My_Beta()\n m[i,:] = z_predict\n c[i,:] = coeff\n \n else:\n print('You have forgotten to select method; OLS, Ridge or Lasso.')\n\n return m, c", "def learnModel(n_users, m_items, regU, regI, regJ,\n learningRate, R, features, epochs, numberOfIterations, lossF, dlossF,\n W = None, H = None):\n\n \"\"\"Learning rate is constant.\"\"\"\n MIN_SCALING_FACTOR = 1E-5\n y = 1.0\n np.random.seed(1234567890)\n # loss = logLoss(0, 0)\n\n sigma = 0.1\n mu = 0\n # Random initialization of W and H between mean=0 ; sigma=0.1\n if W == None:\n W = sigma * np.random.randn(n_users + 1, features) + mu\n if H == None:\n H = sigma * np.random.randn(m_items + 1, features) + mu\n\n printDelay = int(0.01 * numberOfIterations)\n sum_loss = 0.0\n y = 1.0\n\n scaling_factorU = 1.0 - (learningRate * regU)\n scaling_factorI = 1.0 - (learningRate * regI)\n scaling_factorJ = 1.0 - (learningRate * regJ)\n\n if scaling_factorU < MIN_SCALING_FACTOR:\n scaling_factorU = MIN_SCALING_FACTOR\n if scaling_factorI < MIN_SCALING_FACTOR:\n scaling_factorI = MIN_SCALING_FACTOR\n if scaling_factorJ < MIN_SCALING_FACTOR:\n scaling_factorJ = MIN_SCALING_FACTOR\n\n for e in xrange(0, epochs):\n iter = 0\n t = 0\n while iter <= numberOfIterations:\n iter += 1\n\n u = random.choice(R.keys())\n\n # if not R.has_key(u):\n # continue\n if len(R[u]) == 0:\n continue\n userItems = [x[0] for x in R[u]]\n # the positive example\n i = userItems[np.random.random_integers(0, len(userItems) - 1)]\n # the negative example\n j = np.random.random_integers(0, m_items)\n # if j is also relevant for u we continue\n # we need to see a negative example to contrast the positive one\n while j in userItems:\n j = np.random.random_integers(0, m_items)\n\n X = H[i] - H[j]\n # rank labels\n # positive label :\n # yi = 1.0\n # negative label :\n # yj = 
0.0\n # this is equivalent to the sign(yi - yj)\n # y = 1.0 if (yi > yj) else -1.0 if (yi < yj) else 0.0\n # since in this case the positive example is always yi, then y =\n # 1.0\n wx = np.dot(W[u], X)\n dloss = dlossF(wx, y)\n\n sum_loss += lossF(wx, y)\n\n # temp\n wu = W[u]\n hi = H[i]\n hj = H[j]\n\n if dloss != 0.0:\n # Updates\n eta_dloss = learningRate * dloss\n W[u] += eta_dloss * (hi - hj)\n H[i] += eta_dloss * wu\n H[j] += eta_dloss * (-wu)\n\n W[u] *= scaling_factorU\n H[i] *= scaling_factorI\n H[j] *= scaling_factorJ\n\n t += 1 # increment the iteration\n if t % printDelay == 0:\n print(\"Epoch: %i/%i | iteration %i/%i | learning rate=%f\"\n \" | average_loss for the last %i iterations = %f\" %\n (e + 1, epochs, t, numberOfIterations, learningRate,\n printDelay, sum_loss / printDelay))\n sum_loss = 0.0\n\n return W, H", "def SGD(network, X_train, Y_train, lossfunction, batch_size,\n learning_rate, regularizer, accelerator):\n prev_loss = 0\n while True:\n permut = np.random.permutation(len(Y_train))\n X_train = X_train[permut]\n Y_train = Y_train[permut]\n for idx, x in enumerate(X_train):\n\n network.clear_outputs()\n ypred = network.forward(x)\n op_gradient = lossfunction.gradient(ypred, Y_train[idx, None])\n network.backward(op_gradient, regularizer) # gradients are available\n\n if accelerator != None:\n weights_update, bias_update = accelerator.calc_update(learning_rate, network.Wgrad, network.bias_grad)\n network.update(weights_update = weights_update, bias_update = bias_update)\n else:\n network.update(learning_rate)\n\n total_loss = 0\n network.clear_outputs()\n for idx, x in enumerate(X_train):\n ypred = network.forward(x)\n total_loss += lossfunction.calc_loss(ypred, Y_train[idx, None])\n\n if regularizer != None:\n total_loss += regularizer.calc_loss(network.W)\n\n if abs(prev_loss - total_loss) < 0.01:\n break # stopping condition\n\n elif prev_loss != 0 and total_loss > 3 * prev_loss:\n print(prev_loss, total_loss)\n print('Exploding cost')\n break\n\n print(total_loss)\n prev_loss = total_loss", "def pretrain(self, X=None, batch_size=10, pcdk=20, NS=20 ,maxiter=100, learn_rate_a=0.01, learn_rate_b=0.01, learn_rate_W=0.01, change_rate=0.8, adjust_change_rate_at=None, adjust_coef=1.02, change_every_many_iters=10, init_chain_time=100, train_subset_size_for_compute_error=100, valid_subset_size_for_compute_error=100, track_reconstruct_error=True, track_free_energy=True, reinit_a_use_data_stat=False, if_plot_error_free_energy=False, dir_save=\"./\", prefix=\"RBM\", figwidth=5, figheight=3):\n start_time=time.clock()\n # different layers can have different learning rates \n if numpy.isscalar(learn_rate_b):\n learn_rate_b=[learn_rate_b]*self.NK\n if numpy.isscalar(learn_rate_W):\n learn_rate_W=[learn_rate_W]*self.NK \n \n self.X=X\n rbm_X=self.X\n visible_type=self.visible_type\n #self.rbms=[] # define it in initialization\n self.H_pretrain=[]\n print(\"Start pretraining DBM...\")\n for nk in range(self.NK):\n print(\"the {0}-th hidden layer...\".format(nk+1))\n if nk==0 and self.NK>1: # bottom RBM\n tie_W_for_pretraining_DBM_bottom=True\n tie_W_for_pretraining_DBM_top=False\n rbm_visible_length=self.M\n rbm_hidden_length=self.K[nk]\n visible_type=self.visible_type\n visible_type_fixed_param=self.visible_type_fixed_param\n hidden_type=self.hidden_type[nk]\n hidden_type_fixed_param=self.hidden_type_fixed_param[nk]\n rbm_if_fix_vis_bias=self.if_fix_vis_bias\n a=self.a\n rbm_fix_a_log_ind=self.fix_a_log_ind\n rbm_track_reconstruct_error=track_reconstruct_error\n 
rbm_track_free_energy=track_free_energy\n rbm_reinit_a_use_data_stat=reinit_a_use_data_stat\n rbm_learn_rate_a=learn_rate_a\n elif nk==self.NK-1 and self.NK>1: # top RBM\n tie_W_for_pretraining_DBM_bottom=False\n tie_W_for_pretraining_DBM_top=True\n rbm_visible_length=self.K[nk-1]\n rbm_hidden_length=self.K[nk]\n visible_type=self.hidden_type[nk-1]\n visible_type_fixed_param=self.hidden_type_fixed_param[nk-1]\n hidden_type=self.hidden_type[nk]\n hidden_type_fixed_param=self.hidden_type_fixed_param[nk]\n rbm_if_fix_vis_bias=True\n rbm_fix_a_log_ind=None\n rbm_track_reconstruct_error=track_reconstruct_error\n rbm_track_free_energy=track_free_energy\n rbm_reinit_a_use_data_stat=False\n rbm_learn_rate_a=learn_rate_b[nk-1]\n elif nk==0 and self.NK==1: # there is only one hidden layer\n tie_W_for_pretraining_DBM_bottom=False\n tie_W_for_pretraining_DBM_top=False\n rbm_visible_length=self.M\n rbm_hidden_length=self.K[nk]\n visible_type=self.visible_type\n visible_type_fixed_param=self.visible_type_fixed_param\n hidden_type=self.hidden_type[nk]\n hidden_type_fixed_param=self.hidden_type_fixed_param[nk]\n rbm_if_fix_vis_bias=self.if_fix_vis_bias\n a=self.a\n rbm_fix_a_log_ind=self.fix_a_log_ind\n rbm_track_reconstruct_error=track_reconstruct_error\n rbm_track_free_energy=track_free_energy\n rbm_reinit_a_use_data_stat=reinit_a_use_data_stat\n rbm_learn_rate_a=learn_rate_a\n else: # middle RBMs\n tie_W_for_pretraining_DBM_bottom=False\n tie_W_for_pretraining_DBM_top=False\n rbm_visible_length=self.K[nk-1]\n rbm_hidden_length=self.K[nk]\n visible_type=self.hidden_type[nk-1]\n visible_type_fixed_param=self.hidden_type_fixed_param[nk-1]\n hidden_type=self.hidden_type[nk]\n hidden_type_fixed_param=self.hidden_type_fixed_param[nk]\n rbm_if_fix_vis_bias=True\n rbm_fix_a_log_ind=None\n rbm_track_reconstruct_error=track_reconstruct_error\n rbm_track_free_energy=track_free_energy\n rbm_reinit_a_use_data_stat=False\n rbm_learn_rate_a=learn_rate_b[nk-1]\n # initialize RBM\n rbm_model=restricted_boltzmann_machine.restricted_boltzmann_machine(M=rbm_visible_length, K=rbm_hidden_length, visible_type=visible_type, visible_type_fixed_param=visible_type_fixed_param, hidden_type=hidden_type, hidden_type_fixed_param=hidden_type_fixed_param, tie_W_for_pretraining_DBM_bottom=tie_W_for_pretraining_DBM_bottom, tie_W_for_pretraining_DBM_top=tie_W_for_pretraining_DBM_top, if_fix_vis_bias=rbm_if_fix_vis_bias, a=a, fix_a_log_ind=rbm_fix_a_log_ind, tol_poisson_max=self.tol_poisson_max, rng=self.rng)\n # train RBM\n #print \"The shape of rbm_X is{0}\".format(rbm_X.shape)\n rbm_model.train(X=rbm_X, batch_size=batch_size, pcdk=pcdk, NS=NS, maxiter=maxiter, learn_rate_a=rbm_learn_rate_a, learn_rate_b=learn_rate_b[nk], learn_rate_W=learn_rate_W[nk], change_rate=change_rate, adjust_change_rate_at=adjust_change_rate_at, adjust_coef=adjust_coef, change_every_many_iters=change_every_many_iters, init_chain_time=init_chain_time, train_subset_size_for_compute_error=train_subset_size_for_compute_error, valid_subset_size_for_compute_error=valid_subset_size_for_compute_error, track_reconstruct_error=rbm_track_reconstruct_error, track_free_energy=rbm_track_free_energy, reinit_a_use_data_stat=rbm_reinit_a_use_data_stat, if_plot_error_free_energy=if_plot_error_free_energy, dir_save=dir_save, prefix=prefix+\"_RBM_\"+str(nk), figwidth=figwidth, figheight=figheight)\n # assign parameters to corresponding layers\n if nk==0 and self.NK>1: # bottom RBM\n a_nk,b_nk,W_nk=rbm_model.get_param()\n self.a=a_nk\n self.W[nk]=W_nk\n self.b[nk]=b_nk\n elif 
nk==self.NK-1 and self.NK>1: # top RBM\n a_nk,b_nk,W_nk=rbm_model.get_param()\n self.W[nk]=W_nk\n self.b[nk]=b_nk\n elif nk==0 and self.NK==1: # there is only one hidden layer\n a_nk,b_nk,W_nk=rbm_model.get_param()\n self.a=a_nk\n self.W[nk]=W_nk\n self.b[nk]=b_nk\n else: # middle RBMs\n a_nk,b_nk,W_nk=rbm_model.get_param()\n self.W[nk]=0.5*W_nk\n self.b[nk]=b_nk\n\n #rbm_X,_=rbm_model.sample_h_given_x(rbm_X) # the output of this layer is used as input of the next layer\n _,rbm_X=rbm_model.sample_h_given_x(rbm_X) # Hinton suggested to use probabilities\n a=b_nk # the bias of the nk-th hidden layer is used as the bias of visible notes of the nk+1-th layer \n\n # save the trained rbms for initialize mean-filed approximation and Gibbs sampling.\n self.rbms.append(rbm_model)\n self.H_pretrain.append(rbm_X) # H of each RBM, for the purpose of (1) initializing mean-field approximation inference, (2) Gibbs sampling, and (3) building multi-modal DBM.\n\n print(\"Finished pretraining of DBM!\")\n end_time = time.clock()\n self.pretrain_time=end_time-start_time\n return self.pretrain_time\n print(\"It took {0} seconds.\".format(self.pretrain_time))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creating neural network based on architecture given by arch_numbers.
def create_network_architecture(arch_numbers):
    list_of_layers = []
    for layer_index, number in enumerate(arch_numbers):
        list_of_neurons = []
        for neuron_idx in range(number):
            if layer_index == 0:
                neuron = InputNeuron(numpy.array([]))
            else:
                neuron = Neuron(list_of_layers[layer_index - 1])
            list_of_neurons.append(neuron)
        list_of_layers.append(numpy.array(list_of_neurons))
    list_of_layers = numpy.array(list_of_layers)
    return list_of_layers
[ "def makeNetwork(inputCount, hiddensCount, outputCount, initSetup=None, activationFn=fnSigmoid(1)):\n inputs = [INode(activationFn=activationFn) for _ in range(inputCount)]\n hiddens = [[Node(activationFn=activationFn) for _ in range(hn)] for hn in hiddensCount]\n outputs = [ONode(activationFn=activationFn) for _ in range(outputCount)]\n\n def initWeight(i, i0, i1):\n if initSetup == None:\n return None\n else:\n return initSetup[0][i][i0][i1]\n\n def initBias(i, i0):\n return initSetup[1][i][i0]\n\n # Link each layer\n layers = [inputs] + hiddens + [outputs]\n for ((layer0, layer1), i) in zip(zip(layers, layers[1:]), range(len(layers))):\n for (l0, i0) in zip(layer0, range(len(layer0))):\n for (l1, i1) in zip(layer1, range(len(layer1))):\n l0.link(l1, weight=initWeight(i, i0, i1))\n if initSetup != None:\n l1.setBias(initBias(i, i1))\n return Network(inputs, outputs)", "def build_network(unit_multiplier = 1, num_lstm_stacks = 5):\n print(\"got to training!\")\n model = Sequential()\n model.add(Conv1D(filters= BASE_NUM_FILTERS*unit_multiplier, kernel_size = KERNEL_SIZE, activation='relu', input_shape=(PAD_LENGTH, WORD_DIM)))\n model.add(MaxPool1D())\n for i in range(1,num_lstm_stacks): \n model.add(LSTM(units=BASE_NUM_UNITS*unit_multiplier, return_sequences = True, recurrent_dropout = .20, dropout = .20))\n model.add(BatchNormalization())\n model.add(LSTM(units=BASE_NUM_UNITS*unit_multiplier, return_sequences = False, recurrent_dropout = .20, dropout = .20))\n model.add(BatchNormalization())\n model.add(Dense(units=1, activation=\"sigmoid\"))\n return model", "def create_network(self, neurons_input=1, neurons_hidden=0):\n\t\t\n\t\tself.rate = 0.01\t#Learning rate\n\t\tself.weights_input = []\n\t\tself.weights_hidden = []\n\t\tself.weights_output = []\n\t\tself.neurons_input = neurons_input\n\t\tself.neurons_hidden = neurons_hidden\n\n\t\tif neurons_input > 1:\n\t\t\tneurons_output = 1\n\t\telse:\n\t\t\tneurons_output = 0\n\t\tself.neurons_output = neurons_output\n\n\t\t# set random starting weights\n\t\tfor i in range(neurons_input):\n\t\t\tself.weights_input.append(randint(-1,1))\n\t\tfor i in range(neurons_hidden):\n\t\t\tfor j in range(neurons_input*neurons_hidden):\n\t\t\t\tself.weights_hidden.append(randint(-1,1))\n\t\tfor i in range(neurons_output):\n\t\t\tfor j in range(neurons_hidden):\n\t\t\t\tself.weights_output.append(randint(-1,1))", "def _initialize_neural_network(self, topology):\n\n # Create shallow copy of topology.\n neural_network = copy(topology)\n # Create output neuron.\n neural_network.output_neuron = create_neuron('identity', None)\n # Create hidden layer.\n neural_network.hidden_layers = self._initialize_hidden_layers(neural_network)\n # Establish connections\n self._connect_nodes(neural_network.sensors, neural_network.hidden_layers[0], random=True)\n previous_neurons = neural_network.hidden_layers[0]\n for hidden_layer in neural_network.hidden_layers[1:]:\n self._connect_nodes(previous_neurons, hidden_layer, random=False)\n previous_neurons = hidden_layer\n # Calculate hidden neurons.\n for layer in neural_network.hidden_layers:\n for neuron in layer:\n neuron.calculate()\n # Connect last neuron to output neuron with learning step.\n self._connect_learning_step(neural_network)\n # Calculate output semantics.\n neural_network.output_neuron.calculate()\n # Return neural network.\n return neural_network", "def build_model(classes, height, width):\n print(\"> Building Keras neural network...\")\n network_model = model.simple_3(classes=classes, height=height, 
width=width)\n return network_model", "def create_model(self, neurons_1=16, neurons_2=8):\n\n # Neural Network initialisation\n nn_model = Sequential()\n\n # Add hidden dense layer with prescribed number of neurons_1\n nn_model.add(Dense(neurons_1, kernel_initializer='uniform', activation='relu', input_dim=30))\n\n # Add hidden dense layer with prescribed number of neurons_2\n nn_model.add(Dense(neurons_2, kernel_initializer='uniform', activation='relu'))\n\n # Add final layer with set parameters\n nn_model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))\n\n # Compile neural network with set parameters\n nn_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n return nn_model", "def make_classifier(arch, hidden_layers, model, model_type):\n # Number of flower categories\n output_size = 102 \n \n # Determine input_size from model type\n if model_type == 'alexnet':\n input_size = model.classifier[1].in_features\n elif model_type == 'resnet':\n input_size = model.fc.in_features\n elif model_type == 'vgg':\n input_size = model.classifier[0].in_features\n elif model_type == 'densenet':\n input_size = model.classifier.in_features\n elif model_type == 'squeezenet':\n input_size = model.classifier[1].in_channels\n elif model_type == 'inception':\n input_size = model.fc.in_features\n # Need to adjust auxillary output\n num_ftrs = model.AuxLogits.fc.in_features\n model.AuxLogits.fc = nn.Linear(num_ftrs, output_size)\n \n # If the user didn't give any input for --hidden_units, make default hidden layer\n if hidden_layers is None:\n hidden_layers = [int((input_size + output_size)/2)]\n \n # Make sure that user-inputted hidden units are above zero\n if all(i > 0 for i in hidden_layers) is False:\n print('Please ensure hidden units are above 0.')\n print('Exiting...')\n sys.exit(0) \n \n # Dropout rate\n drop = 0.2\n # Length of hidden layers\n len_hl = len(hidden_layers)\n # Initialize OrderedDict\n network = OrderedDict()\n # First layer\n network['fc1'] = nn.Linear(input_size, hidden_layers[0])\n \n if len_hl > 1:\n # Add a variable number of more hidden layers\n layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])\n hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])\n \n # Use for tracking layers\n x=1\n # Number of hidden layers\n y=len_hl\n\n # Iterate through only the nn.Linear objects, which starts at y, and add layers to OrderedDict\n for each in hidden_layers[len_hl:]:\n # Add ReLU activation\n network['relu'+str(x)] = nn.ReLU()\n # Add dropout \n network['drop'+str(x)] = nn.Dropout(drop)\n # Add hidden layer \n network['fc'+str(x+1)] = hidden_layers[y]\n x+=1\n y+=1\n if y==len(hidden_layers):\n network['relu'+str(x)] = nn.ReLU()\n network['drop'+str(x)] = nn.Dropout(drop)\n else:\n network['relu'] = nn.ReLU()\n network['drop'] = nn.Dropout(drop) \n \n # Add hidden layer with number of categories to classify \n network['fc'+str(len_hl+1)] = nn.Linear(hidden_layers[len_hl-1], output_size) \n # Add log softmax activation\n network['output'] = nn.LogSoftmax(dim=1)\n \n # Create classifier\n classifier = nn.Sequential(network)\n \n if model_type in ['inception', 'resnet']:\n # Set classifier for model\n model.fc = classifier\n elif model_type == 'squeezenet':\n # reinitialize the Conv2d layer \n model.classifier[1] = nn.Conv2d(input_size, output_size, kernel_size=(1,1), stride=(1,1))\n # In forward pass, there is a view function call which depends on the final output class size\n # 
https://discuss.pytorch.org/t/fine-tuning-squeezenet/3855/6\n model.num_classes = output_size\n else:\n model.classifier = classifier \n\n print(model)\n \n return model", "def architecture(self, inp_nodes, hidden_nodes, output_nodes):\n self.inp_nodes = inp_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n self.labels = np.eye(self.output_nodes, dtype=int)", "def initiate_DNN(model, arch, hidden_units_1, hidden_units_2,\n learning_rate, device):\n\n for param in model.parameters():\n param.requires_grad = False\n\n if arch[:3] in [\"vgg\", \"den\", \"ale\", \"squ\", \"mob\"]:\n input_features = model.classifier[0].in_features\n else:\n input_features = model.fc.in_features\n\n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(input_features, hidden_units_1)),\n ('ReLu1', nn.ReLU()),\n ('Dropout1', nn.Dropout(p=0.5)),\n ('fc2', nn.Linear(hidden_units_1, hidden_units_2)),\n ('ReLu1', nn.ReLU()),\n ('fc3', nn.Linear(hidden_units_2, 102)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n\n if arch[:3] in [\"vgg\", \"den\", \"ale\", \"squ\", \"mob\"]:\n model.classifier = classifier\n else:\n model.fc = classifier\n\n criterion = nn.NLLLoss()\n optimizer = optim.Adam(classifier.parameters(), lr=learning_rate)\n model.to(device);\n\n return model, classifier, criterion, optimizer", "def createNets(x, y, layerNumMin, layerNumMax, minNodes, maxNodes, step=1):\n\n\t#create hidden layer structures \n\tstructures = []\n\tfor i in range(layerNumMin, layerNumMax):\n\t\tstructures.append(itertools.product(range(minNodes, maxNodes, step), repeat = i))\n\n\t#create nn's\n\tnetworks = []\n\tfor struct in structures:\n\t\tfor s in struct:\n\t\t\tnetworks.append(NeuralNetwork([x] + list(s) + [y]))\n\n\treturn networks", "def buildNetworks(self, netBuilder):\n #None is default, so old scripts can still run. 
self not defined in signature\n for index in self.environments:\n environment = self.environments[index]\n environment.network(netBuilder)\n\n self.graph = nx.Graph()\n self.graph.add_nodes_from(list(range(len(self.populace))))\n #add the edges of each environment to a single networkx graph\n for environment in self.environments: self.graph.add_weighted_edges_from(self.environments[environment].edges, weight = \"transmission_weight\")\n self.isBuilt = True", "def build_neural_network_from_binary_string(data: str, input_nbr: int, output_nbr: int) -> Network:\n # todo test\n return build_neural_network(*read(*parse(data, input_nbr, output_nbr)))", "def build_network(self): \r\n self.network = input_data(shape = [None, 48, 48, 1])\r\n print(\"Input data \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 64, 5, activation = 'relu')\r\n print(\"Conv1 \",self.network.shape[1:])\r\n self.network = max_pool_2d(self.network, 3, strides = 2)\r\n print(\"Maxpool1 \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 64, 5, activation = 'relu')\r\n print(\"Conv2 \",self.network.shape[1:])\r\n self.network = max_pool_2d(self.network, 3, strides = 2)\r\n print(\"Maxpool2 \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 128, 4, activation = 'relu')\r\n print(\"Conv3 \",self.network.shape[1:])\r\n self.network = dropout(self.network, 0.3)\r\n print(\"Dropout \",self.network.shape[1:])\r\n self.network = fully_connected(self.network, 3072, activation = 'relu')\r\n print(\"Fully connected\",self.network.shape[1:])\r\n self.network = fully_connected(self.network, len(self.target_classes), activation = 'softmax')\r\n print(\"Output \",self.network.shape[1:])\r\n print(\"\\n\")\r\n # Generates a TrainOp which contains the information about optimization process - optimizer, loss function, etc\r\n self.network = regression(self.network,optimizer = 'momentum',metric = 'accuracy',loss = 'categorical_crossentropy')\r\n # Creates a model instance.\r\n self.model = tflearn.DNN(self.network,checkpoint_path = 'model_1_atul',max_checkpoints = 1,tensorboard_verbose = 2)\r\n # Loads the model weights from the checkpoint\r\n self.load_model()", "def build_net(self):\r\n # define a list of weights, size of each layer is n_l*n_(l+1)\r\n # define a list to store the deritives\r\n self.weights = []\r\n self.derivatives = []\r\n for i in range(self.n_layers -1):\r\n weight = np.random.rand(self.n_neuron[i],self.n_neuron[i+1])\r\n self.weights.append(weight)\r\n num = self.n_neuron[i]\r\n derivarive = np.zeros(shape= (self.n_neuron[i],self.n_neuron[i+1]))\r\n self.derivatives.append(derivarive)\r\n\r\n # define a list to contain the activations\r\n self.activations = []\r\n for i in range(self.n_layers ):\r\n activation = np.zeros(shape = self.n_neuron[i]) # horizontal vector\r\n self.activations.append(activation)", "def createNeurons(self, verbose=False):\n\t\t# Depending on the set_reduction_method, we use different algorithms to calculate prototypes\n\t\tif self.set_reduction_method == \"means\":\n\t\t\tprint(\"Calculating centers for Gaussian function by means...\")\n\t\t\tself.prototypes = Cluster.byMeans(self.training_set, number_of_clusters=self.cluster_count,\n\t\t\t\t\t\t\t\t\t\t\t class_header=self.class_header, verbosity=0)\n\t\telif self.set_reduction_method == \"medoids\":\n\t\t\tprint(\"Calculating centers for Gaussian function by medoids...\")\n\t\t\tself.prototypes = Cluster.byMedoids(self.training_set, self.cluster_count, self.class_header, 
verbosity=0)\n\n\t\telif self.set_reduction_method == \"condensed\":\n\t\t\tprint(\"Calculating centers for Gaussian function using condensed nearest neighbor...\")\n\t\t\tself.prototypes = NearestNeighbor.condensedNearestNeighbor(self.training_set, self.class_header)\n\n\t\telse:\n\t\t\tprint(\"'%s' is an invalid set reduction method, please check it and try again.\" % self.set_reduction_method)\n\t\t\tsys.exit()\n\n\t\tif not self.regression:\n\t\t\tprint(\"Generating output layer of size %d with sigmoid activation functions...\" % self.output_count) if verbose else None\n\t\t\tself.output_layer = FFNetwork(len(self.prototypes),\n\t\t\t\t\t\t\t\t\t\t [self.output_count, 'sigmoid'],\n\t\t\t\t\t\t\t\t\t\t self.training_set,\n\t\t\t\t\t\t\t\t\t\t class_header=self.class_header,\n\t\t\t\t\t\t\t\t\t\t learning_rate=self.learning_rate,\n\t\t\t\t\t\t\t\t\t\t use_momentum=self.use_momentum,\n\t\t\t\t\t\t\t\t\t\t regression=self.regression)\n\t\telse:\n\t\t\tprint(\"Generating output layer with a single linear activation function for regression...\") if verbose else None\n\t\t\tself.output_layer = FFNetwork(len(self.prototypes),\n\t\t\t\t\t\t\t\t\t\t [self.output_count, 'linear'],\n\t\t\t\t\t\t\t\t\t\t self.training_set,\n\t\t\t\t\t\t\t\t\t\t class_header=self.class_header,\n\t\t\t\t\t\t\t\t\t\t learning_rate=self.learning_rate,\n\t\t\t\t\t\t\t\t\t\t use_momentum=self.use_momentum,\n\t\t\t\t\t\t\t\t\t\t regression=self.regression)\n\n\t\tprint(\"Generating widths for basis functions using nearest neighbor proximity...\") if verbose else None\n\t\tsigma_list = self.findSigma()\n\n\t\t# for every point in prototype list, create a neuron and store that point and sigma in said neuron\n\t\tprint(\"Generating layer of Gaussian basis functions of size %d...\" % len(self.prototypes)) if verbose else None\n\t\tfor i in range(len(self.prototypes)):\n\t\t\tself.function_layer.append(RBFNeuron(self.prototypes.iloc[i], sigma_list[i], self.class_header))\n\n\t\tprint(\"\\nTRAINING NEURONS ON TRAINING DATA OF %d ENTRIES\" % len(self.training_set)) if verbose else None\n\t\tself.training_set.apply(lambda row: self.train(row), axis=1)", "def get_mobilenet(architecture, num_classes):\n \n # parse architecture string\n pattern = r\"^(?P<cifar>cifar_)?mobilenet(?P<version>v1|v2|v3_small|v3_large)_w(?P<width_numerator>[0-9]+)(d(?P<width_denominator>[0-9]+))?(_(?P<bsconv_variant>bsconvu|bsconvs_p1d6))?$\"\n match = re.match(pattern, architecture)\n if match is None:\n raise ValueError(\"Model architecture '{}' is not supported\".format(architecture))\n cifar = (match.group(\"cifar\") is not None)\n version = match.group(\"version\")\n width_numerator = match.group(\"width_numerator\")\n width_denominator = match.group(\"width_denominator\")\n bsconv_variant = match.group(\"bsconv_variant\")\n \n # determine the width_multiplier\n if width_denominator is None:\n width_multiplier = float(width_numerator)\n else:\n width_multiplier = float(width_numerator) / float(width_denominator)\n \n # base net\n if version == \"v1\":\n model = build_mobilenet_v1(num_classes=num_classes, width_multiplier=width_multiplier, cifar=cifar)\n elif version == \"v2\":\n model = build_mobilenet_v2(num_classes=num_classes, width_multiplier=width_multiplier, cifar=cifar)\n elif version == \"v3_small\":\n model = build_mobilenet_v3(num_classes=num_classes, version=\"small\", width_multiplier=width_multiplier, cifar=cifar)\n elif version == \"v3_large\":\n model = build_mobilenet_v3(num_classes=num_classes, version=\"large\", 
width_multiplier=width_multiplier, cifar=cifar)\n \n # apply BSConv\n if bsconv_variant is None:\n pass\n elif bsconv_variant == \"bsconvu\":\n if version == \"v1\":\n model = transform_mobilenetv1(model)\n else:\n raise ValueError(\"For MobileNetV1, only BSConv-U is supported\")\n elif bsconv_variant.startswith(\"bsconvs_p1d6\"):\n if version in (\"v2\", \"v3_small\", \"v3_large\"):\n model = transform_mobilenetv2(model)\n else:\n raise ValueError(\"For MobileNetV2/V3, only BSConv-S (p=1/6) is supported\")\n\n return model", "def _build_graph(self):\n\n # build simple architecture to multiply two numbers\n w1 = keras.layers.Input(shape=(1,), name=\"w1\")\n w2 = keras.layers.Input(shape=(1,), name=\"w2\")\n\n add = keras.layers.add([w1, w2])\n mult = keras.layers.multiply([w1, w2])\n out = keras.layers.concatenate([add, mult])\n\n return keras.models.Model(inputs=[w1, w2], outputs=out)", "def create_network_action(self, netsim, number, prefix):\n self.log.info('Creating new netsim network')\n response = None\n while True:\n # Create the network\n create_response = netsim.create_network(number, prefix)\n response = create_response\n if create_response.error:\n break\n # Init netsim device configuration\n init_response = netsim.init_config('')\n if init_response.error:\n response = init_response\n break\n # Load init configuration to cdb\n load_response = netsim.load_config()\n if load_response.error:\n response = load_response\n break\n # all operations finished\n break\n\n return response", "def __init__(self, inputs, targets, hidden_nodes = [2], learning_rate=0.01, momentum = 0.9, activation_type = 1):\n\n self.inputs = ny.array( inputs )\n self.targets = ny.array( targets )\n\n self.nodes_in = len( inputs[0] )\n self.nodes_out = len( targets[0] )\n self.data_amount = len( inputs )\n\n self.nodes_hidden = hidden_nodes\n self.learning_rate = learning_rate\n self.momentum = momentum\n\n # range tanh from -1 to 1, range sigmoid from 0 to 1\n self.activation_type = activation_type\n if activation_type == 1:\n self.activation_function = ny.tanh\n #self.derivative_function = lambda x : 1-ny.square(self.activation_function(x))\n else:\n self.activation_function = expit\n #self.derivative_function = lambda (x): self.activation_function(x)*(1-self.activation_function(x))\n\n self.number_hidden_layers = len(hidden_nodes)\n\n self._init_weights()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Getting output out of neural network.
def get_neural_net_output(input, neural_net):
    for layer_index, layer in enumerate(neural_net):
        for neuron_idx, neuron in enumerate(layer):
            if layer_index == 0:
                neural_net[layer_index][neuron_idx].input = input[neuron_idx]
                neural_net[layer_index][neuron_idx].output = input[neuron_idx]
            else:
                neurons_from_previous_layer = neuron.previous_layer_neurons
                weights_from_previous_layer = neuron.weights_from_previous_layer
                sum_of_products = 0
                for weight_idx, previous_neuron in enumerate(neurons_from_previous_layer):
                    sum_of_products += previous_neuron.output * weights_from_previous_layer[weight_idx]
                neural_net[layer_index][neuron_idx].output = float(sigmoid(sum_of_products))
        if layer_index == len(neural_net) - 1:
            nn_output = [neuron.output for neuron in layer]
            return nn_output
[ "def getOutputNeuron(self):\n return self.outputNeuron", "def calculate_output(self):\n output = reduce(lambda ret, conn: ret + conn.upstream_node.output * conn.weight, self.upstream, 0.0)\n\n self.output = sigmoid(output)", "def evaluate(self, neural_network: NeuralNetwork) -> np.ndarray:\n return neural_network.feed_forward(self.test_set)", "def output(self):\n return self.layers[len(self.layers) - 1].data", "def getPrediction(nnOutput):\n\treturn [nnOutput, 1.0]", "def get_hidden_values(self, input):\n# print T.dot(input, self.W).eval()\n return T.nnet.sigmoid(T.dot(input, self.W) + self.b)", "def display_stuff(self):\n # print(\"i1 | i2 | b | Net| O/p | Thresh\")\n #print(\"neuron display\")\n print(*self.inp_list, self.y, self.out, self.threshold, *self.weight_list, sep=\" | \")\n # print(\"Weights used are: \")\n # print(*self.weight_list)", "def summarize(self):\n\n if not self.from_torch_called_ and not self.from_tensorflow_called_ and self.layers_ < 2:\n if not self.from_torch_called_ and not self.from_tensorflow_called_:\n raise ValueError('This model has not yet been created. Create the model first by calling `from_pytorch()` or calling `from_tensorflow()`')\n else:\n raise ValueError('The model has not been built yet or the model is not supported.\\n Check the docs for further information')\n \n title = \"Neural Network Architecture\"\n hline = \"+\"+\"-\"*69+\"+\"\n\n print(hline)\n print(\"|\"+title.center(69)+\"|\")\n print(hline)\n print(\"|\"+\"Layer Name\".center(28)+\"|\"+\"Layer Type\".center(24)+\"|\"+\"Layer Units\".center(15)+\"|\")\n print(hline)\n for i in range(self.layers_):\n col1 = self.layer_names_[i].center(28)\n col2 = self.layer_types_[i].capitalize().center(24)\n col3 = str(self.layer_units_[i]).center(15)\n print(\"|\"+col1+\"|\"+col2+\"|\"+col3+\"|\")\n print(hline)\n\n return", "def compute_output(self):\n s = 0\n if self._selfw:\n s += self._selfw * self._value\n for (w, i) in zip(self._weights, self._inputs):\n s += w * i.value()\n self._value = self._f(s)\n _logger.info('Neuron {0}: activation: {1}'.format(self._name, self._value))", "def _nn_read_data(self):\n\t\treaData = True\n\t\tnnIncomingData = False\n\t\tnnData = \"\"\n\t\twhile reaData and self._neuralNetwork.poll()==None:\n\t\t\tnnIncomingMsg = self._neuralNetwork.stdout.readline().rstrip(\"\\n\").split()\n\t\t\tif \"COMM_OUT\" in nnIncomingMsg: nnIncomingData = True\n\t\t\telif \"END\" in nnIncomingMsg: reaData = False\n\t\t\telif nnIncomingData: nnData += \" \".join(nnIncomingMsg)+\"\\n\"\n\t\t\tif self._debug: print \"\\t\\tPyNN: \"+\" \".join(nnIncomingMsg)\n\t\treturn nnData", "def generate_output(self, data, w):\n # GENERATE AND UPDATE WEIGHT MATRICES\n print(\"rightnow at the genereate output in network.py, the wts are\",w)\n self.decode(w)\n\n # INIT VARIABLES\n # size = data.shape[0]\n # Input = np.zeros((1, self.topology[0]))\n # fx = np.zeros((size,self.topology[2]))\n \n # # READ DATA ROW BY ROW AND CARRY OUT FORWARD PASS\n # for i in range(0, size):\n # Input = data[i, 0:self.topology[0]]\n # self.forward_pass(Input)\n # fx[i] = self.out\n train=data[:,0:self.topology[0]]\n train=Variable(torch.from_numpy(train)).float()\n # print(train.shape)\n# =============================================================================\n# print(\"fc 1 wt layer\",self.fc1.weight.data)\n# print(\"fc 1 bias layer\",self.fc1.bias.data)\n# print(\"fc 2 wt layer\",self.fc2.weight.data)\n# print(\"fc 1 bias layer\",self.fc1.bias.data)\n# 
=============================================================================\n\n x = F.relu(self.fc1(train))\n x = F.sigmoid(self.fc2(x))\n return x.detach().numpy()", "def output(self, read_data):\n state = torch.cat([self.lstm_h]+read_data,dim=1)\n output = self.output_fc(state)\n output = torch.sigmoid(output)\n return output", "def print_results(self, outputs=True, net=False):\n # Print outputs\n if (outputs):\n results = self.pso.best.outputs\n df = self.ideal.copy()\n df['results'] = results\n print(df.head(10))\n # Print the net\n if (net):\n self.pso.best.network.print_net()", "def inference(self):\n\n\n # Network Parameters\n n_hidden_1 = 256 # 1st layer number of features\n n_hidden_2 = 256 # 2nd layer number of features\n n_input = 784 # MNIST data input (img shape: 28*28)\n n_classes = 10 # MNIST total classes (0-9 digits)\n\n # Store layers weight & bias\n weights = {\n 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n }\n biases = {\n 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n 'out': tf.Variable(tf.random_normal([n_classes]))\n }\n\n def multilayer_perceptron(x, weights, biases):\n # Hidden layer with RELU activation\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n layer_1 = tf.nn.relu(layer_1)\n # Hidden layer with RELU activation\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.relu(layer_2)\n # Output layer with linear activation\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n #import ipdb;\n #ipdb.set_trace()\n return out_layer\n\n # Construct model\n self.logits = multilayer_perceptron(self.X, weights, biases)\n self.Y_hat = tf.nn.softmax(self.logits)", "def measureNeurons():\n layerSizes = []\n networks = []\n print()\n print(\"**********************************************************************\")\n\n # Get the number of networks the user would like to train\n numNetworks = getNumNetworksFromUser()\n\n # Get the number of hidden layers from the user\n numHiddenLayers = getNumHiddenLayersFromUser()\n\n # Get the pattern type from the user\n patternType = getPatternTypeFromUser(\"neurons per hidden layer\")\n\n # Get the values that represent the number of neurons in each hidden layer of each network from the user\n layerSizes = getValuesFromPattern(patternType, numNetworks)\n\n # Get the number of epochs the user would like to train the networks for\n numEpochs = getNumEpochsFromUser()\n\n # TODO: Refactor this into the getActivFuncFromUser() function\n # Get the hidden layer neurons' activation function from the user\n activationFunctionSelection = getActivFuncFromUser()\n\n # Get the string representation of the user's selection\n activationFunction = activFuncSelectionMap.get(activationFunctionSelection)\n\n # Translate the user's activation function selection into a keras compatible string\n activationFunction = activFuncKerasMap.get(activationFunction)\n\n # Create the networks\n for i in range(numNetworks):\n networks.append(NeuralNetwork(numHiddenLayers, layerSizes[i], numEpochs, activationFunction))\n\n return networks, layerSizes", "def getOutput(self, *args):\n return self.getSimData(*args)", "def inference_network(self):\n return self.DNN", "def n_neuron(self):\n pass", "def get_layer_outputs(obj):\n layers = obj.model.layers[1:]\n dense_outputs = get_preactivation_tensors(layers)\n 
return dense_outputs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reconstruct a missing fragment from a subset of available fragments.
def reconstruct(self, available_fragment_payloads, missing_fragment_indexes):
    return self.ec_lib_reference.reconstruct(
        available_fragment_payloads, missing_fragment_indexes)
[ "def test_preserveFragments(self):\n self.assertEqual(\n client._urljoin(b\"http://foo.com/bar#frag\", b\"/quux\"),\n b\"http://foo.com/quux#frag\",\n )\n self.assertEqual(\n client._urljoin(b\"http://foo.com/bar\", b\"/quux#frag2\"),\n b\"http://foo.com/quux#frag2\",\n )\n self.assertEqual(\n client._urljoin(b\"http://foo.com/bar#frag\", b\"/quux#frag2\"),\n b\"http://foo.com/quux#frag2\",\n )", "def prune_non_seg(self):\n self.fullsequence = self.sequence # First back up the original sequence\n self.fullvariantset = self.variantset\n self.fullvariants = self.variants\n self.sequence = MultipleSeqAlignment([]) # Blank the sequence to be worked on\n\n print \"\\nPruning non-segregating sites...\"\n locs = []\n for curvar in self.variantset:\n locs.append(curvar)\n locs.sort()\n\n stripped = {}\n seqnames = []\n for seq in self.fullsequence:\n stripped[seq.name] = []\n seqnames.append(seq.name)\n\n for i in xrange(len(locs)):\n loc = locs[i]\n self.pruned_to_full.append(loc)\n seqbits = self.fullsequence[:, loc]\n name = 0\n for seqbit in seqbits:\n stripped[seqnames[name]].append(seqbit)\n name += 1\n\n for strip in stripped.keys():\n self.sequence.append(SeqRecord(Seq(''.join(stripped[strip])), name=strip, id=strip))\n\n self.variantset = set()\n self.variants = {}\n self.variants_from_sequence() # Re-run on stripped sequence", "def fragments_needed(self, reconstruction_indexes,\n exclude_indexes=None):\n if exclude_indexes is None:\n exclude_indexes = []\n return self.ec_lib_reference.fragments_needed(reconstruction_indexes,\n exclude_indexes)", "def delete_incomplete_backbone_residues(molecule: Union[Universe, AtomGroup]) -> Universe:\r\n protein = molecule.select_atoms(\"protein\")\r\n not_protein = molecule.select_atoms(\"not protein\")\r\n\r\n incomplete_backbone_residues = []\r\n for residue in protein.residues:\r\n if len(residue.atoms.select_atoms(\"backbone and not element hydrogen\")) < 4:\r\n incomplete_backbone_residues.append(residue)\r\n\r\n if len(incomplete_backbone_residues) > 0:\r\n protein = delete_residues(protein, incomplete_backbone_residues)\r\n\r\n if len(not_protein) > 0:\r\n return Merge(protein.atoms, not_protein.atoms)\r\n else:\r\n return Merge(protein.atoms)", "def test_fail_missing_signature_fragment_underflow(self):\n # Remove the last input's second signature fragment, and the change\n # transaction.\n del self.bundle.transactions[-2:]\n for (i, txn) in enumerate(self.bundle): # type: Tuple[int, Transaction]\n txn.current_index = i\n txn.last_index = 1\n\n # Fix bundle balance, since we removed the change transaction.\n self.bundle[1].value = -self.bundle[0].value\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Reached end of bundle while looking for '\n 'signature fragment 2 for transaction 1.'\n ],\n )", "def get_fragment(request, identifier):\n\n try:\n return request._feincms_applicationcontents_fragments[identifier]\n except (AttributeError, KeyError):\n return u''", "def return_fragment(atom, fragment_array):\r\n\r\n safe = False\r\n non_terminal = list()\r\n \r\n for valence_atom in openbabel.OBAtomAtomIter(atom):\r\n if get_valence(valence_atom) > 1:\r\n if valence_atom not in fragment_array and\\\r\n valence_atom not in non_terminal and valence_atom not in linker_atoms:\r\n fragment_array.append(valence_atom)\r\n non_terminal.append(valence_atom)\r\n safe = True\r\n else:\r\n if valence_atom not in fragment_array and\\\r\n valence_atom not in 
linker_atoms:\r\n fragment_array.append(valence_atom)\r\n\r\n if safe:\r\n for each in non_terminal:\r\n return_fragment(each, fragment_array)\r\n else:\r\n return True", "def fragmentRandomizer (inputPro, fragment, dimension, trieMax):\n\n # Get the best protein from a 100 random foldings\n go = 0\n\n while go == 0:\n\n randomPro = randomizer(inputPro, trieMax, dimension)\n\n if randomPro.strength != 0:\n go = 1\n\n origPro = randomPro\n bestPro = origPro\n\n invalidFold = 0\n temp = 3.5\n\n while temp > 0.1:\n\n # Define start as random amino acid\n start = len(origPro.proteinChain)\n\n while start > (len(origPro.proteinChain) - fragment):\n\n start = randint(0, (len(origPro.proteinChain) - fragment))\n\n if(start == 0):\n newCoordinates = beginFragment(origPro, fragment, dimension)\n elif(start == len(origPro.proteinChain) - fragment):\n newCoordinates = endFragment(origPro, start, fragment, dimension)\n elif(start < len(origPro.proteinChain) - fragment):\n middleInfo = middleFragment(origPro, start, fragment, dimension)\n newCoordinates = middleInfo[0]\n\n if newCoordinates != 'none':\n\n # Create protein with these coordinates for the fragments\n newPro = Protein(origPro.proteinChain)\n\n # Start fragment\n if(start == 0):\n\n newPro.aminoCoordinates = newCoordinates\n newPro.aminoCoordinates.extend(origPro.aminoCoordinates[fragment:])\n\n # End fragment\n elif(start == len(origPro.proteinChain) - fragment):\n\n newPro.aminoCoordinates = origPro.aminoCoordinates[0 : start]\n newPro.aminoCoordinates.extend(newCoordinates)\n\n # Middle fragment\n elif(start < len(origPro.proteinChain) - fragment):\n\n newPro.aminoCoordinates = origPro.aminoCoordinates[0 : start]\n newPro.aminoCoordinates.extend(newCoordinates)\n newPro.aminoCoordinates.extend(origPro.aminoCoordinates[middleInfo[1] + 1:])\n\n\n # Compare score with old protein\n newPro.strength = calculateFolding(newPro.aminoCoordinates, newPro.proteinChain)\n\n # Calculate probability of acceptance (simulated annealing)\n probab = min(1,(math.expm1(newPro.strength/temp)/math.expm1(origPro.strength/temp)))\n\n randumb = random.uniform(0,1)\n\n if probab > randumb:\n origPro = newPro\n if origPro.strength > bestPro.strength:\n bestPro = origPro\n\n temp *= 0.9998\n invalidFold = 0\n\n # It is undesireble for a protein to keep folding a conformation that\n # will not work, this prevents excessive folding of a doomed protein.\n else:\n invalidFold += 1\n if invalidFold > 20:\n bestPro = fragmentRandomizer(inputPro, fragment, dimension, trieMax)\n\n\n return(bestPro)", "def fragment(self, space_left, fragment_msg):\n new_args = []\n key_length = 2 # 2bytes for size\n for i, arg in enumerate(self.args):\n if space_left >= key_length:\n space_left -= key_length\n\n if arg is not None:\n arg_length = len(arg)\n if space_left < arg_length:\n fragment_msg.args.append(arg[space_left:])\n new_args.append(arg[:space_left])\n space_left = 0\n else:\n new_args.append(arg)\n space_left -= arg_length\n if space_left <= key_length:\n # boundary for arg\n fragment_msg.args.append(\"\")\n else:\n new_args.append(\"\")\n else:\n for l in range(i, len(self.args)):\n fragment_msg.args.append(self.args[l])\n break\n\n self.args = new_args\n if space_left >= 0 and len(fragment_msg.args) == 0:\n # don't need to fragment any more\n return None\n else:\n self.flags = FlagsType.fragment\n fragment_msg.id = self.id\n return fragment_msg", "def add_fragment(self, _):\n if not self.selection_string:\n self.fragment_add_message.message = \"\"\"<span 
style=\"color:red\"> Error:</span> Please select a fragment first.\"\"\"\n return\n if not self.new_fragment_name.value:\n self.fragment_add_message.message = \"\"\"<span style=\"color:red\">Error:</span> Please enter a name for the fragment.\"\"\"\n return\n self.fragment_add_message.message = f\"\"\"<span style=\"color:blue\">Info:</span> Adding {self.new_fragment_name.value} ({self.selection_string}) to the fragment list.\"\"\"\n self.fragments = self.fragments + [\n Fragment(indices=self.selection_string, name=self.new_fragment_name.value)\n ]\n self.new_fragment_name.value = \"\"", "def fragment(self, molecule: Molecule) -> List[FragmentData]:\n from fragmenter import fragment\n\n # make sure the molecule has at least one conformer as this can cause issues\n if molecule.n_conformers == 0:\n molecule.generate_conformers(n_conformers=1)\n\n # set up the fragmenter\n fragment_factory = fragment.WBOFragmenter(\n molecule=molecule.to_openeye(), verbose=False\n )\n\n fragments: List[FragmentData] = []\n try:\n # fragment the molecule\n fragment_factory.fragment(\n threshold=self.wbo_threshold,\n keep_non_rotor_ring_substituents=self.keep_non_rotor_ring_substituents,\n )\n # now we work out the relation between the fragment and the parent\n fragments_data = fragment_factory.to_torsiondrive_json()\n # now store the data\n for data in fragments_data.values():\n off_frag = Molecule.from_mapped_smiles(\n data[\"identifiers\"][\n \"canonical_isomeric_explicit_hydrogen_mapped_smiles\"\n ]\n )\n # get the fragment parent mapping\n frag_dihedral = data[\"dihedral\"][0][1:3]\n\n # in some cases we get one fragment back which is the parent molecule\n # we should not work out a mapping\n if not molecule.is_isomorphic_with(off_frag):\n mapping = self._get_fragment_parent_mapping(\n fragment=off_frag, parent=molecule\n )\n # get the parent torsion\n parent_dihedral = tuple([mapping[i] for i in frag_dihedral])\n parent_molecule = molecule\n else:\n # reuse the current fragment data as dummy parent data\n mapping = dict((i, i) for i in range(molecule.n_atoms))\n parent_dihedral = frag_dihedral\n parent_molecule = off_frag\n # this is the data we need so make the fragmnetdata\n frag_data = FragmentData(\n parent_molecule=parent_molecule,\n parent_torsion=parent_dihedral,\n fragment_molecule=off_frag,\n fragment_torsion=frag_dihedral,\n fragment_attributes=data[\"identifiers\"],\n fragment_parent_mapping=mapping,\n )\n fragments.append(frag_data)\n\n return fragments\n\n except RuntimeError:\n raise FragmenterError(\n f\"The molecule {molecule} could not be fragmented so no fitting target was made.\"\n )", "def find_contiguous_fragments(residues_z, max_gap=1, min_fragment_length=3):\n # We will assume that there are no missing residues in the PDB file, so that\n # we can rely on the indices of the residues in the list to determine\n # whether two residues are consecutive.\n\n fragments = []\n\n if residues_z:\n # Build up each fragment element by element, starting a new fragment\n # when the next element isn't compatible with the current fragment\n # either because there is too big a gap between residue numbers or\n # because they are on separate chains\n # Recall that the list residues_z contains pairs (index, residue_obj)\n current_index = residues_z[0][0]\n current_residue = residues_z[0][1]\n current_chain_obj = current_residue.get_parent()\n\n working_fragment = [residues_z[0][1]]\n for target in residues_z[1:]:\n new_index = target[0]\n new_residue = target[1]\n new_chain_obj = 
new_residue.get_parent()\n\n if new_chain_obj == current_chain_obj:\n assert new_index > current_index, \\\n \"List of indices must be sorted {} {}\".format(new_index, current_index)\n\n gap = (new_index - current_index) - 1\n # If the gap is bigger than allowed or the chain has changed\n # then we must start a new fragment\n if new_chain_obj != current_chain_obj or gap > max_gap:\n # Add the completed fragment to the list of fragments if it is long enough\n if len(working_fragment) >= min_fragment_length:\n fragments.append(working_fragment)\n # Start a new fragment\n working_fragment = [new_residue]\n else:\n if gap:\n # Select the residues strictly between these two indices\n working_fragment.extend(find_missing_residues(current_residue,\n new_residue))\n\n working_fragment.append(new_residue)\n\n current_chain_obj = new_chain_obj\n current_index = new_index\n current_residue = new_residue\n\n if len(working_fragment) >= min_fragment_length:\n fragments.append(working_fragment)\n\n return fragments", "def ringbreak_frag_handling(new_core, mcs_ringbreak_idx):\n\n ##########################\n # Check for fragmentation, if core is fragmented than additional\n # processing is required to find the largest frag and to then reassign the\n # indexes in the ligs and core\n\n check_fragmentation = Chem.GetMolFrags(new_core, asMols=True, sanitizeFrags=False)\n num_frag_len = len(check_fragmentation)\n iso_core_frag_list = copy.deepcopy(mcs_ringbreak_idx)\n\n if num_frag_len > 1:\n # determine the largest fragment in the list of frags\n largest_frag, largest_frag_index_num = find_biggest_frag(check_fragmentation)\n\n # the core is now the largest fragment\n core = check_fragmentation[largest_frag_index_num]\n\n # make a list without the largest fragment\n list_frag_mols = []\n list_of_frag_idxs = range(0, len(check_fragmentation))\n\n for i in list_of_frag_idxs:\n if i == largest_frag_index_num:\n continue\n\n frag = check_fragmentation[int(i)]\n list_frag_mols.append(frag)\n\n # get the idx for all atoms in all frags EXCEPT THE LARGEST FRAG.\n # these will be the idx's of the original common core, before deleting\n # things which will be identified using the Isolabels we added before.\n # We will be deleting these atoms shortly\n for frag in list_frag_mols:\n\n # get all atom idx's (for the original unaltered common_core) in\n # the frag based on the Iso-labels we added before\n for atoms in frag.GetAtoms():\n index_val = atoms.GetIsotope() - 10000\n iso_core_frag_list.append(index_val)\n\n # Remove redundancy\n iso_core_frag_list = list(set(iso_core_frag_list))\n\n return iso_core_frag_list\n\n # if no fragmentation occured\n return iso_core_frag_list", "def fragmenter(source, remix_lines, debug=False):\n\n # fragments not present in this source.\n not_found_on_source = []\n for line in remix_lines:\n if line not in source:\n not_found_on_source.append(line)\n\n def iterate(lines):\n\n current = source\n not_found = []\n for line in lines:\n if line in not_found_on_source:\n continue\n\n if line not in current:\n not_found.append(line)\n continue\n current = current.replace(line, '\\n{}\\n'.format(line))\n current = current.replace('\\n ', '\\n').replace('\\n\\n', '\\n')\n return current, not_found\n\n results = []\n count = 1\n while True:\n logging.info('Fragmenting source. 
Iteration %s', count)\n result, not_found = iterate(remix_lines)\n if debug:\n d = tempfile.mkstemp(suffix='-iter{}.txt'.format(count))[1]\n logging.debug('Writing fragmented source to {}'.format(d))\n with open(d, 'w') as _t:\n _t.write(result)\n count += 1\n results.append(result)\n if not not_found:\n # finish, as all remix lines were found\n break\n remix_lines = not_found\n\n return results, not_found_on_source", "def fragments_fromstring(html, no_leading_text=False,\n guess_charset=False, parser=None):\n if not isinstance(html, _strings):\n raise TypeError('string required')\n\n if parser is None:\n parser = html_parser\n\n children = parser.parseFragment(html, 'div', useChardet=guess_charset)\n if children and isinstance(children[0], _strings):\n if no_leading_text:\n if children[0].strip():\n raise etree.ParserError('There is leading text: %r' %\n children[0])\n del children[0]\n return children", "def _parse_fragment_spread(fragment_spread_ast: dict) -> \"FragmentSpreadNode\":\n return FragmentSpreadNode(\n name=_parse_name(fragment_spread_ast[\"name\"]),\n directives=_parse_directives(fragment_spread_ast[\"directives\"]),\n location=_parse_location(fragment_spread_ast[\"loc\"]),\n )", "def contiguize_from_fragment_pattern(frag_pattern, geom=None, verbose=1, throw_reorder=False, **kwargs):\n\n vsplt = np.cumsum([len(fr) for fr in frag_pattern])\n nat = vsplt[-1]\n fragment_separators = vsplt[:-1]\n\n do_reorder = False\n if not np.array_equal(np.sort(np.concatenate(frag_pattern)), np.arange(nat)):\n raise ValidationError(\"\"\"Fragmentation pattern skips atoms: {}\"\"\".format(frag_pattern))\n\n if not np.array_equal(np.concatenate(frag_pattern), np.arange(nat)):\n print(\"\"\"Warning: QCElemental is reordering atoms to accommodate non-contiguous fragments\"\"\")\n do_reorder = True\n\n if do_reorder and throw_reorder:\n raise ValidationError(\n \"\"\"Error: QCElemental would need to reorder atoms to accommodate non-contiguous fragments\"\"\")\n\n if geom is not None:\n ncgeom = np.array(geom).reshape(-1, 3)\n if nat != ncgeom.shape[0]:\n raise ValidationError(\"\"\"dropped atoms! 
nat = {} != {}\"\"\".format(nat, ncgeom.shape[0]))\n geom = np.vstack([ncgeom[fr] for fr in frag_pattern])\n geom = geom.reshape((-1))\n\n def reorder(arr):\n if nat != len(arr):\n raise ValidationError(\"\"\"wrong number of atoms in array: nat = {} != {}\"\"\".format(nat, len(arr)))\n return np.concatenate([np.array(arr)[fr] for fr in frag_pattern], axis=0)\n\n returns = {'fragment_separators': fragment_separators}\n if geom is not None:\n returns.update({'geom': geom})\n extras = {k: (None if v is None else reorder(v)) for k, v in kwargs.items()}\n returns.update(extras)\n\n return returns", "def resolve_fragment(self, document, fragment):\n\n fragment = fragment.lstrip(u\"/\")\n parts = unquote(fragment).split(u\"/\") if fragment else []\n\n for part in parts:\n part = part.replace(u\"~1\", u\"/\").replace(u\"~0\", u\"~\")\n\n if isinstance(document, Sequence):\n # Array indexes should be turned into integers\n try:\n part = int(part)\n except ValueError:\n pass\n try:\n document = document[part]\n except (TypeError, LookupError):\n raise exceptions.RefResolutionError(\n \"Unresolvable JSON pointer: %r\" % fragment\n )\n\n return document", "def decode_fragment(pdus):\n messages = []\n undecoded_pdus = []\n\n message = bytearray()\n length = 0\n for pdu in pdus:\n undecoded_pdus += [pdu]\n if pdu[0] & 0x80:\n # first pdu\n if message:\n raise RuntimeError(\"The message is broken in the middle\")\n length = pdu[0] & 0x7f\n else:\n length -= 1\n if length != pdu[0] & 0x7f:\n raise RuntimeError(\"Message out of sequence received\")\n\n message.extend(pdu[1:])\n if length == 0:\n messages += [message]\n message = bytearray()\n undecoded_pdus = []\n\n return (messages, undecoded_pdus)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine which fragments are needed to reconstruct some subset of missing fragments.
def fragments_needed(self, reconstruction_indexes, exclude_indexes=None):
    if exclude_indexes is None:
        exclude_indexes = []
    return self.ec_lib_reference.fragments_needed(reconstruction_indexes,
                                                  exclude_indexes)
[ "def prune_non_seg(self):\n self.fullsequence = self.sequence # First back up the original sequence\n self.fullvariantset = self.variantset\n self.fullvariants = self.variants\n self.sequence = MultipleSeqAlignment([]) # Blank the sequence to be worked on\n\n print \"\\nPruning non-segregating sites...\"\n locs = []\n for curvar in self.variantset:\n locs.append(curvar)\n locs.sort()\n\n stripped = {}\n seqnames = []\n for seq in self.fullsequence:\n stripped[seq.name] = []\n seqnames.append(seq.name)\n\n for i in xrange(len(locs)):\n loc = locs[i]\n self.pruned_to_full.append(loc)\n seqbits = self.fullsequence[:, loc]\n name = 0\n for seqbit in seqbits:\n stripped[seqnames[name]].append(seqbit)\n name += 1\n\n for strip in stripped.keys():\n self.sequence.append(SeqRecord(Seq(''.join(stripped[strip])), name=strip, id=strip))\n\n self.variantset = set()\n self.variants = {}\n self.variants_from_sequence() # Re-run on stripped sequence", "def chip(G,config,mean_frag_length):\n #config = sorted(config)\n lamb = 1.0/mean_frag_length\n splits = make_splits(G,lamb)\n bound_fragments = []\n for pos in config:\n i = bisect.bisect(splits,pos)\n bound_fragments.append((splits[i-1],splits[i]))\n return list(set(bound_fragments))", "def best_segmentation(fragments: np.ndarray, ground_truth: np.ndarray,\n random_seed: int = None) -> np.ndarray:\n if random_seed is not None:\n np.random.seed(random_seed)\n assignments = ev.assignment_table(fragments, ground_truth).tocsc()\n indptr = assignments.indptr\n rag = Rag(fragments)\n for i in range(len(indptr) - 1):\n rag.merge_subgraph(assignments.indices[indptr[i]:indptr[i+1]])\n return rag.current_segmentation()", "def find_contiguous_fragments(residues_z, max_gap=1, min_fragment_length=3):\n # We will assume that there are no missing residues in the PDB file, so that\n # we can rely on the indices of the residues in the list to determine\n # whether two residues are consecutive.\n\n fragments = []\n\n if residues_z:\n # Build up each fragment element by element, starting a new fragment\n # when the next element isn't compatible with the current fragment\n # either because there is too big a gap between residue numbers or\n # because they are on separate chains\n # Recall that the list residues_z contains pairs (index, residue_obj)\n current_index = residues_z[0][0]\n current_residue = residues_z[0][1]\n current_chain_obj = current_residue.get_parent()\n\n working_fragment = [residues_z[0][1]]\n for target in residues_z[1:]:\n new_index = target[0]\n new_residue = target[1]\n new_chain_obj = new_residue.get_parent()\n\n if new_chain_obj == current_chain_obj:\n assert new_index > current_index, \\\n \"List of indices must be sorted {} {}\".format(new_index, current_index)\n\n gap = (new_index - current_index) - 1\n # If the gap is bigger than allowed or the chain has changed\n # then we must start a new fragment\n if new_chain_obj != current_chain_obj or gap > max_gap:\n # Add the completed fragment to the list of fragments if it is long enough\n if len(working_fragment) >= min_fragment_length:\n fragments.append(working_fragment)\n # Start a new fragment\n working_fragment = [new_residue]\n else:\n if gap:\n # Select the residues strictly between these two indices\n working_fragment.extend(find_missing_residues(current_residue,\n new_residue))\n\n working_fragment.append(new_residue)\n\n current_chain_obj = new_chain_obj\n current_index = new_index\n current_residue = new_residue\n\n if len(working_fragment) >= min_fragment_length:\n 
fragments.append(working_fragment)\n\n return fragments", "def check_frag_size(self):\n global check, error_details\n for row_index, row in self.primer_df.iterrows():\n if row['Frag_size'] is not None:\n if (not isinstance(row['Frag_size'], float)) and (not isinstance(row['Frag_size'], int)):\n check += 1\n error = \"Fragment size not a valid entry, see row %s in file\" % (row_index + 4)\n error_details.append(error)\n elif (row['Frag_size'] < 0) or (row['Frag_size'] > 1000):\n check += 1\n error = \"Fragment size not within acceptable range, see row %s\" % (row_index + 4)\n error_details.append(error)", "def fragmentation(self):\n free = {\"cpu\": 0, \"mem\": 0}\n freemax = None\n resources = [\"cpu\", \"mem\"]\n for bin_ in utils.concatenate_lists_generator(self.occupied_bins,\n self.bins):\n free_space = bin_.free_space()\n for res in resources:\n free[res] += free_space[res]\n if freemax:\n if freemax[res] < free_space[res]:\n freemax[res] = free_space[res]\n else:\n freemax = free_space\n\n frag = {\"cpu\": 0, \"mem\": 0}\n for res in resources:\n if free[res] < 1e-8:\n frag[res] = 0\n else:\n frag[res] = (free[res] - freemax[res]) / free[res]\n\n return frag", "def fragmentRandomizer (inputPro, fragment, dimension, trieMax):\n\n # Get the best protein from a 100 random foldings\n go = 0\n\n while go == 0:\n\n randomPro = randomizer(inputPro, trieMax, dimension)\n\n if randomPro.strength != 0:\n go = 1\n\n origPro = randomPro\n bestPro = origPro\n\n invalidFold = 0\n temp = 3.5\n\n while temp > 0.1:\n\n # Define start as random amino acid\n start = len(origPro.proteinChain)\n\n while start > (len(origPro.proteinChain) - fragment):\n\n start = randint(0, (len(origPro.proteinChain) - fragment))\n\n if(start == 0):\n newCoordinates = beginFragment(origPro, fragment, dimension)\n elif(start == len(origPro.proteinChain) - fragment):\n newCoordinates = endFragment(origPro, start, fragment, dimension)\n elif(start < len(origPro.proteinChain) - fragment):\n middleInfo = middleFragment(origPro, start, fragment, dimension)\n newCoordinates = middleInfo[0]\n\n if newCoordinates != 'none':\n\n # Create protein with these coordinates for the fragments\n newPro = Protein(origPro.proteinChain)\n\n # Start fragment\n if(start == 0):\n\n newPro.aminoCoordinates = newCoordinates\n newPro.aminoCoordinates.extend(origPro.aminoCoordinates[fragment:])\n\n # End fragment\n elif(start == len(origPro.proteinChain) - fragment):\n\n newPro.aminoCoordinates = origPro.aminoCoordinates[0 : start]\n newPro.aminoCoordinates.extend(newCoordinates)\n\n # Middle fragment\n elif(start < len(origPro.proteinChain) - fragment):\n\n newPro.aminoCoordinates = origPro.aminoCoordinates[0 : start]\n newPro.aminoCoordinates.extend(newCoordinates)\n newPro.aminoCoordinates.extend(origPro.aminoCoordinates[middleInfo[1] + 1:])\n\n\n # Compare score with old protein\n newPro.strength = calculateFolding(newPro.aminoCoordinates, newPro.proteinChain)\n\n # Calculate probability of acceptance (simulated annealing)\n probab = min(1,(math.expm1(newPro.strength/temp)/math.expm1(origPro.strength/temp)))\n\n randumb = random.uniform(0,1)\n\n if probab > randumb:\n origPro = newPro\n if origPro.strength > bestPro.strength:\n bestPro = origPro\n\n temp *= 0.9998\n invalidFold = 0\n\n # It is undesireble for a protein to keep folding a conformation that\n # will not work, this prevents excessive folding of a doomed protein.\n else:\n invalidFold += 1\n if invalidFold > 20:\n bestPro = fragmentRandomizer(inputPro, fragment, dimension, 
trieMax)\n\n\n return(bestPro)", "def assessFragments(num_fragments): \n\tmin_frag = min(num_fragments) \n\tmax_frag = max(num_fragments)\n\n\n\tfor i in range(min_frag, max_frag+1):\n\t\tcount = num_fragments.count(i)\n\t\tfrag_freq = (count/len(num_fragments))*100 \n\t\tprint(\"{:.2f}% of integrations were broken into \".format(frag_freq)+str(i)+\" fragments\")", "def delete_incomplete_backbone_residues(molecule: Union[Universe, AtomGroup]) -> Universe:\r\n protein = molecule.select_atoms(\"protein\")\r\n not_protein = molecule.select_atoms(\"not protein\")\r\n\r\n incomplete_backbone_residues = []\r\n for residue in protein.residues:\r\n if len(residue.atoms.select_atoms(\"backbone and not element hydrogen\")) < 4:\r\n incomplete_backbone_residues.append(residue)\r\n\r\n if len(incomplete_backbone_residues) > 0:\r\n protein = delete_residues(protein, incomplete_backbone_residues)\r\n\r\n if len(not_protein) > 0:\r\n return Merge(protein.atoms, not_protein.atoms)\r\n else:\r\n return Merge(protein.atoms)", "def _compute_partial_patterns(self):\n self._partial_pats = {'start':{}, 'end':{}, 'short':{}}\n for offset in range(self.mod_pos - 1):\n self._partial_pats['start'][\n self.motif_len - offset - 1] = (self._parse_motif(\n self.raw_motif[offset + 1:]), self.mod_pos - offset - 1)\n for offset in range(self.motif_len - self.mod_pos):\n self._partial_pats['end'][\n self.motif_len - offset - 1] = (self._parse_motif(\n self.raw_motif[:-(offset + 1)]), self.mod_pos)\n for short_len in range(1, self.motif_len):\n self._partial_pats['short'][short_len] = [\n (self._parse_motif(self.raw_motif[offset:offset + short_len]),\n self.mod_pos - offset)\n for offset in range(\n max(0, self.mod_pos - short_len),\n min(self.motif_len - short_len + 1, self.mod_pos))]\n return", "def get_fragfiles( self ) -> pd.DataFrame:\n fragpath = Path(core.get_option('loop_master', 'fragments'))\n self.log.debug(f'Listing available fragment files at: {fragpath.name}')\n if not fragpath.is_dir():\n raise NodeDataError(f'{fragpath.name} is not a folder.')\n return pd.DataFrame([(x.name[:4], x.name[5:6], x, y) for x, y in zip(sorted(fragpath.glob('*/*3mers.gz')),\n sorted(fragpath.glob('*/*9mers.gz')))],\n columns=['pdb', 'chain', '3mers', '9mers'])", "def get_fragfiles( self ) -> pd.DataFrame:\n fragpath = Path(core.get_option('loop_master', 'fragments'))\n self.log.debug(f'Listing available fragment files at: {fragpath.name}')\n if not fragpath.is_dir():\n raise NodeDataError(f'{fragpath.name} is not a folder.')\n return pd.DataFrame([(x.name[:4], x.name[5:6], x, y) for x, y in zip(sorted(fragpath.glob('*/*3mers.gz')),\n sorted(fragpath.glob('*/*9mers.gz')))],\n columns=['pdb', 'chain', '3mers', '9mers'])", "def filter_incomplete_primer_sets(primers, seq_cats):\n # any primer sets lacking a seq_cat are filtered\n missing_cnt = {x:0 for x in seq_cats}\n to_rm = set()\n for num in primers.keys():\n for cat in seq_cats:\n try:\n _ = primers[num][cat]\n except KeyError:\n to_rm.add(num)\n missing_cnt[cat] += 1\n ## filtering\n for x in to_rm:\n primers.pop(x, None)\n # status\n msg = ' No. of incomplete primer sets due to oligo filtering: {}'\n logging.info(msg.format(len(to_rm)))\n for k in seq_cats:\n msg = ' No. of {} oligo missing: {}'\n logging.info(msg.format(k, missing_cnt[k]))\n msg = ' No. 
of primers retained: {}'\n logging.info(msg.format(len(primers.keys())))\n return primers", "def get_open_fragments(user, language):\n return PreProcessFragment.objects \\\n .filter(language=language) \\\n .filter(document__corpus__in=get_available_corpora(user)) \\\n .exclude(selection__is_final=True) \\\n .select_related('document') \\\n .prefetch_related('sentence_set')", "def reads_from_chip_ps_np(fragments,min_seq_length):\n reads = []\n for (start,stop) in fragments:\n if stop - start < min_seq_length:\n continue\n strand = \"+\" if random.random() < 0.5 else \"-\"\n if strand == \"+\":\n reads.append((strand,start,start+min_seq_length))\n else:\n reads.append((strand,stop-min_seq_length,stop))\n return reads", "def test_get_candidates_none(self):\n memory = {'a': auto.MemoryNode({'b':\n auto.MemoryNode({'c': \n auto.MemoryNode({}, 0)}, 0)}, 0)}\n fragment = 'a'\n correct_answer = []\n output = auto.get_candidates(fragment, memory)\n self.assertEqual(output, correct_answer)", "def get_missing_chunks(self):\n l = []\n for file in self.files.values():\n l.extend(file.get_missing_chunks())\n return l", "def missing_per_segment(self, group_segments_by=None):\n # col_list = [s for s in self.data.columns if s not in group_segments_by]\n group_segments_by = group_segments_by or self.segment_by\n missing_df = self.data.groupby(group_segments_by).apply(\n lambda s: compute_if_dask(is_missing(s, self.NA_VALUES).sum())\n )\n col_sums = missing_df.sum()\n missing_df = missing_df[col_sums[col_sums > 0].index.values]\n row_sums = missing_df.sum(axis=1)\n missing_df = missing_df.loc[row_sums[row_sums > 0].index.values]\n return missing_df.reset_index()", "def get_not_always_used(self):\n results_list = []\n\n # initial list is made of fixtures that are in the children\n initial_list = self.gather_all_required(include_parents=False)\n\n for c in self.get_leaves():\n j = 0\n for i in range(len(initial_list)):\n fixture_name = initial_list[j]\n if fixture_name not in c.gather_all_required():\n del initial_list[j]\n results_list.append(fixture_name)\n else:\n j += 1\n\n return results_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get opaque metadata for a fragment. The metadata is opaque to the client, but meaningful to the underlying library. It is used to verify stripes in verify_stripe_metadata().
def get_metadata(self, fragment, formatted=0):
    return self.ec_lib_reference.get_metadata(fragment, formatted)
[ "def _get_metadata(self, artifact_hash: str) -> dict:\n return self._send({\"name\": \"getMetadata\", \"args\": [artifact_hash]})", "def metadata(self):\r\n metadataurlpath = 'content/items/' + self.itemid + '/info/metadata/metadata.xml'\r\n try:\r\n return self._portal.con.get(metadataurlpath, try_json=False)\r\n\r\n # If the get operation returns a 400 HTTP Error then the metadata simply\r\n # doesn't exist, let's just return None in this case\r\n except HTTPError as e:\r\n if e.code == 400 or e.code == 500:\r\n return None\r\n else:\r\n raise e", "def get_metadata(self):\n return meta.get_metadata(self.ast)", "def ex_get_metadata_for_node(self, node):\r\n return node.extra['metadata']", "def __fetch_block_metadata(self, metadata: MetadataRecord) -> MetadataRecord:\n\n cursor = self.conn.cursor()\n\n try:\n cursor.execute(\n 'SELECT * FROM blocks WHERE NPA = ? AND NXX = ? AND X = ?',\n [\n metadata.phone_number[0:3],\n metadata.phone_number[3:6],\n metadata.phone_number[6:7]\n ]\n )\n block_record = cursor.fetchone()\n if block_record is None:\n return metadata\n\n return MetadataRecord(\n metadata.phone_number,\n metadata.country,\n metadata.time_zone,\n block_record['State'],\n block_record['Rate_Center'],\n block_record['OCN'],\n block_record['Assigned_To']\n )\n finally:\n cursor.close()", "async def fetch_metadata(self):\n\n data = await self.http.fetch_metadata()\n return Metadata.from_data(data)", "def get_metadata_for_node(self, node):\r\n return self.manager.get_metadata(self, node=node)", "def _get_cached_artifact_metadata(self, artifact):\n raise NotImplementedError", "def get_metadata(self, queue):\r\n uri = \"/%s/%s/metadata\" % (self.uri_base, utils.get_id(queue))\r\n resp, resp_body = self.api.method_get(uri)\r\n return resp_body", "def _getMetadataForObject(self, obj):\r\n # get the hash of the object and use it as a dict key for the metadata dict\r\n fshash = obj.hash()\r\n\r\n # no entry for this hash? 
make one first\r\n if fshash not in self._md:\r\n self._md[fshash] = {}\r\n\r\n return self._md[fshash]", "def get_xmp_metadata(self): #pylint: too hudge change for the moment disable=invalid-name\n metadata = self.get(\"/Metadata\", None)\n\n if metadata is None:\n return None\n\n metadata = metadata.getObject()\n\n if not isinstance(metadata, xmp.XmpInformation):\n metadata = xmp.XmpInformation(metadata)\n self[NameObject(\"/Metadata\")] = metadata\n\n return metadata", "def readFragment(file_, piecesize=33):\n with open(file_, 'rb') as f:\n if f.read(4) == b\"#CL\\x00\":\n metaSize = bs.bytes2int(f.read(4))\n meta = json.loads(f.read(metaSize).decode())\n y = []\n for b in iter(partial(f.read, piecesize), b''):\n y.append(bs.bytes2int(b))\n else:\n raise RuntimeError(\"{} is not a Cirrolus fragment\".format(file_))\n return (meta, y)", "def get_cached_object_metadata(\n account=None,\n reference=None,\n path=None,\n cid=None,\n version=None,\n properties=False,\n cache=None,\n force_master=False,\n **kwargs\n):\n if cache is None or version:\n # Cache isn't compatible with versioning\n return None, None\n\n if force_master:\n # Cache cannot be reliably considered up-to-date\n return None, None\n\n cache_key = _get_object_metadata_cache_key(\n account=account, reference=reference, path=path, cid=cid\n )\n cache_value = cache.get(cache_key)\n if cache_value is None:\n return None, None\n\n content_meta = cache_value.get(\"meta\")\n if content_meta is None:\n return None, None\n if properties:\n content_properties = cache_value.get(\"properties\")\n if content_properties is None:\n return None, None\n content_meta = content_meta.copy()\n content_meta[\"properties\"] = content_properties\n content_chunks = cache_value.get(\"chunks\")\n return content_meta, content_chunks", "def _get_trailing_metadata(self, response, exception):\n if exception:\n return exception.error.trailing_metadata()\n else:\n return response.trailing_metadata()", "def get_metadata(self):\r\n return self.manager.get_metadata(self, node=self)", "def GetMetadata(client_id, client_full_info):\n\n metadata = base.ExportedMetadata()\n\n last_snapshot = None\n if client_full_info.HasField(\"last_snapshot\"):\n last_snapshot = client_full_info.last_snapshot\n\n metadata.client_urn = client_id\n metadata.client_age = client_full_info.metadata.first_seen\n\n if last_snapshot is not None:\n kb = client_full_info.last_snapshot.knowledge_base\n\n metadata.hostname = kb.fqdn\n metadata.os = kb.os\n metadata.uname = last_snapshot.Uname()\n metadata.os_release = last_snapshot.os_release\n metadata.os_version = last_snapshot.os_version\n metadata.usernames = \",\".join(user.username for user in kb.users)\n\n addresses = last_snapshot.GetMacAddresses()\n if addresses:\n metadata.mac_address = \"\\n\".join(last_snapshot.GetMacAddresses())\n metadata.hardware_info = last_snapshot.hardware_info\n metadata.kernel_version = last_snapshot.kernel\n\n ci = last_snapshot.cloud_instance\n if ci is not None:\n if ci.cloud_type == ci.InstanceType.AMAZON:\n metadata.cloud_instance_type = metadata.CloudInstanceType.AMAZON\n metadata.cloud_instance_id = ci.amazon.instance_id\n elif ci.cloud_type == ci.InstanceType.GOOGLE:\n metadata.cloud_instance_type = metadata.CloudInstanceType.GOOGLE\n metadata.cloud_instance_id = ci.google.unique_id\n\n system_labels = set()\n user_labels = set()\n for l in client_full_info.labels:\n if l.owner == \"GRR\":\n system_labels.add(l.name)\n else:\n user_labels.add(l.name)\n\n metadata.labels = 
\",\".join(sorted(system_labels | user_labels))\n metadata.system_labels = \",\".join(sorted(system_labels))\n metadata.user_labels = \",\".join(sorted(user_labels))\n\n return metadata", "def get_metadata(self, idx):\n meta_data = copy.deepcopy(self.meta_data)\n meta_data[\"identifier\"] = self.get_identifier(idx)\n return meta_data", "def get_item_metadata(self, handle):\n raise(NotImplementedError())", "def _read_manifest_metadata_v2(self, bundle_uuid: str) -> Optional[Dict[str, Any]]:\n metadata_file_path = os.path.join(self.outbox_path, f\"{bundle_uuid}.metadata.json\")\n try:\n with open(metadata_file_path) as metadata_file:\n metadata_dict = json.load(metadata_file)\n except Exception:\n return None\n return cast(Dict[str, Any], metadata_dict)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get segmentation info for a given data length and segment size.
def get_segment_info(self, data_len, segment_size):
    return self.ec_lib_reference.get_segment_info(data_len, segment_size)
[ "def get_segment_info_byterange(self, ranges, data_len, segment_size):\n\n segment_info = self.ec_lib_reference.get_segment_info(\n data_len, segment_size)\n\n segment_size = segment_info['segment_size']\n\n sorted_ranges = ranges[:]\n sorted_ranges.sort(key=lambda obj: obj[0])\n\n recipe = {}\n\n for r in ranges:\n segment_map = {}\n begin_off = r[0]\n end_off = r[1]\n begin_segment = begin_off // segment_size\n end_segment = end_off // segment_size\n\n if begin_segment == end_segment:\n begin_relative_off = begin_off % segment_size\n end_relative_off = end_off % segment_size\n segment_map[begin_segment] = (begin_relative_off,\n end_relative_off)\n else:\n begin_relative_off = begin_off % segment_size\n end_relative_off = end_off % segment_size\n\n segment_map[begin_segment] = (begin_relative_off,\n segment_size - 1)\n\n for middle_segment in range(begin_segment + 1, end_segment):\n segment_map[middle_segment] = (0, segment_size - 1)\n\n segment_map[end_segment] = (0, end_relative_off)\n\n recipe[r] = segment_map\n\n return recipe", "def _parse_segment(self, seg_data):\n if seg_data.is_empty():\n err_str = 'Segment \"%s\" is empty' % (seg_data)\n self._seg_error('8', err_str, None, src_line=self.cur_line + 1)\n if not seg_data.is_seg_id_valid():\n err_str = 'Segment identifier \"%s\" is invalid' % (\n seg_data.get_seg_id())\n self._seg_error('1', err_str, None, src_line=self.cur_line + 1)\n seg_id = seg_data.get_seg_id()\n if seg_id == 'ISA':\n if len(seg_data) != 16:\n raise pyx12.errors.X12Error('The ISA segment must have 16 elements (%s)' % (seg_data))\n interchange_control_number = seg_data.get_value('ISA13')\n if interchange_control_number in self.isa_ids:\n err_str = 'ISA Interchange Control Number '\n err_str += '%s not unique within file' % (interchange_control_number)\n self._isa_error('025', err_str)\n self.loops.append(('ISA', interchange_control_number))\n self.isa_ids.append(interchange_control_number)\n self.gs_count = 0\n self.gs_ids = []\n self.isa_usage = seg_data.get_value('ISA15')\n elif seg_id == 'GS':\n group_control_number = seg_data.get_value('GS06')\n if group_control_number in self.gs_ids:\n err_str = 'GS Interchange Control Number '\n err_str += '%s not unique within file' % (group_control_number)\n self._gs_error('6', err_str)\n self.gs_count += 1\n self.gs_ids.append(group_control_number)\n self.loops.append(('GS', group_control_number))\n self.st_count = 0\n self.st_ids = []\n elif seg_id == 'ST':\n self.hl_stack = []\n self.hl_count = 0\n transaction_control_number = seg_data.get_value('ST02')\n if transaction_control_number in self.st_ids:\n err_str = 'ST Interchange Control Number '\n err_str += '%s not unique within file' % (transaction_control_number)\n self._st_error('23', err_str)\n self.st_count += 1\n self.st_ids.append(transaction_control_number)\n self.loops.append(('ST', transaction_control_number))\n self.seg_count = 1\n self.hl_count = 0\n #elif seg_id == 'LS':\n # self.seg_count += 1\n # self.loops.append(('LS', seg_data.get_value('LS06')))\n #elif seg_id == 'LE':\n # self.seg_count += 1\n # del self.loops[-1]\n elif seg_id == 'HL':\n self.hl_count += 1\n hl_count = seg_data.get_value('HL01')\n if self.hl_count != self._int(hl_count):\n #raise pyx12.errors.X12Error, \\\n # 'My HL count %i does not match your HL count %s' \\\n # % (self.hl_count, seg[1])\n err_str = 'My HL count %i does not match your HL count %s' % (self.hl_count, hl_count)\n self._seg_error('HL1', err_str)\n if seg_data.get_value('HL02') != '':\n hl_parent = 
self._int(seg_data.get_value('HL02'))\n if hl_parent not in self.hl_stack:\n err_str = 'HL parent (%i) is not a valid parent' % (hl_parent)\n self._seg_error('HL2', err_str)\n while self.hl_stack and hl_parent != self.hl_stack[-1]:\n del self.hl_stack[-1]\n else:\n if len(self.hl_stack) != 0:\n pass\n #err_str = 'HL parent is blank, but stack not empty'\n #self._seg_error('HL2', err_str)\n self.hl_stack.append(self.hl_count)\n elif self.check_837_lx and seg_id == 'CLM':\n self.lx_count = 0\n elif self.check_837_lx and seg_id == 'LX':\n self.lx_count += 1\n if seg_data.get_value('LX01') != '%i' % (self.lx_count):\n err_str = 'Your 2400/LX01 Service Line Number %s does not match my count of %i' % \\\n (seg_data.get_value('LX01'), self.lx_count)\n self._seg_error('LX', err_str)\n # count all regular segments\n if seg_id not in ('ISA', 'IEA', 'GS', 'GE', 'ST', 'SE'):\n self.seg_count += 1\n self.cur_line += 1", "def _get_segments(self, segments):\n if type(segments) == str or type(segments) == list:\n segmentation = self._segments[segments]\n elif segments.__class__.__name__ == Segments.__name__:\n segmentation =segments\n else:\n raise ValueError('Invalid data type! Please input a class instance of Segments or a str indicating a set segmentation in the class instance')\n return segmentation", "def create_dict_main_segmentation(self, ocromore_data):\n segmentation_dict = {}\n seg_data = ocromore_data['segmentation']\n seg_classes = seg_data.my_classes\n lines = ocromore_data['lines']\n for segclass in seg_classes:\n if not segclass.is_start_segmented():\n continue\n start_index = segclass.get_start_line_index()\n # extract the real tag from line\n tag = dh.get_real_tag_from_segment(segclass,lines[start_index])\n if tag in segmentation_dict.keys():\n segmentation_dict[tag] = segmentation_dict[tag] + 1\n else:\n segmentation_dict[tag] = 1\n\n return segmentation_dict", "def get_segmentation_image(self):\n if self._sexconfig is None:\n return None\n\n self.execute()\n\n filename = SExImageCatalog._get_checkname(\n self._sexconfig,\n check_image_type='SEGMENTATION'\n )\n\n if os.path.isfile(filename):\n return fits.getdata(filename, ext=0)\n else:\n return None", "def __extract_segment_from_volume(self, volume, segment_shape, label_shape, center):\n offset = 1 # I will be extracting data samples only\n indices_data = np.zeros((len(segment_shape) + 1, 2), dtype=np.uint16)\n paddings_data = np.zeros((len(segment_shape) + 1, 2), dtype=np.int16)\n indices_label = np.zeros((len(label_shape) + 1, 2), dtype=np.uint16)\n paddings_label = np.zeros((len(label_shape) + 1, 2), dtype=np.int16)\n\n # specifying fields for channel (first) dimension in data.\n indices_data[0] = [0, volume.shape[0]]\n paddings_data[0] = [0, 0] # no padding along channel axis\n indices_label[0] = [0, self.num_classes]\n paddings_label[0] = [0, 0] # no padding along channel axis\n\n # calculate left and right bounds for indices, and corresponding padding\n for i in range(len(segment_shape)): # (H, W, D)\n indices_data[i + offset][0] = int(max(0, center[i] - np.floor(segment_shape[i] / 2)))\n indices_data[i + offset][1] = int(min(volume.shape[i + offset], center[i] - np.floor(segment_shape[i] / 2) +\n segment_shape[i]))\n\n indices_label[i + offset][0] = int(max(0, center[i] - np.floor(label_shape[i] / 2)))\n indices_label[i + offset][1] = int(min(volume.shape[i + offset], center[i] - np.floor(label_shape[i] / 2) +\n label_shape[i]))\n\n paddings_data[i + offset][0] = int(np.abs(min(0, center[i] - np.floor(segment_shape[i] / 2))))\n 
paddings_data[i + offset][1] = int(max(volume.shape[i + offset],\n np.ceil(center[i] - np.floor(segment_shape[i] / 2) +\n segment_shape[i])) - volume.shape[i + offset])\n\n paddings_label[i + offset][0] = int(np.abs(min(0, center[i] - np.floor(label_shape[i] / 2))))\n paddings_label[i + offset][1] = int(max(volume.shape[i + offset],\n np.ceil(center[i] - np.floor(label_shape[i] / 2) +\n label_shape[i])) - volume.shape[i + offset])\n\n # converting to explicit list of coordinates for np.ix_()\n indices_list_data = [range(indices_data[i][0], indices_data[i][1]) for i in range(indices_data.shape[0])]\n\n volume_to_return = volume[np.ix_(*indices_list_data)]\n volume_to_return = np.pad(volume_to_return, pad_width=paddings_data, mode='constant', constant_values=0)\n\n # coordinates and paddings data for label segment.\n indices_list_label = [range(indices_label[i][0], indices_label[i][1]) for i in range(indices_label.shape[0])]\n label_undo_shape = [self.num_classes] + label_shape\n paddings_list_label = [range(paddings_label[i][0], label_undo_shape[i] - paddings_label[i][1])\n for i in range(paddings_label.shape[0])]\n\n return volume_to_return, indices_list_label, paddings_list_label", "def _read_common(self, dataset: pydicom.Dataset, result: _ReadResultBase) -> None:\n if dataset.SOPClassUID != SegmentationStorage or dataset.Modality != \"SEG\":\n raise ValueError(\"DICOM dataset is not a DICOM-SEG storage\")\n\n result.dataset = dataset\n result.segment_infos = reader_utils.get_segment_map(dataset)\n result.spacing = reader_utils.get_declared_image_spacing(dataset)\n result.direction = reader_utils.get_image_direction(dataset)\n result.direction.flags.writeable = False\n result.origin, extent = reader_utils.get_image_origin_and_extent(\n dataset, result.direction\n )\n result.size = (\n dataset.Columns,\n dataset.Rows,\n int(np.rint(extent / result.spacing[-1])) + 1,\n )", "def extract_indexes_segments(data_length, n_segments):\n\n # Extract the segment length\n\n segment_length = int(data_length / n_segments)\n indexes = []\n\n # For each segment, determine the correspondent indexes (checking the position of the end one)\n\n for k in range(n_segments):\n\n start = k * segment_length\n end = (k + 1) * segment_length\n\n if (k + 2) * segment_length > data_length:\n end = data_length\n\n indexes.append((start, end))\n\n return indexes", "def get_seg_count(self):\n return self.seg_count", "def readSegment(self):\n try:\n packet,addr = self.sock.recvfrom(1024)\n # no connection\n except socket.timeout as e:\n self.log.debug(\"no update\")\n return [0 for _ in range(conf.Config.segment()[0])]\n # parse data\n data = struct.unpack('i'*int(len(packet)/4), packet)\n self.log.debug(\"update\")\n return data", "def get_segment_length(self):\n # extract segment length for calculating minimun drop later\n reaches = self.reaches[[\"geometry\", \"iseg\", \"rchlen\"]].copy()\n seglen = reaches.groupby(\"iseg\")[\"rchlen\"].sum()\n self.segment_data.loc[seglen.index, \"seglen\"] = seglen\n return seglen", "def record_segment_info(self, segment_prefix, seg):\n # Remove any pathname components from the prefix.\n segment_prefix = basename(segment_prefix)\n\n attrs = seg.get_attrs()\n self.add_segment_info(\"%s.%s\" % (segment_prefix, attrs.name),\n seg.get_elf_segment())\n\n # Return a memsection describing the segment.\n seg.get_attrs().scrub = False\n ms = MemSection(segment = seg)\n if attrs.elf_flags is not None:\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_FULLYACCESSIBLE) == \\\n 
weaver.bootinfo_elf.BI_FULLYACCESSIBLE:\n master = Cap(\"master\", [\"master\"])\n ms.add_cap(master)\n else:\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_READABLE) == \\\n weaver.bootinfo_elf.BI_READABLE:\n r_cap = Cap(\"read\", [\"read\"])\n ms.add_cap(r_cap)\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_WRITABLE) == \\\n weaver.bootinfo_elf.BI_WRITABLE:\n w_cap = Cap(\"write\", [\"write\"])\n ms.add_cap(w_cap)\n if (attrs.elf_flags & weaver.bootinfo_elf.BI_EXECUTABLE) == \\\n weaver.bootinfo_elf.BI_EXECUTABLE:\n e_cap = Cap(\"execute\", [\"execute\"])\n ms.add_cap(e_cap)\n\n else:\n raise MergeError, 'No elf flags \"%s\" not found.' % ms.name \n return ms", "def get_segment(self, n):\r\n segment_header = self._get_segment_header(n)\r\n return self._make_segment(segment_header)", "def get_segment(self, i=None):\n i = i if i is not None else self.i\n return self.segs[i], self.scores[i], self.marks[i]", "def split_sound_into_segments(self, file_name, segment_size, overlap):\n\n sound_raw, sample_rate = librosa.load(file_name)\n\n # normalization\n normalization_factor = 1 / np.max(np.abs(sound_raw))\n sound_raw = sound_raw * normalization_factor\n\n def sample_splitter(data):\n \"\"\"\n This method returns start and end time of the shorter segments produced from the raw sound.\n :param data: raw sound\n :return: (start, end) couples for the new segments\n \"\"\"\n start = 0.0\n end = start + segment_size\n\n while start <= len(data):\n # if last segment\n if (end > len(data)):\n yield int(len(data) - segment_size), int(len(data))\n break\n\n yield int(start), int(end)\n start += float(segment_size * (1 - overlap))\n end += float(segment_size * (1 - overlap))\n\n observations = []\n labels = []\n\n # label is the second part of the filename, i.e. 
3 for dog bark\n label = file_name.split('/')[-1].split('-')[1]\n\n if len(sound_raw) < segment_size: # one single segment\n sound_raw = np.pad(sound_raw, (0, segment_size - len(sound_raw)), 'constant')\n observations.append(sound_raw)\n labels = np.append(labels, label)\n else:\n for (start, end) in sample_splitter(sound_raw):\n segment = sound_raw[start:end]\n # TODO discard silent segments in a better way?\n if np.any(segment):\n observations.append(segment)\n labels = np.append(labels, label)\n\n return observations, labels", "def extract_segments(annoation_folder, image_folder, output_dir=\"segmentation_data\"):\n if not exists(output_dir):\n os.mkdir(output_dir)\n labelmap = read_labelmap(join(annoation_folder, \"labelmap.txt\"))\n masks = io.ImageCollection(join(annoation_folder,\"SegmentationClass\", \"*.png\"))\n images = io.ImageCollection(join(image_folder, \"*.jpg\"))\n counts = {label : 0 for label in labelmap.keys()}\n for image, mask in zip(images, masks):\n for label, color in labelmap.items():\n segment = extract(image, mask, color)\n # Save segment in corresponding folder\n segmentfolder = join(output_dir, label)\n if not np.all(segment == 0):\n if not exists(segmentfolder):\n os.mkdir(segmentfolder)\n io.imsave(join(segmentfolder, str(counts[label]) + \".jpg\"), segment)\n counts[label] += 1", "def get_segmentation_image(self):\n return None", "def parse_segment(input_seg):\n extracted_dict = {}\n\n def assign_if_present(value,\n dict_key=None,\n interior_key=None,\n proc_val=lambda val: val):\n \"\"\"\n :param value: type?\n :param dict_key:\n :param interior_key:\n :param proc_val:\n :return: type?\n\n Assigns value to extracted_dict object if present in input_seg\n \"\"\"\n dict_key = value if dict_key is None else dict_key\n\n if value in input_seg and interior_key and interior_key in input_seg[\n value]:\n extracted_dict[dict_key] = proc_val(input_seg[value][interior_key])\n elif value in input_seg and not interior_key:\n extracted_dict[dict_key] = proc_val(input_seg[value])\n\n seg = None\n try:\n assign_if_present(\"channel\")\n assign_if_present(\"startTimeSec\", \"start\")\n assign_if_present(\"stopTimeSec\", \"stop\")\n assign_if_present(\"endTimeSec\", \"stop\")\n assign_if_present(\"transcript\", \"text\")\n assign_if_present(\"corrected_transcript\", \"text\")\n assign_if_present(\"formatted_transcript\", \"formatted_text\")\n assign_if_present(\"punctuated_transcript\", \"formatted_text\")\n assign_if_present(\"speakerInfo\", \"speaker\", \"ID\")\n assign_if_present(\"genderInfo\", \"label\", \"gender\",\n lambda gender: \"<o,f0,{:}>\".format(gender))\n assign_if_present(\"confidence\", \"confidence\")\n\n seg = segment(extracted_dict)\n\n except Exception as exc:\n LOGGER.exception(exc)\n\n return seg if seg and seg.validate() else None", "def getSegmentRanges(fullSize, segmentSize):\n overlapRatio = 1.1\n if fullSize <= segmentSize:\n return [(0, fullSize)]\n firstCenter = int(segmentSize/2)\n lastCenter = fullSize - int(segmentSize/2)\n assert lastCenter > firstCenter\n flexSize = lastCenter - firstCenter\n numSegments = math.ceil(flexSize / (segmentSize/overlapRatio))\n offset = flexSize / numSegments\n ranges = []\n for i in range(numSegments):\n center = firstCenter + round(i * offset)\n start = center - int(segmentSize/2)\n end = min(start + segmentSize, fullSize)\n ranges.append((start,end))\n ranges.append((fullSize - segmentSize, fullSize))\n # print('ranges', fullSize, segmentSize, ranges)\n # lastC = 0\n # for i, r in enumerate(ranges):\n # c = 
(r[0] + r[1])/2\n # print(i, r[0], r[1], c, c - lastC)\n # lastC = c\n return ranges" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get segmentation info for a byterange request, given a data length and segment size. This will return a map-of-maps that represents a recipe describing the segments and ranges within each segment needed to satisfy a range request. Assume a range request is given for an object with segment size 3K and
def get_segment_info_byterange(self, ranges, data_len, segment_size):
    segment_info = self.ec_lib_reference.get_segment_info(
        data_len, segment_size)

    segment_size = segment_info['segment_size']

    sorted_ranges = ranges[:]
    sorted_ranges.sort(key=lambda obj: obj[0])

    recipe = {}

    for r in ranges:
        segment_map = {}
        begin_off = r[0]
        end_off = r[1]
        begin_segment = begin_off // segment_size
        end_segment = end_off // segment_size

        if begin_segment == end_segment:
            begin_relative_off = begin_off % segment_size
            end_relative_off = end_off % segment_size
            segment_map[begin_segment] = (begin_relative_off,
                                          end_relative_off)
        else:
            begin_relative_off = begin_off % segment_size
            end_relative_off = end_off % segment_size

            segment_map[begin_segment] = (begin_relative_off,
                                          segment_size - 1)

            for middle_segment in range(begin_segment + 1, end_segment):
                segment_map[middle_segment] = (0, segment_size - 1)

            segment_map[end_segment] = (0, end_relative_off)

        recipe[r] = segment_map

    return recipe
[ "def getSegmentRanges(fullSize, segmentSize):\n overlapRatio = 1.1\n if fullSize <= segmentSize:\n return [(0, fullSize)]\n firstCenter = int(segmentSize/2)\n lastCenter = fullSize - int(segmentSize/2)\n assert lastCenter > firstCenter\n flexSize = lastCenter - firstCenter\n numSegments = math.ceil(flexSize / (segmentSize/overlapRatio))\n offset = flexSize / numSegments\n ranges = []\n for i in range(numSegments):\n center = firstCenter + round(i * offset)\n start = center - int(segmentSize/2)\n end = min(start + segmentSize, fullSize)\n ranges.append((start,end))\n ranges.append((fullSize - segmentSize, fullSize))\n # print('ranges', fullSize, segmentSize, ranges)\n # lastC = 0\n # for i, r in enumerate(ranges):\n # c = (r[0] + r[1])/2\n # print(i, r[0], r[1], c, c - lastC)\n # lastC = c\n return ranges", "def get_segment_info(self, data_len, segment_size):\n return self.ec_lib_reference.get_segment_info(data_len, segment_size)", "def _get_start_overlap_dict(start_ngs, len_overlap, len_start):\n result = {}\n for ng in start_ngs:\n if len(ng) != len_start:\n raise ValueError(ILLEGAL_LEN_MSG.format(ng, len_start))\n result.setdefault(ng[-len_overlap:], set()).add(ng)\n return result", "def get_marker_data_by_range(self, chr, start, end):\n docs = []\n msg = \"\"\n start_pos = int(start)\n end_pos = int(end)\n\n # Some basic sanity checking\n if (end_pos - start_pos) > 250000:\n msg = \"Range is too great should be 250Kb or less [%d]\" % (end_pos - start_pos)\n return (docs, msg)\n if (end_pos - start_pos) < 0:\n msg = \"Start pos is greater than End pos\" \n return (docs, msg)\n\n query = {}\n query['chromosome'] = chr = \"%.2d\" % (int(chr))\n query['position'] = {}\n query['position']['$gte'] = start_pos\n query['position']['$lte'] = end_pos\n\n print \"RANGE QUERY\", query\n\n try:\n cursor = self.markers.find(query)\n except:\n msg = \"Unexpected error:\" + sys.exc_info()[0]\n \n for doc in cursor:\n if len(doc[\"alleleA\"]) > 10:\n doc[\"alleleA\"] = doc[\"alleleA\"][0:10] + \" ...\"\n if len(doc[\"alleleB\"]) > 10:\n doc[\"alleleB\"] = doc[\"alleleB\"][0:10] + \" ...\"\n doc[\"samplecount\"] = self.sample_coll.get_count(doc[\"assaytype\"])\n docs.append(doc)\n # can return [] if query fails\n if len(docs) == 0:\n msg = \"Nothing found in range\"\n return (docs, msg)", "def ranges_from_event(self, flat_events, handler):\n ranges = {}\n for key, value in flat_events.items():\n matches = handler.find_matches(value)\n for match in matches:\n if not ranges.get(key):\n ranges[key] = []\n\n begin = match.start()\n end = match.end()\n\n data_range = {\"label\": handler.label, \"begin\": begin, \"end\": end}\n\n ranges[key].append(data_range)\n\n return ranges", "def _get_segments(self, start, request_size):\n end = start + request_size\n futures = []\n\n while request_size > self._max_request_size:\n futures.append(self._get_segment(start, self._max_request_size))\n request_size -= self._max_request_size\n start += self._max_request_size\n if start < end:\n futures.append(self._get_segment(start, end-start))\n return [fut.get_result() for fut in futures]", "def _collect_map_ranges(\n state: SDFGState, memlet_path: List[gr.MultiConnectorEdge[mm.Memlet]]\n) -> List[Tuple[str, subsets.Range]]:\n ranges: List[Tuple[str, subsets.Range]] = []\n # Outgoing (write) memlet path\n if any(isinstance(e.src, nodes.MapExit) for e in memlet_path):\n for e in reversed(memlet_path):\n if isinstance(e.src, nodes.MapExit):\n entry = state.entry_node(e.src)\n ranges.extend([(p, r)\n for p, r in 
zip(entry.params, entry.range)])\n else: # Incoming (read) memlet path\n for e in memlet_path:\n if isinstance(e.dst, nodes.MapEntry):\n ranges.extend([(p, r)\n for p, r in zip(e.dst.params, e.dst.range)])\n return ranges", "def segment_count(start: int, stop: int, step: int = 5) -> Iterable[Tuple[int, int]]:\n return gen_bounded_segments(start, stop, step)", "def get_segments_analysis(self):\n # segment_by = segment_by or self.segment_by\n # if not metric_column:\n # metric_column = self._current_y\n segmented_analysis = {\n \"summary\": self.segments_summary(),\n \"number_of_rows\": self.rows_per_segment(),\n \"no_of_missing_values\": self.missing_per_segment(),\n \"no_of_outliers\": self.outliers_per_segment(),\n }\n return segmented_analysis", "def create_dict_main_segmentation(self, ocromore_data):\n segmentation_dict = {}\n seg_data = ocromore_data['segmentation']\n seg_classes = seg_data.my_classes\n lines = ocromore_data['lines']\n for segclass in seg_classes:\n if not segclass.is_start_segmented():\n continue\n start_index = segclass.get_start_line_index()\n # extract the real tag from line\n tag = dh.get_real_tag_from_segment(segclass,lines[start_index])\n if tag in segmentation_dict.keys():\n segmentation_dict[tag] = segmentation_dict[tag] + 1\n else:\n segmentation_dict[tag] = 1\n\n return segmentation_dict", "def histogram_segments(segments):\n\n # sum(.values()) is the number of segments within (start, end)\n active_segments = defaultdict(int)\n\n consider_segments = list(segments_generator(segments))\n\n # TODO(pwaller): This function doesn't need to consider the active segments\n # It should just maintain a counter. (expect a ~O(10%) speedup)\n\n # Look ahead to the next start, and that's the end of the interesting range\n for this, next in zip(consider_segments, consider_segments[1:]):\n\n # (start, end) is the range until the next segment\n (start, seg, disappearing), (end, _, _) = this, next\n\n # Did the segment appear or disappear? 
Key on the segment coordinates\n if not disappearing:\n active_segments[seg] += 1\n\n else:\n active_segments[seg] -= 1\n\n if start == end:\n # This happens if a segment appears more than once.\n # Then we don't care about considering this zero-length range.\n continue\n\n yield (start, end), sum(active_segments.values())", "def gen_slice_info(self, service=None):\n\n\t\tif (service is None ):\n\t\t\tservice = RequestRouterService.objects.get()\n\n\t\tmapping = {}\n\t\n \tfor slice in service.service.all():\n \t\tname = slice.name\n \t\tfor sliver in slice.slivers.all():\n\t \t\t\tmapping[sliver.name] = str(sliver.ip)\n\n\t\treturn mapping", "def _ip_addr_mapping(self, stix_data, full_block_size):\n\n cidr_parts = stix_data.get('value', '').split('/')\n cidr_suffix = cidr_parts[1] if len(cidr_parts) > 1 else str(full_block_size)\n if cidr_suffix == str(full_block_size):\n return {\n 'type': 'Address',\n 'ip': '@.value',\n 'confidence': '@.confidence',\n }\n return {\n 'confidence': '@.confidence',\n 'type': 'CIDR',\n 'block': '@.value',\n }", "def simple_seg(bin_count, binsize = 10000, min_cn = 5, std_scale = 8):\n df = bin_count.copy()\n\n # set min_cn by mean + n * std\n if (min_cn is None):\n xx = np.array(df['CN'])\n min_cn = np.mean(xx) + std_scale * np.std(xx, ddof = 1)\n print(min_cn)\n\n # call copy number segmentations in each chromosome\n cnsegs = []\n for chr in set(df['Chrom']):\n # print(chr)\n dfsub = df[df['Chrom'] == chr]\n\n # segmentation\n x = np.array(dfsub['CN'])\n seg = simple_amplicon_segment(x, min_cn)\n seg = [list(dfsub.iloc[i[0],0:2]) +\n list(dfsub.iloc[i[1]-1,1:2]+binsize) +\n [i[2]] for i in seg]\n\n cnsegs.append(pd.DataFrame(seg,\n columns=['Chrom', 'Start', 'End', 'CN']))\n seg_df = pd.concat(cnsegs)\n\n return seg_df[seg_df.CN >= min_cn]", "def sampling_map(data, n_req, n_step):\n map_ = []\n for i, sequence in enumerate(data):\n for j in range(0, len(sequence) - n_req, n_step):\n map_.append((i, j, j + n_req))\n return map_", "def pssm_recovery_map_range(struct, pssm_map, min, max):\n struct_residues = struct.get_residues()\n recovery_map = {}\n for residue in struct_residues:\n score = residue.get_list()[1].get_bfactor()\n if score >= min and score <= max:\n residue_name = residue.get_resname()\n residue_num = residue.get_id()[1]\n try:\n status = pssm_map.conserved(residue_num, residue_name)\n except KeyError:\n warnings.warn(\"ignoring noncanonical amino acid \" + residue_name + \" in pssm calculation\")\n continue\n if status:\n try:\n recovery_map[residue_name] += 1\n except KeyError:\n recovery_map[residue_name] = 1\n return recovery_map", "def extract_indexes_segments(data_length, n_segments):\n\n # Extract the segment length\n\n segment_length = int(data_length / n_segments)\n indexes = []\n\n # For each segment, determine the correspondent indexes (checking the position of the end one)\n\n for k in range(n_segments):\n\n start = k * segment_length\n end = (k + 1) * segment_length\n\n if (k + 2) * segment_length > data_length:\n end = data_length\n\n indexes.append((start, end))\n\n return indexes", "def map2region(self, sg, type):\n \n if not isinstance(sg, SpliceGraph.SpliceGraph) and os.path.exists(sg):\n sg = SpliceGraph.SpliceGraph(filename=sg)\n\n if isinstance(sg, SpliceGraph.SpliceGraph):\n \n # get annotation\n if not self.annotationFiles.has_key(type): # give up\n self.__inform(\"ERROR in map2region: unknown annotation type [%s]\" % type)\n raise Errors.ArgumentError(\"map2region\", \"unknown annotation type [%s]\" % type)\n\n elif not 
self.exonInfo.has_key(type): # parse it\n self.parseAnnotation(type)\n\n # get region\n #regStart, regEnd = sg.genomicRange()\n ex = sg.allExons()\n regStart = ex[0][0]\n regEnd = ex[-1][1]\n\n # get overlap\n genes = {}\n (overlapGns, overlapExons) = self.overlappingGenesExons(type,regStart,regEnd,detailed=1)\n\n for overlapGn in overlapGns:\n if genes.has_key(overlapGn):\n genes[overlapGn] += 1\n else:\n genes[overlapGn] = 1\n \n return genes, overlapExons", "def gen_servicemap_slice_info(self, servicemap):\n\n\t\twzone = Set(['arizona', 'stanford', 'on.lab', 'housten']) # zone=1 in cooden.conf\n\t\tezone = Set(['princeton', 'atlanta', 'new york', 'georgia tech']) # zone=2 in coodeen.conf\n\n mapping_zone = {}\n\t\tmapping_ip = {}\n\t\tslice = servicemap.slice\n name = slice.name\n for sliver in slice.slivers.all():\n\t\t\tmapping_ip[sliver.name] = str(sliver.ip)\n\t\t\tsite = sliver.node.site.name\n\t\t\tif(site.lower() in wzone):\n\t\t\t\tmapping_zone[sliver.name] = str(1)\n\t\t\telse:\n\t\t\t\tmapping_zone[sliver.name] = str(2)\n\n return mapping_ip, mapping_zone" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logs a message to the command line
def log(message):
    print(message)
[ "def log(message: str, level: int = logging.INFO):\n pipe.send(LogMessage(message, level))", "def log(self, message, log_type = Constants.LOG_CONSOLE):\n \n self.view.log(message, log_type)", "def log(self, msg):\n self.logger.write(msg)", "def write_to_console(self, message):\n logger.console(\"\\n\" + message)", "def main():\n\n # get the logger object\n logger = MyLog()\n t = MyLogTest(logger)\n\n # set the values for MyLog from the command-line arguments\n a = MyLogArguments()\n args = a.get()\n a.process(logger)\n\n # open the file and/or console for MyLog\n # as a script, the arguments must be processed before the output is opened\n logger.openOutput()\n\n logger.logPrint(\"INFO\", \"Starting MyLog ...\")\n\n logger.logPrint(\"INFO\", \"args = \" + str(args))\n\n # include test cases for future changes\n t.testA1(logger)\n t.testA2()\n t.testB1(logger)\n t.testC1(logger)\n t.testD1(logger)\n t.testE1(logger)\n\n a.help()\n\n logger.logPrint(\"INFO\", \"message to user file\")\n\n logger.closeMyLog(logger)", "def message(msg, flag=None):\n run_command(\"g.message\", flags=flag, message=msg, errors='ignore')", "def _log_step(self, msg):\n # TODO(jonni): Is direct print() is better for command-line use?\n logging.info('Step %d: %s', self.current_step(), msg)", "def print_message(message):\r\n print(message)", "def started(message: str) -> None:\n _console.print(message, style=\"underline\")\n _console.line()", "def log_message(self) -> global___LogMessage:", "def send_log_entry(self, text):\n self._post_data(\"log\", data={'log_entry':text})\n\n #Log to file if a logger variable is set on this class instance\n logger = getattr(self, 'logger', None)\n if logger:\n logger.debug(text)", "def job(self, msg, *args, **kwargs):\n self.print(50, msg, *args, **kwargs)", "def __logStdout(self, message):\n # Get timestamp.\n dts = str(datetime.datetime.utcnow())\n \n # Keep the log looking pretty and uniform.\n if len(dts) == 19:\n dts = dts + \".000000\"\n \n # Dump the message.\n sys.stdout.write(\"%s - %s\\n\" %(dts, message))", "def log_script_result(self, message):\n\n testlog.wtl_log(\"!*** %s\" %(message), force=True)", "def log(self, msg):\n self.logs.append(str(msg))", "def log(self, message: str):\r\n with open(self.log_file, 'a') as f:\r\n f.write(message)", "def log(self, msg=\"\", level=1):\n\n if self.log_level >= level:\n print(\"[%s] %s\" % (time.strftime(\"%I:%M.%S\"), msg))", "def log() -> None:\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger('discord')\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s'))\n logger.addHandler(handler)", "def system_message(self,msg,ukey):\n self.log(source=\"system\",destination=ukey,message=msg,types=[\"info\"])", "def log(self, message, category=\"misc\", data=None):\n self._dirty = True\n entry = {\"message\": message, \"timestamp\": time.time(), \"category\": category}\n if data is not None:\n entry[\"data\"] = data\n\n # write to log file\n json.appendline(entry, self.filename)\n # write to stdout\n msg = \"{INTENSE_CYAN}\" + category + \"{PURPLE}:\"\n msg += \"{INTENSE_WHITE}\" + message + \"{NO_COLOR}\"\n print_color(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test parse of table of negocios realizados, compare parsed values versus a manually curated file.
def test__parse_add_negocios_realizados(self):
    print("_parse_add_negocios_realizados")

    id_test_cases = [0, 1, 2, 3, 4, 5, 6]
    for id_test in id_test_cases:
        in_case = hio.import_object_as_literal(
            os.path.join(
                path_data,
                f"_parse_add_negocios_realizados_table_negocios_realizados_{id_test}.in",
            )
        )
        out_case = hio.import_object_as_literal(
            os.path.join(
                path_data,
                f"_parse_add_negocios_realizados_table_negocios_realizados_{id_test}.out",
            )
        )
        out_test = pystock.parse_notas_corretagem._parse_add_negocios_realizados(
            in_case, out_case[0][-1]
        )
        self.assertListEqual(out_case, out_test)
[ "def test__parse_resumo_dos_negocios(self):\n print(\"_parse_resumo_dos_negocios\")\n id_test_cases = [0]\n for id_test in id_test_cases:\n in_case = hio.read_strings(\n os.path.join(path_data, f\"_parse_resumo_dos_negocios_{id_test}.in\")\n )\n out_case = hio.import_object_as_literal(\n os.path.join(\n path_data,\n f\"_parse_resumo_dos_negocios_valores_resumo_dos_negocios_{id_test}.out\",\n )\n )\n out_test = pystock.parse_notas_corretagem._parse_resumo_dos_negocios(\n in_case\n )\n self.assertListEqual(out_case, out_test)", "def test_filter_table_neg_control(self):\n otu_table = \"\"\"# QIIME v%s OTU table\\n#OTU ID\\tsample1\\tsample2\\tsample3\n0\\t0\\t2\\t0\n1\\t1\\t0\\t0\n2\\t1\\t1\\t1\"\"\" % __version__\n otu_table = otu_table.split('\\n')\n samples = ['sample2', 'sample3']\n result = _filter_table_neg_control(otu_table, samples)\n self.assertEqual(result, \"\"\"# QIIME v1.3.0 OTU table\n#OTU ID\\tsample1\n1\\t1\"\"\")\n #works with lineages\n otu_table = \"\"\"# QIIME v%s OTU table\\n#OTU ID\\tsample1\\tsample2\\tsample3\\tConsensus Lineage\n0\\t0\\t2\\t0\\ttaxon1\n1\\t1\\t0\\t0\\ttaxon2\n2\\t1\\t1\\t1\\ttaxon3\"\"\" % __version__\n otu_table = otu_table.split('\\n')\n samples = ['sample2', 'sample3']\n result = _filter_table_neg_control(otu_table, samples)\n self.assertEqual(result, \"\"\"# QIIME v1.3.0 OTU table\n#OTU ID\\tsample1\\tConsensus Lineage\n1\\t1\\ttaxon2\"\"\")\n samples = ['sample3']\n result = _filter_table_neg_control(otu_table, samples)\n self.assertEqual(result, \"\"\"# QIIME v1.3.0 OTU table\n#OTU ID\\tsample1\\tsample2\\tConsensus Lineage\n0\\t0\\t2\\ttaxon1\n1\\t1\\t0\\ttaxon2\"\"\")", "def test_parser(self):\n atab = Tabular(str(DATA_DIR / \"test.atab\"))\n self.assertEqual(len(atab.hits), 2, 'Read 2 hits')\n\n for i, hits in enumerate(zip(atab.hits, self._EXPECTED_HITS)):\n with self.subTest(\"Checking hit against known data\", hit=i):\n self._test_hits_equal(hits[0], hits[1])", "def test_read_file():\n # Check that main test input is read in with correct values\n input_file = os.path.join(orbitize.DATADIR, \"test_val.csv\")\n _compare_table(read_file(input_file))\n # Check that an input value with all valid entries and only ra/dec\n # columns can be read\n input_file_radec = os.path.join(orbitize.DATADIR, \"test_val_radec.csv\")\n read_file(input_file_radec)", "def test_generate_negative_matches():\n # type: () -> None\n test_negative_lines = open(\"test_negative/negative.py\").readlines()\n # Grab the line number of the failures\n errors_27 = set(\n int(l.split(\":\")[1])\n for l in open(\"test_negative/output.expected.2.7\").readlines()\n if not _is_summary(l)\n )\n errors_35 = set(\n int(l.split(\":\")[1])\n for l in open(\"test_negative/output.expected.3.5\").readlines()\n if not _is_summary(l)\n )\n\n expected_errors_27 = set(\n idx + 1 for idx, line in enumerate(test_negative_lines) if \"E:2.7\" in line\n )\n expected_errors_35 = set(\n idx + 1 for idx, line in enumerate(test_negative_lines) if \"E:3.5\" in line\n )\n\n assert errors_27 == expected_errors_27\n assert errors_35 == expected_errors_35\n\n # Some sanity checks to make sure we don't mess this up. 
Please update as necessary.\n assert len(errors_27) == 30\n assert len(errors_35) == 30", "def test_enfrentaimiento_consistency():\n #TODO complete test function\n df_matches = pd.read_csv('output/bbva_matches.csv')\n for unique_enfrentamiento in df_matches.enfrentamiento.unique():\n if len(df_matches[df_matches.enfrentamiento == unique_enfrentamiento].equipo_local.unique()) != 2:\n test= df_matches[df_matches.enfrentamiento == unique_enfrentamiento]\n print(\"ERROR\")\n else:\n continue\n assert True", "def test_ReadASCIITable(self):\n # ReadASCIITable is a wrapper for numpy.genfromtxt() that turns things into formatted\n # arrays if necessary, so we don't really need to test much of the functionality--just make\n # sure that both natively formatted arrays (table_with_string) and natively raw arrays\n # (treecorr_output) are both returned as formatted arrays.\n results = stile.ReadASCIITable('test_data/TreeCorr_output.dat', comments='#')\n numpy.testing.assert_equal(results, self.table1)\n results = stile.ReadASCIITable('test_data/table_with_string.dat')\n numpy.testing.assert_equal(results, self.table2_withstring)\n results = stile.ReadTable('test_data/table_with_string.dat')\n numpy.testing.assert_equal(results, self.table2_withstring)", "def test_parse_volpiano_examples(self):\n examples = pd.read_csv('chant21/examples/cantus-volpiano-examples.csv', index_col=0)\n for idx, data in examples.iterrows():\n parser = ParserCantusVolpiano()\n parse = parser.parser.parse(data['volpiano'])\n self.assertTrue(True)", "def test_a_file(fname):\n differ = UCSV([fname], [f'gold/{fname}'], zeroThreshold=5e-14)\n differ.diff()\n return differ._same, differ._message", "def test_parse_text_examples(self):\n examples = pd.read_csv('chant21/examples/cantus-volpiano-examples.csv', index_col=0)\n for idx, data in examples.iterrows():\n parser = ParserCantusText()\n parse = parser.parse(data['full_text_manuscript'])\n self.assertTrue(True)", "def test_perfect_parse(self):\n assert self.run_tests(self.perfect_rule) == []", "def test_data_with_extra_column_in_case_2(self):\n response = math_diff(\n self.thresh_dict,\n os.path.join(self.diff_files_dir, 'eplusout.csv'),\n os.path.join(self.diff_files_dir, 'eplusout_extra_column.csv'),\n os.path.join(self.temp_output_dir, 'abs_diff.csv'),\n os.path.join(self.temp_output_dir, 'rel_diff.csv'),\n os.path.join(self.temp_output_dir, 'math_diff.log'),\n os.path.join(self.temp_output_dir, 'summary.csv'),\n )\n self.assertIn('All Equal', response[0]) # diff status\n self.assertEqual(24, response[1]) # num records compared\n self.assertEqual(0, response[2]) # big diffs\n self.assertEqual(0, response[3]) # small diffs", "def test_filter_table_no_taxa(self):\n \n params = {'min_otu_count': 1, 'min_otu_samples': 2,\n 'included_taxa': '', 'excluded_taxa': ''}\n \n otu_file = open(self.sample_input_otu_table_no_taxa, \"U\")\n \n filtered_otu_table_fp = get_tmp_filename(prefix = \"filtered_otu_table_\",\n suffix = \".txt\")\n \n filtered_otu_table_f =\\\n open(filtered_otu_table_fp, \"w\")\n \n \n filter_table(params, filtered_otu_table_f, otu_file)\n \n filtered_otu_table_f.close()\n \n # Output has 3 of the OTUs removed, that only show up in 1 sample\n \n actual_result_f = open(filtered_otu_table_fp, \"U\")\n \n \n actual_results = \"\\n\".join([line.strip() for line in actual_result_f])\n \n self.assertEqual(actual_results,\n self.expected_otu_table_output_default_no_taxa)\n \n self._files_to_remove.append(filtered_otu_table_fp)", "def test_file_format(self):\n with 
charitycheck.IRSNonprofitDataContextManager() as irs_data:\n in_expected_format = True\n # check first two lines are \\n characters\n in_expected_format = (in_expected_format and\n irs_data.readline() == '\\n')\n in_expected_format = (in_expected_format and\n irs_data.readline() == '\\n')\n for i, line in enumerate(irs_data):\n m = re.match(\n r'^(?:\\d{9}\\|.+\\|.+(?:\\|[A-Z]{2})?\\|.+\\|(?:[A-Z],?)+''\\n|\\n)$',\n line)\n in_expected_format = in_expected_format and bool(m)\n self.assertTrue(in_expected_format)", "def show_diff_table(self):\n return not self.outputs_match() and not \\\n (self.given_empty and not self.correct_empty)", "def test_process_file():\n # Read the land use file to get the expected admin codes\n with open(\"../../../data/Haiti/Haiti_LandUse_And_Pop.csv\") as exp_file:\n exp_csv = csv.reader(exp_file)\n exp_codes = set()\n first = True\n for rec in exp_csv:\n if first:\n first = False\n continue\n exp_codes.add(rec[0])\n\n # Process the broken input file\n fhf.in_csv = \"../../../data/Haiti/Haiti_Admin_Names.csv\"\n fhf.out_fixed_csv = \"test_Haiti_Admin_Names_Fixed.csv\"\n fhf.process_file()\n\n # Get the presumed fixed admin codes from the output file\n with open(fhf.out_fixed_csv) as act_file:\n act_csv = csv.reader(act_file)\n act_codes = set()\n first = True\n for rec in act_csv:\n if first:\n first = False\n continue\n act_codes.add(rec[0])\n\n # Compare the admin codes from the fixed file against expected\n assert act_codes == exp_codes", "def test_split_otu_table_on_sample_metadata(self):\n actual = list(split_otu_table_on_sample_metadata(self.otu_table_f1,\n self.mapping_f1,\n \"Treatment\"))\n for id_, e in actual:\n try:\n parse_biom_table(e)\n except:\n print e\n actual = [(id_,parse_biom_table(e)) for id_, e in actual]\n exp = [(id_,parse_biom_table(e)) for id_, e in otu_table_exp1]\n \n actual.sort()\n exp.sort()\n \n for a,e in zip(actual,exp):\n self.assertEqual(a,e,\"OTU tables are not equal:\\n%s\\n%s\" % \\\n (format_biom_table(a[1]),format_biom_table(e[1])))", "def compare_text(filename1, filename2):\n textpairs1 = read_text(filename1)\n textpairs2 = read_text(filename2)\n\n if len(textpairs1) != len(textpairs2):\n print(\"Two file different rows\")\n return 0\n\n label1 = [tp.label for tp in textpairs1]\n label2 = [tp.label for tp in textpairs2]\n\n acc, f1 = evaluate(label1, label2)\n\n print(\"Accuracy: \", acc)\n print(\"macroF1: \", f1)", "def test_bad_data(self):\n\tself.state = {StateKey.UNPROCESSED_DATA:[[0, len(CtdmoParserUnitTestCase.BAD_TEST_DATA)]],\n\t StateKey.IN_PROCESS_DATA:[],\n\t StateKey.TIMESTAMP:0.0}\n self.stream_handle = StringIO(CtdmoParserUnitTestCase.BAD_TEST_DATA)\n self.parser = CtdmoParser(self.config, self.state, self.stream_handle,\n self.state_callback, self.pub_callback) # last one is the link to the data source\n\n result = self.parser.get_records(1)\n\tself.stream_handle.close()\n self.assert_result(result, [[894,1085,12,1],[1472,1663,12,0],[2297,2487,12,0]],\n\t\t\t [[0,50],[374,507],[894,1085],[1199,1663],[2297,2487]],\n\t\t\t self.timestamp4, self.particle_d_new)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test parse of table of resumo dos negocios.
def test__parse_resumo_dos_negocios(self):
    print("_parse_resumo_dos_negocios")
    id_test_cases = [0]
    for id_test in id_test_cases:
        in_case = hio.read_strings(
            os.path.join(path_data, f"_parse_resumo_dos_negocios_{id_test}.in")
        )
        out_case = hio.import_object_as_literal(
            os.path.join(
                path_data,
                f"_parse_resumo_dos_negocios_valores_resumo_dos_negocios_{id_test}.out",
            )
        )
        out_test = pystock.parse_notas_corretagem._parse_resumo_dos_negocios(
            in_case
        )
        self.assertListEqual(out_case, out_test)
[ "def test__parse_add_negocios_realizados(self):\n print(\"_parse_add_negocios_realizados\")\n\n id_test_cases = [0, 1, 2, 3, 4, 5, 6]\n for id_test in id_test_cases:\n in_case = hio.import_object_as_literal(\n os.path.join(\n path_data,\n f\"_parse_add_negocios_realizados_table_negocios_realizados_{id_test}.in\",\n )\n )\n out_case = hio.import_object_as_literal(\n os.path.join(\n path_data,\n f\"_parse_add_negocios_realizados_table_negocios_realizados_{id_test}.out\",\n )\n )\n out_test = pystock.parse_notas_corretagem._parse_add_negocios_realizados(\n in_case, out_case[0][-1]\n )\n self.assertListEqual(out_case, out_test)", "def test_filter_table_neg_control(self):\n otu_table = \"\"\"# QIIME v%s OTU table\\n#OTU ID\\tsample1\\tsample2\\tsample3\n0\\t0\\t2\\t0\n1\\t1\\t0\\t0\n2\\t1\\t1\\t1\"\"\" % __version__\n otu_table = otu_table.split('\\n')\n samples = ['sample2', 'sample3']\n result = _filter_table_neg_control(otu_table, samples)\n self.assertEqual(result, \"\"\"# QIIME v1.3.0 OTU table\n#OTU ID\\tsample1\n1\\t1\"\"\")\n #works with lineages\n otu_table = \"\"\"# QIIME v%s OTU table\\n#OTU ID\\tsample1\\tsample2\\tsample3\\tConsensus Lineage\n0\\t0\\t2\\t0\\ttaxon1\n1\\t1\\t0\\t0\\ttaxon2\n2\\t1\\t1\\t1\\ttaxon3\"\"\" % __version__\n otu_table = otu_table.split('\\n')\n samples = ['sample2', 'sample3']\n result = _filter_table_neg_control(otu_table, samples)\n self.assertEqual(result, \"\"\"# QIIME v1.3.0 OTU table\n#OTU ID\\tsample1\\tConsensus Lineage\n1\\t1\\ttaxon2\"\"\")\n samples = ['sample3']\n result = _filter_table_neg_control(otu_table, samples)\n self.assertEqual(result, \"\"\"# QIIME v1.3.0 OTU table\n#OTU ID\\tsample1\\tsample2\\tConsensus Lineage\n0\\t0\\t2\\ttaxon1\n1\\t1\\t0\\ttaxon2\"\"\")", "def test_negation_interaction(self):\n query, sort = beets.library.parse_query_string('-bar+',\n beets.library.Item)\n self.assertEqual(len(query.subqueries), 1)\n self.assertTrue(isinstance(query.subqueries[0],\n dbcore.query.TrueQuery))\n self.assertTrue(isinstance(sort, dbcore.query.SlowFieldSort))\n self.assertEqual(sort.field, '-bar')", "def test_compile_negative_operator(self):\n self.td_engine.execute(operators.neg(self.table.c.c1))\n\n assert(self.last_compiled == '-t_test.c1')", "def test_negation(\n large_game_roles: tuple[Role, ...], example_statement: Statement\n ) -> None:\n expected = Statement(\n \"NOT - test\",\n ((2, const.ROLE_SET - frozenset({Role.ROBBER})),),\n speaker=Role.ROBBER,\n )\n\n result = example_statement.negation\n\n assert str(result) == str(expected)", "def test_generate_negative_matches():\n # type: () -> None\n test_negative_lines = open(\"test_negative/negative.py\").readlines()\n # Grab the line number of the failures\n errors_27 = set(\n int(l.split(\":\")[1])\n for l in open(\"test_negative/output.expected.2.7\").readlines()\n if not _is_summary(l)\n )\n errors_35 = set(\n int(l.split(\":\")[1])\n for l in open(\"test_negative/output.expected.3.5\").readlines()\n if not _is_summary(l)\n )\n\n expected_errors_27 = set(\n idx + 1 for idx, line in enumerate(test_negative_lines) if \"E:2.7\" in line\n )\n expected_errors_35 = set(\n idx + 1 for idx, line in enumerate(test_negative_lines) if \"E:3.5\" in line\n )\n\n assert errors_27 == expected_errors_27\n assert errors_35 == expected_errors_35\n\n # Some sanity checks to make sure we don't mess this up. 
Please update as necessary.\n assert len(errors_27) == 30\n assert len(errors_35) == 30", "def tabuleiro_terminado(tab):\n vazio = tabuleiro_posicoes_vazias(tab)\n copitab1 = copia_tabuleiro(tab)\n copitab2 = copia_tabuleiro(tab)\n copitab3 = copia_tabuleiro(tab)\n copitab4 = copia_tabuleiro(tab)\n return (vazio == [] and tabuleiro_reduz(tab, 'N') == copitab1 and tabuleiro_reduz(tab, 'S') == copitab2 \\\n and tabuleiro_reduz(tab, 'E') == copitab3 and tabuleiro_reduz(tab, 'W') == copitab4)", "def testFalseWhenEmpty(self):\n row = sqlresult.ResultRow([], [])\n self.assertFalse(row)", "def test_no_tests(self):\n with self.assertRaises(ValueError):\n _ = unity_test_parser.TestResults(\n \"\"\"\n\n-----------------------\n0 Tests 0 Failures 0 Ignored\nPASS\n\"\"\"\n )", "def ignore_row(row):\n return is_title_row(row) or is_not_data(row) or is_empty_row(row)", "def column_mismatches_none(self) -> None:\r\n self._ui.print_('No mismatches for this set.', fore='green')", "def test_srt_nodata_r(self):\n self.assertEqual(jc.parsers.srt.parse('', raw=True, quiet=True), [])", "def test_math_negative_matrix():\n data = [1, 2, 3, 4]\n expect = [-1, -2, -3, -4]\n assert math_util.list_negative(data) == expect", "def testNonEqualityWithNonResultRow(self):\n self.assertNotEqual(self.row, self.values)\n self.assertNotEqual(self.row, list(self.values))", "def test_negative_number(self) -> None:\n val = self.parse(self.arithmetic_lexer.lex(\"0-13\"))\n self.assertEqual(-13, val)", "def show_diff_table(self):\n return not self.outputs_match() and not \\\n (self.given_empty and not self.correct_empty)", "def test_In_lhood_field_in_best_model_table(self):\n correct = [(\"N\" if l in self.exclude_lines else \"Y\") for l in self.lines]\n self.assertTrue(self.DF_best[\"In_lhood?\"].values.tolist() == correct)", "def test_notax_over_edge():\n ans = tax_table(18201)\n numpy.testing.assert_allclose(ans, 0.19)", "def test_srt_nodata(self):\n self.assertEqual(jc.parsers.srt.parse('', quiet=True), [])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test parse of table clearing.
def test__parse_clearing(self):
    print("_parse_clearing")
    id_test_cases = [0]
    for id_test in id_test_cases:
        in_case = hio.read_strings(
            os.path.join(path_data, f"_parse_clearing_{id_test}.in")
        )
        out_case = hio.import_object_as_literal(
            os.path.join(path_data, f"_parse_clearing_values_{id_test}.out",)
        )
        out_test = pystock.parse_notas_corretagem._parse_clearing(in_case)
        self.assertListEqual(out_case, out_test)
[ "def clear_table(self):\n self.takes_table.setRowCount(0)\n self.takes_table.clearContents()", "def clear_table(self):\n self.clearContents()\n self.setRowCount(0)", "def empty():\n\n return EmptyTable()", "def handle_clear_table(self):\n \n self.number_of_rows = 0\n self.tablefrequency.setColumnCount(4)\n self.tablefrequency.setRowCount(self.number_of_rows)", "def clear(self):\n self._table = {}\n self._cache_table()", "def clear(self, clear_stats=True, clear_default=False):\n logging.debug(\"Clearing table %s\" % self.name)\n with self.cond_var:\n if clear_stats:\n self.packet_count = 0\n self.byte_count = 0\n for entry in self.entries:\n # For now, do nothing\n pass\n self.entries = []\n if clear_default:\n self.default_entry = None", "def test_empty_table(self):\n\n html = \"\"\"\n <html>\n <head>\n </head>\n <body>\n <table>\n </table>\n </body>\n </html>\n \"\"\"\n\n with self.assertLogs(\"xhtml2pdf\", level=\"DEBUG\") as cm:\n context = pisaParser(BytesIO(html.encode(\"utf-8\")), pisaContext(None))\n\n self.assertEqual(\n context.story, [], \"Empty table doesn't return an empty story!\"\n )\n self.assertEqual(\n cm.output,\n [\n \"DEBUG:xhtml2pdf:Col widths: []\",\n \"WARNING:xhtml2pdf:<table> is empty\\n'<table> </table>'\",\n ],\n )", "def test_clean_data_is_not_empty(self):\n self.assertTrue(self.data_processor.clean_data_frame)", "def table_clear(database, table):\n db_con_var = connect()\n try:\n rtdb.db(database).table(\n table\n ).delete().run(db_con_var)\n print(\n \"log: db {}.{} table has been cleared.\"\n .format(database, table)\n )\n except rtdb.ReqlError as err:\n err = sys.exc_info()[0]\n print(\"EXCEPT == {}\".format(err))", "def test_empty_table(db_connection):\n with db_connection.cursor() as curs:\n curs.execute(\"SELECT * FROM order_item\")\n assert curs.rowcount is 0", "def clear_vessel_batches_table_contents(self):\n self.tableWidget.setRowCount(0)", "def test_print_table_empty(self):\n with patch('sys.stdout', new=StringIO()) as stdout:\n print(self.rule_table)\n expected_output = 'Rule table is empty'\n output = stdout.getvalue().strip()\n assert_equal(output, expected_output.strip())", "def delete_data_table():", "def clear_test_result(self, test):", "def tearDown(self):\n with database() as db:\n db.query('DROP TABLE test_data')", "def clear_side_tables(self):\n self.clear_queue('side_tables')", "def clear_report_results(self):", "def test_rowcount_nodata(self):\r\n self.cursor.execute(\"create table t1(i int)\")\r\n # This is a different code path internally.\r\n self.cursor.execute(\"delete from t1\")\r\n self.assertEquals(self.cursor.rowcount, 0)", "def testAccessEmptyTable(self):\n results = [(idx,) for idx in self.manager.snimpyEmptyDescr]\n self.assertEqual(results, [])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the quality tests over the given tables.
def execute(self, context):
    postgres = PostgresHook(self._ddbb_conn_id)
    for table in self._tables:
        if table not in self._available_tables:
            tables = ', '.join(self._available_tables)
            message = 'Only these tables are supported: {}'.format(tables)
            raise ValueError(message)
        query = queries.SELECTION_QUERIES['count'].format(table)
        records = postgres.get_records(query)
        if len(records) == 0 or len(records[0]) == 0 or records[0][0] == 0:
            message = 'The table {} has not passed the data quality check.'.format(table)
            raise ValueError(message)
        message = 'The table {} has passed the data quality check with {} records.'.format(table, records[0][0])
        self.log.info(message)
[ "def test_qual_tab(self):\n self.check_fails(\"Quality/error_qual_tab.fastq\", 4)\n self.check_general_passes(\"Quality/error_qual_tab.fastq\", 5)", "def run_all(self):\n\n self.fill_table()\n\n self.traceback()\n\n self.alignment()\n\n self.total_score()", "def quality_check(cur, conn): \n \n # perform rudimentary quality check by counting rows in tables\n for table, query in count_table_queries.items():\n try:\n cur.execute(query)\n result = cur.fetchone()\n print(f\"Count table query succeeded for table '{table}'. Total records: {result[0]}.\") \n except psycopg2.Error as e:\n print(f\"Count table query failed for table '{table}'.\")\n print(e)\n continue", "def test_process_table():\n lc1 = MockColumn('l1', 'INTEGER', None, None, None, None)\n lc2 = MockColumn('l2', 'INTEGER', None, None, None, None)\n fc1 = MockColumn('f1', 'INTEGER', None, None, None, None)\n fc2 = MockColumn('f2', 'INTEGER', None, None, None, None)\n fortab = MockTable('ft', [fc1, fc2], [])\n r1 = MockRef(lc1, fortab, fc1, 'NO ACTION', 'NO ACTION')\n r2 = MockRef(lc2, fortab, fc2, 'NO ACTION', 'NO ACTION')\n loctab = MockTable('lt', [lc1, lc2], [r1, r2])\n o = processTable(loctab, 'full', False, True)\n assert o == \"CREATE TABLE lt (\\n l1 INTEGER,\\n l2 INTEGER,\\n FOREIGN KEY(l1) REFERENCES ft(f1) ON UPDATE NO ACTION ON DELETE NO ACTION,\\n FOREIGN KEY(l2) REFERENCES ft(f2) ON UPDATE NO ACTION ON DELETE NO ACTION\\n);\\n\"", "def test_all_tables(self):\n job = base_job.Job(os.path.join(os.getcwd(), 'my_sql_all.json'))\n job.connect_to_database()\n base_tables_to_keep = ['books', 'cities', 'states']\n self.assertEqual(base_tables_to_keep, mysql_worker.get_tables(job))", "def test_qual_vtab(self):\n self.check_fails(\"Quality/error_qual_vtab.fastq\", 0)\n self.check_general_passes(\"Quality/error_qual_vtab.fastq\", 5)", "def _generate_table(df_iree, df_shark, df_baseline, title):\n summary = pd.DataFrame(\n columns=[\n _MODEL,\n _BASELINE,\n _DATA_TYPE,\n _DIALECT,\n _DEVICE,\n _BASELINE_LATENCY,\n _IREE_LATENCY,\n _SHARK_LATENCY,\n _IREE_VS_BASELINE,\n _SHARK_VS_BASELINE,\n _IREE_VS_SHARK,\n _BASELINE_MEMORY,\n _IREE_MEMORY,\n _SHARK_MEMORY,\n ]\n )\n\n models = df_iree.model.unique()\n for model in models:\n iree_results_per_model = df_iree.loc[df_iree.model == model]\n dialects = iree_results_per_model.dialect.unique()\n for dialect in dialects:\n iree_results_per_dialect = iree_results_per_model.loc[\n iree_results_per_model.dialect == dialect\n ]\n data_types = iree_results_per_dialect.data_type.unique()\n for data_type in data_types:\n iree_results_per_datatype = iree_results_per_dialect.loc[\n iree_results_per_dialect.data_type == data_type\n ]\n device_types = iree_results_per_datatype.device.unique()\n for device in device_types:\n iree_results = iree_results_per_datatype.loc[\n iree_results_per_datatype.device == device\n ]\n if len(iree_results) != 3:\n print(\n f\"Warning! Expected number of results to be 3. Got\"\n f\" {len(iree_results)}\"\n )\n print(iree_results)\n continue\n\n baseline_results = df_baseline.loc[\n (df_baseline.model == model)\n & (df_baseline.dialect == dialect)\n & (df_baseline.data_type == data_type)\n & (df_baseline.device == device)\n ]\n\n if baseline_results.empty:\n # We use snapshots of latencies for baseline. If it is a new\n # benchmark that is not included in the snapshot yet, emit a\n # warning.\n print(\n f\"Warning: No baseline results found for {model}, {dialect},\"\n f\" {data_type}, {device}. Using IREE version as baseline. 
Please\"\n f\" update baseline csv.\"\n )\n engine = iree_results.engine.iloc[0]\n baseline_df = iree_results.loc[iree_results.engine == engine]\n baseline_latency = baseline_df.iloc[0][\"ms/iter\"]\n baseline_device_mb = baseline_df.iloc[0][\"device_memory_mb\"]\n else:\n engine = baseline_results.engine.iloc[0]\n baseline_df = baseline_results.loc[\n baseline_results.engine == engine\n ]\n baseline_latency = baseline_df.iloc[0][\"ms/iter\"]\n baseline_device_mb = baseline_df.iloc[0][\"device_memory_mb\"]\n\n iree_df = iree_results.loc[iree_results.engine == \"shark_iree_c\"]\n iree_latency = iree_df.iloc[0][\"ms/iter\"]\n iree_device_mb = iree_df.iloc[0][\"device_memory_mb\"]\n iree_vs_baseline = html_utils.format_latency_comparison(\n iree_latency, baseline_latency\n )\n\n if df_shark is not None:\n shark_results = df_shark.loc[\n (df_shark.model == model)\n & (df_shark.dialect == dialect)\n & (df_shark.data_type == data_type)\n & (df_shark.device == device)\n ]\n if shark_results.empty:\n print(\n f\"Warning: No SHARK results for {model}, {dialect}, {data_type}, {device}.\"\n )\n continue\n\n shark_df = shark_results.loc[\n shark_results.engine == \"shark_iree_c\"\n ]\n shark_latency = shark_df.iloc[0][\"ms/iter\"]\n shark_device_mb = shark_df.iloc[0][\"device_memory_mb\"]\n shark_vs_baseline = html_utils.format_latency_comparison(\n shark_latency, baseline_latency\n )\n iree_vs_shark = html_utils.format_latency_comparison(\n iree_latency, shark_latency\n )\n else:\n # If there are no SHARK benchmarks available, use default values.\n # These columns will be hidden later.\n shark_latency = 0\n shark_vs_baseline = \"<missing_comparison>\"\n iree_vs_shark = \"<missing_comparison>\"\n\n summary.loc[len(summary)] = [\n model,\n engine,\n data_type,\n dialect,\n device,\n f\"{baseline_latency:.1f}\",\n f\"{iree_latency:.1f}\",\n f\"{shark_latency:.1f}\",\n iree_vs_baseline,\n shark_vs_baseline,\n iree_vs_shark,\n f\"{baseline_device_mb:.3f}\",\n f\"{iree_device_mb:.3f}\",\n f\"{shark_device_mb:.3f}\",\n ]\n\n summary = summary.round(2)\n\n st = summary.style.set_table_styles(html_utils.get_table_css())\n st = st.hide(axis=\"index\")\n if df_shark is None:\n st = st.hide_columns(\n subset=[_SHARK_LATENCY, _SHARK_VS_BASELINE, _IREE_VS_SHARK]\n )\n st = st.set_caption(title)\n st = st.applymap(html_utils.style_performance, subset=_PERF_COLUMNS)\n st = st.set_properties(\n subset=[_MODEL],\n **{\n \"width\": \"300px\",\n \"text-align\": \"left\",\n },\n )\n st = st.set_properties(\n subset=[_BASELINE],\n **{\n \"width\": \"140\",\n \"text-align\": \"center\",\n },\n )\n st = st.set_properties(\n subset=[_DIALECT, _DATA_TYPE, _DEVICE],\n **{\n \"width\": \"100\",\n \"text-align\": \"center\",\n },\n )\n st = st.set_properties(\n subset=_LATENCY_COLUMNS,\n **{\n \"width\": \"100\",\n \"text-align\": \"right\",\n },\n )\n st = st.set_properties(\n subset=_PERF_COLUMNS,\n **{\"width\": \"150px\", \"text-align\": \"right\", \"color\": \"#ffffff\"},\n )\n st = st.set_properties(\n subset=_MEMORY_COLUMNS,\n **{\n \"width\": \"100\",\n \"text-align\": \"right\",\n },\n )\n\n return st.to_html() + \"<br/>\"", "def test_evaluate_test_datasets(self):\n print self.genome_table1.ObservationIds\n print self.genome_table2.ObservationIds\n \n obs= evaluate_test_dataset(self.genome_table1,self.genome_table2)\n print \"Obs:\",obs", "def table_analysis():\n pass", "def identify_tables(self):\n\n mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]\n self.query_table = 
eval('{}ReadnoiseQueryHistory'.format(mixed_case_name))\n self.stats_table = eval('{}ReadnoiseStats'.format(mixed_case_name))", "def compare_all(self, **kwargs):\n #num_tests = 1\n #num_maps = len(self.maps)\n #arr_data = np.zeros([num_tests, num_maps])\n\n # For each given 3D field, run all the tests and add a row to the table.\n for map3D in self.maps:\n # Get the data\n arr_data = map3D.data\n\n # Store the results from each test for this field.\n lis_results = [ map3D.meta.get('extrapolator_routine', 'Unknown Routine'),\n map3D.meta.get( 'extrapolator_duration', 0.0 ) ]\n\n # Run through all the tests and append results to the list.\n lis_results.append(self.L_infin_norm(arr_data))\n\n # Now add the results to the table.\n self.results.add_row(lis_results)\n\n\n if self.normalise:\n self.results_normalised\n else:\n self.results", "def test_get_table_list_4(self):\n columns = [self.PhageID, self.GeneID]\n table_list = querying.get_table_list(columns)\n\n self.assertTrue(self.phage in table_list)\n self.assertTrue(self.gene in table_list)", "def test_filter_table_samples(self):\n\n otu_table = \"\"\"# QIIME v%s OTU table\\n#OTU ID\\tsample1\\tsample2\\tsample3\n0\\t0\\t2\\t0\n1\\t1\\t0\\t0\n2\\t1\\t1\\t1\"\"\" % __version__\n otu_table = otu_table.split('\\n')\n result = _filter_table_samples(otu_table, 2)\n self.assertEqual(result, \"# QIIME v%s OTU table\\n#OTU ID\\tsample1\\tsample2\\n0\\t0\\t2\\n1\\t1\\t0\\n2\\t1\\t1\" % __version__)\n result = _filter_table_samples(otu_table, 1)\n self.assertEqual(result, '\\n'.join(otu_table))\n result = _filter_table_samples(otu_table, 3)\n self.assertEqual(result, \"# QIIME v%s OTU table\\n#OTU ID\\tsample2\\n0\\t2\\n1\\t0\\n2\\t1\" % __version__)", "def identify_tables(self):\n mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self.instrument]\n self.query_table = eval('{}BadPixelQueryHistory'.format(mixed_case_name))\n self.pixel_table = eval('{}BadPixelStats'.format(mixed_case_name))", "def test_all_scenarios(self):\n\n exr_bash = self.prep_exr()\n percents = [1, 50, 90]\n # TODO: Don't use a for loop, use the trials kwarg\n for i in range(0, 2):\n Simulator().run(attack_types=Attack.runnable_attacks,\n adopt_policies=list(Non_Default_Policies.__members__.values()),\n percents=percents,\n exr_bash=exr_bash)", "def test_get_table_list_3(self):\n table_list = querying.get_table_list(self.PhageID)\n\n self.assertTrue(self.phage in table_list)\n self.assertFalse(self.gene in table_list)", "def test_file_consistency(tables, pkg_settings, pkg_dir):\n pkg_name = pkg_settings['name']\n # remove the '.csv' or the '.csv.gz' from the file names\n file_tbls = [re.sub(r'(\\.csv.*$)', '', x) for x in os.listdir(\n os.path.join(pkg_dir, 'data'))]\n # given list of table names and partitions, generate list of expected files\n pkg_files = tables\n # pkg_files = []\n # for table in tables:\n # pkg_file = package_files_from_table(table, pkg_settings)\n # pkg_files.extend(pkg_file)\n\n dependent_tbls = list(get_dependent_tables_from_list(tables))\n etl_tbls = tables\n\n dependent_tbls.sort()\n file_tbls.sort()\n pkg_files.sort()\n etl_tbls.sort()\n # TODO: determine what to do about the dependent_tbls... 
right now the\n # dependent tables include some glue tables for FERC in particular, but\n # we are imagining the glue tables will be in another data package...\n if (file_tbls == pkg_files): # & (dependent_tbls == etl_tbls)):\n logger.info(f\"Tables are consistent for {pkg_name} package\")\n else:\n inconsistent_tbls = []\n for tbl in file_tbls:\n if tbl not in pkg_files:\n inconsistent_tbls.extend(tbl)\n raise AssertionError(f\"{tbl} from CSVs not in ETL tables\")\n\n # for tbl in dependent_tbls:\n # if tbl not in etl_tbls:\n # inconsistent_tbls.extend(tbl)\n # raise AssertionError(\n # f\"{tbl} from forgien key relationships not in ETL tables\")\n # this is here for now just in case the previous two asserts don't work..\n # we should probably just stick to one.\n raise AssertionError(\n f\"Tables are inconsistent. \"\n f\"Missing tables include: {inconsistent_tbls}\")", "def test_interpolation():\n for table in tables_list:\n ctable = getattr(sestab, table)\n R_array = ctable['R_Array']\n T_array = ctable['T_Array']\n F_array = ctable['F_Array']\n\n R,T = np.meshgrid(R_array, T_array, indexing='ij')\n\n F_array_itp = ctable(R, T)\n #F_array_itp = F_array_itp.T.reshape(F_array.shape)\n yield np.testing.assert_allclose, F_array, F_array_itp, 1e-15", "def build_tables():\n yield setup_tables()\n IOLoop.current().stop()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the blktrace program on remote machines.
def execute(ctx, config):
    procs = []
    testdir = teuthology.get_testdir(ctx)
    log_dir = '/home/ubuntu/archive/performance/blktrace'#.format(tdir=testdir)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    for remote, roles_for_host in osds.remotes.iteritems():
        roles_to_devs = config['remote_to_roles_to_dev'][remote.name]
        for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
            if roles_to_devs.get(int(id_)):
                dev = roles_to_devs[int(id_)]
                log.info("running blktrace on %s: %s" % (remote.name, dev))
                proc = remote.run(
                    args=[
                        'daemon-helper',
                        daemon_signal,
                        'sudo',
                        blktrace,
                        '-d',
                        dev,
                        '-D',
                        log_dir,
                        '-o',
                        dev.rsplit("/", 1)[1],
                    ],
                    wait=False,
                    stdin=run.PIPE,
                )
                procs.append(proc)
                log.info(proc)
        # for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
        #     if roles_to_devs.get(int(id_)):
        #         dev = roles_to_devs[int(id_)]
        #         remote.run(
        #             args=[
        #                 'sudo',
        #                 'chmod',
        #                 '0664',
        #                 '{0}/{1}.blktrace.*'.format(log_dir, dev.rsplit("/",1)[1]),
        #             ],
        #             wait=False,
        #         )
    try:
        yield
    finally:
        osds = ctx.cluster.only(teuthology.is_type('osd'))
        for remote, roles_for_host in osds.remotes.iteritems():
            roles_to_devs = config['remote_to_roles_to_dev'][remote.name]
            for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
                if roles_to_devs.get(int(id_)):
                    dev = roles_to_devs[int(id_)]
                    log.info("running blkparse on %s: %s" % (remote.name, dev))
                    remote.run(
                        args=[
                            'cd',
                            log_dir,
                            run.Raw(';'),
                            blkparse,
                            '-i',
                            '{0}.blktrace.0'.format(dev.rsplit("/", 1)[1]),
                            '-o',
                            '{0}.blkparse'.format(dev.rsplit("/", 1)[1]),
                        ],
                        wait=False,
                    )
        log.info('stopping blktrace processs')
        for proc in procs:
            proc.stdin.close()
[ "def remote_main(cls, sys_args, group):\n args = sys_args[1:]\n if len(args) != len(cls.args):\n raise ValueError(\"Usage: %s %s\" % (\n sys_args[0], \" \".join(cls.args)))\n for gw in group:\n for line in cls.remote_op(gw, args):\n print \"[%s] %s\" % (gw.remoteaddress, line)", "def run_flrtvc(machine, output, params):\n\n global WORKDIR\n\n # Run 'lslpp -Lcq' on the remote machine and save to file\n lslpp_file = os.path.join(WORKDIR, 'lslpp_{}.txt'.format(machine))\n thd1 = threading.Thread(target=run_lslpp, args=(machine, lslpp_file))\n thd1.start()\n\n # Run 'emgr -lv3' on the remote machine and save to file\n emgr_file = os.path.join(WORKDIR, 'emgr_{}.txt'.format(machine))\n thd2 = threading.Thread(target=run_emgr, args=(machine, emgr_file))\n thd2.start()\n\n # Wait threads to finish\n thd1.join()\n thd2.join()\n\n try:\n # Prepare flrtvc command\n cmd = ['/usr/bin/flrtvc.ksh', '-e', emgr_file, '-l', lslpp_file]\n if params['apar_type'] and params['apar_type'] != 'all':\n cmd += ['-t', params['apar_type']]\n if params['apar_csv']:\n cmd += ['-f', params['apar_csv']]\n if params['filesets']:\n cmd += ['-g', params['filesets']]\n\n # Run flrtvc in compact mode\n logging.debug('{}: run cmd \"{}\"'.format(machine, ' '.join(cmd)))\n stdout_c = subprocess.check_output(args=cmd, stderr=subprocess.STDOUT)\n output.update({'0.report': stdout_c.splitlines()})\n\n # Save to file\n if params['dst_path']:\n if not os.path.exists(params['dst_path']):\n os.makedirs(params['dst_path'])\n filename = os.path.join(params['dst_path'], 'flrtvc_{}.txt'.format(machine))\n with open(filename, 'w') as myfile:\n if params['verbose']:\n cmd += ['-v']\n myfile.write(subprocess.check_output(args=cmd, stderr=subprocess.STDOUT))\n else:\n myfile.write(stdout_c)\n except subprocess.CalledProcessError as exc:\n logging.warn('{}: EXCEPTION cmd={} rc={} output={}'\n .format(machine, exc.cmd, exc.returncode, exc.output))\n output.update({'0.report': []})\n MODULE.exit_json(changed=CHANGED, msg='error executing flrtvc', meta=output)", "def run(vm=\"\", output=\"/tmp/tc3\", verbose=True, run=\"1\"):\n vboxcfg = forgeosi.VboxConfig()\n vboxcfg.get_nat_network(run)\n vbox_c1 = forgeosi.Vbox(basename=vm1, clonename=\"testrun\"+run+\"client1\")\n if verbose:\n print \"vm1 created\"\n time.sleep(10)\n vbox_c2 = forgeosi.Vbox(basename=vm2, clonename=\"testrun\"+run+\"client2\")\n if verbose:\n print \"vm2 created\"\n time.sleep(10)\n vbox_c3 = forgeosi.Vbox(basename=vm3, clonename=\"testrun\"+run+\"client3\")\n if verbose:\n print \"vm3 created\"\n time.sleep(10)\n vbox_s = forgeosi.Vbox(basename=vms, clonename=\"testrun\"+run+\"server\")\n if verbose:\n print \"vms created\"\n time.sleep(10)\n p_c1 = vbox_c1.start(session_type=forgeosi.SessionType.gui, wait=False)\n p_c2 = vbox_c2.start(session_type=forgeosi.SessionType.gui, wait=False)\n vbox_s.start(session_type=forgeosi.SessionType.gui, wait=True)\n vbox_c3.start(session_type=forgeosi.SessionType.gui, wait=True)\n p_c1.wait_for_completion()\n p_c2.wait_for_completion()\n\n if verbose:\n print \"all machines booted\"\n time.sleep(60)\n\n vbox_c1.create_guest_session()\n vbox_c2.create_guest_session()\n vbox_c3.create_guest_session()\n vbox_s.create_guest_session()\n if verbose:\n print \"all guest_sessions created\"\n vbox_c1.add_to_nat_network(run)\n vbox_c2.add_to_nat_network(run)\n vbox_c3.add_to_nat_network(run)\n vbox_s.add_to_nat_network(run)\n vbox_s.start_network_trace(path=output+\"/server.pcap\")\n vbox_c1.start_network_trace(path=output+\"/client1.pcap\")\n 
time.sleep(60)\n\n vbox_s.os.make_dir(\"/home/default/server\")\n\n if verbose:\n print \"downloading files to server\"\n time.sleep(10)\n vbox_s.os.download_file(rhino1, \"/home/default/server/rhino1.jpg\")\n time.sleep(10)\n vbox_s.os.download_file(rhino2, \"/home/default/server/rhino2.jpg\")\n time.sleep(10)\n #install ssh-server for using scp later\n vbox_c1.os.run_shell_cmd(\"\"\"sudo apt-get install openssh-server\nsleep_hack\n12345\nsleep_hack\ny\n\"\"\", gui=True)\n time.sleep(10)\n\n if verbose:\n print \"starting webserver\"\n vbox_s.os.serve_directory(\"~/server\", port=8080)\n time.sleep(10)\n ip_server = vbox_s.get_ip()\n ip_client1 = vbox_c1.get_ip()\n if verbose:\n print \"ip server: \"+str(ip_server)\n print \"ip client1: \"+str(ip_client1)\n\n vbox_c1.os.open_browser(ip_server+\":8080/rhino1.jpg\")\n vbox_c2.os.open_browser(ip_server+\":8080/rhino2.jpg\")\n vbox_c3.os.open_browser(\"http://\"+ip_server+\":8080/rhino2.jpg\",\n method=forgeosi.RunMethod.run)\n if verbose:\n print \"all webbrowsers opened\"\n time.sleep(30)\n vbox_c1.os.make_dir(\"~/rhinopix\")\n time.sleep(10)\n vbox_c1.os.download_file(ip_server+\":8080/rhino1.jpg\",\n \"~/rhinopix/rhino1.jpg\")\n time.sleep(30)\n # client 2 gets one picture form client 1 via scp\n vbox_c2.os.run_shell_cmd(\n\"\"\"cd\nscp default@\"\"\"+ip_client1+\"\"\":~/rhinopix/rhino1.jpg .\nsleep_hack\nyes\nsleep_hack\n12345\n\"\"\", gui=True)\n\n vbox_s.stop_network_trace()\n vbox_s.stop(confirm=forgeosi.StopConfirm.xfce)\n vbox_c1.stop_network_trace()\n vbox_c1.stop()\n vbox_c2.stop()\n vbox_c3.stop()\n\n if verbose:\n print \"machines stopped\"\n vbox_c1.log.write_xml_log(output+\"/log_c1.xml\")\n vbox_c2.log.write_xml_log(output+\"/log_c2.xml\")\n vbox_c3.log.write_xml_log(output+\"/log_c3.xml\")\n vbox_s.log.write_xml_log(output+\"/log_s.xml\")\n #vbox_c1.export(path=output+\"/disk_c1.img\", raw=True)\n #vbox_c2.export(path=output+\"/disk_c2.img\", raw=True)\n #vbox_c3.export(path=output+\"/disk_c3.img\", raw=True)\n #vbox_s.export(path=output+\"/disk_s.img\", raw=True)\n\n vbox_c1.cleanup_and_delete()\n vbox_c2.cleanup_and_delete()\n vbox_c3.cleanup_and_delete()\n vbox_s.cleanup_and_delete()", "def run(devices=None, flags=None, **kw):\n if devices is None:\n devices = []\n elif isinstance(devices, str):\n devices = [devices]\n if flags is None:\n flags = []\n lsblk = kw.get('lsblk', 'lsblk')\n return backtick([lsblk, '-J', '-O', '-p'] + flags + devices)", "def run(self):\r\n\r\n self.wait_answer(['vagrant', 'ssh', self.machine, '-c', self.cmd])", "def start(config, machines):\n logging.info(\"Start writing QEMU config files for cloud / edge\")\n\n # Get the SSH public key\n with open(\"%s.pub\" % (config[\"ssh_key\"]), \"r\", encoding=\"utf-8\") as f:\n ssh_key = f.read().rstrip()\n f.close()\n\n # --------------------------------------------------------------------------------------------\n # NOTE\n # If an error occurs in the following lines, please:\n # 1. Comment this part of the code between the two ---- lines out\n # 2. Set the \"bridge_name\" variable to the name of your bridge (e.g. br0, virbr0, etc.)\n # 3. Set the gateway variable to the IP of your gateway (e.g. 
10.0.2.2, 192.168.122.1, etc)\n # --------------------------------------------------------------------------------------------\n # Find out what bridge to use\n bridge = find_bridge(config, machines[0], \"br0\")\n bridge_name = \"br0\"\n if bridge == 0:\n bridge = find_bridge(config, machines[0], \"virbr0\")\n bridge_name = \"virbr0\"\n if bridge == 0:\n logging.error(\"ERROR: Could not find a network bridge\")\n sys.exit()\n\n # Get gateway address\n output, error = machines[0].process(\n config, \"ip route | grep ' %s '\" % (bridge_name), shell=True\n )[0]\n if error != [] or output == []:\n logging.error(\"ERROR: Could not find gateway address\")\n sys.exit()\n\n gateway = 0\n pattern = re.compile(r\"(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\")\n gatewaylists = [pattern.findall(line) for line in output]\n\n if bridge_name == \"br0\":\n # For br0, pick gateway of machine\n gateway = gatewaylists[0][0]\n else:\n # For virbr0\n for gatewaylist in gatewaylists:\n if len(gatewaylist) > 1:\n if gateway != 0:\n logging.error(\"ERROR: Found multiple gateways\")\n sys.exit()\n\n gateway = gatewaylist[1].rstrip()\n # --------------------------------------------------------------------------------------------\n\n cc = config[\"infrastructure\"][\"cloud_cores\"]\n ec = config[\"infrastructure\"][\"edge_cores\"]\n pc = config[\"infrastructure\"][\"endpoint_cores\"]\n\n period = 100000\n pinnings = []\n\n for machine in machines:\n # Counter for pinning vcpu to physical cpu\n start_core = 0\n\n # Clouds\n for ip, name in zip(\n machine.cloud_controller_ips + machine.cloud_ips,\n machine.cloud_controller_names + machine.cloud_names,\n ):\n with open(\".tmp/domain_%s.xml\" % (name), \"w\", encoding=\"utf-8\") as f:\n memory = int(1048576 * config[\"infrastructure\"][\"cloud_memory\"])\n\n if config[\"infrastructure\"][\"cpu_pin\"]:\n pinnings = [\n ' <vcpupin vcpu=\"%i\" cpuset=\"%i\"/>' % (a, b)\n for a, b in zip(range(cc), range(start_core, start_core + cc))\n ]\n start_core += cc\n\n f.write(\n DOMAIN\n % (\n name,\n memory,\n cc,\n period,\n int(period * config[\"infrastructure\"][\"cloud_quota\"]),\n \"\\n\".join(pinnings),\n bridge_name,\n config[\"infrastructure\"][\"base_path\"],\n name,\n config[\"infrastructure\"][\"cloud_read_speed\"],\n config[\"infrastructure\"][\"cloud_write_speed\"],\n config[\"infrastructure\"][\"cloud_read_speed\"],\n config[\"infrastructure\"][\"cloud_write_speed\"],\n config[\"infrastructure\"][\"base_path\"],\n name,\n )\n )\n f.close()\n\n with open(\".tmp/user_data_%s.yml\" % (name), \"w\", encoding=\"utf-8\") as f:\n hostname = name.replace(\"_\", \"\")\n f.write(USER_DATA % (hostname, hostname, name, name, ssh_key, name, ip, gateway))\n f.close()\n\n # Edges\n for ip, name in zip(machine.edge_ips, machine.edge_names):\n with open(\".tmp/domain_%s.xml\" % (name), \"w\", encoding=\"utf-8\") as f:\n memory = int(1048576 * config[\"infrastructure\"][\"edge_memory\"])\n\n if config[\"infrastructure\"][\"cpu_pin\"]:\n pinnings = [\n ' <vcpupin vcpu=\"%i\" cpuset=\"%i\"/>' % (a, b)\n for a, b in zip(range(ec), range(start_core, start_core + ec))\n ]\n start_core += ec\n\n f.write(\n DOMAIN\n % (\n name,\n memory,\n ec,\n period,\n int(period * config[\"infrastructure\"][\"edge_quota\"]),\n \"\\n\".join(pinnings),\n bridge_name,\n config[\"infrastructure\"][\"base_path\"],\n name,\n config[\"infrastructure\"][\"edge_read_speed\"],\n config[\"infrastructure\"][\"edge_write_speed\"],\n config[\"infrastructure\"][\"edge_read_speed\"],\n 
config[\"infrastructure\"][\"edge_write_speed\"],\n config[\"infrastructure\"][\"base_path\"],\n name,\n )\n )\n f.close()\n\n with open(\".tmp/user_data_%s.yml\" % (name), \"w\", encoding=\"utf-8\") as f:\n hostname = name.replace(\"_\", \"\")\n f.write(USER_DATA % (hostname, hostname, name, name, ssh_key, name, ip, gateway))\n f.close()\n\n # Endpoints\n for ip, name in zip(machine.endpoint_ips, machine.endpoint_names):\n with open(\".tmp/domain_%s.xml\" % (name), \"w\", encoding=\"utf-8\") as f:\n memory = int(1048576 * config[\"infrastructure\"][\"endpoint_memory\"])\n\n if config[\"infrastructure\"][\"cpu_pin\"]:\n pinnings = [\n ' <vcpupin vcpu=\"%i\" cpuset=\"%i\"/>' % (a, b)\n for a, b in zip(range(pc), range(start_core, start_core + pc))\n ]\n start_core += pc\n\n f.write(\n DOMAIN\n % (\n name,\n memory,\n pc,\n period,\n int(period * config[\"infrastructure\"][\"endpoint_quota\"]),\n \"\\n\".join(pinnings),\n bridge_name,\n config[\"infrastructure\"][\"base_path\"],\n name,\n config[\"infrastructure\"][\"endpoint_read_speed\"],\n config[\"infrastructure\"][\"endpoint_write_speed\"],\n config[\"infrastructure\"][\"endpoint_read_speed\"],\n config[\"infrastructure\"][\"endpoint_write_speed\"],\n config[\"infrastructure\"][\"base_path\"],\n name,\n )\n )\n f.close()\n\n with open(\".tmp/user_data_%s.yml\" % (name), \"w\", encoding=\"utf-8\") as f:\n hostname = name.replace(\"_\", \"\")\n f.write(USER_DATA % (hostname, hostname, name, name, ssh_key, name, ip, gateway))\n f.close()\n\n # Base image(s)\n for ip, name in zip(machine.base_ips, machine.base_names):\n with open(\".tmp/domain_%s.xml\" % (name), \"w\", encoding=\"utf-8\") as f:\n\n f.write(\n DOMAIN\n % (\n name,\n 1048576,\n 1,\n 0,\n 0,\n \"\",\n bridge_name,\n config[\"infrastructure\"][\"base_path\"],\n name,\n 0,\n 0,\n 0,\n 0,\n config[\"infrastructure\"][\"base_path\"],\n name,\n )\n )\n f.close()\n\n with open(\".tmp/user_data_%s.yml\" % (name), \"w\", encoding=\"utf-8\") as f:\n hostname = name.replace(\"_\", \"\")\n f.write(USER_DATA % (hostname, hostname, name, name, ssh_key, name, ip, gateway))\n f.close()", "def remote_console(ctx, app_name, ssh_opts, ssh_cmd):\n gigalixir_app.remote_console(ctx.obj['host'], app_name, ssh_opts, ssh_cmd)", "def main():\n # the `GenericDriver` is a good place to start if your platform is not supported by a \"core\"\n # platform drivers\n conn = GenericDriver(**MY_DEVICE)\n conn.open()\n\n print(conn.channel.get_prompt())\n print(conn.send_command(\"show run | i hostname\").result)\n\n # IMPORTANT: paging is NOT disabled w/ GenericDriver driver!\n conn.send_command(\"terminal length 0\")\n print(conn.send_command(\"show run\").result)\n conn.close()\n\n # Context manager is a great way to use scrapli, it will auto open/close the connection for you:\n with GenericDriver(**MY_DEVICE) as conn:\n result = conn.send_command(\"show run | i hostname\")\n print(result.result)", "def main():\n utils.vip_main(LightAgent, version=__version__)", "def main():\n badger = connect_badger()\n safe = ApeSafe(badger.testMultisig.address)\n helper = ApeSafeHelper(badger, safe)\n\n for guest_list_key in guest_lists_by_id:\n guestList = helper.contract_from_abi(\n badger.getGuestList(guest_list_key).address,\n \"VipCappedGuestListBbtcUpgradeable\",\n VipCappedGuestListBbtcUpgradeable.abi,\n )\n console.print(\n f\"Set guest root to [green]{root}[/green] on Guest list [yellow]{guest_list_key} ({guestList.address})[/yellow]\"\n )\n guestList.setGuestRoot(root)\n assert guestList.guestRoot() == root\n\n 
helper.publish()", "def main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=\"Runs a remote IOC server.\",\n )\n\n parser.add_argument(\"--pv_prefix\", required=True, type=six.text_type,\n help=\"The PV prefix of this instrument.\")\n parser.add_argument(\"--subsystem_prefix\", type=six.text_type,\n default=\"REMIOC:\",\n help=\"The subsystem prefix to use for this remote IOC server\")\n parser.add_argument(\"--gateway_pvlist_path\", type=six.text_type,\n default=os.path.normpath(\n os.path.join(os.getenv(\"ICPCONFIGROOT\"), \"AccessSecurity\", \"gwremoteioc.pvlist\")),\n help=\"The path to the gateway pvlist file to generate\")\n parser.add_argument(\"--gateway_acf_path\", type=six.text_type,\n default=os.path.normpath(\n os.path.join(os.getenv(\"ICPCONFIGROOT\"), \"AccessSecurity\", \"gwremoteioc.acf\")),\n help=\"The path to the gateway access security file to generate\")\n parser.add_argument(\"--gateway_restart_script_path\", type=six.text_type,\n default=DEFAULT_GATEWAY_START_BAT,\n help=\"The path to the script to call to restart the remote ioc gateway\")\n\n args = parser.parse_args()\n\n FILEPATH_MANAGER.initialise(os.path.normpath(os.getenv(\"ICPCONFIGROOT\")), \"\", \"\")\n\n serve_forever(\n args.pv_prefix,\n args.subsystem_prefix,\n args.gateway_pvlist_path,\n args.gateway_acf_path,\n args.gateway_restart_script_path\n )", "def trace_start(self, tracefile=None, interface=None, capsize=None, clients=None):\n self.trace_stop()\n if tracefile:\n self.tracefile = tracefile\n else:\n self.tracefile = \"%s/%s_%d.cap\" % (self.tmpdir, self.tracename, self.traceidx)\n self.traceidx += 1\n if not self.notrace:\n if len(self.nfsdebug) or len(self.rpcdebug):\n self.nfs_debug_enable()\n self.tracefiles.append(self.tracefile)\n\n if clients is None:\n clients = self.clients\n\n if interface is None:\n interface = self.interface\n\n opts = \"\"\n if interface is not None:\n opts += \" -i %s\" % interface\n\n if capsize:\n opts += \" -C %d\" % capsize\n\n hosts = self.ipaddr\n for cobj in clients:\n hosts += \" or %s\" % cobj.ipaddr\n\n cmd = \"%s%s -n -B %d -s 0 -w %s host %s\" % (self.tcpdump, opts, self.tbsize, self.tracefile, hosts)\n self.run_cmd(cmd, sudo=True, dlevel='DBG2', msg=\"Trace start: \", wait=False)\n self.traceproc = self.process\n\n # Make sure tcpdump has started\n out = self.traceproc.stderr.readline()\n if not re.search('listening on', out):\n time.sleep(1)\n if self.process.poll() is not None:\n raise Exception(out)\n return self.tracefile", "def test_02_set_trace(self):\n time.sleep(0.2) # allows debugger to start\n self.ikpdb.run_script()\n\n i_msg = self.ikpdb.receive()\n self.assertEqual(i_msg['command'], 'programBreak', \"Unexpected message received.\")\n self.assertEqual(i_msg['result']['executionStatus'], 'stopped', \"Unexpected break.\")\n self.assertEqual(i_msg['frames'][0]['line_number'], 14, \"broke on unexpected line number.\")", "def run_on_remote_slaves(slave_host_name, cmd):\n proc = _launch_on_remote_host(slave_host_name, _get_remote_slaves_cmd(cmd))\n return _get_result(proc)", "def run(targets, fname, block=True):", "def start_tracing(self,\n bdevs: list = [],\n buffer: Size = None,\n trace_file_size: Size = None,\n timeout: timedelta = None,\n label: str = None,\n shortcut: bool = False):\n\n if len(bdevs) == 0:\n disks = TestRun.dut.disks\n for disk in disks:\n bdevs.append(disk.system_path)\n\n buffer_range = range(1, 1025)\n trace_file_size_range = range(1, 100000001)\n timeout_range 
= range(1, 4294967296)\n\n command = 'iotrace' + (' -S' if shortcut else ' --start-tracing')\n command += (' -d ' if shortcut else ' --devices ') + ','.join(bdevs)\n\n if buffer is not None:\n if not int(buffer.get_value(Unit.MebiByte)) in buffer_range:\n raise CmdException(f\"Given buffer is out of range {buffer_range}.\")\n command += ' -b ' if shortcut else ' --buffer '\n command += f'{int(buffer.get_value(Unit.MebiByte))}'\n\n if trace_file_size is not None:\n if not int(trace_file_size.get_value(Unit.MebiByte)) in trace_file_size_range:\n raise CmdException(f\"Given size is out of range {trace_file_size_range}.\")\n command += ' -s ' if shortcut else ' --size '\n command += f'{int(trace_file_size.get_value(Unit.MebiByte))}'\n\n if timeout is not None:\n if not int(timeout.total_seconds()) in timeout_range:\n raise CmdException(f\"Given time is out of range {timeout_range}.\")\n command += ' -t ' if shortcut else ' --time '\n command += f'{int(timeout.total_seconds())}'\n\n if label is not None:\n command += ' -l ' if shortcut else ' --label ' + f'{label}'\n\n self.pid = str(TestRun.executor.run_in_background(command))\n TestRun.LOGGER.info(\"Started tracing of: \" + ','.join(bdevs))\n # Make sure there's a >0 duration in all tests\n time.sleep(2)", "def run_on_remote_host(slave_host_name, cmd):\n proc = _launch_on_remote_host(slave_host_name, cmd)\n return _get_result(proc)", "def test_blockdev_list(self):\n self.unittest_command([_STRATIS_CLI, \"blockdev\", \"list\"], 0, True, False)", "def run_remote_guest(ip, domain, command):\n\n cmd = 'python %s %s \"%s\"' % (CONSOLE_APP_PATH, domain, command)\n\n return run_remote(ip, cmd)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the total distance-to-table cost. Input: waypoint; output: scalar cost.
def table_cost(self, waypt):
    feature = self.environment.table_features(waypt)
    feature_idx = self.feat_list.index('table')
    return feature*self.weights[feature_idx]
[ "def distance(self, start, goal):\n ### START: 1e\n return self.cost_map[start]\n ### END: 1e", "def evaluate_solution(self, route):\n dist_matrix = self.dist_matrix\n prev = route[-1]\n total = 0.0\n for node in route:\n total += dist_matrix[prev][node]\n prev = node\n return total", "def calculate_total_cost(state):\r\n\r\n ### STUDENT CODE GOES HERE ###\r\n manh_d=calculate_manhattan_dist(state)\r\n costfn=manh_d+state.cost\r\n return costfn", "def get_tour_cost(tour, cost_matrix):\n # Calculate cost of the new tour\n total_cost = 0\n # Get start point\n _from = tour[0]\n # Do not iterate over start and add return to destination\n _tour = tour[1:] + tour[:1]\n for _to in _tour:\n total_cost += cost_matrix[_from][_to]\n _from = _to\n\n return total_cost", "def cost(self):\n return self._link.cost(self._direction)", "def heuristic_cost_estimate(self, node):\n # TODO: Return the heuristic cost estimate of a node\n \n d=self.distance(node,self.goal)\n \n return d", "def get_total_cost(self):\n dvs = [norm(dv) for dv in self._dvs]\n return sum(dvs, 0 * u.km / u.s)", "def tour_cost(g: Graph, tour: List[int]) -> float:\n return sum([g.edge_weight(tour[i], tour[i+1]) for i in range(len(tour)-1)])", "def total_cost(self):\n return self.heuristic() + self.backward_cost", "def calculate_cost_of_route(graph, route):\n try:\n return sum(graph[route[i]][route[(i+1) % len(graph)]]['weight'] for i in range(len(graph)))\n except KeyError:\n raise ValueError('The passed route is invalid for given graph.')", "def _calculate_cost(self):\n self.destination.set_parking_cost()\n self.cost_to_park = self.destination.parking_cost \n return self.distance * self.mile_rate + self.cost_to_park", "def cost_to_all_cells(filename, src_waypoint, output_filename):\n\n # Load and display the level.\n level = load_level(filename)\n # show_level(level)\n\n # Retrieve the source coordinates from the level.\n src = level['waypoints'][src_waypoint]\n \n # Calculate the cost to all reachable cells from src and save to a csv file.\n costs_to_all_cells = dijkstras_shortest_path_to_all(src, level, navigation_edges)\n save_level_costs(level, costs_to_all_cells, output_filename)", "def get_heuristic_cost(gameState, heuristic='manhattan'):\n cost = 0\n board = gameState.board\n for i in range(3):\n for j in range(3):\n cost = cost + get_heuristic_distance(board[i][j], i, j, heuristic)\n return cost", "def cost(config, dist):\n cost = np.sum([dist[config[i]][config[(i + 1) % len(config)]] for i in range(len(config))])\n return cost", "def calc_distance(self, tour):\r\n\r\n # Determines the distance from first city until last city.\r\n distance = 0\r\n for city in range(self.cities - 1):\r\n distance += self.dist_mat[tour[city], tour[city + 1]]\r\n\r\n # Also include last connection (from last to first city)\r\n distance += self.dist_mat[tour[-1], tour[0]]\r\n return distance", "def total_cost(self):\n return sum(self.edges[e].V for e in self.edges)", "def heuristic_cost(self, node):\n score = 0\n target_positions = self.tgt_positions[:]\n px, py = node.get_player_pos()\n mintx, minty = 0, 0\n\n for bx, by in node.box_positions:\n distance = float(\"inf\") # Set distance to be infinity to start\n for tx, ty in target_positions:\n man_dist = abs(bx - tx) + abs(by - ty)\n if man_dist < distance:\n distance = man_dist\n mintx, minty = tx, ty\n target_positions.remove((mintx, minty))\n score += distance\n return score", "def compute_total_distance(road_map):\n # sqrt((x1-x2)^2 + (y1-y2)^2)\n import math\n import copy\n distance_road_map 
= copy.deepcopy(road_map)\n distance_road_map.append(road_map[0])\n total_distance = 0\n for city in range(0, len(road_map)):\n city_a = distance_road_map[city][2], distance_road_map[city][3]\n city_b = distance_road_map[city + 1][2], distance_road_map[city + 1][3]\n distance = math.sqrt(((float(city_a[0]) - float(city_b[0])) ** 2) + ((float(city_a[1]) - float(city_b[1])) ** 2))\n total_distance += distance\n\n return total_distance", "def _total_cost(self, data, lmbda):\n\t\tcost = 0.0\n\t\tfor inpt, goal in data:\n\t\t\ta = self._feed_forward(inpt)\n\t\t\tcost += self.cost.fn(a, goal) / len(data)\n\t\tcost += (0.5 * (lmbda/len(data)) *\n\t\t\t\tsum(np.linalg.norm(w)**2 for w in self.weights))\n\t\treturn cost" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the total coffee (EE orientation) cost. Input: waypoint; output: scalar cost.
def coffee_cost(self, waypt):
    feature = self.environment.coffee_features(waypt)
    feature_idx = self.feat_list.index('coffee')
    return feature*self.weights[feature_idx]
[ "def get_total_cost(self):\n cost = self.pizza_base.cost\n for topping in self.toppings:\n cost += topping.cost\n return cost", "def _calculate_cost(self):\n self.destination.set_parking_cost()\n self.cost_to_park = self.destination.parking_cost \n return self.distance * self.mile_rate + self.cost_to_park", "def tour_cost(g: Graph, tour: List[int]) -> float:\n return sum([g.edge_weight(tour[i], tour[i+1]) for i in range(len(tour)-1)])", "def get_tour_cost(tour, cost_matrix):\n # Calculate cost of the new tour\n total_cost = 0\n # Get start point\n _from = tour[0]\n # Do not iterate over start and add return to destination\n _tour = tour[1:] + tour[:1]\n for _to in _tour:\n total_cost += cost_matrix[_from][_to]\n _from = _to\n\n return total_cost", "def cost(self):\n return self._link.cost(self._direction)", "def total_cost(self):\n return self.heuristic() + self.backward_cost", "def get_total_cost(self):\n dvs = [norm(dv) for dv in self._dvs]\n return sum(dvs, 0 * u.km / u.s)", "def total_cost(self):\n return sum(self.edges[e].V for e in self.edges)", "def calculate_total_cost(state):\r\n\r\n ### STUDENT CODE GOES HERE ###\r\n manh_d=calculate_manhattan_dist(state)\r\n costfn=manh_d+state.cost\r\n return costfn", "def get_cost(self):\n pizza_cost = 0\n\n for ingridient in self._ingridients:\n pizza_cost += ingridient.get_cost()\n\n return pizza_cost", "def get_cost(self) -> float:\n if self.pulp_problem.status == pulp.LpStatusNotSolved: # Not solved\n raise ValueError(\"Cannot get the cost of an unsolved problem\")\n return sum(\n self.cooked.instance_prices[ic] * self.cooked.map_res[app, ic].varValue\n for ic in self.cooked.instances_res\n for app in self.system.apps\n ) + sum(\n self.cooked.instance_prices[ic]\n * self.cooked.map_dem[app, ic, wl].varValue\n * self.load_hist[wl]\n for ic in self.cooked.instances_dem\n for app in self.system.apps\n for wl in self.load_hist.keys()\n )", "def heuristic_cost_estimate(self, node):\n # TODO: Return the heuristic cost estimate of a node\n \n d=self.distance(node,self.goal)\n \n return d", "def single_astar_cost(maze, state, prize_location):\n cost_to_reach_node = state.cost\n estimate_cost_to_prize = maze.manhattan_distance(state.id, prize_location)\n \n return cost_to_reach_node + estimate_cost_to_prize", "def path_cost(path):\n if len(path) < 3:\n return 0\n else:\n action, total_cost = path[-2]\n return total_cost", "def cost(self):\r\n return sum([player.cost for player in self._squad])/10.0", "def getCost(self):\n\t\tself.cost = 0\t# Set the cost to 0 so we can add to it\n\t\tfor r in self.unassigned:\t# For all unassigned reservations ...\n\t\t\tself.cost += r.p1\t\t# Add the P1 cost to the total cost\n\t\tfor r,c in self.resCars.items():\t# For all reservations which are in a neighbouring zone ...\n\t\t\tif self.carZones[c].getName() != r.getZone():\n\t\t\t\tself.cost += r.p2\t\t\t# Add the P2 cost to the total cost\n\n\t\treturn self.cost", "def get_total_cost(self):\n cost = 0\n for pizza in self._pizzas:\n cost += pizza.get_total_cost()\n return cost", "def calculate_cost_of_route(graph, route):\n try:\n return sum(graph[route[i]][route[(i+1) % len(graph)]]['weight'] for i in range(len(graph)))\n except KeyError:\n raise ValueError('The passed route is invalid for given graph.')", "def estimate_cost(self, board):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replan the trajectory from start to goal given weights.
def replan(self, start, goal, goal_pose, weights, T, timestep, seed=None):
    assert weights is not None, "The weights vector is empty. Cannot plan without a cost preference."
    self.weights = weights
    waypts = self.trajOpt(start, goal, goal_pose, traj_seed=seed)
    waypts_time = np.linspace(0.0, T, self.num_waypts)
    traj = Trajectory(waypts, waypts_time)
    return traj.upsample(int(T/timestep) + 1)
[ "def set_recurrent_weights(self, weights):\n self.w_rec.weight = nn.Parameter(torchify(weights))", "def lerp(self, end, weight): # real signature unknown; restored from __doc__\n pass", "def lerp_(self, end, weight): # real signature unknown; restored from __doc__\n pass", "def plan(self, start, goal, env):\n # construct graph\n graph = self.construct(env)\n # find collision free point in graph closest to start_point\n min_dist = float(\"inf\")\n for node in graph.keys():\n dist = math.sqrt(\n (node.x - start.position.x) ** 2 + (node.y - start.position.y) ** 2\n )\n traj = Trajectory(\n [RobotState(position=node), RobotState(position=start.position)]\n )\n if dist < min_dist and (env.get_traj_status(traj)):\n min_dist = dist\n s = node\n # find collision free point in graph closest to end_point\n min_dist = float(\"inf\")\n for node in graph.keys():\n dist = math.sqrt(\n (node.x - goal.position.x) ** 2 + (node.y - goal.position.y) ** 2\n )\n traj = Trajectory(\n [RobotState(position=node), RobotState(position=goal.position)]\n )\n if dist < min_dist and (env.get_traj_status(traj)):\n min_dist = dist\n e = node\n # add start_point to path\n path = [start]\n traj = Trajectory(path)\n # perform astar search\n p = astar(graph, s, e)\n if len(p.path) == 1:\n return traj\n else:\n traj.path.extend(p.path)\n # add end_point to path\n traj.path.append(goal)\n return traj", "def update_target_model(self):\n self.target_network.set_weights(self.q_network.get_weights())\n # vedere se funziona invece questo\n #for t, e in zip(self.target_network.trainable_variables,\n # self.primary_network.trainable_variables): t.assign(t * (1 - TAU) + e * TAU)", "def set_rev_grad_weight(\n self,\n weight: float,\n ) -> None:\n self.weight = weight\n return", "def set_weights(self, weights):\n self.actor_critic.load_state_dict(weights)\n self.alpha_optimizer.step()\n self.alpha = self.log_alpha.detach().exp()\n\n # Update target networks by polyak averaging.\n self.iter += 1\n self.update_target_networks()", "def set_weights(self, weights):\n self.weights = copy.deepcopy(weights)", "def set_recurrent_weights(self, weights):\n self.rnn_layer.weight_hh_l0 = nn.Parameter(torchify(weights))", "def plan(self, start_config, goal_config=None):\n\n # Compute the closest velocity bin for this starting configuration\n idx = tf.squeeze(self._compute_bin_idx_for_start_velocities(start_config.speed_nk1()[:, :, 0])).numpy()\n\n # Convert waypoints for this velocity bin into world coordinates\n self.waypt_configs_world[idx] = self.system_dynamics.to_world_coordinates(start_config, self.waypt_configs[idx],\n self.waypt_configs_world[idx],\n mode='assign')\n # Setup world coordinate tensors if needed\n self._ensure_world_coordinate_tensors_exist(goal_config)\n\n if goal_config is None:\n waypt_configs, horizons, trajectories_lqr, trajectories_spline, controllers = \\\n self._plan_to_all_waypoints(idx, start_config)\n else:\n waypt_configs, horizons, trajectories_lqr, trajectories_spline, controllers = \\\n self._plan_to_a_waypoint(idx, start_config, goal_config)\n \n trajectories_lqr.update_valid_mask_nk()\n return waypt_configs, horizons, trajectories_lqr, trajectories_spline, controllers", "def _update_sparse_rewards(self, points_this_step):\n raise NotImplementedError(\"TODO\")", "def adjust_weight(self, new_weight):\n self.weight = new_weight", "def plan(self):\n self.start.cost = 0\n self.tree.add(self.start)\n for i in range(self.max_iter):\n #Generate a random node (rnd_node)\n rnd = self.get_random_node()\n # Get nearest 
node\n nearest_node = self.tree.nearest(rnd)\n # Get new node by connecting rnd_node and nearest_node\n new_node = self.steer(nearest_node, rnd)\n # If path between new_node and nearest node is not in collision\n if not self.map.collision(nearest_node.p,new_node.p):\n #add the node to tree\n self.add(new_node)\n #Return path if it exists\n if not self.goal.parent: path = None\n else: path = self.final_path()\n return path, self.goal.cost", "def update(self):\n self.weight_mom[self.index] = self.sub_weight_mom\n self.weight[self.index] = self.sub_weight", "def update(self, index, weight, grad, state):\n weight[:] += grad * self.rescale_grad\n state[:] = weight", "def step(self):\n weight = self._get_weight() if self.iteration > self.warmup else 0.0\n if self.iteration < self.early_stop_iter:\n self._weight = weight\n self._weight = min(self._weight, self.max_weight)\n self._weight = max(self._weight, self.min_weight)\n self.iteration += 1", "def update(self, env, trajectory):\n self.cfg = config.cfg\n self.trajectory = trajectory\n\n # update cost\n # self.cost.env = env\n # self.cost.cfg = config.cfg\n # if len(self.env.objects) > 0:\n # self.cost.target_obj = self.env.objects[self.env.target_idx]\n\n # update optimizer\n self.optim = Optimizer(env, self.cost)\n\n # load grasps if needed\n if self.cfg.goal_set_proj:\n print(\"Not implemented\")\n # if self.cfg.scene_file == \"\" or self.cfg.traj_init == \"grasp\":\n # self.load_grasp_set(env)\n # self.setup_goal_set(env)\n # else:\n # self.load_goal_from_scene()\n\n # self.grasp_init(env)\n # self.learner = Learner(env, trajectory, self.cost)\n else:\n self.trajectory.interpolate_waypoints()\n self.history_trajectories = []\n self.info = []\n self.ik_cache = []", "def slow_down(traj):\n\n new_traj = RobotTrajectory()\n new_traj.joint_trajectory = traj.joint_trajectory\n n_joints = len(traj.joint_trajectory.joint_names)\n n_points = len(traj.joint_trajectory.points)\n\n spd = 0.2 # Lower this when using the Gazebo Robot\n\n for i in range(n_points):\n new_traj.joint_trajectory.points[i].time_from_start = traj.joint_trajectory.points[i].time_from_start / spd\n\n # rospy.loginfo(type(traj.joint_trajectory.points[i]))\n v = list(new_traj.joint_trajectory.points[i].velocities)\n a = list(new_traj.joint_trajectory.points[i].accelerations)\n p = list(new_traj.joint_trajectory.points[i].positions)\n\n for j in range(n_joints):\n # rospy.loginfo(type(new_traj.joint_trajectory.points[i].velocities))\n v[j] = traj.joint_trajectory.points[i].velocities[j] * spd\n a[j] = traj.joint_trajectory.points[i].accelerations[j] * spd**2\n p[j] = traj.joint_trajectory.points[i].positions[j]\n\n # new_traj.joint_trajectory.points[i].accelerations[j] = traj.joint_trajectory.points[i].accelerations[j] * spd\n # new_traj.joint_trajectory.points[i].positions[j] = traj.joint_trajectory.points[i].positions[j]\n\n v = tuple(v)\n a = tuple(a)\n p = tuple(p)\n\n new_traj.joint_trajectory.points[i].velocities = v\n new_traj.joint_trajectory.points[i].accelerations = a\n new_traj.joint_trajectory.points[i].positions = p\n\n # rospy.loginfo( new_traj.joint_trajectory.points[i].velocities[j])\n # rospy.loginfo( new_traj.joint_trajectory.points[i].accelerations[j])\n # rospy.loginfo( new_traj.joint_trajectory.points[i].positions[j])\n return new_traj", "def updateWeights(self, initialInputs):\n self.firstLayer.updateWeight(initialInputs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the least divisor of a number, above some floor.
def least_divisor(num, floor=2):
    assert num >= floor
    trial = floor
    while num % trial != 0:
        trial += 1
    return trial
[ "def floor(n: float) -> int:\n return (int(n//1))", "def smallestDivisor(self, nums: List[int], threshold: int) -> int:\n low, high = 1, max(nums)\n smallest = float('inf')\n while low <= high:\n mid = (low + high) // 2\n s = sum(ceil(num / mid) for num in nums)\n if s <= threshold:\n high = mid - 1\n smallest = min(smallest, mid)\n else:\n low = mid + 1\n return smallest", "def _least_divisor_limit(n):\n return int(math.sqrt(n)) + 1", "def get_smallest_divisible_number(max_factor):\n res = 1\n for factor_i in range(1, max_factor + 1):\n res = least_common_multiple(res, factor_i)\n return res", "def floor(x) -> int:\n pass", "def which_floor(n:int) -> int:\n return(n // 4 + min(1, n % 4))", "def divisible_by_all_lower(max_divisor):\n smallest = 1\n pl = Prime()\n prime_divisors = pl.up_to(max_divisor)\n\n for p in prime_divisors:\n temp = p\n while temp <= max_divisor:\n temp *= p\n smallest *= p\n\n return smallest", "def get_smallest_divisible_number_brute_force(max_factor):\n number_i = max_factor\n while True:\n divisible = True\n for factor_i in range(1, max_factor+1):\n if number_i % factor_i > 0:\n divisible = False\n break\n if divisible:\n return number_i\n number_i += 1", "def min_even_divisor(num):\n i = 2\n while i <= num:\n if (num % i == 0):\n return i\n i += 1", "def smallest_factor(n):\n sqrt=n**0.5\n i=2\n while i<=sqrt:\n if n%i==0:\n return i #If we get here, return i as the value.\n i+=1\n return n #If we get through the whole while loop, return n.", "def floor(x):\n return 0.0", "def which_num(n:int) -> int:\n floor = which_floor(n)\n return(n - (floor - 1) * 4)", "def ceil_division(dividend: int, divisor: int) -> int:\n return -(-dividend // divisor)", "def ifloor(x):\n\n return np.floor(x).astype(int)", "def least_factor(n):\n if n == 0:\n return 0\n if n % 1 > 0 or n * n < 2:\n return 1\n if n % 2 == 0:\n return 2\n if n % 3 == 0:\n return 3\n if n % 5 == 0:\n return 5\n m = math.sqrt(n)\n i = 7\n while i <= m:\n if n % i == 0:\n return i\n if n % (i + 4) == 0:\n return i + 4\n if n % (i + 6) == 0:\n return i + 6\n if n % (i + 10) == 0:\n return i + 10\n if n % (i + 12) == 0:\n return i + 12\n if n % (i + 16) == 0:\n return i + 16\n if n % (i + 22) == 0:\n return i + 22\n if n % (i + 24) == 0:\n return i + 24\n i += 30\n return n", "def min_divider(n):\n for i in range(2, n+1):\n if n % i == 0:\n return i", "def topfloor(p):\n return p == 5", "def divide_ceil(a, b):\n q, r = divmod(a, b)\n if r > 0:\n q += 1\n return q", "def smallest_factor_ge(n: int, min_factor: int, brute_force: int = 5):\n assert min_factor <= n\n\n # this shortcut force shortcut costs 1us max\n for factor in range(min_factor, min(min_factor + brute_force, n)):\n if n % factor == 0:\n return factor\n else:\n return min(filter(partial(le, min_factor),\n sympy.ntheory.divisors(n, generator=True)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines if a number has a nontrivial divisor.
def has_nontrivial_divisor(num):
    divisor = least_divisor(num)
    return bool(divisor < num)
[ "def is_deficient_number(number: int) -> bool:\n return get_sum_of_divisors(number) < number", "def divisible(number, divisor):\n try:\n number = int(number)\n divisor = int(divisor)\n return number % divisor == 0\n except:\n return False", "def isDivisible(dividend: int, divisor: int) -> bool:\n assert divisor != 0\n return ((dividend % divisor) == 0)", "def check_natural_divisible(number, divisors):\n for divired in divisors:\n if number % divired == 0:\n return True", "def not_divisible_by(m, n):\n return n % m != 0", "def isperfect(n:Integral) -> bool:\r\n return n == sum(factors(n))", "def perfect_number(n):\n divisors = find_divisors(n)\n divisors.remove(n)\n sum_divisors = sum(divisors)\n return sum_divisors == n", "def is_perfect(number):\n validate_integers(number)\n if number < 1:\n return False\n\n return len(num_and_sum_of_div(number)) == 1", "def isprime_division(n):\n _validate_int(n)\n if n < 2:\n return False\n limit = n**0.5\n for divisor in primes():\n if divisor > limit: break\n if n % divisor == 0: return False\n return True", "def is_divisible_by(divisor: Union[int, float]) -> Callable[[Union[int, float]], bool]:\n return lambda dividend: dividend % divisor == 0", "def is_multiple_of(x, y):\n try:\n return x % y == 0\n except ZeroDivisionError:\n return False", "def perfect( n ):\n return sum(divisorsr(n,1)) == n", "def is_abundant_number(number: int) -> bool:\n return get_sum_of_divisors(number) > number", "def is_factor(n, f):\n return n % f == 0", "def is_defecient(n):\n return sod(n) < 2*n and n > 0", "def is_abundant(number):\n if number < sum(find_divisors(number)):\n return True\n else:\n return False", "def is_divisible(x,y):\n if x % y ==0:\n return true\n else:\n return false", "def isHarmoDiv(n:Integral) -> bool:\r\n facts = factors(n)\r\n return int(harmean(facts)) == harmean(facts)", "def is_amicable(n):\n div_sum_n = divisor_sum(n)\n return n == divisor_sum(div_sum_n) and n != div_sum_n" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a cache of factorizations and precompute primes. bound: the bound of numbers that the factorizer is expected to deal with.
def __init__(self, bound=10**6):
    self.bound = bound
    self.cache = {}
    self._update_primes()
[ "def prime_factors(n):\n if n >= (prime_set_max*prime_set_max):\n # this algorithm doesn't work\n print('prime_factors fails. Insufficient precomputed primes')\n return None\n n0 = n\n d = {}\n P = []\n while (n0 > 1):\n found = False\n for p in prime_set:\n q,r = divmod(n0,p)\n if r == 0:\n # p divides n0\n if p not in d:\n d[p] = 0\n P.append(p)\n d[p] = d[p] + 1\n n0 = q\n found = True\n break\n if not found:\n # maybe n0 is a prime?\n if isPrime_basic(n0):\n p = n0\n q,r = divmod(n0,p)\n if r == 0:\n # p divides n0\n if p not in d:\n d[p] = 0\n P.append(p)\n d[p] = d[p] + 1\n n0 = q\n found = True\n break\n if not found:\n print('prime_factors error: n0 = %s' % n0)\n return None\n return d", "def _precompute_local_factors(self, bound, prec):\n pass", "def _precompute_local_factors(self, bound, prec=None):\n from sage.modular.all import delta_qexp\n T = self._T\n T2 = T**2\n f = delta_qexp(bound)\n for p in prime_range(bound):\n if not self._lf.has_key(p):\n self._lf[p] = 1 - f[p]*T + (p**11)*T2", "def _update_primes(self):\n from math import ceil, sqrt\n\n # we only need primes up to sqrt(bound) because if none of those primes divide a number under bound, then bound must be prime\n if hasattr(self, \"list_primes\") and self.list_primes[-1] ** 2 > self.bound:\n return\n self.list_primes = all_primes_under(ceil(sqrt(self.bound)))\n self.set_primes = set(self.list_primes)", "def factorize(num):\n for p in primes():\n if p > num: return\n c = 0\n while num % p == 0:\n num //= p\n c += 1\n yield c", "def prime_factorise(x):\n factors = []\n f = factor(x)\n while f:\n while x % f == 0:\n factors.append(f)\n x //= f\n f = factor(x)\n if x != 1:\n factors.append(x)\n return factors", "def all_primes_under(bound):\n\n def is_prime_with_cache(num, cache):\n \"\"\"\n This is a subroutine for dynamic programming.\n Given a cache of primes below the square root of a number, determine if it is prime.\n The cache must be of ascending order.\n \"\"\"\n from math import sqrt, ceil\n\n for _p in cache:\n if _p > ceil(sqrt(num)):\n break\n if num % _p == 0:\n return False\n cache.append(num)\n return True\n\n # use a list for keeping primes in ascending order\n cache_primes = []\n for candidate in tqdm(range(2, bound), desc=f\"Calculating primes under {bound}\"):\n is_prime_with_cache(candidate, cache_primes)\n return cache_primes[:]", "def setFactors(self, number):\n self.number = number\n length = len(self.primes)\n p = self.primes[:self.closestPrimeIndex(self.primes, self.number**0.5) + 1]\n\n self.facts = cuda_factor(self.number, p)\n\n c = 1\n for fact in self.facts:\n c = c * fact\n\n if c != self.number:\n num = self.number / c\n for fact in self.facts:\n while num % fact == 0:\n num = num / fact\n\n if num != 1:\n self.facts.append(num)", "def prime_factors(number):\n\n if number <= 1:\n return Counter()\n\n factor = pollard_rho(number)\n if factor == number:\n return Counter([number])\n\n return prime_factors(factor) + prime_factors(number // factor)", "def factorizer(nums, nprocs):\n def worker(nums, out_q):\n\t\"\"\" Maintains results' dictionary\"\"\"\n\toutdict = {}\n\tfor n in nums:\n\t outdict[n] = factorize(n)\n\tout_q.put(outdict)\n\n out_q = Queue()\n chunksize = int(math.ceil(len(nums) / float(nprocs)))\n procs = []\n\n for i in range(nprocs):\n\tp = Process(target=worker, args=(nums[chunksize * i:chunksize * (i + 1)], out_q))\n\tprocs.append(p)\n\tp.start()\n\n resultdict = {}\n for i in range(nprocs):\n\tresultdict.update(out_q.get())\n\n for p in procs:\n p.join()\n\n return 
resultdict", "def factor(N):\n global F\n if N in F:\n return F[N]\n if N<-1:\n return [-1,abs(N)]\n if N<3:\n return N\n if N%2==0: #This reduces the factorization Dict significantly, and gets half of all values.\n return [2,N//2]\n #TODO: Replace with multiprocess structure, starting with trial division.\n #TODO: Add BailliePSW and/or one of its submethods after trial division.\n found = fermat_factorization(N)\n F[N] = found\n return found", "def get_prime_factors(num):\n test, primes = 2, {}\n while test <= num:\n if num % test == 0:\n num /= test\n # Increase the value of the prime key or add key\n primes[test] = 1 + (primes[test] if test in primes else 0)\n else:\n test += 1\n return primes", "def cheap_factor(x):\n ret = list()\n i = 2\n while i <= x:\n if x % i == 0:\n ret.append(i)\n while x % i == 0:\n x //= i\n i += 1\n return ret", "def primeFactorize(n, powers=True, primes=False):\n n = int(n)\n if not primes:\n primes = Eratosthenes(n)\n pfacts = []\n if n in [0,1]:\n print('0 and 1 have no prime factors')\n return []\n else:\n while n != 1:\n for p in primes:\n d, r = n / p, n % p\n c = 0\n while not r:\n c += 1\n n = d\n d, r = n / p, n % p\n if c:\n pfacts.append([p, c])\n if powers:\n return pfacts\n else:\n newlist = []\n for i in pfacts:\n newlist.append(i[0])\n return newlist", "def _grow_primes():\n global _primes\n global _primes_len\n global _primes_max\n val = _primes_max\n not_found = True\n while not_found:\n val += 1\n if all(map(lambda x: val % x != 0, _primes)):\n _primes.append(val)\n _primes_len += 1\n _primes_max = val\n not_found = False", "def factoriza(n):\n\n if n in Factorizaciones:\n # Si ya conocemos la factorización, no necesitamos recalcular\n return Factorizaciones[n]\n\n F=dict()\n if n in ConjuntoPrimos:\n # ¿Redundante? Las factorizaciones de primos las conocíamos\n F[n]=1 # Añadimos {n:1} al diccionario.\n Factorizaciones[n]=F #\n return F\n\n MaxPrimo=Primos[-1]\n\n for p in Primos:\n if n%p==0: # Si encontramos un divisor primo:\n m=n//p # n = m*p\n F=factoriza(m) # Factoriza el cociente\n if len(F)==0:\n return\n # Para evitar excepción por clave inválida\n if p in F: # Si aparece otro factor p...\n F[p]=F[p]+1 # ...aumenta el exponente en 1\n else:\n F[p]=1 # Si no, crea la clave\n print(\"adding\", n,F)\n Factorizaciones[n]=F\n return F\n if MaxPrimo*MaxPrimo < n:\n print(\"NO PUEDO FACTORIZAR\")\n return dict()\n else: # Enconramos un nuevo primo\n return {n:1}", "def prime_factors(num, start=2):\n candidates = range(start, int(sqrt(num)) + 1)\n factor = next((x for x in candidates if (num % x == 0)), None)\n return ([factor] + prime_factors(num // factor, factor) if factor else [num])", "def __init__(self, capacity, defVal, loadfactor, growthFactor):\n self.capacity = capacity\n self.defVal = defVal\n self.loadfactor = loadfactor\n self.growthFactor = growthFactor\n self._prime_multiplier = 37\n self.table = [defVal]*self.capacity\n self.rehashing = False", "def compute_primes(bound):\r\n \r\n answer = list(range(2, bound))\r\n for divisor in range(2, bound):\r\n for i in answer:\r\n if i % divisor == 0 and not i == divisor:\r\n answer.remove(i)\r\n\r\n return answer" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the list and set of primes, up to some bound.
def _update_primes(self):
    from math import ceil, sqrt

    # we only need primes up to sqrt(bound) because if none of those primes
    # divide a number under bound, then bound must be prime
    if hasattr(self, "list_primes") and self.list_primes[-1] ** 2 > self.bound:
        return
    self.list_primes = all_primes_under(ceil(sqrt(self.bound)))
    self.set_primes = set(self.list_primes)
[ "def _grow_primes():\n global _primes\n global _primes_len\n global _primes_max\n val = _primes_max\n not_found = True\n while not_found:\n val += 1\n if all(map(lambda x: val % x != 0, _primes)):\n _primes.append(val)\n _primes_len += 1\n _primes_max = val\n not_found = False", "def compute_primes(bound):\r\n \r\n answer = list(range(2, bound))\r\n for divisor in range(2, bound):\r\n for i in answer:\r\n if i % divisor == 0 and not i == divisor:\r\n answer.remove(i)\r\n\r\n return answer", "def __init__(self, bound=10**6):\n self.bound = bound\n self.cache = {}\n self._update_primes()", "def primes_up_to(limit):\n def seive_next(seive, start):\n for index in range(start, limit + 1):\n if seive[index] == None:\n # mark item as prime\n seive[index] = index\n # mark multiples of that number as non prime\n for marker in range(index*2, limit + 1, index):\n seive[marker] = 0\n return index\n return False\n\n # create 1 extra to prevent zero based arithmetic\n seive = [None] * (limit + 1)\n seive[0] = 0\n seive[1] = 0\n start = 2\n while start:\n start = seive_next(seive, start)\n return filter(lambda x: x != 0, seive)", "def all_primes_under(bound):\n\n def is_prime_with_cache(num, cache):\n \"\"\"\n This is a subroutine for dynamic programming.\n Given a cache of primes below the square root of a number, determine if it is prime.\n The cache must be of ascending order.\n \"\"\"\n from math import sqrt, ceil\n\n for _p in cache:\n if _p > ceil(sqrt(num)):\n break\n if num % _p == 0:\n return False\n cache.append(num)\n return True\n\n # use a list for keeping primes in ascending order\n cache_primes = []\n for candidate in tqdm(range(2, bound), desc=f\"Calculating primes under {bound}\"):\n is_prime_with_cache(candidate, cache_primes)\n return cache_primes[:]", "def update_primal_bound(self, bound_value):\n if math.isnan(bound_value):\n return\n if self.objective_sense == minimize:\n self.primal_bound = min(bound_value, self.primal_bound)\n self.primal_bound_improved = (\n self.primal_bound < self.primal_bound_progress[-1]\n )\n else:\n self.primal_bound = max(bound_value, self.primal_bound)\n self.primal_bound_improved = (\n self.primal_bound > self.primal_bound_progress[-1]\n )\n self.primal_bound_progress.append(self.primal_bound)\n self.primal_bound_progress_time.append(get_main_elapsed_time(self.timing))\n if self.primal_bound_improved:\n self.update_gap()", "def main(bound):\n\n # Generates a list such that could_be_prime[i] is true so long as i\n # could still be prime. 
We only care about factors <= sqrt(n), so\n # the array stops there.\n #\n # Note we're marking 0 and 1 as \"could be prime,\" but we never access\n # those values so it doesn't matter\n could_be_prime = [True] * (bound + 1)\n\n # List of confirmed primes returned by function\n confirmed_primes = []\n\n # Start at 2, the first prime number, and continue until reaching the bound\n for i in range(2, bound + 1):\n # If we reach a number, and it can still be prime...\n if could_be_prime[i]:\n # ...it is confirmed prime, so we add it to the list.\n confirmed_primes.append(i)\n\n # From there, we knock out all multiples below the bound as \"not prime\"\n j = 2 * i\n while j < bound + 1:\n could_be_prime[j] = False\n j = j + i\n\n # After completing this for all values below the bound, we have our list of primes!\n # Now we just take the sum\n prime_sum = sum(confirmed_primes)\n print(prime_sum)\n return prime_sum", "def compute_primes(bound):\n \n answer = list(range(2, bound))\n\n for divisor in range(2, bound):\n # remove all multiple of divisor from answer\n for i in range(len(answer)):\n if answer[i] != 1:\n if answer[i] != divisor:\n if answer[i] % divisor == 0:\n answer[i] = 1\n \n return([num for num in answer if num != 1])", "def resize_table(self):\n nextPrime = self.get_next_prime()\n if nextPrime > -1:\n oldValues = self.values\n self.values = [None] * nextPrime\n self.count = 0\n for i in range(len(oldValues)):\n if oldValues[i] is not None and (not oldValues[i].is_empty):\n self.insert(oldValues[i].value)", "def primesUpTo(num):\n primes = [1]\n for i in range(2, num + 1):\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n primes.append(i)\n\n return primes", "def prime_generator(upper_limit: int, start: int=2) -> typing.Generator[int, None, None]:\n found = collections.defaultdict(list)\n current = start\n while current < upper_limit:\n if current in found:\n for factor in found[current]:\n found[factor + current].append(factor)\n del found[current]\n else:\n found[current * current].append(current)\n yield current\n current += 1", "def primes(number):\r\n \r\n # INITIALIZE\r\n primes = [2]\r\n \r\n # WORK THROUGH LIST\r\n for number in range(3, number):\r\n index = 0\r\n is_prime = True\r\n \r\n # CHECK DIVISIBILITY BY PRIME NUMBERS\r\n while index < len(primes) and primes[index] < sqrt(number) + 1:\r\n \r\n # DIVISIBLE BY OTHER PRIME -> NOT PRIME\r\n if number % primes[index] == 0:\r\n is_prime = False\r\n break\r\n \r\n index += 1\r\n\r\n # IF NOT DIVISIBLE BY OTHER PRIMES -> APPEND TO PRIMES \r\n if is_prime:\r\n primes.append(number)\r\n \r\n return primes", "def primes(upper_bound):\r\n if upper_bound >= 2:\r\n yield 2\r\n sieve_bound = (upper_bound - 1) // 2\r\n sieve = [True for _ in range(sieve_bound)]\r\n crosslimit = (round(upper_bound ** 0.5) - 1) // 2\r\n for i in range(crosslimit):\r\n if sieve[i]:\r\n n = 2 * i + 3\r\n\r\n j = 3\r\n m = (n * j - 3) // 2\r\n while m < sieve_bound:\r\n sieve[m] = False\r\n j += 2\r\n m = (n * j - 3) // 2\r\n\r\n for i in range(sieve_bound):\r\n if sieve[i]:\r\n yield 2 * i + 3", "def update_prob(self, minimum):\n # Complete the ranking\n if np.any(self.total.values() == 0):\n return\n\n improvements = np.array(\n self.improvements.values()) / self.total.values()\n nonzero = np.nonzero(~np.isnan(improvements))\n\n total = float(improvements[nonzero].sum())\n\n if total == 0:\n return\n\n prob_local = improvements[nonzero] / total\n dim = len(self.improvements)\n prob = np.zeros(dim)\n prob[nonzero] = prob_local\n # add a minimum 
value\n prob = np.maximum(minimum, prob)\n # Check that it its sum is 1\n total = float(prob.sum())\n prob = prob / total\n\n self.cumProb = prob.cumsum()\n # Init again the list and the count", "def checkPrimes():\n for prime in primes:\n for combo in digitReplacementCombos(prime):\n if numberOfPrimeDigitReplacements(prime,combo) == 8:\n return prime", "def prime(low, high):\r\n\r\n global global_prime_numbers\r\n collect_prime_numbers(high)\r\n result = []\r\n for i in global_prime_numbers:\r\n if low <= i <= high:\r\n result.append(i)\r\n return result", "def proportion_of_primes(bound, **args):\n v = []\n k = 0.0\n for n in range(1,bound+1):\n if is_prime(n):\n k += 1\n v.append((n,k/n))\n return plot_step_function(v, **args)", "def mult_parities_python(bound, verbose=False):\n v = [None]*bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound,2))+1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity+int(1))%int(2)\n if verbose:\n print \"loop %s (of %s); last = %s\"%(k,loops, len(last))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v", "def sum_of_primes(upper_bound: int)-> int:\n\n if upper_bound <= 0:\n raise ValueError(\"upper_bound must be a positive integer!\")\n\n num_list = list(range(0, upper_bound + 1))\n prime_list = num_list[:]\n\n for i in range(0, int(sqrt(upper_bound)) + 1):\n if num_list[i] == 0 or num_list[i] == 1:\n prime_list.remove(num_list[i])\n elif is_prime_number(num_list[i]):\n prime_list = remove_multiples(num_list[i], prime_list)\n\n return sum(prime_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restore the original number from a factorization.
def restore_from_factorization(factorization):
    retval = 1
    for _base, _power in factorization.items():
        retval *= int(_base) ** int(_power)
    return retval
[ "def restore(self):\n self.cur_val = self.max", "def revert_value(self):\r\n\r\n self.value_var.set(self.previous)", "def restore_value(self):\n\n self._value = self._saved_value.detach().clone()", "def restore_state(self):\n self._restore_input()\n self._restore_output()", "def _renormalize(self,new_partition):\n factor = self.partition/new_partition\n self.f *= factor\n self.f_squared *= factor\n # save the old partition function\n self._old_partition = self.partition\n self.partition = new_partition", "def restore_field(self):\r\n self.__field = self.__backup_field\r\n self.__player_pos = self.__backup_pos", "def restore_normalization(image):\r\n image = image * 128\r\n image = image + 128\r\n return image", "def restore(self, *args, **kwargs):\n raise NotImplementedError()", "def restore(self, iteration: int) -> torch.nn.Module:\n\n sparsity_path = os.path.join(self.netadapt_dir, f'iter_{iteration}_sparsity.yml')\n weights_path = os.path.join(self.netadapt_dir, f'iter_{iteration}.pth')\n\n for path in (sparsity_path, weights_path):\n if not os.path.exists(path):\n raise FileNotFoundError(f'{path} is required for restoring the model')\n\n config = self.load_config(sparsity_path)\n\n self.prune_subgraph(config)\n self.model.load_state_dict(torch.load(weights_path, map_location='cpu'))\n\n self.iteration = config['iteration'] + 1\n\n log.info(f\"Restored from iteration {iteration}\")\n\n return self.model", "def revers_number(number: int) -> int:\n return int(str(number)[::-1])", "def factor(N):\n global F\n if N in F:\n return F[N]\n if N<-1:\n return [-1,abs(N)]\n if N<3:\n return N\n if N%2==0: #This reduces the factorization Dict significantly, and gets half of all values.\n return [2,N//2]\n #TODO: Replace with multiprocess structure, starting with trial division.\n #TODO: Add BailliePSW and/or one of its submethods after trial division.\n found = fermat_factorization(N)\n F[N] = found\n return found", "def reassign_val(self):\n self.val = Term.values[self.x]", "def factor_reparto(self, factor_reparto):\n\n self._factor_reparto = factor_reparto", "def restore_factory(self):\n self.send_command(api.restore_factory)", "def restore_quality_profile(self, backup, organization):", "def inverse(self):\n a=self.numerator\n self.numerator=self.denominator\n self.denominator=a\n return(self)", "def _restore(self):\n\n # check restore\n if not self.restore:\n return\n\n # restore\n settings = self._settings\n settings.data_format = self.data_format\n settings.byte_order = self.byte_order\n self.restore = False", "def normalize(factor):\n factor = factor * (1/factor.sum())\n return factor", "def restoreProfile( self, profile ):\n return profile.restore(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the number of different divisors given a factorization.
def get_num_divisors(factorization):
    from functools import reduce

    powers = list(factorization.values())
    num_divisors = reduce(lambda x, y: x * y, [_p + 1 for _p in powers])
    return num_divisors
[ "def num_divisors(n):\n pf = prime_factorization(n)\n fc = frequency_count(pf).values()\n return product(f + 1 for f in fc)", "def get_number_of_divisors(x):\n if x == 1:\n return 1\n elif is_prime(x):\n return 2\n\n prime_decomposition = get_prime_decomposition(x)\n prime = prime_decomposition[0]\n power_of_primes = [1] # this will contain the power of each different primes\n\n for i in range(1, len(prime_decomposition)):\n if prime != prime_decomposition[i]:\n prime = prime_decomposition[i]\n power_of_primes += [1]\n else:\n power_of_primes[len(power_of_primes) - 1] = power_of_primes[len(power_of_primes) - 1] + 1\n\n number_of_divisors = 1\n\n # the formula to get the number of divisors using the product of prime factors\n for i in range(0, len(power_of_primes)):\n number_of_divisors *= (power_of_primes[i] + 1)\n\n return number_of_divisors", "def count_divisors(n):\n # for n = (p ** a) * (q ** b) * ... * (r ** c),\n # number of positive divisors of n = (a + 1) * (b + 1) * ... * (c + 1)\n return reduce(mul, (e + 1 for (p, e) in factorize(n)), 1)", "def find_factors(num):\n validate_integers(num)\n zero_divisors_error(num)\n factors = set()\n for potential_factor in range(1, int(num**.5) + 1):\n if num % potential_factor == 0:\n factors.add(potential_factor)\n factors.add(int(num/potential_factor))\n return factors", "def get_sum_proper_divisors(factorization):\n sum_divisors = 1\n original_number = 1\n for _base, _power in factorization.items():\n factors = [_base**k for k in range(0, _power + 1)]\n sum_divisors *= sum(factors)\n original_number *= _base**_power\n return sum_divisors - original_number", "def get_all_divisors(factorization):\n divisors = [1]\n for _base, _power in factorization.items():\n divisors = [\n _div * (_base**_p) for _p in range(0, _power + 1) for _div in divisors\n ]\n return divisors", "def factor(self):\n\t\tif (self.isprime()): return n\n\t\tfor fact in [GaussInt(1,1), GaussInt(2,1), GaussInt(1,2), \n\t\t\t GaussInt(3,0), GaussInt(3,2), GaussInt(2,3)]:\n\t\t\tif self%fact == 0: return fact\n\t\treturn self.factorPR() # Needs work - no guarantee that a prime factor will be returned", "def count_divisors(n):\r\n\r\n # initialize with one for the number itself\r\n count = 1\r\n # only need to go up to half n since already counted n\r\n for i in range(1, int(n/2 + 1)):\r\n if n % i == 0:\r\n count += 1\r\n\r\n return count", "def find_proper_divisors(number):\n validate_integers(number)\n results = find_factors(number)\n results.discard(number)\n return results", "def sum_of_divisors(n):\n return reduce(mul, ((p ** (k + 1) - 1) / (p - 1) for p, k in factorization(n) ), 1) - n", "def getFactors(n):\r\n factors = []\r\n for i in range(2, round(sqrt(n)) + 1):\r\n if n % i == 0:\r\n factors.append(i)\r\n factors.append(int(n / i))\r\n factors.append(n)\r\n return sorted(factors)", "def calculate_sum_divisors(number):\n prime_factors_list = prime_factors(number)\n print(\"List of prime factors of {} : {}\".format(n, ', '.join(str(divisor) for divisor in prime_factors_list)), file=sys.stderr)\n prime_factors_dict= dict((x,prime_factors_list.count(x)) for x in set(prime_factors_list))\n pp=pprint.PrettyPrinter(stream=sys.stderr, compact=True)\n pp.pprint(prime_factors_dict)\n product = 1\n for key, value in prime_factors_dict.items():\n product = product * (((key ** (value+1)) - 1)//(key - 1))\n return product", "def divisors2(n):\n factors = primes.findFactors(n)\n divisors = set([1])\n for i in range(1, int(len(factors) / 2) + 1):\n for j in 
itertools.combinations(factors, i):\n p = product(j)\n if p in divisors:\n continue\n divisors.add(p)\n divisors.add(n / p)\n return divisors", "def prime_factors(number):\n\n if number <= 1:\n return Counter()\n\n factor = pollard_rho(number)\n if factor == number:\n return Counter([number])\n\n return prime_factors(factor) + prime_factors(number // factor)", "def count_prime_divisors(n):\n divisors = []\n for i in eratostenes(2, n):\n while n != 1:\n if n % i == 0:\n divisors.append(i)\n n /= i\n else:\n break\n return divisors", "def prime_factors(x):\n factors = [] \n for i in range(2,x+1):\n count = 0\n while x % i == 0:\n count += 1 \n x /= i\n if count > 0:\n factors.append((i,count))\n return factors", "def prime_factors(n):\n factors = []\n powers = []\n f = 2\n while n > 1:\n if n % f == 0:\n if factors and factors[-1] == f:\n powers[-1] += 1\n else:\n factors.append(f)\n powers.append(1)\n n /= f\n else:\n f += 1\n return factors, powers", "def is_factor(a, b):\n if b % a == 0:\n return 1\n return 0", "def prime_factors(num):\n\n factors = ()\n if is_prime(num):\n return (num,)\n\n mid = num // 2 # Only need to check first half of numbers.\n for val in range(2, mid + 1):\n if num % val == 0:\n # Treat the quotient and the divisor as separate branches of a tree:\n # if either of these values are not prime, recurse down them until \n # their prime factors are determined.\n result = num // val \n\n # Do not allow duplicates to be added to the current iteration.\n if result in factors or val in factors: \n continue\n if is_prime(val): \n factors += (val,)\n else: \n factors += prime_factors(val)\n if is_prime(result):\n factors += (result,)\n else:\n factors += prime_factors(result)\n if reduce(lambda x, y: x * y, factors) == num:\n break\n\n return factors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the divisors of a number given its factorization.
def get_all_divisors(factorization):
    divisors = [1]
    for _base, _power in factorization.items():
        divisors = [
            _div * (_base**_p) for _p in range(0, _power + 1) for _div in divisors
        ]
    return divisors
[ "def find_proper_divisors(number):\n validate_integers(number)\n results = find_factors(number)\n results.discard(number)\n return results", "def find_factors(num):\n validate_integers(num)\n zero_divisors_error(num)\n factors = set()\n for potential_factor in range(1, int(num**.5) + 1):\n if num % potential_factor == 0:\n factors.add(potential_factor)\n factors.add(int(num/potential_factor))\n return factors", "def get_divisors(input_number: int) -> list:\n list_of_divisors = []\n x = range(1, int(input_number/2) + 1)\n for i in x:\n if input_number % i == 0:\n list_of_divisors.append(i)\n list_of_divisors.append(input_number)\n return list_of_divisors", "def divisors2(n):\n factors = primes.findFactors(n)\n divisors = set([1])\n for i in range(1, int(len(factors) / 2) + 1):\n for j in itertools.combinations(factors, i):\n p = product(j)\n if p in divisors:\n continue\n divisors.add(p)\n divisors.add(n / p)\n return divisors", "def find_divisors(number) -> list:\n list_of_divisors = []\n for numb in range(1, number + 1):\n if number % numb == 0:\n list_of_divisors.append(numb)\n if len(list_of_divisors) != 2 and len(list_of_divisors) != 1:\n list_of_divisors.remove(1)\n list_of_divisors.remove(list_of_divisors[len(list_of_divisors) - 1])\n return list_of_divisors", "def divisors(n):\n if n == 0:\n return []\n else:\n return divisors_from(abs(n), SMALLER_DIVISOR, []) # Return a list of\n # all divisors bigger than (or equal to) n", "def factors(num):\n return list(generateFactors(num))", "def getFactors(n):\r\n factors = []\r\n for i in range(2, round(sqrt(n)) + 1):\r\n if n % i == 0:\r\n factors.append(i)\r\n factors.append(int(n / i))\r\n factors.append(n)\r\n return sorted(factors)", "def proper_divisors(n):\n if n < 2:\n return []\n divisors = {1}\n for i in up_to_sqrt_of(n):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n//i)\n return sorted(list(divisors))", "def factorization(value):\r\n\r\n prime_numbers = prime(2, value)\r\n if value in (0, 1) or value in prime_numbers:\r\n return [value]\r\n\r\n result = []\r\n v = value\r\n for p in prime_numbers:\r\n while v % p == 0:\r\n result.append(p)\r\n v = v // p\r\n if v in prime_numbers:\r\n result.append(v)\r\n v = 1\r\n if v == 1:\r\n break\r\n return result", "def get_divisors(num):\n yield 1\n\n for i in range(2, num / 2 + 1):\n if not num % i:\n yield i", "def prime_factors(number):\n factors = []\n\n # factor == 2\n number, mul = divmul(number, 2)\n factors.extend([2] * mul)\n\n # factor >= 3\n for fac in xrange(3, number + 1, 2):\n number, mul = divmul(number, fac)\n factors.extend([fac] * mul)\n if number == 1:\n break\n\n return factors", "def prime_factors(n):\r\n assert isinstance(n, int)\r\n assert n >= 1\r\n if n == 1:\r\n return []\r\n return prime_divider_list(n)", "def calculate_sum_divisors(number):\n prime_factors_list = prime_factors(number)\n print(\"List of prime factors of {} : {}\".format(n, ', '.join(str(divisor) for divisor in prime_factors_list)), file=sys.stderr)\n prime_factors_dict= dict((x,prime_factors_list.count(x)) for x in set(prime_factors_list))\n pp=pprint.PrettyPrinter(stream=sys.stderr, compact=True)\n pp.pprint(prime_factors_dict)\n product = 1\n for key, value in prime_factors_dict.items():\n product = product * (((key ** (value+1)) - 1)//(key - 1))\n return product", "def distinct_factors(number):\n\n factors = [1]\n for prime, exponents in prime_factors(number).items():\n pfactors = [prime**i for i in range(1, exponents + 1)]\n factors += [i * factor for factor in factors for i in 
pfactors]\n return factors", "def prime_factors(num):\n\n factors = ()\n if is_prime(num):\n return (num,)\n\n mid = num // 2 # Only need to check first half of numbers.\n for val in range(2, mid + 1):\n if num % val == 0:\n # Treat the quotient and the divisor as separate branches of a tree:\n # if either of these values are not prime, recurse down them until \n # their prime factors are determined.\n result = num // val \n\n # Do not allow duplicates to be added to the current iteration.\n if result in factors or val in factors: \n continue\n if is_prime(val): \n factors += (val,)\n else: \n factors += prime_factors(val)\n if is_prime(result):\n factors += (result,)\n else:\n factors += prime_factors(result)\n if reduce(lambda x, y: x * y, factors) == num:\n break\n\n return factors", "def factors(number):\n try:\n if int(number) != float(number):\n raise ValueError(\"number was not an integer.\")\n elif int(number) < 1:\n raise ValueError(\"number was not positive.\")\n else:\n number = int(number)\n except (TypeError, ValueError) as err:\n print(\"ERROR: argument must be a positive integer.\")\n print(\"%s: %s\" % (type(err).__name__, err))\n return\n factors = [1]\n if number != 1:\n factors.append(number)\n for d in range(2, int(number ** 0.5) + 1):\n if number % d == 0:\n factors.append(d)\n if number // d != d:\n factors.append(number // d)\n return sorted(factors)", "def get_prime_factors(num):\n for i in xrange(2, num):\n if num % i == 0 and is_prime(i):\n yield i", "def prime_factors(x):\n factors = [] \n for i in range(2,x+1):\n count = 0\n while x % i == 0:\n count += 1 \n x /= i\n if count > 0:\n factors.append((i,count))\n return factors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the sum of proper divisors given a factorization.
def get_sum_proper_divisors(factorization):
    sum_divisors = 1
    original_number = 1
    for _base, _power in factorization.items():
        factors = [_base**k for k in range(0, _power + 1)]
        sum_divisors *= sum(factors)
        original_number *= _base**_power
    return sum_divisors - original_number
[ "def get_num_divisors(factorization):\n from functools import reduce\n\n powers = list(factorization.values())\n num_divisors = reduce(lambda x, y: x * y, [_p + 1 for _p in powers])\n return num_divisors", "def sum_of_divisors(n):\n return reduce(mul, ((p ** (k + 1) - 1) / (p - 1) for p, k in factorization(n) ), 1) - n", "def get_all_divisors(factorization):\n divisors = [1]\n for _base, _power in factorization.items():\n divisors = [\n _div * (_base**_p) for _p in range(0, _power + 1) for _div in divisors\n ]\n return divisors", "def calculate_sum_divisors(number):\n prime_factors_list = prime_factors(number)\n print(\"List of prime factors of {} : {}\".format(n, ', '.join(str(divisor) for divisor in prime_factors_list)), file=sys.stderr)\n prime_factors_dict= dict((x,prime_factors_list.count(x)) for x in set(prime_factors_list))\n pp=pprint.PrettyPrinter(stream=sys.stderr, compact=True)\n pp.pprint(prime_factors_dict)\n product = 1\n for key, value in prime_factors_dict.items():\n product = product * (((key ** (value+1)) - 1)//(key - 1))\n return product", "def factor(self):\n\t\tif (self.isprime()): return n\n\t\tfor fact in [GaussInt(1,1), GaussInt(2,1), GaussInt(1,2), \n\t\t\t GaussInt(3,0), GaussInt(3,2), GaussInt(2,3)]:\n\t\t\tif self%fact == 0: return fact\n\t\treturn self.factorPR() # Needs work - no guarantee that a prime factor will be returned", "def sum_of_proper_divisors(n):\n if n == 1:\n return 1\n limit = int(n ** 0.5)\n if limit * limit == n:\n answer = 1 + limit\n limit -= 1\n else:\n answer = 1\n divisor_ini, step = (3, 2) if (n & 1) else (2, 1)\n answer += sum(\n divisor + n // divisor\n for divisor in range(divisor_ini, limit + 1, step)\n if n % divisor == 0\n )\n return answer", "def find_proper_divisors(number):\n validate_integers(number)\n results = find_factors(number)\n results.discard(number)\n return results", "def get_sum_of_divisors(number: int) -> int:\n return sum(divisors(number))", "def sum_of_proper_divisors(number):\n hash_hit = _hash.get(number)\n if hash_hit:\n return hash_hit\n computed_sum = sum(util.proper_divisors(number))\n _hash[number] = computed_sum\n return computed_sum", "def is_factor(n, f):\n return n % f == 0", "def find_factors(num):\n validate_integers(num)\n zero_divisors_error(num)\n factors = set()\n for potential_factor in range(1, int(num**.5) + 1):\n if num % potential_factor == 0:\n factors.add(potential_factor)\n factors.add(int(num/potential_factor))\n return factors", "def reduce_by_even_factors(k, factor_list):\n div = k\n for f in factor_list:\n if div == 1: return 1\n if not div % f:\n div /= f\n return div", "def get_sum_divisors(n):\n ret = 1\n for x in range(2, int(math.sqrt(n))+1):\n if n % x == 0:\n ret += x\n if n/x != x:\n ret += n/x\n return ret", "def sum_divisors(n):\n # for n = (p ** a) * ... * (q ** b),\n # sum of positive divisors of n\n # = (p ** (a + 1) - 1) // (p - 1) * ... * (q ** (b + 1) - 1) // (q - 1)\n g = ((p ** (e + 1) - 1) // (p - 1) for p, e in factorize(n))\n return reduce(mul, g, 1)", "def count_divisors(n):\n # for n = (p ** a) * (q ** b) * ... * (r ** c),\n # number of positive divisors of n = (a + 1) * (b + 1) * ... 
* (c + 1)\n return reduce(mul, (e + 1 for (p, e) in factorize(n)), 1)", "def is_factor(a, b):\n if b % a == 0:\n return 1\n return 0", "def num_divisors(n):\n pf = prime_factorization(n)\n fc = frequency_count(pf).values()\n return product(f + 1 for f in fc)", "def isperfect(n:Integral) -> bool:\r\n return n == sum(factors(n))", "def divisors2(n):\n factors = primes.findFactors(n)\n divisors = set([1])\n for i in range(1, int(len(factors) / 2) + 1):\n for j in itertools.combinations(factors, i):\n p = product(j)\n if p in divisors:\n continue\n divisors.add(p)\n divisors.add(n / p)\n return divisors" ]
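For illustration, the entry above leans on the multiplicativity of the divisor-sum function, sigma(p^a * q^b) = sigma(p^a) * sigma(q^b). A minimal standalone sketch restates the function and checks it on 12 = 2^2 * 3, whose proper divisors 1 + 2 + 3 + 4 + 6 sum to 16:

# Standalone restatement of the entry above, plus a sanity check.
def get_sum_proper_divisors(factorization):
    sum_divisors = 1
    original_number = 1
    for base, power in factorization.items():
        # sigma is multiplicative: multiply sigma(p^k) = 1 + p + ... + p^k per prime.
        sum_divisors *= sum(base ** k for k in range(power + 1))
        original_number *= base ** power
    return sum_divisors - original_number

# 12 = 2^2 * 3 -> proper divisors 1, 2, 3, 4, 6 sum to 16.
assert get_sum_proper_divisors({2: 2, 3: 1}) == 16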
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a factorization dict, determine if the original number is a prime.
def is_prime_given_factorization(factorization): if len(factorization.keys()) == 1: if list(factorization.values())[0] == 1: return True return False
[ "def is_prime(number):\n division = 2\n while number % division != 0:\n division += 1\n if division == number:\n return True\n return False", "def is_factor(n, f):\n return n % f == 0", "def is_prime_power(n):\n if len(factor(n)) == 1:\n return True\n return False", "def test_prime_factor(num, res):\n assert _pf.prime_factor(num) == res", "def isprimeF(self,base):\n\t\tif type(base) is not GaussInt:\n\t\t base = GaussInt(base) # Coerce if base not GaussInt (works for int or complex)\n\t\treturn base.powmod(self.norm()-1,self) == GaussInt(1,0)", "def isprimeF(self,base):\n\t\tif (type(base) != type(GaussInt(1,0))):\n\t\t base = GaussInt(base) # Coerce if base not GaussInt (works for int or complex)\n\t\treturn base.powmod(self.norm()-1,self) == GaussInt(1,0)", "def isperfect(n:Integral) -> bool:\r\n return n == sum(factors(n))", "def is_factor(a, b):\n if b % a == 0:\n return 1\n return 0", "def isprime_imperative(number):\n if number < 2: return False\n if number == 2: return True\n if number % 2 == 0: return False\n for i in range(3, 1 + int(math.sqrt(number)), 2):\n if number % i == 0: \n return False\n return True", "def is_prime(self, p):\n raise NotImplementedError", "def primep(n):\n return fermat_test(n) and miller_rabin_test(n)", "def isprime_division(n):\n _validate_int(n)\n if n < 2:\n return False\n limit = n**0.5\n for divisor in primes():\n if divisor > limit: break\n if n % divisor == 0: return False\n return True", "def factor_check(number, factor):", "def isprime( k:int, coprime:int )->bool:\n if k < coprime*coprime: return True\n if k % coprime == 0: return False\n return isprime( k, coprime+2)", "def is_prime_field(self):\n return False", "def isprime(k, coprime):\n if k < coprime*coprime:\n return True\n if k % coprime == 0:\n return False\n return isprime(k, coprime + 2)", "def iscoprime(self, n):\n return n not in self.phi_factors", "def is_prime(n):\n if n < 2:\n return False\n elif n <= _PRIME_LIST[-1]:\n # check to see if the number is in our list of precomputed primes\n index = bisect_left(_PRIME_LIST, n)\n return _PRIME_LIST[index] == n\n else:\n if not is_probable_prime(n):\n return False\n # otherwise, use trial division to ensure primality\n d = trial_division(n)\n return d == n", "def isprime(n):\n _validate_int(n)\n # Deal with trivial cases first.\n if n < 2:\n return False\n elif n == 2:\n return True\n elif n%2 == 0:\n return False\n elif n <= 7: # 3, 5, 7\n return True\n bases = _choose_bases(n)\n flag = miller_rabin(n, bases)\n if flag and len(bases) > 7 and warn_probably:\n import warnings\n warnings.warn(\"number is only probably prime not certainly prime\")\n return flag" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute all the prime numbers below a bound.
def all_primes_under(bound): def is_prime_with_cache(num, cache): """ This is a subroutine for dynamic programming. Given a cache of primes below the square root of a number, determine if it is prime. The cache must be of ascending order. """ from math import sqrt, ceil for _p in cache: if _p > ceil(sqrt(num)): break if num % _p == 0: return False cache.append(num) return True # use a list for keeping primes in ascending order cache_primes = [] for candidate in tqdm(range(2, bound), desc=f"Calculating primes under {bound}"): is_prime_with_cache(candidate, cache_primes) return cache_primes[:]
[ "def compute_primes(bound):\r\n \r\n answer = list(range(2, bound))\r\n for divisor in range(2, bound):\r\n for i in answer:\r\n if i % divisor == 0 and not i == divisor:\r\n answer.remove(i)\r\n\r\n return answer", "def main(bound):\n\n # Generates a list such that could_be_prime[i] is true so long as i\n # could still be prime. We only care about factors <= sqrt(n), so\n # the array stops there.\n #\n # Note we're marking 0 and 1 as \"could be prime,\" but we never access\n # those values so it doesn't matter\n could_be_prime = [True] * (bound + 1)\n\n # List of confirmed primes returned by function\n confirmed_primes = []\n\n # Start at 2, the first prime number, and continue until reaching the bound\n for i in range(2, bound + 1):\n # If we reach a number, and it can still be prime...\n if could_be_prime[i]:\n # ...it is confirmed prime, so we add it to the list.\n confirmed_primes.append(i)\n\n # From there, we knock out all multiples below the bound as \"not prime\"\n j = 2 * i\n while j < bound + 1:\n could_be_prime[j] = False\n j = j + i\n\n # After completing this for all values below the bound, we have our list of primes!\n # Now we just take the sum\n prime_sum = sum(confirmed_primes)\n print(prime_sum)\n return prime_sum", "def prime(low, high):\r\n\r\n global global_prime_numbers\r\n collect_prime_numbers(high)\r\n result = []\r\n for i in global_prime_numbers:\r\n if low <= i <= high:\r\n result.append(i)\r\n return result", "def compute_primes(bound):\n \n answer = list(range(2, bound))\n\n for divisor in range(2, bound):\n # remove all multiple of divisor from answer\n for i in range(len(answer)):\n if answer[i] != 1:\n if answer[i] != divisor:\n if answer[i] % divisor == 0:\n answer[i] = 1\n \n return([num for num in answer if num != 1])", "def primes(upper_bound):\r\n if upper_bound >= 2:\r\n yield 2\r\n sieve_bound = (upper_bound - 1) // 2\r\n sieve = [True for _ in range(sieve_bound)]\r\n crosslimit = (round(upper_bound ** 0.5) - 1) // 2\r\n for i in range(crosslimit):\r\n if sieve[i]:\r\n n = 2 * i + 3\r\n\r\n j = 3\r\n m = (n * j - 3) // 2\r\n while m < sieve_bound:\r\n sieve[m] = False\r\n j += 2\r\n m = (n * j - 3) // 2\r\n\r\n for i in range(sieve_bound):\r\n if sieve[i]:\r\n yield 2 * i + 3", "def collect_prime_numbers(high):\r\n\r\n global global_prime_numbers\r\n\r\n if not global_prime_numbers:\r\n global_prime_numbers = [2]\r\n start = 3\r\n else:\r\n start = global_prime_numbers[-1] + 2\r\n\r\n for val in range(start, high + 1, 2): # only the odd numbers\r\n is_prime = True\r\n sqrt_val = val ** 0.5\r\n for j in range(1, len(global_prime_numbers)): # dividing only by prime numbers, without 2\r\n if global_prime_numbers[j] > sqrt_val: # it's enough to divide to square root\r\n break\r\n if val % global_prime_numbers[j] == 0:\r\n is_prime = False\r\n break\r\n if is_prime:\r\n global_prime_numbers.append(val)", "def primes_up_to(limit):\n def seive_next(seive, start):\n for index in range(start, limit + 1):\n if seive[index] == None:\n # mark item as prime\n seive[index] = index\n # mark multiples of that number as non prime\n for marker in range(index*2, limit + 1, index):\n seive[marker] = 0\n return index\n return False\n\n # create 1 extra to prevent zero based arithmetic\n seive = [None] * (limit + 1)\n seive[0] = 0\n seive[1] = 0\n start = 2\n while start:\n start = seive_next(seive, start)\n return filter(lambda x: x != 0, seive)", "def prime():\n array = []\n for i in range(2, 1000):\n if i % 2 != 0 and i % 3 != 0 and i % 5 != 0 and i % 7 != 0 and i % 
11 != 0 or i == 2 or i == 3 or i == 5 or i == 7 or i == 11:\n array.append(i)\n return array", "def generate_primes(lower_limit, upper_limit):\n\n if not isinstance(lower_limit, int) or not isinstance(upper_limit, \n int):\n raise ValueError(\"Both interval values must be integers\")\n\n if lower_limit < 0 or upper_limit < 0:\n return \"all limit intervals must be positive\"\n\n prime_list = []\n\n for num in range(lower_limit, upper_limit + 1):\n if num > 1:\n for i in range(2, num):\n if num % i == 0:\n break\n else:\n prime_list.append(num)\n return prime_list", "def prime_generator(upper_limit: int, start: int=2) -> typing.Generator[int, None, None]:\n found = collections.defaultdict(list)\n current = start\n while current < upper_limit:\n if current in found:\n for factor in found[current]:\n found[factor + current].append(factor)\n del found[current]\n else:\n found[current * current].append(current)\n yield current\n current += 1", "def ranged_primes(x, y):\r\n if x == 1: x += 1\r\n max_prime = int(math.sqrt(y))\r\n # still don't have a good way to differentiate between just calculating the primes\r\n # and segmenting the sieve\r\n if y < 317:\r\n \tprimes = prime_sieve(y)\r\n return [n for n in range(x, y+1) if n in primes]\r\n else:\r\n \tprimes = prime_sieve(max_prime)\r\n return [n for n in range(x, y+1) if all(n % p for p in primes)]", "def mult_parities_python(bound, verbose=False):\n v = [None]*bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound,2))+1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity+int(1))%int(2)\n if verbose:\n print \"loop %s (of %s); last = %s\"%(k,loops, len(last))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v", "def prime_numbers(_max):\n a = [True] * _max\n for n in range(2, int(mt.sqrt(_max)) + 1):\n if not a[n]:\n continue\n for j in range(n ** 2, _max, n):\n a[j] = False\n return [n for n in range(2, _max) if a[n]]", "def prime_numbers_range(self, lower, upper):\r\n print(\"Prime numbers between \", lower, \" and \", upper, \" are: \")\r\n\r\n my_list = []\r\n # Traversing though the range\r\n for num in range(lower, upper + 1):\r\n # Checking if every number is divisible by their previous all the numbers\r\n if num > 1:\r\n for i in range(2, num):\r\n if num % i == 0:\r\n break\r\n else:\r\n my_list.append(num)\r\n print(my_list)", "def _grow_primes():\n global _primes\n global _primes_len\n global _primes_max\n val = _primes_max\n not_found = True\n while not_found:\n val += 1\n if all(map(lambda x: val % x != 0, _primes)):\n _primes.append(val)\n _primes_len += 1\n _primes_max = val\n not_found = False", "def primesUpTo(num):\n primes = [1]\n for i in range(2, num + 1):\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n primes.append(i)\n\n return primes", "def find_all_primes(x=22):\n allprimes = []\n for i in range(2, x + 1):\n if is_prime(i):\n allprimes.append(i)\n print \"There are %d primes between 2 and %d\" % (len(allprimes), x)\n return allprimes", "def proportion_of_primes(bound, **args):\n v = []\n k = 0.0\n for n in range(1,bound+1):\n if is_prime(n):\n k += 1\n v.append((n,k/n))\n return plot_step_function(v, **args)", "def find_goldbach_numbers(limit):\n goldbach_numbers = set()\n primes = prime_sieve(limit)\n doubled_squares = {i**2 * 2 for i in 
range(1, int(limit**.5))}\n for prime in primes:\n for doubled_square in doubled_squares:\n goldbach_number = doubled_square + prime\n if goldbach_number < limit:\n goldbach_numbers.add(goldbach_number)\n return goldbach_numbers" ]
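For illustration, the entry above uses tqdm only for a progress bar. Below is a dependency-free sketch of the same cached trial-division idea; the name all_primes_under_plain is illustrative rather than taken from the dataset:

def all_primes_under_plain(bound):
    # Keep found primes in ascending order and trial-divide each candidate
    # only by cached primes whose square does not exceed the candidate.
    primes = []
    for candidate in range(2, bound):
        if all(candidate % p for p in primes if p * p <= candidate):
            primes.append(candidate)
    return primes

assert all_primes_under_plain(20) == [2, 3, 5, 7, 11, 13, 17, 19]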
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a subroutine for dynamic programming. Given a cache of primes below the square root of a number, determine if it is prime. The cache must be in ascending order.
def is_prime_with_cache(num, cache): from math import sqrt, ceil for _p in cache: if _p > ceil(sqrt(num)): break if num % _p == 0: return False cache.append(num) return True
[ "def isprime_generator_lazy(number):\n if number < 2: return False\n if number == 2: return True\n if number % 2 == 0: return False\n return not any(\n number % p == 0 \n for p in range(3, int(math.sqrt(number)) + 1, 2)\n )", "def is_probable_prime(n):\n # return all(solovay_strassen(n, a) for a in xrange(2, min(n - 1, 20)))\n return all(miller_rabin(n, a) for a in xrange(2, min(n - 1, 20)))", "def all_primes_under(bound):\n\n def is_prime_with_cache(num, cache):\n \"\"\"\n This is a subroutine for dynamic programming.\n Given a cache of primes below the square root of a number, determine if it is prime.\n The cache must be of ascending order.\n \"\"\"\n from math import sqrt, ceil\n\n for _p in cache:\n if _p > ceil(sqrt(num)):\n break\n if num % _p == 0:\n return False\n cache.append(num)\n return True\n\n # use a list for keeping primes in ascending order\n cache_primes = []\n for candidate in tqdm(range(2, bound), desc=f\"Calculating primes under {bound}\"):\n is_prime_with_cache(candidate, cache_primes)\n return cache_primes[:]", "def primep(n):\n return fermat_test(n) and miller_rabin_test(n)", "def is_prime(number):\n division = 2\n while number % division != 0:\n division += 1\n if division == number:\n return True\n return False", "def isprime( k:int, coprime:int )->bool:\n if k < coprime*coprime: return True\n if k % coprime == 0: return False\n return isprime( k, coprime+2)", "def isprime(k, coprime):\n if k < coprime*coprime:\n return True\n if k % coprime == 0:\n return False\n return isprime(k, coprime + 2)", "def is_prime(n):\n if n < 2:\n return False\n elif n <= _PRIME_LIST[-1]:\n # check to see if the number is in our list of precomputed primes\n index = bisect_left(_PRIME_LIST, n)\n return _PRIME_LIST[index] == n\n else:\n if not is_probable_prime(n):\n return False\n # otherwise, use trial division to ensure primality\n d = trial_division(n)\n return d == n", "def _isPrimeN(n):\r\n if n == 1:\r\n return False\r\n if n == 2:\r\n return True\r\n if n == 3:\r\n return True\r\n if n % 2 == 0:\r\n return False\r\n if n % 3 == 0:\r\n return False\r\n\r\n i = 5\r\n w = 2\r\n\r\n while i * i <= n:\r\n if n % i == 0:\r\n return False\r\n\r\n i += w\r\n w = 6 - w\r\n\r\n return True", "def isprime_imperative(number):\n if number < 2: return False\n if number == 2: return True\n if number % 2 == 0: return False\n for i in range(3, 1 + int(math.sqrt(number)), 2):\n if number % i == 0: \n return False\n return True", "def isSuperprime(nr):\n while nr > 0:\n if not isPrime(nr):\n return False\n nr = nr // 10\n return True", "def isprime_recursive(number):\n def isprime(k, coprime):\n \"\"\"Is k relatively prime to the value coprime?\"\"\"\n if k < coprime*coprime:\n return True\n if k % coprime == 0:\n return False\n return isprime(k, coprime + 2)\n if number < 2: return False\n if number == 2: return True\n if number % 2 == 0: return False\n return isprime(number, 3)", "def primes(number):\r\n \r\n # INITIALIZE\r\n primes = [2]\r\n \r\n # WORK THROUGH LIST\r\n for number in range(3, number):\r\n index = 0\r\n is_prime = True\r\n \r\n # CHECK DIVISIBILITY BY PRIME NUMBERS\r\n while index < len(primes) and primes[index] < sqrt(number) + 1:\r\n \r\n # DIVISIBLE BY OTHER PRIME -> NOT PRIME\r\n if number % primes[index] == 0:\r\n is_prime = False\r\n break\r\n \r\n index += 1\r\n\r\n # IF NOT DIVISIBLE BY OTHER PRIMES -> APPEND TO PRIMES \r\n if is_prime:\r\n primes.append(number)\r\n \r\n return primes", "def Solution10():\n return sum(get_primes(2000000))", "def is_goldbach(num):\r\n 
\r\n primes = [i for i in range(3, num - 1) if is_prime(i)]\r\n for prime in primes:\r\n idx = 1\r\n while (prime + 2 * int(math.pow(idx, 2))) <= num:\r\n if (prime + 2 * int(math.pow(idx, 2))) == num:\r\n print(num, \"\\t\", prime, \"\\t\", idx, \"\\tTrue\")\r\n return True\r\n idx += 1\r\n# print(num, \"\\t\", prime, \"\\t\", idx)\r\n print(num, \"\\t\", prime, \"\\t\", idx, \"\\tFalse\")\r\n return False", "def is_circular_prime(n, prime_set):\n for x in rotate_digits(n):\n if x not in prime_set:\n return False\n return True", "def is_Prime(n): # Taken from https://rosettacode.org/wiki/Miller%E2%80%93Rabin_primality_test\n if n != int(n):\n return False\n n = int(n)\n # Miller-Rabin test for prime\n if n == 0 or n == 1 or n == 4 or n == 6 or n == 8 or n == 9:\n return False\n\n if n == 2 or n == 3 or n == 5 or n == 7:\n return True\n s = 0\n d = n - 1\n while d % 2 == 0:\n d >>= 1\n s += 1\n assert (2 ** s * d == n - 1)\n\n def trial_composite(a):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2 ** i * d, n) == n - 1:\n return False\n return True\n\n for i in range(8): # number of trials\n a = random.randrange(2, n)\n if trial_composite(a):\n return False\n\n return True", "def prime_10001st(x=10001):\n i = 3\n primes = [2]\n while len(primes) < x:\n i += 2\n if is_prime_check_known(i, primes):\n primes.append(i)\n else:\n return primes[-1]", "def fast_prime(n):\n if n == 1: return 2\n count = 1\n candidate = 1\n\n while count < n:\n candidate += 2\n if fast_is_prime(candidate):\n count += 1\n\n return candidate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all permutations of digits between m and n, in string form.
def permutations_m_to_n_str(bound_m, bound_n): def add(perms, new_digit): """ Add a digit to existing permutations. Assumes that all existing permutations have the same length. """ # base case: no permutation so far if not perms: return [new_digit] # common case perm_length = len(perms[0]) retlist = [] for _perm in perms: new_perms = [ (_perm[:i] + new_digit + _perm[i:]) for i in range(0, perm_length) ] new_perms.append(_perm + new_digit) retlist += new_perms return retlist permutations = [] for _d in range(bound_m, bound_n + 1): permutations = add(permutations, str(_d)) return permutations
[ "def nth_lex_permutation(n, digits):\n\n perms = []\n result = \"\"\n\n # generate list of permutations\n for i in itertools.permutations(range(digits)):\n perms.append(i)\n\n # join the answer together\n for j in perms[n - 1]:\n result += str(j)\n\n return int(result)", "def gen_permutations(n):\n max_int = '0b' + '1' * n\n for i in range(0, int(max_int, 2)+1):\n yield str(format(i, 'b').zfill(n))", "def permute_words(n, r):\n\n\treturn [\" \".join(map(str, comb)) for comb in permutations(n, r)]", "def permcalculator(n):\r\n\r\n #Factorial is the product of all positive integers less than or equal to n\r\n print(math.factorial(n))\r\n\r\n perms = itertools.permutations(list(range(1, n+1)))\r\n\r\n for counter, perm in enumerate(list(perms)):\r\n permutation = ''\r\n for item in perm:\r\n permutation += str(item) + ' '\r\n print(permutation)", "def get_permutations(x):\n str_x = str(x)\n return [ to_int(tuple) for tuple in itertools.permutations(str_x) ]", "def generate_permutations(n):\n return list(itertools.permutations(range(1, n + 1)))", "def lexicographic_permutations():\n ans = list()\n x = copy.copy(MILLIONTH)\n nums = copy.copy(NUMS)\n while nums:\n a = x // fac(len(nums) - 1)\n x = x % fac(len(nums) - 1)\n # 刚好整除 要退一位 不进位\n a = a - 1 if x == 0 else a\n ans.append(nums[a])\n nums.remove(nums[a])\n return ''.join(str(x) for x in ans)", "def pandigitals(N, base=1):\n\tNUMBERS = list(range(base,N+base))\n\tpandigits = []\n\tfor i in list(itertools.permutations(NUMBERS)):\n\t\tif i[0] != 0:\n\t\t\ttmp = \"\"\n\t\t\tfor j in i:\n\t\t\t\ttmp = tmp + str(j)\n\t\t\tpandigits.append(int(tmp))\n\treturn sorted(pandigits)", "def iter_pandigitals(n):\n if not 1 <= n <= 9:\n return\n for p in permutations(range(n, 0, -1)):\n yield digits_to_number(p)", "def get_permutations(num_items) :\n return list(itertools.permutations(range(num_items), num_items))", "def digitReplacementCombos(n):\n replacements = tuple()\n for y in range(1,len(str(n))):\n indices = [x for x in range(len(str(n))-1)] #Last digit of a prime must be 1,3,7, or 9 so don't try to check digit changes on the last digit since there are not enough options.\n replacements += tuple(combinations(indices,y))\n replacements = [x for x in replacements if isValidReplacementCombo(n,x)]\n return replacements", "def Permutations(s):\n\n\tperms = [s]\n\n\tif len(s) <s= 1:\n\t\treturn perms\n\n\tfor pos, i in enumerate(s):\n\n\t\trest_of_string = s[:pos] + s[pos+1:]\n\n\t\tsub_perms = Permutations(rest_of_string)\n\n\t\tfor sub in sub_perms:\n\t\t\tif i+sub not in perms:\n\t\t\t\tperms.append(i+sub)\n\n\treturn perms", "def rotate_digits(n):\n n = str(n)\n\n for i in range(len(n)):\n n = n[-1] + n[:-1]\n yield int(n)", "def generate_permutations(arr, pos=0):\n if pos == len(arr):\n output.append(''.join(arr))\n return\n for i in range(len(arr)):\n swap(arr, pos, i)\n generate_permutations(arr, pos + 1)\n swap(arr, pos, i)", "def create_nine_digit_product(num):\n result = ''\n counter = 1\n while len(result) < 9:\n result += str(num * counter)\n counter += 1\n if len(result) > 9:\n result = 0\n return result", "def get_rotations(n):\n # need to add len(n) - 1 integers to set\n rotations = {n}\n for i in range(len(str(n)) - 1):\n string = str(n)[i + 1 :] + str(n)[: i + 1]\n rotations.add(int(string))\n return rotations", "def finalPermutation(code):\n return_list = ''\n for i in range(16):\n list = ''\n for j in range(4):\n list += code[DS.ip_1[i * 4 + j] - 1]\n return_list += \"%x\" % int(list, 2)\n return return_list", "def allPandigitals(N, 
base=1):\n\tret = []\n\tfor i in range(base,base+N):\n\t\tret += pandigitals(N, base)\n\treturn ret", "def permute(s, i):\n p = tuple(range(len(s)))\n for j in i.split(\",\"):\n if j.startswith(\"s\"):\n n = int(j[1:])\n s = s[-n:] + s[:-n]\n p = p[-n:] + p[:-n]\n else:\n a, b = tuple(int(i) for i in j[1:].split(\"/\"))\n if j.startswith(\"p\"):\n a, b = p.index(a), p.index(b)\n if a < b:\n s = s[:a] + s[b:b + 1] + s[a + 1:b] + s[a:a + 1] + s[b + 1:]\n p = p[:a] + p[b:b + 1] + p[a + 1:b] + p[a:a + 1] + p[b + 1:]\n else:\n s = s[:b] + s[a:a + 1] + s[b + 1:a] + s[b:b + 1] + s[a + 1:]\n p = p[:b] + p[a:a + 1] + p[b + 1:a] + p[b:b + 1] + p[a + 1:]\n # print(s, j)\n return s" ]
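For illustration, the builder above grows permutations by inserting each new digit into every slot of every existing string. The compact sketch below restates that idea and checks it against itertools for digits 1 through 3; the names permutations_m_to_n_str_check and it_permutations are illustrative rather than taken from the dataset:

from itertools import permutations as it_permutations

def permutations_m_to_n_str_check(bound_m, bound_n):
    # Same insertion strategy as the entry above: place each new digit
    # into every position (including the end) of every existing permutation.
    perms = []
    for d in range(bound_m, bound_n + 1):
        digit = str(d)
        if not perms:
            perms = [digit]
            continue
        perms = [p[:i] + digit + p[i:] for p in perms for i in range(len(p) + 1)]
    return perms

result = permutations_m_to_n_str_check(1, 3)
# Six distinct orderings of "123", matching itertools for the same digits.
assert sorted(result) == sorted("".join(t) for t in it_permutations("123"))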
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a digit to existing permutations. Assumes that all existing permutations have the same length.
def add(perms, new_digit): # base case: no permutation so far if not perms: return [new_digit] # common case perm_length = len(perms[0]) retlist = [] for _perm in perms: new_perms = [ (_perm[:i] + new_digit + _perm[i:]) for i in range(0, perm_length) ] new_perms.append(_perm + new_digit) retlist += new_perms return retlist
[ "def add(tile):\n if tile not in permutation:\n permutation.append(tile)", "def nth_lex_permutation(n, digits):\n\n perms = []\n result = \"\"\n\n # generate list of permutations\n for i in itertools.permutations(range(digits)):\n perms.append(i)\n\n # join the answer together\n for j in perms[n - 1]:\n result += str(j)\n\n return int(result)", "def renumber(self, e, write=False):\n e.insert(0, 2)\n self.permute(e, write)", "def permcalculator(n):\r\n\r\n #Factorial is the product of all positive integers less than or equal to n\r\n print(math.factorial(n))\r\n\r\n perms = itertools.permutations(list(range(1, n+1)))\r\n\r\n for counter, perm in enumerate(list(perms)):\r\n permutation = ''\r\n for item in perm:\r\n permutation += str(item) + ' '\r\n print(permutation)", "def create_nine_digit_product(num):\n result = ''\n counter = 1\n while len(result) < 9:\n result += str(num * counter)\n counter += 1\n if len(result) > 9:\n result = 0\n return result", "def digitReplacementCombos(n):\n replacements = tuple()\n for y in range(1,len(str(n))):\n indices = [x for x in range(len(str(n))-1)] #Last digit of a prime must be 1,3,7, or 9 so don't try to check digit changes on the last digit since there are not enough options.\n replacements += tuple(combinations(indices,y))\n replacements = [x for x in replacements if isValidReplacementCombo(n,x)]\n return replacements", "def generate_permutations(arr, pos=0):\n if pos == len(arr):\n output.append(''.join(arr))\n return\n for i in range(len(arr)):\n swap(arr, pos, i)\n generate_permutations(arr, pos + 1)\n swap(arr, pos, i)", "def add_motif_permutation(self,reduction_index,permutation):\n self.motif_reduction_dict[reduction_index].merged_history_permutations.append(permutation)\n for child in nx.topological_sort(self.digraph):\n for parent in self.digraph.predecessors(child):\n for parent_perm,child_perm in it.product(\n self.motif_reduction_dict[parent].merged_history_permutations,\n self.motif_reduction_dict[child].merged_history_permutations):\n new_perm = child_perm.copy()\n for i,p in enumerate(parent_perm):\n new_perm[i] = child_perm[p]\n if not new_perm in self.motif_reduction_dict[child].merged_history_permutations:\n self.motif_reduction_dict[child].merged_history_permutations.append(new_perm)", "def permute(arr):\n acc = [[]]\n while len(arr) is not 0:\n n = arr.pop() # number to add to every spot in the lists\n acc = [r[0:i] + [n] + r[i:] for r in acc for i in range(0, len(r) + 1)]\n # don't want to return array of digits that start with 0\n return [p for p in acc if p[0] != 0]", "def add(self, number: int) -> None:\n self.nums.append(number)\n self.hash[number] = self.count\n self.count += 1", "def add(self, number):\n if number not in self.nums:\n self.nums[number] = 0\n self.nums[number] += 1", "def related_by_digit_permutation(num_a, num_b):\n from collections import Counter\n\n return Counter(str(num_a)) == Counter(str(num_b))", "def numberOfPrimeDigitReplacements(n,replacement):\n primes = []\n n = str(n)\n for x in '0123456789':\n newN = ''\n for index, digit in enumerate(n):\n if index not in replacement:\n newN += n[index]\n else:\n newN += x\n if newN[0] != '0' and int(newN) in primesSet:\n primes.append(newN)\n return len(primes)", "def perm(n):\n return Singleton.get_instance().perm(n).astype(np.int)", "def ptr_addition(ptrs, permute_idxs):\n\n init = ptrs[permute_idxs[0]]\n\n for idx in permute_idxs[1:]:\n init += ptrs[idx]\n\n return init", "def plus_one(A):\n A[-1] += 1\n for i in reversed(range(1, len(A))):\n if A[i] 
!= 10:\n break\n\n A[i] = 0\n A[i - 1] += 1\n\n if A[0] == 10:\n \"\"\"\n There is a carry-out, so we need one more digit to store the result.\n A slick way to do this is to append 0 at the end of the array,\n and update the first entry to 1.\n \"\"\"\n A[0] = 1\n A.append(0)\n\n return A", "def addDigit(nums,divisor):\n digits = set('0123456789')\n newNums = []\n for num in nums:\n for digit in digits.difference(set(num)):\n if int((digit+num)[:3]) % divisor == 0:\n newNums.append(digit+num)\n return newNums", "def add(*args):\n padded = match_padding(*args)\n x = padded[0]\n res = [0] * len(x)\n for i in range(len(x)):\n index = len(x) - (1 + i)\n res[index] += sum([num[index] for num in padded])\n res = carry_left(res, i)\n return res", "def pandigitals(N, base=1):\n\tNUMBERS = list(range(base,N+base))\n\tpandigits = []\n\tfor i in list(itertools.permutations(NUMBERS)):\n\t\tif i[0] != 0:\n\t\t\ttmp = \"\"\n\t\t\tfor j in i:\n\t\t\t\ttmp = tmp + str(j)\n\t\t\tpandigits.append(int(tmp))\n\treturn sorted(pandigits)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the first n hexagonal numbers.
def get_hexagonals(num): return [int(i * (2 * i - 1)) for i in range(1, num + 1)]
[ "def create_hexagon_numbers(limit):\n hexagons = {0}\n increment = 1\n value = 0\n while True:\n value += increment\n increment += 4\n if value > limit:\n break\n hexagons.add(value)\n return hexagons", "def rand_hex_color(n=1):\n\n colors = [\n RGB_to_hex([x*255 for x in np.random.rand(3)])\n for i in range(n)\n ]\n if n == 1:\n return colors[0]\n else:\n return colors", "def to_rghex(n):\n return matplotlib.colors.rgb2hex([n, 1-n, 0, 1])", "def get_heptagonals(num):\n return [int(i * (5 * i - 3) / 2) for i in range(1, num + 1)]", "def piece_number(n):\n return tuple(bool(n >> i & 1) for i in range(SIZE))", "def build_hexagonal_position(index_matrix):\n pix_positions = []\n for i in range(index_matrix.shape[0]):\n for j in range(index_matrix.shape[1]):\n if not index_matrix[i, j] == -1:\n pix_positions.append([j - i/2, -i])\n return pix_positions", "def get_colours(n):\n if n <= 3:\n return base[0:n]\n\n # how many new colours to we need to insert between\n # red and green and between green and blue?\n needed = (old_div(((n - 3) + 1), 2), old_div((n - 3), 2))\n\n colours = []\n for start in (0, 1):\n for x in np.linspace(0, 1, needed[start]+2)[:-1]:\n colours.append((base[start] * (1.0 - x)) +\n (base[start+1] * x))\n colours.append(base[2])\n\n return [pastel(c) for c in colours[0:n]]", "def _cosnx(n):\n\n p = np.polynomial.chebyshev.Chebyshev.basis(n)\n return p.convert(kind=np.polynomial.Polynomial)", "def get_octagonals(num):\n return [int(i * (3 * i - 2)) for i in range(1, num + 1)]", "def fahrentheit_to_celsuis(n):\n return n-32*5/9", "def isHexagonal(number):\n\n if number == 1:\n return True\n else:\n n = ((8 * number + 1) ** .5 + 1)\n if n % 4 == 0:\n return True\n else:\n return False", "def pandigitals(N, base=1):\n\tNUMBERS = list(range(base,N+base))\n\tpandigits = []\n\tfor i in list(itertools.permutations(NUMBERS)):\n\t\tif i[0] != 0:\n\t\t\ttmp = \"\"\n\t\t\tfor j in i:\n\t\t\t\ttmp = tmp + str(j)\n\t\t\tpandigits.append(int(tmp))\n\treturn sorted(pandigits)", "def _get_special_triple_numbers() -> Iterable[int]:\n n = 1\n while True:\n number = get_hexagonal_number(n)\n if is_triangle_number(number) and is_pentagonal_number(number):\n yield number\n n += 1", "def HammingOrder(n):\n for i in range(0,15):\n N=2**i\n if N-i-1>=n: return i", "def get_colours(self, n):\n\t\tbase = np.asarray([[1,0,0], [0,1,0], [0,0,1]])\n\n\t\tif n <= 3:\n\t\t\treturn base[0:n]\n\n\t\t# how many new colours to we need to insert between\n\t\t# red and green and between green and blue?\n\t\tneeded = (((n - 3) + 1) / 2, (n - 3) / 2)\n\n\t\tcolours = []\n\t\tfor start in (0, 1):\n\t\t\tfor x in np.linspace(0, 1, needed[start]+2):\n\t\t\t\tcolours.append((base[start] * (1.0 - x)) +\n\t\t\t\t\t\t\t (base[start+1] * x))\n\n\t\treturn [self.pastel(c) for c in colours[0:n]]", "def hexnumber():\n return regex(r'0x[0-9a-fA-F]+').parsecmap(st(s.NUMBER))", "def sixty_one(n):\n if n < 61:\n return 0\n elif n % 100 == 61:\n return 1 + sixty_one(n // 100)\n else:\n return sixty_one(n // 10)", "def H(n):\r\n if n <= -8:\r\n return H(n+5) + H(n+4) + H(n+2)\r\n elif -8 < n and n < 10:\r\n return n\r\n else: # n >= 10\r\n return H(n-8) + H(n-5) + H(n-3)", "def square_to_hexagonal_index_matrix(image):\n index_matrix = torch.ones(image.shape[1],\n image.shape[2] + int(np.ceil(image.shape[1] / 2))) * -1\n n = 0\n for i in range(image.shape[1]):\n for j in range(image.shape[2]):\n index_matrix[i, j + int(np.ceil(i / 2))] = n\n n += 1\n return index_matrix" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }