Dataset schema (one record per row):
    query      string  (length 9 to 9.05k)
    document   string  (length 10 to 222k)
    negatives  list    (19 to 20 items)
    metadata   dict
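Each record pairs a natural-language query (typically a docstring) with the code document it describes, plus a list of hard-negative code snippets; the metadata objective marks the (query, document, negatives) triplet used for contrastive training. Below is a minimal sketch of expanding one record into (anchor, positive, negative) triplets, assuming records are plain dicts with the fields above; the loading step and the example field contents are illustrative, not taken from the dataset. The records below follow this shape.

def to_triplets(record):
    anchor = record["query"]          # natural-language description
    positive = record["document"]     # the matching code snippet
    for negative in record["negatives"]:
        yield anchor, positive, negative

example = {
    "query": "Return the sum of two numbers.",
    "document": "def add(a, b):\n    return a + b",
    "negatives": ["def sub(a, b):\n    return a - b"],
    "metadata": {"objective": {"triplet": [["query", "document", "negatives"]]}},
}
for triplet in to_triplets(example):
    print(triplet)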
Goldman Sachs' indicative charge of the bond (dollars).
def gs_charge_dollars(self) -> RangeFilter: return self.__gs_charge_dollars
[ "def get_charge(symbol):\n atom = as_atom(symbol)\n return atom.GetFormalCharge()", "def charge_2(dists, charges):\n d6 = dists <= 6.0\n d8 = dists <= 8.0\n d6_8 = logical_and(logical_not(d6), d8)\n epsilons = (d6*4.0) + \\\n d6_8*(38.0*dists-224.0) + \\\n logical_not(d8)*80.0\n charge = (charges / ( epsilons * dists ))\n return sum(charge)", "def charge(self):\n return self.__charge", "def charge_1(dists, charges):\n charge = charges / ( map(epsilon, dists) * dists )\n return sum(charge)", "def gasteiger_charges(mol):\n AllChem.ComputeGasteigerCharges(mol)\n return [\n mol.GetAtomWithIdx(i).GetDoubleProp(\"_GasteigerCharge\") for i in range(mol.GetNumAtoms())\n ]", "def gasteiger_charges(mol):\n\n rdPartialCharges.ComputeGasteigerCharges(mol)\n return mol.atoms.props.pop('_GasteigerCharge')", "def total_squared_charge(mol, g_charges):\n\n return np.power(g_charges, 2).sum()", "def bond_price(maturity, principal=100, coupon_rate=.03, coupons_per_year=12, discount_rate=.03):\n\n cash_flows = bond_cash_flows(maturity, principal, coupon_rate, coupons_per_year)\n pv = present_value(cash_flows, discount_rate / coupons_per_year)\n\n return pv", "def service_charge_arrears(self):\n\n amt = Decimal(\"0.0\")\n\n for sc in self.service_charges.all():\n amt += sc.amount_outstanding()\n\n return amt", "def charge(posJ,boss):\n d = math.sqrt((posJ[1] - boss.position[1])**2 + (posJ[2] - boss.position[2])**2)\n boss.directionCharge = [(posJ[1]-0.5 - boss.position[1])/d*1/16,(posJ[2]-0.5 - boss.position[2])/d*1/16] \n #definit la direction de la charge\n boss.aura = \"charge\"\n boss.auratimer = 0", "def total_charge(amount_received):\n # google checkout fees are 0.02 * charge + 0.20\n # amount_received = total_charge - (total_charge * 0.02 + 0.20)\n # amount_received = (1.0 - .02) * total_charge - 0.20\n return (float(amount_received) + 0.20) / (1.0 - 0.02)", "def total_positive_charge(mol, g_charges):\n\n\n return g_charges[g_charges > 0].sum()", "def total_absolute_charge(mol, g_charges):\n return np.abs(g_charges).sum()", "def additional_charge(self):\n self._balance=self._balance+1", "def calc_charge(resid, pH):\n # print(resid)\n # resid_letter = amino3to1(resid[1])\n resid_letter = resid\n\n resPka = {\n \"Y\":[+1,10.46],\n \"H\":[-1,6],\n \"C\":[1,8.5],\n \"D\":[-1,4.4],\n \"Z\":[-1,4.4],\n \"K\":[+1,10],\n \"R\":[1,12]\n }\n\n qi = resPka[resid_letter][0]\n pKa = resPka[resid_letter][1]\n charge = qi/(1+10**(qi*(pH-pKa)))\n return(charge)", "def cash(self):\n return self.cents / 100", "def calculate_gdp_per_capita():\n pass", "def price_mg():\n return Gold.price_oz() / Gold.GRAM_PER_OZ / 1000.0", "def gate_drive_charge(self, V_d: float, V_gs: float) -> float:\n C_gd = self.C_rss\n C_gs = self.C_iss - self.C_rss\n C_equiv = C_gs + C_gd * (1 + V_d / V_gs)\n Q_gs = V_gs * C_equiv\n return Q_gs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a screen using GsScreenApi if it doesn't exist. Update the report if it does.
def save(self):
    parameters = self.__to_target_parameters()
    target_screen = TargetScreen(name=self.name, parameters=parameters)
    if self.id:
        target_screen.id = self.id
        GsScreenApi.update_screen(target_screen)
    else:
        screen = GsScreenApi.create_screen(target_screen)
        self.__id = screen.id
        logging.info(f'New screen created with ID: {self.id} \n')
[ "def createScreen(self, screen_name):\n self.screenname_field = screen_name\n self.click_ok_button()", "def post(self, args, id, test):\n # check current user authorization\n fulltext = db.session.query(Fulltext).get(id)\n if not fulltext:\n return not_found_error('<Fulltext(id={})> not found'.format(id))\n if g.current_user.reviews.filter_by(id=fulltext.review_id).one_or_none() is None:\n return forbidden_error(\n '{} forbidden to screen fulltexts for this review'.format(\n g.current_user))\n if fulltext.filename is None:\n return forbidden_error(\n \"user can't screen {} without first having uploaded its content\".format(\n fulltext))\n # validate and add screening\n if args['status'] == 'excluded' and not args['exclude_reasons']:\n return validation_error('screenings that exclude must provide a reason')\n screening = FulltextScreening(\n fulltext.review_id, g.current_user.id, id,\n args['status'], args['exclude_reasons'])\n if fulltext.screenings.filter_by(user_id=g.current_user.id).one_or_none():\n return forbidden_error('{} has already screened {}'.format(\n g.current_user, fulltext))\n if test is False:\n fulltext.screenings.append(screening)\n db.session.commit()\n current_app.logger.info('inserted %s', screening)\n else:\n db.session.rollback()\n return ScreeningSchema().dump(screening).data", "def screenshot_created(req, screenshot):", "def save_screen(self, surface):\n\n i_str = pg.image.tostring(surface, 'RGB')\n self.screenshot = pg.image.fromstring(i_str, pg_init.SCREEN_SIZE, 'RGB')", "def _add_dscreen_object(self, dscreen: DSCREEN) -> None:\n key = dscreen.rtype\n assert key not in self.dscreen, 'DSCREEN=%s old=\\n%snew=\\n%s' % (\n key, self.dscreen[key], dscreen)\n assert len(key) > 0, 'key=%r' % key\n self.dscreen[key] = dscreen\n self._type_to_id_map[dscreen.type].append(key)", "def take_screen_shot(self, screen_id, address, width, height):\n if not isinstance(screen_id, baseinteger):\n raise TypeError(\"screen_id can only be an instance of type baseinteger\")\n if not isinstance(address, basestring):\n raise TypeError(\"address can only be an instance of type basestring\")\n if not isinstance(width, baseinteger):\n raise TypeError(\"width can only be an instance of type baseinteger\")\n if not isinstance(height, baseinteger):\n raise TypeError(\"height can only be an instance of type baseinteger\")\n self._call(\"takeScreenShot\",\n in_p=[screen_id, address, width, height])", "def screen(*args, **kwargs):\n actual_screen(*args, **kwargs)", "def cmd_toscreen(self, screen=None):\r\n if screen is None:\r\n screen = self.qtile.currentScreen\r\n else:\r\n screen = self.qtile.screens[screen]\r\n screen.setGroup(self)", "def add_screenshot(self, driver):\n file_screenshot = self.make_screenshot(driver, self.sPathOut)\n if file_screenshot not in self.aScreenshots:\n self.aScreenshots.append(file_screenshot)", "def cmd_toscreen(self, screen=None):\n if screen is None:\n screen = self.qtile.currentScreen\n else:\n screen = self.qtile.screens[screen]\n screen.setGroup(self)", "def copyScreen(self, screen_name, new_screen_name):\n copy_screen_dialog = CopyScreenDialog(self.driver)\n self.selectScreen(screen_name)\n self.click_object(*AppEditorPageLocators.copy_icon)\n time.sleep(1)\n copy_screen_dialog.copyScreen(new_screen_name)\n time.sleep(1)", "def update_screen_id(self):\n self.status_update_event.clear()\n # This gets the screenId but always throws. 
Couldn't find a better way.\n try:\n self.send_message({MESSAGE_TYPE: TYPE_GET_SCREEN_ID})\n except UnsupportedNamespace:\n pass\n status = self.status_update_event.wait(10)\n if not status:\n _LOGGER.warning(\"Failed to update screen_id\")\n self.status_update_event.clear()", "def post(self, args, review_id, user_id, test):\n if g.current_user.is_admin is False:\n return forbidden_error('FulltextsScreeningsResource.post is admin-only')\n # check current user authorization\n review = db.session.query(Review).get(review_id)\n if not review:\n return not_found_error(\n '<Review(id={})> not found'.format(review_id))\n # bulk insert fulltext screenings\n screener_user_id = user_id or g.current_user.id\n screenings_to_insert = []\n for screening in args:\n screening['review_id'] = review_id\n screening['user_id'] = screener_user_id\n screenings_to_insert.append(screening)\n if test is False:\n db.session.bulk_insert_mappings(\n FulltextScreening, screenings_to_insert)\n db.session.commit()\n current_app.logger.info(\n 'inserted %s fulltext screenings', len(screenings_to_insert))\n # bulk update fulltext statuses\n num_screeners = review.num_fulltext_screening_reviewers\n fulltext_ids = sorted(s['fulltext_id'] for s in screenings_to_insert)\n # results = db.session.query(FulltextScreening)\\\n # .filter(FulltextScreening.fulltext_id.in_(fulltext_ids))\n # studies_to_update = [\n # {'id': cid, 'fulltext_status': assign_status(list(scrns), num_screeners)}\n # for cid, scrns in itertools.groupby(results, attrgetter('fulltext_id'))\n # ]\n with db.engine.connect() as connection:\n query = \"\"\"\n SELECT fulltext_id, ARRAY_AGG(status)\n FROM fulltext_screenings\n WHERE fulltext_id IN ({fulltext_ids})\n GROUP BY fulltext_id\n ORDER BY fulltext_id\n \"\"\".format(fulltext_ids=','.join(str(cid) for cid in fulltext_ids))\n results = connection.execute(query)\n studies_to_update = [\n {'id': row[0], 'fulltext_status': assign_status(row[1], num_screeners)}\n for row in results]\n if test is False:\n db.session.bulk_update_mappings(\n Study, studies_to_update)\n db.session.commit()\n current_app.logger.info(\n 'updated fulltext_status for %s studies', len(studies_to_update))\n # now add data extractions for included fulltexts\n # normally this is done automatically, but not when we're hacking\n # and doing bulk changes to the database\n results = db.session.query(Study.id)\\\n .filter_by(review_id=review_id)\\\n .filter_by(fulltext_status='included')\\\n .filter(~Study.data_extraction.has())\\\n .order_by(Study.id)\n data_extractions_to_insert = [\n {'id': result[0], 'review_id': review_id}\n for result in results]\n db.session.bulk_insert_mappings(DataExtraction, data_extractions_to_insert)\n db.session.commit()\n current_app.logger.info('inserted %s data extractions', len(data_extractions_to_insert))\n # now update include/exclude counts on review\n status_counts = db.session.query(Study.fulltext_status, db.func.count(1))\\\n .filter(Study.review_id == review_id)\\\n .filter(Study.fulltext_status.in_(['included', 'excluded']))\\\n .group_by(Study.fulltext_status)\\\n .all()\n status_counts = dict(status_counts)\n review.num_fulltexts_included = status_counts.get('included', 0)\n review.num_fulltexts_excluded = status_counts.get('excluded', 0)\n db.session.commit()", "def add_screens(self, *screens):\n for screen in screens:\n if isinstance(screen, Screen):\n self.screens.append(screen)\n else:\n raise InvalidGameElementException('Can`t add an element of type '+str(type(screen)))", "def 
test_screens_well_create_user_update_user(self):\n # Data for the construction submission.\n data = {\n 'screen_set': [\n {\n 'start': 0,\n 'end': 10,\n 'assembly_type': 'SCREEN'\n }\n ]\n }\n # Post an construction submissions.\n response = self.client.post(reverse('CON', kwargs={'version': 'v1'}), data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # Get the well back.\n well = Well.objects.get(well_tag_number=response.data['well'])\n # Get the resultant lithology record\n screen = well.screen_set.all()[0]\n self.assertEqual(screen.create_user, self.user.username)\n self.assertEqual(screen.update_user, self.user.username)", "def init_screen(): # pragma: no cover\n global SCREEN\n SCREEN = Screen()", "def create_screens (self):\n self.screens = OrderedDict()\n for screen in self.screennames:\n self.screens[screen] = tk.Frame(self)\n self.screens[screen].config(bd=self.master.borderwidth,\n bg=CONFIG.COLORS.BACKGROUND,\n width=self.master.width, height=410)\n self.screens[screen].grid_propagate(0)\n self.screens[screen].grid_columnconfigure(0, minsize=self.master.width - \\\n 2*self.master.borderwidth)\n getattr(self, \"create_screen_{}\".format(screen))() # call create_screen_X()\n\n self.screens['main'].grid()", "def _in_screen(cmd: str, screen_name: str, **kw) -> None:\n screen = \"screen -dmS {} bash -c '{} ; exec bash'\".format(screen_name, cmd)\n print(\"Screen to run: [{0}]\".format(screen))\n run(screen, pty=False, **kw)", "def missed_shot(img_screen):\n # TODO: template match to the button\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mock Sense object for authentication.
def mock_sense():
    with patch(
        "homeassistant.components.sense.config_flow.ASyncSenseable"
    ) as mock_sense:
        mock_sense.return_value.authenticate = AsyncMock(return_value=True)
        mock_sense.return_value.validate_mfa = AsyncMock(return_value=True)
        mock_sense.return_value.sense_access_token = "ABC"
        mock_sense.return_value.sense_user_id = "123"
        mock_sense.return_value.sense_monitor_id = "456"
        mock_sense.return_value.device_id = "789"
        mock_sense.return_value.refresh_token = "XYZ"
        yield mock_sense
[ "def test_access_token_set(self, client, mocker):\n session_get = mocker.spy(requests.Session, \"get\")\n client.access_token = \"token\"\n assert client.access_token, \"token\"\n client.get_object(\"user\", \"me\")\n session_get.assert_called_with(\n mocker.ANY,\n \"https://api.deezer.com/user/me\",\n params={\"access_token\": \"token\"},\n )", "def test_auth(self):\n pass", "def test_check_authorization(self):\n self.instance.set_client_id(\"client-id\", \"client-secret\")\n self.instance.check_authorization(\"super-fake-access-token\")\n\n self.session.get.assert_called_once_with(\n url_for(\"applications/client-id/tokens/super-fake-access-token\"),\n params={\"client_id\": None, \"client_secret\": None},\n auth=(\"client-id\", \"client-secret\"),\n )", "def test_get_with_auth(self):\n response = self.client.get(\n '/api/v1/restock/',\n HTTP_AUTHORIZATION='Token {}'.format(self.token)\n )\n\n # Access allow\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(response.json(), self.data)", "def mock_controller_service_authenticated():\n with patch(\n \"homeassistant.components.icloud.config_flow.PyiCloudService\"\n ) as service_mock:\n service_mock.return_value.requires_2fa = False\n service_mock.return_value.requires_2sa = False\n service_mock.return_value.is_trusted_session = True\n service_mock.return_value.trusted_devices = TRUSTED_DEVICES\n service_mock.return_value.send_verification_code = Mock(return_value=True)\n service_mock.return_value.validate_2fa_code = Mock(return_value=True)\n service_mock.return_value.validate_verification_code = Mock(return_value=True)\n yield service_mock", "def setup_mock(self):\n idinfo_mock = {'name': 'Temp Temp', 'email': 'tempt3699@gmail.com', }\n utils.verify_id_token = MagicMock(return_value=idinfo_mock)", "def test_get_a_sale(self):\n sale_url = 'api/v1/sales/{0}'.format(2)\n http_response = self.client.get(sale_url, content_type='application/json')\n self.assertFalse(self.is_sale_avaialble(32))\n self.assertTrue(self.is_sale_avaialble(2))\n self.assertEqual(http_response.status_code, 200)\n\n \"\"\"implement after implementing user authentication\n if user_role == 2:\n self.assertEqual(http_response.status_code, 200)\n elif author:\n self.assertEqual(http_response.status_code, 200)\n else:\n self.assertEqual(http_response.status_code, 403)\"\"\"", "def test_access_token_mixcloud_instance(self):\n async def coroutine():\n \"\"\"Return an access token after supposedly successful\n OAuth transaction.\n \"\"\"\n return {'access_token': 'jf8n'}\n\n mock_mixcloud = make_mock_mixcloud(coroutine)\n\n auth = MixcloudOAuthSync(client_id='v9a', redirect_uri='abc.org',\n client_secret='21p', mixcloud=mock_mixcloud)\n result = auth.access_token('k89r')\n\n mock_mixcloud._session.get.assert_called_once_with(\n yarl.URL('https://www.mixcloud.com/oauth/access_token'),\n params={'client_id': 'v9a', 'redirect_uri': 'abc.org',\n 'client_secret': '21p', 'code': 'k89r'})\n self.assertEqual(result, 'jf8n')", "def testGetSecurityInfoMocked(self):\n\n with unittest.mock.patch(\"requests.get\") as mock:\n\n # set up mock iterator for response.json()\n instance = mock.return_value\n instance.json.return_value = self.get_security_info_mock\n\n security = pyasx.data.securities.get_security_info('CBAPC')\n\n self.assertEquals(security[\"ticker\"], \"TICKER\")\n self.assertEquals(security[\"isin\"], \"AU000ABC123\")\n self.assertEquals(security[\"type\"], \"DESC FULL\")\n self.assertEquals(security[\"open_price\"], 2)\n 
self.assertEquals(security[\"last_price\"], 1)\n self.assertEquals(security[\"bid_price\"], 9)\n self.assertEquals(security[\"offer_price\"], 10)\n\n self.assertEquals(pyasx.data._format_date(\n security[\"last_trade_date\"]),\n \"2018-03-23T00:00:00+1100\"\n )\n\n self.assertEquals(security[\"day_high_price\"], 3)\n self.assertEquals(security[\"day_low_price\"], 4)\n self.assertEquals(security[\"day_change_price\"], 5)\n self.assertEquals(security[\"day_change_percent\"], \"7%\")\n self.assertEquals(security[\"day_volume\"], 8)\n self.assertEquals(security[\"prev_day_close_price\"], 11)\n self.assertEquals(security[\"prev_day_change_percent\"], \"-12%\")\n self.assertEquals(security[\"year_high_price\"], 13)\n\n self.assertEquals(\n pyasx.data._format_date(security[\"year_high_date\"]),\n \"2018-03-23T00:00:00+1100\"\n )\n\n self.assertEquals(security[\"year_low_price\"], 14)\n\n self.assertEquals(\n pyasx.data._format_date(security[\"year_low_date\"]),\n \"2018-03-22T00:00:00+1100\"\n )\n\n self.assertEquals(security[\"year_open_price\"], 15)\n self.assertEquals(security[\"year_change_price\"], -16)\n self.assertEquals(security[\"year_change_percent\"], \"-17%\")\n self.assertEquals(security[\"average_daily_volume\"], 20)\n self.assertEquals(security[\"pe\"], 18)\n self.assertEquals(security[\"eps\"], 19)\n self.assertEquals(security[\"annual_dividend_yield\"], 21)\n self.assertEquals(security[\"securities_outstanding\"], 23)\n self.assertEquals(security[\"market_cap\"], 22)\n self.assertEquals(security[\"is_suspended\"], False)\n self.assertTrue(len(security[\"indices\"]))", "def test_provision_good(self):\n f = Mock()\n f.provision_user.return_value = 'http://example.org/a/user'\n app.config['PROVIDERS'] = {'foo': f}\n\n with uaac_set(app):\n with app.test_client() as c:\n rv = c.post('/api/v1/provision', data={'provider': 'foo', 'id': 'bar'})\n assert rv.status_code == 201\n assert rv.location == 'http://example.org/a/user'", "def mock_profile(mock_api, mock_user):\n from sdss_brain.api.manager import ApiProfile\n profile = ApiProfile('marvin')\n profile.check_for_token = lambda: 'xyz123'\n profile.check_for_refresh_token = lambda: 'abc123'\n yield profile\n profile = None", "def test_authorization_show(self):\n pass", "def test_impersonate_token(self):\n pass", "def test_init(self):\n iiq = insightiq_api.InsightiqApi(username='pat', password='a')\n\n self.fake_renew_session.assert_called()", "def mocked_get_threescale_account_secret_header():\n return \"secret\"", "def setUp(self):\n self.factory = RequestFactory()\n self.client = Client()\n self.user = User.objects.create_user(\"username\", \"user@example.com\", \"123456\")\n self.project_owner = self.user.projectowner\n self.app = ApplicationModel.objects.create(\n name='app',\n client_type=ApplicationModel.CLIENT_CONFIDENTIAL,\n authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,\n user=self.user\n )\n self.token = AccessToken.objects.create(user=self.user,\n token='tokstr',\n application=self.app,\n expires=now() + timedelta(days=365),\n scope=\"read write\")", "def test_expenses_get(self):\n pass", "def test_authentication_challenge_get_post(self):\n pass", "def test_2_create_spotify_object(self):\n SpotifyTest.spotify_obj = spotify.create_spotify_object(self.token)\n self.assertIsNotNone(SpotifyTest.spotify_obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare the dict of values to create the new refund from the invoice. This method may be overridden to implement custom refund generation (making sure to call super() to establish a clean extension chain).
def _prepare_refund(self, invoice, date=None, period_id=None, description=None, journal_id=None):
    values = {}
    for field in ['name', 'reference', 'comment', 'date_due', 'cost_center_id',
                  'partner_id', 'company_id', 'account_id', 'currency_id',
                  'payment_term', 'user_id', 'fiscal_position']:
        if invoice._fields[field].type == 'many2one':
            values[field] = invoice[field].id
        else:
            values[field] = invoice[field] or False
    values['invoice_line'] = self._refund_cleanup_lines(invoice.invoice_line)
    tax_lines = filter(lambda l: l.manual, invoice.tax_line)
    values['tax_line'] = self._refund_cleanup_lines(tax_lines)
    if journal_id:
        journal = self.env['account.journal'].browse(journal_id)
    elif invoice['type'] == 'in_invoice':
        journal = self.env['account.journal'].search([('type', '=', 'purchase_refund')], limit=1)
    else:
        journal = self.env['account.journal'].search([('type', '=', 'sale_refund')], limit=1)
    values['journal_id'] = journal.id
    values['type'] = TYPE2REFUND[invoice['type']]
    values['date_invoice'] = date or fields.Date.context_today(invoice)
    values['state'] = 'draft'
    values['number'] = False
    values['origin'] = invoice.number
    if period_id:
        values['period_id'] = period_id
    if description:
        values['name'] = description
    return values
[ "def _prepare_invoice_line(self, invoice_id=False, invoice_values=False):\n vals = super()._prepare_invoice_line(invoice_id=invoice_id, \n invoice_values=invoice_values)\n vals['agents'] = [\n (0, 0, {'agent': x.agent.id,\n 'commission': x.commission.id}) for x in self.agents]\n vals['purchase_price'] = self.purchase_price\n return vals", "def _prepare_insurance_tax_line_vals(self):\n\n# insurance_tax = self.company_id.insurance_tax_id\n invoice_line_ids = self.invoice_line_ids.filtered(\n lambda i: i.product_id.type != 'service'\n )\n insurance_tax = invoice_line_ids.mapped('invoice_line_tax_ids')\n if insurance_tax:\n insurance_tax = insurance_tax[0]\n taxes = insurance_tax.compute_all(\n price_unit=self.insurance_amount,\n currency=self.currency_id,\n quantity=1.0,\n# product=self.company_id.insurance_product_id,\n partner=self.partner_id\n )['taxes']\n# tax = taxes[0]\n tax_vals = []\n for tax in taxes:\n vals = {\n 'invoice_id': self.id,\n 'name': tax['name'],\n 'tax_id': tax['id'],\n 'amount': tax['amount'],\n 'base': tax['base'],\n 'manual': False,\n 'sequence': tax['sequence'],\n 'account_id': tax['account_id'] or tax['refund_account_id'],\n }\n tax_vals.append(vals)\n return tax_vals", "def _prepare_invoice(self):\n result = super(PosOrder, self)._prepare_invoice()\n result['service_charge'] = self.service_charge\n result['amount_service'] = self.amount_service\n return result", "def _prepare_invoice(self):\n invoice_vals = super(SaleOrder, self)._prepare_invoice()\n if self.auto_workflow_process_id:\n invoice_vals.update({\n 'journal_id': self.auto_workflow_process_id.sale_journal_id.id})\n if self.auto_workflow_process_id.invoice_date_is_order_date:\n invoice_vals['date'] = self.date_order.date()\n invoice_vals['invoice_date'] = fields.Date.context_today(self)\n return invoice_vals", "def refund(self):\n urn = \"/v1/invoices/{invoice_id}/refund\".format(invoice_id=self.id)\n\n # This below if to avoid a request because the API not allow this operation\n # but all API can to change theirs behaviors so to allow to refund\n # invoices with status difference of \"paid\".\n # The approach without if also to raise exception with error from directly\n # API responses but here the focus is less requests.\n if self.status == \"paid\":\n response = self.__conn.post(urn, [])\n obj = IuguInvoice(**response)\n else:\n raise errors.IuguGeneralException(value=\"Refund operation support only \" \\\n \"invoices with status: paid.\")\n\n return obj", "def refund(**kwargs):\n kwargs.update({'status_to': 'refund'})\n return status_change(**kwargs)", "def refund_payment(self,\n body):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/refunds')\n .http_method(HttpMethodEnum.POST)\n .header_param(Parameter()\n .key('Content-Type')\n .value('application/json'))\n .body_param(Parameter()\n .value(body))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .body_serializer(APIHelper.json_serialize)\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def refund_by_amount(self, refund_amount: int) -> Dict[str, Any]:\n wrapped_params = {\"refund_amount\": refund_amount}\n\n response = Requestor(self._client).request(\n method=RequestMethod.POST,\n url=\"/referral_customers/refunds\",\n params=wrapped_params,\n beta=True,\n )\n\n return convert_to_easypost_object(response=response)", "def 
_create_from_refund(self, debit_note, refund, account, bucket):\n from Acquire.Accounting import DebitNote as _DebitNote\n from Acquire.Accounting import Refund as _Refund\n from Acquire.Accounting import TransactionRecord as _TransactionRecord\n from Acquire.Accounting import TransactionState as _TransactionState\n from Acquire.Accounting import Account as _Account\n\n if not isinstance(debit_note, _DebitNote):\n raise TypeError(\"You can only create a CreditNote \"\n \"with a DebitNote\")\n\n if not isinstance(refund, _Refund):\n raise TypeError(\"You can only refund a Refund object: %s\"\n % str(refund.__class__))\n\n # get the transaction behind this refund and ensure it is in the\n # refunding state...\n transaction = _TransactionRecord.load_test_and_set(\n refund.transaction_uid(),\n _TransactionState.REFUNDING,\n _TransactionState.REFUNDING, bucket=bucket)\n\n # ensure that the receipt matches the transaction...\n transaction.assert_matching_refund(refund)\n\n if account is None:\n account = _Account(transaction.debit_account_uid(), bucket)\n elif account.uid() != refund.debit_account_uid():\n raise ValueError(\"The accounts do not match when refunding \"\n \"the receipt: %s versus %s\" %\n (account.uid(), refund.debit_account_uid()))\n\n (uid, datetime) = account._credit_refund(debit_note, refund, bucket)\n\n self._account_uid = account.uid()\n self._debit_account_uid = debit_note.account_uid()\n self._datetime = datetime\n self._uid = uid\n self._debit_note_uid = debit_note.uid()\n self._value = debit_note.value()\n self._is_provisional = debit_note.is_provisional()\n\n if self._is_provisional:\n self._receipt_by = debit_note.receipt_by()\n\n # finally(!) move the transaction into the refunded state\n _TransactionRecord.load_test_and_set(\n refund.transaction_uid(),\n _TransactionState.REFUNDING,\n _TransactionState.REFUNDED, bucket=bucket)", "def _prepare_purchase_order(self, company_id, origins, values):\n\t\tdates = [fields.Datetime.from_string(value['date_planned']) for value in values]\n\n\t\tprocurement_date_planned = min(dates)\n\t\tschedule_date = (procurement_date_planned - relativedelta(days=company_id.po_lead))\n\n\t\t# Since the procurements are grouped if they share the same domain for\n\t\t# PO but the PO does not exist. In this case it will create the PO from\n\t\t# the common procurements values. The common values are taken from an\n\t\t# arbitrary procurement. 
In this case the first.\n\t\tvalues = values[0]\n\t\tpartner = values['supplier'].name\n\t\tpurchase_date = schedule_date - relativedelta(days=int(1))\n\n\t\tfpos = self.env['account.fiscal.position'].with_context(force_company=company_id.id).get_fiscal_position(partner.id)\n\n\t\tgpo = self.group_propagation_option\n\t\tgroup = (gpo == 'fixed' and self.group_id.id) or \\\n\t\t\t\t(gpo == 'propagate' and values.get('group_id') and values['group_id'].id) or False\n\n\t\treturn {\n\t\t\t'partner_id': partner.id,\n\t\t\t'user_id': False,\n\t\t\t'picking_type_id': self.picking_type_id.id,\n\t\t\t'company_id': company_id.id,\n\t\t\t'currency_id': partner.with_context(force_company=company_id.id).property_purchase_currency_id.id or company_id.currency_id.id,\n\t\t\t'dest_address_id': values.get('partner_id', False),\n\t\t\t'origin': ', '.join(origins),\n\t\t\t'payment_term_id': partner.with_context(force_company=company_id.id).property_supplier_payment_term_id.id,\n\t\t\t'date_order': purchase_date,\n\t\t\t'fiscal_position_id': fpos,\n\t\t\t'group_id': group\n\t\t}", "def refund_items(self, refund_items):\n\n self._refund_items = refund_items", "def retrieve(self, refund_id, **kwargs):\n return super(Refund, self)._retrieve(refund_id, **kwargs)", "def create_return_sales_invoice(order_dict, order, changes):\n\n # Check there is a refund.\n if order['order_payment_status'] not in ('FULLY_REFUNDED',\n 'PARTIALLY_REFUNDED'):\n # If no refund, return now.\n return\n\n # Find the existing Sales Invoice, and its latest amendment.\n ebay_user_id = order_dict['ebay_user_id']\n ebay_order_id = order_dict['ebay_order_id']\n customer = order_dict['customer']\n customer_name = order_dict['customer_name']\n cancelled_names = []\n\n sinv_fields = db_get_ebay_doc(\n 'Sales Invoice', ebay_order_id, fields=['name', 'docstatus'],\n log=changes, none_ok=True)\n if sinv_fields is None:\n # No SINV, so don't create refund\n return\n while sinv_fields.docstatus != 1:\n if sinv_fields.docstatus == 0:\n # Don't create refund from non-submitted SINV\n return\n cancelled_names.append(sinv_fields.name)\n search = frappe.get_all(\n 'Sales Invoice',\n fields=['name', 'docstatus'],\n filters={'amended_from': sinv_fields.name}\n )\n if not search:\n # No amended document\n return\n elif len(search) > 1:\n raise ValueError(f'Multiple amended documents! 
{sinv_fields.name}')\n sinv_fields = search[0]\n # sinv_fields is now the final SINV\n sinv_name = sinv_fields.name\n\n # Check for a return to any of the cancelled documents\n for cancelled_name in cancelled_names:\n return_sinvs = frappe.get_all(\n 'Sales Invoice',\n fields=['name'],\n filters={'return_against': cancelled_name, 'docstatus': ['!=', 2]}\n )\n if return_sinvs:\n return_names = ', '.join([x.name for x in return_sinvs])\n raise ValueError(\n f'Cancelled {cancelled_name} has return(s) {return_names}!')\n\n # Check for return from the final SINV\n sinv_ret = frappe.get_all(\n 'Sales Invoice',\n fields=['name'],\n filters={'return_against': sinv_name, 'docstatus': ['!=', 2]}\n )\n if sinv_ret:\n return\n\n # Need to create return SINV - gather info and run checks\n if len(order['payment_summary']['refunds']) != 1:\n frappe.throw(f'Order {ebay_order_id} has multiple refunds!',\n exc=ErpnextEbaySyncError)\n refund = order['payment_summary']['refunds'][0]\n\n default_currency = get_default_currency()\n ebay_payment_account = f'eBay Managed {default_currency}'\n posting_date = datetime.datetime.strptime(\n refund['refund_date'][:-1] + 'UTC', '%Y-%m-%dT%H:%M:%S.%f%Z')\n base_refund_amount = float(refund['amount']['value'])\n if refund['amount']['currency'] != default_currency:\n raise ValueError('Unexpected base refund currency!')\n\n # Create a return Sales Invoice for the relevant quantities and amount.\n sinv_doc = frappe.get_doc('Sales Invoice', sinv_name)\n return_doc = make_return_doc(\"Sales Invoice\", sinv_name)\n return_doc.update_stock = False\n return_doc.posting_date = posting_date.date()\n return_doc.posting_time = posting_date.time()\n return_doc.due_date = posting_date\n return_doc.set_posting_time = True\n refund_type_str = REFUND_NAME[order['order_payment_status']]\n return_doc.title = f\"\"\"eBay {refund_type_str} Refund: {customer_name}\"\"\"\n\n if len(return_doc.payments) != 1:\n raise ValueError('Wrong number of payments!')\n if return_doc.payments[0].mode_of_payment != ebay_payment_account:\n raise ValueError('Wrong mode of payment!')\n\n if order['order_payment_status'] == 'PARTIALLY_REFUNDED':\n # Adjust quantities and rates on return\n exc_rate = return_doc.conversion_rate\n refund_total = round(base_refund_amount / exc_rate, 2)\n\n # Adjust payment\n return_doc.payments[0].amount = -refund_total\n\n # Calculate taxes\n tax = (sinv_doc.taxes or [None])[0]\n\n if not sinv_doc.taxes:\n tax_rate = 0\n elif len(sinv_doc.taxes) != 1:\n frappe.throw(f'Sales invoice {sinv_name} has multiple tax entries!',\n exc=ErpnextEbaySyncError)\n elif tax.included_in_print_rate:\n frappe.throw(f'Sales invoice {sinv_name} has inclusive tax',\n exc=ErpnextEbaySyncError)\n elif tax.charge_type != 'Actual':\n frappe.throw(f'Sales invoice {sinv_name} has calculated tax',\n exc=ErpnextEbaySyncError)\n else:\n # Need to adjust actual taxes\n tax_rate = round(tax.total / (tax.total - tax.tax_amount), 3)\n\n # Calculate tax amount and adjust taxes\n if tax_rate:\n tax_amount = round(refund_total - (refund_total / tax_rate), 2)\n ex_tax_refund = refund_total - tax_amount\n ret_tax = return_doc.taxes[0]\n ret_tax.charge_type = 'Actual'\n ret_tax.total = -refund_total\n ret_tax.tax_amount = -tax_amount\n else:\n ex_tax_refund = refund_total\n\n # Delete shipping items if refund amount is less than total of\n # other items\n non_shipping_total = sum(\n x.amount for x in sinv_doc.items\n if x.item_code != SHIPPING_ITEM\n )\n if ex_tax_refund < non_shipping_total:\n # We can remove shipping 
items\n return_doc.items[:] = [\n x for x in return_doc.items if x.item_code != SHIPPING_ITEM\n ]\n\n # Get return items in quantity order\n return_items = [x for x in return_doc.items]\n return_items.sort(key=operator.attrgetter('qty'), reverse=True)\n\n # Divide refund across items proportionally\n refund_frac = (\n ex_tax_refund / sum(-x.amount for x in return_items)\n )\n original_rates = [x.rate for x in return_items]\n for i, item in enumerate(return_items):\n item.rate = min(item.rate, round(item.rate * refund_frac, 2))\n item.amount = item.rate * item.qty\n refund_remainder = (\n ex_tax_refund\n - sum(-x.amount for x in return_items)\n )\n\n for i, item in enumerate(return_items):\n if abs(refund_remainder) < 0.005:\n # Done\n break\n if refund_remainder > 0:\n # Must add more refund to item\n possible_refund = (original_rates[i] - item.rate) * -item.qty\n amount_change = min(refund_remainder, possible_refund)\n else:\n # Must remove refund (all quantities negative)\n # max() returns value closer to zero here\n amount_change = max(refund_remainder, item.amount)\n item.rate = min(\n original_rates[i],\n round(item.rate + (amount_change / -item.qty), 2)\n )\n item.amount = item.qty * item.rate\n refund_remainder = (\n ex_tax_refund\n - sum(-x.amount for x in return_items)\n )\n\n if refund_remainder:\n raise ErpnextEbaySyncError(\n 'Refund allocation algorithm insufficiently clever')\n\n # Delete items that have zero value or qty\n return_doc.items[:] = [\n x for x in return_doc.items if (x.rate and x.qty)\n ]\n if sum(round(x.amount, 2) for x in return_doc.items) != -ex_tax_refund:\n raise ErpnextEbaySyncError('Problem calculating refund rates!')\n\n return_doc.insert()\n #return_doc.submit()\n\n if CREATE_WARRANTY_CLAIMS:\n # Create a Warranty Claim for the refund, if one does not exist.\n wc_doc = frappe.get_doc({\n 'doctype': 'Warranty Claim',\n 'status': 'Open',\n 'complaint_date': return_doc.posting_date,\n 'customer': customer,\n 'customer_name': customer_name\n })\n sinv_url = frappe.utils.get_url_to_form('Sales Invoice', sinv_doc.name)\n ret_url = frappe.utils.get_url_to_form('Sales Invoice', return_doc.name)\n sinv_name_html = frappe.utils.escape_html(sinv_doc.name)\n ret_name_html = frappe.utils.escape_html(return_doc.name)\n refund_html = frappe.utils.escape_html(\n frappe.utils.fmt_money(base_refund_amount,\n currency=default_currency)\n )\n if return_doc.currency == default_currency:\n refund_currency_html = ''\n else:\n cur_str = frappe.utils.fmt_money(return_doc.paid_amount,\n currency=return_doc.currency)\n refund_currency_html = frappe.utils.escape_html(f' ({cur_str})')\n\n wc_doc.complaint = f\"\"\"\n <p>eBay {refund_type_str} Refund</p>\n <p>SINV <a href=\"{sinv_url}\">{sinv_name_html}</a>;\n Return SINV <a href=\"{ret_url}\">{ret_name_html}</a></p>\n <p>Refund amount: {refund_html}{refund_currency_html}</p>\n <p>This Warranty Claim has been auto-generated in response\n to a refund on eBay.</p>\"\"\"\n wc_doc.insert()\n\n debug_msgprint('Adding return Sales Invoice: ' + ebay_user_id\n + ' : ' + return_doc.name)\n changes.append({\"ebay_change\": \"Adding return Sales Invoice\",\n \"ebay_user_id\": ebay_user_id,\n \"customer_name\": customer_name,\n \"customer\": customer,\n \"address\": order_dict['address'],\n \"ebay_order\": ebay_order_id})\n\n # Commit changes to database\n frappe.db.commit()", "def refund(self, cr, uid, ids, context=None):\n clone_list = []\n line_obj = self.pool.get('pos.order.line')\n \n for order in self.browse(cr, uid, ids, 
context=context):\n current_session_ids = self.pool.get('pos.session').search(cr, uid, [\n ('state', '!=', 'closed'),\n ('user_id', '=', uid)], context=context)\n if not current_session_ids:\n raise osv.except_osv(_('Error!'), _('To return product(s), you need to open a session that will be used to register the refund.'))\n\n clone_id = self.copy(cr, uid, order.id, {\n 'name': order.name + ' REFUND', # not used, name forced by create\n 'session_id': current_session_ids[0],\n 'date_order': time.strftime('%Y-%m-%d %H:%M:%S'),\n }, context=context)\n clone_list.append(clone_id)\n\n for clone in self.browse(cr, uid, clone_list, context=context):\n for order_line in clone.lines:\n line_obj.write(cr, uid, [order_line.id], {\n 'qty': -order_line.qty\n }, context=context)\n\n new_order = ','.join(map(str,clone_list))\n abs = {\n #'domain': \"[('id', 'in', [\"+new_order+\"])]\",\n 'name': _('Return Products'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'pos.order',\n 'res_id':clone_list[0],\n 'view_id': False,\n 'context':context,\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n }\n return abs", "def _prepare_cost_invoice(\n self, partner, company_id, currency_id, analytic_lines):\n account_payment_term_obj = self.env['account.payment.term']\n invoice_name = analytic_lines[0].account_id.name\n account_id = partner.property_account_receivable_id\n\n date_due = False\n if partner.property_payment_term_id:\n for pt in account_payment_term_obj:\n pterm_list = pt.compute(\n value=1, date_ref=fields.Date.context_today(self),currency=currency_id)\n if pterm_list:\n pterm_list = [line[0] for line in pterm_list]\n pterm_list.sort()\n date_due = pterm_list[-1]\n\n vals = {\n #'name': \"%s - %s\" % (time.strftime('%d/%m/%Y'), invoice_name),\n 'name': \"/\",\n 'partner_id': partner.id,\n 'company_id': company_id,\n #'payment_term_id': partner.property_payment_term_id.id or False,\n 'invoice_payment_term_id' : partner.property_payment_term_id.id or False,\n #'account_id': account_id and account_id.id or False,account_id is not availble on account.move\n 'currency_id': currency_id,\n #'date_due': date_due,\n 'invoice_date_due': date_due,\n 'fiscal_position_id': partner.property_account_position_id.id,\n #'move_type': 'out_invoice'\n }\n _logger.info(\"^^^^^^^^^^^^^^_prepare_cost_invoice^^^^^^^^^^^^^^^^ %s\",vals)\n return vals", "def _amount_residual(self, cr, uid, ids, field_names, args, context=None):\n res = {}\n if context is None:\n context = {}\n cur_obj = self.pool.get('res.currency')\n acc_move_recon_obj = self.pool.get('account.move.reconcile')\n for move_line in self.browse(cr, uid, ids, context=context):\n res[move_line.id] = {\n 'amount_original':0.0,\n 'amount_residual': 0.0,\n 'amount_residual_currency': 0.0,\n 'amount_res': 0.0,\n }\n\n if move_line.reconcile_id:\n continue\n if not move_line.account_id.type in ('payable', 'receivable'):\n #this function does not suport to be used on move lines not related to payable or receivable accounts\n continue\n\n if move_line.currency_id:\n move_line_total = move_line.amount_currency\n sign = move_line.amount_currency < 0 and -1 or 1\n else:\n move_line_total = move_line.debit - move_line.credit\n sign = (move_line.debit - move_line.credit) < 0 and -1 or 1\n amount_original = move_line_total\n line_total_in_company_currency = move_line.debit - move_line.credit\n context_unreconciled = context.copy()\n if move_line.reconcile_partial_id:\n acc_move_recon_id = acc_move_recon_obj.browse(cr, uid, 
move_line.reconcile_partial_id.id, context=None)\n\n for payment_line in acc_move_recon_id.line_partial_ids:\n if payment_line.id == move_line.id:\n continue\n if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:\n move_line_total += payment_line.amount_currency\n else:\n if move_line.currency_id:\n context_unreconciled.update({'date': payment_line.date})\n amount_in_foreign_currency = float_round(cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled),2)\n move_line_total += amount_in_foreign_currency\n else:\n move_line_total += (payment_line.debit - payment_line.credit)\n line_total_in_company_currency += (payment_line.debit - payment_line.credit)\n\n result = move_line_total\n# res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)\n res[move_line.id]['amount_original'] = sign * float_round((move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, amount_original) or amount_original),2)\n\n res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency\n ctx = {'date': move_line.cur_date or move_line.date}\n \n res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)\n if move_line.currency_id:\n move_line_res = abs((move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result))\n else:\n move_line_res = abs(line_total_in_company_currency)\n\n res[move_line.id]['amount_res'] = move_line_res\n return res", "def _prepare_object_values(self, row):\n prepared_row = dict()\n prepared_row.update(row)\n self.prepare_object_values(prepared_row)\n return prepared_row", "def get_payment_refund(self,\n refund_id):\n\n return super().new_api_call_builder.request(\n RequestBuilder().server('default')\n .path('/v2/refunds/{refund_id}')\n .http_method(HttpMethodEnum.GET)\n .template_param(Parameter()\n .key('refund_id')\n .value(refund_id)\n .should_encode(True))\n .header_param(Parameter()\n .key('accept')\n .value('application/json'))\n .auth(Single('global'))\n ).response(\n ResponseHandler()\n .deserializer(APIHelper.json_deserialize)\n .is_api_response(True)\n .convertor(ApiResponse.create)\n ).execute()", "def create_budget_confirmation_invoice(self):\n confirmation_pool = self.env['account.budget.confirmation']\n currency_pool = self.env['res.currency']\n new_confirm_id = False\n flag = False\n for invoice in self:\n # v9: if invoice.invoice_type in ('purchase','sale'): super(account_invoice,self).compute_tax(cr, uid, [invoice.id], context=context)\n if invoice.journal_id.type == 'purchase':\n for invoice_line in invoice.invoice_line_ids:\n if invoice_line.account_budget_required == True:\n # v9: TEST ME if invoice_line.account_id and invoice_line.account_id.user_type_id.analytic_wk:\n if invoice_line.account_id:\n total_amount = invoice.company_id.currency_id.with_context(date=invoice.date).compute(\n invoice_line.price_subtotal, invoice.currency_id)\n amount = invoice.company_id.currency_id.with_context(date=invoice.date).compute(\n invoice_line.price_subtotal,invoice.currency_id)\n val = {\n 'reference': invoice.number,\n 'partner_id': invoice.partner_id.id,\n 'account_id': 
invoice_line.account_id.id,\n 'date': invoice.date_invoice,\n 'analytic_account_id': invoice_line.account_analytic_id and invoice_line.account_analytic_id.id,\n 'amount': total_amount or amount,\n 'residual_amount': total_amount or amount,\n #'type': self._context.get('type', 'other'),\n 'type': 'other',\n 'note': invoice_line.name or '/',\n\n }\n\n if invoice_line.invoice_line_tax_ids:\n val_amount = val.get('amount', 0)\n net_amount = 0\n total = 0\n tax_amount = 0\n tax_info = invoice_line.invoice_line_tax_ids.compute_all(invoice_line.price_unit, invoice.currency_id,\n invoice_line.quantity, invoice_line.product_id,\n invoice.partner_id)\n total += tax_info.get('total_included', 0.0)\n tax_amount += sum([t.get('amount', 0.0) for t in tax_info.get('taxes', False)])\n net_amount = tax_amount + val_amount\n val.update({'amount': net_amount or amount, })\n new_confirm_id = False\n\n if invoice_line.budget_confirm_id:\n flag = True\n # confirmation_pool.write([invoice_line.budget_confirm_id.id], val)\n # new_confirm_id = invoice_line.budget_confirm_id.id\n elif not invoice_line.budget_confirm_id:\n flag = True\n confirm = confirmation_pool.create(val)\n new_confirm_id = int(confirm)\n invoice_line.write({'budget_confirm_id': new_confirm_id})\n # v11 condition is worng ???\n # if new_confirm_id and not invoice.company_id.auto_budget:#v9: test me\n if new_confirm_id and invoice.company_id.auto_budget:\n confirmation_pool.browse(new_confirm_id).action_cancel_draft()\n confirmation_pool.browse(new_confirm_id).budget_complete()\n confirmation_pool.browse(new_confirm_id).check_budget_invoice()\n\n return flag" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the key returned from the SAT attack is correct. It does this by creating a miter circuit with a locked version and an oracle. If the diff signal returned from the miter circuit cannot be True, then the circuits are equivalent.
def _check_key(self, key):
    locked_ckt = circuit.Circuit.specify_inputs(key, self.nodes, self.output_names)
    miter = circuit.Circuit.miter(locked_ckt, self.oracle_ckt)
    s = z3.Solver()
    s.add(miter.outputs()["diff"] == True)
    return s.check() == z3.unsat
[ "def test_equal_on_equal(self):\n a = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n b = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def test_problem11(self):\n blocklen = 16\n for i in range(100):\n guess, real = cryptanalysis.encryption_detection_oracle_ecb_cbc(ciphers.black_box1, blocklen, True)\n self.assertEqual(real, guess)", "def test_not_equal_on_equal(self):\n a = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n b = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_not_equal_on_equal(self):\n a = payloads.DeriveKeyRequestPayload()\n b = payloads.DeriveKeyRequestPayload()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)\n\n a = payloads.DeriveKeyRequestPayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifiers=[\n \"fb4b5b9c-6188-4c63-8142-fe9c328129fc\",\n \"5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3\",\n \"1703250b-4d40-4de2-93a0-c494a1d4ae40\"\n ],\n derivation_method=enums.DerivationMethod.HASH,\n derivation_parameters=attributes.DerivationParameters(\n cryptographic_parameters=attributes.CryptographicParameters(\n hashing_algorithm=enums.HashingAlgorithm.SHA_256\n ),\n initialization_vector=b'\\x39\\x48\\x74\\x32\\x49\\x28\\x34\\xA3',\n derivation_data=b'\\xFA\\xD9\\x8B\\x6A\\xCA\\x6D\\x87\\xDD'\n ),\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n b = payloads.DeriveKeyRequestPayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifiers=[\n \"fb4b5b9c-6188-4c63-8142-fe9c328129fc\",\n \"5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3\",\n \"1703250b-4d40-4de2-93a0-c494a1d4ae40\"\n ],\n derivation_method=enums.DerivationMethod.HASH,\n derivation_parameters=attributes.DerivationParameters(\n cryptographic_parameters=attributes.CryptographicParameters(\n hashing_algorithm=enums.HashingAlgorithm.SHA_256\n ),\n initialization_vector=b'\\x39\\x48\\x74\\x32\\x49\\x28\\x34\\xA3',\n derivation_data=b'\\xFA\\xD9\\x8B\\x6A\\xCA\\x6D\\x87\\xDD'\n ),\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_not_equal_on_not_equal_value(self):\n a = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n b = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128b)\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def verify(cypher, key):\n\n return gluechops(cypher, 
key['e'], key['n'], encrypt_int)", "def test_partial_key_backup_verification_failure(self) -> None:\n # Arrange\n mediator = KeyCeremonyMediator(\"mediator_challenge\", CEREMONY_DETAILS)\n KeyCeremonyOrchestrator.perform_round_1(self.GUARDIANS, mediator)\n KeyCeremonyOrchestrator.perform_round_2(self.GUARDIANS, mediator)\n\n # Round 3 - Guardians only\n verification1 = self.GUARDIAN_1.verify_election_partial_key_backup(\n GUARDIAN_2_ID,\n )\n\n # Act\n failed_verification2 = ElectionPartialKeyVerification(\n GUARDIAN_1_ID,\n GUARDIAN_2_ID,\n GUARDIAN_2_ID,\n False,\n )\n mediator.receive_backup_verifications([verification1, failed_verification2])\n\n state = mediator.get_verification_state()\n\n # Assert\n self.assertTrue(state.all_sent)\n self.assertFalse(state.all_verified)\n self.assertIsNone(mediator.publish_joint_key())\n self.assertEqual(len(state.failed_verifications), 1)\n self.assertEqual(\n state.failed_verifications[0], GuardianPair(GUARDIAN_1_ID, GUARDIAN_2_ID)\n )\n\n # Act\n challenge = self.GUARDIAN_1.publish_election_backup_challenge(GUARDIAN_2_ID)\n mediator.verify_challenge(challenge)\n new_state = mediator.get_verification_state()\n all_verified = mediator.all_backups_verified()\n joint_key = mediator.publish_joint_key()\n\n # Assert\n self.assertTrue(new_state.all_sent)\n self.assertTrue(new_state.all_verified)\n self.assertEqual(len(new_state.failed_verifications), 0)\n self.assertTrue(all_verified)\n self.assertIsNotNone(joint_key)", "def test_equal_on_not_equal_value(self):\n a = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a)\n b = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128b)\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def compare_diffs(pairwise_diffs_scores, attack_direction, correct_key):\n if attack_direction == AttackDirection.OUTPUT:\n end_key = cwa.aes_funcs.key_schedule_rounds(correct_key, 0, 10)\n correct_key = np.asarray(end_key, np.uint8)\n correct_diffs = np.zeros((16, 16), np.uint8)\n for i in range(16):\n for j in range(i, 16):\n correct_diffs[i, j] = correct_key[i] ^ correct_key[j]\n correct_diffs[j, i] = correct_diffs[i, j]\n return pairwise_diffs_scores[:, :, 0] == correct_diffs", "def test_equal_on_equal(self):\n a = payloads.DeriveKeyRequestPayload()\n b = payloads.DeriveKeyRequestPayload()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)\n\n a = payloads.DeriveKeyRequestPayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifiers=[\n \"fb4b5b9c-6188-4c63-8142-fe9c328129fc\",\n \"5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3\",\n \"1703250b-4d40-4de2-93a0-c494a1d4ae40\"\n ],\n derivation_method=enums.DerivationMethod.HASH,\n derivation_parameters=attributes.DerivationParameters(\n cryptographic_parameters=attributes.CryptographicParameters(\n hashing_algorithm=enums.HashingAlgorithm.SHA_256\n ),\n initialization_vector=b'\\x39\\x48\\x74\\x32\\x49\\x28\\x34\\xA3',\n derivation_data=b'\\xFA\\xD9\\x8B\\x6A\\xCA\\x6D\\x87\\xDD'\n ),\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n b = 
payloads.DeriveKeyRequestPayload(\n object_type=enums.ObjectType.SYMMETRIC_KEY,\n unique_identifiers=[\n \"fb4b5b9c-6188-4c63-8142-fe9c328129fc\",\n \"5c9b81ef-4ee5-42cd-ba2d-c002fdd0c7b3\",\n \"1703250b-4d40-4de2-93a0-c494a1d4ae40\"\n ],\n derivation_method=enums.DerivationMethod.HASH,\n derivation_parameters=attributes.DerivationParameters(\n cryptographic_parameters=attributes.CryptographicParameters(\n hashing_algorithm=enums.HashingAlgorithm.SHA_256\n ),\n initialization_vector=b'\\x39\\x48\\x74\\x32\\x49\\x28\\x34\\xA3',\n derivation_data=b'\\xFA\\xD9\\x8B\\x6A\\xCA\\x6D\\x87\\xDD'\n ),\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def __verifyAccess(self):\r\n trypass = self.entry.get()\r\n KFPath = self.keyfilePath\r\n trykey = Key(trypass) # add key file path as second arg\r\n trykey.updatePath(KFPath)\r\n if trykey.verified():\r\n self.truepass = trypass\r\n self.key = trykey\r\n self.verified = True\r\n self.master.destroy()\r\n else:\r\n self.verified = False", "def test_secure_connections_pairing_fails_if_comparison_fails(central, peripheral):\n central_ss, peripheral_ss = init_security_sessions(central, peripheral,\n initiator_mitm=True, initiator_io_caps=\"IO_CAPS_DISPLAY_YESNO\",\n responder_mitm=True, responder_io_caps=\"IO_CAPS_DISPLAY_YESNO\")\n # Get Secure connections support\n sc_initiator = central.securityManager.getSecureConnectionsSupport().result\n sc_responder = peripheral.securityManager.getSecureConnectionsSupport().result\n\n # Skip if unsupported\n if not sc_initiator or not sc_responder:\n return\n\n central_ss.connect(peripheral_ss)\n\n # Always start peripheral_ss first\n peripheral_ss.wait_for_event()\n central_ss.start_pairing()\n\n # We should get a request\n peripheral_ss.expect_pairing_request()\n\n # Accept request\n peripheral_ss.accept_pairing_request()\n\n # Get passkeys (should be identical)\n passkey1 = peripheral_ss.expect_passkey_display()\n passkey2 = central_ss.expect_passkey_display()\n\n assert passkey1 == passkey2\n\n # Expect confirmation requests\n # FIXME: too slow to set that expectation :(\n # peripheral_ss.expect_confirmation_request()\n # central_ss.expect_confirmation_request()\n\n # Reject on one end\n peripheral_ss.enter_confirmation(True, asynchronous=True)\n central_ss.enter_confirmation(False)\n\n # Both should fail here\n peripheral_ss.expect_pairing_failure()\n central_ss.expect_pairing_failure()", "def test_pairing_fail_if_passkey_wrong(central, peripheral):\n central_ss, peripheral_ss = init_security_sessions(central, peripheral,\n initiator_mitm=True, initiator_io_caps=\"IO_CAPS_KEYBOARD_ONLY\",\n responder_mitm=True, responder_io_caps=\"IO_CAPS_KEYBOARD_ONLY\")\n central_ss.connect(peripheral_ss)\n\n # Always start peripheral_ss first\n peripheral_ss.wait_for_event()\n central_ss.start_pairing()\n\n # We should get a request\n peripheral_ss.expect_pairing_request()\n\n # Accept request\n peripheral_ss.accept_pairing_request()\n\n # Wait for passkey requests\n 
peripheral_ss.expect_passkey_request()\n central_ss.expect_passkey_request()\n\n # Input passkeys\n peripheral_ss.enter_passkey(123456, asynchronous=True)\n central_ss.enter_passkey(654321)\n\n # Both should fail here\n peripheral_ss.expect_pairing_failure()\n central_ss.expect_pairing_failure()", "def test_partial_key_backup_verification_success(self) -> None:\n # Arrange\n mediator = KeyCeremonyMediator(\"mediator_verification\", CEREMONY_DETAILS)\n KeyCeremonyOrchestrator.perform_round_1(self.GUARDIANS, mediator)\n KeyCeremonyOrchestrator.perform_round_2(self.GUARDIANS, mediator)\n\n # Round 3 - Guardians only\n verification1 = self.GUARDIAN_1.verify_election_partial_key_backup(\n GUARDIAN_2_ID,\n )\n verification2 = self.GUARDIAN_2.verify_election_partial_key_backup(\n GUARDIAN_1_ID,\n )\n\n # Act\n mediator.receive_backup_verifications([verification1])\n\n # Assert\n self.assertFalse(mediator.get_verification_state().all_sent)\n self.assertFalse(mediator.all_backups_verified())\n self.assertIsNone(mediator.publish_joint_key())\n\n # Act\n mediator.receive_backup_verifications([verification2])\n joint_key = mediator.publish_joint_key()\n\n # Assert\n self.assertTrue(mediator.get_verification_state().all_sent)\n self.assertTrue(mediator.all_backups_verified())\n self.assertIsNotNone(joint_key)", "def test_equal_on_equal(self):\n a = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n b = objects.SecretData(self.bytes_a, enums.SecretDataType.PASSWORD)\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def test_bad_key(self):\n locker = Locker.create(self.tempdir, self.content_path(), b'01234567')\n\n success = locker.unpack(self.work_path('unpacked', create=True), b'bogus', locker.mac)\n\n self.assertFalse(success)", "def test_not_equal_on_equal(self):\n a = payloads.DeriveKeyResponsePayload()\n b = payloads.DeriveKeyResponsePayload()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)\n\n a = payloads.DeriveKeyResponsePayload(\n unique_identifier=\"fb4b5b9c-6188-4c63-8142-fe9c328129fc\",\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n b = payloads.DeriveKeyResponsePayload(\n unique_identifier=\"fb4b5b9c-6188-4c63-8142-fe9c328129fc\",\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_not_equal_on_not_equal_template_attribute(self):\n a = payloads.DeriveKeyRequestPayload(\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic 
Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n b = payloads.DeriveKeyRequestPayload(\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.BLOWFISH,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=64,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)\n\n a = payloads.DeriveKeyRequestPayload(\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n b = payloads.DeriveKeyRequestPayload(\n template_attribute=objects.TemplateAttribute()\n )\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)\n\n a = payloads.DeriveKeyRequestPayload(template_attribute=None)\n b = payloads.DeriveKeyRequestPayload(\n template_attribute=objects.TemplateAttribute(\n attributes=[\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Algorithm\"\n ),\n attribute_value=primitives.Enumeration(\n enums.CryptographicAlgorithm,\n value=enums.CryptographicAlgorithm.AES,\n tag=enums.Tags.CRYPTOGRAPHIC_ALGORITHM\n )\n ),\n objects.Attribute(\n attribute_name=objects.Attribute.AttributeName(\n \"Cryptographic Length\"\n ),\n attribute_value=primitives.Integer(\n value=128,\n tag=enums.Tags.CRYPTOGRAPHIC_LENGTH\n )\n )\n ]\n )\n )\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def assertEqualTo(self,other):\n assert isinstance(other, Chemplate), \"argument to assertEqualTo must be a CP.Chemplate\"\n self_keys = self.getIDs()\n other_keys = other.getIDs()\n different_keys = self_keys - other_keys\n assert len(different_keys)==0, f\"different primary keys:{' '.join(different_keys)}\"\n for key in self_keys:\n self_item = self.getID(key)\n other_item = other.getID(key)\n different_keys = self_item.keys() - other_item.keys()\n assert len(different_keys)==0, f\"different secondary keys:{' '.join(different_keys)}\"\n for innerkey in self_item.keys():\n self_val = self.getIDattr(key,innerkey)\n other_val = other.getIDattr(key,innerkey)\n #print(f\"for {key},{innerkey} self_val:{self_val} other_val:{other_val}\")\n assert str(self_val) == str(other_val),f\"different values for key:{key} innerkey:{innerkey}: ({self_val}, {other_val})\"\n return True\n #print(\"TEMPLATE:\",pformat(answer_template_1))\n #print(\"ANSWERS:\",pformat(filled))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the topmost visible child that overlaps with time t.
def top_clip_at_time(in_stack, t): # ensure that it only runs on stacks if not isinstance(in_stack, schema.Stack): raise ValueError( "Argument in_stack must be of type otio.schema.Stack, " "not: '{}'".format( type(in_stack) ) ) # build a range to use the `each_child`method. search_range = opentime.TimeRange( start_time=t, # 0 duration so we are just sampling a point in time. # XXX Should this duration be equal to the length of one sample? # opentime.RationalTime(1, rate)? duration=opentime.RationalTime(0, t.rate) ) # walk through the children of the stack in reverse order. for track in reversed(in_stack): valid_results = [] if hasattr(track, "each_child"): valid_results = list( c for c in track.each_clip(search_range, shallow_search=True) if c.visible() ) # XXX doesn't handle nested tracks/stacks at the moment for result in valid_results: return result return None
[ "def snapshot_at_time(self, t: float) -> Snapshot:\n for i, _ in enumerate(self.snapshots[1:]):\n if t >= self.snapshots[i - 1].time and t < self.snapshots[i].time:\n return self.snapshots[i - 1]\n\n return self.snapshots[-1]", "def best_child(self, node):\n if node.player_to_move == 1:\n cmp = max(node.children, key=attrgetter(\"q\"))\n else:\n cmp = min(node.children, key=attrgetter(\"q\"))\n return choice([n for n in node.children if n.q == cmp.q])", "def find_parent_or_child_of_time(self, start_cell: fire_rs.geodata.geo_data.Cell, time: float):\n\n coord = start_cell\n found = False\n while not found:\n if self.geodata.data[self._ignition_layer][coord] <= time and time < \\\n self.geodata.data[self._ignition_end_layer][coord]:\n found = True\n else:\n uphill_cell = (coord[0] + self.geodata.data[self._prop_x_layer][coord],\n coord[1] + self.geodata.data[self._prop_y_layer][coord])\n downhill_cell = (coord[0] - self.geodata.data[self._prop_x_layer][coord],\n coord[1] - self.geodata.data[self._prop_y_layer][coord])\n if uphill_cell[0] == downhill_cell[0] and uphill_cell[1] == downhill_cell[1]:\n # Extrema reached\n coord = uphill_cell\n found = True\n next_cell = downhill_cell if self.geodata.data[self._ignition_layer][\n coord] > time else uphill_cell\n\n if (next_cell[0] < 0 or next_cell[0] >\n self.geodata.data[self._ignition_layer].shape[0] - 1) and (\n next_cell[1] < 0 or next_cell[1] >\n self.geodata.data[self._ignition_layer].shape[1] - 1):\n found = True\n if (next_cell == downhill_cell) and (\n self.geodata.data[self._ignition_layer][next_cell] >\n self.geodata.data[self._ignition_layer][coord]) or (\n next_cell == uphill_cell) and (\n self.geodata.data[self._ignition_layer][next_cell] <\n self.geodata.data[self._ignition_layer][coord]):\n # Extrema reached\n found = True\n if self.geodata.data[self._ignition_layer][next_cell] < np.inf:\n coord = next_cell\n return coord", "def find(self, t):\n node = self.root\n while node is not None:\n if t == node.data:\n return node\n elif t < node.data:\n node = node.left\n else:\n node = node.right\n return None", "def best_child_visits(self, node):\n if node.player_to_move == 1:\n cmp = max(node.children, key=attrgetter(\"visits\"))\n else:\n cmp = min(node.children, key=attrgetter(\"visits\"))\n return choice([n for n in node.children if n.visits == cmp.visits])", "def _FindLastEventWithTimeElseMostRecentEventLessthanTime_ORI(self, timeToFind):\n pos = 0\n lastoccurenceoftime = -1\n\n for event in self.mediator.story:\n eventtime = eventStrToList(event)[0]\n eventtime = eval(eventtime)\n\n if self.turnMgr.TimeToTimePosition(eventtime) <= self.turnMgr.TimeToTimePosition(\n timeToFind\n ):\n # if eventtime <= timeToFind:\n lastoccurenceoftime = pos\n pos += 1\n else:\n break\n # pos should now be positioned at the insertion point\n\n if lastoccurenceoftime != -1:\n return lastoccurenceoftime\n maxpos = len(self.mediator.story) - 1\n return min(maxpos, pos)", "def peek(self, since_time: datetime) -> list:\n peeked_entries: deque = deque()\n\n for timestamp, entry in self.entries:\n entry_was_on_or_after_requested_time = since_time <= timestamp\n if entry_was_on_or_after_requested_time:\n peeked_entries.append(entry)\n else:\n break\n\n return list(peeked_entries)", "def get_next_point(self, t): \n assert ('t' in self.mode)\n t_index = self.mode.index('t')\n res = self.first()\n while (res[t_index] < t and not self.is_empty()):\n self.pop()\n res = self.first()\n\n if self.is_empty():\n return None\n return res", "def closest_visible(self, 
node: Hashable):\n attr = self.HIDDEN_ATTR\n g, tree = self.source\n if node not in tree:\n return None\n if not is_hidden(tree, node, attr):\n return node\n predecesors = list(tree.predecessors(node))\n assert (\n len(predecesors) <= 1\n ), f\"Expected only a single parent for `{node}` not {len(predecesors)}\"\n for pred in tree.predecessors(node):\n return self.closest_visible(pred)\n raise ValueError(f\"Unable to find visible ancestor for `{node}`\")", "def UCTSelectChild(self):\r\n UCTK = 200\r\n s = sorted(self.childNodes, key=lambda c: c.wins / c.visits + UCTK * sqrt(2 * log(self.visits) / c.visits))[-1]\r\n return s", "def find_counterpart_in(self, tree_b):\n\n tree_b_hierarchy_entry = tree_b.process_hierarchy.get_by_qualifier(self.time_qualifier)\n if not tree_b_hierarchy_entry:\n # special case when tree with more levels depends on the tree with smaller amount of levels\n # for example ThreeLevel Financial tree depends on TwoLevel Google Channel\n # in this case - we just verify time-periods that matches in both trees;\n # for levels that have no match, we assume that dependency does not exists\n # for example Financial Monthly has no counterpart in Google Daily Report -\n # so we assume that its not blocked\n node_b = None\n else:\n node_b = tree_b.get_node(tree_b_hierarchy_entry.process_entry.process_name, self.timeperiod)\n\n return node_b", "def lookup(self, time, time_cutoff=None):\n\t\t#do a binary search over the point set, comparing times\n\t\tpos = bisect(self.times, time)\n\t\tposition = None\n\t\tif pos==self.point_count:\n\t\t\tposition = self.points[pos-1]\n\t\telif pos>0 and (time - self.times[pos-1]) < (self.times[pos]-time):\n\t\t\t#check which of the two adjacent times is closer to time\n\t\t\tposition = self.points[pos-1]\n\t\telse:\n\t\t\tposition = self.points[pos]\n\n\t\tif time_cutoff is None or abs(position.time - time) <= time_cutoff:\n\t\t\treturn position\n\t\treturn None", "def _findFirst(self, t):\n if self.isEmpty():\n return -1\n\n self._updateActivePriorities()\n\n currentJobs = [(i, job) for (i,job) in enumerate(self.jobs) if job.releaseTime <= t]\n if len(currentJobs) == 0:\n return -1\n\n currentJobs.sort(key = lambda x: (self.activePriorities[x[1]], x[1].task.id, x[1].id))\n\n if self.protocol == 'PIP':\n highestActive = self.activePriorities[currentJobs[0][1]]\n highestJobs = []\n for (i, job) in currentJobs:\n if self.activePriorities[job] == highestActive:\n highestJobs.append((i, job))\n #highestJobs = filter(lambda x: self.activePriorities[x[1]] == self.activePriorities[x[1]], currentJobs)\n for (i, job) in highestJobs:\n if job.getResourceHeld() is not None:\n return i\n\n return currentJobs[0][0] # get the index from the tuple in the 0th position", "def range_of_child(self, child):\n\n return self.tracks.range_of_child(child)", "def UCTSelectChild(self):\n UCTK = 200\n s = sorted(self.childNodes, key=lambda c: (c.wins / c.visits) + (UCTK * sqrt(2 * log(self.visits) / c.visits)))[-1]\n return s", "def lastVisibleNodeOf(self, node):\n if len(node.children ) == 0 or not node.expanded:\n return node\n return self.lastVisibleNodeOf(node.children[-1])", "def topmost(pts):\n return withmax(ycoord, pts)", "def findTwin(self, tid, flow0, twins):\n # Common ancestry implies corresponding tasks always occupy the same\n # dependency level. 
This greatly reduces the search domain.\n level = self._tasklevs[tid]\n if level in flow0._depsmap:\n match = None\n for tid0 in flow0._depsmap[level]:\n if not twins.has_key(tid0):\n # Find all tasks with same ancestors; return the closest match.\n if self.sameLineage(tid, tid0, flow0, twins, match):\n match = tid0\n elif twins[tid0] == tid:\n return tid0\n\n return match", "def get_event_by_timestamp(self, time: dt):\n # ensure that the given time uses the same timezone as the computer\n now = dt.now()\n time = time.astimezone(now.tzinfo)\n\n events = self.get_events()\n filtered_events = []\n # find the wanted event\n for e in events:\n event_start = next(v for k, v in e[\"start\"].items() if \"date\" in k)\n event_start = dt.fromisoformat(event_start).astimezone(now.tzinfo)\n\n event_end = next(v for k, v in e[\"end\"].items() if \"date\" in k)\n event_end = dt.fromisoformat(event_end).astimezone(now.tzinfo)\n\n # check if the given time is between the start and end of an event\n if time >= event_start and time <= event_end:\n filtered_events.append(e)\n return filtered_events" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flatten a Stack, or a list of Tracks, into a single Track. Note that the 1st Track is the bottom one, and the last is the top.
def flatten_stack(in_stack): flat_track = schema.Track() flat_track.name = "Flattened" # map of track to track.range_of_all_children range_track_map = {} def _get_next_item( in_stack, track_index=None, trim_range=None ): if track_index is None: # start with the top-most track track_index = len(in_stack) - 1 if track_index < 0: # if you get to the bottom, you're done return track = in_stack[track_index] if trim_range is not None: track = track_algo.track_trimmed_to_range(track, trim_range) track_map = range_track_map.get(track) if track_map is None: track_map = track.range_of_all_children() range_track_map[track] = track_map for item in track: if ( item.visible() or track_index == 0 or isinstance(item, schema.Transition) ): yield item else: trim = track_map[item] if trim_range is not None: trim = opentime.TimeRange( start_time=trim.start_time + trim_range.start_time, duration=trim.duration ) track_map[item] = trim for more in _get_next_item(in_stack, track_index - 1, trim): yield more for item in _get_next_item(in_stack): flat_track.append(copy.deepcopy(item)) return flat_track
[ "def getFirstTrack(self):\n\n selectedTracks = self.getSelectedTracks()\n if not selectedTracks:\n return None\n\n t = selectedTracks[0]\n p = self.getPreviousTrack(t)\n while p != None:\n t = p\n p = self.getPreviousTrack(p)\n\n return t", "def convert_to_stack(deck):\n return Stack(list(deck.cards))", "def reverse_stack(stack):\n\n holder_stack = Stack()\n while not stack.is_empty():\n popped_element = stack.pop()\n holder_stack.push(popped_element)\n\n return holder_stack", "def flattened_metadata(self):\n self.__log.call()\n\n snapshot = self.metadata_snapshot\n snapshot.pop(\"__custom\") # already incorporated into each track\n\n # to \"flatten\" the metadata, just add the album metadata to each track\n flattened = snapshot.pop(\"__tracks\")[1:] # zero-based indexing here\n for i in range(len(flattened)):\n flattened[i].update(snapshot)\n\n self.__log.return_(flattened)\n return flattened", "def overlay(tracks):\n main_track = tracks[0]\n for track in tracks[1:]:\n main_track = main_track.overlay(track, loop=True)\n return main_track", "def reverse(stack):\n new_stack = LifoQueue()\n while not stack.empty():\n top_thing = stack.get()\n new_stack.put(top_thing)\n return new_stack", "def copy_stack(stack: Stack) -> Stack:\n temp = Stack()\n result = Stack()\n while not stack.is_empty():\n temp.add(stack.remove())\n\n while not temp.is_empty():\n item = temp.remove()\n stack.add(item)\n result.add(item)\n\n return result", "def stack_to_tensor(self, stack, transpose = True):\n if transpose: return torch.stack([self.to_tensor(image) for image in stack], dim=0).type(torch.FloatTensor).transpose(0,1)\n else: return torch.stack([self.to_tensor(image) for image in stack], dim=0).type(torch.FloatTensor)", "def stack_nests(nests, dim=0):\n if len(nests) == 1:\n return nest.map_structure(lambda tensor: tensor.unsqueeze(dim),\n nests[0])\n else:\n return nest.map_structure(lambda *tensors: torch.stack(tensors, dim),\n *nests)", "def get_top_tracks_from_artist(self, artist, market=None):\n q = {\"country\": market or self.get_market()}\n url = \"artists/{}/top-tracks\".format(artist['id'])\n result = self.get_api_v1(url, q)\n\n if result:\n return tuple(Track(t) for t in result[\"tracks\"])\n else:\n return []", "def top_clip_at_time(in_stack, t):\n\n # ensure that it only runs on stacks\n if not isinstance(in_stack, schema.Stack):\n raise ValueError(\n \"Argument in_stack must be of type otio.schema.Stack, \"\n \"not: '{}'\".format(\n type(in_stack)\n )\n )\n\n # build a range to use the `each_child`method.\n search_range = opentime.TimeRange(\n start_time=t,\n # 0 duration so we are just sampling a point in time.\n # XXX Should this duration be equal to the length of one sample?\n # opentime.RationalTime(1, rate)?\n duration=opentime.RationalTime(0, t.rate)\n )\n\n # walk through the children of the stack in reverse order.\n for track in reversed(in_stack):\n valid_results = []\n if hasattr(track, \"each_child\"):\n valid_results = list(\n c for c in track.each_clip(search_range, shallow_search=True)\n if c.visible()\n )\n\n # XXX doesn't handle nested tracks/stacks at the moment\n\n for result in valid_results:\n return result\n\n return None", "def tracksplit(self):\n return [self.clone(shallow=True).setattribute('_trackindex', k).tracks(t).activityfilter(lambda a: a.hastrack(tk)) for (k,(tk,t)) in enumerate(self.tracks().items())]", "def displayStack(myStack):\n stackCopy = myStack[:]\n while len(stackCopy) > 0:\n # get the value from the top of the stack...\n crumb = stackCopy.pop()\n\n # 
...and display it\n print(crumb)", "def getLastTrack(self):\n\n selectedTracks = self.getSelectedTracks()\n if not selectedTracks:\n return None\n\n t = selectedTracks[0]\n n = self.getNextTrack(t)\n while n != None:\n t = n\n n = self.getNextTrack(n)\n\n return t", "def get_back_tags():\n tracks = Track.objects.all()\n for t in tracks:\n tags = _tags_for_track(t.artist_name, t.track_name)\n t.tags = \" \".join(tags)\n t.save()\n print t", "def tlist_to_flat(trajs):\n # Check all trajectories are same order tensors.\n traj_orders = np.array([len(np.shape(ti)) for ti in trajs])\n if np.any(traj_orders != traj_orders[0]):\n raise ValueError(\"Input Trajectories have varying dimension\")\n if traj_orders[0] == 1:\n trajs = [t_i.reshape(-1, 1) for t_i in trajs]\n # Get dimensions of traj object.\n d = len(trajs[0][0])\n # Populate the large trajectory.\n traj_2d = []\n traj_edges = [0]\n len_traj_2d = 0\n for i, traj in enumerate(trajs):\n # Check that trajectory is of right format.\n if len(np.shape(traj)) != 2:\n raise ValueError('Trajectory %d is not two dimensional!' % i)\n d2 = np.shape(traj)[1]\n if d2 != d:\n raise ValueError('Trajectories are of incompatible dimension. The first trajectory has dimension %d and trajectory %d has dimension %d' % (d, i, d2))\n traj_2d += list(traj)\n len_traj_2d += len(traj)\n traj_edges.append(len_traj_2d)\n return np.array(traj_2d), np.array(traj_edges)", "def Flatten(self, tensors):\n if self._is_no_op:\n return tensors\n flat_tensors = [\n tf.reshape(t, self._GetFlatShape(t)) for t in tf.nest.flatten(tensors)\n ]\n return self._PackAs(tensors, flat_tensors)", "def get_dataset_representation_from_track(track, feature_qty=10, prediction_qty=1):\n\n x = []\n y = []\n if len(track) < feature_qty + prediction_qty:\n raise Exception(\"Track is shorter than the moving window size {0} for dataset representation.\".format(feature_qty + prediction_qty))\n\n sample_qty = len(track) - prediction_qty - feature_qty # number of samples we can generate from one track if we always increment by one tone\n\n for first_note_index in range(sample_qty):\n features = track[first_note_index:first_note_index + feature_qty]\n prediction = track[first_note_index + feature_qty: first_note_index + feature_qty + prediction_qty]\n \n x.append(features)\n y.append(prediction)\n \n return np.array(x), np.array(y)", "def chop(t):\n t.pop(0)\n t.pop()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
1) Gets subscribers of feed. 2) Checks subscribers' entries to find passive feeds. 3) Returns active_feeds, passive_feeds
def subscribers_of(feed_id):
    subscribers = []
    try:
        feed_info = ff_api.fetch_feed_info(feed_id)
    except urllib2.HTTPError:
        feed_info = None
        print "Couldn't read subscribers:", feed_id

    if feed_info:
        print "Feed info fetched:", feed_info['id']

        # get subscribers
        subscribers = feed_info['subscribers']

        # filter as user
        subscribers = filter(lambda f: f['type']=="user", subscribers)
    else:
        subscribers = []

    return subscribers
[ "def get_feeds(self):\n return self.feeds", "def subscribers_for(item_uid):", "def get(self):\n feeds = []\n with self.get_db_session() as session:\n user = session.query(User).get(self.require_auth(session))\n for feed in user.subscriptions:\n feeds.append({\n 'id': feed.id,\n 'name': feed.title,\n 'url': feed.site_url,\n 'image_url': feed.image_url,\n 'unreads': user.get_num_unread_entries(feed),\n })\n self.write({'feeds': feeds})\n self.set_status(200)", "def refresh_rss_feeds(self):\n ## > IMPORTS ##\n import dryxPython.webcrawlers as wc\n import dryxPython.mysql as m\n import dryxPython.commonutils as cu\n\n ## >SETTINGS ##\n\n ## LOGGING HEADER ##\n log.info('<m> STARTING TO REFRESH THE FEEDS FOUND IN ' +\n self.subscriptionTable + '<m>')\n\n ###########################################################\n # >ACTION(S) #\n ###########################################################\n # CREATE DOWNLOADS DIRECTORY\n cu.dryx_mkdir(self._downloadDirectory)\n\n # READ THE FEED NAMES AND URLS FROM SUBSCRIPTION TABLE\n sqlQuery = 'SELECT rssFeedName, feedURL, rssFeedSource, dateLastRead, uniqueKeyCols from ' + \\\n self.subscriptionTable\n try:\n log.debug(\"attempting to reading feed data from the subscription table : %s\" % (\n self.subscriptionTable,))\n feeds = m.execute_mysql_read_query(sqlQuery, dbConn, log)\n except Exception, e:\n log.error(\"could not reading feed data from the subscription table : %s - failed with this error %s: \" %\n (self.subscriptionTable, str(e),))\n return -1\n\n # DOWNLOAD THE FEED CHANNEL XML FILES AND SWITCH TO LOCAL URL\n remoteURLList = []\n for feed in feeds:\n remoteURLList += [feed['feedURL']]\n try:\n log.debug(\"attempting to downloading the feed channel xml files\")\n localUrls = wc.multiWebDocumentDownloader(\n remoteURLList, self._downloadDirectory, 1)\n except Exception, e:\n log.error(\n \"could not downloading the feed channel xml files - failed with this error %s: \" % (str(e),))\n return -1\n\n ifc = 0\n for feed in feeds:\n feed['remoteFeedUrl'] = feed['feedURL']\n feed['feedURL'] = localUrls[ifc]\n ifc += 1\n\n # INSTANTIATE THE XML FILE OBJECT\n xf = xml_file()\n xf.feedUrl = feed['feedURL']\n xf.rssFeedName = feed['rssFeedName']\n\n # DETERMINE UNQUIE KEY\n ukCols = str.split(feed['uniqueKeyCols'])\n\n # CHANNEL ITEMS = BASE LEVEL XML FEED METADATA - THE NEWS/CONTENT\n # GRAB THE LIST OF XML ITEM DICTIONARIES\n xml_channel_items = xf.get_channel_items()\n # ADD EXTRA COLUMNS TO THE DICTIONARY\n now = str(cu.get_now_sql_datetime())\n for item in xml_channel_items:\n item['dateCreated'] = now\n item['dateLastModified'] = now\n item['awaitingAction'] = 1\n item['rssFeedUrl'] = feed['remoteFeedUrl']\n item['rssFeedName'] = feed['rssFeedName']\n item['rssFeedSource'] = feed['rssFeedSource']\n\n feedTableName = self._feedTablePrefix + feed['rssFeedName']\n feedTableName = cu.make_lowercase_nospace(feedTableName)\n\n # APPEND THE DATA TO THE TABLE\n try:\n log.debug(\"attempting to 'adding data to the %s table\" %\n (feedTableName,))\n for i in range(len(xml_channel_items)):\n log.debug('here is the element dictionary: %s' %\n (str(xml_channel_items[i].keys()),))\n m.convert_dictionary_to_mysql_table(\n dbConn, xml_channel_items[i], feedTableName, ukCols)\n except Exception, e:\n log.error(\"could not 'adding data to the %s table - failed with this error %s: \" %\n (feedTableName, str(e),))\n return -1\n\n ## LOGGING FOOTER ##\n log.info('<m> SUCCESSFULLY ATTEMPTED TO REFRESH THE FEEDS FOUND IN ' +\n self.subscriptionTable + '<m>')\n\n return 
None", "def user_feeds(request):\n subscription, created = Subscription.objects.get_or_create(user=request.user)\n if created:\n return redirect(reverse('feed_index'))\n feeds = request.user.subscription.feeds.filter(published=True)\n return render_to_response('fast_userpage.html',\n {\n 'feeds':feeds,\n 'title':'Your Feeds',\n },RequestContext(request))", "def test_subscibe_multiple(self):\n request = self.factory.get(\n '/feeder/subscribe_user_to_feed/?username=Mohit&feedname=Yoga')\n response = subscribe_user_to_feed(request)\n response = subscribe_user_to_feed(request)\n request = self.factory.get('/feeder/get_user_feeds/?username=Mohit')\n response = get_user_feeds(request)\n self.assertEqual(response.content, 'Yoga')", "def subscribeFeed(feeds, uri):\n feeds_found = feedfinder.getFeeds(uri)\n\n if len(feeds_found) == 0: \n raise SubsNoFeedsFound(uri)\n elif len(feeds_found) > 1: \n raise SubsMultipleFeedsFound(uri, feeds_found)\n else:\n feed_uri = feeds_found[0]\n if feed_uri in feeds:\n raise SubsAlreadySubscribed(feed_uri)\n feeds.append(feed_uri)\n return feed_uri", "def apply_filters(self, filters, starred):\n\n feed_count = 0\n item_count = 0\n processed_feeds = set()\n\n try:\n print u\"Retrieving subscribed feeds...\"\n subs_list = self._subscription_list()\n if starred:\n print u\"Retrieving starred items...\"\n else:\n print u\"Retrieving unread items...\"\n self._retrieve_entries(starred)\n except KeyboardInterrupt:\n exit(\"cancelled\")\n\n print u\"Applying filters...\"\n\n universal_patterns = filters.get(u\"*\", [])\n\n for tag in subs_list:\n tag_has_matching_feeds = False\n for feed in subs_list[tag]:\n # get the applicable filters\n patterns = universal_patterns[:]\n try:\n patterns.extend(filters[feed[u\"title\"]])\n except KeyError:\n pass\n\n if not feed[u\"feed_id\"] in processed_feeds:\n processed_feeds.add(feed[u\"feed_id\"])\n\n if not patterns:\n # skip to next feed\n continue\n\n # since there are applicable patterns, the current tag has at least one matching feed\n if not tag_has_matching_feeds:\n tag_has_matching_feeds = True\n print u\"\\n{}\\n{}\".format(tag, u\"=\" * len(tag))\n\n feed_count += 1\n items_found = self._apply_filter(feed, patterns)\n if items_found is not None:\n print u\"found {}.\".format(items_found)\n item_count += items_found\n\n if self.to_be_filtered:\n self._filter(starred)\n\n return feed_count, item_count", "def feed_exists(self, feed):\n if not self.listening:\n if not feed in self.feeds:\n if self.redis.sismember('feeds', feed):\n self.feeds.add(feed)\n return True\n return False\n else:\n return True\n return feed in self.feeds", "def getFeeds(self):\n\t\tfor i in range(len(self.getURLs())):\n\t\t\tself.feed[i] = rss.parse(self.getURLs()[i])\n\n\t\treturn self.feed", "def does_sub_exist(self, feed):\n row = (feed,)\n self.cursor.execute('SELECT COUNT (*) FROM subscriptions WHERE feed = ?', row)\n return_string = str(self.cursor.fetchone())[1]\n if return_string == \"0\":\n return 0\n else:\n return 1", "def list_newsletterusers_by_active():\n\n return NewsLetterMailing.objects.filter(is_active=True)", "def send_rfps_to_subscribers(self):\r\n\r\n results = []\r\n subs = Subscription.all()\r\n for sub in subs:\r\n\r\n try:\r\n\r\n # Grab what user info, add first, last name later\r\n user = User.query(query.FilterNode('username', '=', sub.username)).get()\r\n\r\n # Ensure the the sub's username is associated with an actual account\r\n # by checking if the email exists.\r\n if user.email:\r\n self._send_rfps_to_subscribers(sub, 
user.first_name, user.email, results)\r\n else:\r\n msg = 'No email found for username: %s and keyword: %s' % (sub.username, sub.keyword)\r\n\r\n logging.info(msg)\r\n results.append('Error: ' + msg)\r\n except:\r\n msg = 'Problem with sending RFPs for some subscription, maybe bad user object'\r\n logging.info(msg)\r\n results.append('Error: ' + msg)\r\n\r\n\r\n return results", "def get_webhook_subscriptions():\r\n MSGRAPH.base_url = config.RESOURCE \r\n # print(\"MSGRAPH.base_url\", MSGRAPH.base_url) \r\n subscriptions = MSGRAPH.get(config.ISG_VERSION + '/subscriptions').data\r\n print(\"Active subscriptions :\", subscriptions)\r\n if b'' in subscriptions:\r\n print(\"Please Sign-in using a on.microsoft.com account for demo data\")\r\n subscriptions = None\r\n elif 'error' in subscriptions:\r\n if subscriptions['error']['code'] == 'InvalidAuthenticationToken':\r\n\r\n return flask.redirect(flask.url_for('login'))\r\n\r\n MSGRAPH.base_url = config.RESOURCE + config.API_VERSION + '/'\r\n return subscriptions", "def subscribe_to_PDS():\n global pds_feeds\n # subscribe to PDS feeds and store them\n pds_feeds = [rospy.Subscriber('wheel_motor_currents', Currents, wheel_current_callback),\n rospy.Subscriber('wheel_motor_voltages', Voltages, wheel_voltage_callback),\n rospy.Subscriber('arm_motor_currents', Currents, arm_current_callback),\n rospy.Subscriber('arm_motor_voltages', Voltages, arm_voltage_callback),\n rospy.Subscriber('control_current', Currents, control_current_callback),\n rospy.Subscriber('control_voltage', Voltages, control_voltage_callback)]", "async def check_feed(self, feed: dict) -> None:\n for item in feed[\"items\"]:\n await self.check_item(item)", "def getSubscriptions(state=None):", "def FindFeeds():\n rss_page = \"http://www.latimes.com/services/site/la-rssinfopage,0,5039586.htmlstory\"\n\n html = ukmedia.FetchURL( rss_page )\n soup = BeautifulSoup( html )\n\n feeds = []\n div = soup.find('div',{'id':'story-body'} )\n for td in div.table.findAll('td', {'class':'rssTitleCell'} ):\n a = td.a\n url = urlparse.urljoin( rss_page, a['href'] )\n\n title = ukmedia.FromHTMLOneLine( a.renderContents(None) )\n feeds.append( (title,url) )\n\n return feeds", "async def join(feeds: str) -> None:\n global _feeds\n\n feeds = json.loads(feeds)\n\n subtract = [x for x in feeds.keys() if x not in _feeds.keys()]\n _feeds = { **_feeds, **feeds}\n for feed in subtract:\n await check_role_channel(feed)\n\n save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the event is a private Slack channel
def is_private(event): return event.get('channel').startswith('D')
[ "def is_private(channel):\n\treturn isinstance(channel, discord.abc.PrivateChannel)", "def is_private_check(message):\r\n if(message.channel != message.author.dm_channel):\r\n message.content = \"is_private\"\r\n return message.channel == message.author.dm_channel", "def is_open(channel):\n return channel.topic is None", "def is_ww_game_channel(ctx):\n return ctx.message.channel.id in ww_game_channel_list", "async def check_controlled_channels(message):\n channel = message.channel\n id = message.server.id\n found = False\n for private_channel_pair in servers[id][\"created_channels\"]:\n user_channel = private_channel_pair.user_channel\n admin_channel = private_channel_pair.admin_channel\n if channel.id == user_channel.id:\n author = message.author.mention\n mirror_message = f\"{author}: {message.content}\"\n await bot.send_message(admin_channel, mirror_message)\n found = True\n if channel.id == admin_channel.id:\n if message.content.startswith(prefix):\n message_no_prefix = message.content.lstrip(prefix)\n if message_no_prefix.startswith(\"resolve\"):\n await bot.delete_channel(user_channel)\n await bot.delete_channel(admin_channel)\n else:\n admin_message = f\"Admins: {message_no_prefix}\"\n await bot.send_message(user_channel, admin_message)\n found = True\n return found", "def check_channel(self, remote):\n remote_id = remote.id()\n for c in self.rpc.channels():\n channel = self.rpc.channel(c)\n if channel['nodeid'] == remote_id:\n return channel['state'] == 'NORMAL'\n return False", "def should_handle(self, msg) -> bool:\n return (\n bool(msg.parameters)\n and msg.command == \"PRIVMSG\"\n and msg.parameters[0] in self.config.channels\n )", "def is_channel(message):\n try:\n # because 'message' argument is passed as a string without the @ sign, we\n # add this sign with .format()\n name_or_not = bot.get_chat(\"@{}\".format(message)).type\n except Exception:\n return False\n\n if name_or_not == \"channel\":\n return True\n else:\n return False", "def IsPrivate(self) -> bool:", "def check_on_guild(ctx: commands.Context) -> bool:\n if ctx.guild is None:\n raise errors.NoPrivateMessage()\n return True", "def is_server_check(message):\r\n if(message.channel == message.author.dm_channel):\r\n message.content = \"is_server\"\r\n return message.channel != message.author.dm_channel", "def acceptPrivateMessage(self, channel, connection, message, subchannel):\n return True", "def check_cooldown(db: Database, channel_name: str) -> bool:\n if channel_name[0] == \"#\":\n channel_name = channel_name[1:]\n log.error(\"Someplace in the code is using channels with #.\")\n cooldown_time = cooldowns[channel_name] if channel_name in cooldowns else None\n if cooldown_time is None:\n return False # no cooldown found.\n cooldown = db.get_cd(channel_name)\n if cooldown is None:\n cooldown = int(settings[\"default_cooldown\"])\n return not datetime.datetime.utcnow() - cooldown_time > timedelta(seconds=cooldown)", "def check_open_event(event: Event):\n if not event.is_open:\n raise errors.Forbidden('The event is not open.')", "def check_channel_request(self, kind, chanid):\n if kind == 'session':\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED", "def in_voice_channel():\n def predicate(ctx):\n if ctx.author.voice is None:\n return False\n return True\n return commands.check(predicate)", "def bot_and_user_direct_channel(channel):\n name = channel[\"name\"]\n\n user_chan = driver.user_id in name\n bot_chan = MAIN_BOT_ID in name\n # D = direct message channel\n direct = 
channel[\"type\"] == \"D\"\n\n return user_chan and bot_chan and direct", "def can_embed(ctx):\r\n return isinstance(ctx.channel, discord.DMChannel) or ctx.channel.permissions_for(ctx.guild.me).embed_links", "def was_subscribed(self, ctx) -> bool:\n return ctx.channel.id in self.channels.keys()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Says hi to the user by formatting their mention
def say_hi(user_mention): response_template = random.choice(['Sup, {mention}...', 'Yo! {mention}', 'Ni hao']) return response_template.format(mention=user_mention)
[ "async def say_hi(self, to):\n name = to\n if to == 'me':\n name = self._message.author.name\n return f'Hello {name}, how are you?'", "def greet_user(self):\n print(\"Greetings \" + self.f_name.title() + \" \" + self.l_name.title() + \" we hope you enjoy your stay with us!\")", "def new_msg(word, word_eol, userdata):\n user = xchat.strip(word[0])\n # If the user logged in before we did (which means the Join part of\n # filter_msg didn't take effect), add him to the dict.\n if user not in last_seen:\n last_seen[user]= [time(), 1]\n # If the user has never spoken before, let us know when he logged in.\n if last_seen[user][1] == 0:\n time_diff = time() - last_seen[user][0]\n # Bold the username and color the text if it's a hilight\n if \"Hilight\" in userdata:\n s_user = \"\\002\" + word[0]\n s_msg = \"\\017\\00319\" + word[1]\n else:\n s_user = word[0]\n s_msg = \"\\017\" + word[1]\n if \"Action\" in userdata:\n s_user = \"\\00319*\\t%s \" % s_user\n else:\n s_user += '\\t'\n xchat.prnt(\"%s%s \\00307(logged in %ss ago)\" % (s_user, s_msg,\n int(time_diff)))\n last_seen[user]= [time(), 1]\n return xchat.EAT_XCHAT\n else:\n last_seen[user]= [time(), 1]", "def create_welcome_message(username):\n general_greetings_list = [\"hello\", \"hi\", \"welcome\"]\n secondary_statement_list = [\"hope you're having a great day!\",\n \"miao miao miao (that's cat for have a good day)!\",\n \"enjoy!\",\n \"good luck!\",\n \"happy writing!\"]\n first = random.choice(general_greetings_list)\n uname = username.capitalize()\n second = random.choice(secondary_statement_list)\n msg = first + \" \" + uname + \"! \" + second\n return msg", "def mention_as_text(mention):\n name = mention['name']\n symbol = modality_symbol[mention['choice_id']]\n return '{}{}'.format(symbol, name)", "def say_hello(self, message, args):\n if args.favorite_number is None:\n return f'Hello {args.name}.'\n else:\n return f'Hello {args.name}, I hear your favorite number is {args.favorite_number}.'", "def greet_user(self):\n print(\"\\nHello, \" + self.full_name.title() + \"!\")", "def inform(msg: str):\n # Dynamic user update messages\n print(\" %-80s\" % msg, end=\"\\r\", flush=True)", "def say(self, msg):\n if self.silent:\n return\n\n tts_msg = (msg.replace(u'½', u' and a half')\n .replace(u' AM', ' A.M.')\n .replace(u' PM', ' P.M.')).lstrip()\n if tts_msg.startswith('and a half'):\n tts_msg = tts_msg[6:]\n\n tts_msg = tts_msg.encode('utf-8')\n\n proc = subprocess.Popen([\"festival\", \"--tts\"], # nosec\n stdin=subprocess.PIPE)\n proc.stdin.write(tts_msg)\n proc.stdin.close()", "async def mentioned(message: discord.Message, member: discord.Member=Annotate.Self):\n after = datetime.utcnow() - timedelta(hours=24)\n was_found = False\n await client.send_typing(message.channel)\n\n # Go through all messages since 24 hours ago\n async for mention_message in client.logs_from(message.channel, limit=5000, before=message, after=after):\n if member not in mention_message.mentions:\n continue\n\n was_found = True\n\n # Format the message when it's found, along with messages from prior 15 seconds and latter 15 seconds\n after = mention_message.timestamp - timedelta(seconds=15)\n message_content = []\n async for nm in client.logs_from(message.channel, limit=50, after=after, before=after + timedelta(seconds=30)):\n if nm.author == mention_message.author:\n # Add an invisible separator and some spaces for an indent effect\n message_content.append(\"\\N{INVISIBLE SEPARATOR}\" + \" \" * 4 + nm.clean_content)\n\n found_message = await 
client.say(message, \"**{0} - {1:%A, %d %B %Y %H:%M}**\\n{2}\".format(\n mention_message.author.display_name, after, \"\\n\".join(reversed(message_content))))\n\n # The member will be able to search for another mention by typing next\n next_message = await client.say(message, \"Type `next` to expand your search.\")\n reply = await client.wait_for_message(timeout=30, author=member, channel=message.channel, content=\"next\")\n\n # Remove the previously sent help message and break if there was no response\n if reply is None:\n await client.delete_message(next_message)\n break\n\n await client.delete_messages([found_message, reply, next_message])\n await client.send_typing(message.channel)\n else:\n await client.say(message, \"{} mentioning you in the last 24 hours.\".format(\n \"Found no more messages\" if was_found else \"Could not find a message\"))", "async def smack(self, ctx):\n mess = ctx.content.split(' ', 1)[1]\n smack = discord.Embed(color=self.client.embed_color)\n smack.add_field(name=\"You have made someone very upset...\",\n value=f\"*{mess} has been smacked by a robot hand!*\")\n await ctx.channel.send(embed=smack)", "def hello(self, message, args):\n if args.favorite_number is None:\n return \"Hello {name}\".format(name=args.name)\n else:\n return \"Hello {name}, I hear your favorite number is {number}\".format(\n name=args.name, number=args.favorite_number\n )", "async def user_info(self, ctx, *, target: discord.Member = None):\r\n # Set words according to who is the target.\r\n if target is None:\r\n target = ctx.message.author\r\n p1, p2, p3 = 'Your', 'You', 'have'\r\n elif target.bot:\r\n p1, p2, p3 = 'Its', 'It', 'has'\r\n else:\r\n p1, p2, p3 = 'Their', 'They', 'have'\r\n # Determine certain properties and text regarding the target\r\n nick = target.display_name\r\n username = f'{target.name}#{target.discriminator}'\r\n join_time = target.joined_at\r\n # Say a member's top role if they have one beyond @everyone\r\n if len(target.roles) != 1:\r\n role = target.top_role\r\n r_msg = f'{p1} top role is {role}.'\r\n else:\r\n r_msg = f'{p2} {p3} no special roles.'\r\n # Point out if the member is a bot\r\n bot_msg = f'{nick} is a bot' if target.bot else ''\r\n # Send the message\r\n await ctx.send(f'Full username: {username}.\\n'\r\n f'{p2} joined at {join_time}.\\n'\r\n f'{r_msg} {bot_msg}')", "def send():\n user_question = EntryBox.get(\"1.0\", 'end-1c').strip()\n EntryBox.delete(\"0.0\", END)\n ChatLog.config(state=NORMAL)\n if (user_question != ''):\n ChatLog.insert(END, user_question + '\\n\\n', 'you_text')\n ChatLog.update()\n\n ChatLog.insert(END, \"Bot: \", 'bot')\n ChatLog.update()\n\n # Get answer for the user question\n answer = ml.get_answer_for_most_similar_title(user_question)\n\n# for letter in ml.get_letter_at_random_interval(answer):\n for letter in answer:\n ChatLog.insert(END, letter, 'bot_text')\n ChatLog.update()\n ChatLog.yview(END)\n\n ChatLog.insert(END, '\\n\\n', 'bot_text')\n ChatLog.insert(END, \"You: \", 'you')\n ChatLog.update()\n ChatLog.config(state=DISABLED)\n ChatLog.yview(END)", "async def hello(ctx):\n await ctx.send(f\"{ctx.author.mention} hello!\")", "def greet_and_update(name='friend', day=1):\n print('Hello {}. 
It\\'s your day {}.'.format(name, day))", "def say(self, phrase):\n if self.use_Nao:\n print(phrase)\n naoqiutils.speak(phrase)\n else:\n print(phrase)", "def message_author(msg, include_email, hide_email=True):\n if msg['authorName'] and msg['authorName'] != msg['profile']:\n res = \"%s (%s)\" % (msg['authorName'], msg['profile'])\n else:\n res = \"%s\" % (msg['profile'],)\n\n if include_email and msg['from']:\n if hide_email:\n disp = msg['from'].rsplit(\"@\", 1)[0] + \"@...\"\n else:\n disp = msg['from']\n res += \" <%s>\" % disp\n\n return res", "def homophone_suggest(self, sc, event, *args):\n try:\n text_words = [strip_punctuation(word) for word in\n event[0]['text'].lower().split(' ')\n if strip_punctuation(word) in self.homophones.keys()]\n\n for word in text_words:\n message = \"Hey <@{u}>!\\n\\tYou typed {k}, but you probably meant {v}.\".\\\n format(u=event[0]['user'],\n k=word,\n v=self.homophones[word])\n sc.rtm_send_message(event[0]['channel'], message)\n\n except KeyError:\n if 'type' not in event[0].keys():\n logger.debug(\"Don't worry about this one.\")\n logger.debug(event)\n else:\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a list of words as input and returns a list of the n most frequently occurring words ordered from most to least frequently occurring.
def get_top_n_words(word_list, n): word_frequencies = {} for word in word_list: word_frequencies[word.lower()] = word_frequencies.get(word.lower(), 0) + 1 top_words = sorted(word_frequencies, key=word_frequencies.get, reverse=True)[:n] return [(word_frequencies[word], word) for word in top_words]
[ "def most_frequent_words(text, n, lowercase=False):\r\n # YOUR CODE HERE\r\n\r\n from collections import Counter\r\n\r\n if lowercase:\r\n words = [word.strip().lower() for word in text.split()]\r\n else:\r\n words = [word.strip() for word in text.split()]\r\n\r\n word_count = Counter(words)\r\n # most_freq = list(word_count.most_common(n))\r\n\r\n most_freq_list = []\r\n for i,j in word_count.most_common(n):\r\n most_freq_list.append(i)\r\n\r\n return most_freq_list\r\n\r\n pass", "def count_words(s, n):\n \n # TODO: Count the number of occurences of each word in s\n words = set()\n for word in s.split():\n count = 0\n for iterat in s.split():\n if word == iterat:\n count += 1\n words.add((word, count))\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\n \n # TODO: Return the top n words as a list of tuples (<word>, <count>)\n top_n = sorted(list(words), key=lambda x: (-x[1], x[0]))\n \n return top_n[:n]", "def get_top_n_words(corpus, n=None):\r\n vec = CountVectorizer().fit(corpus)\r\n bag_of_words = vec.transform(corpus)\r\n sum_words = bag_of_words.sum(axis=0)\r\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]\r\n words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)\r\n return words_freq[:n]", "def GetMostFrequentWords(self, num_words, text_chunk):\n ret = self.GetStopwordFreqs(text_chunk, text_chunk)\n return [b for (a,b) in list(reversed(sorted(list(set(ret)))))][:num_words]", "def get_top_words(data_list, n_top_words=160):\n top_words = []\n \n d = Counter(concatenate_all_text(data_list))\n d_sorted = sorted(d.items(), key=itemgetter(1), reverse=True)\n \n assert len(d_sorted) >= n_top_words, 'Too many top words'\n \n for i in range(n_top_words):\n top_words.append(d_sorted[i][0])\n \n return top_words", "def most_frequent(s):\n words=[]\n words=s.split(\" \")\n words=sorted(words)\n word_count={}\n counts=[]\n for word in words:\n counts.append(words.count(word))\n m=counts.index(max(counts))\n return (words[m])\n \n # USING OrderedDict\n '''\n for word in words:\n word_count[word]=words.count(word)\n max_count=max(word_count.values())\n for word in OrderedDict(sorted(word_count.items(), key=lambda t:t[0])):\n if word_count[word]==ma\n x_count:\n return (\"Using OrderedDict:\", word)\n '''\n \n \n \n # HINT: Use the built-in split() function to transform the string s into an\n # array\n \n # HINT: Sort the new array by using the built-in sorted() function or\n # .sort() list method\n \n # HINT: Iterate through the array and count each occurance of every word\n # using the .count() list method\n \n # HINT: Find the number of times the most common word appears using max()\n \n # HINT: Locate the index of the most frequently seen word\n \n # HINT: Return the most frequent word. 
Remember that if there is a tie,\n # return the first (tied) word in alphabetical order.", "def n_most_common(people, n):\n return [name\n for name, count in hobby_counter(people).most_common(n)]", "def top_ngrams(tokenized_words, n=2, top=10):\n\tall_ngrams = []\n\tfor each in tokenized_words:\n\t\tall_ngrams += ngrams(each, n)\n\treturn FreqDist(all_ngrams).most_common(top)", "def most_words(self, n):\n big_tags = [[x[2], len(x[2].split(' '))] for x in self.data]\n big_tags = sorted(big_tags, key=lambda x: -int(x[1]))[:n]\n return collections.OrderedDict(big_tags)", "def extract_most_freq_terms(counter, vocab_list):\r\n\tword_list=[]\r\n\tlist_of_counts=counter.most_common(50)\r\n\tfor i in range(len(list_of_counts)):\r\n\t\tif list_of_counts[i][0] in vocab_list:\r\n\t\t\tword_list.append(list_of_counts[i][0])\r\n\treturn word_list", "def most_frequent_bigrams(text, n, lowercase=False):\r\n # YOUR CODE HERE\r\n\r\n from collections import Counter\r\n\r\n if lowercase:\r\n words = [word.strip().lower() for word in text.split()]\r\n else:\r\n words = [word.strip() for word in text.split()]\r\n\r\n bigrams = list(zip(words,words[1:]))\r\n bi_count = Counter(bigrams)\r\n\r\n most_freq_biagram = []\r\n\r\n for i,j in bi_count.most_common(n):\r\n most_freq_biagram.append(i)\r\n\r\n return most_freq_biagram\r\n\r\n pass", "def more_efficient_most_common(d, n):\n if n <= 0:\n return []\n items = d.items()\n most_common_items = []\n \n # The complexity in range handles the case where n is less then the number\n # of entries in the dictionary.\n for i in range(n if n < len(items) and n > 0 else len(items)):\n # This could be faster by writing a max method that also poped the best\n # item. It goes from T(n) = 2n * mc to n * mc where c is some constant\n # factor, n is len(d) and m is the n most common words.\n most_common_item = max(items, key=itemgetter(1))\n items.remove(most_common_item)\n most_common_items.append(most_common_item[0])\n return most_common_items", "def top_10_words(hist, num = 10):\n t = most_common(hist)\n for freq, word in t[:num]:\n print(word,\"\\t\", freq)", "def most_frequent(s):\n\n # HINT: Use the built-in split() function to transform the string s into an\n # array\n words = s.split(\" \")\n\n # HINT: Sort the new array by using the built-in sorted() function or\n # .sort() list method\n\n # HINT: Iterate through the array and count each occurance of every word\n # using the .count() list method\n\n dict = {}\n for word in words:\n dict[word] = words.count(word)\n\n # HINT: Find the number of times the most common word appears using max()\n result = max(dict, key=dict.get)\n\n # HINT: Locate the index of the most frequently seen word\n\n # HINT: Return the most frequent word. 
Remember that if there is a tie,\n # return the first (tied) word in alphabetical order.\n\n return result", "def most_words(self, n):\n if n > self.length:\n raise IndexError('Index out of tags\\' range')\n sorted_list = sorted(self.tags, key=lambda x: len(x[2].split(' ')), reverse=True)\n return dict([[x[2], len(x[2].split(' '))] for x in sorted_list[:n]])", "def top_words_by_tf_idf(document, counts, n):\n words = list(analyze(document))\n idfs = get_idf_dict(words, counts)\n tfs = FreqDist(words)\n words.sort(key=lambda w: -(tfs[w] * idfs[w]))\n return words[0:n]", "def sort_by_count_word(people):\n return sorted(people, key=count_word)", "def find_most_common_term(words_list):\n\n\twords_frequency = words_to_freq(words_list)\n\tunique_terms = generate_unique_terms(words_list)\n\tmax_index = words_frequency.index(max(words_frequency))\n\treturn unique_terms[max_index]", "def get_top_n_words(n):\n top_n_words = rdd_review_data\\\n .map(lambda x: x[\"text\"])\\\n .flatMap(lambda line: line.lower().split(' ')) \\\n .filter(lambda x: x not in stop_words)\\\n .map(lambda x: (trim(x), 1))\\\n .reduceByKey(lambda a, b: a + b)\\\n .sortBy(lambda x: -x[1])\\\n .keys()\\\n .take(n)\n\n results[\"E\"] = top_n_words" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
V.InterpolateLine(vtkRenderer, vtkContourRepresentation, int, int) -> int
def InterpolateLine(self, vtkRenderer, vtkContourRepresentation, p_int, p_int_1): ...
[ "def nearest_point_on_line(point, line): \n return line.interpolate(line.project(point))", "def extrapolate_lines(image_shape, line):\n slope, intercept = line\n y1 = image_shape[0]\n y2 = int(y1 * (3 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n return np.array([x1, y1, x2, y2])", "def plotMulticolorLine(ax,xs,ys,zs,cmap='viridis',n_interp=50,**kwargs):\n\n xs = linearInterpolate(xs,n_interp)\n ys = linearInterpolate(ys,n_interp)\n zs = linearInterpolate(zs,n_interp)\n\n n_interp = max(3,n_interp)\n points = np.array([xs, ys]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n ## duplicate the final entry because otherwise it's ignored and you don't\n ## make it to zs[-1] ever, no matter how many n_interp you have\n segments = np.append(segments,segments[-1:],axis=0)\n zs = np.append(zs,zs[-1])\n\n lc = LineCollection(segments, cmap=cmap,norm=plt.Normalize(0, 1),**kwargs)\n lc.set_array(zs)\n lc.set_linewidth(3)\n ax.add_collection(lc)", "def intersection(self, intersected_line):\n a = np.dot(self.normal_vector, intersected_line.direction_vector)\n if a != 0:\n lam = self.d - np.dot(self.normal_vector, intersected_line.point)\n lam = lam/a\n intersection = intersected_line.point + np.dot(lam, intersected_line.direction_vector)\n else:\n intersection = (-10,-10,-5)\n return intersection", "def line_sample2d(x,y,z,x1,y1):\n from scipy.interpolate import RectBivariateSpline as rbs\n # Extract the values along the line, using cubic interpolation\n f = rbs(x,y,z.T)\n return f.ev(x1,y1)\n #return scipy.ndimage.map_coordinates(z, np.vstack((y,x)))", "def _interpolated_line(ax, x, y, npoints: int = 100, **kwargs):\r\n ls = kwargs.pop(\"linestyle\", kwargs.pop(\"ls\", rcParams[\"lines.linestyle\"]))\r\n marker = kwargs.pop(\"marker\", rcParams[\"lines.marker\"])\r\n label = kwargs.pop(\"label\")\r\n\r\n ip_x, ip_y = _interpolated_coords(x, y, npoints)\r\n (line_h,) = ax.plot(ip_x, ip_y, marker=\"None\", ls=ls, label=f\"_{label}_line\", **kwargs)\r\n\r\n if marker.lower() not in [\"none\", \"\"]:\r\n ax.plot(x, y, ls=\"None\", marker=marker, label=f\"_{label}_markers\", **kwargs)\r\n\r\n # fake handle for legend\r\n handle = mlines.Line2D([], [], color=line_h.get_color(), ls=ls, marker=marker, label=label)\r\n return handle, ip_x, ip_y", "def ipfline(center=[0,0],csym='cubic'):\n xc = []; yc = []\n if csym!='cubic': print(\"Only Cubic!\"); raise IOError\n xc.append( center[0])\n yc.append( center[1])\n\n for i in np.linspace(0.,1/math.sqrt(3.)):\n yaux = i\n xaux = math.sqrt((1. - yaux**2)/2.)\n zaux = xaux\n t1 = math.sqrt(1. - zaux) / math.sqrt(xaux**2 + yaux**2)\n t2 = t1/math.sqrt(1. 
+ zaux)\n ## equal area\n # xc.append(xaux*t1)\n # yc.append(yaux*t1)\n ## stereo\n xc.append(xaux*t2)\n yc.append(yaux*t2)\n\n xc.append(center[0])\n yc.append(center[1])\n return np.array([xc,yc])", "def projectPointToLine(self, *args):\n return _coin.SbDPViewVolume_projectPointToLine(self, *args)", "def _interp_continuum(self, x, y, wv=None):\n if wv is None:\n wv = self.wavelength.value\n\n if len(y) >= 5:\n # need 5 points to define an Akima Spline\n spline = AkimaSpline(x, y)\n co = spline(wv)\n else:\n co = np.interp(wv, x, y)\n\n return co", "def interpolLin(wave, spec, new_wave): \n inter = interpolate.interp1d(wave, spec, bounds_error = False)\n return inter(new_wave)", "def _segment_approx_value_linear(self, r, i1, i2):\n\n Dr = self._r[i2] - self._r[i1]\n X = (r - self._r[i1])/Dr\n \n return self._y[i1] + (self._y[i2] - self._y[i1])*X", "def intersectWithLine(self, p0, p1):\n if not self.line_locator:\n self.line_locator = vtk.vtkOBBTree()\n self.line_locator.SetDataSet(self.polydata())\n self.line_locator.BuildLocator()\n\n intersectPoints = vtk.vtkPoints()\n self.line_locator.IntersectWithLine(p0, p1, intersectPoints, None)\n pts = []\n for i in range(intersectPoints.GetNumberOfPoints()):\n intersection = [0, 0, 0]\n intersectPoints.GetPoint(i, intersection)\n pts.append(intersection)\n return pts", "def projectPointToLine(self, pt: 'SbVec2f') -> \"PyObject *\":\n return _coin.SbViewVolume_projectPointToLine(self, pt)", "def fit_line(edge_contour: list) -> ([float, float], [float, float]):\n rect_x, rect_y, width, height = cv.boundingRect(edge_contour)\n vec_x, vec_y, line_x, line_y = cv.fitLine(edge_contour, cv.DIST_L2, 0, 0.01, 0.01)\n points = []\n if abs(vec_x) <= 0.01: # Detected line is vertical\n points.append((line_x, rect_y))\n points.append((line_x, rect_y + height))\n elif abs(vec_y) <= 0.01: # Detected line is horizontal\n points.append((rect_x, line_y))\n points.append((rect_x + width, line_y))\n else: # Detected line is oblique\n # Calculate intersection points on horizontal lines (x coordinates):\n # y = rect_y (top rectangle line)\n # y = rect_y + height (bottom rectangle line)\n top_intersect = ((rect_y - line_y) * vec_x / vec_y + line_x)\n bottom_intersect = ((rect_y + height - line_y) * vec_x / vec_y + line_x)\n # and vertical lines (y coordinates):\n # x = rect_x (left rectangle line)\n # x = rect_x + width (right rectangle line)\n left_intersect = ((rect_x - line_x) * vec_y / vec_x + line_y)\n right_intersect = ((rect_x + width - line_x) * vec_y / vec_x + line_y)\n\n # Find those 2 intersections that occur on rectangle line segments\n # horizontal:\n if rect_x <= top_intersect <= rect_x + width:\n points.append([top_intersect, rect_y])\n if rect_x <= bottom_intersect <= rect_x + width:\n points.append([bottom_intersect, rect_y + height])\n # vertical\n if rect_y <= left_intersect <= rect_y + height:\n points.append([rect_x, left_intersect])\n if rect_y <= right_intersect <= rect_y + height:\n points.append([rect_x + width, right_intersect])\n\n return np.array(points[0], dtype=float), np.array(points[1], dtype=float)", "def get_linear(self):\n return self._v_lin.copy()", "def line_draw(image):\n img = image.copy()\n \n #read in background for paper appearance\n paper = cv2.imread(\"ink-paper.jpg\", cv2.IMREAD_COLOR)\n\n paper = cv2.resize(paper, (img.shape[1], img.shape[0]))\n\n img = cv2.medianBlur(img, 5)\n edges = cv2.Canny(img, 100 , 125)\n\n c_img, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_NONE)\n \n 
#iterate through each contour found in the image\n for c in contours:\n #draw contours on image. Can vary intensity of lines\n #c_img = cv2.drawContours(c_img, c, -1, (125,125,0), 4)\n c_img = cv2.drawContours(c_img, c, -1, (255,255,255), 2) \n \n #Invert the line drawing\n c_img = 255 - c_img\n c_img = cv2.cvtColor(c_img, cv2.COLOR_GRAY2BGR)\n\n c_img_blur = cv2.blur(c_img, (5,5))\n \n #convert to BGR to enable adding\n edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\n \n edges = np.uint8(edges) \n c_img_blur = np.uint8(c_img_blur)\n \n #add blurred and contoured to paper to create an overlay/blend\n output = cv2.addWeighted(c_img_blur, .35, paper, .65, 0)\n output = np.uint8(output)\n \n return output", "def axvline(self, x=0, ymin=0, ymax=1, **kwargs):\n\n trans = mtrans.blend_xy_sep_transform( self.transData, self.transAxes )\n l, = self.plot([x,x], [ymin,ymax] , transform=trans, scaley=False, **kwargs)\n return l", "def project_point_onto_line(o, v, p):\n return o + dv.vector_projection(p - o, v)", "def intersectWithLine(self, *args) -> \"adsk::core::Ptr< adsk::core::Point3D >\" :\n return _core.Plane_intersectWithLine(self, *args)", "def draw_line(Irgb, line, color=(0, 255, 0)):\n if len(Irgb.shape) != 3:\n Irgb = cv2.cvtColor(Irgb, cv.CV_GRAY2BGR)\n \n Irgb = Irgb.copy()\n h, w = Irgb.shape[0:2]\n pts = []\n for x in xrange(w):\n y = compute_line_y(line, x)\n if y > 0 and y < h:\n pts.append((x,y))\n cv.Line(cv.fromarray(Irgb), tuple(intrnd(*pts[0])), tuple(intrnd(*pts[-1])), color)\n return Irgb" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write output file. `d` is the dict returned by parse_input().
def write_output(args, d): fout = args.outfile if args.head: fout.write(args.head.read() + '\n') fout.write('# ------------------------------------------\n') fout.write('# valgrind suppressions generated from\n') fout.write('# %s\n' % args.infile.name) fout.write('# ------------------------------------------\n') for s in d.values(): fout.write(str(s)) if args.tail: fout.write(args.tail.read())
[ "def _write_output_file(self, output):\n self._make_output_dirs_if_needed()\n\n with open(self.output_path, \"w\") as output_file:\n json.dump(output, output_file, indent=1)\n output_file.write(\"\\n\") # json.dump does not write trailing newline", "def write_to_output_file(output_dir, filename, data):\n\n if not output_dir or not prepare_output_dir(output_dir):\n return\n filename = os.path.join(output_dir, filename)\n try:\n with open(filename, 'w') as outfile:\n if isinstance(data, string_types):\n outfile.write(data)\n else:\n json.dump(data, outfile, sort_keys=True, indent=4, default=_no_fail)\n # pylint: disable=broad-except; do not want serialization/write to break for any reason\n except Exception as exc:\n display.warning(\"Could not write output file {}: {}\".format(filename, exc))", "def write_file(output_name, id_dic, eQTL_list):\n with open(output_name, \"w\") as thefile:\n for eQTL in eQTL_list:\n if eQTL[0] in id_dic:\n line = \"{} {} {} {} {} {}\".format(id_dic[eQTL[0]], eQTL[1], eQTL[2], eQTL[3], eQTL[4], eQTL[5])\n else:\n line = \"{} {} {} {} {} {}\".format(eQTL[0], eQTL[1], eQTL[2], eQTL[3], eQTL[4], eQTL[5])\n thefile.write(line + \"\\n\")", "def __output_to_file(self):\n\n fn = self.out_dir + self.output_file\n map_keys = self.ordered_keys\n row_count = len(self.output_map[map_keys[0]])\n\n with open(fn, 'w') as csvfile:\n wr = writer(csvfile)\n wr.writerow(map_keys)\n\n for row in range(row_count):\n temp = []\n for col in map_keys:\n temp.append(self.output_map[col][row])\n\n wr.writerow(temp)", "def _write_json_output_to_file(self):\n try:\n with open(self.output_file, 'w') as f:\n f.write(json.dumps(self.__output, f, sort_keys=True, indent=2))\n except IOError:\n logging.error(\"Unable to write to result.out\")\n else:\n logging.info(\"Output written to result.out\")", "def write(self):\n\n d = {} # to contain mappings of term to file cursor value\n with open(self.p_file, \"wb\") as f:\n for word, posting_list in self.dictionary.items():\n cursor = f.tell()\n d[word] = cursor # updating respective (term to file cursor value) mappings\n pickle.dump(posting_list, f, protocol=4)\n\n with open(self.d_file, \"wb\") as f:\n pickle.dump(d, f) # (term to file cursor value) mappings dictionary\n pickle.dump(self.doc_lengths, f) # document lengths regardless of zone/field types\n pickle.dump(self.docid_term_mappings, f) # (doc_id to K most common terms) mappings", "def __write_to_file(output_dir, p_values, nans, fname):\n fname = output_dir + \"/\" + fname\n \n f = open(fname, 'w')\n f.write('name\\tp-val\\tenrinched in\\n')\n p_values.sort()\n \n for tp in p_values:\n pval = (\"%.12f\" % __round_sig(tp[0])).rstrip('0')\n attr_name = str(tp[1])\n enriched_in = str(tp[2])\n f.write(attr_name + \"\\t\" + pval + \"\\t\" + enriched_in + \"\\n\")\n\n for n in nans:\n attr_name = str(n[1])\n f.write(attr_name + \"\\tn/a\\n\")\n\n f.close()", "def WriteDict( d, filename, *fields ):\r\n\tif len( fields ): d = dict( ( k, v ) for k, v in d.items() if k in fields )\r\n\tfile = open( MakeWayFor( filename ), 'wt' )\r\n\tfile.write( '{\\n' )\r\n\tfor k, v in sorted( d.items() ): file.write( '\\t%s : %s,\\n' % ( repr( k ), repr( v ) ) )\r\n\tfile.write( '}\\n' )\r\n\tfile.close()", "def write_to_file(filename, output):\n path = \"../data/\" + \"scored_\" + filename + \".tsv\"\n fObj = open(path, \"w+\")\n fObj.write(output)\n fObj.close()", "def output_file2(in_dict, directory, binned=None, pairs=None, ordered=None):\n import json\n\n if binned == True:\n json = json.dumps(in_dict)\n 
filename = directory + '/binned_dict2.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved binned_dict to %s' %filename)\n \n elif pairs == True:\n json = json.dumps(in_dict)\n filename = directory + '/pairs_list2.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved pairs_list to %s' %filename)\n \n elif ordered == True:\n json = json.dumps(in_dict)\n filename = directory + '/ordered_list2.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved ordered_list to %s' %filename)\n\n else:\n json = json.dumps(in_dict)\n filename = directory + '/output.json'\n with open(filename, 'w') as output:\n output.write(json)\n print('saved dict to \"output.json\"')", "def write_file(output_name, parsed_xQTL_list):\n with open(output_name, \"w\") as thefile:\n thefile.write(\"metabolite\\tchr\\tpeak_mb\\tinf_mb\\tsup_mb\\tlod\\n\")\n for xQTL in parsed_xQTL_list:\n xQTL = [str(element) for element in xQTL]\n line = \"\\t\".join(xQTL)\n thefile.write(line + \"\\n\")", "def to_file(self, fileout):\n dirout =os.path.split(fileout)[0]\n pathlib.Path(dirout).mkdir(parents=True, exist_ok=True)\n\n jout = {'constraints': {}, 'agents': {}, 'variables': {}}\n for a in self.agents:\n agt = self.agents[a]\n jout['agents'][a] = {'vars': [v.name for v in agt.variables]}\n\n for i, v in enumerate(self.variables):\n var = self.variables[v]\n jout['variables'][v] = {'id': i, 'cons': [c.name for c in var.constraints],\n 'domain': var.domain, 'type': 1, 'value': None,\n 'agent': var.controlled_by.name}\n\n for c in self.constraints:\n con = self.constraints[c]\n jout['constraints'][c] = {'vals': [int(v) for v in con.values.values()],\n 'scope': [v.name for v in con.scope]}\n\n print('Writing dcop instance on file', fileout)\n with open(fileout, 'w') as fp:\n json.dump(jout, fp, sort_keys=True, indent=4)", "def write_output(self, file, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def write(self, path):\n\n self.find_nodes()\n self.nodes = self.input + self.additional_nodes\n self.build_edges()\n with open(path+\".nodes.tsv\", \"w\") as f:\n f.write(\"\\n\".join(\n [\"id\\tlabel\\ttype\"] + [\n \"{}\\t{}\\t{}\".format(\n str(self.nodes.index(node)), node, str(int(node in self.input))\n ) for node in self.nodes\n ]\n ))\n\n with open(path+\".edges.tsv\", \"w\") as f:\n f.write(\"\\n\".join(\n [\"source\\ttarget\\tweight\"] + [\n \"\\t\".join(edge) for edge in self.edges\n ]\n ))", "def write(self):\n for key, value in self.templates.iteritems():\n template_in, template_out = value\n path = '{0}/{1}'.format(self.dest or '.', template_out)\n audit(\"Writing: {0}\".format(path))\n with open(path, 'w') as f:\n f.write(self.__generate_code(template_in))", "def write_tag_data(data):\n utils.makedirs(os.path.dirname(output_name))\n with open(output_name, 'w+') as fd:\n json.dump(data, fd, sort_keys=True)", "def write_to_files(self,catalog,input):\n\n\n\n metadata = self.metadata({'filename':self.uuid})\n catalog.write(json.dumps(metadata) + \"\\n\")\n text = self.parsed.get_payload().replace(\"\\n\",\"\\\\n\\\\n\").replace(\"\\t\",\" \")\n input.write(metadata['filename'] + \"\\t\" + text.encode(\"utf-8\",\"ignore\") + \"\\n\")", "def to_file(filename, dicts):\n\n with open(filename, \"w\") as f:\n for order, dictionary in dicts:\n f.write(\"%s \" % order)", "def save_output(final_dict, output_dir):\n\n for ID, final_path in final_dict.items():\n out_path = os.path.join(output_dir, 
os.path.basename(final_path))\n shutil.copyfile(final_path, out_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a plot normalizing 1 fiber data to the isosbestic
def plot_1fiber_norm_iso(file_name): # Open file # Check for FileNotFound and Permission Error exceptions try: f = open(file_name, 'r',) except FileNotFoundError: print('No ' + file_name + ' file found') sys.exit(1) except PermissionError: print('Unable to access file ' + file_name) sys.exit(1) # Initialize lists for the fluorophores and time f1GreenIso = [] f1GreenGreen = [] f1GreenTime = [] f1RedIso = [] f1RedRed = [] f1RedTime = [] # Read through each line of the dataframe # Append the isosbectic, fluorophore and time data to their # respective vectors, depending on color header = None for line in f: if header is None: header = line continue A = line.rstrip().split(',') f1GreenIso.append(float(A[0])) f1GreenGreen.append(float(A[2])) f1GreenTime.append(float(A[8])) f1RedIso.append(float(A[3])) f1RedRed.append(float(A[4])) f1RedTime.append(float(A[7])) # Get coefficients for normalized fit regGreen = np.polyfit(f1GreenIso, f1GreenGreen, 1) aGreen = regGreen[0] bGreen = regGreen[1] regRed = np.polyfit(f1RedIso, f1RedRed, 1) aRed = regRed[0] bRed = regRed[1] # Use the coefficients to create a control fit controlFitGreen = [] for value in f1GreenIso: controlFitGreen.append(aGreen * value + bGreen) controlFitRed = [] for value in f1RedIso: controlFitRed.append(aRed * value + bRed) # Normalize the fluorophore data using the control fit normDataGreen = [] for i in range(len(f1GreenGreen)): normDataGreen.append((f1GreenGreen[i] - controlFitGreen[i]) / controlFitGreen[i]) normDataRed = [] for i in range(len(f1RedRed)): normDataRed.append((f1RedRed[i] - controlFitRed[i]) / controlFitRed[i]) # Plot the data for green plt.plot(f1GreenTime, normDataGreen) plt.title('Green Normalized to Isosbestic') # Save the plot in a png file figGreen = plt.savefig('f1GreenNormIso.png') plt.close(figGreen) # Plot the data for red plt.plot(f1RedTime, normDataRed) plt.title('Red Normalized to Isosbestic') # Save the plot in a png file figRed = plt.savefig('f1RedNormIso.png') plt.close(figRed) f.close()
[ "def plot(self):\n norm = self.normal_vector()\n plt.plot(self.x, self.y)\n plt.quiver(self.x, self.y, norm[:, 0], norm[:, 1])\n if self.has_shadows():\n def consecutive(data, stepsize=1):\n return np.split(data, np.where(np.diff(data) != stepsize)[0]+1)\n\n def area_fill(xmin, xmax, options):\n ymin, ymax = plt.gca().get_ylim()\n # print(xmin, xmax, ymin, ymax)\n plt.fill([\n xmin,\n xmin,\n xmax,\n xmax,\n ],\n [\n ymin,\n ymax,\n ymax,\n ymin,\n ],\n options,\n alpha=0.1,\n )\n plt.gca().set_ylim([ymin, ymax])\n\n where = self.get_shadows()\n plt.plot(self.x[where], self.y[where], 'rx')\n clust_shs = consecutive(where)\n clust_fre = consecutive(np.delete(np.arange(len(self.x)), where))\n for cl in clust_shs:\n mi, ma = float(self.x[cl].min()), float(self.x[cl].max())\n area_fill(mi, ma, 'r')\n for cl in clust_fre:\n mi, ma = float(self.x[cl].min()), float(self.x[cl].max())\n area_fill(mi, ma, 'g')", "def munifinance_demographics():\n\n\t### Set plot parameters and style\n\tsb.set(style='ticks')\n\tfig, axes = plt.subplots(figsize=(15,2.5), ncols=3)\n\tfig.subplots_adjust(wspace=0.2)\n\n\t### Initialize path to municipal finance CSV file\n\tmunifinance_fn = 'Fiscal_Vulnerability.csv'\n\tmunifinance_csv_uri = os.path.join(paths.outputs_dir, munifinance_fn)\n\n\t### Read municipal finance CSV to Pandas DataFrame\n\tdf = pd.read_csv(munifinance_csv_uri)\n\n\t### Initialize path to county ACS CSV file\n\tcounty_acs_fn = 'acs_extract.csv'\t\n\tcounty_acs_csv_uri = os.path.join(\n\t\tpaths.data_dir, 'ACS/Counties', county_acs_fn)\n\n\t### Read county ACS CSV to Pandas DataFrame\n\tdf_acs = pd.read_csv(county_acs_csv_uri)\n\n\t### Merge ACS dataframe to municipal finance dataframe\n\tdf = df.merge(df_acs, how='left', left_on='fips', right_on='GEOID')\n\n\tdf['perc_white'] = df['B03002e3'] / df['B03002e1']\n\n\t### Subset dataframe to only include vulnerable municipalities\n\tdf_subset = df[(df['sx']>=np.nanpercentile(df['sx'], 80)) & \n\t\t\t\t (df['sy']>=np.nanpercentile(df['sy'], 80))]\n\n\t### Initialize general args\n\targs = {'stat':'density', 'alpha':0.4, 'fill':True}\n\n\t### Initialize colors\n\targs1 = args | {'color':'grey'}\n\targs2 = args | {'color':'r'}\n\n\t### Initialize number of bins\n\tbins = 50\n\n\t### Initialize column labels\n\tcols = ['B01003e1', 'B19013e1', 'perc_white']\n\n\tprint(len(df_subset))\n\n\t### Iterate through column labels\n\tfor i, col in enumerate(cols):\n\t\t### Set axis\n\t\tax = axes[i]\n\t\t\n\t\t### If column is population size...\n\t\tif col=='B01003e1':\n\t\t\tax.set_xlim(0,100000)\n\t\t\tax.set_xlabel('Population size')\n\n\t\t### If column is household median income...\n\t\tif col=='B19013e1':\n\t\t\tax.set_xlim(0,120000)\n\t\t\tax.set_xlabel('Household median income ($)')\n\n\t\t### If column is percent white...\n\t\tif col=='perc_white':\n\t\t\tax.set_xlim(0,1)\n\t\t\tax.set_xlabel('Percent white (%)')\n\n\t\tdf2 = df[(df[col]>=ax.get_xlim()[0]) & \n\t\t\t\t (df[col]<=ax.get_xlim()[1])]\n\t\tdf_subset2 = df_subset[(df_subset[col]>=ax.get_xlim()[0]) & \n\t\t\t\t\t\t\t (df_subset[col]<=ax.get_xlim()[1])]\n\n\t\tdf2 = df2[~df2['fips'].isin(df_subset2['fips'])]\n\n\t\t### Plot histograms\n\t\tsb.histplot(data=df2[col], ax=ax, bins=bins, \n\t\t\tbinrange=ax.get_xlim(), **args1)\n\t\tsb.histplot(data=df_subset2[col], ax=ax, bins=bins, \n\t\t\tbinrange=ax.get_xlim(), **args2)\n\n\t\t### Plot vertical lines for median values\n\t\tvaxline_args = {'linestyle':'--', 'zorder':10}\n\t\tax.axvline(df2[col].median(), color='k', alpha=0.8, 
**vaxline_args)\n\t\tax.axvline(df_subset2[col].median(), color='r', **vaxline_args)\n\n\t\t### Hide y-axis ticks, y-axis label, and top, left, and right spines\n\t\tax.set_yticks([])\n\t\tax.set_ylabel('')\n\n\t\tfor s in ['top', 'left', 'right']:\n\t\t\tax.spines[s].set_visible(False)\n\n\t### Save figure\n\tfn = 'munifinance_demographics.png'\n\turi = os.path.join(paths.figures_dir, fn)\n\tplt.savefig(uri, bbox_inches='tight', dpi=600)\n\tplt.savefig(uri.replace('png', 'pdf'), bbox_inches='tight')\n\n\t### Open figure\n\ttime.sleep(0.5)\n\tsubprocess.run(['open', uri])\n\n\treturn None", "def grafFourier(Sas , x , nfr , Nfig):\n#\n plt.figure(Nfig)\n plt.plot(x,Sas)\n plt.grid()\n plt.xlabel('Frecuencia (Hz)')\n plt.ylabel('Amplitud')\n#\n return", "def plot_inverse_fourier_transform(fs, wave, time, title):\n plt.figure(num=title+\" - \"+filename[:-4], figsize=(8, 5))\n plt.plot(time, wave, color=\"blue\", label=\"ifft(t)\")\n plt.legend(loc=1)\n plt.xlim(time[0], time[-1])\n plt.xlabel('Time (s)')\n plt.ylabel('ifft(t)')\n plt.title(title)", "def visualize_1d(a):\n true_v=utils.read_flow_field(os.path.join('data/velocity', str(a)))\n ff=utils.read_mag_field(os.path.join('data/magnitude', str(a)))\n ff=np.array(ff)\n true_v=np.array(true_v)\n size=ff.shape[0]\n x,y = np.meshgrid(np.array([i for i in range(25)]), np.array([i for i in range(24,-1,-1)]))\n z = ff \n plt.subplot(1,2,1)\n plt.contourf(x,y,z,10, alpha=.75, cmap='jet')\n plt.colorbar()\n plt.title(\"Magnitude Map\")\n\n x, y = np.meshgrid(np.arange(25),np.arange(24,-1,-1))\n u = true_v[:,:,0]\n v = true_v[:,:,1]\n plt.subplot(1,2,2)\n plt.quiver(x,y,u,v, scale=300)\n plt.title(\"Velocity Map\")\n plt.show()", "def _make_normal_plot(weights):\n _17_rankit._make_normal_plot(weights,\n root='nsfg_birthwgt_normal',\n ylabel='Birth weights (oz)', )", "def visualize(self):\n plt.show()", "def plot_intensity(self, freq = 1000):\n id_f = np.where(self.controls.freq <= freq)\n id_f = id_f[0][-1]\n # Intensities\n Ix = 0.5*np.real(self.pres_s[0][:,id_f] *\\\n np.conjugate(self.ux_s[0][:,id_f]))\n Iy = 0.5*np.real(self.pres_s[0][:,id_f] *\\\n np.conjugate(self.uy_s[0][:,id_f]))\n Iz = 0.5*np.real(self.pres_s[0][:,id_f] *\\\n np.conjugate(self.uz_s[0][:,id_f]))\n I = np.sqrt(Ix**2+Iy**2+Iz**2)\n # # Figure\n fig = plt.figure() #figsize=(8, 8)\n fig.canvas.set_window_title('Intensity distribution map')\n cmap = 'viridis'\n plt.title('Reference Intensity (BEM sim)')\n # if streamlines:\n # q = plt.streamplot(self.receivers.coord[:,0], self.receivers.coord[:,2],\n # Ix/I, Iz/I, color=I, linewidth=2, cmap=cmap)\n # fig.colorbar(q.lines)\n # else:\n q = plt.quiver(self.receivers.coord[:,0], self.receivers.coord[:,2],\n Ix/I, Iz/I, I, cmap = cmap, width = 0.010)\n #fig.colorbar(q)\n plt.xlabel(r'$x$ [m]')\n plt.ylabel(r'$z$ [m]')\n return plt\n # Figure\n # fig = plt.figure() #figsize=(8, 8)\n # ax = fig.gca(projection='3d')\n # cmap = 'seismic'\n # # fig = plt.figure()\n # # fig.canvas.set_window_title('Intensity distribution map')\n # plt.title('|I|')\n # q = ax.quiver(self.receivers.coord[:,0], self.receivers.coord[:,1],\n # self.receivers.coord[:,2], Ix, Iy, Iz,\n # cmap = cmap, length=0.01, normalize=True)\n # c = I\n # c = getattr(plt.cm, cmap)(c)\n # # fig.colorbar(p)\n # fig.colorbar(q)\n # q.set_edgecolor(c)\n # q.set_facecolor(c)\n # plt.xlabel(r'$x$ [m]')\n # plt.ylabel(r'$z$ [m]')", "def normalization_plot(segments, segment_n, suptitle='Figure'):\n old_font_size = matplotlib.rcParams['font.size']\n matplotlib.rc('font', **{ 'size': 
20 })\n fig, axs = plt.subplots(1,3, figsize=(30,10))\n fig.suptitle(suptitle)\n axs[0].set_title('')\n\n segment = segments[segment_n].reset_index()\n\n bell_curve = np.sin(np.linspace(0, np.pi, len(segments[segment_n]))) ** 2\n axs[0].plot(bell_curve)\n axs[0].set_title('Bell curve')\n\n axs[1].plot(segment.Values)\n axs[1].set_title('Segment')\n\n axs[2].plot(bell_curve * segment.Values)\n axs[2].set_title('normalized segment')\n\n plt.tight_layout(rect=[0, 0.03, 1, 0.95])\n plt.savefig('output/k-means/normalization.pdf', bbox_inches='tight')\n plt.show()\n matplotlib.rc('font', **{ 'size': old_font_size })", "def make_streamplot(f, xmin=-3, xmax=3, ymin=-3,ymax=3, xres=20, yres=20, plot_shape=True, plot_io_pattern=False, quiver_plot=False, use_convolution_method=True): # todo implement the remove fore field shape as a flag\n if use_convolution_method:\n X,Y,Ux,Uy=compute_full_velocity_field_conv(f, xmin, xmax, ymin, ymax, xres, yres)\n else:\n X,Y,Ux,Uy=compute_full_velocity_field(f, xmin, xmax, ymin, ymax, xres, yres)\n if abs(Ux).sum()+abs(Uy).sum()<=0:\n print(f\"Warning: There seems to be an issue with this grid. I'm not plotting anything.\\n(xmin={ xmin } ,xmax={ xmax } ,ymin={ ymin } ,ymax={ ymax } ,xres={ xres } ,yres={ yres })\")\n else:\n if plot_io_pattern:\n fig=plt.figure(figsize=(11,9))\n else:\n fig=plt.figure(figsize=(9,9))\n if plot_shape:\n dX=(X[0,1]-X[0,0])/2\n dY=(Y[1,0]-Y[0,0])/2\n sX = X+dX # shifted X, for plotting purposes\n sY = Y+dY # shifted Y, for plotting purposes\n ind=get_domain(f, sX,sY)\n plt.pcolormesh(X,Y,ind, cmap='Greys', alpha=.5, edgecolor='none')\n if plot_io_pattern:\n io = get_inflow_matrix(X,Y,Ux,Uy)\n im = plt.pcolormesh(X,Y,io, cmap='bwr', alpha=.6, vmin=-1, vmax=1)\n fig.colorbar(im)\n if quiver_plot:\n plt.quiver(X,Y,Ux,Uy)\n else:\n plt.streamplot(X,Y,Ux,Uy)\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n return fig, X,Y, Ux,Uy", "def vizualization():\n X = np.array(pandas.read_csv(\"dbscan-paintedData.csv\", sep=\"\\t\"))\n plt.figure()\n plt.subplot(2, 1, 1)\n for k in [1, 3, 15]:\n dists = k_dist(X, k=k)\n plt.plot(dists, label=\"k=%d\" % k)\n plt.legend()\n plt.xlabel(\"i-ti primer\")\n plt.ylabel(\"razdalja\")\n # plt.show()\n plt.subplot(2, 1, 2)\n dbs = DBSCAN(3, 0.07)\n clusters = dbs.fit_predict(X)\n classes = np.unique(clusters)\n for cls in classes:\n mask = clusters == cls\n plt.scatter(X[mask, 0], X[mask, 1], 10, label=\"Noise\" if cls == -1 else cls)\n plt.legend()\n plt.show()", "def plot_normalized(mat, i):\n col = mat.getcol(i)\n col_nonz = col[col.nonzero()[0]]\n game_line = np.array(col_nonz.todense())\n plt.hist(game_line)\n plt.show()", "def plotBaraffe():\n root = '/u/jlu/work/gc/stellar_models/B98_a1_'\n\n ages = [4.0, 6.0, 8.0]\n\n # Distance modulus\n dist = 8000.0 # pc\n distMod = 5.0 * pylab.log10(dist / 10.0)\n\n # Extinction\n AV = 25.0\n AH = 0.175 * AV # Rieke & Lebofsky 1985\n AK = 0.112 * AV # Rieke & Lebofsky 1985\n AL = 0.058 * AV # Rieke & Lebofsky 1985\n AM = 0.058 * AV # Viehmann et al. 
2005\n\n masses = []\n hmags = []\n kmags = []\n lmags = []\n mmags = []\n for age in ages:\n filename = '%s%dmyr.models' % (root, age)\n table = asciidata.open(filename)\n\n # Masses\n mass = table[0].tonumarray()\n masses.append(mass)\n\n # Intrinsic Magnitudes\n hmag = table[9].tonumarray()\n kmag = table[10].tonumarray()\n lmag = table[11].tonumarray()\n mmag = table[12].tonumarray()\n\n # Switch to apparent magnitudes\n hmag += distMod + AH\n kmag += distMod + AK\n lmag += distMod + AL\n mmag += distMod + AM\n\n hmags.append(hmag)\n kmags.append(kmag)\n lmags.append(lmag)\n mmags.append(mmag)\n\n\n #----------\n #\n # Plotting\n #\n #----------\n pylab.clf()\n pylab.plot(kmags[1]-mmags[1], kmags[1])\n pylab.plot(kmags[0]-mmags[0], kmags[0], 'k--')\n pylab.plot(kmags[2]-mmags[2], kmags[2], 'r--')\n pylab.axis([-1, 4, 28, 8])", "def plot_ngon(self):\n\t\tplt.scatter(*zip(*self.c))\n\t\tplt.axis('equal')\n\t\tplt.axis('off')\n\t\tmarker='.'\n\t\tplt.show()", "def plot_neutral_graph():\n name = [] # name\n friendliness = [] # friendliness\n dominance = [] # dominance\n\n for row in PersonDao.get_all():\n name.append(row[0])\n friendliness.append(row[1])\n dominance.append(row[2])\n\n fig, ax = plt.subplots()\n ax.scatter(friendliness, dominance)\n\n # set the graph to display only (-10,10)\n # since this is the maximum range of personalities\n # that we allow in our model of traits\n ax.set_xlim([-10, 10])\n ax.set_ylim([-10, 10])\n\n # set the axis tick labels to be integers only\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n\n # draw lines depicting the friendliness and dominance axes\n plt.axhline(0, color='grey')\n plt.axvline(0, color='grey')\n\n for i, txt in enumerate(name):\n ax.annotate(txt, (friendliness[i], dominance[i]))\n\n plt.xlabel('friendliness')\n plt.ylabel('dominance')\n\n plt.show()", "def _plot_fitness_v_fscore(self):\n\n plt.clf()\n fig, ax = plt.subplots()\n sns.set(rc={\"figure.figsize\": (11.7, 8.27)})\n\n cmap = sns.cubehelix_palette(dark=0.3, light=0.8, as_cmap=True)\n sns.scatterplot(\n x=\"f_score\",\n y=\"elo_rating\",\n # hue='generation',\n # palette = cmap,\n label=\"Rating\",\n data=self.fitness_by_f_score,\n ax=ax,\n )\n\n sns.scatterplot(\n x=\"f_score\",\n y=\"win_ratio\",\n # hue='generation',\n # palette = cmap,\n label=\"Win ratio\",\n data=self.fitness_by_f_score,\n ax=ax,\n )\n\n ax.legend(loc=\"lower right\")\n ax.set_xlabel(\"F score\")\n ax.set_ylabel(\"Fitness (as probability)\")\n # bplot.set_ylim((0,1))\n ax.set_xlim((-0.05, 1.05))\n save_to_file = os.path.join(\n self.save_directory, \"fitness_v_fscore.png\".format(self.qmla_id)\n )\n\n ax.figure.savefig(save_to_file)", "def plot_grismFoV(basename,edgecut=0.0,markbeams=[None],cmap='viridis',verbose=True):\n outfile = basename+'grismFoV.pdf'\n if verbose: print(' - Generating full-FoV figure of NIRCam grisms. 
Storing in\\n '+outfile)\n\n grismFLTs_F277W = glob.glob(basename+'F277W*.GrismFLT.fits')\n grismFLTs_F356W = glob.glob(basename+'F356W*.GrismFLT.fits')\n grismFLTs_F444W = glob.glob(basename+'F444W*.GrismFLT.fits')\n\n fig = plt.figure(figsize=[12,4])\n Fsize = 10.0\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif',size=Fsize)\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n\n\n datalist = [afits.open(grismFLTs_F277W[0])['DREF'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut]*5e19,\n afits.open(grismFLTs_F277W[0])['GSCI'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut],\n afits.open(grismFLTs_F356W[0])['DREF'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut]*5e19,\n afits.open(grismFLTs_F356W[0])['GSCI'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut],\n afits.open(grismFLTs_F444W[0])['DREF'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut]*5e19,\n afits.open(grismFLTs_F444W[0])['GSCI'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut],\n afits.open(grismFLTs_F277W[1])['DREF'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut]*5e19,\n afits.open(grismFLTs_F277W[1])['GSCI'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut],\n afits.open(grismFLTs_F356W[1])['DREF'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut]*5e19,\n afits.open(grismFLTs_F356W[1])['GSCI'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut],\n afits.open(grismFLTs_F444W[1])['DREF'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut]*5e19,\n afits.open(grismFLTs_F444W[1])['GSCI'].data[0+edgecut:-1*edgecut,0+edgecut:-1*edgecut]]\n\n namelist = [grismFLTs_F277W[0]+'DREF',\n grismFLTs_F277W[0]+'GSCI',\n grismFLTs_F356W[0]+'DREF',\n grismFLTs_F356W[0]+'GSCI',\n grismFLTs_F444W[0]+'DREF',\n grismFLTs_F444W[0]+'GSCI',\n grismFLTs_F277W[1]+'DREF',\n grismFLTs_F277W[1]+'GSCI',\n grismFLTs_F356W[1]+'DREF',\n grismFLTs_F356W[1]+'GSCI',\n grismFLTs_F444W[1]+'DREF',\n grismFLTs_F444W[1]+'GSCI']\n\n\n beamSCIheaders = []\n coordlist = []\n for beamfile in markbeams:\n if beamfile is not None:\n bhdu = afits.open(beamfile)\n for hdu in bhdu[1:]:\n if (hdu.header['EXTNAME'] == 'SCI') or (hdu.header['EXTNAME'] == 'REF'):\n beamSCIheaders.append(hdu.header)\n coordlist.append([afits.open(beamfile)[0].header['RA'],afits.open(beamfile)[0].header['DEC']])\n\n for ii, datashow in enumerate(datalist):\n ax = fig.add_subplot(2,6,ii+1)\n # goodpix = datashow[np.isfinite(datashow) & (datashow!=0.0)]\n # pixsort = np.sort(goodpix)\n ax.imshow(datashow, vmin=-0.01, vmax=0.05, cmap=cmap, origin='lower', aspect='auto')\n\n for bb, beamhdr in enumerate(beamSCIheaders):\n if (beamhdr['PARENT'].split('_flt.fits')[0] in namelist[ii]):\n if namelist[ii].endswith('REF') & (beamhdr['EXTNAME'] == 'REF'):\n xpix, ypix = beamhdr['ORIGINX']-edgecut, beamhdr['ORIGINY']-edgecut\n width = beamhdr['NAXIS1']\n height = beamhdr['NAXIS2']\n\n radius = 5\n objpatch = patch.Circle((xpix+width/2.,ypix+height/2.),radius,color='red',fill=None)\n ax.add_patch(objpatch)\n\n objpatch = patch.Rectangle((xpix,ypix), width, height, color='red', linewidth=1.0, linestyle='-', fill=None)\n ax.add_patch(objpatch)\n\n skipWCSpatch = True\n if not skipWCSpatch:\n # Base patch location on WCS and coordinates instead of header info\n wcs_GrismFLT = wcs.WCS(afits.open(namelist[ii][:-4])[namelist[ii][-4:]].header)\n coords = SkyCoord(coordlist[bb][0],coordlist[bb][1], unit=\"deg\")\n pixcoord = wcs.utils.skycoord_to_pixel(coords,wcs_GrismFLT,origin=1)\n xpix, ypix = pixcoord[0]-edgecut, pixcoord[1]-edgecut\n radius = 30\n objpatch = 
patch.Circle((xpix,ypix),radius,color='white',fill=None)\n ax.add_patch(objpatch)\n\n if namelist[ii].endswith('SCI') & (beamhdr['EXTNAME'] == 'SCI'):\n # xpix, ypix = beamhdr['CRPIX1']-edgecut, beamhdr['CRPIX2']-edgecut\n xpix, ypix = beamhdr['ORIGINX']-edgecut, beamhdr['ORIGINY']-edgecut\n width = beamhdr['NAXIS1']\n height = beamhdr['NAXIS2']\n objpatch = patch.Rectangle((xpix,ypix), width, height, color='red', linewidth=1.0, linestyle='-', fill=None)\n ax.add_patch(objpatch)\n\n ax.set_xticks([]); ax.set_yticks([])\n ax.set_xticklabels([]); ax.set_yticklabels([])\n\n fig.axes[0].set_title('F277W REF IMG')\n fig.axes[2].set_title('F356W REF IMG')\n fig.axes[4].set_title('F444W REF IMG')\n\n fig.axes[1].set_title('F277W GRISM')\n fig.axes[3].set_title('F356W GRISM')\n fig.axes[5].set_title('F444W GRISM')\n\n fig.axes[0].set_ylabel('GRISM R')\n fig.axes[6].set_ylabel('GRISM C')\n\n fig.tight_layout(pad=0.1)\n plt.savefig(outfile)\n plt.clf()\n plt.close('all')", "def plot_example2():\n\n matplotlib.rcParams['text.usetex'] = True\n matplotlib.rcParams['text.latex.unicode'] = True\n\n # load control data\n File = os.getcwd() + '/npyfiles/ex2_data.npy' \n control_setup1a = np.load(File, encoding='latin1')[()]['control_setup1a']\n control_setup1b = np.load(File, encoding='latin1')[()]['control_setup1b']\n control_setup2a = np.load(File, encoding='latin1')[()]['control_setup2a']\n control_setup2b = np.load(File, encoding='latin1')[()]['control_setup2b']\n\n # pre-allocate difference of controls\n control_diff_setup1 = control_setup1a - control_setup1b\n control_diff_setup2 = control_setup2a - control_setup2b\n\n # load mesh and set-up parameters\n mesh = fsi.Set_Mesh_Attributes('mesh.npy')\n prm = fsi.Set_Parameters(T=2.0, r=1.0, mu=2.0, eps=0.1, tau=0.0025,\n gf=1.0, gs1=1.0, gs2=1.0, gs3=0.01, a=1e-6)\n tmesh = fsi.Set_Temporal_Grid(prm)\n Mat = fsi.Set_Matrices(mesh, prm)\n\n # pre-allocation\n norm2_control_diff_setup1 = np.zeros((tmesh.NumNode-1,), dtype=np.float)\n norm2_control_diff_setup2 = np.zeros((tmesh.NumNode-1,), dtype=np.float)\n ncds_1f = np.zeros((tmesh.NumNode-1,), dtype=np.float)\n ncds_1s = np.zeros((tmesh.NumNode-1,), dtype=np.float)\n ncds_2f = np.zeros((tmesh.NumNode-1,), dtype=np.float)\n ncds_2s = np.zeros((tmesh.NumNode-1,), dtype=np.float)\n\n # compute L2-norms of difference in controls\n for i in range(tmesh.NumNode-1):\n vecx = control_diff_setup1[:mesh.dof, i]\n vecy = control_diff_setup1[mesh.dof:, i]\n norm2_control_diff_setup1[i] = \\\n np.dot(vecx, Mat.M * vecx) + np.dot(vecy, Mat.M * vecy)\n vecx = control_diff_setup1[mesh.IndexFluid, i]\n vecy = control_diff_setup1[mesh.dof + mesh.IndexFluid, i]\n ncds_1f[i] = (np.dot(vecx, Mat.Mf_block * vecx) \n + np.dot(vecy, Mat.Mf_block * vecy))\n vecx = control_diff_setup1[mesh.NodeSolidIndex, i]\n vecy = control_diff_setup1[mesh.dof + mesh.NodeSolidIndex, i]\n ncds_1s[i] = (np.dot(vecx, Mat.Ms_block * vecx) \n + np.dot(vecy, Mat.Ms_block * vecy))\n\n vecx = control_diff_setup2[:mesh.dof, i]\n vecy = control_diff_setup2[mesh.dof:, i]\n norm2_control_diff_setup2[i] = \\\n np.dot(vecx, Mat.M * vecx) + np.dot(vecy, Mat.M * vecy)\n vecx = control_diff_setup2[mesh.IndexFluid, i]\n vecy = control_diff_setup2[mesh.dof + mesh.IndexFluid, i]\n ncds_2f[i] = (np.dot(vecx, Mat.Mf_block * vecx) \n + np.dot(vecy, Mat.Mf_block * vecy))\n vecx = control_diff_setup2[mesh.NodeSolidIndex, i]\n vecy = control_diff_setup2[mesh.dof + mesh.NodeSolidIndex, i]\n ncds_2s[i] = (np.dot(vecx, Mat.Ms_block * vecx) \n + np.dot(vecy, Mat.Ms_block * 
vecy))\n\n # create and save figure \n fig = plt.figure(figsize=(9,6))\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n time_grid = tmesh.Grid[1:len(tmesh.Grid)-1]\n ax1.plot(time_grid, norm2_control_diff_setup1[:len(time_grid)],\n linestyle='-', color='black',\n label=r'$\\|q_a(t) - q_b(t)\\|_{\\Omega}^2$')\n ax1.plot(time_grid, ncds_1f[:len(time_grid)],\n linestyle='-.', color='blue',\n label=r'$\\|q_a(t) - q_b(t)\\|_{\\Omega_{fh}}^2$')\n ax1.plot(time_grid, ncds_1s[:len(time_grid)],\n linestyle='--', color='red',\n label=r'$\\|q_a(t) - q_b(t)\\|_{\\Omega_{sh}}^2$')\n ax2.plot(time_grid, norm2_control_diff_setup2[:len(time_grid)],\n linestyle='-', color='black',\n label=r'$\\|q_a(t) - q_b(t)\\|_{\\Omega}^2$')\n ax2.plot(time_grid, ncds_2f[:len(time_grid)],\n linestyle='-.', color='blue',\n label=r'$\\|q_a(t) - q_b(t)\\|_{\\Omega_{fh}}^2$')\n ax2.plot(time_grid, ncds_2s[:len(time_grid)],\n linestyle='--', color='red',\n label=r'$\\|q_a(t) - q_b(t)\\|_{\\Omega_{sh}}^2$')\n ax1.set_title(r'$\\alpha = 10^{-3}$', fontsize=15)\n ax2.set_title(r'$\\alpha = 10^{-6}$', fontsize=15)\n ax1.set_xlim(0, 2)\n ax1.set_ylim(0, 0.2)\n ax2.set_xlim(0, 2)\n ax2.set_ylim(0, 7)\n ax1.legend(loc='best', fontsize=14)\n ax2.legend(loc='best', fontsize=14)\n plt.subplots_adjust(left=0.12, bottom=0.11, right=0.90,\n top=0.90, wspace=0.20, hspace=0.60)\n FileName = os.getcwd() + '/figfiles/ex2.eps'\n fig.savefig(FileName, format='eps', dpi=900, bbox_inches='tight')", "def plot_transformation(args):\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\n MEDIUM_SIZE = 18\n BIGGER_SIZE = 24\n\n plt.rc('text', usetex=True) # controls default text sizes\n plt.rc('font', size=BIGGER_SIZE) # controls default text sizes\n plt.rc('font', family=\"serif\") # controls default text sizes\n plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize\n\n model: GraphObjectIDFeaturizerEmbedder = cast(GraphObjectIDFeaturizerEmbedder,\n GraphEmbedder.from_file(args.model_file))\n\n inputs_tensor = model.get_featurizer_graph_embedder().retrieve_nodes(\n model.graph_dataset.n_nodes()\n )\n inputs = inputs_tensor.detach().numpy()\n\n if args.input:\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(111)\n draw_wireframe(ax, inputs)\n plot_input(ax, model, inputs)\n fig.tight_layout()\n\n if args.input_path:\n fig.savefig(args.input_path)\n else:\n fig.show()\n input(\"Press any key to exit.\")\n if args.output:\n output_tensor = model.retrieve_nodes(model.graph_dataset.n_nodes())\n outputs = output_tensor.detach().numpy()\n\n outputs = project_to_ambient(model.out_manifold, outputs)\n fig = plt.figure(figsize=(8, 8))\n if outputs.shape[-1] == 2:\n ax = fig.add_subplot(111)\n else:\n assert outputs.shape[-1] == 3\n ax = fig.add_subplot(111, projection='3d')\n\n draw_manifold_wireframe(ax, model.out_manifold)\n draw_wireframe(ax, inputs, model.model)\n plot_output(ax, model.graph_dataset, model.out_manifold, inputs, outputs)\n\n fig.tight_layout()\n if args.output_path:\n fig.savefig(args.output_path)\n else:\n fig.show()\n input(\"Press any key to exit.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all service accounts for the current project.
def list(self): sa = ( self.resource.projects() .serviceAccounts() .list(name="projects/" + self.project_id) .execute() ) msg = "\n".join([f"{_['email']}" for _ in sa["accounts"]]) return f"```{msg}```"
[ "def list_accounts(ctx):\n accounts = ctx.obj['app'].services.accounts\n if len(accounts) == 0:\n click.echo('no accounts found')\n else:\n fmt = '{i:>4} {address:<40} {id:<36} {locked:<1}'\n click.echo(' {address:<40} {id:<36} {locked}'.format(address='Address (if known)',\n id='Id (if any)',\n locked='Locked'))\n for i, account in enumerate(accounts):\n click.echo(fmt.format(i='#' + str(i + 1),\n address=encode_hex(account.address or ''),\n id=account.uuid or '',\n locked='yes' if account.locked else 'no'))", "def service_accounts(ctx, *args, **kwargs):\n admin_check(ctx.obj[\"user_id\"])\n ctx.obj[\"sa_actions\"] = ServiceAccountActions(ctx.obj[\"project\"])\n return ctx.obj[\"sa_actions\"].list()", "def list_service_account(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_service_account\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/serviceaccounts'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1ServiceAccountList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def listAccounts(cls, api_client, **kwargs):\n\n cmd = {}\n # cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return api_client.listProjectAccounts(**cmd)", "def list_accounts(self):\n accounts = get_accounts()\n print(\"Accounts\")\n print (\"-\"* 15)\n for account in accounts[\"accounts\"]:\n print(account)", "def list_accounts(self):\n accounts = self.analytics.management().accounts().list().execute()\n return accounts, accounts.get('items', [])", "def list_google_accounts(self):\n return self._get(route='GoogleCloudAccount')", "def service_account(self) -> 'outputs.ServiceAccountResponse':\n return pulumi.get(self, \"service_account\")", "def test_list_accounts(self):\n\n r = self.client.list_accounts(include=None)", "def service_account():\n # This name should be same as SERVICE_NAME as it determines scheduler DCOS_LABEL value.\n name = config.SERVICE_NAME\n sdk_security.create_service_account(\n 
service_account_name=name, service_account_secret=name)\n # TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475\n sdk_cmd.run_cli(\n \"security org groups add_user superusers {name}\".format(name=name))\n yield name\n sdk_security.delete_service_account(\n service_account_name=name, service_account_secret=name)", "def get_accounts(self) -> List[list]:\n\n accounts = []\n for account in self.accounts.values():\n accounts.append([account.name] + account.credentials)\n\n return accounts", "def list_accounts(*args):\n for account, owner in bank.get_all_accounts():\n print(str(account), \"/ Owner:\", owner.name)", "def services():\n dbSession = current_app.config['DBSESSION'] # get the db session\n # get the list of services from the db\n serv = dbSession.query(Service).all()\n return render_template('org/services.html', services=serv)", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return api_client.listNetworkServiceProviders(**cmd)", "def watch_service_account_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_service_account_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/watch/serviceaccounts'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_accounts():\n return AccountModel.query.all()", "def send_accounts(self) -> None:\n\n msg = \"account list\\r\\n\"\n logger.debug(\"sending account list message: %s\", msg)\n self._send(msg)", "def services_all(ctx):\n ctx.run(KUBERNETES_GET_SERVICES_ALL_CMD)", "def display_all_credential():\n return Credentials.display_all_credential()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes a service account's display name.
def rename(self, email, new_display_name): resource_name = f"projects/-/serviceAccounts/{email}" account = ( self.resource.projects().serviceAccounts().get(name=resource_name).execute() ) old_display_name = account["displayName"] account["displayName"] = new_display_name account = ( self.resource.projects() .serviceAccounts() .update(name=resource_name, body=account) .execute() ) msg = f"Updated display name of `{account['email']}`" msg = f"{msg} from `{old_display_name}` to `{account['displayName']}`" return msg
[ "def set_user_display_name(self, value: str) -> None:\n if value is None:\n raise ValueError('Administrator full name is invalid')\n self._settings[USER_DISPLAY_NAME_KEY].set_value(value)", "async def set_displayname(\n self,\n target_user: UserID,\n requester: Requester,\n new_displayname: str,\n by_admin: bool = False,\n deactivation: bool = False,\n ) -> None:\n if not self.hs.is_mine(target_user):\n raise SynapseError(400, \"User is not hosted on this homeserver\")\n\n if not by_admin and target_user != requester.user:\n raise AuthError(400, \"Cannot set another user's displayname\")\n\n if not by_admin and not self.hs.config.registration.enable_set_displayname:\n profile = await self.store.get_profileinfo(target_user)\n if profile.display_name:\n raise SynapseError(\n 400,\n \"Changing display name is disabled on this server\",\n Codes.FORBIDDEN,\n )\n\n if not isinstance(new_displayname, str):\n raise SynapseError(\n 400, \"'displayname' must be a string\", errcode=Codes.INVALID_PARAM\n )\n\n if len(new_displayname) > MAX_DISPLAYNAME_LEN:\n raise SynapseError(\n 400, \"Displayname is too long (max %i)\" % (MAX_DISPLAYNAME_LEN,)\n )\n\n displayname_to_set: Optional[str] = new_displayname.strip()\n if new_displayname == \"\":\n displayname_to_set = None\n\n # If the admin changes the display name of a user, the requesting user cannot send\n # the join event to update the display name in the rooms.\n # This must be done by the target user themselves.\n if by_admin:\n requester = create_requester(\n target_user,\n authenticated_entity=requester.authenticated_entity,\n )\n\n await self.store.set_profile_displayname(target_user, displayname_to_set)\n\n profile = await self.store.get_profileinfo(target_user)\n await self.user_directory_handler.handle_local_profile_change(\n target_user.to_string(), profile\n )\n\n await self._third_party_rules.on_profile_update(\n target_user.to_string(), profile, by_admin, deactivation\n )\n\n await self._update_join_states(requester, target_user)", "def change_name(self):\n if self.user_can_update_information():\n old_firstname = self.user.firstname\n old_surname = self.user.surname\n self.user.firstname = input(\"What is your firstname?\\n\")\n self.user.surname = input(\"What is your lastname?\\n\")\n update_user(self.user)\n print_message(f\"The name '{old_firstname} {old_surname}' has been updated to \"\n f\"'{self.user.firstname}' {self.user.surname}'\")\n else:\n print_error(\"Password is incorrect. 
Cannot update name.\")", "def autoname(self):\n\t\tif not self.email_account_name:\n\t\t\tself.email_account_name = (\n\t\t\t\tself.email_id.split(\"@\", 1)[0].replace(\"_\", \" \").replace(\".\", \" \").replace(\"-\", \" \").title()\n\t\t\t)\n\n\t\tself.name = self.email_account_name", "def set_name(self, new_name):\n self.name = new_name", "def changeName(self, uid, acc_num, account_name):\n with open('model/account_model.json', 'r+') as json_file:\n data = json.load(json_file)\n for index, account in enumerate(data):\n if (account['uid'] == uid) and (account['acc_num'] == acc_num):\n data[index]['acc_name'] = str(account_name)\n json_file.seek(0)\n json.dump(data, json_file, indent=4)\n return True", "def _set_display_name(ctx):\n ctx.ext.display_name = None\n if hasattr(ctx.doc, \"short_name\") and ctx.doc.short_name:\n ctx.ext.display_name = ctx.doc.short_name", "def Edit_Contact_Name(self, index, name):\n self.__contactList[index].Set_Name(name)", "def setDisplayName( self, name ):\r\n\t\tself._nativePointer.setname('')\r\n\t\tself._nativePointer.setname( str(name) )\r\n\t\t# Reset the metadata and requery it from the scene file. That will\r\n\t\t# force an update of the name value and ensure we don't create a\r\n\t\t# disconnect between the metadata entry and the newly-renamed layer.\r\n\t\tself._metaData = None\r\n\t\tself.metaData()\r\n\t\treturn", "def change_first_name(self, name):\n\n if not fullmatch(self.__MATCH_NAME, name):\n raise InvalidCustomerNameException(name)\n\n self.first_name = name", "def change_screen_name(\n user: User,\n new_screen_name: str,\n initiator: User,\n *,\n reason: str | None = None,\n) -> UserScreenNameChangedEvent:\n event, log_entry = user_domain_service.change_screen_name(\n user, new_screen_name, initiator, reason=reason\n )\n\n _persist_screen_name_change(event, log_entry)\n\n return event", "async def cname(self, ctx, channel: typing.Union[discord.TextChannel, discord.VoiceChannel, discord.CategoryChannel, None], *, name):\n\t\tchan = channel or ctx.channel\n\t\toldname = chan.name\n\t\tawait chan.edit(name=name, reason=f\"Name changed by {ctx.author} ({ctx.author.id}).\")\n\t\tawait ctx.send(f\"Changed <#{chan.id}>'s name!\\nBefore: `#{oldname}`\\nAfter: `#{chan.name}`\")", "def reformat_contacts_name(self):\n partner_ids = self.env[\"res.partner\"].search([\n \"&\",\n \"&\",\n\n (\"is_company\", \"!=\", \"False\"),\n (\"is_family\", \"!=\", \"False\"),\n\n \"|\",\n \"|\",\n\n (\"first_name\", \"!=\", \"False\"),\n (\"middle_name\", \"!=\", \"False\"),\n (\"last_name\", \"!=\", \"False\"),\n ])\n\n partner_ids.auto_format_name()", "def service_account_resource_name(self, account) -> str:\n return f\"projects/{self.project}/serviceAccounts/{account}\"", "def last_user_name(self, value):\n self._last_user_name = value", "def set_user_name(self, user_name): \n self.user_name = user_name", "def set_realname(self, name):\n\n self.realname = name\n\n self.server_mem.clients[self.socket] = self.nickname\n self.send_welcome_messages() # separated this for atomicity.", "def _set_display_name_on_save(context):\n params = context.get_current_parameters()\n if not params['display_name']:\n return params[\"canonical_facts\"].get(\"fqdn\") or params['id']", "def SetCustomName(self, givenName): # real signature unknown; restored from __doc__\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disables a service account.
def disable(self, email): self.resource.projects().serviceAccounts().disable( name=f"projects/-/serviceAccounts/{email}" ).execute() return f"Service account `{email}` disabled."
[ "def disable(nitro, service):\n __service = NSService()\n __service.set_name(service.get_name())\n __service.set_delay(service.get_delay())\n __service.set_graceful(service.get_graceful())\n return __service.perform_operation(nitro, \"disable\")", "def disableAccount():\n\tif UserModel.disableUser(g.currentUser['_id']):\n\t\treturn json.dumps({'result':'OK'}), 200\n\n\treturn abort(400)", "def disable_account(self):\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/admin/account/disable'\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.post(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 400:\r\n raise APIException('Bad request', _context)\r\n elif _context.response.status_code == 403:\r\n raise APIException('Forbidden (Access denied)', _context)\r\n elif _context.response.status_code == 500:\r\n raise APIException('Internal server error', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body)", "def user_disable(self, username):\n self._request('user_disable', username)", "def disable_user(session, user):\n\n get_plugin_proxy().will_disable_user(session, user)\n\n user.enabled = False\n Counter.incr(session, \"updates\")", "def disable(self, host, binary):\n body = {\"host\": host, \"binary\": binary}\n result = self._update(\"/os-services/disable\", body)\n return self.resource_class(self, result, resp=result.request_ids)", "def disable_user(self, username):\n\n print('Updating user %s\\'s account and setting it to disabled.' 
% username)\n domain = ','.join(['dc=' + dc for dc in settings.AD_DOMAIN.split('.')])\n username = username.replace('.', ' ')\n query = 'cn=%s,cn=Users,%s' % (username, domain) \n result = self.connection.modify(\n query, \n {'userAccountControl': [\n MODIFY_REPLACE, \n [settings.AD_DISABLE]\n ]\n }\n )\n\n return result", "def test_disable_user_account(mocker):\n mocker.patch(\n \"TrendMicroVisionOneV3.Client.http_request\",\n enable_disable_user_account_mock_response,\n )\n client = Client(\"https://apimock-dev.trendmicro.com\", api_key, proxy, verify)\n args = {\n \"accountName\": \"ghost@trendmicro.com\",\n \"description\": \"Disabling user account.\",\n }\n result = enable_or_disable_user_account(\n client, \"trendmicro-visionone-disable-user-account\", args\n )\n assert result.outputs[\"status_code\"] == 202\n assert result.outputs[\"taskId\"] == \"00000001\"\n assert result.outputs_prefix == \"VisionOne.User_Account\"\n assert result.outputs_key_field == \"taskId\"", "def disable_user(self, uid):\n self.delete_user(uid)\n return True", "def disable_server(self, server_name):\n command = \"disable server servers/\" + server_name\n\n self.send_command(command)\n\n return", "def disable(self, subcmd, user):\n\n return self._toggle_user(user, enable=False)", "def disable_module(address, name, module):\n explore = explorepy.explore.Explore()\n explore.connect(mac_address=address, device_name=name)\n explore.disable_module(module)", "def disable(id, session_key=None):\n \n return NotableEventSuppression.set_suppression(id, False, session_key)", "def disable_adapter(self):\n return self.do_cmd(\"disable_adapter\")", "def disable(self, user: User):\n user.user.set_unusable_password()\n user.save()", "def disable_user_entitlement(\n entitlement_id: str,\n user_id: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = DisableUserEntitlement.create(\n entitlement_id=entitlement_id,\n user_id=user_id,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def applicationcredentials_disable(self, applicationcredentials_disable):\n\n self._applicationcredentials_disable = applicationcredentials_disable", "def disconnect_account(self):\n url = self.revocation_endpoint if self.oauth2 else self.disconnect_url\n result = self.make_request(\"GET\", url)\n return result", "def disable_tfa(self):\n result = self._client.post('/profile/tfa-disable')\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables a service account.
def enable(self, email): self.resource.projects().serviceAccounts().enable( name=f"projects/-/serviceAccounts/{email}" ).execute() return f"Service account `{email}` enabled."
[ "def enable(nitro, service):\n __service = NSService()\n __service.set_name(service.get_name())\n return __service.perform_operation(nitro, \"enable\")", "def enable_service(credentials, service_name):\n service_usage = discovery.build(\"serviceusage\", \"v1\", credentials=credentials)\n\n service_usage.services().enable(name=service_name).execute()", "def user_enable(self, username):\n self._request('user_enable', username)", "def enable_service_sync(self, project_id: str, service: str):\n\n service_name = '/'.join(['projects', project_id, 'services', service])\n request = self._service_usage_service.services().enable(\n name=service_name)\n response = request.execute()\n\n # When the api call succeed, the response is a Service object.\n # See\n # https://cloud.google.com/service-usage/docs/reference/rest/v1/services/get\n if 'name' not in response:\n raise EnableServiceError(\n 'unexpected response enabling service \"{}\": {}'.format(\n service_name, response))\n\n while True:\n request = self._service_usage_service.services().get(\n name=service_name)\n response = request.execute()\n # Response format:\n # https://cloud.google.com/service-usage/docs/reference/rest/v1/Service\n if response['state'] == 'ENABLED':\n return\n elif response['state'] == 'DISABLED':\n time.sleep(2)\n continue\n else:\n # In 'STATE_UNSPECIFIED' state.\n raise EnableServiceError(\n 'unexpected service status after enabling: {!r}: [{!r}]'.\n format(response['status'], response))", "def test_enable_user_account(mocker):\n mocker.patch(\n \"TrendMicroVisionOneV3.Client.http_request\",\n enable_disable_user_account_mock_response,\n )\n client = Client(\"https://apimock-dev.trendmicro.com\", api_key, proxy, verify)\n args = {\n \"accountName\": \"ghost@trendmicro.com\",\n \"description\": \"Enabling user account.\",\n }\n result = enable_or_disable_user_account(\n client, \"trendmicro-visionone-enable-user-account\", args\n )\n assert result.outputs[\"status_code\"] == 202\n assert result.outputs[\"taskId\"] == \"00000001\"\n assert result.outputs_prefix == \"VisionOne.User_Account\"\n assert result.outputs_key_field == \"taskId\"", "def enable(self, host, binary):\n body = {\"host\": host, \"binary\": binary}\n result = self._update(\"/os-services/enable\", body)\n return self.resource_class(self, result, resp=result.request_ids)", "def enable_user(self, uid, password): \n return True", "def applicationcredentials_enable(self, applicationcredentials_enable):\n\n self._applicationcredentials_enable = applicationcredentials_enable", "def mark_disabled_user_as_service_account(self, name, description=\"\", mdbset=\"\"):\n # type: (str, str, str) -> None\n user = SQLUser.get(self.session, name=name)\n if not user:\n raise UserNotFoundException(name)\n\n service_account = SQLServiceAccount(\n user_id=user.id, description=description, machine_set=mdbset\n )\n service_account.add(self.session)\n\n user.is_service_account = True", "def enable(cls, client, resource) :\n try :\n if type(resource) is not list :\n enableresource = nsfeature()\n enableresource.feature = resource.feature\n return enableresource.perform_operation(client,\"enable\")\n except Exception as e :\n raise e", "def enable_server(self, server_name):\n command = \"enable server servers/\" + server_name\n\n self.send_command(command)\n\n return", "def enable_user_entitlement(\n entitlement_id: str,\n user_id: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, 
error = get_services_namespace()\n if error:\n return None, error\n request = EnableUserEntitlement.create(\n entitlement_id=entitlement_id,\n user_id=user_id,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def enable(self, subcmd, user):\n\n return self._toggle_user(user, enable=True)", "async def enable_user_entitlement_async(\n entitlement_id: str,\n user_id: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = EnableUserEntitlement.create(\n entitlement_id=entitlement_id,\n user_id=user_id,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def enable_services(credentials, project_number):\n enabled_services = []\n\n services_to_enable = get_services_to_enable()\n\n project_name = \"projects/\" + project_number\n\n services = get_enabled_services(credentials=credentials, project_name=project_name)\n\n for service in services:\n service_name = service[\"config\"][\"name\"]\n\n if service_name in services_to_enable:\n services_to_enable[service_name] = False\n\n for service_name, should_enable in services_to_enable.items():\n if should_enable:\n service_long_name = project_name + \"/services/\" + service_name\n enable_service(credentials=credentials, service_name=service_long_name)\n enabled_services.append(service_long_name)\n\n return enabled_services", "def enable(self, workflowName):\n if workflowName in self.namedServices.keys(): \n msg = \"Enabling '%s' workflow...\" % workflowName\n twisted_logger.writeLog(self.logPrefix, self.logName, msg)\n self.namedServices[workflowName].workflow.enable()", "def enable(self):\n # Schedule self._checkEnabled()\n self._scheduleCheckEnabled()\n self.enabled = True\n msg = \"'%s' workflow is enabled\" % self.name\n twisted_logger.writeLog(self.logPrefix, self.logName, msg)", "def test02_disable_enable_account(self):\n self.lg('create new account %s' % self.account)\n self.Accounts.create_new_account(self.account, self.admin_username+\"@itsyouonline\")\n self.Accounts.open_account_page(self.account)\n self.assertTrue(self.Accounts.account_disable(self.account))\n self.assertTrue(self.Accounts.account_edit_all_items(self.account))\n self.assertTrue(self.Accounts.account_enable(self.account))\n self.assertTrue(self.Accounts.account_edit_all_items(self.account))", "def enable_throttling(self, enable_throttling):\n\n self._enable_throttling = enable_throttling" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes a service account.
def delete(self, email):
    self.resource.projects().serviceAccounts().delete(
        name=f"projects/-/serviceAccounts/{email}"
    ).execute()
    return f"Service account `{email}` deleted."
[ "def deleteServiceAcct(name, namespace):\n txClient = TxKubernetesClient()\n\n d = txClient.call(txClient.coreV1.delete_namespaced_service_account,\n name=name,\n namespace=namespace,\n body=txClient.V1DeleteOptions(),\n )\n return d", "def delete_account(l):\n user = acquire_user()\n if not user:\n l.interrupt()\n login(l, username=user[\"username\"], password=user[\"password\"])\n simulate_loading_profile_page(l)\n\n with l.client.post(\n USER_DELETE_ACCOUNT_ENDPOINT,\n json={\"password\": user[\"password\"]},\n headers={\"X-CSRF-Token\": l.client.cookies[\"token\"]},\n catch_response=True,\n ) as res:\n if res.status_code == 200:\n get_db().users.find_one_and_update(\n {\"username\": user[\"username\"]}, {\"$set\": {\"deleted\": True}}\n )\n res.success()\n else:\n res.failure(\"Failed to delete account: \" + str(res.json()))\n release_user(user[\"username\"])\n l.interrupt()", "def delete(self, aws_cloud_account_id: str):\n\t\troute = f'{AwsCloudAccountConsts.CLOUD_ACCOUNTS.value}/{aws_cloud_account_id}'\n\t\treturn self._delete(route=route)", "def test_delete_token_service_account(self):\n pass", "def delete_core_v1_namespaced_service_account(self, name, namespace, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_core_v1_namespaced_service_account_with_http_info(name, namespace, body, **kwargs)\n else:\n (data) = self.delete_core_v1_namespaced_service_account_with_http_info(name, namespace, body, **kwargs)\n return data", "def delete_account(self, account_number: int):\n\t\taccount = self.accounts.pop(account_number)\n\t\taccount.destroy()\n\t\tself._status += f\"{account} successfully deleted.\"\n\t\tdel account", "def delete_aws_account(self: object, parameters: dict = None, **kwargs) -> Dict[str, Union[int, dict]]:\n if kwargs.get(\"organization_ids\", None):\n kwargs[\"organization-ids\"] = kwargs.get(\"organization_ids\", None)\n\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"DeleteCSPMAwsAccount\",\n keywords=kwargs,\n params=parameters\n )", "def delete(self, social_account_id):\n\n token_payload = get_jwt()\n user_id = token_payload.get(\"sub\")\n\n SocialAccount.query.filter_by(id=social_account_id, user_id=user_id).first_or_404()\n user_service.delete_socail_account(user_id=user_id, social_account_id=social_account_id)\n\n return {\"msg\": \"Social account deleted.\"}", "def delete_service():\n config.load_kube_config()\n service = input('Enter name of service to be deleted: ')\n namespace = input('Enter namespace of deployment: ')\n\n apps_v1 = client.AppsV1.Api()\n result = v1.delete_nampespaced_service(\n name=service,\n namespace=namespace,\n body = {}\n )\n print('Service deleted. 
status=%s' % (result.metadata.name))", "def delete_credentials(credentials):\n\tcredentials.delete_credentials()", "def delete_azure_account(self: object, *args, parameters: dict = None, **kwargs) -> Dict[str, Union[int, dict]]:\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"DeleteCSPMAzureAccount\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def advapi32_DeleteService(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hService\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def delete_account(request):\n email = request.matchdict['email']\n params = params_from_request(request)\n plaintext_password = params.get('password', None)\n if not email:\n raise ValueError('No email provided')\n\n if config.SSO_TEST_EMAIL == \"\":\n raise MethodNotAllowedError(\"Configuration error\")\n\n # SEC only allow test user to self delete\n if email != config.SSO_TEST_EMAIL:\n raise MethodNotAllowedError(\"This method is only for the test user\")\n\n try:\n user = User.objects.get(email=email)\n except UserNotFoundError:\n return OK\n if not user.check_password(password=plaintext_password):\n raise MethodNotAllowedError(\"Password is wrong!!\")\n user.delete()\n return OK", "def delete_user_account():\n\n # get user\n user = g.user\n\n # delete post\n user.status = User.STATUS_DELETED\n user.status_changed_at = datetime.now()\n\n # delete user profile\n if user.profile:\n user.profile.status = UserProfile.STATUS_DELETED\n user.profile.status_changed_at = datetime.now()\n\n db.session.commit()\n\n # response\n return '', 204", "def catalog_service(service_id):\n return app.manager.admin_catalog_services_controller.process_delete(service_id)", "def delete_core_v1_collection_namespaced_service_account(self, namespace, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.delete_core_v1_collection_namespaced_service_account_with_http_info(namespace, **kwargs)\n else:\n (data) = self.delete_core_v1_collection_namespaced_service_account_with_http_info(namespace, **kwargs)\n return data", "def remove_storage_account(self):\n self.storage_client.storage_accounts.delete(\n self.storage_acc_group, self.storage_acc_name\n )", "def cleanup_service_account(service_name: str, service_account_info: Dict[str, Any]) -> None:\n if isinstance(service_account_info, str):\n service_account_info = {\"name\": service_account_info}\n\n sdk_security.cleanup_security(service_name, service_account_info)", "def delete(self, service_template_id, **kwargs):\n service_template = self.model.service_template.get(service_template_id)\n self.core.delete_service_template(service_template_id)\n return service_template, 200" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists all keys for a service account.
def list_keys(self, email):
    keys = (
        self.resource.projects()
        .serviceAccounts()
        .keys()
        .list(name=f"projects/-/serviceAccounts/{email}")
        .execute()
    )
    msg = "\n".join(f"{key['name']} ({key['keyType']})" for key in keys["keys"])
    return f"```{msg}```"
[ "def list_keys(\n self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/{accountName}/listKeys'\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str'),\n 'accountName': self._serialize.url(\"account_name\", account_name, 'str', max_length=64, min_length=2, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*$'),\n 'subscriptionId': self._serialize.url(\"self.config.subscription_id\", self.config.subscription_id, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.api_version\", self.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct and send request\n request = self._client.post(url, query_parameters)\n response = self._client.send(request, header_parameters, **operation_config)\n\n if response.status_code not in [200]:\n raise models.ErrorException(self._deserialize, response)\n\n deserialized = None\n\n if response.status_code == 200:\n deserialized = self._deserialize('CognitiveServicesAccountKeys', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "def list_service_account(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_service_account\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/serviceaccounts'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n 
body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1ServiceAccountList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def List_AccessKeys(iam,username: str,marker=None,maxitems=100):\n\t\t\t\treturn list(iam.resource.User(username).access_keys.all())", "def list_keys(ctx, private):\n keys = ctx.parent.gpg.list_keys(private)\n\n length = len(keys)\n logging.info(f\"{length} {'public' if private is False else 'private'} keys exist.\")\n if not length:\n ctx.exit(1)\n\n click.secho(\"Current key is:\")\n click.secho(keys.curkey.get(\"fingerprint\"))\n click.secho(\"All keys are:\")\n for key, value in keys.key_map.items():\n click.secho(value.get(\"fingerprint\"))\n\n logging.info(\"List keys finished.\")", "def list_tsigkeys(self, params=None, headers=None):\n return self._list_request('tsigkeys', params=params, headers=headers)", "def get_all_sshkeys(self):\n self.mock_data = \"keys/all.json\"\n data = self.get_data(\"account/keys/\")\n ssh_keys = list()\n for jsoned in data['ssh_keys']:\n ssh_key = SSHKey(**jsoned)\n ssh_key.token = self.token\n ssh_key.mocked = self.mocked\n ssh_keys.append(ssh_key)\n return ssh_keys", "def test_list_service_keys(self):\n pass", "def get_service_public_keys(self, service_id):\n response = self._transport.post(\n \"{}/keys/list\".format(self.__service_base_path[0:-1]),\n self._subject, service_id=str(service_id))\n\n public_keys = []\n\n for key in response.data:\n key_data = self._validate_response(key, PublicKeyValidator)\n public_key = PublicKey(key_data)\n public_keys.append(public_key)\n\n return public_keys", "def list_accounts(ctx):\n accounts = ctx.obj['app'].services.accounts\n if len(accounts) == 0:\n click.echo('no accounts found')\n else:\n fmt = '{i:>4} {address:<40} {id:<36} {locked:<1}'\n click.echo(' {address:<40} {id:<36} {locked}'.format(address='Address (if known)',\n id='Id (if any)',\n locked='Locked'))\n for i, account in enumerate(accounts):\n click.echo(fmt.format(i='#' + str(i + 1),\n address=encode_hex(account.address or ''),\n id=account.uuid or '',\n locked='yes' if account.locked else 'no'))", "def get_oauth2_service_account_keys():\n return _OAUTH2_SERVICE_ACCOUNT_KEYS", "def list_accounts(self):\n accounts = get_accounts()\n print(\"Accounts\")\n print (\"-\"* 15)\n for account in accounts[\"accounts\"]:\n print(account)", "def get_storage_account_keys(self, group_name=None, storage_acc_name=None):\n try:\n storage_keys = self.storage_client.storage_accounts.list_keys(\n group_name, storage_acc_name\n )\n except CloudError as cloud_err:\n self.colored_print(cloud_err.message, level=\"error\")\n raise\n storage_keys = {v.key_name: v.value for v in storage_keys.keys}\n return storage_keys", "def list(self, limit):\n try:\n for kp in self.ec2_resource.key_pairs.limit(limit):\n print(f\"Found {kp.key_type} key {kp.name} with fingerprint:\")\n print(f\"\\t{kp.key_fingerprint}\")\n except ClientError as err:\n logger.error(\n \"Couldn't list key pairs. 
Here's why: %s: %s\",\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise", "def _get_all_keys(self, bucket, prefix = \"logs_001\"):\n self.logger.info(\"_get_all_keys starts\")\n all_keys = []\n # all_keys = bucket.get_all_keys(prefix = prefix) # max_keys_limit = 1000\n for key in bucket.list():\n all_keys.append(key)\n self.logger.info(\"_get_all_keys finished\")\n return all_keys", "def list_keys(\n self,\n resource_group_name: str,\n communication_service_name: str,\n **kwargs: Any\n ) -> \"_models.CommunicationServiceKeys\":\n cls = kwargs.pop('cls', None) # type: ClsType[\"_models.CommunicationServiceKeys\"]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n\n api_version = kwargs.pop('api_version', \"2021-10-01-preview\") # type: str\n\n \n request = build_list_keys_request(\n subscription_id=self._config.subscription_id,\n resource_group_name=resource_group_name,\n communication_service_name=communication_service_name,\n api_version=api_version,\n template_url=self.list_keys.metadata['url'],\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access\n request,\n stream=False,\n **kwargs\n )\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)\n raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)\n\n deserialized = self._deserialize('CommunicationServiceKeys', pipeline_response)\n\n if cls:\n return cls(pipeline_response, deserialized, {})\n\n return deserialized", "def List(apig):\n\t\t\t\treturn apig.client.get_api_keys()['items']", "def display_all_credential():\n return Credentials.display_all_credential()", "def keys(self):\n return _NamelistKeysView(self)", "def ex_list_ssh_keys(self):\r\n data = self.connection.request('/ssh_keys').object['ssh_keys']\r\n return list(map(self._to_ssh_key, data))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a service account key.
def create_key(self, email):
    key = (
        self.resource.projects()
        .serviceAccounts()
        .keys()
        .create(name=f"projects/-/serviceAccounts/{email}", body={})
        .execute()
    )
    bucket_name = os.environ["KEY_FILES_BUCKET"]
    bucket_gs = f"gs://{bucket_name}/keys"
    key_file = f"{key['name']}.json"
    with SimpleStorage(bucket_gs) as storage:
        storage.put_file(
            file_path=key_file,
            content=base64.b64decode(key["privateKeyData"]),
            compress=None,
            cache_control="no-cache",
        )
    url = utils.generate_signed_url(bucket_name, f"keys/{key_file}")
    msg = f"Key created `{key['name'].split('/')[-1]}`."
    msg = f"{msg}\nAvailable <{url}|here> (link valid for"
    return f"{msg} {int(os.environ['KEY_LINK_EXPIRATION'])/60}m)."
[ "def CreateServiceAccountKey(service_account_name):\n default_credential_path = os.path.join(\n config.Paths().global_config_dir,\n _Utf8ToBase64(service_account_name) + '.json')\n credential_file_path = encoding.GetEncodedValue(os.environ,\n 'LOCAL_CREDENTIAL_PATH',\n default_credential_path)\n if os.path.exists(credential_file_path):\n return files.ReadFileContents(credential_file_path)\n\n warning_msg = ('Creating a user-managed service account key for '\n '{service_account_name}. This service account key will be '\n 'the default credential pointed to by '\n 'GOOGLE_APPLICATION_CREDENTIALS in the local development '\n 'environment. The user is responsible for the storage,'\n 'rotation, and deletion of this key. A copy of this key will '\n 'be stored at {local_key_path}.\\n'\n 'Only use service accounts from a test project. Do not use '\n 'service accounts from a production project.').format(\n service_account_name=service_account_name,\n local_key_path=credential_file_path)\n console_io.PromptContinue(\n message=warning_msg, prompt_string='Continue?', cancel_on_no=True)\n\n service = apis.GetClientInstance('iam', 'v1')\n message_module = service.MESSAGES_MODULE\n\n create_key_request = (\n message_module.IamProjectsServiceAccountsKeysCreateRequest(\n name=service_account_name,\n createServiceAccountKeyRequest=message_module\n .CreateServiceAccountKeyRequest(\n privateKeyType=message_module.CreateServiceAccountKeyRequest\n .PrivateKeyTypeValueValuesEnum.TYPE_GOOGLE_CREDENTIALS_FILE)))\n key = service.projects_serviceAccounts_keys.Create(create_key_request)\n\n files.WriteFileContents(credential_file_path, key.privateKeyData)\n\n return six.ensure_text(key.privateKeyData)", "def test_create_service_key(self):\n pass", "def create_serviceaccount(request):\n class_instance = request.node.cls\n\n def finalizer():\n \"\"\"\n Delete the service account\n \"\"\"\n helpers.remove_scc_policy(\n sa_name=class_instance.sa_obj.name,\n namespace=class_instance.project_obj.namespace,\n )\n class_instance.sa_obj.delete()\n\n request.addfinalizer(finalizer)\n\n class_instance.sa_obj = helpers.create_serviceaccount(\n namespace=class_instance.project_obj.namespace,\n )\n helpers.add_scc_policy(\n sa_name=class_instance.sa_obj.name,\n namespace=class_instance.project_obj.namespace,\n )\n assert class_instance.sa_obj, \"Failed to create serviceaccount\"", "def CreateKMSKey(self) -> str:\n\n client = self.aws_account.ClientApi(common.KMS_SERVICE)\n try:\n kms_key = client.create_key()\n except client.exceptions.ClientError as exception:\n raise errors.ResourceCreationError(\n 'Could not create KMS key: {0!s}'.format(\n exception), __name__) from exception\n\n # The response contains the key ID\n key_id = kms_key['KeyMetadata']['KeyId'] # type: str\n return key_id", "def CreateKey(*, session, name):\n ec2conn = session.connect_to(\"ec2\")\n return ec2conn.create_key_pair(key_name=name)", "def create_key(self, sp, creator, email):\n import hashlib\n import random\n\n from dateutil.relativedelta import relativedelta\n\n date = timezone.now() + relativedelta(months=1)\n activation_key = hashlib.sha1(str(random.random()).encode(\"utf-8\")).hexdigest()\n key = self.create(sp=sp, creator=creator, activation_key=activation_key, email=email, valid_until=date)\n return key", "def _create_key(self):\n return uuid.uuid4().hex", "def create_new(self):\n log.info(\"Creating a new key\")\n response = 
self.connection.create_access_key(self.ask_amazon_for_username())[\"create_access_key_response\"][\"create_access_key_result\"][\"access_key\"]\n log.info(\"Created %s\", response[\"access_key_id\"])\n iam_pair = IamPair(str(response[\"access_key_id\"]), str(response[\"secret_access_key\"]), create_epoch=self.amazon_date_to_epoch(response[\"create_date\"]))\n iam_pair.wait_till_works()\n return iam_pair", "def Create(apig,key_name: str,purpose: str,enabled=True,value='',generate_distict_id=True):\n\n\t\t\t\tapi_key_list = AWS.APIGateway.Key.List(apig)\n\n\t\t\t\tactive_api_keys = [x for x in api_key_list if x['name'] == key_name]\n\n\t\t\t\tif len(active_api_keys) <= 0:\n\t\t\t\t\treturn apig.client.create_api_key(name=key_name,description=purpose,\\\n\t\t\t\t\t\t\t\t\t enabled=enabled,generateDistinctId=generate_distict_id,value=value)\n\t\t\t\telse:\n\t\t\t\t\treturn AWS.APIGateway.Key.Get_Key(apig,active_api_keys[0]['id'],include_value=True)", "def service_account():\n # This name should be same as SERVICE_NAME as it determines scheduler DCOS_LABEL value.\n name = config.SERVICE_NAME\n sdk_security.create_service_account(\n service_account_name=name, service_account_secret=name)\n # TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475\n sdk_cmd.run_cli(\n \"security org groups add_user superusers {name}\".format(name=name))\n yield name\n sdk_security.delete_service_account(\n service_account_name=name, service_account_secret=name)", "def make_key(self):\n\t\tif self.key:\n\t\t\tif not os.path.isfile(os.path.join(self.root, self.key + \".biprivatekey\")):\n\t\t\t\tprint_green(\"\\nRequested key does not exist.\")\n\t\t\t\tret = subprocess.call([self.dscreatekey, self.key], stdout = subprocess.DEVNULL if self.quiet else None, stderr = subprocess.DEVNULL if self.quiet else None) # Created in root\n\t\t\t\tif ret == 0:\n\t\t\t\t\tprint_blue(\"Created: \" + os.path.join(self.root, self.key + \".biprivatekey\"))\n\t\t\t\telse:\n\t\t\t\t\tprint_error(\"Failed to create key!\")\n\n\t\t\t\ttry:\n\t\t\t\t\tprint_blue(\"Copying public key to release directory.\\n\")\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.makedirs(os.path.join(self.release_dir, \"Keys\"))\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\tshutil.copyfile(os.path.join(self.root, self.key + \".bikey\"), os.path.join(self.release_dir, \"Keys\", self.key + \".bikey\"))\n\n\t\t\t\texcept:\n\t\t\t\t\tprint_error(\"Could not copy key to release directory.\\n\")\n\t\t\t\t\traise\n\n\t\t\telse:\n\t\t\t\tprint_green(\"\\nNOTE: Using key \" + os.path.join(self.root, self.key + \".biprivatekey\\n\"))\n\n\t\t\tself.key = os.path.join(self.root, self.key + \".biprivatekey\")", "def create_API_key(self, **OPargs):\n params = {}\n params.update(OPargs)\n data = self.api_signed_request(\"POST\", \"/apiKey\", params)\n return (data)", "def createServiceAccount(filePath, namespace):\n txClient = TxKubernetesClient()\n \n with open(filePath, 'r') as file:\n body = yaml.load(file)\n\n d = txClient.call(txClient.coreV1.create_namespaced_service_account,\n namespace,\n body,\n )\n return d", "def new_account(ctx, uuid):\n app = ctx.obj['app']\n if uuid:\n id_ = uuid4()\n else:\n id_ = None\n password = ctx.obj['password']\n if password is None:\n password = click.prompt('Password to encrypt private key', default='', hide_input=True,\n confirmation_prompt=True, show_default=False)\n account = Account.new(password, uuid=id_)\n account.path = os.path.join(app.services.accounts.keystore_dir, encode_hex(account.address))\n try:\n 
app.services.accounts.add_account(account)\n except IOError:\n click.echo('Could not write keystore file. Make sure you have write permission in the '\n 'configured directory and check the log for further information.')\n sys.exit(1)\n else:\n click.echo('Account creation successful')\n click.echo(' Address: {}'.format(encode_hex(account.address)))\n click.echo(' Id: {}'.format(account.uuid))", "def build_token_service_key(credentials, params, duration_minutes):\n issuer = credentials._service_account_email\n return _build_token(credentials, issuer, params, duration_minutes)", "def create_key(username):\n\n key = str(username) + str(datetime.datetime.now())\n msg = 'opendsa.cc.vt.edu'\n hash_key = hmac.new(key, msg, sha1)\n return hash_key.digest().encode('hex')", "def setup_service_account(\n service_name: str, service_account_secret: Optional[str] = None\n) -> Dict[str, Any]:\n\n if sdk_utils.is_open_dcos():\n log.error(\"The setup of a service account requires DC/OS EE. service_name=%s\", service_name)\n raise Exception(\"The setup of a service account requires DC/OS EE\")\n\n secret = service_name + \"-secret\" if service_account_secret is None else service_account_secret\n\n service_account = \"{}-service-account\".format(service_name.replace(\"/\", \"\"))\n\n service_account_info = sdk_security.setup_security(\n service_name,\n linux_user=\"nobody\",\n service_account=service_account,\n service_account_secret=secret,\n )\n\n log.info(\"Adding permissions required for TLS.\")\n if sdk_utils.dcos_version_less_than(\"1.11\"):\n sdk_cmd.run_cli(\"security org groups add_user superusers {sa}\".format(sa=service_account))\n else:\n acls = [\n {\"rid\": \"dcos:secrets:default:/{}/*\".format(service_name.strip(\"/\")), \"action\": \"full\"},\n {\n \"rid\": \"dcos:secrets:list:default:/{}\".format(service_name.strip(\"/\")),\n \"action\": \"read\",\n },\n {\"rid\": \"dcos:adminrouter:ops:ca:rw\", \"action\": \"full\"},\n {\"rid\": \"dcos:adminrouter:ops:ca:ro\", \"action\": \"full\"},\n ]\n\n for acl in acls:\n cmd_list = [\n \"security\",\n \"org\",\n \"users\",\n \"grant\",\n \"--description\",\n '\"Allow provisioning TLS certificates\"',\n service_account,\n acl[\"rid\"],\n acl[\"action\"],\n ]\n\n sdk_cmd.run_cli(\" \".join(cmd_list))\n\n return service_account_info", "def test_azure_service_api_keypair_generate_post(self):\n pass", "def create_key_pair(self, name):\r\n raise NotImplementedError(\r\n 'create_key_pair not implemented for this driver')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes a service account key.
def delete_key(self, full_key_name):
    self.resource.projects().serviceAccounts().keys().delete(
        name=full_key_name
    ).execute()
    return f"Deleted `{full_key_name}`."
[ "def delete_key(self, key_id):\n return self.sshkey.deleteObject(id=key_id)", "def remove_service_public_key(self, service_id, key_id):\n self._transport.delete(\n \"{}/keys\".format(self.__service_base_path[0:-1]),\n self._subject, service_id=str(service_id), key_id=key_id)", "def delete_access_key(self, username, key_id):\n self._client.delete_access_key(\n UserName=username,\n AccessKeyId=key_id\n )", "def delete_key(self, key):\n try:\n os.unlink(key)\n except OSError:\n pass", "def delete(self):\n if self.key_pair is None:\n logger.info(\"No key pair to delete.\")\n return\n\n key_name = self.key_pair.name\n try:\n self.key_pair.delete()\n self.key_pair = None\n except ClientError as err:\n logger.error(\n \"Couldn't delete key %s. Here's why: %s : %s\", key_name,\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise", "def test_delete_service_key(self):\n pass", "def test_delete_key(client):\n resp = client.delete_key(PROJECT_ID, 48855760)\n assert resp['project_id'] == PROJECT_ID\n assert resp['key_removed']", "def Delete(apig,key_id: str):\n\t\t\t\treturn apig.client.delete_api_key(apiKey=key_id)", "def delete(self, api_client):\n cmd = {'name': self.name}\n api_client.deleteSSHKeyPair(**cmd)", "def delete(self, user, key):\n self._delete('/gd-userinfo/%s/keypairs/%s'\n % (base.getid(user), base.getid(key)))", "def delete_key():\n SSH_KEY_DIR.cleanup()", "def delete(self, key):\n del self.dict[key]", "def delete(self):\n return self.delete_access_key(self.aws_access_key_id)", "def delete(self, headers=None):\n return self.bucket.delete_key(self.name, version_id=self.version_id,\n headers=headers)", "def delete(self, keyname):\n self.db.delete_by_name(KEY, name=keyname)", "def test_delete_token_service_account(self):\n pass", "def test_vmware_service_resources_keypair_delete(self):\n pass", "def delete(self, key: str, path: Optional[str] = Path.root_path()) -> int:\n return self.execute_command(\"JSON.DEL\", key, str(path))", "def etcd_remove_key(key):\n try:\n subprocess.check_output([\"etcdctl\", \"del\", \"--\", key], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n logging.error(\"Error returned while removing the %s key from etcd: %s\", key, e)\n raise EisIntegError(CODES.EXT_CMD_ERROR)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Group for Service Account commands.
def service_accounts(ctx, *args, **kwargs):
    admin_check(ctx.obj["user_id"])
    ctx.obj["sa_actions"] = ServiceAccountActions(ctx.obj["project"])
    return ctx.obj["sa_actions"].list()
[ "def service_account():\n # This name should be same as SERVICE_NAME as it determines scheduler DCOS_LABEL value.\n name = config.SERVICE_NAME\n sdk_security.create_service_account(\n service_account_name=name, service_account_secret=name)\n # TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475\n sdk_cmd.run_cli(\n \"security org groups add_user superusers {name}\".format(name=name))\n yield name\n sdk_security.delete_service_account(\n service_account_name=name, service_account_secret=name)", "def get_mgr_group():\n\n @click.group(name=\"mgr\")\n def mgr_group():\n \"\"\" Arcus Manager service \"\"\"\n\n mgr_group.add_command(pull)\n mgr_group.add_command(start)\n mgr_group.add_command(update)\n return mgr_group", "def sub_command_group(self, **attrs):\n def inner(func: Callable):\n return self.add_child(SlashCommandGroup(func, **attrs))\n\n return inner", "def register_to(self, group: click.Group) -> None:\n for command in self.commands:\n group.add_command(command)", "def gcp_commands(self):\n lc=self.launch_config\n commands={}\n commands['create_service_account']=\"gcloud iam service-accounts create \"+lc['g_service_account_name']+ \" --display-name \"+ lc['g_service_account_name']\n commands['create_key']=\"gcloud iam service-accounts keys create \"+self.cwd+\"/gcp/\"+lc['g_authorization_file'] +\" --iam-account \"+lc['g_service_account_name']+\"@\"+lc['g_project']+\".iam.gserviceaccount.com\"\n commands['get_policy']=\"gcloud iam service-accounts get-iam-policy \"+lc['g_service_account_name']+\"@\"+lc['g_project']+\".iam.gserviceaccount.com --format json > \"+self.cwd+\"gcp/policy.json\"\n commands['set_policy']=\"gcloud iam service-accounts set-iam-policy \"+lc['g_service_account_name']+\"@\"+lc['g_project']+\".iam.gserviceaccount.com \"+self.cwd+\"/gcp/policy.json\"\n commands['login']=\"gcloud auth login\"\n commands['login_sa']=\"gcloud auth activate-service-account --key-file \"+self.cwd+\"/gcp/\"+ lc['g_authorization_file']\n commands['create_project']=\"gcloud projects create \"+lc['g_project']+\" --set-as-default\"\n commands['set_project']=\"gcloud config set project \"+lc['g_project']\n commands['set_zone']=\"gcloud config set compute/zone \"+lc['g_zone']\n commands['create']=\"gcloud container clusters create \"+lc['g_cluster_name']+\" --num-nodes=\"+str(lc['g_num_nodes'])+\" --machine-type=\"+lc['g_machine_type']+\" --zone=\"+lc['g_zone']\n commands['get_credentials']=\"gcloud container clusters get-credentials \"+lc['g_cluster_name']\n commands['stop']=\"gcloud container clusters resize \"+lc['g_cluster_name']+\" --size=0 --quiet\"\n commands['normal_size']=\"gcloud container clusters resize \"+lc['g_cluster_name']+\" --size=\"+str(lc['g_num_nodes'])+\" --quiet\"\n commands['class_size']=\"gcloud container clusters resize \"+lc['g_cluster_name']+\" --size=\"+str(lc['g_num_nodes_class'])+\" --quiet\"\n commands['delete']=\"gcloud container clusters delete \"+lc['g_cluster_name']+\" --zone=\"+lc['g_zone']+\" --quiet\"\n commands['autoscale']=\"gcloud alpha container clusters update \"+lc['g_cluster_name']+\" --enable-autoscaling --min-nodes=\"+str(lc['g_num_nodes'])+\" --max-nodes=\"+str(lc['g_max_nodes'])+\" --zone=\"+lc['g_zone']+\" --node-pool=default-pool\"\n commands['create_fixedip']=\"gcloud compute addresses create \"+lc['g_fixedip_namespace']+\" --region=\"+lc['g_region']\n commands['describe_fixedip']=\"gcloud compute addresses describe \"+lc['g_fixedip_namespace']+\" --region=\"+lc['g_region']\n commands['delete_forwarding_rule']=\"gcloud 
compute forwarding-rules delete forwarding_rule --quiet\"\n commands['delete_fixedip']=\"gcloud compute addresses delete \"+lc['g_fixedip_namespace']+\" --region=\"+lc['g_region']+\" --quiet\"\n commands['describe_cluster']=\"gcloud container clusters describe \"+lc['g_cluster_name']\n #commands['backup_ssh']=\"mkdir \"+self.cwd+\"/.ssh &&\"+ \"cp ~/.ssh/id_rsa \"+self.cwd+\"/.ssh/id_rsa_\"+lc['cluster_name']+\"&& cp ~/.ssh/id_rsa.pub \"+self.cwd+\"/.ssh/id_rsa_\"+lc['cluster_name']+\".pub\"\n return commands", "def add_subcommands(command_group, plugin_manager):", "def service_account(self) -> 'outputs.ServiceAccountResponse':\n return pulumi.get(self, \"service_account\")", "def add_target_command_groups(self, target: \"SoCTarget\", command_set: \"CommandSet\"):\n pass", "def __list_communication_service_by_resource_group(args):\n print(\"\\nList by resource group...\")\n\n acs_client = __get_communication_management_client()\n resources = acs_client.communication_service.list_by_resource_group(args.resource_group_name)\n print(\"Found resources: \")\n for resource in resources:\n print(\"\")\n __print_resource(resource)", "def cmd_groups(self):\r\n return dict({i.name: i.info() for i in self.groups})", "def add_subcommands(self):\n self.add_subcommand(AwsInstanceCommand())\n self.add_subcommand(AwsNetworkCommand())\n self.add_subcommand(AwsAccessCommand())\n self.add_subcommand(AwsQueryCommand())\n self.add_subcommand(AwsDnsCommand())", "def do_service(self, cmd, target):\n if cmd.startswith(\"list\"):\n self.do_status(\"list all\", target) # Don't duplicate code\n elif cmd.startswith(\"set\"):\n self.do_status(\"status %s\" % cmd, target) # Don't duplicate code\n elif cmd.startswith(\"add\"):\n text = re.sub(\"^add\", \"\", cmd).strip()\n service = text.split(\" \")[0]\n if not service:\n if not self.quiet:\n self.msg(\"You have to specify a service\", target)\n else:\n if len(query('select s_service from status where '\n 's_service=\"%s\"' % service)) > 0:\n if not self.quiet:\n self.msg(\"%s is already in the list of services!\"\n % service, target)\n else:\n modquery('insert into status values (0, \"%s\", \"OK\", true)'\n % service)\n if not self.quiet:\n self.msg(\"%s added to the list of services!\"\n % service, target)\n elif self.startswitharray(cmd, [\"remove\", \"delete\"]):\n text = re.sub(\"^(remove|delete)\", \"\", cmd).strip()\n service = text.split(\" \")[0]\n if not service:\n if not self.quiet:\n self.msg(\"You have to specify a service\", target)\n else:\n if len(query('select s_service from status where '\n 's_service=\"%s\"' % service)) == 0:\n if not self.quiet:\n self.msg(\"%s is not in the list of services!\"\n % service, target)\n else:\n modquery('delete from status where s_service=\"%s\"'\n % service)\n if not self.quiet:\n self.msg(\"%s removed from the list of services!\"\n % service, target)\n elif self.startswitharray(cmd, [\"change\", \"edit\", \"modify\", \"rename\"]):\n text = re.sub(\"^(change|edit|modify|rename)\", \"\", cmd).strip()\n services = text.split(\" \")\n if len(services) < 2:\n if not self.quiet:\n self.msg(\"You have to specify two names\", target)\n elif len(services) == 2:\n s1 = services[0]\n s2 = services[1]\n if len(query('select s_service from status where '\n 's_service=\"%s\"' % s1)) == 0:\n if not self.quiet:\n self.msg(\"%s is not in the list of services!\"\n % s1, target)\n else:\n modquery('update status set s_service=\"%s\" where '\n 's_service=\"%s\"' % (s2, s1))\n if not self.quiet:\n self.msg(\"Changed the name of service 
'%s' to '%s'\"\n % (s1, s2), target)\n else:\n raise CommanderError('too many parameters (%s)' % cmd)\n else:\n raise CommanderError('unparseable command (%s)' % cmd)", "def addGroupCommandInput(self, *args) -> \"adsk::core::Ptr< adsk::core::GroupCommandInput >\" :\n return _core.CommandInputs_addGroupCommandInput(self, *args)", "def AddServiceAccountFlag(parser, managed_only=False):\n help_text = (\n 'Service account associated with the revision of the service. '\n 'The service account represents the identity of '\n 'the running revision, and determines what permissions the revision has. '\n )\n if managed_only:\n help_text += 'This is the email address of an IAM service account.'\n else:\n help_text += (\n 'For the {} platform, this is the email address of an IAM service '\n 'account. For the Kubernetes-based platforms ({}, {}), this is the '\n 'name of a Kubernetes service account in the same namespace as the '\n 'service. If not provided, the revision will use the default service '\n 'account of the project, or default Kubernetes namespace service '\n 'account respectively.'.format(\n platforms.PLATFORM_MANAGED,\n platforms.PLATFORM_GKE,\n platforms.PLATFORM_KUBERNETES,\n )\n )\n\n parser.add_argument('--service-account', help=help_text)", "def test_get_asset_service_user_groups(self):\n pass", "def get_account_groups(self, user, account):\n return {}", "def droplet_actions_group():\n\tpass", "def create_default_group():\n group_entry = CommandGroupEntry.objects.create()\n return group_entry", "def default_admin_group(account):\n return '%s:%s' % (__ensure_acc_id(account), ADMIN)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the key and optionally add ``VirtualField`` helpers to the schema if create_helpers=True.
def __setkey__(self, schema: Schema, key: str) -> None:
    super().__setkey__(schema, key)
    if self.create_helpers:
        for mode in self.modes:
            schema._add_field("is_%s_mode" % mode, self._create_helper(mode))
[ "def set_key_field(self, key_field):\n return self.set_param('key_field', key_field)", "def _initialize_key(model_class, name):\r\n model_class._key = Key(model_class._meta['key'] or name)", "def _create_hstore_virtual_fields(self, cls, hstore_field_name):\n if not self.schema_mode:\n return\n\n # add hstore_virtual_fields attribute to class\n if not hasattr(cls, '_hstore_virtual_fields'):\n cls._hstore_virtual_fields = {}\n\n # loop over all fields defined in schema\n for field in self.schema:\n source = field.get('source', field['name'])\n # initialize the virtual field by specifying the class, the kwargs and the hstore field name\n virtual_field = create_hstore_virtual_field(field['class'],\n field.get('kwargs', {}),\n field['name'],\n source,\n hstore_field_name)\n # this will call the contribute_to_class method in virtual.HStoreVirtualMixin\n cls.add_to_class(field['name'], virtual_field)\n # add this field to hstore_virtual_fields dict\n cls._hstore_virtual_fields[source] = virtual_field", "def customkv(self, customkv):\n\n self._customkv = customkv", "def createFieldKey(record, key_fileds):\n key = tuple ( [ record[field] for field in key_fields ] )\n return key", "def setattr(self, handle, key, val):\n if key in read_only_keys:\n print 'Attempt to modify read-only key'\n return False\n opt = self.desc[handle]\n opt[key] = val\n self.desc[handle] = opt\n return True", "def test_create_key(self):\n self.instance.create_key(\"key_name\", \"key text\", read_only=False)\n\n self.post_called_with(\n url_for(\"user/keys\"),\n data={\"title\": \"key_name\", \"key\": \"key text\", \"read_only\": False},\n )", "def _set_key(self, key, hexkey=False):\n self.key = self.converter.to_bin(key, hexkey)", "def add_helper(self, helpers, fmt):\n c_helper = wformat(helpers, fmt)\n for i, helper in enumerate(c_helper.split()):\n self.c_helpers[helper] = True\n if helper not in LuaHelpers:\n raise RuntimeError(\"No such helper {}\".format(helper))\n setattr(fmt, \"hnamefunc\" + str(i),\n LuaHelpers[helper].get(\"name\", helper))", "def normalize_structure_key(cls, structure_key):", "def setKeyPath(object):\n pass", "def set_generic(self, _key: str, _type, _value):\n set_func = {\n \"bool\" : self.set_bool,\n \"float\" : self.set_float,\n \"int\" : self.set_int,\n \"point\" : self.set_point,\n \"points\": self.set_points,\n \"str\" : self.set_str\n }\n\n # noinspection PyArgumentList\n set_func.get(_type)(_key, _value)", "def __setattr__(self, key, value):\n if key[0] == '_':\n self.__dict__[key] = value\n else:\n self.write(key,value)", "def test_create_key_with_readonly(self):\n self.instance.create_key(\"key_name\", \"key text\", read_only=True)\n\n self.post_called_with(\n url_for(\"user/keys\"),\n data={\"title\": \"key_name\", \"key\": \"key text\", \"read_only\": True},\n )", "def __setattr__(self, key, val):\n if key.startswith(\"_\"):\n object.__setattr__(self, key, val)\n else:\n self._kwargs[key] = val", "def add_generic_metadata(self, key, value):\n\n self.debug(\"Adding generic metadata {0}='{1}'\".format(key, value))\n self.generic_metadata[key] = value", "def test_set_key_value_non_model_field(self) -> None:\n test_field = 'new_non_model_field'\n self.form.set_key_value(test_field, True)\n self.assertTrue(self.form.get_key_value(test_field))", "def add(self, key, keypacker, val, valpacker):\n if keypacker is None:\n raise TypeError(\"keypacker not given\")\n if valpacker is None:\n raise TypeError(\"valpacker not given\")\n self.fields[key] = (val, keypacker, valpacker)\n return val", 
"def _create_fields(self):\r\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute returns for each ticker and date in close.
def calculate_returns(close):
    # TODO: Implement Function

    return (close - close.shift(1)) / close.shift(1)
[ "def compute_returns(self):\n import numpy as np\n\n print(\"Compute returns and log returns...\")\n self.data['log_price'] = np.log(self.data['close'])\n self.data['log_returns'] = self.data['log_price'].diff()\n\n\n self.data['lagged_returns'] = self.data['returns'].shift(-1)\n self.data['returns2'] = self.data['returns'] ** 2\n print(\"Done!\")", "def daily_valuations(self):\n df = pd.DataFrame(self.close_prices, columns=[\"date\", \"price\"])\n df = df.set_index(\"date\")\n df[\"quantity\"] = float(\"nan\")\n df[\"market_val\"] = float(\"nan\")\n # the prices starting from the first date the security was held\n start_date = str(self.breakdown[0][0])\n\n df2 = df.loc[start_date:]\n df2 = df2.copy() # copied to prevent chained assignment\n # update the quantity at each date\n for row in self.breakdown:\n df2.at[str(row[0]), \"quantity\"] = row[1]\n df2[\"price\"] = df2[\"price\"].fillna(method=\"ffill\")\n df2[\"quantity\"] = df2[\"quantity\"].fillna(method=\"ffill\")\n\n df2[\"price\"] = pd.to_numeric(df2[\"price\"])\n df2[\"market_val\"] = round((df2[\"price\"] * df2[\"quantity\"]), 3)\n\n df2 = df2[[\"market_val\"]]\n new_name = f\"market_val_{self.ticker}\"\n new_header = {\"market_val\": new_name}\n df2 = df2.rename(columns=new_header)\n return df2", "def calculate_portfolio_return(self, price_df: pd.DataFrame) -> None:\n # Keep only data of stocks in the portfolio\n select_query = ' or '.join(f\"symbol == '{val[1]}'\" for val in self.stocks)\n self.price_df = price_df.query(select_query) \n # Calculate returns\n self.price_df['weighted_ret'] = self.price_df['dailyret'] * self.price_df['weight'] # weight * daily return\n self.portfolio_daily_returns = self.price_df.groupby('date')['weighted_ret'].sum()\n self.expected_daily_return = self.portfolio_daily_returns.mean()\n self.volatility = self.portfolio_daily_returns.std()", "def compute_daily_returns(self):\n # Note: Returned DataFrame must have the same number of rows\n daily_returns = (self.prices / self.prices.shift(1)) - 1\n daily_returns.ix[0, :] = 0\n return daily_returns", "def stockvals(df,start_date,end_date):\r\n #convert pd dataframes to strings\r\n symbols, names = df.Symbol, df.Security\r\n symbols = symbols.to_numpy()\r\n symbols = symbols.astype(str)\r\n names = names.to_numpy()\r\n names = names.astype(str)\r\n start_date_int = datetime_to_integer(start_date)\r\n #Stocks under consideration (from S&P500)\r\n n_stocks = len(symbols)\r\n #Open - Closing value of stocks (as float)\r\n indices = []; open_val = []; close_val = []\r\n for j in tqdm(range(0,n_stocks),position=0,desc='Loading Stock Data'):\r\n if j == 91:\r\n continue\r\n date_string=(df.iloc[j][6]).replace('-',''); #print(date_string)\r\n date_added = int(date_string[:8])\r\n if(date_added <= start_date_int):\r\n index = j\r\n indices = np.append(indices,index)\r\n quotes = web.DataReader(symbols[j], 'yahoo', start_date, end_date)\r\n opening = quotes.Open\r\n closing = quotes.Close\r\n open_val = np.append(open_val,opening,axis=0)\r\n close_val = np.append(close_val,closing,axis=0)\r\n open_val = open_val.reshape(len(indices),-1)\r\n close_val = close_val.reshape(len(indices),-1)\r\n variation = open_val-close_val\r\n return names[indices.astype(int)],symbols[indices.astype(int)],variation,close_val,open_val", "def get_close_for_per(self, tickers: List[str], date: Timestamp) -> pd.DataFrame:\n date_date = self.data.loc[date]\n return date_date.loc[date_date.index.isin(tickers)][['ticker','Close']].copy()", "def compute_monthly_returns(dbm: 
database_manager.DatabaseManager, tbl_name: str) -> \\\n Union[Tuple[pd.DataFrame, Tuple[str, str, str, str, str], datetime.datetime], Tuple[None, None]]:\n tbl, info = dbm.get_table(tbl_name)\n\n if tbl is None:\n return None, None\n\n tbl.dropna(axis=0, inplace=True)\n\n first_date = tbl.index[0]\n last_date = tbl.index[-1]\n prev_month = first_date.month\n\n row_idx = 0\n curr_date, prev_date = None, None\n\n monthly_returns = []\n daily_ret = 0\n monthly_ret = 0\n\n while curr_date != last_date:\n row_idx += 1\n\n curr_date = tbl.index[row_idx]\n\n curr_month = curr_date.month\n\n curr_price = tbl.iloc[row_idx]['PX_LAST']\n prev_price = tbl.iloc[row_idx - 1]['PX_LAST']\n\n if curr_price == 0:\n daily_ret = 0\n elif prev_price == 0:\n daily_ret = tbl.iloc[row_idx - 2]['PX_LAST']\n else:\n daily_ret = (curr_price / prev_price) - 1.0\n\n monthly_ret = monthly_ret * (daily_ret + 1) if monthly_ret != 0 else daily_ret + 1\n\n if curr_month != prev_month:\n # remove compounding of last daily return\n monthly_ret /= (daily_ret + 1)\n\n monthly_returns.append((prev_date, monthly_ret - 1))\n\n # reset for next month\n monthly_ret = daily_ret + 1\n\n prev_month = curr_month\n prev_date = curr_date\n\n df = pd.DataFrame(monthly_returns, columns=['Dates', 'Monthly_Return'])\n df.set_index('Dates', inplace=True)\n\n return df, info, first_date", "def compute_daily_returns(df):\n daily_returns = df.copy()\n daily_returns[1:] = (fd[1:] / df[:-1].values) - 1\n daily_returns.ix[0:, ] = 0 #set daily returns for row 0 to 0\n return daily_returns", "def getDailyReturns(self, startDate, endDate):\n self.startDate = startDate\n self.endDate = endDate\n \n price = yf.download(stock,startDate,endDate)\n self.dReturns = pd.DataFrame(np.log(price)-np.log(price).shift(1),index=price.index)\n self.dReturns.columns = self.tickers\n self.dReturns.dropna(inplace = True)", "def get_daily_returns():\n portfolio = request.get_json(force=True)\n start_date = parse_date(request.args.get('start'))\n end_date = parse_date(request.args.get('end'))\n prices_df = prepare_dataframe(portfolio, start_date, end_date)\n performance = compute_daily_returns(prices_df)\n return performance.to_json(orient='index')", "def compute_daily_returns(df):\n # (value[t] / value[t-1]) - 1\n rtn = (df/df.shift(1)) - 1\n rtn.ix[0, :] = 0\n return rtn", "def portfolio_returns(df_long, df_short, lookahead_returns, n_stocks):\n pos_long = lookahead_returns * df_long\n pos_short = lookahead_returns * df_short\n total_returns = (pos_long - pos_short)/n_stocks\n return total_returns", "def populate_portfolio_by_symbols(self, symbols: List[str], price_df: pd.DataFrame) -> None:\n # Keep only portfolio stocks' data\n select_query = ' or '.join(f\"symbol == '{symbol}'\" for symbol in symbols)\n self.price_df = price_df.query(select_query) \n\n # Calculate stocks' daily return\n self.price_df['dailyret'] = self.price_df.groupby('symbol')['close'].pct_change()\n self.price_df['dailyret'].fillna(self.price_df['close']/self.price_df['open']-1.0, inplace=True)\n self.price_df.set_index('date', inplace=True)\n\n # Calculate portoflio daily return\n self.price_df['weighted_ret'] = self.price_df['dailyret'] * self.price_df['weight'] # weight * daily return\n self.portfolio_daily_returns = self.price_df.groupby('date')['weighted_ret'].sum()\n self.portfolio_daily_cumulative_returns = (self.portfolio_daily_returns + 1.0).cumprod() - 1.0\n self.cumulative_return = self.portfolio_daily_cumulative_returns[-1] # last day's cumulative return", "def 
get_prices(ticker_list, start, stop, price_types=['Close'], logger=logger):\n\n price_array = []\n num = 1\n total = len(ticker_list)\n for stock in ticker_list:\n logger.info(f'Scraping {stock} - {num} out of {total} tickers')\n try:\n price_array.append(web.DataReader(stock, 'yahoo', start, stop))\n except: # noqa\n price_array.append('NA')\n num += 1\n price_df = dict(zip(ticker_list, price_array))\n dels = []\n for key in price_df.keys():\n if type(price_df[key]) == str:\n dels.append(key)\n for key in dels:\n price_df.pop(key, None)\n price_df = pd.concat(price_df)\n price_df = price_df[['Close']].reset_index()\n price_df.columns = ['ticker', 'date'] + [i.lower() for i in ['Close']]\n return price_df", "def _compute_returns_fn(rewards, discounts):\n returns = np.zeros(len(rewards))\n next_state_return = 0.0\n for t in range(len(returns) - 1, -1, -1):\n returns[t] = rewards[t] + discounts[t] * next_state_return\n next_state_return = returns[t]\n return returns.astype(np.float32)", "def _run(prices, options, verbose=True, get_invested_value=None):\n # For each stock, calculate the running yearly volatility:\n sigmas = Simulation._calculate_sigmas_wrapper(prices)\n\n if verbose:\n print(\"Finished calculating the yearly sigmas.\")\n\n # Then extract the values and call the ROI-calculation for investing one given day:\n payouts = np.empty_like(prices.values)\n payouts[:] = np.nan\n\n for stock_idx, stock in tqdm(enumerate(prices.columns), total=len(prices.columns)):\n # We do not want to start trading right away, when a stock becomes available. So we need to find out\n # the \"date of birth\" for a stock (which is the minimum index where an isna-comparison changes from\n # True to False) and then add our minimum maturity.\n # Alternative for date_of_stock_trading_start: np.argmin(prices[stock].isna().values), which is a bit less\n # readable.\n #date_of_stock_trading_start = prices[stock].isna().idxmin()\n date_of_stock_trading_start = np.argmin(prices[stock].isna().values)\n #trading_start_idx = prices.index.get_loc(date_of_stock_trading_start) + options.minimum_maturity\n trading_start_idx = date_of_stock_trading_start + options.minimum_maturity\n\n for k in range(trading_start_idx, len(sigmas)):\n try:\n payouts[k, stock_idx] = get_invested_value(\n prices=prices[stock].values.squeeze(),\n sigmas=sigmas[stock].values.squeeze(),\n index=k,\n horizon=options.horizon,\n out_of_money_factor=options.out_of_money_factor,\n r=options.r,\n bet_long=options.bet_long,\n )\n except AssertionError as a:\n # Raise and enhanced assertion error with info that was not available within the\n # get_invested_value-function:\n s = f'Encountered AssertionError for improper shape of vectors ' \\\n f'prices/sigmas for {stock} (idx: {stock_idx}) in step {k}: {a}\\n'\n s += f'Shapes passed were: {prices[stock].values.squeeze().shape} for prices ' \\\n f'and {sigmas[stock].values.squeeze().shape} for sigmas.\\n'\n raise AssertionError(s)\n except ValueError as v:\n # Raise an enhanced value error with info that was not available within the get_invested_value-function:\n s = f'Encountered ValueError \"{v}\" for {stock} (idx: {stock_idx}) in step {k}.\\n'\n s += f'Stock price at index {k} was {prices[stock].values.squeeze()[k]}.\\n'\n s += f'Sigma at index {k} was {sigmas[stock].values.squeeze()[k]}.\\n'\n s += f'Passed parameters were:\\n{options}\\n'\n raise ValueError(s)\n except Exception as e:\n raise Exception(\"Unknown exception raised.\")\n\n pay_outs = pd.DataFrame(payouts, index=sigmas.index, 
columns=sigmas.columns)\n\n # Store the results in a separate dataframe, where entries are nan where we could not calculate the ROI.\n return pay_outs, sigmas", "def linear_returns(price_df: pd.DataFrame):\n big_dict = {}\n\n for ticker in price_df.columns:\n # iterates through the security list provided and extract provided values via dict manipulation\n linear_ret = price_df[ticker].pct_change() # pct_change pandas function for linear returns\n big_dict[ticker] = linear_ret[1:].tolist() # eliminates the first NaN row and returns list from an array\n\n big_df = pd.DataFrame.from_dict(data=big_dict) # dataframe index by data column all ticker symbols\n return big_df", "def compute_log_returns(prices):\n # TODO: Implement Function\n \n return None", "def join_data(df_trade, df_stock):\n df_list = []\n for index, data in df_trade.iterrows():\n df_date = df_stock[data['date0']:data['date1']].copy()\n\n # change last close price into stop loss price\n df_date.loc[df_date.index.values[-1], 'close'] = data['close1']\n\n df_date['pct_chg'] = df_date['close'].pct_change()\n df_date['pct_chg'] = df_date['pct_chg'].fillna(value=0)\n df_date['pct_chg'] = df_date['pct_chg'].apply(\n lambda x: 0 if x == np.inf else x\n )\n\n if data['signal0'] == 'SELL':\n df_date['pct_chg'] = -df_date['pct_chg'] + 0\n\n df_date.reset_index(inplace=True)\n df_date = df_date[['date', 'close', 'pct_chg']]\n df_date.columns = ['date', 'price', 'pct_chg']\n\n df_list.append(df_date)\n\n return df_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the set of the top industries for the date
def date_top_industries(prices, sector, date, top_n):
    # TODO: Implement Function

    return set(sector.loc[prices.loc[date].nlargest(top_n).index])
[ "def get_top_expenses_data(date, next_date):\n data = []\n\n if date is None:\n expenses = Expense.objects().order_by('-amount').limit(10)\n else:\n expenses = []\n num = 1\n for expense in Expense.objects().order_by('-amount'):\n if expense.date >= date and expense.date <= next_date and num <= 10:\n expenses.append(expense)\n num += 1\n\n for expense in expenses:\n data.append({\n 'name': expense.name,\n 'amount': expense.amount,\n 'date': expense.date\n })\n\n return data", "def getTopArtists(self, user=None, period=\"overall\"):\n pass", "def get_top20(teams, category, ascend):\n\tteams_sorted = teams.sort_values(by = [category], ascending = ascend)\n\ttop20 = pd.DataFrame(teams_sorted.head(20), columns = ['TeamName', category])\n\treturn top20", "def top5_countries_by_date(day: int, month: int, year: int = 2020) -> List[str]:\n\n # Your code goes here (remove pass)\n y = year % 100\n data=f'{month}/{day}/{y}'\n top = confirmed_cases.groupby([\"Country/Region\"]).max().sort_values(by=data).tail(5).iloc[:,0].keys().tolist()[::-1]\n return top", "def top_cities():\n day = request.args.get('day', datetime.now().strftime(\"%Y%m%d\"), type=str)\n num = request.args.get('num', 1, type=int)\n\n # input validation\n if num <= 1:\n num = 1\n\n try:\n datetime.strptime(day, '%Y%m%d')\n except ValueError:\n day = datetime.now().strftime(\"%Y%m%d\") # when missing, set today as default date\n\n return jsonify(get_top_cities(day, num))", "def stock_screener_filter_top(conn_path,var_list,date,order,top,industry='None',since_ipo = {'condition': '>=', 't': 0},in_universe = False):\n var_mapping= pd.read_excel(conn_path+'value_mapping.xlsx')\n var2 = var_mapping[var_mapping['Chinese'].isin(var_list)]\n var2 = (var2.iloc[:,0]) \n if in_universe == True:\n industry2 = 'None'\n since_ipo['min'] = 0\n since_ipo['max'] = 30\n else:\n industry2 = industry\n db = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top = top[0],order = order[0])\n n = 1\n while(n<len(var_list)):\n temp = select_top(conn_path,var2.iloc[0],date,industry = industry2,since_ipo = since_ipo,top=top[n],order = order[n])\n db = db.merge(pd.DataFrame(temp.iloc[:,[0,5,6]]),how = 'inner',left_on = 'Code',right_on = 'Code')\n n = n + 1\n if industry == 'None':\n db = db\n else:\n if isinstance(industry,str):\n db = db[db['Industry']==(industry)]\n else:\n db = db[db['Industry'].isin(industry)]\n if(db.empty):\n raise ValueError('No Stock meets criteria!')\n return db", "def get_top(case='Confirmed', num=10):\n case = case.title()\n data = load_data()\n top = {}\n for country in data[list(data)[-1]]:\n top[country['Country_Region']]=country[case]\n return {k:v for k, v in\n sorted(top.items(), key=lambda x: x[1], reverse=True)[:num]}", "def getTopArtists(self):\r\n\r\n sp = getSP()\r\n\r\n artistsPageShort = sp.current_user_top_artists(limit=50, offset=0, time_range=\"short_term\")\r\n artistsPageMed = sp.current_user_top_artists(limit=50, offset=0, time_range=\"medium_term\")\r\n artistsPageLong = sp.current_user_top_artists(limit=50, offset=0, time_range=\"long_term\")\r\n\r\n\r\n topArtistsShort = getItems(artistsPageShort)\r\n topArtistsMed = getItems(artistsPageMed)\r\n topArtistsLong = getItems(artistsPageLong)\r\n\r\n return topArtistsShort, topArtistsMed, topArtistsLong", "def top(self, k):\n dic_list = []\n order_collect = sorted(\n self.collection.items(), key=lambda x: x[1], reverse=True)\n for num in range(k):\n for item in order_collect:\n small_dic = {}\n small_dic[\"systemname\"] = item[0]\n 
small_dic[\"lifetimeperformance\"] = item[1]\n dic_list.append(small_dic)\n\n return dic_list", "def top():\n top_ = []\n for category in (\"day\", \"week\", \"month\"):\n downloads = (\n RecentDownloadCount.query.filter_by(category=category)\n .filter(RecentDownloadCount.package != \"__all__\")\n .order_by(RecentDownloadCount.downloads.desc())\n .limit(20)\n .all()\n )\n top_.append(\n {\"category\": category, \"packages\": [{\"package\": d.package, \"downloads\": d.downloads} for d in downloads]}\n )\n return render_template(\"top.html\", top=top_, user=g.user)", "def get_top_items(interactions: pd.DataFrame, N:int = 5) -> pd.DataFrame:\n iid2recid = load_mapping(\"../../data/iid2recid\")\n books_full = pd.read_parquet(\"../../data/books_full.parquet.gzip\")\n\n interactions[\"recId\"] = interactions[\"item_id\"].map(iid2recid)\n interactions = interactions.merge(books_full[[\"recId\", \"title\", \"author\", \"rubric_name\"]], on=\"recId\")\n interactions = interactions.query(\"dt > '2021-08-01'\").copy()\n\n pop_items = (\n interactions\n .groupby(\"recId\", as_index=False)\n .agg({\"user_id\": \"count\", \"title\": \"first\", \"author\": \"first\", \"rubric_name\": \"first\"})\n .sort_values(\"user_id\", ascending=False)\n )\n pop_items = pop_items.fillna(\"unknown\")\n top = pop_items[pop_items[\"rubric_name\"].str.contains(\"Худ\")].head(5)[[\"recId\", \"title\", \"author\"]]\n top = top.rename(columns={\"recId\": \"item_id\"})\n top[\"ranking\"] = list(range(1,6))\n top[\"user_id\"] = 0\n top = top[[\"user_id\", \"item_id\", \"title\", \"author\", \"ranking\"]]\n return top", "def _get_top_temps(temps):\n top_temps = sorted(temps, key=lambda k: k['temp'], reverse=True)[:10]\n return sorted(top_temps, key=lambda k: _create_date(k['date']))", "def _get_top_results(self):\n return Counter(self.pkg_files).most_common(TOP_N)", "def top_tvshow(self):\n top_tvshow = {}\n data = requests.get(self.url.format('Top250TVs', self.api_key)).json()\n #Loops through the data\n for item in data['items']:\n top_tvshow.setdefault(data['id'], [data['title'], data['year'], data['rank'], data['imDbRating']])\n\n return top_tvshow", "def get_top_five():\n\n # this is simply a placeholder until I create the logic to query top movies based on num reviews and star ratings...\n t1 = Movie.objects.get(name__icontains='out of the past')\n t2 = Movie.objects.get(name__icontains='double indem')\n t3 = Movie.objects.get(name__icontains='big sleep')\n t4 = Movie.objects.get(name__icontains='scarlet street')\n t5 = Movie.objects.get(name__icontains='maltese falcon')\n\n top_five = [t1, t2, t3, t4, t5]\n\n return top_five", "def getTopAlbums(self, user=None, period=\"overall\"):\n pass", "def main_top_n_count():\n print(\"# Top %d DOIs per source by count on %s\" % (top_n, date))\n print(\"Useful for sources that may mention the same DOI repeatedly, e.g. 
Twitter, Wikipedia, Newsfeed, Reddit\")\n for source in sources:\n print(\"## Source: %s\" % source)\n url = api_date_source % {\"source\": source, \"date\": date, \"view\": \"collected\"}\n\n events = requests.get(url).json()['events']\n\n proj_obj = lambda event: event['obj_id']\n \n doi_events = ((doi, list(events)) for doi, events in itertools.groupby(sorted(events, key=proj_obj), key=proj_obj))\n doi_count_events = [(doi, len(events), events) for doi, events in doi_events]\n \n # sorted by number of events\n dois = sorted(doi_count_events, key=lambda x: x[1], reverse=True)\n\n for (doi, count, events) in dois[:top_n]:\n print(\"### %s\" % doi)\n print(\"%d events\" % count)\n for event in events[:top_n]:\n print_json_indent(event)", "def NYT_mostPopular(num_days=1, type=\"mostviewed\", sec_list=[\"all-sections\"]):\n\t#type = \"mostemailed\" / type = \"mostshared\"\n\tsections = \";\".join(sec_list)\n\tbase = \"http://api.nytimes.com/svc/mostpopular/v2/%s/%s/%d.json\"\\\n\t\t\"?api-key=32a8ad498501475cb0fa4abbc04f4e4e:5:61481359\"\n\tr = requests.get(base % (type, sections, num_days))\n\tjresp = json.loads(r.content)\n\t\n\treturn NYT_get_articles(jresp)", "def get_most_up_to_date_10k_filing(sf1_art, caldate_cur: pd.datetime, datekey_cur: pd.datetime, years):\n desired_calendardate = get_calendardate_x_quarters_ago(caldate_cur, 4*years)\n candidates = sf1_art.loc[sf1_art.calendardate==desired_calendardate]\n\n\n candidates = candidates.loc[candidates.datekey <= datekey_cur] # Ensure that no future information gets used\n\n if len(candidates) == 0:\n # raise KeyError(\"No 10K filing for report period {}\".format(desired_calendardate))\n return pd.Series(index=sf1_art.columns)\n\n candidates = candidates.sort_values(by=\"datekey\", ascending=True)\n\n return candidates.iloc[-1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test run analyze_returns() with net strategy returns from a file.
def test_run(filename='net_returns.csv'):
    net_returns = pd.Series.from_csv(filename, header=0, sep=',')
    t, p = analyze_returns(net_returns)
    print("t-statistic: {:.3f}\np-value: {:.6f}".format(t, p))
[ "def read_simulation_results(output_filename):\n\n # open the results file\n sp = openmc.StatePoint(output_filename)\n\n # access the tally\n tbr_tally = sp.get_tally(name=\"TBR\")\n df = tbr_tally.get_pandas_dataframe()\n tbr_tally_result = df[\"mean\"].sum()\n\n # print result\n print(\"The tritium breeding ratio was found, TBR = \", tbr_tally_result)\n return tbr_tally_result", "def test_result(file): \r\n file_data_input = file + \".in\"\r\n result = alghorithm(file_data_input)\r\n file_correct_result = file + \".out\"\r\n with open(file_correct_result) as f:\r\n correct_result = int(f.readline())\r\n f.close()\r\n if correct_result == result:\r\n print(file_data_input, \"OK\")\r\n else:\r\n print(file_data_input, \"Error, wrong result!\")", "def test_get_file_analysis_result_1(mocker):\n mocker.patch(\"TrendMicroVisionOneV3.Client.http_request\", mock_file_result_response)\n client = Client(\"https://apimock-dev.trendmicro.com\", api_key, proxy, verify)\n args = {\"reportId\": \"800f908d-9578-4333-91e5-822794ed5483\"}\n result = get_file_analysis_result(client, args)\n assert len(result.outputs) > 0", "def run(filename):\n lap_records = load_data(filename)\n sorted_laps = sort_laps(lap_records)\n race_results = build_results(sorted_laps)\n print_results(race_results)\n return race_results", "def parse_results(filename):\n with open(filename) as f:\n line_list = f.readlines()\n\n vs_lines = [line for line in line_list if ' vs ' in line]\n\n results = []\n\n for line in vs_lines:\n m = re.search(r'(.+) vs (.+) \\(result: ([0-9]+)-([0-9]+)\\)', line)\n if m is not None:\n result = MatchResult(\n blue=m.group(1),\n orange=m.group(2),\n blue_goals=int(m.group(3)),\n orange_goals=int(m.group(4)),\n blue_shots=0,\n orange_shots=0,\n blue_saves=0,\n orange_saves=0,\n blue_points=0,\n orange_points=0)\n\n results.append(result)\n\n return results", "def get_results(filename):\n\n ttype, method, N, M, d, parts = get_info(filename)\n\n with open(filename) as f:\n time_c, time_i, iter, inter_s, inter_e, intra_s, intra_e, tot, tinter, tintra, f = read_simulation(f, ttype,\n method)\n time_c = parse_time(time_c)\n time_i = parse_time(time_i)\n iter = parse_int(iter)\n inter_s = parse_float(inter_s)\n inter_e = parse_float(inter_e)\n intra_s = parse_float(intra_s)\n intra_e = parse_float(intra_e)\n tot = parse_float(tot)\n tinter = tinter if None in tinter else parse_float(tinter)\n tintra = tintra if None in tintra else parse_float(tintra)\n\n instances = len(time_c)\n\n avg_cl = get_avg_time(time_c)\n min_cl = min(time_c)\n max_cl = max(time_c)\n sd_cl = sqrt(sum((t - float(avg_cl)) ** 2 for t in time_c) / instances)\n\n avg_ii = get_avg_time(time_i)\n min_ii = min(time_i)\n max_ii = max(time_i)\n sd_ii = sqrt(sum((t - float(avg_ii)) ** 2 for t in time_i) / instances)\n\n time_tot = time_c + time_i\n avg_tot = get_avg_time(time_tot)\n min_tot = min(time_tot)\n max_tot = max(time_tot)\n sd_tot = sqrt(sum((t - float(avg_tot)) ** 2 for t in time_tot) / instances)\n\n avg_iter = sum(iter) / instances\n min_iter = min(iter)\n max_iter = max(iter)\n sd_iter = sqrt(sum((t - float(avg_iter)) ** 2 for t in iter) / instances)\n\n if None in tinter:\n avg_optgap = min_optgap = max_optgap = sd_optgap = None\n else:\n opt_gap = []\n for i in range(len(intra_e)):\n opt_gap.append(abs(float(tintra[i]) - intra_e[i]) / intra_e[i])\n\n avg_optgap = sum(opt_gap) / instances\n min_optgap = min(opt_gap)\n max_optgap = max(opt_gap)\n sd_optgap = sqrt(sum((t - float(avg_optgap)) ** 2 for t in opt_gap) / instances)\n\n 
if None in tinter:\n avg_optper = min_optper = max_optper = sd_optper = None\n else:\n opt_per = []\n for i in range(len(intra_e)):\n opt_per.append(abs(float(tintra[i]) - intra_e[i]) / float(tintra[i]))\n\n avg_optper = sum(opt_per) / instances\n min_optper = min(opt_per)\n max_optper = max(opt_per)\n sd_optper = sqrt(sum((t - float(avg_optper)) ** 2 for t in opt_per) / instances)\n\n improvement_per = []\n for i in range(len(intra_e)):\n improvement_per.append(abs(intra_s[i] - intra_e[i]) / intra_s[i])\n\n avg_impper = sum(improvement_per) / instances\n min_impper = min(improvement_per)\n max_impper = max(improvement_per)\n sd_impper = sqrt(sum((t - float(avg_impper)) ** 2 for t in improvement_per) / instances)\n\n return avg_cl, max_cl, min_cl, sd_cl, \\\n avg_ii, max_ii, min_ii, sd_ii, avg_tot, max_tot, min_tot, sd_tot, avg_iter, min_iter, \\\n max_iter, sd_iter, avg_optgap, min_optgap, max_optgap, sd_optgap, avg_optper, min_optper, \\\n max_optper, sd_optper, avg_impper, min_impper, max_impper, sd_impper", "def evaluate_results():\n parser = argparse.ArgumentParser()\n parser.add_argument('-network_outputs', type=str, required=True, help='path to networks test outputs folder')\n parser.add_argument('-ensemble_outputs', type=str, required=True, help='path to ensembles test outputs folder')\n parser.add_argument('-evaluation_output', type=str, required=True, help='path to outputs folder')\n parser.add_argument('-device', type=str, default='cpu', help='device on which to execute the script')\n args = parser.parse_args()\n\n targets = \"targets.npy\"\n\n # Discovers present\n network_sets = set()\n for subfold in os.walk(args.ensemble_outputs):\n fold_name = path.split(subfold[0])[1]\n fold_name_split = fold_name.split('_')\n if fold_name_split[0] != \"output\":\n continue\n\n netw_set = frozenset(fold_name_split[1:-2])\n network_sets.add(netw_set)\n\n # Load targets and network predictions, compute accuracies\n tar = torch.from_numpy(np.load(path.join(args.network_outputs, targets)))\n num_images = tar.shape[0]\n computed_accuracies = [1, 5]\n net_predictions = {}\n nets_df = pd.DataFrame(columns=('net', *['top' + str(k) for k in computed_accuracies]))\n print(\"Processing nets folder {}\".format(args.network_outputs))\n for f in listdir(args.network_outputs):\n if path.splitext(f)[1] == '.npy' and f != targets:\n print(\"Found network {}\".format(f))\n cur_net = torch.from_numpy(np.load(path.join(args.network_outputs, f)))\n accuracies = [compute_acc_topk(tar, cur_net, k) for k in computed_accuracies]\n net_abrv = path.splitext(f)[0][:4]\n nets_df.loc[len(nets_df)] = [net_abrv, *accuracies]\n net_predictions[net_abrv] = cur_net\n\n nets_df.to_csv(path.join(args.evaluation_output, \"nets.csv\"), index=False)\n\n # Compute standard accuracies of ensembles\n methods = ['bc', 'm1', 'm2']\n comb_df = pd.DataFrame(columns=('method', 'topl', *net_predictions.keys(),\n *['top' + str(k) for k in computed_accuracies]))\n ptrn = r'output_(' + '|'.join([n_abr + \"_\" for n_abr in net_predictions.keys()]) + ')+topl_\\d+'\n\n print(\"Processing combin folder {}\".format(args.ensemble_outputs))\n for fold in listdir(args.ensemble_outputs):\n if path.isdir(path.join(args.ensemble_outputs, fold)) and re.search(ptrn, fold) is not None:\n print(\"Found combin output {}\".format(fold))\n fold_split = fold.split('_')\n topl = int(fold_split[-1])\n cur_nets = fold_split[1:-2]\n for m in methods:\n pred = torch.from_numpy(np.load(path.join(args.ensemble_outputs, fold, \"prob_\" + m + \".npy\")))\n accuracies 
= [compute_acc_topk(tar, pred, k) for k in computed_accuracies]\n comb_df.loc[len(comb_df)] = [m, topl, *[1 if net in cur_nets else 0 for net in net_predictions.keys()],\n *accuracies]\n\n comb_df.to_csv(path.join(args.evaluation_output, \"combins.csv\"), index=False)\n\n # Create top1 correctness masks for nets\n net_cor_masks = {}\n for net in net_predictions:\n cor_m = get_correctness_masks(net_predictions[net], tar, [1])\n net_cor_masks[net] = cor_m\n\n net_pred_keys = net_predictions.keys()\n del net_predictions\n # Create masks for net sets\n net_sets_masks = {}\n for st in network_sets:\n set_list = sorted(list(st))\n # Contains top1 correctness masks in rows for nets from set\n nets_cor = torch.cat([net_cor_masks[na].unsqueeze(0) for na in set_list], 0)\n masks = torch.zeros([2]*len(set_list) + [num_images], dtype=torch.bool)\n for cor_comb in range(2**len(set_list)):\n bin_comb = ('{0:0' + str(len(set_list)) + 'b}').format(cor_comb)\n mask_ind = [[int(b)] for b in bin_comb]\n mask_tens = torch.tensor(mask_ind)\n # Inverts correctness masks which should be false and computes logical and over the rows\n masks[mask_ind] = torch.prod(nets_cor == mask_tens, 0).type(torch.bool)\n\n net_sets_masks[st] = masks\n\n # Compute subset accuracies\n comb_ss_df = pd.DataFrame(columns=('method', 'topl', *net_pred_keys,\n *[na + \"_cor\" for na in net_pred_keys],\n *['top' + str(k) for k in computed_accuracies]))\n print(\"Processing combin folder {}\".format(args.ensemble_outputs))\n for fold in listdir(args.ensemble_outputs):\n if path.isdir(path.join(args.ensemble_outputs, fold)) and re.search(ptrn, fold) is not None:\n print(\"Found combin output {}\".format(fold))\n fold_split = fold.split('_')\n topl = int(fold_split[-1])\n cur_nets = sorted(fold_split[1:-2])\n cur_nets_set = frozenset(cur_nets)\n nets_cor = torch.cat([net_cor_masks[na].unsqueeze(0) for na in cur_nets], 0)\n for m in methods:\n pred = torch.from_numpy(np.load(path.join(args.ensemble_outputs, fold, \"prob_\" + m + \".npy\")))\n ens_cor_masks = get_correctness_masks(pred, tar, computed_accuracies)\n for cor_comb in range(2 ** len(cur_nets)):\n bin_comb = ('{0:0' + str(len(cur_nets)) + 'b}').format(cor_comb)\n mask_ind = [[int(b)] for b in bin_comb]\n mask = net_sets_masks[cur_nets_set][mask_ind].squeeze()\n cur_ens_cor_masks = ens_cor_masks[:, mask]\n cur_accur = torch.true_divide(torch.sum(cur_ens_cor_masks, 1), torch.sum(mask).item())\n\n comb_ss_df.loc[len(comb_ss_df)] = [m, topl, *[1 if net in cur_nets else 0 for net in net_pred_keys],\n *[-1 if net not in cur_nets else int(bin_comb[cur_nets.index(net)]) for net in net_pred_keys],\n *cur_accur.tolist()]\n\n comb_ss_df.to_csv(path.join(args.evaluation_output, \"combins_ss.csv\"), index=False)", "def _get_analysis_result(protocol_files: List[Path]) -> Tuple[int, Any]:\n with tempfile.TemporaryDirectory() as temp_dir:\n analysis_output_file = Path(temp_dir) / \"analysis_output.json\"\n runner = CliRunner()\n result = runner.invoke(\n analyze,\n [\n \"--json-output\",\n str(analysis_output_file),\n *[str(p.resolve()) for p in protocol_files],\n ],\n )\n if result.exception is not None:\n raise result.exception\n else:\n return result.exit_code, json.loads(analysis_output_file.read_bytes())", "def test_return_rate(self):\n df_t = functions.invest_dataframe(FILE_NAME)\n start = pd.Timestamp(str(BOND_START_YEAR) + '-01-02 00:00:00', tz=None)\n end = pd.Timestamp(str(BOND_END_YEAR) + '-01-03 00:00:00', tz=None)\n ror_percent = functions.calc_return(df_t, start, end, 
return_type='percent', annualize=True)\n self.assertGreaterEqual(ror_percent, 0)\n self.assertLessEqual(ror_percent, 100)", "def _run_test(self, cfg, **kwargs):\n all_values = self.m.bisect_tester_staging.run_test(\n cfg, **kwargs)\n overall_success = True\n if (not kwargs.get('allow_flakes', True) and\n cfg.get('test_type', 'perf') != 'return_code'):\n overall_success = all(v == 0 for v in all_values['retcodes'])\n return {\n 'results': all_values,\n 'ret_code': overall_success,\n 'output': ''.join(all_values['output'])\n }", "def test_results_file(\n self, setup_teardown, file_regression: FileRegressionFixture\n ):\n for case in self.CASES.keys():\n prefix = f'{case}__results'\n outputs = setup_teardown\n file_regression.check(outputs[prefix], basename=prefix)", "def analyze(file, analyzedir, basedir):\n analyzefile = path.join(analyzedir, file)\n basefile = path.join(basedir, file)\n # check if two files passed or failed test, if the two have\n # different result, ??\n assert(os.path.exists(analyzefile))\n with open(analyzefile) as f:\n analyze_content = f.read()\n analyze_status = passed(analyze_content)\n if not os.path.exists(basefile):\n base_status = \"NotFound\"\n else:\n with open(basefile) as f:\n base_content = f.read()\n base_status = passed(base_content)\n return (analyze_status, base_status, analyze_status == base_status)", "def extractNNBenchResults(lines):\n nnbench_results = {}\n SUCCESSFUL_FILE_OPS = \"Successful file operations\"\n MAPS_MISSED_BARRIER = \"maps that missed the barrier\"\n TPS_OPEN_READ = \"TPS: Open/Read\"\n AVG_EXEC_TIME_OPEN_READ = \"Avg Exec time (ms): Open/Read\"\n AVG_LAT_OPEN = \"Avg Lat (ms): Open\"\n RAW_DATA_AL_TOTAL_1 = \"RAW DATA: AL Total #1\"\n RAW_DATA_AL_TOTAL_2 = \"RAW DATA: AL Total #2\"\n RAW_DATA_TPS_TOTAL = \"RAW DATA: TPS Total (ms)\"\n RAW_DATA_LONGEST_MAP_TIME = \"RAW DATA: Longest Map Time (ms)\"\n RAW_DATA_LATE_MAPS = \"RAW DATA: Late maps\"\n RAW_DATA_EXCEPTIONS = \"RAW DATA: # of exceptions\"\n\n for line in lines:\n if SUCCESSFUL_FILE_OPS in line:\n addResult(nnbench_results, SUCCESSFUL_FILE_OPS, line)\n if MAPS_MISSED_BARRIER in line:\n addResult(nnbench_results, MAPS_MISSED_BARRIER, line)\n if TPS_OPEN_READ in line:\n addResult(nnbench_results, TPS_OPEN_READ, line)\n if AVG_EXEC_TIME_OPEN_READ in line:\n addResult(nnbench_results, AVG_EXEC_TIME_OPEN_READ, line)\n if AVG_LAT_OPEN in line:\n addResult(nnbench_results, AVG_LAT_OPEN, line)\n if RAW_DATA_AL_TOTAL_1 in line:\n addResult(nnbench_results, RAW_DATA_AL_TOTAL_1, line)\n if RAW_DATA_AL_TOTAL_2 in line:\n addResult(nnbench_results, RAW_DATA_AL_TOTAL_2, line)\n if RAW_DATA_TPS_TOTAL in line:\n addResult(nnbench_results, RAW_DATA_TPS_TOTAL, line)\n if RAW_DATA_LONGEST_MAP_TIME in line:\n addResult(nnbench_results, RAW_DATA_LONGEST_MAP_TIME, line)\n if RAW_DATA_LATE_MAPS in line:\n addResult(nnbench_results, RAW_DATA_LATE_MAPS, line)\n if RAW_DATA_EXCEPTIONS in line:\n addResult(nnbench_results, RAW_DATA_EXCEPTIONS, line)\n\n return nnbench_results", "def test_analysis_hr(game_file):\n with stdout() as out, stderr() as err:\n assert run(\n \"analyze\", \"-i\", game_file, \"--hr\", \"r0:3;r1:2\", \"-p1\"\n ), err.getvalue()\n assert \"With hierarchical reduction: r0:3 r1:2\" in out.getvalue()", "def run_tests(filename, output=sys.stdout):\n\n # get the module name from the filename\n path, ext = filename[:-3], filename[-3:]\n if ext != '.py':\n raise Exception('not a *.py file: ' + str(filename))\n module_name = path.replace(os.path.sep, '.')\n\n # needed when the file is in 
a subdirectory\n sys.path.append(os.getcwd())\n\n # import the module and determine the test target\n module = importlib.import_module(module_name)\n target_module = getattr(module, '__test_target__', None)\n if target_module is None:\n message = (\n 'Warning: '\n '%s missing attribute __test_target__. '\n 'Coverage will not be tracked.'\n )\n print(message % module_name, file=output)\n target_file = None\n else:\n target_file = target_module.replace('.', os.path.sep) + '.py'\n\n if target_file:\n # trace execution while loading the target file\n tracer = CodeTracer.from_source_file(target_file)\n global_vars = tracer.run()\n\n # make the target's globals available to the test module\n for key in global_vars:\n if key[:2] != '__':\n setattr(module, key, global_vars[key])\n\n # load and run unit tests\n tests = unittest.defaultTestLoader.loadTestsFromModule(module)\n runner = unittest.TextTestRunner(\n stream=output,\n verbosity=2,\n resultclass=TestResult\n )\n unit_info = runner.run(tests)\n\n if target_file:\n coverage_results = tracer.get_coverage()\n else:\n coverage_results = None\n\n # return unit and coverage results\n return {\n 'unit': unit_info.results,\n 'coverage': coverage_results,\n 'target_module': target_module,\n 'target_file': target_file,\n }", "def predictFile(self, testFileName, \n outputFileName=None,\n showPreds=True, showStats=True):\n truePositives = 0\n falsePositives = 0\n trueNegatives = 0\n falseNegatives = 0\n positives = 0\n negatives = 0\n count = 0\n testFile = open(testFileName, \"r\")\n if outputFileName:\n outputFile = open(outputFileName, \"w\")\n for line in testFile:\n count += 1\n (lbl, x) = convertLineToArray(line.strip(), \n self.d, 'int')\n (pred, score) = self.predictInstance(x)\n if showPreds:\n print pred, score\n if outputFileName:\n outputFile.write(\"%d, %s\\n\" % (pred, str(score)))\n if lbl == 1:\n positives += 1\n elif lbl == -1:\n negatives += 1 \n if pred == 1 and lbl == 1:\n truePositives += 1\n elif pred == 1 and lbl == -1:\n falsePositives += 1\n elif pred == -1 and lbl == 1:\n falseNegatives += 1\n elif pred == -1 and lbl == -1:\n trueNegatives += 1 \n testFile.close()\n if showStats and positives and negatives:\n corrects = truePositives + trueNegatives\n accuracy = float(100 * corrects) / count\n print \"Accuracy = %f (%d/%d)\" % (accuracy, corrects, count)\n print \"True Positive Rate = %f\" % (float(100 * truePositives) / positives)\n print \"False Positive Rate = %f\" % (float(100 * falsePositives) / negatives)\n print \"True Negative Rate = %f\" % (float(100 * trueNegatives) / negatives)\n print \"False Negative Rate = %f\" % (float(100 * falseNegatives) / positives)\n precision = float(100 * truePositives) / (truePositives + falsePositives)\n recall = float(100 * truePositives) / (positives)\n F = (2 * precision * recall) / (precision + recall)\n print \"Precision = %f\" % precision\n print \"Recall = %f\" % recall\n print \"F-score = %f\" % F\n print \"Total Positives = %d\" % positives\n print \"Total Negatives = %d\" % negatives\n print \"Total instances = %d\" % count\n if outputFileName:\n outputFile.close()\n pass", "def __run_analyze(self, config_file_path: str, extra_options=None):\n # Create analyze command.\n analyze_cmd = [self._codechecker_cmd, \"analyze\", self.build_json,\n \"-o\", self.reports_dir,\n \"--config\", config_file_path]\n\n if extra_options:\n analyze_cmd.extend(extra_options)\n\n # Run analyze.\n process = subprocess.Popen(\n analyze_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n 
encoding=\"utf-8\",\n errors=\"ignore\")\n out, err = process.communicate()\n print(err)\n return out, process.returncode", "def parse(filename):\n return Solution(filename)", "def process_one_simulation(filename_result, path_pdr_in, path_results, df_input_params, df_lines_to_extract):\n dict_result = {}\n\n # get input parameters\n root_filename_in = filename_result.split(\"_s_20.stat\")[0]\n path_file_in = f\"{path_pdr_in}/{root_filename_in}.in\"\n dict_result = extract_input_parameters(path_file_in, df_input_params, dict_result)\n\n # get result data\n path_file_result = f\"{path_results}/{filename_result}\"\n dict_result = extract_result_data(path_file_result, df_lines_to_extract, dict_result)\n\n return dict_result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get item by id. Called with `GET /collections/{collectionId}/items/{itemId}`.
async def get_item(self, item_id: str, collection_id: str, **kwargs) -> Item:
    # If collection does not exist, NotFoundError wil be raised
    await self.get_collection(collection_id, **kwargs)
    req = self.search_request_model(
        ids=[item_id], collections=[collection_id], limit=1
    )
    item_collection = await self._search_base(req, **kwargs)
    if not item_collection["features"]:
        raise NotFoundError(
            f"Item {item_id} in Collection {collection_id} does not exist."
        )
    return Item(**item_collection["features"][0])
[ "def get(self, id: int) -> Optional[Item]:\n return self.session.query(Item).get(id)", "def get(category_id, item_id):\n category = CategoryModel.find_by_id(category_id)\n if not category:\n raise NotFound()\n item = ItemModel.find_by_id_and_category(category_id, item_id)\n if not item:\n raise NotFound()\n return Item.schema.dump(item), 200", "def get_food_item_by_item_id(self, item_id):\n sql = 'SELECT * FROM {} WHERE id={}'.format(FoodItem.DB_TABLE_NAME, item_id)\n row = self._query_db(sql, (), True)\n return self.__parse_food_item(row)", "def get_item(id='', title='', category_id='', category_name=''):\n if id:\n # search by id\n if session.query(Item.id).filter_by(id=id).scalar() is None:\n # return None if it doesn't exist\n return None\n else:\n return session.query(Item).filter_by(id=id).one()\n elif title and (category_id or category_name):\n if not category_id:\n category_id = category.get_category(name=category_name).id\n # search by name\n if (session.query(Item.id)\n .filter_by(title=title,\n category_id=category_id).scalar()) is None:\n # return None if it doesn't exist\n return None\n else:\n return (session.query(Item)\n .filter_by(title=title,\n category_id=category_id).one())\n else:\n dprint(1, \"Insufficient parameters passed to get_item.\"\n \"id=%s, title=%s, category_id=%s\" % (id, title, category_id))\n return None", "def getById(self, id):\n for item in self.list: \n if item.getId() == id:\n return item", "def item_detail(request, id):\n item = get_object_or_404(Item, pk=id)\n return render_to_response('items/item_detail.html',\n {'item': item, },\n RequestContext(request))", "def view_item(item_id):\n try:\n item = db.query(Item).filter_by(id = item_id).one()\n except NoResultFound:\n abort(404)\n return render_template(\"api/item.html\", item = item)", "def get_item(menu, item):\n for i in menu['items']:\n if i['id'] == item:\n return i", "def getSpecificItem(itemName):\r\n return session.query(Item).filter_by(name=itemName).one()", "def get_inventory_item_by_id(inv_id):\n app.logger.info(\"Request for pet with id: %\", inv_id)\n inv = Inventory.find(inv_id)\n if not inv:\n raise NotFound(\"Inventory Item with id '{}' was not found.\".format(inv_id))\n return make_response(jsonify(inv.serialize()), status.HTTP_200_OK)", "def test_get_item_by_id(init_client, init_db):\n test_id = 1\n resp = init_client.get(f'/items/{test_id}')\n assert resp.status_code == 200\n assert resp.get_json()['name'] == 'ball'\n assert resp.get_json()['description'] == 'A ball'\n assert resp.get_json()['id'] == test_id\n assert resp.get_json()['user_id'] == 1\n\n test_id = 0\n resp = init_client.get(f'/items/{test_id}')\n assert resp.status_code == 404\n assert resp.get_json()['message'] == f'Item with id {test_id} not found.'", "def _GetFileItem(self, file_id):\n # get a file item by file id\n client = self._GetClient()\n try:\n file_item = client.files().get(\n supportsAllDrives=True,\n fileId=file_id,\n fields=_FIELDS).execute(num_retries=constant.NUM_RETRIES)\n return file_item\n except apiclient.errors.HttpError as e:\n if e.resp.status == constant.HTTP_NOT_FOUND_ERROR_CODE:\n raise errors.FileNotFoundError(_INVALID_FILE_ID_ERROR % file_id)\n raise", "def test_search_item_id_from_collection(self):\n self._request_valid(\n f\"collections/{self.tested_product_type}/items/foo\",\n expected_search_kwargs={\n \"id\": \"foo\",\n \"productType\": self.tested_product_type,\n },\n )", "def get_item(self, identifier):", "def get_collection_by_id(collection_id):\n\n return 
Collection.query.filter(Collection.collection_id == collection_id).first()", "def get_file_item(item_id, gc):\n file_generator = gc.listFile(item_id)\n try:\n return next(file_generator)\n except StopIteration as e:\n return None", "def item_detail(request, item_id): \n item = get_object_or_404(Item, pk=item_id)\n context = {\n 'item' : item,\n }\n return render(request, 'items/item_detail.html', context)", "def retrieve(self, request, pk=None):\n try:\n single_item = Item.objects.get(pk=pk)\n serializer = ItemSerializer(\n single_item, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "def beets_get_item(self, path):\n query = library.MatchQuery('path', path)\n item = self.lib.items(query).get()\n if item:\n return item\n else:\n log.info(u'mpdstats: item not found: {0}'.format(\n displayable_path(path)\n ))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the HTML code of an horizontal bar included in a potentially wider chart.
def GenerateHTMLHorizontalBar(relWidth,relErrorWidth,color):
    if not (0. <= relWidth <= 1.):
        raise ValueError("Invalid relwidth '%s', it must be between 0 and 1" % relWidth)
    if not (0. <= relErrorWidth <= 1.):
        raise ValueError("Invalid relwidth '%s', it must be between 0 and 1" % relErrorWidth)
    if relWidth+relErrorWidth>1.:
        raise ValueError("Invalid relwidth and relErrorwidth (%s,%s), their sum must not be greater than one" % (relErrorWidth,relErrorWidth))
    # use floor to amplify a little the error bar
    firstPartWidth = math.floor(100*min(1.,max(0,relWidth-relErrorWidth)))
    secondPartWidth = 100*relWidth-firstPartWidth
    thirdPartWidth = min(math.ceil(100*relErrorWidth),100-secondPartWidth-firstPartWidth)
    return """\
<table cellspacing="0" cellpadding="0" border="0" style="width:100%%">
<tr>
  <td style="width:%.0f%%;height:1ex;background-color:%s;"></td>
  <td style="width:%.0f%%;height:1ex;background-color:%s;text-align:left">|</td>
  <td style="width:%.0f%%;height:1ex;text-align:right">|</td>
  <td></td>
</tr>
</table>""" % (firstPartWidth,color,secondPartWidth,color,thirdPartWidth)
[ "def GenerateHTMLHorizontalBarChart(dataSamples,numStdev,color):\n if numStdev<0:\n raise ValueError(\"numStdev is negative (%s) but it is expected be positive\" % numStdev)\n norm = max(ds.value+(numStdev*ds.stdev) for ds in dataSamples)\n bars = [ GenerateHTMLHorizontalBar(float(d.value)/norm,float(numStdev*d.stdev)/norm,color) for d in dataSamples ]\n return \"\"\"\\\n<table cellspacing=\"0\" cellpadding=\"0\" border=\"0\" style=\"width:80ex;font-family:monospace;\">\n%s\n</table>\"\"\" % \"\\n\".join([GenerateHTMLLabelledRow(d.label,\"%s(+/-%s)\"%(d.value,numStdev*d.stdev),b) for d,b in zip(dataSamples,bars)])", "def draw_horizontal_bar_chart(dataN,title,xlabel,ylabel,legend_label):\n \n fig,ax=plt.subplots(figsize=(85,10))\n ax.set_title(title) \n y_pos=list(range(len(dataN[0]))) \n ax.set_yticks(y_pos)\n ax.set_yticklabels(dataN[0].keys()) \n ax.set_ylabel(ylabel)\n ax.set_xlabel(xlabel)\n for i in range(len(dataN)):\n ax.barh(y_pos,dataN[i].values(),align=\"center\")\n ax.legend(legend_label,loc=2)\n publish(fig,title + \" bar chart\")", "def barh(self, y, width, height=0.8, left=None, *, align=\"center\",\n data=None, **kwargs):\n kwargs.setdefault('orientation', 'horizontal')\n patches = self.bar(x=left, height=height, width=width, bottom=y,\n align=align, data=data, **kwargs)\n return patches", "def hbar(length=80):\n return '='*length", "def barh(self, bottom, width, height=0.8, left=None, **kwargs):\n\n patches = self.bar(left=left, height=height, width=width, bottom=bottom,\n orientation='horizontal', **kwargs)\n return patches", "def broken_barh(self, xranges, yrange, **kwargs):\n col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)\n self.add_collection(col, autolim=True)\n self.autoscale_view()\n\n return col", "def render_bar(panel, x, y, total_width, name, value, maximum, foreground_color, background_color):\n bar_width = int((value / maximum) * total_width)\n\n # Going to be honest, don't know why it has to be done this way\n tcod.console_set_default_background(panel, background_color)\n tcod.console_rect(panel, x, y, total_width, 1, False, tcod.BKGND_SCREEN)\n tcod.console_set_default_background(panel, foreground_color)\n\n if bar_width > 0:\n tcod.console_rect(panel, x, y, bar_width, 1, False, tcod.BKGND_SCREEN)\n\n tcod.console_set_default_foreground(panel, tcod.white)\n tcod.console_print_ex(panel, int(x + total_width / 2), y, tcod.BKGND_NONE, tcod.CENTER,\n \"{0}: {1}/{2}\".format(name, value, maximum))", "def _get_chart(type, title, column_names, scales, *bar_data):\n global bar_html\n return bar_html.safe_substitute(type=type, title=title, column_names=column_names, scales=scales,\n bar_data=dumps([*bar_data]))", "def create_bar_chart(data):\n top_vehicles = sorted(data, key=lambda x: x['total_sales'], reverse=True)[:10]\n vehicle_names = [format_car(vehicle['car']) for vehicle in top_vehicles]\n total_revenues = [locale.atof(vehicle['price'].strip(\"$\")) * vehicle['total_sales'] for vehicle in top_vehicles]\n\n drawing = Drawing(width=500, height=300)\n bar_chart = HorizontalBarChart()\n bar_chart.x = 50\n bar_chart.y = 50\n bar_chart.width = 400\n bar_chart.height = 200\n bar_chart.data = [total_revenues]\n bar_chart.categoryAxis.categoryNames = vehicle_names\n drawing.add(bar_chart)\n\n return drawing", "def _draw_bars_base(self, first_column_is_legend=False, first_row_is_heading=False, vertical=True):\n bar_fn = plt.bar if vertical else plt.barh\n colors = {\n 0: 'g',\n 1: 'r',\n 2: 'c',\n 3: 'm',\n 4: 'y',\n 5: 'k',\n 6: 'r',\n }\n\n n_heading_row = 
1 if first_row_is_heading else 0\n\n # Calculate bar width\n n_legend_col = 1 if first_column_is_legend else 0\n n_bars_per_group = len(self.data[0]) - n_legend_col\n bar_width = 1.0 / (n_bars_per_group + 2)\n\n # Set range to range of legend or just 0 to length of data\n if first_column_is_legend:\n self.range = npy.arange(self.data[0][0], self.data[-1][0] + 1, 1)\n else:\n self.range = npy.arange(0, len(self.data) - n_heading_row, 1)\n\n plt.grid(True)\n\n for n_col, data_col in enumerate(self.data[0][n_legend_col:]):\n color = colors.get(n_col % 6, 'g')\n\n bar_fn([row + (-n_bars_per_group / 2.0) * bar_width + n_col * bar_width for row in self.range],\n [cols[n_col + n_legend_col] for cols in self.data[n_heading_row:]], width=bar_width, color=color)\n\n plt.show()", "def draw(self, chart: IChart) -> None:\n chart.vertical_bar(x=\"weeks\", top=\"throughputs\", source=self.to_data_source())", "def addSeriesBarHorizontal(self, dataSrc, fieldX, fieldY = None, fieldLabel = None, sortType = None):\n self.graphSeries.append(_SeriesBar(False, dataSrc, fieldY, fieldX, fieldLabel, sortType))", "def _extract_bars(self, data):", "def horizontal_divider():\n divider = QtGui.QFrame()\n divider.setFrameShape(QtGui.QFrame.HLine)\n divider.setFrameShadow(QtGui.QFrame.Sunken)\n return divider", "def wide_bar_stat_card(\n box: str,\n title: str,\n value: str,\n aux_value: str,\n progress: float,\n plot_color: Optional[str] = None,\n data: Optional[PackedRecord] = None,\n commands: Optional[List[Command]] = None,\n) -> WideBarStatCard:\n return WideBarStatCard(\n box,\n title,\n value,\n aux_value,\n progress,\n plot_color,\n data,\n commands,\n )", "def bar(results, h='pdf', **kwargs): # pragma: no cover\n if 'edgecolor' not in kwargs:\n kwargs['edgecolor'] = 'k'\n fig = plt.bar(x=results.bin_centers, height=getattr(results, h),\n width=results.bin_widths, **kwargs)\n xlab = [attr for attr in results.__dir__() if not attr.startswith('_')][0]\n plt.xlabel(xlab)\n plt.ylabel(h)\n return fig", "def _get_horizontal_line(self):\n line = [self.SEPARATOR] * self._width\n return ''.join(line)", "def add_bar_chart(\n slide,\n dataframe,\n left=Cm(0.79), top=Cm(4.1), width=Cm(23.84), height=Cm(11.5),\n chart_style=2,\n\n #Legend properties\n has_legend=True,\n legend_position='right',\n legend_in_layout=False,\n legend_horz_offset = 0.1583,\n legend_font_name=\"Calibri\",\n legend_font_size=10,\n legend_font_bold=False,\n legend_font_italic=False,\n legend_font_color=(89,89,89),\n legend_font_brightness=0,\n\n #Category axis properties\n caxis_visible=True,\n caxis_tick_label_position='low',\n caxis_tick_labels_offset=730,\n caxis_has_major_gridlines=False,\n caxis_has_minor_gridlines=False,\n caxis_major_tick_mark='outside',\n caxis_minor_tick_mark='none',\n caxis_tick_labels_font_name=\"Calibri\",\n caxis_tick_labels_font_size=10,\n caxis_tick_labels_font_bold=False,\n caxis_tick_labels_font_italic=False,\n caxis_tick_labels_font_color=(89,89,89),\n\n #Value axis properties\n vaxis_visible=True,\n vaxis_tick_label_position='low',\n vaxis_has_major_gridlines=True,\n vaxis_has_minor_gridlines=False,\n vaxis_major_tick_mark='outside',\n vaxis_minor_tick_mark='none',\n vaxis_max_scale=1,\n vaxis_min_scale=0,\n vaxis_major_unit=0.1,\n vaxis_minor_unit=None,\n vaxis_tick_labels_num_format='0%',\n vaxis_tick_labels_num_format_is_linked=False,\n vaxis_tick_labels_font_name=\"Calibri\",\n vaxis_tick_labels_font_bold=True,\n vaxis_tick_labels_font_size=10,\n vaxis_tick_labels_font_italic=False,\n 
vaxis_tick_labels_font_color=(89,89,89),\n\n #Datalabel properties\n plot_has_data_labels=True,\n data_labels_position='outside_end',\n data_labels_num_format='0%',\n data_labels_num_format_is_linked=False,\n data_labels_font_name=\"Calibri\",\n data_labels_font_size=9,\n data_labels_font_bold=False,\n data_labels_font_italic=False,\n data_labels_font_color=(0,0,0),\n\n #Plot properties\n plot_vary_by_cat=False,\n series_color_order='reverse',\n invert_series_color_if_negative=False,\n plot_gap_width=150,\n plot_overlap=-10,\n series_line_color=None,\n series_line_width=None,\n\n #Excel table\n excel_num_format='0.00%',\n \n #Color for separator\n separator_color=(255,255,255)\n ):\n #-------------------------------------------------------------------------\n\n #strips html code\n dataframe = clean_axes_labels(dataframe)\n\n #if category labels are split from the chart shape then determine the width of the textboxes and the chart shape.\n #textboxes in this case will take up 40% of the overall width of the chart shape. From this we can calculate the\n #width of the chart shape\n\n if (caxis_visible == False) or (caxis_visible == True and str(tick_label_pos_dct[caxis_tick_label_position]) == \"NONE (-4142)\"):\n catwidth = percentage_of_num(40, width)\n width = width - catwidth\n left = left + catwidth\n\n # orientation of chart type requires that we reverse the row and column order.\n dataframe = dataframe[::-1]\n dataframe = dataframe[dataframe.columns[::-1]]\n\n # add chart data\n chart_data = ChartData()\n chart_data.categories = dataframe.index\n\n for i, col in enumerate(dataframe.columns):\n chart_data.add_series(col, (dataframe.ix[:, i].values), excel_num_format)\n\n # add chart\n x, y, cx, cy = left, top, width, height\n graphic_frame = slide.shapes.add_chart(\n XL_CHART_TYPE.BAR_CLUSTERED, x, y, cx, cy, chart_data\n )\n chart = graphic_frame.chart\n\n # ---------------- adjust chart properties ----------------\n\n # chart style\n chart.chart_style = chart_style\n\n # set legend properties\n chart.has_legend = has_legend\n if has_legend:\n legend = chart.legend\n legend.include_in_layout = legend_in_layout\n legend.position = legend_pos_dct[legend_position]\n legend.horz_offset = legend_horz_offset\n legend.font.name = legend_font_name\n legend.font.size = Pt(legend_font_size)\n legend.font.bold = legend_font_bold\n legend.font.italic = legend_font_italic\n legend.font.color.rgb = RGBColor(*legend_font_color)\n legend.font.color.brightness = legend_font_brightness\n\n # set category axis (vertical) properties\n category_axis = chart.category_axis\n category_axis.has_major_gridlines = caxis_has_major_gridlines\n category_axis.has_minor_gridlines = caxis_has_minor_gridlines\n category_axis.major_tick_mark = tick_mark_pos_dct[caxis_major_tick_mark]\n category_axis.minor_tick_mark = tick_mark_pos_dct[caxis_minor_tick_mark]\n category_axis.tick_label_position = tick_label_pos_dct[caxis_tick_label_position]\n\n category_axis.visible = caxis_visible\n if caxis_visible:\n caxis_tick_labels = category_axis.tick_labels\n caxis_tick_labels.offset = caxis_tick_labels_offset\n caxis_tick_labels.font.name = caxis_tick_labels_font_name\n caxis_tick_labels.font.size = Pt(caxis_tick_labels_font_size)\n caxis_tick_labels.font.bold = caxis_tick_labels_font_bold\n caxis_tick_labels.font.italic = caxis_tick_labels_font_italic\n caxis_tick_labels.font.color.rgb = RGBColor(*caxis_tick_labels_font_color)\n\n # set value axis (horizontal) properties\n value_axis = chart.value_axis\n 
value_axis.has_major_gridlines = vaxis_has_major_gridlines\n value_axis.has_minor_gridlines = vaxis_has_minor_gridlines\n value_axis.maximum_scale = vaxis_max_scale\n value_axis.minimum_scale = vaxis_min_scale\n value_axis.major_unit = vaxis_major_unit\n value_axis.minor_unit = vaxis_minor_unit\n value_axis.major_tick_mark = tick_mark_pos_dct[vaxis_major_tick_mark]\n value_axis.minor_tick_mark = tick_mark_pos_dct[vaxis_minor_tick_mark]\n value_axis.tick_label_position = tick_label_pos_dct[vaxis_tick_label_position]\n\n value_axis.visible = vaxis_visible\n if vaxis_visible:\n vaxis_tick_labels = value_axis.tick_labels\n vaxis_tick_labels.font.bold = vaxis_tick_labels_font_bold\n vaxis_tick_labels.font.size = Pt(vaxis_tick_labels_font_size)\n vaxis_tick_labels.font.italic = vaxis_tick_labels_font_italic\n vaxis_tick_labels.font.name = vaxis_tick_labels_font_name\n vaxis_tick_labels.font.color.rgb = RGBColor(*vaxis_tick_labels_font_color)\n if vaxis_tick_labels_num_format is not None:\n vaxis_tick_labels.number_format = vaxis_tick_labels_num_format\n vaxis_tick_labels.number_format_is_linked = vaxis_tick_labels_num_format_is_linked\n\n # set plot area properties\n plot = chart.plots[0]\n plot.vary_by_categories = plot_vary_by_cat\n plot.gap_width = plot_gap_width\n plot.overlap = plot_overlap\n\n plot.has_data_labels = plot_has_data_labels\n if plot_has_data_labels:\n data_labels = plot.data_labels\n data_labels.position = data_label_pos_dct[data_labels_position]\n data_labels.font.size = Pt(data_labels_font_size)\n data_labels.font.bold = data_labels_font_bold\n data_labels.font.italic = data_labels_font_italic\n data_labels.font.name = data_labels_font_name\n data_labels.font.color.rgb = RGBColor(*data_labels_font_color)\n if data_labels_num_format is not None:\n data_labels.number_format = data_labels_num_format\n data_labels.number_format_is_linked = data_labels_num_format_is_linked\n\n # Show Net settings\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\n net_separator = ['net_separator']\n category_labels = [i.label for i in chart_data.categories]\n if net_separator[0] in category_labels:\n pos_net_separator = [category_labels.index(i) for i in net_separator]\n # Delete the net separator text\n category_labels[pos_net_separator[0]] = ''\n chart_data.categories = category_labels\n chart.replace_data(chart_data)\n\n # Add fill to the separator\n bar = chart.series[0].points\n for x in pos_net_separator:\n\n point = bar[x]\n fill = point.format.fill\n fill.solid()\n fill.fore_color.rgb = RGBColor(*separator_color)\n\n # Hide data label\n chart_values = get_chart_values(chart)\n\n for s, series in enumerate(chart_values):\n values = [\n value for value in list(series.values())[0]\n ]\n\n for v, value in enumerate(values):\n point = chart.series[s].points[v]\n #point.format.line.color.rgb = RGBColor(0,0,0)\n frame = point.data_label.text_frame\n frame.text = '' if value == 1.01 else str(int(round(float(value) * 100))) + \"%\"\n run = frame.paragraphs[0].runs\n for point_label in run:\n point_label.font.size = Pt(data_labels_font_size)\n point_label.font.name = data_labels_font_name\n point_label.font.bold = data_labels_font_bold\n point_label.font.italic = data_labels_font_italic\n point_label.font.color.rgb = RGBColor(*data_labels_font_color)\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n if series_color_order and len(dataframe.columns) > 1:\n ser_colors_list = 
color_setter(len(dataframe.columns), series_color_order)\n\n for i, ser in enumerate(dataframe.columns):\n ser = plot.series[i]\n ser.invert_if_negative = invert_series_color_if_negative\n\n if series_line_color is not None and series_line_width is not None:\n ser.line.color.rgb = RGBColor(*series_line_color)\n ser.line.width = Pt(series_line_width)\n\n elif series_line_color is not None and series_line_width is None:\n ser.line.color.rgb = RGBColor(*series_line_color)\n\n if series_color_order and len(dataframe.columns) > 1:\n try:\n fill = ser.fill\n except:\n fill = ser.format.fill\n fill.solid()\n color_code = ser_colors_list[i]\n fill.fore_color.rgb = RGBColor(*color_code)\n\n # generate overlay axis labels\n if (caxis_visible == False) or (caxis_visible == True and str(tick_label_pos_dct[caxis_tick_label_position]) == \"NONE (-4142)\"):\n cht_plot_height = get_cht_plot_height(height)\n heightPerLabel = cht_plot_height/len(dataframe.index)\n rightofchart = left + width\n txtbx_width = width / 5\n firstposition = top + get_upper_cht_plot_gap(height)\n\n cat_labels = dataframe.T.columns\n\n for i, label in enumerate(cat_labels):\n\n top = 0\n pointRelPos = len(cat_labels) - (i + 1)\n top = firstposition + pointRelPos * heightPerLabel\n\n add_textbox(slide,\n left=142875, top=top, width=rightofchart - width, height=heightPerLabel,\n text=label,\n font_name=caxis_tick_labels_font_name,\n font_size=caxis_tick_labels_font_size,\n fit_text=False,\n word_wrap=True,\n font_bold=False,\n font_color=caxis_tick_labels_font_color,\n horizontal_alignment='right',\n vertical_alignment='middle')", "def bars(self):\n return ''.join(map((lambda val: self._val2bars[val]), self.symbol_values))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a row with the given label and data.
def GenerateHTMLLabelledRow(label,title,htmlRowData):
    return """\
<tr title="%s">
  <th style="padding-top:.5ex;padding-right:1ex;text-align:right;">%s</th>
  <td style="padding-top:.5ex;width:100%%;">
%s
  </td>
</tr>""" % (title,label,"\n".join(" %s"%line for line in htmlRowData.splitlines()))
[ "def make_label_row(self, row, row_name, dictionary, **kwargs):\n rowLabel = QtGui.QLabel(row_name)\n rowLabel.setSizePolicy(8,0)\n # the numeric arguments below are: row, column,rowspan, colspan\n self.gridLayout.addWidget(rowLabel, row, 0, 1, 1,\n QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)\n self.logger.debug(\"make_label_row: processing dictionary: %s\",\n str(dictionary))\n flatdict = flattenDict(dictionary)\n keys = flatdict.keys()\n keys.sort()\n self.logger.debug(\"make_label_row: new keys for label row: %s\", str(keys))\n labels = {}\n # the following code figures out where to put the widgets\n col = 1\n if len(keys):\n keylen = len(keys[0])\n if kwargs.has_key('format'):\n format = kwargs['format']\n else:\n format = \"%s\"\n for key in keys:\n col, colspan = self._position_widget(key,keylen,col)\n labels[key] = QtGui.QLabel()\n labels[key].setSizePolicy(8,0)\n self.gridLayout.addWidget(labels[key],\n row, col, 1, colspan,\n QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)\n if flatdict[key]:\n labels[key].setText(format % flatdict[key])\n labels[key].setFrameShape(QtGui.QFrame.Panel)\n else:\n labels[key].setText(\"None\")\n col += colspan\n #if kwargs.has_key('slots'):\n # for pair in kwargs['slots']:\n # signal = pair[0]\n # self.logger.debug(\"make_label_row: signal = %s\", signal)\n # slot = pair[1]\n # self.logger.debug(\"make_label_row: slot = %s\", slot)\n # signal.connect(slot)\n return labels", "def _construct_row(name, user_id=None):\r\n row = {'name': name, 'user_id': user_id}\r\n for status in labels.keys():\r\n # Include an extra entry for summary.\r\n row[status] = [{'hours': Decimal(), 'percent': Decimal()}\r\n for i in range(len(labels[status]) + 1)]\r\n row['work_total'] = Decimal()\r\n row['grand_total'] = Decimal()\r\n return row", "def nextRow(*args, **kwargs):\n \n pass", "def get_data_row(self, col, data):\n this_row = ''\n data = self.get_cell_formatted(col, data)\n if self.is_col_first(col):\n this_row += '\\n'\n if 'OUTLINE' in self._type:\n this_row += self._types[self._type]['L']\n else:\n this_row += ' '\n else:\n this_row += self._types[self._type]['M']\n\n this_row += str(data)\n if 'OUTLINE' in self._type:\n if self.is_col_last(col):\n this_row += self._types[self._type]['R']\n return this_row", "def create_random_row(key):\n row = [key]\n for _ in range(2):\n random_val = randint(1, 100)\n row.append(random_val)\n return Row(row[0], row[1:])", "def __make_label__( self, a_frame, a_row, a_col, a_text, label_id = None, label_dict = None ):\r\n a_row += 1\r\n if a_row >= 2:\r\n a_row = 0\r\n a_col += 1\r\n\r\n a_label = ( Label( a_frame, text = a_text, relief = RAISED, ) )\r\n a_label.grid( row=a_row, column=a_col, sticky = E + W + N + S ) # sticky=W+E+N+S ) # relief = RAISED)\r\n\r\n if not( label_id is None ):\r\n label_dict[ label_id ] = a_label\r\n\r\n return ( a_row, a_col, a_label )", "def row(self, val: int):\n # todo: figure out how many rows are in the event table\n\n self.visa_write(f':row {val}')", "def make_row(row, fields):\n row_fields = [row.get(f.name, str(default_value(f.name, f.datatype)))\n for f in fields]\n return encode_row(row_fields)", "def table_row(\n name: str,\n cells: List[str],\n) -> TableRow:\n return TableRow(\n name,\n cells,\n )", "def build_rows(metric, data):\n logging.debug(\"build_row\")\n rows = []\n\n labelDescriptors = data[\"timeSeriesDescriptor\"][\"labelDescriptors\"]\n pointDescriptors = data[\"timeSeriesDescriptor\"][\"pointDescriptors\"]\n\n for timeseries in data[\"timeSeriesData\"]:\n labelValues = 
timeseries[\"labelValues\"]\n pointData = timeseries[\"pointData\"]\n\n # handle >= 1 points, potentially > 1 returned from Monitoring API call\n for point_idx in range(len(pointData)):\n labels = []\n for i in range(len(labelDescriptors)):\n for v1 in labelDescriptors[i].values():\n labels.append(\n {\"key\": v1, \"value\": \"\"})\n for i in range(len(labelValues)):\n for v2 in labelValues[i].values():\n if type(v2) is bool:\n labels[i][\"value\"] = str(v2)\n else:\n labels[i][\"value\"] = v2\n\n point_descriptors = []\n for j in range(len(pointDescriptors)):\n for k, v in pointDescriptors[j].items():\n point_descriptors.append({\"key\": k, \"value\": v})\n\n row = {\n \"timeSeriesDescriptor\": {\n \"pointDescriptors\": point_descriptors,\n \"labels\": labels,\n }\n }\n\n interval = {\n \"start_time\": pointData[point_idx][\"timeInterval\"][\"startTime\"],\n \"end_time\": pointData[point_idx][\"timeInterval\"][\"endTime\"]\n }\n\n # map the API value types to the BigQuery value types\n value_type = pointDescriptors[0][\"valueType\"]\n bigquery_value_type_index = config.BQ_VALUE_MAP[value_type]\n api_value_type_index = config.API_VALUE_MAP[value_type]\n value_type_label = {}\n\n value = timeseries[\"pointData\"][point_idx][\"values\"][0][api_value_type_index]\n\n if value_type == DISTRIBUTION:\n value_type_label[bigquery_value_type_index] = build_distribution_value(\n value)\n else:\n value_type_label[bigquery_value_type_index] = value\n\n point = {\n \"timeInterval\": interval,\n \"values\": value_type_label\n }\n row[\"pointData\"] = point\n row[\"metricName\"] = metric\n rows.append(row)\n\n return rows", "def addRow(self, row_info):\n pass", "def create_line_item(self, resource_link_id, label):\n raise NotImplementedError()", "def _generate_row(self, weekday, lesson_number, columns):\n row = []\n for item in columns:\n cell = '%d-%d-%d' % (weekday, lesson_number,\n self._get_column_name(item))\n row.append(cell)\n return row", "def addRow( self, data ):\n self.tableData.append( data )", "def handle_row(self, row):\n pass", "def makeEntryFromRowFancy(row, labels):\n name = row[0].value.strip()\n if name[-1] == '*':\n name = name[:-1].strip()\n vals = {}\n for i in range(1, 14):\n vals[labels[i].value.lower().strip()] = row[i].value\n \n hosting = {}\n for i in range(14, 26):\n hosting[labels[i].value.lower().strip()] = True if row[i].value != None else False\n \n purpose = {}\n for i in range(26, 31):\n purpose[labels[i].value.lower().strip()] = True if row[i].value != None else False\n\n scope = {}\n for i in range(31, 36):\n scope[labels[i].value.lower().strip()] = True if row[i].value != None else False\n\n focus = {}\n for i in range(36, 48):\n focus[labels[i].value.lower().strip()] = True if row[i].value != None else False\n\n development = {}\n for i in range(48, 52):\n development[labels[i].value.lower().strip()] = True if row[i].value != None else False\n\n support = {}\n for i in range(52, 60):\n support[labels[i].value.lower().strip()] = True if row[i].value != None else False\n \n vals['host'] = hosting\n vals['purpose'] = purpose\n vals['scope'] = 'scope'\n vals['focus'] = focus\n vals['professional development'] = development\n vals['financial support'] = support\n \n return {name: vals}", "def add_row(self, data):\n self.new_row()\n self.rewind_column()\n for item in data:\n try:\n self.set_value(item.encode())\n except AttributeError:\n self.set_value(item)\n if item == \".\":\n self.set_typeofvalue(b\"null\")\n try:\n self.next_column()\n except Exception:\n break", "def 
create_row(line, indent):\n return r'{indent}{indent}{content} \\\\'.format(\n indent=indent,\n content=' & '.join(line))", "def add_row(self, row):\n if len(row) != self.dimension:\n print('Cannot add a row of length {} to a dataset with {} columns'.format(len(row), self.dimension))\n else:\n self.data.append(row)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the code of an HTML table showing one horizontal bar for each data sample. Error bars are also shown for each dataSample at 'value +/- (numStdev*stdev)'.
def GenerateHTMLHorizontalBarChart(dataSamples,numStdev,color):
    if numStdev<0:
        raise ValueError("numStdev is negative (%s) but it is expected be positive" % numStdev)
    norm = max(ds.value+(numStdev*ds.stdev) for ds in dataSamples)
    bars = [ GenerateHTMLHorizontalBar(float(d.value)/norm,float(numStdev*d.stdev)/norm,color) for d in dataSamples ]
    return """\
<table cellspacing="0" cellpadding="0" border="0" style="width:80ex;font-family:monospace;">
%s
</table>""" % "\n".join([GenerateHTMLLabelledRow(d.label,"%s(+/-%s)"%(d.value,numStdev*d.stdev),b) for d,b in zip(dataSamples,bars)])
[ "def create_tex_table(dbs):\n obs, series, pts = get_ordered_series(dbs)\n\n head = r\"\"\"\\begin{center}\n\\begin{tabular}{l|c|c|c}\n\\hline\n\"\"\"\n head += r\"\"\"Year & Cases & median Attack Ratio $ $S_0$ \\\\\n\\hline\n\"\"\"\n bot = r\"\"\"\n\\hline\n\\end{tabular}\n\\end{center}\n \"\"\"\n body = r\"\"\n st = []\n # years = sorted(list(series.keys()))\n print (series.keys())\n for i, (Y, V) in enumerate(series.items()):\n cases = obs[Y].sum()\n first_week = V.index[0]\n s0 = array(series[Y].S.ix[first_week])\n try:\n ratio = 1.0*cases/s0\n body += Y + r\" & {:.3} & {:.2} ({:.2}-{:.2}) & {:.3}({:.2}-{:.2})\\\\\".format(cases*100, nanmedian(ratio),\n stats.scoreatpercentile(ratio, 2.5),\n stats.scoreatpercentile(ratio, 97.5),\n nanmedian(s0)*100,\n stats.scoreatpercentile(s0, 2.5)*100,\n stats.scoreatpercentile(s0, 97.2)*100\n )\n body += \"\\n\"\n except KeyError as e:\n print (Y, first_week, e)\n except ValueError as e:\n print (s0, e)\n\n return head + body + bot", "def plot_table(self, ax: plt.Axes):\n row_labels = ['length', 'count', 'missing', 'min',\n 'q1', 'median', 'q3', 'max', 'mean', 'mode', 'std']\n cell_text = [[str(self._stats[row])] for row in row_labels]\n col_labels = ['statistics']\n ax.axis('tight')\n ax.axis('off')\n ax.table(cellText=cell_text,\n rowLabels=row_labels,\n colLabels=col_labels,\n colWidths=[0.5, 0.5],\n loc='center')\n\n return ax", "def plot_stats(stats):\n stat_names = [\"min\", \"max\", \"count\", \"std\", \"Q1\", \"Q2\", \"Q3\", \"Unique\", \"Top\", \"OP\"]\n feature_names = []\n data = []\n\n for feature, value in stats.items():\n feature_names.append(feature)\n row = []\n for stat_name in stat_names:\n row.append(value[stat_name])\n data.append(row)\n\n # print(\"data:\",data)\n title_text = \"Statistics Table\"\n print(title_text)\n\n # Get some lists of color specs for row and column headers\n rcolors = plt.cm.BuPu(np.full(len(feature_names), 0.1))\n ccolors = plt.cm.BuPu(np.full(len(stat_names), 0.1))\n # Create the figure. Setting a small pad on tight_layout\n # seems to better regulate white space. 
Sometimes experimenting\n # with an explicit figsize here can produce better outcome.\n plt.figure(\n linewidth=4,\n tight_layout={\"pad\": 1},\n )\n # Add a table at the bottom of the axes\n the_table = plt.table(\n cellText=data,\n rowLabels=feature_names,\n rowColours=rcolors,\n rowLoc=\"right\",\n colColours=ccolors,\n colLabels=stat_names,\n loc=\"center\",\n )\n # Scaling is the only influence we have over top and bottom cell padding.\n # Make the rows taller (i.e., make cell y scale larger).\n the_table.scale(4, 4.5)\n # Hide axes\n ax = plt.gca()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n # Hide axes border\n plt.box(on=None)", "def visualize_table(self):\n headers = ['frame', 'fulfillable', 'missing_cap', 'recommended', 'possible']\n lines = []\n for frame in self.specification.frames:\n fulfillable = self.can_fulfil(frame)\n recommended = self.get_recommendation(frame)\n line = [frame, fulfillable, recommended.reason_missing, recommended.nearest, recommended.possible]\n lines.append(line)\n display(HTML(tabulate.tabulate(lines, headers=headers, tablefmt='html')))", "def __str__(self):\n if self.stats:\n return r\"\"\"$$\\begin{table}[]\n\\centering\n\\begin{tabular}{llll}\n\\hline\n\\multicolumn{1}{|l|}{\\textbf{Recall}} & \\multicolumn{1}{l|}{\\textbf{Precision}} & \\multicolumn{1}{l|}{\\textbf{Accuracy}} & \\multicolumn{1}{l|}{\\textbf{F-score}} \\\\ \\hline\nStrain 1 & 140 & 1390648 & 149577\n\\end{tabular}\n\\caption{}\n\\label{tab:my-table}\n\\end{table}$$\"\"\"\n else:\n return \"Statistics not computed.\"", "def counters_table ( counters , prefix = '' , title = '' ) :\n \n if isinstance ( counters , dictlike_types ) : pass \n elif isinstance ( counters , sequence_types ) :\n cnts = {}\n for i,c in enumerate ( counters , start = 1 ) : cnts [ i ] = c\n counters = cnts \n elif isntance ( counters , ( SE , WSE , NSE ) ) :\n counters = { 1 : counters } \n else :\n raise TypeError ( \"cnt_table: illegay type for 'counters' %s\" % type ( counters ) )\n\n rows = [ ( '' , '#' , 'Sum' , 'Mean' , 'RMS' , 'min/max' ) ] \n for key in counters :\n \n counter = counters [ key ]\n \n mean = counter.mean ()\n rms = counter.rms ()\n minv, maxv = counter.minmax () \n \n row = ( '%s' % key ,\n '%d' % counter.nEntries() ,\n '%+.6g' % counter.sum () ,\n '( %-+.6g +/- %.6g )' % ( mean.value() , mean.error () ) ,\n '%.6g' % counter.rms () ,\n '( %-+.6g / %+.6g )' % ( minv , maxv ) ) \n \n rows.append ( row )\n\n import ostap.logger.table as T\n if not title : title = 'Table of %d counters' % len ( counters ) \n table = T.table ( rows , prefix = prefix , title = title , alignment = \"llcccc\" )\n #\n return table", "def GenerateHTMLHorizontalBar(relWidth,relErrorWidth,color):\n if not (0. <= relWidth <= 1.):\n raise ValueError(\"Invalid relwidth '%s', it must be between 0 and 1\" % relWidth)\n if not (0. 
<= relErrorWidth <= 1.):\n raise ValueError(\"Invalid relwidth '%s', it must be between 0 and 1\" % relErrorWidth)\n if relWidth+relErrorWidth>1.:\n raise ValueError(\"Invalid relwidth and relErrorwidth (%s,%s), their sum must not be greater than one\" % (relErrorWidth,relErrorWidth))\n # use floor to amplify a little the error bar\n firstPartWidth = math.floor(100*min(1.,max(0,relWidth-relErrorWidth)))\n secondPartWidth = 100*relWidth-firstPartWidth\n thirdPartWidth = min(math.ceil(100*relErrorWidth),100-secondPartWidth-firstPartWidth)\n return \"\"\"\\\n<table cellspacing=\"0\" cellpadding=\"0\" border=\"0\" style=\"width:100%%\">\n<tr>\n <td style=\"width:%.0f%%;height:1ex;background-color:%s;\"></td>\n <td style=\"width:%.0f%%;height:1ex;background-color:%s;text-align:left\">|</td>\n <td style=\"width:%.0f%%;height:1ex;text-align:right\">|</td>\n <td></td>\n</tr>\n</table>\"\"\" % (firstPartWidth,color,secondPartWidth,color,thirdPartWidth)", "def render_anode09_table(filename):\n # small nodules, large nodules, isolated nodules, vascular nodules,\n # pleural nodules, peri-fissural nodules, all nodules\n variables = parse_php_arrays(filename)\n assert variables != {}, (\n \"parsed result of '%s' was emtpy. I cannot create table\" % filename\n )\n\n table_id = id_generator()\n table_html = (\n \"\"\"<table border=1 class = \"csvtable sortable\" id=\"%s\">\n <thead><tr>\n <td class =\"firstcol\">FPs/scan</td><td align=center width='54'>1/8</td>\n <td align=center width='54'>1/4</td>\n <td align=center width='54'>1/2</td><td align=center width='54'>1</td>\n <td align=center width='54'>2</td><td align=center width='54'>4</td>\n <td align=center width='54'>8</td><td align=center width='54'>average</td>\n </tr></thead>\"\"\"\n % table_id\n )\n table_html += \"<tbody>\"\n table_html += array_to_table_row(\n [\"small nodules\"] + variables[\"smallscore\"]\n )\n table_html += array_to_table_row(\n [\"large nodules\"] + variables[\"largescore\"]\n )\n table_html += array_to_table_row(\n [\"isolated nodules\"] + variables[\"isolatedscore\"]\n )\n table_html += array_to_table_row(\n [\"vascular nodules\"] + variables[\"vascularscore\"]\n )\n table_html += array_to_table_row(\n [\"pleural nodules\"] + variables[\"pleuralscore\"]\n )\n table_html += array_to_table_row(\n [\"peri-fissural nodules\"] + variables[\"fissurescore\"]\n )\n table_html += array_to_table_row([\"all nodules\"] + variables[\"frocscore\"])\n table_html += \"</tbody>\"\n table_html += \"</table>\"\n return '<div class=\"tablecontainer\">' + table_html + \"</div>\"", "def generate_table_report(self):\n # create header row\n html_content = \"<table cellspacing='{0}' border='0'>\".format(self.cellspacing)\n html_content += \"<tr style='font-size:{0}pt; font-family:{1}; color:{2};'>\".format(\n self.font_size_heading_2,\n self.font_family,\n pyani.core.ui.CYAN\n )\n\n if not self.headings:\n self.headings = [\"Could not build headings\"]\n self.col_widths = [\"100\"]\n self.data = [\"Heading build error, could not construct data portion of table.\"]\n\n for index, heading in enumerate(self.headings):\n html_content += \"<td width='{0}%'>\".format(self.col_widths[index])\n html_content += heading\n html_content += \"</td>\"\n html_content += \"</tr>\"\n\n # add spacer row\n html_content += \"<tr>\"\n for _ in self.headings:\n html_content += \"</td>&nbsp;</td>\"\n html_content += \"</tr>\"\n\n if self.data:\n for data in self.data:\n html_content += \"<tr style='font-size:{0}pt; font-family:{1}; color: #ffffff;'>\".format(\n 
self.font_size_body,\n self.font_family\n )\n for item in data:\n html_content += \"<td>\"\n html_content += item\n html_content += \"</td>\"\n html_content += \"</tr>\"\n\n html_content += \"</table>\"\n self.show_content(html_content)", "def create_dash_sample_table(net, amx, sample = None):\n\n htr_style = {}\n htd_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20',\n 'width' : '30%', \"border-bottom\": \"1px solid #BBBBBB\"}\n td_style = {\n 'text-align' : 'left', 'padding' : '5px',\n 'margin': '0px', 'padding' : '0 0 0 20', \"border-bottom\": \"1px solid #BBBBBB\"\n }\n\n columns = amx.columns\n div_children = []\n if sample:\n div_children.append(html.H4(\n children = ['Sample: {sample}'.format(sample = sample)],\n style = {'padding' : '0px', 'margin' : '0px'}\n ))\n table_rows = []\n row = amx.loc[sample]\n for col in columns:\n table_rows.append(\n html.Tr([\n html.Th(col, style=htd_style),\n html.Td(row[col], style=td_style)\n ])\n )\n\n div_children.append(\n html.Table(\n style = {\n \"background-color\" : 'white', 'color' : 'black',\n 'margin-top' : '10px',\n 'margin-bottom' : '10px', 'width' : '100%',\n },\n children=table_rows\n )\n )\n else:\n div_children.append(\n html.Div('To view sample details, click an edge in the network, then in the edge scatterplot click a sample.')\n )\n\n return html.Div(\n id='sample-table',\n children = div_children\n )", "def print_table(sensors):\n for sen in sensors:\n if sen.value is None:\n print(\"{:>25}\".format(sen.name))\n else:\n print(\"{:>25}{:>15} {}\".format(sen.name, str(sen.value), sen.unit))", "def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '&#x27e8;' and '&#x27e9;'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '&#x27e8;'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n 
ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'&#x27e9;'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht", "def render_sample_html(sample: dict) -> str:\n formatted_samples = {}\n for key in sample:\n formatted_samples[key] = sample[key].to_html(classes=\"sample table-striped\")\n sample_html = templates.template(\"sample.html\").render(values=formatted_samples)\n # Previously, we only displayed the first samples.\n # sample_html = templates.template('sample.html').render(sample_table_html=sample.to_html(classes=\"sample\"))\n return sample_html", "def units_html_list():\n from IPython.display import HTML\n table = \"<table>\"\n table += \"<tr><th>Name</th><th>Base Unit</th><th>Quantity</th></tr>\"\n for name in unit_table:\n unit = unit_table[name]\n if isinstance(unit, PhysicalUnit):\n if unit.prefixed is False:\n if isinstance(unit.baseunit, PhysicalUnit):\n baseunit = '$ %s $' % unit.baseunit\n else:\n baseunit = '$ %s $' % _pretty(unit.baseunit.name)\n table += \"<tr><td>\" + unit.name + '</td><td>' + baseunit + \\\n '</td><td><a href=\"' + unit.url + '\" target=\"_blank\">' + unit.verbosename + \\\n '</a></td></tr>'\n table += \"</table>\"\n return HTML(table)", "def _document_summary_table(self, pass_num: int, fail_num: int) -> None:\n with self.doc.create(Tabularx('|Y|Y|Y|', booktabs=True)) as tabular:\n package = Package('seqsplit')\n if package not in tabular.packages:\n tabular.packages.append(package)\n\n # add table heading\n tabular.add_row((\"Total Tests\", \"Total Passed \", \"Total Failed\"), strict=False)\n tabular.add_hline()\n\n tabular.add_row((pass_num + fail_num, pass_num, fail_num), strict=False)", "def _tab_print_ ( t , title = '' , prefix = '' , alignment = 'll' , xfmt = '%+.5g' , yfmt = '%+-.5g' ) :\n rows = [ ('Abscissa' , 'Value' ) ] \n for i in range ( t.size() ) :\n x = t.x ( i )\n y = t.y ( i )\n row = xfmt % x, yfmt % y\n rows.append ( row )\n \n if not title : title = 'Interpolation Table' \n import ostap.logger.table as T\n return T.table ( rows , title = title , prefix = prefix , alignment = alignment )", "def HTMLSummary():\n Output = open('Results.html', 'w')\n Output.write( \"<html><head><title>Summary</title></head>\\n\")\n Query = \"SELECT * FROM experiments ORDER BY experiment\"\n \n cur.execute(Query)\n AllExperiments = cur.fetchall()\n \n for Table, TimeField, Fields in TableDefs:\n print Table\n Query = ReturnQuery(Table, TimeField, Fields)\n cur.execute(Query)\n \n GasResults = cur.fetchall()\n AppendToMasterTable(AllExperiments, GasResults)\n\n cur.execute(\"SELECT MAX(experiment) FROM experiments\")\n MaxExperiment = cur.fetchone()\n 
AppendToMasterTable(AllExperiments,GetGasVolume(range(1,int(MaxExperiment[0])+1,1)))\n \n Output.write(\"<table border=\\\"1\\\">\\n\")\n #Need to generate table headers here\n Query = \"select column_name from information_schema.columns where table_name='experiments';\"\n cur.execute(Query)\n Rows = cur.fetchall()\n \n Output.write(\"\\t<tr>\\n\")\n for Row in Rows:\n Output.write(\"\\t\\t<th>{}</th>\\n\".format(Row[0]))\n \n for Table, TimeField, Fields in TableDefs:\n for Field in Fields:\n Output.write(\"\\t\\t<th>{}</th>\\n\".format(Field))\n Output.write(\"\\t\\t<th>Gas Volume</th>\\n\\t</tr>\\n\")\n \n #Write out all data\n for ExperimentRow in AllExperiments:\n Output.write( \"\\t<tr>\\n\")\n for ExpVal in ExperimentRow:\n Output.write( \"\\t\\t<td>{}</td>\\n\".format(ExpVal))\n Output.write(\"\\t</tr>\\n\")\n Output.write( \"</table>\")\n Output.write( \"</body>\\n</html>\")", "def render_variables_html(stats_object: dict) -> str:\n rows_html = u\"\"\n\n n_obs_unique = config[\"n_obs_unique\"].get(int)\n n_obs_bool = config[\"n_obs_bool\"].get(int)\n n_extreme_obs = config[\"n_extreme_obs\"].get(int)\n n_freq_table_max = config[\"n_freq_table_max\"].get(int)\n\n messages = stats_object[\"messages\"]\n\n # TODO: move to for loop in template\n for idx, row in stats_object[\"variables\"].items():\n formatted_values = row\n formatted_values.update({\"varname\": idx, \"varid\": hash(idx), \"row_classes\": {}})\n\n # TODO: obtain from messages (ignore)\n for m in messages:\n if m.column_name == idx:\n if m.message_type == MessageType.SKEWED:\n formatted_values[\"row_classes\"][\"skewness\"] = \"alert\"\n elif m.message_type == MessageType.HIGH_CARDINALITY:\n # TODO: rename alert to prevent overlap with bootstrap classes\n formatted_values[\"row_classes\"][\"distinct_count\"] = \"alert\"\n elif m.message_type == MessageType.ZEROS:\n formatted_values[\"row_classes\"][\"zeros\"] = \"alert\"\n elif m.message_type == MessageType.MISSING:\n formatted_values[\"row_classes\"][\"missing\"] = \"alert\"\n\n if row[\"type\"] in {Variable.TYPE_NUM, Variable.TYPE_DATE}:\n\n formatted_values[\"histogram\"] = histogram(row[\"histogramdata\"], row)\n formatted_values[\"mini_histogram\"] = mini_histogram(\n row[\"histogramdata\"], row\n )\n\n if row[\"type\"] in {Variable.TYPE_CAT, Variable.TYPE_BOOL}:\n # The number of column to use in the display of the frequency table according to the category\n mini_freq_table_nb_col = {Variable.TYPE_CAT: 6, Variable.TYPE_BOOL: 3}\n\n formatted_values[\"minifreqtable\"] = freq_table(\n stats_object[\"variables\"][idx][\"value_counts_without_nan\"],\n stats_object[\"table\"][\"n\"],\n \"mini_freq_table.html\",\n max_number_to_print=n_obs_bool,\n idx=idx,\n nb_col=mini_freq_table_nb_col[row[\"type\"]],\n )\n\n if row[\"type\"] in {Variable.TYPE_URL}:\n keys = [\"scheme\", \"netloc\", \"path\", \"query\", \"fragment\"]\n for url_part in keys:\n formatted_values[\"freqtable_{}\".format(url_part)] = freq_table(\n freqtable=stats_object[\"variables\"][idx][\n \"{}_counts\".format(url_part)\n ],\n # TODO: n - missing\n n=stats_object[\"table\"][\"n\"],\n table_template=\"freq_table.html\",\n idx=idx,\n max_number_to_print=n_freq_table_max,\n )\n\n if row[\"type\"] == Variable.S_TYPE_UNIQUE:\n table = stats_object[\"variables\"][idx][\n \"value_counts_without_nan\"\n ].sort_index()\n obs = table.index\n\n formatted_values[\"firstn\"] = pd.DataFrame(\n list(obs[0:n_obs_unique]),\n columns=[\"First {} values\".format(n_obs_unique)],\n ).to_html(classes=\"example_values\", 
index=False)\n formatted_values[\"lastn\"] = pd.DataFrame(\n list(obs[-n_obs_unique:]),\n columns=[\"Last {} values\".format(n_obs_unique)],\n ).to_html(classes=\"example_values\", index=False)\n\n if row[\"type\"] not in {\n Variable.S_TYPE_UNSUPPORTED,\n Variable.S_TYPE_CORR,\n Variable.S_TYPE_CONST,\n Variable.S_TYPE_RECODED,\n }:\n formatted_values[\"freqtable\"] = freq_table(\n freqtable=stats_object[\"variables\"][idx][\"value_counts_without_nan\"],\n n=stats_object[\"table\"][\"n\"],\n table_template=\"freq_table.html\",\n idx=idx,\n max_number_to_print=n_freq_table_max,\n )\n\n formatted_values[\"firstn_expanded\"] = extreme_obs_table(\n freqtable=stats_object[\"variables\"][idx][\"value_counts_without_nan\"],\n number_to_print=n_extreme_obs,\n n=stats_object[\"table\"][\"n\"],\n ascending=True,\n )\n formatted_values[\"lastn_expanded\"] = extreme_obs_table(\n freqtable=stats_object[\"variables\"][idx][\"value_counts_without_nan\"],\n number_to_print=n_extreme_obs,\n n=stats_object[\"table\"][\"n\"],\n ascending=False,\n )\n # if row['type'] in [Variable.TYPE_NUM, Variable.TYPE_DATE]:\n # TODO: move histograms here\n\n rows_html += templates.template(\n \"variables/row_{}.html\".format(row[\"type\"].value.lower())\n ).render(values=formatted_values)\n return rows_html", "def _createSummaryTable():\n summary = ElementTree.Element(\"div\")\n summary.set(\"id\", \"summary\")\n\n h1 = ElementTree.Element(\"h1\")\n h1.text = \"Summary\"\n\n table = ElementTree.Element(\"table\")\n header = ElementTree.Element(\"tr\")\n header.set(\"class\", \"table_header\")\n headers = [\"Test Suite\",\n \"Tests Executed\",\n \"Failures\",\n \"Errors\",\n \"Percent Passing\"]\n for h in headers:\n cell = ElementTree.Element(\"td\")\n cell.text = h\n header.append(cell)\n table.append(header)\n\n for f in _junitFiles:\n with open(f) as g:\n doc = ElementTree.parse(g)\n\n testsuites = doc.iter(\"testsuite\")\n for suite in testsuites:\n name = suite.get(\"name\")\n numTest = suite.get(\"tests\")\n numFail = suite.get(\"failures\")\n numErr = suite.get(\"errors\")\n numSkip = suite.get(\"skipped\")\n if numSkip is None:\n numSkip = 0\n numExec = str(int(numTest) - int(numSkip))\n if numSkip == numTest:\n pass\n else:\n try:\n percentPass = 100 * \\\n (float(numTest) - float(numFail) - float(numErr) - float(numSkip)) \\\n / float(numExec)\n except:\n percentPass = \"N/A\"\n\n row = ElementTree.Element(\"tr\")\n cells = [name,\n numExec,\n numFail,\n numErr,\n str(percentPass) + \"%\"]\n for c in cells:\n cell = ElementTree.Element(\"td\")\n cell.text = c\n row.append(cell)\n table.append(row)\n\n summary.append(h1)\n summary.append(table)\n return summary" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reorder buffered internal state (for incremental generation).
def reorder_incremental_state(self, incremental_state, new_order): input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer[k] = input_buffer[k].index_select(0, new_order) self._set_input_buffer(incremental_state, input_buffer)
[ "def reorder_decoder_incremental_state(\n self, incremental_state: Dict[int, dict], inds: torch.Tensor\n ) -> Dict[int, dict]:\n incremental_state = fix_incremental_state(\n self.generation_model, incremental_state\n )\n if not incremental_state:\n return incremental_state\n return {\n idx: layer.reorder_incremental_state(incremental_state[idx], inds)\n for idx, layer in enumerate(self.seq2seq_decoder.layers)\n }", "def _data_reorder(self):\n temp_log = self._get_temper_switches()\n runner, trj_frame_incr = self._get_swap_rates()\n c = 0\n d = self.data.copy()\n for i in range(0, temp_log.shape[0], runner):\n if runner != 1:\n cr = slice(c, c+1, 1)\n c += 1\n else:\n cr = slice(trj_frame_incr*(i), trj_frame_incr*(i+1), 1)\n self.data[temp_log[i, 1:], cr, :] = d[:, cr, :]\n return self.data.copy()", "def reorder_incremental_state(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n new_order: Tensor,\n ):\n self.self_attn.reorder_incremental_state(incremental_state, new_order)\n\n if self.encoder_attn is not None:\n self.encoder_attn.reorder_incremental_state(incremental_state, new_order)\n\n if self.num_cross_attentions > 0:\n [attn.reorder_incremental_state(incremental_state, new_order) for attn in self.cross_attentions]\n #for i in range(len(self.cross_attentions)):\n # self.cross_attentions[i].reorder_incremental_state(incremental_state, new_order)", "def reverse_move(self):\n self.arr = self.arr_old.copy()\n self.position = self.position_old.copy()", "def pre_arranged(self, pre_arranged):\n\n self._pre_arranged = pre_arranged", "def _reorder_cache(\n past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor\n ) -> Tuple[Tuple[torch.Tensor]]:\n # Necessary for beam_search\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past_key_values\n )", "def flush_prebuffer(self):\n self.pbuf = []", "def restore_state(self):\n self._restore_input()\n self._restore_output()", "def flush(self):\n inp = np.array(self.inp)\n self.inp = []\n out = self.model(inp)\n for i,o in enumerate(out):\n self.out[0][i] = o\n self.out[1].set()\n self.out_reset()", "def matrix_revert_coordinate_order(batch: torch.Tensor) -> torch.Tensor:\n batch[:, :-1, :] = batch[:, :-1, :].flip(1).clone()\n batch[:, :-1, :-1] = batch[:, :-1, :-1].flip(2).clone()\n return batch", "def redefine_order(self):\n\n # If we are doing a torsion_test we have to redo the order as follows:\n # 1 Skip the finalise step to create a pickled ligand at the torsion_test stage\n # 2 Do the torsion_test and delete the torsion_test attribute\n # 3 Do finalise again to save the ligand with the correct attributes\n if getattr(self.molecule, \"torsion_test\", None) not in [None, False]:\n self.order = OrderedDict(\n [\n (\"finalise\", self.skip),\n (\"torsion_test\", self.torsion_test),\n (\"finalise\", self.skip),\n ]\n )\n\n else:\n start = (\n self.molecule.restart\n if self.molecule.restart is not None\n else \"parametrise\"\n )\n end = self.molecule.end if self.molecule.end is not None else \"finalise\"\n skip = self.molecule.skip if self.molecule.skip is not None else []\n\n # Create list of all keys\n stages = list(self.order)\n\n # Cut out the keys before the start_point and after the end_point\n # Add finalise back in if it's removed (finalise should always be called).\n stages = stages[stages.index(start) : stages.index(end) + 1] + [\"finalise\"]\n\n # Redefine self.order to only contain the key, val pairs from stages\n self.order 
= OrderedDict(\n pair for pair in self.order.items() if pair[0] in set(stages)\n )\n\n for pair in self.order.items():\n self.order[pair[0]] = self.skip if pair[0] in skip else pair[1]", "def mirror_stacked_state(self):\n from util.state_modifier_util import mirror_state\n copy_stacked = deepcopy(self)\n new_deque = deque(maxlen=self.max_len)\n for state in copy_stacked.deque_collection:\n new_deque.append(mirror_state(state))\n print(mirror_state(state).turn)\n copy_stacked.deque_collection = new_deque\n print(copy_stacked.deque_collection[0].turn)\n return copy_stacked", "def restore_state(self):\n if self:\n self.pop()\n else:\n log.warning(\"Can't reset empty state\")", "def backstep(self):\n\n self.input.setDelta(self.output.getNetDelta())\n self.output.value = self.history.pop()", "def _reorder_series_by_idx(self, neworder, inplace=False):\n\n if inplace:\n out = self\n else:\n out = self.copy()\n\n oldorder = list(range(len(neworder)))\n for oi, ni in enumerate(neworder):\n frm = oldorder.index(ni)\n to = oi\n utils.swap_rows(out._data, frm, to)\n out._series_ids[frm], out._series_ids[to] = out._series_ids[to], out._series_ids[frm]\n # TODO: re-build series tags (tag system not yet implemented)\n oldorder[frm], oldorder[to] = oldorder[to], oldorder[frm]\n out.__renew__()\n\n return out", "def _resort(self):\n # type: () -> None\n self._fs_sequence = None", "def undo_placement():\n\tglobal balls, ramps, add_buffer\n\tif len(add_buffer) > 0:\n\t\ttemp = add_buffer.pop()\n\t\tif temp == 'ball':\n\t\t\tballs.pop()\n\t\telif temp == 'ramp':\n\t\t\tramps.pop()", "def toggleBuffer(self):\n\t\tself.currentBuffer = 1 - self.currentBuffer", "def refreshBuffer(self):\n self._tour_buffer = []", "def reset_index(self):\n self.new = []\n self.new_edges = []\n self.visible_updates = []\n self.state_updates = []\n self.visible_edge_updates = []\n\n self.index = {}\n self.next_index = 0\n self.index_edge = {}\n self.next_edge_index = 0\n\n for key in self.x:\n self.index[key] = self.next_index\n self.next_index += 1\n self.new.append(key)\n for dep in self.scheduler.tasks[key].dependencies:\n edge = (dep.key, key)\n self.index_edge[edge] = self.next_edge_index\n self.next_edge_index += 1\n self.new_edges.append(edge)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets or sets the silent status for this Session. Returns Logical Silent status.
def silent(self): return self.__silent
[ "def silent(self):\n ret = self._get_attr(\"silent\")\n return ret", "def silent(self): # bool\n return self._silent", "def silent(self):\n \n self.options[\"silent\"] = True", "def mute(self, value=None):\n if value is None:\n self._logger.info(\"Retrieving state of muting function...\")\n return \"on\" if int(self._player_info().get(\"mute\")) == 1 else \"off\"\n if not value or (isinstance(value, str) and value.lower() in ['0', 'off', 'false']):\n return self.mute_off()\n return self.mute_on()", "def mute(self, mute: 'SbBool'=1) -> \"void\":\n return _coin.SoAudioDevice_mute(self, mute)", "def mute(self):\n with self.read_lock:\n self.muted = True\n self.wrapped_stream.stop_stream()", "def sound_mode(self):\n return self._soundmode", "def _toggle_mute(self):\n # if self.room is None:\n # return\n # if not self.room.channel_speaker_permission:\n # print(\"[/] You aren't a speaker at the moment.\")\n # return\n\n if RTC:\n self.is_mute = not self.is_mute\n result = RTC.muteLocalAudioStream(self.is_mute)\n if result < 0:\n print(\"[/] Failed to toggle mute status.\")\n return\n if self.is_mute:\n print(\"[/] Microphone muted.\")\n else:\n print(\"[/] Microphone enabled. You are broadcasting.\")", "def power_status(self):\n \n if not self._connect():\n return False\n \n # Get response from sensor\n resp = self._send_comand(G.SENSOR_CONTROL.STATUS_CMD)\n return resp", "def get_silence_status(time_frame, np_freqs):\n # Silence:\n # TODO: rewrite description of calculation.\n amps_list = [a for a in time_frame['amplitudes']]\n amps_range, amps_sum, amps_avg, amps_std_dev = utils.get_list_stats(amps_list)\n if amps_avg < AMPS_AVG_MIN and amps_std_dev < TURB_AMPS_DEV_MIN:\n time_frame['silence'] = True\n else:\n time_frame['silence'] = False\n return time_frame", "def omniSnmpStatus(self):\n status = -1\n try:\n status = self.netcool.getSnmpStatus(system=self.getOrganizerName())\n status = self.convertStatus(status)\n except Exception: pass\n return status", "def get_status(self):\n status = lowlevel.SM_PATH_STATUS_PARMS()\n status.path = self.path\n\n rc = lowlevel.sm_path_status(status)\n if rc:\n raise AculabSpeechError(rc, 'sm_path_status')\n\n return status.status", "def initialmute():\r\n initial_mute = mastervol().GetMute()\r\n return initial_mute", "async def get_microsoft_status(self, ctx):\n data = await self.microsoft_services_status()\n embed = status(data)\n await ctx.send(embed=embed)", "def get_status():\n \n return db.get_db().getRoot().getS(ns.l2tpDeviceStatus, rdf.Type(ns.L2tpDeviceStatus))", "def clear_status(self):\n return self.send_cmd(SHT30.CLEAR_STATUS_CMD, None);", "def get_player_status(self):\n\n return self.isin", "def screen_status(self) -> str:\n return self.details.get('screen_status', 'unknown')", "def disable_streaming_server(self, testcase=None):\n\n self.log.debug(\"Disabling streaming server ...\")\n result = {'successful': False, 'verified': False}\n\n try:\n # edit system setup to disable streaming server\n settings = [\n ['live view enabled', 'false'],\n ['live view server', ''],\n ['live view user', ''],\n ['live view password', ''],\n ]\n result['verified'] = self.configure_vim_system_settings(settings=settings)['verified']\n\n if result['verified']:\n self.log.trace(\"Disabled streaming server.\")\n result['successful'] = True\n except BaseException, e:\n self.handle_exception(e, operation=\"disable streaming server\")\n\n # return\n if testcase is not None: testcase.processing = result['successful']\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets or sets the print_cmd status of the SyncroSim Session. Returns Logical print_cmd status.
def print_cmd(self): return self.__print_cmd
[ "def printable_status(self):\n return self._printable_status", "def _printer_status(self,printer):\n\n\t\t(stdout,stderr,status) = self._shell_command(['/usr/bin/lpstat','-p',printer],{'LANG':'C'})\n\t\tif status == 0:\n\t\t\tif ' enabled ' in stdout:\n\t\t\t\treturn 'enabled'\n\t\t\tif ' disabled ' in stdout:\n\t\t\t\treturn 'disabled'\n\t\treturn 'unknown'", "def get_device_status(self):\n self.i2c_writer.write('Status\\00')\n time.sleep(0.5) # 'Status' command requires 300ms timeout for response\n return self.read()", "def GetStatus(self):\n self.__SendMsg(\"status\")\n ##TODO: Parse the response into some struct so it can be queried later.\n\n ## \"Status\" is the only command that returns a multi\n ## line response so handle it separately.\n response = \"\"\n while(self.SocketIsReadable()):\n data = self.my_Socket.recv(1)\n if not data:\n break\n else:\n response += data.decode(\"UTF-8\")\n return response", "def power_status(self):\n \n if not self._connect():\n return False\n \n # Get response from sensor\n resp = self._send_comand(G.SENSOR_CONTROL.STATUS_CMD)\n return resp", "def status(self):\n self.lastStatus = ord(self.hardware.transfer(chr(Cmd.NOP), 1)[0])\n return self.lastStatus", "def status(self, cmd):\n\n self.actor.sendVersionKey(cmd)\n\n cmd.inform('text=\"monitors: %s\"' % self.actor.monitors)\n cmd.inform('text=\"config id=0x%08x %r\"' % (id(self.actor.actorConfig), self.actor.actorConfig.keys()))\n\n self.genPersistedKeys(cmd)\n self.actor.genInstConfigKeys(cmd)\n self.actor.metaStates.update(cmd)\n\n if 'all' in cmd.cmd.keywords:\n for c in self.actor.controllers:\n self.actor.callCommand(\"%s status\" % c)\n\n cmd.finish(self.controllerKey())", "def get_status(self) -> Status:\n with self.io.lock:\n self.io.write(b'\\x1B\\x69\\x53')\n data = self.io.read(32)\n\n if not data:\n raise IOError(\"No Response from printer\")\n\n if len(data) < 32:\n raise IOError(\"Invalid Response from printer\")\n\n return Status(data)", "def xmms2_status(self):\n self.writeCommand('xmms2_status')\n return self", "def query_output(self):\n ret = self.driver.read_status_output()\n if ret==True:\n onoff = 1\n else:\n onoff = 0\n return onoff", "def cam_status(self):\n return self.cmd_cam_status()", "def status(dev):\n dev.print_status()", "def status(self, raw=False):\n data = self.send_cmd(SHT30.STATUS_CMD, 3, read_delay_ms=20); \n\n if raw:\n return data\n\n status_register = data[0] << 8 | data[1]\n return status_register", "def query_status(self):\n info = {}\n self._write(CMD_STATUS_INFO)\n\n # I think this was supposed to be \"WORK_PERCENT\".\n match = self._readline(b\"WORK_PARSENT:(\\\\d+)\")\n info[\"percent_complete\"] = int(match.group(1))\n\n match = self._readline(b\"WORK_TIME:(\\\\d+)\")\n info[\"work_time_mins\"] = int(match.group(1))\n\n match = self._readline(b\"EST_TIME:(\\\\d+)\")\n info[\"estimated_time_mins\"] = int(match.group(1))\n\n match = self._readline(b\"ET0:(\\\\d+)\")\n info[\"extruder_temp_celsius\"] = int(match.group(1))\n\n match = self._readline(b\"BT:(\\\\d+)\")\n info[\"bed_temp_celsius\"] = int(match.group(1))\n\n # \"MCH_STATE\" appears to describe the current state of the system.\n # Values I have seen:\n # \"16\" if a setting has recently been changed\n # \"26\" if the printer is idle\n # \"27\" if the printer is printing or cooling\n self._readline(b\"MCH_STATE:(\\\\d+)\")\n\n # \"PRN_STATE\" appears to describe the progress of the print job.\n # Values I have seen:\n # \"1\" when heating the print bed\n # \"2\" in the main build phase\n # \"5\" 
when cooling after a job\n # \"7\" when lowering bed after a job\n # \"571449\" on the \"JOB CANCELLING COMPLETE\" screen (this is probably a\n # glitch, but we interpret it anyway)\n # Also, note that \"PRN_STATE\" seems to be absent when MCH_STATE is 26,\n # and is sometimes also be absent when MCH_STATE is 27.\n match = self._readline(b\"PRN_STATE:(\\\\d+)\", optional = True)\n if match is None:\n info[\"print_state\"] = \"idle\"\n else:\n table = {\n \"1\": \"heating\",\n \"2\": \"building\",\n \"5\": \"cooling\",\n \"7\": \"lowering_bed\",\n \"571449\": \"complete\"\n }\n info[\"print_state\"] = table.get(\n match.group(1),\n \"unknown({})\".format(match.group(1)))\n\n # I think 0 and 1 are the only possible values for LANG, because English\n # and Japanese are the only two choices in the printer language list.\n match = self._readline(b\"LANG:(\\\\d+)\")\n info[\"language\"] = {b'0': \"English\", b'1': \"Japanese\"}[match.group(1)]\n\n return info", "def get_chassis_status():\n\n status, ret_values = \\\n grk.run_key_u(\"Run IPMI Standard Command chassis status\")\n result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)\n\n return result", "def get_status():\n \n return db.get_db().getRoot().getS(ns.l2tpDeviceStatus, rdf.Type(ns.L2tpDeviceStatus))", "def help_status(self):\n print(help_msg.cmds['status'])", "def sendStatusKeys(self, cmd): \n \n cmd.inform('text=\"Number of AG cameras = %d\"' % self.numberOfCamera)\n for n in range(nCams):\n if self.cams[n] != None:\n if self.cams[n].isReady():\n tempstr = '%5.1f' % self.cams[n].getTemperature()\n cmd.inform('agc%d_stat=READY' % (n + 1))\n else:\n tempstr = '<%5.1f>' % self.cams[n].temp\n cmd.inform('agc%d_stat=BUSY' % (n + 1))\n cmd.inform('text=\"[%d] %s SN=%s status=%s temp=%s regions=%s bin=(%d,%d) expArea=%s\"'\n % (n + 1, self.cams[n].devname, self.cams[n].devsn,\n self.cams[n].getStatusStr(), tempstr, self.cams[n].regions,\n self.cams[n].hbin, self.cams[n].vbin, self.cams[n].expArea))\n else:\n cmd.inform('agc%d_stat=ABSENT' % (n + 1))", "def get_ovp_state(self):\r\n ovp_state = str(self.inst.query(\"VOLT:PROT:STAT?\"))\r\n return(ovp_state)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show kernel information Including 1. max threads per block, 2. active warps per MP, 3. thread block per MP, 4. usage of shared memory, 5. const memory , 6. local memory 7. registers 8. hardware occupancy 9. limitation of the hardware occupancy
def get_kernel_function_info(a, W1=0, W2=1, W3=1): import pycuda.tools as tl import pycuda.driver as dri dev = dri.Device(0) td = tl.DeviceData() if not W1: W1 = a.max_threads_per_block to = tl.OccupancyRecord(td, W1*W2*W3, a.shared_size_bytes, a.num_regs) print "***************************************" print " Function Info " print " -> max threads per block: %d / %d / %d" % \ (a.max_threads_per_block, dev.max_threads_per_block, dev.max_threads_per_multiprocessor) print " -> shared mem : %d / %d" % (a.shared_size_bytes, td.shared_memory) print " -> const mem : %d" % a.const_size_bytes print " -> local mem : %d" % a.local_size_bytes print " -> register : %d / %d" % (a.num_regs, td.registers) print " -> thread block per MP %d / %d" % \ (to.tb_per_mp, td.thread_blocks_per_mp) print " -> warps per MP %d / %d" % (to.warps_per_mp, td.warps_per_mp) print " -> occupancy %f" % to.occupancy print " -> limitation %s" % to.limited_by print " Block size : %dx%dx%d" % (W1, W2, W3) print "***************************************"
[ "def definekernel():\n time_list, volt_list=importandseparate(10)\n time_sec=makenparray(time_list)\n volt_mV=makenparray(volt_list)\n volt_mV=removeDCoffset(volt_mV)\n kernel, kernel_size=createkernel(time_sec,volt_mV)\n return kernel, kernel_size", "def _request_kernel_info(self):\n self.log.debug(\"requesting kernel info\")\n self.session.send(self.kernel_info_channel, \"kernel_info_request\")", "def list_large_kernels():\n\n sbcc_kernels = [\n NS(length=50, factors=[10, 5], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'true'}, threads_per_block=256),\n NS(length=52, factors=[13, 4], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'true'}),\n NS(length=60, factors=[6, 10], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=64, factors=[8, 8], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'false'}),\n NS(length=72, factors=[8, 3, 3], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'false'}),\n NS(length=80, factors=[10, 8], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=81, factors=[3, 3, 3, 3], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'true'}),\n NS(length=84, factors=[7, 2, 6], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'true'}),\n NS(length=96, factors=[6, 16], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=100, factors=[5, 5, 4], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'false'}, threads_per_block=100),\n NS(length=104, factors=[13, 8], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'false'}),\n NS(length=108, factors=[6, 6, 3], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'false'}),\n NS(length=112, factors=[4, 7, 4], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=128, factors=[8, 4, 4], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'true'}, threads_per_block=256),\n NS(length=160, factors=[4, 10, 4], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}, flavour='wide'),\n NS(length=168, factors=[7, 6, 4], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}, threads_per_block=128),\n # NS(length=192, factors=[6, 4, 4, 2], use_3steps_large_twd={\n # 'sp': 'false', 'dp': 'false'}),\n NS(length=200, factors=[8, 5, 5], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=208, factors=[13, 16], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=216, factors=[8, 3, 3, 3], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=224, factors=[8, 7, 4], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=240, factors=[8, 5, 6], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'}),\n NS(length=256, factors=[8, 4, 8], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'false'}, flavour='wide'),\n NS(length=336, factors=[6, 7, 8], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'})\n ]\n\n # for SBCC kernel, increase desired threads_per_block so that columns per\n # thread block is also increased. currently targeting for 16 columns\n block_width = 16\n for k in sbcc_kernels:\n k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CC'\n if not hasattr(k, 'threads_per_block'):\n k.threads_per_block = block_width * \\\n functools.reduce(mul, k.factors, 1) // min(k.factors)\n if not hasattr(k, 'length'):\n k.length = functools.reduce(lambda a, b: a * b, k.factors)\n\n # SBRC\n # still have room to improve...such as 200\n sbrc_kernels = [\n NS(length=50, factors=[10, 5], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=50, threads_per_transform=5, block_width=10),\n # SBRC64: tpb=256 poor in MI50, FIXME: need to investigate why we can't set tpt=8? 
61 128 256 fault\n NS(length=64, factors=[4, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128, block_width=16),\n NS(length=81, factors=[3, 3, 3, 3], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=81, threads_per_transform=27, block_width=9),\n NS(length=100, factors=[5, 5, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=100, threads_per_transform=25, block_width=4),\n NS(length=128, factors=[8, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128, threads_per_transform=16, block_width=8),\n # NS(length=128, factors=[8, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=256, threads_per_transform=32, block_width=8), # correctness issue\n NS(length=200, factors=[10, 10, 2], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=100, threads_per_transform=10, block_width=10),\n NS(length=256, factors=[4, 4, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=256, threads_per_transform=64, block_width=8), # tpt should be 32?\n ]\n\n # NB:\n # Technically, we could have SBCR kernels the same amount as SBCC.\n #\n # sbcr_kernels = copy.deepcopy(sbcc_kernels)\n # for k in sbcr_kernels:\n # k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CR'\n #\n # Just enable length 100 and 200 for now.\n\n sbcr_kernels = [\n NS(length=100, factors=[10, 10], use_3steps_large_twd={\n 'sp': 'true', 'dp': 'false'}, threads_per_block=100),\n NS(length=200, factors=[8, 5, 5], use_3steps_large_twd={\n 'sp': 'false', 'dp': 'false'})\n ]\n\n block_width = 16\n for k in sbcr_kernels:\n k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CR'\n if not hasattr(k, 'threads_per_block'):\n k.threads_per_block = block_width * \\\n functools.reduce(mul, k.factors, 1) // min(k.factors)\n if not hasattr(k, 'length'):\n k.length = functools.reduce(lambda a, b: a * b, k.factors)\n\n return sbcc_kernels + sbcr_kernels + sbrc_kernels", "def machine_info():\n BYTES_IN_GIG = 1073741824\n free_bytes = psutil.virtual_memory().available\n return [{\"memory\": int(free_bytes / BYTES_IN_GIG), \"cores\": multiprocessing.cpu_count(),\n \"name\": socket.gethostname()}]", "def print_device_counts():\n print('Workstation has {0:.0f} CPUs.'.format(get_device_count(device_type='CPU')))\n print('Workstation has {0:.0f} GPUs.'.format(get_device_count(device_type='GPU')))", "def summary(cpu_mem_lists, gpu_mem_lists=None):\n cpu_reports = {}\n gpu_reports = {}\n\n cpu_reports['process_name'] = cpu_mem_lists[0]['process_name']\n cpu_reports['rss(MB)'] = max([float(i['rss(MB)']) for i in cpu_mem_lists])\n cpu_reports['vms(MB)'] = max([float(i['vms(MB)']) for i in cpu_mem_lists])\n cpu_reports['shared(MB)'] = max([float(i['shared(MB)']) for i in cpu_mem_lists])\n cpu_reports['dirty(MB)'] = max([float(i['dirty(MB)']) for i in cpu_mem_lists])\n cpu_reports['cpu_usage(%)'] = max([float(i['cpu_usage(%)']) for i in cpu_mem_lists])\n\n logger.info(\"----------------------- Res info -----------------------\")\n logger.info(\"process_name: {0}, cpu rss(MB): {1}, \\\nvms(MB): {2}, shared(MB): {3}, dirty(MB): {4}, \\\ncpu_usage(%): {5} \".format(cpu_reports['process_name'],\n cpu_reports['rss(MB)'],\n cpu_reports['vms(MB)'],\n cpu_reports['shared(MB)'],\n cpu_reports['dirty(MB)'],\n cpu_reports['cpu_usage(%)']))\n\n if gpu_mem_lists:\n logger.info(\"=== gpu info was recorded ===\")\n gpu_reports['gpu_id'] = int(os.environ.get(\"CUDA_VISIBLE_DEVICES\"))\n gpu_reports['total(MB)'] = max([float(i['total(MB)']) for i in gpu_mem_lists])\n gpu_reports['free(MB)'] = max([float(i['free(MB)']) for i in gpu_mem_lists])\n 
gpu_reports['used(MB)'] = max([float(i['used(MB)']) for i in gpu_mem_lists])\n gpu_reports['gpu_utilization_rate(%)'] = max([float(i['gpu_utilization_rate(%)']) for i in gpu_mem_lists])\n gpu_reports['gpu_mem_utilization_rate(%)'] = max([float(i['gpu_mem_utilization_rate(%)']) for i in gpu_mem_lists])\n\n logger.info(\"gpu_id: {0}, total(MB): {1}, \\\nfree(MB): {2}, used(MB): {3}, gpu_utilization_rate(%): {4}, \\\ngpu_mem_utilization_rate(%): {5} \".format(gpu_reports['gpu_id'],\n gpu_reports['total(MB)'],\n gpu_reports['free(MB)'],\n gpu_reports['used(MB)'],\n gpu_reports['gpu_utilization_rate(%)'],\n gpu_reports['gpu_mem_utilization_rate(%)']))\n return cpu_reports, gpu_reports\n else:\n return cpu_reports, None", "def gpu_usage(device=device, digits=4):\n print(\n f\"GPU Usage: {round((torch.cuda.memory_allocated(device=device) / 1e9), digits)} GB\\n\"\n )", "async def systeminfo(self, ctx):\r\n\r\n\t\tres = f\"[OS Type][{sys.platform}]\"\r\n\t\tinfo = cpuinfo.get_cpu_info()\r\n\t\tres += f\"\\n[CPU][{psutil.cpu_count(logical=False)} Cores / {psutil.cpu_count()} Threads]\"\r\n\t\tres += f\"\\n[CPU Usage][%{str(psutil.cpu_percent())}]\"\r\n\t\tvmem = psutil.virtual_memory()\r\n\t\tres += f\"\\n[Memory][Total Memory: {int(vmem[0]/2**30)}GB Used: {int(vmem[0]/2**30)-int(vmem[1]/2**30)}GB(%{vmem[2]}) Available: {int(vmem[1]/2**30)}GB]\"\r\n\t\tif str(sys.platform) == 'linux': # Check Windows\r\n\t\t\tsmem = psutil.swap_memory()\r\n\t\t\tres += f\"\\n[Swap Memory][Total Swap Memory: {int(smem[0]/2**30)}GB Used: {int(smem[2]/2**30)}GB(%{smem[3]}) Available: {int(smem[2]/2**30)}GB]\"\r\n\t\t\r\n\t\tres += f\"\\n[Python Version][{sysconfig.get_python_version()}]\"\r\n\r\n\t\tINFO = f\"**{self.bot.user.name}**'s System Hardware:\\n```md\\n{res}\\n```\"\r\n\t\t\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\tdescription = INFO,\r\n\t\t\tcolour = col\r\n\t\t)\r\n\t\tawait ctx.send(embed=embed)", "def print_opencl_info():\n # REF: https://github.com/benshope/PyOpenCL-Tutorial\n\n msg = \"\\n\" + \"=\" * 60 + \"\\nOpenCL Platforms and Devices \\n\"\n # Print each platform on this computer\n for platform in cl.get_platforms():\n msg += \"=\" * 60 + \"\\n\"\n msg += \"Platform - Name: \" + platform.name + \"\\n\"\n msg += \"Platform - Vendor: \" + platform.vendor + \"\\n\"\n msg += \"Platform - Version: \" + platform.version + \"\\n\"\n msg += \"Platform - Profile: \" + platform.profile + \"\\n\"\n # Print each device per-platform\n for device in platform.get_devices():\n msg += \"\\t\" + \"-\" * 56 + \"\\n\"\n msg += \"\\tDevice - Name: \" + device.name + \"\\n\"\n msg += \"\\tDevice - Type: \" + cl.device_type.to_string(device.type) + \"\\n\"\n msg += f\"\\tDevice - Max Clock Speed: {device.max_clock_frequency} Mhz\" + \"\\n\"\n\n msg += f\"\\tDevice - Compute Units: {device.max_compute_units}\" + \"\\n\"\n msg += f\"\\tDevice - Local Memory: {device.local_mem_size / 1024.0:.0f} KB\" + \"\\n\"\n msg += f\"\\tDevice - Constant Memory: {device.max_constant_buffer_size / 1024.0:.0f} KB\" + \"\\n\"\n msg += f\"\\tDevice - Global Memory: {device.global_mem_size / 1073741824.0:.0f} GB\" + \"\\n\"\n msg += f\"\\tDevice - Max Buffer/Image Size: {device.max_mem_alloc_size / 1048576.0:.0f} MB\" + \"\\n\"\n msg += f\"\\tDevice - Max Work Group Size: {device.max_work_group_size:.0f}\" + \"\\n\"\n\n return msg", "def _gpu_info_subprocess():\n total_gpus = 0\n total_mem = 0\n try:\n 
import py3nvml.py3nvml\n py3nvml.py3nvml.nvmlInit()\n total_gpus = py3nvml.py3nvml.nvmlDeviceGetCount()\n\n import os\n cudavis = os.getenv(\"CUDA_VISIBLE_DEVICES\")\n if cudavis is not None:\n lencudavis = len(cudavis)\n if lencudavis == 0:\n total_gpus = 0\n else:\n total_gpus =\\\n min(total_gpus,\n os.getenv(\"CUDA_VISIBLE_DEVICES\").count(\",\") + 1)\n\n total_mem = \\\n min([py3nvml.py3nvml.nvmlDeviceGetMemoryInfo(\n py3nvml.py3nvml.nvmlDeviceGetHandleByIndex(i)).total for i in\n range(total_gpus)])\n except NVMLError as e:\n print(\"No GPU, setting total_gpus=0 and total_mem=0\")\n print(e)\n sys.stdout.flush()\n return total_gpus, total_mem", "def CollectSystemInfo():\n global cpu, cpuCores, cpuFreqMHz, uname\n uname = \" \".join(platform.uname())\n #print(\"KK_ uname: \", uname)\n code, cpuinfo, err = Run(['cat', '/proc/cpuinfo'])\n #print(\"KK_ cpuinfo:1 ==========================================\\n\", cpuinfo)\n cpuinfo = cpuinfo.split(\"\\n\")\n #print(\"KK_ cpuinfo:2 ==========================================\\n\", cpuinfo)\n if 'ppc64' in uname:\n # Implement grep and sed in Python...\n #print(\"KK_CollectSystemInfo_000001\")\n cpu = grep(cpuinfo, r'model')[0].split(': ')[1].replace('(R)', '').replace('(TM)', '')\n cpuCores = len(grep(cpuinfo, r'processor'))\n try:\n code, dmidecode, err = Run(['dmidecode', '--type', 'processor'])\n cpuFreqMHz = int(round(float(grep(dmidecode.split(\"\\n\"), r'Current Speed')[0].rstrip().lstrip().split(\" \")[2])))\n except:\n cpuFreqMHz = int(round(float(grep(cpuinfo, r'clock')[0].split(': ')[1][:-3])))\n else:\n #model_names = grep(cpuinfo, r'model name')\n model_names = grep(cpuinfo, 'model name') #KK_\n #print(\"KK_CollectSystemInfo_000002\", model_names)\n cpu = model_names[0].split(': ')[1].replace('(R)', '').replace('(TM)', '')\n cpuCores = len(model_names)\n #print(\"\\nKK_cpu\", cpu, \", cpuCores\", cpuCores)\n try:\n code, dmidecode, err = Run(['dmidecode', '--type', 'processor'])\n cpuFreqMHz = int(round(float(grep(dmidecode.split(\"\\n\"), r'Current Speed')[0].rstrip().lstrip().split(\" \")[2])))\n except:\n cpuFreqMHz = int(round(float(grep(cpuinfo, r'cpu MHz')[0].split(': ')[1])))\n #print(\"KK_ cpuFreqMHz: \", cpuFreqMHz)", "def main():\n print(\"CPU temp: \", str(get_cpu_temp()))\n print(\"GPU temp: \", str(get_gpu_temp()))", "def write_system_info():\n\n # get system information, and write them into the log file\n system, node, release, version, machine, processor = platform.uname()\n\n if system in ['Linux']:\n # find how many physical processers\n p = subprocess.Popen('grep \"physical id\" /proc/cpuinfo|sort|uniq|wc -l',\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n processor_number = int(p.stdout.readlines()[0])\n\n # find the model name of the processors\n p = subprocess.Popen('grep \"model name\" /proc/cpuinfo|uniq', shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n processor = '; '.join([row.decode('utf-8').split(':')[1].strip()\n for row in p.stdout.readlines()])\n\n # find how many cores\n p = subprocess.Popen('grep \"cpu cores\" /proc/cpuinfo|uniq',shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n cores = int(p.stdout.readlines()[0].decode('utf-8').split(':')[1])\n\n # get the memory\n p = subprocess.Popen('free -mh',shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n row = p.stdout.readlines()[1]\n info = row.split()\n memory = '%s (total); %s (used); %s (free)'%(info[1],info[2],info[3])\n else:\n processor_number = 0\n processor = 
processor\n cores = 0\n memory = 'Unknown'\n\n\n distribution = ' '.join(platform.dist())\n username = getpass.getuser()\n node = platform.node()\n abspath = os.path.abspath(os.curdir)\n python_version = platform.python_version()\n\n info = ['Start reduction.',\n 'Node: %s'%node,\n 'Processor: %d x %s (%d cores)'%(processor_number, processor, cores),\n 'System: %s %s %s'%(system, release, machine),\n 'Distribution: %s'%distribution,\n 'Memory: %s'%memory,\n 'Username: %s'%username,\n 'Python version: %s'%python_version,\n 'Working directory: %s'%abspath,\n ]\n separator = os.linesep + ' '\n logger.info(separator.join(info))", "def memlog(gpu=None, msg=\"\"):\n return\n import torch.cuda\n import inspect\n usage = int(torch.cuda.memory_allocated(gpu) * 1e-6)\n prev = inspect.currentframe().f_back\n fname, lineno, fun, lines, index = inspect.getframeinfo(prev)", "def init_kernel_info(self):\n timeout = self.kernel_timeout\n tic = time.time()\n self.client.hb_channel.unpause()\n msg_id = self.client.kernel_info()\n while True:\n try:\n reply = self.client.get_shell_msg(timeout=1)\n except Empty:\n if (time.time() - tic) > timeout:\n logging.error(\"Kernel didn't respond to kernel_info_request\")\n else:\n if reply['parent_header'].get('msg_id') == msg_id:\n self.kernel_info = reply['content']\n return", "def get_gpu_info(handle):\n\n def get_process_info(nv_process):\n \"\"\"Get the process information of specific pid\"\"\"\n process = {}\n ps_process = psutil.Process(pid=nv_process.pid)\n process['username'] = ps_process.username()\n # cmdline returns full path; as in `ps -o comm`, get short cmdnames.\n _cmdline = ps_process.cmdline()\n if not _cmdline: # sometimes, zombie or unknown (e.g. [kworker/8:2H])\n process['command'] = '?'\n else:\n process['command'] = os.path.basename(_cmdline[0])\n # Bytes to MBytes\n process['gpu_memory_usage'] = int(nv_process.usedGpuMemory / 1024 / 1024)\n process['pid'] = nv_process.pid\n return process\n\n def _decode(b):\n if isinstance(b, bytes):\n return b.decode() # for python3, to unicode\n return b\n\n name = _decode(N.nvmlDeviceGetName(handle))\n uuid = _decode(N.nvmlDeviceGetUUID(handle))\n\n try:\n temperature = N.nvmlDeviceGetTemperature(handle, N.NVML_TEMPERATURE_GPU)\n except N.NVMLError:\n temperature = None # Not supported\n\n try:\n memory = N.nvmlDeviceGetMemoryInfo(handle) # in Bytes\n except N.NVMLError:\n memory = None # Not supported\n\n try:\n utilization = N.nvmlDeviceGetUtilizationRates(handle)\n except N.NVMLError:\n utilization = None # Not supported\n\n try:\n power = N.nvmlDeviceGetPowerUsage(handle)\n except:\n power = None\n\n try:\n power_limit = N.nvmlDeviceGetEnforcedPowerLimit(handle)\n except:\n power_limit = None\n\n processes = []\n try:\n nv_comp_processes = N.nvmlDeviceGetComputeRunningProcesses(handle)\n except N.NVMLError:\n nv_comp_processes = None # Not supported\n try:\n nv_graphics_processes = N.nvmlDeviceGetGraphicsRunningProcesses(handle)\n except N.NVMLError:\n nv_graphics_processes = None # Not supported\n\n if nv_comp_processes is None and nv_graphics_processes is None:\n processes = None # Not supported (in both cases)\n else:\n nv_comp_processes = nv_comp_processes or []\n nv_graphics_processes = nv_graphics_processes or []\n for nv_process in (nv_comp_processes + nv_graphics_processes):\n # TODO: could be more information such as system memory usage,\n # CPU percentage, create time etc.\n try:\n process = get_process_info(nv_process)\n processes.append(process)\n except psutil.NoSuchProcess:\n # TODO: add 
some reminder for NVML broken context\n # e.g. nvidia-smi reset or reboot the system\n pass\n\n index = N.nvmlDeviceGetIndex(handle)\n gpu_info = {\n 'index': index,\n 'uuid': uuid,\n 'name': name,\n 'temperature.gpu': temperature,\n 'utilization.gpu': utilization.gpu if utilization else None,\n 'power.draw': int(power / 1000) if power is not None else None,\n 'enforced.power.limit': int(power_limit / 1000) if power_limit is not None else None,\n # Convert bytes into MBytes\n 'memory.used': int(memory.used / 1024 / 1024) if memory else None,\n 'memory.total': int(memory.total / 1024 / 1024) if memory else None,\n 'processes': processes,\n }\n return gpu_info", "def currentPerfLevel():\r\n global CoreFreq\r\n global MemoryFreq\r\n result = runBashCommand(\"rocm-smi\")\r\n core = -1\r\n mem = -1\r\n line = result.split('\\n')\r\n line = line[5].split(\" \")\r\n # Find indices of Core and Mem frequency\r\n indices = [i for i, s in enumerate(line) if 'Mhz' in s]\r\n core = line[indices[0]].replace(\"Mhz\", '')\r\n mem = line[indices[1]].replace(\"Mhz\", '')\r\n\r\n print(core + \",\" + mem)\r\n\r\n return CoreFreq.index(int(core)), MemoryFreq.index(int(mem))", "def list_small_kernels():\n\n kernels1d = [\n NS(length= 1, threads_per_block= 64, threads_per_transform= 1, factors=(1,)),\n NS(length= 2, threads_per_block= 64, threads_per_transform= 1, factors=(2,)),\n NS(length= 3, threads_per_block= 64, threads_per_transform= 1, factors=(3,)),\n NS(length= 4, threads_per_block=128, threads_per_transform= 1, factors=(4,)),\n NS(length= 5, threads_per_block=128, threads_per_transform= 1, factors=(5,)),\n NS(length= 6, threads_per_block=128, threads_per_transform= 1, factors=(6,)),\n NS(length= 7, threads_per_block= 64, threads_per_transform= 1, factors=(7,)),\n NS(length= 8, threads_per_block= 64, threads_per_transform= 4, factors=(4, 2)),\n NS(length= 9, threads_per_block= 64, threads_per_transform= 3, factors=(3, 3)),\n NS(length= 10, threads_per_block= 64, threads_per_transform= 1, factors=(10,)),\n NS(length= 11, threads_per_block=128, threads_per_transform= 1, factors=(11,)),\n NS(length= 12, threads_per_block=128, threads_per_transform= 6, factors=(6, 2)),\n NS(length= 13, threads_per_block= 64, threads_per_transform= 1, factors=(13,)),\n NS(length= 14, threads_per_block=128, threads_per_transform= 7, factors=(7, 2)),\n NS(length= 15, threads_per_block=128, threads_per_transform= 5, factors=(3, 5)),\n NS(length= 16, threads_per_block= 64, threads_per_transform= 4, factors=(4, 4)),\n NS(length= 17, threads_per_block=256, threads_per_transform= 1, factors=(17,)),\n NS(length= 18, threads_per_block= 64, threads_per_transform= 6, factors=(3, 6)),\n NS(length= 20, threads_per_block=256, threads_per_transform= 10, factors=(5, 4)),\n NS(length= 21, threads_per_block=128, threads_per_transform= 7, factors=(3, 7)),\n NS(length= 22, threads_per_block= 64, threads_per_transform= 2, factors=(11, 2)),\n NS(length= 24, threads_per_block=256, threads_per_transform= 8, factors=(8, 3)),\n NS(length= 25, threads_per_block=256, threads_per_transform= 5, factors=(5, 5)),\n NS(length= 26, threads_per_block= 64, threads_per_transform= 2, factors=(13, 2)),\n NS(length= 27, threads_per_block=256, threads_per_transform= 9, factors=(3, 3, 3)),\n NS(length= 28, threads_per_block= 64, threads_per_transform= 4, factors=(7, 4)),\n NS(length= 30, threads_per_block=128, threads_per_transform= 10, factors=(10, 3)),\n NS(length= 32, threads_per_block= 64, threads_per_transform= 16, factors=(16, 2)),\n NS(length= 36, 
threads_per_block= 64, threads_per_transform= 6, factors=(6, 6)),\n NS(length= 40, threads_per_block=128, threads_per_transform= 10, factors=(10, 4)),\n NS(length= 42, threads_per_block=256, threads_per_transform= 7, factors=(7, 6)),\n NS(length= 44, threads_per_block= 64, threads_per_transform= 4, factors=(11, 4)),\n NS(length= 45, threads_per_block=128, threads_per_transform= 15, factors=(5, 3, 3)),\n NS(length= 48, threads_per_block= 64, threads_per_transform= 16, factors=(4, 3, 4)),\n NS(length= 49, threads_per_block= 64, threads_per_transform= 7, factors=(7, 7)),\n NS(length= 50, threads_per_block=256, threads_per_transform= 10, factors=(10, 5)),\n NS(length= 52, threads_per_block= 64, threads_per_transform= 4, factors=(13, 4)),\n NS(length= 54, threads_per_block=256, threads_per_transform= 18, factors=(6, 3, 3)),\n NS(length= 56, threads_per_block=128, threads_per_transform= 8, factors=(7, 8)),\n NS(length= 60, threads_per_block= 64, threads_per_transform= 10, factors=(6, 10)),\n NS(length= 64, threads_per_block= 64, threads_per_transform= 16, factors=(4, 4, 4)),\n NS(length= 72, threads_per_block= 64, threads_per_transform= 9, factors=(8, 3, 3)),\n NS(length= 75, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 3)),\n NS(length= 80, threads_per_block= 64, threads_per_transform= 10, factors=(5, 2, 8)),\n NS(length= 81, threads_per_block=128, threads_per_transform= 27, factors=(3, 3, 3, 3)),\n NS(length= 84, threads_per_block=128, threads_per_transform= 12, factors=(7, 2, 6)),\n NS(length= 88, threads_per_block=128, threads_per_transform= 11, factors=(11, 8)),\n NS(length= 90, threads_per_block= 64, threads_per_transform= 9, factors=(3, 3, 10)),\n NS(length= 96, threads_per_block=128, threads_per_transform= 16, factors=(6, 16), half_lds=False),\n NS(length= 100, threads_per_block= 64, threads_per_transform= 10, factors=(10, 10)),\n NS(length= 104, threads_per_block= 64, threads_per_transform= 8, factors=(13, 8)),\n NS(length= 108, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 3)),\n NS(length= 112, threads_per_block=256, threads_per_transform= 16, factors=(16, 7), half_lds=False),\n NS(length= 120, threads_per_block= 64, threads_per_transform= 12, factors=(6, 10, 2)),\n NS(length= 121, threads_per_block=128, threads_per_transform= 11, factors=(11, 11)),\n NS(length= 125, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 5), half_lds=False),\n NS(length= 128, threads_per_block=256, threads_per_transform= 16, factors=(16, 8)),\n NS(length= 135, threads_per_block=128, threads_per_transform= 9, factors=(5, 3, 3, 3)),\n NS(length= 144, threads_per_block=128, threads_per_transform= 12, factors=(6, 6, 4)),\n NS(length= 150, threads_per_block= 64, threads_per_transform= 5, factors=(10, 5, 3)),\n NS(length= 160, threads_per_block=256, threads_per_transform= 16, factors=(16, 10)),\n NS(length= 162, threads_per_block=256, threads_per_transform= 27, factors=(6, 3, 3, 3)),\n NS(length= 168, threads_per_block=256, threads_per_transform= 56, factors=(8, 7, 3), half_lds=False),\n NS(length= 169, threads_per_block=256, threads_per_transform= 13, factors=(13, 13)),\n NS(length= 176, threads_per_block= 64, threads_per_transform= 16, factors=(11, 16)),\n NS(length= 180, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 3), half_lds=False),\n NS(length= 192, threads_per_block=128, threads_per_transform= 16, factors=(6, 4, 4, 2)),\n NS(length= 200, threads_per_block= 64, threads_per_transform= 20, factors=(10, 10, 2)),\n NS(length= 208, 
threads_per_block= 64, threads_per_transform= 16, factors=(13, 16)),\n NS(length= 216, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 6)),\n NS(length= 224, threads_per_block= 64, threads_per_transform= 16, factors=(7, 2, 2, 2, 2, 2)),\n NS(length= 225, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 3, 3)),\n NS(length= 240, threads_per_block=128, threads_per_transform= 48, factors=(8, 5, 6)),\n NS(length= 243, threads_per_block=256, threads_per_transform= 81, factors=(3, 3, 3, 3, 3)),\n NS(length= 250, threads_per_block=128, threads_per_transform= 25, factors=(10, 5, 5)),\n NS(length= 256, threads_per_block= 64, threads_per_transform= 64, factors=(4, 4, 4, 4)),\n NS(length= 270, threads_per_block=128, threads_per_transform= 27, factors=(10, 3, 3, 3)),\n NS(length= 272, threads_per_block=128, threads_per_transform= 17, factors=(16, 17)),\n NS(length= 288, threads_per_block=128, threads_per_transform= 24, factors=(6, 6, 4, 2)),\n NS(length= 300, threads_per_block= 64, threads_per_transform= 30, factors=(10, 10, 3)),\n NS(length= 320, threads_per_block= 64, threads_per_transform= 16, factors=(10, 4, 4, 2)),\n NS(length= 324, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 6, 3)),\n NS(length= 336, threads_per_block=128, threads_per_transform= 56, factors=(8, 7, 6)),\n NS(length= 343, threads_per_block=256, threads_per_transform= 49, factors=(7, 7, 7)),\n NS(length= 360, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6)),\n NS(length= 375, threads_per_block=128, threads_per_transform= 25, factors=(5, 5, 5, 3)),\n NS(length= 384, threads_per_block=128, threads_per_transform= 32, factors=(6, 4, 4, 4)),\n NS(length= 400, threads_per_block=128, threads_per_transform= 40, factors=(4, 10, 10)),\n NS(length= 405, threads_per_block=128, threads_per_transform= 27, factors=(5, 3, 3, 3, 3)),\n NS(length= 432, threads_per_block= 64, threads_per_transform= 27, factors=(3, 16, 3, 3)),\n NS(length= 450, threads_per_block=128, threads_per_transform= 30, factors=(10, 5, 3, 3)),\n NS(length= 480, threads_per_block= 64, threads_per_transform= 16, factors=(10, 8, 6)),\n NS(length= 486, threads_per_block=256, threads_per_transform=162, factors=(6, 3, 3, 3, 3)),\n NS(length= 500, threads_per_block=128, threads_per_transform=100, factors=(10, 5, 10)),\n NS(length= 512, threads_per_block= 64, threads_per_transform= 64, factors=(8, 8, 8)),\n NS(length= 528, threads_per_block= 64, threads_per_transform= 48, factors=(4, 4, 3, 11)),\n NS(length= 540, threads_per_block=256, threads_per_transform= 54, factors=(3, 10, 6, 3)),\n NS(length= 576, threads_per_block=128, threads_per_transform= 96, factors=(16, 6, 6)),\n NS(length= 600, threads_per_block= 64, threads_per_transform= 60, factors=(10, 6, 10)),\n NS(length= 625, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5)),\n NS(length= 640, threads_per_block=128, threads_per_transform= 64, factors=(8, 10, 8)),\n NS(length= 648, threads_per_block=256, threads_per_transform=216, factors=(8, 3, 3, 3, 3)),\n NS(length= 675, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 3)),\n NS(length= 720, threads_per_block=256, threads_per_transform=120, factors=(10, 3, 8, 3)),\n NS(length= 729, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3)),\n NS(length= 750, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 3, 5)),\n NS(length= 768, threads_per_block= 64, threads_per_transform= 48, factors=(16, 3, 16)),\n NS(length= 800, 
threads_per_block=256, threads_per_transform=160, factors=(16, 5, 10)),\n NS(length= 810, threads_per_block=128, threads_per_transform= 81, factors=(3, 10, 3, 3, 3)),\n NS(length= 864, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 16, 3)),\n NS(length= 900, threads_per_block=256, threads_per_transform= 90, factors=(10, 10, 3, 3)),\n NS(length= 960, threads_per_block=256, threads_per_transform=160, factors=(16, 10, 6), half_lds=False),\n NS(length= 972, threads_per_block=256, threads_per_transform=162, factors=(3, 6, 3, 6, 3)),\n NS(length=1000, threads_per_block=128, threads_per_transform=100, factors=(10, 10, 10)),\n NS(length=1024, threads_per_block=128, threads_per_transform=128, factors=(8, 8, 4, 4)),\n NS(length=1040, threads_per_block=256, threads_per_transform=208, factors=(13, 16, 5)),\n NS(length=1080, threads_per_block=256, threads_per_transform=108, factors=(6, 10, 6, 3)),\n NS(length=1125, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 5)),\n NS(length=1152, threads_per_block=256, threads_per_transform=144, factors=(4, 3, 8, 3, 4)),\n NS(length=1200, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 16, 3)),\n NS(length=1215, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3)),\n NS(length=1250, threads_per_block=256, threads_per_transform=250, factors=(5, 10, 5, 5)),\n NS(length=1280, threads_per_block=128, threads_per_transform= 80, factors=(16, 5, 16)),\n NS(length=1296, threads_per_block=128, threads_per_transform=108, factors=(6, 6, 6, 6)),\n NS(length=1350, threads_per_block=256, threads_per_transform=135, factors=(5, 10, 3, 3, 3)),\n NS(length=1440, threads_per_block=128, threads_per_transform= 90, factors=(10, 16, 3, 3)),\n NS(length=1458, threads_per_block=256, threads_per_transform=243, factors=(6, 3, 3, 3, 3, 3)),\n NS(length=1500, threads_per_block=256, threads_per_transform=150, factors=(5, 10, 10, 3)),\n NS(length=1536, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 6)),\n NS(length=1600, threads_per_block=256, threads_per_transform=100, factors=(10, 16, 10)),\n NS(length=1620, threads_per_block=256, threads_per_transform=162, factors=(10, 3, 3, 6, 3)),\n NS(length=1728, threads_per_block=128, threads_per_transform=108, factors=(3, 6, 6, 16)),\n NS(length=1800, threads_per_block=256, threads_per_transform=180, factors=(10, 6, 10, 3)),\n NS(length=1875, threads_per_block=256, threads_per_transform=125, factors=(5, 5, 5, 5, 3)),\n NS(length=1920, threads_per_block=256, threads_per_transform=120, factors=(10, 6, 16, 2)),\n NS(length=1944, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 8, 3)),\n NS(length=2000, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 16)),\n NS(length=2025, threads_per_block=256, threads_per_transform=135, factors=(3, 3, 5, 5, 3, 3)),\n NS(length=2048, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 8)),\n NS(length=2160, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6, 6)),\n NS(length=2187, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3, 3)),\n NS(length=2250, threads_per_block=256, threads_per_transform= 90, factors=(10, 3, 5, 3, 5)),\n NS(length=2304, threads_per_block=256, threads_per_transform=192, factors=(6, 6, 4, 4, 4), runtime_compile=True),\n NS(length=2400, threads_per_block=256, threads_per_transform=240, factors=(4, 10, 10, 6)),\n NS(length=2430, threads_per_block=256, threads_per_transform= 81, factors=(10, 3, 3, 3, 
3, 3)),\n NS(length=2500, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 10, 5)),\n NS(length=2560, threads_per_block=128, threads_per_transform=128, factors=(4, 4, 4, 10, 4)),\n NS(length=2592, threads_per_block=256, threads_per_transform=216, factors=(6, 6, 6, 6, 2)),\n NS(length=2700, threads_per_block=128, threads_per_transform= 90, factors=(3, 10, 10, 3, 3)),\n NS(length=2880, threads_per_block=256, threads_per_transform= 96, factors=(10, 6, 6, 2, 2, 2)),\n NS(length=2916, threads_per_block=256, threads_per_transform=243, factors=(6, 6, 3, 3, 3, 3)),\n NS(length=3000, threads_per_block=128, threads_per_transform=100, factors=(10, 3, 10, 10)),\n NS(length=3072, threads_per_block=256, threads_per_transform=256, factors=(6, 4, 4, 4, 4, 2)),\n NS(length=3125, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5, 5)),\n NS(length=3200, threads_per_block=256, threads_per_transform=160, factors=(10, 10, 4, 4, 2)),\n NS(length=3240, threads_per_block=128, threads_per_transform=108, factors=(3, 3, 10, 6, 6)),\n NS(length=3375, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 5, 3, 3, 3)),\n NS(length=3456, threads_per_block=256, threads_per_transform=144, factors=(6, 6, 6, 4, 4)),\n NS(length=3600, threads_per_block=256, threads_per_transform=120, factors=(10, 10, 6, 6)),\n NS(length=3645, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3, 3)),\n NS(length=3750, threads_per_block=256, threads_per_transform=125, factors=(3, 5, 5, 10, 5)),\n NS(length=3840, threads_per_block=256, threads_per_transform=128, factors=(10, 6, 2, 2, 2, 2, 2, 2)),\n NS(length=3888, threads_per_block=512, threads_per_transform=324, factors=(16, 3, 3, 3, 3, 3)),\n NS(length=4000, threads_per_block=256, threads_per_transform=200, factors=(10, 10, 10, 4)),\n NS(length=4050, threads_per_block=256, threads_per_transform=135, factors=(10, 5, 3, 3, 3, 3)),\n NS(length=4096, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 16)),\n ]\n\n kernels = [NS(**kernel.__dict__,\n scheme='CS_KERNEL_STOCKHAM',\n precision=['sp', 'dp']) for kernel in kernels1d]\n\n return kernels", "def gpu_info():\n from concurrent.futures import ProcessPoolExecutor\n with ProcessPoolExecutor(max_workers=1) as executor:\n future = executor.submit(_gpu_info_subprocess)\n res = future.result()\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace the pageable array with a pagelocked array
def get_page_locked_array(a): import pycuda.driver as drv temp_page_lock_p = drv.pagelocked_zeros_like(a, mem_flags=drv.host_alloc_flags.DEVICEMAP) if len(a.shape) == 1: temp_page_lock_p[:] = a else: temp_page_lock_p[:, :] = a assert numpy.allclose(a, temp_page_lock_p) return temp_page_lock_p
[ "def _copy_on_write(self):\n if (self._lazycopy):\n self._lazycopy = False\n pages = IntervalTree()\n lookup = dict()\n for p in self._lookup.values():\n n = p.copy()\n lookup[(p.begin, p.end)] = n\n pages.addi(n.begin, n.end, n)\n self._pages = pages\n self._lookup = lookup", "def memoryReplacement(self, pages):\r\n freeBlock = self.occupiedMemory[pages].removeReturn()\r\n self.hardDrive.append(freeBlock)\r\n freeBlock.item.pID = 0\r\n return freeBlock", "def pin_memory(self): # real signature unknown; restored from __doc__\n pass", "def rotate(self, count):\n count = count % self._n\n count *= 12\n tail = bytearray(self._data[-count:])\n head = bytearray(self._data[:-count])\n self._data[count:] = head\n self._data[:count] = tail\n del head, tail\n gc.collect()", "def _ndarray_to_readonly(arr: np.ndarray) -> np.ndarray:\n arr.flags['WRITEABLE'] = False\n return arr", "def resymbolize(self):\n\n for i, p_id in enumerate(self.state.memory._pages):\n if i % 100 == 0:\n l.info(\"%s/%s memory pages symbolized\", i, len(self.state.memory._pages))\n addr_start = p_id * self.state.memory.page_size\n length = self.state.memory.page_size\n self._resymbolize_region(self.state.memory, addr_start, length)", "def quick_sort_memory(arr):\n if len(arr) <= 1:\n return\n\n privot = arr[0]\n less = []\n equal = []\n greater = []\n for x in arr:\n if x < privot:\n less.append(x)\n elif x > privot:\n greater.append(x)\n else: # x == privot\n equal.append(x)\n quick_sort_memory(less)\n quick_sort_memory(greater)\n arr[:] = less[:] + equal[:] + greater[:]", "def numpy_array(self, data):\n self._data = data.view()\n self._data_needs_writeback = True\n data.setflags(write=False)", "def _swap(array, a, b):\n\n if len(array) and a != b:\n tmp = array[a]\n array[a] = array[b]\n array[b] = tmp", "def resize_table(self):\n nextPrime = self.get_next_prime()\n if nextPrime > -1:\n oldValues = self.values\n self.values = [None] * nextPrime\n self.count = 0\n for i in range(len(oldValues)):\n if oldValues[i] is not None and (not oldValues[i].is_empty):\n self.insert(oldValues[i].value)", "def reallocate(mem_banks, target):\n blocks = mem_banks[target]\n mem_banks[target] = 0\n num_of_banks = len(mem_banks)\n pointer = (target + 1) % num_of_banks \n while blocks > 0:\n mem_banks[pointer] += 1\n blocks -= 1\n pointer = (pointer + 1) % num_of_banks", "def _swap(self, array, index1, index2):\n\t\ttemp = array[index1]\n\t\tarray[index1] = array[index2]\n\t\tarray[index2] = temp", "def swap_in_array(arr, i, j, tmp):\n tmp[:] = arr[i][:]\n arr[i][:] = arr[j][:]\n arr[j][:] = tmp[:]", "def _map_function_on_high_bw_mem(self, site, obj, storage, read_only=False):\n alloc = self._Parallelizer._map_to(obj)\n\n if read_only is False:\n free = c.Collection([self._Parallelizer._map_update(obj),\n self._Parallelizer._map_release(obj)])\n else:\n free = self._Parallelizer._map_delete(obj)\n\n storage.update(obj, site, allocs=alloc, frees=free)", "def mapBuffer(self, offset, count, access):\n vbo_ptr = self._vbo.mapRange( offset, count, access )\n vp_array = ctypes.cast(ctypes.c_void_p(int(vbo_ptr)), ctypes.POINTER(ctypes.c_byte * self._vbo.size())).contents\n # Note: we could have returned the raw ctypes.c_byte array instead... 
see pyglet github for map/unmap classes\n array = np.frombuffer( vp_array, 'B' )\n return array", "def invalidate(self, pages):\n for p in pages:\n self.cache.invalidate(p)", "def shrink(self):\n half = int(len(self._items) / 2)\n halfArray = Array(half)\n if half > ArraySortedBag.DEFAULT_CAPACITY:\n for i in range(len(self)):\n halfArray[i] = self._items[i]\n self._items = halfArray\n else:\n pass", "def _promote(self):\n #Release all our read locks...\n self.rwlock -= self.tlocal.rcount\n while self.rwlock != 0:\n self.writers_waiting += 1\n self.writers_ok.wait()\n self.writers_waiting -= 1\n self.writer = threading.currentThread()\n self.rwlock = -1\n #Convert count of read locks to count of write locks, \n # this converts allour held read lock to write, and adds one for our new lock!\n self.wcount = self.tlocal.rcount + 1\n self.tlocal.rcount = 0", "def _grow_array(self):\n\n # Copy the current array\n olddata = copy.deepcopy(self.data)\n self.occupied = 0\n\n # Double the size of the array\n self.size = 2*self.size\n\n self.data = [None]*self.size\n\n # Re-write the data into the new array\n for contents in olddata:\n\n if contents is None:\n continue\n elif contents is (None, None, None):\n continue\n else:\n self.add_elem(contents[1], contents[2])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of dummy notes.
def _get_dummy_notes(self, count=1): return [self._get_dummy_note(i) for i in range(count)]
[ "def list_all_notes(self) -> List[str]:\n if self.current_notebook is None:\n return [\"No currently opened notebook\"]\n names = []\n for name in self.notebooks[self.current_notebook].notes:\n names.append(name)\n if names:\n return names\n return [\"There are no notes\"]", "def _get_notes(self):\n notes = self.server.get_all_notes()\n assert len(notes) > 0, 'Notes are empty.'\n return notes", "def create_notes_obj():\n notes = []\n\n return notes", "def _get_dummy_note(self, uid=0):\n nid = uuid4().hex\n return {\n \"id\": nid,\n \"created\": \"2014-10-31T10:05:00.000000\",\n \"updated\": \"2014-10-31T10:50:00.101010\",\n \"user\": \"dummy-user-id\",\n \"usage_id\": \"dummy-usage-id-\" + str(uid),\n \"course_id\": \"dummy-course-id\",\n \"text\": \"dummy note text \" + nid,\n \"quote\": \"dummy note quote\",\n \"ranges\": [\n {\n \"start\": \"/p[1]\",\n \"end\": \"/p[1]\",\n \"startOffset\": 0,\n \"endOffset\": 10,\n }\n ],\n }", "def get_notes(self):\n return {i: [str(note) for note in track.notes]\n for i, track in enumerate(self.tracks)}", "def test_notes_collection_when_no_notes(self):\n\n # Delete all notes\n self.test_cleanup()\n\n # Get default page\n response = requests.get(self._get_url(\"api/v1/annotations\"), params={\"user\": \"dummy-user-id\"})\n assert response.ok\n self._verify_pagination_info(\n response=response.json(),\n total_notes=0,\n num_pages=0,\n notes_per_page=0,\n start=0,\n current_page=1,\n next_page=None,\n previous_page=None\n )", "def notes_xml(self):\n\n if self.notes == []:\n return ''\n xml = '<Notes>\\n'\n for note in self.notes:\n xml += note\n xml += '</Notes>\\n'\n return xml", "def _list_notes(options, notes_dir):\n if options.toplevel:\n notes_dir = os.path.join(notes_dir, options.toplevel)\n\n for root, files in _walk_notes_dir(notes_dir):\n notes = [note for note in files if not note.startswith('.') and\n note.endswith(NOTES_EXT)]\n\n if not notes:\n continue\n\n print(\"%s: \" % os.path.basename(root))\n for note in notes:\n print(\" %s\" % os.path.splitext(note)[0])\n print(\"\")", "def detect_MIDI_notes(self):\n\n (framerate, sample) = wav.read(self.wav_file)\n if get_channels_no(self.wav_file) > 1:\n sample = sample.mean(axis=1)\n duration = getDuration(self.wav_file)\n midi_notes = []\n\n # Consider only files with a duration longer than 0.18 seconds.\n if duration > 0.18:\n FFT, filteredFreqs, maxFreq, magnitudes, significant_freq = self.calculateFFT(duration, framerate, sample)\n #plotPowerSpectrum(FFT, filteredFreqs, 1000)\n clusters = self.clusterFrequencies(filteredFreqs)\n averagedClusters = self.getClustersMeans(clusters)\n f0_candidates = self.getF0Candidates(averagedClusters)\n midi_notes = self.matchWithMIDINotes(f0_candidates)\n\n '''\n OCTAVE CORRECTION METHOD\n '''\n '''\n\n # Include a note with a significant magnitude:\n # if its magnitude is higher than the sum of magnitudes\n # of all other spectral peaks\n # include it in the list of detected notes and\n # remove the note that's octave lower than this one\n # if it was also detected.\n if significant_freq > 0:\n significant_midi_notes = self.matchWithMIDINotes([\n significant_freq])\n significant_midi_note = significant_midi_notes[0]\n if significant_midi_note not in midi_notes:\n midi_notes.append(significant_midi_note)\n midi_notes = self.remove_lower_octave(\n significant_midi_note, midi_notes)\n '''\n\n return midi_notes", "def get_footnotes(self):\n return [(number, self._notes[number]) for number in self._notes.keys()]", "def build_notes(self):\n for c in 
self.chords:\n \n # Generate riff sounds terrible\n for note in c.scale.generate_riff():\n self.notes.append(note)\n\n #t = random.choice([4, 8, 16, 8, 8, 8, 16])\n #for i in range(t):\n # self.notes.append(Note.from_chord(c, t))", "def filter_new_notes(entities, repo):\n notes = []\n for entity in entities:\n # Send an an email notification for new notes only\n if isinstance(entity, Note):\n if not Note.get(repo, entity.get_note_record_id()):\n notes.append(entity)\n return notes", "def _notes_from_note_texts(cls, note_texts):\n return [PDFNote.from_text(text) for text in note_texts]", "def get_all_notes(self):\n q=\"select * from note order by time desc;\"\n try:\n NoteDB.cursor.execute(q)\n notes=[]\n results=NoteDB.cursor.fetchall()\n for result in results:\n obj=Note(idt=result[0],msg=result[1],time=result[2])\n notes.append(obj)\n return notes\n except Exception as e:\n raise", "def genNotes(self):\n noteLst = [self.root]\n num = ord(self.root.letter) # ASCII value of the root letter\n\n step1 = 0\n stepinc = 0\n letStep = 2\n\n if self.qual == \"M\":\n step1 = 4 # W W\n stepinc = 3 # H W\n elif self.qual == \"m\":\n step1 = 3 # W H\n stepinc = 4 # W W\n elif self.qual == \"dim\":\n step1 = 3 # W H\n stepinc = 3 # W H\n else:\n raise \"Not a valid triad type.\"\n\n for _ in range(2):\n # The new number of piano key of the next note in the chord\n newNum = (((notes[self.root.letter + self.root.acc] + step1) - 40) % 12) + 40\n # Look in the keys and try to find a match that is either 2 or 4 letters after the root EX: A -> C# -> E\n newLets = [x for x in keys[newNum] if ord(x[0]) == (((num + letStep) - 65) % 7) + 65]\n # There will only ever be one result\n newLet = newLets[0]\n # Take the letter\n newLetter = newLet[0]\n # Take the accidental if there is one\n try:\n newAcc = newLet[1]\n except:\n newAcc = \"\"\n noteLst.append(Note(newLetter + newAcc))\n\n step1 += stepinc\n letStep *= 2\n\n return noteLst", "def get_notes(session, user): # pylint: disable=unused-argument\n\n notes = [\n permission.note\n for permission in user.permissions\n if permission.type == PermissionType.READ\n ]\n\n return notes", "def get_notes(self):\n tracks = []\n\n self.rel_notelist = None\n self.rel_poslist = None\n self.rel_durlist = None\n\n self.abs_notelist = None\n self.abs_poslist = None\n self.abs_durlist = None\n\n for track in self._pattern:\n tr = []\n tracks.append(tr)\n pos = 0\n note_idx = 0\n\n for idx, msg in enumerate(track):\n\n if isinstance(msg, midi.SetTempoEvent):\n self.bps = msg.bpm/60\n\n if isinstance(msg, midi.NoteOnEvent):\n off = self._get_note_off_event(msg.get_pitch(), track, idx)\n\n pos += msg.tick/(self._pattern.resolution*self.bps)\n duration = off.tick/(self._pattern.resolution*self.bps)\n\n tr.append((off.get_pitch(), duration, pos, note_idx))\n\n note_idx += 1\n pos += duration\n\n return tracks", "def get_notes():\n \n # empty list to contain all notes\n notes = []\n \n # get all files in midi_songs directory in the form of\n # \"midi_songs/*.mid\"\n for file in glob.glob(\"midi_songs/*.mid\"):\n \n # from file get Score produced from parse function\n midi = converter.parse(file)\n \n # notify which file is being parsed\n print(\"Parsing %s\" % file)\n\n notes_to_parse = None\n\n try: # file has instrument parts\n # partition the midi file by instruments, return that list\n # into s2\n s2 = instrument.partitionByInstrument(midi)\n \n # parses first part of midi \n # recurse() will visit every element in the stream, \n # starting from the beginning, and if any of the 
\n # subelements are also Streams, they will visit \n # every element in that Stream.\n notes_to_parse = s2.parts[0].recurse() \n \n except: # file has notes in a flat structure\n notes_to_parse = midi.flat.notes\n\n # loop through elements in notes_to_parse\n for element in notes_to_parse:\n # is element a note object?\n if isinstance(element, note.Note):\n # if so append the pitch (note) to the notes list\n notes.append(str(element.pitch))\n # is element a chord object?\n elif isinstance(element, chord.Chord):\n # if so append the chord to the notes list by joining\n # each element in normalOrder list of integer representation\n notes.append('.'.join(str(n) for n in element.normalOrder))\n \n # open 'data/notes' file for writing in binary format since we are\n # dealing with non text format\n with open('data/notes', 'wb') as filepath:\n # write notes in binary format to filepath\n pickle.dump(notes, filepath)\n # return notes list\n return notes", "def parse_notes():\n notes = []\n for note_filename in os.listdir(NOTES_DIR):\n # Parse note file\n assert note_filename.endswith(\".txt\")\n note = parse(os.path.join(NOTES_DIR, note_filename))\n assert note_filename == note[\"id\"] + \".txt\", note_filename\n notes.append(note)\n return notes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a single dummy note.
def _get_dummy_note(self, uid=0): nid = uuid4().hex return { "id": nid, "created": "2014-10-31T10:05:00.000000", "updated": "2014-10-31T10:50:00.101010", "user": "dummy-user-id", "usage_id": "dummy-usage-id-" + str(uid), "course_id": "dummy-course-id", "text": "dummy note text " + nid, "quote": "dummy note quote", "ranges": [ { "start": "/p[1]", "end": "/p[1]", "startOffset": 0, "endOffset": 10, } ], }
[ "def get_short_note(self):\n try:\n note_id = self.note_id\n return __notes__.get_short_note(note_id)\n except AttributeError:\n return ''", "def get_random_note(self) -> str:\n i = random.randint(0, len(self._config[\"notes\"]) - 1)\n return self._config[\"notes\"][i]", "def random():\n return Note(random.randrange(12))", "def get_note(self, note_id):\n return self.__get_object('notes', None, note_id)", "def test_note_get_single_by_case_id(self):\n # create case\n case = self.cm_helper.create_case()\n\n # note data\n note_data = {\n 'case_id': case.id,\n 'text': f'sample note for {__name__} test case.',\n }\n\n # create note\n note = self.cm.note(**note_data)\n note.submit()\n\n # get single note by id\n note = self.cm.note(id=note.id)\n note.get()\n\n # run assertions on returned data\n assert note.text == note_data.get('text')", "def get_note(name):\n rv = query_db('select note from entries where name = ?',\n [name], one=True)\n return rv[0] if rv else None", "def _get_dummy_notes(self, count=1):\n return [self._get_dummy_note(i) for i in range(count)]", "def get_one_note(self,idt):\n q=\"select * from note where id=%d\"%(idt)\n try:\n NoteDB.cursor.execute(q)\n result=NoteDB.cursor.fetchall()\n obj=Note(idt=result[0],msg=result[1],time=result[2])\n return obj\n except Exception as e:\n raise", "def create_note(\n self, *, label: str | None = None, note: str, **other_settings: Any\n ) -> praw.models.ModNote:\n return self.thing.subreddit.mod.notes.create(\n label=label, note=note, thing=self.thing, **other_settings\n )", "def factory(*args):\n\n noteName = None\n note = None\n desc = None\n n = None\n d = None\n\n try:\n noteName = args[0].name()\n except:\n if len(args) > 1 and isinstance(args[0], int) and isinstance(args[1], int):\n n = args[0]\n d = args[1]\n noteName = '%s/%s' % (n, d)\n elif isinstance(args[0], tuple):\n n = args[0][0]\n d = args[0][1]\n noteName = '%s/%s' % (n, d)\n else:\n noteName = str(args[0])\n n = args[0]\n\n # If we have already generated this note, use it - we're treating notes like singletons\n if noteName in Note.dNotes:\n return Note.dNotes[noteName]\n\n if isinstance(args[-1], str):\n desc = args[-1]\n else:\n desc = ''\n\n if d:\n # We've detected the second number necessary for a ratio\n note = JustNote(n, d, desc)\n else:\n note = Note(n, desc)\n\n Note.dNotes[noteName] = note\n return note", "def get_note(self):\n cmd = self._repo._repo.git\n try:\n return cmd.notes('--ref', self.NOTE_REF, 'show', self.sha)\n except GitCommandError:\n return None", "def get_note(self, id):\n response = requests.get(self.notes_url, params = {'id':id}, headers = self.headers)\n response = self.__handle_response(response)\n n = response.json()['notes'][0]\n return Note.from_json(n)", "def created_note_id(client, sample_note, token_header) -> int:\n creation_response = client.post(\n url='/notes', headers=token_header, json=sample_note\n )\n assert creation_response.ok\n\n note_id: int = creation_response.json().get('id')\n return note_id", "def midi_to_note(midi: int) -> Note:\r\n return Note(CHROMATIC[midi % 12], (midi - 12) // 12)", "def note_midi(note: Union[Note, Rest, Chord]) -> Union[int, None]:\n if(note.isRest):\n return None\n elif(note.isChord):\n return note.root().midi\n else:\n return note.pitch.midi", "def newNote(self):\n self.note_ref = str(\"note_%d\" % StickyNotes.note_id)\n StickyNotes().show()\n StickyNotes.note_id += 1", "def parse(cls, note: str) -> 'Note':\n match = note_regexp.match(note)\n if match:\n pitch = match[1]\n value = int(match[5]) 
if match[5] else NOTE_VALUES[\"whole\"]\n dots = len(match[6]) if match[6] else 0\n return Note(pitch, value, dots)\n else:\n raise ValueError(f\"Note {note} is not correct format.\")", "def note(self, msg, raw=False):\n self._msg(('' if raw else 'NOTE: ') + str(msg), self.NOTE, raw)", "def note_from_int(integer: int) -> Note:\n letter = \"AABCCDDEFFGG\"[integer % 12]\n accident = \"♮♯♮♮♯♮♯♮♮♯♮♯\"[integer % 12]\n return Note(letter_from_str(letter), accident_from_str(accident))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test search with usage IDs.
def test_search_usage_ids(self, usage_ids): url = self._get_url('api/v1/search') + usage_ids response = requests.get(url, params={ 'user': 'dummy-user-id', 'course_id': 'dummy-course-id' }) assert response.ok response = response.json() parsed = six.moves.urllib.parse.urlparse(url) query_params = six.moves.urllib.parse.parse_qs(parsed.query) query_params['usage_id'].reverse() assert len(response) == len(query_params['usage_id']) for index, usage_id in enumerate(query_params['usage_id']): assert response[index]['usage_id'] == usage_id
[ "def test_search_query(self):\n pass", "def test_ids_post_search(self):\n self._request_valid(\n \"search\",\n protocol=\"POST\",\n post_data={\n \"collections\": [self.tested_product_type],\n \"ids\": [\"foo\", \"bar\"],\n },\n )", "def test_get_search(self):\n pass", "def test_search_all_food(self):\n pass", "def test_search_custom_foods(self):\n pass", "def test_search_id_not_all_numerical_values(self):\n self.assertFalse(self.queryset.search(f\"{self.obj1.pk},Ärzte\"))", "def test_api_search_get(self):\n pass", "def test_api_v3_search_get(self):\n pass", "def test_query_ids(self):\n data_values = {\n \"object_name\": \"Program\",\n \"type\": \"values\",\n \"filters\": {\n \"expression\": {\n \"left\": \"title\",\n \"op\": {\"name\": \"~\"},\n \"right\": \"Cat ipsum\",\n },\n },\n }\n programs_values = self._get_first_result_set(data_values, \"Program\")\n\n data_ids = {\n \"object_name\": \"Program\",\n \"type\": \"ids\",\n \"filters\": {\n \"expression\": {\n \"left\": \"title\",\n \"op\": {\"name\": \"~\"},\n \"right\": \"Cat ipsum\",\n },\n },\n }\n programs_ids = self._get_first_result_set(data_ids, \"Program\")\n\n self.assertEqual(\n set(obj.get(\"id\") for obj in programs_values[\"values\"]),\n set(programs_ids[\"ids\"]),\n )", "def test_quick_search(test_db, glucose, glucose_id):\n assert glucose not in queries.quick_search(test_db,\n 'WQZGKKKJIJFFOK-UHFFFAOYSA-N')\n assert glucose in queries.quick_search(\n test_db, 'InChIKey=WQZGKKKJIJFFOK-GASJEMHNSA-N')\n assert glucose in queries.quick_search(\n test_db, \"Ccffda1b2e82fcdb0e1e710cad4d5f70df7a5d74f\")\n assert glucose in queries.quick_search(test_db, \"917030\")\n assert glucose in queries.quick_search(test_db, \"cpd00027\")\n assert glucose in queries.quick_search(test_db, 'C00031')\n assert glucose in queries.quick_search(test_db, 'Glucose')\n assert glucose_id in queries.quick_search(\n test_db, 'WQZGKKKJIJFFOK-GASJEMHNSA-N', {'_id': 1})", "def test_exists_by_id(self, _id):", "def test_view_with_search(self, staff_client, program_data):\n fin_aid_status = FinancialAidStatus.AUTO_APPROVED\n profiles = ProfileFactory.create_batch(\n 4,\n first_name=factory.Iterator(['match_name', 'x', 'y', 'z']),\n last_name=factory.Iterator(['x', 'y', 'z', 'match_name']),\n )\n FinancialAidFactory.create_batch(\n 4,\n tier_program=program_data.tier_programs[\"0k\"],\n status=fin_aid_status,\n user=factory.Iterator([p.user for p in profiles])\n )\n name_query = 'match_name'\n url = self.review_url(program_data.program.id, status=fin_aid_status, search_param=name_query)\n resp = staff_client.get(url)\n assert resp.status_code == status.HTTP_200_OK\n financial_aid_objects = resp.context_data[\"financial_aid_objects\"]\n\n # Two users should match the search term - one for first_name, one for last_name\n assert len(financial_aid_objects) == 2\n assert all(\n name_query in (fin_aid.user.profile.first_name, fin_aid.user.profile.last_name)\n for fin_aid in financial_aid_objects\n )", "def search_ALL(self, query, id, msg):\n return True", "def test_datahub_search_1(self):\n pass", "def test_search():\r\n assert Movie_Management.movie_search(\"Hulk\") == 1\r\n assert Movie_Management.movie_search(\"Godfather\") == 0", "def test_batch_search(es_testapp, wb_project, wb_institution):\n item_list = [wb_institution[\"uuid\"]]\n search_term = \"institution.uuid\"\n item_type = \"Item\"\n project = wb_project[\"@id\"]\n fields = [\"uuid\", \"project\"]\n response = CommonUtils.batch_search(\n es_testapp,\n item_list,\n search_term,\n 
item_type=item_type,\n project=project,\n fields=fields,\n )\n assert len(response) > 25\n for item in response:\n assert item[\"uuid\"]\n assert item[\"project\"][\"@id\"] == project", "def test_term_id_endpoint(self):\n for id in go_ids:\n response = test_client.get(f\"/api/ontology/term/{id}\")\n self.assertEqual(response.status_code, 200)", "def test_datahub_search_0(self):\n pass", "def test_command_search(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify the pagination information.
def _verify_pagination_info( self, response, total_notes, num_pages, notes_per_page, current_page, previous_page, next_page, start ): def get_page_value(url): """ Return page value extracted from url. """ if url is None: return None parsed = six.moves.urllib.parse.urlparse(url) query_params = six.moves.urllib.parse.parse_qs(parsed.query) page = query_params["page"][0] return page if page is None else int(page) assert response['total'] == total_notes assert response['num_pages'] == num_pages assert len(response['rows']) == notes_per_page assert response['current_page'] == current_page assert get_page_value(response['previous']) == previous_page assert get_page_value(response['next']) == next_page assert response['start'] == start
[ "def test_true_validate_pagination_args():\n\n PaginationViewUtils.validate_pagination_args(PaginationDataRepository.get_valid_pagination().GET['page_num'],\n PaginationDataRepository.get_valid_pagination().GET['page_size'])", "def test_true_get_pagination_args():\n\n PaginationViewUtils.get_pagination_args(PaginationDataRepository.get_valid_pagination())", "def test_pagination(self):\n res = self.client().get('/api/questions?page=1')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"total_exhibited_questions\"], 10)", "def test_pagination(self):\n\n indv1 = Individual.objects.create(mk='185c4b709e5446d250b4fde0e34b78a2b4fde0e3')\n indv2 = Individual.objects.create(mk='a9b403e150dd4af8953a52a4bb841051e4b705d9')\n indv3 = Individual.objects.create(mk='c6d2504fde0e34b78a185c4b709e5442d045451c')\n\n client = graphene.test.Client(schema)\n test_query = SH_INDIVIDUALS_UUID_PAGINATION % (1, 2)\n executed = client.execute(test_query,\n context_value=self.context_value)\n\n indvs = executed['data']['individuals']['entities']\n self.assertEqual(len(indvs), 2)\n\n indv = indvs[0]\n self.assertEqual(indv['mk'], indv1.mk)\n\n indv = indvs[1]\n self.assertEqual(indv['mk'], indv2.mk)\n\n pag_data = executed['data']['individuals']['pageInfo']\n self.assertEqual(len(pag_data), 8)\n self.assertEqual(pag_data['page'], 1)\n self.assertEqual(pag_data['pageSize'], 2)\n self.assertEqual(pag_data['numPages'], 2)\n self.assertTrue(pag_data['hasNext'])\n self.assertFalse(pag_data['hasPrev'])\n self.assertEqual(pag_data['startIndex'], 1)\n self.assertEqual(pag_data['endIndex'], 2)\n self.assertEqual(pag_data['totalResults'], 3)", "def test_list_pagination_meta(self):\n # create reports\n report_1 = models.Report.objects.create(customer=self.customer, start_date=date(2019, 1, 1), end_date=date(2019, 1, 31))\n models.Report.objects.create(customer=self.customer, start_date=date(2019, 2, 1), end_date=date(2019, 2, 28))\n # request\n response = self.client.get(reverse(self.view_name), {'page_size': '1'})\n repsonse_body = json.loads(response.content.decode('utf-8'))\n # test response\n self.assertIn('page', repsonse_body)\n self.assertEqual(repsonse_body['page'], 1)\n self.assertIn('page_count', repsonse_body)\n self.assertEqual(repsonse_body['page_count'], 2)\n self.assertIn('page_size', repsonse_body)\n self.assertEqual(repsonse_body['page_size'], 1)\n self.assertIn('page_next', repsonse_body)\n self.assertIn('page_previous', repsonse_body)\n self.assertIn('results_count', repsonse_body)\n self.assertEqual(repsonse_body['results_count'], 2)", "def test_pagination(self):\n for i in range(21):\n self.create_report()\n response = self._get(get_kwargs={'page': 2})\n self.assertEquals(response.status_code, 200)\n queryset, form = self._extract(response)\n self.assertEquals(queryset.count(), 21)\n page = response.context['table'].page\n self.assertEquals(page.object_list.data.count(), 1)", "def paginated(self):\n return len(self) > 1", "def test_get_paginated_questions_past_valid_page(self):\n res = self.client().get('/questions?page=100')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], 'Resource Not Found')\n pass", "def test_missing_page_num_pagination_args(self):\n\n self.assertRaises(ValidationError,\n PaginationViewUtils.get_pagination_args,\n 
PaginationDataRepository.get_missing_page_num_pagination())", "def test_len_pages(self):\n self.assertEqual(len(self.pdf.pages), 2)", "def test_determine_page(self):\n req_args = {'page': 1}\n page = determine_page(req_args)\n self.assertEquals(page, 1)", "def pages_check(pages):\n if not pages:\n pages = 1\n return pages", "def test_page_limit(self, response):\n try:\n num_entries = len(response.json()[\"data\"])\n except AttributeError:\n raise ResponseError(\"Unable to test endpoint page limit.\")\n if num_entries > self.page_limit:\n raise ResponseError(\n f\"Endpoint did not obey page limit: {num_entries} entries vs {self.page_limit} limit\"\n )\n return (\n True,\n f\"Endpoint obeyed page limit of {self.page_limit} by returning {num_entries} entries.\",\n )", "def test_page_out_of_bound(self):\n\n response = self.client().get('/questions?page=100')\n response_data = json.loads(response.data)\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response_data['success'], False)\n self.assertEqual(response_data['message'], 'Not found error')", "def ensure_paging_info_without_counts_support(self):\n return self._ensure_support({\n 'version': (7, 4, 0),\n 'label': 'optimized pagination'\n }, False)", "def test_page_size_not_set(self):\n\n client = graphene.test.Client(schema)\n test_query = SH_OPERATIONS_QUERY_PAGINATION_NO_PAGE_SIZE % 1\n executed = client.execute(test_query,\n context_value=self.context_value)\n\n pag_data = executed['data']['operations']['pageInfo']\n self.assertEqual(len(pag_data), 8)\n self.assertEqual(pag_data['page'], 1)\n self.assertEqual(pag_data['pageSize'], settings.DEFAULT_GRAPHQL_PAGE_SIZE)\n self.assertEqual(pag_data['totalResults'], 6)", "def check_nb_pages(self, data):\n try:\n s_io = StringIO(data)\n reader = pypdf.PdfReader(s_io)\n num_pages = reader.getNumPages()\n print((\"num pages: %d\" % num_pages))\n return num_pages > 2\n except PyPdfError as e:\n return False", "def test_missing_page_size_pagination_args(self):\n\n self.assertRaises(ValidationError,\n PaginationViewUtils.get_pagination_args,\n PaginationDataRepository.get_missing_page_size_pagination())", "async def _check_in_page(self, body_json, current_page):\n if current_page > self.PAGE_THRESHOLD:\n return False\n\n if self._has_business(body_json[\"data\"]):\n return True\n\n paging = body_json.get(\"paging\", {})\n if \"next\" not in paging:\n return False\n\n try:\n next_page_url = paging[\"next\"]\n with urllib.request.urlopen(next_page_url) as response:\n body = response.read()\n return await self._check_in_page(json.loads(body), current_page + 1)\n except Exception:\n raise HTTPError(500, \"Authorization failed\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return page value extracted from url.
def get_page_value(url): if url is None: return None parsed = six.moves.urllib.parse.urlparse(url) query_params = six.moves.urllib.parse.parse_qs(parsed.query) page = query_params["page"][0] return page if page is None else int(page)
[ "def parse_page_number(url):\n if '?' not in url:\n return 1\n params = url.split('?')[1].split('=')\n params = {k: v for k, v in zip(params[0::2], params[1::2])}\n if 'page' not in params:\n return 1\n return int(params['page'])", "def page_url(self, page_pk): \n self.c.execute(\"SELECT url FROM pages WHERE id=%s\", (page_pk,))\n return self.c.fetchone()[0]", "async def get_page(path: str) -> str:\n if path:\n result = pattern.findall(path)\n if result:\n return result[-1]\n return '1'", "def get_review_page_number_from_url(url : str) -> int:\n return int(\n url[url.find(\n REVIEW_PAGE_NO_URL_IDENTIFIER[1]\n ) + len(REVIEW_PAGE_NO_URL_IDENTIFIER[1]):]\n )", "def get_data_from_page(self):\n\t\tpayload = {\n\t\t\t'action': 'query',\n\t\t\t'format': 'json',\n\t\t\t'pageids': self.pageid,\n\t\t\t'prop': 'extracts',\n\t\t\t'explaintext': True,\n\t\t\t'exintro': True\n\t\t}\n\t\tresponse_page = requests.get(self._url, payload)\n\t\tresponse_page_json = response_page.json()['query']['pages']\n\t\treturn response_page_json[str(self.pageid)]['extract']", "def get_url_page(self, product):\n return product.get('url')", "def get_pages(url):\n return url.json()['size'] // 10", "def get_page_no(payload):\n page_no = payload.get('page', 1)\n try:\n page_no = int(page_no)\n except ValueError:\n page_no = 1\n if page_no < 1:\n page_no = 1\n return page_no", "def parse_code(url):\n result = urlparse(url)\n query = parse_qs(result.query)\n return query['code']", "def get_param(url, pname):\n return parse_qs(urlparse(url).query).get(pname, [u''])[0]", "def get_pagination_info(self, sel, response):\n rightmost_a = response.xpath('//div[@class=\"pagination\"]/a')[-1]\n a_text = rightmost_a.xpath('span//text()').extract()[0]\n url = response.urljoin(rightmost_a.xpath('@href').extract()[0])\n return url, a_text", "def page_from_word(word):\n ...", "def fetch_url(self, url):\n try:\n response = self.network_get(url)\n return self.tokenize_html(response)\n except:\n return None", "def get_page(span):\n span = span if isinstance(span, TemporarySpan) else span[0]\n return span.get_attrib_tokens('page')[0]", "def get_hits_on_name(name):\n # url_root is a template string that is used to build a URL.\n url_root = 'https://xtools.wmflabs.org/articleinfo/en.wikipedia.org/{}'\n response = simple_get(url_root.format(name))\n\n if response is not None:\n html = BeautifulSoup(response, 'html.parser')\n\n hit_link = [a for a in html.select('a')\n if a['href'].find('latest-60') > -1]\n\n if len(hit_link) > 0:\n # Strip commas\n link_text = hit_link[0].text.replace(',', '')\n try:\n # Convert to integer\n return int(link_text)\n except BaseException:\n log_error(\"couldn't parse {} as an `int`\".format(link_text))\n\n log_error('No pageviews found for {}'.format(name))\n return None", "def get_after_tag(url):\n parsed_url = urlparse(url)\n captured_value = parse_qs(parsed_url.query)['after'][0]\n return captured_value", "def fetch_page(name):\n\n params = {\"action\": \"parse\", \"format\": \"json\", \"page\": name}\n rv = requests.get(WIKIMEDIA_API_URL, params=params)\n if rv.status_code != 200:\n print(f\"Unexpected HTTP code: {rv.status_code}\\n{rv}\")\n return None\n\n rv.encoding = \"utf-8\"\n data = rv.json()\n try:\n body = data[\"parse\"][\"text\"][\"*\"]\n title = data[\"parse\"][\"title\"]\n except ValueError:\n print(\"Something is wrong with the server response\")\n raise\n\n return title, body", "def _find_next_url(self, page_soup: BeautifulSoup) -> str:", "def get_seamus_id_from_url(url):\n if 
url.startswith('http://www.npr.org') or url.startswith('http://npr.org'):\n url_parts = url.split('/')\n id = url_parts[-2]\n if id.isdigit():\n return id\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the paginated response of the notes API.
def test_notes_collection(self): # Without user response = requests.get(self._get_url("api/v1/annotations")) assert response.status_code == 400 # Without any pagination parameters response = requests.get(self._get_url("api/v1/annotations"), params={"user": "dummy-user-id"}) assert response.ok self._verify_pagination_info( response=response.json(), total_notes=5, num_pages=3, notes_per_page=2, start=0, current_page=1, next_page=2, previous_page=None ) # With pagination parameters response = requests.get(self._get_url("api/v1/annotations"), params={ "user": "dummy-user-id", "page": 2, "page_size": 3 }) assert response.ok self._verify_pagination_info( response=response.json(), total_notes=5, num_pages=2, notes_per_page=2, start=3, current_page=2, next_page=None, previous_page=1 )
[ "def test_notes_collection_next_previous_with_one_page(self):\n response = requests.get(self._get_url(\"api/v1/annotations\"), params={\n \"user\": \"dummy-user-id\",\n \"page_size\": 10\n })\n\n assert response.ok\n self._verify_pagination_info(\n response=response.json(),\n total_notes=5,\n num_pages=1,\n notes_per_page=5,\n start=0,\n current_page=1,\n next_page=None,\n previous_page=None\n )", "def test_pagination(self):\n res = self.client().get('/api/questions?page=1')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"total_exhibited_questions\"], 10)", "def test_list_pagination_meta(self):\n # create reports\n report_1 = models.Report.objects.create(customer=self.customer, start_date=date(2019, 1, 1), end_date=date(2019, 1, 31))\n models.Report.objects.create(customer=self.customer, start_date=date(2019, 2, 1), end_date=date(2019, 2, 28))\n # request\n response = self.client.get(reverse(self.view_name), {'page_size': '1'})\n repsonse_body = json.loads(response.content.decode('utf-8'))\n # test response\n self.assertIn('page', repsonse_body)\n self.assertEqual(repsonse_body['page'], 1)\n self.assertIn('page_count', repsonse_body)\n self.assertEqual(repsonse_body['page_count'], 2)\n self.assertIn('page_size', repsonse_body)\n self.assertEqual(repsonse_body['page_size'], 1)\n self.assertIn('page_next', repsonse_body)\n self.assertIn('page_previous', repsonse_body)\n self.assertIn('results_count', repsonse_body)\n self.assertEqual(repsonse_body['results_count'], 2)", "def test_get_questions_paginated(self):\n res = self.client().get('/api/questions?page=2')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(len(data['categories']), 6)\n self.assertEqual(data['total_questions'], 19)\n # since total amount of questions is 19, questions per page = 10 =>\n # there are 9 questions on page 2\n self.assertEqual(len(data['questions']), 9)\n self.assertEqual(data['questions'][0]['id'], 15)", "def test_pages_list(self):\n\n url = reverse('page-list')\n response = self.client.get(url, format='json')\n\n self.maxDiff = None\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Page.objects.count(), 3)\n self.assertEqual(json.loads(response.content), {'count': 3, 'next': None, 'previous': None, 'results': [\n {'title': 'API Page 1', 'url': 'http://testserver/pages/1/'},\n {'title': 'API Page 2', 'url': 'http://testserver/pages/2/'},\n {'title': 'API Page 3', 'url': 'http://testserver/pages/3/'}]})", "def test_get_paginated_questions(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n pass", "def test_can_paginate_items(self):\n page_size = 50 # from settings.py, rest framework settings\n self.as_user(self.rw_user)\n for i in range(0, int(page_size * 2.5)):\n response = self.client.post(\n \"/submissions\", make_submission(f\"https://localhost/?{i}\")\n )\n\n # first page\n self.as_user(self.rw_user) # same user\n response = self.client.get(\"/submissions?ordering=-date_created\", follow=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(response.data[\"count\"], page_size * 2.5)\n self.assertIn(f\"limit={page_size}\", 
response.data[\"next\"])\n self.assertIn(f\"offset={page_size}\", response.data[\"next\"])\n self.assertEqual(len(response.data[\"results\"]), page_size)\n first = int(page_size * 2.5) - 1\n self.assertEqual(\n response.data[\"results\"][0][\"target_url\"],\n f\"https://localhost/?{first}\",\n )\n\n # second page\n response = self.client.get(response.data[\"next\"], follow=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(response.data[\"count\"], page_size * 2.5)\n self.assertIn(f\"limit={page_size}\", response.data[\"next\"])\n self.assertIn(f\"offset={page_size * 2}\", response.data[\"next\"])\n self.assertEqual(len(response.data[\"results\"]), page_size)\n first = first - page_size # is in reverse order\n self.assertEqual(\n response.data[\"results\"][0][\"target_url\"],\n f\"https://localhost/?{first}\",\n )\n\n # third and last page\n response = self.client.get(response.data[\"next\"], follow=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(response.data[\"count\"], page_size * 2.5)\n self.assertIsNone(response.data[\"next\"])\n self.assertEqual(len(response.data[\"results\"]), page_size * 0.5)\n first = first - page_size\n self.assertEqual(\n response.data[\"results\"][0][\"target_url\"],\n f\"https://localhost/?{first}\",\n )", "def test_pagination(self):\n for i in range(21):\n self.create_report()\n response = self._get(get_kwargs={'page': 2})\n self.assertEquals(response.status_code, 200)\n queryset, form = self._extract(response)\n self.assertEquals(queryset.count(), 21)\n page = response.context['table'].page\n self.assertEquals(page.object_list.data.count(), 1)", "def test_19_api_can_offset_the_number_of_documents(self):\n res = self.client.get('/documents?offset=1')\n assert json.loads(res.content)['rows'][0]['id'] == 1", "def test_18_api_can_limit_the_number_of_documents(self):\n res = self.client.get('/documents?limit=1')\n data = json.loads(res.content)\n assert len(data['rows']) == 1\n assert data['rows'][0]['id'] == 2", "def _verify_pagination_info(\n self,\n response,\n total_notes,\n num_pages,\n notes_per_page,\n current_page,\n previous_page,\n next_page,\n start\n ):\n def get_page_value(url):\n \"\"\"\n Return page value extracted from url.\n \"\"\"\n if url is None:\n return None\n\n parsed = six.moves.urllib.parse.urlparse(url)\n query_params = six.moves.urllib.parse.parse_qs(parsed.query)\n\n page = query_params[\"page\"][0]\n return page if page is None else int(page)\n\n assert response['total'] == total_notes\n assert response['num_pages'] == num_pages\n assert len(response['rows']) == notes_per_page\n assert response['current_page'] == current_page\n assert get_page_value(response['previous']) == previous_page\n assert get_page_value(response['next']) == next_page\n assert response['start'] == start", "def test_get_paginated_books(self):\n \n res = self.client().get('/books')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_books'])\n self.assertTrue(len(data['books']))", "def test_get_paginated_questions_past_valid_page(self):\n res = self.client().get('/questions?page=100')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], 'Resource Not Found')\n pass", "def _get_paginated_response(self, notes, page_num, page_size):\n 
start = (page_num - 1) * page_size\n end = start + page_size\n total_notes = len(notes)\n url_path = \"http://{server_address}:{port}{path}\".format(\n server_address=self.client_address[0],\n port=self.server.port,\n path=self.path_only\n )\n\n next_url = None if end >= total_notes else self._get_next_prev_url(\n url_path, self.get_params, page_num + 1, page_size\n )\n prev_url = None if page_num == 1 else self._get_next_prev_url(\n url_path, self.get_params, page_num - 1, page_size)\n\n # Get notes from range\n notes = deepcopy(notes[start:end])\n\n paginated_response = {\n 'total': total_notes,\n 'num_pages': int(ceil(float(total_notes) / page_size)),\n 'current_page': page_num,\n 'rows': notes,\n 'next': next_url,\n 'start': start,\n 'previous': prev_url\n }\n\n return paginated_response", "def test_pagination(self):\n make(self.model, bandalias__extra=1)\n response = self.get_response(self.changelist_path)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context['cl'].paginator.count, 2)", "def test_single_page(self):\n\n url = reverse('page-detail', kwargs={'pk': 4})\n response = self.client.get(url, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(json.loads(response.content), {'title': 'API Page 1',\n 'items': [{'title': 'audio 1', 'bitrate': 100, 'counter': 0},\n {'title': 'video 1', 'video_file': 'video1.mkv',\n 'srt_file': 'srt.srt', 'counter': 0},\n {'title': 'text 1', 'content': 'Long text content',\n 'counter': 0}]})", "def test_multiple_page_response_query():\n WORKSPACE = 'Rally'\n rally = Rally(server=RALLY, apikey=APIKEY, \n workspace=WORKSPACE, project=ORG_LEVEL_PROJECT)\n \"\"\"\n response = rally.get('Story', fetch='ObjectID,FormattedID,Name', pagesize=100, limit=1500, projectScopeDown=True)\n count = 0\n for ix, story in enumerate(response):\n count += 1\n\n assert response.resultCount > 1000\n assert count <= response.resultCount\n assert count == 1500\n\n response = rally.get('Story', fetch='ObjectID,FormattedID,Name', pagesize=200, limit=11500, start=500, projectScopeDown=True)\n stories = [story for story in response]\n\n assert response.resultCount > 11000\n assert len(stories) <= response.resultCount\n assert len(stories) == 11500\n assert response.startIndex == 11900\n \"\"\"\n\n response = rally.get('Story', fetch='ObjectID,FormattedID,Name', pagesize=1000, projectScopeDown=True)\n count = 0\n for istory in response:\n count += 1\n\n assert response.resultCount > 15000\n assert count == response.resultCount", "def test_product_pager():\n restApp = TestApp(webservice.app.wsgifunc(*[]))\n req = restApp.get('/products/?page=2')\n product_list = json.loads(req.body)\n\n assert len(product_list) == 10", "def test_get_multiple_pages_lro(self, client):\n from azure.mgmt.core.polling.arm_polling import ARMPolling\n poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0))\n pager = poller.result()\n\n items = list(pager)\n\n assert len(items) == 10\n assert items[0].properties.id == 1\n assert items[1].properties.id == 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test next and previous URLs of the paginated response of the notes API when the number of pages is 1
def test_notes_collection_next_previous_with_one_page(self):
    response = requests.get(self._get_url("api/v1/annotations"), params={
        "user": "dummy-user-id",
        "page_size": 10
    })

    assert response.ok
    self._verify_pagination_info(
        response=response.json(),
        total_notes=5,
        num_pages=1,
        notes_per_page=5,
        start=0,
        current_page=1,
        next_page=None,
        previous_page=None
    )
[ "def test_next_prev(self, client, blog_posts):\n article = blog_posts[\"article\"]\n feature = blog_posts[\"project_feature\"]\n announcement = blog_posts[\"announcement\"]\n # feature is oldest post; should have no prev and only next\n response = client.get(feature.get_url())\n assert response.context[\"previous\"] == None\n assert response.context[\"next\"] == article\n # article is in the middle; should have prev and next\n response = client.get(article.get_url())\n assert response.context[\"previous\"] == feature\n assert response.context[\"next\"] == announcement\n # announcement is newest; should have prev but no next\n response = client.get(announcement.get_url())\n assert response.context[\"previous\"] == article\n assert response.context[\"next\"] == None", "def _verify_pagination_info(\n self,\n response,\n total_notes,\n num_pages,\n notes_per_page,\n current_page,\n previous_page,\n next_page,\n start\n ):\n def get_page_value(url):\n \"\"\"\n Return page value extracted from url.\n \"\"\"\n if url is None:\n return None\n\n parsed = six.moves.urllib.parse.urlparse(url)\n query_params = six.moves.urllib.parse.parse_qs(parsed.query)\n\n page = query_params[\"page\"][0]\n return page if page is None else int(page)\n\n assert response['total'] == total_notes\n assert response['num_pages'] == num_pages\n assert len(response['rows']) == notes_per_page\n assert response['current_page'] == current_page\n assert get_page_value(response['previous']) == previous_page\n assert get_page_value(response['next']) == next_page\n assert response['start'] == start", "def construct_pagination_urls(request, course_id, api_next_url, api_previous_url):\n def lms_url(url):\n \"\"\"\n Create lms url from api url.\n \"\"\"\n if url is None:\n return None\n\n keys = ('page', 'page_size', 'text')\n parsed = urlparse(url)\n query_params = parse_qs(parsed.query)\n\n encoded_query_params = urlencode({key: query_params.get(key)[0] for key in keys if key in query_params})\n return f\"{request.build_absolute_uri(base_url)}?{encoded_query_params}\"\n\n base_url = reverse(\"notes\", kwargs={\"course_id\": course_id})\n next_url = lms_url(api_next_url)\n previous_url = lms_url(api_previous_url)\n\n return next_url, previous_url", "def test_next_prev(self, client, blog_posts):\n announcement = blog_posts[\"announcement\"]\n feature = blog_posts[\"project_feature\"]\n article = blog_posts[\"article\"]\n response = client.get(article.get_url())\n assertContains(\n response,\n '<a rel=\"prev\" href=\"%s\">%s</a>' % (feature.get_url(), feature.title),\n html=True,\n )\n assertContains(\n response,\n '<a rel=\"next\" href=\"%s\">%s</a>'\n % (announcement.get_url(), announcement.title),\n html=True,\n )", "def test_api_bucketlist_next_and_previous_page_links(self):\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist1),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist2),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist3),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps(self.bucketlist4),\n headers={\"Authorization\": \"Bearer \" + 
self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps({\"name\":\"Learn Piano\"}),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n self.client().post('/v1/api/bucketlists/', data=json.dumps({\"name\": \"Learn Guitar\"}),\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n get_response = self.client().get('/v1/api/bucketlists/?start=1&limit=3',\n headers={\"Authorization\": \"Bearer \" + self.access_token['access_token'],\n \"Content-Type\": \"application/json\"})\n\n data = json.loads(get_response.data)\n\n self.assertEqual(get_response.status_code, 200)\n self.assertIn(data['next'], '/v1/api/bucketlists/?start=4&limit=3', \"Next page link not provided\")\n self.assertIn(data['previous'], '', 'Previous link should be empty for start of 1')", "def testNavigationGoIntegration(self):\n \n self.assert_(self.pageLen >= 5, \"Failed crawling more than 5 pages in %s.\" % gUrl )\n \n self.pageLen = 5\n \n iterResultPages = []\n nextResultPages = []\n previousResultPages = []\n stepResultPages = [None]*self.pageLen\n \n \n for i in range(self.pageLen):\n nextResultPages.append(self.crawler.get_page_info())\n if i < self.pageLen-1:\n self.crawler.go_next()\n \n for i in range(self.pageLen):\n previousResultPages.insert(0, self.crawler.get_page_info())\n if i < self.pageLen-1:\n self.crawler.go_previous()\n \n # get page 1, 3, 5, 4, 2\n self.crawler.go_recent()\n stepResultPages[0] = self.crawler.get_page_info()\n self.crawler.go_next(2)\n stepResultPages[2] = self.crawler.get_page_info()\n self.crawler.go_next(2)\n stepResultPages[4] = self.crawler.get_page_info()\n self.crawler.go_previous()\n stepResultPages[3] = self.crawler.get_page_info()\n self.crawler.go_previous(2)\n stepResultPages[1] = self.crawler.get_page_info()\n \n i = 0\n for page in self.crawler:\n iterResultPages.append(page)\n i += 1\n if i==self.pageLen:\n break\n \n # check result #\n for i in range(self.pageLen):\n self.assert_(stepResultPages[i].url == iterResultPages[i].url == \n nextResultPages[i].url == previousResultPages[i].url)\n self.assert_(stepResultPages[i].imageUrls == iterResultPages[i].imageUrls == \n nextResultPages[i].imageUrls == previousResultPages[i].imageUrls)", "def test_can_paginate_items(self):\n page_size = 50 # from settings.py, rest framework settings\n self.as_user(self.rw_user)\n for i in range(0, int(page_size * 2.5)):\n response = self.client.post(\n \"/submissions\", make_submission(f\"https://localhost/?{i}\")\n )\n\n # first page\n self.as_user(self.rw_user) # same user\n response = self.client.get(\"/submissions?ordering=-date_created\", follow=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(response.data[\"count\"], page_size * 2.5)\n self.assertIn(f\"limit={page_size}\", response.data[\"next\"])\n self.assertIn(f\"offset={page_size}\", response.data[\"next\"])\n self.assertEqual(len(response.data[\"results\"]), page_size)\n first = int(page_size * 2.5) - 1\n self.assertEqual(\n response.data[\"results\"][0][\"target_url\"],\n f\"https://localhost/?{first}\",\n )\n\n # second page\n response = self.client.get(response.data[\"next\"], follow=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(response.data[\"count\"], page_size * 2.5)\n 
self.assertIn(f\"limit={page_size}\", response.data[\"next\"])\n self.assertIn(f\"offset={page_size * 2}\", response.data[\"next\"])\n self.assertEqual(len(response.data[\"results\"]), page_size)\n first = first - page_size # is in reverse order\n self.assertEqual(\n response.data[\"results\"][0][\"target_url\"],\n f\"https://localhost/?{first}\",\n )\n\n # third and last page\n response = self.client.get(response.data[\"next\"], follow=True)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)\n self.assertEqual(response.data[\"count\"], page_size * 2.5)\n self.assertIsNone(response.data[\"next\"])\n self.assertEqual(len(response.data[\"results\"]), page_size * 0.5)\n first = first - page_size\n self.assertEqual(\n response.data[\"results\"][0][\"target_url\"],\n f\"https://localhost/?{first}\",\n )", "def test_list_pagination_meta(self):\n # create reports\n report_1 = models.Report.objects.create(customer=self.customer, start_date=date(2019, 1, 1), end_date=date(2019, 1, 31))\n models.Report.objects.create(customer=self.customer, start_date=date(2019, 2, 1), end_date=date(2019, 2, 28))\n # request\n response = self.client.get(reverse(self.view_name), {'page_size': '1'})\n repsonse_body = json.loads(response.content.decode('utf-8'))\n # test response\n self.assertIn('page', repsonse_body)\n self.assertEqual(repsonse_body['page'], 1)\n self.assertIn('page_count', repsonse_body)\n self.assertEqual(repsonse_body['page_count'], 2)\n self.assertIn('page_size', repsonse_body)\n self.assertEqual(repsonse_body['page_size'], 1)\n self.assertIn('page_next', repsonse_body)\n self.assertIn('page_previous', repsonse_body)\n self.assertIn('results_count', repsonse_body)\n self.assertEqual(repsonse_body['results_count'], 2)", "def _next_url(self, response):\n return response.links.get(\"page-next\", {}).get(\"url\", None)", "def next_url():\n for k, v in pages_status.items():\n if v == 0:\n return k\n # Uncomment this when setting live for whole site\n # else:\n # return False\n input('No more pages to scan')", "def test_pages_list(self):\n\n url = reverse('page-list')\n response = self.client.get(url, format='json')\n\n self.maxDiff = None\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Page.objects.count(), 3)\n self.assertEqual(json.loads(response.content), {'count': 3, 'next': None, 'previous': None, 'results': [\n {'title': 'API Page 1', 'url': 'http://testserver/pages/1/'},\n {'title': 'API Page 2', 'url': 'http://testserver/pages/2/'},\n {'title': 'API Page 3', 'url': 'http://testserver/pages/3/'}]})", "def test_get_paginated_questions_past_valid_page(self):\n res = self.client().get('/questions?page=100')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], 'Resource Not Found')\n pass", "def test_prevPage(self):\n self.positionController.nextPage()\n self.positionController.prevPage()\n\n assert self.positionController.startIndex == 0\n assert self.positionController.arePrev == False\n assert self.positionController.areMore == True\n assert self.positionController.page == 0\n assert self.positionController.pageNumber == 1\n assert self.positionController.currentPageItems == ['Item0', 'Item1', 'Item2', 'Item3',\n 'Item4']", "def test_list_properties_specific_page_as_common(self):\n expected_data = {\n 'count': 4,\n 'next': 'http://testserver/api/properties?page=4&page_size=1',\n 'previous': 
'http://testserver/api/properties?page=2&page_size=1',\n 'results': [\n self.all_results[2]\n ]\n }\n params = {'page_size': '1', 'page': 3}\n response = self.client.get(\n '/api/properties', params, **self.common_headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, expected_data)", "def test_task_get_tasks_pagination(client):\n Task.create_task(str(uuid4()))\n Task.create_task(str(uuid4()))\n Task.create_task(str(uuid4()))\n\n tasks = Task.get_tasks(page=1, per_page=1)\n expect(tasks.total).to_equal(3)\n expect(tasks.pages).to_equal(3)\n expect(tasks.items).to_length(1)\n\n expect(tasks.has_next).to_be_true()\n expect(tasks.has_prev).to_be_false()", "def testCommentNext(self):\n a = Article.objects.get(pk=1)\n data = self.getValidData(a)\n response = self.client.post(\"/post/\", data)\n location = response[\"Location\"]\n match = post_redirect_re.match(location)\n self.assertTrue(match != None, \"Unexpected redirect location: %s\" % location)\n\n data[\"next\"] = \"/somewhere/else/\"\n data[\"comment\"] = \"This is another comment\"\n response = self.client.post(\"/post/\", data)\n location = response[\"Location\"]\n match = re.search(r\"^/somewhere/else/\\?c=\\d+$\", location)\n self.assertTrue(match != None, \"Unexpected redirect location: %s\" % location)\n\n data[\"next\"] = \"http://badserver/somewhere/else/\"\n data[\"comment\"] = \"This is another comment with an unsafe next url\"\n response = self.client.post(\"/post/\", data)\n location = response[\"Location\"]\n match = post_redirect_re.match(location)\n self.assertTrue(match != None, \"Unsafe redirection to: %s\" % location)", "def testCommentNext(self):\n a = Article.objects.get(pk=1)\n data = self.getValidData(a)\n response = self.client.post(\"/post/\", data)\n self.assertRedirects(\n response,\n '/posted/?c=%s' % Comment.objects.latest('id').pk,\n fetch_redirect_response=False,\n )\n data[\"next\"] = \"/somewhere/else/\"\n data[\"comment\"] = \"This is another comment\"\n response = self.client.post(\"/post/\", data)\n self.assertRedirects(\n response,\n '/somewhere/else/?c=%s' % Comment.objects.latest('id').pk,\n fetch_redirect_response=False,\n )\n data[\"next\"] = \"http://badserver/somewhere/else/\"\n data[\"comment\"] = \"This is another comment with an unsafe next url\"\n response = self.client.post(\"/post/\", data)\n self.assertRedirects(\n response,\n '/posted/?c=%s' % Comment.objects.latest('id').pk,\n fetch_redirect_response=False,\n )", "def test_pagination(self):\n res = self.client().get('/api/questions?page=1')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"total_exhibited_questions\"], 10)", "def _next_page(self, tags):\r\n selector = self._selectors(\"next\")\r\n next_page = self._get_tag_item(tags.select_one(selector), \"href\")\r\n url = (self._base_url + next_page) if next_page else None\r\n return {\"url\": url, \"data\": None}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the paginated response of the notes API when no notes are present
def test_notes_collection_when_no_notes(self):
    # Delete all notes
    self.test_cleanup()

    # Get default page
    response = requests.get(self._get_url("api/v1/annotations"), params={"user": "dummy-user-id"})
    assert response.ok
    self._verify_pagination_info(
        response=response.json(),
        total_notes=0,
        num_pages=0,
        notes_per_page=0,
        start=0,
        current_page=1,
        next_page=None,
        previous_page=None
    )
[ "def test_notes_collection(self):\n\n # Without user\n response = requests.get(self._get_url(\"api/v1/annotations\"))\n assert response.status_code == 400\n\n # Without any pagination parameters\n response = requests.get(self._get_url(\"api/v1/annotations\"), params={\"user\": \"dummy-user-id\"})\n\n assert response.ok\n self._verify_pagination_info(\n response=response.json(),\n total_notes=5,\n num_pages=3,\n notes_per_page=2,\n start=0,\n current_page=1,\n next_page=2,\n previous_page=None\n )\n\n # With pagination parameters\n response = requests.get(self._get_url(\"api/v1/annotations\"), params={\n \"user\": \"dummy-user-id\",\n \"page\": 2,\n \"page_size\": 3\n })\n\n assert response.ok\n self._verify_pagination_info(\n response=response.json(),\n total_notes=5,\n num_pages=2,\n notes_per_page=2,\n start=3,\n current_page=2,\n next_page=None,\n previous_page=1\n )", "def test_notes_collection_next_previous_with_one_page(self):\n response = requests.get(self._get_url(\"api/v1/annotations\"), params={\n \"user\": \"dummy-user-id\",\n \"page_size\": 10\n })\n\n assert response.ok\n self._verify_pagination_info(\n response=response.json(),\n total_notes=5,\n num_pages=1,\n notes_per_page=5,\n start=0,\n current_page=1,\n next_page=None,\n previous_page=None\n )", "def _verify_pagination_info(\n self,\n response,\n total_notes,\n num_pages,\n notes_per_page,\n current_page,\n previous_page,\n next_page,\n start\n ):\n def get_page_value(url):\n \"\"\"\n Return page value extracted from url.\n \"\"\"\n if url is None:\n return None\n\n parsed = six.moves.urllib.parse.urlparse(url)\n query_params = six.moves.urllib.parse.parse_qs(parsed.query)\n\n page = query_params[\"page\"][0]\n return page if page is None else int(page)\n\n assert response['total'] == total_notes\n assert response['num_pages'] == num_pages\n assert len(response['rows']) == notes_per_page\n assert response['current_page'] == current_page\n assert get_page_value(response['previous']) == previous_page\n assert get_page_value(response['next']) == next_page\n assert response['start'] == start", "def test_get_paginated_questions_past_valid_page(self):\n res = self.client().get('/questions?page=100')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['error'], 404)\n self.assertEqual(data['message'], 'Resource Not Found')\n pass", "def test_no_page_querystring_skips_pagination(self):\n url = reverse('v0:enterprise-learner-completed-courses-list',\n kwargs={'enterprise_id': self.enterprise_id})\n url += '?no_page=true'\n expected_result = [{'completed_courses': 1, 'user_email': 'test@example.com'}]\n response = self.client.get(url)\n assert response.status_code == status.HTTP_200_OK\n result = response.json()\n\n # without pagination results are a list, not dict so we assert the data type and length\n assert isinstance(result, list)\n assert len(result) == 1\n assert result == expected_result", "def test_pagination(self):\n res = self.client().get('/api/questions?page=1')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[\"success\"], True)\n self.assertEqual(data[\"total_exhibited_questions\"], 10)", "def test_emptyResponse(self):\n self.assertWellFormedResponse({\"todoItems\": []})", "def test_19_api_can_offset_the_number_of_documents(self):\n res = self.client.get('/documents?offset=1')\n assert json.loads(res.content)['rows'][0]['id'] == 1", "def test_page_out_of_bound(self):\n\n response 
= self.client().get('/questions?page=100')\n response_data = json.loads(response.data)\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response_data['success'], False)\n self.assertEqual(response_data['message'], 'Not found error')", "def test_404_get_questions_beyond_valid_page(self):\n res = self.client().get('/api/questions?page=1000')\n # res = self.client().get('/books?page=1', json={'rating': 1})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'resource not found')", "def test_index_view_with_no_photos(self):\n response = self.client.get(reverse('photosite:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No photos match the chosen criteria.\")\n self.assertQuerysetEqual(response.context['photo_list'], [])", "def test_ner_docs_404(self):\n params = {'doc_ids': [999, 1111, 300]}\n response = self.client.post(self.url, params)\n print(response.json())\n assert response.status_code == 404, \"No documents matched should give 404\"", "def test_no_donation_posts(self): \n response = self.client.get(reverse('donations:donation_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"No Donation Requests have been posted yet\")\n self.assertQuerysetEqual(response.context['donation_list'], [])", "def test_404_requesting_beyond_valid_pagination(self):\n \n res = self.client().get('/books?page=1000', json={'rating': 1})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'Not Found')", "def _get_paginated_response(self, notes, page_num, page_size):\n start = (page_num - 1) * page_size\n end = start + page_size\n total_notes = len(notes)\n url_path = \"http://{server_address}:{port}{path}\".format(\n server_address=self.client_address[0],\n port=self.server.port,\n path=self.path_only\n )\n\n next_url = None if end >= total_notes else self._get_next_prev_url(\n url_path, self.get_params, page_num + 1, page_size\n )\n prev_url = None if page_num == 1 else self._get_next_prev_url(\n url_path, self.get_params, page_num - 1, page_size)\n\n # Get notes from range\n notes = deepcopy(notes[start:end])\n\n paginated_response = {\n 'total': total_notes,\n 'num_pages': int(ceil(float(total_notes) / page_size)),\n 'current_page': page_num,\n 'rows': notes,\n 'next': next_url,\n 'start': start,\n 'previous': prev_url\n }\n\n return paginated_response", "def test_traverse_notfound(self):\n content = self.api.traverse('nowhere')\n self.assertEqual(content, None)", "def test_get_questions_with_invalid_page(self):\n response = self.client().get('/questions?page=1000')\n data = json.loads(response.data)\n\n self.assertEqual(response.status_code, HTTP_STATUS.NOT_FOUND)\n self.assertEqual(data.get('success'), False)\n self.assertEqual(\n data.get('message'),\n ERROR_MESSAGES[HTTP_STATUS.NOT_FOUND]\n )", "def test_no_posts(self):\n blog = self.client.get(reverse('blog:list'))\n self.assertEqual(blog.status_code, 200)\n self.assertContains(blog, 'No posts have been written yet!')\n self.assertQuerysetEqual(blog.context['posts'], [])", "def test_get_paginated_questions(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n 
self.assertTrue(data['categories'])\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of notes from the stub EdxNotes service.
def _get_notes(self):
    notes = self.server.get_all_notes()
    assert len(notes) > 0, 'Notes are empty.'
    return notes
[ "def get_notes(self) -> TodoistNotesResponse:\n api = self._get_api()\n return TodoistNotesResponse(api.state['notes'])", "def get_all_notes(self):\n q=\"select * from note order by time desc;\"\n try:\n NoteDB.cursor.execute(q)\n notes=[]\n results=NoteDB.cursor.fetchall()\n for result in results:\n obj=Note(idt=result[0],msg=result[1],time=result[2])\n notes.append(obj)\n return notes\n except Exception as e:\n raise", "def create_notes_obj():\n notes = []\n\n return notes", "def get_notes(self, filter_expired=True):\n return Note.get_by_person_record_id(\n self.subdomain, self.record_id, filter_expired=filter_expired)", "def list_all_notes(self) -> List[str]:\n if self.current_notebook is None:\n return [\"No currently opened notebook\"]\n names = []\n for name in self.notebooks[self.current_notebook].notes:\n names.append(name)\n if names:\n return names\n return [\"There are no notes\"]", "def notes(self):\n\n notes = self.object.note_set.all().order_by('-creation_date')\n form = NoteForm()\n\n return {'notes': notes, 'form': form}", "def get_notes(limit):\n ret = []\n sort_params = [(\"time\", pymongo.DESCENDING)]\n\n # Get all notes\n for note in notes_collection.find(sort=sort_params, limit=limit):\n note['_id'] = str(note.get('_id'))\n ret.append(note)\n\n # Return jsonified array of notes\n return json.dumps(ret)", "def get_project_notes(self) -> TodoistProjectNotesResponse:\n api = self._get_api()\n return TodoistProjectNotesResponse(api.state['project_notes'])", "def get_notes_by_user(user_id: int, db: Session = Depends(get_db)):\n return crud.get_all_notes_by_user(db=db, user_id=user_id)", "def get_notes_for_user(uuid):\n user: 'User' = services.users.get_by_uuid(uuid)\n\n notes = services.notes.get_notes_for_user(user)\n\n return jsonify([note.serialize() for note in notes])", "def _list_notes(options, notes_dir):\n if options.toplevel:\n notes_dir = os.path.join(notes_dir, options.toplevel)\n\n for root, files in _walk_notes_dir(notes_dir):\n notes = [note for note in files if not note.startswith('.') and\n note.endswith(NOTES_EXT)]\n\n if not notes:\n continue\n\n print(\"%s: \" % os.path.basename(root))\n for note in notes:\n print(\" %s\" % os.path.splitext(note)[0])\n print(\"\")", "def get_note_comments(self, note_id):\n return self.__get_object('notes', ['comments'], note_id, 'comments')", "def get_queryset(self):\n \n notes = self.kwargs['pk']\n \n return note.objects.filter(body=notes)", "def get_notes(session, user): # pylint: disable=unused-argument\n\n notes = [\n permission.note\n for permission in user.permissions\n if permission.type == PermissionType.READ\n ]\n\n return notes", "def get_course_notes(self, courseid: int, userid: int = 0) -> CourseNotes:\n res = self.moodle.post('core_notes_get_course_notes',\n courseid=courseid,\n userid=userid)\n return from_dict(CourseNotes, res)", "def get_notes(self):\n return {i: [str(note) for note in track.notes]\n for i, track in enumerate(self.tracks)}", "def parse_notes():\n notes = []\n for note_filename in os.listdir(NOTES_DIR):\n # Parse note file\n assert note_filename.endswith(\".txt\")\n note = parse(os.path.join(NOTES_DIR, note_filename))\n assert note_filename == note[\"id\"] + \".txt\", note_filename\n notes.append(note)\n return notes", "def test_notes_collection_when_no_notes(self):\n\n # Delete all notes\n self.test_cleanup()\n\n # Get default page\n response = requests.get(self._get_url(\"api/v1/annotations\"), params={\"user\": \"dummy-user-id\"})\n assert response.ok\n self._verify_pagination_info(\n 
response=response.json(),\n total_notes=0,\n num_pages=0,\n notes_per_page=0,\n start=0,\n current_page=1,\n next_page=None,\n previous_page=None\n )", "def notes_xml(self):\n\n if self.notes == []:\n return ''\n xml = '<Notes>\\n'\n for note in self.notes:\n xml += note\n xml += '</Notes>\\n'\n return xml" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writing .changes file. The .changes file is a file with a special format that is maintained by RMDupdaterAddin.
def write_changes_file(changes_string, filename):
    filename += '.changes'
    with open(filename, 'wb') as changes_file:
        changes_file.write(changes_string.encode('UTF-8'))
[ "def write_tchanges_file(tchanges_string, filename):\n filename += '.tchanges'\n with open(filename, 'wb') as tchanges_file:\n tchanges_file.write(tchanges_string.encode('UTF-8'))", "def log_diffs_to_file(latest_file_path, latest_file_ms, track_index, message_index):\n with open(os.path.join(os.path.dirname(latest_file_path), \"changes.gitbit\"), \"a\") as changes_log_file:\n changes_log_file.write(str(track_index) + \":\" + str(message_index) + \":\" + str(latest_file_ms) + \"\\n\")", "def save_tm3(self, path):\n with open(path, 'w') as f:\n f.write('')\n with open(path, 'a') as f:\n for entry in self.entries:\n line = '\\t'.join([str(e) for e in entry]) + '\\n'\n f.write(line)", "def write_to_file(self):\n\n filename = self.entries[0].timestamp + \" workout log.txt\"\n with open(filename, mode=\"w\") as file:\n file.write(str(self) + \"\\n\")", "def write_xml_changes(self, outfile):\n raise NotImplementedError", "def edit_transaction_history(created_transaction):\n with open(\"transactions.txt\", \"r\") as file:\n transaction_list = file.readlines()\n\n transaction_list[-1] += \"\\n\"\n transaction_list.append(created_transaction)\n transactions = \"\".join(transaction_list)\n\n with open(\"transactions.txt\", \"w\") as file:\n file.write(transactions)\n\n return \"Transaction recorded.\"", "def write_mtime(self):\n data = {\"name\": str(self.source),\n \"mtime\": self.source.stat().st_mtime}\n with self.target_mtime_path.open(\"w\") as f:\n json.dump(data, f)\n return True", "def joDumps(self):\n # Output to file. Will evolve.\n filename = \"myFlatOptions.py\"\n try:\n # Open file stream\n file = open(filename, \"w\")\n except IOError:\n #exception()\n self._msgpieces = [\"There was an error writing to %s\" % filename]\n self._printMsg()\n sys.exit()\n \n for change in JOT._changesTrace:\n newline = \"\"\n joLine = change.traceback[-2][3]\n if type(change.property.name) is str:\n propName = change.property.owner.name()+\".\"+change.property.name\n else:\n propName = change.property.name()\n \n if propName == \"ApplicationMgr\": propName = \"theApp\"\n try:\n value = change.property.properties()[change.attribute]\n except:\n #exception()\n value = change.value\n if joLine:\n # There is indeed a recorded property change.\n # Do not report setattr changes though\n if \"setattr\" not in joLine:\n # Tried different more simple solutions.\n # Unfortunately they do not cover all possible cases\n if type(change.value) != str:\n # the property value should be changed thusly\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n else:\n newline = '%s.%s = \"%s\"\\n' % (propName,\n change.attribute,\n change.value)\n \n # Sequences can be tricky as developers play with them.\n # Preserve \"+=\" if possible, otherwise keep above general case.\n if joLine.find(\"+=\")>0:\n # and sequence is complete\n if joLine.rfind(']')+1 == len(joLine) :\n newline = joLine + \"\\n\"\n # cover local variable computations\n if newline.find(\"%\")>0:\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n \n # Some property names are bogus: contain \"::\".\n # Make no sense, hence get the actual line:\n if propName.find(\"::\")>0:\n newline = joLine + \"\\n\"\n \n # Very rare but happens: missing line but property\n # has a tracedbacked change anyway\n else:\n if type(change.value) != str:\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n else:\n newline = '%s.%s = \"%s\"\\n' % (propName,\n change.attribute,\n change.value)\n \n # Define locally named 
properties as Algs/Svcs.\n # Only first time and for main Properties only (not \"prop.prop\" Svcs/Algs)\n if propName.find(\".\")>0:\n propName = propName[:propName.find(\".\")]\n if propName.find(\"::\")>0:\n propName = propName[propName.find(\"::\")+2:]\n # and there are non-pythonic names as well????? ::\n if not self._dclSvcAlg.has_key(propName):\n if type(change.property) is iAlgorithm:\n dcl_as = ' = Algorithm(\"%s\")\\n' % propName\n doDefine(dcl_as)\n elif type(change.property) is iService:\n dcl_as = ' = Service(\"%s\")\\n' % propName\n doDefine(dcl_as)\n\n def doDefine(as): \n propdef = self._dclSvcAlg.setdefault(propName,as)\n declaration = propName + propdef\n # Output local property definition\n self._msgpieces.append(declaration)\n file.write(declaration)\n # actual lines - debug only\n #actline = \"#DEBUG %s at line %d\\n\" % (change.traceback[-2][0] , change.traceback[-2][1])\n #file.write(actline)\n\n # Output configuration change\n self._msgpieces.append(newline)\n file.write(newline)\n \n \n self._printMsg()\n file.close()", "def write_file(self, new_text, filename, old_text, encoding=None):\r\n try:\r\n f = _open_with_encoding(filename, \"w\", encoding=encoding)\r\n except os.error as err:\r\n self.log_error(\"Can't create %s: %s\", filename, err)\r\n return\r\n try:\r\n f.write(_to_system_newlines(new_text))\r\n except os.error as err:\r\n self.log_error(\"Can't write %s: %s\", filename, err)\r\n finally:\r\n f.close()\r\n self.log_debug(\"Wrote changes to %s\", filename)\r\n self.wrote = True", "def store_tolerance(tol_fnm,chg_txt,add_txt,del_txt,err_fp):\n try:\n tol_fp = open(tol_fnm,\"w\")\n toldiff_files.save_tolerances(tol_fp,chg_txt,add_txt,del_txt,err_fp)\n tol_fp.close()\n except IOError, e:\n (errno,errmsg) = e\n try:\n err_fp.write(\"toldiff: I/O error encountered attempting to write: \")\n err_fp.write(tol_fnm)\n err_fp.write(\"\\n\")\n err_fp.write(\"toldiff: I/O error message: \")\n err_fp.write(errmsg)\n err_fp.write(\"\\n\")\n except IOError, e:\n pass\n sys.exit(30)", "def write_to_file(self, data):", "def tidy_texfile(texfile, tex_changes, new_texfile=None):\n\n thetex = open(texfile, 'r').read()\n \n # Add strings\n if 'addstrs' in tex_changes:\n alladdstrs = [a + '\\n' for a in tex_changes['addstrs']]\n thetex = alladdstrs + thetex\n\n # Replace strings\n if 'replacestrs' in tex_changes:\n for r in tex_changes['replacestrs']: thetex = thetex.replace(r[0],r[1])\n \n open(new_texfile,'w').writelines(thetex)", "def _timestamp_file(self, action):\n fname = self._action_fname(action)\n mode = \"w\"\n if file_exists(fname) and not self.fo:\n mode = \"a\"\n with open(fname, mode) as out_handle:\n out_handle.write(\"{}\\n\".format(datetime.datetime.now().isoformat()))", "def write_file(entry):\n\n # If both of these are false, then need to generate a new file name for this. 
Don't update the entry because\n # content hasn't been defined for it yet.\n if entry.file is None:\n file_path = log_file.generate_file_name(entry)\n overwrite = False\n else:\n file_path = entry.file\n overwrite = True\n\n # Write out the file to the entry's file\n post = frontmatter.Post(entry.content, **entry.metadata)\n\n # If the entry already has a file, then we are going to overwrite the content\n log_directory, file_path = log_file.insert_file(entry.date, file_path, frontmatter.dumps(post), overwrite)\n\n # Update the entry with the new file path\n entry = log_file.rebuild_entry(entry, file=log_directory / file_path)\n\n return entry", "def save(self):\r\n with open(self._filename, 'w') as f:\r\n pytoml.dump(f, self._collapse(self._toml))", "def _writeTreatments(self, fout):\n fout.write(\"*TREATMENTS\\r\\n\")\n fout.write(\" 5 1 0 0 140 kg N as urea(2/3 18 D\\r\\n\")", "def save(self):\n self.path.write_text(toml.dumps(self.tomldoc))", "def create_modified_file(self):\n file_name = os.path.join(self.dir, str(uuid.uuid4()))\n # create the file\n with open(file_name, \"wb\") as file_handler:\n file_handler.write(b\"\\0\")\n\n st = os.stat(file_name)\n access_time = st[ST_ATIME]\n modified_time = st[ST_MTIME]\n\n os.utime(file_name, (access_time, modified_time + (4 * 3600)))", "def save_to_file(self, filename, tc_data, earliest_time=None, latest_time=None, delay=0, notify=False):\n pdu = self._prepare_cltu_pdu(tc_data, earliest_time, latest_time, delay, notify)\n\n with open(filename, \"wb\") as f:\n f.write(self.encode_pdu(pdu))\n\n ait.core.log.info('Saved TC Data to {}.'.format(filename))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writing .tchanges file. The .tchanges file is a file with a special format that is maintained by RMDupdaterAddin.
def write_tchanges_file(tchanges_string, filename):
    filename += '.tchanges'
    with open(filename, 'wb') as tchanges_file:
        tchanges_file.write(tchanges_string.encode('UTF-8'))
[ "def write_changes_file(changes_string, filename):\n filename += '.changes'\n with open(filename, 'wb') as changes_file:\n changes_file.write(changes_string.encode('UTF-8'))", "def log_diffs_to_file(latest_file_path, latest_file_ms, track_index, message_index):\n with open(os.path.join(os.path.dirname(latest_file_path), \"changes.gitbit\"), \"a\") as changes_log_file:\n changes_log_file.write(str(track_index) + \":\" + str(message_index) + \":\" + str(latest_file_ms) + \"\\n\")", "def write_to_file(self):\n\n filename = self.entries[0].timestamp + \" workout log.txt\"\n with open(filename, mode=\"w\") as file:\n file.write(str(self) + \"\\n\")", "def save_tm3(self, path):\n with open(path, 'w') as f:\n f.write('')\n with open(path, 'a') as f:\n for entry in self.entries:\n line = '\\t'.join([str(e) for e in entry]) + '\\n'\n f.write(line)", "def write_mtime(self):\n data = {\"name\": str(self.source),\n \"mtime\": self.source.stat().st_mtime}\n with self.target_mtime_path.open(\"w\") as f:\n json.dump(data, f)\n return True", "def edit_transaction_history(created_transaction):\n with open(\"transactions.txt\", \"r\") as file:\n transaction_list = file.readlines()\n\n transaction_list[-1] += \"\\n\"\n transaction_list.append(created_transaction)\n transactions = \"\".join(transaction_list)\n\n with open(\"transactions.txt\", \"w\") as file:\n file.write(transactions)\n\n return \"Transaction recorded.\"", "def write_xml_changes(self, outfile):\n raise NotImplementedError", "def _timestamp_file(self, action):\n fname = self._action_fname(action)\n mode = \"w\"\n if file_exists(fname) and not self.fo:\n mode = \"a\"\n with open(fname, mode) as out_handle:\n out_handle.write(\"{}\\n\".format(datetime.datetime.now().isoformat()))", "def write_to_file(self, data):", "def write_file(entry):\n\n # If both of these are false, then need to generate a new file name for this. 
Don't update the entry because\n # content hasn't been defined for it yet.\n if entry.file is None:\n file_path = log_file.generate_file_name(entry)\n overwrite = False\n else:\n file_path = entry.file\n overwrite = True\n\n # Write out the file to the entry's file\n post = frontmatter.Post(entry.content, **entry.metadata)\n\n # If the entry already has a file, then we are going to overwrite the content\n log_directory, file_path = log_file.insert_file(entry.date, file_path, frontmatter.dumps(post), overwrite)\n\n # Update the entry with the new file path\n entry = log_file.rebuild_entry(entry, file=log_directory / file_path)\n\n return entry", "def store_tolerance(tol_fnm,chg_txt,add_txt,del_txt,err_fp):\n try:\n tol_fp = open(tol_fnm,\"w\")\n toldiff_files.save_tolerances(tol_fp,chg_txt,add_txt,del_txt,err_fp)\n tol_fp.close()\n except IOError, e:\n (errno,errmsg) = e\n try:\n err_fp.write(\"toldiff: I/O error encountered attempting to write: \")\n err_fp.write(tol_fnm)\n err_fp.write(\"\\n\")\n err_fp.write(\"toldiff: I/O error message: \")\n err_fp.write(errmsg)\n err_fp.write(\"\\n\")\n except IOError, e:\n pass\n sys.exit(30)", "def save(self):\r\n with open(self._filename, 'w') as f:\r\n pytoml.dump(f, self._collapse(self._toml))", "def save_to_file(self, filename, tc_data, earliest_time=None, latest_time=None, delay=0, notify=False):\n pdu = self._prepare_cltu_pdu(tc_data, earliest_time, latest_time, delay, notify)\n\n with open(filename, \"wb\") as f:\n f.write(self.encode_pdu(pdu))\n\n ait.core.log.info('Saved TC Data to {}.'.format(filename))", "def create_modified_file(self):\n file_name = os.path.join(self.dir, str(uuid.uuid4()))\n # create the file\n with open(file_name, \"wb\") as file_handler:\n file_handler.write(b\"\\0\")\n\n st = os.stat(file_name)\n access_time = st[ST_ATIME]\n modified_time = st[ST_MTIME]\n\n os.utime(file_name, (access_time, modified_time + (4 * 3600)))", "def writeTrades(trade_factory, filename):\n # TODO-implement\n pass", "def write_file(self, new_text, filename, old_text, encoding=None):\r\n try:\r\n f = _open_with_encoding(filename, \"w\", encoding=encoding)\r\n except os.error as err:\r\n self.log_error(\"Can't create %s: %s\", filename, err)\r\n return\r\n try:\r\n f.write(_to_system_newlines(new_text))\r\n except os.error as err:\r\n self.log_error(\"Can't write %s: %s\", filename, err)\r\n finally:\r\n f.close()\r\n self.log_debug(\"Wrote changes to %s\", filename)\r\n self.wrote = True", "def joDumps(self):\n # Output to file. 
Will evolve.\n filename = \"myFlatOptions.py\"\n try:\n # Open file stream\n file = open(filename, \"w\")\n except IOError:\n #exception()\n self._msgpieces = [\"There was an error writing to %s\" % filename]\n self._printMsg()\n sys.exit()\n \n for change in JOT._changesTrace:\n newline = \"\"\n joLine = change.traceback[-2][3]\n if type(change.property.name) is str:\n propName = change.property.owner.name()+\".\"+change.property.name\n else:\n propName = change.property.name()\n \n if propName == \"ApplicationMgr\": propName = \"theApp\"\n try:\n value = change.property.properties()[change.attribute]\n except:\n #exception()\n value = change.value\n if joLine:\n # There is indeed a recorded property change.\n # Do not report setattr changes though\n if \"setattr\" not in joLine:\n # Tried different more simple solutions.\n # Unfortunately they do not cover all possible cases\n if type(change.value) != str:\n # the property value should be changed thusly\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n else:\n newline = '%s.%s = \"%s\"\\n' % (propName,\n change.attribute,\n change.value)\n \n # Sequences can be tricky as developers play with them.\n # Preserve \"+=\" if possible, otherwise keep above general case.\n if joLine.find(\"+=\")>0:\n # and sequence is complete\n if joLine.rfind(']')+1 == len(joLine) :\n newline = joLine + \"\\n\"\n # cover local variable computations\n if newline.find(\"%\")>0:\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n \n # Some property names are bogus: contain \"::\".\n # Make no sense, hence get the actual line:\n if propName.find(\"::\")>0:\n newline = joLine + \"\\n\"\n \n # Very rare but happens: missing line but property\n # has a tracedbacked change anyway\n else:\n if type(change.value) != str:\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n else:\n newline = '%s.%s = \"%s\"\\n' % (propName,\n change.attribute,\n change.value)\n \n # Define locally named properties as Algs/Svcs.\n # Only first time and for main Properties only (not \"prop.prop\" Svcs/Algs)\n if propName.find(\".\")>0:\n propName = propName[:propName.find(\".\")]\n if propName.find(\"::\")>0:\n propName = propName[propName.find(\"::\")+2:]\n # and there are non-pythonic names as well????? ::\n if not self._dclSvcAlg.has_key(propName):\n if type(change.property) is iAlgorithm:\n dcl_as = ' = Algorithm(\"%s\")\\n' % propName\n doDefine(dcl_as)\n elif type(change.property) is iService:\n dcl_as = ' = Service(\"%s\")\\n' % propName\n doDefine(dcl_as)\n\n def doDefine(as): \n propdef = self._dclSvcAlg.setdefault(propName,as)\n declaration = propName + propdef\n # Output local property definition\n self._msgpieces.append(declaration)\n file.write(declaration)\n # actual lines - debug only\n #actline = \"#DEBUG %s at line %d\\n\" % (change.traceback[-2][0] , change.traceback[-2][1])\n #file.write(actline)\n\n # Output configuration change\n self._msgpieces.append(newline)\n file.write(newline)\n \n \n self._printMsg()\n file.close()", "def _writerecordsv1(self, records):\n f = self._repo.vfs(self.statepathv1, 'wb')\n irecords = iter(records)\n lrecords = next(irecords)\n assert lrecords[0] == RECORD_LOCAL\n f.write(hex(self._local) + '\\n')\n for rtype, data in irecords:\n if rtype == RECORD_MERGED:\n f.write('%s\\n' % _droponode(data))\n f.close()", "def export_as_text_file(forecast_report, new_file_path):\r\n\r\n with open(new_file_path, 'w+') as txt_file:\r\n txt_file.write(forecast_report)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for a pending or processing instance that matches the requested dates.
def pending_instance_exists(self, start_date, end_date):
    if self.instance is not None:
        # This is an update and does not need to check for existence.
        return
    queryset = self.queryset.filter(
        status__in=(DataExportRequest.PENDING, DataExportRequest.PROCESSING),
        start_date=start_date,
        end_date=end_date,
    )
    return queryset.exists()
[ "def _check_date(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n if act.start_date and act.expiration_date:\n if self.get_date(act.start_date) > self.get_date(act.expiration_date):\n raise osv.except_osv(_(''), _(\"Start Date Must Be Less Than Expiration Date!\"))\n\n if act.date and act.start_date:\n if self.get_datetime(act.date) > self.get_date(act.start_date):\n raise osv.except_osv(_(''), _(\"Request Date Must Be Less Than Start Date!\"))\n return True", "def _check_date(self, cr, uid, ids):\n for deleg in self.browse(cr, uid, ids):\n if deleg.dismissal_date <= deleg.employee_id.first_employement_date:\n return False\n return True", "def __contains__(self, date):\n return self._first_day <= date <= self._last_day", "def check_dates(self, cr, uid, ids, context=None): \n exp = self.read(cr, uid, ids[0], ['violation_date', 'decision_date'])\n if exp['violation_date'] and exp['decision_date']:\n if exp['violation_date'] > exp['decision_date']:\n return False\n return True", "def resolved_between(self, start_date, end_date):\n for transaction in self.transactions:\n if (transaction['transactionType'] == 'status' and\n transaction['newValue'] == 'resolved'):\n timestamp = int(transaction['dateCreated'])\n transaction_date = datetime.fromtimestamp(timestamp)\n return (\n transaction_date >= start_date and\n transaction_date < end_date\n )\n return False", "def check_dates3(self, cr, uid, ids, context=None):\n exp = self.read(cr, uid, ids[0], ['violation_date', 'start_date'])\n if exp['violation_date'] and exp['start_date']:\n if exp['violation_date'] > exp['start_date']:\n return False\n return True", "def is_period_valid(choosed_car, new_from, new_to, reservation_to_miss=None):\n\n # Creates a list of tuples with dates of reservations\n dates_of_reservations = []\n for reservation in Reservation.get_reservations(choosed_car):\n if reservation == reservation_to_miss:\n continue\n else:\n dates_of_reservations.append((reservation.date_from, reservation.date_to))\n\n # Converts string into datetime object if needed\n new_from = datetime.strptime(new_from, '%Y-%m-%dT%H:%M:%S%z') if not isinstance(new_from,\n datetime) else new_from\n new_to = datetime.strptime(new_to, '%Y-%m-%dT%H:%M:%S%z') if not isinstance(new_to, datetime) else new_to\n\n if choosed_car.date_of_next_technical_examination >= new_to.date():\n if new_from <= new_to:\n if dates_of_reservations:\n for reservation in dates_of_reservations:\n if any(map(lambda x: reservation[0] <= x <= reservation[1], (new_from, new_to))) or (\n new_from <= reservation[0] and new_to >= reservation[1]):\n return False\n return True\n else:\n return True\n else:\n return False\n else:\n return False", "def is_busy(self, day, start, end):\n busy = False\n for task in self._schedule:\n if task.day == day and (task.start < start < task.end or task.start < end < task.end or start < task.start < end or start < task.end < end):\n busy = True\n return busy", "def exists(self, initdate, enddate):\n return self.queue.exists(initdate, enddate)", "def CheckDate(self, date): # ............................. 
Event.CheckDate\n # Check if this is the correct type\n if type(date) != dt.date:\n if type(date) == dt.datetime:\n date = date.date()\n else:\n logging.error(\"Invalid date object.\")\n return False\n \n # Check assuming no repeats \n if self.dtStart.date() == date:\n return True\n elif self.dtStart.date() > date:\n return False\n \n # Check if this event repeats\n r = self.rrule # Just keeps things simple\n if r:\n # Is this date in the excluded dates?\n if self.exdate and date in self.exdate:\n print(date)\n return False\n if \"UNTIL\" in r.keys() and r[\"UNTIL\"].date() < date:\n return False\n if \"FREQ\" in r.keys() and r[\"FREQ\"] == \"WEEKLY\":\n if \"BYDAY\" in r.keys():\n weekday = {\"MO\":0, \"TU\":1, \"WE\":2, \"TH\":3, \"FR\":4}.get(\n r[\"BYDAY\"].strip())\n return weekday == date.weekday()\n return False", "def test_pending_at_date(self, fake_billed_org):\n\n BillFactory(\n generated_by=fake_billed_org.billed_by,\n period_start=weeks_ago(7)(),\n period_end=weeks_ago(6)(),\n )\n\n # There is already a bill for this period\n assert not list(BilledOrganization.pending_for_current_period(weeks_ago(6)()))\n\n # It has been 6 weeks since the last bill here\n pending = [org for org in BilledOrganization.pending_for_current_period()]\n assert fake_billed_org in pending", "def check_event_dates(draft):\n ret = []\n for event in DesignerEvent.objects.filter(draft=draft):\n if not __is_in_rounds(event.event_date):\n ret.append(Error(message=\"Event date isn't in a round\", action=event))\n if not __is_in_challenge(event.event_date):\n ret.append(Error(message=\"Event date isn't in the challenge\", action=event))\n return ret", "def validation_compare_dates_in_range(self, date1, date2):\n try:\n end_date = datetime.datetime.strptime(date2, settings.DATE_FORMAT)\n date = datetime.datetime.strptime(date1, settings.DATE_FORMAT)\n delta = end_date - date\n if delta > datetime.timedelta(0):\n return True\n except Exception, e:\n # Not datetime or improper datetime values given. 
Rising no exception.\n log.error('Search validation exception: %s' % e)\n pass\n return False", "def instance_exists(self):\n requested_start = datetime.combine(date.today(), self.send_time)\n instance_set = self.instances.filter(requested_start=requested_start)\n if instance_set.exists():\n return True\n return False", "def is_valid(self):\n return (self.date_begin < self.date_end and 0 < self.hours < 12 and\n (self.date_end - self.date_begin).days < 1)", "def _check_period(self, cr, uid, ids):\n \n lines = self.browse(cr, uid, ids)\n for l in lines:\n \n # if a line's period is entierly before \\\n #the budget's period or entierly after it, \\\n #the line's period does not overlay the budget's period\n if (l.period_id.date_start < l.budget_version_id.budget_id.start_date \\\n and l.period_id.date_stop < l.budget_version_id.budget_id.start_date) \\\n or (l.period_id.date_start > l.budget_version_id.budget_id.end_date \\\n and l.period_id.date_stop > l.budget_version_id.budget_id.end_date):\n return False\n \n return True", "def find_available_dates(self, number_of_dates):\n\t\tavailable_dates = []\n\t\trule = rrule.rrule(self.repeat_period, dtstart=self.end_date,\n\t\t\t\t\t\t interval=self.repeat_every, count=number_of_dates*4)\n\t\truleset = rrule.rruleset()\n\t\truleset.rrule(rule)\n\t\truleset.exdate(datetime.combine(self.end_date, time()))\n\n\t\texclude_query = Q(end_time__lte=self.start_time) | Q(start_time__gte=self.end_time) | Q(id=self.id)\n\t\tconflict_slots = Appointment.objects.filter(healer=self.healer, confirmed=True).\\\n\t\t\t\t\t\t\t\t\t\t\t\tfilter_by_date(self.end_date).\\\n\t\t\t\t\t\t\t\t\t\t\t\texclude(exclude_query)\n\n\t\texdates = []\n\t\tif len(conflict_slots):\n\t\t\tfrom_date = rule[1]\n\t\t\tto_date = rule[-1]\n\t\t\tfor slot in conflict_slots:\n\t\t\t\tif slot.is_single():\n\t\t\t\t\texdates.append(datetime.combine(slot.start_date, time()))\n\t\t\t\telse:\n\t\t\t\t\texruleset = rrule.rruleset()\n\t\t\t\t\texruleset.rrule(slot.get_rrule_object(skip_time=True))\n\t\t\t\t\tfor timestamp in slot.exceptions:\n\t\t\t\t\t\texruleset.exdate(datetime.utcfromtimestamp(timestamp))\n\t\t\t\t\texdates.extend(exruleset.between(from_date, to_date, inc=True))\n\n\t\trepeat_count = 0\n\t\texceptions = []\n\t\tfor rule_date in ruleset:\n\t\t\trepeat_count += 1\n\t\t\tif rule_date not in exdates:\n\t\t\t\tavailable_dates.append(rule_date)\n\t\t\t\tif len(available_dates) == number_of_dates:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\texceptions.append(get_timestamp(rule_date))\n\n\t\tif len(available_dates)==number_of_dates:\n\t\t\treturn {\n\t\t\t\t'dates': available_dates,\n\t\t\t\t'exceptions': exceptions,\n\t\t\t\t'repeat_count': repeat_count\n\t\t\t}", "def test_start_criteria_from_date_flag(self):\n\n flags = {\n 'from_date':\n float(\n dateutil.parser.parse('2018-01-18 20:09:50').strftime(\"%s.%f\"))\n }\n date = float(\n dateutil.parser.parse('2018-01-18 20:09:50').strftime(\"%s.%f\"))\n assert phout.start_criteria(date, flags), \"from_date flag should hit\"\n\n date = float(\n dateutil.parser.parse('2018-01-18 20:09:51').strftime(\"%s.%f\"))\n assert phout.start_criteria(date, flags), \"from_date flag should hit\"\n\n date = float(\n dateutil.parser.parse('2018-01-18 20:09:49').strftime(\"%s.%f\"))\n assert not phout.start_criteria(\n date, flags), \"from_date flag should not hit\"", "def demand(cls, request, context):\n auction_period = request.validated['json_data'].get('auctionPeriod')\n if auction_period and auction_period.get('startDate'):\n return cls\n return False" 
]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function places the students in their corresponding buildings, floors and rooms. It uses the hash functions to determine where the students should be placed according to the value of the corresponding hash.
def placeStudents(list):
    buildings = createBuilding()
    for line in list:
        name, furniture = line.split()
        floors = buildings.get(name)
        rooms = floors.get(name)
        room = rooms.get(name)
        if room.AddtoRoom(name, furniture):
            print("student", name, "already present in", buildings.hash_function(name), "floor", floors.hash_function(name), "in room", rooms.hash_function(name), ". Added furniture", furniture)
            # They were already in the room and their furniture was added
        else:
            print('Added student', name, 'with', furniture, 'to building', buildings.hash_function(name), "floor", floors.hash_function(name), "in room", rooms.hash_function(name))
[ "def map_the_home(hlst):\n\n hdct = {}\n for e in hlst:\n if e.room in hdct.keys():\n hdct[e.room].append(e)\n else:\n hdct[e.room] = [e]\n return hdct", "def student_clusters_in_classes(self, students, class_rooms):\n # Closure function to verify if the student is inside of classRoom\n def verify(student, clas):\n if not hasattr(clas, 'students'):\n clas.students = []\n if clas.is_student_in_class_room(student.position):\n clas.students.append(student)\n return clas\n\n for student in students:\n class_rooms = map((lambda c: verify(student, c)), class_rooms)\n\n # filter classRoom that has at least two students\n class_rooms = filter((lambda c: len(c.students) >= 2), class_rooms)\n return class_rooms", "def hallways():\n for i in range(len(rooms)-1):\n roomA = rooms[i]\n roomB = rooms[i+1]\n for r in range(roomA.r,roomB.r):\n cells[(r,roomA.c)] = 1\n for c in range(roomA.c, roomB.c):\n cells[(roomA.r,c)] = 1\n for r in range(roomB.r,roomA.r):\n cells[(r,roomA.c)] = 1\n for c in range(roomB.c, roomA.c):\n cells[(roomA.r,c)] = 1", "def place_single(self):\n\n # create empty coordinates list for each type of house\n single_coordinatenlijst = [[0,0]] * self.amount_single\n coordinaten_maison = [[0,0]] * self.amount_maison\n coordinaten_bungalow = [[0,0]] * self.amount_bungalow\n\n # amount of times a house is placed\n for i in range(0,self.amount_single):\n\n # generate random coordinates for the upper-left corner of a single family home\n # the coordinates should fit the range of the neighbourhood, which is dependent on the size of the house\n # the size of a single family home is 10x10 metres including obligatory free space\n # from the upper-left corner of the house, the ranges of the random coordinates should fall between 2-150 (x-axis) and 2-170 (y-axis)\n x = random.randrange(2,150)\n y = random.randrange(2,170)\n\n # create new coordinates whenever a coordinate does not meet the requirements\n check = True\n while check == True:\n\n # new coordinates when water present in new house\n if self.index_water in self.neighbourhood[y:(y+8),x:(x+8)]:\n x = random.randrange(2,150)\n y = random.randrange(2,170)\n\n # new coordinate when another house already present in new house\n elif self.index_single in self.neighbourhood[(y-2):(y+10),(x-2):(x+10)] or self.index_bungalow in self.neighbourhood[(y-2):(y+10),(x-2):(x+10)] or self.index_maison in self.neighbourhood[(y-2):(y+10),(x-2):(x+10)]:\n x = random.randrange(2,150)\n y = random.randrange(2,170)\n\n # new coordinate when obligatory free space from another house present in new house\n elif self.index_free_space in self.neighbourhood[y:(y+8),x:(x+8)]:\n x = random.randrange(2,150)\n y = random.randrange(2,170)\n\n # coordinate is valid\n else:\n check = False\n\n # new coordinates are saved in coordinates list\n new_coor = [x,y]\n single_coordinatenlijst[i] = new_coor\n\n # draw house on the gridmap\n self.neighbourhood[(y - 2):(y + 10),(x - 2):(x + 10)] = 5\n self.neighbourhood[y:(y + 8),x:(x + 8)] = 1\n\n return single_coordinatenlijst", "def building_ruined_house(w=6, h=6, material=None):\n\n # Initial checks. 
Don't accept too small/big house.\n if w < 6 or h < 6:\n raise ValueError('Building is too small: w or h < 6')\n elif w > 10 or h > 10:\n raise ValueError('Building is too big: w or h > 10')\n\n # Choose materials\n wall_material = None\n if not material:\n wall_material = random.choice([C.wall_block, C.wall_plank, C.wall_stone, C.wall_brick])\n elif material not in (['block', 'plank', 'stone', 'brick']):\n raise ValueError('Material should be \"block\", \"plank\", \"stone\" or \"brick\"')\n\n if material == 'stone':\n wall_material = C.wall_stone\n elif material == 'block':\n wall_material = C.wall_block\n elif material == 'plank':\n wall_material = C.wall_plank\n elif material == 'brick':\n wall_material = C.wall_brick\n\n M = room_default(w, h, wall_type=wall_material, floor_type=C.floor_rocks)\n\n # Calculate % of replaced walls and added grass. 10% for walls and 20% for grass.\n grass_count = int((w - 2) * (h - 2) * 0.2)\n wall_ruined = int(w * h * 0.1)\n M[w//2, h-1] = C.door_open_dark()\n\n # Place some furniture and animals.\n all_coord = [(w//2, h-1), (w//2, h-2)]\n for item_class in (\n T.furniture_chimney, \n A.animal_bat,\n A.animal_spider,\n T.web,\n T.furniture_barrel\n ):\n while True:\n x = random.randint(1, w-2)\n y = random.randint(1, h-2)\n if (x, y) not in all_coord:\n M[x, y].put(item_class())\n all_coord.append((x, y))\n break\n\n # Place some grass.\n for _ in range(grass_count):\n while True:\n x = random.randint(0, w-1)\n y = random.randint(0, h-1)\n if (x, y) not in all_coord:\n M[x, y] = C.flora_grass()\n all_coord.append((x, y))\n break\n\n # Replace some walls with rocks.\n for _ in range(wall_ruined):\n while True:\n x = random.randint(0, w-1)\n y = random.choice([0, h-1])\n if (x, y) not in all_coord:\n M[x, y] = C.floor_rocks()\n all_coord.append((x, y))\n break\n\n return M", "def categorize(street):\n # South campus dorms\n SCD = [\n \"CRAIGE\",\n \"EHRINGHAUS\", \n \"HINTON JAMES\",\n \"MORRISON\",\n \"KOURY\",\n \"CRAIGE NORTH\",\n \"HORTON\",\n \"HARDIN\",\n \"RAMS VILLAGE\",\n \"VILLAGE\"\n ] # Freshman dorms\n \n # Main campus dorms\n MCD = [\"MANLY\",\n \"RUFFIN\",\n \"AYCOCK\",\n \"MANGUM\",\n \"MANLY\",\n \"GRAHAM\",\n \"OLD EAST\",\n \"OLD WEST\",\n \"COBB\",\n \"JOYNER\",\n \"ALEXANDER\",\n \"CONNOR\",\n \"WINSTON\",\n \"GRAHAM\",\n \"EVERETT\",\n \"LEWIS\",\n \"MCIVER\",\n \"STACY\",\n \"ALDERMAN\",\n \"SPENCER\",\n \"GRIMES\",\n \"CARMICHAEL\",\n \"PARKER\",\n \"TEAGUE\"\n ]\n \n # On campus\n ACB = [ \n \"LENOIR\", \n \"GENOME\", \n \"STONE CENTER\", \n \"HOUSE LIBRARY\", \n \"DAVIS\",\n \"STUDENT UNION\",\n \"PEABODY\",\n \"MCCOLL\",\n \"MCCOLL BUSINESS\",\n \"DENTAL\",\n \"BONDURANT\",\n \"MEDICAL SCHOOL\",\n \"VAN HECKE\",\n \"MCGAVRAN\",\n \"GREENBERG\", \n \"HILL\",\n \"MED SCH\",\n \"FETZER GYM\",\n \"GARDNER\",\n \"GRAHAM STUDENT UNION\",\n \"GREENLAW\", \n \"DEY\",\n \"BINGHAM\",\n \"MURPHEY\",\n \"HANES ART\",\n \"HANES\",\n \"MITCHELL\",\n \"MURRAY\",\n \"VENABLE\",\n \"CARROLL\",\n \"SAUNDERS\",\n \"CAROLINA HALL\",\n \"STUDENT HEALTH\",\n \"CAMPUS HEALTH\",\n \"ARBORETUM\",\n \"PLANETARIUM\",\n \"MOREHEAD\",\n \"BELL TOWER\",\n \"HAMILTON\",\n \"KNAPP\",\n \"CALDWELL\",\n \"CHAPMAN\",\n \"PHILLIPS\",\n \"SITTERSON\",\n \"BROOKS\",\n \"SWAIN\",\n \"COKER\",\n \"WILSON\",\n \"FORDHAM\",\n \"WHITEHEAD\",\n \"FEDEX GLOBAL\",\n \"CARR\",\n \"DAVIE\",\n \"STEELE\",\n \"BYNUM\",\n \"PLAYMAKERS\",\n \"PLAY-MAKERS\",\n \"STUDENT STORE\",\n \"DANIELS STUDENT STORE\",\n \"STUDENT STORE\",\n \"HOWELL\",\n \"WOOLLEN\",\n \"POLK PLACE\",\n 
\"BRAUER\",\n \"BIOINFORMATICS\",\n \"RECREATION\",\n \"BENNETT\",\n \"BOWLES\",\n \"BOSHAMER\",\n \"BERRYHILL\",\n \"BEARD\",\n \"HOOKER\",\n \"LOUDERMILK\",\n \"PUBLIC SAFETY\"\n ]\n\n # B School\n BUS = [\n \"BUSINESS\"\n ]\n # Hospitals\n HSP = [\n \"HOSPITAL\",\n \"CARE CENTER\",\n \"EMERGENCY\",\n \"AMBULATORY\"\n ]\n\n # Arbitrary streets including parking lots\n ARS = [\n \" RD\",\n \" DR\",\n \" AVE\",\n \" BLVD\",\n \"PARKING\",\n \" ST\",\n \"ROAD\",\n \"STREET\",\n \"AVENUE\",\n \"BOULEVARD\",\n \"DRIVE\"\n ]\n # Northside\n # Academic Building\n\n street = street.upper()\n\n for i in BUS:\n if i in street:\n return \"BUS\"\n for i in SCD:\n if i in street:\n return \"SCD\"\n for i in MCD:\n if i in street:\n return \"MCD\"\n for i in ACB:\n if i in street:\n return \"ACB\"\n\n for i in ARS:\n if i in street:\n return \"ARS\"\n \n for i in HSP:\n if i in street:\n return \"HSP\"\n\n return \"NAN\"", "def setup_school(self):\n\n self.school = School()\n # setting up the initial faculty\n random.seed()\n num_teachers = random.randint(0, 12)\n print('num_teachers:', num_teachers) #comment\n # print('No Teachers'\n for i in range(num_teachers):\n t = Teacher();\n t.hire()\n self.school.teachers = list_teachers\n\n #if no teachers have arrived, there are no students\n if len(list_teachers) < 1:\n self.school.students = []\n else:\n av_teachers = self.school.gen_grade_teacher_dict()\n student_enrollment_number = random.randint(82, 100)\n print(student_enrollment_number)\n for i in range(student_enrollment_number): #largest HS in USA has 8076 #!more in readme\n s = Student()\n s = s.enroll(av_teachers)\n try:\n list_students.append(s)\n remove_maxed_teacher(s, av_teachers)\n except Exception as e:\n break;\n print('Enrollment completed')\n self.school.students = list_students\n # self.grade_levels = self.gen_grade_levels()\n # new_student_testcase_full_full(av_teachers)\n print('Current size of faculty is: ', len(self.school.teachers))\n print('Hiring new teacher with no grade assigned:....')\n t = Teacher()\n t.hire()\n self.school.teachers.append(t)\n print('Teacher: ', t.name, ', hired for Grade: ', t.grade_level)\n print('New size of faculty is: ', len(self.school.teachers))\n print('Current size of faculty is: ', len(self.school.teachers))\n print('Hiring a new teacher to work in Grade 12:.....')\n grade_level = '12'\n t_for_12= Teacher()\n t_for_12.hire(grade_level)\n print('Teacher: ', t_for_12.name, ', hired for Grade: ', t_for_12.grade_level)\n print('New size of faculty is: ', len(self.school.teachers))\n self.school.teachers.append(t)\n self.output_startup()", "def collect_schools():\n MIN_OFFSET = 0\n MAX_OFFSET = 6700\n STEP_SIZE = 20\n school2id = {}\n num_failed = 0\n for offset in np.arange(MIN_OFFSET, MAX_OFFSET+STEP_SIZE, step=STEP_SIZE):\n if offset % 100 == 0: print(offset)\n url = DOMAIN + '/search.jsp?query=&queryoption=HEADER&stateselect=&country=united+states&dept=&queryBy=schoolName&facetSearch=&schoolName=&offset={}&max=20'.format(offset)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n schools = soup.find_all('li', attrs={'class':'listing SCHOOL'})\n for s in schools:\n try:\n link = s.find('a')\n school_id = int(link['href'].split('=')[-1])\n name = link.find('span', attrs={'class':'listing-name'}).find('span', attrs={'class':'main'}).text\n school2id[name] = school_id\n except:\n print('Failed:', s.text.strip())\n num_failed += 1\n print('Num schools found:', len(school2id))\n for s in school2id:\n if 'Columbia' in s:\n print(s, 
school2id[s])\n pickle.dump(school2id, open('../rate_my_prof/school2id.pkl', 'wb'))", "def assignSuitBuildings(self, numToAssign):\n # Look for a suitable zone. First, get a copy of the\n # SuitHoodInfo array, so we can remove elements from it as we\n # discover they're unsuitable.\n hoodInfo = self.SuitHoodInfo[:]\n totalWeight = self.TOTAL_BWEIGHT\n totalWeightPerTrack = self.TOTAL_BWEIGHT_PER_TRACK[:]\n totalWeightPerHeight = self.TOTAL_BWEIGHT_PER_HEIGHT[:]\n\n # Count up the number of each track of building already in the\n # world, so we can try to balance the world by preferring the\n # rarer tracks.\n numPerTrack = {'c': 0, 'l': 0, 'm': 0, 's':0}\n for sp in self.air.suitPlanners.values():\n sp.countNumBuildingsPerTrack(numPerTrack)\n numPerTrack['c'] += sp.pendingBuildingTracks.count('c')\n numPerTrack['l'] += sp.pendingBuildingTracks.count('l')\n numPerTrack['m'] += sp.pendingBuildingTracks.count('m')\n numPerTrack['s'] += sp.pendingBuildingTracks.count('s')\n\n # Also count up the number of each height of building.\n numPerHeight = {0:0, 1: 0 , 2: 0, 3: 0, 4: 0,}\n for sp in self.air.suitPlanners.values():\n sp.countNumBuildingsPerHeight(numPerHeight)\n numPerHeight[0] += sp.pendingBuildingHeights.count(0)\n numPerHeight[1] += sp.pendingBuildingHeights.count(1)\n numPerHeight[2] += sp.pendingBuildingHeights.count(2)\n numPerHeight[3] += sp.pendingBuildingHeights.count(3)\n numPerHeight[4] += sp.pendingBuildingHeights.count(4)\n\n # For each building:\n while numToAssign > 0:\n\n # Choose the track with the smallest representation for\n # this building.\n smallestCount = None\n smallestTracks = []\n for trackIndex in range(4):\n if totalWeightPerTrack[trackIndex]:\n track = SuitDNA.suitDepts[trackIndex]\n count = numPerTrack[track]\n if smallestCount == None or count < smallestCount:\n smallestTracks = [track]\n smallestCount = count\n elif count == smallestCount:\n smallestTracks.append(track)\n\n if not smallestTracks:\n self.notify.info(\"No more room for buildings, with %s still to assign.\" % (numToAssign))\n return\n\n # Now smallestTracks is the list of all tracks with the\n # fewest number of buildings. 
(There might be more than\n # one with the same number.)\n buildingTrack = random.choice(smallestTracks)\n buildingTrackIndex = SuitDNA.suitDepts.index(buildingTrack)\n\n # Do that again, choosing a suitable height.\n smallestCount = None\n smallestHeights = []\n for height in range(5):\n if totalWeightPerHeight[height]:\n count = float(numPerHeight[height]) / float(self.BUILDING_HEIGHT_DISTRIBUTION[height])\n if smallestCount == None or count < smallestCount:\n smallestHeights = [height]\n smallestCount = count\n elif count == smallestCount:\n smallestHeights.append(height)\n\n if not smallestHeights:\n self.notify.info(\"No more room for buildings, with %s still to assign.\" % (numToAssign))\n return\n\n # Remember, buildingHeight is numFloors - 1.\n buildingHeight = random.choice(smallestHeights)\n \n self.notify.info(\"Existing buildings are (%s, %s), choosing from (%s, %s), chose %s, %s.\" %\n (self.formatNumSuitsPerTrack(numPerTrack),\n self.formatNumSuitsPerTrack(numPerHeight),\n smallestTracks, smallestHeights,\n buildingTrack, buildingHeight))\n \n # Look for a suitable street to have this building.\n repeat = 1\n while repeat and buildingTrack != None and buildingHeight != None:\n if len(hoodInfo) == 0:\n self.notify.warning(\"No more streets can have suit buildings, with %d buildings unassigned!\" % (numToAssign))\n return\n \n repeat = 0\n \n currHoodInfo = self.chooseStreetWithPreference(hoodInfo, buildingTrackIndex, buildingHeight)\n\n # Get the DistributedSuitPlannerAI associated with this zone.\n zoneId = currHoodInfo[ self.SUIT_HOOD_INFO_ZONE ]\n\n if self.air.suitPlanners.has_key(zoneId):\n sp = self.air.suitPlanners[zoneId]\n \n # How many suit buildings does this zone already have?\n numTarget = sp.targetNumSuitBuildings\n numTotalBuildings = len(sp.frontdoorPointList)\n else:\n # There's no SuitPlanner for this zone. We must\n # be running with want-suits-everywhere turned\n # off.\n numTarget = 0\n numTotalBuildings = 0\n \n if numTarget >= currHoodInfo[ self.SUIT_HOOD_INFO_BMAX ] or \\\n numTarget >= numTotalBuildings:\n # This zone has enough buildings.\n self.notify.info(\"Zone %d has enough buildings.\" % (zoneId))\n hoodInfo.remove(currHoodInfo)\n weight = currHoodInfo[self.SUIT_HOOD_INFO_BWEIGHT]\n tracks = currHoodInfo[self.SUIT_HOOD_INFO_TRACK]\n heights = currHoodInfo[self.SUIT_HOOD_INFO_HEIGHTS]\n totalWeight -= weight\n\n totalWeightPerTrack[0] -= weight * tracks[0]\n totalWeightPerTrack[1] -= weight * tracks[1]\n totalWeightPerTrack[2] -= weight * tracks[2]\n totalWeightPerTrack[3] -= weight * tracks[3]\n\n totalWeightPerHeight[0] -= weight * heights[0]\n totalWeightPerHeight[1] -= weight * heights[1]\n totalWeightPerHeight[2] -= weight * heights[2]\n totalWeightPerHeight[3] -= weight * heights[3]\n totalWeightPerHeight[4] -= weight * heights[4]\n\n if totalWeightPerTrack[buildingTrackIndex] <= 0:\n # Oops, no more of this building track can be\n # allocated.\n assert(totalWeightPerTrack[buildingTrackIndex] == 0)\n buildingTrack = None\n\n if totalWeightPerHeight[buildingHeight] <= 0:\n # Oops, no more of this building height can be\n # allocated.\n assert(totalWeightPerHeight[buildingHeight] == 0)\n buildingHeight = None\n \n repeat = 1\n\n # Ok, now we've got a randomly-chosen zone that wants a\n # building. 
Hand it over.\n if buildingTrack != None and buildingHeight != None:\n sp.targetNumSuitBuildings += 1\n sp.pendingBuildingTracks.append(buildingTrack)\n sp.pendingBuildingHeights.append(buildingHeight)\n self.notify.info(\"Assigning building to zone %d, pending tracks = %s, pending heights = %s\" % (zoneId, sp.pendingBuildingTracks, sp.pendingBuildingHeights))\n numPerTrack[buildingTrack] += 1\n numPerHeight[buildingHeight] += 1\n numToAssign -= 1", "def test_resolve_rooms_id_floor_by_floor_edilizia(self):\n\n\n floor = self.db_building[\"dxf\"][\"floors\"][0]\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n floor,\n \"edilizia\"\n )\n\n self.assertEqual(floor[\"rooms\"][\"R002\"], self.final_rooms[\"R002\"])\n\n\n floor = self.db_building[\"dxf\"][\"floors\"][1]\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n floor,\n \"edilizia\"\n )\n\n self.assertEqual(floor[\"rooms\"][\"R022\"], self.final_rooms[\"R022\"])\n self.assertTrue(\"R023\" not in floor[\"rooms\"])\n self.assertTrue(\"R003\" not in floor[\"rooms\"])", "def prepare_patients(patients, hashing_rules, salt):\n lut_patient_hashes = {}\n lut_patient_id = {}\n\n for count, patient in enumerate(patients):\n norm_patient = NormalizedPatient(patient)\n pat_hashes = get_hashes(norm_patient, hashing_rules, salt)\n lut_patient_hashes[str(count)] = pat_hashes\n lut_patient_id[str(count)] = patient.id\n log.debug(\"Hashing: {} \\n{}\".format(norm_patient, pat_hashes))\n\n return lut_patient_id, lut_patient_hashes", "def test_enroll_students_with_dupes() -> None:\n quinn = Student(1, 'Quinn')\n tomasz = Student(2, 'Tomasz')\n ellen = Student(3, \"Ellen\")\n csc148 = Course('csc148')\n csc148.enroll_students([ellen])\n assert csc148.students == [ellen]\n studs = [quinn, tomasz, ellen]\n csc148.enroll_students(studs)\n assert csc148.students == [ellen]", "def populate_database():\n database_schools = []\n n = 0\n cols = []\n cols_with_size = get_sizes()\n while n < len(colleges_with_sat):\n c = C(colleges_with_sat[n], colleges_with_sat[n+1], colleges_with_sat[n+2], colleges_with_sat[n+3], colleges_with_sat[n+4])\n cols.append(c)\n n+=5\n \n for i in range(0, len(colleges)):\n name = colleges[i]\n if False: #db_college_exists(name):\n continue\n sats = {}\n size = 0\n tuition = 0\n address = \"\"\n zipcode = 0\n matched = False\n for c in cols:\n if levenshtein(c.name, name) < 3:\n matched = True\n sats['math'] = c.math_range\n sats['reading'] = c.read_range\n if not matched:\n sats = None\n for c in cols_with_size:\n #print c[0]\n if levenshtein(c[0], name) < 3:\n size = c[1]\n tuition = c[2]\n address = c[3]\n zipcode = c[4]\n #print c\n break\n college = College(name, \"\", i, sats, size, tuition, address, zipcode)\n #print college\n database_schools.append(college)\n #college.print_college()\n user = User()\n user.name = \"Aaron\"\n user.sats = {\"math\" : 800, \"reading\" : 800}\n\n #print college.find_location()\n #print college.get_difficulty()\n return database_schools", "def test_resolve_rooms_id_floor_by_floor_easyroom(self):\n\n floor = self.db_building[\"dxf\"][\"floors\"][0]\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n floor,\n \"easyroom\"\n )\n\n self.assertEqual(floor[\"rooms\"][\"R003\"], self.final_rooms[\"R003\"])\n\n\n floor = self.db_building[\"dxf\"][\"floors\"][1]\n DXFRoomIdsResolver.resolve_rooms_id(\n self.building,\n floor,\n \"easyroom\"\n )\n\n self.assertEqual(floor[\"rooms\"][\"R023\"], self.final_rooms[\"R023\"])\n self.assertTrue(\"R022\" not in floor[\"rooms\"])\n 
self.assertTrue(\"R002\" not in floor[\"rooms\"])", "def random_buildings(game_board, level):\n random_area = [random.randrange(0, 11, 10), random.randrange(0, 31, 30)]\n x = random_area[1]\n y = random_area[0]\n player_x = 0\n player_y = 0\n if level == 1:\n game_board = generate_build(game_board, x, y, [3, 8], [7, 19])\n game_board[y + 7][x + 5 + 7] = '.' # tavern doors\n game_board[y + 7][x + 6 + 7] = '.' # tavern doors\n game_board[y + 4][x + 5 + 7] = 'O' # tavern man\n game_board[y + 6][x + 5 + 7] = '@'\n player_x, player_y = y+6, x+5+7 # x is y and x is y, i know, it's confusing\n game_board = random_item(game_board, ['a', 'b', 'c', 'd', 'e']) # clothes\n if level == 2:\n game_board = generate_build(game_board, x, y, [3, 8], [4, 19])\n game_board[y + 7 - 1][x + 4] = '.'\n game_board[y + 5][x + 9 + 7] = '❤' # farmer wife\n game_board[y + 5][x + 2 + 7] = '@'\n player_x, player_y = y+5, x+2+7\n while x == random_area[1] and y == random_area[0]: # random area for corn\n x = random.randrange(0, 31, 30)\n y = random.randrange(0, 11, 10)\n for i in range(y + 2, y + 8): # corn generating\n for z in range(x + 3, x + 27):\n game_board[i][z] = chr(182)\n if level == 3:\n game_board = generate_build(game_board, x, y, [3, 8], [9, 17])\n game_board[y + 7][x + 5 + 7] = '.'\n game_board[y + 7][x + 6 + 7] = '.'\n game_board[y + 4][x + 5 + 7] = \"☠\" # boss\n game_board[2][2] = '@'\n player_x, player_y = 2, 2\n while x == random_area[1] and y == random_area[0]: # random area for new build\n x = random.randrange(0, 31, 30)\n y = random.randrange(0, 11, 10)\n game_board = generate_build(game_board, x, y, [2, 7], [8, 18])\n game_board[y + 3][x + 8] = '.'\n game_board[y + 5][x + 5 + 7] = '¢'\n game_board = random_item(game_board, [\"♏\"])\n return game_board, player_x, player_y", "def sort_students(roster):\n\n merge_sort = []\n for i in range(len(roster)):\n merge_sort.append([roster[i]])\n i = 0\n while i < len(merge_sort) - 1:\n a1 = merge_sort[i]\n a2 = merge_sort[i + 1]\n newl = merge(a1, a2)\n merge_sort.append(newl)\n i += 2\n if len(merge_sort) != 0:\n roster[:] = merge_sort[-1][:]\n return roster", "def generate_map(self):\n while (self.room_count < self.room_limit):\n self.room_count += 1\n\n if (self.room_count <= self.room_limit/2):\n Dungeon.map.append(Direction.North.value)\n self.branches.append([self.current_room])\n self.current_room.north = Room(self.room_count)\n self.current_room.north.south = self.current_room\n self.current_room = self.current_room.north\n else:\n flag = False\n\n\n while (flag == False):\n self.random_integer = random.randint(1, self.room_limit/2)\n current_branch = self.branches[random_integer-1]\n room_list = getAvailableRooms(self.branches[random_integer-1])\n if(len(room_list)>0):\n \n \n\n \n\n\n # self.random_integer = random.randint(1, 4)\n # if(self.random_integer <= 6):\n # Dungeon.map.append(Direction.North.value)\n # self.current_room.north = Room(self.room_count)\n # self.current_room.north.south = self.current_room\n # self.current_room = self.current_room.north\n # elif(self.random_integer == 7 or self.random_integer == 8):\n # Dungeon.map.append(Direction.West.value)\n # self.current_room.west = Room(self.room_count)\n # self.current_room.west.east = self.current_room\n # self.current_room = self.current_room.west\n # elif(self.random_integer == 9 or self.random_integer == 10):\n # Dungeon.map.append(Direction.East.value)\n # self.current_room.east = Room(self.room_count)\n # self.current_room.east.west = self.current_room\n # self.current_room = 
self.current_room.east\n\n self.current_room = self.first_room", "def building_roadhouse(w=15, h=15, wall_material=None, floor_material=None):\n # Initial checks. Don't accept too small/big inn\n if w < 15 or h < 15:\n raise ValueError('Building is too small: w or h < 15')\n elif w > 21 or h > 21:\n raise ValueError('Building is too big: w or h > 21')\n # Choose materials\n if not wall_material:\n wall_material = random.choice([C.wall_block, C.wall_plank, C.wall_brick, C.wall_stone])\n elif wall_material not in (['block', 'plank', 'brick', 'stone']):\n raise ValueError('Wall material should be \"block\", \"plank\", \"brick\" or \"stone\"')\n if wall_material == 'block':\n wall_material = C.wall_block\n elif wall_material == 'plank':\n wall_material = C.wall_plank\n elif wall_material == 'brick':\n wall_material = C.wall_brick\n elif wall_material == 'stone':\n wall_material = C.wall_stone\n\n if not floor_material:\n floor_material = random.choice([C.floor_dirt, C.floor_parquet, C.floor_cobblestone])\n elif floor_material not in (['dirt', 'parquet', 'cobblestone']):\n raise ValueError('Floor material should be \"dirt\", \"parquet\" or \"cobblestone\"')\n if floor_material == 'dirt':\n floor_material = C.floor_dirt\n elif floor_material == 'parquet':\n floor_material = C.floor_parquet\n elif floor_material == 'cobblestone':\n floor_material = C.floor_cobblestone\n M = room_default(w, h, wall_type=wall_material, floor_type=floor_material)\n M[13, h-1] = C.door_closed_window()\n kitchen = _room_kitchen(w, 6, wall_material, floor_material)\n M.meld(kitchen, 0, 0)\n living_room = _room_living(9, h-5, wall_material, floor_material)\n M.meld(living_room, 0, 5)\n vending = _interior_vending(w-10, h-7, wall_material, floor_material)\n M.meld(vending, 9, 6)\n\n return M", "def draw_mountains(islands, grid):\n mountain_spacing_x = 30\n mountain_spacing_y = 20\n default_mountain_width = 90\n default_mountain_height = 40\n bounding_scale = 1.5 # How much extra room to check for\n mountain_positions = []\n same_mountain_threshold = 10. 
# No mountains will be closer than this\n for island in islands:\n # Rivers\n min_x, min_y, max_x, max_y = vec.get_bounds([node.p for node in island])\n h_scale, h_sign = maps.get_heightmap_adjustment_for_island(island)\n for y in range(int(min_y), int(max_y), mountain_spacing_y):\n for x in range(int(min_x), int(max_x), mountain_spacing_x):\n h = h_scale*h_sign*maps.heightmap(x, y)\n if h > 0.5:\n # We could start a river here\n if random(1) > 0.9:\n river = maps.get_river(x, y, grid, h_scale, h_sign)\n draw_river(river)\n # Hills and mountains\n for y in range(int(min_y), int(max_y), mountain_spacing_y):\n for x in range(int(min_x), int(max_x), mountain_spacing_x):\n h = h_scale*h_sign*maps.heightmap(x, y)\n if h > 0.5 and h < 0.75:\n # We could draw a hill here.\n if random(1) > 0.5:\n hill_x = x + (random(mountain_spacing_x)\n -0.5*mountain_spacing_x)\n if vec.rect_within((hill_x-8, y-4, 16, 4),\n (min_x, min_y,\n max_x-min_x, max_y-min_y)):\n draw_hill(hill_x, y)\n if h > 0.75:\n mountain_scale = min(h, 1)\n mountain_y = y-default_mountain_height*mountain_scale\n # Check that there is space for a mountain here\n if vec.rect_within((x-default_mountain_width*\n bounding_scale*0.5*mountain_scale,\n y-default_mountain_height*\n bounding_scale*mountain_scale,\n default_mountain_width*\n bounding_scale*mountain_scale,\n default_mountain_height*\n bounding_scale*mountain_scale),\n (min_x, min_y,\n max_x-min_x, max_y-min_y)):\n # Check that there isn't already a mountain here\n for mountain in mountain_positions:\n if (vec.distance((x, mountain_y), mountain)\n < same_mountain_threshold):\n break\n else:\n draw_mountain(x+random(mountain_spacing_x)-\n mountain_spacing_x*0.5,\n mountain_y,\n default_mountain_width*mountain_scale,\n default_mountain_height*mountain_scale)\n mountain_positions.append((x, mountain_y))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The main function reads in a file, converts it to a list and then proceeds to place the students.
def main():
    textfile = input("input filename: ")
    list = readStudents(textfile)
    placeStudents(list)
[ "def addStudentsFromFile(self, filename):\n filereader=open(filename)\n lines=filereader.readlines()\n for line in lines[5:]:\n line=line.strip('\\n')\n rollno,name,*hwk=line.split(':')\n #Convert homework into numbers\n marks=[eval(mark) for mark in hwk]\n #create a student\n student=Student(rollno,name)\n #set the marks\n student.setMarks(marks)\n #add to list\n self.addStudent(student)\n #close file\n filereader.close()", "def read_student_file():\n filename = input(\"Students file name: \")\n\n with open(filename, \"r\") as file:\n students = []\n for line in file.readlines():\n students.append(file_line_into_tuple(line))\n print(students)\n return students", "def student_list() -> List[str]:\n path = os.path.join(BASE_PATH, 'hta/groups/students.txt')\n return line_read(path)", "def readstu(self) -> None:\n path :str = os.path.join(self.directory_path,\"students.txt\")\n for cwid, name, major in file_reader(path, 3, sep='\\t',header=True): \n b: Student = Student(cwid,name,major)\n self.studict[cwid]=b", "def placeStudents(list):\r\n buildings = createBuilding()\r\n\r\n for line in list:\r\n name, furniture = line.split()\r\n floors = buildings.get(name)\r\n rooms = floors.get(name)\r\n room = rooms.get(name)\r\n if room.AddtoRoom(name, furniture):\r\n print(\"student\", name, \"already present in\", buildings.hash_function(name),\"floor\", floors.hash_function(name)\r\n , \"in room\", rooms.hash_function(name), \". Added furniture\", furniture)\r\n # They were already in the room and their furniture was added\r\n else:\r\n print('Added student', name, 'with', furniture, 'to building', buildings.hash_function(name), \"floor\",\r\n floors.hash_function(name), \"in room\", rooms.hash_function(name))", "def __loadFromFile(self):\r\n try:\r\n f=open(self.__fileName,\"r\")\r\n except IOError:\r\n raise RepositoryError()\r\n linie = f.readline().strip()\r\n while linie!=\"\":\r\n part=linie.split(\",\")\r\n st=Student(part[0],part[1])\r\n self.__listStudents.append(st)\r\n linie=f.readline().strip()\r\n f.close()", "def _get_students(self,path):\n try:\n for cwid, name, major in file_reading_gen(path, 3, sep=\"\\t\",header=False):\n self._students[cwid] = Student(cwid,name,major)\n except FileNotFoundError as fnfe:\n print(fnfe)\n except ValueError as ve:\n print(ve)", "def main():\n\n roster = []\n student = input('Enter a student record (blank to end): ')\n while student.strip() != '':\n roster.append(tuple(student.split(', ')))\n student = input('Enter a student record (blank to end): ')\n new_list = sort_students(roster)\n print()\n for student in new_list:\n print('{}, {}, {}, {}'.format(student[0], student[1], student[2],\n student[3]))", "def gather_marks(students,names,filename):\r\n try:\r\n f = open(filename,'r')\r\n for line in f:\r\n temp=line.split(\",\")\r\n name=temp[0]\r\n mark=eval(temp[1]) \r\n students[name]=mark\r\n names.append(name)\r\n f.close()\r\n except IOError as errorNo:\r\n print(\"There is an error with the file: \",errorNo)", "def load_students() -> List[List[str]]:\n path = os.path.join(BASE_PATH, 'hta/groups/students.csv')\n return line_read(path, delim=\",\")", "def student(self,path):\n try:\n sfile = open(path, 'r')\n except FileNotFoundError:\n logging.exception('There is an error with opening the student file in this directory')\n else:\n if sfile.readlines() == ['\\n']:\n print('This file is an empty!')\n else:\n sfile.seek(0)\n for lines in sfile:\n studentid, studentname, studentmajor = lines.strip().split('\\t')\n self.studentdict[studentid] = 
Student(studentid,studentname,studentmajor)", "def readgra(self) -> None:\n path :str = os.path.join(self.directory_path,\"grades.txt\")\n for stucwid, coursename, grade, instcwid in file_reader(path, 4, sep='\\t',header=True): \n if stucwid not in self.studict.keys():\n print(f\" There is no Student with CWID: {stucwid}\")\n continue\n if instcwid not in self.instdict.keys():\n print(f\" There is no Instructor with CWID: {instcwid}\")\n continue\n self.studict[stucwid].set_courses(coursename,grade)\n self.instdict[instcwid].set_courses(coursename)", "def main():\n file = open_file()\n word_list = format_file(file)\n new_list = add_to_list(word_list)\n sorted_list = msort(new_list)\n print_words(sorted_list)", "def add_student(self, args):\n gfile = writers.GradesFile(args.filename, self.ignore_char)\n\n student = defaultdict(str)\n for col in gfile.table.columns:\n if not col['evalu']:\n student[col['title']] = input(\"Enter student's \" +\n col['title'] + ': ')\n gfile.table.students.append(student)\n\n ofile = open(args.filename, 'w')\n if args.table_format:\n gfile.table_format = args.table_format\n gfile.print_file(file=ofile,\n min_width=self.min_cell_width,\n padding_left=self.padding_left,\n padding_right=self.padding_right,\n precision=self.precision)\n ofile.close()", "def get_students():\n try:\n with open(STUDENTS_FILE, \"r\") as fp:\n return json.load(fp)\n except FileNotFoundError:\n # Returns an empty list if the file does not exist\n return list()", "def __loadFromFile(self):\n try:\n f = open(self.__fName, \"r\")\n except IOError:\n #file not exist\n return\n line = f.readline().strip()\n rez = []\n while line!=\"\":\n attrs = line.split(\";\")\n st = Student(attrs[0], attrs[1], Address(attrs[2], attrs[3], attrs[4]))\n rez.append(st)\n line = f.readline().strip()\n f.close()\n return rez", "def get_moodle_students(filename):\n lines = open(filename).readlines()\n names = []\n for line in lines[1:]:\n fields = line.split(',')\n firstname = fields[0].replace('\"', '').replace(\"'\", '').strip()\n lastname = fields[1].replace('\"', '').replace(\"'\", '').strip()\n name = \"{} {}\".format(firstname, lastname)\n names.append(name)\n return names", "def __storeInFile(self):\r\n with open(self.__fileName,\"w\") as f:\r\n for st in self.__listStudents:\r\n stf=st.getID()+\",\"+st.getName()+'\\n'\r\n f.write(stf)", "def handle_poor_input_file(self, Student, line_list):\n #set nine_five_number and email to be none \n nine_five_number = None\n email = None\n used = list()\n #loop through the list and check if there is a 95 number or an email\n for item in line_list:\n #if there is a 95 number set that to the students id attribute\n if(\"95\" in item):\n nine_five_number = item\n Student.id = item\n used.append(item)\n #if there is an email set the student email attribute (ex. 
check for .com or .edu)\n if(\".com\" in item or \".edu\" in item):\n email = item\n Student.email = email\n used.append(item)\n #removed all used items from the line_list\n for item in used:\n print(item)\n line_list.remove(item)\n #if no email was found set the attribute to unknown \n if(email == None):\n Student.email = \"Unknown\"\n #same for the nine five number \n if(nine_five_number == None):\n Student.id = \"Unknown\"\n #check the remaining items in the list\n if(len(line_list) == 0):\n #if nothing is left set the first and last name to be unknown \n Student.first_name = \"Unknown\"\n Student.last_name = \"Unknown\"\n #if one item is left set that to be the first name\n elif(len(line_list) == 1):\n Student.first_name = line_list[0]\n Student.last_name = \"Unknown\"\n else:\n #otherwise set the first two items in the list to be first or last\n Student.first_name = line_list[0]\n Student.last_name = line_list[1]\n return Student" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function checks for a valid ASIN.
def check_ASIN_validity(self,X):
    if self.check_ASIN == True:
        col = X['ASIN'].copy()
        uniq_col = pd.Series(col.unique())
        mask = (uniq_col.str.match(r'\b[B\d][\dA-Z]{9}\b')) & (uniq_col.str.len()==10)
        inval_ASIN = uniq_col[~mask]
        print(inval_ASIN)
        return inval_ASIN
[ "def check_ASIN_validity(self,X,y=None):\n \n \n if self.check_ASIN == True:\n col = X['ASIN'].copy()\n uniq_col = pd.Series(col.unique())\n mask = (uniq_col.str.match(r'\\b[B\\d][\\dA-Z]{9}\\b')) & (uniq_col.str.len()==10)\n inval_ASIN = uniq_col[~mask]\n print(inval_ASIN)\n return inval_ASIN", "def is_valid_ssin(ssin):\n if ssin_veto(ssin):\n return False\n return True", "def verify_ean(arg):\n\n arg = str(arg)\n if not arg.isdigit():\n return False\n check = arg[-1]\n if ean_digit(arg[:-1]) != check:\n return False\n return True", "def is_valid(vin):\n vin=str(vin).strip()\n if len(vin) != 17:\n return False\n else:\n converted=[]\n vin=vin.upper()\n for i in range(len(vin)):\n converted.insert(i,convert_vin(vin[i]))\n multiplier=[8,7,6,5,4,3,2,10,0,9,8,7,6,5,4,3,2]\n add=0\n for i in range(len(vin)):\n add+=(converted[i]*multiplier[i])\n final= (add%11)\n if final ==10:\n final='X'\n if str(final)==vin[8]:\n return True\n else:\n return False", "def check_alarm_input(alarm_time):\n if len(alarm_time) == 1:\n if alarm_time[0] < 24 and alarm_time[0] >= 0:\n return True\n if len(alarm_time) == 2: \n if alarm_time[0] < 24 and alarm_time[0] >= 0 and alarm_time[1] < 60 and alarm_time[1] >= 0:\n return True\n elif len(alarm_time) == 3: \n if alarm_time[0] < 24 and alarm_time[0] >= 0 and alarm_time[1] < 60 and alarm_time[1] >= 0 and alarm_time[2] < 60 and alarm_time[2] >= 0:\n return True\n return False", "def gstin_check(value):\n pattern = re.compile(GSTIN_REGEX)\n if pattern.match(value) is None:\n raise ValidationError(\n _(\"Invalid GSTIN\"), code=\"invalid_gst_number\"\n )", "def test_asl_quantification_filter_validate_inputs():\n validate_filter_inputs(AslQuantificationFilter, INPUT_VALIDATION_DICT)", "def _valid_seq(self, seq):\n if self.filter_AA and self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if len(seq) >= int(self.minlength) and not forbidden_AAs:\n return True\n elif self.filter_AA and not self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if not forbidden_AAs:\n return True\n elif not self.filter_AA and self.filter_minlength:\n if seq >= int(self.minlength):\n return True\n else:\n return False", "def ssin_validator(ssin):\n msg = ssin_veto(ssin)\n if msg:\n raise ValidationError(msg)", "def is_asn(v):\n try:\n v = int(v)\n return v >= 0\n except ValueError:\n return False", "def valid_visa_format(entrant):\n # looks for visas in entrant file and checks the correspondence to the regular general expression\n for word in entrant:\n if word == \"visa\":\n if re.match('^.{5}-.{5}$', entrant['visa']['code']) is not None:\n return True\n else:\n return False", "def __validate_format(format):\n\n return format.lower() in ['vmf', 'midi', 'xml']", "def validateaddress(self, address):\n return Address.is_valid(address)", "def validate(seq, alphabet='dna'):\n## Source : https://www.biostars.org/p/102/ Giovanni M Dall'Olio\n alphabets = {'dna': re.compile('^[acgtn]*$', re.I), \n 'protein': re.compile('^[acdefghiklmnpqrstvwy]*$', re.I)}\n if alphabets[alphabet].search(seq) is not None:\n return True\n else:\n return False", "def valid_account_number(account_number):\n length = 8\n return (is_integer_string(account_number)\n and len(account_number)==length)", "def validate_payment_account(account):\n if not account or (not len(account) == 20 and not len(account) == 24):\n return False\n\n # We will check control code\n try:\n cc = account[-20:]\n values = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]\n control_cc = control_cs = 0\n for i in range(8):\n 
control_cs += int(cc[i]) * values[i + 2]\n control_cs = 11 - (control_cs % 11)\n\n if control_cs == 11:\n control_cs = 0\n elif control_cs == 10:\n control_cs = 1\n\n for i in range(10, 20):\n control_cc += int(cc[i]) * values[i - 10]\n control_cc = 11 - (control_cc % 11)\n\n if control_cc == 11:\n control_cc = 0\n elif control_cc == 10:\n control_cc = 1\n dc = control_cs + control_cc\n if dc != cc[8:10]:\n return False\n except Exception:\n return False\n\n # If account length == 20 we have to generate iban\n try:\n ccc = int(cc + \"142800\")\n iban = 98 - (ccc % 97)\n except Exception:\n return False\n\n if len(account) != 20 and account[:4] != \"ES{:0>2d}\".format(iban):\n return False\n return True", "def validate_account_number(account):\n try:\n account_key(account)\n return True\n except:\n return False", "def validating(self, parameter, parameter_name):\n\n if not (len(parameter) == 3 and parameter.isalpha()\n and parameter.isupper()):\n print(f'Error in {parameter_name}: '\n f'Invalid IATA-code. IATA-code must '\n 'be three capital letters')\n return False\n\n return super().validating(parameter, parameter_name)", "def parseAbn(abn):\r\n abn=abn.replace(' ','')\r\n if len(abn)<11:\r\n return parseAbn.TOO_SHORT\r\n if len(abn)>11:\r\n return parseAbn.TOO_LONG\r\n if not re.match('[0-9]+$',abn):\r\n return parseAbn.INVALID\r\n if isValidAbn(abn):\r\n return abn\r\n return parseAbn.INVALID" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function checks for a valid ASIN.
def check_ASIN_validity(self,X,y=None):
    if self.check_ASIN == True:
        col = X['ASIN'].copy()
        uniq_col = pd.Series(col.unique())
        mask = (uniq_col.str.match(r'\b[B\d][\dA-Z]{9}\b')) & (uniq_col.str.len()==10)
        inval_ASIN = uniq_col[~mask]
        print(inval_ASIN)
        return inval_ASIN
[ "def check_ASIN_validity(self,X):\n \n if self.check_ASIN == True:\n col = X['ASIN'].copy()\n uniq_col = pd.Series(col.unique())\n mask = (uniq_col.str.match(r'\\b[B\\d][\\dA-Z]{9}\\b')) & (uniq_col.str.len()==10)\n inval_ASIN = uniq_col[~mask]\n print(inval_ASIN)\n return inval_ASIN", "def is_valid_ssin(ssin):\n if ssin_veto(ssin):\n return False\n return True", "def verify_ean(arg):\n\n arg = str(arg)\n if not arg.isdigit():\n return False\n check = arg[-1]\n if ean_digit(arg[:-1]) != check:\n return False\n return True", "def is_valid(vin):\n vin=str(vin).strip()\n if len(vin) != 17:\n return False\n else:\n converted=[]\n vin=vin.upper()\n for i in range(len(vin)):\n converted.insert(i,convert_vin(vin[i]))\n multiplier=[8,7,6,5,4,3,2,10,0,9,8,7,6,5,4,3,2]\n add=0\n for i in range(len(vin)):\n add+=(converted[i]*multiplier[i])\n final= (add%11)\n if final ==10:\n final='X'\n if str(final)==vin[8]:\n return True\n else:\n return False", "def check_alarm_input(alarm_time):\n if len(alarm_time) == 1:\n if alarm_time[0] < 24 and alarm_time[0] >= 0:\n return True\n if len(alarm_time) == 2: \n if alarm_time[0] < 24 and alarm_time[0] >= 0 and alarm_time[1] < 60 and alarm_time[1] >= 0:\n return True\n elif len(alarm_time) == 3: \n if alarm_time[0] < 24 and alarm_time[0] >= 0 and alarm_time[1] < 60 and alarm_time[1] >= 0 and alarm_time[2] < 60 and alarm_time[2] >= 0:\n return True\n return False", "def gstin_check(value):\n pattern = re.compile(GSTIN_REGEX)\n if pattern.match(value) is None:\n raise ValidationError(\n _(\"Invalid GSTIN\"), code=\"invalid_gst_number\"\n )", "def test_asl_quantification_filter_validate_inputs():\n validate_filter_inputs(AslQuantificationFilter, INPUT_VALIDATION_DICT)", "def _valid_seq(self, seq):\n if self.filter_AA and self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if len(seq) >= int(self.minlength) and not forbidden_AAs:\n return True\n elif self.filter_AA and not self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if not forbidden_AAs:\n return True\n elif not self.filter_AA and self.filter_minlength:\n if seq >= int(self.minlength):\n return True\n else:\n return False", "def ssin_validator(ssin):\n msg = ssin_veto(ssin)\n if msg:\n raise ValidationError(msg)", "def is_asn(v):\n try:\n v = int(v)\n return v >= 0\n except ValueError:\n return False", "def valid_visa_format(entrant):\n # looks for visas in entrant file and checks the correspondence to the regular general expression\n for word in entrant:\n if word == \"visa\":\n if re.match('^.{5}-.{5}$', entrant['visa']['code']) is not None:\n return True\n else:\n return False", "def __validate_format(format):\n\n return format.lower() in ['vmf', 'midi', 'xml']", "def validateaddress(self, address):\n return Address.is_valid(address)", "def validate(seq, alphabet='dna'):\n## Source : https://www.biostars.org/p/102/ Giovanni M Dall'Olio\n alphabets = {'dna': re.compile('^[acgtn]*$', re.I), \n 'protein': re.compile('^[acdefghiklmnpqrstvwy]*$', re.I)}\n if alphabets[alphabet].search(seq) is not None:\n return True\n else:\n return False", "def valid_account_number(account_number):\n length = 8\n return (is_integer_string(account_number)\n and len(account_number)==length)", "def validate_payment_account(account):\n if not account or (not len(account) == 20 and not len(account) == 24):\n return False\n\n # We will check control code\n try:\n cc = account[-20:]\n values = [1, 2, 4, 8, 5, 10, 9, 7, 3, 6]\n control_cc = control_cs = 0\n for i in range(8):\n control_cs += 
int(cc[i]) * values[i + 2]\n control_cs = 11 - (control_cs % 11)\n\n if control_cs == 11:\n control_cs = 0\n elif control_cs == 10:\n control_cs = 1\n\n for i in range(10, 20):\n control_cc += int(cc[i]) * values[i - 10]\n control_cc = 11 - (control_cc % 11)\n\n if control_cc == 11:\n control_cc = 0\n elif control_cc == 10:\n control_cc = 1\n dc = control_cs + control_cc\n if dc != cc[8:10]:\n return False\n except Exception:\n return False\n\n # If account length == 20 we have to generate iban\n try:\n ccc = int(cc + \"142800\")\n iban = 98 - (ccc % 97)\n except Exception:\n return False\n\n if len(account) != 20 and account[:4] != \"ES{:0>2d}\".format(iban):\n return False\n return True", "def validate_account_number(account):\n try:\n account_key(account)\n return True\n except:\n return False", "def validating(self, parameter, parameter_name):\n\n if not (len(parameter) == 3 and parameter.isalpha()\n and parameter.isupper()):\n print(f'Error in {parameter_name}: '\n f'Invalid IATA-code. IATA-code must '\n 'be three capital letters')\n return False\n\n return super().validating(parameter, parameter_name)", "def parseAbn(abn):\r\n abn=abn.replace(' ','')\r\n if len(abn)<11:\r\n return parseAbn.TOO_SHORT\r\n if len(abn)>11:\r\n return parseAbn.TOO_LONG\r\n if not re.match('[0-9]+$',abn):\r\n return parseAbn.INVALID\r\n if isValidAbn(abn):\r\n return abn\r\n return parseAbn.INVALID" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a User object from a player file.
def get_player(self, name):
    return User(name)
[ "def load_user():\n assert has_saved_user()\n with open(_user_path(), 'r') as f:\n json_string = f.read()\n return User.from_json(json_string)", "def from_file(file_path: str) -> \"UserData\":\n file_name = path.basename(file_path)\n\n if not file_name.startswith(\"user_\"):\n raise Exception(\"Invalid file name\")\n\n return UserData(path.dirname(file_path), file_name[len(\"_user\"):-len(\".json\")])", "def load_user(username):\n return User.get_by_username(username)", "def load_player_from_file(file_path):\n class_inst = None\n expected_class = 'Player'\n\n mod_name,file_ext = os.path.splitext(os.path.split(file_path)[-1])\n\n if file_ext.lower() == '.py':\n py_mod = imp.load_source(mod_name, file_path)\n\n elif file_ext.lower() == '.pyc':\n py_mod = imp.load_compiled(mod_name, file_path)\n\n if hasattr(py_mod, expected_class):\n class_inst = getattr(py_mod, expected_class)()\n\n return class_inst", "def load_user(user_id):\n u = mongo.db.users.find_one({\"user_id\": user_id})\n if not u:\n return None\n return User(u)", "def extract_players_from_file(players_file):\n reader = csv.DictReader(players_file)\n players = []\n for row in reader:\n players.append(row)\n return players", "def loadUsername(securityID: str, filePath: str = '../userInfo/singleValue/infoData/usernameInfo.csv') -> object:\n if path.exists(filePath):\n return SingleValue(securityID, filePath)\n else:\n print('Data File Does not exist for username... Please call createNewUsername()')\n return None", "def get_seeker_by_username(db_file, username):\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM seekers_personal WHERE username = ?;\",(username,))\n row = cur.fetchone()\n conn.commit()\n cur.close()\n conn.close()\n if row == None:\n return None\n id, fname, lname, birth_date, phone, email, city, education, hobbies, skills, username, password, bio = row\n user = Seeker(fname, lname, birth_date, phone, email, city, education, hobbies, skills, username, password, bio, id)\n return user", "def get_user(ctx):\n with open('lastfm_user.json') as f:\n lastfm_usernames = json.load(f)\n member_username = lastfm_usernames[str(ctx.author)]\n return member_username", "def load_players_from_file(self, filename):\n tournament.deletePlayers(self.database,self.cursor)\n try:\n players = open(filename, \"r\")\n except IOError, error:\n raise IOError(error.message)\n else:\n for line in players:\n player = line.strip('\\n')\n tournament.registerPlayer(player, self.database, self.cursor)\n finally:\n players.close()", "def get_user(fields):\n return User(fields['username'])", "def get_stored_username() :\n filename = 'username.json'\n try :\n with open(filename) as f :\n username = json.load(f)\n except FileNotFoundError :\n return None\n else :\n return username", "def load(cls, data):\n\n player = super().load(data)\n player.name = data[\"name\"]\n\n player.user_id = data[\"user_id\"]\n player.resources = {\n Resource.retrieve(resource): value\n for resource, value in data[\"resources\"].items()\n }\n\n player.money = data[\"money\"]\n\n return player", "def load_users(path):\r\n with io.open(path + 'files/users.json', 'r', encoding='utf8') as f:\r\n list_dict = json.load(f)\r\n\r\n return [User(a['name'], a['city'], a['country'], a['radius'], coords=a['coords']) for a in list_dict]", "def user_to_player(cls, u, game=None):\n if u.is_anonymous:\n raise Player.DoesNotExist\n\n game = game or Game.nearest_game()\n return cls.objects.get(game=game, user=u)", "def get_player(self, 
name):\n\t\t\n\t\tname = \"\".join(ch.lower() for ch in name if ch not in set(string.punctuation)).capitalize()\n\t\titem = self.db.get(name)\n\t\t\n\t\tif item.value:\n\t\t\titem.value = data.Object(item.value)\n\t\telse:\n\t\t\tplayer = data.Object()\n\t\t\t\n\t\t\tplayer.name = name\n\t\t\tplayer.title = \"\"\n\t\t\tplayer.full_name = name\n\t\t\tplayer.karma = 0\n\t\t\tplayer.alignment = ALIGNMENT_NEUTRAL\n\t\t\tplayer.unaligned_name = random.choice(UNALIGNED_NAMES)\n\t\t\tplayer.damage = random.choice(DAMAGE_TYPES)\n\t\t\tplayer.next_karma = 0\n\t\t\tplayer.next_fight = 0\n\t\t\tplayer.wins = 0\n\t\t\tplayer.losses = 0\n\t\t\tplayer.ties = 0\n\t\t\t\n\t\t\titem.value = player\n\t\t\titem.commit()\n\t\t\n\t\treturn item", "def extract_player(user):\n\n player = Player.objects.get(user=user)\n\n serializer = league_serializer()\n user_data = serializer.serialize(\n [user], \n fields=(\n 'username',\n 'first_name',\n 'last_name',\n 'email'\n )\n )\n player_data = serializer.serialize(\n [player], \n fields=(\n 'age',\n 'position',\n 'team',\n 'league',\n 'about'\n )\n )\n\n # Merge datasets\n user_data[0].update(player_data[0])\n\n # Swap pk's for league and team for names\n try:\n user_data[0]['league'] = player.league.name\n user_data[0]['team'] = player.team.name\n except:\n pass\n\n return user_data", "def from_protobuf(obj: ByteString) -> \"User\":\n user_message = UserMessage()\n user_message.ParseFromString(obj)\n\n return User(\n type=user_message.entity.type,\n id=user_message.entity.id,\n first_name=user_message.first_name if user_message.HasField(\"first_name\") else None,\n last_name=user_message.last_name if user_message.HasField(\"last_name\") else None,\n email=user_message.email if user_message.HasField(\"email\") else None,\n username=user_message.username,\n password=user_message.password,\n external_reference=user_message.external_reference if user_message.HasField(\"external_reference\") else None,\n active=user_message.active\n )", "def get_or_make_user(self, aga_id):\n while aga_id in self._pin_changes:\n aga_id = self._pin_changes[aga_id]\n if aga_id in self._users:\n return self._users[aga_id]\n else:\n user = User(aga_id=aga_id, email=uuid4(), fake=True)\n\n db.session.add(user)\n db.session.commit()\n\n player = Player(id=aga_id, name='', user_id=user.id, server_id=self.server_id, token=uuid4())\n db.session.add(player)\n\n self._users[aga_id] = user\n return user" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...]
def images_to_levels(target, num_level_anchors):
    target = torch.stack(target, 0)
    level_targets = []
    start = 0
    for n in num_level_anchors:
        end = start + n
        level_targets.append(target[:, start:end])
        start = end
    return level_targets
[ "def images_to_levels(target, num_levels):\n target = stack_boxes(target, 0)\n level_targets = []\n start = 0\n for n in num_levels:\n end = start + n\n # level_targets.append(target[:, start:end].squeeze(0))\n level_targets.append(target[:, start:end])\n start = end\n return level_targets", "def name_targets(img, box_targets, cls_targets):\n return img, {'regression': box_targets, 'classification': cls_targets}", "def make_image_features_targets(data, projection):\n \n print(\"Make image features and targets ...\")\n \n # Make numpy sets\n features = np.empty((len(data), 128, 128, 3), dtype=np.uint8)\n targets = np.empty((len(data),), dtype=np.uint8)\n\n for i, event in enumerate(data):\n e = event[0]\n if e is None:\n print(\"Event, \", i, \"is None:\", e)\n if projection == 'zy':\n x = e[:, Z_COL].flatten()\n z = e[:, Y_COL].flatten()\n c = e[:, CHARGE_COL].flatten()\n elif projection == 'xy':\n x = e[:, X_COL].flatten()\n z = e[:, Y_COL].flatten()\n c = e[:, CHARGE_COL].flatten()\n else:\n raise ValueError('Invalid projection value.')\n fig = plt.figure(figsize=(1, 1), dpi=128)\n if projection == 'zy':\n plt.xlim(0.0, 1250.0)\n elif projection == 'xy':\n plt.xlim(-275.0, 275.0)\n plt.ylim((-275.0, 275.0))\n plt.axis('off')\n plt.scatter(x, z, s=0.6, c=c, cmap='Greys')\n fig.canvas.draw()\n image = np.array(fig.canvas.renderer._renderer, dtype=np.uint8)\n image = np.delete(image, 3, axis=2)\n features[i] = image\n targets[i] = event[1]\n plt.close()\n return features, targets", "def computeFeatures(img, features=...) -> features:\n ...", "def upsampleMultioutput(self, img, imgs_new, scale_factors, node_names) -> None:\n ...", "def transform_img(x, y, path, cla):\n #Scale as in LeCun\n scaler = MinMaxScaler(feature_range=(-0.1, 1.175))\n all_img = os.listdir(path)\n #List structure so I can .append\n aux = x.tolist()\n for img in all_img:\n if int(img[0:2]) == cla:\n image_path = path + '/' + img\n #prepare parameters for randomization\n intensity = 0.75\n image_read = cv2.imread(image_path, 0) #read in greyscale\n resize = cv2.resize(image_read, (32, 32), interpolation=cv2.INTER_CUBIC)\n image_shape = resize.shape\n image_size = image_shape[0]\n d = image_size * 0.3 * intensity\n #With these 8 parameters we can perform a transofrmation of the image in such a way\n #that the image is different enough from the original but not too different, since\n #we should be able to still recognize the class in the transformed image.\n tl_top = random.uniform(-d, d) # Top left corner, top margin\n tl_left = random.uniform(-d, d) # Top left corner, left margin\n bl_bottom = random.uniform(-d, d) # Bottom left corner, bottom margin\n bl_left = random.uniform(-d, d) # Bottom left corner, left margin\n tr_top = random.uniform(-d, d) # Top right corner, top margin\n tr_right = random.uniform(-d, d) # Top right corner, right margin\n br_bottom = random.uniform(-d, d) # Bottom right corner, bottom margin\n br_right = random.uniform(-d, d) # Bottom right corner, right margin\n transform = ProjectiveTransform()\n transform.estimate(np.array((\n (tl_left, tl_top),\n (bl_left, image_size - bl_bottom),\n (image_size - br_right, image_size - br_bottom),\n (image_size - tr_right, tr_top)\n )), np.array((\n (0, 0),\n (0, image_size),\n (image_size, image_size),\n (image_size, 0)\n )))\n warped = warp(image_read,\n transform, output_shape=(image_size, image_size), order = 1, mode = 'edge')\n X_new = scaler.fit_transform(warped)\n warped = np.reshape(X_new, (32, 32, 1))\n aux.append(warped)\n y.append(cla)\n return 
np.array(aux), y", "def transform_tiff_stack(\n all_classes, slice_no, reduced_classes_savepath, reduced_classes_rgb_savepath\n):\n image_stack = Image.open(all_classes)\n class_images = []\n rgb_images = []\n for i in tqdm(range(slice_no)):\n image_stack.seek(i)\n img = np.array(image_stack)\n img[img > 1] = 2\n class_images.append(Image.fromarray(img))\n rgb_images.append(class_to_rgb(img))\n\n class_images[0].save(\n reduced_classes_savepath, save_all=True, append_images=class_images[1:]\n )\n rgb_images[0].save(\n reduced_classes_rgb_savepath, save_all=True, append_images=rgb_images[1:]\n )", "def ExtractTargetAndSave(labels, images, path):\r\n i = 1\r\n for x in labels:\r\n index = x[0]-1\r\n crop = images[index][x[1]:x[1]+x[3], x[2]:x[2]+x[4]]\r\n face = path + '%d'%(i)+ '.jpg'\r\n io.imsave(face, crop) \r\n i = i+1", "def connections_to_targets(self,img,targets,labels=None):\r\n\r\n stat_map = self.get_paths(img)\r\n stat_data = stat_map.get_data()\r\n target_data = nib.load(targets).get_data().round()\r\n connections = [stat_data[np.where(target_data==i).sum()] for i in range(1,target_data.max() + 1)]\r\n df = pd.DataFrame()\r\n df['connections'] = pd.Series(connections)\r\n \r\n if labels is not None:\r\n df['labels'] = pd.Series(labels)\r\n \r\n return df", "def data_processing(labels_df, x_train, y_train, label_map):\n subset = str()\n\n if labels_df.shape[0] == 32384 or labels_df.shape[0] == 3120 or labels_df.shape[0] == 16 or labels_df.shape[0] == 64:\n batch_size = 8 ### Modified for smaller images\n subset = \"train\"\n elif labels_df.shape[0] == 8080 or labels_df.shape[0] == 1920 or labels_df.shape[0] == 8:\n batch_size = 4\n subset = \"valid\"\n elif labels_df.shape[0] == 40669:\n batch_size = 4\n subset = \"test\" \n elif labels_df.shape[0] == 20522:\n batch_size = 2\n subset = \"test-add\" \n else:\n raise ValueError('The dataset format is different than expected')\n\n label_map = label_map\n# images_size = (256, 256)\n images_size = (64, 64)\n\n # Iterate through batches of rows of the dataset\n for i in range(labels_df.shape[0]//batch_size):\n \n temp_labels_df = labels_df.iloc[i*batch_size:((i+1)*batch_size) , :]\n \n # Iterate through the samples batch and create x and y for training\n for f, tags in tqdm(temp_labels_df.values, miniters=100):\n # load a .tif file\n img = io.imread('data/{}-jpg/{}.jpg'.format(subset,f)) ######## Modified for train jpg folder\n img = transform.resize(img, images_size)\n\n### Removed for use of JPEG files:\n# # Add NDVI layer // Removed for usage of JPG files\n# np.seterr(all='warn') # divide by zero, NaN values\n# img_ndvi = np.expand_dims((img[:, :, 3] - img[:, :, 2]) / (img[:, :, 3] + img[:, :, 2]), axis=2) # (NIR - RED) / (NIR + RED)\n# img = np.concatenate((img, img_ndvi), axis=2)\n \n # Create the target array for an image\n targets = np.zeros(17)\n for t in tags.split(' '):\n targets[label_map[t]] = 1 \n\n x_train.append(img)\n y_train.append(targets)\n\n # Format values\n y_train = np.array(y_train, np.uint8)\n x_train = np.array(x_train, np.float16) / 255.\n\n### Removed for use of JPEG files: \n# x_train = np.array(x_train, np.float16) / 65536.\n#### x_train -= 0.5\n#### x_train *= 2 \n\n\n # Save subsets in npz files\n np.save('data/{}-npy/npdatasetX{}'.format(subset, i), x_train)\n x_train = []\n np.save('data/{}-npy/npdatasetY{}'.format(subset, i), y_train)\n y_train = []\n #print \"{} data saved\".format(subset)", "def _extract_features(self, preprocessed_inputs): \n preprocessed_inputs = 
shape_utils.check_min_image_dim(33, preprocessed_inputs)\n image_features = self.net(ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))\n layouts = {self._used_nodes[i]: image_features[i] for i, x in enumerate(self._used_nodes) if x}\n feature_maps = self._feature_map_generator(layouts)\n if self._additional_layer_depth:\n final_feature_map = []\n for idx, feature in enumerate(feature_maps.values()):\n feature = tf.keras.layers.Conv2D(filters=self._additional_layer_depth,\n kernel_size=1,\n strides=[1, 1],\n use_bias=True,\n data_format=self._data_format,\n name='conv1x1_'+str(idx))(feature)\n feature = tf.keras.layers.BatchNormalization()(feature, training=self._is_training)\n feature = tf.keras.layers.ReLU(max_value=6)(feature)\n final_feature_map.append(feature)\n return final_feature_map\n else:\n return feature_maps.values() \n \n # with tf.variable_scope(\"EfficientNetFeatureExtractor\", reuse=tf.AUTO_REUSE):\n # # architecture \n # _, endpoints = build_model_base(preprocessed_inputs, self._network_name, training=self._is_training)\n # arch_feature_nodes = [x for x in self._feature_map_layout[\"from_layer\"] if x]\n # arch_features = {x: endpoints[x] for x in arch_feature_nodes}\n # feature_maps = self._feature_map_generator(arch_features)\n # if self._additional_layer_depth:\n # final_feature_map = []\n # for idx, feature in enumerate(feature_maps.values()):\n # feature = tf.keras.layers.Conv2D(filters=self._additional_layer_depth,\n # kernel_size=1,\n # strides=[1, 1],\n # use_bias=True,\n # data_format=self._data_format,\n # name='conv1x1_'+str(idx))(feature)\n # feature = tf.keras.layers.BatchNormalization()(feature, training=self._is_training)\n # feature = tf.keras.layers.ReLU(max_value=6)(feature)\n # final_feature_map.append(feature)\n # return final_feature_map\n # else:\n # return feature_maps ", "def levels_to_images(mlvl_tensor: List[torch.Tensor]) -> List[torch.Tensor]:\n batch_size = mlvl_tensor[0].size(0)\n batch_list = [[] for _ in range(batch_size)]\n channels = mlvl_tensor[0].size(1)\n for t in mlvl_tensor:\n t = t.permute(0, 2, 3, 1)\n t = t.view(batch_size, -1, channels).contiguous()\n for img in range(batch_size):\n batch_list[img].append(t[img])\n return [torch.cat(item, 0) for item in batch_list]", "def multiclass_dataset(train_files,test_files,\n label=0.9,bias=1.,\n scale_min=0., scale_max=1.,scale_prop=\"local\",\n feature_key=\"features\",target_key=\"target_midi\"):\n # read all features\n features = []\n targets = []\n feature = []\n target = []\n for file in train_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n feature = []\n target = []\n for file in test_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n \n # make data preprocessing\n data_preprocessing(features,bias,scale_min,scale_max,0,scale_prop)\n\n # make targets\n \n # check how many pitch classes we have\n all_keys = []\n for el in targets[0]:\n all_keys += el.tolist()\n for el in targets[1]:\n all_keys += el.tolist()\n classes = list(set(all_keys))\n classes.sort()\n print \"classes:\", classes\n print \"nr classes:\",len(classes)\n \n # make (binary) target data\n cl_targets = []\n targ = []\n for piece in targets[0]:\n 
target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n targ.append(target)\n cl_targets.append(targ)\n targ = []\n for piece in targets[1]:\n target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n targ.append(target)\n cl_targets.append(targ)\n \n # make train and test data\n trainin = features[0]\n testin = features[1]\n trainout = cl_targets[0]\n testout = cl_targets[1]\n\n return trainin, trainout, testin, testout", "def auto_levels(img):\n\n out_levels = (0, 255) if img.dtype == np.uint8 else (0.0, 1.0)\n return adjust_levels(img, get_image_minmax(img), out_levels)", "def image_features(img, model):\n features = model.predict(img)\n return features", "def multiclass_anytime2(data_files,train_range,test_range,\n label=0.9,bias=1.,\n scale_min=0., scale_max=1.,scale_prop=\"local\",\n feature_key=\"features\",target_key=\"target_midi\"):\n # read features and targets\n features = []\n targets = []\n feature = []\n target = []\n for file in data_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n \n # make data preprocessing\n data_preprocessing(features,bias,scale_min,scale_max,0,scale_prop)\n\n # make targets\n \n # check how many pitch classes we have\n all_keys = []\n for el in targets[0]:\n all_keys += el.tolist()\n classes = list(set(all_keys))\n classes.sort()\n print \"classes:\", classes\n print \"nr classes:\",len(classes)\n\n # make (binary) target data\n cl_targets = []\n for piece in targets[0]:\n target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n cl_targets.append(target)\n \n # make train and test data\n trainin = []\n testin = []\n trainout = []\n testout = []\n nr_ex = len(train_range)\n for n in range(nr_ex):\n trainin.append( features[0][n][ train_range[n][0]:train_range[n][1] ] )\n trainout.append( cl_targets[n][ train_range[n][0]:train_range[n][1] ] )\n testin.append( features[0][n][ test_range[n][0]:test_range[n][1] ] )\n testout.append( cl_targets[n][ test_range[n][0]:test_range[n][1] ] )\n \n return trainin, trainout, testin, testout", "def multiclass_dataset_enc(train_files,test_files,\n label=0.9,bias=1.,\n scale_min=0., scale_max=1.,scale_prop=\"local\",\n feature_key=\"features\",target_key=\"target_chromas\"):\n # read all features\n features = []\n targets = []\n feature = []\n target = []\n for file in train_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n feature = []\n target = []\n for file in test_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n \n # make data preprocessing\n data_preprocessing(features,bias,scale_min,scale_max,0,scale_prop)\n\n # make targets\n mins,maxs = calc_minmax(targets[0])\n mins = mins.min()\n maxs = maxs.max()\n for m in range(2):\n for n in range(len(targets[0])):\n targets[m][n][np.where(targets[m][n] == mins)] = 
(-1)*label\n targets[m][n][np.where(targets[m][n] == maxs)] = label\n \n # make train and test data\n trainin = features[0]\n testin = features[1]\n trainout = targets[0]\n testout = targets[1]\n\n return trainin, trainout, testin, testout", "def extract_img_features(img_paths, model, device): \n start = time()\n img_features = []\n\n for image_path in img_paths:\n img_features.append(\n encode_image(model, image_path, device).cpu().data.numpy()\n )\n \n print(f\"Extracting image features took: {hms_string(time()-start)}\")\n\n return img_features", "def create_classification_targets(groundtruth_labels, match):\n return match.gather_based_on_match(\n groundtruth_labels,\n unmatched_value=tf.constant([1,0], tf.float32),\n ignored_value=tf.constant([0,0], tf.float32))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dynamically create a Mock subclass that implements the given Zope interface class.
def create_interface_mock(interface_class): # the init method, automatically specifying the interface methods def init(self, *args, **kwargs): MagicMock.__init__(self, spec=interface_class.names(), *args, **kwargs) # we derive the sub class name from the interface name name = interface_class.__name__ + "Mock" # create the class object and provide the init method klass = types.TypeType(name, (MagicMock, ), {"__init__": init}) # the new class should implement the interface classImplements(klass, interface_class) # make the class available to unit tests return {name:klass} # globals()[name] = klass
[ "def test_set_interface():\n from .test_interface_base import Concrete\n\n e = Experiment()\n inst = Concrete()\n e.interface = inst\n assert_equal(e.interface, inst)", "def wrap_interface(self):\n api = TestAPI()\n module = Bar()\n wrapped = api.wrap_interface(module.foo)\n self.assertEqual(wrapped, module.foo)", "def interface(comp_cls):\n class MyInterface(Interface):\n pass\n MyInterface.__name__ = 'I' + comp_cls.__name__\n return MyInterface", "def test_zope35(self):\r\n with SetAsideModule(\"zope\"):\r\n self.install((3, 5))\r\n from zope.interface import Interface, implementer\r\n class IDummy(Interface):\r\n pass\r\n try:\r\n @implementer(IDummy)\r\n class Dummy(object):\r\n pass\r\n except TypeError as exc:\r\n self.assertEqual(\r\n \"Can't use implementer with classes. \"\r\n \"Use one of the class-declaration functions instead.\",\r\n str(exc))", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .memorial2poligono import MemorialDescrPoligono\n return MemorialDescrPoligono(iface)", "def test_newZopeInterface(self):\r\n with SetAsideModule(\"zope\"):\r\n _install(_zope36)\r\n self.assertEqual(None, _checkRequirements())", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .qgis_acoustics import QGISAcoustics\n return QGISAcoustics(iface)", "def test_oneDecorator(self):\n @empowerment(TestInterface)\n class TI(Item):\n pass\n\n self.assertEqual(TI()._getPowerupInterfaces(), [(TestInterface, 0)])\n self.assertTrue(TestInterface.implementedBy(TI))", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .ZonalStatsForMultipleRaster import ZonalStatsForMultipleRaster\n return ZonalStatsForMultipleRaster(iface)", "def addInterface(interface): #@NoSelf", "def test_plugin_sets_interface_type():\n from .test_interface_fork import Fork\n\n plugin = \"hydrotrend\"\n e = Experiment(plugin=plugin)\n assert_is_instance(e.interface, Fork)", "def test_component_sets_interface_type():\n from .test_interface_fork import Fork\n\n component = \"hydrotrend\"\n e = Experiment(component=component)\n assert_is_instance(e.interface, Fork)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .CreateTimeTable import timetablecreater\n return timetablecreater(iface)", "def test_set_interface_fails_if_not_instance():\n e = Experiment()\n answer = 42\n e.interface = answer", "def registerInterface(interface): #@NoSelf", "def make_interface(backend):\n return contract_interface.ContractInterface(\n {\"path\": contract_path, \"ctor\": [genesis, m, k]},\n backend=backend,\n profiler=profiler,\n )", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .avaframeConnector import AvaFrameConnectorPlugin\n return AvaFrameConnectorPlugin()", "def setUpClass(cls):\n\n class Child(BaseAPITestCase):\n \"\"\"An empty child class.\"\"\"\n\n pass\n\n with mock.patch.object(config, \"get_config\"):\n Child.setUpClass()\n for i in range(random.randint(1, 100)):\n Child.resources.add(i)\n\n # Make class available to test methods\n cls.child = Child", "def _create_wrapper(cls_spec, element_info, myself):\n # only use the meta class to find the wrapper for BaseWrapper\n # so allow users to force the wrapper if they want\n if cls_spec != myself:\n obj = object.__new__(cls_spec)\n obj.__init__(element_info)\n return obj\n\n new_class = cls_spec.find_wrapper(element_info)\n obj = object.__new__(new_class)\n\n obj.__init__(element_info)\n\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
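A minimal usage sketch for the create_interface_mock document above, assuming that function is in scope and that zope.interface and the mock library are installed (the original relies on types.TypeType, which exists only on Python 2; type() is the Python 3 spelling). The IPrinter interface and its render method are hypothetical names chosen purely for illustration.

from zope.interface import Interface

class IPrinter(Interface):
    def render(text):
        """Render text to the device."""

# The factory returns a {name: class} mapping, here {'IPrinterMock': <class 'IPrinterMock'>}
PrinterMock = create_interface_mock(IPrinter)['IPrinterMock']

printer = PrinterMock()                     # a MagicMock restricted to IPrinter's attribute names
assert IPrinter.providedBy(printer)         # classImplements() made instances provide the interface
printer.render('hello')                     # recorded like any MagicMock call
printer.render.assert_called_once_with('hello')

Because the mock is built with spec=interface_class.names(), accessing an attribute that the interface does not declare raises AttributeError, which is what makes the generated class useful in unit tests.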
Determine default machine folder. Return str.
def get_machine_folder(): properties = subprocess.check_output(['VBoxManage', 'list', 'systemproperties']) prop_name = "Default machine folder:" skip = len(prop_name) machine_folder = '' for line in properties.decode().split('\n'): if prop_name in line: machine_folder = line[skip:].lstrip() break assert machine_folder != '', "Default machine folder is unknown" return machine_folder
[ "def default_machine_folder(self):\n ret = self._get_attr(\"defaultMachineFolder\")\n return ret", "def _get_default_path(self):\n #return os.path.join(cfg.DATA_DIR, 'SNUBH_BUS')\n return cfg.DATA_DIR", "def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_im_path)", "def get_default_path(self, default_path=''):\n return default_path if default_path else os.path.dirname(self.last_path)", "def default_folder(settings):\r\n try:\r\n folder = settings.get('Folder')\r\n if not folder:\r\n folder = os.getcwd()\r\n except KeyError:\r\n settings.new('Folder', os.getcwd())\r\n folder = settings.get('Folder')\r\n return folder", "def _get_default_path(self):\n return os.path.join('/mnt/saturn/datasets/MSCOCO');", "def getDefaultOutputPath(self):\n return self.session.request('bootcdbuilder/defaults')", "def _get_initialDirectory(self) -> \"std::string\" :\n return _core.FolderDialog__get_initialDirectory(self)", "def _output_directory_default(self):\n return os.getcwd()", "def get_base_directory() -> str:\n return SO5CGConfig.base_directory \\\n if SO5CGConfig.base_directory is not None \\\n else expanduser(SO5CGConfig.default_base_directory)", "def get_minecraft_directory() -> str:\n if platform.system() == \"Windows\":\n return os.path.join(os.getenv(\"APPDATA\", os.path.join(pathlib.Path.home(), \"AppData\", \"Roaming\")), \".minecraft\")\n elif platform.system() == \"Darwin\":\n return os.path.join(str(pathlib.Path.home()), \"Library\", \"Application Support\", \"minecraft\")\n else:\n return os.path.join(str(pathlib.Path.home()), \".minecraft\")", "def platform_root(self):\n return os.getcwd()", "def _get_default_data_dir_name():\n return _get_path(DATA_DIR)", "def setting_default_out_dir(self):\n root_dir = Path.cwd() # Setting root directory.\n\n data_dir = root_dir / \"data\" / \"makeup_splits\" # Setting data directory.\n\n return data_dir", "def get_current_model_folder(self):\n model_folder = \"Domain\"\n model_folder_list = self.get_model_folders()\n if model_folder_list:\n model_folder = model_folder_list[-1]\n return model_folder", "def home_folder(self):\n ret = self._get_attr(\"homeFolder\")\n return ret", "def site_creation_default_managed_path(self):\n return self.properties.get(\"siteCreationDefaultManagedPath\", None)", "def get_workdir(self, default=None):\n return getnattr(self._raw, [\"settings\", \"workdir\"], default)", "def users_folder(self):\n folder = self.random.choice(FOLDERS)\n user = self.user()\n for platform in PLATFORMS:\n if self.platform == PLATFORMS[platform]['name']:\n path_separator = PLATFORMS[platform]['path_separator']\n users_folder = (user + '{}' + folder).format(path_separator)\n return users_folder" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for VM using VBoxManage. If it exists, return True. Else return False
def _checkreg(self): retval = True try: with open('/dev/null') as devnull: subprocess.check_call(['VBoxManage', 'showvminfo', self.name], stdout=devnull, stderr=devnull ) except subprocess.CalledProcessError: retval = False return retval
[ "def is_in_virtualbox():\n if not isfile(__VIRT_WHAT) or not access(__VIRT_WHAT, X_OK):\n raise IOError(\"virt-what not available\")\n try:\n return subprocess.check_output([\"sudo\", \"-n\", __VIRT_WHAT]).split('\\n')[0:2] == __VIRT_WHAT_VIRTUALBOX_WITH_KVM\n except subprocess.CalledProcessError as e:\n raise IOError(\"virt-what failed execution with {}\".format(e))", "def does_vm_exist_on_provider(self):\n return self.provider_crud.get_mgmt_system().does_vm_exist(self.name)", "def checkvm(self):\n if self._checkreg() or self._checkfiles():\n err = \"{} already exist!\".format(self.name)\n raise VirtualMachineExistsError(err)\n return 0", "def vm_check_onoff(vmname: str):\n status = subprocess.run('virsh --connect qemu:///system -q list | grep -i \"{0}\"'.format(vmname), shell=True, check=False, stdout=subprocess.DEVNULL).returncode\n return bool(status == 0)", "def host_is_vmware():\n [rv, stdout, stderr] = run_command([constants.CMD_LSPCI], retval=runcommand.FAIL)\n r = re.compile(r'^.*?vmware.*?$')\n for l in stdout.split('\\n'):\n t = l.lower()\n if r.match(t):\n return True\n return False", "def IsInstalled(vm):\n resp, _ = vm.RemoteCommand('command -v docker', ignore_failure=True)\n return bool(resp.rstrip())", "def isRegistered(self):\n from VirtualBoxException import VirtualBoxObjectNotFoundException\n try:\n VirtualMachine.get(self.id)\n registered = True\n except VirtualBoxObjectNotFoundException, e:\n registered = False\n except Exception, e:\n raise\n return registered", "def does_vm_exist_in_cfme(self):\n try:\n self.find_quadicon()\n return True\n except VmNotFound:\n return False", "def startup(self):\n\n # Do not launch the virtual machine\n if not self.browser_config.get('launch', False):\n return True\n\n self.info_log(\"Starting up...\")\n\n try:\n vm_already_running_cmd = [\n \"VBoxManage\",\n \"showvminfo\",\n self.browser_config.get('vbname'),\n \"--machinereadable\",\n \"|\",\n \"grep\",\n \"VMState=\",\n \"|\",\n \"cut\",\n \"-d'='\",\n \"-f2\"\n ]\n\n output = subprocess.check_output(\n ' '.join(vm_already_running_cmd),\n stderr=subprocess.STDOUT,\n shell=True\n ).decode('utf').strip()\n\n print(\n \"Is vm already running output: {output}\"\n .format(output=output)\n )\n\n if output.find('running') != -1:\n return True\n\n # Cleanup the vbox guestproperty variable\n subprocess.call([\n 'VBoxManage',\n 'guestproperty',\n 'delete',\n self.browser_config.get('vbname'),\n 'wait_until_ready'\n ])\n subprocess.call([\n 'VBoxManage',\n 'guestproperty',\n 'delete',\n self.browser_config.get('vbname'),\n 'hub_ip'\n ])\n\n startvm = [\n \"VBoxManage\",\n \"startvm\",\n \"'{vbname}'\"\n .format(\n vbname=self.browser_config.get('vbname')\n ),\n \"--type\",\n self.browser_config.get('vbox_type', 'gui')\n ]\n\n out = subprocess.check_output(\n ' '.join(startvm),\n stderr=subprocess.STDOUT,\n shell=True\n )\n self.info_log('VBoxManage output: {out}'.format(out=out))\n\n instance_ready = False\n # TODO should be configurable\n timeout = 60\n\n self.info_log('Waiting for instance to start...')\n\n for i in range(timeout):\n getproperty = [\n 'VBoxManage',\n 'guestproperty',\n 'get',\n self.browser_config.get('vbname'),\n 'wait_until_ready'\n ]\n output = subprocess.check_output(\n ' '.join(getproperty),\n stderr=subprocess.STDOUT,\n shell=True\n ).decode('utf').strip()\n self.info_log(\n 'VBoxManage guestproperty output: {output}'\n .format(output=output)\n )\n\n if output.find('ready') != -1:\n instance_ready = True\n break\n\n sleep(1)\n\n sleep(3)\n if instance_ready:\n 
self.info_log('[Done] Instance ready...')\n else:\n raise Exception(\"Timeout error: the virtualbox machine is still not ready.\") # noqa\n\n # HUB IP\n hub_ip = ni.ifaddresses('en0')[2][0]['addr']\n\n self.info_log(\"Hub ip: %s\" % hub_ip)\n\n # Start selenium on the node\n # LINUX\n if self.browser_config.get('platform').lower() == \"linux\":\n\n self.info_log('Starting the selenium node server')\n\n # Update the hub_ip browser config\n self.browser_config.config['hub_ip'] = hub_ip\n\n command = self.browser_config.get(\n \"selenium_command\"\n ).format(**self.browser_config.config)\n self.execute_command(command)\n\n # WINDOWS\n elif self.browser_config.get('platform').lower() == \"windows\":\n\n self.info_log(\"Setting the guest property in Windows\")\n\n # user_session.machine.set_guest_property(\n # \"hub_ip\", \"%s:%s\" % (hub_ip, '4444'), ''\n # )\n\n return True\n\n except Exception as e:\n self.error_log('Exception: %s' % e)\n raise", "def vagrant_is_active(self):\r\n\r\n return active_view().settings().get('vagrant_environment') is not None", "def is_vboot():\n if os.path.isfile(\"/usr/local/bin/vboot-util\"):\n return True\n return False", "def can_run():\n # Is it GCE VM?\n try:\n get_metadata('id')\n except: # pylint: disable=bare-except\n log.error('Please run from a GCE VM.')\n return False\n\n # Running as sudo?\n if os.geteuid() != 0:\n log.error('Requires sudo access.')\n return False\n\n return True", "def get_wait_for_vm(_, port):\n return not (port.vm.purged)", "def verify_ssh(vm_name):\n host_resource = helpers.get_vm_resource(vm_name)\n return host_resource.executor().is_connective()", "def _is_running(self):\n try:\n p = subprocess.Popen([self.vmware.get(\"path\"), \"-T\", \"ws\", \"list\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output, error = p.communicate()\n output = output.decode(\"utf-8\")\n except OSError as e:\n print(\"Unable to check running status for %s. 
Reason: %s\" % (self.vmx_path, e))\n else:\n if output:\n output_lines = output.splitlines()\n print(output_lines)\n if self.vmx_path in output_lines:\n print(\"Found the snapshots name is %s\" % self.vmx_path)\n return True\n else:\n print(\"Doesn't has the correct snapshot setting\")\n return False\n else:\n return False", "def get_oem_remote_kvm_available(self):\n return False", "def does_vm_pool_exist(vmpool_name):\n if get_vm_pool_object(vmpool_name) is None:\n return False\n return True", "def isAVirtualMachine(self):\n\n status = False\n\n # if we are running inside a VM, then linux will put 'hypervisor' in cpuinfo\n with open(\"/proc/cpuinfo\", \"r\") as fd:\n lines = fd.readlines()\n for line in lines:\n if \"hypervisor\" in line:\n status = True\n break\n\n return status", "def _verify(vm_):\n log.info(\"Verifying credentials for %s\", vm_[\"name\"])\n\n win_installer = config.get_cloud_config_value(\"win_installer\", vm_, __opts__)\n\n if win_installer:\n\n log.debug(\"Testing Windows authentication method for %s\", vm_[\"name\"])\n\n if not HAS_SMB:\n log.error(\"smbprotocol library not found\")\n return False\n\n # Test Windows connection\n kwargs = {\n \"host\": vm_[\"ssh_host\"],\n \"username\": config.get_cloud_config_value(\n \"win_username\", vm_, __opts__, default=\"Administrator\"\n ),\n \"password\": config.get_cloud_config_value(\n \"win_password\", vm_, __opts__, default=\"\"\n ),\n }\n\n # Test SMB connection\n try:\n log.debug(\"Testing SMB protocol for %s\", vm_[\"name\"])\n if __utils__[\"smb.get_conn\"](**kwargs) is False:\n return False\n except (smbSessionError) as exc:\n log.error(\"Exception: %s\", exc)\n return False\n\n # Test WinRM connection\n use_winrm = config.get_cloud_config_value(\n \"use_winrm\", vm_, __opts__, default=False\n )\n\n if use_winrm:\n log.debug(\"WinRM protocol requested for %s\", vm_[\"name\"])\n if not HAS_WINRM:\n log.error(\"WinRM library not found\")\n return False\n\n kwargs[\"port\"] = config.get_cloud_config_value(\n \"winrm_port\", vm_, __opts__, default=5986\n )\n kwargs[\"timeout\"] = 10\n\n try:\n log.debug(\"Testing WinRM protocol for %s\", vm_[\"name\"])\n return __utils__[\"cloud.wait_for_winrm\"](**kwargs) is not None\n except (\n ConnectionError,\n ConnectTimeout,\n ReadTimeout,\n SSLError,\n ProxyError,\n RetryError,\n InvalidSchema,\n WinRMTransportError,\n ) as exc:\n log.error(\"Exception: %s\", exc)\n return False\n\n return True\n\n else:\n\n log.debug(\"Testing SSH authentication method for %s\", vm_[\"name\"])\n\n # Test SSH connection\n kwargs = {\n \"host\": vm_[\"ssh_host\"],\n \"port\": config.get_cloud_config_value(\n \"ssh_port\", vm_, __opts__, default=22\n ),\n \"username\": config.get_cloud_config_value(\n \"ssh_username\", vm_, __opts__, default=\"root\"\n ),\n \"password\": config.get_cloud_config_value(\n \"password\", vm_, __opts__, search_global=False\n ),\n \"key_filename\": config.get_cloud_config_value(\n \"key_filename\",\n vm_,\n __opts__,\n search_global=False,\n default=config.get_cloud_config_value(\n \"ssh_keyfile\", vm_, __opts__, search_global=False, default=None\n ),\n ),\n \"gateway\": vm_.get(\"gateway\", None),\n \"maxtries\": 1,\n }\n\n log.debug(\"Testing SSH protocol for %s\", vm_[\"name\"])\n try:\n return __utils__[\"cloud.wait_for_passwd\"](**kwargs) is True\n except SaltCloudException as exc:\n log.error(\"Exception: %s\", exc)\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check for VM files. Return True if they exist. Else False.
def _checkfiles(self, build=None): mf = get_machine_folder() inroot = os.path.exists(os.path.join(mf, self.name)) if build == 'stable': group = paths.vm_group_stable else: group = paths.vm_group insu = os.path.exists(os.path.join(mf, group, self.name)) return inroot or insu
[ "def checkvm(self):\n if self._checkreg() or self._checkfiles():\n err = \"{} already exist!\".format(self.name)\n raise VirtualMachineExistsError(err)\n return 0", "def check_files_in_directory(self, path):\n if os.path.exists(path):\n return os.path.isfile(path)", "def check_if_file_exist(\n positive,\n vm,\n vm_resource,\n path=config_virt.TEMP_PATH,\n full_path=False\n):\n if not full_path:\n path = os.path.join(path, config_virt.FILE_NAME)\n testflow.step(\n \"checking if file: %s exists in vm: %s. expecting result: %s\",\n path, vm, positive\n )\n file_exists = vm_resource.fs.exists(path)\n if not (file_exists == positive):\n raise exceptions.VMException(\"Error: file exists on vm: '%s'\" % vm)", "def file_exists(self, path):\n path = path.strip('/')\n file_collection = self._get_fs_instance().list()\n if path == '':\n return False\n if path in file_collection:\n return True\n return False", "def _file_exists(self, name):\n return self.dir.file_exists(name)", "def check_for_local_file(self, filename=None):\n files = glob.glob(filename)\n return bool(files)", "def does_vm_exist_on_provider(self):\n return self.provider_crud.get_mgmt_system().does_vm_exist(self.name)", "def test_files_exist(self):\n for filename in template_files:\n print(filename)\n self.assertTrue(\n os.path.exists(os.path.join(self.builtdir, filename))\n )", "def areFilesStillThere(self):\n if self.fBinariesDeleted:\n return False;\n\n for sBinary in self.sBinaries.split(','):\n sBinary = sBinary.strip();\n if not sBinary:\n continue;\n # Same URL tests as in webutils.downloadFile().\n if sBinary.startswith('http://') \\\n or sBinary.startswith('https://') \\\n or sBinary.startswith('ftp://'):\n # URL - don't bother trying to verify that (we don't use it atm).\n fRc = None;\n else:\n # File.\n if config.g_ksBuildBinRootDir is not None:\n sFullPath = os.path.join(config.g_ksBuildBinRootDir, sBinary);\n fRc = os.path.isfile(sFullPath);\n if not fRc \\\n and not os.path.isfile(os.path.join(config.g_ksBuildBinRootDir, config.g_ksBuildBinRootFile)):\n fRc = None; # Root file missing, so the share might not be mounted correctly.\n else:\n fRc = None;\n if fRc is not True:\n return fRc;\n\n return True;", "def check():\n path = os.path.abspath(os.path.dirname(__file__))\n fold = os.path.join(path, \"temp_difflibjs\")\n r = os.path.exists(fold)\n if not r:\n return r\n f = os.path.join(fold, \"jsdifflib.zip\")\n r = os.path.exists(f)\n if not r:\n return r\n size = os.stat(f).st_size\n return size > 0", "def does_vm_exist_in_cfme(self):\n try:\n self.find_quadicon()\n return True\n except VmNotFound:\n return False", "def check_file_exists(hosts, filename, user=None, directory=False,\n sudo=False):\n missing_file = NodeSet()\n command = \"test -e {0}\".format(filename)\n if user is not None and not directory:\n command = \"test -O {0}\".format(filename)\n elif user is not None and directory:\n command = \"test -O {0} && test -d {0}\".format(filename)\n elif directory:\n command = \"test -d '{0}'\".format(filename)\n\n if sudo:\n command = \"sudo \" + command\n\n task = run_task(hosts, command, verbose=True)\n for ret_code, node_list in task.iter_retcodes():\n if ret_code != 0:\n missing_file.add(NodeSet.fromlist(node_list))\n\n return len(missing_file) == 0, missing_file", "def check_files_exist(self):\n\n files_fail = [\n 'Dockerfile',\n 'environment.yml',\n 'data',\n 'scripts'\n ]\n files_warn = [\n \n ]\n\n for files in files_fail:\n if not os.path.isfile(self.pf(files)):\n self.failed.append((1, 'File {} not 
found.'.format(files)))\n else:\n self.passed.append((1, 'File {} found.'.format(files)))\n\n for files in files_warn:\n if not os.path.isdir(self.pf(files)):\n self.warned.append((1, 'Dir {} not found.'.format(files)))\n else:\n self.passed.append((1, 'Dir {} found.'.format(files)))\n\n if os.path.isfile(self.pf('environment.yml')):\n self.load_environment_config()", "def exist_file(filename):\n\n all_files = find_all_raw_files()\n filename = basename(filename) # Remove path from filename\n if filename in all_files:\n return 1\n else:\n return 0", "def _exists(self, fhash):\n # Check file (dir) exists\n return exists(self.storage.path('fhash'))", "def file_exists(file_path):\n return True if os.path.isfile(file_path) else False", "def is_in_virtualbox():\n if not isfile(__VIRT_WHAT) or not access(__VIRT_WHAT, X_OK):\n raise IOError(\"virt-what not available\")\n try:\n return subprocess.check_output([\"sudo\", \"-n\", __VIRT_WHAT]).split('\\n')[0:2] == __VIRT_WHAT_VIRTUALBOX_WITH_KVM\n except subprocess.CalledProcessError as e:\n raise IOError(\"virt-what failed execution with {}\".format(e))", "def inDir(fil):\n currentdir = os.listdir('.')\n if fil in currentdir :\n return False\n else :\n return True", "def file_exists(self) -> bool:\n return self._path.exists()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise VirtualMachineError if such a VM exists. Else return 0
def checkvm(self): if self._checkreg() or self._checkfiles(): err = "{} already exist!".format(self.name) raise VirtualMachineExistsError(err) return 0
[ "def does_vm_exist_on_provider(self):\n return self.provider_crud.get_mgmt_system().does_vm_exist(self.name)", "def does_vm_exist_in_cfme(self):\n try:\n self.find_quadicon()\n return True\n except VmNotFound:\n return False", "def test_get_one_inexistent_virtual_interface(self):\n\n # Make a GET request\n response = self.client.get(\n '/api/v4/virtual-interface/1000/',\n HTTP_AUTHORIZATION=self.authorization\n )\n\n self.compare_status(404, response.status_code)\n\n self.compare_values(\n u'Virtual Interface 1000 do not exist.',\n response.data['detail']\n )", "def test_create_error(self, client):\n vm = mfactory.VirtualMachineFactory()\n msg = self.create_msg(operation='OP_INSTANCE_CREATE',\n instance=vm.backend_vm_id,\n status='error')\n update_db(client, msg)\n self.assertTrue(client.basic_ack.called)\n db_vm = VirtualMachine.objects.get(id=vm.id)\n self.assertEqual(db_vm.operstate, 'ERROR')", "def _checkreg(self):\n retval = True\n try:\n with open('/dev/null') as devnull:\n subprocess.check_call(['VBoxManage', 'showvminfo', self.name],\n stdout=devnull,\n stderr=devnull\n )\n except subprocess.CalledProcessError:\n retval = False\n return retval", "def test_vm_create_failure():\n results = []\n cluster_obj = prism.Cluster(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n vms_obj = prism.Vms(api_client=_api())\n for each_uuid in clusters:\n result = False\n vm_config = {\n 'name': 'api_test_v2_failure_{0}'.format(random_string),\n 'cores': 16,\n 'memory_gb': 128,\n 'add_cdrom': True,\n 'disks': [\n {\n 'size_gb': 20,\n 'storage_container_name': 'home_compression',\n },\n ],\n 'nics': [\n {\n 'network_name': '192.168.1.0',\n 'connect': True,\n 'ipam': True,\n }\n ]\n }\n\n result = vms_obj.create(clusteruuid=each_uuid, **vm_config)\n results.append(result)\n assert not all(results)", "def error(self):\n ret = self._get_attr(\"error\")\n return IVirtualBoxErrorInfo(ret)", "def ex_get_vm(self, node_or_uuid):\n if isinstance(node_or_uuid, Node):\n node_or_uuid = node_or_uuid.extra['instance_uuid']\n vm = self.connection.content.searchIndex.FindByUuid(\n None, node_or_uuid, True, True)\n if not vm:\n raise LibcloudError(\"Unable to locate VirtualMachine.\")\n return vm", "def test_azure_service_api_vm_get(self):\n pass", "def find(cls, nameOrId):\n with VirtualBoxException.ExceptionHandler():\n machine = cls._vbox.findMachine(nameOrId)\n return VirtualMachine(machine)", "def test_other_error(self, client):\n vm = mfactory.VirtualMachineFactory()\n msg = self.create_msg(operation='OP_INSTANCE_STARTUP',\n instance=vm.backend_vm_id,\n status='error')\n update_db(client, msg)\n self.assertTrue(client.basic_ack.called)\n db_vm = VirtualMachine.objects.get(id=vm.id)\n self.assertEqual(db_vm.operstate, vm.operstate)\n self.assertEqual(db_vm.backendtime, vm.backendtime)", "def test_add_vmdk_no_vmdks(self, fake_consume_task):\n fake_the_vm = MagicMock()\n fake_the_vm.config.hardware.device = []\n disk_size = 1\n\n with self.assertRaises(RuntimeError):\n virtual_machine.add_vmdk(fake_the_vm, disk_size)", "def getVirtualMachineByName(si,vmname):\n vmList = []\n for vm in getVirtualMachines(si):\n if vmname == vm.getName():\n vmList.append(vm)\n\n if len(vmList) > 1:\n print \"Multiple virtual machines named %s. 
Please lookup by UUID.\" % vmname\n return None\n else:\n return vmList[0]", "def _check_return(cls, retval):\n if retval != 0:\n sys.exit(retval)", "def _check_call(ret):\n if ret != 0:\n raise TreeliteError(_LIB.TreeliteGetLastError().decode(\"utf-8\"))", "def __virtual__():\n if \"azurearm_compute.availability_set_create_or_update\" in __salt__:\n return __virtualname__\n return (False, \"azurearm module could not be loaded\")", "def host_is_virtualpc():\n # XXX: implement when required\n raise Exception('unimplemented')", "def _verify_vector_execution(self):\r\n \r\n # If self._result is set. False is probably a good return value.\r\n if self._result or self._result == False:\r\n raise ProbeSucceed(self.name,'Command succeeded')", "def get_wait_for_vm(_, port):\n return not (port.vm.purged)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
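A brief, illustrative sketch of how the checkvm document above is meant to be called, assuming the VirtualMachine class and VirtualMachineExistsError exception that these rows reference; the VM name and the calling script are made up for the example.

vm = VirtualMachine('stable-build-01')      # hypothetical VM name
try:
    vm.checkvm()    # returns 0 only when neither the VBoxManage registration nor the on-disk files exist
except VirtualMachineExistsError as err:
    print('WARNING: %s' % err)              # e.g. 'stable-build-01 already exist!'
else:
    print('safe to import %s' % vm.name)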
Import VM and group into paths.vm_group.
def importvm(self, ova): assert os.path.exists(ova), "{} not found" % ova subprocess.call(['VBoxManage', 'import', ova, '--options', 'keepallmacs']) time.sleep(10) grouped = self._groupvm() sfolders = self._sharedfolders() return grouped, sfolders
[ "def just_import(ova):\n name = os.path.split(ova)[1].split('.')[0]\n v_machine = VirtualMachine(name)\n # This must throw exception if such VM already exists.\n try:\n v_machine.checkvm()\n except VirtualMachineExistsError:\n print(\"WARNING: %s already exists. Skipping...\" % name)\n else:\n v_machine.importvm(ova)\n return name", "def vmimport(self, func=just_import):\n ovas = len(self.vmlist)\n if ovas == 1:\n vmname = func(self.vmlist[0])\n self.results.append(vmname)\n elif ovas <= self.threads:\n self._import_pool(ovas, self.vmlist, func)\n else:\n tmplist = self.vmlist\n while tmplist:\n self._import_pool(self.threads, tmplist[:self.threads], func)\n tmplist = tmplist[self.threads:]\n return self.results", "def force_import(ova):\n name = os.path.split(ova)[1].split('.')[0]\n v_machine = VirtualMachine(name)\n try:\n v_machine.checkvm()\n except VirtualMachineExistsError:\n v_machine.removevm()\n v_machine.importvm(ova)\n return name", "def _load_entry_point_group(self, entry_point_group):\n for ep in iter_entry_points(group=entry_point_group):\n self.register_processor(ep.name, ep.load())", "def cmd_load(self, group_or_file):\n lg = GroupLoadableFiles(self.sources_dir, group_or_file)\n tlinfo = eval(open(lg.tli_filename(), 'r').read())\n tlinfo['DirectorOp'] = 'DefineGroup'\n tlinfo['Group'] = lg.group_name\n r = self._ask_director(self.asys, self.director, tlinfo, 'DeclaredGroup')\n if not r:\n return 1\n r = self._ask_director(self.asys, self.director,\n { 'DirectorOp': 'LoadSource',\n 'Source': lg.tls_filenames()[0],\n 'Group': lg.group_name,\n },\n 'SourceLoading',\n self.ask_wait)\n if not r:\n return 1\n print('Loaded \"%s\" %s: %s' % (lg.group_name, lg.tls_filenames()[0],\n r['SourceHash']))\n return 0", "def _add_to_consistencygroup(self, group, add_volumes):\n LOG.debug(_(\"Adding %(vols)s to consistencygroup %(group)s\") %\n {'vols': add_volumes, 'group': group})\n\n if not add_volumes:\n add_volumes = []\n elif hasattr(add_volumes, 'isdigit'):\n add_volumes = [add_volumes, ]\n\n for volume in add_volumes:\n self._ensure_snapshot_resource_area(volume)\n\n ans = self.vmem_mg.snapshot.add_luns_to_snapgroup(group, add_volumes)\n\n if not ans['success']:\n msg = (_(\"Failed to add volumes %(vols)s to \" +\n \"consistencygroup %(group)s: %(msg)s\") %\n {'vols': add_volumes, 'group': group, 'msg': ans['msg']})\n raise exception.ViolinBackendErr(message=msg)", "def import_group_info(self, src):\n self.typ_bnd_elem = src.typ_bnd_elem\n if self.typ_bnd_elem is None:\n tmp_typ_bnd_elem = 0\n else:\n tmp_typ_bnd_elem = self.typ_bnd_elem\n ikle_bnd = src.get_bnd_connectivity()\n nelebd, ndp = ikle_bnd.shape\n tmp_ikle_bnd = ikle_bnd.T.reshape((nelebd*ndp))\n\n self.logger.debug(\"Transfering group information\")\n self.error = HermesFile._hermes.transfer_group_info(\\\n self.fformat, src.my_id,\n self.my_id, self.typ_elem,\n tmp_typ_bnd_elem, tmp_ikle_bnd, nelebd, ndp,\n False, False)", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.settings.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.instanceUuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid and obj not in self.objects_to_reevaluate:\n return\n\n log.debug(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is 
template\n template = grab(obj, \"config.template\")\n if bool(self.settings.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n if bool(self.settings.skip_srm_placeholder_vms) is True \\\n and f\"{grab(obj, 'config.managedBy.extensionKey')}\".startswith(\"com.vmware.vcDr\"):\n log.debug2(f\"VM '{name}' is a SRM placeholder VM. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_host = self.get_parent_object_by_class(grab(obj, \"runtime.host\"), vim.HostSystem)\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ClusterComputeResource)\n\n # get single host 'cluster' if VM runs on one\n if cluster_object is None:\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ComputeResource)\n\n if self.settings.set_source_name_as_cluster_group is True:\n group = self.inventory.get_by_data(NBClusterGroup, data={\"name\": self.name})\n else:\n group = self.get_parent_object_by_class(cluster_object, vim.Datacenter)\n\n if None in [parent_host, cluster_object, group]:\n log.error(f\"Requesting host or cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n nb_cluster_object = self.get_object_from_cache(cluster_object)\n\n # check VM cluster\n if nb_cluster_object is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n parent_name = grab(parent_host, \"name\")\n cluster_name = grab(nb_cluster_object, \"data.name\")\n cluster_full_name = f\"{group.name}/{cluster_name}\"\n\n if name in self.processed_vm_names.get(cluster_full_name, list()) and obj not in self.objects_to_reevaluate:\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_full_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add vm to processed list\n if self.processed_vm_names.get(cluster_full_name) is None:\n self.processed_vm_names[cluster_full_name] = list()\n\n self.processed_vm_names[cluster_full_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = nb_cluster_object.get_site_name()\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_full_name)\n\n # first check against vm_platform_relation\n platform = get_string_or_none(grab(obj, \"config.guestFullName\"))\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n if platform is not None:\n platform = self.get_object_relation(platform, \"vm_platform_relation\", fallback=platform)\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if self.settings.skip_vm_comments is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = self.get_object_relation(name, \"vm_tenant_relation\")\n\n # assign vm_tag_relation\n vm_tags = self.get_object_relation(name, \"vm_tag_relation\")\n\n # get vCenter tags\n vm_tags.extend(self.collect_object_tags(obj))\n\n vm_data = {\n \"name\": name,\n \"cluster\": nb_cluster_object,\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n # Add adaption for change in NetBox 3.3.0 VM model\n # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758\n if version.parse(self.inventory.netbox_api_version) >= version.parse(\"3.3.0\"):\n vm_data[\"site\"] = {\"name\": site_name}\n\n if self.settings.track_vm_host:\n vm_data[\"device\"] = self.get_object_from_cache(parent_host)\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n if len(vm_tags) > 0:\n vm_data[\"tags\"] = vm_tags\n\n # add custom fields if present and configured\n vm_custom_fields = self.get_object_custom_fields(obj)\n if len(vm_custom_fields) > 0:\n vm_data[\"custom_fields\"] = vm_custom_fields\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # track MAC addresses 
in order add dummy guest interfaces\n processed_interface_macs = list()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n processed_interface_macs.append(int_mac)\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != 
normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": unquote(int_full_name),\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": unquote(int_description),\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None and self.settings.sync_vm_interface_mtu is True:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = {\n \"name\": unquote(int_network_name),\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n }\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append({\n \"name\": unquote(f\"{int_network_name}-{int_network_vlan_id}\"),\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n })\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # find dummy guest NIC interfaces\n if self.settings.sync_vm_dummy_interfaces is True:\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC MAC\n guest_nic_mac = normalize_mac_address(grab(guest_nic, \"macAddress\"))\n\n # skip interfaces of MAC addresses for already known interfaces\n if guest_nic_mac is None or guest_nic_mac in processed_interface_macs:\n continue\n\n processed_interface_macs.append(guest_nic_mac)\n\n int_full_name = \"vNIC Dummy-{}\".format(\"\".join(guest_nic_mac.split(\":\")[-2:]))\n\n log.debug2(f\"Parsing dummy network device: {guest_nic_mac}\")\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is True:\n nic_ips[int_full_name].append(int_ip_address)\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": guest_nic_mac,\n \"enabled\": grab(guest_nic, \"connected\", fallback=False),\n }\n\n if len(nic_ips.get(int_full_name, list())) == 0:\n log.debug(f\"Dummy network interface '{int_full_name}' has no IP addresses assigned. 
Skipping\")\n continue\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6,\n vmware_object=obj)\n\n return", "def start_VM(self, host):\n action = self.cmc.virtual_machines.start(group_name(host), vm_name(host))\n action.wait()", "def __init__(self, path: ghidra.program.util.GroupPath, treeName: unicode, blockModelService: ghidra.app.services.BlockModelService, partitioningModelName: unicode):\n ...", "def process_docker_import(self, param_import):", "def import_instance(DryRun=None, Description=None, LaunchSpecification=None, DiskImages=None, Platform=None):\n pass", "def add_hgrps_from_dir(self, dirname):\n return self.host_group_manager.add_objects_from_dir(dirname)", "def cnv_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, cnv_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, [], [], [], cnv_paths)\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project", "def load_groups(self):\n\n self.groups.load()", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.uuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid:\n return\n\n log.debug2(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_name = get_string_or_none(grab(obj, \"runtime.host.name\"))\n cluster_name = get_string_or_none(grab(obj, \"runtime.host.parent.name\"))\n\n # honor strip_host_domain_name\n if cluster_name is not None and self.strip_host_domain_name is True and \\\n parent_name.split(\".\")[0] == cluster_name.split(\".\")[0]:\n cluster_name = cluster_name.split(\".\")[0]\n\n # check VM cluster\n if cluster_name is None:\n log.error(f\"Requesting cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n elif self.permitted_clusters.get(cluster_name) is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n if name in self.processed_vm_names.get(cluster_name, list()):\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add host to processed list\n if self.processed_vm_names.get(cluster_name) is None:\n self.processed_vm_names[cluster_name] = list()\n\n self.processed_vm_names[cluster_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = self.permitted_clusters.get(cluster_name)\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_name)\n\n # first check against vm_platform_relation\n platform = grab(obj, \"config.guestFullName\")\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n for platform_relation in grab(self, \"vm_platform_relation\", fallback=list()):\n\n if platform is None:\n break\n\n object_regex = platform_relation.get(\"object_regex\")\n if object_regex.match(platform):\n platform = platform_relation.get(\"platform_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {platform}, using mapped platform '{platform}'\")\n break\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if bool(self.skip_vm_comments) is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = None\n for tenant_relation in grab(self, \"vm_tenant_relation\", fallback=list()):\n object_regex = tenant_relation.get(\"object_regex\")\n if object_regex.match(name):\n tenant_name = tenant_relation.get(\"tenant_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {name}, using tenant '{tenant_name}'\")\n break\n\n vm_data = {\n \"name\": name,\n \"cluster\": {\"name\": cluster_name},\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = 
normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if 
ip_valid_to_add_to_netbox(int_ip_address, self.permitted_subnets, int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": int_description,\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = self.get_vlan_object_if_exists({\n \"name\": int_network_name,\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n })\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append(self.get_vlan_object_if_exists({\n \"name\": f\"{int_network_name}-{int_network_vlan_id}\",\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n }))\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, site_name=site_name, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6)\n\n return", "def __LoadGroup(self, module_directory, parent_group, module_path=None,\n allow_non_existing_modules=False, exception_if_present=None,\n top_group=None):\n if not os.path.isdir(module_directory):\n if allow_non_existing_modules:\n return None\n raise backend.LayoutException(\n 'The given module directory does not exist: {}'.format(\n module_directory))\n elif exception_if_present:\n # pylint: disable=raising-bad-type, This will be an actual exception.\n raise exception_if_present\n\n module_root, module = os.path.split(module_directory)\n if not module_path:\n module_path = [module]\n # If this is the top level, don't register the name of the module directory\n # itself, it should assume the name of the command. 
If this is another\n # module directory, its name gets explicitly registered under the root\n # command.\n is_top = not parent_group\n sub_parser = parent_group.SubParser() if parent_group else None\n path = [self.__name] if is_top else [self.__name] + module_path\n group = backend.CommandGroup(\n module_root, [module], path, uuid.uuid4().hex, sub_parser,\n self.__config_hooks, help_func=self.__help_func,\n parent_group=top_group)\n\n return group", "def import_group(self,iSurveyID,sImportData,sImportDataType,\n sNewGroupName=None,sNewGroupDescription=None):\n params = self.__format_params(locals().copy())\n method = \"import_group\"\n r = self.call_rpc(method,params)\n return r.json()['result']", "def vcf_import(\n root_path: pathlib.Path,\n study_id: str,\n ped_path: pathlib.Path, vcf_paths: list[pathlib.Path],\n gpf_instance: GPFInstance,\n project_config_update: Optional[dict[str, Any]] = None,\n project_config_overwrite: Optional[dict[str, Any]] = None\n) -> ImportProject:\n study = StudyInputLayout(study_id, ped_path, vcf_paths, [], [], [])\n project = setup_import_project(\n root_path, study, gpf_instance,\n project_config_update=project_config_update,\n project_config_overwrite=project_config_overwrite)\n return project" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build virtual machine. Remove existing if needed.
def build_vm(vmname, build=None):
    if build == 'stable':
        v_machine = VirtualMachine(vmname)
    else:
        v_machine = VirtualMachine(vmname)
    try:
        v_machine.checkvm()
    except VirtualMachineExistsError:
        v_machine.removevm()
    if build == 'stable':
        return v_machine.buildvm('stable')
    else:
        return v_machine.buildvm()
[ "def clean_build():\r\n env.clean_build = True", "def __create_virtual_machine(self):\n vm_name = 'arista-cvx'\n logger.info('Launching the {} VM'.format(vm_name))\n\n arista_image_path = self.framework.model.resources.fetch(\n 'arista-image')\n\n # Officially Arista CVX requires more:\n # https://www.arista.com/en/cg-cv/cv-deploying-cvx\n # But experience shows that this is enough for using as a test fixture:\n resources = ['--ram=3072', '--vcpus=1']\n\n subprocess.check_call([\n 'virt-install', '--name', vm_name, *resources,\n '--boot', 'menu=on', '--disk',\n 'path={},device=disk,bus=ide,size=10'.format(arista_image_path),\n '--graphics', 'none', '--network',\n 'bridge:{},model=e1000'.format(\n self.__CONFIG_CONTEXT['linux_bridge_name']),\n '--autostart', '--noautoconsole', '--os-variant=generic'])", "def _add_virtual_machine_stub(self, content):\n content['Machine'] = None", "def init_vm(name, root):\n build_path = root + \"/build\"\n v = vagrant.Vagrant(root=root)\n\n if not os.path.exists(root):\n os.makedirs(root)\n\n print(\" - Setting up VM \", root)\n if not os.path.exists(build_path):\n os.makedirs(build_path)\n v.init(box_name=name)", "def setup_vm(request):\n\n result = list()\n\n def fin_verify_results():\n \"\"\"\n Check if none of finalizers failed.\n \"\"\"\n global_helper.raise_if_false_in_list(results=result)\n\n def fin_vm():\n \"\"\"\n Teardown:\n Safely remove VM.\n \"\"\"\n testflow.teardown(\"Safely remove test VM.\")\n result.append(\n (\n ll_vms.safely_remove_vms(\n vms=[\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.VIRT_CONSOLE_CLONE_VM_NAME,\n vcons_conf.VIRT_CONSOLE_VM_IMPORT_NEW\n ]\n ), \"Failed to safelly remove {vm} as part of teardown.\".format(\n vm=vcons_conf.VIRT_CONSOLE_VM_SYSTEM\n )\n )\n )\n\n def fin_vm_from_export_domain():\n \"\"\"\n Teardown:\n Remove VM from export domain\n \"\"\"\n\n testflow.teardown(\"Remove exported VM from export domain.\")\n if ll_vms.is_vm_exists_in_export_domain(\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.EXPORT_DOMAIN_NAME\n ):\n result.append(\n (\n ll_vms.remove_vm_from_export_domain(\n True,\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.DC_NAME[0],\n vcons_conf.EXPORT_DOMAIN_NAME\n ), \"Failed to remove VM from export domain.\"\n )\n )\n\n def fin_instance_type():\n \"\"\"\n Teardown:\n Remove instance type.\n \"\"\"\n testflow.teardown(\"Remove custom instance_type.\")\n result.append(\n (\n ll_inst_type.remove_instance_type(\n instance_type_name=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE\n ), \"Was not able to remove test instance_type.\"\n )\n )\n\n def fin_templates():\n \"\"\"\n Teardown:\n Remove test template.\n \"\"\"\n testflow.teardown(\"Remove test template.\")\n result.append(\n (\n ll_templates.safely_remove_templates(\n templates=[\n vcons_conf.VIRT_CONSOLE_TEMPLATE,\n vcons_conf.VIRT_CONSOLE_TEMPLATE_IMPORT_NEW\n ]\n ), \"Was not able to remove test Template.\"\n )\n )\n\n def fin_template_from_export_domain():\n \"\"\"\n Teardown:\n Remove template from export domain\n \"\"\"\n\n testflow.teardown(\"Remove exported template from export domain.\")\n if ll_templates.export_domain_template_exist(\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.EXPORT_DOMAIN_NAME\n ):\n result.append(\n (\n ll_templates.removeTemplateFromExportDomain(\n True,\n vcons_conf.VIRT_CONSOLE_TEMPLATE,\n vcons_conf.EXPORT_DOMAIN_NAME\n ), \"Failed to remove Template from export domain.\"\n )\n )\n\n request.addfinalizer(fin_instance_type)\n request.addfinalizer(fin_template_from_export_domain)\n 
request.addfinalizer(fin_templates)\n request.addfinalizer(fin_vm_from_export_domain)\n request.addfinalizer(fin_vm)\n request.addfinalizer(fin_verify_results)\n\n testflow.setup(\n \"Create a instance_type for Virt console test cases execution.\"\n )\n\n assert ll_inst_type.create_instance_type(\n instance_type_name=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE,\n **vcons_conf.INSTANCE_TYPE_PARAMS\n ), \"Failed to create instance_type.\"\n vm_name = vcons_conf.VM_NAME[0]\n testflow.setup(\"Stop VM {} safely\".format(vm_name))\n assert ll_vms.stop_vms_safely([vm_name])\n testflow.setup(\"Create a Template for Virt console test cases execution.\")\n assert ll_templates.createTemplate(\n positive=True,\n vm=vm_name,\n name=vcons_conf.VIRT_CONSOLE_TEMPLATE,\n cluster=vcons_conf.CLUSTER_NAME[0]\n ), \"Was not able to create template.\"\n\n testflow.setup(\"Create a VM for Virt console test cases execution.\")\n assert ll_vms.createVm(\n positive=True,\n vmName=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vmDescription=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n cluster=vcons_conf.CLUSTER_NAME[0],\n template=vcons_conf.VIRT_CONSOLE_TEMPLATE,\n os_type=vcons_conf.VM_OS_TYPE,\n display_type=vcons_conf.VM_DISPLAY_TYPE,\n nic=vcons_conf.VIRT_CONSOLE_VM_NIC,\n network=vcons_conf.MGMT_BRIDGE\n ), \"Was not able to create VM.\"\n\n testflow.setup(\"Update VM to use test instance type and 2 monitors.\")\n assert ll_vms.updateVm(\n positive=True,\n vm=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n instance_type=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE\n ), \"Failed to set instance_type for VM.\"", "def main():\n\n args = parseArgs()\n\n vm = VMBuilder(args)\n\n if vm.args.command == 'list_disk_pools':\n print(vm.getDiskPools())\n elif vm.args.command == 'list_pool_volumes':\n print(vm.getDiskPoolVolumes())\n elif vm.args.command == 'create_vm':\n logging.debug(\"about to run vm.getbuild.createvm\")\n vm.verifyMinimumCreateVMArgs()\n vm.getBuild().createVM()\n else:\n logging.critical(\"The command you entered is not recognized.\")", "def create(self, vm_name):\n\n sub_conf = self.conf['virtualbox']['vms'][vm_name]\n hostname = vm_name\n dir_isocustom = self.conf['general']['dir_isocustom']\n if 'install' in sub_conf.keys() and sub_conf['install']:\n iso = os.path.join(dir_isocustom, sub_conf['install'])\n else:\n iso = None\n\n logging.info('Create virtualbox vm')\n l_vm = self.list_vms()\n\n isexist = [x['name'] for x in l_vm if hostname == x['name']]\n assert isexist == [], \"Error : la vm '\"+hostname+\"' existe déjà\"\n\n # msg = \"Error : la recipe '\"+recipe+\"' n'existe pas\"\n # assert recipe in self.conf['virtualbox']['recipes'].keys(), msg\n\n # dir1 = conf['disk-dir']+'/'+conf['hostname']\n # assert(not os.path.exists(dir1)), \"Le dossier \"+dir1+\" existe déjà !\"\n\n # dir_iso = self.conf['general']['dir_input']\n # dir_isocustom = self.conf['general']['dir_isocustom']\n os_type = sub_conf['os_type']\n file_disk_type = sub_conf['file_disk_type']\n ram = str(sub_conf['ram'])\n vram = str(sub_conf['vram'])\n disk_size = sub_conf['disk_size']\n interface_name = sub_conf['interface_name']\n interface_type = sub_conf['interface_type']\n\n dir_vm = self.get_machine_folder()\n if not os.path.isdir(dir_vm):\n os.mkdir(dir_vm)\n\n os.chdir(dir_vm)\n\n os.mkdir(dir_vm+os.sep+hostname)\n os.chdir(dir_vm+os.sep+hostname)\n\n # Create vm\n run_cmd(\n 'VBoxManage createvm '\n '--name \"'+hostname+'\" '\n '--ostype \"'+os_type+'\" ' # Ex: \"Debian_64\"\n '--register')\n\n # Add SATA controller\n run_cmd(\n 'VBoxManage storagectl 
\"'+hostname+'\" '\n '--name \"SATA Controller\" '\n '--add sata '\n '--controller IntelAHCI')\n\n # Add disks SATA controller\n if isinstance(disk_size, int):\n disk_size = [disk_size]\n run_cmd(\n 'VBoxManage storagectl '+hostname+' '\n '--name \"SATA Controller\" '\n '--portcount '+str(len(disk_size))) # Number of disque\n\n i = 0\n for on_disk_size in disk_size:\n ds = str(on_disk_size)\n it = str(i)\n disk_name = hostname+'_'+it+'.'+file_disk_type\n\n # Create one disk\n run_cmd(\n 'VBoxManage createhd '\n '--filename \"'+disk_name+'\" ' # Ex:test_0.vmdk\n '--size '+ds) # Disk size in Mo\n\n # Attach one disk to SATA controller\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"SATA Controller\" '\n '--port '+it+' '\n '--device 0 '\n '--type hdd '\n '--medium \"'+disk_name+'\"') # Ex:test_0.vmdk\n i += 1\n\n # Add IDE Controller\n run_cmd(\n 'VBoxManage storagectl \"'+hostname+'\" '\n '--name \"IDE Controller\" '\n '--add ide')\n\n # Mount the iso to the IDE controller\n if iso:\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"IDE Controller\" '\n '--port 0 '\n '--device 0 '\n '--type dvddrive '\n '--medium \"'+iso+'\"')\n\n # Enable Input/Output (mouse, keyboard, ...)\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--ioapic on')\n\n # Define boot order\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--boot1 dvd '\n '--boot2 disk '\n '--boot3 none '\n '--boot4 none')\n\n # Define RAM and VRAM(video)\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--memory '+ram+' '\n '--vram '+vram)\n\n # Connect network bridge interface\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--nic1 '+interface_type+' '\n '--bridgeadapter1 '+interface_name)", "def run(self):\n call('rm -vrf ./build ./dist ./*.pyc ./*.egg-info', shell=True)\n call('make -C docs clean', shell=True)", "def stop_and_remove_vm_(vm_name):\n\n testflow.teardown(\"Remove vm %s\", vm_name)\n return ll_vms.safely_remove_vms([vm_name])", "def make_vm(mod, exec_mode) -> Tuple[relax.VirtualMachine, tvm.runtime.Device]:\n target = tvm.target.Target(\"llvm\", host=\"llvm\")\n exec = relax.build(TestVMSetInput, target, exec_mode=exec_mode)\n exec.export_library(\"exec.so\")\n exec_loaded = tvm.runtime.load_module(\"exec.so\")\n os.remove(\"exec.so\")\n device = tvm.cpu()\n return relax.VirtualMachine(exec_loaded, device), device", "def create_second_vm(request, storage):\n self = request.node.cls\n\n def finalizer():\n \"\"\"\n Remove the second VM\n \"\"\"\n testflow.teardown(\"Remove VM: %s\", self.vm_name_2)\n assert ll_vms.safely_remove_vms([self.vm_name_2]), (\n \"Failed to power off and remove VM %s\" % self.vm_name_2\n )\n ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])\n request.addfinalizer(finalizer)\n\n self.vm_name_2 = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_VM\n )\n testflow.setup(\"Create VM: %s\", self.vm_name_2)\n vm_args = config.create_vm_args.copy()\n vm_args['storageDomainName'] = self.storage_domain\n vm_args['cluster'] = config.CLUSTER_NAME\n vm_args['vmName'] = self.vm_name_2\n vm_args['deep_copy'] = False\n testflow.setup(\"Creating VM %s\", self.vm_name_2)\n assert storage_helpers.create_vm_or_clone(**vm_args), (\n \"Failed to create VM %s\" % self.vm_name_2\n )", "def build(self, bld=None):\n vm_number = len(self.vmlist)\n if vm_number == 1:\n if bld == 'stable':\n ova = build_vm(self.vmlist[0], 'stable')\n self.results.append(ova)\n else:\n ova = build_vm(self.vmlist[0])\n 
self.results.append(ova)\n elif vm_number <= self.threads:\n if bld == 'stable':\n self._build_pool(vm_number, self.vmlist, 'stable')\n else:\n self._build_pool(vm_number, self.vmlist)\n else:\n tmplist = self.vmlist\n while tmplist:\n if bld == 'stable':\n self._build_pool(self.threads, tmplist[:self.threads], 'stable')\n tmplist = tmplist[self.threads:]\n else:\n self._build_pool(self.threads, tmplist[:self.threads])\n tmplist = tmplist[self.threads:]\n return self.results", "def clean():\n rm_rf(cwd/'_build')", "def do_build():\n dochdir(ssdroot)\n if flag_snapshot:\n dochdir(flag_snapshot)\n else:\n dochdir(flag_subvol)\n if flag_binutils_build:\n dochdir(\"binutils-build\")\n nworkers = multiprocessing.cpu_count()\n doscmd(\"make -j%d\" % nworkers)\n doscmd(\"make -j%d all-gold\" % nworkers)\n dochdir(\"..\")\n else:\n u.verbose(0, \"... binutils build stubbed out\")\n if flag_run_ninja:\n dochdir(\"build.opt\")\n docmd(\"ninja\")\n dochdir(\"..\")\n else:\n u.verbose(0, \"... ninja build stubbed out\")", "def cleanup_files(base_dir, builder):\n builder.run_root('rm -rf /build')", "def create_second_vm(request, storage):\n self = request.node.cls\n\n def finalizer():\n \"\"\"\n Remove the second VM\n \"\"\"\n testflow.teardown(\"Remove VM %s\", self.vm_name_2)\n assert ll_vms.safely_remove_vms([self.vm_name_2]), (\n \"Failed to power off and remove VM %s\" % self.vm_name_2\n )\n ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])\n request.addfinalizer(finalizer)\n\n self.vm_name_2 = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_VM\n )\n vm_args = config.create_vm_args.copy()\n vm_args['storageDomainName'] = self.storage_domain\n vm_args['vmName'] = self.vm_name_2\n testflow.setup(\"Creating VM %s\", self.vm_name_2)\n assert storage_helpers.create_vm_or_clone(**vm_args), (\n \"Failed to create VM %s\" % self.vm_name_2\n )", "def Uninstall(self, vm):\n vm.RemoteCommand('sudo docker rmi {}'.format(self.name))", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.uuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid:\n return\n\n log.debug2(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_name = get_string_or_none(grab(obj, \"runtime.host.name\"))\n cluster_name = get_string_or_none(grab(obj, \"runtime.host.parent.name\"))\n\n # honor strip_host_domain_name\n if cluster_name is not None and self.strip_host_domain_name is True and \\\n parent_name.split(\".\")[0] == cluster_name.split(\".\")[0]:\n cluster_name = cluster_name.split(\".\")[0]\n\n # check VM cluster\n if cluster_name is None:\n log.error(f\"Requesting cluster for Virtual Machine '{name}' failed. 
Skipping.\")\n return\n\n elif self.permitted_clusters.get(cluster_name) is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n if name in self.processed_vm_names.get(cluster_name, list()):\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. \"\n \"Make sure to use unique VM names. Skipping\")\n return\n\n # add host to processed list\n if self.processed_vm_names.get(cluster_name) is None:\n self.processed_vm_names[cluster_name] = list()\n\n self.processed_vm_names[cluster_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = self.permitted_clusters.get(cluster_name)\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_name)\n\n # first check against vm_platform_relation\n platform = grab(obj, \"config.guestFullName\")\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n for platform_relation in grab(self, \"vm_platform_relation\", fallback=list()):\n\n if platform is None:\n break\n\n object_regex = platform_relation.get(\"object_regex\")\n if object_regex.match(platform):\n platform = platform_relation.get(\"platform_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {platform}, using mapped platform '{platform}'\")\n break\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if bool(self.skip_vm_comments) is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = None\n for tenant_relation in grab(self, \"vm_tenant_relation\", fallback=list()):\n object_regex = tenant_relation.get(\"object_regex\")\n if object_regex.match(name):\n tenant_name = tenant_relation.get(\"tenant_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {name}, using tenant '{tenant_name}'\")\n break\n\n vm_data = {\n \"name\": name,\n \"cluster\": {\"name\": cluster_name},\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = 
gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n 
int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if ip_valid_to_add_to_netbox(int_ip_address, self.permitted_subnets, int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": int_description,\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = self.get_vlan_object_if_exists({\n \"name\": int_network_name,\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n })\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append(self.get_vlan_object_if_exists({\n \"name\": f\"{int_network_name}-{int_network_vlan_id}\",\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n }))\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, site_name=site_name, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6)\n\n return", "def vm_create(vmname: str, img_path: str, isopath: str):\n kvm_video = \"virtio\"\n kvm_diskinterface = \"virtio\"\n kvm_netdevice = \"virtio\"\n # Copy efi firmware (ensure non-secureboot firmware is chosen)\n efi_bin, efi_nvram = Pkvm.ovmf_bin_nvramcopy(os.path.dirname(img_path), vmname, secureboot=False)\n # virt-install manual: https://www.mankier.com/1/virt-install\n # List of os: osinfo-query os\n CREATESCRIPT_KVM = \"\"\"virt-install --connect qemu:///system --name={vmname} --install bootdev=cdrom --boot=hd,cdrom --disk device=cdrom,path=\"{isopath}\",bus=sata,target=sda,readonly=on --disk path={fullpathtoimg},bus={kvm_diskinterface} --graphics spice --vcpu={cpus} --ram={memory} --network bridge=virbr0,model={kvm_netdevice} --filesystem source=/,target=root,mode=mapped --os-variant={kvm_variant} --import --noautoconsole --noreboot --video={kvm_video} --channel unix,target_type=virtio,name=org.qemu.guest_agent.0 --channel spicevmc,target_type=virtio,name=com.redhat.spice.0 --boot loader={efi_bin},loader_ro=yes,loader_type=pflash,nvram={efi_nvram}\"\"\".format(vmname=vmname, memory=args.memory, cpus=CPUCORES, fullpathtoimg=img_path, kvm_variant=kvm_variant, kvm_video=kvm_video, kvm_diskinterface=kvm_diskinterface, kvm_netdevice=kvm_netdevice, isopath=isopath, 
efi_bin=efi_bin, efi_nvram=efi_nvram)\n subprocess.run(CREATESCRIPT_KVM, shell=True, check=True)\n # Log the launch command.\n logging.info(\"\"\"KVM launch command: virt-install --connect qemu:///system --name={vmname} --disk path={fullpathtoimg},bus={kvm_diskinterface} --disk device=cdrom,bus=sata,target=sda,readonly=on --graphics spice --vcpu={cpus} --ram={memory} --network bridge=virbr0,model={kvm_netdevice} --filesystem source=/,target=root,mode=mapped --os-variant={kvm_variant} --import --noautoconsole --noreboot --video={kvm_video} --channel unix,target_type=virtio,name=org.qemu.guest_agent.0 --channel spicevmc,target_type=virtio,name=com.redhat.spice.0 --boot loader={efi_bin},loader_ro=yes,loader_type=pflash,nvram={efi_nvram}\"\"\".format(vmname=vmname, memory=args.memory, cpus=CPUCORES, fullpathtoimg=img_path, kvm_variant=kvm_variant, kvm_video=kvm_video, kvm_diskinterface=kvm_diskinterface, kvm_netdevice=kvm_netdevice, efi_bin=efi_bin, efi_nvram=efi_nvram))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import VM and group it. Return str. Import VM from specified ova and return VM name. If a VM with such a name already exists, raise VirtualMachineExistsError.
def just_import(ova):
    name = os.path.split(ova)[1].split('.')[0]
    v_machine = VirtualMachine(name)
    # This must throw exception if such VM already exists.
    try:
        v_machine.checkvm()
    except VirtualMachineExistsError:
        print("WARNING: %s already exists. Skipping..." % name)
    else:
        v_machine.importvm(ova)
    return name
[ "def force_import(ova):\n name = os.path.split(ova)[1].split('.')[0]\n v_machine = VirtualMachine(name)\n try:\n v_machine.checkvm()\n except VirtualMachineExistsError:\n v_machine.removevm()\n v_machine.importvm(ova)\n return name", "def importvm(self, ova):\n assert os.path.exists(ova), \"{} not found\" % ova\n subprocess.call(['VBoxManage', 'import', ova,\n '--options', 'keepallmacs'])\n time.sleep(10)\n grouped = self._groupvm()\n sfolders = self._sharedfolders()\n return grouped, sfolders", "def create(self, vm_name):\n\n sub_conf = self.conf['virtualbox']['vms'][vm_name]\n hostname = vm_name\n dir_isocustom = self.conf['general']['dir_isocustom']\n if 'install' in sub_conf.keys() and sub_conf['install']:\n iso = os.path.join(dir_isocustom, sub_conf['install'])\n else:\n iso = None\n\n logging.info('Create virtualbox vm')\n l_vm = self.list_vms()\n\n isexist = [x['name'] for x in l_vm if hostname == x['name']]\n assert isexist == [], \"Error : la vm '\"+hostname+\"' existe déjà\"\n\n # msg = \"Error : la recipe '\"+recipe+\"' n'existe pas\"\n # assert recipe in self.conf['virtualbox']['recipes'].keys(), msg\n\n # dir1 = conf['disk-dir']+'/'+conf['hostname']\n # assert(not os.path.exists(dir1)), \"Le dossier \"+dir1+\" existe déjà !\"\n\n # dir_iso = self.conf['general']['dir_input']\n # dir_isocustom = self.conf['general']['dir_isocustom']\n os_type = sub_conf['os_type']\n file_disk_type = sub_conf['file_disk_type']\n ram = str(sub_conf['ram'])\n vram = str(sub_conf['vram'])\n disk_size = sub_conf['disk_size']\n interface_name = sub_conf['interface_name']\n interface_type = sub_conf['interface_type']\n\n dir_vm = self.get_machine_folder()\n if not os.path.isdir(dir_vm):\n os.mkdir(dir_vm)\n\n os.chdir(dir_vm)\n\n os.mkdir(dir_vm+os.sep+hostname)\n os.chdir(dir_vm+os.sep+hostname)\n\n # Create vm\n run_cmd(\n 'VBoxManage createvm '\n '--name \"'+hostname+'\" '\n '--ostype \"'+os_type+'\" ' # Ex: \"Debian_64\"\n '--register')\n\n # Add SATA controller\n run_cmd(\n 'VBoxManage storagectl \"'+hostname+'\" '\n '--name \"SATA Controller\" '\n '--add sata '\n '--controller IntelAHCI')\n\n # Add disks SATA controller\n if isinstance(disk_size, int):\n disk_size = [disk_size]\n run_cmd(\n 'VBoxManage storagectl '+hostname+' '\n '--name \"SATA Controller\" '\n '--portcount '+str(len(disk_size))) # Number of disque\n\n i = 0\n for on_disk_size in disk_size:\n ds = str(on_disk_size)\n it = str(i)\n disk_name = hostname+'_'+it+'.'+file_disk_type\n\n # Create one disk\n run_cmd(\n 'VBoxManage createhd '\n '--filename \"'+disk_name+'\" ' # Ex:test_0.vmdk\n '--size '+ds) # Disk size in Mo\n\n # Attach one disk to SATA controller\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"SATA Controller\" '\n '--port '+it+' '\n '--device 0 '\n '--type hdd '\n '--medium \"'+disk_name+'\"') # Ex:test_0.vmdk\n i += 1\n\n # Add IDE Controller\n run_cmd(\n 'VBoxManage storagectl \"'+hostname+'\" '\n '--name \"IDE Controller\" '\n '--add ide')\n\n # Mount the iso to the IDE controller\n if iso:\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"IDE Controller\" '\n '--port 0 '\n '--device 0 '\n '--type dvddrive '\n '--medium \"'+iso+'\"')\n\n # Enable Input/Output (mouse, keyboard, ...)\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--ioapic on')\n\n # Define boot order\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--boot1 dvd '\n '--boot2 disk '\n '--boot3 none '\n '--boot4 none')\n\n # Define RAM and VRAM(video)\n run_cmd(\n 'VBoxManage 
modifyvm \"'+hostname+'\" '\n '--memory '+ram+' '\n '--vram '+vram)\n\n # Connect network bridge interface\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--nic1 '+interface_type+' '\n '--bridgeadapter1 '+interface_name)", "def vmimport(self, func=just_import):\n ovas = len(self.vmlist)\n if ovas == 1:\n vmname = func(self.vmlist[0])\n self.results.append(vmname)\n elif ovas <= self.threads:\n self._import_pool(ovas, self.vmlist, func)\n else:\n tmplist = self.vmlist\n while tmplist:\n self._import_pool(self.threads, tmplist[:self.threads], func)\n tmplist = tmplist[self.threads:]\n return self.results", "def launch_vm_on_network(tenant_name, vm_name, network_id):\n #pdb.set_trace()\n instance=None \n tenant_credentials = get_tenant_nova_credentials(tenant_name)\n \n nova = nvclient.Client(**tenant_credentials)\n nova.quotas.update(tenant_name, instances=-1, cores=-1, ram=-1, fixed_ips=-1, floating_ips=-1)\n with open('user.txt') as userdata:\n user_data = userdata.read()\n try:\n\timage_list=nova.images.find(name=\"ubuntu\")\n except NotFound:\n\tupload_image_glance()\n\n #for img in image:\n #if img.name == 'ubuntu':\n #print \"image found\"\n try:\n\n flavor = nova.flavors.find(name='traffic')\n except:\n flavor = nova.flavors.create(name=\"traffic\",ram=\"2048\",vcpus=\"1\",disk=\"10\")\n\n \n try:\n \n instance = nova.servers.create(name=vm_name, image=image_list,\n flavor=flavor,\n key_name=\"admin\",\n nics=[{'net-id': network_id}],userdata=user_data)\n except Exception:\n pass\n\n # Poll at 15 second intervals, until the status is no longer 'BUILD'\n print \" * Instance <%s> created on network <%s>: \"%(vm_name,str(network_id))\n status = instance.status\n while status == 'BUILD':\n time.sleep(15)\n # Retrieve the instance again so the status field updates\n instance = nova.servers.get(instance.id)\n status = instance.status\n\n print \" - Current status: %s\" % status\n if FLOATING_IP_CREATION:\n add_floating_ip_for_vm(tenant_name, instance)\n\n ins_data = {'instance_name': vm_name, 'status': status}\n return ins_data", "def create_vm_from_ovf(ovf_file_path, vms_service):\n ovf_data = open(ovf_file_path, 'r').read()\n vm = vms_service.add(\n types.Vm(\n cluster=types.Cluster(\n name='mycluster',\n ),\n initialization = types.Initialization(\n configuration = types.Configuration(\n type = types.ConfigurationType.OVF,\n data = ovf_data\n )\n ),\n ),\n )\n return vm.id", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.uuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid:\n return\n\n log.debug2(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. 
Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_name = get_string_or_none(grab(obj, \"runtime.host.name\"))\n cluster_name = get_string_or_none(grab(obj, \"runtime.host.parent.name\"))\n\n # honor strip_host_domain_name\n if cluster_name is not None and self.strip_host_domain_name is True and \\\n parent_name.split(\".\")[0] == cluster_name.split(\".\")[0]:\n cluster_name = cluster_name.split(\".\")[0]\n\n # check VM cluster\n if cluster_name is None:\n log.error(f\"Requesting cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n elif self.permitted_clusters.get(cluster_name) is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n if name in self.processed_vm_names.get(cluster_name, list()):\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. \"\n \"Make sure to use unique VM names. Skipping\")\n return\n\n # add host to processed list\n if self.processed_vm_names.get(cluster_name) is None:\n self.processed_vm_names[cluster_name] = list()\n\n self.processed_vm_names[cluster_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = self.permitted_clusters.get(cluster_name)\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_name)\n\n # first check against vm_platform_relation\n platform = grab(obj, \"config.guestFullName\")\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n for platform_relation in grab(self, \"vm_platform_relation\", fallback=list()):\n\n if platform is None:\n break\n\n object_regex = platform_relation.get(\"object_regex\")\n if object_regex.match(platform):\n platform = platform_relation.get(\"platform_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {platform}, using mapped platform '{platform}'\")\n break\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if bool(self.skip_vm_comments) is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = None\n for tenant_relation in grab(self, \"vm_tenant_relation\", fallback=list()):\n object_regex = tenant_relation.get(\"object_regex\")\n if object_regex.match(name):\n tenant_name = tenant_relation.get(\"tenant_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {name}, using tenant '{tenant_name}'\")\n break\n\n vm_data = {\n \"name\": name,\n \"cluster\": {\"name\": cluster_name},\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # 
check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if 
int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if ip_valid_to_add_to_netbox(int_ip_address, self.permitted_subnets, int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": int_description,\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = self.get_vlan_object_if_exists({\n \"name\": int_network_name,\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n })\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append(self.get_vlan_object_if_exists({\n \"name\": f\"{int_network_name}-{int_network_vlan_id}\",\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n }))\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, site_name=site_name, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6)\n\n return", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.settings.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.instanceUuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid and obj not in self.objects_to_reevaluate:\n return\n\n log.debug(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, 
\"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.settings.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n if bool(self.settings.skip_srm_placeholder_vms) is True \\\n and f\"{grab(obj, 'config.managedBy.extensionKey')}\".startswith(\"com.vmware.vcDr\"):\n log.debug2(f\"VM '{name}' is a SRM placeholder VM. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_host = self.get_parent_object_by_class(grab(obj, \"runtime.host\"), vim.HostSystem)\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ClusterComputeResource)\n\n # get single host 'cluster' if VM runs on one\n if cluster_object is None:\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ComputeResource)\n\n if self.settings.set_source_name_as_cluster_group is True:\n group = self.inventory.get_by_data(NBClusterGroup, data={\"name\": self.name})\n else:\n group = self.get_parent_object_by_class(cluster_object, vim.Datacenter)\n\n if None in [parent_host, cluster_object, group]:\n log.error(f\"Requesting host or cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n nb_cluster_object = self.get_object_from_cache(cluster_object)\n\n # check VM cluster\n if nb_cluster_object is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n parent_name = grab(parent_host, \"name\")\n cluster_name = grab(nb_cluster_object, \"data.name\")\n cluster_full_name = f\"{group.name}/{cluster_name}\"\n\n if name in self.processed_vm_names.get(cluster_full_name, list()) and obj not in self.objects_to_reevaluate:\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_full_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add vm to processed list\n if self.processed_vm_names.get(cluster_full_name) is None:\n self.processed_vm_names[cluster_full_name] = list()\n\n self.processed_vm_names[cluster_full_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = nb_cluster_object.get_site_name()\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_full_name)\n\n # first check against vm_platform_relation\n platform = get_string_or_none(grab(obj, \"config.guestFullName\"))\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n if platform is not None:\n platform = self.get_object_relation(platform, \"vm_platform_relation\", fallback=platform)\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if self.settings.skip_vm_comments is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = self.get_object_relation(name, \"vm_tenant_relation\")\n\n # assign vm_tag_relation\n vm_tags = self.get_object_relation(name, \"vm_tag_relation\")\n\n # get vCenter tags\n vm_tags.extend(self.collect_object_tags(obj))\n\n vm_data = {\n \"name\": name,\n \"cluster\": nb_cluster_object,\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n # Add adaption for change in NetBox 3.3.0 VM model\n # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758\n if version.parse(self.inventory.netbox_api_version) >= version.parse(\"3.3.0\"):\n vm_data[\"site\"] = {\"name\": site_name}\n\n if self.settings.track_vm_host:\n vm_data[\"device\"] = self.get_object_from_cache(parent_host)\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n if len(vm_tags) > 0:\n vm_data[\"tags\"] = vm_tags\n\n # add custom fields if present and configured\n vm_custom_fields = self.get_object_custom_fields(obj)\n if len(vm_custom_fields) > 0:\n vm_data[\"custom_fields\"] = vm_custom_fields\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # track MAC addresses 
in order add dummy guest interfaces\n processed_interface_macs = list()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n processed_interface_macs.append(int_mac)\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != 
normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": unquote(int_full_name),\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": unquote(int_description),\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None and self.settings.sync_vm_interface_mtu is True:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = {\n \"name\": unquote(int_network_name),\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n }\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append({\n \"name\": unquote(f\"{int_network_name}-{int_network_vlan_id}\"),\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n })\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # find dummy guest NIC interfaces\n if self.settings.sync_vm_dummy_interfaces is True:\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC MAC\n guest_nic_mac = normalize_mac_address(grab(guest_nic, \"macAddress\"))\n\n # skip interfaces of MAC addresses for already known interfaces\n if guest_nic_mac is None or guest_nic_mac in processed_interface_macs:\n continue\n\n processed_interface_macs.append(guest_nic_mac)\n\n int_full_name = \"vNIC Dummy-{}\".format(\"\".join(guest_nic_mac.split(\":\")[-2:]))\n\n log.debug2(f\"Parsing dummy network device: {guest_nic_mac}\")\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is True:\n nic_ips[int_full_name].append(int_ip_address)\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": guest_nic_mac,\n \"enabled\": grab(guest_nic, \"connected\", fallback=False),\n }\n\n if len(nic_ips.get(int_full_name, list())) == 0:\n log.debug(f\"Dummy network interface '{int_full_name}' has no IP addresses assigned. 
Skipping\")\n continue\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6,\n vmware_object=obj)\n\n return", "def test_vms_with_same_name(self):\n # Step 1\n # Create VM on cluster wide\n configs = Configurations.list(\n self.apiclient,\n name=\"vm.instancename.flag\")\n orig_value = configs[0].value\n\n if orig_value == \"false\":\n Configurations.update(self.apiclient,\n name=\"vm.instancename.flag\",\n value=\"true\"\n )\n\n # Restart management server\n self.RestartServer()\n time.sleep(120)\n\n self.testdata[\"small\"][\"displayname\"] = \"TestName\"\n self.testdata[\"small\"][\"name\"] = \"TestName\"\n VirtualMachine.create(\n self.userapiclient_1,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_1.name,\n domainid=self.account_1.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n )\n\n with self.assertRaises(Exception):\n VirtualMachine.create(\n self.userapiclient_2,\n self.testdata[\"small\"],\n templateid=self.template.id,\n accountid=self.account_2.name,\n domainid=self.account_2.domainid,\n serviceofferingid=self.service_offering.id,\n zoneid=self.zone.id,\n )\n return", "def createVM(request, VMname, imageName, flavorName):\n api.createVM(VMname, imageName, flavorName)\n return HttpResponseRedirect('/project_space/manage')", "def test_make_ova_provide_name_extension(self, fake_rmtree, fake_listdir, fake_rename, fake_open,\n fake_sleep, fake_makedirs, fake_tarfile, fake_download_vmdk, fake_get_vm_ovf_xml,\n fake_block_on_lease, fake_power):\n fake_listdir.return_value = ['vm01.ova']\n fake_vcenter = MagicMock()\n fake_vm = MagicMock()\n fake_vm.name = 'myVM'\n fake_log = MagicMock()\n\n output = virtual_machine.make_ova(fake_vcenter, fake_vm, '/save/ova/here', fake_log, ova_name='vm01')\n expected = '/save/ova/here/vm01.ova'\n\n self.assertEqual(output, expected)", "def test_vm_create_from_image_vg_nic_ipam():\n results = []\n cluster_obj = prism.Cluster(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n vms_obj = prism.Vms(api_client=_api())\n for each_uuid in clusters:\n result = False\n vm_config = {\n 'name': 'api_test_v2_image_vg_nic_ipam_{0}'.format(random_string),\n 'cores': 1,\n 'memory_gb': 0.1,\n 'add_cdrom': True,\n 'power_state': 'off',\n 'disks': [\n {\n 'image_name': 'api_test_image1',\n },\n {\n 'volume_group_name': 'TEST_VG',\n },\n ],\n 'nics': [\n {\n 'network_name': '192.168.1.0',\n 'connect': True,\n 'ipam': True,\n }\n ]\n }\n\n result = vms_obj.create(clusteruuid=each_uuid, **vm_config)\n if result:\n vm_cleanup.append(vm_config['name'])\n results.append(result)\n assert all(results)", "def wait_for_v2v_import_event(vm_name, cluster, timeout=V2V_IMPORT_TIMEOUT):\n data_center = ll_clusters.get_cluster_data_center_name(cluster)\n event_message = (\n \"Vm %s was imported successfully to Data Center %s, Cluster %s\" %\n (vm_name, data_center, cluster)\n )\n last_event = ll_events.get_max_event_id()\n return ll_events.wait_for_event(\n query=event_message, start_id=last_event, timeout=timeout\n )", "def test_make_ova_provide_name(self, fake_rmtree, fake_listdir, fake_rename, fake_open,\n fake_sleep, fake_makedirs, fake_tarfile, fake_download_vmdk, fake_get_vm_ovf_xml,\n fake_block_on_lease, fake_power):\n fake_listdir.return_value = ['vm01.ova']\n fake_vcenter = MagicMock()\n fake_vm = MagicMock()\n fake_vm.name = 'myVM'\n 
fake_log = MagicMock()\n\n output = virtual_machine.make_ova(fake_vcenter, fake_vm, '/save/ova/here', fake_log, ova_name='vm01.ova')\n expected = '/save/ova/here/vm01.ova'\n\n self.assertEqual(output, expected)", "def checkvm(self):\n if self._checkreg() or self._checkfiles():\n err = \"{} already exist!\".format(self.name)\n raise VirtualMachineExistsError(err)\n return 0", "def vm_create(vmname: str, img_path: str, isopath: str):\n kvm_video = \"virtio\"\n kvm_diskinterface = \"virtio\"\n kvm_netdevice = \"virtio\"\n # Copy efi firmware (ensure non-secureboot firmware is chosen)\n efi_bin, efi_nvram = Pkvm.ovmf_bin_nvramcopy(os.path.dirname(img_path), vmname, secureboot=False)\n # virt-install manual: https://www.mankier.com/1/virt-install\n # List of os: osinfo-query os\n CREATESCRIPT_KVM = \"\"\"virt-install --connect qemu:///system --name={vmname} --install bootdev=cdrom --boot=hd,cdrom --disk device=cdrom,path=\"{isopath}\",bus=sata,target=sda,readonly=on --disk path={fullpathtoimg},bus={kvm_diskinterface} --graphics spice --vcpu={cpus} --ram={memory} --network bridge=virbr0,model={kvm_netdevice} --filesystem source=/,target=root,mode=mapped --os-variant={kvm_variant} --import --noautoconsole --noreboot --video={kvm_video} --channel unix,target_type=virtio,name=org.qemu.guest_agent.0 --channel spicevmc,target_type=virtio,name=com.redhat.spice.0 --boot loader={efi_bin},loader_ro=yes,loader_type=pflash,nvram={efi_nvram}\"\"\".format(vmname=vmname, memory=args.memory, cpus=CPUCORES, fullpathtoimg=img_path, kvm_variant=kvm_variant, kvm_video=kvm_video, kvm_diskinterface=kvm_diskinterface, kvm_netdevice=kvm_netdevice, isopath=isopath, efi_bin=efi_bin, efi_nvram=efi_nvram)\n subprocess.run(CREATESCRIPT_KVM, shell=True, check=True)\n # Log the launch command.\n logging.info(\"\"\"KVM launch command: virt-install --connect qemu:///system --name={vmname} --disk path={fullpathtoimg},bus={kvm_diskinterface} --disk device=cdrom,bus=sata,target=sda,readonly=on --graphics spice --vcpu={cpus} --ram={memory} --network bridge=virbr0,model={kvm_netdevice} --filesystem source=/,target=root,mode=mapped --os-variant={kvm_variant} --import --noautoconsole --noreboot --video={kvm_video} --channel unix,target_type=virtio,name=org.qemu.guest_agent.0 --channel spicevmc,target_type=virtio,name=com.redhat.spice.0 --boot loader={efi_bin},loader_ro=yes,loader_type=pflash,nvram={efi_nvram}\"\"\".format(vmname=vmname, memory=args.memory, cpus=CPUCORES, fullpathtoimg=img_path, kvm_variant=kvm_variant, kvm_video=kvm_video, kvm_diskinterface=kvm_diskinterface, kvm_netdevice=kvm_netdevice, efi_bin=efi_bin, efi_nvram=efi_nvram))", "def start_VM(self, host):\n action = self.cmc.virtual_machines.start(group_name(host), vm_name(host))\n action.wait()", "def start_vm(self,vm):\n\t\tvmx_path = vm.vmx_path\n\t\tcmd = self.vmrun_path + \" -T fusion start '\" + vmx_path + \"' nogui\" \n\t\tos.system(cmd)", "def test_return_v2_vm_create_with_vdisk_nic_ipam_ip():\n results = []\n cluster_obj = prism.Cluster(api_client=_api())\n clusters = cluster_obj.get_all_uuids()\n vms_obj = prism.Vms(api_client=_api())\n for each_uuid in clusters:\n result = False\n vm_config = {\n 'name': 'api_test_v2_vdisk_nic_ipam_ip_{0}'.format(random_string),\n 'cores': 1,\n 'memory_gb': 0.1,\n 'add_cdrom': True,\n 'power_state': 'off',\n 'disks': [\n {\n 'size_gb': 20,\n 'storage_container_name': 'home_compression',\n },\n ],\n # {network_name, network_uuid, adaptor_type, connect, mac_address, ipam, requested_ip_address}\n\n 'nics': [\n {\n 
'network_name': '192.168.1.0',\n 'ipam': True,\n 'requested_ip_address': '192.168.1.251'\n }\n ]\n }\n\n result = vms_obj.create(clusteruuid=each_uuid, **vm_config)\n if result:\n vm_cleanup.append(vm_config['name'])\n results.append(result)\n assert all(results)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import and group VM. Remove existing if needed.
def force_import(ova):
    name = os.path.split(ova)[1].split('.')[0]
    v_machine = VirtualMachine(name)
    try:
        v_machine.checkvm()
    except VirtualMachineExistsError:
        v_machine.removevm()
    v_machine.importvm(ova)
    return name
[ "def importvm(self, ova):\n assert os.path.exists(ova), \"{} not found\" % ova\n subprocess.call(['VBoxManage', 'import', ova,\n '--options', 'keepallmacs'])\n time.sleep(10)\n grouped = self._groupvm()\n sfolders = self._sharedfolders()\n return grouped, sfolders", "def just_import(ova):\n name = os.path.split(ova)[1].split('.')[0]\n v_machine = VirtualMachine(name)\n # This must throw exception if such VM already exists.\n try:\n v_machine.checkvm()\n except VirtualMachineExistsError:\n print(\"WARNING: %s already exists. Skipping...\" % name)\n else:\n v_machine.importvm(ova)\n return name", "def setup_vm(request):\n\n result = list()\n\n def fin_verify_results():\n \"\"\"\n Check if none of finalizers failed.\n \"\"\"\n global_helper.raise_if_false_in_list(results=result)\n\n def fin_vm():\n \"\"\"\n Teardown:\n Safely remove VM.\n \"\"\"\n testflow.teardown(\"Safely remove test VM.\")\n result.append(\n (\n ll_vms.safely_remove_vms(\n vms=[\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.VIRT_CONSOLE_CLONE_VM_NAME,\n vcons_conf.VIRT_CONSOLE_VM_IMPORT_NEW\n ]\n ), \"Failed to safelly remove {vm} as part of teardown.\".format(\n vm=vcons_conf.VIRT_CONSOLE_VM_SYSTEM\n )\n )\n )\n\n def fin_vm_from_export_domain():\n \"\"\"\n Teardown:\n Remove VM from export domain\n \"\"\"\n\n testflow.teardown(\"Remove exported VM from export domain.\")\n if ll_vms.is_vm_exists_in_export_domain(\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.EXPORT_DOMAIN_NAME\n ):\n result.append(\n (\n ll_vms.remove_vm_from_export_domain(\n True,\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.DC_NAME[0],\n vcons_conf.EXPORT_DOMAIN_NAME\n ), \"Failed to remove VM from export domain.\"\n )\n )\n\n def fin_instance_type():\n \"\"\"\n Teardown:\n Remove instance type.\n \"\"\"\n testflow.teardown(\"Remove custom instance_type.\")\n result.append(\n (\n ll_inst_type.remove_instance_type(\n instance_type_name=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE\n ), \"Was not able to remove test instance_type.\"\n )\n )\n\n def fin_templates():\n \"\"\"\n Teardown:\n Remove test template.\n \"\"\"\n testflow.teardown(\"Remove test template.\")\n result.append(\n (\n ll_templates.safely_remove_templates(\n templates=[\n vcons_conf.VIRT_CONSOLE_TEMPLATE,\n vcons_conf.VIRT_CONSOLE_TEMPLATE_IMPORT_NEW\n ]\n ), \"Was not able to remove test Template.\"\n )\n )\n\n def fin_template_from_export_domain():\n \"\"\"\n Teardown:\n Remove template from export domain\n \"\"\"\n\n testflow.teardown(\"Remove exported template from export domain.\")\n if ll_templates.export_domain_template_exist(\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.EXPORT_DOMAIN_NAME\n ):\n result.append(\n (\n ll_templates.removeTemplateFromExportDomain(\n True,\n vcons_conf.VIRT_CONSOLE_TEMPLATE,\n vcons_conf.EXPORT_DOMAIN_NAME\n ), \"Failed to remove Template from export domain.\"\n )\n )\n\n request.addfinalizer(fin_instance_type)\n request.addfinalizer(fin_template_from_export_domain)\n request.addfinalizer(fin_templates)\n request.addfinalizer(fin_vm_from_export_domain)\n request.addfinalizer(fin_vm)\n request.addfinalizer(fin_verify_results)\n\n testflow.setup(\n \"Create a instance_type for Virt console test cases execution.\"\n )\n\n assert ll_inst_type.create_instance_type(\n instance_type_name=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE,\n **vcons_conf.INSTANCE_TYPE_PARAMS\n ), \"Failed to create instance_type.\"\n vm_name = vcons_conf.VM_NAME[0]\n testflow.setup(\"Stop VM {} safely\".format(vm_name))\n assert ll_vms.stop_vms_safely([vm_name])\n 
testflow.setup(\"Create a Template for Virt console test cases execution.\")\n assert ll_templates.createTemplate(\n positive=True,\n vm=vm_name,\n name=vcons_conf.VIRT_CONSOLE_TEMPLATE,\n cluster=vcons_conf.CLUSTER_NAME[0]\n ), \"Was not able to create template.\"\n\n testflow.setup(\"Create a VM for Virt console test cases execution.\")\n assert ll_vms.createVm(\n positive=True,\n vmName=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vmDescription=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n cluster=vcons_conf.CLUSTER_NAME[0],\n template=vcons_conf.VIRT_CONSOLE_TEMPLATE,\n os_type=vcons_conf.VM_OS_TYPE,\n display_type=vcons_conf.VM_DISPLAY_TYPE,\n nic=vcons_conf.VIRT_CONSOLE_VM_NIC,\n network=vcons_conf.MGMT_BRIDGE\n ), \"Was not able to create VM.\"\n\n testflow.setup(\"Update VM to use test instance type and 2 monitors.\")\n assert ll_vms.updateVm(\n positive=True,\n vm=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n instance_type=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE\n ), \"Failed to set instance_type for VM.\"", "def vmimport(self, func=just_import):\n ovas = len(self.vmlist)\n if ovas == 1:\n vmname = func(self.vmlist[0])\n self.results.append(vmname)\n elif ovas <= self.threads:\n self._import_pool(ovas, self.vmlist, func)\n else:\n tmplist = self.vmlist\n while tmplist:\n self._import_pool(self.threads, tmplist[:self.threads], func)\n tmplist = tmplist[self.threads:]\n return self.results", "def delete_VM(self, host):\n action = self.rmc.resource_groups.delete(group_name(host))\n action.wait()", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.settings.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.instanceUuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid and obj not in self.objects_to_reevaluate:\n return\n\n log.debug(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.settings.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n if bool(self.settings.skip_srm_placeholder_vms) is True \\\n and f\"{grab(obj, 'config.managedBy.extensionKey')}\".startswith(\"com.vmware.vcDr\"):\n log.debug2(f\"VM '{name}' is a SRM placeholder VM. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_host = self.get_parent_object_by_class(grab(obj, \"runtime.host\"), vim.HostSystem)\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ClusterComputeResource)\n\n # get single host 'cluster' if VM runs on one\n if cluster_object is None:\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ComputeResource)\n\n if self.settings.set_source_name_as_cluster_group is True:\n group = self.inventory.get_by_data(NBClusterGroup, data={\"name\": self.name})\n else:\n group = self.get_parent_object_by_class(cluster_object, vim.Datacenter)\n\n if None in [parent_host, cluster_object, group]:\n log.error(f\"Requesting host or cluster for Virtual Machine '{name}' failed. 
Skipping.\")\n return\n\n nb_cluster_object = self.get_object_from_cache(cluster_object)\n\n # check VM cluster\n if nb_cluster_object is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n parent_name = grab(parent_host, \"name\")\n cluster_name = grab(nb_cluster_object, \"data.name\")\n cluster_full_name = f\"{group.name}/{cluster_name}\"\n\n if name in self.processed_vm_names.get(cluster_full_name, list()) and obj not in self.objects_to_reevaluate:\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_full_name}' already parsed. \"\n \"Make sure to use unique VM names. Skipping\")\n return\n\n # add vm to processed list\n if self.processed_vm_names.get(cluster_full_name) is None:\n self.processed_vm_names[cluster_full_name] = list()\n\n self.processed_vm_names[cluster_full_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = nb_cluster_object.get_site_name()\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_full_name)\n\n # first check against vm_platform_relation\n platform = get_string_or_none(grab(obj, \"config.guestFullName\"))\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n if platform is not None:\n platform = self.get_object_relation(platform, \"vm_platform_relation\", fallback=platform)\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if self.settings.skip_vm_comments is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = self.get_object_relation(name, \"vm_tenant_relation\")\n\n # assign vm_tag_relation\n vm_tags = self.get_object_relation(name, \"vm_tag_relation\")\n\n # get vCenter tags\n vm_tags.extend(self.collect_object_tags(obj))\n\n vm_data = {\n \"name\": name,\n \"cluster\": nb_cluster_object,\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n # Add adaption for change in NetBox 3.3.0 VM model\n # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758\n if version.parse(self.inventory.netbox_api_version) >= version.parse(\"3.3.0\"):\n vm_data[\"site\"] = {\"name\": site_name}\n\n if self.settings.track_vm_host:\n vm_data[\"device\"] = self.get_object_from_cache(parent_host)\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n if len(vm_tags) > 0:\n vm_data[\"tags\"] = vm_tags\n\n # add custom fields if present and configured\n vm_custom_fields = self.get_object_custom_fields(obj)\n if len(vm_custom_fields) > 0:\n vm_data[\"custom_fields\"] = vm_custom_fields\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if 
grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # track MAC addresses in order add dummy guest interfaces\n processed_interface_macs = list()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n processed_interface_macs.append(int_mac)\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) 
== 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": unquote(int_full_name),\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": unquote(int_description),\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None and self.settings.sync_vm_interface_mtu is True:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = {\n \"name\": unquote(int_network_name),\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n }\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append({\n \"name\": unquote(f\"{int_network_name}-{int_network_vlan_id}\"),\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n })\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # find dummy guest NIC interfaces\n if self.settings.sync_vm_dummy_interfaces is True:\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC MAC\n guest_nic_mac = normalize_mac_address(grab(guest_nic, \"macAddress\"))\n\n # skip interfaces of MAC addresses for already known interfaces\n if guest_nic_mac is None or guest_nic_mac in processed_interface_macs:\n continue\n\n processed_interface_macs.append(guest_nic_mac)\n\n int_full_name = \"vNIC Dummy-{}\".format(\"\".join(guest_nic_mac.split(\":\")[-2:]))\n\n log.debug2(f\"Parsing dummy network device: {guest_nic_mac}\")\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, 
\"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is True:\n nic_ips[int_full_name].append(int_ip_address)\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": guest_nic_mac,\n \"enabled\": grab(guest_nic, \"connected\", fallback=False),\n }\n\n if len(nic_ips.get(int_full_name, list())) == 0:\n log.debug(f\"Dummy network interface '{int_full_name}' has no IP addresses assigned. Skipping\")\n continue\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6,\n vmware_object=obj)\n\n return", "def removeVM(self, vm):\n self.lock.acquire()\n machine = self.machines.get(vm.name)\n machine[0].remove(vm.id)\n self.machines.set(vm.name, machine)\n self.lock.release()", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.uuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid:\n return\n\n log.debug2(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_name = get_string_or_none(grab(obj, \"runtime.host.name\"))\n cluster_name = get_string_or_none(grab(obj, \"runtime.host.parent.name\"))\n\n # honor strip_host_domain_name\n if cluster_name is not None and self.strip_host_domain_name is True and \\\n parent_name.split(\".\")[0] == cluster_name.split(\".\")[0]:\n cluster_name = cluster_name.split(\".\")[0]\n\n # check VM cluster\n if cluster_name is None:\n log.error(f\"Requesting cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n elif self.permitted_clusters.get(cluster_name) is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n if name in self.processed_vm_names.get(cluster_name, list()):\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add host to processed list\n if self.processed_vm_names.get(cluster_name) is None:\n self.processed_vm_names[cluster_name] = list()\n\n self.processed_vm_names[cluster_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = self.permitted_clusters.get(cluster_name)\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_name)\n\n # first check against vm_platform_relation\n platform = grab(obj, \"config.guestFullName\")\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n for platform_relation in grab(self, \"vm_platform_relation\", fallback=list()):\n\n if platform is None:\n break\n\n object_regex = platform_relation.get(\"object_regex\")\n if object_regex.match(platform):\n platform = platform_relation.get(\"platform_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {platform}, using mapped platform '{platform}'\")\n break\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if bool(self.skip_vm_comments) is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = None\n for tenant_relation in grab(self, \"vm_tenant_relation\", fallback=list()):\n object_regex = tenant_relation.get(\"object_regex\")\n if object_regex.match(name):\n tenant_name = tenant_relation.get(\"tenant_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {name}, using tenant '{tenant_name}'\")\n break\n\n vm_data = {\n \"name\": name,\n \"cluster\": {\"name\": cluster_name},\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = 
normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if 
ip_valid_to_add_to_netbox(int_ip_address, self.permitted_subnets, int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": int_description,\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = self.get_vlan_object_if_exists({\n \"name\": int_network_name,\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n })\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append(self.get_vlan_object_if_exists({\n \"name\": f\"{int_network_name}-{int_network_vlan_id}\",\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n }))\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, site_name=site_name, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6)\n\n return", "def test_09_expunge_instance_in_network(self):\n\n # Validate the following\n # 1. Recover the virtual machines.\n # 2. Vm should be in stopped state. State both the instances\n # 3. Make sure that all the PF,LB and Static NAT rules on this VM\n # works as expected.\n # 3. Make sure that we are able to access google.com from this user Vm\n\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n self.debug(\"Delete virtual machines in account: %s\" %\n self.account.name)\n try:\n self.vm_1.delete(self.apiclient)\n self.vm_2.delete(self.apiclient)\n self.vm_3.delete(self.apiclient)\n except Exception as e:\n self.fail(\"Failed to destroy the virtual instances, %s\" % e)\n\n # Check if the network rules still exists after Vm stop\n self.debug(\"Checking if NAT rules existed\")\n with self.assertRaises(Exception):\n NATRule.list(\n self.apiclient,\n id=self.nat_rule.id,\n listall=True\n )\n\n LoadBalancerRule.list(\n self.apiclient,\n id=self.lb_rule.id,\n listall=True\n )\n return", "def remove(self, name_or_uid):\n\n logging.info('Remove virtualbox vm')\n # Remove vm\n run_cmd(\n 'VBoxManage unregistervm \"'+name_or_uid+'\" '\n '--delete')", "def Uninstall(self, vm):\n vm.RemoteCommand('sudo docker rmi {}'.format(self.name))", "def remove_vm_from_storage_domain(vm_name, export_domain):\n\n if ll_vms.is_vm_exists_in_export_domain(vm_name, export_domain):\n return ll_vms.remove_vm_from_export_domain(\n True, vm_name, config.DC_NAME[0], export_domain\n )", "def test_05_destroy_instance_in_network(self):\n\n # Validate the following\n # 1. 
Destory the virtual machines.\n # 2. Rules should be still configured on virtual router.\n # 3. Recover the virtual machines.\n # 4. Vm should be in stopped state. State both the instances\n # 5. Make sure that all the PF,LB and Static NAT rules on this VM\n # works as expected.\n # 6. Make sure that we are able to access google.com from this user Vm\n\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n self.debug(\"Destroying the virtual machines in account: %s\" %\n self.account.name)\n try:\n self.vm_1.delete(self.apiclient, expunge=False)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_1.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Destroyed',\n \"VM state should be destroyed\"\n )\n\n except Exception as e:\n self.fail(\"Failed to stop the virtual instances, %s\" % e)\n\n # Check if the network rules still exists after Vm stop\n self.debug(\"Checking if NAT rules \")\n nat_rules = NATRule.list(\n self.apiclient,\n id=self.nat_rule.id,\n listall=True\n )\n self.assertEqual(\n isinstance(nat_rules, list),\n True,\n \"List NAT rules shall return a valid list\"\n )\n\n lb_rules = LoadBalancerRule.list(\n self.apiclient,\n id=self.lb_rule.id,\n listall=True\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"List LB rules shall return a valid list\"\n )\n\n self.debug(\"Recovering the expunged virtual machine vm1 in account: %s\" %\n self.account.name)\n try:\n self.vm_1.recover(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_1.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Stopped',\n \"VM state should be stopped\"\n )\n\n except Exception as e:\n self.fail(\"Failed to recover the virtual instances, %s\" % e)\n\n try:\n self.vm_2.delete(self.apiclient, expunge=False)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_2.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Destroyed',\n \"VM state should be destroyed\"\n )\n\n\n\n\n except Exception as e:\n self.fail(\"Failed to stop the virtual instances, %s\" % e)\n\n self.debug(\"Recovering the expunged virtual machine vm2 in account: %s\" %\n self.account.name)\n try:\n self.vm_2.recover(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_2.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Stopped',\n \"VM state should be stopped\"\n )\n except Exception as e:\n self.fail(\"Failed to recover the virtual instances, %s\" % e)\n\n self.debug(\"Starting the two instances..\")\n try:\n self.vm_1.start(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_1.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Running',\n \"VM state should be running\"\n )\n\n self.vm_2.start(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_2.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Running',\n \"VM state should be running\"\n )\n except Exception as e:\n self.fail(\"Failed to start the instances, %s\" % e)\n\n # Wait until vms are up\n time.sleep(120)\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n return", "def test_05_destroy_instance_in_network(self):\n\n # 
Validate the following\n # 1. Destory the virtual machines.\n # 2. Rules should be still configured on virtual router.\n # 3. Recover the virtual machines.\n # 4. Vm should be in stopped state. State both the instances\n # 5. Make sure that all the PF,LB and Static NAT rules on this VM\n # works as expected.\n # 6. Make sure that we are able to access google.com from this user Vm\n\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n self.debug(\"Destroying the virtual machines in account: %s\" %\n self.account.name)\n try:\n self.vm_1.delete(self.apiclient, expunge=False)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_1.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Destroyed',\n \"VM state should be destroyed\"\n )\n\n except Exception as e:\n self.fail(\"Failed to stop the virtual instances, %s\" % e)\n\n # Check if the network rules still exists after Vm stop\n self.debug(\"Checking if NAT rules \")\n nat_rules = NATRule.list(\n self.apiclient,\n id=self.nat_rule.id,\n listall=True\n )\n self.assertEqual(\n isinstance(nat_rules, list),\n True,\n \"List NAT rules shall return a valid list\"\n )\n\n lb_rules = LoadBalancerRule.list(\n self.apiclient,\n id=self.lb_rule.id,\n listall=True\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"List LB rules shall return a valid list\"\n )\n\n self.debug(\"Recovering the expunged virtual machine vm1 in account: %s\" %\n self.account.name)\n try:\n self.vm_1.recover(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_1.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Stopped',\n \"VM state should be stopped\"\n )\n\n except Exception as e:\n self.fail(\"Failed to recover the virtual instances, %s\" % e)\n\n try:\n self.vm_2.delete(self.apiclient, expunge=False)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_2.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Destroyed',\n \"VM state should be destroyed\"\n )\n\n except Exception as e:\n self.fail(\"Failed to stop the virtual instances, %s\" % e)\n\n self.debug(\"Recovering the expunged virtual machine vm2 in account: %s\" %\n self.account.name)\n try:\n self.vm_2.recover(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_2.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Stopped',\n \"VM state should be stopped\"\n )\n except Exception as e:\n self.fail(\"Failed to recover the virtual instances, %s\" % e)\n\n self.debug(\"Starting the two instances..\")\n try:\n self.vm_1.start(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_1.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Running',\n \"VM state should be running\"\n )\n\n self.vm_2.start(self.apiclient)\n\n list_vm_response = list_virtual_machines(\n self.apiclient,\n id=self.vm_2.id\n )\n\n vm_response = list_vm_response[0]\n\n self.assertEqual(\n vm_response.state,\n 'Running',\n \"VM state should be running\"\n )\n except Exception as e:\n self.fail(\"Failed to start the instances, %s\" % e)\n\n # Wait until vms are up\n time.sleep(120)\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n return", "def addVM(self, vm):\n 
self.lock.acquire()\n machine = self.machines.get(vm.name)\n machine[0].append(vm.id)\n self.machines.set(vm.name, machine)\n self.lock.release()", "def create_second_vm(request, storage):\n self = request.node.cls\n\n def finalizer():\n \"\"\"\n Remove the second VM\n \"\"\"\n testflow.teardown(\"Remove VM %s\", self.vm_name_2)\n assert ll_vms.safely_remove_vms([self.vm_name_2]), (\n \"Failed to power off and remove VM %s\" % self.vm_name_2\n )\n ll_jobs.wait_for_jobs([config.JOB_REMOVE_VM])\n request.addfinalizer(finalizer)\n\n self.vm_name_2 = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_VM\n )\n vm_args = config.create_vm_args.copy()\n vm_args['storageDomainName'] = self.storage_domain\n vm_args['vmName'] = self.vm_name_2\n testflow.setup(\"Creating VM %s\", self.vm_name_2)\n assert storage_helpers.create_vm_or_clone(**vm_args), (\n \"Failed to create VM %s\" % self.vm_name_2\n )", "def test_vmware_service_resources_vm_delete(self):\n pass", "def prepare_vm_for_sparsification(\n vm_name,\n storage_manager,\n storage_domain_name,\n file_size=config_virt.FILE_SIZE_IN_MB,\n all_disks=False,\n lun_id=None\n):\n\n disk_path, disks_ids = get_disk_path(\n vm_name=vm_name, storage_domain_name=storage_domain_name\n )\n testflow.step(\"Start vm %s\", vm_name)\n ll_vms.start_vms([vm_name])\n vm_resource = helpers.get_vm_resource(vm_name)\n lun_space = fetch_actual_disk_size(\n storage_manager, lun_id, disk_path\n )\n logger.info(\"use space before create file: %s\", lun_space)\n testflow.step(\n \"Write a %sMB file on vm: %s\", config_virt.FILE_SIZE_IN_MB, vm_name\n )\n new_files = create_file_in_vm(\n vm=vm_name, vm_resource=vm_resource, size_in_mb=file_size,\n all_disks=all_disks\n )\n lun_space = fetch_actual_disk_size(\n storage_manager=storage_manager,\n lun_id=lun_id,\n disk_path=disk_path\n )\n assert bool(lun_space), (\n \"Failed to get lun used space for lun: %s\" % lun_id\n )\n logger.info(\"Used space on lun after file creation: %s\", lun_space)\n testflow.step(\"removing the files: %s\", new_files)\n for file_path in new_files:\n assert delete_file_from_vm(\n vm=vm_name, vm_resource=vm_resource, path=file_path\n )\n new_used_space = fetch_actual_disk_size(\n storage_manager=storage_manager,\n lun_id=lun_id,\n disk_path=disk_path\n )\n testflow.step(\"Used space on lun after file deletion: %2f\", new_used_space)\n testflow.step(\"Stopping vm: %s\", vm_name)\n assert ll_vms.stop_vms_safely([vm_name])\n return new_used_space, disks_ids", "def __create(self, vm, cnt):\n vmms = self.vmms[vm.vmms]\n self.log.debug(\"__create: Using VMMS %s \" % (Config.VMMS_NAME))\n for i in range(cnt):\n newVM = copy.deepcopy(vm)\n newVM.id = self._getNextID()\n self.log.debug(\"__create|calling initializeVM\")\n vmms.initializeVM(newVM)\n self.log.debug(\"__create|done with initializeVM\")\n time.sleep(Config.CREATEVM_SECS)\n\n self.addVM(newVM)\n self.freeVM(newVM)\n self.log.debug(\"__create: Added vm %s to pool %s \" % (newVM.id, newVM.name))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build VMs from self.vmlist.
def build(self, bld=None):
    vm_number = len(self.vmlist)
    if vm_number == 1:
        if bld == 'stable':
            ova = build_vm(self.vmlist[0], 'stable')
            self.results.append(ova)
        else:
            ova = build_vm(self.vmlist[0])
            self.results.append(ova)
    elif vm_number <= self.threads:
        if bld == 'stable':
            self._build_pool(vm_number, self.vmlist, 'stable')
        else:
            self._build_pool(vm_number, self.vmlist)
    else:
        tmplist = self.vmlist
        while tmplist:
            if bld == 'stable':
                self._build_pool(self.threads, tmplist[:self.threads], 'stable')
                tmplist = tmplist[self.threads:]
            else:
                self._build_pool(self.threads, tmplist[:self.threads])
                tmplist = tmplist[self.threads:]
    return self.results
[ "def create_vms(self):\n\n\t\tfor vmx_path in self.vmx_files:\n\t\t\t#vm = self.create_vm(\"/Users/alex/Documents/Virtual Machines.localized/macOS 10.12.vmwarevm/macOS 10.12.vmx\")\n\t\t\tvm = self.create_vm(vmx_path)\n\t\t\tself.start_vm(vm)\n\t\t\tself.virtual_machines[str(vm.uuid)] = vm\n\n\t\ttime.sleep(5)", "def create_vm_list(vms):\n return [(v[0], v[2]) for v in vms]", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.settings.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.instanceUuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid and obj not in self.objects_to_reevaluate:\n return\n\n log.debug(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.settings.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n if bool(self.settings.skip_srm_placeholder_vms) is True \\\n and f\"{grab(obj, 'config.managedBy.extensionKey')}\".startswith(\"com.vmware.vcDr\"):\n log.debug2(f\"VM '{name}' is a SRM placeholder VM. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_host = self.get_parent_object_by_class(grab(obj, \"runtime.host\"), vim.HostSystem)\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ClusterComputeResource)\n\n # get single host 'cluster' if VM runs on one\n if cluster_object is None:\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ComputeResource)\n\n if self.settings.set_source_name_as_cluster_group is True:\n group = self.inventory.get_by_data(NBClusterGroup, data={\"name\": self.name})\n else:\n group = self.get_parent_object_by_class(cluster_object, vim.Datacenter)\n\n if None in [parent_host, cluster_object, group]:\n log.error(f\"Requesting host or cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n nb_cluster_object = self.get_object_from_cache(cluster_object)\n\n # check VM cluster\n if nb_cluster_object is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n parent_name = grab(parent_host, \"name\")\n cluster_name = grab(nb_cluster_object, \"data.name\")\n cluster_full_name = f\"{group.name}/{cluster_name}\"\n\n if name in self.processed_vm_names.get(cluster_full_name, list()) and obj not in self.objects_to_reevaluate:\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_full_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add vm to processed list\n if self.processed_vm_names.get(cluster_full_name) is None:\n self.processed_vm_names[cluster_full_name] = list()\n\n self.processed_vm_names[cluster_full_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = nb_cluster_object.get_site_name()\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_full_name)\n\n # first check against vm_platform_relation\n platform = get_string_or_none(grab(obj, \"config.guestFullName\"))\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n if platform is not None:\n platform = self.get_object_relation(platform, \"vm_platform_relation\", fallback=platform)\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if self.settings.skip_vm_comments is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = self.get_object_relation(name, \"vm_tenant_relation\")\n\n # assign vm_tag_relation\n vm_tags = self.get_object_relation(name, \"vm_tag_relation\")\n\n # get vCenter tags\n vm_tags.extend(self.collect_object_tags(obj))\n\n vm_data = {\n \"name\": name,\n \"cluster\": nb_cluster_object,\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n # Add adaption for change in NetBox 3.3.0 VM model\n # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758\n if version.parse(self.inventory.netbox_api_version) >= version.parse(\"3.3.0\"):\n vm_data[\"site\"] = {\"name\": site_name}\n\n if self.settings.track_vm_host:\n vm_data[\"device\"] = self.get_object_from_cache(parent_host)\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n if len(vm_tags) > 0:\n vm_data[\"tags\"] = vm_tags\n\n # add custom fields if present and configured\n vm_custom_fields = self.get_object_custom_fields(obj)\n if len(vm_custom_fields) > 0:\n vm_data[\"custom_fields\"] = vm_custom_fields\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # track MAC addresses 
in order add dummy guest interfaces\n processed_interface_macs = list()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n processed_interface_macs.append(int_mac)\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != 
normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": unquote(int_full_name),\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": unquote(int_description),\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None and self.settings.sync_vm_interface_mtu is True:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = {\n \"name\": unquote(int_network_name),\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n }\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append({\n \"name\": unquote(f\"{int_network_name}-{int_network_vlan_id}\"),\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n })\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # find dummy guest NIC interfaces\n if self.settings.sync_vm_dummy_interfaces is True:\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC MAC\n guest_nic_mac = normalize_mac_address(grab(guest_nic, \"macAddress\"))\n\n # skip interfaces of MAC addresses for already known interfaces\n if guest_nic_mac is None or guest_nic_mac in processed_interface_macs:\n continue\n\n processed_interface_macs.append(guest_nic_mac)\n\n int_full_name = \"vNIC Dummy-{}\".format(\"\".join(guest_nic_mac.split(\":\")[-2:]))\n\n log.debug2(f\"Parsing dummy network device: {guest_nic_mac}\")\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is True:\n nic_ips[int_full_name].append(int_ip_address)\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": guest_nic_mac,\n \"enabled\": grab(guest_nic, \"connected\", fallback=False),\n }\n\n if len(nic_ips.get(int_full_name, list())) == 0:\n log.debug(f\"Dummy network interface '{int_full_name}' has no IP addresses assigned. 
Skipping\")\n continue\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6,\n vmware_object=obj)\n\n return", "def __create(self, vm, cnt):\n vmms = self.vmms[vm.vmms]\n self.log.debug(\"__create: Using VMMS %s \" % (Config.VMMS_NAME))\n for i in range(cnt):\n newVM = copy.deepcopy(vm)\n newVM.id = self._getNextID()\n self.log.debug(\"__create|calling initializeVM\")\n vmms.initializeVM(newVM)\n self.log.debug(\"__create|done with initializeVM\")\n time.sleep(Config.CREATEVM_SECS)\n\n self.addVM(newVM)\n self.freeVM(newVM)\n self.log.debug(\"__create: Added vm %s to pool %s \" % (newVM.id, newVM.name))", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.uuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid:\n return\n\n log.debug2(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_name = get_string_or_none(grab(obj, \"runtime.host.name\"))\n cluster_name = get_string_or_none(grab(obj, \"runtime.host.parent.name\"))\n\n # honor strip_host_domain_name\n if cluster_name is not None and self.strip_host_domain_name is True and \\\n parent_name.split(\".\")[0] == cluster_name.split(\".\")[0]:\n cluster_name = cluster_name.split(\".\")[0]\n\n # check VM cluster\n if cluster_name is None:\n log.error(f\"Requesting cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n elif self.permitted_clusters.get(cluster_name) is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n if name in self.processed_vm_names.get(cluster_name, list()):\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add host to processed list\n if self.processed_vm_names.get(cluster_name) is None:\n self.processed_vm_names[cluster_name] = list()\n\n self.processed_vm_names[cluster_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = self.permitted_clusters.get(cluster_name)\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_name)\n\n # first check against vm_platform_relation\n platform = grab(obj, \"config.guestFullName\")\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n for platform_relation in grab(self, \"vm_platform_relation\", fallback=list()):\n\n if platform is None:\n break\n\n object_regex = platform_relation.get(\"object_regex\")\n if object_regex.match(platform):\n platform = platform_relation.get(\"platform_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {platform}, using mapped platform '{platform}'\")\n break\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if bool(self.skip_vm_comments) is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = None\n for tenant_relation in grab(self, \"vm_tenant_relation\", fallback=list()):\n object_regex = tenant_relation.get(\"object_regex\")\n if object_regex.match(name):\n tenant_name = tenant_relation.get(\"tenant_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {name}, using tenant '{tenant_name}'\")\n break\n\n vm_data = {\n \"name\": name,\n \"cluster\": {\"name\": cluster_name},\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = 
normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if 
ip_valid_to_add_to_netbox(int_ip_address, self.permitted_subnets, int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": int_description,\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = self.get_vlan_object_if_exists({\n \"name\": int_network_name,\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n })\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append(self.get_vlan_object_if_exists({\n \"name\": f\"{int_network_name}-{int_network_vlan_id}\",\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n }))\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, site_name=site_name, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6)\n\n return", "def update_vm_list(self):\n vms = self.op.get_vms()\n for vm in vms:\n if vm['mac'] not in self.vm_cache:\n self.vm_cache[vm['mac']] = {'ip': vm['ip'], 'floating_ip': vm['floating_ip']}", "def getAll(cls):\n return [VirtualMachine(vm) for vm in cls._vbox.machines]", "def create_nodes(count=2, instantiateOn='pnode', cores=4, ram=8):\n\n nodes = []\n # index nodes by their proper number (not zero-indexed)\n nodes.append(None)\n\n # create each VM\n for i in range(1, count + 1):\n nodes.append(mkVM('node' + str(i), GLOBALS.UBUNTU18_IMG, instantiateOn=instantiateOn, cores=cores, ram=ram))\n\n # run alternating install scripts on each vm to install software \n odd_node = True\n for node in nodes:\n if node is not None:\n if odd_node:\n node.addService(pg.Execute(shell=\"sh\", command=\"chmod +x /local/repository/install1.sh\"))\n node.addService(pg.Execute(shell=\"sh\", command=\"/local/repository/install1.sh\"))\n else:\n node.addService(pg.Execute(shell=\"sh\", command=\"chmod +x /local/repository/install2.sh\"))\n node.addService(pg.Execute(shell=\"sh\", command=\"/local/repository/install2.sh\"))\n odd_node = not odd_node\n\n return nodes", "def build_runlist(self):\n info.log('INFO', 'panzer', info.pretty_title('run list'))\n metadata = self.get_metadata()\n runlist = self.runlist\n for kind in const.RUNLIST_KIND:\n # - sanity check\n try:\n field_type = meta.get_type(metadata, kind)\n if field_type != 'MetaList':\n info.log('ERROR', 'panzer',\n 'value of field \"%s\" should be of type \"MetaList\"'\n '---found value of type \"%s\", ignoring it'\n % (kind, field_type))\n continue\n except error.MissingField:\n 
pass\n # - if 'filter', add filter list specified on command line first\n if kind == 'filter':\n for cmd in self.options['pandoc']['filter']:\n entry = dict()\n entry['kind'] = 'filter'\n entry['status'] = const.QUEUED\n entry['command'] = cmd[0]\n entry['arguments'] = list()\n runlist.append(entry)\n # - add commands specified in metadata\n if kind in metadata:\n entries = meta.get_runlist(metadata, kind, self.options)\n runlist.extend(entries)\n # - now some cleanup:\n # -- filters: add writer as first argument\n for entry in runlist:\n if entry['kind'] == 'filter':\n entry['arguments'].insert(0, self.options['pandoc']['write'])\n # -- postprocessors: remove them if output kind is pdf\n # .. or if a binary writer is selected\n if self.options['pandoc']['pdf_output'] \\\n or self.options['pandoc']['write'] in const.BINARY_WRITERS:\n new_runlist = list()\n for entry in runlist:\n if entry['kind'] == 'postprocess':\n info.log('INFO', 'panzer',\n 'postprocess \"%s\" skipped --- output of pandoc is binary file'\n % entry['command'])\n continue\n new_runlist.append(entry)\n runlist = new_runlist\n msg = info.pretty_runlist(runlist)\n for line in msg:\n info.log('INFO', 'panzer', line)\n self.runlist = runlist", "def _get_nebula_vms(self):\n hostname = socket.gethostname()\n fqdn = socket.getfqdn()\n if self.config['onecli_path']:\n onevm_command = '%s/onevm' % self.config['onecli_path']\n else:\n onevm_command = 'onevm'\n args = shlex.split('%s list -x' % onevm_command)\n my_env = os.environ.copy()\n if self.config['one_auth']:\n my_env['ONE_AUTH'] = self.config['one_auth']\n if self.config['one_xmlrpc']:\n my_env['ONE_XMLRPC'] = self.config['one_xmlrpc']\n vm_xml_list = subprocess.Popen(args, stdout=subprocess.PIPE,\n env=my_env)\n vm_xml_arr = vm_xml_list.stdout.readlines()\n vm_xml_string = ''.join([line.strip(\"\\n\") for line in vm_xml_arr])\n vm_xml_etree = xml.etree.ElementTree.fromstring(vm_xml_string)\n vm_hash = {}\n for vm in vm_xml_etree.findall(\"VM\"):\n vm_hostname_element = vm.find(\"*//HOSTNAME\")\n if vm_hostname_element is None:\n # this vm is undeployed or pending, so skip it\n continue\n vm_hostname = vm_hostname_element.text\n if vm_hostname not in [hostname, fqdn]:\n continue\n vm_id = vm.find(\"ID\").text\n pid = self._get_vm_pid(vm_id)\n if not pid:\n continue\n vm_name = self._validate_metric_name(vm.find(\"NAME\").text)\n vm_diamond_prefix_element = vm.find(\"*//DIAMOND_PREFIX\")\n if vm_diamond_prefix_element is None:\n # no diamond prefix in template, so set to default\n vm_diamond_prefix = self.config['default_prefix']\n else:\n vm_diamond_prefix = self._validate_metric_name(\n vm_diamond_prefix_element.text)\n vm_hash[vm_id] = dict(diamond_prefix=vm_diamond_prefix,\n pid=pid, name=vm_name)\n return vm_hash", "def _get_vm_list(limit, offset, quiet, out, project, provider_type):\n\n if provider_type == \"AHV_VM\":\n get_brownfield_ahv_vm_list(limit, offset, quiet, out, project)\n elif provider_type == \"AWS_VM\":\n get_brownfield_aws_vm_list(limit, offset, quiet, out, project)\n elif provider_type == \"AZURE_VM\":\n get_brownfield_azure_vm_list(limit, offset, quiet, out, project)\n elif provider_type == \"GCP_VM\":\n get_brownfield_gcp_vm_list(limit, offset, quiet, out, project)\n elif provider_type == \"VMWARE_VM\":\n # Has issue with it. 
Fixed in 2.9.8.1 and 3.0.0 (https://jira.nutanix.com/browse/CALM-18635)\n get_brownfield_vmware_vm_list(limit, offset, quiet, out, project)", "def vmimport(self, func=just_import):\n ovas = len(self.vmlist)\n if ovas == 1:\n vmname = func(self.vmlist[0])\n self.results.append(vmname)\n elif ovas <= self.threads:\n self._import_pool(ovas, self.vmlist, func)\n else:\n tmplist = self.vmlist\n while tmplist:\n self._import_pool(self.threads, tmplist[:self.threads], func)\n tmplist = tmplist[self.threads:]\n return self.results", "def build_instances(request):\n all_instances = []\n\n def build_n_volttron_instances(n, bad_config=False):\n build_n_volttron_instances.count = n\n instances = []\n vip_addresses = []\n instances = []\n addr_config = dict()\n names = []\n\n for i in range(0, n):\n address = get_rand_vip()\n vip_addresses.append(address)\n nm = 'platform{}'.format(i + 1)\n names.append(nm)\n\n for i in range(0, n):\n address = vip_addresses[i]\n wrapper = PlatformWrapper()\n wrapper.startup_platform(address, instance_name=names[i])\n wrapper.skip_cleanup = True\n instances.append(wrapper)\n\n gevent.sleep(1)\n for i in range(0, n):\n instances[i].shutdown_platform()\n\n for i in range(0, n):\n addr_config.clear()\n for j in range(0, n):\n if j != i:\n name = names[j]\n addr_config[name] = dict()\n addr_config[name]['instance-name'] = names[j]\n if bad_config:\n addr_config[name]['vip-address123'] = vip_addresses[j]\n else:\n addr_config[name]['vip-address'] = vip_addresses[j]\n addr_config[name]['serverkey'] = instances[j].serverkey\n address_file = os.path.join(instances[i].volttron_home, 'external_platform_discovery.json')\n if address_file:\n with open(address_file, 'w') as f:\n json.dump(addr_config, f)\n\n gevent.sleep(1)\n for i in range(0, n):\n address = vip_addresses.pop(0)\n instances[i].startup_platform(address, instance_name=names[i])\n instances[i].allow_all_connections()\n gevent.sleep(11)\n instances = instances if n > 1 else instances[0]\n\n build_n_volttron_instances.instances = instances\n return instances\n\n return build_n_volttron_instances", "def refresh_vms_status(self, vm_list):\n vm_dict = {}\n try:\n client = oca.Client(self.user + ':' + self.passwd, self.url)\n vm_pool = oca.VirtualMachinePool(client)\n vm_pool.info()\n for vm_id in vm_list:\n vm = {\"interfaces\": []}\n vm_exist = False\n vm_element = None\n for i in vm_pool:\n if str(i.id) == str(vm_id):\n vm_exist = True\n vm_element = i\n break\n if not vm_exist:\n self.logger.info(\"The vm \" + str(vm_id) + \" does not exist.\")\n vm['status'] = \"DELETED\"\n vm['error_msg'] = (\"The vm \" + str(vm_id) + \" does not exist.\")\n continue\n vm_element.info()\n vm[\"vim_info\"] = None\n VMstatus = vm_element.str_lcm_state\n if VMstatus == \"RUNNING\":\n vm['status'] = \"ACTIVE\"\n elif \"FAILURE\" in VMstatus:\n vm['status'] = \"ERROR\"\n vm['error_msg'] = \"VM failure\"\n else:\n vm['status'] = \"BUILD\"\n try:\n for red in vm_element.template.nics:\n interface = {'vim_info': None, \"mac_address\": str(red.mac), \"vim_net_id\": str(red.network_id),\n \"vim_interface_id\": str(red.network_id)}\n # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6\n if hasattr(red, 'ip'):\n interface[\"ip_address\"] = str(red.ip)\n if hasattr(red, 'ip6_global'):\n interface[\"ip_address\"] = str(red.ip6_global)\n vm[\"interfaces\"].append(interface)\n except Exception as e:\n self.logger.error(\"Error getting vm interface_information \" + type(e).__name__ + \":\" + str(e))\n vm[\"status\"] 
= \"VIM_ERROR\"\n vm[\"error_msg\"] = \"Error getting vm interface_information \" + type(e).__name__ + \":\" + str(e)\n vm_dict[vm_id] = vm\n return vm_dict\n except Exception as e:\n self.logger.error(e)\n for k in vm_dict:\n vm_dict[k][\"status\"] = \"VIM_ERROR\"\n vm_dict[k][\"error_msg\"] = str(e)\n return vm_dict", "def _build_runlist(self):\n design_vars = self.get_desvar_metadata()\n\n # Add up sizes\n self.num_design_vars = sum(meta['size'] for meta in itervalues(design_vars))\n\n if self.seed is not None:\n seed(self.seed)\n np.random.seed(self.seed)\n\n # Generate an LHC of the proper size\n rand_lhc = self._get_lhc()\n\n # Map LHC to buckets\n buckets = OrderedDict()\n j = 0\n\n for (name, bounds) in iteritems(design_vars):\n buckets[name] = []\n\n # Support for array desvars\n val = self.root.unknowns._dat[name].val\n nval = bounds['size']\n\n for k in range(nval):\n\n lowb = bounds['lower']\n upb = bounds['upper']\n if isinstance(lowb, np.ndarray):\n lowb = lowb[k]\n if isinstance(upb, np.ndarray):\n upb = upb[k]\n\n design_var_buckets = self._get_buckets(lowb, upb)\n buckets[name].append([design_var_buckets[rand_lhc[i, j]]\n for i in range(self.num_samples)])\n j += 1\n\n # Return random values in given buckets\n for i in range(self.num_samples):\n sample = []\n for key, bounds in iteritems(buckets):\n sample.append([key, np.array([np.random.uniform(bounds[k][i][0],\n bounds[k][i][1])\n for k in range(design_vars[key]['size'])])\n ])\n yield sample", "def main():\n\n args = parseArgs()\n\n vm = VMBuilder(args)\n\n if vm.args.command == 'list_disk_pools':\n print(vm.getDiskPools())\n elif vm.args.command == 'list_pool_volumes':\n print(vm.getDiskPoolVolumes())\n elif vm.args.command == 'create_vm':\n logging.debug(\"about to run vm.getbuild.createvm\")\n vm.verifyMinimumCreateVMArgs()\n vm.getBuild().createVM()\n else:\n logging.critical(\"The command you entered is not recognized.\")", "def prepVm(self):\r\n self.server.logMsg(\"PREPARING \" + self.vmName + \" FOR TESTING\")\r\n self.server.logMsg(self.vmName + \" OPERATING SYSTEM: \" + self.vmOS)\r\n self.server.logMsg(self.vmName + \" ARCHITECTURE: \" + self.getArch())\r\n self.getSnapshots()\r\n self.powerOn(False)", "def setup_vm(request):\n\n result = list()\n\n def fin_verify_results():\n \"\"\"\n Check if none of finalizers failed.\n \"\"\"\n global_helper.raise_if_false_in_list(results=result)\n\n def fin_vm():\n \"\"\"\n Teardown:\n Safely remove VM.\n \"\"\"\n testflow.teardown(\"Safely remove test VM.\")\n result.append(\n (\n ll_vms.safely_remove_vms(\n vms=[\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.VIRT_CONSOLE_CLONE_VM_NAME,\n vcons_conf.VIRT_CONSOLE_VM_IMPORT_NEW\n ]\n ), \"Failed to safelly remove {vm} as part of teardown.\".format(\n vm=vcons_conf.VIRT_CONSOLE_VM_SYSTEM\n )\n )\n )\n\n def fin_vm_from_export_domain():\n \"\"\"\n Teardown:\n Remove VM from export domain\n \"\"\"\n\n testflow.teardown(\"Remove exported VM from export domain.\")\n if ll_vms.is_vm_exists_in_export_domain(\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.EXPORT_DOMAIN_NAME\n ):\n result.append(\n (\n ll_vms.remove_vm_from_export_domain(\n True,\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.DC_NAME[0],\n vcons_conf.EXPORT_DOMAIN_NAME\n ), \"Failed to remove VM from export domain.\"\n )\n )\n\n def fin_instance_type():\n \"\"\"\n Teardown:\n Remove instance type.\n \"\"\"\n testflow.teardown(\"Remove custom instance_type.\")\n result.append(\n (\n ll_inst_type.remove_instance_type(\n 
instance_type_name=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE\n ), \"Was not able to remove test instance_type.\"\n )\n )\n\n def fin_templates():\n \"\"\"\n Teardown:\n Remove test template.\n \"\"\"\n testflow.teardown(\"Remove test template.\")\n result.append(\n (\n ll_templates.safely_remove_templates(\n templates=[\n vcons_conf.VIRT_CONSOLE_TEMPLATE,\n vcons_conf.VIRT_CONSOLE_TEMPLATE_IMPORT_NEW\n ]\n ), \"Was not able to remove test Template.\"\n )\n )\n\n def fin_template_from_export_domain():\n \"\"\"\n Teardown:\n Remove template from export domain\n \"\"\"\n\n testflow.teardown(\"Remove exported template from export domain.\")\n if ll_templates.export_domain_template_exist(\n vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vcons_conf.EXPORT_DOMAIN_NAME\n ):\n result.append(\n (\n ll_templates.removeTemplateFromExportDomain(\n True,\n vcons_conf.VIRT_CONSOLE_TEMPLATE,\n vcons_conf.EXPORT_DOMAIN_NAME\n ), \"Failed to remove Template from export domain.\"\n )\n )\n\n request.addfinalizer(fin_instance_type)\n request.addfinalizer(fin_template_from_export_domain)\n request.addfinalizer(fin_templates)\n request.addfinalizer(fin_vm_from_export_domain)\n request.addfinalizer(fin_vm)\n request.addfinalizer(fin_verify_results)\n\n testflow.setup(\n \"Create a instance_type for Virt console test cases execution.\"\n )\n\n assert ll_inst_type.create_instance_type(\n instance_type_name=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE,\n **vcons_conf.INSTANCE_TYPE_PARAMS\n ), \"Failed to create instance_type.\"\n vm_name = vcons_conf.VM_NAME[0]\n testflow.setup(\"Stop VM {} safely\".format(vm_name))\n assert ll_vms.stop_vms_safely([vm_name])\n testflow.setup(\"Create a Template for Virt console test cases execution.\")\n assert ll_templates.createTemplate(\n positive=True,\n vm=vm_name,\n name=vcons_conf.VIRT_CONSOLE_TEMPLATE,\n cluster=vcons_conf.CLUSTER_NAME[0]\n ), \"Was not able to create template.\"\n\n testflow.setup(\"Create a VM for Virt console test cases execution.\")\n assert ll_vms.createVm(\n positive=True,\n vmName=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n vmDescription=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n cluster=vcons_conf.CLUSTER_NAME[0],\n template=vcons_conf.VIRT_CONSOLE_TEMPLATE,\n os_type=vcons_conf.VM_OS_TYPE,\n display_type=vcons_conf.VM_DISPLAY_TYPE,\n nic=vcons_conf.VIRT_CONSOLE_VM_NIC,\n network=vcons_conf.MGMT_BRIDGE\n ), \"Was not able to create VM.\"\n\n testflow.setup(\"Update VM to use test instance type and 2 monitors.\")\n assert ll_vms.updateVm(\n positive=True,\n vm=vcons_conf.VIRT_CONSOLE_VM_SYSTEM,\n instance_type=vcons_conf.VIRT_CONSOLE_VM_INSTANCE_TYPE\n ), \"Failed to set instance_type for VM.\"", "def prepare(self, vms):\n local_config_paths = []\n for vm in vms:\n local_config_path = \"%s.%s\" % (self.get_local_results_path(vm),\n self._short_name)\n self._set_global_defaults(vm)\n self._configuration.save(local_config_path)\n local_config_paths.append(local_config_path)\n # Prepare the VMs.\n self.__prepare_vms(vms, local_config_paths)\n self.__prepared_vms = set(vms)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove img. Return img if removed. Else None.
def _remove_existing(img):
    if os.path.exists(img):
        os.unlink(img)
        return img
[ "def remove_image(self: E) -> E:\n try:\n del self._image\n except AttributeError:\n pass\n\n return self", "def delete_image_tag(self, img, tag):\r\n return img.delete_tag(tag)", "def remove_profile_image(self):\n self.wait_for_field('image')\n self.wait_for_ajax()\n\n self.wait_for_element_visibility('.image-wrapper', \"remove button is visible\")\n self.q(css='.u-field-remove-button').first.click()\n\n self.wait_for_ajax()\n self.mouse_hover(self.browser.find_element_by_css_selector('.image-wrapper'))\n self.wait_for_element_visibility('.u-field-upload-button', \"upload button is visible\")\n return True", "def pop_a_photo(self):\n if self.subclusters == [] and self.photos == []:\n return None\n if self.subclusters == None:\n return self.photos.pop(0)\n else:\n cluster_to_get = random.choice(self.subclusters)\n photo = cluster_to_get.pop_a_photo()\n if len(cluster_to_get.all_photos()) == 0:\n self.subclusters.remove(cluster_to_get)\n return photo", "def delete_image(self, offset, total):\n idx = self._idx + offset\n try:\n obj = self.__getitem__(idx)\n except IndexError:\n return None\n\n self._backup.append((idx, obj))\n\n del self._filenames[idx]\n obj.delete()\n\n if self._idx > 0 and total / 2 > offset:\n self._idx -= 1\n self._load(self._idx - self.PRELOAD_RANGE)\n else:\n self._load(self._idx + self.PRELOAD_RANGE + 1)\n\n return obj", "def delete_image(self, event):\n remove_image = os.path.join(\n self._directory_path, \"{}{}\".format(self._image_id, \".jpg\")\n )\n try:\n os.remove(remove_image)\n _LOGGER.debug(\"Deleting old image %s\", remove_image)\n except OSError as error:\n if error.errno != errno.ENOENT:\n raise", "def del_image(request):\n if not request.user.is_authenticated():\n return HttpResponse(-1)\n img_name = request.POST.get(\"img_name\", \"\")\n if img_name == \"\":\n return HttpResponse(-2)\n file = settings.MEDIA_ROOT + \"/upload/\" + img_name\n if os.path.exists(file):\n os.remove(file)\n return HttpResponse(0)\n return HttpResponse(-3)", "def delete_image(self, http_request, image_id):\n image = self.image_by_id(image_id)\n if image:\n self.glance_admin_image_store.remove(image)\n http_request.setResponseCode(204)\n return b''\n http_request.setResponseCode(404)\n return b''", "def delete_image_member(self, img, project_id):\r\n return img.delete_member(project_id)", "def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'?\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n raise UFOLibError(\n f\"Images are not allowed in UFO {self._formatVersion.major}.\"\n )\n self.removePath(f\"{IMAGES_DIRNAME}/{fsdecode(fileName)}\")", "def remove_images(post_id, images_to_remove):\n #gets the array of current images\n current_images = image_array(post_id)\n #takes the images_to_remove dict and turns it into a list of images to remove\n remove = db_mods.post_tag_identifier(images_to_remove)\n if remove:\n current_images = delete_images(current_images, remove)\n return update_images(post_id, array_to_comma_list(current_images))", "def remove_answer_image(answer_id):\n SQL = \"\"\"UPDATE answer SET image = NULL WHERE id = %s;\"\"\"\n data = (answer_id,)\n fetch = None\n db.run_statements(((SQL, data, fetch),))", "def delete_a_image(answer_id):\n current_image = get_answer_image(answer_id)\n if current_image:\n remove_answer_image(answer_id)\n try:\n os.remove(\"static/uploads/\" + current_image)\n except FileNotFoundError:\n pass", "def delete_image(self, subreddit, name=None, header=False):\n subreddit = six.text_type(subreddit)\n if name 
and header:\n raise TypeError('Both name and header cannot be set.')\n elif name:\n data = {'img_name': name}\n url = self.config['delete_sr_image']\n self.evict(self.config['stylesheet'].format(subreddit=subreddit))\n else:\n data = True\n url = self.config['delete_sr_header']\n url = url.format(subreddit=subreddit)\n return self.request_json(url, data=data)", "def remove_image(self, image):\n try:\n self.client.remove_image(image = image['Id'], force = True)\n Logger.log(\"Removing image: %s\" % image['Id'])\n except:\n Logger.log_container_error(\"image\", image)", "def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)", "def remove(self, product, img_file_name, identifierType=None):\n return self.call('catalog_product_attribute_media.remove',\n [product, img_file_name, identifierType])", "def remove_check_image(self) -> bool:\n os.remove(self.CHECK_IMG_LOC)\n if os.path.exists(self.CHECK_IMG_LOC):\n return False\n return True", "def _remove_thumbnail_file(self, name, save=True):\n attr_name = '_thumbnail_file_%s_cache' % name\n thumbs_file = getattr(self, attr_name, None)\n if thumbs_file:\n thumbs_file.delete(save)\n delattr(self, attr_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare MIME message. Return email.mime.MIMEText.
def _prepare_message(msg):
    msg_mime = MIMEText(msg, 'text', 'utf-8')
    msg_mime['From'] = Header(infomail.fromaddr, charset='utf-8')
    msg_mime['To'] = Header(', '.join(infomail.toaddrs), charset='utf-8')
    msg_mime['Subject'] = Header("VirtualBox images built", charset='utf-8')
    return msg_mime
[ "def CreateMessage(sender, to, subject, message_text):\n #message = MIMEText(message_text)\n message = MIMEText(message_text,'html')\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}", "def get_email_message(self):\n msg = self.get_message_parser()\n to = self.to() or mailparser_utils.get_addresses(msg.to)\n cc = self.cc() or mailparser_utils.get_addresses(msg.cc)\n bcc = self.bcc()\n\n # Process headers, but ignore address headers - these are processed explicitly.\n headers = {\n header: value\n for header, value in msg.headers.items()\n if header.lower() not in PARSED_HEADERS_TO_IGNORE\n }\n\n Email = EmailMultiAlternatives if msg.text_html else EmailMessage\n email = Email(\n subject=msg.subject,\n body='\\n'.join(msg.text_plain),\n from_email=mailparser_utils.get_address(msg.from_),\n to=to,\n bcc=bcc,\n headers=headers,\n cc=cc,\n reply_to=mailparser_utils.get_addresses(msg.reply_to),\n )\n\n # set the multipart subtype\n content_type = msg.headers[\"Content-Type\"].split(\";\", 1)[0] # discard boundary\n main_type, subtype = content_type.split(\"/\", 1)\n if main_type == \"multipart\":\n email.mixed_subtype = subtype\n\n # NOTE - mailparser only supports text and HTML, any other content types are\n # considered not_managed.\n if msg.text_html:\n email.attach_alternative('<br>'.join(msg.text_html), mimetype='text/html')\n\n # attachment is a dict with fixed keys:\n # filename, payload, binary, mail_content_type, content-id, content-disposition,\n # charset and content_transfer_encoding\n #\n # This performs generic handling of attachments, respecting the original various\n # ways the attachment can be used.\n for attachment in msg.attachments:\n basetype, subtype = attachment[\"mail_content_type\"].split(\"/\", 1)\n binary = attachment[\"binary\"]\n content = attachment['payload']\n transfer_encoding = attachment[\"content_transfer_encoding\"]\n\n mime_attachment = MIMEBase(basetype, subtype)\n mime_attachment.set_payload(content)\n if not binary:\n Encoders.encode_base64(mime_attachment)\n else:\n mime_attachment.add_header(\"Content-Transfer-Encoding\", transfer_encoding)\n for header in (\"Content-ID\", \"Content-Disposition\"):\n value = attachment[header.lower()]\n if value:\n mime_attachment.add_header(header, value)\n email.attach(mime_attachment)\n\n return email", "def _make_message_multipart(self):\n # Do nothing if message already multipart\n if self._message.is_multipart():\n return\n\n # Create empty multipart message\n multipart_message = email.mime.multipart.MIMEMultipart('alternative')\n\n # Copy headers, preserving duplicate headers\n for header_key in set(self._message.keys()):\n values = self._message.get_all(header_key, failobj=[])\n for value in values:\n multipart_message[header_key] = value\n\n # Copy text, preserving original encoding\n original_text = self._message.get_payload(decode=True)\n original_encoding = str(self._message.get_charset())\n multipart_message.attach(email.mime.text.MIMEText(\n original_text,\n _charset=original_encoding,\n ))\n\n # Replace original message with multipart message\n self._message = multipart_message", "def process_raw_email(raw, include_headers):\n message = email.message_from_string(raw)\n mailheaders = Parser().parsestr(raw, True)\n body = ''\n other_headers = '\\n'.join(\n [\"%s: %s\" % (k, getheader(v)) for k, v in mailheaders.items() if k not in ('Date', 'Message-ID', 'From', 'To', 'Subject')])\n if include_headers:\n 
body += other_headers\n if message.is_multipart():\n for part in message.walk():\n content_type = part.get_content_type()\n content_disposition = part.get('Content-Disposition')\n \"\"\"\n body += \"Content Disposition: %s\\nContent Type: %s \\n\" % (repr(content_disposition) ,content_type)\n Microsoft sometimes sends the wrong content type. : sending csv as application/octect-stream\n\n \"\"\"\n index_attachments_flag = INDEX_ATTACHMENT_DEFAULT\n extension = str(os.path.splitext(part.get_filename() or '')[1]).lower()\n if extension in SUPPORTED_FILE_EXTENSIONS:\n file_is_supported_attachment = True\n else:\n file_is_supported_attachment = False\n if content_type in SUPPORTED_CONTENT_TYPES or part.get_content_maintype() == 'text':\n content_type_supported = True\n else:\n content_type_supported = False\n if content_type_supported or file_is_supported_attachment:\n if content_disposition is not None and content_disposition != '':\n if \"attachment\" in content_disposition and index_attachments_flag:\n \"\"\"Easier to change to a flag in inputs.conf\"\"\"\n body += \"\\n#BEGIN_ATTACHMENT: %s\\n\" % part.get_filename()\n if extension == '.docx':\n body += read_docx(part.get_payload(decode=True))\n else:\n body += \"\\n%s\" % part.get_payload(decode=True)\n unicode(part.get_payload(decode=True), str(charset), \"ignore\").encode('utf8', 'replace')\n\n body += \"\\n#END_ATTACHMENT: %s\\n\" % part.get_filename()\n else:\n body += \"\\n%s\" % recode_mail(part)\n else:\n body += \"\\n%s\" % recode_mail(part)\n else:\n body += \"\\n#UNSUPPORTED_ATTACHMENT: %s, %s\\n\" % (part.get_filename(),content_type)\n \"\"\"\n else:\n body += \"Found unsupported message part: %s, Filename: %s\" % (content_type,part.get_filename())\n # what if we want to index images for steganalysis? - maybe add hexdump of image\n Give the user the responsibility - add an option for user to specify supported file extensions in input?\n \"\"\"\n else:\n body = recode_mail(message)\n mail_for_index = \"VGhpcyBpcyBhIG1haWwgc2VwYXJhdG9yIGluIGJhc2U2NCBmb3Igb3VyIFNwbHVuayBpbmRleGluZwo=\\n\" \\\n \"Date: %s\\n\" \\\n \"Message-ID: %s\\n\" \\\n \"From: %s\\n\" \\\n \"Subject: %s\\n\" \\\n \"To: %s\\n\" \\\n \"Body: %s\\n\" % (message['Date'], message['Message-ID'],\n message['From'], getheader(message['Subject']), message['To'], body)\n return [message['Date'], message['Message-ID'], mail_for_index]", "def prepareMail(self, mailto, subject, msgHTML, attachments):\n\n\n if type(mailto) == type(list()):\n mailto = ', '.join(mailto)\n\n msg = MIMEMultipart()\n\n msg['From'] = author = formataddr((str(Header(make_unicode(self.display_name), 'utf-8')), self.mail_from))\n msg['To'] = mailto\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = subject\n\n #the Body message\n msg.attach(MIMEText(msgHTML, 'html', 'utf-8'))\n msg.attach(MIMEText(self.mail_signature, \"html\", 'utf-8'))\n if attachments:\n for phile in attachments:\n # we could check for MIMETypes here\n part = MIMEBase('application',\"octet-stream\")\n part.set_payload(open(phile, \"rb\").read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(phile))\n msg.attach(part)\n return msg", "def _make_message_multipart(self):\n # Do nothing if message already multipart\n if self._message.is_multipart():\n return\n\n # Create empty multipart message\n multipart_message = email.mime.multipart.MIMEMultipart('related')\n\n # Copy headers. 
Avoid duplicate Content-Type and MIME-Version headers,\n # which we set explicitely. MIME-Version was set when we created an\n # empty mulitpart message. Content-Type will be set when we copy the\n # original text later.\n for header_key in set(self._message.keys()):\n if header_key.lower() in [\"content-type\", \"mime-version\"]:\n continue\n values = self._message.get_all(header_key, failobj=[])\n for value in values:\n multipart_message[header_key] = value\n\n # Copy text, preserving original encoding\n original_text = self._message.get_payload(decode=True)\n original_subtype = self._message.get_content_subtype()\n original_encoding = str(self._message.get_charset())\n multipart_message.attach(email.mime.text.MIMEText(\n original_text,\n _subtype=original_subtype,\n _charset=original_encoding,\n ))\n\n # Replace original message with multipart message\n self._message = multipart_message", "def _prepare_msg(\n subject, txt_template, html_template, context, to_emails,\n from_email=settings.NOTIFY_FROM_EMAIL):\n\n context = Context(context)\n txt = get_template(txt_template).render(context)\n html = get_template(html_template).render(context)\n\n msg = EmailMultiAlternatives(\n subject, txt, from_email, to_emails)\n msg.attach_alternative(html, \"text/html\")\n return msg", "def create_multipart_message(sender: str, recipient_mail: str, bcc_mail: str, title: str, text: str = None,\n html_text: str = None, attachments: list = None) -> MIMEMultipart:\n multipart_content_subtype = 'alternative' if text and html_text else 'mixed'\n msg = MIMEMultipart(multipart_content_subtype)\n msg['Subject'] = title\n msg['From'] = sender\n msg['To'] = recipient_mail\n msg['Bcc'] = bcc_mail\n\n # Record the MIME types of both parts - text/plain and text/html.\n # According to RFC 2046, the last part of a multipart message, in this case the HTML message, is best and preferred.\n if text:\n part = MIMEText(text, 'plain')\n msg.attach(part)\n if html_text:\n part = MIMEText(html_text, 'html')\n msg.attach(part)\n\n # Add attachments\n for attachment in attachments or []:\n with open(attachment, 'rb') as f:\n part = MIMEApplication(f.read())\n part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachment))\n msg.attach(part)\n\n return msg", "def _create_message_simple(self, sender, to, subject, message_text):\n self.log.info(\"Creating a simple message...\")\n\n message = MIMEText(message_text)\n message[\"to\"] = to\n message[\"from\"] = sender\n message[\"subject\"] = subject\n\n return message", "def to_message(mail):\n ctype, params = mail.content_encoding['Content-Type']\n\n if not ctype:\n if mail.parts:\n ctype = 'multipart/mixed'\n else:\n ctype = 'text/plain'\n else:\n if mail.parts:\n assert ctype.startswith(\"multipart\") or ctype.startswith(\"message\"), \"Content type should be multipart or message, not %r\" % ctype\n\n # adjust the content type according to what it should be now\n mail.content_encoding['Content-Type'] = (ctype, params)\n\n try:\n out = MIMEPart(ctype, **params)\n except TypeError, exc:\n raise EncodingError(\"Content-Type malformed, not allowed: %r; %r (Python ERROR: %s\" %\n (ctype, params, exc.message))\n\n for k in mail.keys():\n if k in ADDRESS_HEADERS_WHITELIST:\n out[k.encode('ascii')] = header_to_mime_encoding(mail[k])\n else:\n out[k.encode('ascii')] = header_to_mime_encoding(mail[k], not_email=True)\n\n out.extract_payload(mail)\n\n # go through the children\n for part in mail.parts:\n out.attach(to_message(part))\n\n return out", "def 
_prepare_message(self):\n self.subject = force_unicode(self.subject, strings_only=True)\n self.message = force_unicode(self.message, strings_only=True)\n self.extra_tags = force_unicode(self.extra_tags, strings_only=True)", "def create_message_with_attachment(params, subject, message_text, file_dir, filename):\n # create a message to send\n message = MIMEMultipart()\n message['to'] = params['to']\n message['from'] = params['sender']\n message['subject'] = subject\n \n msg = MIMEText(message_text)\n message.attach(msg)\n\n path = os.path.join(file_dir, filename)\n content_type, encoding = mimetypes.guess_type(path)\n main_type, sub_type = content_type.split('/', 1)\n\n fp = open(path, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type)\n fp.close()\n\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n\n return {'raw': base64.urlsafe_b64encode(message.as_string())}", "def _create_message_html(self, sender, to, subject, message_text, message_html):\n self.log.info(\"Creating an html message...\")\n\n message = MIMEMultipart(\"alternative\")\n message[\"subject\"] = subject\n message[\"from\"] = sender\n message[\"to\"] = to\n if message_text:\n message.attach(MIMEText(message_text, \"plain\"))\n message.attach(MIMEText(message_html, \"html\"))\n\n return message", "def create_message(sender, to, subject, message_text, name=None, verbose=True):\n if name:\n from_str = f\"{name} <{sender}>\" #https://stackoverflow.com/questions/44385652/add-senders-name-in-the-from-field-of-the-email-in-python\n else:\n from_str = sender\n message = MIMEText(message_text)\n message['to'] = to\n message['from'] = from_str\n message['subject'] = subject\n if verbose:\n print(f\"Message to {message['to']} from {message['from']} with subject {message['subject']}\")\n b64_bytes = base64.urlsafe_b64encode(message.as_bytes())\n b64_string = b64_bytes.decode()\n return {'raw': b64_string} \n #https://stackoverflow.com/questions/46668084/how-do-i-properly-base64-encode-a-mimetext-for-gmail-api, why decode and encode", "def smtp_message(self):\n\n # Create the object if not already cached.\n if not hasattr(self, \"_smtp_message\"):\n from email.message import EmailMessage as EM\n message = EM()\n if self.body:\n message.set_content(self.body, subtype=self.subtype)\n\n # Add attachments if present.\n if self.attachments:\n from email.mime.multipart import MIMEMultipart\n wrapper = MIMEMultipart()\n wrapper.preamble = \"This is a multipart MIME message\"\n wrapper.attach(message)\n for attachment in self.attachments:\n wrapper.attach(attachment.mime_object)\n message = wrapper\n\n # Plug in the headers.\n message[\"From\"] = self.sender\n message[\"To\"] = \", \".join(self.recips)\n if self.subject:\n message[\"Subject\"] = self.subject\n\n # Cache the object.\n self._smtp_message = message\n\n # Return the (possibly cached) object.\n return self._smtp_message", "def create_msg(\n self,\n fromaddr,\n toaddr,\n subject,\n text_message=None,\n html_message=None,\n addendum=None,\n encrypt_msg=False,\n ):\n if text_message is not None and addendum is not None:\n text_message += f\"\\n----\\n{addendum}\"\n if self.gpg is not None:\n keyid = self.find_keyid(toaddr)\n if not keyid:\n raise RuntimeError(f\"[-] no GPG key found for {toaddr}\")\n encrypted_data = self.gpg.encrypt(text_message, keyid)\n if not encrypted_data.ok:\n raise RuntimeError(\n f\"[-] GPG encryption failed: {encrypted_data.stderr}\")\n text_body = str(encrypted_data)\n else:\n text_body = text_message\n msg = 
MIMEMultipart('related')\n msg['Subject'] = subject\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg.preamble = 'This is a multi-part message in MIME format.'\n msg_alternative = MIMEMultipart('alternative')\n msg.attach(msg_alternative)\n part_text = MIMEText(\n lxml.html.fromstring(text_body).text_content().encode('utf-8'),\n 'plain',\n _charset='utf-8',\n )\n if html_message is not None:\n part_html = MIMEText(html_message)\n else:\n part_html = MIMEText(\n text_body.encode('utf-8'),\n 'html',\n _charset='utf-8',\n )\n msg_alternative.attach(part_text)\n msg_alternative.attach(part_html)\n return msg", "def gen_encrypted_email(encryptedstr, boundary=None):\n msg = MIMEMultipartPGP(encryptedstr, boundary=boundary)\n logger.debug('Generated encrypted MIME Multipart.')\n return msg", "def send_mail(context, mto, mfrom, subject, body, mcc=(), mbcc=(),\n attachments=(), related_parts=None,\n encoding=None, plain_text=True, additional_headers=()):\n if encoding is None:\n encoding = get_final_encoding(context)\n if related_parts is None:\n related_parts = {}\n mailhost = getToolByName(context, 'MailHost')\n attachments = list(attachments)\n\n # prepare main content\n content_type = plain_text and 'text/plain' or 'text/html'\n\n if isinstance(body, unicode):\n body = body.encode(encoding)\n\n if plain_text:\n main_msg = MIMEText(body, _subtype='plain', _charset=encoding)\n else:\n alt_html = _make_html_part(body, encoding, related_parts=related_parts)\n alt_plain = MIMEText(html_to_text(body), _charset=encoding)\n main_msg = MIMEMultipart(_subtype='alternative',\n _subparts=[alt_plain, alt_html])\n\n if attachments:\n msg = MIMEMultipart()\n msg.attach(main_msg)\n else:\n msg = main_msg\n\n COMMASPACE = ', '\n\n # Headers\n msg['Subject'] = _encode_header(subject, encoding)\n msg['From'] = _encode_address(mfrom, encoding)\n\n if not mto:\n mto = []\n if isinstance(mto, basestring):\n mto = [mto]\n\n msg['To'] = COMMASPACE.join([_encode_address(to, encoding) for to in mto])\n\n if mcc:\n mcc = isinstance(mcc, basestring) and (mcc,) or mcc\n msg['Cc'] = COMMASPACE.join(\n [_encode_address(cc, encoding) for cc in mcc])\n if not mto:\n # use first Cc as (non header) mail-to\n mto = mcc[0]\n if mbcc:\n # Don't put Bcc in headers otherwise they'd get transferred\n if isinstance(mbcc, basestring):\n mbcc = [mbcc]\n mto.extend(mbcc)\n\n for key, value in additional_headers:\n msg[key] = _encode_header(value, encoding)\n\n if isinstance(subject, unicode):\n msg.preamble = subject.encode(encoding)\n else:\n msg.preamble = subject\n\n # Guarantees the message ends in a newline\n msg.epilogue = ''\n\n # attachment management (if any)\n for title, ctype, data in attachments:\n sub_msg = _make_file_part(title, ctype, data)\n # Set the filename parameter\n sub_msg.add_header('Content-Disposition', 'attachment',\n filename=title)\n msg.attach(sub_msg)\n\n # loggin string\n attachment_log = list((title, ctype) for title, ctype, _ in attachments)\n related_log = list((rel['filename'], rel['content-type'])\n for rel in related_parts.values())\n log_str = 'to: %r, from: %r, subject: %r, body: %r, rel: %r, att: %r' % (\n mto, mfrom, subject, body, related_log, attachment_log)\n logger.debug('sending email %s', log_str)\n\n # sending and error casting\n if not mto:\n raise ValueError(\"Empty final list of recipients address\")\n try:\n return mailhost._send(mfrom, mto, msg.as_string())\n # if anything went wrong: log the error for the admin and raise an exception\n # of type IOError or ValueError that will be 
catched by the callers in\n # order to build a friendly user message\n except (socket.error, smtplib.SMTPServerDisconnected), e:\n logger.error(\"error sending email %s\" % log_str)\n raise IOError(e)\n except smtplib.SMTPRecipientsRefused, e:\n logger.error(\"error sending email %s\" % log_str)\n raise ValueError('invalid_recipients_address')\n except smtplib.SMTPSenderRefused, e:\n logger.error(\"error sending email %s\" % log_str)\n raise ValueError('invalid_sender_address')", "def from_message(message):\n mail = MailBase()\n\n # parse the content information out of message\n for k in CONTENT_ENCODING_KEYS:\n setting, params = parse_parameter_header(message, k)\n setting = setting.lower() if setting else setting\n mail.content_encoding[k] = (setting, params)\n\n # copy over any keys that are not part of the content information\n for k in message.keys():\n if normalize_header(k) not in mail.content_encoding:\n mail[k] = header_from_mime_encoding(message[k])\n \n decode_message_body(mail, message)\n\n if message.is_multipart():\n # recursively go through each subpart and decode in the same way\n for msg in message.get_payload():\n if msg != message: # skip the multipart message itself\n mail.parts.append(from_message(msg))\n\n return mail" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send info mail using data from infomail.py. Argument upload_dir required for making download URL for recipients. Prepare and send message through smtplib.SMTP.
def mail(self, upload_dir):
    url = infomail.download_url.format(os.path.split(upload_dir)[1])
    mymessage = infomail.text_message.format(url)
    mymessage = self._prepare_message(mymessage)
    errpref = "SMTP Problem:"
    smtpconn = smtplib.SMTP(infomail.smtphost, infomail.smtpport)
    try:
        smtpconn.sendmail(infomail.fromaddr, infomail.toaddrs, mymessage.as_string())
    except smtplib.SMTPRecipientsRefused:
        print(errpref, end=' ', file=stderr)
        print("All recipients {} refused".format(infomail.toaddrs), file=stderr)
    except smtplib.SMTPHeloError:
        print(errpref, end=' ', file=stderr)
        print("Server didn't reply properly to the HELLO", file=stderr)
    except smtplib.SMTPSenderRefused:
        print(errpref, "Server didn't accept sender", infomail.fromaddr, file=stderr)
    except smtplib.SMTPDataError:
        print(errpref, "Server didn't accept mail data", file=stderr)
    finally:
        smtpconn.quit()
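For reference, a minimal sketch of the infomail.py settings module this method relies on; the attribute names (smtphost, smtpport, fromaddr, toaddrs, download_url, text_message) are taken from the snippet above, while the values shown are placeholder assumptions only.

# infomail.py -- hypothetical settings module; attribute names match the snippet, values are placeholders
smtphost = "smtp.example.com"
smtpport = 25
fromaddr = "uploads@example.com"
toaddrs = ["team@example.com"]
download_url = "https://files.example.com/uploads/{}"   # filled with the upload directory name
text_message = "A new upload is available for download at:\n{}"  # filled with the download URL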
[ "def send_email(sender, to, cc, subject, body, body_format, file_path, file_list):\n\n msg = MIMEMultipart()\n msg['From'] = sender\n msg['To'] = to\n msg['Cc'] = cc\n msg['Subject'] = subject\n text = body\n\n part1 = MIMEText(text, body_format)\n msg.attach(part1)\n\n ## ATTACHMENT PART OF THE CODE IS HERE\n for file in file_list:\n\n SourcePathName = file_path + file \n attachment = open(SourcePathName, 'rb')\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', f\"attachment; filename={file}\")\n msg.attach(part)\n\n server = smtplib.SMTP(\"mail.us164.corpintra.net\")\n server.send_message(msg)\n server.quit()", "def send_email_with_attachment():\r\n # basic info\r\n smtpServer = \"smtp.163.com\"\r\n account = \"onebigbera@163.com\"\r\n password = \"george9527\"\r\n sender = \"onebigbera@163.com\"\r\n receiver = \"2578288992@qq.com\"\r\n\r\n # instantiation an mail object\r\n message = MIMEMultipart()\r\n message['From'] = sender\r\n message['To'] = receiver\r\n content = \"<html><h4 style='color:red'>亲爱的小心有熊出没:</br>爱可能会迟到,但永远不会缺席!</br></h4><p><span>下面为测试报告,请查看!</span></p></html>\"\r\n subject = '寒冷的季节,温暖的是人心 ^_^ !'\r\n message[\"Subject\"] = Header(subject, 'utf-8')\r\n\r\n # attach the content\r\n message.attach(MIMEText(content, 'html', 'utf-8'))\r\n\r\n # instantiation attachment object\r\n html_path = r'F:\\Testing_Development\\UnittestProjects\\automated_testing\\automated_testing\\module_structure_management\\test_report\\2019-10-12_11_21_57result.html'\r\n # get attachment stream\r\n attachment_1 = MIMEText(open(html_path).read(), 'base64', 'utf-8')\r\n\r\n # set property\r\n attachment_1['Content-Type'] = 'application/octet-stream'\r\n attachment_1['Content-Disposition'] = 'attachment; filename=\"report.html\"'\r\n\r\n message.attach(attachment_1)\r\n\r\n att2 = MIMEText(open(\r\n r'F:\\Testing_Development\\UnittestProjects\\UnittestBasic\\51zxw_selenium_example\\emailSender\\attachment\\test1.jpg',\r\n 'rb').read(), 'base64', 'utf-8')\r\n # set attachment\r\n att2[\"Content-Type\"] = 'application/octet-stream'\r\n att2[\"Content-Disposition\"] = 'attachment; filename=\"test1.jpg\"'\r\n message.attach(att2)\r\n\r\n # txt file\r\n att3 = MIMEText(open(\r\n r'F:\\Testing_Development\\UnittestProjects\\UnittestBasic\\51zxw_selenium_example\\emailSender\\attachment\\test.txt',\r\n 'rb').read(), 'base64', 'utf-8')\r\n # attachment setting\r\n att3[\"Content-Type\"] = 'application/octet-stream'\r\n att3[\"Content-Disposition\"] = 'attachment; filename=\"test.txt\"'\r\n message.attach(att3)\r\n\r\n smtp = smtplib.SMTP_SSL(smtpServer, 465)\r\n try:\r\n smtp.helo(smtpServer)\r\n smtp.ehlo(smtpServer)\r\n smtp.login(account, password)\r\n except BaseException as e:\r\n print(e)\r\n\r\n try:\r\n print(\"Begin to send >>>\")\r\n smtp.sendmail(sender, receiver, message.as_string())\r\n print(\"Send finished...\")\r\n except BaseException as e:\r\n print(e)", "def send_alert_attached(subject, flist):\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = mailsender\n msg['To'] = mailreceip\n #message = \"Thank you\"\n msg.attach(MIMEText(\"Galindo Reyes Agustin\", 'plain'))\n \"\"\"for file in flist:\n png_file = file.split('.')[0] + '.png'\n print(png_file)\n fp = open(png_file, 'rb')\n img = MIMEImage(fp.read())\n fp.close()\n msg.attach(img)\"\"\"\n fp = open(\"pred.png\", 'rb')\n img = MIMEImage(fp.read())\n fp.close()\n mserver = smtplib.SMTP(mailserver)\n 
mserver.starttls()\n # Login Credentials for sending the mail\n mserver.login(mailsender, password)\n\n mserver.sendmail(mailsender, mailreceip, msg.as_string())\n mserver.quit()", "def main(args):\n\n outer= MIMEMultipart()\n # Credentials (if needed)\n if args.verbose:\n msg= message_from_prompt()\n elif args.formatfile:\n msg= message_from_file(open(args.formatfile))\n else:\n msg= message_from_args(args)\n outer.attach(msg)\n outer= transfer_msg_info(msg, outer)\n username= outer['from'].split('@')[0]\n\n #Password from argument or getpass\n if args.password:\n password= args.password\n else:\n password= getpass.getpass()\n \n #Attach files if requested\n if args.attachment:\n outer.attach(build_attachment(args.attachment))\n\n send_gmail(username, password, outer)", "def email_file(file_path, file_name):\r\n from_address = \"ovedimperia@gmail.com\"\r\n to_address = \"ovedimperia@gmail.com\"\r\n\r\n msg = MIMEMultipart()\r\n\r\n msg['From'] = from_address\r\n msg['To'] = to_address\r\n msg['Subject'] = \"File_Transfer From: {} At {}\".format(str(get_lan_ip()),\r\n str(\r\n datetime.datetime.now()))\r\n email_body = \"A successful file transfer\"\r\n\r\n msg.attach(MIMEText(email_body, 'plain'))\r\n\r\n attachment = open(str(file_path),\"rb\")\r\n\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload((attachment).read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition',\r\n \"attachment; filename= %s\" % file_name)\r\n\r\n msg.attach(part)\r\n\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.starttls()\r\n server.login(from_address, \"ovedimperia1\")\r\n text = msg.as_string()\r\n server.sendmail(from_address, to_address, text)\r\n server.quit()", "def sendEmail(sendTo,textfile,logfile,img):\r\n # Open a plain text file for reading\r\n msg = MIMEMultipart()\r\n\r\n # Read the text file <-- Error msg from OCR module\r\n if(textfile!=\"\"):\r\n fp = open(textfile, 'rb')\r\n text = MIMEText(fp.read())\r\n fp.close()\r\n msg.attach(text)\r\n\r\n if(logfile=='y'):\r\n filename = \"log.txt\"\r\n fp = open(filename)\r\n log = MIMEText(fp.read())\r\n fp.close()\r\n log.add_header('Content-Disposition', 'attachment', filename=filename)\r\n msg.attach(log)\r\n\r\n msg['Subject'] = 'An event has occurred at the MS'\r\n msg['From'] = \"mass.checker@gmail.com\"\r\n msg['To'] = sendTo\r\n\r\n # Load screenshot and attach to email\r\n fp = open(img, 'rb')\r\n img = MIMEImage(fp.read())\r\n fp.close()\r\n msg.attach(img)\r\n\r\n # Send the message\r\n server = smtplib.SMTP('smtp.gmail.com',587)\r\n server.starttls()\r\n server.login(\"mass.checker@gmail.com\", \"massspecchecker1234\")\r\n\r\n server.sendmail(\"mass.checker@gmail.com\", sendTo, msg.as_string())\r\n server.quit()", "def email_success_attachments(dirname, attachments, addresses, smtp_server, smtp_user, smtp_password):\n # Set up multipart message\n msg = MIMEMultipart()\n msg['Subject'] = '%s requires manual intervention' % dirname\n msg['To'] = ', '.join(addresses)\n msg['From'] = \"p2b@localhost\"\n msg.preamble = 'You will not see this in a MIME-aware mail reader.\\n'\n\n # Create and add body\n body = \"%s/Output.xml is ready to be uploaded.\\n\" % dirname\n body += \"Additionally the following files will need to be manually attached: \\n\"\n for att in attachments:\n body += os.path.basename(att) + \"\\n\"\n part1 = MIMEText(body, 'plain')\n msg.attach(part1)\n\n # Send the email using SMTP\n s = smtplib.SMTP(smtp_server, 25)\n if smtp_user and smtp_password:\n s.login(smtp_user, 
smtp_password)\n s.sendmail(\"p2b@localhost\", addresses, msg.as_string())\n s.quit()", "def get_attachement(detach_dir:str):\n # if 'attachments' not in os.listdir(detach_dir):\n # os.mkdir(detach_dir + \"/\" + 'attachments')\n # detach_dir = detach_dir + \"/attachments\"\n\n # user_name = input('Enter your GMail user_name:')\n user_name = os.environ.get(\"RESPONSYS_GMAIL\")\n # passwd = getpass.getpass('Enter your password: ')\n passwd = os.environ.get(\"RESPONSYS_GMAIL_PASSWD\")\n\n try:\n imap_session = imaplib.IMAP4_SSL('imap.gmail.com')\n return_code, account_details = imap_session.login(user_name, passwd)\n if return_code != 'OK':\n print('Not able to sign in!')\n raise\n \n labels = imap_session.list()[1]\n # imap_session.select('[Gmail]/All Mail')\n for l in labels:\n print(l)\n imap_session.select('INBOX')\n # return_code, data = imap_session.search(None, 'ALL')\n return_code, data = imap_session.search(None, '(UNSEEN)')\n if return_code != 'OK':\n print('Error searching Inbox.')\n raise\n \n # Iterating over all emails\n for msgId in data[0].split():\n return_code, message_parts = imap_session.fetch(msgId, '(RFC822)')\n if return_code != 'OK':\n print('Error fetching mail.')\n raise\n\n email_body = message_parts[0][1]\n mail = email.message_from_bytes(email_body)\n for part in mail.walk():\n if part.get_content_maintype() == 'multipart':\n # print(part.as_string())\n continue\n if part.get('Content-Disposition') is None:\n # print(part.as_string())\n continue\n file_name = part.get_filename()\n\n if bool(file_name):\n # print(\"Raw Date: \", mail[\"Date\"])\n # 'Sun, 15 Jul 2018 08:07:08 +0000'\n date_tuple = email.utils.parsedate_tz(mail['Date'])\n local_date = datetime.datetime.fromtimestamp(email.utils.mktime_tz(date_tuple))\n date_suffix = local_date.strftime(\"%Y-%m-%d\")\n file_name = file_name + \".\" + date_suffix\n\n file_path = os.path.join(detach_dir, file_name)\n if not os.path.isfile(file_path):\n print(file_name)\n fp = open(file_path, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n else:\n print(file_name + \" was already downloaded.\")\n imap_session.close()\n imap_session.logout()\n call('pwd')\n call('/Users/charliezhu/work/bin/email_metrics_load.sh')\n except (Exception) as error:\n print(error)\n print('Not able to download all attachments.')", "def send_mail_with_embeded_data(mail_from,send_to,subject,txt,img_path=None,file_path=None):\n smtp_info = GLOBAL['default']['smtp-server']\n smtp_server,smtp_port = smtp_info.split(':')\n\n msg = MIMEMultipart('related')\n msg['Subject'] = subject\n msg['From'] = mail_from\n msg['To'] = COMMASPACE.join([send_to])\n msg['Date'] = formatdate(localtime=True)\n # msg.attach(MIMEText(txt,'plain'))\n msg.preamble = txt\n\n if img_path:\n BuiltIn().log(\" Attached an image from `%s`\" % img_path)\n msg_alt = MIMEMultipart('alternative')\n msg.attach(msg_alt)\n img_txt = MIMEText('<img src=\"cid:image\">', 'html')\n msg_alt.attach(img_txt)\n\n img_data = MIMEImage(open(img_path,'rb').read(), name=os.path.basename(img_path))\n BuiltIn().log(\" Loaded data from `%s`\" % img_path)\n img_data.add_header('Content-ID','<image>')\n msg.attach(img_data)\n with smtplib.SMTP(smtp_server,int(smtp_port)) as s:\n s.sendmail(msg['From'],msg['To'],msg.as_string())\n BuiltIn().log(\"Sent a mail from `%s` to `%s`\"% (mail_from,send_to))", "def send_email(get_trademark_url, email_data):\n urls_list = get_trademark_url\n\n tm_database_files = glob.glob('tm_*.html')\n fromaddr = email_data.sender\n toaddr = email_data.receiver\n 
msg = MIMEMultipart()\n msg['From'] = fromaddr\n msg['To'] = toaddr\n msg['Subject'] = \"Trademark monitoring results\"\n\n msg_intro = MIMEText(\"Dears,\\n\\nbelow see the results from the trademark monitoring \"\n \"made after a month. Attached find the tables of results for \"\n \"particular keywords. In case you would like to investigate \"\n \"suspicious applications, click on the relevant link depending \"\n \"on the trademark application number:\\n\", 'plain')\n msg.attach(msg_intro)\n\n msg_urls = MIMEText(('\\n'.join('{}\\n'.format(value) for value in urls_list))\n .replace('{', '').replace('}', '').replace('\\'', ''), 'plain')\n msg.attach(msg_urls)\n\n for file in tm_database_files:\n with open(file, \"rb\") as f:\n msg_attachments = MIMEApplication(f.read(), name=os.path.basename(file))\n msg_attachments['Content-Disposition'] = 'attachment; filename=\"%s\"' % \\\n os.path.basename(file)\n msg.attach(msg_attachments)\n\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.connect('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(fromaddr, email_data.openkeyword)\n text = msg.as_string()\n server.sendmail(fromaddr, toaddr, text)\n server.quit()\n print(\"Email sent!\")", "def sendMailToProprio():", "def processMails(imapserver: str, username: str, password: str, imap_path: str,\n directory_path: str, numberOfMessages: int, deleteAfterDownload: bool = False,\n query: str = None) -> None:\n # check if the output folder exists:\n # (right now, a terminal exception is being thrown if the folder doesn't exist) \n # numberOfMessages = 5000\n count = 1\n totalcount = 0\n # query = AND(date_gte=datetime.date(2020, 1, 1), date_lt=datetime.date(2020, 2, 4))\n # get list of email messages from the specified folder\n with MailBox(imapserver).login(username, password, initial_folder=imap_path) as mailbox:\n logger.info(\"Mailbox {}@{}/{} opened ... 
\".format(username, imapserver, imap_path, sep=\"\"))\n try:\n # Q(subject='Saludos'), \n # for msg in mailbox.fetch(query, limit=numberOfMessages, miss_no_uid=True, miss_defect=False): # Q(all=True)\n if query == None:\n query = Q(all=True)\n for msg in mailbox.fetch(query, limit=numberOfMessages, miss_no_uid=False, miss_defect=False, mark_seen=False): # Q(all=True)\n totalcount += 1\n # sometimes there's an attribute error in the following line because the mail address cannot be parsed:\n name = msg.from_\n name = name.replace('/', '-') # Deutsche Bahn puts LDAP info in their mail addresses ....\n # remove special characters from the subject line:\n subject = msg.subject.replace('/', '-').replace(' ', '_').replace('?', '_').replace('\\x00', '').replace('\\x09', '').replace('\\x08', '').replace('\\x0A', '').replace('\\x0D', '')\n filename = \"{}/{}_{}_({})_{}.eml\".format(\n directory_path,\n datetime.datetime.strftime(msg.date, '%Y-%m-%d_%H-%M-%S'),\n name,\n msg.uid,\n subject[0:100]).replace('\\x00', '').replace('\\x09', '').replace('\\x08', '').replace('\\x0A', '').replace('\\x0D', '')\n # logger.debug(\"{}/{}: Processing {} -> {}, {} ...\".format(totalcount, numberOfMessages, name, msg.to, subject))\n if not os.path.isfile(filename):\n with open(filename, 'x', encoding='utf-8') as f:\n logger.info(\"{}/{}: Writing {} ...\".format(count, numberOfMessages, filename))\n f.write(msg.obj.as_bytes().decode(encoding='ISO-8859-1'))\n if deleteAfterDownload:\n mailbox.delete(msg.uid)\n logger.debug(\"Deleted message uid {} ...\".format(msg.uid))\n count += 1\n # set the time of the created file to the time of the e-mail:\n ts = msg.date.timestamp()\n os.utime(filename, (ts, ts))\n else:\n logger.warn(\"File {} already exists!\".format(filename))\n except (RuntimeError, AttributeError) as error:\n logger.error(\"Error while processing message uid {}: {}\".format(msg.uid, error))\n traceback.print_last()", "def send_mail(mail_creds_file, item, prev_price, curr_price, url):\n mail_credentails = get_mail_credentails(mail_creds_file)\n\n try:\n msg = MIMEMultipart()\n msg['From'] = MAIL_FROM\n msg['To'] = ', '.join(SUCCESS_MAIL_TO)\n msg['Subject'] = \"Price drop for \" + item\n\n text_body = \"There is a price drop of Rs \" + str(float(prev_price) - curr_price) +\\\n \" that you might be interested in:\\n\\n\"\n html_body = create_html(item, prev_price, curr_price, url)\n\n msg.attach(MIMEText(text_body, 'plain'))\n msg.attach(MIMEText(html_body, 'html'))\n\n server = smtplib.SMTP(mail_credentails['SMTP_SERVER'], mail_credentails['SMTP_PORT'])\n server.ehlo()\n server.starttls()\n server.ehlo()\n server.login(mail_credentails['LOGIN_USER'], mail_credentails['LOGIN_PASSWORD'])\n server.sendmail(MAIL_FROM, SUCCESS_MAIL_TO, msg.as_string())\n except Exception as error:\n print(error)\n system_exit_error(\"PriceEmailError\")", "def send_mail(config, frm_addr, to_addr, Subject, text, headers = None, files=[], html_body=False, **kw):\n #In headers we send type of data, cc, Subjects\n if headers is None: headers = {}\n \n #with Default settings it works without using SSL and without login.\n server = config.get('server')\n port = config.get('port', 25)\n startSSL = config.get('startSSL', False)\n startTLS = config.get('startTLS', False)\n username = config.get('username', None)\n password = config.get('password', None)\n cc = kw.get('cc', [])\n bcc = kw.get('bcc', [])\n\n def listify(x):\n if not isinstance(x, list):\n return [x]\n return x\n \n #Here are all the recepients. 
\n cc = listify(cc)\n bcc = listify(bcc)\n to_addr = listify(to_addr)\n recipients = to_addr+cc+bcc\n\n frm_addr = str(frm_addr)\n\n files = listify(files)\n \n #Here are the headers to send message..\n if cc:\n headers['Cc'] = \", \".join(cc)\n \n headers = dictadd({\n 'MIME-Version': '1.0',\n 'Content-Type': 'text/plain; charset=UTF-8',\n 'Content-Disposition': 'inline',\n 'From': frm_addr,\n 'To': \", \".join(to_addr),\n 'Subject': Subject\n }, headers)\n\n #parsing the to and from addresses\n import email.Utils\n from_address = email.Utils.parseaddr(frm_addr)[1]\n recipients = [email.Utils.parseaddr(r)[1] for r in recipients]\n\n #Creating a message to send from server\n message = MIMEMultipart()\n for k, v in headers.items():\n message.add_header(k, v)\n\n if html_body == True:\n txt_msg = MIMEText(text,'html','UTF-8')\n else:\n txt_msg = MIMEText(text,'plain','UTF-8')\n message.attach(txt_msg)\n #message.attach(MIMEText(text))\n \n for f in files:\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(f).read()) \n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(f))\n message.attach(part)\n\n #making a connection with server\n if startSSL:\n con = smtplib.SMTP_SSL(server, port)\n else:\n con = smtplib.SMTP(server)\n if startTLS:\n con.starttls()\n\n # Logging into server \n if username and password:\n con.login(username, password)\n \n #Now we are ready to send the data..\n con.sendmail(from_address, recipients, message.as_string())\n\n #Closing the connection\n con.quit()", "def create_message_with_multi_attachment(sender, to, subject, message_text, folderpath, startnum, endnum):\n message = MIMEMultipart()\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n file = sorted(os.listdir(folderpath))\n msg = MIMEText(message_text)\n message.attach(msg)\n #for i in range(len(file)):\n for i in range(endnum-startnum):\n content_type, encoding = mimetypes.guess_type(file[startnum + i])\n \n if content_type is None or encoding is not None:\n content_type = 'application/octet-stream'\n main_type, sub_type = content_type.split('/', 1)\n \n if main_type == 'image':\n fp = open(folderpath+file[startnum+i], 'rb')\n msg = MIMEImage(fp.read(), _subtype = sub_type)\n fp.close()\n else:\n fp = open(folderpath+file[i], 'rb')\n msg = MIMEBase(main_type, sub_type)\n msg.set_payload(fp.read())\n fp.close()\n filename = os.path.basename(folderpath+file[startnum+i])\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n \n return {'raw': base64.urlsafe_b64encode(message.as_string())}", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n messages = get_messages(service, query='from:nihan has:attachment')\n if not messages:\n print('No messages with current criteria were found.')\n else:\n print('Found {} messages. 
Now fetching attachments'.format(\n len(messages)))\n msg_counts = defaultdict(int)\n for message in messages:\n cur_message_id = message['id']\n cur_message = service.users().messages().get(\n userId='me', id=cur_message_id).execute()\n cur_message_date = get_message_date(cur_message)\n cur_message_attchs = get_files_attached(cur_message)\n if cur_message_attchs:\n msg_counts[cur_message_date] += 1\n msg_dir = \"{}_{:03d}\".format(\n cur_message_date, msg_counts[cur_message_date])\n msg_path = \"{}/message.json\".format(msg_dir)\n try:\n os.mkdir(msg_dir)\n except OSError:\n print(\"Found '{}', using it!\".format(msg_dir))\n if not os.path.isfile(msg_path):\n with open(msg_path, 'w') as f:\n json.dump(cur_message, f, indent=3,\n separators=(',', ': '))\n else:\n print(\"Found a message in {}, skipping it\".format(msg_dir))\n for attch in cur_message_attchs:\n file_name = \"{}/{}\".format(\n msg_dir, unicode(attch['filename']).encode(\"utf-8\"))\n if not os.path.isfile(file_name):\n with open(file_name, 'w') as f:\n file_data = base64.urlsafe_b64decode(\n get_attachment(service, cur_message_id,\n attch['attchId']))\n f.write(file_data)\n else:\n print(\"Found attachment '{}', skipping it\".format(\n file_name))", "def mail(path, username, password, smtp_host, smtp_port, ssl):\n if not ssl:\n s = SMTP(smtp_host, smtp_port)\n else:\n s = SMTP_SSL(smtp_host, smtp_port)\n try:\n s.login(username, password)\n except:\n s.quit()\n raise\n try:\n for msg in delivery_queue(path):\n receiver = msg['To']\n sender = msg['From']\n try:\n s.sendmail(sender, receiver, msg.as_string())\n logger.info('Mail sent: %s' % receiver)\n except SMTPRecipientsRefused:\n logger.warning('%s address refused' % receiver)\n sleep(0.1)\n finally:\n s.quit()", "def send_mass_mail(datatuple, extra={}, fail_silently=False, auth_user=settings.EMAIL_HOST_USER,\n auth_password=settings.EMAIL_HOST_PASSWORD, tls=getattr(settings, 'EMAIL_TLS', False),\n encoding=settings.DEFAULT_CHARSET):\n try:\n SMTP = smtplib.SMTP\n if settings.EMAIL_DEBUG:\n SMTP = STMPMock\n server = SMTP(settings.EMAIL_HOST, settings.EMAIL_PORT)\n server.ehlo()\n server.esmtp_features[\"auth\"] = \"LOGIN PLAIN\"\n if tls:\n server.starttls()\n server.ehlo()\n if auth_user and auth_password:\n server.login(auth_user, auth_password)\n except:\n if fail_silently:\n return\n raise\n num_sent = 0\n\n for subject, message, from_email, recipient_list, cc_list in datatuple:\n if not recipient_list:\n continue\n from_email = from_email or settings.DEFAULT_FROM_EMAIL\n #################################################\n msg = None\n if isinstance(message, SafeMIMEText) or isinstance(message, SafeMIMEMultipart):\n ## Change below is important!\n ## msg does not act as a proper dictionary... 
msg['key'] = value does not\n ## reset the value for msg['key'], but adds to it!\n msg = copy.deepcopy(message)\n else:\n msg = SafeMIMEText(message.encode(encoding), 'plain', encoding)\n #################################################\n # TODO: we should encode header fields that aren't pure ASCII, see:\n # http://maxischenko.in.ua/blog/entries/103/python-emails-i18n/\n msg['Subject'] = Header(subject, encoding)\n msg['From'] = from_email\n msg['To'] = ', '.join(recipient_list)\n msg['Date'] = rfc822.formatdate()\n if cc_list:\n msg['Cc'] = ', '.join(cc_list)\n recipient_list.extend(cc_list)\n if extra:\n for key in extra.keys():\n msg[key] = extra[key]\n try:\n server.sendmail(from_email, recipient_list, msg.as_string())\n num_sent += 1\n except:\n if not fail_silently:\n raise\n try:\n server.quit()\n except:\n if fail_silently:\n return\n raise\n return num_sent", "def send_message_attachment(self, template_id, attachments, to, reply_to, from_name, subject, merge_fields=None, view_online=False,\n click_tracking=True, suppress_address=False):\n boundary = ''.join(random.choice(string.digits + string.ascii_letters) for i in range(30))\n binary = io.BytesIO()\n for attachment in attachments:\n with (open(attachment, \"rb\")) as f:\n lines = []\n lines.extend((\n '--{0}'.format(boundary),\n 'Content-Disposition: form-data; name=\"file\"; filename=\"{0}\"'.format(os.path.basename(attachment)),\n 'Content-Type: application/octet-stream',\n ''\n ))\n \n binary.write('\\r\\n'.join(lines).encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(f.read())\n binary.write(b'\\r\\n')\n binary.write('--{0}'.format(boundary).encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"template_id\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(str(template_id).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n \n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"reply_to\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(reply_to.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n \n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"from\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(from_name.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"to\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(to.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"subject\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(subject.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n \n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"view_online\"')\n binary.write(b'\\r\\n\\r\\n')\n 
binary.write(str(view_online).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"suppress_address\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(str(suppress_address).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"click_tracking\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(str(click_tracking).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"merge_fields\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(json.dumps(merge_fields).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'--\\r\\n')\n\n return self.ep.post(self.endpoint,\n content_type=\"multipart/form-data; boundary={0}\".format(boundary), body=binary.getvalue())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import virtual machines from self.vmlist.
def vmimport(self, func=just_import):
    ovas = len(self.vmlist)
    if ovas == 1:
        vmname = func(self.vmlist[0])
        self.results.append(vmname)
    elif ovas <= self.threads:
        self._import_pool(ovas, self.vmlist, func)
    else:
        tmplist = self.vmlist
        while tmplist:
            self._import_pool(self.threads, tmplist[:self.threads], func)
            tmplist = tmplist[self.threads:]
    return self.results
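A brief usage sketch for vmimport, assuming an importer object that exposes the vmlist, threads, results and _import_pool members referenced above; the class name used here is hypothetical, and just_import / force_import are the module-level helpers shown among the related snippets below.

# Hypothetical usage; OvaImporter stands in for whatever class defines vmimport().
importer = OvaImporter(vmlist=["web01.ova", "db01.ova", "cache01.ova"], threads=2)
names = importer.vmimport()  # default just_import skips OVAs whose VM already exists
print(names)                 # VM names accumulated in self.results

# Passing force_import instead removes and re-imports any VM with the same name:
# names = importer.vmimport(force_import)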
[ "def importvm(self, ova):\n assert os.path.exists(ova), \"{} not found\" % ova\n subprocess.call(['VBoxManage', 'import', ova,\n '--options', 'keepallmacs'])\n time.sleep(10)\n grouped = self._groupvm()\n sfolders = self._sharedfolders()\n return grouped, sfolders", "def start_VM(self, host):\n action = self.cmc.virtual_machines.start(group_name(host), vm_name(host))\n action.wait()", "def create_vms(self):\n\n\t\tfor vmx_path in self.vmx_files:\n\t\t\t#vm = self.create_vm(\"/Users/alex/Documents/Virtual Machines.localized/macOS 10.12.vmwarevm/macOS 10.12.vmx\")\n\t\t\tvm = self.create_vm(vmx_path)\n\t\t\tself.start_vm(vm)\n\t\t\tself.virtual_machines[str(vm.uuid)] = vm\n\n\t\ttime.sleep(5)", "def just_import(ova):\n name = os.path.split(ova)[1].split('.')[0]\n v_machine = VirtualMachine(name)\n # This must throw exception if such VM already exists.\n try:\n v_machine.checkvm()\n except VirtualMachineExistsError:\n print(\"WARNING: %s already exists. Skipping...\" % name)\n else:\n v_machine.importvm(ova)\n return name", "def force_import(ova):\n name = os.path.split(ova)[1].split('.')[0]\n v_machine = VirtualMachine(name)\n try:\n v_machine.checkvm()\n except VirtualMachineExistsError:\n v_machine.removevm()\n v_machine.importvm(ova)\n return name", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.uuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid:\n return\n\n log.debug2(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_name = get_string_or_none(grab(obj, \"runtime.host.name\"))\n cluster_name = get_string_or_none(grab(obj, \"runtime.host.parent.name\"))\n\n # honor strip_host_domain_name\n if cluster_name is not None and self.strip_host_domain_name is True and \\\n parent_name.split(\".\")[0] == cluster_name.split(\".\")[0]:\n cluster_name = cluster_name.split(\".\")[0]\n\n # check VM cluster\n if cluster_name is None:\n log.error(f\"Requesting cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n elif self.permitted_clusters.get(cluster_name) is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n if name in self.processed_vm_names.get(cluster_name, list()):\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add host to processed list\n if self.processed_vm_names.get(cluster_name) is None:\n self.processed_vm_names[cluster_name] = list()\n\n self.processed_vm_names[cluster_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = self.permitted_clusters.get(cluster_name)\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_name)\n\n # first check against vm_platform_relation\n platform = grab(obj, \"config.guestFullName\")\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n for platform_relation in grab(self, \"vm_platform_relation\", fallback=list()):\n\n if platform is None:\n break\n\n object_regex = platform_relation.get(\"object_regex\")\n if object_regex.match(platform):\n platform = platform_relation.get(\"platform_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {platform}, using mapped platform '{platform}'\")\n break\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if bool(self.skip_vm_comments) is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = None\n for tenant_relation in grab(self, \"vm_tenant_relation\", fallback=list()):\n object_regex = tenant_relation.get(\"object_regex\")\n if object_regex.match(name):\n tenant_name = tenant_relation.get(\"tenant_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {name}, using tenant '{tenant_name}'\")\n break\n\n vm_data = {\n \"name\": name,\n \"cluster\": {\"name\": cluster_name},\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = 
normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if 
ip_valid_to_add_to_netbox(int_ip_address, self.permitted_subnets, int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": int_description,\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = self.get_vlan_object_if_exists({\n \"name\": int_network_name,\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n })\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append(self.get_vlan_object_if_exists({\n \"name\": f\"{int_network_name}-{int_network_vlan_id}\",\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n }))\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, site_name=site_name, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6)\n\n return", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.settings.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.instanceUuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid and obj not in self.objects_to_reevaluate:\n return\n\n log.debug(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.settings.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n if bool(self.settings.skip_srm_placeholder_vms) is True \\\n and f\"{grab(obj, 'config.managedBy.extensionKey')}\".startswith(\"com.vmware.vcDr\"):\n log.debug2(f\"VM '{name}' is a SRM placeholder VM. 
Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_host = self.get_parent_object_by_class(grab(obj, \"runtime.host\"), vim.HostSystem)\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ClusterComputeResource)\n\n # get single host 'cluster' if VM runs on one\n if cluster_object is None:\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ComputeResource)\n\n if self.settings.set_source_name_as_cluster_group is True:\n group = self.inventory.get_by_data(NBClusterGroup, data={\"name\": self.name})\n else:\n group = self.get_parent_object_by_class(cluster_object, vim.Datacenter)\n\n if None in [parent_host, cluster_object, group]:\n log.error(f\"Requesting host or cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n nb_cluster_object = self.get_object_from_cache(cluster_object)\n\n # check VM cluster\n if nb_cluster_object is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n parent_name = grab(parent_host, \"name\")\n cluster_name = grab(nb_cluster_object, \"data.name\")\n cluster_full_name = f\"{group.name}/{cluster_name}\"\n\n if name in self.processed_vm_names.get(cluster_full_name, list()) and obj not in self.objects_to_reevaluate:\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_full_name}' already parsed. \"\n \"Make sure to use unique VM names. Skipping\")\n return\n\n # add vm to processed list\n if self.processed_vm_names.get(cluster_full_name) is None:\n self.processed_vm_names[cluster_full_name] = list()\n\n self.processed_vm_names[cluster_full_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = nb_cluster_object.get_site_name()\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_full_name)\n\n # first check against vm_platform_relation\n platform = get_string_or_none(grab(obj, \"config.guestFullName\"))\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n if platform is not None:\n platform = self.get_object_relation(platform, \"vm_platform_relation\", fallback=platform)\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if self.settings.skip_vm_comments is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = self.get_object_relation(name, \"vm_tenant_relation\")\n\n # assign vm_tag_relation\n vm_tags = self.get_object_relation(name, \"vm_tag_relation\")\n\n # get vCenter tags\n vm_tags.extend(self.collect_object_tags(obj))\n\n vm_data = {\n \"name\": name,\n \"cluster\": nb_cluster_object,\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n # Add adaption for change in NetBox 3.3.0 VM model\n # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758\n if 
version.parse(self.inventory.netbox_api_version) >= version.parse(\"3.3.0\"):\n vm_data[\"site\"] = {\"name\": site_name}\n\n if self.settings.track_vm_host:\n vm_data[\"device\"] = self.get_object_from_cache(parent_host)\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n if len(vm_tags) > 0:\n vm_data[\"tags\"] = vm_tags\n\n # add custom fields if present and configured\n vm_custom_fields = self.get_object_custom_fields(obj)\n if len(vm_custom_fields) > 0:\n vm_data[\"custom_fields\"] = vm_custom_fields\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # track MAC addresses in order add dummy guest interfaces\n processed_interface_macs = list()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n processed_interface_macs.append(int_mac)\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = 
grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": unquote(int_full_name),\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": unquote(int_description),\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None and self.settings.sync_vm_interface_mtu is True:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = {\n \"name\": unquote(int_network_name),\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n }\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append({\n \"name\": 
unquote(f\"{int_network_name}-{int_network_vlan_id}\"),\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n })\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # find dummy guest NIC interfaces\n if self.settings.sync_vm_dummy_interfaces is True:\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC MAC\n guest_nic_mac = normalize_mac_address(grab(guest_nic, \"macAddress\"))\n\n # skip interfaces of MAC addresses for already known interfaces\n if guest_nic_mac is None or guest_nic_mac in processed_interface_macs:\n continue\n\n processed_interface_macs.append(guest_nic_mac)\n\n int_full_name = \"vNIC Dummy-{}\".format(\"\".join(guest_nic_mac.split(\":\")[-2:]))\n\n log.debug2(f\"Parsing dummy network device: {guest_nic_mac}\")\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is True:\n nic_ips[int_full_name].append(int_ip_address)\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": guest_nic_mac,\n \"enabled\": grab(guest_nic, \"connected\", fallback=False),\n }\n\n if len(nic_ips.get(int_full_name, list())) == 0:\n log.debug(f\"Dummy network interface '{int_full_name}' has no IP addresses assigned. Skipping\")\n continue\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6,\n vmware_object=obj)\n\n return", "def add_machines(wf, query=None):\n with open(wf.settings['PATH']['INDEX']) as fh:\n vi = Index(fh)\n\n for machine_id, machine in vi(query):\n autocomplete = '{mid} {sep} '.format(mid=machine_id[0:8], sep=SEP)\n wf.add_item(title=machine.name,\n subtitle=machine.vagrantfile_path,\n autocomplete=autocomplete,\n icon=machine.icon,\n valid=False)", "def update_vm_list(self):\n vms = self.op.get_vms()\n for vm in vms:\n if vm['mac'] not in self.vm_cache:\n self.vm_cache[vm['mac']] = {'ip': vm['ip'], 'floating_ip': vm['floating_ip']}", "def refresh_vms_status(self, vm_list):\n vm_dict = {}\n try:\n client = oca.Client(self.user + ':' + self.passwd, self.url)\n vm_pool = oca.VirtualMachinePool(client)\n vm_pool.info()\n for vm_id in vm_list:\n vm = {\"interfaces\": []}\n vm_exist = False\n vm_element = None\n for i in vm_pool:\n if str(i.id) == str(vm_id):\n vm_exist = True\n vm_element = i\n break\n if not vm_exist:\n self.logger.info(\"The vm \" + str(vm_id) + \" does not exist.\")\n vm['status'] = \"DELETED\"\n vm['error_msg'] = (\"The vm \" + str(vm_id) + \" does not exist.\")\n continue\n vm_element.info()\n vm[\"vim_info\"] = None\n VMstatus = vm_element.str_lcm_state\n if VMstatus == \"RUNNING\":\n vm['status'] = \"ACTIVE\"\n elif \"FAILURE\" in VMstatus:\n vm['status'] = \"ERROR\"\n vm['error_msg'] = \"VM failure\"\n else:\n vm['status'] = \"BUILD\"\n try:\n for red in vm_element.template.nics:\n interface = {'vim_info': None, \"mac_address\": str(red.mac), \"vim_net_id\": str(red.network_id),\n \"vim_interface_id\": str(red.network_id)}\n # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6\n if hasattr(red, 
'ip'):\n interface[\"ip_address\"] = str(red.ip)\n if hasattr(red, 'ip6_global'):\n interface[\"ip_address\"] = str(red.ip6_global)\n vm[\"interfaces\"].append(interface)\n except Exception as e:\n self.logger.error(\"Error getting vm interface_information \" + type(e).__name__ + \":\" + str(e))\n vm[\"status\"] = \"VIM_ERROR\"\n vm[\"error_msg\"] = \"Error getting vm interface_information \" + type(e).__name__ + \":\" + str(e)\n vm_dict[vm_id] = vm\n return vm_dict\n except Exception as e:\n self.logger.error(e)\n for k in vm_dict:\n vm_dict[k][\"status\"] = \"VIM_ERROR\"\n vm_dict[k][\"error_msg\"] = str(e)\n return vm_dict", "def start_vm(self,vm):\n\t\tvmx_path = vm.vmx_path\n\t\tcmd = self.vmrun_path + \" -T fusion start '\" + vmx_path + \"' nogui\" \n\t\tos.system(cmd)", "def getAll(cls):\n return [VirtualMachine(vm) for vm in cls._vbox.machines]", "def __create(self, vm, cnt):\n vmms = self.vmms[vm.vmms]\n self.log.debug(\"__create: Using VMMS %s \" % (Config.VMMS_NAME))\n for i in range(cnt):\n newVM = copy.deepcopy(vm)\n newVM.id = self._getNextID()\n self.log.debug(\"__create|calling initializeVM\")\n vmms.initializeVM(newVM)\n self.log.debug(\"__create|done with initializeVM\")\n time.sleep(Config.CREATEVM_SECS)\n\n self.addVM(newVM)\n self.freeVM(newVM)\n self.log.debug(\"__create: Added vm %s to pool %s \" % (newVM.id, newVM.name))", "def addVM(self, vm):\n self.lock.acquire()\n machine = self.machines.get(vm.name)\n machine[0].append(vm.id)\n self.machines.set(vm.name, machine)\n self.lock.release()", "def get_vms(self):\n logger.info(\"Getting list of VMs from NetBox API\")\n try:\n result = self.netboxapi.virtualization.get_virtual_machines()\n logger.info(f\"Retrieved {len(result)} virtual machines\")\n return result\n except ConnectionError as e:\n logger.exception(f\"{e.args}\")\n exit(1)", "def run(vm=\"\", output=\"/tmp/tc3\", verbose=True, run=\"1\"):\n vboxcfg = forgeosi.VboxConfig()\n vboxcfg.get_nat_network(run)\n vbox_c1 = forgeosi.Vbox(basename=vm1, clonename=\"testrun\"+run+\"client1\")\n if verbose:\n print \"vm1 created\"\n time.sleep(10)\n vbox_c2 = forgeosi.Vbox(basename=vm2, clonename=\"testrun\"+run+\"client2\")\n if verbose:\n print \"vm2 created\"\n time.sleep(10)\n vbox_c3 = forgeosi.Vbox(basename=vm3, clonename=\"testrun\"+run+\"client3\")\n if verbose:\n print \"vm3 created\"\n time.sleep(10)\n vbox_s = forgeosi.Vbox(basename=vms, clonename=\"testrun\"+run+\"server\")\n if verbose:\n print \"vms created\"\n time.sleep(10)\n p_c1 = vbox_c1.start(session_type=forgeosi.SessionType.gui, wait=False)\n p_c2 = vbox_c2.start(session_type=forgeosi.SessionType.gui, wait=False)\n vbox_s.start(session_type=forgeosi.SessionType.gui, wait=True)\n vbox_c3.start(session_type=forgeosi.SessionType.gui, wait=True)\n p_c1.wait_for_completion()\n p_c2.wait_for_completion()\n\n if verbose:\n print \"all machines booted\"\n time.sleep(60)\n\n vbox_c1.create_guest_session()\n vbox_c2.create_guest_session()\n vbox_c3.create_guest_session()\n vbox_s.create_guest_session()\n if verbose:\n print \"all guest_sessions created\"\n vbox_c1.add_to_nat_network(run)\n vbox_c2.add_to_nat_network(run)\n vbox_c3.add_to_nat_network(run)\n vbox_s.add_to_nat_network(run)\n vbox_s.start_network_trace(path=output+\"/server.pcap\")\n vbox_c1.start_network_trace(path=output+\"/client1.pcap\")\n time.sleep(60)\n\n vbox_s.os.make_dir(\"/home/default/server\")\n\n if verbose:\n print \"downloading files to server\"\n time.sleep(10)\n vbox_s.os.download_file(rhino1, 
\"/home/default/server/rhino1.jpg\")\n time.sleep(10)\n vbox_s.os.download_file(rhino2, \"/home/default/server/rhino2.jpg\")\n time.sleep(10)\n #install ssh-server for using scp later\n vbox_c1.os.run_shell_cmd(\"\"\"sudo apt-get install openssh-server\nsleep_hack\n12345\nsleep_hack\ny\n\"\"\", gui=True)\n time.sleep(10)\n\n if verbose:\n print \"starting webserver\"\n vbox_s.os.serve_directory(\"~/server\", port=8080)\n time.sleep(10)\n ip_server = vbox_s.get_ip()\n ip_client1 = vbox_c1.get_ip()\n if verbose:\n print \"ip server: \"+str(ip_server)\n print \"ip client1: \"+str(ip_client1)\n\n vbox_c1.os.open_browser(ip_server+\":8080/rhino1.jpg\")\n vbox_c2.os.open_browser(ip_server+\":8080/rhino2.jpg\")\n vbox_c3.os.open_browser(\"http://\"+ip_server+\":8080/rhino2.jpg\",\n method=forgeosi.RunMethod.run)\n if verbose:\n print \"all webbrowsers opened\"\n time.sleep(30)\n vbox_c1.os.make_dir(\"~/rhinopix\")\n time.sleep(10)\n vbox_c1.os.download_file(ip_server+\":8080/rhino1.jpg\",\n \"~/rhinopix/rhino1.jpg\")\n time.sleep(30)\n # client 2 gets one picture form client 1 via scp\n vbox_c2.os.run_shell_cmd(\n\"\"\"cd\nscp default@\"\"\"+ip_client1+\"\"\":~/rhinopix/rhino1.jpg .\nsleep_hack\nyes\nsleep_hack\n12345\n\"\"\", gui=True)\n\n vbox_s.stop_network_trace()\n vbox_s.stop(confirm=forgeosi.StopConfirm.xfce)\n vbox_c1.stop_network_trace()\n vbox_c1.stop()\n vbox_c2.stop()\n vbox_c3.stop()\n\n if verbose:\n print \"machines stopped\"\n vbox_c1.log.write_xml_log(output+\"/log_c1.xml\")\n vbox_c2.log.write_xml_log(output+\"/log_c2.xml\")\n vbox_c3.log.write_xml_log(output+\"/log_c3.xml\")\n vbox_s.log.write_xml_log(output+\"/log_s.xml\")\n #vbox_c1.export(path=output+\"/disk_c1.img\", raw=True)\n #vbox_c2.export(path=output+\"/disk_c2.img\", raw=True)\n #vbox_c3.export(path=output+\"/disk_c3.img\", raw=True)\n #vbox_s.export(path=output+\"/disk_s.img\", raw=True)\n\n vbox_c1.cleanup_and_delete()\n vbox_c2.cleanup_and_delete()\n vbox_c3.cleanup_and_delete()\n vbox_s.cleanup_and_delete()", "def create_vm_list(vms):\n return [(v[0], v[2]) for v in vms]", "def create(self, vm_name):\n\n sub_conf = self.conf['virtualbox']['vms'][vm_name]\n hostname = vm_name\n dir_isocustom = self.conf['general']['dir_isocustom']\n if 'install' in sub_conf.keys() and sub_conf['install']:\n iso = os.path.join(dir_isocustom, sub_conf['install'])\n else:\n iso = None\n\n logging.info('Create virtualbox vm')\n l_vm = self.list_vms()\n\n isexist = [x['name'] for x in l_vm if hostname == x['name']]\n assert isexist == [], \"Error : la vm '\"+hostname+\"' existe déjà\"\n\n # msg = \"Error : la recipe '\"+recipe+\"' n'existe pas\"\n # assert recipe in self.conf['virtualbox']['recipes'].keys(), msg\n\n # dir1 = conf['disk-dir']+'/'+conf['hostname']\n # assert(not os.path.exists(dir1)), \"Le dossier \"+dir1+\" existe déjà !\"\n\n # dir_iso = self.conf['general']['dir_input']\n # dir_isocustom = self.conf['general']['dir_isocustom']\n os_type = sub_conf['os_type']\n file_disk_type = sub_conf['file_disk_type']\n ram = str(sub_conf['ram'])\n vram = str(sub_conf['vram'])\n disk_size = sub_conf['disk_size']\n interface_name = sub_conf['interface_name']\n interface_type = sub_conf['interface_type']\n\n dir_vm = self.get_machine_folder()\n if not os.path.isdir(dir_vm):\n os.mkdir(dir_vm)\n\n os.chdir(dir_vm)\n\n os.mkdir(dir_vm+os.sep+hostname)\n os.chdir(dir_vm+os.sep+hostname)\n\n # Create vm\n run_cmd(\n 'VBoxManage createvm '\n '--name \"'+hostname+'\" '\n '--ostype \"'+os_type+'\" ' # Ex: \"Debian_64\"\n '--register')\n\n # 
Add SATA controller\n run_cmd(\n 'VBoxManage storagectl \"'+hostname+'\" '\n '--name \"SATA Controller\" '\n '--add sata '\n '--controller IntelAHCI')\n\n # Add disks SATA controller\n if isinstance(disk_size, int):\n disk_size = [disk_size]\n run_cmd(\n 'VBoxManage storagectl '+hostname+' '\n '--name \"SATA Controller\" '\n '--portcount '+str(len(disk_size))) # Number of disque\n\n i = 0\n for on_disk_size in disk_size:\n ds = str(on_disk_size)\n it = str(i)\n disk_name = hostname+'_'+it+'.'+file_disk_type\n\n # Create one disk\n run_cmd(\n 'VBoxManage createhd '\n '--filename \"'+disk_name+'\" ' # Ex:test_0.vmdk\n '--size '+ds) # Disk size in Mo\n\n # Attach one disk to SATA controller\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"SATA Controller\" '\n '--port '+it+' '\n '--device 0 '\n '--type hdd '\n '--medium \"'+disk_name+'\"') # Ex:test_0.vmdk\n i += 1\n\n # Add IDE Controller\n run_cmd(\n 'VBoxManage storagectl \"'+hostname+'\" '\n '--name \"IDE Controller\" '\n '--add ide')\n\n # Mount the iso to the IDE controller\n if iso:\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"IDE Controller\" '\n '--port 0 '\n '--device 0 '\n '--type dvddrive '\n '--medium \"'+iso+'\"')\n\n # Enable Input/Output (mouse, keyboard, ...)\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--ioapic on')\n\n # Define boot order\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--boot1 dvd '\n '--boot2 disk '\n '--boot3 none '\n '--boot4 none')\n\n # Define RAM and VRAM(video)\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--memory '+ram+' '\n '--vram '+vram)\n\n # Connect network bridge interface\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--nic1 '+interface_type+' '\n '--bridgeadapter1 '+interface_name)", "def migrate_vm_and_check_cpu(number_of_cpus, vm_name=config.CPU_HOTPLUG_VM):\n testflow.step(\"migrating vm: %s\", vm_name)\n assert ll_vms.migrateVm(True, vm_name)\n vm_resource = helpers.get_host_executor(\n hl_vms.get_vm_ip(vm_name), config.VMS_LINUX_PW\n )\n testflow.step(\n \"Verifying that after migration vm: %s has %d cpus\" %\n (vm_name, number_of_cpus)\n )\n assert get_number_of_cores(vm_resource) == number_of_cpus, (\n \"The Cores number should be % and not: %s\",\n number_of_cpus, ll_vms.get_vm_cores(vm_name)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look into the Packer templates dir and return the list of templates.
def _discover_templates():
    vms = []
    for file in os.listdir(paths.packer_templates):
        json = os.path.join(paths.packer_templates, file, file + '.json')
        if os.path.exists(json):
            vms.append(file)
    return vms
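For readers skimming this row, here is a minimal, runnable sketch of the template-discovery logic shown above; the throwaway paths stand-in, the temporary directory and the template names are illustrative assumptions, not part of the dataset.

import os
import tempfile


class paths:
    # Assumption: the real project exposes a module/config object with this attribute.
    packer_templates = tempfile.mkdtemp()


def _discover_templates():
    # Same logic as the document above: a template counts only if
    # <templates>/<name>/<name>.json exists.
    vms = []
    for file in os.listdir(paths.packer_templates):
        json = os.path.join(paths.packer_templates, file, file + '.json')
        if os.path.exists(json):
            vms.append(file)
    return vms


if __name__ == '__main__':
    # Lay out <templates>/<name>/<name>.json for two fake templates.
    for name in ('debian12', 'ubuntu2204'):
        os.makedirs(os.path.join(paths.packer_templates, name))
        with open(os.path.join(paths.packer_templates, name, name + '.json'), 'w'):
            pass
    print(sorted(_discover_templates()))  # ['debian12', 'ubuntu2204']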
[ "def list_templates():\n module_path = get_module_path()\n\n templates_path = os.path.join(module_path, TEMPLATES)\n result = []\n\n for root, subdirs, files in os.walk(templates_path):\n for fn in files:\n if fn == '_template':\n prefix_path = os.path.relpath(root, templates_path)\n result.append(prefix_path)\n break\n\n result = [posixpath.join(*i.split(os.path.sep)) for i in result]\n\n return result", "def list_templates():\n templates = [f for f in glob.glob(os.path.join(template_path, '*.yaml'))]\n return templates", "def get_templates_dirs(self):\n return []", "def get_template_info(self):\n rospack = rospkg.RosPack()\n path_template = rospack.get_path('package_generator_templates')\n path_template += \"/templates/\"\n template_names = os.listdir(path_template)\n\n return [path_template, template_names]", "def list() -> None:\n template_lister = TemplateLister()\n template_lister.list_available_templates()", "def _load_templates(self):\n assert self.dumped_context is not None\n\n say('loading templates...')\n\n context = self.dumped_context\n templates_path = build_embryo_filepath(self.path, 'templates')\n templates = {}\n\n if not os.path.isdir(templates_path):\n return templates\n\n for root, dirs, files in os.walk(templates_path):\n for fname in files:\n if fname.endswith('.swp'):\n continue\n\n # the file path may itself be templatized. here, we render the\n # filepath template using the context dict and read in the\n # template files.\n\n # fpath here is the templatized file path to the template\n fpath = os.path.join(root, fname)\n\n # rel_fpath is the path relative to the root templates dir\n rel_fpath = fpath.replace(templates_path, '').lstrip('/')\n\n # fname_template is the jinja2 Template for the rel_fpath str\n try:\n fname_template = self.jinja_env.from_string(rel_fpath)\n except TemplateSyntaxError:\n shout(\n 'could not render template '\n 'for file path string: {p}', p=fpath\n )\n raise\n\n # finally rendered_rel_fpath is the rendered relative path\n rendered_rel_fpath = fname_template.render(context)\n\n # now actually read the file into the resulting dict.\n try:\n templates[rendered_rel_fpath] = File.read(fpath)\n except Exception:\n raise TemplateLoadFailed(fpath)\n\n return templates", "def get_main_template_list(env):\n rex_main_template = re.compile(r'^[^/]+\\.jinja2$')\n\n def main_template_filter(name):\n return rex_main_template.match(name)\n\n templ_list = env.list_templates(filter_func=main_template_filter)\n\n LOG.debug('Main template files list: %s', templ_list)\n\n return templ_list", "def get_all_templates(filename, templates, template_dirs):\n contents = get_template(filename, template_dirs)\n match = extend_re.match(contents)\n if match:\n contents = contents[len(match.group(0)):]\n parent = match.group(1)\n get_all_templates(parent, templates, template_dirs)\n templates.append(contents)\n return templates", "def templates_in(path):\n ext = '.cpp'\n return (\n Template(f[0:-len(ext)], load_file(os.path.join(path, f)))\n for f in os.listdir(path) if f.endswith(ext)\n )", "def _load_templates():\n mod = import_module(settings.TCMS_PAGES)\n\n entries, dir_name = {}, dirname(mod.__file__)\n for path, subdirs, files in walk(dir_name):\n name = path.replace(dir_name, '').strip(sep).replace(sep, '.')\n\n for file in filter(lambda f: f.endswith('.py'), files):\n fname = file.replace('.py', '')\n import_name = filter(None, (settings.TCMS_PAGES, name, fname))\n\n try:\n mod = import_module('.'.join(import_name))\n if hasattr(mod, 'PAGE'):\n entries[name or 
fname] = mod.PAGE\n except (ImportError, AttributeError):\n pass\n return entries", "def locate_pkg_templ_dir(search_dirs: List[str], component_name: str) -> str:\n # look up correct package template directory from list\n log.info(\"Searching pkg template '%s' folder from: %s\", component_name, search_dirs)\n matches: List[str] = []\n for item in search_dirs:\n matches.extend([str(p) for p in Path(item).resolve(strict=True).rglob(component_name)])\n if len(matches) < 1:\n raise IfwSdkError(f\"Expected to find one result for '{component_name}' from {search_dirs}\")\n return matches.pop()", "def _get_template(self) -> List[str]:\n with open(self.template_location, \"r\") as file:\n return file.readlines()", "def pull_template_files(self):\n os.makedirs(self.templates_dir, exist_ok=True)\n self._pull_sftp_files(self.args.templates_path, self.templates_dir)", "def load_templates():\n num_loaded_templates = 0\n templates = {}\n for fn in os.listdir(\"CLEVR_1.0_templates\"):\n if not fn.endswith(\".json\"):\n continue\n with open(os.path.join(\"CLEVR_1.0_templates\", fn), \"r\") as f:\n base = os.path.splitext(fn)[0]\n for i, template in enumerate(json.load(f)):\n num_loaded_templates += 1\n key = (fn, i)\n template[\"regexes\"] = [build_regex(t) for t in template[\"text\"]]\n templates[key] = template\n print(\"Read %d templates from disk\" % num_loaded_templates)\n return templates", "def get_info_templates(app, handler):\n current_handler = handler()\n info_templates_path = current_handler.config().get(\"info_templates_path\")\n\n info_templates = []\n app.logger.info(info_templates_path)\n for ext in ['*.html']:\n for path in pathlib.Path(info_templates_path).rglob(ext):\n app.logger.info(str(path))\n app.logger.info(path.relative_to(info_templates_path))\n template = str(path.relative_to(info_templates_path))\n if not template.startswith(\".\"):\n info_templates.append(template)\n return sorted(info_templates)", "def get_template_dir(self) -> str:", "def _interpolate_templates():\r\n if not os.path.exists(env.rcfile):\r\n raise Exception(\"%(rcfile)s does not exist. 
See rcfile.sample and run fab --config=rcfile.name <commands>!\" % env)\r\n\r\n interpolated_files = []\r\n # Get a list of all template files in /etc/ that we need to interpolate\r\n template_paths = []\r\n template_paths.extend(env.template_paths)\r\n template_paths.append(env.local_etc_path)\r\n\r\n for template_path in template_paths: \r\n for root, dirs, files in os.walk(template_path):\r\n for name in files:\r\n infilename = os.path.join(root, name)\r\n if re.search('.tmpl$', infilename):\r\n debug(\"Processing template file %s\" % infilename)\r\n \r\n outfilename = os.path.splitext(infilename)[0]\r\n _interpolate_file(infilename, outfilename)\r\n # infile = open(infilename, 'r')\r\n # outfile = open(outfilename, 'w')\r\n # try:\r\n # outfile.write(infile.read() % env)\r\n # except TypeError, e:\r\n # if re.search(\"not enough arguments for format string\", e[0]):\r\n # # We can safely ignore this since it means that there's nothing to interpolate\r\n # print e[0]\r\n # print \"Continuing by using the template file (%s) as the target (ie no interpolation)\" % infilename\r\n # # Remember that we have to go back to the top due to read() being at eof\r\n # infile.seek(0)\r\n # outfile.write(infile.read())\r\n # else:\r\n # raise\r\n # \r\n # outfile.close()\r\n # infile.close()\r\n interpolated_files.append(outfilename)\r\n \r\n return interpolated_files", "def inject_templates(self):\n\n # Sorry, found no other way to get this\n mod_path = sys.modules[self.__class__.__module__].__file__\n mod_dir = os.path.dirname(mod_path)\n tmpl_dir = os.path.join(\n mod_dir,\n 'templates',\n self.site.template_system.name\n )\n if os.path.isdir(tmpl_dir):\n # Inject tmpl_dir low in the theme chain\n self.site.template_system.inject_directory(tmpl_dir)", "def parse(self):\n dir_content = []\n for cur_path, dirs, files in os.walk(self.template_dir):\n\n new_path = cur_path.replace(self.template_dir, self.dest_dir)\n\n path = self._parse_path(new_path)\n file_paths = [self._parse_path(fp) for fp in files]\n file_contents = [self._parse_file(os.path.join(cur_path, fp))\n for fp in files]\n\n dir_content.append((path, file_paths, file_contents))\n\n return dir_content" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build and upload VMs through Builder class methods. Build from the list of VMs given as arguments. If no arguments are given, call self._discover to determine the list of VMs from existing Packer templates.
def _build(self):
    if self.args.VM_NAME:
        bld = Builder(self.args.VM_NAME)
    else:
        bld = Builder(self._discover_templates())
    if self.args.stable:
        bld.build('stable')
        result = bld.upload(build='stable')
    else:
        bld.build()
        result = bld.upload()
    # Send mail only if asked and Builder.upload() return
    # not empty 'uploaded' list.
    if self.args.mail and result[1]:
        bld.mail(result[0])
    return result
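As a usage illustration for the _build method above, here is a small self-contained sketch of the build, upload and optional mail flow it drives; the stub Builder class, the argparse wiring and the example command-line values are assumptions made for illustration, not the project's real API.

import argparse


class Builder:
    # Stand-in that mirrors only the call shapes used by _build() above.
    def __init__(self, vm_names):
        self.vm_names = vm_names if isinstance(vm_names, list) else [vm_names]

    def build(self, bld=None):
        flavour = 'stable' if bld == 'stable' else 'dev'
        print('building %s VMs: %s' % (flavour, ', '.join(self.vm_names)))

    def upload(self, build=None):
        uploaded = [name + '.ova' for name in self.vm_names]
        # result[0] is a human-readable report, result[1] the list of uploaded artifacts.
        return 'uploaded: ' + ', '.join(uploaded), uploaded

    def mail(self, report):
        print('mailing report:', report)


parser = argparse.ArgumentParser()
parser.add_argument('VM_NAME', nargs='*')
parser.add_argument('--stable', action='store_true')
parser.add_argument('--mail', action='store_true')
args = parser.parse_args(['debian12', 'centos9', '--mail'])  # example input

bld = Builder(args.VM_NAME or ['discovered-template'])
if args.stable:
    bld.build('stable')
    result = bld.upload(build='stable')
else:
    bld.build()
    result = bld.upload()
if args.mail and result[1]:
    bld.mail(result[0])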
[ "def create_vms(self):\n\n\t\tfor vmx_path in self.vmx_files:\n\t\t\t#vm = self.create_vm(\"/Users/alex/Documents/Virtual Machines.localized/macOS 10.12.vmwarevm/macOS 10.12.vmx\")\n\t\t\tvm = self.create_vm(vmx_path)\n\t\t\tself.start_vm(vm)\n\t\t\tself.virtual_machines[str(vm.uuid)] = vm\n\n\t\ttime.sleep(5)", "def build(self, bld=None):\n vm_number = len(self.vmlist)\n if vm_number == 1:\n if bld == 'stable':\n ova = build_vm(self.vmlist[0], 'stable')\n self.results.append(ova)\n else:\n ova = build_vm(self.vmlist[0])\n self.results.append(ova)\n elif vm_number <= self.threads:\n if bld == 'stable':\n self._build_pool(vm_number, self.vmlist, 'stable')\n else:\n self._build_pool(vm_number, self.vmlist)\n else:\n tmplist = self.vmlist\n while tmplist:\n if bld == 'stable':\n self._build_pool(self.threads, tmplist[:self.threads], 'stable')\n tmplist = tmplist[self.threads:]\n else:\n self._build_pool(self.threads, tmplist[:self.threads])\n tmplist = tmplist[self.threads:]\n return self.results", "def build_instances(request):\n all_instances = []\n\n def build_n_volttron_instances(n, bad_config=False):\n build_n_volttron_instances.count = n\n instances = []\n vip_addresses = []\n instances = []\n addr_config = dict()\n names = []\n\n for i in range(0, n):\n address = get_rand_vip()\n vip_addresses.append(address)\n nm = 'platform{}'.format(i + 1)\n names.append(nm)\n\n for i in range(0, n):\n address = vip_addresses[i]\n wrapper = PlatformWrapper()\n wrapper.startup_platform(address, instance_name=names[i])\n wrapper.skip_cleanup = True\n instances.append(wrapper)\n\n gevent.sleep(1)\n for i in range(0, n):\n instances[i].shutdown_platform()\n\n for i in range(0, n):\n addr_config.clear()\n for j in range(0, n):\n if j != i:\n name = names[j]\n addr_config[name] = dict()\n addr_config[name]['instance-name'] = names[j]\n if bad_config:\n addr_config[name]['vip-address123'] = vip_addresses[j]\n else:\n addr_config[name]['vip-address'] = vip_addresses[j]\n addr_config[name]['serverkey'] = instances[j].serverkey\n address_file = os.path.join(instances[i].volttron_home, 'external_platform_discovery.json')\n if address_file:\n with open(address_file, 'w') as f:\n json.dump(addr_config, f)\n\n gevent.sleep(1)\n for i in range(0, n):\n address = vip_addresses.pop(0)\n instances[i].startup_platform(address, instance_name=names[i])\n instances[i].allow_all_connections()\n gevent.sleep(11)\n instances = instances if n > 1 else instances[0]\n\n build_n_volttron_instances.instances = instances\n return instances\n\n return build_n_volttron_instances", "def main():\n\n args = parseArgs()\n\n vm = VMBuilder(args)\n\n if vm.args.command == 'list_disk_pools':\n print(vm.getDiskPools())\n elif vm.args.command == 'list_pool_volumes':\n print(vm.getDiskPoolVolumes())\n elif vm.args.command == 'create_vm':\n logging.debug(\"about to run vm.getbuild.createvm\")\n vm.verifyMinimumCreateVMArgs()\n vm.getBuild().createVM()\n else:\n logging.critical(\"The command you entered is not recognized.\")", "def __create(self, vm, cnt):\n vmms = self.vmms[vm.vmms]\n self.log.debug(\"__create: Using VMMS %s \" % (Config.VMMS_NAME))\n for i in range(cnt):\n newVM = copy.deepcopy(vm)\n newVM.id = self._getNextID()\n self.log.debug(\"__create|calling initializeVM\")\n vmms.initializeVM(newVM)\n self.log.debug(\"__create|done with initializeVM\")\n time.sleep(Config.CREATEVM_SECS)\n\n self.addVM(newVM)\n self.freeVM(newVM)\n self.log.debug(\"__create: Added vm %s to pool %s \" % (newVM.id, newVM.name))", "def prepare(self, 
vms):\n local_config_paths = []\n for vm in vms:\n local_config_path = \"%s.%s\" % (self.get_local_results_path(vm),\n self._short_name)\n self._set_global_defaults(vm)\n self._configuration.save(local_config_path)\n local_config_paths.append(local_config_path)\n # Prepare the VMs.\n self.__prepare_vms(vms, local_config_paths)\n self.__prepared_vms = set(vms)", "def _build_virtualbox_ips(self, env):\n # Load specifications\n env.cloud_spec = self._load_cloud_specs(env)\n\n try:\n output = exec_shell(\n [\n self.bin(\"vagrant\"),\n \"ssh\",\n \"pem\",\n \"-c\",\n \"\\\"ip address\",\n \"show eth1\", \n \"|\",\n \"grep\",\n \"'inet '\",\n \"|\",\n \"sed\",\n \"-e\",\n \"'s/^.*inet //' -e 's/\\/.*$//'\\\"\"\n ],\n environ=self.environ,\n cwd=self.vagrant_project_path\n )\n result = output.decode(\"utf-8\").split('\\n')\n result[0] = result[0].strip()\n env.cloud_spec['pem_server_1']['public_ip'] = result[0]\n env.cloud_spec['pem_server_1']['private_ip'] = result[0]\n except Exception as e:\n logging.error(\"Failed to execute the command\")\n logging.error(e)\n raise CliError(\n (\"Failed to obtain VirtualBox Instance IP Address for: %s, please \"\n \"check the logs for details.\")\n % env.cloud_spec['pem_server_1']['name']\n )\n\n try:\n output = exec_shell(\n [\n self.bin(\"vagrant\"),\n \"ssh\",\n \"barman\",\n \"-c\",\n \"\\\"ip address\",\n \"show eth1\", \n \"|\",\n \"grep\",\n \"'inet '\",\n \"|\",\n \"sed\",\n \"-e\",\n \"'s/^.*inet //' -e 's/\\/.*$//'\\\"\"\n ],\n environ=self.environ,\n cwd=self.vagrant_project_path\n )\n result = output.decode(\"utf-8\").split('\\n')\n result[0] = result[0].strip()\n env.cloud_spec['backup_server_1']['public_ip'] = result[0]\n env.cloud_spec['backup_server_1']['private_ip'] = result[0]\n except Exception as e:\n logging.error(\"Failed to execute the command\")\n logging.error(e)\n raise CliError(\n (\"Failed to obtain VirtualBox Instance IP Address for: %s, please \"\n \"check the logs for details.\")\n % env.cloud_spec['backup_server_1']['name']\n )\n\n try:\n output = exec_shell(\n [\n self.bin(\"vagrant\"),\n \"ssh\",\n \"primary\",\n \"-c\",\n \"\\\"ip address\",\n \"show eth1\", \n \"|\",\n \"grep\",\n \"'inet '\",\n \"|\",\n \"sed\",\n \"-e\",\n \"'s/^.*inet //' -e 's/\\/.*$//'\\\"\"\n ],\n environ=self.environ,\n cwd=self.vagrant_project_path\n )\n result = output.decode(\"utf-8\").split('\\n')\n result[0] = result[0].strip()\n env.cloud_spec['postgres_server_1']['public_ip'] = result[0]\n env.cloud_spec['postgres_server_1']['private_ip'] = result[0]\n except Exception as e:\n logging.error(\"Failed to execute the command\")\n logging.error(e)\n raise CliError(\n (\"Failed to obtain VirtualBox Instance IP Address for: %s, please \"\n \"check the logs for details.\")\n % env.cloud_spec['postgres_server_1']['name']\n )\n\n if env.reference_architecture in ['EDB-RA-2', 'EDB-RA-3']:\n for i in range(2, 4):\n try:\n output = exec_shell(\n [\n self.bin(\"vagrant\"),\n \"ssh\",\n \"standby-%s\" %i,\n \"-c\",\n \"\\\"ip address\",\n \"show eth1\", \n \"|\",\n \"grep\",\n \"'inet '\",\n \"|\",\n \"sed\",\n \"-e\",\n \"'s/^.*inet //' -e 's/\\/.*$//'\\\"\"\n ],\n environ=self.environ,\n cwd=self.vagrant_project_path\n )\n result = output.decode(\"utf-8\").split('\\n')\n result[0] = result[0].strip()\n env.cloud_spec['postgres_server_%s' % i]['public_ip'] = result[0] # noqa\n env.cloud_spec['postgres_server_%s' % i]['private_ip'] = result[0] # noqa\n except Exception as e:\n logging.error(\"Failed to execute the command\")\n logging.error(e)\n raise 
CliError(\n (\"Failed to obtain VirtualBox Instance IP Address for: %s,\"\n \"please check the logs for details.\")\n % env.cloud_spec['postgres_server_%s' % i]['name']\n )\n if env.reference_architecture == 'EDB-RA-3':\n for i in range(1, 4):\n try:\n output = exec_shell(\n [\n self.bin(\"vagrant\"),\n \"ssh\",\n \"pgpool-%s\" %i,\n \"-c\",\n \"\\\"ip address\",\n \"show eth1\", \n \"|\",\n \"grep\",\n \"'inet '\",\n \"|\",\n \"sed\",\n \"-e\",\n \"'s/^.*inet //' -e 's/\\/.*$//'\\\"\"\n ],\n environ=self.environ,\n cwd=self.vagrant_project_path\n )\n result = output.decode(\"utf-8\").split('\\n')\n result[0] = result[0].strip()\n env.cloud_spec['pooler_server_%s' % i]['public_ip'] = result[0] # noqa\n env.cloud_spec['pooler_server_%s' % i]['private_ip'] = result[0] # noqa\n except Exception as e:\n logging.error(\"Failed to execute the command\")\n logging.error(e)\n raise CliError(\n (\"Failed to obtain VirtualBox Instance IP Address for: %s,\"\n \"please check the logs for details.\")\n % env.cloud_spec['pooler_server_%s' % i]['name']\n )", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.settings.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.instanceUuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid and obj not in self.objects_to_reevaluate:\n return\n\n log.debug(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.settings.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n if bool(self.settings.skip_srm_placeholder_vms) is True \\\n and f\"{grab(obj, 'config.managedBy.extensionKey')}\".startswith(\"com.vmware.vcDr\"):\n log.debug2(f\"VM '{name}' is a SRM placeholder VM. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_host = self.get_parent_object_by_class(grab(obj, \"runtime.host\"), vim.HostSystem)\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ClusterComputeResource)\n\n # get single host 'cluster' if VM runs on one\n if cluster_object is None:\n cluster_object = self.get_parent_object_by_class(parent_host, vim.ComputeResource)\n\n if self.settings.set_source_name_as_cluster_group is True:\n group = self.inventory.get_by_data(NBClusterGroup, data={\"name\": self.name})\n else:\n group = self.get_parent_object_by_class(cluster_object, vim.Datacenter)\n\n if None in [parent_host, cluster_object, group]:\n log.error(f\"Requesting host or cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n nb_cluster_object = self.get_object_from_cache(cluster_object)\n\n # check VM cluster\n if nb_cluster_object is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. 
Skipping\")\n return\n\n parent_name = grab(parent_host, \"name\")\n cluster_name = grab(nb_cluster_object, \"data.name\")\n cluster_full_name = f\"{group.name}/{cluster_name}\"\n\n if name in self.processed_vm_names.get(cluster_full_name, list()) and obj not in self.objects_to_reevaluate:\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_full_name}' already parsed. \"\n \"Make sure to use unique VM names. Skipping\")\n return\n\n # add vm to processed list\n if self.processed_vm_names.get(cluster_full_name) is None:\n self.processed_vm_names[cluster_full_name] = list()\n\n self.processed_vm_names[cluster_full_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.settings.vm_include_filter, self.settings.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = nb_cluster_object.get_site_name()\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_full_name)\n\n # first check against vm_platform_relation\n platform = get_string_or_none(grab(obj, \"config.guestFullName\"))\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n if platform is not None:\n platform = self.get_object_relation(platform, \"vm_platform_relation\", fallback=platform)\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if self.settings.skip_vm_comments is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = self.get_object_relation(name, \"vm_tenant_relation\")\n\n # assign vm_tag_relation\n vm_tags = self.get_object_relation(name, \"vm_tag_relation\")\n\n # get vCenter tags\n vm_tags.extend(self.collect_object_tags(obj))\n\n vm_data = {\n \"name\": name,\n \"cluster\": nb_cluster_object,\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n # Add adaption for change in NetBox 3.3.0 VM model\n # issue: https://github.com/netbox-community/netbox/issues/10131#issuecomment-1225783758\n if version.parse(self.inventory.netbox_api_version) >= version.parse(\"3.3.0\"):\n vm_data[\"site\"] = {\"name\": site_name}\n\n if self.settings.track_vm_host:\n vm_data[\"device\"] = self.get_object_from_cache(parent_host)\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n if len(vm_tags) > 0:\n vm_data[\"tags\"] = vm_tags\n\n # add custom fields if present and configured\n vm_custom_fields = self.get_object_custom_fields(obj)\n if len(vm_custom_fields) > 0:\n vm_data[\"custom_fields\"] = vm_custom_fields\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n 
continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # track MAC addresses in order add dummy guest interfaces\n processed_interface_macs = list()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n processed_interface_macs.append(int_mac)\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) 
== 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": unquote(int_full_name),\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": unquote(int_description),\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None and self.settings.sync_vm_interface_mtu is True:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = {\n \"name\": unquote(int_network_name),\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n }\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append({\n \"name\": unquote(f\"{int_network_name}-{int_network_vlan_id}\"),\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n })\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # find dummy guest NIC interfaces\n if self.settings.sync_vm_dummy_interfaces is True:\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC MAC\n guest_nic_mac = normalize_mac_address(grab(guest_nic, \"macAddress\"))\n\n # skip interfaces of MAC addresses for already known interfaces\n if guest_nic_mac is None or guest_nic_mac in processed_interface_macs:\n continue\n\n processed_interface_macs.append(guest_nic_mac)\n\n int_full_name = \"vNIC Dummy-{}\".format(\"\".join(guest_nic_mac.split(\":\")[-2:]))\n\n log.debug2(f\"Parsing dummy network device: {guest_nic_mac}\")\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if self.settings.permitted_subnets.permitted(int_ip_address, interface_name=int_full_name) is True:\n 
nic_ips[int_full_name].append(int_ip_address)\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": guest_nic_mac,\n \"enabled\": grab(guest_nic, \"connected\", fallback=False),\n }\n\n if len(nic_ips.get(int_full_name, list())) == 0:\n log.debug(f\"Dummy network interface '{int_full_name}' has no IP addresses assigned. Skipping\")\n continue\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6,\n vmware_object=obj)\n\n return", "def prepare_instances(self, parameters, count=None, security_configured=True):\n logging.debug('flex_agent.prepare_instances() parameters={0}'.format(parameters))\n try:\n\n flex_cloud_machine_info = parameters[self.PARAM_FLEX_CLOUD_MACHINE_INFO]\n logging.debug('flex_cloud_machine_info =\\n{}'.format(pprint.pformat(flex_cloud_machine_info)))\n\n queue_head = parameters[self.PARAM_FLEX_QUEUE_HEAD]\n logging.debug('queue_head = {}'.format(queue_head))\n queue_head_keyfile = queue_head['keyfile']\n remote_queue_head_keyfile = os.path.join(FlexConfig.QUEUE_HEAD_KEY_DIR,\n os.path.basename(queue_head_keyfile))\n\n for machine in flex_cloud_machine_info:\n ip = machine['ip']\n keyfile = machine['keyfile']\n\n os.chmod(keyfile, int('600', 8))\n\n username = machine['username']\n is_queue_head = machine[self.PARAM_QUEUE_HEAD]\n id = self.get_flex_instance_id(public_ip=ip)\n\n if not os.path.exists(keyfile):\n logging.error('Keyfile: {0} does not exist!'.format(keyfile))\n VMStateModel.set_state(params=parameters, ins_ids=[id],\n state=VMStateModel.STATE_FAILED,\n description=VMStateModel.DESCRI_INVALID_KEYFILE)\n continue\n\n logging.debug(\"[{0}] [{1}] [{2}] [is_queue_head:{3}]\".format(ip, keyfile, username, is_queue_head))\n\n scp_command = \\\n 'scp -o \\'UserKnownHostsFile=/dev/null\\' -o \\'StrictHostKeyChecking no\\' -i {keyfile} {source} {target}'.format(\n keyfile=keyfile,\n source=queue_head_keyfile,\n target=\"{username}@{ip}:{remote_queue_head_keyfile}\".format(\n username=username, ip=ip, remote_queue_head_keyfile=remote_queue_head_keyfile\n )\n )\n\n logging.debug('scp command for queue head keyfile =\\n{}'.format(scp_command))\n res = os.system(scp_command)\n if res != 0:\n logging.error('scp for queue head keyfile failed!'.format(keyfile))\n VMStateModel.set_state(params=parameters, ins_ids=[id],\n state=VMStateModel.STATE_FAILED,\n description=VMStateModel.DESCRI_FAIL_TO_PREPARE)\n continue\n\n script_lines = []\n script_lines.append(\"#!/bin/bash\")\n\n script_lines.append(\"echo export STOCHKIT_HOME={0} >> ~/.bashrc\".format(\"~/stochss/StochKit/\"))\n script_lines.append(\"echo export STOCHKIT_ODE={0} >> ~/.bashrc\".format(\"~/stochss/ode/\"))\n script_lines.append(\"echo export R_LIBS={0} >> ~/.bashrc\".format(\"~/stochss/stochoptim/library\"))\n script_lines.append(\"echo export C_FORCE_ROOT=1 >> ~/.bashrc\".format(\"~/stochss/stochoptim/library\"))\n script_lines.append(\"chmod 600 {remote_queue_head_keyfile}\".format(\n remote_queue_head_keyfile=remote_queue_head_keyfile))\n\n if is_queue_head:\n logging.debug('Adding extra commands for configuring queue head...')\n script_lines.append(\"sudo rabbitmqctl add_user stochss ucsb\")\n script_lines.append('sudo rabbitmqctl set_permissions -p / stochss \".*\" \".*\" \".*\"')\n\n reset_mysql_script = '~/stochss/release-tools/flex-cloud/reset_mysql_pwd.sh'\n script_lines.append(\"sudo {reset_mysql_script} 
root {flex_db_password}\".format(\n reset_mysql_script=reset_mysql_script,\n flex_db_password=parameters[self.PARAM_FLEX_DB_PASSWORD]))\n\n bash_script = '\\n'.join(script_lines)\n logging.debug(\"\\n\\n\\nbash_script =\\n{0}\\n\\n\\n\".format(bash_script))\n\n bash_script_filename = os.path.join(AgentConfig.TMP_DIRNAME, 'stochss_init.sh')\n with open(bash_script_filename, 'w') as bash_script_file:\n bash_script_file.write(bash_script)\n\n scp_command = 'scp -o \\'UserKnownHostsFile=/dev/null\\' -o \\'StrictHostKeyChecking no\\' -i {keyfile} {source} {target}'.format(\n keyfile=keyfile,\n source=bash_script_filename,\n target=\"{username}@{ip}:~/stochss_init.sh\".format(username=username,\n ip=ip))\n\n logging.debug('scp command =\\n{}'.format(scp_command))\n res = os.system(scp_command)\n\n os.remove(bash_script_filename)\n\n if res != 0:\n logging.error('scp failed!'.format(keyfile))\n VMStateModel.set_state(params=parameters, ins_ids=[id],\n state=VMStateModel.STATE_FAILED,\n description=VMStateModel.DESCRI_FAIL_TO_PREPARE)\n continue\n\n commands = ['chmod +x ~/stochss_init.sh',\n '~/stochss_init.sh']\n command = ';'.join(commands)\n\n remote_command_string = self.get_remote_command_string(ip=ip, username=username,\n keyfile=keyfile, command=command)\n\n logging.debug('remote_command_string =\\n{}'.format(remote_command_string))\n res = os.system(remote_command_string)\n\n if res != 0:\n logging.error('remote command failed!'.format(keyfile))\n VMStateModel.set_state(params=parameters, ins_ids=[id],\n state=VMStateModel.STATE_FAILED,\n description=VMStateModel.DESCRI_FAIL_TO_PREPARE)\n continue\n except Exception as e:\n logging.exception(e)\n raise", "def add_virtual_machine(self, obj):\n\n name = get_string_or_none(grab(obj, \"name\"))\n\n if name is not None and self.strip_vm_domain_name is True:\n name = name.split(\".\")[0]\n\n #\n # Filtering\n #\n\n # get VM UUID\n vm_uuid = grab(obj, \"config.uuid\")\n\n if vm_uuid is None or vm_uuid in self.processed_vm_uuid:\n return\n\n log.debug2(f\"Parsing vCenter VM: {name}\")\n\n # get VM power state\n status = \"active\" if get_string_or_none(grab(obj, \"runtime.powerState\")) == \"poweredOn\" else \"offline\"\n\n # check if vm is template\n template = grab(obj, \"config.template\")\n if bool(self.skip_vm_templates) is True and template is True:\n log.debug2(f\"VM '{name}' is a template. Skipping\")\n return\n\n # ignore offline VMs during first run\n if self.parsing_vms_the_first_time is True and status == \"offline\":\n log.debug2(f\"Ignoring {status} VM '{name}' on first run\")\n return\n\n # add to processed VMs\n self.processed_vm_uuid.append(vm_uuid)\n\n parent_name = get_string_or_none(grab(obj, \"runtime.host.name\"))\n cluster_name = get_string_or_none(grab(obj, \"runtime.host.parent.name\"))\n\n # honor strip_host_domain_name\n if cluster_name is not None and self.strip_host_domain_name is True and \\\n parent_name.split(\".\")[0] == cluster_name.split(\".\")[0]:\n cluster_name = cluster_name.split(\".\")[0]\n\n # check VM cluster\n if cluster_name is None:\n log.error(f\"Requesting cluster for Virtual Machine '{name}' failed. Skipping.\")\n return\n\n elif self.permitted_clusters.get(cluster_name) is None:\n log.debug(f\"Virtual machine '{name}' is not part of a permitted cluster. Skipping\")\n return\n\n if name in self.processed_vm_names.get(cluster_name, list()):\n log.warning(f\"Virtual machine '{name}' for cluster '{cluster_name}' already parsed. \"\n \"Make sure to use unique VM names. 
Skipping\")\n return\n\n # add host to processed list\n if self.processed_vm_names.get(cluster_name) is None:\n self.processed_vm_names[cluster_name] = list()\n\n self.processed_vm_names[cluster_name].append(name)\n\n # filter VMs by name\n if self.passes_filter(name, self.vm_include_filter, self.vm_exclude_filter) is False:\n return\n\n #\n # Collect data\n #\n\n # check if cluster is a Standalone ESXi\n site_name = self.permitted_clusters.get(cluster_name)\n if site_name is None:\n site_name = self.get_site_name(NBCluster, cluster_name)\n\n # first check against vm_platform_relation\n platform = grab(obj, \"config.guestFullName\")\n platform = get_string_or_none(grab(obj, \"guest.guestFullName\", fallback=platform))\n\n for platform_relation in grab(self, \"vm_platform_relation\", fallback=list()):\n\n if platform is None:\n break\n\n object_regex = platform_relation.get(\"object_regex\")\n if object_regex.match(platform):\n platform = platform_relation.get(\"platform_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {platform}, using mapped platform '{platform}'\")\n break\n\n hardware_devices = grab(obj, \"config.hardware.device\", fallback=list())\n\n disk = int(sum([getattr(comp, \"capacityInKB\", 0) for comp in hardware_devices\n if isinstance(comp, vim.vm.device.VirtualDisk)\n ]) / 1024 / 1024)\n\n annotation = None\n if bool(self.skip_vm_comments) is False:\n annotation = get_string_or_none(grab(obj, \"config.annotation\"))\n\n # assign vm_tenant_relation\n tenant_name = None\n for tenant_relation in grab(self, \"vm_tenant_relation\", fallback=list()):\n object_regex = tenant_relation.get(\"object_regex\")\n if object_regex.match(name):\n tenant_name = tenant_relation.get(\"tenant_name\")\n log.debug2(f\"Found a match ({object_regex.pattern}) for {name}, using tenant '{tenant_name}'\")\n break\n\n vm_data = {\n \"name\": name,\n \"cluster\": {\"name\": cluster_name},\n \"status\": status,\n \"memory\": grab(obj, \"config.hardware.memoryMB\"),\n \"vcpus\": grab(obj, \"config.hardware.numCPU\"),\n \"disk\": disk\n }\n\n if platform is not None:\n vm_data[\"platform\"] = {\"name\": platform}\n if annotation is not None:\n vm_data[\"comments\"] = annotation\n if tenant_name is not None:\n vm_data[\"tenant\"] = {\"name\": tenant_name}\n\n vm_primary_ip4 = None\n vm_primary_ip6 = None\n vm_default_gateway_ip4 = None\n vm_default_gateway_ip6 = None\n\n # check vm routing to determine which is the default interface for each IP version\n for route in grab(obj, \"guest.ipStack.0.ipRouteConfig.ipRoute\", fallback=list()):\n\n # we found a default route\n if grab(route, \"prefixLength\") == 0:\n\n try:\n ip_a = ip_address(grab(route, \"network\"))\n except ValueError:\n continue\n\n try:\n gateway_ip_address = ip_address(grab(route, \"gateway.ipAddress\"))\n except ValueError:\n continue\n\n if ip_a.version == 4 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv4 gateway {gateway_ip_address}\")\n vm_default_gateway_ip4 = gateway_ip_address\n elif ip_a.version == 6 and gateway_ip_address is not None:\n log.debug2(f\"Found default IPv6 gateway {gateway_ip_address}\")\n vm_default_gateway_ip6 = gateway_ip_address\n\n nic_data = dict()\n nic_ips = dict()\n\n # get VM interfaces\n for vm_device in hardware_devices:\n\n # sample: https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getvnicinfo.py\n\n # not a network interface\n if not isinstance(vm_device, vim.vm.device.VirtualEthernetCard):\n continue\n\n int_mac = 
normalize_mac_address(grab(vm_device, \"macAddress\"))\n\n device_class = grab(vm_device, \"_wsdlName\")\n\n log.debug2(f\"Parsing device {device_class}: {int_mac}\")\n\n device_backing = grab(vm_device, \"backing\")\n\n # set defaults\n int_mtu = None\n int_mode = None\n int_network_vlan_ids = None\n int_network_vlan_id_ranges = None\n int_network_name = None\n int_network_private = False\n\n # get info from local vSwitches\n if isinstance(device_backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):\n\n int_network_name = get_string_or_none(grab(device_backing, \"deviceName\"))\n int_host_pgroup = grab(self.network_data, f\"host_pgroup|{parent_name}|{int_network_name}\",\n separator=\"|\")\n\n if int_host_pgroup is not None:\n int_network_vlan_ids = [int_host_pgroup.get(\"vlan_id\")]\n int_network_vlan_id_ranges = [str(int_host_pgroup.get(\"vlan_id\"))]\n\n int_vswitch_name = int_host_pgroup.get(\"vswitch\")\n int_vswitch_data = grab(self.network_data, f\"vswitch|{parent_name}|{int_vswitch_name}\",\n separator=\"|\")\n\n if int_vswitch_data is not None:\n int_mtu = int_vswitch_data.get(\"mtu\")\n\n # get info from distributed port group\n else:\n\n dvs_portgroup_key = grab(device_backing, \"port.portgroupKey\", fallback=\"None\")\n int_portgroup_data = grab(self.network_data, f\"dpgroup|{dvs_portgroup_key}\", separator=\"|\")\n\n if int_portgroup_data is not None:\n int_network_name = grab(int_portgroup_data, \"name\")\n int_network_vlan_ids = grab(int_portgroup_data, \"vlan_ids\")\n if len(grab(int_portgroup_data, \"vlan_id_ranges\")) > 0:\n int_network_vlan_id_ranges = grab(int_portgroup_data, \"vlan_id_ranges\")\n else:\n int_network_vlan_id_ranges = [str(int_network_vlan_ids[0])]\n int_network_private = grab(int_portgroup_data, \"private\")\n\n int_dvswitch_uuid = grab(device_backing, \"port.switchUuid\")\n int_dvswitch_data = grab(self.network_data, f\"pswitch|{parent_name}|{int_dvswitch_uuid}\", separator=\"|\")\n\n if int_dvswitch_data is not None:\n int_mtu = int_dvswitch_data.get(\"mtu\")\n\n int_connected = grab(vm_device, \"connectable.connected\", fallback=False)\n int_label = grab(vm_device, \"deviceInfo.label\", fallback=\"\")\n\n int_name = \"vNIC {}\".format(int_label.split(\" \")[-1])\n\n int_full_name = int_name\n if int_network_name is not None:\n int_full_name = f\"{int_full_name} ({int_network_name})\"\n\n int_description = f\"{int_label} ({device_class})\"\n if int_network_vlan_ids is not None:\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] == 4095:\n vlan_description = \"all vlans\"\n int_mode = \"tagged-all\"\n else:\n vlan_description = \"vlan ID: %s\" % \", \".join(int_network_vlan_id_ranges)\n\n if len(int_network_vlan_ids) == 1:\n int_mode = \"access\"\n else:\n int_mode = \"tagged\"\n\n if int_network_private is True:\n vlan_description = f\"{vlan_description} (private)\"\n\n int_description = f\"{int_description} ({vlan_description})\"\n\n # find corresponding guest NIC and get IP addresses and connected status\n for guest_nic in grab(obj, \"guest.net\", fallback=list()):\n\n # get matching guest NIC\n if int_mac != normalize_mac_address(grab(guest_nic, \"macAddress\")):\n continue\n\n int_connected = grab(guest_nic, \"connected\", fallback=int_connected)\n\n if nic_ips.get(int_full_name) is None:\n nic_ips[int_full_name] = list()\n\n # grab all valid interface IP addresses\n for int_ip in grab(guest_nic, \"ipConfig.ipAddress\", fallback=list()):\n\n int_ip_address = f\"{int_ip.ipAddress}/{int_ip.prefixLength}\"\n\n if 
ip_valid_to_add_to_netbox(int_ip_address, self.permitted_subnets, int_full_name) is False:\n continue\n\n nic_ips[int_full_name].append(int_ip_address)\n\n # check if primary gateways are in the subnet of this IP address\n # if it matches IP gets chosen as primary IP\n if vm_default_gateway_ip4 is not None and \\\n vm_default_gateway_ip4 in ip_interface(int_ip_address).network and \\\n vm_primary_ip4 is None:\n\n vm_primary_ip4 = int_ip_address\n\n if vm_default_gateway_ip6 is not None and \\\n vm_default_gateway_ip6 in ip_interface(int_ip_address).network and \\\n vm_primary_ip6 is None:\n\n vm_primary_ip6 = int_ip_address\n\n vm_nic_data = {\n \"name\": int_full_name,\n \"virtual_machine\": None,\n \"mac_address\": int_mac,\n \"description\": int_description,\n \"enabled\": int_connected,\n }\n\n if int_mtu is not None:\n vm_nic_data[\"mtu\"] = int_mtu\n if int_mode is not None:\n vm_nic_data[\"mode\"] = int_mode\n\n if int_network_vlan_ids is not None and int_mode != \"tagged-all\":\n\n if len(int_network_vlan_ids) == 1 and int_network_vlan_ids[0] != 0:\n\n vm_nic_data[\"untagged_vlan\"] = self.get_vlan_object_if_exists({\n \"name\": int_network_name,\n \"vid\": int_network_vlan_ids[0],\n \"site\": {\n \"name\": site_name\n }\n })\n else:\n tagged_vlan_list = list()\n for int_network_vlan_id in int_network_vlan_ids:\n\n if int_network_vlan_id == 0:\n continue\n\n tagged_vlan_list.append(self.get_vlan_object_if_exists({\n \"name\": f\"{int_network_name}-{int_network_vlan_id}\",\n \"vid\": int_network_vlan_id,\n \"site\": {\n \"name\": site_name\n }\n }))\n\n if len(tagged_vlan_list) > 0:\n vm_nic_data[\"tagged_vlans\"] = tagged_vlan_list\n\n nic_data[int_full_name] = vm_nic_data\n\n # add VM to inventory\n self.add_device_vm_to_inventory(NBVM, object_data=vm_data, site_name=site_name, vnic_data=nic_data,\n nic_ips=nic_ips, p_ipv4=vm_primary_ip4, p_ipv6=vm_primary_ip6)\n\n return", "def test_vsphere_vms(self):\n config = {}\n self.load_check(config)\n self.check._is_excluded = MagicMock(return_value=False)\n\n # get the client\n client = vsphere_client()\n\n # list_attached_tags method returns empty list of tags\n client.tagging.TagAssociation.list_attached_tags = MagicMock(return_value=[])\n\n # assign the vsphere client object to the vsphere check client object\n self.check.client = client\n\n content_mock = self.mock_content(\"vm\")\n obj_list = self.check._vsphere_vms(content_mock, \"ESXi\")\n\n self.assertEqual(len(obj_list), 1)\n self.assertEqual(obj_list[0]['hostname'], 'Ubuntu')\n\n # check there should be no tags and labels extracted from vspher client\n self.assertEqual(len(obj_list[0]['topo_tags']['identifiers']), 0)\n\n # Check if labels are added\n self.assertTrue(obj_list[0]['topo_tags'][\"labels\"])\n expected_name_label = obj_list[0]['topo_tags'][\"labels\"][0]\n expected_guestid_label = obj_list[0]['topo_tags'][\"labels\"][1]\n expected_numcpu_label = obj_list[0]['topo_tags'][\"labels\"][3]\n expected_memory_label = obj_list[0]['topo_tags'][\"labels\"][4]\n\n # Check if the labels are as expected\n self.assertEqual(expected_name_label, 'name:Ubuntu')\n self.assertEqual(expected_guestid_label, 'guestId:ubuntu64Guest')\n self.assertEqual(expected_numcpu_label, 'numCPU:1')\n self.assertEqual(expected_memory_label, 'memoryMB:4096')", "def _discover_templates():\n vms = []\n for file in os.listdir(paths.packer_templates):\n json = os.path.join(paths.packer_templates,\n file, file + '.json')\n if os.path.exists(json):\n vms.append(file)\n return vms", "def 
create_instance_bulk(self, tenant_id, neutron_ports, vms,\n port_profiles, sync=False):", "def vmware_builder(**kwargs):\n\n # Setup vars from kwargs\n builder_spec = kwargs['data']['builder_spec']\n distro = kwargs['data']['distro']\n vagrant_box = kwargs['data']['vagrant_box']\n\n builder_spec.update({\n 'type': 'vmware-iso',\n 'disk_adapter_type': '{{ user `disk_adapter_type` }}',\n 'disk_type_id': 0,\n 'version': '10',\n 'vmx_data': {\n 'ethernet0.pciSlotNumber': '32'\n },\n 'vmx_remove_ethernet_interfaces': True\n })\n\n # Define OS type map for distro to guest OS type\n os_type_map = {'alpine': 'other3xlinux-64', 'centos': 'centos-64',\n 'debian': 'debian8-64', 'fedora': 'fedora-64',\n 'freenas': 'FreeBSD-64', 'ubuntu': 'ubuntu-64'}\n\n # Lookup distro OS type\n guest_os_type = os_type_map[distro]\n\n # If FreeNAS, add storage devices if Vagrant to ensure we can provision\n if distro == 'freenas' and vagrant_box:\n builder_spec.update(\n {'disk_additional_size': ['{{ user `disk_size` }}']})\n\n builder_spec.update({'guest_os_type': guest_os_type})\n\n return builder_spec", "def provision_machines(environment, machine_names=None):\n machine_names = slapchop.to_machine_array(machine_names)\n slapchop.bootstrap(environment=environment, machine_names=machine_names, yes=True)\n slapchop.fabric_setup(environment=environment)\n internal_provision_machines(environment=environment, machine_names=machine_names, puppet_ip=env.puppet_internal_ip)", "def create(self, vm_name):\n\n sub_conf = self.conf['virtualbox']['vms'][vm_name]\n hostname = vm_name\n dir_isocustom = self.conf['general']['dir_isocustom']\n if 'install' in sub_conf.keys() and sub_conf['install']:\n iso = os.path.join(dir_isocustom, sub_conf['install'])\n else:\n iso = None\n\n logging.info('Create virtualbox vm')\n l_vm = self.list_vms()\n\n isexist = [x['name'] for x in l_vm if hostname == x['name']]\n assert isexist == [], \"Error : la vm '\"+hostname+\"' existe déjà\"\n\n # msg = \"Error : la recipe '\"+recipe+\"' n'existe pas\"\n # assert recipe in self.conf['virtualbox']['recipes'].keys(), msg\n\n # dir1 = conf['disk-dir']+'/'+conf['hostname']\n # assert(not os.path.exists(dir1)), \"Le dossier \"+dir1+\" existe déjà !\"\n\n # dir_iso = self.conf['general']['dir_input']\n # dir_isocustom = self.conf['general']['dir_isocustom']\n os_type = sub_conf['os_type']\n file_disk_type = sub_conf['file_disk_type']\n ram = str(sub_conf['ram'])\n vram = str(sub_conf['vram'])\n disk_size = sub_conf['disk_size']\n interface_name = sub_conf['interface_name']\n interface_type = sub_conf['interface_type']\n\n dir_vm = self.get_machine_folder()\n if not os.path.isdir(dir_vm):\n os.mkdir(dir_vm)\n\n os.chdir(dir_vm)\n\n os.mkdir(dir_vm+os.sep+hostname)\n os.chdir(dir_vm+os.sep+hostname)\n\n # Create vm\n run_cmd(\n 'VBoxManage createvm '\n '--name \"'+hostname+'\" '\n '--ostype \"'+os_type+'\" ' # Ex: \"Debian_64\"\n '--register')\n\n # Add SATA controller\n run_cmd(\n 'VBoxManage storagectl \"'+hostname+'\" '\n '--name \"SATA Controller\" '\n '--add sata '\n '--controller IntelAHCI')\n\n # Add disks SATA controller\n if isinstance(disk_size, int):\n disk_size = [disk_size]\n run_cmd(\n 'VBoxManage storagectl '+hostname+' '\n '--name \"SATA Controller\" '\n '--portcount '+str(len(disk_size))) # Number of disque\n\n i = 0\n for on_disk_size in disk_size:\n ds = str(on_disk_size)\n it = str(i)\n disk_name = hostname+'_'+it+'.'+file_disk_type\n\n # Create one disk\n run_cmd(\n 'VBoxManage createhd '\n '--filename \"'+disk_name+'\" ' # 
Ex:test_0.vmdk\n '--size '+ds) # Disk size in Mo\n\n # Attach one disk to SATA controller\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"SATA Controller\" '\n '--port '+it+' '\n '--device 0 '\n '--type hdd '\n '--medium \"'+disk_name+'\"') # Ex:test_0.vmdk\n i += 1\n\n # Add IDE Controller\n run_cmd(\n 'VBoxManage storagectl \"'+hostname+'\" '\n '--name \"IDE Controller\" '\n '--add ide')\n\n # Mount the iso to the IDE controller\n if iso:\n run_cmd(\n 'VBoxManage storageattach \"'+hostname+'\" '\n '--storagectl \"IDE Controller\" '\n '--port 0 '\n '--device 0 '\n '--type dvddrive '\n '--medium \"'+iso+'\"')\n\n # Enable Input/Output (mouse, keyboard, ...)\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--ioapic on')\n\n # Define boot order\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--boot1 dvd '\n '--boot2 disk '\n '--boot3 none '\n '--boot4 none')\n\n # Define RAM and VRAM(video)\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--memory '+ram+' '\n '--vram '+vram)\n\n # Connect network bridge interface\n run_cmd(\n 'VBoxManage modifyvm \"'+hostname+'\" '\n '--nic1 '+interface_type+' '\n '--bridgeadapter1 '+interface_name)", "def setup_builders_from_config_list(builder_specs, helper,\n do_upload_render_results,\n do_upload_bench_results, builder_format):\n for builder_tuple in sorted(builder_specs):\n builder = builder_format(*builder_tuple)\n builder.create(helper, do_upload_render_results, do_upload_bench_results)", "def create_and_update_vips(self, pool_create_args=None,\n vip_update_args=None,\n vip_create_args=None):\n vips = []\n pool_create_args = pool_create_args or {}\n vip_create_args = vip_create_args or {}\n vip_update_args = vip_update_args or {}\n networks = self.context.get(\"tenant\", {}).get(\"networks\", [])\n pools = self._create_v1_pools(networks, **pool_create_args)\n with atomic.ActionTimer(self, \"neutron.create_%s_vips\" % len(pools)):\n for pool in pools:\n vips.append(self._create_v1_vip(pool, **vip_create_args))\n for vip in vips:\n self._update_v1_vip(vip, **vip_update_args)", "def _get_nebula_vms(self):\n hostname = socket.gethostname()\n fqdn = socket.getfqdn()\n if self.config['onecli_path']:\n onevm_command = '%s/onevm' % self.config['onecli_path']\n else:\n onevm_command = 'onevm'\n args = shlex.split('%s list -x' % onevm_command)\n my_env = os.environ.copy()\n if self.config['one_auth']:\n my_env['ONE_AUTH'] = self.config['one_auth']\n if self.config['one_xmlrpc']:\n my_env['ONE_XMLRPC'] = self.config['one_xmlrpc']\n vm_xml_list = subprocess.Popen(args, stdout=subprocess.PIPE,\n env=my_env)\n vm_xml_arr = vm_xml_list.stdout.readlines()\n vm_xml_string = ''.join([line.strip(\"\\n\") for line in vm_xml_arr])\n vm_xml_etree = xml.etree.ElementTree.fromstring(vm_xml_string)\n vm_hash = {}\n for vm in vm_xml_etree.findall(\"VM\"):\n vm_hostname_element = vm.find(\"*//HOSTNAME\")\n if vm_hostname_element is None:\n # this vm is undeployed or pending, so skip it\n continue\n vm_hostname = vm_hostname_element.text\n if vm_hostname not in [hostname, fqdn]:\n continue\n vm_id = vm.find(\"ID\").text\n pid = self._get_vm_pid(vm_id)\n if not pid:\n continue\n vm_name = self._validate_metric_name(vm.find(\"NAME\").text)\n vm_diamond_prefix_element = vm.find(\"*//DIAMOND_PREFIX\")\n if vm_diamond_prefix_element is None:\n # no diamond prefix in template, so set to default\n vm_diamond_prefix = self.config['default_prefix']\n else:\n vm_diamond_prefix = self._validate_metric_name(\n 
vm_diamond_prefix_element.text)\n vm_hash[vm_id] = dict(diamond_prefix=vm_diamond_prefix,\n pid=pid, name=vm_name)\n return vm_hash" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve list of .ova from dir. Return list.
def _ova_from_dir(directory): res = [] for file in os.listdir(directory): if file.endswith('.ova'): res.append(os.path.join(directory, file)) return res
[ "def _prepare_ovas(self):\n ovalist = []\n for name in self.args.NAME:\n if name.endswith('.ova'):\n ovalist.append(name)\n elif os.path.isdir(name):\n ovalist.extend(self._ova_from_dir(name))\n else:\n print(\"%s doesn't looks like directory or OVA\" % name,\n file=stderr)\n return ovalist", "def listdir(self):\r\n\t\treturn []", "def ls(self, dir):\n if is_abs_bpath(dir):\n fsdir = path.join(self.root, dir[1:])\n listing = os.listdir(fsdir)\n res = [path.splitext(x)[0] for x in listing\n if x.endswith('.array')]\n res += [x for x in listing\n if path.isdir(path.join(fsdir, x))]\n return sorted(res)\n else:\n raise ValueError('Expected absolute blaze catalog path: %r' % dir)", "def ls_arrs(self, dir):\n if is_abs_bpath(dir):\n fsdir = path.join(self.root, dir[1:])\n listing = os.listdir(fsdir)\n return sorted([path.splitext(x)[0] for x in listing\n if x.endswith('.array')])\n else:\n raise ValueError('Expected absolute blaze catalog path: %r' % dir)", "def get_entries(self, dir):\n return os.listdir(dir)", "def listtypedir(self):\r\n\t\ttry:\r\n\t\t\tdirectory = VDOM_CONFIG[\"TYPES-LOCATION\"]\r\n\t\t\tr1 = os.listdir(directory)\r\n\t\t\tr2 = []\r\n\t\t\tfor item in r1:\r\n\t\t\t\tif item.endswith(\".xml\"):\r\n\t\t\t\t\tr2.append(os.path.join(directory, item))\r\n\t\t\treturn r2\r\n#\t\texcept:\r\n\t\texcept Exception, e:\r\n\t\t\ttraceback.print_exc(file=sys.stderr)\r\n\t\t\treturn []", "def listdir(self):\r\n ls = os.listdir(self.folder)\r\n if self.extensions:\r\n return [x for x in ls if os.path.splitext(x)[1][1:] \\\r\n in self.extensions]\r\n else:\r\n return ls", "def avds():\n @requires_binary(\"avdmanager\")\n @requires_binary(\"grep\")\n def _avds():\n avds_raw = _execute(\"avdmanager list avd | grep Name:\")\n return [avd.strip.split(\": \", 1)[-1] for avd in avds_raw.split(\"\\n\")]\n\n return _avds()", "def eye_data_list(experiment_num):\n\n top_dir = project_directory()\n data_dir = _os.path.join(top_dir,\n 'raw-data',\n ('experiment-' + str(experiment_num)),\n 'eye_data_files')\n # get a list of all the scenes in the directory:\n file_list = []\n wildcard = '*.asc'\n for file in _glob.glob(_os.path.join(data_dir, wildcard)):\n file_list.append(file)\n return(file_list)", "def avail_pots():\n #[ name for name in os.listdir(set_path_vasp_potentials()) if os.path.isdir()\n out = []\n for name in os.listdir(set_path_vasp_potentials()):\n path = set_path_vasp_potentials() + \"/\" + name\n if os.path.isdir(path):\n out.append(name)\n return out\n #return filter(os.path.isdir, os.listdir(set_path_vasp_potentials()))", "def list_of_exams(group):\n return os.listdir(os.path.join('groups', group, 'exams'))", "def test_make_ova_adds_all_files(self, fake_rmtree, fake_listdir, fake_rename, fake_open,\n fake_sleep, fake_makedirs, fake_tarfile, fake_download_vmdk, fake_get_vm_ovf_xml,\n fake_block_on_lease, fake_power):\n fake_vcenter = MagicMock()\n fake_vm = MagicMock()\n fake_vm.name = 'myVM'\n fake_log = MagicMock()\n fake_listdir.return_value = ['myVM.ovf', 'disk-0.vmdk']\n\n virtual_machine.make_ova(fake_vcenter, fake_vm, '/save/ova/here', fake_log)\n files_added_to_ova = fake_tarfile.open.return_value.add.call_count\n expected = 2\n\n self.assertEqual(files_added_to_ova, expected)", "def ls_builtin_assets():\n return [p.name for p in data_dir_path().glob('*') if not p.is_dir()]", "def list_demo_files():\n return [demo_file for demo_file in os.listdir(DEMO_DATA_PATH)\n if not demo_file[0] in '_.']", "def get_downloaded_atlases():\n\n # Get brainglobe directory:\n brainglobe_dir = 
config.get_brainglobe_dir()\n\n return [\n f.name.split(\"_v\")[0]\n for f in brainglobe_dir.glob(\"*_*_*_v*\")\n if f.is_dir()\n ]", "def getTOCFilesArray(self):\n xpath = self.root_tag + \"/mnemonicFileDeploymentProperties\" + self.version_filter + \"/fileDeploymentProperties/file\"\n self.debug(\"getTOCFilesArray(): xpath=\" + xpath + \"\\n\")\n # node_set = self.puke_dom.xml_select( xpath )\n node_set = []\n allElements = self.getData(xpath)\n for el in allElements:\n # el.logMe()\n if (el.getName() == \"file\"):\n node_set.append(el)\n return node_set", "async def list_examples():\n return sorted(\n [p.name for p in Path(\"./data\").iterdir()\n if p.joinpath(\"info.json\").exists()]\n )", "def getFiles(self) -> List[ghidra.framework.model.DomainFile]:\n ...", "def listCalibrationFiles():\n toReturn = {}\n for file in os.listdir(calibrationFilesRoot):\n if(file.endswith(\".calib.txt\")):\n print(file)\n c = Calibrator()\n c.load_from(calibrationFilesRoot+file)\n toReturn[file.replace(\".calib.txt\",\"\")] = c.get_title()\n return toReturn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of .ova from self.args. Return list.
def _prepare_ovas(self): ovalist = [] for name in self.args.NAME: if name.endswith('.ova'): ovalist.append(name) elif os.path.isdir(name): ovalist.extend(self._ova_from_dir(name)) else: print("%s doesn't looks like directory or OVA" % name, file=stderr) return ovalist
[ "def all_args(self) -> List[Namespace]:\n return (\n ([self.peas_args['head']] if self.peas_args['head'] else [])\n + ([self.peas_args['tail']] if self.peas_args['tail'] else [])\n + self.peas_args['peas']\n )", "def argv(self):\n optlist = []\n for n in range(self.count):\n optlist.append(self.flag)\n if self.values is not None:\n optlist.append(self.values[n])\n return optlist", "def getargs( cls ):\n members = inspect.getmembers( cls )\n args = filter( lambda member: isinstance( member[ 1 ], Argument ), members )\n args.sort( key = lambda arg: arg[ 1 ]._creation_order )\n return args", "def _ova_from_dir(directory):\n res = []\n for file in os.listdir(directory):\n if file.endswith('.ova'):\n res.append(os.path.join(directory, file))\n return res", "def args():\n return []", "def args(self):\n result = []\n for key in self.conf.keys():\n result.append('--' + str(key))\n for item in self.conf[key]:\n result.append(str(item))\n return result", "def args_to_list(self):\n arg_list = [self.name]\n for arg_name, arg_value in self.args.items():\n if arg_value is None:\n arg_list.append(arg_name)\n else:\n arg_list.append(arg_name)\n arg_list.append(arg_value)\n return arg_list", "def vocabs(self) -> Tuple[ControlledVocab]:\n return tuple(self.__vocabs)", "def get_args(self):\n return self.get_body().split(\";\")", "def args(self):\n if self.ready():\n return (self._result['args'], self._result['kwargs'])\n raise AttributeError", "def get_args(self) -> argparse.ArgumentParser:\n\n return self.args", "def checked_aovs(self):\n return [aov for checked, aov in zip(self._checked, self.aovs) if checked]", "def obtener_argumentos(self):\n # Core argspec\n nombres_de_arg, spec_dic = self.argspec(self.cuerpo)\n # Obtenga una lista de argumentos + sus valores predeterminados \n # (si los hay) en el orden de declaración/definición (es decir, según\n # getargspec() )\n tuplas = [(x, spec_dic[x]) for x in nombres_de_arg]\n # Prepara la lista de todos los nombres ya-tomados (principalmente\n # para ayudar a elegir las banderas cortas automáticas)\n nombres_tomados = {x[0] for x in tuplas}\n # Crear lista de argumentos (arg_opts se encargará de configurar\n # nombres cortos, etc.)\n args = []\n for nombre, default in tuplas:\n nuevo_arg = Argumento(**self.arg_opts(nombre, default, nombres_tomados))\n args.append(nuevo_arg)\n # Actualizar la lista de nombres_tomados con la lista completa de\n # nombres del nuevo argumento(s) (que puede incluir nuevas \n # banderas cortas) para que la creación posterior de Argumento sepa\n # qué se tomó.\n nombres_tomados.update(set(nuevo_arg.nombres))\n # Ahora necesitamos asegurarnos de que los posicionales terminen al \n # principio de la lista, en el orden dado en self.positional, de modo\n # que cuando Contexto los consuma, este orden se conserve.\n for posarg in reversed(self.posicional):\n for i, arg in enumerate(args):\n if arg.nombre == posarg:\n args.insert(0, args.pop(i))\n break\n return args", "def list_results(self, *args):\n getters = [func for func in args if callable(func)]\n results = [bbox_result for img_result in self.overlaps.values() for bbox_result in img_result]\n return [list(map(func, results)) for func in getters]", "def __str__(self):\n return \", \".join(a for a in self.args)", "def adverbs(self):\n return self._adverbs", "def args(self) -> Tuple[Any, ...]:\n args: List = list()\n argsappend = args.append\n argsextend = args.extend\n paramsget = self.parameters.__getitem__\n argumentsget = self.arguments.__getitem__\n for name in 
self._argnames:\n kind = paramsget(name).kind\n arg = argumentsget(name)\n if kind == VAR_POSITIONAL:\n argsextend(arg)\n else:\n argsappend(arg)\n return tuple(args)", "def get_arg_vals(self):\n \n return self.arg_vals", "def get_arguments(self):\n self.__validate_clause()\n return map(lambda item: Entity(item), self.__item[PARAMS:])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get maximum speed of vehicle (superclass)
def speed_max(self): return self._speed_max
[ "def max_speed(self):\n raise NotImplementedError", "def max_speed(self, value):\n\n pass", "def getMaxSpeed(self):\n return getHandle().maxSpeed", "def max_speed(self):\n out = self.__fcobj._execute_transceiver_cmd()\n if self.__swobj.is_connection_type_ssh():\n shintd = ShowInterfaceTransceiverDetail(out)\n return int(shintd.max_speed)\n supported_speed = get_key(interfacekeys.SUPP_SPEED, self._SW_VER)\n supp_speed = out.get(supported_speed, None)\n if supp_speed is not None:\n pat = \"Min speed: (\\d+) Mb/s, Max speed: (\\d+) Mb/s\"\n match = re.match(pat, supp_speed)\n if match:\n return int(match.group(2))\n return None", "def max_velocity(self):\n return 10 * self.velocity_scale", "def max_turn_speed(self, value):\n\n pass", "def show_speed(self):\n if self.__max_speed and self.speed > self.__max_speed:\n raise TooFast('The car is going too fast. ' +\n f\"Max speed for this vehicle is {self.__max_speed} km/h\")\n return self.speed", "def max_turn_speed(self):\n\n return self._max_turn_speed", "def get_speed(self):\n return float(self.send('speed?'))", "def migrateGetMaxSpeed(self, flags=0):\n ret = libvirtmod.virDomainMigrateGetMaxSpeed(self._o, flags)\n if ret == -1: raise libvirtError ('virDomainMigrateGetMaxSpeed() failed', dom=self)\n return ret", "def _get_maximumValue(self) -> \"double\" :\n return _core.FloatSpinnerCommandInput__get_maximumValue(self)", "def getSpeedTarget(self):\n return self.__speed_target", "def way_speed(way):\n return way['tags'].get('maxspeed_mph',DEFAULT_SPEED_LIMIT_MPH[way['tags']['highway']])", "def _get_maximumValue(self) -> \"double\" :\n return _core.AngleValueCommandInput__get_maximumValue(self)", "def get_t_half_max(self):\n return self.mass_to_half_life(self._mass_min)", "def speedMultiplier(self) -> float:\n return self._getMultiplier('speed')", "def get_max_throughput(self):\n # type: () -> float\n max_throughput = c_double()\n err = lib.ulAOGetInfoDbl(self.__handle, AoInfoItemDbl.MAX_THROUGHPUT, 0,\n byref(max_throughput))\n if err != 0:\n raise ULException(err)\n return max_throughput.value", "def get_speed(self):\n velocity = self.get_linear_velocity()\n speed = np.linalg.norm(velocity)\n return speed", "def get_target_speed(self):\n unsigned = self._get_variable(VAR_ID.TARGET_SPEED)\n if (unsigned > 3200):\n signed = unsigned - 2**16\n else:\n signed = unsigned\n return signed", "def get_max_volume(self) -> float:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get maximum acceleration of vehicle (superclass)
def accel_max(self): return self._accel_max
[ "def max_velocity(self):\n return 10 * self.velocity_scale", "def getAccelerationMax(self, index):\r\n accelMax = c_double()\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetMotorControl_getAccelerationMax(self.handle, c_int(index), byref(accelMax))\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)\r\n else:\r\n return accelMax.value", "def max_speed(self):\n raise NotImplementedError", "def max_velocity_acceleration(_sign):\n\n velocity, acceleration = sign_velocity_acceleration(_sign)\n max_velocity = np.amax(abs(velocity))\n max_acceleration = np.amax(abs(acceleration))\n arg_max_vel = np.argmax(np.amax(abs(velocity), axis=1))\n arg_max_acc = np.argmax(np.amax(abs(acceleration), axis=1))\n\n return max_velocity, max_acceleration, arg_max_vel, arg_max_acc", "def _get_maximumValue(self) -> \"double\" :\n return _core.AngleValueCommandInput__get_maximumValue(self)", "def _get_maximumValue(self) -> \"double\" :\n return _core.FloatSpinnerCommandInput__get_maximumValue(self)", "def getMaxSpeed(self):\n return getHandle().maxSpeed", "def get_max_volume(self) -> float:", "def max_speed(self, value):\n\n pass", "def maxairtemperature(self):\n return self._maxairtemperature", "def accel(self):\n return self.force()/self.mass", "def max_temp(self):\n # pylint: disable=no-member\n if self._max_temp:\n return self._max_temp\n else:\n # Get default temp from super class\n return ClimateDevice.max_temp.fget(self)", "def acceleration(self):\n if self.state.lightning:\n return self.character.acceleration / 2\n else:\n return self.character.acceleration", "def getPotentialMax(self):\r\n potentialMax = c_double()\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetPHSensor_getPotentialMax(self.handle, byref(potentialMax))\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)\r\n else:\r\n return potentialMax.value", "def getAccel(self, thrust, mass):\n accel = thrust*1000000.0/mass\n if accel > anwp.func.globals.maxDroneAccel:\n accel = anwp.func.globals.maxDroneAccel\n return accel", "def GetMaxPoint(self):\n ...", "def angular_velocity(self):\n return 0.0", "def max_voltage(self):\n return max([p.voltage for p in self.voltage_pairs])", "def get_motor_velocity_limits(self):\n max_velocity = c_double()\n max_acceleration = c_double()\n self.sdk.SCC_GetMotorVelocityLimits(self._serial, byref(max_velocity), byref(max_acceleration))\n return max_velocity.value, max_acceleration.value", "def _calc_max(self):\n return np.max(self.get_points()) + 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Vehicle maximum path curvature
def curvature_max(self): return 1.0 / self.radius_min
[ "def max_curvature(P0, P1, P2, P3):\n t = np.linspace(0, 1, 300)\n curv = curvature_bezier(P0, P1, P2, P3)(t)\n max_curv = np.max(np.abs(curv.flatten()))\n return max_curv", "def func_curvature(self):\r\n return u.Curvature.CONCAVE", "def func_curvature(self):\r\n return u.Curvature.CONVEX", "def max_curvature(P, T):\n\n H = mean_curvature(P, T)\n K = gaussian_curvature(P, T)\n H2K = torch.pow(H, 2)-K\n return H+torch.sqrt(torch.where(H2K > 0, H2K, torch.zeros_like(H)))", "def _curvature_penalty(P0, P1, P2, P3):\n t = np.linspace(0, 1, 300)\n\n curv = np.abs(curvature_bezier(P0, P1, P2, P3)(t).flatten())\n max_curv = np.max(curv)\n curv_initial = curv[0]\n curv_final = curv[-1]\n\n # this will cause the minimum curvature to be about 4 times lower\n # than at the origin and end points.\n penalty = max_curv + 2 * (curv_initial + curv_final)\n return penalty", "def curvature(self):\r\n return str(self._dcp_attr.curvature)", "def __calculate_curvature(self):\n y_eval_left = np.max(self.leftLine.ally)\n y_eval_right = np.max(self.rightLine.ally)\n # allx for right and left lines should be averaged (best coeffs)\n left_fit_cr = np.polyfit(self.leftLine.ally * ym_per_pix, self.leftLine.allx * xm_per_pix, 2)\n right_fit_cr = np.polyfit(self.rightLine.ally * ym_per_pix, self.rightLine.allx * xm_per_pix, 2)\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval_left * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) \\\n / np.absolute(2 * left_fit_cr[0])\n right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval_right * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) \\\n / np.absolute(2 * right_fit_cr[0])\n\n return left_curverad, right_curverad", "def calc_curvature_of_polyline(polyline: np.ndarray) -> float:\n dx_dt = np.gradient(polyline[:, 0])\n dy_dt = np.gradient(polyline[:, 1])\n d2x_dt2 = np.gradient(dx_dt)\n d2y_dt2 = np.gradient(dy_dt)\n curvatureArray = np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5\n curvature = 0\n for elem in curvatureArray:\n curvature = curvature + abs(elem)\n return curvature", "def calc_curvature(self, windows):\n\n x, y = zip(*[window.pos_xy() for window in windows])\n x = np.array(x)\n y = np.array(y)\n fit_cr = np.polyfit(y * self.camera.y_m_per_pix, x * self.camera.x_m_per_pix, 2)\n y_eval = np.max(y)\n return ((1 + (2 * fit_cr[0] * y_eval * self.camera.y_m_per_pix + fit_cr[1]) ** 2) ** 1.5) / np.absolute(\n 2 * fit_cr[0])", "def curvature(A, B, C):\n return (4*triangle_area(A, B, C)) / (node_dist(A, B)*node_dist(B, C)*node_dist(A, C))", "def curvature_bezier(P0, P1, P2, P3):\n b_prime = lambda t: 3 * (1 - t)**2 * (P1 - P0) + 6 * (1 - t) * \\\n t * (P2 - P1) + 3 * t**2 * (P3 - P2)\n b_second = lambda t: 6 * (1 - t) * (P2 - 2 * P1 + P0) + 6 * t * (P3 - 2 * P2 + P1)\n\n dx = lambda t: b_prime(t).x\n dy = lambda t: b_prime(t).y\n ddx = lambda t: b_second(t).x\n ddy = lambda t: b_second(t).y\n return lambda t: (dx(t) * ddy(t) - dy(t) * ddx(t)) / (dx(t) ** 2 + dy(t) ** 2) ** (3 / 2)", "def radius_of_curvature(self):\n _a, _c, _lat = (\n self._a.to_value(u.m),\n self._c.to_value(u.m),\n self._lat.to_value(u.rad),\n )\n return (\n radius_of_curvature_fast(_a, _c, _lat) * u.m\n ) # Need to convert units to u.rad and then take value because numpy expects angles in radians if unit is not given.", "def max_radius():\r\n return 20", "def _curvature(self, contour: np.ndarray,signed:bool=False) -> np.array:\n\n dx_dt = np.gradient(contour[:, 0])\n dy_dt = np.gradient(contour[:, 1])\n \n d2x_dt2 = 
np.gradient(dx_dt)\n d2y_dt2 = np.gradient(dy_dt)\n\n numerator = d2x_dt2 * dy_dt - dx_dt * d2y_dt2\n curvature = numerator if signed else np.abs(numerator)\n curvature /= (dx_dt**2 + dy_dt**2)**1.5\n\n assert len(contour) == len(curvature)\n return curvature", "def GetCurvature2(self, *args):\n return _FairCurve.FairCurve_MinimalVariation_GetCurvature2(self, *args)", "def __getMinCarDistance(self, collisionPoints, turnRadius, maxRange):\n minForwardRange = np.Inf\n minBackwardRange = -np.Inf\n\n # Find minimum forward and backward distance\n for point in collisionPoints:\n distance = point['carDistance']\n if distance > 0:\n if distance < minForwardRange:\n minForwardRange = distance\n else:\n if distance > minBackwardRange:\n minBackwardRange = distance\n\n if turnRadius == 0:\n # If driving path is a straight line\n if minForwardRange == np.Inf:\n # and no point was found in forward direction set to maxRange\n minForwardRange = maxRange\n if minBackwardRange == -np.Inf:\n # and no point was found in backward direction set to -maxRange\n minBackwardRange = -maxRange\n else:\n # If driving path is a curve\n if (abs(turnRadius) + self.halfCarWidth) * 2 > maxRange:\n # and driving full circle is not possible due to maxRange\n # Find the path distance until the outer edge of the car would touch the vision range circle\n # by intersections of the two circles\n outerTurnRadius = abs(turnRadius) + self.halfCarWidth\n outerTurnRadiusSqu = outerTurnRadius * outerTurnRadius\n turnRadiusSqu = turnRadius * turnRadius\n maxRangeSqu = maxRange * maxRange\n alpha = np.arccos((turnRadiusSqu + outerTurnRadiusSqu - maxRangeSqu)\n / (2*outerTurnRadius*abs(turnRadius)))\n turnArc = alpha * abs(turnRadius)\n\n # Use that path distance if no nearer collision point was found\n if turnArc < minForwardRange:\n minForwardRange = turnArc\n if -turnArc > minBackwardRange:\n minBackwardRange = -turnArc\n else:\n # and driving full circle could be possible due to maxRange\n if collisionPoints.size == 0:\n # and there are no collision points\n # Set the range to full circle\n minForwardRange = abs(turnRadius) * 2 * np.pi\n minBackwardRange = -abs(turnRadius) * 2 * np.pi\n else:\n # but driving full circle is not possible due to collision points\n if minForwardRange == np.Inf and not minBackwardRange == -np.Inf:\n # and there is at least one collision point in the lower half of the turn circle\n # but not in the upper half. Extend the minForwardRange to more than half the\n # turn circle arc length.\n minForwardRange = 2*np.pi*abs(turnRadius) + collisionPoints['carDistance'].min()\n elif not minForwardRange == np.Inf and minBackwardRange == -np.Inf:\n # and there is at least one collision point in the upper half of the turn circle\n # but not in the lower half. 
Extend the minBackwardRange to more than half the\n # turn circle arc length.\n minBackwardRange = -2*np.pi*abs(turnRadius) + collisionPoints['carDistance'].max()\n\n return minForwardRange, minBackwardRange", "def binary_radial_velocity(self, v_max):\n return (1 / (1 + self.eccentricity)) * v_max * np.sin(self.inclination) *\\\n (self.eccentricity * np.cos(self.orbit_rotation) +\n np.cos(2 * np.arctan((((1 + self.eccentricity) / (1 - self.eccentricity)) ** 0.5) *\n np.tan(0.5 * self.eccentric_anomaly)) + self.orbit_rotation))", "def calculate_curvature(list_metric):\n radi = 2\n num_metric = len(list_metric)\n min_pos = np.clip(\n np.argmin(list_metric), radi, num_metric - radi - 1)\n list1 = list_metric[min_pos - radi:min_pos + radi + 1]\n (afact1, _, _) = np.polyfit(np.arange(0, 2 * radi + 1), list1, 2)\n list2 = list_metric[min_pos - 1:min_pos + 2]\n (afact2, bfact2, _) = np.polyfit(\n np.arange(min_pos - 1, min_pos + 2), list2, 2)\n curvature = np.abs(afact1)\n if afact2 != 0.0:\n num = - bfact2 / (2 * afact2)\n if (num >= min_pos - 1) and (num <= min_pos + 1):\n min_pos = num\n return curvature, np.float32(min_pos)", "def computeMaxRadiusRatio(self, distance):\n max_angle = np.pi / 2.\n ab = distance # The length of the vector between the robot and the\n # farthest point of the farthest vector\n bc = self.model.circle_diameter # The length of the vector of a circle\n ac = geom.al_kashi(b=ab, c=bc, angle=max_angle) # The length of the vector between the robot and the closest\n # point of the farthest vector\n beta = geom.al_kashi(a=bc, b=ab, c=ac) # Angle of vision of the robot to the farthest vector\n de = bc # de and bc are the same vectors\n bd = self.model.length - de # bd is the length of the game board minus one vector\n ad = geom.al_kashi(b=ab, c=bd, angle=max_angle) # The length of the vector between the robot and the farthest\n # point of the closest vector\n be = self.model.length # The length of the game board\n ae = geom.al_kashi(b=ab, c=be, angle=max_angle) # The length of the vector between the robot and the\n # closest point of the closest vector\n alpha = geom.al_kashi(a=de, b=ad, c=ae) # Angle of vision of the robot to the closest vector\n return alpha / beta", "def getMinorRadius(self):\n return float(self.ptx[0,-1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates a new location which is in the middle of two points.
def get_middle_point(l1, l2): row = (l1.row + l2.row) / 2 column = (l1.column + l2.column) / 2 return Location(row, column)
[ "def getMiddlePoint(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n return (round(float((x1 + x2)/2), 2), round(float((y1 + y2)/2), 2))", "def mid(self, other):\n sx, sy = self.xy()\n ox, oy = other.xy()\n return Point((sx+ox)/2, (sy+oy)/2)", "def mid_point(loc1, loc2):\n geod = Geodesic.WGS84\n inv_line = geod.InverseLine(*(loc1 + loc2))\n distance_m = geod.Inverse(*(loc1 + loc2))[\"s12\"]\n loc = inv_line.Position(distance_m / 2, Geodesic.STANDARD | Geodesic.LONG_UNROLL)\n lat, lon = loc['lat2'], loc['lon2']\n return lat, lon", "def middle(self, other):\n return Point.from_xyz((self.X() + other.X()) / 2., (self.Y() + other.Y()) / 2., (self.Z() + other.Z()) / 2.)", "def midpoint(pnt_a, pnt_b):\n return Point.from_xyz((pnt_a.X() + pnt_b.X()) / 2., (pnt_a.Y() + pnt_b.Y()) / 2, (pnt_a.Z() + pnt_b.Z()) / 2)", "def mid_point(self, other):\n return Point((self.x + other.x) / 2, (self.y + other.y) / 2)", "def find_center_oval(x1, y1, x2, y2) -> tuple:\r\n return int((x1 + x2)/2), int((y1+y2)/2)", "def calculate_offset(location_1, location_2):\n row_offset = abs(location_1.row - location_2.row)\n column_offset = abs(location_1.column - location_2.column)\n return Location(row_offset, column_offset)", "def extend(point_a, point_b, L):\n\n xa, ya = point_a\n xb, yb = point_b\n u_vec = [xb - xa, yb - ya]\n u_vec /= np.linalg.norm(u_vec)\n\n xc = xa + L * u_vec[0]\n yc = ya + L * u_vec[1]\n return xc, yc", "def test_TwoPoint_CurrentHalfway(self):\n\t\tspeed = 10\n\n\t\tpointA = Point(Latitude = 0, Longitude = 0)\n\t\tpointB = Point(Latitude = 10, Longitude = 10)\n\n\t\t# We are between A and B\n\t\tcurrentPosition = Point(Latitude = 5, Longitude = 5)\n\n\t\tdistance = Distance_LatLongs(currentPosition.Latitude, currentPosition.Longitude, pointB.Latitude, pointB.Longitude)\n\n\t\texpected = distance / speed;\n\n\t\tself.predictor.SetDestination(pointB)\n\n\t\tpath = []\n\t\tpath.append(pointA)\n\t\tpath.append(pointB)\n\n\t\tself.predictor.SetPath(path)\n\n\t\t# MidPoint\n\t\tself.predictor.SetCurrentPosition(Point(Latitude = 5, Longitude = 5))\n\n\t\tactual = self.predictor.Modifier_Base(average_speed = speed)\n\n\t\tself.assertEqual(actual, expected)", "def halfway(self, target):\r\n x = 0.5 * (self.x + target.x)\r\n y = 0.5 * (self.y + target.y)\r\n return Point(x,y)", "def xy2center(self, x, y):\n x = x - 10.97 / 2\n y = y - 23.78 / 2\n return x, y", "def initial_position(lat,lon):\n\tstarting_lat = lat\n\tstarting_lon = lon % 360\n\treturn starting_lat, starting_lon", "def get_bearing(p1, p2):\r\n lat1, long1 = p1.lat, p1.long\r\n lat2, long2 = p2.lat, p2.long\r\n\r\n brng = Geodesic.WGS84.Inverse(lat1, long1, lat2, long2)['azi1']\r\n return brng", "def get_middle(left_pointer, right_pointer):\n return (left_pointer + right_pointer) // 2", "def centre_point(self):\n x = (self.pnta.x+self.pntb.x)/2\n y = (self.pnta.y+self.pntb.y)/2\n z = (self.pnta.z+self.pntb.z)/2\n return Point(x, y, z)", "def get_great_circle_from_two_points(long_1, lat_1, long_2, lat_2, ellipsoid='WGS84'):\n # first, find the angle\n geo = Geod(ellps=ellipsoid)\n fwd, back, dist = geo.inv(long_1, lat_1, long_2, lat_2, radians=False)\n coords = []\n for dist in float_range(0.0, 40075000.0, 10000.0):\n to_lon, to_lat, to_z = geo.fwd(long_1, lat_1, fwd, dist, radians=False)\n coords.append((to_lon, to_lat))\n return MultiPoint(coords)", "def CoordinateCalculator(CurrentLatitude,CurrentLongitude,TargetLatitude,TargetLongitude):\n \n r = EarthRadius #(m)\n Phi1 = CurrentLatitude * np.pi / 180 #(Rad)\n Lambda1 = CurrentLongitude * np.pi / 
180 #(Rad)\n Phi2 = TargetLatitude * np.pi / 180 #(Rad)\n Lambda2 = TargetLongitude * np.pi / 180 #(Rad)\n \n if -180 <= Lambda2 - Lambda1 <= 180: Lambda12 = Lambda2 - Lambda1 #(Rad)\n if Lambda2 - Lambda1 > 180: Lambda12 = (Lambda2 - Lambda1) - 2 * np.pi #(Rad)\n if Lambda2 - Lambda1 < -180: Lambda12 = (Lambda2 - Lambda1) + 2 * np.pi #(Rad)\n \n Alpha1 = np.arctan2(np.array(np.sin(Lambda12)),np.array(np.cos(Phi1) * np.tan(Phi2) - np.sin(Phi1) * np.cos(Lambda12))) #(Rad)\n Alpha2 = np.arctan2(np.array(np.sin(Lambda12)),np.array(-np.cos(Phi2) * np.tan(Phi1) + np.sin(Phi2) * np.cos(Lambda12))) #(Rad)\n DeltaTheta12 = np.arccos((np.sin(Phi1) * np.sin(Phi2) + np.cos(Phi1) * np.cos(Phi2) * np.cos(Lambda12))) #(Rad)\n ArcLength = DeltaTheta12 * r #(m)\n Alphao = np.arcsin(np.sin(Alpha1) * np.cos(Phi1)) #(Rad)\n DeltaSigma01 = np.arctan2(np.array(np.tan(Phi1)),np.array(np.cos(Alpha1))) #(Rad)\n DeltaSigma02 = DeltaSigma01 + DeltaTheta12 #(Rad)\n Lambda01 = np.arctan2(np.array(np.sin(Alphao) * np.sin(DeltaSigma01)),np.array(np.cos(DeltaSigma01))) #(Rad)\n Lambdao = Lambda1 - Lambda01 #(Rad)\n LatList = []\n LatList1 = []\n LatList2 = []\n LatList3 = []\n LongList = []\n LongList1 = []\n LongList2 = []\n LongList3 = []\n for i in range(101):\n Sigma = DeltaSigma01 + (i * (DeltaSigma02 - DeltaSigma01))/100 #(Rad)\n Phi = (np.arcsin(np.cos(Alphao) * np.sin(Sigma)) * 180 / np.pi) #(Degrees)\n Lambda = (Lambdao + np.arctan2(np.array(np.sin(Alphao) * np.sin(Sigma)),np.array(np.cos(Sigma)))) * 180 / np.pi #(Degrees)\n if -180 <= Lambda <= 180:\n LongList1.append(Lambda) #(Degrees)\n LatList1.append(Phi) #(Degrees)\n if Lambda > 180:\n LongList2.append(Lambda - 360) #(Degrees)\n LatList2.append(Phi) #(Degrees)\n if Lambda < -180:\n LongList3.append(Lambda + 360) #(Degrees)\n LatList3.append(Phi) #(Degrees)\n\n im = plt.imread('EarthCordiants.jpg')\n im = plt.imshow(im, interpolation='bilinear', cmap=cm.gray, origin='lower', extent=[-180,180,-90,90])\n plt.xlabel('Longitude')\n plt.ylabel('Latitude')\n plt.title('Path of Rocket')\n plt.plot(LongList1, LatList1, 'r')\n plt.plot(LongList2, LatList2, 'r')\n plt.plot(LongList3, LatList3, 'r')\n plt.show()\n \n LatList.append(LatList1)\n LatList.append(LatList2)\n LatList.append(LatList3)\n LongList.append(LongList1)\n LongList.append(LongList2)\n LongList.append(LongList3)\n \n return LatList, LongList, Alpha1 * 180 / np.pi, ArcLength", "def proj_mindistance(pt1, pt2, p):\n # blog(x=x, pt1=pt1, pt2=pt2)\n v2 = pt2-pt1\n l = np.sum(v2**2) # compute the squared distance between the 2 vertices\n # blog(l=l, dot=np.dot(p-pt1, pt2-pt1)[0] )\n t = np.max([0., np.min([1., np.dot(p-pt1, pt2-pt1)[0]/l] ) ] ) # np.min([1., np.dot(p-pt1, pt2-pt1)[0]/l] )\n # blog(dot=np.dot(p-pt1, pt2-pt1), t=t)\n proj = pt1 + t*(pt2-pt1)\n return proj, np.sum((proj-p)**2) # project the point" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the offset between two points.
def calculate_offset(location_1, location_2): row_offset = abs(location_1.row - location_2.row) column_offset = abs(location_1.column - location_2.column) return Location(row_offset, column_offset)
[ "def y_distance(p1: sdl2.SDL_Point, p2: sdl2.SDL_Point) -> int:\n\n return p2.y - p1.y", "def calculate_offset_pos_two_side_one_point_locked(b_struct, v_key, pt_1, pt_2, v1, v2, d_o_1, d_o_2):\n\n pt_1_new = add_vectors(pt_1, scale_vector(v1, -1.*d_o_1))\n pt_2_new = add_vectors(pt_2, scale_vector(v2, -1.*d_o_2))\n\n vec_x_new = normalize_vector(vector_from_points(pt_1_new, pt_2_new))\n x_ax = b_struct.vertex[v_key][\"gripping_plane\"][1]\n\n if not angle_vectors(x_ax, vec_x_new, deg=True) < 90:\n vec_x_new = scale_vector(vec_x_new, -1.)\n\n # transform gripping plane\n pt_o = b_struct.vertex[v_key][\"gripping_plane\"][0]\n y_ax = b_struct.vertex[v_key][\"gripping_plane\"][2]\n vec_z = cross_vectors(vec_x_new, y_ax)\n l_n = (pt_1_new, pt_2_new)\n pt_o_new = closest_point_on_line(pt_o, l_n)\n\n return pt_o_new, vec_x_new, y_ax, vec_z", "def x_distance(p1: sdl2.SDL_Point, p2: sdl2.SDL_Point) -> int:\n\n return p2.x - p1.x", "def compute_distance(cls, point_1, point_2):\n return abs(point_1 - point_2)", "def calculate_offset_pos_two_side_two_point_locked(b_struct, v_key, vecs_con_1, vecs_con_2, pts_con_1, pts_con_2, d_o_1, d_o_2):\n assert len(vecs_con_1) == 2 and len(pts_con_1) == 2\n assert len(vecs_con_2) == 2 and len(pts_con_2) == 2\n\n map(normalize_vector, vecs_con_1)\n map(normalize_vector, vecs_con_2)\n v1_1, v1_2 = vecs_con_1\n v2_1, v2_2 = vecs_con_2\n pt_1_1, pt_1_2 = pts_con_1\n pt_2_1, pt_2_2 = pts_con_2\n\n vm_1 = scale_vector(normalize_vector(add_vectors(v1_1, v1_2)), -1.*d_o_1)\n # original contact point (assuming two bars have the same radius)\n pt_1 = centroid_points([pt_1_1, pt_1_2])\n pt_1_new = translate_points([pt_1], vm_1)[0]\n\n vm_2 = scale_vector(normalize_vector(add_vectors(v2_1, v2_2)), -1.*d_o_2)\n pt_2 = centroid_points([pt_2_1, pt_2_2])\n pt_2_new = translate_points([pt_2], vm_2)[0]\n\n vec_x_new = normalize_vector(vector_from_points(pt_1_new, pt_2_new))\n x_ax = b_struct.vertex[v_key][\"gripping_plane\"][1]\n\n if not angle_vectors(x_ax, vec_x_new, deg=True) < 90:\n vec_x_new = scale_vector(vec_x_new, -1.)\n\n pt_o = b_struct.vertex[v_key][\"gripping_plane\"][0]\n y_ax = b_struct.vertex[v_key][\"gripping_plane\"][2]\n vec_z = cross_vectors(vec_x_new, y_ax)\n l_n = (pt_1_new, pt_2_new)\n pt_o_n = closest_point_on_line(pt_o, l_n)\n\n return pt_o_n, vec_x_new, y_ax, vec_z", "def extend(point_a, point_b, L):\n\n xa, ya = point_a\n xb, yb = point_b\n u_vec = [xb - xa, yb - ya]\n u_vec /= np.linalg.norm(u_vec)\n\n xc = xa + L * u_vec[0]\n yc = ya + L * u_vec[1]\n return xc, yc", "def distance(p1: sdl2.SDL_Point, p2: sdl2.SDL_Point) -> float:\n\n distances = xy_distances(p1, p2)\n return math.sqrt(distances.x**2 + distances.y**2)", "def twoPtCenteredDiff(x,y):\n #calculate dydx by center differencing using array slices\n dydx = np.zeros(y.shape,float) #we know it will be this size\n dydx[1:-1] = (y[2:] - y[:-2])/(x[2:] - x[:-2]) #center difference\n dydx[0] = (y[1]-y[0])/(x[1]-x[0]) #forward difference\n dydx[-1] = (y[-1] - y[-2])/(x[-1] - x[-2]) #backward difference\n return dydx", "def perpbisect(pt1, pt2):\n return line.ptnorm((pt1+pt2)/2, pt1-pt2)", "def slope(p1,p2):\n return (p2[1] - p1[1])/(p2[0] - p1[0])", "def solve_offset(first_dict, second_dict, verbose=False): # unused\n assert 'center_ra' in first_dict, warnings.warn(\"center_ra required for first image solving offset.\")\n assert 'center_ra' in second_dict, warnings.warn(\"center_ra required for second image solving offset.\")\n assert 'pixel_scale' in first_dict, warnings.warn(\"pixel_scale required 
for solving offset.\")\n\n if verbose:\n print(\"Solving offset\")\n\n first_ra = float(first_dict['center_ra']) * u.deg\n first_dec = float(first_dict['center_dec']) * u.deg\n\n second_ra = float(second_dict['center_ra']) * u.deg\n second_dec = float(second_dict['center_dec']) * u.deg\n\n rotation = float(first_dict['rotation']) * u.deg\n\n pixel_scale = float(first_dict['pixel_scale']) * (u.arcsec / u.pixel)\n\n first_time = Time(first_dict['DATE-OBS'])\n second_time = Time(second_dict['DATE-OBS'])\n\n out = {}\n\n # The pixel scale for the camera on our unit is:\n out['pixel_scale'] = pixel_scale\n out['rotation'] = rotation\n\n # Time between offset\n delta_t = ((second_time - first_time).sec * u.second).to(u.minute)\n out['delta_t'] = delta_t\n\n # Offset in degrees\n delta_ra = second_ra - first_ra\n delta_dec = second_dec - first_dec\n\n out['delta_ra_deg'] = delta_ra\n out['delta_dec_deg'] = delta_dec\n\n # Offset in pixels\n delta_ra = delta_ra.to(u.arcsec) / pixel_scale\n delta_dec = delta_dec.to(u.arcsec) / pixel_scale\n\n out['delta_ra'] = delta_ra\n out['delta_dec'] = delta_dec\n\n # Out unit drifted this many pixels in a minute:\n ra_rate = (delta_ra / delta_t)\n out['ra_rate'] = ra_rate\n\n dec_rate = (delta_dec / delta_t)\n out['dec_rate'] = dec_rate\n\n # Standard sidereal rate\n sidereal_rate = (24 * u.hour).to(u.minute) / (360 * u.deg).to(u.arcsec)\n out['sidereal_rate'] = sidereal_rate\n\n # Sidereal rate with our pixel_scale\n sidereal_scale = 1 / (sidereal_rate * pixel_scale)\n out['sidereal_scale'] = sidereal_scale\n\n # Difference between our rate and standard\n sidereal_factor = ra_rate / sidereal_scale\n out['sidereal_factor'] = sidereal_factor\n\n # Number of arcseconds we moved\n ra_delta_as = pixel_scale * delta_ra\n out['ra_delta_as'] = ra_delta_as\n\n # How many milliseconds at sidereal we are off\n # (NOTE: This should be current rate, not necessarily sidearal)\n ra_ms_offset = (ra_delta_as * sidereal_rate).to(u.ms)\n out['ra_ms_offset'] = ra_ms_offset\n\n # Number of arcseconds we moved\n dec_delta_as = pixel_scale * delta_dec\n out['dec_delta_as'] = dec_delta_as\n\n # How many milliseconds at sidereal we are off\n # (NOTE: This should be current rate, not necessarily sidearal)\n dec_ms_offset = (dec_delta_as * sidereal_rate).to(u.ms)\n out['dec_ms_offset'] = dec_ms_offset\n\n return out", "def compare_coordinates(self, p1, p2):\n if float(p1[0]) == float(p2[0]):\n return float(p1[1]) - float(p2[1])\n else:\n return float(p1[0]) - float(p2[0])", "def minkowski_distance(point1, point2):\n p=3\n p_sum = 0\n for i in range(0,len(point1)):\n p_sum+=(abs(point1[i]-point2[i]))**p\n return p_sum**(1. /p)\n raise NotImplementedError", "def getMiddlePoint(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n return (round(float((x1 + x2)/2), 2), round(float((y1 + y2)/2), 2))", "def relative_pos(particleA,particleB):\n return np.subtract(particleA.position,particleB.position)", "def sum_points(a, b):\n return a[0] + b[0], a[1] + b[1]", "def xy_distance(x1, y1, x2, y2):\r\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** (1 / 2)", "def distance_between_points(p1, p2):\n import math\n import math.sqrt((p2.x-p1.x)**2 + (p2.y - p1.y)**2)", "def calc_distance_2points(self,pos1,pos2,L):\n\n y =0;\n# print((pos1),pos2)\n for count in numpy.arange(len(pos1)):\n if abs(pos1[count]-pos2[count]) > float(L)/2:\n y = y + numpy.power(L -abs(pos1[count]-pos2[count]),2);\n else:\n y = y + numpy.power(pos1[count]-pos2[count],2);\n\n return (numpy.sqrt(y));" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for the lists in the board. If they do not exists create them
def __get_and_create_lists(self, board, lists_names): lists = [] names = [x.lower() for x in lists_names] lists_names = list(lists_names) # make a copy # search for the lists for lst in board.list_lists(): name = lst.name.lower() if name in names: lists.append(lst) i = names.index(name) lists_names.pop(i) names.pop(i) # create the non existing lists for lst_name in lists_names: lst = board.add_list(lst_name) lists.append(lst) return lists
[ "def _fetch_lists(self):\n # List of the board\n self.lists = self.board.all_lists()\n\n # Compute list orders\n i = 1\n for list_ in self.lists:\n list_.order = i\n i += 1\n\n # List dict of the board used to avoid fetching list data more than once\n self.lists_dict = {list_.id: list_ for list_ in self.lists}\n self.lists_dict_by_name = {list_.name.decode(\"utf-8\"): list_ for list_ in self.lists}\n\n # Comparison function used to compute forward and backward movements\n # when computing card.stats_by_list\n def list_cmp(list_a_id, list_b_id):\n if self.lists_dict[list_b_id].order > self.lists_dict[list_a_id].order:\n return 1\n if self.lists_dict[list_b_id].order < self.lists_dict[list_a_id].order:\n return -1\n return 0\n\n self.list_cmp = list_cmp\n\n # Done list initialization\n self._init_done_list()\n\n # Cycle lists initialization\n self._init_cycle_lists()", "def update_check_movelists(self):\r\n # build check movelist from the white piece that is checking the black king\r\n movelist = []\r\n for i in range(Black.num_queens):\r\n if Black.checker == \"Q\" + str(i):\r\n movelist = BlackQueen.build_check_movelist().copy()\r\n for i in range(2):\r\n if Black.checker == \"B\" + str(i):\r\n movelist = BlackBishop.build_check_movelist().copy()\r\n elif Black.checker == \"R\" + str(i):\r\n movelist = BlackRook.build_check_movelist().copy()\r\n elif Black.checker == \"N\" + str(i):\r\n movelist = [(BlackKnight.row[i], BlackKnight.col[i])]\r\n for i in range(8):\r\n if Black.checker == \"P\" + str(i):\r\n movelist = [(BlackPawn.row[i], BlackPawn.col[i])]\r\n\r\n # now filter all white piece movelists with the new check movelist\r\n for i in range(White.num_queens):\r\n self.filter(\"Q\" + str(i), movelist)\r\n for i in range(2):\r\n self.filter(\"B\" + str(i), movelist)\r\n self.filter(\"R\" + str(i), movelist)\r\n self.filter(\"N\" + str(i), movelist)\r\n for i in range(8):\r\n self.filter(\"P\" + str(i), movelist)", "def _init_cycle_lists(self):\n\n development_list = self.lists_dict_by_name[self.configuration.development_list_name]\n\n self.cycle_lists = []\n self.cycle_lists_dict = {}\n\n # Assumes from the development list to the end list, they all play a role in development\n add_to_cycle_list = False\n for _list in self.lists:\n if _list.id == development_list.id:\n add_to_cycle_list = True\n if add_to_cycle_list:\n self.cycle_lists.append(_list)\n self.cycle_lists_dict[_list.id] = _list\n\n # If there is no cycle lists, assume the configuration is wrong\n if len(self.cycle_lists) <= 1:\n raise EnvironmentError(\n u\"Development list has not been configured for board {0}\".format(self.board_name))", "def __get_new_boards(self, boards: List[Board]) -> List[Board]:\n unique_board_ids = {[board.get_id() for board in boards]}\n\n # getting boards from Redis\n # the set in Redis is called something like 'news_agent:boards'\n # if there is no such element, or it is empty, it returns just an empty set, not None\n connection = get_redis_connection()\n redis_board_ids = connection.smembers(f'{self.__redis_name}:boards')\n\n # new boards ids\n new_board_ids = unique_board_ids - redis_board_ids\n\n # update Redis\n for board_url in new_board_ids:\n connection.sadd(f'{self.__redis_name}:boards', board_url)\n\n # filter new boards\n new_boards = [board for board in boards if board.get_id() in new_board_ids]\n\n return new_boards", "def create_list_tiles(self):\n for j in range(self.NUM_SQUARES):\n local_tiles = []\n for i in range(self.NUM_SQUARES):\n local_tiles.append(None)\n 
self.list_tiles.append(local_tiles)", "def _file_check_lists(self):\n\n\t\ttry:\n\t\t\twith open(self.filename) as f:\n\t\t\t\tself.lists = json.load(f)\n\t\texcept FileNotFoundError:\n\t\t\tself.lists = {\n\t\t\t\t'groceries' : [],\n\t\t\t\t'to-do' : [],\n\t\t\t\t'favourite movies' : [],\n\t\t\t}\n\t\t\twith open(self.filename, 'w') as f:\n\t\t\t\tjson.dump(self.lists, f)\n\t\t\tprint(\"We've created some lists for you to get started!\\n\")\n\t\telse:\n\t\t\twith open(self.filename) as f:\n\t\t\t\tself.lists = json.load(f)", "def filter(self, piece, pinned_movelist):\r\n new_movelist = []\r\n for i in range(White.num_queens):\r\n if piece == \"Q\" + str(i):\r\n for k in WhiteQueen.movelist[i]:\r\n if not (k not in pinned_movelist):\r\n new_movelist.append(k)\r\n WhiteQueen.movelist[i] = new_movelist.copy()\r\n for i in range(8):\r\n new_movelist.clear()\r\n if piece == \"P\" + str(i):\r\n for k in WhitePawn.movelist[i]:\r\n if not (k not in pinned_movelist):\r\n new_movelist.append(k)\r\n WhitePawn.movelist[i] = new_movelist.copy()\r\n for i in range(2):\r\n new_movelist.clear()\r\n if piece == \"R\" + str(i):\r\n for k in WhiteRook.movelist[i]:\r\n if not (k not in pinned_movelist):\r\n new_movelist.append(k)\r\n WhiteRook.movelist[i] = new_movelist.copy()\r\n elif piece == \"B\" + str(i):\r\n for k in WhiteBishop.movelist[i]:\r\n if not (k not in pinned_movelist):\r\n new_movelist.append(k)\r\n WhiteBishop.movelist[i] = new_movelist.copy()\r\n elif piece == \"N\" + str(i):\r\n for k in WhiteKnight.movelist[i]:\r\n if not (k not in pinned_movelist):\r\n new_movelist.append(k)\r\n WhiteKnight.movelist[i] = new_movelist.copy()", "def find_in_board(x, y, board):\n item_list = [] # temp list\n for item in board: # go through each object in level\n if x == item[1] and y == item[2]: #if object matches the x and y coordinates\n item_list.append(item) #add the item to the temp list.\n\n return item_list #returns a list with all objects on the specific tile.", "def getAllBoards(board):\n for row_i in len(board):\n for col_i in len(board[0]):\n if board[row_i][col_i] == '?':\n b1 = list(board)\n b1[row_i][col_i] = '.'\n b2 = list(board)\n b2[row_i][col_i] = \"O\"\n return [getAllBoards(b1)] + [getAllBoards(b2)]\n return [board]", "async def fill_blacklist(self):\n query = 'SELECT * FROM (SELECT guild_id AS snowflake_id, blacklisted FROM guild_config UNION ALL SELECT user_id AS snowflake_id, blacklisted FROM users_data) WHERE blacklisted=\"TRUE\"'\n cur = await self.db.execute(query)\n data = await cur.fetchall()\n self.blacklist = {r[0] for r in data} or set()", "def add_new_board(name):\n global BOARDS\n # if board name is already taken by another board, return None\n for board in BOARDS:\n if board.name == name:\n return None\n\n new_board = Board.new_board(name)\n BOARDS.append(new_board)\n print(BOARDS)\n return new_board", "def update_list(self, *args):\r\n search_term = self.search_var.get()\r\n self.file = open(\"Station List Data.csv\")\r\n self.reader = csv.reader(self.file)\r\n self.data = list(self.reader)\r\n\r\n station_lst = []\r\n for x in list(range(1, len(self.data))):\r\n station_lst.append(self.data[x][0]) # Appends data to list\r\n menu1 = station_lst\r\n\r\n self.menu1.delete(0, END)\r\n\r\n for item in menu1:\r\n if search_term.lower() in item.lower():\r\n self.menu1.insert(END, item)", "def add_ladders(board):\n if not board:\n msg = \"board can be empty\"\n raise LadderException(error_message=msg)\n for ladder in Ladders.LADDERS.value:\n cell = board[ladder[0]-1]\n 
cell.ladder_top = ladder[1]", "def fill_avail_moves(self, board):\r\n for x in (7,9):\r\n if (self.loc+(x*self.dir))//8 == (self.loc//8)+self.dir: #checks if adding 7 or 9 ends up on the correct row\r\n look_sq = board[self.loc + (self.dir*x)] #the square that's beeing looked at\r\n if look_sq.color ==' ': #if there's an open space\r\n self.color=self.color.upper()\r\n self.avail_moves[self.loc+(self.dir*x)] = False #adds move to dict False flag means no jump\r\n elif look_sq.dir == self.dir * -1: #if there's an opponnent's checker\r\n if (look_sq.loc+(x*self.dir))//8 == (look_sq.loc//8)+self.dir: #makes sure not looking off the board\r\n look_2 = self.loc+(self.dir*x*2)\r\n if look_2>=0 and look_2<64:\r\n if board[look_2].color == ' ':\r\n self.avail_moves[self.loc+(self.dir*x*2)]=True #adds move to dict. True flag means it's a jump\r\n self.color=self.color.upper()\r\n if not self.avail_moves: #make sure there isn't already an avail_move\r\n self.color=self.color.lower()", "def allocate_to_grid(self):\n for i in range(self.max_list[0]):\n for j in range(self.max_list[1]):\n self.search_grid[i, j] = []\n\n for cnt in self.particle_list:\n self.search_grid[cnt.list_num[0], cnt.list_num[1]].append(cnt)", "def placeStudents(list):\r\n buildings = createBuilding()\r\n\r\n for line in list:\r\n name, furniture = line.split()\r\n floors = buildings.get(name)\r\n rooms = floors.get(name)\r\n room = rooms.get(name)\r\n if room.AddtoRoom(name, furniture):\r\n print(\"student\", name, \"already present in\", buildings.hash_function(name),\"floor\", floors.hash_function(name)\r\n , \"in room\", rooms.hash_function(name), \". Added furniture\", furniture)\r\n # They were already in the room and their furniture was added\r\n else:\r\n print('Added student', name, 'with', furniture, 'to building', buildings.hash_function(name), \"floor\",\r\n floors.hash_function(name), \"in room\", rooms.hash_function(name))", "def compareMCPBoard(self,theList):\n sizeOfStored = len(self.storedPositions)\n sizeOfTheList = len(theList)\n changesToBoard = []\n returnedValue = []\n #If stored larger piece removed\n if sizeOfStored > sizeOfTheList:\n returnedValue.append('rem')\n for item in self.storedPositions:\n if item not in theList:\n changesToBoard.append(item)\n #If stored smaller piece added\n if sizeOfTheList > sizeOfStored:\n returnedValue.append('add')\n for item in theList:\n if item not in self.storedPositions:\n changesToBoard.append(item)\n #If the same size make sure positions have not changed\n if sizeOfTheList == sizeOfStored:\n for item in self.storedPositions:\n if item not in theList:\n changesToBoard.append(item)\n for item in theList:\n if item not in self.storedPositions:\n changesToBoard.append(item)\n if changesToBoard != []:\n #list size the same but we have changes - ERROR\n returnedValue.append('err')\n returnedValue.append('Looks like an error - McpBoard.py - list size same, but has movment')\n else:\n #lists the same zize and no movment\n returnedValue.append('nom')\n\n if changesToBoard != []:\n #Append the movment list\n returnedValue.append(changesToBoard)\n #Return list with status as first element and movment as second\n return returnedValue", "def build_clone_lists(input_dat):\r\n\r\n # Import JSON files that have the same name as dat_name + .json\r\n remove_string = ' \\((Parent-Clone|J64|ROM|Decrypted|Encrypted|BigEndian|ByteSwapped)\\)'\r\n if re.search(remove_string, input_dat.name) != None:\r\n dat_name = re.sub(remove_string, '', input_dat.name)\r\n else:\r\n dat_name = 
input_dat.name\r\n\r\n if 'GameCube' in dat_name and (\r\n 'NKit GCZ' in dat_name or\r\n 'NKit ISO' in dat_name or\r\n 'NASOS' in dat_name\r\n ):\r\n clone_file = './clonelists/Nintendo - GameCube.json'\r\n elif 'Wii U' in dat_name and 'WUX' in dat_name:\r\n clone_file = './clonelists/Nintendo - Wii U.json'\r\n elif 'Wii' in dat_name and (\r\n 'NKit GCZ' in dat_name or\r\n 'NKit ISO' in dat_name or\r\n 'NASOs' in dat_name\r\n ):\r\n clone_file = './clonelists/Nintendo - Wii.json'\r\n elif (\r\n 'PlayStation Portable' in dat_name\r\n and '(PSN)' not in dat_name\r\n and '(PSX2PSP)' not in dat_name\r\n and '(UMD Music)' not in dat_name\r\n and '(UMD Video)' not in dat_name):\r\n if 'no-intro' in input_dat.url:\r\n clone_file = './clonelists/Sony - PlayStation Portable (No-Intro).json'\r\n elif 'redump' in input_dat.url:\r\n clone_file = './clonelists/Sony - PlayStation Portable (Redump).json'\r\n else:\r\n clone_file = './clonelists/' + dat_name + '.json'\r\n if os.path.exists(clone_file) == True and os.path.isfile(clone_file) == True:\r\n try:\r\n with open(clone_file, 'r') as input_file_read:\r\n clonedata = json.load(input_file_read)\r\n\r\n except OSError as e:\r\n print(f'\\n{Font.error_bold}* Error: {Font.end}{str(e)}\\n')\r\n raise\r\n\r\n except ValueError as e:\r\n printwrap(f'\\n{Font.error_bold}* Error: \"{os.path.abspath(clone_file)}\"{Font.error} isn\\'t valid JSON. Exiting...{Font.end}', 'error')\r\n print('\\n')\r\n raise\r\n\r\n min_version = {}\r\n categories = {}\r\n overrides = {}\r\n renames = {}\r\n removes = {}\r\n\r\n if 'categories' in clonedata:\r\n categories = clonedata['categories']\r\n if 'overrides' in clonedata:\r\n overrides = clonedata['overrides']\r\n if 'renames' in clonedata:\r\n renames = clonedata['renames']\r\n if 'removes' in clonedata:\r\n removes = clonedata['removes']\r\n if 'description' in clonedata:\r\n if 'minimum version' in clonedata['description']:\r\n min_version = clonedata['description']['minimum version']\r\n\r\n\r\n return CloneList(\r\n min_version,\r\n categories,\r\n overrides,\r\n renames,\r\n removes,\r\n )", "def _create_storage_host_lists(self, storage_hosts):\n from nfv_vim import tables\n\n if SW_UPDATE_APPLY_TYPE.IGNORE != self._storage_apply_type:\n host_table = tables.tables_get_host_table()\n\n for host in storage_hosts:\n if HOST_PERSONALITY.STORAGE not in host.personality:\n DLOG.error(\"Host inventory personality storage mismatch \"\n \"detected for host %s.\" % host.name)\n reason = 'host inventory personality storage mismatch detected'\n return None, reason\n\n if 2 > host_table.total_by_personality(HOST_PERSONALITY.STORAGE):\n DLOG.warn(\"Not enough storage hosts to apply software updates.\")\n reason = 'not enough storage hosts to apply software updates'\n return None, reason\n\n host_lists = list()\n\n if SW_UPDATE_APPLY_TYPE.SERIAL == self._storage_apply_type:\n for host in storage_hosts:\n host_lists.append([host])\n\n elif SW_UPDATE_APPLY_TYPE.PARALLEL == self._storage_apply_type:\n policy = HOST_GROUP_POLICY.STORAGE_REPLICATION\n host_group_table = tables.tables_get_host_group_table()\n\n for host in storage_hosts:\n # find the first list that can add this host\n # else create a new list\n for host_list in host_lists:\n for peer_host in host_list:\n if host_group_table.same_group(policy, host.name,\n peer_host.name):\n break\n else:\n host_list.append(host)\n break\n else:\n host_lists.append([host])\n else:\n DLOG.verbose(\"Storage apply type set to ignore.\")\n\n return host_lists, ''" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a subscription given a subscription_id. This does not return a result
def find(subscription_id): try: response = Http().get("/subscriptions/" + subscription_id) return Subscription(response["subscription"]) except NotFoundError: raise NotFoundError("subscription with id " + subscription_id + " not found")
[ "def get_subscription(self, id: UUID) -> Optional[Subscription]:\n subscription = select([subscriptions]).where(subscriptions.c.id == id).execute().first()\n return subscription", "def get_one(self, subscription_id):\n\n subscription = subscription_api.subscription_get(subscription_id)\n current_user = user_api.user_get(request.current_user_id)\n\n if subscription.user_id != request.current_user_id \\\n and not current_user.is_superuser:\n abort(403, _(\"You do not have access to this record.\"))\n\n return Subscription.from_db_model(subscription)", "def by_subscription_id(cls, subscription_id):\n return Filter('subscription', values=(subscription_id,), operator=Filter.OPERATOR['EQUAL'])", "def findSubscription(name):\n subscriptions = opencue.wrappers.subscription.Subscription()\n return subscriptions.find(name)", "def get_subscription(self, sid):\n with self.subscriptions_lock:\n return self.subscriptions.get(sid)", "def getSubscription(subscriber):", "def get_user_subscription(request):\n user_subscription_qs = Subscription.objects.filter(\n user_membership=get_user_membership(request))\n if user_subscription_qs.exists():\n user_subscription = user_subscription_qs.first()\n return user_subscription\n return None", "def get_subscription(self):\n url = self._get_link(\"subscription\")\n if url:\n from ..resources import CustomerSubscriptions\n\n customer = Customer({}, self.client)\n return CustomerSubscriptions(self.client, customer).from_url(url)", "def get_subscription_audit(self, id: UUID) -> Optional[SubscriptionAudit]:\n audit = select([subscription_audit]).where(subscription_audit.c.id == id).execute().first()\n return audit", "def get_subscriber(self, sid: str) -> Optional[EventSubscriber]:\n for subscriber in self._subscribers:\n if subscriber.uuid == sid:\n return subscriber\n return None", "def get_subscription_data(request, uuid):\n header = get_api_header()\n resp = r.get(_url_subscriptions(_base_url(request)),\n headers=header, params={'uuid': uuid},verify=False)\n return resp.text", "def query_subscription_by_name(subscription_name):\n logger.info(f'Attempting to fetch subscription by name: {subscription_name}')\n subscription_model = db.session.query(SubscriptionModel) \\\n .options(joinedload(SubscriptionModel.network_filter),\n joinedload(SubscriptionModel.measurement_groups),\n joinedload(SubscriptionModel.nfs)) \\\n .filter_by(subscription_name=subscription_name).first()\n db.session.remove()\n return subscription_model", "def getSubscription(self, name, remove=True, root=None):\n if root is not None:\n root = os.path.normpath(os.path.normcase(root))\n if root not in self.sub_by_root:\n return None\n if name not in self.sub_by_root[root]:\n return None\n sub = self.sub_by_root[root][name]\n if remove:\n del self.sub_by_root[root][name]\n # don't let this grow unbounded\n if name in self.subs:\n del self.subs[name]\n return sub\n\n if name not in self.subs:\n return None\n sub = self.subs[name]\n if remove:\n del self.subs[name]\n return sub", "def get_subaccount_by_id(self, subaccount_id: str) -> Optional['Account']:\n if self.guid == subaccount_id:\n return self\n for subaccount in self.children:\n subaccount_result: Optional[Account] = subaccount.get_subaccount_by_id(subaccount_id)\n if subaccount_result is not None:\n return subaccount_result\n return None", "def subscriptionId(self) -> str:\n return self.id", "def get_by_person_record_id(subdomain, person_record_id, limit=200):\n query = Subscription.all().filter('subdomain =', subdomain)\n query = 
query.filter('person_record_id =', person_record_id)\n return query.fetch(limit)", "def get(self, request, *args, **kwargs):\n subscriber_name = self.kwargs[\"subscriber_name\"]\n channel_name = self.kwargs[\"channel_name\"]\n subscription = ChannelSubscription.objects.filter(\n channel__name=channel_name, user__username=subscriber_name\n ).first()\n\n if not subscription:\n raise NotFound(\n \"User {} is not a subscriber of {}\".format(\n subscriber_name, channel_name\n )\n )\n return Response(SubscriberSerializer(subscription.user).data)", "def _subscribe(self, subscription, callback=None, num_retries=None):\n body = {'returnImmediately': False, 'maxMessages': 1}\n response = self._client.projects().subscriptions().pull(\n subscription=subscription, body=body).execute(\n num_retries=(self._num_retries if num_retries is None\n else num_retries))\n if (callback is not None and\n response and\n 'receivedMessages' in response and\n response['receivedMessages']):\n received_message = response['receivedMessages'][0]\n data = base64.b64decode(\n received_message['message']['data'].encode('utf-8'))\n attributes = {}\n if 'attributes' in received_message['message']:\n attributes = received_message['message']['attributes']\n callback(\n Message(data=data,\n attributes=attributes,\n message_id=received_message['message']['messageId'],\n publish_time=received_message['message']['publishTime'],\n ack_id=received_message['ackId'],\n subscription=subscription,\n ack_func=self.acknowledge,\n nack_func=self._nack))", "def get_subscription(self, include_unconfirmed=False):\n s = Subscription.objects.filter(user=self.user, status='active') \\\n .order_by('-id') \\\n .first()\n if s is not None or include_unconfirmed is False:\n return s\n s = Subscription.objects.filter(user=self.user, status='unconfirmed') \\\n .order_by('-id') \\\n .first()\n return s" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method calls the method in the CommentDAO responsible for retrieving all the comments from the database. The array of comments is then properly formatted, converted into a json, and returned.
def getAllComment(self): result = CommentDAO().getAllComment() mapped_result = self.buildMethod(result) return jsonify(Comment=mapped_result)
[ "def get_comment_list(self, response):\n comment_list = CommentList()\n contact_comments = response['contact_comments']\n for value in contact_comments:\n contact_comment = Comment() \n contact_comment.set_comment_id(value['comment_id'])\n contact_comment.set_contact_id(value['contact_id'])\n contact_comment.set_contact_name(value['contact_name'])\n contact_comment.set_description(value['description'])\n contact_comment.set_commented_by_id(value['commented_by_id'])\n contact_comment.set_commented_by(value['commented_by'])\n contact_comment.set_date(value['date'])\n contact_comment.set_date_description(value['date_description'])\n contact_comment.set_time(value['time'])\n contact_comment.set_transaction_id(value['transaction_id'])\n contact_comment.set_transaction_type(value['transaction_type'])\n contact_comment.set_is_entity_deleted(value['is_entity_deleted'])\n contact_comment.set_operation_type(value['operation_type'])\n comment_list.set_comments(contact_comment)\n page_context = response['page_context']\n page_context_object = PageContext()\n page_context_object.set_page(page_context['page'])\n page_context_object.set_per_page(page_context['per_page'])\n page_context_object.set_has_more_page(page_context['has_more_page'])\n page_context_object.set_applied_filter(page_context['applied_filter'])\n page_context_object.set_sort_column(page_context['sort_column'])\n page_context_object.set_sort_order(page_context['sort_order'])\n comment_list.set_page_context(page_context_object)\n \n return comment_list", "def comments(self):\r\n from .._impl.comments import Comment\r\n cs = []\r\n start = 1\r\n num = 100\r\n nextStart = 0\r\n url = \"%s/sharing/rest/content/items/%s/comments\" % (self._portal.url, self.id)\r\n while nextStart != -1:\r\n params = {\r\n \"f\" : \"json\",\r\n \"start\" : start,\r\n \"num\" : num\r\n }\r\n res = self._portal.con.post(url, params)\r\n for c in res['comments']:\r\n cs.append(Comment(url=\"%s/%s\" % (url, c['id']),\r\n item=self, initialize=True))\r\n start += num\r\n nextStart = res['nextStart']\r\n return cs", "def fetch_comments(self):\n new_comments = []\n try:\n comments_gen = self.reddit_obj.get_comments(self.subreddit)\n\n for comment in comments_gen:\n if comment.created_utc > self.end_time:\n continue\n if comment.created_utc < self.start_time:\n break\n new_comments.append({\n 'timestamp': int(comment.created_utc),\n 'message': comment.body,\n 'type': datacluster_pb2.RedditMessage.comment,\n 'subreddit': self.subreddit\n })\n except praw.errors.InvalidSubreddit:\n print \"Invalid Subreddit: no results\"\n return new_comments", "def __get_comments(self, root):\n comments_root = self.__expand_shadow_element_by_tag_name(root, 'mr-comment-list')\n\n list_of_comments = comments_root.find_elements_by_tag_name('mr-comment')\n print ('[*] %d comments' %len(list_of_comments))\n comments = []\n for c in list_of_comments:\n comment_root = self.__expand_shadow_element(c)\n comment_header = comment_root.find_element_by_css_selector('div>div').text.replace('\\n', ' ')\n \n m = re.match(self.comment_pattern, comment_header)\n blank_comment = { 'comment_id':'', 'comment_datetime':'', \n 'comment_author':'', 'comment_message': ' '} \n if m:\n comment_id = m.group(1).strip('\\n\\r ')\n if not 'Deleted' in comment_header:\n message_root = self.__expand_shadow_element_by_css_selector(comment_root, '.comment-body>mr-comment-content')\n lines = message_root.find_elements_by_css_selector('.line')\n\n comments.append({\n 'comment_id': comment_id,\n 'comment_datetime': 
m.group(4).strip('\\n\\r '),\n 'comment_author' : m.group(3).strip('\\n\\r '),\n 'comment_message': ' '.join([l.text.strip('\\n\\r ') for l in lines]) \n })\n else:\n blank_comment['comment_id'] = comment_id\n comments.append(blank_comment) \n else:\n comments.append(blank_comment) \n return comments", "def get_comments(self):\n comments = self.data().get('comments', {}).get('data', [])\n migration_key = FacebookPost.migration.get_value_for_datastore(self)\n return (FacebookComment(key_name_parts=(cmt['id'], migration_key.name()),\n json_data=json.dumps(cmt))\n for cmt in comments)", "def _retrieve_comments(self):\n url = self.message_url + 'comments'\n return self._request('GET', url, params=self.params)", "def GetComments(self):\n\t\tcomments = []\n\t\tfor submission in self.submissions:\n\t\t\tif self.expanded:\n\t\t\t\tsubmission.replace_more_comments()\n\t\t\t\tcommentobjs = praw.helpers.flatten_tree(submission.comments)\n\t\t\t\tcomments.extend([comment.body for comment in commmentobjs])\n\t\t\telse:\n\t\t\t\tsubmission.replace_more_comments(limit=0)\n\t\t\t\tcomments.extend([comment.body for comment in submission.comments if comment.is_root])\n\t\tself.comments = comments\n\t\tself.commentcount = len(comments)", "def load_comments():\n quantity = current_app.config['PAGE_MAX_COMMENTS']\n video_id = request.json[\"video_id\"]\n counter = request.json[\"counter\"]\n sort_by = request.json[\"sort_by\"]\n sort_direction = request.json[\"sort_direction\"]\n print(request.json)\n num_comments = Comment.query.filter_by(video_id=video_id).count()\n page = math.ceil((counter + 1) / quantity)\n\n sort_options = {\n \"upload_date\": Comment.created_at.desc() if sort_direction == \"desc\" else Comment.created_at.asc(),\n \"rating\": Comment.rating.desc() if sort_direction == \"desc\" else Comment.rating.asc()\n }\n\n if counter < num_comments:\n comments = Comment.query.filter_by(video_id=video_id).order_by(sort_options[sort_by]). 
\\\n paginate(page=page, per_page=quantity, error_out=False)\n\n res = comments.items\n print(res[0])\n res = (jsonify(list(map(lambda c: c.serialize(), res))))\n else:\n res = make_response(jsonify({}), 200)\n\n return res", "def list_incident_comments(request):\n incident_id = request.GET.get('incident_id')\n\n incident_comments = IncidentComment.objects.filter(incident_id=int(incident_id))\n\n data = {\n 'all_comments': serializers.serialize(\"json\", incident_comments),\n }\n\n return JsonResponse(data)", "def fetch_all_comments(question_id):\n\n if not isinstance(CommentModels().check_authorization(), int):\n\n return CommentModels().check_authorization()\n\n return make_response(jsonify(CommentModels().fetch_all_comments(question_id)), 200)", "def get_json(self):\n\t\treturn json.dumps(self.comment_data)", "def get_comments(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.CommentList(self._results, runtime=self._runtime)", "def comments(self, **kw):\n request = self.service.CommentsRequest(item_id=True, filtered=True, **kw)\n\n self.log('Getting comments matching the following options:')\n self.log_t(request.options, prefix=' - ')\n\n data = request.send()\n lines = self._render_events(data, **kw)\n print(*lines, sep='\\n')", "def test_sales_creditmemo_management_v1_get_comments_list_get(self):\n pass", "def get_comments(request,book): \n\n try:\n b = Book.objects.get(title=book)\n except Book.DoesNotExist:\n comment_json=\"{\\\"comments\\\"\"+\":\\\"None\\\"}\"\n return HttpResponse(comment_json, content_type='application/json')\n\n try:\n comments = Comment.objects.get(book=b)\n except Comment.DoesNotExist:\n comment_json=\"{\\\"comments\\\"\"+\":\\\"None\\\"}\"\n return HttpResponse(comment_json, content_type='application/json')\n except MultipleObjectsReturned:\n comments = Comment.objects.filter(book=b).filter(isFlagged=False)\n\n comments = Comment.objects.filter(book=b).filter(isFlagged=False)\n\n if request.method == 'GET':\n print(comments)\n comment_json=\"{\\\"comments\\\"\"+\":[\"\n for c in comments:\n comment_json += \"{\\\"comment\\\":\\\"\" + str(c.comment) + \"\\\"},\"\n\n comment_json = comment_json[:-1] + \"]}\"\n return HttpResponse(comment_json, content_type='application/json')", "def get(self, comment_id):\n\n db = get_db()\n if db.get_all_accounts() == []:\n response = jsonify([])\n else:\n log_in()\n if comment_id is None:\n response = jsonify(db.get_all_rows('comment'))\n\n return response\n else:\n comment = db.query_by_id('comment', comment_id)\n \n if comment is not None:\n response = jsonify(comment)\n else:\n raise RequestError(404, 'Comment ID not found')\n\n return response", "def testIssuesCommentsList_GetComments(self):\n\n self.services.project.TestAddProject(\n 'test-project', owner_ids=[2],\n project_id=12345)\n\n issue1 = fake.MakeTestIssue(\n project_id=12345, local_id=1, summary='test summary', status='New',\n issue_id=10001, owner_id=2, reporter_id=1)\n self.services.issue.TestAddIssue(issue1)\n\n comment = tracker_pb2.IssueComment(\n id=123, issue_id=10001,\n project_id=12345, user_id=2,\n content='this is a comment',\n timestamp=1437700000)\n self.services.issue.TestAddComment(comment, 1)\n\n resp = self.call_api('issues_comments_list', self.request).json_body\n self.assertEqual(2, resp['totalResults'])\n comment1 = resp['items'][0]\n comment2 = resp['items'][1]\n self.assertEqual('requester@example.com', comment1['author']['name'])\n self.assertEqual('test 
summary', comment1['content'])\n self.assertEqual('user@example.com', comment2['author']['name'])\n self.assertEqual('this is a comment', comment2['content'])", "def comments_to_dicts(self, comments):\n\t\tlist_of_dicts = [{ \"author\": c.author.name, \"body_html\":c.body_html, \n\t\t\"created_utc\":c.created_utc, \"permalink\":c.permalink(True) } for c in comments]\n\t\treturn list_of_dicts", "def format_comments(self, contents):\n comment_template, reply_template = self.format_templates()\n comments = []\n for i, comment in enumerate(contents):\n comment['num'] = i + 1\n comments.append(comment_template.format(**comment))\n for j, reply in enumerate(comment['replies']):\n reply['num'] = j + 1\n if 'content' not in reply.keys():\n reply['content'] = ''\n comments.append(reply_template.format(**reply))\n comments.append('\\n\\n')\n\n return comments" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method calls the method in CommentDAO responsible for getting a comment by a parameter id. If no comment with a matching id is found, the method returns a json containing an error message. If a comment with a matching id is found, then it is properly formatted and returned as a json.
def getCommentByID(self, id): result = CommentDAO().getCommentById(id) mapped_result = [] if result is None: return jsonify(Error="NOT FOUND"), 404 else: mapped_result.append(self.build_comment(result)) return jsonify(Comment=mapped_result)
[ "def get(self, comment_id):\n\n db = get_db()\n if db.get_all_accounts() == []:\n response = jsonify([])\n else:\n log_in()\n if comment_id is None:\n response = jsonify(db.get_all_rows('comment'))\n\n return response\n else:\n comment = db.query_by_id('comment', comment_id)\n \n if comment is not None:\n response = jsonify(comment)\n else:\n raise RequestError(404, 'Comment ID not found')\n\n return response", "def load_comment(comment_id):\n #try:\n comment = models.load_comment(comment_id)\n if comment is None:\n return gen_missing(\"comment\")\n # TODO: Return author (username and avatar) too\n comment_dict = {\n 'comment_id': comment.comment_id,\n 'post_id': comment.post_id,\n 'user_id': comment.user_id,\n 'comment_content': comment.comment_content\n }\n return gen_response(status=resp.OK, data=comment_dict)\n #except:\n # return resp.RESP_SERVER", "async def resolve_comment(comment_id: int = Path(...), db: Session = Depends(get_db)) -> models.Comment:\n db_comment = crud.get_comment(db, comment_id=comment_id)\n if not db_comment:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Comment \\'{comment_id}\\' not found')\n\n return db_comment", "def get(self, request, slug, comment, id):\n comment_history = CommentHistory.objects.all().filter(\n id=id, commentId=comment).first()\n serializer = self.serializer_class(comment_history, many=False)\n if serializer.data.get(\"body\") == \"\":\n response = Response({\n \"error\": \"History comment selected does not exist\"\n }, status=status.HTTP_404_NOT_FOUND)\n else:\n response = Response({\n \"comment_history\": serializer.data\n }, status=status.HTTP_200_OK)\n return response", "def get_comment_by_id(self, asset_id: str, comment_id: int) -> dict:\n\n path = f\"{self.base_path}/{asset_id}/comments/{comment_id}\"\n\n return self._get(path)", "def scrap_comments(self, params):\n req = self._scrap(self.base_url + self.comments_url, params)\n if req is None:\n return None\n return self.extract_comments(params['id'], req.text)", "def get_comment(self, comment_id, activity_id=None, activity_author_id=None):\n # https://developers.google.com/+/api/latest/comments\n call = self.auth_entity.api().comments().get(commentId=comment_id)\n cmt = call.execute(self.auth_entity.http())\n return self.postprocess_comment(cmt)", "def patch(self, comment_id):\n\n db = get_db()\n if 'content' not in request.form:\n raise RequestError(422, 'Content must be provided')\n else:\n content = request.form['content']\n\n comment = db.query_by_id('comment', comment_id)\n\n if comment is not None:\n verify_account_by_id(comment['author_id'])\n comment = db.update_comment(comment_id, content)\n response = jsonify(comment)\n else:\n raise RequestError(404, 'Comment not found')\n\n return response", "def fetch_all_comments(question_id):\n\n if not isinstance(CommentModels().check_authorization(), int):\n\n return CommentModels().check_authorization()\n\n return make_response(jsonify(CommentModels().fetch_all_comments(question_id)), 200)", "def getCommentByDate(self, json):\n result = CommentDAO().getCommentByDate(json[\"comment_date\"])\n if result is None:\n return jsonify(Error=\"NOT FOUND\"), 404\n else:\n mapped_result = self.buildMethod(result)\n return jsonify(Comment=mapped_result)", "def getAllComment(self):\n result = CommentDAO().getAllComment()\n mapped_result = self.buildMethod(result)\n return jsonify(Comment=mapped_result)", "async def getUserComments(self, userID: int):\n headers = {\"Authorization\": self.token}\n\n async with 
self.session.get(f'{Config.general_api}/profile/{userID}/comments', headers=headers) as resp:\n data = await _json_or_text(resp)\n status = Config.http_exceptions.get(resp.status)\n if status is not None:\n raise status(resp)\n return data", "def get_comments(request, user_id):\n comments = Comment.objects.filter(user__id=user_id)\n serializer = CommentSerializer(comments, many=True)\n return JsonResponse(serializer.data, safe=False)", "def get_comment(request):\n\tif request.method != 'GET' or is_fields_in_dict(request.GET, 'last_comment_update_time', 'room_pk'):\n\t\treturn HttpResponse(json.dumps({}), content_type='application/json')\n\n\troom = get_object_or_404(Room, pk=request.GET['roomPK'])\n\tlast_update_time = convert_to_localized_time(request.GET['last_comment_update_time'])\n\tnew_comments = get_new_objects_since(Comment.objects.filter(room=room), last_update_time)\n\tresponse_text = []\n\tfor comment in new_comments:\n\t\tparsed_comment = convert_comment_to_dict(comment)\n\t\tresponse_text.append(parsed_comment)\n\treturn HttpResponse(json.dumps(response_text), content_type='application/json')", "def get_img_comment(request):\n req = json.loads(request.body)\n img_id = req['pid']\n\n comments = []\n qs_comment = Comment.objects.filter(img_id=img_id)\n for c in qs_comment:\n c_dict = dict()\n c_dict['text'] = c.text\n c_dict['date'] = c.date\n qs_customer = Customer.objects.filter(id=c.cid)\n customer = qs_customer[0]\n c_dict['username'] = customer.username\n comments.append(c_dict)\n\n jstring = json.dumps(comments)\n return HttpResponse(jstring)", "def get_comments(conn, project_id):\n c = conn.cursor()\n sql = \"\"\"SELECT * FROM comments\n WHERE project_id=?;\"\"\"\n c.execute(sql, (project_id,))\n return c.fetchall()", "def get(self, request, slug, comment):\n comment_history = CommentHistory.objects.all().filter(\n commentId=comment\n )\n comment_hist = []\n if comment_history:\n for history in comment_history:\n serializer = self.serializer_class(history, many=False)\n comment_data = serializer.data\n comment_hist.append(comment_data)\n response = Response({\n \"comments_history\": comment_hist\n }, status=status.HTTP_200_OK)\n else:\n response = Response({\n \"message\": \"No history comments\",\n \"comment\": comment_hist\n }, status=status.HTTP_200_OK)\n return response", "def get_comments_for_match(match_id):\n match = _get_match_or_404(match_id)\n\n party_id = request.args.get('party_id')\n\n comments = comment_service.get_comments(\n match.id, party_id=party_id, include_hidden=True\n )\n\n comment_dtos = list(map(_comment_to_json, comments))\n\n return jsonify({\n 'comments': comment_dtos,\n })", "def get_comments_for_match(match_id):\n match = _get_match_or_404(match_id)\n\n comments = tourney_match_comment_service.get_comments(\n match.id, include_hidden=True\n )\n\n party_id = request.args.get('party_id')\n if party_id:\n user_ids = set(\n chain.from_iterable(map(_get_user_ids_for_comment, comments))\n )\n orga_ids = orga_team_service.select_orgas_for_party(user_ids, party_id)\n else:\n orga_ids = set()\n\n comment_dicts = [\n _comment_to_json(comment, orga_ids) for comment in comments\n ]\n\n return jsonify(\n {\n 'comments': comment_dicts,\n }\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method calls the method in CommentDAO responsible for getting a comment by a parameter date. If no comment with a matching date is found, the method returns a json containing an error message. If a comment with a matching date is found, then it is properly formatted and returned as a json.
def getCommentByDate(self, json): result = CommentDAO().getCommentByDate(json["comment_date"]) if result is None: return jsonify(Error="NOT FOUND"), 404 else: mapped_result = self.buildMethod(result) return jsonify(Comment=mapped_result)
[ "def getCommentByID(self, id):\n result = CommentDAO().getCommentById(id)\n mapped_result = []\n if result is None:\n return jsonify(Error=\"NOT FOUND\"), 404\n else:\n mapped_result.append(self.build_comment(result))\n return jsonify(Comment=mapped_result)", "def get(self, comment_id):\n\n db = get_db()\n if db.get_all_accounts() == []:\n response = jsonify([])\n else:\n log_in()\n if comment_id is None:\n response = jsonify(db.get_all_rows('comment'))\n\n return response\n else:\n comment = db.query_by_id('comment', comment_id)\n \n if comment is not None:\n response = jsonify(comment)\n else:\n raise RequestError(404, 'Comment ID not found')\n\n return response", "def get(self, request, slug, comment, id):\n comment_history = CommentHistory.objects.all().filter(\n id=id, commentId=comment).first()\n serializer = self.serializer_class(comment_history, many=False)\n if serializer.data.get(\"body\") == \"\":\n response = Response({\n \"error\": \"History comment selected does not exist\"\n }, status=status.HTTP_404_NOT_FOUND)\n else:\n response = Response({\n \"comment_history\": serializer.data\n }, status=status.HTTP_200_OK)\n return response", "def get_comment(request):\n\tif request.method != 'GET' or is_fields_in_dict(request.GET, 'last_comment_update_time', 'room_pk'):\n\t\treturn HttpResponse(json.dumps({}), content_type='application/json')\n\n\troom = get_object_or_404(Room, pk=request.GET['roomPK'])\n\tlast_update_time = convert_to_localized_time(request.GET['last_comment_update_time'])\n\tnew_comments = get_new_objects_since(Comment.objects.filter(room=room), last_update_time)\n\tresponse_text = []\n\tfor comment in new_comments:\n\t\tparsed_comment = convert_comment_to_dict(comment)\n\t\tresponse_text.append(parsed_comment)\n\treturn HttpResponse(json.dumps(response_text), content_type='application/json')", "def get(self, request, slug, comment):\n comment_history = CommentHistory.objects.all().filter(\n commentId=comment\n )\n comment_hist = []\n if comment_history:\n for history in comment_history:\n serializer = self.serializer_class(history, many=False)\n comment_data = serializer.data\n comment_hist.append(comment_data)\n response = Response({\n \"comments_history\": comment_hist\n }, status=status.HTTP_200_OK)\n else:\n response = Response({\n \"message\": \"No history comments\",\n \"comment\": comment_hist\n }, status=status.HTTP_200_OK)\n return response", "def scrap_comments(self, params):\n req = self._scrap(self.base_url + self.comments_url, params)\n if req is None:\n return None\n return self.extract_comments(params['id'], req.text)", "def getAllComment(self):\n result = CommentDAO().getAllComment()\n mapped_result = self.buildMethod(result)\n return jsonify(Comment=mapped_result)", "def get_img_comment(request):\n req = json.loads(request.body)\n img_id = req['pid']\n\n comments = []\n qs_comment = Comment.objects.filter(img_id=img_id)\n for c in qs_comment:\n c_dict = dict()\n c_dict['text'] = c.text\n c_dict['date'] = c.date\n qs_customer = Customer.objects.filter(id=c.cid)\n customer = qs_customer[0]\n c_dict['username'] = customer.username\n comments.append(c_dict)\n\n jstring = json.dumps(comments)\n return HttpResponse(jstring)", "def get_comments(request,book): \n\n try:\n b = Book.objects.get(title=book)\n except Book.DoesNotExist:\n comment_json=\"{\\\"comments\\\"\"+\":\\\"None\\\"}\"\n return HttpResponse(comment_json, content_type='application/json')\n\n try:\n comments = Comment.objects.get(book=b)\n except Comment.DoesNotExist:\n 
comment_json=\"{\\\"comments\\\"\"+\":\\\"None\\\"}\"\n return HttpResponse(comment_json, content_type='application/json')\n except MultipleObjectsReturned:\n comments = Comment.objects.filter(book=b).filter(isFlagged=False)\n\n comments = Comment.objects.filter(book=b).filter(isFlagged=False)\n\n if request.method == 'GET':\n print(comments)\n comment_json=\"{\\\"comments\\\"\"+\":[\"\n for c in comments:\n comment_json += \"{\\\"comment\\\":\\\"\" + str(c.comment) + \"\\\"},\"\n\n comment_json = comment_json[:-1] + \"]}\"\n return HttpResponse(comment_json, content_type='application/json')", "def get_comments(request, user_id):\n comments = Comment.objects.filter(user__id=user_id)\n serializer = CommentSerializer(comments, many=True)\n return JsonResponse(serializer.data, safe=False)", "def _retrieve_comments(self):\n url = self.message_url + 'comments'\n return self._request('GET', url, params=self.params)", "def query_data(date=None):\n if date:\n result = Data.query.get(date)\n if result is None:\n return jsonify({\"Error\": \"No data in given date!\"}), 404\n\n data = {\n 'date': result.date,\n 'cases': result.cases,\n 'death': result.death\n }\n\n return jsonify(data), 200\n else:\n result = Data.query.all()\n data = []\n\n for r in result:\n new_record = {\n 'date': r.date,\n 'cases': r.cases,\n 'death': r.death\n }\n data.append(new_record)\n\n if len(data) == 0:\n return jsonify({\"Error\": \"Data not found!\"}), 404\n else:\n return jsonify(data), 200", "def get_comments_for_match(match_id):\n match = _get_match_or_404(match_id)\n\n party_id = request.args.get('party_id')\n\n comments = comment_service.get_comments(\n match.id, party_id=party_id, include_hidden=True\n )\n\n comment_dtos = list(map(_comment_to_json, comments))\n\n return jsonify({\n 'comments': comment_dtos,\n })", "def fetch_all_comments(question_id):\n\n if not isinstance(CommentModels().check_authorization(), int):\n\n return CommentModels().check_authorization()\n\n return make_response(jsonify(CommentModels().fetch_all_comments(question_id)), 200)", "async def getUserComments(self, userID: int):\n headers = {\"Authorization\": self.token}\n\n async with self.session.get(f'{Config.general_api}/profile/{userID}/comments', headers=headers) as resp:\n data = await _json_or_text(resp)\n status = Config.http_exceptions.get(resp.status)\n if status is not None:\n raise status(resp)\n return data", "def get_comments_for_match(match_id):\n match = _get_match_or_404(match_id)\n\n comments = tourney_match_comment_service.get_comments(\n match.id, include_hidden=True\n )\n\n party_id = request.args.get('party_id')\n if party_id:\n user_ids = set(\n chain.from_iterable(map(_get_user_ids_for_comment, comments))\n )\n orga_ids = orga_team_service.select_orgas_for_party(user_ids, party_id)\n else:\n orga_ids = set()\n\n comment_dicts = [\n _comment_to_json(comment, orga_ids) for comment in comments\n ]\n\n return jsonify(\n {\n 'comments': comment_dicts,\n }\n )", "def _getBlogComment(self,parent_list):\n try:\n comment_iden = self.current_comment.get('id')\n if not checkSessionInfo(self.genre, self.session_info_out,\n comment_iden, self.task.instance_data.get('update'),\n parent_list=parent_list):\n page={}\n try:\n page['et_author_name']=self.current_comment.find('div',attrs={'class':'commentTxt'}).strong.renderContents()\n except:\n log.info(self.log_msg(\"Could not fetch comment author name\"))\n try:\n page['data']= ' '.join(stripHtml(each_para.renderContents().strip()) for each_para in 
self.current_comment.find('div',attrs={'class':'commentTxt'}).findAll('p')[1:]) \n page['title']=str(page['data'])[:50]\n except:\n page['data']=''\n page['title']=''\n log.info(self.log_msg(\"Blog data not found\"))\n comment_hash = md5.md5(''.join(sorted(map(lambda x: str(x) if isinstance(x,(int,float)) else x , \\\n page.values()))).encode('utf-8','ignore')).hexdigest()\n result=updateSessionInfo(self.genre, self.session_info_out, comment_iden, comment_hash,\n 'Comment', self.task.instance_data.get('update'),\n parent_list=parent_list)\n if result['updated']:\n try:\n page['posted_date']= datetime.strftime(datetime.strptime(self.current_comment.find('a',attrs={'href':re.compile('^#comment-\\d+$')}).renderContents(),\"%b %d, %Y\"),\"%Y-%m-%dT%H:%M:%SZ\")\n except:\n page['posted_date']=datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\")\n log.info(self.log_msg(\"Exception occured while fetching post date from blog\"))\n\n page['parent_path']=copy.copy(parent_list)\n parent_list.append(comment_iden)\n page['path']=parent_list\n page['versioned']=self.task.instance_data.get('versioned',False)\n page['category']=self.task.instance_data.get('category','generic')\n page['client_name']=self.task.client_name\n page['last_updated_time']= datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\") \n page['task_log_id']=self.task.id\n page['entity']='comment'\n page['uri'] = normalize(self.currenturi)\n page['uri_domain'] = urlparse(page['uri'])[1]\n page['priority']=self.task.priority\n page['level']=self.task.level\n page['pickup_date'] = datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\")\n page['connector_instance_log_id'] = self.task.connector_instance_log_id\n page['connector_instance_id'] = self.task.connector_instance_id\n page['workspace_id'] = self.task.workspace_id\n page['client_id'] = self.task.client_id # TODO: Get the client from the project \n self.new_comment_count = self.new_comment_count + 1\n self.pages.append(page)\n log.debug(self.log_msg(\"Appending comment %s\" %(comment_iden)))\n return True\n else:\n log.debug(self.log_msg(\"NOT appending comment %s has been fetched\" %(comment_iden)))\n return False\n else:\n log.debug(self.log_msg(\"NOT appending comment %s has been fetched\" %(comment_iden)))\n return False\n except:\n log.exception(self.log_msg(\"Exception occured while fetching comment %s\" %(comment_iden)))\n return False", "def get(self, doctor_id, date):\n if not doctorDAO.get_doctor_by_id(db.con_pool, doctor_id):\n abort(404, 'Doctor id not found')\n\n try:\n datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n abort(400, 'Invalid date')\n\n return make_response(jsonify(\n doctorDAO.get_doctor_diary_by_day(db.con_pool, doctor_id, date)), 200)", "def get_comment_by_id(self, asset_id: str, comment_id: int) -> dict:\n\n path = f\"{self.base_path}/{asset_id}/comments/{comment_id}\"\n\n return self._get(path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
initialize a link with two nodes, a name, a protocol and a risk value
def __init__(self, node1, node2, name, protocol, risk = 5): self._n1 = node1 # private variable storing the first node which is of type Node self._n2 = node2 # private variable storing the second node which is of type Node self._name = name # private variable storing the name of the link self._proto = protocol # private variable storing the protocol of the link self._risk = risk # private variable storing the risk value of the link self._additional = {} # private dictionary to store additional data
[ "def make_link(self, node0, node1):\r\n Link(node0, node1)", "def __init__(self, from_bath, to_bath, label):\n super(Link, self).__init__(label)\n\n # can only attach Link to Bath\n assert isinstance(from_bath, Bath)\n assert isinstance(to_bath, Bath)\n\n self.from_bath = from_bath\n self.to_bath = to_bath\n\n # updating the from_bath and to_bath attributes\n self.from_bath.link_out.append(self)\n self.to_bath.link_in.append(self)\n\n # empty noise dictionnary\n self.noise_flux = dict()", "def make_links(self, node0, node1):\r\n Link(node0, node1)\r\n Link(node1, node0)", "def __init__(self, link_model, kind_name):\n self.kind_name = kind_name\n self.link_model = link_model\n self.update()", "def _create_link_element(self, rNode, cNode):\n # sub procedure function\n # user mp constraint object\n # function to create ops rigid link command and store to variable\n\n link_str = 'ops.rigidLink(\"{linktype}\",{rNodetag},{cNodetag})\\n'.format(\n linktype=self.link_type, rNodetag=cNode, cNodetag=rNode\n )\n\n self.link_str_list.append(link_str)", "def __init__(self, list_nodes):\n\n self.starter_node = Node(list_nodes[0])\n current_node = self.starter_node\n for val in list_nodes[1:]:\n current_node.link = Node(val)\n current_node = current_node.link", "def __init__(self, tstamp, node1, node2):\n \n self.tstamp = tstamp\n self.name = None\n if node1 < node2:\n self.source = node1\n self.target = node2\n else:\n self.source = node2\n self.target = node1\n\n self.name = Edge.derive_name(node1, node2)", "async def create_link(self, link):\n used = link.pop('used', [])\n link_id = await self.create('core_chain', link)\n for uf in used:\n await self.dao.create('core_used', dict(link_id=link_id, fact_id=uf))", "def __init__(self, name, nodes={}, network=None):\n self.edges = {}\n self.network = network\n for node in nodes:\n WDNode.add_edge(self, node, nodes[node])\n self.name = name", "def link(self, lid1, lid2, linktype=0):\n self.store.add_edge(lid1, lid2, ltype=linktype)", "def _create_link(as1: UserAS, as2: UserAS, ixp: IXP) -> IXPLink:\n if1 = _create_peering_interface(as1, ixp)\n if2 = _create_peering_interface(as2, ixp)\n return IXPLink.objects.create(Link.PEER, if1, if2, ixp)", "def create_or_update(\n self,\n resource_group_name: str,\n network_security_perimeter_name: str,\n link_name: str,\n parameters: IO,\n *,\n content_type: str = \"application/json\",\n **kwargs: Any\n ) -> _models.NspLink:", "def __init__(self, node_id, location=None, power=None, node_parameters=None, protocol_manager=None):\n self.location = location\n self.id = str(node_id)\n self.power = power or 1", "def __init__(self, nodes, probability):\n self.nodes = nodes\n self.probability = probability\n super().__init__()", "def test_learningrule_attr(seed):\n def check_rule(rule, conn, rule_type):\n assert rule.connection is conn and rule.learning_rule_type is rule_type\n\n with nengo.Network(seed=seed):\n a, b, e = [nengo.Ensemble(10, 2) for i in range(3)]\n # nengo.Connection(e, b) # dummy error connection\n\n r1 = PES()\n c1 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r1)\n check_rule(c1.learning_rule, c1, r1)\n\n r2 = [PES(), BCM()]\n c2 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r2)\n assert isinstance(c2.learning_rule, list)\n for rule, rule_type in zip(c2.learning_rule, r2):\n check_rule(rule, c2, rule_type)\n\n r3 = dict(oja=Oja(), bcm=BCM())\n c3 = nengo.Connection(a.neurons, b.neurons, learning_rule_type=r3)\n assert isinstance(c3.learning_rule, dict)\n assert 
set(c3.learning_rule) == set(r3) # assert same keys\n for key in r3:\n check_rule(c3.learning_rule[key], c3, r3[key])", "def __init__(self):\n\n # Initialize the parent class\n super(LearningSwitch, self).__init__()\n\n # initialize the forwarding table to empty.\n # This may need to be updated if a different topology is used.\n self.fwd_table = {}\n self.fwd_table['1'] = {}\n self.fwd_table['2'] = {}\n self.fwd_table['3'] = {}\n self.fwd_table['4'] = {}\n self.fwd_table['5'] = {}\n\n # only use one flood instance - this is the default policy\n self.flood = flood()\n\n # get the first packet from each new MAC address on a switch\n new_pkts = packets(1, ['srcmac', 'switch'])\n new_pkts.register_callback(self.learn_route)\n self.query = new_pkts\n\n # Initialize the policy\n self.push_rules()", "def add_node(self, name, value=None):\n\n # **to do: create an empty node object, assign its attributes**\n # **hint 1: how is an empty network object created in datalab_practice.py?**\n # **hint 2: take a look Section 0.6 in python101.ipynb, particularly attribute assignment**\n # **hint 3: what values do the method arguments NAME and VALUE take when the add_node method\n # is called in datalab_practice.py?**\n # **hint 4: what does the input argument 'self' represent in this method?**\n\n # 1. WRITE PSEUDOCODE BELOW (optional, recommended)\n # **your pseudocode here**\n\n # 2. IMPLEMENT COMMANDS FOR YOUR PSEUDOCODE\n # ___\n node = Node()\n node.name = name\n node.value = value # replace this command\n\n # 3. THINK VERY CAREFULLY ABOUT WHAT THE NEXT COMMAND IS DOING\n # append node to the list of nodes\n self.nodes.append(node)", "def set_link(self, link):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.node.link\", self._node._eco_id, link._eco_id)\r\n p2e._app.Exec(arg_str)", "def link2node(self):\n self.link2nodeid = np.zeros((self.linknum, 2), dtype = int)\n \n for i in range(self.linknum):\n self.link2nodeid[i, 0] = self.internet1net2.edgelist[i][\"start node\"]\n self.link2nodeid[i, 1] = self.internet1net2.edgelist[i][\"end node\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return a tuple of the two nodes assigned to the link
def getNodes(self): return (self._n1, self._n2)
[ "def link2node(self):\n self.link2nodeid = np.zeros((self.linknum, 2), dtype = int)\n \n for i in range(self.linknum):\n self.link2nodeid[i, 0] = self.internet1net2.edgelist[i][\"start node\"]\n self.link2nodeid[i, 1] = self.internet1net2.edgelist[i][\"end node\"]", "def link2node(self):\n self.link2nodeid = np.zeros((self.linknum2, 2), dtype = int)\n \n for i in range(self.linknum2):\n self.link2nodeid[i, 0] = self.network2.edgelist[i][\"start node\"]\n self.link2nodeid[i, 1] = self.network2.edgelist[i][\"end node\"]", "def _read_nodes(self):\n nodes = set()\n for e in self.edges:\n nodes.add(e[1])\n nodes.add(e[2])\n return tuple(nodes)", "def _get_edge_repr(cls, node1, node2):\n\t\t\treturn tuple(sorted((node1, node2)))", "def nodes( self ):\n\t\tguard = self.guard\n\t\ttail = guard.r\n\t\thead = guard.l\n\t\treturn guard, tail, head", "def getEdges(self) -> tuple:\n return (self.edge1, self.edge2)", "def pickConnectedNodes(graph):\r\n node1 = random.choice(graph.keys())\r\n node2 = random.choice(graph[node1])\r\n \r\n return (node1, node2)", "def parse_link(self, link):\n # Split source and destination node descriptions\n source, dest = link.split(\"->\")\n\n # Parse the source and destination parameters\n source_node_name, source_plug_name, source_node, source_plug = \\\n self.parse_parameter(source)\n dest_node_name, dest_plug_name, dest_node, dest_plug = \\\n self.parse_parameter(dest)\n\n return (source_node_name, source_plug_name, source_node, source_plug,\n dest_node_name, dest_plug_name, dest_node, dest_plug)", "def node_link_incidence(self):\r\n\r\n # Rows = Nodes and Columns = Links\r\n network = []\r\n for node in self.N:\r\n node_list = []\r\n\r\n for link in self.A:\r\n if node == link[0]:\r\n node_list.append(1)\r\n elif node == link[1]:\r\n node_list.append(-1)\r\n else:\r\n node_list.append(0)\r\n\r\n network.append(node_list)\r\n \r\n return network", "def _node_types_and_ids(weights):\n assert weights.index.nlevels == 2\n weights = weights.reset_index()\n left_name, right_name = weights.columns[:2]\n\n left_type = left_name.split('_')[0]\n right_type = right_name.split('_')[0]\n\n left_nodes = weights[left_name]\n right_nodes = weights[right_name]\n\n return (left_type, left_nodes), (right_type, right_nodes)", "def getVisitableNodes(self):\n\n result = []\n result.extend(self.subnode_args)\n value = self.subnode_name\n if value is None:\n pass\n else:\n result.append(value)\n value = self.subnode_path\n if value is None:\n pass\n else:\n result.append(value)\n return tuple(result)", "def get_nvlink_pairs(topology):\n out = set()\n for device_idx1, item1 in enumerate(topology):\n for device_idx2, item2 in enumerate(item1):\n if is_nvlink(item2):\n if (device_idx2, device_idx1) not in out:\n out.add((device_idx1, device_idx2))\n return out", "def node_tuples(self):\n from pytools import \\\n generate_nonnegative_integer_tuples_summing_to_at_most\n return list(\n generate_nonnegative_integer_tuples_summing_to_at_most(\n self.N, self.dimensions))", "def node_to_tuple(self,node_num):\n row = (node_num-1) / self.cols\n col = (node_num-1) % self.cols\n return (row,col)", "def find_two_nonadjacent(graph, nodes):\n for x, y in combinations(nodes, 2):\n if not graph.are_connected(x, y):\n return x, y", "def _create_links_between_nodes(self, nodes):\n for node in nodes:\n node.left = self._get_left(node.row_id, node.column_id)\n node.right = self._get_right(node.row_id, node.column_id)\n\n # header node does not need up or down links\n if node.value != 'H':\n node.up = 
self._get_up(node.row_id, node.column_id)\n node.down = self._get_down(node.row_id, node.column_id)\n\n # create reference to column header\n if node.value == 1:\n node.column_header = self._get_column_header(node.column_id)\n node.column_header.size += 1", "def __link_nodes(self):\n def __link_north(node):\n if node.x is 0:\n return\n\n pos = (node.x - 1, node.y)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0] - 1, pos[1])\n\n def __link_south(node):\n if node.x is self.maze.height - 1:\n return\n\n try:\n pos = (node.x + 1, node.y)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0] + 1, pos[1])\n except IndexError:\n return\n\n def __link_east(node):\n pos = (node.x, node.y + 1)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0], pos[1] + 1)\n\n def __link_west(node):\n pos = (node.x, node.y - 1)\n step = 0\n\n while self.maze.array[pos[0]][pos[1]] is not 0:\n step = step + 1\n\n if str(pos[0]) + str(pos[1]) in self.graph:\n node.connect(self.graph[str(pos[0]) + str(pos[1])], step)\n break\n pos = (pos[0], pos[1] - 1)\n\n for node in self.graph.values():\n __link_south(node)\n __link_north(node)\n __link_east(node)\n __link_west(node)", "def get_part_of_links(self):\n\n retval = set()\n\n for n1 in self._partOf_adjacency:\n for n2 in self._partOf_adjacency[n1]:\n retval.add((n1, n2))\n\n return retval", "def _link_nodes(first_id, second_id, rel_type, props_str):\n return \"\"\"\nMATCH (n1 {id: \"%s\"})\nMATCH (n2 {id: \"%s\"})\nWITH n1, n2\nMERGE ((n1)-[:%s %s]->(n2));\n\"\"\" % (first_id, second_id, rel_type, props_str)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set the protocol of the link to newProtocol
def setProtocol(self, newProtocol): self._proto = newProtocol
[ "def setProtocol( self, protocol ):\n\t\tself.protocol = protocol", "def set_protocol(self, protocol):\n self.protocol = protocol", "def setProtocol(self, protocol):\n self[SipViaHeader.PARAM_PROTOCOL] = protocol", "def protocol(ctx: Context, protocol_public_id):\n upgrade_item(ctx, \"protocol\", protocol_public_id)", "def do_change_protocol(self, args):\n lb = self.findlb(args.loadbalancer, readonly=False)\n lb.protocol = args.protocol\n lb.update()", "def updateProtocolSection(self):\n self.protocol = self.fileReadMappedSection('ProtocolSection',KEYS_PROTOCOL)", "def SetProtocol(self, protocol):\n protocol = protocol.lower().strip()\n if protocol not in [u'http', u'https']:\n raise ValueError(u'Invalid protocol specified for Viper lookup')\n self._analyzer.SetProtocol(protocol)", "def SetProtocol(self, protocol):\n protocol = protocol.lower().strip()\n if protocol not in self._SUPPORTED_PROTOCOLS:\n raise ValueError(u'Invalid protocol specified for Viper lookup')\n self._protocol = protocol", "def protocol_config(self, protocol_config):\n\n self._protocol_config = protocol_config", "def _set_protocol(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"protocol\", rest_name=\"protocol\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"protocol must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"protocol\", rest_name=\"protocol\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__protocol = t\n if hasattr(self, '_set'):\n self._set()", "def set_macrocycle_protocol(self, macrocycle_protocol):\n self.nmp = 2", "def protocol_identifier(self, protocol_identifier):\n\n self._protocol_identifier = protocol_identifier", "def _set_protocol(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=protocol.protocol, is_container='container', presence=False, yang_name=\"protocol\", rest_name=\"protocol\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Match route on protocol type and sub-type.', u'cli-compact-syntax': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"protocol must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=protocol.protocol, is_container='container', presence=False, yang_name=\"protocol\", rest_name=\"protocol\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Match route on protocol type and sub-type.', 
u'cli-compact-syntax': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__protocol = t\n if hasattr(self, '_set'):\n self._set()", "def set_protocol(url, use_tls):\n url = url.split(\"//\", 1)[-1]\n return PROTOCOLS[use_tls] + url", "def new_url(self, new_url):\n\n self._new_url = new_url", "def set_capability_link(self, link):\n if self._capability.get_link() is None:\n self._capability.set_link(link)", "def set_ssl_protocol(self, ssl_protocol):\n CheckValue.check_int_ge_zero(ssl_protocol, 'ssl_protocol')\n self._ssl_protocol = ssl_protocol\n return self", "def set_ovs_protocol(self):\n\t\tfor sw in setting.switches:\n\t\t\tcmd = \"sudo ovs-vsctl set bridge %s protocols=OpenFlow13\" % sw\n\t\t\tos.system(cmd)", "def onClicked_toolButton_load_protocol(self):\n protocol_path = self.openFileNameDialog_protocol()\n self.protocol = Protocol.load_protocol(protocol_path)\n\n self.write_protocol_to_task_table()\n self.write_protocol_to_cue_table()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the risk value of the link
def getRisk(self): return self._risk
[ "def get_risk(self):\n return str(Decimal(str(self.gui.spn_risk.textFromValue(\n self.gui.spn_risk.value())))\n )", "def get_risk(self, inst):\r\n return self.risk.get_risk(inst)", "def risk(self, dataset, individual_record):\n number_of_visits = len(individual_record.visits)\n instance = (individual_record.visits[0], individual_record.visits[1])\n risk = 0\n arr = array(list(instance), dtype=Trajectory.data_type)\n prob = self.__reidentification_prob(dataset, arr, individual_record.id)\n if prob > risk:\n risk = prob\n return risk", "def get_linkQuality(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YCellular.LINKQUALITY_INVALID\n res = self._linkQuality\n return res", "def ssldssauthorizationsrate(self) :\n try :\n return self._ssldssauthorizationsrate\n except Exception as e:\n raise e", "def risk_score(self) -> Union[float, PaillierCiphertext]:\n if self._risk_score is None:\n raise AttributeError(\"risk score is undefined\")\n return self._risk_score", "def riskType(self):\n if self.riskIncrease == True:\n return 'ARI'\n else:\n return 'ARR'", "def get_eff_credit():\n\tdairy_setting = frappe.get_doc(\"Dairy Setting\")\n\tallow_negative_effective_credit = dairy_setting.get('allow_negative_effective_credit')\n\treturn allow_negative_effective_credit", "def ssldhauthorizationsrate(self) :\n try :\n return self._ssldhauthorizationsrate\n except Exception as e:\n raise e", "def httpsvr404notfoundrate(self) :\n\t\ttry :\n\t\t\treturn self._httpsvr404notfoundrate\n\t\texcept Exception as e:\n\t\t\traise e", "def sslbenullauthorizationsrate(self) :\n try :\n return self._sslbenullauthorizationsrate\n except Exception as e:\n raise e", "def get_price(url):\n global ALLOWANCE\n source = \"\"\n try:\n source = requests.get(url).text\n source = json.loads(source)\n ALLOWANCE = source[\"allowance\"][\"remaining\"]\n except:\n print(\"\\nError loading {}:\\n{}\".format(url, source))\n return \"0\"\n return source[\"result\"][\"price\"]", "def sslrsaauthorizationsrate(self) :\n try :\n return self._sslrsaauthorizationsrate\n except Exception as e:\n raise e", "def csnonhttpprobehitrate(self) :\n\t\ttry :\n\t\t\treturn self._csnonhttpprobehitrate\n\t\texcept Exception as e:\n\t\t\traise e", "def value(self):\n return self.shares() * self.price()", "def cshttpprobehitrate(self) :\n\t\ttry :\n\t\t\treturn self._cshttpprobehitrate\n\t\texcept Exception as e:\n\t\t\traise e", "def risk(self, dataset, individual_record):\n number_of_visits = len(individual_record.visits)\n if self.k > number_of_visits:\n instances = combinations(individual_record.visits, len(individual_record.visits))\n else:\n instances = combinations(individual_record.visits, self.k)\n risk = 0\n for instance in instances:\n arr = array(list(instance), dtype=Trajectory.data_type)\n prob = self.__reidentification_prob(dataset, arr, individual_record.id)\n if prob > risk:\n risk = prob\n return risk", "def earned_value(self): \n \n return self.apc * self.budget", "def sslsslv3handshakesrate(self) :\n try :\n return self._sslsslv3handshakesrate\n except Exception as e:\n raise e" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add the value to the additional dictionary with the key name
def addAdditional(self, name, value): self._additional[name] = value
[ "def add(self,key,value):\n\t\tself.form_dict[key] = value", "def dict_add_dict_to(d, dict_to_add): # How to add the values of one dictionary to another?\n for key, value in dict_to_add.items():\n if key not in d:\n d[key] = value\n else:\n d[key] += value", "def add_dict(from_dict, to_dict):\n for key, stats in from_dict.iteritems():\n if key not in to_dict:\n to_dict[key] = stats.copy()\n else:\n for stat_name, stat in stats.iteritems():\n to_dict[key][stat_name] += stat", "def _add_value(self, name):\n self._values.append(dict(valname=name, valnum=self._num))\n self._num += 1\n return self", "def addOrExpand(d, k, v):\n if k in d:\n d[k] += [v]\n else:\n d[k] = [v]", "def append_tag_dic(tags, id_, k, v, tp):\n dic = {'id': id_,\n 'key': k,\n 'value': v,\n 'type': tp}\n tags.append(dic)", "def appendPlantDict(plantDict, speciesName, Accession_num, bioproject_num, pubdate, title, pubmed_id):\n \n key = speciesName #sets the dictionary key to the species name\n \n values = [Accession_num, bioproject_num, pubdate, title, pubmed_id] #sets dictionary values to appropriate information \n \n plantDict.update({key : values}) #updates existing plantDict for every entry into dictionary\n \n return plantDict #returns completed dictionary ", "def append_val(self, key, val, extra_data):\n raise NotImplementedError", "def addToDic(dic, element):\n dic[\"_total\"] += 1\n dic[\"values\"].append(element)", "def add_in_dict(dict1, dict2):\n new_dict = {}\n new_dict.update(dict1)\n for (k,v) in dict2.items():\n if k in new_dict.keys():\n new_dict[k] += v\n else:\n new_dict[k] = v\n\n return new_dict", "def add_additional_keys(self, data: dict, **kwargs: dict) -> dict:\n data.update(data.pop('additional'))\n if 'additional' in data.keys():\n data.pop('additional')\n return data", "def add(self, key, value):\n self.__dataset[key] = value", "def addToNode(self,name,dic):\n\t\tn = listToPath(name)\n\t\tif not n in self.stats:\n\t\t\tself.stats[n] = dic\n\t\telse:\n\t\t\tself.stats[n].update(dic)\n\t\treturn name", "def add_name_to_key(name: str, dictionary: dict, key: str) -> None:\n if not all([isinstance(name, str), isinstance(key, str),\n isinstance(dictionary, dict)]):\n raise TypeError(ADD_NAME_TO_KEY_ERROR)\n # Check for repeat names while adding.\n if name not in dictionary[key]:\n dictionary[key].append(name)\n dictionary[key].sort() # Sort for test consistency.", "def add(self, dict):\n self.data.update(dict)", "def merge_dict(self, d):\n for key, value in d.items():\n self.modules[key] = self.modules.get(key, 0) + value", "def append(self, key, value):\n if key not in self._fields.keys():\n raise KeyError(key)\n self._values[key].append(value)", "def apply_extra_data(model, key, value):\n model.extra_data[key] = value", "def add(self, key, value):\n log.debug(f\"key: {key} value: {value}\")\n log.debug(f\"key type: {type(key)} value type: {type(value)}\")\n # if it's a list then we serialize to json string format so we can load it back later\n if isinstance(value, list) or isinstance(value, dict):\n self.report[key] = json.dumps(value)\n elif isinstance(value, np.ndarray):\n self.report[key] = json.dumps(value.tolist())\n else:\n self.report[key] = value", "def insert_in_dict(d, name, value, noclobber):\n if noclobber and name in d:\n raise PytestHelperException(\"The pytest_helper function autoimport\"\n \"\\nattempted an overwrite with noclobber set. The attribute\"\n \" is: \" + name)\n try:\n d[name] = value\n except KeyError:\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the value from additional at the key name
def getAdditional(self, name): return self._additional[name]
[ "def _GetValueFromGroup(self, structure, name, key_name):\n structure_value = self._GetValueFromStructure(structure, name)\n return structure_value.get(key_name)", "def __getitem__(self, key):\n query = select([self.store.c.value]).where(self.store.c.key == key)\n result = self.conn.execute(query).fetchone()\n if result:\n return result['value']\n raise KeyError", "def get_extra_data(self, key):\n if not isinstance(key, basestring):\n raise TypeError(\"key can only be an instance of type basestring\")\n value = self._call(\"getExtraData\",\n in_p=[key])\n return value", "def get_info_value(self, key):\n info = self.parse_info(self.get_info())\n if key in info:\n return info[key]\n else:\n return None", "def get(self, key):\n return super(Metainfo, self).get(key)", "def __getitem__( self, key ):\n return self.read( key=key, default=None, raiseOnError=True )", "def get(self, key, alt=None):\n val = None\n if key not in self.props:\n # Check for alternative, which can be either key or value\n if (alt is not None):\n if self.has(alt):\n val = self.get(alt)\n else:\n val = alt\n # Throws exception if the key is not defined.\n else:\n logging.error(\"Key not found: '%s'\", key)\n raise NameError(\"Key not found: \" + key)\n else:\n val = self.props[key]\n\n # Expand parameters\n m = self.varPattern.match(val)\n while (m):\n start = m.start(1)\n end = m.end(1)\n var = m.group(1)\n mkey = var[2:-1]\n mval = self.get(mkey)\n val = val[:start] + mval + val[end:]\n m = self.varPattern.match(val)\n return val", "def __getitem__(self, name):\n if self.bed_vals.has_key(name):\n return self.bed_vals[name]\n else: raise bedError", "def get_value(self, key: str) -> Any:\r\n if self.get_index(key) is None:\r\n return None\r\n return self.hash_table[self.get_index(key)][1]", "def get_data(self, sat, key):\n return self.get(sat, key)[2]", "def _get_value(obj, key, default=missing):\n if \".\" in key:\n return _get_value_for_keys(obj, key.split(\".\"), default)\n else:\n return _get_value_for_key(obj, key, default)", "def __getitem__(self, key: Any) -> Any:\n return self.contents[key]", "def getitem(d:dict, k:list):\n # retrieve from a nested dictionary\n # possible to use dict.get() or operator.getitem()\n return functools.reduce(dict.__getitem__, k, d)", "def get(self, section, key):\n return self[section][key]", "def get_by_complex_key(cls, json_dict, key):\n key_arr = key.strip().split('.')\n value = \"\"\n d = json_dict.copy()\n for k in key_arr:\n if k not in d.keys():\n d = ''\n break\n else:\n d = d[k]\n value = d\n return value", "def _get_field(extras: dict, field_name: str):\n backcompat_prefix = \"extra__dataprep__\"\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix \"\n \"when using this method.\"\n )\n if field_name in extras:\n return extras[field_name] or None\n prefixed_name = f\"{backcompat_prefix}{field_name}\"\n return extras.get(prefixed_name) or None", "def extract(self):\n return self.params.get(self.name, null)", "def get_device_value(self, key: str, subkey: str) -> Any:\n value = None\n if self.coordinator.data is not None and key in self.coordinator.data:\n data = self.coordinator.data[key]\n if subkey in data:\n value = data[subkey]\n return value", "def _get_value_metadata(cfg, data=None):\n if cfg.get('key'):\n return self.metadata.get(cfg.get('key'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove the value from additional at the key name
def removeAdditional(self, name): del self._additional[name]
[ "def remove(key: str, value: object, catname: str=''):", "def remove_item(self, key, value):\n ...", "def remove_property(self, key):", "def _remove_special(cls, data):\n for key in list(data.keys()):\n if key.startswith(\"_\") or key == \"name\":\n del data[key]", "def delete_add_arg(self, node, key):\n if key in self.args[node]['add']:\n del self.args[node]['add'][key]", "def _pop_key(self, doc, key):\n path = key.split('.')\n cur = doc\n for step in path[:-1]:\n cur = cur[step]\n cur.pop(path[-1], None)", "def RemoveKey(self, key):\n\n # Remove from header\n i = self.header.index(key)\n self.header.pop(i)\n\n # Remove from data\n for i, item in enumerate(self.data):\n self.data[i].pop(key)", "def __delitem__(self, key: Union[Any, Sequence[Any]]) -> None:\n self.contents = {\n i: self.contents[i] \n for i in self.contents if i not in more_itertools.always_iterable(key)}\n return self", "def add_additional_keys(self, data: dict, **kwargs: dict) -> dict:\n data.update(data.pop('additional'))\n if 'additional' in data.keys():\n data.pop('additional')\n return data", "def remove(self, key):\n self.react_dict.remove_reaction(key)\n self.react_dict.save_dict_to_file()", "def delete(self,key):\n\t\tdel self.form_dict[key]", "def srem(self, key: str, *args) -> None:\n\n prev_set = self.__get_key(key)\n\n # Ignore if the key is not found\n if prev_set is None:\n return\n\n check_type(prev_set, DataType.SET)\n\n # Remove the values\n for value in args:\n prev_set.data.discard(value)\n\n self.storage[key] = prev_set", "def remove(self, e):\n self.vals.pop(e, None)", "def remove(self, e):\n \n del self.vals[e]", "def delete_remove_arg(self, node, key):\n if key in self.args[node]['remove']:\n del self.args[node]['remove'][key]", "def process_fields_removal(self, source_dict, fields):\n keys = source_dict.keys()\n for key in keys:\n if self.remove:\n if key in fields:\n source_dict.pop(key, None)\n else:\n if key not in fields:\n source_dict.pop(key, None)", "def remove_key(self, key):\n # check if key is valid\n if key != \"\":\n # according to the key it will be determined which list contains this key and the component will be removed\n if self._places.has_key(key):\n return self.remove_place_key(key)\n if self._transitions.has_key(key):\n return self.remove_transition_key(key)\n if self._arcs.has_key(key):\n return self.remove_arc_key(key)\n return False", "def remove_value_from_event(self, index, hadd, value_uuid ):\n pass", "def insert_remove_arg(self, node, key, value):\n if node not in self.args:\n self.args[node] = {}\n if 'remove' not in self.args[node]:\n self.args[node]['remove'] = {}\n self.args[node]['remove'][key] = value", "def remove(obj, key, val=ANY):\n return el.removes(parse(key), obj, val)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get slope of each edge of the polygon
def get_slopes(points) -> list:
    # Get no. of points
    points_length = len(points)
    i = 0
    # Define an empty list to store slopes of all edges
    slopes = []
    while i < points_length:
        # Get indices of the two points of the edge
        if i != points_length - 1:
            j = i + 1
        else:
            j = 0
        # Calculate slope and add it to the list
        slopes.append((points[j][1] - points[i][1]) / (points[j][0] - points[i][0]))
        i += 1

    return slopes
[ "def _get_slope(self):\n return self._slope", "def calc_slope(self):\n sigma_x = np.std(self.x)\n sigma_y = np.std(self.y)\n if sigma_x == 0 or sigma_y == 0:\n self.slope = 0\n else:\n r = np.corrcoef(x=self.x, y=self.y)[1, 0]\n self.slope = r * sigma_y / sigma_x", "def get_slope(self, area) -> 'GeoData':\n ((x_min, x_max), (y_min, y_max)) = area\n\n # extract DEM on a slightly large area to avoid border effects\n dem = self.get_elevation([[x_min - self._elevation_map.pixel_size,\n x_max + self._elevation_map.pixel_size],\n [y_min - self._elevation_map.pixel_size,\n y_max + self._elevation_map.pixel_size]])\n z = dem.data.view(np.float64)\n assert dem.data.shape == z.shape, 'Apparently, the returned DEM is not an array of float'\n\n def rolled(x_roll, y_roll):\n \"\"\"Returns a view of the DEM array rolled on X/Y axis\"\"\"\n return np.roll(np.roll(z, x_roll, axis=0), y_roll, axis=1)\n\n # compute elevation change on x and y direction, cf:\n # http://desktop.arcgis.com/fr/arcmap/10.3/tools/spatial-analyst-toolbox/how-slope-works.htm\n dzdx = rolled(-1, -1) + 2 * rolled(-1, 0) + rolled(-1, 1) - \\\n rolled(1, -1) - 2 * rolled(1, 0) - rolled(1, -1)\n dzdx /= (8 * dem.cell_width)\n dzdy = rolled(1, 1) + 2 * rolled(0, 1) + rolled(-1, 1) - \\\n rolled(1, -1) - 2 * rolled(0, -1) - rolled(-1, -1)\n dzdy /= (8 * dem.cell_width)\n\n # get percentage of slope and the direction of raise and save them as GeoData\n slope_percent = np.sqrt(np.power(dzdx, 2) + np.power(dzdy, 2)) * 100\n raise_dir = np.arctan2(dzdy, dzdx)\n sp = dem.clone(np.array(slope_percent, dtype=[('slope', 'float64')]))\n rd = dem.clone(np.array(raise_dir, dtype=[('raise_dir', 'float64')]))\n\n # combine slope and raise direction into one GeoData and fit it to the area originally asked\n result = sp.combine(rd)\n result.data = result.data[1:dem.data.shape[0]-1, 1:dem.data.shape[1]-1, ...]\n result.max_x -= 2\n result.max_y -= 2\n return result", "def slope_binning(self, lines):\n positve_idx = 0\n negative_idx = 1\n\n binnings = [[], []]\n slopes = [[], []]\n binnings[positve_idx] = []\n binnings[negative_idx] = []\n\n for line in lines:\n dx, dy = line[0][0:2] - line[0][2:4]\n slope = dy / dx\n if slope >= 0:\n binnings[positve_idx].append(line)\n slopes[positve_idx].append(slope)\n else:\n binnings[negative_idx].append(line)\n slopes[negative_idx].append(slope)\n\n return binnings, slopes", "def slope(p1,p2):\n return (p2[1] - p1[1])/(p2[0] - p1[0])", "def calc_average_line_slope(self) -> int:\n\n avg_slope = 0\n\n i = 1\n while i < len(self.valleys):\n u = self.valleys[i - 1]\n d = self.valleys[i]\n avg_slope += self.calc_range_slope(u, d)\n i += 1\n\n return int(avg_slope // (len(self.valleys) - 1))", "def slopeFromOrigin(self):\n if self.x:\n return self.y/self.x", "def derive_slope(rise, run):\n difference_run = np.diff(run)\n difference_rise = np.diff(rise)\n derivative = difference_rise/difference_run\n\n return derivative", "def compute_fit_slope(y, x):\n _, m = P.polyfit(x, y, 1)\n return -m", "def get_slope (feature, bin_mask, bins, workspace, raster_scaling = 1000, bin_size = 50):\r\n slope_list = [str(0.0)] * len(bins) # string list of 0.0 to return\r\n bin_list = bins # List of bin values\r\n centerline_list = [] # List to hold current features length and slope values\r\n \r\n try:\r\n rows = ARCPY.SearchCursor (bin_mask)\r\n for row in rows: # For each bin within the bin mask\r\n elevation_bin = int(row.GRIDCODE / raster_scaling) # Get bin value\r\n \r\n # Clip centerline to current bin and calculate it's length\r\n 
clipped_line = ARCPY.Clip_analysis (feature, row.shape, 'in_memory\\\\clipped_line' )\r\n ARCPY.CalculateField_management(clipped_line, 'LENGTH', 'float(!shape.length@meters!)', 'PYTHON')\r\n \r\n length = 0\r\n try: # Fails if feature is empty (i.e. there is no centerline in the bin\r\n # Open clipped line segment and look for it's length\r\n clip_rows = ARCPY.SearchCursor (clipped_line)\r\n for clip_row in clip_rows:\r\n length += clip_row.LENGTH # Get length\r\n del clip_row, clip_rows\r\n except: pass\r\n \r\n if length <> 0:\r\n # Get number of multi-part features\r\n m_to_s = ARCPY.MultipartToSinglepart_management (clipped_line, 'in_memory\\\\m_to_s')\r\n feature_count = int(ARCPY.GetCount_management(m_to_s).getOutput(0))\r\n \r\n # If there is a line segment, calculate slope and append it list\r\n if feature_count == 1: # with elevation bin value\r\n center_slope = round(math.degrees(math.atan(float(bin_size) / length)), 1)\r\n centerline_list.append([elevation_bin, center_slope])\r\n elif feature_count > 1:\r\n centerline_list.append([elevation_bin, 'NA']) # If multi-part\r\n \r\n ARCPY.Delete_management(m_to_s) # Clean up temporary clip\r\n ARCPY.Delete_management(clipped_line) # Clean up temporary clip\r\n del row, rows \r\n \r\n # Get a list of elevation bins for the centerline for finding min and max bin\r\n centerline_bins = [] \r\n for item in centerline_list:\r\n centerline_bins.append(item[0])\r\n \r\n # Look to see if there is a slope value for the given bin\r\n for index, entry in enumerate (bin_list): # For each bin (all of them)\r\n bin_number = int(entry[1:]) # Convert string to int ('B150' to 150)\r\n for item in centerline_list: # For each item in current feature\r\n if item[0] == bin_number: # If item bin matches all bin \r\n slope_list[index] = str(item[1]) # Place slope value\r\n if min(centerline_bins) == bin_number or max(centerline_bins) == bin_number:\r\n slope_list[index] = str('NA') # Place slope na if min or max bin\r\n \r\n return slope_list, False # Return current features slope values\r\n except:\r\n return slope_list, True # Return anything that was run or empty list of '0.0'\r", "def compute_slope_intercept(x, y):\r\n mean_x = np.mean(x)\r\n mean_y = np.mean(y) \r\n m = sum([(x[i] - mean_x) * (y[i] - mean_y) for i in range(len(x))]) \\\r\n / sum([(x[i] - mean_x) ** 2 for i in range(len(x))])\r\n # y = mx + b => y - mx\r\n b = mean_y - m * mean_x\r\n return m, b", "def get_filter_slope(self):\n return self.slopes[np.int(self.query(\"OFSL?\"))]", "def slope_from_coords_only(coord_list, stride_length=250.0):\n # TODO: put time and space thresholds for sane returns\n elevation_list = []\n slope_list = []\n bearing_list = []\n for i, coord in enumerate(coord_list[:-1]):\n next_coord = coord_list[i+1]\n this_bearing = bearing(coord[0], coord[1], next_coord[0], next_coord[1])\n\n this_elevation, this_slope = slope_from_coord_bearing(coord[0], coord[1], this_bearing,\n stride_length=stride_length)\n elevation_list.append(this_elevation)\n slope_list.append(this_slope)\n bearing_list.append(this_bearing)\n\n elevation_list.append(get_elevation_safe(coord_list[-1][0], coord_list[-1][1]))\n slope_list.append(None)\n bearing_list.append(None)\n return elevation_list, slope_list, bearing_list", "def edge_lengths(self):\n points = list(self.base_piece.polygon.points())\n NUM = 4\n assert len(points) == NUM\n return [(points[i] - points[(i+1) % NUM]).norm() for i in range(NUM)]", "def elevation_slope(elevation, grad=None):\n\n if grad is None:\n grad = 
elevation_gradient(elevation)\n\n dx = grad.data[:,:,0]\n dy = grad.data[:,:,1]\n return similar_raster(np.sqrt(dx*dx + dy*dy), elevation)", "def calc_curvature_of_polyline(polyline: np.ndarray) -> float:\n dx_dt = np.gradient(polyline[:, 0])\n dy_dt = np.gradient(polyline[:, 1])\n d2x_dt2 = np.gradient(dx_dt)\n d2y_dt2 = np.gradient(dy_dt)\n curvatureArray = np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5\n curvature = 0\n for elem in curvatureArray:\n curvature = curvature + abs(elem)\n return curvature", "def slope_intercept(self, x: float) -> float:\n b = self.origin.y if self.slope < 0 else self.origin.y - self.size.height # y-intercept\n return self.slope * x + b # y = mx + b", "def polylines(self):\n\n\t\treturn [[self.vertex_coordinates(vkey) for vkey in polyedge] for polyedge in self.polyedges()]", "def pos_to_slope_interp(l: list) -> list:\n\n output = []\n # for sequential pairs in landscape function\n for [[x0,y0], [x1,y1]] in zip(l,l[1:]):\n slope = (y1 - y0)/(x1 - x0)\n output.append([x0,slope])\n output.append([l[-1][0],0])\n return output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw the circle obstacle on the map image
def draw_circle(self) -> None:
    # Define parameters of circular obstacles
    circle = [25, (225, 50)]
    # Define center of the circle
    a = circle[1][0]
    b = circle[1][1]
    # Define radius of the circle
    r = circle[0]
    # Draw the circle
    for y in range(self.height):
        for x in range(self.width):
            if (x - a) ** 2 + (y - b) ** 2 <= r ** 2:
                self.world_img[y][x] = (0, 0, 0)
[ "def _draw_map(self):\n\n for obstacle in self._obstacles:\n obstacle.draw(self._axes)", "def draw(self, image, px, py, angle, color, map_resolution, alpha=1.0, draw_steering_details=True):", "def _draw_circle(self):\n pygame.draw.circle(self.screen, GREY,\n (BOARD_WIDTH//2, BOARD_HEIGHT - DIST_TO_BOTTOM),\n CIRCLE_RADIUS, CIRCLE_WIDTH)", "def __draw_objects(self, img):\n if self.ball:\n (x, y), radius = self.ball\n cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 0), 2)\n cv2.putText(img, \"BALL\", (int(x)+15, int(y)-15), cv2.FONT_ITALIC, 0.6, (0, 0, 255, 255), 2)\n for goal in [self.goal_y, self.goal_b]:\n if goal:\n (x, y), (w, h) = goal\n p1 = (int(x - w/2), int(y - h/2))\n p2 = (int(x + w/2), int(y + h/2))\n cv2.rectangle(img, p1, p2, (0, 255, 0), 2)\n cv2.putText(img, \"GOAL\", (p1[0], p1[1]-10), cv2.FONT_ITALIC, 0.6, (0, 0, 255, 255), 2)", "def plot_obstacles(self, labels=False):\n MCR.plot_shapes(self.obstacles, labels)\n MCR.plot_points([self.start, self.goal])", "def draw_pupil(image, pts):\r\n cv2.circle(image, (int(pts[0][0].round()), int(pts[0][1].round())), 2, (0,255,0), -1)", "def mark_image(image, ball):\n\t# Draw the outer circle\n\tcv2.circle(image, (ball[0], ball[1]), ball[2], (0, 255, 0), 2)\n\t#Draw the centre of the circle\n\tcv2.circle(image, (ball[0], ball[1]), 2, (0, 128, 255), 3)\n\treturn image", "def draw_pawn(self):\n pygame.draw.circle(self.screen, self.color, self.rect.center, int(self.radius))\n if self.promoted == 1:\n self.image_rect.centerx = self.rect.centerx\n self.image_rect.centery = self.rect.centery\n\n self.screen.blit(self.image, self.image_rect)", "def plot_map(self, goal = None, path = None):\n\t\tself.ax.cla()\n\t\tx1,y1 = self.start\n\t\tself.ax.imshow(self.permissible_region, cmap='gray', \n\t\t\textent=[self.x0, self.x0 - self.map_width, self.y0 - self.map_height, self.y0])\n\t\tself.ax.scatter(x1,y1, c='g')\n\n\t\t# checks if goal exists\n\t\tif goal is not None:\n\t\t\tx2,y2 = goal\n\t\t\tprint goal\n\t\t\tself.ax.scatter(x2,y2,c='r')\n\t\t\tplt.pause(.01)\n\n\t\t# checks if path exists\n\t\tif path is not None:\n\t\t\tfor i in range(len(path)):\n\t\t\t\tif i < len(path)-1:\n\t\t\t\t\tx0,y0 = path[i]\n\t\t\t\t\tx1,y1 = path[i+1]\n\t\t\t\t\tself.ax.plot([x0,x1],[y0,y1],'b')\n\t\t\t\t\tplt.pause(.01)\n\n\t\tself.ax.draw", "def drawCircle(image, center, radius=1, color=COLOR_YELLOW, thickness=-1):\n\n cv.circle(image, center, radius, color, thickness)", "def draw(self, extras=False):\n im = self.image\n for y in range(6):\n for x in range(6):\n #draw the dots\n cv2.rectangle(im, tuple(np.array(maze_to_image_coords(x, y))-1),\n tuple(np.array(maze_to_image_coords(x, y))+1), (100,100,100), -1)\n\n #draw any walls present\n if self.hedge_mask[y][x]:\n hline(im, x, y)\n if self.vedge_mask[y][x]:\n vline(im, x, y)\n if extras:\n #draw the start / target\n x, y = self.target\n cv2.circle(im, tuple(maze_to_image_coords(x, y)), 8, (0, 0, 255), -1)\n x, y = self.start\n cv2.circle(im, tuple(maze_to_image_coords(x, y)), 5, (50, 50, 220), -1)", "def draw(o,image,pt):\n pass", "def plot_circle( x0, y0, r, **style):\n plot_ellipse( x0, y0, r, r, 0, **style)", "def _draw_obstacles(self):\n for obstacle_set in self.obstacle_manager:\n for obstacle in obstacle_set:\n pygame.draw.rect(self.screen, WHITE, obstacle.get_rect())", "def show_obstacle(screen, menu, game_engine, game_map=None, param=None):\n menu.init_menu(screen)\n plantPath = os.path.join(basePath, param.picture_path)\n menu.plant_im = pygame.image.load(plantPath).convert_alpha()\n 
menu.plant_im = pygame.transform.scale(menu.plant_im, (350, 200))", "def display_probability_map(self,p):\r\n\t self.screen.fill((0,0,0))\r\n\t for j in range(0,10):\r\n\t\t for i in range(0,10):\r\n\t\t\t pygame.draw.rect(self.screen,(50+205*p[i][j],0,50+205*p[i][j]),(i*64,j*48,64,48))\r\n\t pygame.draw.circle(self.screen,(255,255,255),(self.y*64+32,self.x*48+24),24)\r\n\t pygame.display.update()\r\n\t pygame.time.wait(50)", "def draw_room(self):\n for obstacle in self.environment.obstacles:\n self.canvas.create_rectangle(obstacle.x1, obstacle.y1, obstacle.x2, obstacle.y2, fill=obstacle.color)", "def drawCollider(self):\n pygame.draw.circle(Display.DISPLAYSURF, Display.BLACK,\n (self.collisionx, self.collisiony), self.size + 2,\n 1)", "def draw_detectron_poses(human,frame):\n\t\timage_h, image_w = frame.shape[:2]\n\t\tfor i,keypoint in enumerate(human):\n\t\t\t#center = (int(keypoint[0] * image_w + 0.5), int(keypoint[1] * image_h + 0.5))\n\t\t\t#cv2.circle(frame, tuple(keypoint), 3, common.CocoColors[i], thickness=3, lineType=8, shift=0)\n\t\t\tcv2.putText(frame, str(i),tuple(keypoint),cv2.FONT_HERSHEY_SIMPLEX,.35,3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw the convex polygon, rectangle and rhombus on the map image
def draw_polygons(self) -> None:
    # Coordinates of the convex polygon
    coord_polygon = np.array([(20, self.height - 120),
                              (25, self.height - 185),
                              (75, self.height - 185),
                              (100, self.height - 150),
                              (75, self.height - 120),
                              (50, self.height - 150)], dtype=np.int32)
    # Coordinates of the rectangle
    coord_rectangle = np.array([(95 - 75 * np.cos(self.DEG_30), self.height - 75 * np.sin(self.DEG_30) - 30),
                                (95 - 75 * np.cos(self.DEG_30) + 10 * np.cos(self.DEG_60),
                                 self.height - 75 * np.sin(self.DEG_30) - 10 * np.sin(self.DEG_60) - 30),
                                (95 + 10 * np.cos(self.DEG_60), self.height - 10 * np.sin(self.DEG_60) - 30),
                                (95, self.height - 30)], dtype=np.int32).reshape((-1, 2))
    # Coordinates of the rhombus
    coord_rhombus = np.array([(300 - 75 - (50 / 2), self.height - (30 / 2) - 10),
                              (300 - 75, self.height - 30 - 10),
                              (300 - 75 + (50 / 2), self.height - (30 / 2) - 10),
                              (300 - 75, self.height - 10)], dtype=np.int32).reshape((-1, 2))
    last_poly_slope = ((coord_polygon[2][1] - coord_polygon[5][1]) /
                       (coord_polygon[2][0] - coord_polygon[5][0]))
    # Get slopes of all the edges of the convex polygon, rectangle, and rhombus
    slopes_poly = get_slopes(coord_polygon)
    slopes_rect = get_slopes(coord_rectangle)
    slopes_rhombus = get_slopes(coord_rhombus)
    for y in range(self.height):
        for x in range(self.width):
            # Get y values for each edge of the convex polygon
            y_poly = get_y_values(x, slopes_poly, coord_polygon, 6)
            y_poly.append(last_poly_slope * (x - coord_polygon[5][0]) + coord_polygon[5][1])
            # Get y values for each edge of the rectangle
            y_rect = get_y_values(x, slopes_rect, coord_rectangle, 4)
            # Get y values for each edge of the rhombus
            y_rhom = get_y_values(x, slopes_rhombus, coord_rhombus, 4)
            # Draw the convex polygon
            if y_poly[0] <= y <= y_poly[6] and y_poly[1] <= y <= y_poly[5]:
                self.world_img[y][x] = (0, 0, 0)
            elif y_poly[2] <= y <= y_poly[4] and y_poly[6] <= y <= y_poly[3]:
                self.world_img[y][x] = (0, 0, 0)
            # Draw the tilted rectangle
            elif y_rect[0] <= y <= y_rect[2] and y_rect[1] <= y <= y_rect[3]:
                self.world_img[y][x] = (0, 0, 0)
            # Draw the rhombus
            elif y_rhom[0] <= y <= y_rhom[3] and y_rhom[1] <= y <= y_rhom[2]:
                self.world_img[y][x] = (0, 0, 0)
[ "def draw(self, image, px, py, angle, color, map_resolution, alpha=1.0, draw_steering_details=True):", "def fillConvexPoly(img, points, color, lineType=..., shift=...) -> img:\n ...", "def draw_poly(self, param): \n\n poly = json.loads( param['poly'] )\n zoom = param['zoom']\n\n width = 2**zoom * param['img_width'] \n height = 2**zoom * param['img_height']\n\n gifparam = 'GIF:' + self.work_dir + 'foreground.gif'\n\n ## .................................\n\n ptargs = []\n lnargs = []\n \n ptrad = 3\n\n if len(poly) > 0 :\n\n ptargs = ['-stroke', '#00FF00', '-strokewidth', \\\n '1.5', '-fill', '#00FF00']\n\n ptargs += ['-draw', ' '.join(['circle %(x)i,%(y)i %(x)i,%(y_r)i'\n % {'x' : x * 2 ** zoom,\n 'y' : y * 2 ** zoom,\n 'y_r' : y * 2 ** zoom - ptrad}\n for (x, y) in poly])]\n ## ...........\n\n lnargs = ['-stroke', '#FF0000', '-fill', 'none']\n\n lnargs += ['-draw', ('polyline ' + ' '.join(['%(x)i,%(y)i '\n % {'x' : x * 2 ** zoom,\n 'y' : y * 2 ** zoom}\n for (x, y) in poly]))]\n \n if (2 < len(poly)):\n lnargs += ['-strokewidth', '.5', '-draw', 'line %i,%i %i,%i' \\\n % tuple([2 ** zoom * p for p in poly[-1] + poly[0]])]\n\n ## .................................\n\n cmdrun = ['convert', '-quality', '100', '+antialias', '-size', \\\n str(width) + 'x' + str(height), \\\n 'xc:transparent'] + ptargs + lnargs + [gifparam ]\n subprocess.Popen( cmdrun ).wait()", "def DrawPolygonLayer(self, dc, data, map_rel):\n\n # draw polygons on map/view\n polygons = []\n lines = []\n pens = []\n brushes = []\n if map_rel:\n # Draw points on map/view, using transparency if implemented.\n try:\n dc = wx.GCDC(dc)\n except NotImplementedError:\n pass\n for (p, place, width, colour, closed,\n filled, fillcolour, x_off, y_off, pdata) in data:\n # gather all polygon points as view coords\n p_lonlat = []\n for lonlat in p:\n (x, y) = self.ConvertGeo2View(lonlat)\n if closed:\n p_lonlat.append((x + x_off, y + y_off))\n else:\n p_lonlat.extend((x + x_off, y + y_off))\n\n pens.append(wx.Pen(colour, width=width))\n\n if filled:\n brushes.append(wx.Brush(fillcolour))\n else:\n brushes.append(wx.TRANSPARENT_BRUSH)\n\n if closed:\n polygons.append(p_lonlat)\n else:\n lines.append(p_lonlat)\n else:\n (dc_w, dc_h) = dc.GetSize()\n dc_w2 = dc_w / 2\n dc_h2 = dc_h / 2\n dc_w -= 1\n dc_h -= 1\n dc = wx.GCDC(dc) # allow transparent colours\n for (p, place, width, colour, closed,\n filled, fillcolour, x_off, y_off, pdata) in data:\n # fetch the exec code, don't refetch for each point in polygon\n place_exec = self.poly_view_placement[place]\n pp = []\n for (x, y) in p:\n exec(place_exec)\n if closed:\n pp.append((x, y))\n else:\n pp.extend((x, y))\n\n pens.append(wx.Pen(colour, width=width))\n if filled:\n brushes.append(wx.Brush(fillcolour))\n else:\n brushes.append(wx.TRANSPARENT_BRUSH)\n\n if closed:\n polygons.append(pp)\n else:\n lines.append(pp)\n if len(lines):\n dc.DrawLineList(lines, pens=pens)\n if len(polygons):\n dc.DrawPolygonList(polygons, pens=pens, brushes=brushes)", "def draw(o,image,pt):\n pass", "def _draw_map(self):\n\n for obstacle in self._obstacles:\n obstacle.draw(self._axes)", "def draw(self, region, style):\n\n def trans_long():\n '''\n Translates longitudes for mapping.\n '''\n return [Plot.interpolate(x, self.min_long, self.max_long, self.width) for x in region.longs()]\n\n def trans_lat():\n '''\n Translates latitudes for mapping.\n '''\n return [self.height - Plot.interpolate(x, self.min_lat, self.max_lat, self.height) for x in region.lats()]\n\n ImageDraw.Draw(self.image).polygon([(x,y) for (x,y) in 
zip(trans_long(), trans_lat())], Plot.fill(region, style), outline=(0,0,0))", "def demo_polygons_transforms_polygons_bounding_boxes(cls):\n\n image = np.copy(cls.image)\n meerkat_left = cls.meerkat_left\n meerkat_center = cls.meerkat_center\n meerkat_right = cls.meerkat_right\n\n # 1\n psoi = imgaug.PolygonsOnImage([meerkat_left, meerkat_center, meerkat_right], shape=image.shape)\n\n # Convert polygons to BBs and put them in BoundingBoxesOnImage instance\n # we will need that instance below to easily draw all augmented BBs on the image\n bbsoi = BoundingBoxesOnImage([polygon.to_bounding_box() for polygon in psoi.polygons], shape=psoi.shape)\n\n # augment image, BBs and polygons\n batch_aug = imgaug_augmenters.Affine(rotate=45)(images=[image], bounding_boxes=bbsoi,\n polygons=psoi, return_batch=True)\n\n images_aug = batch_aug.images_aug\n bbsoi_aug = batch_aug.bounding_boxes_aug\n psoi_aug = batch_aug.polygons_aug\n\n # visualize\n imgaug.imshow(psoi_aug.draw_on_image(bbsoi_aug.draw_on_image(images_aug[0], size=3),\n alpha_face=0.2, size_points=7))\n pass", "def show_contours(image):\n global coords\n global corners\n io.imshow(image)\n for point in coords:\n plt.scatter([point[1]], [point[0]], s=2, c='r')\n for point in corners:\n plt.scatter([point[1]], [point[0]], s=2, c='w')\n plt.show()", "def draw_polylines (self, mm_img, polylines, **kwargs) :\n\n r = 255\n g = 0\n b = 132\n\n b_r = 255\n b_g = 0\n b_b = 132\n\n\tline_width = 2\n\n opacity_fill = .4\n opacity_border = 1\n\n if kwargs.has_key('color') :\n r = kwargs['color'][0]\n g = kwargs['color'][1]\n b = kwargs['color'][2]\n\n if kwargs.has_key('colour') :\n r = kwargs['colour'][0]\n g = kwargs['colour'][1]\n b = kwargs['colour'][2]\n\n if kwargs.has_key('border_color') :\n b_r = kwargs['border_color'][0]\n b_g = kwargs['border_color'][1]\n b_b = kwargs['border_color'][2]\n\n if kwargs.has_key('border_colour') :\n b_r = kwargs['border_colour'][0]\n b_g = kwargs['border_colour'][1]\n b_b = kwargs['border_colour'][2]\n\n if kwargs.has_key('opacity_fill') :\n opacity_fill = kwargs['opacity_fill']\n\n if kwargs.has_key('opacity_border') :\n opacity_border = kwargs['opacity_border']\n\n\tif kwargs.has_key('line_width') :\n line_width = kwargs['line_width']\n\n\t#\n\n cairo_surface = self._setup_surface(mm_img, **kwargs)\n\n\t#\n\n for coords in polylines :\n points = []\n\n for c in coords :\n points.append(self._coord_to_point(c))\n\n if not kwargs.has_key('no_fill') :\n ctx = self._draw_polyline_points(cairo_surface, points)\n ctx.set_source_rgba(r, g, b, opacity_fill)\n ctx.fill()\n\n ctx = self._draw_polyline_points(cairo_surface, points)\n ctx.set_source_rgba(b_r, b_g, b_b, opacity_border)\n ctx.set_line_width(line_width)\n ctx.stroke()\n\n\treturn self._return_surface(cairo_surface, **kwargs)", "def draw(self, region, style):\n new_coords = []\n for i in region.coords:\n new_coords.append((self.interpolate(i[0], self.min_lat, self.max_lat, self.width), self.new_height - self.interpolate(i[1], self.min_long, self.max_long, self.new_height)))\n \n style_translator = None\n if style == \"GRAD\":\n style_translator = Plot.gradient(region)\n elif style == \"SOLID\":\n style_translator = Plot.solid(region)\n else:\n print(\"Error: style should be 'GRAD' or 'SOLID'\")\n ImageDraw.Draw(self.image).polygon(new_coords, fill=style_translator, outline=None)", "def show_grids(img, bounding_boxes, facial_landmarks=[], step=1):\n img_copy = img.copy()\n draw = ImageDraw.Draw(img_copy)\n\n for b in bounding_boxes:\n draw.rectangle([(b[0], 
b[1]), (b[2], b[3])],\n outline = 'white')\n\n inx = 0\n for pp in facial_landmarks:\n p = pp.reshape(2,5).T\n p = p.tolist()\n mouth_center = [(p[3][0] + p[4][0]) / 2, (p[3][1] + p[4][1]) / 2]\n eye_center = [(p[0][0] + p[1][0]) / 2, (p[0][1] + p[1][1]) / 2]\n p6 = [(p[2][0] - mouth_center[0])/4 + mouth_center[0],\n (p[2][1] - mouth_center[1])/4 + mouth_center[1]]\n p9 = [p[3][0] - (p[4][0]-p[3][0])/3, p[3][1] - (p[4][1]-p[3][1])/3]\n p10 = [p[4][0] + (p[4][0]-p[3][0])/3, p[4][1] + (p[4][1]-p[3][1])/3]\n p11 = [mouth_center[0] - (eye_center[0] - mouth_center[0]) / 2,\n mouth_center[1] - (eye_center[1] - mouth_center[1]) / 2]\n p12 = [(eye_center[0] -mouth_center[0])/4 + eye_center[0],\n (eye_center[1] - mouth_center[1])/4 + eye_center[1]]\n p13 = [(p[0][0] + p[3][0])/2, (p[0][1] + p[3][1])/2]\n p14 = [(p[1][0] + p[4][0])/2, (p[1][1] + p[4][1])/2]\n\n\n p.append(p6)\n p.append([p[0][0]-3/8*(p[1][0]-p[0][0]), 3/2*p[0][1]-1/2*p[1][1]]) \n p.append([p[1][0]+3/8*(p[1][0]-p[0][0]), 3/2*p[1][1]-1/2*p[0][1]])\n p.append(p9)\n p.append(p10)\n p.append(p11) \n p.append(p12)\n p.append(p13)\n p.append(p14)\n\n\n #for i in range(12):\n # draw.ellipse([\n # (p[i][0]-2.0,p[i][1]-2.0),\n # (p[i][0]+2.0,p[i][1]+2.0)\n # ],outline='white',fill='white')\n\n #draw.ellipse(\n # [(p[1][0]-30.0, p[1][1]-30.0),\n # (p[1][0]+30.0, p[1][1]+30.0)],\n # outline=(136,232,232),\n # width=5\n #)\n\n draw.line(\n ((p[6][0], p[6][1]),\n (p[0][0], p[0][1]),\n (p[12][0], p[12][1]),\n (p[5][0], p[5][1]),\n (p[13][0],p[13][1]),\n (p[1][0], p[1][1]),\n (p[7][0], p[7][1])),\n fill=(136,232,232),\n width=1\n )\n\n draw.line(\n ((p[11][0], p[11][1]),\n (p[7][0], p[7][1]),\n (p[9][0], p[9][1]),\n (p[10][0], p[10][1]),\n (p[8][0], p[8][1]),\n (p[6][0], p[6][1]),\n (p[11][0], p[11][1])),\n fill=(136,232,232),\n width=1\n )\n\n draw.line(\n ((p[11][0], p[11][1]),\n (p[1][0], p[1][1]),\n (p[2][0], p[2][1]),\n (p[5][0], p[5][1]),\n (p[4][0], p[4][1]),\n (p[10][0], p[10][1]),\n (p[3][0], p[3][1]),\n (p[5][0], p[5][1]),\n (p[2][0], p[2][1]),\n (p[0][0], p[0][1]),\n (p[11][0], p[11][1])),\n fill=(136,232,232),\n width=1\n )\n\n return img_copy", "def draw_on_image(self,\n image,\n color=(0, 255, 0), color_face=None,\n color_lines=None, color_points=None,\n alpha=1.0, alpha_face=None,\n alpha_lines=None, alpha_points=None,\n size=1, size_lines=None, size_points=None,\n raise_if_out_of_image=False):\n for poly in self.polygons:\n image = poly.draw_on_image(\n image,\n color=color,\n color_face=color_face,\n color_lines=color_lines,\n color_points=color_points,\n alpha=alpha,\n alpha_face=alpha_face,\n alpha_lines=alpha_lines,\n alpha_points=alpha_points,\n size=size,\n size_lines=size_lines,\n size_points=size_points,\n raise_if_out_of_image=raise_if_out_of_image\n )\n return image", "def main(filenameIn,filenameOut,xmin,ymin,zmin,dx,dy,dz,render):\n # print vtk.VTK_MAJOR_VERSION # Check the version\n # Read the file and create polydata\n reader = vtk.vtkSTLReader()\n reader.SetFileName(filenameIn)\n # Define planes for clipping\n Origins=[\n [xmin,ymin,zmin],\n [xmin,ymin,zmin],\n [xmin,ymin,zmin],\n [xmin+dx,ymin+dy,zmin+dz],\n [xmin+dx,ymin+dy,zmin+dz],\n [xmin+dx,ymin+dy,zmin+dz],\n ]\n Normals=[\n [[-1,0,0],[0,-1,0],[0,0,-1],[-1,0,0],[0,-1,0],[0,0,-1]],\n [[+1,0,0],[0,-1,0],[0,0,-1],[-1,0,0],[0,-1,0],[0,0,-1]],\n [[+1,0,0],[0,-1,0],[0,0,-1],[+1,0,0],[0,-1,0],[0,0,-1]],\n [[-1,0,0],[0,+1,0],[0,0,-1],[-1,0,0],[0,-1,0],[0,0,-1]],\n [[+1,0,0],[0,+1,0],[0,0,-1],[-1,0,0],[0,-1,0],[0,0,-1]],\n 
[[+1,0,0],[0,+1,0],[0,0,-1],[+1,0,0],[0,-1,0],[0,0,-1]],\n [[-1,0,0],[0,+1,0],[0,0,-1],[-1,0,0],[0,+1,0],[0,0,-1]],\n [[+1,0,0],[0,+1,0],[0,0,-1],[-1,0,0],[0,+1,0],[0,0,-1]],\n [[+1,0,0],[0,+1,0],[0,0,-1],[+1,0,0],[0,+1,0],[0,0,-1]],\n\n [[-1,0,0],[0,-1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,-1]],\n [[+1,0,0],[0,-1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,-1]],\n [[+1,0,0],[0,-1,0],[0,0,+1],[+1,0,0],[0,-1,0],[0,0,-1]],\n [[-1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,-1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,-1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[+1,0,0],[0,-1,0],[0,0,-1]],\n [[-1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,+1,0],[0,0,-1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,+1,0],[0,0,-1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[+1,0,0],[0,+1,0],[0,0,-1]],\n\n [[-1,0,0],[0,-1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,+1]],\n [[+1,0,0],[0,-1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,+1]],\n [[+1,0,0],[0,-1,0],[0,0,+1],[+1,0,0],[0,-1,0],[0,0,+1]],\n [[-1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,+1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,-1,0],[0,0,+1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[+1,0,0],[0,-1,0],[0,0,+1]],\n [[-1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,+1,0],[0,0,+1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[-1,0,0],[0,+1,0],[0,0,+1]],\n [[+1,0,0],[0,+1,0],[0,0,+1],[+1,0,0],[0,+1,0],[0,0,+1]],\n ]\n # Define directions for moving clipped regions\n Direction=[\n [dx,dy,dz],\n [0,dy,dz],\n [-dx,dy,dz],\n [dx,0,dz],\n [0,0,dz],\n [-dx,0,dz],\n [dx,-dy,dz],\n [0,-dy,dz],\n [-dx,-dy,dz],\n [dx,dy,0],\n [0,dy,0],\n [-dx,dy,0],\n [dx,0,0],\n [0,0,0],\n [-dx,0,0],\n [dx,-dy,0],\n [0,-dy,0],\n [-dx,-dy,0],\n [dx,dy,-dz],\n [0,dy,-dz],\n [-dx,dy,-dz],\n [dx,0,-dz],\n [0,0,-dz],\n [-dx,0,-dz],\n [dx,-dy,-dz],\n [0,-dy,-dz],\n [-dx,-dy,-dz],\n ]\n regions=[]\n n=27\n for j in xrange(n):\n polydata=reader\n # Clip it with all 6 planes\n for i in xrange(6):\n plane=vtk.vtkPlane()\n plane.SetOrigin(Origins[i])\n plane.SetNormal(Normals[j][i])\n clipper = vtk.vtkClipPolyData()\n clipper.SetInputConnection(polydata.GetOutputPort())\n clipper.SetClipFunction(plane)\n polydata=clipper\n polydata.Update()\n # Move it if not empty\n if polydata.GetOutput().GetLength()>0:\n transform = vtk.vtkTransform()\n transform.Translate(Direction[j])\n transformFilter = vtk.vtkTransformPolyDataFilter()\n transformFilter.SetTransform(transform)\n transformFilter.SetInputConnection(polydata.GetOutputPort())\n transformFilter.Update()\n regions.append(vtk.vtkPolyData())\n regions[j].ShallowCopy(transformFilter.GetOutput())\n else:\n regions.append(vtk.vtkPolyData())\n regions[j].ShallowCopy(polydata.GetOutput())\n # Append the all regions\n appendFilter = vtk.vtkAppendPolyData()\n if vtk.VTK_MAJOR_VERSION <= 5:\n for j in xrange(n):\n appendFilter.AddInputConnection(regions[j].GetProducerPort())\n else:\n for j in xrange(n):\n appendFilter.AddInputData(regions[j])\n appendFilter.Update()\n # Remove any duplicate points\n cleanFilter = vtk.vtkCleanPolyData()\n cleanFilter.SetInputConnection(appendFilter.GetOutputPort())\n cleanFilter.Update()\n # One more rotation - not needed\n # transform = vtk.vtkTransform()\n # transform.Translate(-6,-6,-6)\n # transformFilter = vtk.vtkTransformPolyDataFilter()\n # transformFilter.SetTransform(transform)\n # transformFilter.SetInputConnection(cleanFilter.GetOutputPort())\n # transformFilter.Update()\n # transform = vtk.vtkTransform()\n # transform.RotateWXYZ(90,1,0,0)\n # transform.RotateWXYZ(-90,0,1,0)\n # transformFilter2 = vtk.vtkTransformPolyDataFilter()\n # 
transformFilter2.SetTransform(transform)\n # transformFilter2.SetInputConnection(transformFilter.GetOutputPort())\n # transformFilter2.Update()\n # transform = vtk.vtkTransform()\n # transform.Translate(6,6,6)\n # transformFilter = vtk.vtkTransformPolyDataFilter()\n # transformFilter.SetTransform(transform)\n # transformFilter.SetInputConnection(transformFilter2.GetOutputPort())\n # transformFilter.Update()\n # Final data to be saved and displayed\n finalData=cleanFilter\n # Write the stl file to disk\n stlWriter = vtk.vtkSTLWriter()\n stlWriter.SetFileName(filenameOut)\n stlWriter.SetInputConnection(finalData.GetOutputPort())\n stlWriter.Write()\n if render:\n # Create mappper and actor for rendering\n mapper = vtk.vtkPolyDataMapper()\n if vtk.VTK_MAJOR_VERSION <= 5:\n mapper.SetInput(finalData.GetOutput())\n else:\n mapper.SetInputConnection(finalData.GetOutputPort())\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n # Create a rendering window and renderer\n ren = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n # Create a renderwindowinteractor\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n # Assign actor to the renderer\n ren.AddActor(actor)\n # Enable user interface interactor\n iren.Initialize()\n renWin.Render()\n iren.Start()", "def draw_area():\n\n fov_information_title = \"FoV center (ra \"+str(ra) + \" \" +\"dec \"+ str(dec)+\")\" + \"; \" + \"prob: \" + str(prob_fov)+ \\\n \";\" + \" \" + \"Moon\" + \" \" + \"(illumi.:\" + \" \" + str(moon_illumination) + \" \" + \\\n \"dist.:\" + \" \" + str(sep_fov_moon) + \")\"\n \n f.suptitle(fov_information_title, fontsize=10)\n \n canvas = FigureCanvasTkAgg(f, self) \n canvas.draw()\n canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)\n\n toolbar = NavigationToolbar2Tk(canvas, self)\n toolbar.update()\n canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=True)", "def polygon(self, points, color):\n i = 0\n matrix = np.zeros((len(x),2), dtype=np.int32)\n while i < len(points):\n matrix[i] = points[i]\n i+=1\n matrix.reshape((-1,1,2))\n polygon = cv2.polylines(self.img, [matrix], True, (255,0,0),\\\n thickness=3)\n cv2.imshow('polygon', self.img)", "def visualize_region_search_around_poly(binary_warped, left_lane_inds, right_lane_inds, left_fitx, right_fitx, margin_around_line, ploty, nonzeroy, nonzerox):\n # Create an image to draw on and an image to show the selection window\n out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255\n window_img = np.zeros_like(out_img)\n # Color in left and right line pixels\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n\n # plt.imshow(out_img)\n # plt.title('out_img', fontsize=10)\n # mpimg.imsave(\"out_img.png\", out_img)\n # plt.show()\n\n # Generate a polygon to illustrate the search window area\n # And recast the x and y points into usable format for cv2.fillPoly()\n left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin_around_line, ploty]))])\n left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin_around_line,\n ploty])))])\n left_line_pts = np.hstack((left_line_window1, left_line_window2))\n right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin_around_line, ploty]))])\n right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin_around_line,\n ploty])))])\n right_line_pts = np.hstack((right_line_window1, 
right_line_window2))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))\n cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))\n result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n\n # plt.imshow(result)\n # plt.title('result', fontsize=10)\n # plt.show()\n # mpimg.imsave(\"result.png\", result)\n\n # Plot the polynomial lines onto the image\n plt.plot(left_fitx, ploty, color='yellow')\n plt.plot(right_fitx, ploty, color='yellow')\n ## End visualization steps ##\n\n return result", "def draw_mesh(mesh_obj, electrode_num, electrode_centers, electrode_radius):\n\n plt.rcParams['font.family'] = 'Times New Roman'\n # plt.rc('text', usetex=True)\n plt.rc('xtick', labelsize=12)\n plt.rc('ytick', labelsize=12)\n plt.rc('axes', labelsize=12)\n points = mesh_obj['node']\n tri = mesh_obj['element']\n perm = mesh_obj['perm']\n x, y = points[:, 0] * 0.7, points[:, 1] * 0.7\n fig, ax = plt.subplots(figsize=(4.25, 4.25))\n im = ax.tripcolor(x, y, tri, np.abs(perm), shading='flat', edgecolors='k', vmax=2, vmin=0)\n # fig.colorbar(im)\n for i, electrode_center in enumerate(electrode_centers):\n x = electrode_center[0] - electrode_radius\n y = electrode_center[1] - electrode_radius\n width = 2 * electrode_radius * 0.7\n ax.add_patch(\n patches.Rectangle(\n (x * 0.7, y * 0.7), # (x,y)\n width, # width\n width, # height\n color='y'\n )\n )\n ax.annotate(str(i), (x * 0.7, y * 0.7))\n ax.set_aspect('equal')\n\n _, ax = plt.subplots(figsize=(20, 20))\n ax.plot(points[:, 0], points[:, 1], 'ro', markersize=5)\n for i in range(points.shape[0]):\n ax.text(points[i, 0], points[i, 1], str(i), fontsize=8)\n ax.grid('on')\n ax.set_aspect('equal')\n plt.show()", "def draw_lanes(img_ud, coeff, pp_mtx_inv, annotate=True):\n pl = coeff[:3]\n pr = np.hstack([coeff[:2],[coeff[-1]]])\n pts_y = np.arange(0, img_ud.shape[0], 1)\n pts_lx = pl[0]*pts_y**2 + pl[1]*pts_y + pl[2]\n pts_rx = pr[0]*pts_y**2 + pr[1]*pts_y + pr[2]\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([pts_lx, img_ud.shape[0]-pts_y]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([pts_rx, img_ud.shape[0]-pts_y])))])\n pts = np.hstack((pts_left, pts_right))\n # Draw the lane onto a newly created warped blank image\n warp_zero = np.zeros_like(img_ud[:,:,0]).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n cv2.fillPoly(color_warp, np.int_([pts.astype(np.int32)]), (0, 255, 0))\n # Warp the blank back to original image space using \n # inverse perspective matrix \n newwarp = cv2.warpPerspective(color_warp, pp_mtx_inv, \\\n (img_ud.shape[1], img_ud.shape[0])) \n # Combine the result with the original image\n result = cv2.addWeighted(img_ud, 1, newwarp, 0.3, 0)\n # now compute distance and curvature\n \n xm_per_pix = 30/720 # meters per pixel vertically\n ym_per_pix = 3.7/700 # meters per pixel horizontally\n coeff_adj = np.array([ym_per_pix/xm_per_pix**2, ym_per_pix/xm_per_pix, ym_per_pix])\n left_fit_cr = pl * coeff_adj\n right_fit_cr = pr * coeff_adj\n x_eval = 0\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2*left_fit_cr[0]*x_eval*xm_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])\n right_curverad = ((1 + (2*right_fit_cr[0]*x_eval*xm_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])\n # in theory averaging not necessary because the current window\n # detection algorithm ensures that the detected windows are parallel\n 
avg_curverad = 2/(1/left_curverad+1/right_curverad) \n lane_center = (np.polyval(left_fit_cr,x_eval*xm_per_pix)+\\\n np.polyval(right_fit_cr,x_eval*xm_per_pix))/2\n lane_loc = ym_per_pix*1280/2-lane_center\n if annotate:\n cv2.putText(result, \\\n '{0} curve avg {1:.3f}m'.format(\\\n 'left' if left_fit_cr[0]+right_fit_cr[0]<0 else 'right', \\\n avg_curverad), \\\n (10, 50), cv2.FONT_HERSHEY_PLAIN, 2, (255,255,255), 2)\n cv2.putText(result, \\\n '{0:.3f}m {1}'.format(\\\n abs(lane_loc), 'left' if lane_loc<0 else 'right'), \\\n (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (255,255,255), 2)\n\n return result, left_curverad, right_curverad, avg_curverad, \\\n ym_per_pix*1280/2-lane_center" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get eroded image to check for obstacles considering the robot radius and clearance
def erode_image(self) -> bool:
    # Get map with obstacles
    eroded_img = self.world_img.copy()
    eroded_img = cv2.cvtColor(eroded_img, cv2.COLOR_BGR2GRAY)
    # Erode map image for rigid robot
    if self.thresh:
        kernel_size = (self.thresh * 2) + 1
        erode_kernel = np.ones((kernel_size, kernel_size), np.uint8)
        eroded_img = cv2.erode(eroded_img, erode_kernel, iterations=1)
    # Include border in obstacle space
    for y in range(self.height):
        for x in range(self.width):
            if (0 <= y < self.thresh or self.width - self.thresh <= x < self.width or
                    0 <= x < self.thresh or self.height - self.thresh <= y < self.height):
                eroded_img[y][x] = 0
    cv2.imwrite(self.CHECK_IMG_LOC, eroded_img)
    if not os.path.exists(self.CHECK_IMG_LOC):
        return False

    return True
[ "def get_image(self):\n im = np.ones((10*self.p + 1, 10*self.q + 1, 3))\n for i in range(self.p):\n for j in range(self.q):\n if self.maze_map[i][j].walls['top']:\n im[10*i, 10*j:(10*(j + 1) + 1), :] = 0\n if self.maze_map[i][j].walls['left']:\n im[10*i:(10*(i + 1) + 1), 10*j, :] = 0\n if self.maze_map[i][j].type == 'direct_dead_end':\n im[(10*i + 1):10*(i + 1), (10*j + 1):10*(j + 1), 1:] = 0\n if self.maze_map[i][j].type == 'indirect_dead_end':\n im[(10*i + 1):10*(i + 1), (10*j + 1):10*(j + 1), :] = 0.5\n im[10*self.p, :, :] = 0\n im[:, 10*self.q, :] = 0\n if hasattr(self, 'start'):\n istart = self.start[0]\n jstart = self.start[1]\n im[(10*istart + 1):10*(istart + 1), (10*jstart + 1):10*(jstart + 1), :2] = 0\n if hasattr(self, 'end'):\n iend = self.end[0]\n jend = self.end[1]\n im[(10*iend + 1):10*(iend + 1), (10*jend + 1):10*(jend + 1), ::2] = 0\n return im", "def getEnemies(img):\n output = img.copy()\n red_bloons = []\n red_sizes = []\n bloons, sizes = getCircle(img)\n for i in range(len(bloons)):\n if isRed(img, bloons[i]):\n red_bloons.append(bloons[i])\n red_sizes.append(sizes[i])\n cv2.circle(output, (bloons[i][0], bloons[i][1]), bloons[i][2], (0, 255, 0), 4)\n #return [red_bloons, red_sizes]\n return output", "def find_gate():\n global img, img_res\n while img is None and not rospy.is_shutdown():\n print('img is none.\\nPlease check topic name or check camera is running')\n\n mask = get_object()\n ROI, left_excess, right_excess, top_excess, bot_excess = get_roi(mask)\n\n if len(ROI) == 0:\n mode = 1\n if len(ROI) == 1:\n mode = 2\n if len(ROI) >= 2:\n mode = 3\n\n if mode == 1:\n print_result(\"MODE 1: CANNOT FIND GATE\")\n publish_result(img_res, 'bgr', pub_topic + 'img')\n publish_result(mask, 'gray', pub_topic + 'mask')\n return message()\n elif mode == 2:\n himg, wimg = img.shape[:2]\n x, y, w, h = cv.boundingRect(ROI[0])\n area = (1.0*h*w)/(himg*wimg)\n if left_excess is False and right_excess is True:\n print_result(\"MODE 2(-1): CAN FIND LEFT GATE\")\n publish_result(img_res, 'bgr', pub_topic + 'img')\n publish_result(mask, 'gray', pub_topic + 'mask')\n return message(pos=-1, area=area, appear=True)\n elif left_excess is True and right_excess is False:\n print_result(\"MODE 2(1): CAN FIND RIGHT GATE\")\n publish_result(img_res, 'bgr', pub_topic + 'img')\n publish_result(mask, 'gray', pub_topic + 'mask')\n return message(pos=1, area=area, appear=True)\n elif left_excess is True and right_excess is True:\n print_result(\n \"MODE 2(0): CAN FIND ALL GATE(GATE IS BIGGER THAN FRAME)\")\n cx = wimg/2\n cv.line(img_res, (cx, 0), (cx, himg), (255, 0, 0), 1)\n publish_result(img_res, 'bgr', pub_topic + 'img')\n publish_result(mask, 'gray', pub_topic + 'mask')\n cx = Aconvert(cx,wimg)\n return message(cx=cx, pos=0, area=area, appear=True)\n elif h < 4*w:\n print (h,w)\n print_result(\"MODE 2(0): CAN FIND ALL GATE\")\n cx = (2*x+w)/2\n cv.line(img_res, (cx, 0), (cx, himg), (255, 0, 0), 1)\n publish_result(img_res, 'bgr', pub_topic + 'img')\n publish_result(mask, 'gray', pub_topic + 'mask')\n cx = Aconvert(cx,wimg)\n return message(cx=cx, pos=0, area=area, appear=True)\n else:\n print_result(\n \"MODE 2(-99): CAN FIND PART OF GATE BUT NOT SURE WHICH PART\")\n publish_result(img_res, 'bgr', pub_topic + 'img')\n publish_result(mask, 'gray', pub_topic + 'mask')\n return message(pos=-99, area=area, appear=True)\n\n if mode == 3:\n himg, wimg = img.shape[:2]\n cx_horizontal = []\n cx_vertical = []\n for cnt in ROI:\n x, y, w, h = cv.boundingRect(cnt)\n M = cv.moments(cnt)\n cx = 
int(M[\"m10\"]/M[\"m00\"])\n if h < 4 * w:\n cx_horizontal.append(cx)\n else:\n cx_vertical.append(cx)\n if len(cx_horizontal) == 2 or len(cx_horizontal) == 1: # found or found(on water)\n print_result(\"MODE 3(1): CAN FIND HORIZONTAL OF GATE\")\n cx = sum(cx_horizontal)/len(cx_horizontal)\n elif len(cx_vertical) == 2:\n print_result(\"MODE 3(2): CAN FIND VERTICAL OF GATE\")\n cx = sum(cx_vertical)/2\n else:\n print_result(\"MODE 3(3): CAN FIND GATE BUT MAYBE A LOT OF NOISE\")\n cx = (sum(cx_vertical)+(sum(cx_horizontal)*3)) / \\\n (len(cx_vertical)+len(cx_horizontal)+3)\n cv.line(img_res, (cx, 0), (cx, himg), (255, 0, 0), 1)\n publish_result(img_res, 'bgr', pub_topic + 'img')\n publish_result(mask, 'gray', pub_topic + 'mask')\n cx = Aconvert(cx,wimg)\n return message(cx=cx, pos=0, area=-1, appear=True)", "def block(img):\n # FIXME: grid searchowac ten fragment?\n img = exposure.equalize_adapthist(img)\n img = exposure.adjust_gamma(img)\n img = unsharp_mask(img, radius=3, amount=2)\n img = ndimage.uniform_filter(img, size=2)\n return (img * 255).astype(np.uint8)", "def analyze_cells(img,pwd,character):\n TARGET = 100 #number of cells\n percentage = 15\n percentage = percentage / 200\n \n kernels = [x for x in range(3,249) if x%2 != 0]\n kernel = kernels[round(len(kernels)/2)]\n \n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n heirarchy = [[],[]]\n while (len(heirarchy[0]) != TARGET + 1):\n blur = cv2.GaussianBlur(gray, (kernel,kernel), 0)\n thresh = cv2.threshold(blur,127,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n cnts, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n \n if (len(heirarchy[0]) < TARGET + 1):\n kernels = [x for x in range(kernels[0], kernel) if x%2 !=0]\n kernel = kernels[round(len(kernels)/2)]\n else:\n kernels = [x for x in range(kernel, kernels[-1])]\n kernel = kernels[round(len(kernels)/2)]\n \n \n count = 0\n for i in range(len(cnts)):\n if (heirarchy[0][i][3] != -1):\n x,y,w,h = cv2.boundingRect(cnts[i])\n cropped = gray[y:y+h, x:x+w]\n thresh = cv2.threshold(cropped, 127,255,cv2.THRESH_BINARY_INV)[1]\n mask = np.zeros((cropped.shape[0], cropped.shape[1]))\n x1 = cropped.shape[0]\n x2 = round(x1 * percentage)\n y1 = cropped.shape[1]\n y2 = round(y1 * percentage)\n mask[x2:x1-x2, y2:y1-y2] = 1\n masked_image = thresh * mask\n \n masked_image = cv2.resize(masked_image, (28,28))\n try:\n os.remove(pwd + '/cell_images/cell' + str(count) + '.jpg')\n except:\n pass\n cv2.imwrite(pwd+'/cell_images/cell' + str(count) + '.jpg',masked_image)\n count +=1\n \n cells_to_csv(masked_image, pwd, character)", "def find_ball(img_screen, img_dbg = None):\n img_screen_bin = cv2.cvtColor(img_screen, cv2.COLOR_RGB2GRAY)\n # TODO: performing edge detection could remove a lot of unnecessary information and make circle detection more robust\n \n # TODO: tweak parameters to be more robust\n circles = cv2.HoughCircles(img_screen_bin, cv2.HOUGH_GRADIENT, 2, 500, minRadius = 20, maxRadius = 200)\n if circles is None:\n print \"Couldn't find circles\"\n return None\n \n # TODO: use int32?\n circles = np.uint16(np.around(circles))\n # TODO: no idea why OpenCV returns the results in this useless format, but this must be done\n circles = circles[0] \n \n # if many circles are found, sort descending according to circle radius and use the biggest one\n if len(circles) > 1:\n print \"Found many circles, this shouldn't happen. 
Using the biggest one.\"\n # TODO: ugly\n circles = list(circles)\n circles.sort(key=lambda circle: circle[2])\n circles = np.int32(circles)\n\n # TODO: this function should return the coordinates as (y, x) instead of (x, y)\n return circles[0][:-1]", "def get_camera_vision(self):\n robot_id = self.robot.id_\n robot_coordinates = self.robot.center\n for offset_index in range(len(self.state.robots - 1)):\n if robot_id % 2 == 0:\n # If the robot is in the blue team\n if self.state.robots[robot_id - offset_index - 1].id_ % 2 == 0:\n # Only scan for red team robots and not blue team\n continue\n else:\n # If the robot is in the red team\n if self.state.robots[robot_id - offset_index - 1].id_ % 2 == 1:\n # Only scan for blue team robots and not red team\n continue\n other_robot_coordinates = self.state.robots[robot_id - offset_index - 1].center\n delta_x, delta_y = other_robot_coordinates - robot_coordinates\n angle = np.angle(delta_x + delta_y * 1j, deg=True)\n if angle >= 180: angle -= 360\n if angle <= -180: angle += 360\n # Get relative angle\n angle = angle - self.robot.yaw - self.robot.rotation\n if angle >= 180: angle -= 360\n if angle <= -180: angle += 360\n if abs(angle) < 37.5:\n if self.line_intersects_barriers(robot_coordinates, other_robot_coordinates) \\\n or self.line_intersects_robots(robot_coordinates, other_robot_coordinates):\n pass\n else:\n return self.state.robots[robot_id - offset_index - 1].id_\n else:\n pass\n\n return -1", "def compute_mask(self, experiment):\n assert(len(self.args) != 0)\n center = experiment.project(self.center)\n center_to_edge = self.radius * perpendicular(\n experiment.camera_to(self.center))\n radius_vector = (experiment.project(self.center + center_to_edge)\n - experiment.project(self.center))\n radius = np.linalg.norm(radius_vector)\n \n rr, cc = draw.circle(center[0], center[1], radius,\n shape=experiment.image_shape[:2])\n \n dd = np.empty(rr.shape[0], dtype=np.float64)\n for i in range(dd.shape[0]):\n dd[i] = self.distance_to_surface([rr[i], cc[i]], experiment)\n \n return rr, cc, dd", "def run_2d(self, img):\n\n values = self.get_values(['diameter','offsets','depth'])\n if not values: return False\n\n # We only need an overlap value if we're cutting more than one offsets\n if values['offsets'] != 1:\n v = self.get_values(['overlap'])\n if not v: return False\n values.update(v)\n else:\n values['overlap'] = 0\n\n koko.FRAME.status = 'Finding distance transform'\n distance = img.distance()\n\n koko.FRAME.status = 'Finding contours'\n self.paths = distance.contour(values['diameter'],\n values['offsets'],\n values['overlap'])\n for p in self.paths: p.set_z(values['depth'])\n\n\n self.xmin = img.xmin\n self.ymin = img.ymin\n self.zmin = values['depth']\n\n koko.GLCANVAS.load_paths(self.paths, self.xmin, self.ymin, self.zmin)\n koko.CANVAS.load_paths(self.paths, self.xmin, self.ymin)\n koko.FRAME.status = ''\n\n return {'paths': self.paths}", "def getPieces(filename):\r\n inputimage = cv2.imread(filename)\r\n\r\n\r\n #inputimage = cv2.resize(inputimage, (4032, 3024))\r\n\r\n u_green = np.array([120, 255, 95])#np.array([100, 255, 100])\r\n l_green = np.array([0, 100, 0])#np.array([0,90,0])\r\n mask = cv2.inRange(inputimage, l_green, u_green)\r\n #cv2.imwrite(\"mask.jpg\", mask)\r\n\r\n\r\n masked_image = np.copy(inputimage)\r\n #cv2.imwrite(\"pre-mask.jpg\", masked_image)\r\n masked_image[mask != 0] = [0, 0, 255]\r\n masked_image[mask == 0] = [0,255,0]\r\n cv2.imwrite(\"post-mask.jpg\", masked_image)\r\n m = Image.fromarray(masked_image)\r\n\r\n 
m.save(\"post-mask.BMP\")\r\n\r\n img = Image.open(\"post-mask.BMP\")\r\n og = Image.open(filename)\r\n w, h = img.size\r\n print(\"Width: \", w, \"\\tHeight \", h)\r\n pixles = img.load()\r\n #pixles = masked_image\r\n piecesarr = []\r\n\r\n\r\n\r\n for i in range(1, w - 1):\r\n for j in range(1, h - 1):\r\n r, g, b = pixles[i, j]\r\n #print(r,g,b)\r\n if b - (r + g) != 255 and r - (g + b) != 255:\r\n fillq = Queue()\r\n maxx = -1\r\n minx = w + 1\r\n maxy = -1\r\n miny = h + 1\r\n fillq.put((i, j))\r\n pixles[i, j] = (255, 0, 0)\r\n while not fillq.empty():\r\n x, y = fillq.get()\r\n # get min/max\r\n if x < minx:\r\n minx = x\r\n if x > maxx:\r\n maxx = x\r\n if y < miny:\r\n miny = y\r\n if y > maxy:\r\n maxy = y\r\n\r\n # check left\r\n if x-1 > 0:\r\n r, g, b = pixles[x - 1, y]\r\n if b - (r + g) != 255 and r - (g + b) != 255 :\r\n fillq.put((x - 1, y))\r\n pixles[x - 1, y] = (255, 0, 0)\r\n # check right\r\n if x + 1 < w:\r\n r, g, b = pixles[x + 1, y]\r\n if b - (r + g) != 255 and r - (g + b) != 255 :\r\n fillq.put((x + 1, y))\r\n pixles[x + 1, y] = (255, 0, 0)\r\n # check up\r\n if y-1 > 0:\r\n r, g, b = pixles[x, y - 1]\r\n if b - (r + g) != 255 and r - (g + b) != 255 :\r\n fillq.put((x, y - 1))\r\n pixles[x, y - 1] = (255, 0, 0)\r\n # check down\r\n if y + 1 < h:\r\n r, g, b = pixles[x, y + 1]\r\n if b - (r + g) != 255 and r - (g + b) != 255:\r\n fillq.put((x, y + 1))\r\n pixles[x, y + 1] = (255, 0, 0)\r\n\r\n #print(\"MaxX: \", maxx, \" | MinX: \", minx, \" | MaxY: \", maxy, \" | MinY: \", miny)\r\n # piecearr = ogpix[minx:maxx, miny:maxy]\r\n if(maxx-minx >40 or maxy-miny >40):\r\n newpiece = og.crop((minx - 3, miny - 3, maxx + 3, maxy + 3))\r\n newmask = img.crop((minx - 3, miny - 3, maxx + 3, maxy + 3))\r\n # newpiece.show()\r\n p1 = Piece(newpiece, newmask)\r\n piecesarr.append(p1)\r\n print(\"number of Pieces:\", len(piecesarr))\r\n\r\n\r\n return piecesarr", "def path_planner(self):\n # Load occupancy grid\n self.load_map_from_disk()\n\n # Transform occupancy grid in array of binary values where 0 represents passable cell and 1 impassable cell\n self.map_grid_binary = np.zeros((self.x_size, self.y_size))\n for iy, ix in np.ndindex(self.map_grid.shape):\n if self.map_grid[iy, ix] == 0: # Flag unknown cell as impassable\n self.map_grid_binary[iy, ix] = 1\n if -0.5 < self.map_grid[iy, ix] < 0:\n self.map_grid_binary[iy, ix] = 1\n\n marker_size = 4500\n marker_beacon = '$BEACON$'\n marker_home = '$HOME$'\n color_map = plt.cm.Accent\n\n # Save plot of binary map before dilation\n fig, ax = plt.subplots(figsize=(20, 20))\n ax.imshow(self.map_grid_binary, cmap=color_map)\n ax.scatter(self.beacon_loc[1], self.beacon_loc[0], marker=marker_beacon, color='white', s=marker_size)\n ax.scatter(self.hp_centre_loc[1], self.hp_centre_loc[0], marker=marker_home, color='white', s=marker_size)\n plt.savefig(fname='plots/binary map from og.png', dpi=300, format='png')\n plt.clf()\n\n # Use binary dilation technique to expand occupied cells to build in safety margin for robot, as A* does not\n # account for robot dimensions\n self.map_grid_binary = binary_dilation(self.map_grid_binary, structure=np.ones((35, 35))).astype(int)\n\n # Save binary map now enhanced with binary dilation\n fig, ax = plt.subplots(figsize=(20, 20))\n ax.imshow(self.map_grid_binary, cmap=color_map)\n ax.scatter(self.beacon_loc[1], self.beacon_loc[0], marker=marker_beacon, color='white', s=marker_size)\n ax.scatter(self.hp_centre_loc[1], self.hp_centre_loc[0], marker=marker_home, color='white', s=marker_size)\n 
plt.savefig(fname='plots/binary dilation map.png', dpi=300, format='png')\n plt.clf()\n\n # Find optimal path from beacon to home using A* algorithm developed by Christian Careaga\n # http://code.activestate.com/recipes/578919-python-a-pathfinding-with-binary-heap/\n self.planned_route = self.astar_pathfinding()\n self.planned_route = self.planned_route + [self.beacon_loc]\n self.planned_route = self.planned_route[::-1]\n\n # Save route to disk for later use\n self.save_planned_route_to_disk()\n\n # Plot map and path\n path_x = []\n path_y = []\n\n for i in (range(0, len(self.planned_route))):\n x = self.planned_route[i][0]\n y = self.planned_route[i][1]\n path_x.append(x)\n path_y.append(y)\n\n fig, ax = plt.subplots(figsize=(20, 20))\n ax.imshow(self.map_grid_binary, cmap=color_map)\n ax.scatter(self.beacon_loc[1], self.beacon_loc[0], marker=marker_beacon, color='red', s=marker_size)\n ax.scatter(self.hp_centre_loc[1], self.hp_centre_loc[0], marker=marker_home, color='red', s=marker_size)\n ax.plot(path_y, path_x, color='white', linewidth=2.0)\n plt.savefig(fname='plots/binary dilation map with path plan.png', dpi=300, format='png')\n plt.clf()", "def _generate_mask(self) -> ndarray:\n # calculate mean 3x3 (square nbhood) orography heights\n radius = number_of_grid_cells_to_distance(self.topography, 1)\n topo_nbhood = NeighbourhoodProcessing(\"square\", radius)(self.topography)\n topo_nbhood.convert_units(\"m\")\n\n # create mask\n mask = np.full(topo_nbhood.shape, False, dtype=bool)\n mask = np.where(topo_nbhood.data < self.orog_thresh_m, True, mask)\n mask = np.where(self.humidity.data < self.rh_thresh_ratio, True, mask)\n mask = np.where(abs(self.vgradz) < self.vgradz_thresh_ms, True, mask)\n return mask", "def obstacle_img2dist_img(*, img, voxel_size, add_boundary=True):\n\n n_voxels = np.array(img.shape)\n\n if not add_boundary:\n # Main function\n # # EDT wants objects as 0, rest as 1\n dist_img = ndimage.distance_transform_edt(-img.astype(int) + 1, sampling=voxel_size)\n dist_img_complement = ndimage.distance_transform_edt(img.astype(int), sampling=voxel_size)\n dist_img[img] = - dist_img_complement[img] # Add interior information\n\n else:\n # Additional branch, to include boundary filled with obstacles\n obstacle_img_wb = np.ones(n_voxels + 2, dtype=bool)\n inner_image_idx = tuple(map(slice, np.ones(img.ndim, dtype=int), (n_voxels + 1)))\n obstacle_img_wb[inner_image_idx] = img\n\n dist_img = obstacle_img2dist_img(img=obstacle_img_wb, voxel_size=voxel_size, add_boundary=False)\n dist_img = dist_img[inner_image_idx]\n\n return dist_img", "def getCircle(img):\n output = img.copy()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 0.7, 40,\n param1=80, param2=15, minRadius=7,\n maxRadius=0)\n bloons = []\n sizes = []\n if circles is not None:\n circles = circles[0] # syntax\n for lst in circles:\n x = lst[0]\n y = lst[1]\n r = lst[2]\n if not isWhite(img, lst):\n bloons.append(lst)\n sizes.append(math.pi * r * r)\n #cv2.circle(output, (x, y), r, (0, 255, 0), 4)\n return [bloons, sizes]", "def _find_object(self):\n self._fetchImage()\n frame_to_thresh = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)\n thresh = cv2.inRange(frame_to_thresh, self.target.min_hsv, self.target.max_hsv)\n kernel = np.ones((5,5),np.uint8)\n mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n if 
len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n return (cv2.minEnclosingCircle(c), cv2.moments(c))\n return None, None", "def createOcclusionMap(gridmap, save_path, max_occluded_steps=1):\n global thread_number\n global calculated_frames\n num_cells_per_edge_ui = gridmap.shape[0]\n num_cells_per_edge_half_f = gridmap.shape[0] // 2 - 1\n\n occlusion_map = np.ones(gridmap.shape, dtype=np.float32) # 0 - occluded, 1 - non occluded/visible\n start_time = time.time()\n \n # Angle array captures 0 to 360 degree in radians to simulate the lidar beams\n angle_array = np.arange(0,two_pi_f,angular_res_rad_f)\n # Radial array captures 0 to max distance of detection to iterate over the distance to the ego vehicle\n radial_array = np.arange(0, radial_limit_meter_f, radial_res_meter_f)\n # For performance: repeat both arrays up to the shape of the other one to do faster matrix operations\n angle_array = np.stack([angle_array]*radial_array.shape[0], axis=1)\n radial_array = np.stack([radial_array]*angle_array.shape[0], axis=0)\n\n # x,y grid contains all x,y-Coordinates which correlate to the given angle and radius\n xy_grid = np.empty((angle_array.shape[0], radial_array.shape[1], 2), dtype=int) \n xy_grid[:,:,0] = grid_cell_size_inv_f * np.multiply(np.cos(angle_array), radial_array) + num_cells_per_edge_half_f # 0 - x\n xy_grid[:,:,1] = grid_cell_size_inv_f * np.multiply(np.sin(angle_array), radial_array) + num_cells_per_edge_half_f # 1 - y\n xy_grid = np.clip(xy_grid, 0, int(num_cells_per_edge_ui-1)) \n \n occluded_steps = np.zeros((xy_grid.shape[0]), dtype=np.int32)\n is_occluded_array = np.zeros((xy_grid.shape[0]), dtype=np.bool)\n occlusion_wo_occup = np.ones((xy_grid.shape[0]), dtype=np.bool)\n position_array = np.zeros((xy_grid.shape[0], 2), dtype=int)\n\n for radial_index in range(xy_grid.shape[1]):\n x_i = xy_grid[:, radial_index, 0]\n y_i = xy_grid[:, radial_index, 1]\n\n # occluded_steps += np.multiply(np.ones(occluded_steps.shape, dtype=np.int32), is_occluded_array)\n # occluded_steps = np.multiply(is_occluded_array, )\n occ_f = gridmap[y_i, x_i]\n is_occupied = (occ_f < occ_thresh_f)\n is_changed = is_occupied * (1 - is_occluded_array)\n position_array[:,0] = position_array[:,0] * (1 - is_changed) + x_i * (is_changed)\n position_array[:,1] = position_array[:,1] * (1 - is_changed) + y_i * (is_changed)\n is_occluded_array = is_occluded_array + is_occupied \n is_first_pixel = (np.absolute(position_array[:,0] - x_i) <= max_occluded_steps) * (np.absolute(position_array[:,1] - y_i) <= max_occluded_steps) * is_occupied\n # occlusion_wo_occup = (1 - is_occluded_array) + (is_occluded_array * occlusion_wo_occup * is_occupied)\n # occlusion_map[y_i, x_i] = occlusion_map[y_i, x_i] * (1 - (is_occluded_array * (1 - occlusion_wo_occup)))\n occlusion_map[y_i, x_i] = occlusion_map[y_i, x_i] * (1 - (is_occluded_array * (1 - is_first_pixel)))\n\n \n\n \"\"\"\n # Version with for-loops for better understanding\n # TODO: Customizing this loops to new version\n ====================================================================================================\n for angle_index in xrange(xy_grid.shape[0]):\n occluded_steps = 0\n occluded = False\n for radial_index in xrange(xy_grid.shape[1]):\n x_i = xy_grid[angle_index, radial_index, 0]\n y_i = xy_grid[angle_index, radial_index, 1]\n visited_map[y_i, x_i] += 1\n if occluded:\n occluded_steps += 1\n if occluded_steps >= 7:\n occlusion_map[y_i, 
x_i] = 0\n else:\n occ_f = gridmap[y_i, x_i]\n if(occ_f < occ_thresh_f):\n occluded = True\n \"\"\"\n scipy.misc.toimage(occlusion_map).save(save_path)\n thread_number -= 1\n calculated_frames += 1", "def get_candidate_wings(self, imgray, kernel, headLine, centroid, backPoint, body_length, abd_length, axisLine, wingTips, wholeWings, wingArea,timestamp_FMT, distance, targ_dist):\n \n edge = self.get_edge(imgray)\n if edge > 115:\n wingThresh = int(0.75*edge + 10.0)\n bodyThresh = int(0.45*edge + 2.5) \n else:\n wingThresh = int(0.75*edge + 13.0)\n bodyThresh = int(0.55*edge + 2.5)\n if distance >= 170:\n adjustments = [-10,-5,0]\n else:\n adjustments = [-5,0,5]\n for a in adjustments: \n #DEFINE bodyNotWings AS BODY PORTION PLUS LEGS ETC, USEFUL FOR FINDING WINGS.\n ret1, bodyNotWings = cv2.threshold(imgray, bodyThresh,255,cv2.THRESH_BINARY)\n bodyNotWings = cv2.dilate(bodyNotWings, kernel, iterations=1)\n bodyNotWings = cv2.erode(bodyNotWings, kernel, iterations=1)\n\n \n #DEFINE wings AS WINGS AND TARGETS BUT NOT BODY.\n ret2, wings = cv2.threshold(imgray, wingThresh+a,1,cv2.THRESH_BINARY_INV)\n test = wings*bodyNotWings\n dilated = cv2.erode(test, kernel, iterations=2)\n #eroded = cv2.dilate(dilated, kernel, iterations=wingThresh[1])\n #dilatedCopy = eroded.copy()\n \n wingCont, hierarchy = cv2.findContours(dilated, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n \n \n \n for c in wingCont:\n area = cv2.contourArea(c)\n #WINGS MUST BE APPROPRIATE SIZE\n if (area >= 3000):\n M = cv2.moments(c)\n cx, cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])\n #WINGS MUST BE BEHIND HEAD\n if self.check_laterality(centroid, (cx,cy), headLine[0], headLine[1], headLine[2]):\n checkSpot = (c[0][0][0], c[0][0][1])\n pointSet1 = []\n pointSet2 = []\n pointSetTARGET = []\n for x in c:\n if self.check_laterality((x[0][0], x[0][1]), centroid, headLine[0], headLine[1], headLine[2]):\n if self.check_laterality((x[0][0], x[0][1]), checkSpot, axisLine[0], axisLine[1], axisLine[2]):\n pointSet1.append(x.tolist())\n else:\n pointSet2.append(x.tolist())\n else:\n if targ_dist <=20.0:\n pointSetTARGET.append(x.tolist())\n pointSet1 = np.array(pointSet1).reshape((-1,1,2)).astype(np.int32)\n pointSet2 = np.array(pointSet2).reshape((-1,1,2)).astype(np.int32)\n pointSetTARGET = np.array(pointSetTARGET).reshape((-1,1,2)).astype(np.int32)\n if (len(pointSet1) > 0):\n if cv2.contourArea(pointSet1) >=833:#(2500/(wingThresh[2]+1)):\n near, far = self.get_nearest_and_furthest_from_centroid(pointSet1, centroid)\n if self.get_distance_between_coords(near, centroid) <= 150:\n winglength = self.get_distance_between_coords(far, backPoint)\n if (winglength <= 2.0*(body_length)) and (winglength >= abd_length):\n wingTips.append(far)\n wholeWings.append(pointSet1)#(cv2.convexHull(pointSet1))\n wingArea.append(cv2.contourArea(pointSet1))\n if (len(pointSet2) > 0):\n if cv2.contourArea(pointSet2) >=833:#(2500/(wingThresh[2]+1)):\n near, far = self.get_nearest_and_furthest_from_centroid(pointSet2, centroid)\n if self.get_distance_between_coords(near, centroid) <= 150:\n winglength = self.get_distance_between_coords(far, backPoint)\n if (winglength <= 2.0*(body_length)) and (winglength >= abd_length):\n wingTips.append(far)\n wholeWings.append(pointSet2)#(cv2.convexHull(pointSet2))\n wingArea.append(cv2.contourArea(pointSet2))\n return wingTips, wholeWings, wingArea", "def dilationUnknownFgBgNeighbor(unknown_mask, kernal_size, fg_mask, bg_mask):\n kernel = np.ones((kernal_size,kernal_size),np.uint8)\n dilation_alpha = 
cv2.dilate(unknown_mask, kernel, iterations = 1)\n \n dila_fg_mask = np.logical_and(fg_mask, dilation_alpha)\n dila_bg_mask = np.logical_and(bg_mask, dilation_alpha)\n \n return dila_fg_mask, dila_bg_mask", "def process_image(image):\n global initial_cordi_set\n global initial_x_y\n # Green squares can be placed on corners to mark the perimeter\n hsv_img = hsv(image)\n g_thresh_img = get_green_thresh_img(hsv_img)\n g_contours = get_contours(g_thresh_img)\n g_squares = get_squares(g_contours) # coordinates of green squares\n\n #place a red square on top of car in between the rear axle\n r_thresh_img = get_red_thresh_img(hsv_img)\n r_contours = get_contours(r_thresh_img)\n r_squares = get_squares(r_contours) # coordinates of red square\n\n if (len(r_squares) != 0) and (initial_cordi_set == True):\n #These coordinates are transformed to match with origin and orientation of odom coordinates\n file2write.write(str(image_time))\n file2write.write(\",\")\n #Write new coordinates to file considering the initial coordinates of the red square as origin.\n file2write.write(str((initial_x_y[0] - r_squares[0][0])/one_m_pixel ))\n file2write.write(\",\")\n file2write.write(str((initial_x_y[1] - r_squares[0][1])/one_m_pixel ))\n file2write.write(\"\\n\")\n elif (len(r_squares) != 0) and initial_cordi_set == False:\n #Set the initial coordinates which will be used to transform all points with respect to this origin\n initial_cordi_set = True\n initial_x_y[0] = r_squares[0][0]\n initial_x_y[1] = r_squares[0][1]\n #print r_squares\n for pt in r_squares:\n cv2.circle(image, pt, 15, (0, 0, 255), -1)\n for pt in g_squares:\n cv2.circle(image, pt, 15, (0, 255, 0), -1)\n return image" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove check image from file system
def remove_check_image(self) -> bool: os.remove(self.CHECK_IMG_LOC) if os.path.exists(self.CHECK_IMG_LOC): return False return True
[ "def _remove_existing(img):\n if os.path.exists(img):\n os.unlink(img)\n return img", "def clean(image_name):\n logging.info('Cleaning image \\'' + image_name + '\\' files')", "def removeImage(self, fileName, validate=None): # XXX remove unused 'validate'?\n if self._formatVersion < UFOFormatVersion.FORMAT_3_0:\n raise UFOLibError(\n f\"Images are not allowed in UFO {self._formatVersion.major}.\"\n )\n self.removePath(f\"{IMAGES_DIRNAME}/{fsdecode(fileName)}\")", "def clear_icons():\n\n keep_files = [\n 'bio-photo.png',\n 'IMG_0784_crop.jpg'\n ]\n\n images_folder = Path('assets/images/')\n\n assert(images_folder.is_dir())\n\n for f in images_folder.iterdir():\n\n if f.is_file():\n \n if not f.name in keep_files:\n\n f.unlink()", "def clearTagImages():\n files = glob.glob('/home/sewerbot/repo/SeniorDesign/site/backend/data_management/temp/tag*.jpg')\n remove(files)", "def test_blog_delete_image_file(self):\n\t\timg_name = 'screwed_up_click.jpg'\n\t\ttest_img = './test/img/'+ img_name\n\t\tuploaded_img = './dandotco/static/img/original/1/'+ img_name\n\t\tshutil.copy(test_img, uploaded_img)\n\n\t\tproc_images = process('original/1/'+ img_name)\n\n\t\tdelete_files(1, img_name)\n\n\t\tfile_exists = os.path.isfile(uploaded_img)\n\t\tself.assertEqual(file_exists, False)\n\n\t\tfile_exists = os.path.isfile('./dandotco/' + proc_images[0])\n\t\tself.assertEqual(file_exists, False)", "def delete_metadata(full_path_to_img):\n piexif.remove(full_path_to_img, \"clean_image.jpg\")\n move(\"clean_image.jpg\", \"documents/clean_image.jpg\")", "def delete_metadata_from_png(full_path_to_img):\n image = Image.open(full_path_to_img)\n image.save(\"documents/clean_image.png\")", "def _clean(self):\n if self.verbose:\n print(\"Removing all individual tif images\")\n tifs = glob.glob('%s*' % (self.indiv_page_prefix)) # all individual tifd\n for tif in tifs:\n os.remove(tif)", "def test_delete(self):\n delete_files.delete_raw_from_jpg(self.data['jpg']['path'], self.data['raw']['path'], self.target)\n self.assertFalse(os.path.isfile(os.path.join(self.data['raw']['path'], '3.raw')))", "def cleanup_temp_images():\n if options[\"destination_kickstart_image\"] != options[\"destination_midway_kickstart_image\"]:\n midway_kickstart = os.path.join(options[\"destination_path\"],\n options[\"destination_midway_kickstart_image\"])\n remove_file(midway_kickstart)\n if options[\"destination_system_image\"] != options[\"destination_midway_system_image\"]:\n midway_system = os.path.join(options[\"destination_path\"],\n options[\"destination_midway_system_image\"])\n remove_file(midway_system)", "def remove_missing_images(self):\n to_remove_idx = []\n for idx in self.imgs:\n img_meta = self.imgs[idx]\n path = self.__image_folder / img_meta[\"file_name\"]\n if not path.exists():\n # There could be paths that have whitespaces renamed (under windows)\n alternative_path = self.__image_folder / img_meta[\"file_name\"].replace(\n \" \", \"_\"\n )\n if not alternative_path.exists():\n del self.imgs[idx]\n to_remove_idx.append(idx)\n\n print(\"removed %d images\" % (len(to_remove_idx)))", "def cleanup_thumbnail(sender, instance, **kargs):\n if instance.file.name.endswith('.png'):\n delete(instance.file)", "def clean_image_folder():\n image_folder = \"{}/tmp_images\".format(os.getcwd())\n try:\n for the_file in os.listdir(image_folder):\n file_path = os.path.join(image_folder, the_file)\n os.unlink(file_path)\n except FileNotFoundError:\n os.mkdir(image_folder)", "def remove_files(self):\n logging.info('Removing image \\'' + self.name + 
'\\' files')\n\n # Remove image base directory\n if os.path.isdir(self.IMAGE_DIRECTORY):\n logging.debug('Delating directory ' + self.IMAGE_DIRECTORY)\n logging.debug('Executing \\'rm -rf ' + self.IMAGE_DIRECTORY + '\\'')\n shutil.rmtree(self.IMAGE_DIRECTORY)", "def delete_image(filename):\n # Delete image\n image_path = (Path(__file__).parent / f'../images{filename}').resolve()\n if os.path.exists(image_path):\n os.remove(image_path)", "def _unpublish_image(self, object_name, image_share_root):\n if image_share_root:\n published_file = os.path.join(image_share_root, object_name)\n\n ironic_utils.unlink_without_raise(published_file)", "def delete_image(self, event):\n remove_image = os.path.join(\n self._directory_path, \"{}{}\".format(self._image_id, \".jpg\")\n )\n try:\n os.remove(remove_image)\n _LOGGER.debug(\"Deleting old image %s\", remove_image)\n except OSError as error:\n if error.errno != errno.ENOENT:\n raise", "def delete_old_image():\n path_to_dir = os.getcwd()\n previous_number = ChangeWallpaper.get_last_number() - 1\n\n if os.path.isfile(path_to_dir + \"/wallpaper\" + str(previous_number) + \".jpg\"):\n os.remove(path_to_dir + \"/wallpaper\" +\n str(previous_number) + \".jpg\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the graph with all the champions, add edges to connect people of the same class.
def _add_edges(self): for class_ in self.champions_in_class.keys(): # For each class for champ in self.champions_in_class[class_]: # For each Champ of that class for champ_of_same_class in self.champions_in_class[class_]: # Loop to all the other champions of the same class. if champ != champ_of_same_class: # Don't connect to itself # print("Champ 1: {}, Champ 2: {}".format(champ,champ_of_same_class)) self.graph.addEdge(fromVert=champ, toVert=champ_of_same_class) # Connect Champ and all the champs of same class.
[ "def connect_all(self):\n # All classrooms are disconnected nodes\n for classroom in self.nodes.classrooms:\n a, b = funcs.naive_knn(classroom, self.nodes.hallways, k=2)\n d = funcs.project(a, b, classroom)\n\n self.add_edge(a, d, weight=funcs.euclidean_dist_nodes(a, d))\n self.add_edge(b, d, weight=funcs.euclidean_dist_nodes(b, d))\n self.add_edge(classroom, d, weight=funcs.euclidean_dist_nodes(classroom, d))", "def assembleGraph(self):\r\n for kmer1 in self.kmers:\r\n for kmer2 in self.kmers:\r\n if suffix(kmer1) == prefix(kmer2):\r\n self.adjacencyList.append((kmer1, kmer2))", "def add_edges(self, *nodes):\n for node in nodes:\n self.adjacent.add(node)\n node.adjacent.add(self)", "def add_edges_from(self, ebunch):\n for (source, target, new_attr) in ebunch:\n self.add_edge(source, target, new_attr)", "def _linkInstances(self):\n for (app, insts) in self.instances.items():\n edges = list(itertools.combinations(insts, 2))\n for edge in edges:\n self.edges.add(edge)\n self.weights[edge] = 1", "def _build_adjacency(self):\n for movie in self.movies:\n other_actors = (actor for actor in movie.actors if actor != self)\n for other_actor in other_actors:\n self._neighbors[other_actor].add(movie)", "def network_from_hiring_list(hiring_list): \n G = nx.DiGraph()\n for person, place in hiring_list:\n if person.phd_location is None:\n print person.facultyName\n add_weighted_edge(G, (person.phd_location, place))\n return G", "def connect_friends(self, person1, person2):\n\n\t\tperson1.adjacent.add(person2)\n\t\tperson2.adjacent.add(person1)", "def add_edge(people_list, node1, node2, degree_list = []):\n if len(degree_list) > np.maximum(node1, node2):\n degree_list[node1] += 1\n degree_list[node2] += 1\n people_list[node1].contacts.append(node2)\n people_list[node2].contacts.append(node1)", "def construct_graph(self):\r\n\t\tedges = self.generate_edges()\r\n\t\tfor edge in edges:\r\n\t\t\tself.insert_edge(edge[0],edge[1],edge[2]) # adds all the edges to graph\r", "def graph(self):\n pass", "def set_friends(self, person1, person2):\n\n person1.adjacency.add(person2)\n person2.adjacency.add(person1)", "def graph_w_edges():\n from graph import Graph\n new_graph = Graph()\n new_graph.add_edge(1, 3)\n new_graph.add_edge(3, 4)\n new_graph.add_edge(3, 5)\n new_graph.add_edge(5, 1)\n return new_graph", "def __create_graph_all_words(self):\n # for each of the parts of speach\n # connections are supported only for nouns & verbs\n for synset in wn.all_synsets():\n parent = synset\n children = parent.hyponyms()\n # self.__recurse_down_tree(parent, children)\n self.__add_to_graph(parent, children)", "def from_edges(cls, edges):\n graph = cls()\n for u, v in edges:\n if u not in graph.node:\n graph.add_node(u)\n if v not in graph.node:\n graph.add_node(v)\n graph.add_edge(u, v)", "def add_edges(self):\n for node_value in self.node_dict.values():\n for prefix_key, prefix_value in self.node_prefix.items():\n if node_value.get_suffix() == prefix_value \\\n and node_value.get_node_sequence() != prefix_key:\n node_value.add_connected_nodes(prefix_key)", "def disk_graph_captors(self, instance):\n G = nx.Graph()\n points_to_communicate_with = self.list_captors + [(0, 0)]\n G.add_nodes_from([(e[0], e[1]) for e in points_to_communicate_with])\n\n E_com = instance.neighbours_Rcom\n for u in points_to_communicate_with:\n for v in E_com[u]:\n if v in points_to_communicate_with:\n G.add_edge((u[0], u[1]), (v[0], v[1]))\n # G.add_edges_from([((u[0], u[1]), (v[0], v[1])) for v in E_com[u]])\n self.disk_graph_com = G", "def 
addWeightedEdges(G):\n for nodeA in G.nodes:\n for nodeB in G.nodes:\n shared = 2\n if nodeA != nodeB:\n for i in nodeA:\n if i in nodeB:\n shared *= shared \n else:#\n \"\"\"this avoids adding weight when the nodes do not \n share the same root/higher level set\"\"\"\n break\n if True:\n G.add_edge(nodeA,nodeB,weight=shared**2)", "def __create_graph(self):\n self.clear() \n self.__ordered_network()\n self.__create_new_random_connections()", "def _connect(self, v1, v2):\n v1.neighbours.append(v2)\n v2.neighbours.append(v1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all champs for each champ class in vert.champ
def find_all_champs_same_class_as(self, vert): start = self.getVertex(vert) # Root checked_classes = set() array_of_champs = {} # { 'yordle': set('kennen', ...), ...} # print("All of {}'s classes: {}".format(vert, start.champ.classes)) print("\n{}'s classes are: {}\n".format(vert.upper(), start.champ.classes)) for class_ in start.champ.classes: # O(3) Worst Case if class_ != None: # print("Checking {} class".format(class_)) vertices = set(self.getVertices()) clique = set() clique.add(start) for vertex in vertices - clique: # O(51) Worst # print("Comparing {} to {}".format(vert, vertex)) if class_ in vertex.champ.classes: # O(3) Worse matching_classes = set(start.champ.classes).intersection(set(vertex.champ.classes)) has_unchecked_match = False for match in matching_classes: # O(3) Worse if match not in checked_classes: has_unchecked_match = True # print("{} matches to {} by {} class".format(vertex, vert, match)) if has_unchecked_match == True: neighbor_of_all = True for v in clique: # O(5) Worse if vertex not in v.get_neighbors(): # O(7) Worse # print("Vertex {} and Vertex {} are not neighbors".format(vertex, v)) neighbor_of_all = False if neighbor_of_all == True: clique.add(vertex) array_of_champs[class_] = clique # O(1) return array_of_champs
[ "def get_champions_from_site(role):\n result = []\n # OP.GG classifies champions by tiers.\n appropriate_tiers = []\n mode = input(\"\\nHow do you feel like playing?\\n\" +\n \"1) Tryhard\\n\" +\n \"2) Not too troll\\n\" +\n \"3) Feed my ass off\\n\" +\n \"4) Just hit me with something, fam\\n\").upper()\n counter = 1\n while True:\n if (mode == \"TRYHARD\" or mode == \"1\" or mode == \"1)\"):\n appropriate_tiers.extend([\"OP\", \"1\"])\n break\n elif (mode == \"NOT TOO TROLL\" or mode == \"2\" or mode == \"2)\"):\n appropriate_tiers.extend([\"2\", \"3\"])\n break\n elif (mode == \"FEED MY ASS OFF\" or mode == \"3\" or mode == \"3)\"):\n appropriate_tiers.extend([\"4\", \"5\"])\n break\n elif (mode == \"JUST HIT ME WITH SOMETHING, FAM\" or mode == \"4\"\n or mode == \"4)\"):\n appropriate_tiers.extend([\"OP\", \"1\", \"2\", \"3\", \"4\", \"5\"])\n break\n else:\n # Exit program, if input is invalid 3 times in a row.\n if (counter > 2):\n print(\"\\nYou're trolling again\")\n sys.exit(1)\n counter += 1\n\n # Fetch contents of main champions webpage.\n r = requests.get(\"https://euw.op.gg/champion/statistics\")\n # Represent document as nested data structure with help of Beautiful Soup.\n soup = bs4.BeautifulSoup(r.text, \"lxml\")\n # Find the specific part of the HTML page, which represents\n # how strong champions are in a certain role. (This corresponds\n # to the right part of the webpage, under \"Champion Rankings\")\n tbody = soup.find(\"tbody\", {\"class\":\n \"champion-trend-tier-\" + role})\n # The contents of tbody are in the form of\n # <tr>...</tr>\n # <tr>...</tr>\n # .\n # .\n # .\n # ,where each component contains meta information about a certain champion\n # in that role.\n for tr in tbody.contents:\n # Somehow some contents + first and last one (always!) are blank lines.\n # Skip them.\n if (tr == \"\\n\"):\n continue\n # Explanation:\n # tr.contents[-2].contents[1] point to an <img /> component. The image\n # determines what tier the champion is, according to OP.GG. 
If the\n # tier of the champion is as wanted, the program goes on to extract\n # the name of the champion from a <td> component, which contains it.\n for tier in appropriate_tiers:\n if (tr.contents[-2].contents[1][\"src\"].endswith(tier + \".png\")):\n result.append(tr.contents[7].contents[1].contents[1].string)\n return result", "def parse_champs(self, map22, traits, character_folder):\n champ_entries = [x for x in map22.entries if x.type == \"TftShopData\"]\n champs = {}\n\n for champ in champ_entries:\n # always use lowercased name: required for files, and bin data is inconsistent\n name = champ.getv(\"mName\").lower()\n if name == \"tft_template\":\n continue\n\n self_path = os.path.join(character_folder, name, name + \".bin\")\n if not os.path.exists(self_path):\n continue\n\n tft_bin = BinFile(self_path)\n record = next(x for x in tft_bin.entries if x.type == \"TFTCharacterRecord\")\n if \"spellNames\" not in record:\n continue\n\n champ_traits = [] # trait paths, as hashes\n for trait in record.getv(\"mLinkedTraits\", []):\n if isinstance(trait, BinEmbedded):\n champ_traits.extend(field.value for field in trait.fields if field.name.h == 0x053A1F33)\n else:\n champ_traits.append(trait.h)\n\n spell_name = record.getv(\"spellNames\")[0]\n spell_name = spell_name.rsplit(\"/\", 1)[-1]\n ability = next(x.getv(\"mSpell\") for x in tft_bin.entries if x.type == \"SpellObject\" and x.getv(\"mScriptName\") == spell_name)\n ability_variables = [{\"name\": value.getv(\"mName\"), \"value\": value.getv(\"mValues\")} for value in ability.getv(\"mDataValues\", [])]\n rarity = champ.getv(\"mRarity\", 0) + 1\n\n champs[name] = ({\n \"apiName\": record.getv(\"mCharacterName\"),\n \"name\": champ.getv(0xC3143D66),\n \"cost\": rarity + int(rarity / 6),\n \"icon\": champ.getv(\"mIconPath\"),\n \"traits\": [traits[h][\"name\"] for h in champ_traits if h in traits],\n \"stats\": {\n \"hp\": record.getv(\"baseHP\"),\n \"mana\": record[\"primaryAbilityResource\"].value.getv(\"arBase\", 100),\n \"initialMana\": record.getv(\"mInitialMana\", 0),\n \"damage\": record.getv(\"BaseDamage\"),\n \"armor\": record.getv(\"baseArmor\"),\n \"magicResist\": record.getv(\"baseSpellBlock\"),\n \"critMultiplier\": record.getv(\"critDamageMultiplier\"),\n \"critChance\": record.getv(\"baseCritChance\"),\n \"attackSpeed\": record.getv(\"attackSpeed\"),\n \"range\": record.getv(\"attackRange\", 0) // 180,\n },\n \"ability\": {\n \"name\": champ.getv(0x87A69A5E),\n \"desc\": champ.getv(0xBC4F18B3),\n \"icon\": champ.getv(\"mPortraitIconPath\"),\n \"variables\": ability_variables,\n },\n }, champ_traits)\n\n return champs", "def get_clubs(soup):\n return soup.findAll('div', {'class': 'box'})", "def _allInstances(cls):\n return pyalaocl.asSet(_theSession().findByClass(cls))", "def get_heroes(self, hero_name=None, class_name=None):\n\n try:\n # Construct SQL query\n sql_where = \"WHERE\"\n sql_parameters = ()\n \n # hero_name\n if hero_name is not None:\n sql_where += \" hero_name like ?\"\n sql_parameters += (\"%\" + hero_name + \"%\",)\n \n # class_name\n if class_name is not None:\n if len(sql_where) > 5:\n sql_where += \" AND\"\n sql_where += \" class_name like ?\"\n sql_parameters += (class_name,)\n\n # Create cursor\n cursor = self.conn.cursor()\n \n # Execute query\n if len(sql_where) > 5:\n cursor.execute(\"\"\"SELECT hero_name FROM heroes\n INNER JOIN classes ON hero_classkey=class_key\n \"\"\" + sql_where, sql_parameters)\n else:\n cursor.execute(\"SELECT hero_name FROM heroes\")\n \n # Get all of the matching rows\n 
result = cursor.fetchall()\n \n # Return result as a list (of hero names)\n return [res[0] for res in result]\n\n except Error as e:\n print(\"Error in get_heroes:\", e)\n return []", "def get_classrooms(char):\n \n if char.level < 2:\n return []\n classrooms = []\n page = char.visit(province_url).read()\n\n for m in re.finditer(\"(textePage\\[2\\]\\[1\\]\\[)(\\d+)(\\]\\[\\'Texte\\'\\] = \\')\", page, re.IGNORECASE):\n classroom = Classroom(int(m.group(2)))\n start = m.end(0)\n end = page.find(\"';\", start)\n text = page[start:end]\n soup = BeautifulSoup(text)\n\n classroom.teacher = soup.a.text\n\n m = re.search(\"(Free\\s*places\\s*:\\s*)(\\d+)\", soup.text, re.IGNORECASE)\n classroom.free_places = int(m.group(2))\n \n m = re.search(\"(Total\\s*)(\\d+).(\\d+)\", soup.text, re.IGNORECASE)\n classroom.fee = int(m.group(2)) * 100 + int(m.group(3))\n\n m = re.search(\"(Teaching\\s*:\\s*)(\\w+.*)(\\s*Free)\", soup.text, re.IGNORECASE)\n classroom.subject = m.group(2).lower()\n\n classrooms.append(classroom)\n\n return classrooms", "def get_climbs_in_climb(self):\r\n return [climb.name_crag() for climb in self.get_climbs()]", "def __iter__( self ) :\n\n for nuclide in self.__nuclides : yield nuclide", "def get_full_credits(self, title_id, include_episodes=False):\n\n for cast_member in self.get_full_cast(title_id, include_episodes=include_episodes):\n yield cast_member\n for crew_member in self.get_full_crew(title_id):\n yield crew_member", "def powers(self) -> List[List[str]]:\n powers = []\n for primary in self.primaryChans:\n for secondary in self.secondaryChans:\n powers.append([primary, secondary])\n return powers", "def collect_comics(comic_classes):\n return chain.from_iterable(c.get_comics() for c in comic_classes)", "def getPlayerPieces(self, player_turn):\r\n pieces = []\r\n\r\n # Iterate through board to find pieces\r\n for coordinate, player in self.board:\r\n if player == player_turn:\r\n pieces.append(coordinate)\r\n return pieces", "def champion(champ_id, api_key=None, region=None, **kwargs):\n region = get_region(region)\n url = '{}{}/{}/champion/{}'.format(api_url, region, version, champ_id)\n return get_data_from_api(api_key, url, **kwargs)", "def get_list(soup, class_):\n feature_list = [feature.text.strip('\\n\\t') for feature in soup.find_all('div', class_)]\n return feature_list", "def iter_all(class_name):\n ...", "def show_all_matches(self, tournament: Tournament):\n list_matches = []\n print(\"Liste de tous les matchs d'un tounoi :\")\n for elt in tournament.list_rounds:\n for e in elt.matches_list:\n list_matches.append(e)\n for elt in list_matches:\n self.show_one_match(elt)", "def all_for(klass, profile_id):\n data_file = klass.data_file(profile_id)\n if not os.path.exists(data_file):\n raise RuntimeError('No match data available for {}'.format(profile_id))\n matches = []\n with open(data_file) as f:\n reader = csv.reader(f)\n for row in reader:\n try:\n matches.append(klass.from_csv(row))\n except ValueError:\n pass\n return matches", "def find_all_comms_channel_vtms(self):\n comms_channel_vtms = []\n non_comms_channel_vtms = []\n\n all_vtms = self.session.get(\n '{url}/api/tmcm/2.8/instance'.format(url=self.url),\n auth=self.basic_auth,\n verify=self.sd_api_cert_file\n ).json()['children']\n\n for vtm in tqdm(all_vtms, desc='Query vTMs ', ascii=True):\n vtm_details = self.session.get(\n '{url}/api/tmcm/2.8/instance/{vtm_name}'.format(\n url=self.url, vtm_name=vtm['name']),\n auth=self.basic_auth,\n verify=self.sd_api_cert_file\n ).json()\n\n # Ignore any deleted 
vTMs\n if vtm_details['status'] == 'Deleted':\n continue\n\n if vtm_details['client_cert'] == \"\":\n non_comms_channel_vtms.append(vtm)\n else:\n comms_channel_vtms.append(vtm)\n\n # Return just the array of names\n return [d['name'] for d in comms_channel_vtms], [d['name'] for d in non_comms_channel_vtms]", "def get_all_chats(self):\n\n with ChatMapper() as mapper:\n return mapper.find_all()", "def get_ap_champions_by_match(match):\n champion_list = []\n participants = match[\"participants\"]\n for p in participants:\n if participant_built_ap(p):\n # Found a player who built AP. Include the champion's ID and whether they won.\n ap_champ = [p[\"championId\"], p[\"stats\"][\"winner\"]]\n champion_list.append(ap_champ)\n return champion_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds an edge to the graph and corresponding vertices to the sets of sources/stocks
def add_edge(self, v_from, v_to): self.v_sources.add(v_from) self.v_stocks.add(v_to) if v_from in self.edges: self.edges[v_from].append(v_to) else: self.edges[v_from] = [v_to,]
[ "def add_edge(source, target, label):\n if target not in elements_set: return\n if simple:\n if source != target:\n result.add_edge([source, target])\n else:\n result.add_edge([source, target, label])", "def append_edge(self, edge):", "def add_edge(self, edge):\n\t\tedge = set(edge)\n\t\t(vertex, neighbor) = tuple(edge)\n\t\tif vertex not in self.g:\n\t\t\tself.g[vertex] = [neighbor]\n\t\telse:\n\t\t\tself.g[vertex].append(neighbor)\n\t\tprint \"Added Edge : {}\".format(edge)", "def add_edge(self, destination):\r\n self.edges.append(Graph.Edge(self, destination))", "def add_edge(self, v1, v2, weight):", "def add_edge(self, edge):\n edge = set(edge)\n (vertex1, vertex2) = tuple(edge)\n if vertex1 in self.__graph_dict:\n self.__graph", "def add_edge(self, edge):\n edge = set(edge)\n (label1, label2) = tuple(edge)\n if label1 in self.vertices() and label2 in self.vertices():\n vertex1 = self[label1]\n vertex2 = self[label2]\n vertex1.add_edge(vertex2)\n vertex2.add_edge(vertex1) # assume undirected", "def add_source_sink_graph(graph_name):\n source_end = min(\n [graph_name.vertices[vertex].start for vertex in graph_name.vertices]\n )\n source_start = source_end\n sink_start = max(\n [graph_name.vertices[vertex].end for vertex in graph_name.vertices]\n )\n sink_end = sink_start\n graph_name.add_vertex(Vertex(SOURCE, source_start, source_end, 0))\n graph_name.add_vertex(Vertex(SINK, sink_start, sink_end, 0))\n\n # add edges from Source to each vertex\n for vertex in graph_name.vertices:\n if vertex != SOURCE:\n graph_name.add_edge(SOURCE, vertex)\n\n # add edges from each vertex to Sink\n for vertex in graph_name.vertices:\n if vertex != SINK:\n graph_name.add_edge(vertex, SINK)\n return # all", "def addEdge(self,edge):\r\n self.adj.append(edge)", "def add_edge(self, weight, attributes, source_node, terminal_node):\n # if the source node is not in the left nodeset\n if source_node.get_name() not in self.get_left_node_names():\n self.add_left_node(source_node) # add the source node\n\n # if the terminal node is not in the right nodeset\n if terminal_node.get_name() not in self.get_right_node_names():\n self.add_right_node(terminal_node) # add the terminal node\n\n edge = Edge(weight, attributes, source_node, terminal_node) # create the Edge object\n source_node.add_outgoing_edge(edge) # connect the source node and the terminal node using the edge\n terminal_node.add_incoming_edge(edge)\n\n self.__check_validity() # check if graph is bipartite - throws exception if not", "def add_edge(\n self, source_vertex: T, destination_vertex: T\n ) -> GraphAdjacencyList[T]:\n\n if not self.directed: # For undirected graphs\n # if both source vertex and destination vertex are both present in the\n # adjacency list, add destination vertex to source vertex list of adjacent\n # vertices and add source vertex to destination vertex list of adjacent\n # vertices.\n if source_vertex in self.adj_list and destination_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n self.adj_list[destination_vertex].append(source_vertex)\n # if only source vertex is present in adjacency list, add destination vertex\n # to source vertex list of adjacent vertices, then create a new vertex with\n # destination vertex as key and assign a list containing the source vertex\n # as it's first adjacent vertex.\n elif source_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n self.adj_list[destination_vertex] = [source_vertex]\n # if only destination vertex is present in 
adjacency list, add source vertex\n # to destination vertex list of adjacent vertices, then create a new vertex\n # with source vertex as key and assign a list containing the source vertex\n # as it's first adjacent vertex.\n elif destination_vertex in self.adj_list:\n self.adj_list[destination_vertex].append(source_vertex)\n self.adj_list[source_vertex] = [destination_vertex]\n # if both source vertex and destination vertex are not present in adjacency\n # list, create a new vertex with source vertex as key and assign a list\n # containing the destination vertex as it's first adjacent vertex also\n # create a new vertex with destination vertex as key and assign a list\n # containing the source vertex as it's first adjacent vertex.\n else:\n self.adj_list[source_vertex] = [destination_vertex]\n self.adj_list[destination_vertex] = [source_vertex]\n else: # For directed graphs\n # if both source vertex and destination vertex are present in adjacency\n # list, add destination vertex to source vertex list of adjacent vertices.\n if source_vertex in self.adj_list and destination_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n # if only source vertex is present in adjacency list, add destination\n # vertex to source vertex list of adjacent vertices and create a new vertex\n # with destination vertex as key, which has no adjacent vertex\n elif source_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n self.adj_list[destination_vertex] = []\n # if only destination vertex is present in adjacency list, create a new\n # vertex with source vertex as key and assign a list containing destination\n # vertex as first adjacent vertex\n elif destination_vertex in self.adj_list:\n self.adj_list[source_vertex] = [destination_vertex]\n # if both source vertex and destination vertex are not present in adjacency\n # list, create a new vertex with source vertex as key and a list containing\n # destination vertex as it's first adjacent vertex. 
Then create a new vertex\n # with destination vertex as key, which has no adjacent vertex\n else:\n self.adj_list[source_vertex] = [destination_vertex]\n self.adj_list[destination_vertex] = []\n\n return self", "def add_edges(self):\n\t\twith open(self.fname, 'a') as f:\n\t\t\tf.write(\"%%%%%%%%%% ADDING EDGES %%%%%%%%%%%%%\\n\\n\")\n\t\t\tfor v in self.G.nodes:\t\t\t\n\t\t\t\tfor w in self.G.nodes:\n\t\t\t\t\tif (v, w) in self.G.edges:\n\t\t\t\t\t\tf.write('\\t\\\\Edge({})({})\\n'.format(self.vtoid[v], self.vtoid[w]))", "def add_edges_from(self, ebunch):\n for (source, target, new_attr) in ebunch:\n self.add_edge(source, target, new_attr)", "def add_edge(self, v1, v2):\n self.__graph[v1].append(v2)", "def add_edge(self, node1, node2):\n node1.add_edges(node2)", "def add_edges(self, edges):\n if not edges:\n return\n\n for e in edges:\n self.add_edge(e)", "def add_edge(self, vertex_id1, vertex_id2):\n v2 = self.get_vertex(vertex_id2)\n self.__vertex_dict[vertex_id1].add_neighbor(v2)\n if not self.__is_directed:\n self.__vertex_dict[vertex_id2].add_neighbor(self.__vertex_dict[vertex_id1])", "def add_edge(graph, edge):\n edge = set(edge)\n vertex1 = edge.pop();\n if not edge:\n vertex2=vertex1\n else: vertex2 = edge.pop()\n if vertex1 in vertices(graph) and vertex2 in vertices(graph):\n graph[vertex1].append(vertex2)\n if vertex1 != vertex2:\n graph[vertex2].append(vertex1)", "def add_edge(self, vertex_id1, vertex_id2):\n vertex1 = self.get_vertex(vertex_id1)\n vertex2 = self.get_vertex(vertex_id2)\n vertex1.add_neighbor(vertex2)\n if self.__is_directed == False:\n vertex2.add_neighbor(vertex1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a vertex to the set of source vertices (suppose that a distinct vertex is a source)
def add_vertex(self, v): self.v_sources.add(v)
[ "def append_vertex(self, vertex):", "def add_vertex(self, v):\n pass", "def add_vertex(self, vertex):\r\n self.vertices.append(vertex)", "def add_vertex(self, id, vertex):\n \n # Check if vertex with given id already exists.\n if id in self.vertices:\n return\n \n # Check if each vertex in adjacent_to exists.\n for i in vertex.adjacent_to:\n if not i in self.vertices:\n return\n \n # Add given vertex at given id.\n self.vertices[id] = vertex\n \n # Add id to adjacent_to of each vertex in vertex's adjacent_to.\n for i in vertex.adjacent_to:\n self.vertices[i].add_edge(id)", "def add_vertex(self, vertex):\n if vertex.label not in self.vertices():\n self.__graph_dict[vertex.label] = vertex", "def add_vertex(self, v):\n if v not in self.vertices.keys(): \n self.vertices[v] = [False,[],0]", "def add_edge(\n self, source_vertex: T, destination_vertex: T\n ) -> GraphAdjacencyList[T]:\n\n if not self.directed: # For undirected graphs\n # if both source vertex and destination vertex are both present in the\n # adjacency list, add destination vertex to source vertex list of adjacent\n # vertices and add source vertex to destination vertex list of adjacent\n # vertices.\n if source_vertex in self.adj_list and destination_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n self.adj_list[destination_vertex].append(source_vertex)\n # if only source vertex is present in adjacency list, add destination vertex\n # to source vertex list of adjacent vertices, then create a new vertex with\n # destination vertex as key and assign a list containing the source vertex\n # as it's first adjacent vertex.\n elif source_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n self.adj_list[destination_vertex] = [source_vertex]\n # if only destination vertex is present in adjacency list, add source vertex\n # to destination vertex list of adjacent vertices, then create a new vertex\n # with source vertex as key and assign a list containing the source vertex\n # as it's first adjacent vertex.\n elif destination_vertex in self.adj_list:\n self.adj_list[destination_vertex].append(source_vertex)\n self.adj_list[source_vertex] = [destination_vertex]\n # if both source vertex and destination vertex are not present in adjacency\n # list, create a new vertex with source vertex as key and assign a list\n # containing the destination vertex as it's first adjacent vertex also\n # create a new vertex with destination vertex as key and assign a list\n # containing the source vertex as it's first adjacent vertex.\n else:\n self.adj_list[source_vertex] = [destination_vertex]\n self.adj_list[destination_vertex] = [source_vertex]\n else: # For directed graphs\n # if both source vertex and destination vertex are present in adjacency\n # list, add destination vertex to source vertex list of adjacent vertices.\n if source_vertex in self.adj_list and destination_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n # if only source vertex is present in adjacency list, add destination\n # vertex to source vertex list of adjacent vertices and create a new vertex\n # with destination vertex as key, which has no adjacent vertex\n elif source_vertex in self.adj_list:\n self.adj_list[source_vertex].append(destination_vertex)\n self.adj_list[destination_vertex] = []\n # if only destination vertex is present in adjacency list, create a new\n # vertex with source vertex as key and assign a list containing destination\n # vertex as first adjacent vertex\n 
elif destination_vertex in self.adj_list:\n self.adj_list[source_vertex] = [destination_vertex]\n # if both source vertex and destination vertex are not present in adjacency\n # list, create a new vertex with source vertex as key and a list containing\n # destination vertex as it's first adjacent vertex. Then create a new vertex\n # with destination vertex as key, which has no adjacent vertex\n else:\n self.adj_list[source_vertex] = [destination_vertex]\n self.adj_list[destination_vertex] = []\n\n return self", "def add_vertices(self, vertices=[]):\n for vertex in vertices:\n if vertex not in self._edges:\n self._edges[vertex] = set()", "def add_vertex(self, vertex):\n\n\t\tself.vertices.append(vertex)", "def addVertex(self, label):\n index = len(self.index)\n self.index[label] = index\n self.vertex[index] = label", "def add_vertex(self, vertex):\n if vertex not in self.__graph_dict:\n self.__graph_dict[vertex] = []\n self.__directed_dict[vertex] = []", "def insertVertex(self, index, v):\n self.vertexList.insert(index, v)\n \n if self.augVertexList is None:\n self.augVertexList = {generator: \\\n [StackingVertex(vertex, [], [], [], []) for vertex in self.vertexList]\\\n for generator in self.complex.oneCells}\n \n else:\n for generator in self.augVertexList.keys():\n self.augVertexList[generator].insert( \\\n index, StackingVertex(v, [], [], [], []))", "def insert_vertex(self, x=None):\n v = self.Vertex(x)\n self._outgoing[v] = { }\n if self.is_directed():\n self._incoming[v] = { } # need distinct map for incoming edges\n return v", "def graph_vertex( g, i, add_if_necessary = False ):\n if add_if_necessary and i not in g.id_to_vertex:\n v = g.add_vertex()\n g.id_to_vertex[ i ] = v\n g.vertex_properties[ 'vertex_id' ][ v ] = i\n return g.id_to_vertex[ i ]", "def add_vertices(self, vertices):\n if not vertices:\n return\n\n for v in vertices:\n self.add_vertex(v)", "def add_vertex(self, vertex: str):\n Logger.log(Logger.LogLevel.VERBOSE,\n f\"Adding vertex {self.vertex_count}: {vertex}\")\n self.vertices[self.vertex_count] = vertex\n self.vertex_count += 1", "def add_vertex(self, vertex):\n if isinstance(vertex, Vertex) and vertex.name not in self.vertices:\n self.vertices[vertex.name] = vertex\n return True\n else:\n return False", "def add_edge(self, v_from, v_to):\n self.v_sources.add(v_from)\n self.v_stocks.add(v_to)\n if v_from in self.edges:\n self.edges[v_from].append(v_to)\n else:\n self.edges[v_from] = [v_to,]", "def add_vertex(self):\n u = self.g.add_vertex()\n return u" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an index of the first letter that is different in two strings, -1 if not found
def first_uncommon_letter(str1, str2): i = 0 min_len = min(len(str1), len(str2)) while str1[i] == str2[i]: i += 1 if i == min_len: return -1 return i
[ "def differs_by_one_char_same_len(str1, str2):\n if len(str1) != len(str2):\n raise ValueError(\"Strings aren't the same length\")\n\n one_difference_found = False\n found_index = 0\n for i, (chr1, chr2) in enumerate(zip(str1, str2)):\n if chr1 != chr2:\n if one_difference_found:\n return -1\n one_difference_found = True\n found_index = i\n return found_index", "def max_prefix_match(str1, str2):\n result = 0\n for (char1, char2) in zip(str1, str2):\n assert char1 in \"ACGT\"\n if char1 in DEGENERACY_MAP[char2]:\n result += 1\n else:\n break\n return result", "def one_char_diff(first, second):\n differences = sum((x != y) for x, y in zip(first, second))\n return differences == 1", "def difference_between_words(a, b):\n a = a.lower()\n b = b.lower()\n if a == b:\n return 100\n zipped = zip(a, b) # give list of tuples (of letters at each index)\n difference = sum(1 for e in zipped if e[0] != e[1]) # count tuples with non matching elements\n difference = difference + abs(len(a) - len(b))\n return difference", "def max_suffix_match(str1, str2):\n result = 0\n for (char1, char2) in zip(str1[::-1], str2[::-1]):\n assert char1 in \"ACGT\"\n if char1 in DEGENERACY_MAP[char2]:\n result += 1\n else:\n break\n return result", "def getEditDist(str1, str2):\n assert(len(str1)==len(str2))\n str1 = str1.upper()\n str2 = str2.upper()\n\n editDist = 0\n for c1, c2 in zip(str1, str2):\n if c1!=c2:\n editDist +=1\n return editDist", "def ft_strcmp(s1, s2):\n\ti = 0\n\tlen1 = len(s1)\n\tlen2 = len(s2)\n\twhile i < len1 and i < len2 and s1[i] == s2[i]:\n\t\ti += 1\n\tif i >= len1 and i >= len2:\n\t\treturn 0\n\telif i >= len1 :\n\t\treturn -ord(s2[i])\n\telif i>= len2 :\n\t\treturn ord(s1[i])\n\telse :\n\t\treturn ord(s1[i]) - ord(s2[i])", "def common_isuffix(*strings):\n i = -1\n for i in xrange(0, min(len(s) for s in strings)):\n if not eq(*(s[len(s) - i - 1] for s in strings)):\n return i\n return i + 1", "def end_other(a, b):\n if len(a) < len(b):\n result = b[-len(a):].lower() == a.lower()\n else:\n result = a[-len(b):].lower() == b.lower()\n return result", "def twoStrings(s1, s2):\n #brute force solution O(len(s1) * len(s2))\n # for c1 in s1:\n # for c2 in s2:\n # if c1 == c2:\n # return 'YES'\n # return 'NO'\n\n # set solution O(len(s1)) since 'in' keyword is O(1) time\n all_chars = dict.fromkeys(set(s2), 1)\n for c in s1:\n if c in all_chars.keys():\n return 'YES'\n return 'NO'", "def test_it_returns_the_second_index_of_the_char(self):\n self.assertEqual(second_index(\"sims\", \"s\"), 3)\n self.assertEqual(second_index(\"find the river\", \"e\"), 12)\n self.assertEqual(second_index(\"hi\", \" \"), None)\n self.assertEqual(second_index(\"three occurrences\", \"r\"), 10)", "def commonCharacterCount(s1, s2):\r\n\r\n\t# number of similar characters.\r\n\tcounter = 0\r\n\r\n\t# mutable lists to hold characters of the two strings.\r\n\tls1 = list()\r\n\tls2 = list()\r\n\r\n\t# Append characters of strings to the two lists.\r\n\tfor c in s1:\r\n\t\tls1.append(c)\r\n\tfor c in s2:\r\n\t\tls2.append(c)\r\n\r\n\t# Compare both Strings\r\n\tfor indx, value in enumerate(ls1):\r\n\t\tfor indx2,value2 in enumerate(ls2):\r\n\r\n\t\t\t# increment counter, and remove character from second string to avoid duplicate characters in both lists.\r\n\t\t\tif (value == value2):\r\n\t\t\t\tcounter = counter + 1\r\n\t\t\t\tls2.pop(indx2)\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\treturn counter", "def longest_common_prefix_len(a, b):\n for i, (x, y) in enumerate(zip(a, b)):\n if x != y:\n return i\n return i + 1", "def 
max_prefix(self,b):\n word1 = self.name\n word2 = b.name\n index = 1\n if (len(word1) or len(word2)) < 1:\n return 0\n while index <= len(word1):\n if word1[0:index] != word2[0:index]:\n return index\n index += 1\n return index", "def jaroDistance(string_a, string_b):\n\n a_len = len(string_a)\n b_len = len(string_b)\n\n if 0 == a_len or 0 == b_len:\n # One of the strings is empty, must return no similarity\n return 0.0\n\n # Max length, as part of the definition of Jaro Distance\n max_range = max(0, max(a_len, b_len) // 2 - 1)\n\n # Arrays that represent whether or not the character\n # at the specified index is a match\n a_match = [False] * a_len\n b_match = [False] * b_len\n\n char_matches = 0\n for a_idx in range(a_len):\n # Represents the sliding window we use to determine matches\n min_idx = max(a_idx - max_range, 0)\n max_idx = min(a_idx + max_range + 1, b_len)\n\n if min_idx >= max_idx:\n # Means we ran past the end of string b - nothing left to compare\n break\n\n for b_idx in range(min_idx, max_idx):\n if not b_match[b_idx] and string_a[a_idx] == string_b[b_idx]:\n # Found a new match\n a_match[a_idx] = True\n b_match[b_idx] = True\n char_matches += 1\n break\n\n if 0 == char_matches:\n # If no characters match, then we must return 0.\n return 0.0\n\n a_pos = [0] * char_matches\n b_pos = [0] * char_matches\n\n pos_idx = 0\n for a_idx in range(a_len):\n if a_match[a_idx]:\n a_pos[pos_idx] = a_idx\n pos_idx += 1\n\n pos_idx = 0\n for b_idx in range(b_len):\n if b_match[b_idx]:\n b_pos[pos_idx] = b_idx\n pos_idx += 1\n\n transpositions = 0\n for i in range(char_matches):\n if string_a[a_pos[i]] != string_b[b_pos[i]]:\n transpositions += 1\n\n return JARO_WEIGHT_STRING_A * char_matches / a_len + \\\n JARO_WEIGHT_STRING_B * char_matches / b_len + \\\n JARO_WEIGHT_TRANSPOSITIONS * (char_matches - transpositions // 2) / char_matches", "def approximate_match(A,B):\n lA=A.split(' ')\n lB=B.split(' ')\n result=0\n for i in lA:\n if i in lB:\n result+=1\n return result>=1", "def letter_difference(self, letter1, letter2):\n if letter1 not in self:\n raise IndexError(\"%s is not a letter\" % letter1)\n if letter2 not in self:\n raise IndexError(\"%s is not a letter\" % letter2)\n return (self.index(letter2) - self.index(letter1)) % len(self)", "def common_suffix(text1, text2):\n # Quick check for common null cases.\n if not text1 or not text2 or text1[-1] != text2[-1]:\n return 0\n # Binary search.\n # Performance analysis: https://neil.fraser.name/news/2007/10/09/\n pointermin = 0\n\n # TODO: move as args\n len_text1 = len(text1)\n len_text2 = len(text2)\n\n pointermax = min(len_text1, len_text2)\n pointermid = pointermax\n pointerend = 0\n\n while pointermin < pointermid:\n if (text1[-pointermid:len_text1 - pointerend] == text2[-pointermid:len(text2) - pointerend]):\n pointermin = pointermid\n pointerend = pointermin\n else:\n pointermax = pointermid\n pointermid = (pointermax - pointermin) // 2 + pointermin\n return pointermid", "def exist_matching(s1, s2):\n if (len(s1) != len(s2)):\n return False\n\n mapping = {}\n\n #Check character of s1 and s2 one by one\n for i in range(len(s1)):\n if s1[i] not in mapping.keys():\n mapping[s1[i]] = s2[i]\n else:\n if mapping[s1[i]] != s2[i]:\n return False\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }