query
stringlengths 9
9.05k
| document
stringlengths 10
222k
| negatives
sequencelengths 19
20
| metadata
dict |
---|---|---|---|
Sets the payment_method of this ChannelOrderRequest. | def payment_method(self, payment_method):
if (self.local_vars_configuration.client_side_validation and
payment_method is not None and len(payment_method) > 50):
raise ValueError("Invalid value for `payment_method`, length must be less than or equal to `50`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
payment_method is not None and len(payment_method) < 0):
raise ValueError("Invalid value for `payment_method`, length must be greater than or equal to `0`") # noqa: E501
self._payment_method = payment_method | [
"def payment_method_type(self, payment_method_type):\n if payment_method_type is None:\n raise ValueError(\"Invalid value for `payment_method_type`, must not be `None`\")\n\n self._payment_method_type = payment_method_type",
"def setPayment(self, payment):\n self.payment = payment",
"def payment_type(self, payment_type):\n allowed_values = [\"card\", \"bank_account\"]\n if payment_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `payment_type` ({0}), must be one of {1}\"\n .format(payment_type, allowed_values)\n )\n\n self._payment_type = payment_type",
"def delivery_method(self, delivery_method):\n allowed_values = [\"DIGITAL\", \"SHIPPING\"] # noqa: E501\n if delivery_method not in allowed_values:\n raise ValueError(\n \"Invalid value for `delivery_method` ({0}), must be one of {1}\" # noqa: E501\n .format(delivery_method, allowed_values)\n )\n\n self._delivery_method = delivery_method",
"def set_payment_method(ctx, card_number, card_exp_month, card_exp_year, card_cvc):\n gigalixir_payment_method.update(ctx.obj['host'], card_number, card_exp_month, card_exp_year, card_cvc)",
"def payment_url(self, payment_url):\n\n self._payment_url = payment_url",
"def payment_method_type(self):\n return self._payment_method_type",
"def createPaymentMethod(self):\n request = self._request\n paymentMethod = stripe.PaymentMethod.create(\n type=request['type'],\n card=request['card'],\n )\n self._PaymentMethod = paymentMethod\n return self._PaymentMethod",
"def contact_payment(self, contact_payment):\n\n self._contact_payment = contact_payment",
"def accounting_method(self, accounting_method):\n allowed_values = [\"accrual\", \"cash\"] # noqa: E501\n if (self._configuration.client_side_validation and\n accounting_method not in allowed_values):\n raise ValueError(\n \"Invalid value for `accounting_method` ({0}), must be one of {1}\" # noqa: E501\n .format(accounting_method, allowed_values)\n )\n\n self._accounting_method = accounting_method",
"def _set_request_method(self, method):\n self._request_method = method",
"def contact_method(self, contact_method):\n if contact_method is None:\n raise ValueError(\"Invalid value for `contact_method`, must not be `None`\") # noqa: E501\n\n self._contact_method = contact_method",
"def authorization_method(self, authorization_method):\n\n self._authorization_method = authorization_method",
"def SendPaymentV2(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def set_bill_payment_id(self, bill_payment_id):\n self.bill_payment_id = bill_payment_id",
"def shipping_method_accounting_code(self, shipping_method_accounting_code):\n\n self._shipping_method_accounting_code = shipping_method_accounting_code",
"def get_payment_type(self):\n return self.payment_type",
"def SendPayment(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def payment_time(self, payment_time):\n if (self.local_vars_configuration.client_side_validation and\n payment_time is not None and not re.search(r'YYYY-MM-DD HH:mm:ss', payment_time)): # noqa: E501\n raise ValueError(r\"Invalid value for `payment_time`, must be a follow pattern or equal to `/YYYY-MM-DD HH:mm:ss/`\") # noqa: E501\n\n self._payment_time = payment_time"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the payment_reference_no of this ChannelOrderRequest. | def payment_reference_no(self, payment_reference_no):
if (self.local_vars_configuration.client_side_validation and
payment_reference_no is not None and len(payment_reference_no) > 250):
raise ValueError("Invalid value for `payment_reference_no`, length must be less than or equal to `250`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
payment_reference_no is not None and len(payment_reference_no) < 0):
raise ValueError("Invalid value for `payment_reference_no`, length must be greater than or equal to `0`") # noqa: E501
self._payment_reference_no = payment_reference_no | [
"def contact_payment(self, contact_payment):\n\n self._contact_payment = contact_payment",
"def set_bill_payment_id(self, bill_payment_id):\n self.bill_payment_id = bill_payment_id",
"def card_authorization_reference_number(self, card_authorization_reference_number):\n\n self._card_authorization_reference_number = card_authorization_reference_number",
"def setPayment(self, payment):\n self.payment = payment",
"def ref_number(self, ref_number):\n if ref_number is not None and len(ref_number) > 63:\n raise ValueError(\"Invalid value for `ref_number`, length must be less than or equal to `63`\")\n\n self._ref_number = ref_number",
"def channel_order_no(self, channel_order_no):\n if self.local_vars_configuration.client_side_validation and channel_order_no is None: # noqa: E501\n raise ValueError(\"Invalid value for `channel_order_no`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) > 60):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be less than or equal to `60`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) < 0):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._channel_order_no = channel_order_no",
"def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number",
"def payment_url(self, payment_url):\n\n self._payment_url = payment_url",
"def charge_payment_order(\n payment_order_no: str,\n body: Optional[PaymentOrderChargeRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = ChargePaymentOrder.create(\n payment_order_no=payment_order_no,\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"async def charge_payment_order_async(\n payment_order_no: str,\n body: Optional[PaymentOrderChargeRequest] = None,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = ChargePaymentOrder.create(\n payment_order_no=payment_order_no,\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )",
"def set_carrier_data_reference(self, reference):\n value, name = reference, 'set_carrier_data_reference()'\n self.carrier_data_reference = self._value_to_latin(value, name)",
"def bill_number(self, bill_number):\n self._bill_number = bill_number",
"def set_payment_comment(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_PAYMENT_COMMENT).\n send_keys(value))",
"def card_verification_number_token(self, card_verification_number_token):\n\n self._card_verification_number_token = card_verification_number_token",
"def send_order(self, order):\n\n # Takes the additional action of adding an order that is about to be sent\n # to a dictionary that keeps track of objects using their reference.\n\n if order.ref is None:\n order.ref = self._increment_counter(order.market.item, \"n\")\n self._orders_waiting_ackn[order.market.item][order.ref] = order\n super().send_order(order)",
"def setReference(self, reference: 'char const *') -> \"void\":\n return _coin.ScXMLReferenceDataObj_setReference(self, reference)",
"def optSetRefNr(*args):\n return _optcc.optSetRefNr(*args)",
"def payment_cancelled_view(request):\n ext_ref = request.params.get('payUReference',\n request.params.get('PayUReference'))\n order = Order.by_external_reference_number(ext_ref)\n order.status = 'cancelled'\n return {'order': order}",
"def delivery_charge(self, delivery_charge):\n\n self._delivery_charge = delivery_charge"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the shipping_costs_incl_vat of this ChannelOrderRequest. | def shipping_costs_incl_vat(self, shipping_costs_incl_vat):
if self.local_vars_configuration.client_side_validation and shipping_costs_incl_vat is None: # noqa: E501
raise ValueError("Invalid value for `shipping_costs_incl_vat`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
shipping_costs_incl_vat is not None and shipping_costs_incl_vat < 0): # noqa: E501
raise ValueError("Invalid value for `shipping_costs_incl_vat`, must be a value greater than or equal to `0`") # noqa: E501
self._shipping_costs_incl_vat = shipping_costs_incl_vat | [
"def set_require_confirmed_shipping( self, required ):\n\n req = '1' if required else '0'\n self._nvp_request['REQCONFIRMSHIPPING'] = req",
"def adjusted_shipping(self, adjusted_shipping):\n\n self._adjusted_shipping = adjusted_shipping",
"def update_total(self):\n self.order_total = (\n self.lineitems.aggregate(Sum(\"lineitem_total\"))[\n \"lineitem_total__sum\"\n ] or 0\n )\n if self.order_total < settings.FREE_DELIVERY_THRESHOLD:\n self.delivery_cost = self.order_total * Decimal(\n settings.STANDARD_DELIVERY_PERCENTAGE / 100\n )\n else:\n self.delivery_cost = 0\n self.grand_total = self.order_total + self.delivery_cost\n self.save()",
"def calculate_country_vat(self):\n vat = 0\n special_cases = [ii for ii in self.product['types'] if ii in self.custom_rules]\n for product_type in special_cases:\n # Assume just one for now, but can be > 1?\n vat += self.custom_rules[product_type]()\n return vat\n\n vat += self.calculate_vat_upto_20()\n if self.cost < 20.00:\n return vat\n\n vat += self.calculate_vat_upto_100()\n if self.cost < 100.00:\n return vat\n\n vat += self.calculate_vat_over_100()\n return vat",
"def update_costs(self):\n valid_parameters = True\n self.available_costs = []\n if not self.report_parameter:\n # print(\"Report parameter is empty\")\n valid_parameters = False\n if not self.vendor_parameter:\n # print(\"Vendor parameter is empty\")\n valid_parameters = False\n if not self.name_parameter:\n # print(\"Name parameter is empty\")\n valid_parameters = False\n\n if valid_parameters:\n results = self.get_costs(self.report_parameter, self.vendor_parameter, self.name_parameter)\n report_type_name = NAME_FIELD_SWITCHER[self.report_parameter]\n self.available_costs = self.cost_results_to_dicts(results, report_type_name)\n\n self.populate_available_costs()\n self.populate_cost_fields()",
"def set_f_cost(self):\n self.f_cost = self.get_g_cost() + self.get_h_cost()",
"def calculate_vat_over_100(self):\n vat = (self.cost - 100.00) * 0.20\n return vat",
"def __get_order_cost(self, order_qty, pack_costs):\n total_cost = 0.00\n\n for pack, qty in order_qty.iteritems():\n total_cost += qty * pack_costs[pack]\n\n return total_cost",
"def update_order_shipping(user: UserWithUserTokenBasedAuthentication,\n order_id: str,\n shipping_cost: float\n ) -> requests.models.Response:\n url = f\"{ORDERS_URL}/{order_id}\"\n headers = user.headers\n params = user.params\n\n data = {\"shipping\": shipping_cost}\n\n return requests.post(url, headers=headers, params=params, json=data)",
"def on_cost_in_local_currency_changed(self):\n self.cost_in_local_currency = self.cost_in_local_currency_doublespinbox.value()",
"def billing_same_as_shipping():",
"def shipping_handling_with_discount(self, shipping_handling_with_discount):\n\n self._shipping_handling_with_discount = shipping_handling_with_discount",
"def calculate_vat_upto_20(self):\n if self.cost < 20.00:\n vat = self.cost * self.base_vat\n else:\n vat = 20.00 * self.base_vat\n return vat",
"def get_fedex_shipping_cost(self):\n Currency = Pool().get('currency.currency')\n\n fedex_credentials = self.carrier.get_fedex_credentials()\n\n if not all([\n self.fedex_drop_off_type, self.fedex_packaging_type,\n self.fedex_service_type\n ]):\n self.raise_user_error('fedex_settings_missing')\n\n rate_request = RateService(fedex_credentials)\n requested_shipment = rate_request.RequestedShipment\n\n requested_shipment.DropoffType = self.fedex_drop_off_type.value\n requested_shipment.ServiceType = self.fedex_service_type.value\n requested_shipment.PackagingType = self.fedex_packaging_type.value\n requested_shipment.PreferredCurrency = self.cost_currency.code\n\n # Shipper and Recipient\n requested_shipment.Shipper.AccountNumber = \\\n fedex_credentials.AccountNumber\n # From location is the warehouse location. So it must be filled.\n if not self.warehouse.address:\n self.raise_user_error('warehouse_address_required')\n self.warehouse.address.set_fedex_address(requested_shipment.Shipper)\n self.delivery_address.set_fedex_address(requested_shipment.Recipient)\n\n # Shipping Charges Payment\n shipping_charges = requested_shipment.ShippingChargesPayment\n shipping_charges.PaymentType = 'SENDER'\n shipping_charges.Payor.ResponsibleParty = requested_shipment.Shipper\n\n # Express Freight Detail\n fright_detail = requested_shipment.ExpressFreightDetail\n fright_detail.PackingListEnclosed = 1\n fright_detail.ShippersLoadAndCount = 2\n fright_detail.BookingConfirmationNumber = 'Ref-%s' % self.reference\n\n # Customs Clearance Detail\n self.get_fedex_customs_details(rate_request)\n\n # Label Specification\n requested_shipment.LabelSpecification.LabelFormatType = 'COMMON2D'\n requested_shipment.LabelSpecification.ImageType = 'PNG'\n requested_shipment.LabelSpecification.LabelStockType = 'PAPER_4X6'\n\n requested_shipment.RateRequestTypes = ['ACCOUNT']\n\n self.get_fedex_items_details(rate_request)\n\n try:\n response = rate_request.send_request(int(self.id))\n except RequestError, exc:\n self.raise_user_error(\n 'fedex_shipping_cost_error', error_args=(exc.message, )\n )\n\n currency, = Currency.search([\n ('code', '=', str(\n response.RateReplyDetails[0].RatedShipmentDetails[0].\n ShipmentRateDetail.TotalNetCharge.Currency\n ))\n ])\n\n return Decimal(str(\n response.RateReplyDetails[0].RatedShipmentDetails[0].ShipmentRateDetail.TotalNetCharge.Amount # noqa\n )), currency.id",
"def on_cost_in_local_currency_with_tax_changed(self):\n self.cost_in_local_currency_with_tax = self.cost_in_local_currency_with_tax_doublespinbox.value()",
"def calculateSoftCost(self):\n self.solver.Add(self.solver.Sum((self.brkconstraints[i] * self.brkconstraints_cost[i])\n for i in range(self.nconstraints)) == self.cost)",
"def calculate_vat_upto_100(self):\n if self.cost < 100.00:\n vat = (self.cost - 20.00) * 0.15\n else:\n vat = 80.00 * 0.15\n return vat",
"def set_vif_bandwidth_config(conf, flavor):\n\n bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak',\n 'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak',\n 'vif_outbound_burst']\n for key, value in flavor.get('extra_specs', {}).items():\n scope = key.split(':')\n if len(scope) > 1 and scope[0] == 'quota':\n if scope[1] in bandwidth_items:\n setattr(conf, scope[1], value)",
"def edit_cost(self, new_cost):\n self.cost = new_cost"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the order_date of this ChannelOrderRequest. | def order_date(self, order_date):
if self.local_vars_configuration.client_side_validation and order_date is None: # noqa: E501
raise ValueError("Invalid value for `order_date`, must not be `None`") # noqa: E501
self._order_date = order_date | [
"def set_order(self, order_key: str) -> None:\n if order_key not in self.orders:\n raise exceptions.CommandError(\n \"Unknown flow order: %s\" % order_key\n )\n order_key = self.orders[order_key]\n self.order_key = order_key\n newview = sortedcontainers.SortedListWithKey(key=order_key)\n newview.update(self._view)\n self._view = newview",
"def set_buy_date(self, buy_date: datetime) -> None:\n self.buy_date = buy_date",
"def set_sending_date(self, sending_date):\n self.sending_date = sending_date",
"def order_referrer(self, order_referrer):\n\n self._order_referrer = order_referrer",
"def SetCommentCreatedOn(self, _date):\n self.comment_created_on = _date",
"def invoice_date(self, invoice_date: datetime):\n\n self._invoice_date = invoice_date",
"def communication_date(self, communication_date):\n\n self._communication_date = communication_date",
"def build_date(self, build_date):\n if self.local_vars_configuration.client_side_validation and build_date is None: # noqa: E501\n raise ValueError(\"Invalid value for `build_date`, must not be `None`\") # noqa: E501\n\n self._build_date = build_date",
"def form_date(self, form_date):\n\n self._form_date = form_date",
"def queued_date(self, queued_date):\n\n self._queued_date = queued_date",
"def install_date(self, install_date: str):\n\n self._install_date = install_date",
"def shop_order(self, shop_order):\n\n self._shop_order = shop_order",
"def set_receiving_date(self, receiving_date):\n self.receiving_date = receiving_date",
"def set_line_date(self, line, date):\n self._set_line_date(line, date)",
"def update_order_admitting_date(case_number, order_admitting_date):\n\n\n probate = Probate.query.filter_by(case_number=case_number).first()\n probate.order_admitting_date = order_admitting_date\n db.session.commit()\n\n return probate",
"def vendor_order_id(self, vendor_order_id):\n\n self._vendor_order_id = vendor_order_id",
"def distribution_date(self, distribution_date):\n self._distribution_date = distribution_date",
"def survey_received_date(self, survey_received_date):\n\n self._survey_received_date = survey_received_date",
"def date_modified_billing(self, date_modified_billing):\n\n self._date_modified_billing = date_modified_billing"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the channel_customer_no of this ChannelOrderRequest. | def channel_customer_no(self, channel_customer_no):
if (self.local_vars_configuration.client_side_validation and
channel_customer_no is not None and len(channel_customer_no) > 50):
raise ValueError("Invalid value for `channel_customer_no`, length must be less than or equal to `50`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
channel_customer_no is not None and len(channel_customer_no) < 0):
raise ValueError("Invalid value for `channel_customer_no`, length must be greater than or equal to `0`") # noqa: E501
self._channel_customer_no = channel_customer_no | [
"def customer_order_number(self, customer_order_number):\n\n self._customer_order_number = customer_order_number",
"def channel_order_no(self, channel_order_no):\n if self.local_vars_configuration.client_side_validation and channel_order_no is None: # noqa: E501\n raise ValueError(\"Invalid value for `channel_order_no`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) > 60):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be less than or equal to `60`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n channel_order_no is not None and len(channel_order_no) < 0):\n raise ValueError(\"Invalid value for `channel_order_no`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._channel_order_no = channel_order_no",
"def customer_code(self, customer_code: str):\n\n self._customer_code = customer_code",
"def next_customer_number(self, next_customer_number):\n\n self._next_customer_number = next_customer_number",
"def set_delivery_customer(self, value):\n (self.driver.find_element(*ProjectFormLoc.FIELD_DELIVERY_CUSTOMER).\n send_keys(value))",
"def setCNonce(self, cNonce):\n self[Header.PARAM_CNONCE] = cNonce",
"def team_customer_permission(self, team_customer_permission):\n\n self._team_customer_permission = team_customer_permission",
"def vendor_order_id(self, vendor_order_id):\n\n self._vendor_order_id = vendor_order_id",
"def customer_id(self):\n if \"customerId\" in self._prop_dict:\n return self._prop_dict[\"customerId\"]\n else:\n return None",
"def customer_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"customer_id\")",
"def customer_service_phone(self, customer_service_phone):\n\n self._customer_service_phone = customer_service_phone",
"def is_customer_notified(self, is_customer_notified):\n if is_customer_notified is None:\n raise ValueError(\"Invalid value for `is_customer_notified`, must not be `None`\")\n\n self._is_customer_notified = is_customer_notified",
"def set_number_served(self,customers):\n self.number_served = customers",
"def update_customer(body, customer_id): # noqa: E501\n if connexion.request.is_json:\n body = RequestCustomer.from_dict(connexion.request.get_json()) # noqa: E501\n return 'do some magic!'",
"def test_AddCustomer_returns_customer_ID(self):\n response = self.mock_request(\n customer_name=self.CUSTOMER_NAME,\n address_1=self.ADDRESS_1,\n country=self.COUNTRY,\n selling_channel_id=self.SELLING_CHANNEL_ID,\n )\n self.assertEqual(response, self.CUSTOMER_ID)",
"def setZChannel(self, channel: int):\n self.axes[self.Axis.kZ] = channel",
"def number(self, channel_number=(0, 0)):\n\n raise NotImplementedError",
"def customer_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"customer_id\")",
"def client_number(self, client_number):\n\n self._client_number = client_number"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Sets the extra_data of this ChannelOrderRequest. | def extra_data(self, extra_data):
self._extra_data = extra_data | [
"def extra_info(self, extra_info: Dict):\n\n self._extra_info = extra_info",
"def extra_fields(self, extra_fields):\n\n self._extra_fields = extra_fields",
"def __set_request_data(self, request_data):\n if request_data is None:\n self.__request_data = None\n else:\n self.__request_data = request_data.copy()",
"def set_extra_data(self, key, value):\n if not isinstance(key, basestring):\n raise TypeError(\"key can only be an instance of type basestring\")\n if not isinstance(value, basestring):\n raise TypeError(\"value can only be an instance of type basestring\")\n self._call(\"setExtraData\",\n in_p=[key, value])",
"def _add_request_data_to_log_dict(self, kwargs, request):\n kwargs['extra'] = kwargs.get('extra', {})\n\n if 'request_body' not in kwargs['extra']:\n kwargs['extra']['request_body'] = request.body\n\n if 'request_headers' not in kwargs['extra']:\n kwargs['extra']['request_headers'] = {\n key: val\n for (key, val) in request.META.items() if key.startswith('HTTP_')\n }",
"def setExtras(self, extras):\n pass",
"def parse_channel_extras(self, extras):\n\n precision = extras.get('precision')\n if precision is not None:\n try:\n self._precision_set = int(precision[0])\n self.prec_signal.emit(self._precision_set)\n except ValueError:\n logger.debug('Cannot convert precision value=%r', precision)\n unit = extras.get('unit')\n if unit is not None:\n self.unit_signal.emit(str(unit[0]))\n upper_limit = extras.get('upper_limit')\n if upper_limit is not None and (self._value_type == 'float' or self._value_type == 'int'):\n self.send_upper_limit(upper_limit[0])\n lower_limit = extras.get('lower_limit')\n if lower_limit is not None and (self._value_type == 'float' or self._value_type == 'int'):\n self.send_lower_limit(lower_limit[0])\n enum_string = extras.get('enum_string')\n if enum_string is not None:\n self.send_enum_string(enum_string[0])\n\n type_kwargs = {k: v for k, v in extras.items()\n if k in self._extra_numpy_config_keys}\n\n if type_kwargs:\n self.format_type_params(type_kwargs)\n\n unused = {k: v for k, v in extras.items()\n if k not in self._extra_config_keys\n and k not in self._required_config_keys\n and k not in self._extra_numpy_config_keys}\n\n if len(unused) == 0:\n return\n\n info = \"The following entries are not valid config keys:\"\n for items in unused:\n info = info + \" \" + items + \",\"\n\n logger.debug(info)",
"def custom_data(self, custom_data):\n if not isinstance(custom_data, CustomData):\n custom_data = CustomData(custom_data)\n self['custom_data'] = custom_data",
"def shop_order(self, shop_order):\n\n self._shop_order = shop_order",
"def update_order(self):\n if self.__mess.__class__.__name__ == 'TradeMessage':\n # add order into customer order and orderpool\n order = Order(self.__mess.get_order_id(), self.__mess.get_code(),\n self.__mess.get_quantity(), self.__mess.get_side(),\n self.__mess.get_account(), self.__mess.get_sum_qty(),\n self.__mess.get_aver_price(),\n self.__mess.get_left_qty(),\n self.__mess.get_price())\n return order\n return None",
"def ExtraMessage(self, msg):\n self._WriteToRecord(EXTRA, msg)",
"def apply_extra_data(model, key, value):\n model.extra_data[key] = value",
"def attach_data_to(self, obj, data):\n obj.attach_data(self.key, data)",
"def send_order(self, order):\n\n # Takes the additional action of adding an order that is about to be sent\n # to a dictionary that keeps track of objects using their reference.\n\n if order.ref is None:\n order.ref = self._increment_counter(order.market.item, \"n\")\n self._orders_waiting_ackn[order.market.item][order.ref] = order\n super().send_order(order)",
"def __add_market_order__(self, new_order):\n if not order_is_market(new_order['cmd']):\n return EID_EAT_INVALID_ORDER_TYPE, -1\n\n #\n sp = self.get_symbol_properties(new_order['symbol'])\n margin = new_order['open_price'] * self.__calculate_pip__(new_order['open_price']) * new_order['volume'] * sp['trade_contract_size'] / self.account['leverage']\n if margin > self.account['balance'] - self.account['margin']:\n return EID_EAT_MARGIN_CALL, -1\n new_order['uid'] = str(new_order['ticket'])\n new_order['comment'] = f\"uid#{new_order['uid']}|\"\n new_order['margin'] = margin\n commission = 0.0\n #commissions\n if self.commission > 0:\n # see: https://www.houseofborse.com/commission-calculation\n # all commission charged and debited on the opening of the trade\n commission = new_order['volume'] * self.commission * self.__calculate_pip__(new_order['open_price']) * 2\n if commission > self.account['balance'] - self.account['margin']:\n return EID_EAT_MARGIN_CALL, -1\n new_order['commission'] = commission\n #\n self.orders['counter'] = self.orders['counter'] + 1\n #\n symbol_orders = self.orders['opened'].get(new_order['symbol'], {})\n\n self.orders['data'][new_order['uid']] = new_order\n symbol_orders[new_order['uid']] = new_order\n self.orders['opened'][new_order['symbol']] = symbol_orders\n self.orders['opened_counter'] = self.orders['opened_counter'] + 1\n\n #\n ds = self.orders['opened'].get('__ds__', None)\n new_a = self.order_to_ndarray(new_order)\n if ds is not None and ds.size > 0:\n self.orders['opened']['__ds__'] = np.concatenate([ds, new_a])\n else:\n self.orders['opened']['__ds__'] = new_a\n #update account\n self.account['margin'] = self.account['margin'] + new_order['margin']\n self.account['commission'] = self.account['commission'] + new_order['commission']\n\n #report\n self.report['total_trades']['value'] += 1\n if order_is_long(new_order['cmd']):\n self.report['long_positions']['value'] += 1\n else:\n self.report['short_positions']['value'] += 1\n\n #\n return EID_OK, new_order['uid']",
"def opp_comm_kwargs(self):\n kwargs = {'commtype': self._commtype, 'use_async': self.is_async,\n 'allow_multiple_comms': self.allow_multiple_comms}\n kwargs['address'] = self.opp_address\n kwargs['serializer'] = self.serializer\n # TODO: Pass copies/partner_copies in kwargs?\n if self.direction == 'send':\n kwargs['direction'] = 'recv'\n else:\n kwargs['direction'] = 'send'\n kwargs.update(self.serializer.input_kwargs)\n return kwargs",
"def __add_pending_order__(self, new_order):\n if not order_is_limit(new_order['cmd']) and not order_is_stop(new_order['cmd']):\n return EID_EAT_INVALID_ORDER_TYPE, -1\n #\n self.orders['counter'] = self.orders['counter'] + 1\n symbol_orders = self.orders['pending'].get(new_order['symbol'], {})\n\n new_order['uid'] = new_order['ticket']\n self.orders['data'][new_order['uid']] = new_order\n symbol_orders[new_order['uid']] = new_order\n self.orders['pending'][new_order['symbol']] = symbol_orders\n self.orders['pending_counter'] = self.orders['pending_counter'] + 1\n #\n sp = self.get_symbol_properties(new_order['symbol'])\n tcs = sp['trade_contract_size']\n #\n ds = self.orders['pending'].get('__ds__', None)\n\n new_a = self.order_to_ndarray(new_order)\n if ds is not None and ds.size > 0:\n self.orders['pending']['__ds__'] = np.concatenate([ds, new_a])\n else:\n self.orders['pending']['__ds__'] = new_a\n\n #\n return EID_OK, new_order['uid']",
"def saveExtra(self):\n\n\t\ttry:\n\t\t\tif not self.hasExtra:\n\t\t\t\treturn\n\t\texcept AttributeError:\n\t\t\treturn\n\n\t\tfilename = os.path.join(self.outdir, \"kdz_extras.bin\")\n\n\t\textra = open(filename, \"wb\")\n\n\t\tprint(\"[+] Extracting extra data to \" + filename)\n\n\t\tself.infile.seek(self.headerEnd, os.SEEK_SET)\n\n\t\ttotal = self.dataStart - self.headerEnd\n\t\twhile total > 0:\n\t\t\tcount = 4096 if 4096 < total else total\n\n\t\t\tbuf = self.infile.read(count)\n\t\t\textra.write(buf)\n\n\t\t\ttotal -= count\n\n\t\textra.close()",
"def create_work_order_params(self, worker_id, workload_id,\n in_data, worker_encrypt_key,\n session_key, session_iv,\n enc_data_enc_key):\n pass",
"def _add_extra_http_data(environ, args):\n extra = {}\n extra.update(args)\n if 'REMOTE_USER' in environ:\n extra['userName'] = environ['REMOTE_USER']\n if 'REMOTE_ADDR' in environ:\n extra['clientIp'] = environ['REMOTE_ADDR']\n if 'SERVER_ADDR' in environ:\n extra['serverIp'] = environ['SERVER_ADDR']\n if environ.get('REQUEST_METHOD') == 'POST':\n m = re.match(r\"/([^/]+)/(git-upload-pack|git-receive-pack)\",\n environ['PATH_INFO'])\n if m:\n extra['repo'] = m.group(1)\n extra['command'] = m.group(2)\n else:\n m = re.match(\"service=(.*)\", environ.get('QUERY_STRING', ''))\n if m:\n extra['command'] = m.group(1)\n m = re.match(r\"/([^/]+)/(info/refs|HEAD)\", environ.get('PATH_INFO', ''))\n if m:\n extra['repo'] = m.group(1)\n return extra"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
when we change the branch, effects on help desk, talent acquisition, GR, HR, etc. | def onchange_branch_id(self):
self.helpdesk_ids = False
self.talent_acquisition_ids = False
self.gr_ids = False
self.hr_ids = False
self.finance_ids = False
self.admin_ids = False
self.payroll_ids = False
self.driver_ids = False
self.hop_ids = False | [
"def action_update(self):\n if self.related_type == 'github':\n local_branches = [br.name for br in self.branch_ids]\n remote_branches = []\n for b in self._get_repo()[0].get_branches():\n remote_branches.append(b.name)\n if b.name not in local_branches:\n print b.name\n br_res = self.env['vcs.branch'].create({\n 'name': b.name,\n 'repository_id': self.id\n })\n self.branch_ids = [(4, br_res.id)]\n for br in self.branch_ids:\n if br.name not in remote_branches:\n br.unlink()\n else:\n br.action_update()\n elif self.related_type == 'bitbucket':\n local_branches = [br.name for br in self.branch_ids]\n remote_branches = []\n for b in bb_branch.find_branches_in_repository(\n self.name.lower(),\n owner=self.owner,\n client=self.user_id._get_user()\n ):\n remote_branches.append(b.name)\n if b.name not in local_branches:\n br_res = self.env['vcs.branch'].create({\n 'name': b.name,\n 'repository_id': self.id\n })\n self.branch_ids = [(4, br_res.id)]\n for br in self.branch_ids:\n if br.name not in remote_branches:\n br.unlink()\n else:\n br.action_update()",
"def stable():\r\n branch('stable')",
"def update(self, branch):\n raise NotImplementedError(\"Abstract method\")",
"def test_branch(self):\n renamed_branch = \"ihavebeenrenamed\"\n self.assertTrue(self.run_function(\"git.branch\", [self.repo, self.branches[1]]))\n self.assertTrue(\n self.run_function(\n \"git.branch\", [self.repo, renamed_branch], opts=\"-m \" + self.branches[1]\n )\n )\n self.assertTrue(\n self.run_function(\"git.branch\", [self.repo, renamed_branch], opts=\"-D\")\n )",
"def check_git_branch():\n server = get_odoo_server_url()\n if server:\n urllib3.disable_warnings()\n http = urllib3.PoolManager(cert_reqs='CERT_NONE')\n try:\n response = http.request(\n 'POST',\n server + \"/web/webclient/version_info\",\n body = '{}',\n headers = {'Content-type': 'application/json'}\n )\n\n if response.status == 200:\n git = ['git', '--work-tree=/home/pi/odoo/', '--git-dir=/home/pi/odoo/.git']\n\n db_branch = json.loads(response.data)['result']['server_serie'].replace('~', '-')\n if not subprocess.check_output(git + ['ls-remote', 'origin', db_branch]):\n db_branch = 'master'\n\n local_branch = subprocess.check_output(git + ['symbolic-ref', '-q', '--short', 'HEAD']).decode('utf-8').rstrip()\n\n if db_branch != local_branch:\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/\"])\n subprocess.check_call([\"rm\", \"-rf\", \"/home/pi/odoo/addons/hw_drivers/iot_handlers/drivers/*\"])\n subprocess.check_call([\"rm\", \"-rf\", \"/home/pi/odoo/addons/hw_drivers/iot_handlers/interfaces/*\"])\n subprocess.check_call(git + ['branch', '-m', db_branch])\n subprocess.check_call(git + ['remote', 'set-branches', 'origin', db_branch])\n os.system('/home/pi/odoo/addons/point_of_sale/tools/posbox/configuration/posbox_update.sh')\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,ro\", \"/\"])\n subprocess.call([\"sudo\", \"mount\", \"-o\", \"remount,rw\", \"/root_bypass_ramdisks/etc/cups\"])\n\n except Exception as e:\n _logger.error('Could not reach configured server')\n _logger.error('A error encountered : %s ' % e)",
"def on_outcome_changed(self, old, new):",
"def after_state_change(self, source, target):",
"def test_rename_branch():",
"def point_here(branches):\n if not branches:\n print(\"No branches passed.\")\n return\n current = get_output(\"git rev-parse HEAD\")\n for branch in branches:\n run([\"git\", \"update-ref\", f\"refs/heads/{branch}\", current])\n print(branch, \"set to\", current)",
"def create_new_branch(self, newbranch):\n # self.update(self.branch)\n try:\n self.hg_branch(newbranch)\n return 'succes'\n except Exception as e:\n print(e)\n return 'failure'",
"def checkout(self, branch):\n pass",
"def set_branch(component=\"neutron\", branch=\"master\"):\n ip = get_lab_vm_ip()\n with settings(host_string=ip, abort_on_prompts=True, warn_only=True):\n stack_file = '~/devstack/stack-screenrc'\n run(\"screen -S stack -X quit\")\n path = os.path.join(\"/opt\", \"stack\", component)\n with cd(path):\n run(\"git fetch --all; git checkout {br}\".format(br=branch))\n run(\"screen -c {0} -d -m && sleep 1\".format(stack_file))",
"def test_repo_edit_branch_protection(self):\n pass",
"def process_changes(self, change, scale, lines):\n if change and change.has_data():\n lines.append('#<{}>'.format(self.level_name))\n lines.append('')\n change.process(self.level_package, scale, lines)\n lines.append('#</{}>'.format(self.level_name))\n lines.append('')",
"def knobChanged(self, knob):\n \n if knob == self.cdlcorrectionid or knob == self.extrefsearchpath or knob == self.extrefpath: \n self.updateDesc()",
"def before_state_change(self, source, target):",
"def switch_branch(self, dest, branch):\n raise Exception('not implemented')",
"def highlight_branch(self, key, branch, fcolor, bcolor):\n pass",
"def changes(argv):\r\n\t\tcallBuilder()\r\n\t\tshow.info(\"The overview file is in %(TARGETDIR)s.\", OPTIONS)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Reads a .edf file. | def read_edf(path: str, immutable=True) -> Tuple[ Dict[str, Any], Dict[str, Sequence[float]] ] :
# def read_edf(path: str):
data_file = open(path, 'rb')
header = read_header(data_file)
signals = read_signals(data_file, header, immutable=immutable)
data_file.close()
for label in header['labels']:
# compute sampling frequency for every channel
header['signal_infos'][label]['frequency'] = header['signal_infos'][label]['num_samples_in_record'] / header['record_duration']
# compute length for each channel
header['signal_infos'][label]['num_samples'] = len(signals[label])
return header, signals | [
"def test_read_from_edf(self):\n self._compare_reader_with_expected_attrs(self.reader_from_edf,\n self.expected_start_time, self.expected_sample_freqs,\n self.expected_units, self.expected_ECG_head,\n self.expected_ACC_head, self.expected_Marker_head,\n self.expected_HRV_head,\n expected_edf_metadata=self.expected_edf_metadata)",
"def read_eeg(filename, path=\"\", eog=('HEOG', 'VEOG'), misc=\"auto\", reference=None, montage=\"easycap-M1\", preload=True, verbose=\"CRITICAL\"):\n file = path + filename\n\n # Find correct file\n extension = filename.split(\".\")\n if len(extension) == 1:\n extension = None\n else:\n extension = \".\" + extension[-1]\n\n if extension in [\".vhdr\", \".raw\", \".set\", \".fif\", \".edf\"]:\n file = file.split(\".\")[0]\n else:\n if extension is None:\n extension = \".vhdr\"\n if os.path.exists(file + extension) is False:\n extension = \".raw\"\n if os.path.exists(file + extension) is False:\n extension = \".set\"\n if os.path.exists(file + extension) is False:\n extension = \".fif\"\n if os.path.exists(file + extension) is False:\n extension = \".edf\"\n if os.path.exists(file + extension) is False:\n print(\"NeuroKit Error: read_eeg(): couldn't find compatible format of data.\")\n return()\n\n # Load the data\n try:\n if extension == \".vhdr\":\n raw = mne.io.read_raw_brainvision(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose)\n elif extension == \".raw\":\n raw = mne.io.read_raw_egi(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose)\n elif extension == \".set\":\n raw = mne.io.read_raw_eeglab(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose)\n elif extension == \".fif\":\n raw = mne.io.read_raw_fif(file + extension, preload=preload, verbose=verbose)\n elif extension == \".edf\":\n raw = mne.io.read_raw_edf(file + extension, preload=preload, verbose=verbose)\n else:\n print(\"NeuroKit Error: read_eeg(): couldn't find compatible reader of data. Try to do it manually using mne.\")\n\n # Re-reference if needed and if not MEG data\n if True not in [\"MEG\" in chan for chan in raw.info[\"ch_names\"]]:\n if reference is None:\n raw.set_eeg_reference()\n else:\n raw.set_eeg_reference(reference)\n\n except KeyError:\n print(\"NeuroKit Error: read_eeg(): something went wrong. This might be because you have channel names that are missing from the montage definition. Try do read data manually using mne.\")\n except FileNotFoundError:\n print(\"NeuroKit Error: read_eeg(): something went wrong, check the file names that are inside your info files (.vhdr, .vmrk, ...)\")\n except:\n print(\"NeuroKit Error: read_eeg(): error in data loading. Try to do it manually using mne.\")\n\n\n return(raw)",
"def readin(self):\n \n if self.filename.endswith('.fits'):\n # Assumes Science Verification data\n self.read_SV_fits()\n elif self.filename.endswith('.npz'): \n # Assumes DES Y3 Gold data\n self.read_Y3_2_2_npz()\n else: \n print('Unrecognized file type: ' + self.filename)",
"def read_dfg(file_path):\n from pm4py.objects.dfg.importer import importer as dfg_importer\n dfg, start_activities, end_activities = dfg_importer.apply(file_path)\n return dfg, start_activities, end_activities",
"def readFromFile(fileName):\n\n with open(fileName) as file:\n Q = FiniteAutomata.parseLine(file.readline())\n E = FiniteAutomata.parseLine(file.readline())\n q0 = FiniteAutomata.parseLine(file.readline())\n F = FiniteAutomata.parseLine(file.readline())\n D = FiniteAutomata.readD(FiniteAutomata.parseLine(''.join([line for line in file])))\n return FiniteAutomata(Q, E, D, q0, F)",
"def read_efield(filename):\r\n\tE_xyz = []\r\n\tis_none_efield = []\r\n\twith open(filename,'r') as f:\r\n\t\tline = f.readline()\r\n\t\tiline = 0\r\n\t\twhile line:\r\n\t\t\tdata = line.split()\r\n\t\t\tif len(data) == 0: # E_xyz is not shown in the file \r\n\t\t\t\tis_none_efield.append(iline)\r\n\t\t\t\tline = f.readline()\r\n\t\t\t\tiline += 1\r\n\t\t\telse:\r\n\t\t\t\tE_xyz.append(data)\r\n\t\t\t\tline = f.readline()\r\n\t\t\t\tiline += 1\r\n\r\n\tE_xyz = np.array(E_xyz,dtype=np.float32)\r\n\tis_none_efield = np.array(is_none_efield,dtype=np.int)\r\n\treturn E_xyz, is_none_efield",
"def read_elf(f):\n return ElfFile.load(f)",
"def read_data(folder, filename, segments = []):\n # import pdb; pdb.set_trace()\n types_read = ['abf']\n \n # checks if this filetype can be read\n file_type = fold.check_type(filename) \n if file_type not in types_read:\n raise Exception('Unable to read '+ file_type + ' files')\n \n # checks if given file exists\n full_path = os.path.join(folder, filename)\n fold.file_exists(full_path)\n \n if file_type == 'abf':\n data_list, no_segments = read_abfdata(full_path)\n\n # check if there is enough segments in the data\n if len(segments) > 0:\n if max(segments) > no_segments:\n raise Exception(\"Only \" + str(no_segments) + \" segments found\")\n \n data, scale, fs = get_electrdata(data_list, no_segments, segments) \n return data, scale, fs",
"def read_daisy_weekday_file(cls, path):\n\n return TrafficAggregate(data=pd.read_csv(path, sep='\\t', index_col=None), aggregate_type='daisy.weekday')",
"def _read_file(self):\n filetype = self._file[self._file.rfind('.'):]\n if filetype == '.dft':\n return GalileoReader(self._file).create_faulttree()\n raise UnsupportedFileTypeException('{}'.format(filetype))",
"def read_aev(fname):\n\n try:\n f = open(fname, \"r\")\n except IOError:\n print(\"Could not open file:\" + fname)\n sys.exit()\n with f:\n aevd = f.readlines()\n\n n_line = len(aevd)\n npt = int(aevd[0])\n n_atom = int(aevd[1])\n dout = int(aevd[2])\n\n aev = [ [ [0]*dout for a in range(n_atom)] for p in range(npt)]\n line = 3\n for p in range(npt):\n \tfor a in range(n_atom):\n \t\tfor i in range(dout):\n \t\t\taev[p][a][i]=float(aevd[line])\n \t\t\tline += 1\n return npt, n_atom, dout, aev",
"def dem_file_read(self, file_path):\n with open(file_path, 'rb') as handle:\n dem_array_data = pickle.load(handle)\n handle.close()\n return dem_array_data",
"def convert_edf_to_ascii(dir_path):\n file_paths = []\n for edf_fp in iglob(dir_path+'*.edf'):\n fn = os.path.splitext(os.path.basename(edf_fp))[0]\n data_fp = os.path.join(os.path.dirname(edf_fp), fn+'_data.txt')\n file_paths.append((fn, data_fp))\n # checks to ensure that edf2ascii has not already converted this file during a previous run\n if not os.path.isfile(data_fp):\n logging.info(\"Converting {} to ascii\".format(edf_fp))\n subprocess.call([os.path.join(SOURCE_FILE_DIRECTORY, 'edf2ascii'), edf_fp])\n return file_paths",
"def __read_file(self):\r\n \r\n try:\r\n \r\n return gpd.read_file(self.path,encoding='utf-8')\r\n \r\n \r\n except FileNotFoundError as err:\r\n \r\n print(\"File could not be found,ensure you enter a valid geojson file\")\r\n \r\n raise err",
"def read_input(day, test=False):\n if test:\n return _read_file(\"inputs/test_input{}.txt\".format(day))\n return _read_file(\"inputs/input{}.txt\".format(day))",
"def read_eog_training_data():\n return _read_training_data(EOG_TRAINING_DATA_DIR)",
"def readGSD(filename,frame):\n if not foundGSD:\n raise RuntimeError('GSD module not found')\n # read trajectory from gsd file\n gsd_file = gsd.fl.GSDFile(filename,'rb')\n gsd_traj = gsd.hoomd.HOOMDTrajectory(gsd_file)\n gsd_frame = gsd_traj[frame]\n # read values from file\n box = gsd_frame.configuration.box[:3]\n xyz = gsd_frame.particles.position[:,:3]\n return xyz, box",
"def read_from_qe_dos_txt(self):\n raise Exception(\"No function defined to read this quantity \"\n \"from a qe.dos.txt file\")",
"def __readfile(self):\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Scales the signal from digital (arbitrary) to physical (uV) units. | def scale(physical_max: float, digital_max: float, signal: Sequence[float]):
# note: this function will increase the computational complexity of Reader
signal *= physical_max / digital_max
return signal | [
"def scale(self, factor: 'float') -> \"void\":\n return _coin.SbViewVolume_scale(self, factor)",
"def scale(self, factor: 'double') -> \"void\":\n return _coin.SbDPViewVolume_scale(self, factor)",
"def scale(self, value):\n\t\tfor val in self.dilutions:\n\t\t\tself.dilutions[val] /= value",
"def scaleFlux(self, scale):\n self._flux *= scale",
"def scale_vol(self, scale_data: dict = None, debug: bool = False) -> Moon:\n scale_data = self.default_scale_data if scale_data == None else utilz.merge_attributes(self.default_scale_data, scale_data)\n print(f\"INFO: {self.englishName} raw values [volume -> {self.volumeRawKG}]\") if debug else None\n self.scaleVolExp = scale_data['planet']['scale_vol']\n self.volExponent = self.volExponent - (self.scaleVolExp)\n self.volumeRawKG = float( f\"{float(self.volValue*(10**self.scaleVolExp)):f}\" )\n print(f\"INFO: {self.englishName} scaled with [values/(10**{self.scaleVolExp})] [volume -> {self.volumeRawKG}]\") if debug else None \n return self",
"def scale_raw_data(self, data):\n data -= self.scalar_mean\n data /= self.scalar_std\n return data",
"def scaleMotor(self, val):\n return min(val + 0.5, 1.0)",
"def set_scale(self, scale):\n self.scale = numpy.float32(scale)\n for i in range(len(self.ptype_ff)):\n self.ptype_ff[i].set_scale(scale)\n self.bases_valid = False",
"def scale_unit(unit,scalar):\n # check that units are compatible between unit/unit2\n unit = _2list(unit)\n\n # subtract using unit conversion\n return _2str([float(scalar)*unit[0],unit[1]])",
"def setScale(self, *args) -> \"void\":\n return _coin.SbDPMatrix_setScale(self, *args)",
"def velocity_rescale():\n system.vel = v_res(system.vel, system.T, const.KB, system.mass)",
"def setScale(self, *args) -> \"void\":\n return _coin.SbMatrix_setScale(self, *args)",
"def flatscale(value, min_val, max_val):\n result = scale(value, min_val, max_val)\n if result < 0:\n result = 0\n elif result > 1:\n result = 1\n return result",
"def scale_from_period(self):\n return self.wavelet.scale_from_period",
"def scale(self):\n return self._moyal_bijector.scale",
"def applyScale(self, scale):\n pass",
"def scale_field(self, scale_factor):\n self.field *= scale_factor",
"def scale(img, vmax, vmin):\n # img = img.copy()\n max_ = img.max() \n min_ = img.min() \n if max_ != 0:\n img[img > 0] *= (vmax / max_)\n if min_ != 0: \n img[img < 0] *= (vmin / min_)\n return img",
"def _do_set_scale(self, string):\n def usage():\n self.error(\"Incorrect usage\", \"see 'help set scale'\")\n\n parts = string.split(' ')\n scale = 1e6 / float(parts.pop(0))\n units = parts.pop(0)\n if type(units) is not int:\n if units not in all_units:\n return self.error(\"Unsupported units\", \"See 'help units'\")\n units = all_units[units]\n\n if len(parts):\n return usage()\n\n self.motor.scale = int(scale), units"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Updates this packet's 'current_position' from its 'pod_speed' | def update_postion_pod(self):
self.current_position += self.pod_speed | [
"def update_pos(self) -> None:\n self.pos = (self.pos[0] + self.touch.dpos[0], self.pos[1] + self.touch.dpos[1])",
"def update_pos(self):\n\t\tself.pos += self.vel",
"def _update_velocity_position(self):\n # Define the hyperparameters from kwargs dictionary\n c1, c2, m = self.kwargs['c1'], self.kwargs['c2'], self.kwargs['m']\n\n # Compute for cognitive and social terms\n cognitive = (c1 * np.random.uniform(0,1,self.swarm_size)\n * (self.pbest_pos - self.pos))\n social = (c2 * np.random.uniform(0,1,self.swarm_size)\n * (self.lbest_pos - self.pos))\n self.velocity = (m * self.velocity) + cognitive + social\n\n # Update position and store it in a temporary variable\n temp = self.pos.copy()\n temp += self.velocity\n\n if self.bounds is not None:\n # Create a mask depending on the set boundaries\n b = (np.all(self.min_bounds <= temp, axis=1)\n * np.all(temp <= self.max_bounds, axis=1))\n # Broadcast the mask\n b = np.repeat(b[:,np.newaxis], self.dims, axis=1)\n # Use the mask to finally guide position update\n temp = np.where(~b, self.pos, temp)\n self.pos = temp",
"def update_pos(self):\n self.last_x = self.x\n self.last_y = self.y\n self.x += self.direction[0] * BLOCK_SIZE\n self.y += self.direction[1] * BLOCK_SIZE",
"def update_position(self):\n\t\tself.heading += self.turn_rate\n\t\tself.position[0] += -sin(self.heading) * self.speed\n\t\tself.position[1] += cos(self.heading) * self.speed",
"def update(self):\r\n self.updateVelocities()\r\n self.updatePositions()",
"def move(self):\n # We first limit the velocity to not get bubbles that go faster than what we can enjoy.\n if self.velocity.length() > self.MAX_VELOCITY:\n self.velocity.scale_to_length(self.MAX_VELOCITY)\n\n self.position += self.velocity\n debug.vector(self.velocity, self.position, scale=10)",
"def update_pos(self, id):\n\n new_x = self.players[id][\"x\"] + self.players[id][\"vx\"] * (server_clock - last_update) * self.players[id][\n \"speed\"]\n new_y = self.players[id][\"y\"] + self.players[id][\"vy\"] * (server_clock - last_update) * self.players[id][\n \"speed\"]\n if (0 < new_y < map_height) and (0 < new_x < map_width):\n if map[int(new_y)][int(new_x)] == True:\n new_y, new_x = inner_slide(self.players[id][\"y\"], self.players[id][\"x\"], new_y, new_x)\n if sqrt((self.players[id][\"x\"]-new_x)**2 + (self.players[id][\"y\"]-new_y)**2) > self.prevent_TP_distance :\n new_x, new_y= self.players[id][\"x\"], self.players[id][\"y\"]\n else:\n new_x = max(min(new_x, map_width - 1), 0)\n new_y = max(min(new_y, map_height - 1), 0)\n self.players[id][\"x\"] = new_x\n self.players[id][\"y\"] = new_y",
"def update(self, **kwargs):\n self.apply_velocity()",
"def ship_acceleration(self):\r\n new_x_speed = self.__speed[X_AXIS] + cos(radians(self.__direction))\r\n new_y_speed = self.__speed[Y_AXIS] + sin(radians(self.__direction))\r\n self.__speed = (new_x_speed, new_y_speed)",
"def set_speed_set_point(self, vacuum_pump, speed):\n self.logger.info(\"Setting speed for vacuum pump {0} to {1}%...\".format(vacuum_pump, speed))\n vacuum_obj = self.vacuum[vacuum_pump]\n vacuum_obj.speed_sp = speed",
"def position_timed(self):\r\n actual_time = time.time()\r\n self.position[0] = self.speed[0] * self.time_speed + self.position[0]\r\n self.position[1] = self.speed[1] * self.time_speed + self.position[1]\r\n self.last_time_position = actual_time\r\n return self.position",
"def _set_new_velocity(self, next_location):\n\n current_time = GameTime.get_time()\n target_speed = self._target_speed\n\n if not self._last_update:\n self._last_update = current_time\n\n if self._consider_obstacles:\n # If distance is less than the proximity threshold, adapt velocity\n if self._obstacle_distance < self._proximity_threshold:\n distance = max(self._obstacle_distance, 0)\n if distance > 0:\n current_speed = math.sqrt(self._actor.get_velocity().x**2 + self._actor.get_velocity().y**2)\n current_speed_other = math.sqrt(\n self._obstacle_actor.get_velocity().x**2 + self._obstacle_actor.get_velocity().y**2)\n if current_speed_other < current_speed:\n acceleration = -0.5 * (current_speed - current_speed_other)**2 / distance\n target_speed = max(acceleration * (current_time - self._last_update) + current_speed, 0)\n else:\n target_speed = 0\n\n # set new linear velocity\n velocity = carla.Vector3D(0, 0, 0)\n direction = next_location - CarlaDataProvider.get_location(self._actor)\n direction_norm = math.sqrt(direction.x**2 + direction.y**2)\n velocity.x = direction.x / direction_norm * target_speed\n velocity.y = direction.y / direction_norm * target_speed\n\n self._actor.set_target_velocity(velocity)\n\n # set new angular velocity\n current_yaw = CarlaDataProvider.get_transform(self._actor).rotation.yaw\n # When we have a waypoint list, use the direction between the waypoints to calculate the heading (change)\n # otherwise use the waypoint heading directly\n if self._waypoints:\n delta_yaw = math.degrees(math.atan2(direction.y, direction.x)) - current_yaw\n else:\n new_yaw = CarlaDataProvider.get_map().get_waypoint(next_location).transform.rotation.yaw\n delta_yaw = new_yaw - current_yaw\n\n if math.fabs(delta_yaw) > 360:\n delta_yaw = delta_yaw % 360\n\n if delta_yaw > 180:\n delta_yaw = delta_yaw - 360\n elif delta_yaw < -180:\n delta_yaw = delta_yaw + 360\n\n angular_velocity = carla.Vector3D(0, 0, 0)\n if target_speed == 0:\n angular_velocity.z = 0\n else:\n angular_velocity.z = delta_yaw / (direction_norm / target_speed)\n self._actor.set_target_angular_velocity(angular_velocity)\n\n self._last_update = current_time\n\n return direction_norm",
"def update_position(self):\n \t\t\n self.x += math.sin(self.angle) * self.speed\n self.y -= math.cos(self.angle) * self.speed",
"def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, use_offset=True):\n cmd_vel_value = Twist()\n cmd_vel_value.linear.x = linear_speed\n cmd_vel_value.angular.z = angular_speed\n rospy.logdebug(\"DuckBot Base Twist Cmd>>\" + str(cmd_vel_value))\n self._check_publishers_connection()\n self._cmd_vel_pub.publish(cmd_vel_value)",
"def move_particle(self, p):\n\t\t_x = round(p.pos.x + p.speed.x, 6)\n\t\tp.pos.x = _x\n\t\t_y = round(p.pos.y + p.speed.y, 6)\n\t\tp.pos.y = _y\n\t\tp.pos.z = round(p.pos.z + p.speed.z, 6)\n\t\tp.traveled = round(p.traveled + vp.sqrt(_x**2 + _y**2), 6)",
"def _updateVelocity(self):\n\t\t# Find difference between two vectors\n\t\tdifferenceVector = [0, 0]\n\t\tdifferenceVector[0] = self.targetVelocity[0] - self.currentVelocity[0]\n\t\tdifferenceVector[1] = self.targetVelocity[1] - self.currentVelocity[1]\n\n\t\t# Exit if there's nothing to update to avoid extra calculations\n\t\tif(differenceVector[0] == 0 and differenceVector[1] == 0):\n\t\t\treturn\n\n\t\t# Find the hypotenuse of the difference vector\n\t\tdifferenceMagnitude = math.sqrt((differenceVector[0] ** 2) + (differenceVector[1] ** 2))\n\n\t\t# If hypotenuse <= maxAcceleration, set currentVelocity = targetVelocity\n\t\tif(differenceMagnitude <= self.maxAcceleration):\n\t\t\tself.currentVelocity[0] = self.targetVelocity[0]\n\t\t\tself.currentVelocity[1] = self.targetVelocity[1]\n\t\t\treturn\n\n\t\t# Else, divide the distance vector by the hypotenuse (to make unit vector), multiply by maxAcceleration, and add to currentVelocity\n\t\tdifferenceVector[0] = self.maxAcceleration * (differenceVector[0] / differenceMagnitude)\n\t\tdifferenceVector[1] = self.maxAcceleration * (differenceVector[1] / differenceMagnitude)\n\n\t\tself.currentVelocity[0] += differenceVector[0]\n\t\tself.currentVelocity[1] += differenceVector[1]\n\n\t\treturn",
"def move(self):\n old_position = self.__position\n self.__position = tuple(map(sum, zip(self.__position, self.__velocity)))\n print(f\"Asteroid {self.__id} Moved! Old Pos: {old_position} -> New Pos: {self.__position}\")\n return self.__position",
"def update(self, dt):\n super().update(dt)\n self.velocity.norm = min(self.velocity.norm, self.max_speed)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a code string(int string) into a fixedwith binary string, padding with 0s. | def pad_bin_code(code_str, code_width):
return (code_width - len(bin(code_str)) + 2) * "0" + bin(code_str)[2 : ] | [
"def encode_string(S):\n if S != '':\n S = '{0:b}'.format(int(hexlify(S), 16))\n while (len(S) % 8) != 0:\n S = '0' + S\n if (len(S) >= 0) and (len(S) < 2040):\n U = left_encode(len(S)) + S\n return U\n else:\n print ('Invalid bit string (encode_string)')",
"def bytepad(X, w):\n if w > 0:\n z = left_encode(w) + X\n while (len(z) % 8) != 0:\n z += '0'\n while ((len(z) / 8) % w) != 0:\n z += '00000000'\n return z\n else:\n print ('Invalid integer (bytepad)')",
"def toBinary(anInt, desiredLength=None):\n\th2b = getHexToBin()\n\tres = \"\".join(h2b[c] for c in \"%x\"%anInt).lstrip(\"0\")\n\tif desiredLength is not None:\n\t\tres = \"0\"*(desiredLength-len(res))+res\n\treturn res",
"def string_to_binary(str):\n return bin(int(binascii.hexlify(str.encode()), 16))[2:]",
"def convert_binary_data(tree, input_string):\n\n\tbinary_string = '' #string of binary characters to be written to compressed file\n\tfor char in input_string: \n\t\tbinary_string += tree[char] #for each character append corresponding huffman code to binary_string\n\n\tbinary_tree = encoded_huffman_tree(tree) #generate the encoded huffman tree (in binary)\n\tbinary_string = binary_tree\t+ binary_string #add this infront of the data so that it can be regerated\n\n\tno_padding_bits_dec = (8-((len(binary_string)+3)%8))%8 #data stored in bytes so add calculate number of padding bits needed\n\tno_padding_bits_bin = \"{:03b}\".format(no_padding_bits_dec) #max number of padding bits can be 7 so store this in 3 bits \n\n\tbinary_string = no_padding_bits_bin + binary_string + (no_padding_bits_dec*'0') # add the number of padding bits, data, padding bits\n\n\tbinary_string = BitArray(bin=binary_string) #turn into byte array that can be written to .bin file\n\n\treturn binary_string",
"def binary_strings(max_length=10):\n yield ''\n for size in range(1, 1+max_length):\n for i in range(2**size):\n yield '{:b}'.format(i).rjust(size, '0')",
"def pad_text(self, text):\n extra = 8 - len(text)%8\n for i in range(extra):\n text += \"0\"\n info = \"{0:08b}\".format(extra)\n text = info + text\n return text",
"def ascii2binary(s):\n #return bin(int.from_bytes(s.encode(), 'big'))[2:] # Doesn't account for padding\n b, buff = \"\", \"\"\n for c in s:\n buff = bin(ord(c))[2:]\n while len(buff) % 8 != 0:\n buff = \"0\" + buff\n b += buff\n return b",
"def left_encode(x):\n if (x >= 0) and (x < (1 << 2040)):\n x_bin = '{0:b}'.format(x)\n On = x_bin\n while (len(On) % 8) != 0:\n On = '0' + On\n n = len(On) // 8\n n_bin = '{0:b}'.format(n)\n O0 = n_bin\n while (len(O0) % 8) != 0:\n O0 = '0' + O0\n O = O0 + On\n return O\n else:\n print ('Invalid bit string (left_encode)')",
"def _mk_bits(self,data):\n if isinstance(data, bytes):\n return data[data.index(b\"\\xfc\") :]\n # handles int and unquoted hex\n if isinstance(data, int):\n length = data.bit_length() >> 3\n bites = int.to_bytes(data, length, byteorder=\"big\")\n return bites\n try:\n # Handles hex byte strings\n i = int(data, 16)\n i_len = i.bit_length() >> 3\n bites = int.to_bytes(i, i_len, byteorder=\"big\")\n return bites\n except (LookupError, TypeError, ValueError):\n if data[:2].lower() == \"0x\":\n data = data[2:]\n if data[:2].lower() == \"fc\":\n return bytes.fromhex(data)\n try:\n return b64decode(self.fix_bad_b64(data))\n except (LookupError, TypeError, ValueError):\n return data",
"def pibble32(data: bytes) -> str:\n table: bytes = bytes.maketrans(\n b\"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567\",\n b\"0123456789bcdfghjklmnopqrstvwxyz\",\n )\n encoded: bytes = base64.b32encode(data)\n return str(encoded.translate(table), \"ascii\")",
"def __pcone_to_be(self, pcone_code: list[str]) -> str:\n becode = \"\"\n csum = 0\n x = pcone_code.index(\"1\")\n for item in pcone_code[x + 1:] + pcone_code[: x + 1]:\n if item == \"0\":\n csum += 1\n else:\n becode += str(csum + 1)\n csum = 0\n return becode",
"def reconstruct_binary_into(value, into):\n into.append(TYPE_NAME_BINARY)\n into.append('(')\n into.append(MODIFIER_LENGTH)\n into.append('=')\n into.append(str(len(value)))\n into.append(')')",
"def makeCode(root,string,dic = {}):\r\n #Base case\r\n # If the left and the right of the root are none\r\n # Then it is a leaf node so we just print its value\r\n if root.left == None and root.right == None:\r\n # Make the string its Huffman Code for future use\r\n dic[root.data] = string\r\n return dic\r\n\r\n # if we go to left then add \"0\" to the code.\r\n # if we go to the right add \"1\" to the code.\r\n \r\n makeCode(root.left, string+\"0\",dic)\r\n makeCode(root.right, string+\"1\",dic)",
"def char_to_bitstring(char):\n return bin(ord(char))[2:].rjust(8,\"0\")",
"def CREATE_CODE(digits,data,coord):\n\t\n\tcode = ''\n\tfor function in digits:\n\t\tdigit = function(data,coord)\n\t\tcode += str(hex(digit)[-1])\n\t\n\treturn code",
"def int2bitstring(x, n):\n x += 2**n # support two's complement\n s = bin(x)[2:] # remove '0b' at the beginning\n s = s.rjust(n, '0')[-n:] # make string of length n\n return ''.join(s)",
"def dec_to_bin(self,num):\r\n BinStr = ''\r\n if num == 0: return '0'*8\r\n while num > 0:\r\n BinStr = str(num % 2) + BinStr\r\n num = num >> 1 # right-shift the num by 1 bit\r\n BinStr = BinStr.zfill(8) # make BinStr an 8-bit string\r\n return BinStr",
"def decode(code):\n\n decoded_word = \"\"\n\n for i in xrange(len(code)):\n\n if code[i].isdigit():\n num = int(code[i])\n decoded_word += code[i + num + 1]\n\n return decoded_word"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
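
The pad_bin_code document above assumes a non-negative integer input (bin() would raise on an actual string). A minimal sketch restating it next to the equivalent format-spec one-liner, with a value chosen purely for illustration:

    def pad_bin_code(code_str, code_width):
        # As in the record: strip the "0b" prefix and left-pad with zeros.
        return (code_width - len(bin(code_str)) + 2) * "0" + bin(code_str)[2:]

    # Equivalent built-in formatting; both yield "00000101" for 5 at width 8.
    assert pad_bin_code(5, 8) == format(5, "08b") == "00000101"
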
Load the hashcodes of all the sentences in the corpus from a given path. | def load_corpus_hashcode(codes_path):
src_f = open(codes_path, "r")
corpus_hashcodes = []
for ln in src_f:
corpus_hashcodes.append(int(ln.strip()))
return corpus_hashcodes | [
"def load_sentences(path, lower, zeros):\n sentences = []\n sentence = []\n num = 0\n for line in codecs.open(path, 'r', 'utf8'):\n num+=1\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n # print(list(line))\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n if line[0] == \" \":\n line = \"$\" + line[1:]\n word = line.split()\n # word[0] = \" \"\n else:\n word= line.split( )\n assert len(word) == 2\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences",
"def load_corpus(self, dir):\n word_fn = codecs.open(dir + \"word.dic\", \"r\", \"utf-8\")\n for line in word_fn:\n word_nr, word = line.strip().split(\"\\t\")\n self.int_to_word.append(word)\n self.word_dict[word] = int(word_nr)\n word_fn.close()\n tag_fn = open(dir + \"tag.dic\", \"r\")\n for line in tag_fn:\n tag_nr, tag = line.strip().split(\"\\t\")\n if tag not in self.tag_dict:\n self.int_to_tag.append(tag)\n self.tag_dict[tag] = int(tag_nr)\n tag_fn.close()\n word_count_fn = open(dir + \"word.count\", \"r\")\n for line in word_count_fn:\n word_nr, word_count = line.strip().split(\"\\t\")\n self.word_counts[int(word_nr)] = int(word_count)\n word_count_fn.close()\n self.sequence_list.load(dir + \"sequence_list\")\n\n # Read a text file in conll format and return a sequence list",
"def chasen_loader(filename):\n reader = ChasenCorpusReader(os.path.abspath(os.path.dirname(\n os.path.abspath(__file__)) + '/../../data/raw/yokome-jpn-corpus'),\n filename, encoding='utf-8')\n for word in reader.words():\n for c in word:\n yield (ord(c),)",
"def load_data(path):\n\n\tf = open(path,'r');\n\n\ttweets = [];\n\ttarget = [];\n\tfor line in f :\n\t\tif line != '' and line != '\\n':\n\t\t\tlistLine = line.strip().split('\\t');\n\t\t\t\n\t\t\t#Tokenize tweet\n\t\t\tlistLine[0] = u\" \".join(twokenize_ES.tokenize(listLine[0]))\n\t\t\t\n\t\t\t#Analize tweet\n\t\t\tlistLine[0] = emoticons_ES.analyze_tweet(listLine[0])\n\t\t\t\n\t\t\t#RemovePunctuation\n\t\t\tlistLine[0] = u\" \".join(twokenize_ES.remove_punct(listLine[0]))\n\n\t\t\ttweets.append(listLine[0].strip().split());\n\t\t\tif listLine[1] == 'positive':\n\t\t\t\ttarget.append([1,0,0])\n\t\t\telif listLine[1] == 'negative':\n\t\t\t\ttarget.append([0,0,1])\n\t\t\telse:\n\t\t\t\ttarget.append([0,1,0])\n\n\treturn [tweets,target]",
"def load_data_sentences(self):\n sentence_list = []\n for fname in os.listdir(self.datadir):\n with open(os.path.join(self.datadir, fname)) as file:\n for line in file:\n sentence_list.append(line)\n return sentence_list",
"def load_documents(filepath: str):\n logger.info(\"loading documents to build inverted index\")\n documents = dict()\n with open(filepath, 'r') as doc_file:\n for line in doc_file:\n line = line.rstrip()\n idx, line = line.split(maxsplit=1)\n documents[int(idx)] = line.rstrip()\n return documents",
"def _get_url_hashes(path):\n urls = _read_text_file_path(path)\n\n def url_hash(u):\n h = hashlib.sha1()\n try:\n u = u.encode(\"utf-8\")\n except UnicodeDecodeError:\n logger.error(\"Cannot hash url: %s\", u)\n h.update(u)\n return h.hexdigest()\n\n return {url_hash(u) for u in urls}",
"def load_sentences(self):\n if self.print_only:\n infile = 'data/sentences_clean.txt'\n with open(infile) as infile:\n lines = infile.readlines()\n sentences = [l.lower().strip() for l in lines]\n else:\n infile = resource_filename('typer_game', 'data/audio_lookup_subset.txt')\n sentences = pickle.load(open(infile, 'rb'))\n return sentences",
"def load_corpus(input_file):\r\n\r\n print('Loading corpus...')\r\n time1 = time.time()\r\n corpus = input_file.read()\r\n time2 = time.time()\r\n total_time = time2 - time1\r\n print('It took %0.3f seconds to load corpus' % total_time)",
"def load_vocab(self, vocab_path):\n assert self._special_tokens != None and self._tokens != None, \"Vocab is already set or loaded\"\n with open(vocab_path, encoding=\"utf-8\", errors=\"ignore\") as infile:\n loaded_dict = json.load(infile)\n for tok, idx in loaded_dict.items():\n self.tok2idx_dict[tok] = int(idx)\n self.idx2tok_dict[int(idx)] = tok\n assert len(self.tok2idx_dict) == len(self.idx2tok_dict)\n self._special_tokens = None\n self._tokens = None\n logger.info(\"Loaded vocab from {}\".format(vocab_path))",
"def load_levin(self, path):\n try:\n lexicon_file = open(path)\n self.levin_dict = json.loads(lexicon_file.read())\n except:\n print 'fail to laod levin verb classes'",
"def text_content_analyzer(file_path):\n\n counts_dict = dict({\"total words\": 0, \"unique words\": 0, \"sentences\": 0})\n file_word_list = list()\n with open(file_path) as txt_file:\n for line in txt_file:\n file_word_list += line.lower().split()\n total_words(file_word_list, counts_dict)\n unique_words(file_word_list, counts_dict)\n count_sentences(file_word_list, counts_dict)\n return counts_dict",
"def load_syllable_count_corpus():\n corpus = []\n with open('../misc/syllable_count_corpus.txt', 'r') as corpus_file:\n for line in corpus_file.readlines():\n index = line.find(' ')\n corpus.append([int(line[:index]), line[index+1:].replace('\\n', '')])\n return corpus",
"def read_corpus(corpus_path):\n data = []\n with open(corpus_path, encoding='utf-8') as fr:\n lines = fr.readlines()\n sent_, tag_ = [], []\n for line in lines:\n if line != '\\n':\n # [char, label] = line.split(' ')\n [char, label] = line.replace('\\n','').split(' ')\n sent_.append(char)\n tag_.append(label)\n else:\n data.append((sent_, tag_))\n sent_, tag_ = [], []\n\n return data",
"def load_certs(self, path):\r\n\t\ttry:\r\n\t\t\twith os.scandir(path) as it:\r\n\t\t\t\tfor entry in it:\r\n\t\t\t\t\tif entry.name.endswith('crt') and entry.is_file():\r\n\t\t\t\t\t\twith open(path + entry.name,'rb') as cert:\r\n\t\t\t\t\t\t\tdata=cert.read()\r\n\t\t\t\t\t\t\tcr = x509.load_pem_x509_certificate(data)\r\n\t\t\t\t\t\t\tif self.validate_certificate(cr):\r\n\t\t\t\t\t\t\t\tself.issuers_certs[cr.subject.rfc4514_string()] = cr\r\n\t\t\t\t\t\t\t\r\n\t\t\t\tlogger.info(\"Certicates loaded!\")\r\n\t\texcept:\r\n\t\t\tlogger.error(\"Could not load certificates.Make sure to run this file on the /client directory\")",
"def load(cls, path):\n\n corpus = SubjectFileTSV(path)\n return cls(corpus)",
"def load_synthtext_labels(path):\n return scipy.io.loadmat(path)",
"def init(self, trainfiles):\n for filepaths in trainfiles:\n\n # load files and tokenize words in sentences\n with open(filepaths, \"r\") as text:\n sent_list = tokenize_sentence(text.read())\n\n for sentences in sent_list:\n word_list = sentence_to_word(sentences)\n\n # check unknown words\n for index, words in enumerate(word_list):\n if words not in self.token_list:\n word_list[index] = \"<UNK>\"\n\n # add word to vocab\n self.token_list.append(words)\n\n word_list.insert(0, \"<s>\")\n word_list.append(\"</s>\")\n\n for i in range(len(word_list)-1):\n self.lang_model.append((word_list[i], word_list[i+1]))\n\n for (word1, word2) in self.lang_model:\n self.bigram_dict[(word1, word2)] += 1\n self.words_dict[word1] += 1",
"def load_HSK_vocabulary():\n f = open(config.HSK_FILE_PATH, 'r')\n if f: header = f.readline()\n hsk_list = {}\n for line in f:\n \tline = line.decode('utf-8')\n line_data = line.rstrip().split(',')\n hsk_list[line_data[1]] = (line_data[0], line_data[2])\n return hsk_list"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
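
The load_corpus_hashcode document above never closes its file handle. A minimal equivalent using a context manager (skipping blank lines is an added assumption, not the record's behaviour):

    def load_corpus_hashcode(codes_path):
        # One integer hashcode per line; the with-block closes the file.
        with open(codes_path, "r") as src_f:
            return [int(ln.strip()) for ln in src_f if ln.strip()]
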
Returns the amount of damage a move does. If the move doesn't exist or the player has run out of that move, this will return 0. If the move is valid, it will return the amount of damage it will do. move => String returns => Int | def attack(self, move):
try:
if self.moves[move] <= 0:
return 0
except KeyError:
return 0
else:
extra_damage = random.randint(1, 5)
with open('move_damage.json') as damage_file:
return json.loads(damage_file.read())["Mario"][move] + extra_damage | [
"def opp_open_move_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return 8-float(len(game.get_legal_moves(game.get_opponent(player))))",
"def number_of_moves(self, player):\n return len(self.history[player])",
"def on_after_move_damage(self, battle, pokemon, damage, move, foe):",
"def move():\n data = request.get_json(force=True)\n try:\n state = fixInputData(data['gamestate'])\n move = int(data['move'])\n except (KeyError, TypeError, ValueError):\n raise JsonError(description='Invalid value.')\n resp = {\"gamestate\": game_state.doMove(state, move)}\n return checkWin(resp)",
"def get_player_move(move):\n while move != 'r' and move != 'p' and move != 's':\n move = raw_input(\"Invalid Input! Enter \\'r\\' for \\'Rock\\', \\'p\\' for \\'Paper\\', and \\'s\\' for \\'Scissors\\'. \")\n\n print(\"You used %s!\"%(get_move_name(move)))\n return move",
"def calculate_status_damage(pokemon):\n dmg_pct = 0\n if pokemon[\"status\"] == BRN_STATUS:\n # Burns do 1/16 of hp\n dmg_pct = 1.0/16\n elif pokemon[\"status\"] == PSN_STATUS:\n # Poison does 1/8 of hp\n dmg_pct = 1.0/8\n elif pokemon[\"status\"] == TOX_STATUS:\n # Toxic does variable damage\n dmg_pct = (pokemon[\"status_turns\"]+1)*1.0/16\n\n return dmg_pct",
"def max_move_able_steps(player: Player):\n return sum(pawn_.get_remaining() for pawn_ in player.in_progress)",
"def _calculate_damage(self, attack: int) -> int:\n adjusted_def = MAX_STATS_VAL - self.stats.defense\n def_buff = (attack - adjusted_def * attack / MAX_STATS_VAL)\n damage = attack - DamageMultiplier.NORMAL.value * def_buff\n return math.floor(damage)",
"def get_player_move():\n # TODO\n user_move = input(\"Enter a move as 'r', 'p', or 's': \")\n return user_move",
"def check_move(self):\n\n if self.DEBUG_PRINT_FUNCTIONS:\n pass;\n print \"check_move\"\n\n if len(self.square) != 1 or self.piece == None:\n if self.DEBUG:\n print \"missing piece or square!\"\n return 5\n sqr_cords = self.c.coords(self.square) # square coords\n sqr_cntr = apply(self.find_center, sqr_cords) # square center\n pce_cntr = apply(self.find_center, self.c.coords(self.piece)) # piece center\n vtr = (sqr_cntr[0] - pce_cntr[0], sqr_cntr[1] - pce_cntr[1]) # piece vector(distence and direction)\n if self.DEBUG:\n pass; # print sqr_cords, sqr_cntr, pce_cntr, vtr\n\n if self.jumps[0]: # jump checker\n # if move has not been found by check_for_jumps then fail\n # else, ingore all the other checks, and succeed\n if self.jumps[0].count((self.piece, vtr)) != 1:\n self.show_message(\"You have a jump!\", .8)\n return 5\n else:\n self.jump_made = self.jumps[0].index((self.piece, vtr))\n if self.DEBUG:\n print \"jump_made: \", self.jump_made\n return 0\n\n # movement direction checker\n if self.c.itemcget(self.piece, \"outline\") != \"gold2\":\n if self.moving == \"black\":\n if vtr[1] > 0:\n if self.DEBUG:\n print \"wrong way, black!\"\n return 3\n else:\n if vtr[1] < 0:\n if self.DEBUG:\n print \"wrong way, red!\"\n return 3\n\n # distence checker\n if abs(vtr[0]) != self.SQUARESIZE or abs(vtr[1]) != self.SQUARESIZE:\n if self.DEBUG:\n print \"Too far!\"\n return 4\n\n # square emptiness checker\n if self.c.type(self.c.find_overlapping(sqr_cords[0] + (self.SQUARESIZE / 2), \\\n sqr_cords[1] + (self.SQUARESIZE / 2), \\\n sqr_cords[2] - (self.SQUARESIZE / 2), \\\n sqr_cords[3] - (self.SQUARESIZE / 2))) != \"rectangle\":\n if self.DEBUG:\n print \"not empty: \", self.c.find_overlapping(sqr_cords[0] + (self.SQUARESIZE / 2), \\\n sqr_cords[1] + (self.SQUARESIZE / 2), \\\n sqr_cords[2] - (self.SQUARESIZE / 2), \\\n sqr_cords[3] - (self.SQUARESIZE / 2))\n return 2\n\n return 0",
"def get_reward(self):\n winner = self.check_win()\n if winner == 0:\n return 0\n #assert self.is_terminal(), \"ERROR for get_reward! Game is not over!\"\n return 1 if winner == 1 else 0",
"def calculate_modifier(move, attacker, defender):\n modifier = 1\n\n # STAB Modifier\n if move[\"type\"] in attacker[\"types\"]:\n modifier = modifier * 1.5\n\n # Weakness modifier\n for def_type in defender[\"types\"]:\n if move[\"type\"] in WEAKNESS_CHART[def_type]:\n modifier = modifier * WEAKNESS_CHART[def_type][move[\"type\"]]\n\n return modifier",
"def calculate_damage(self, unit):\r\n base_dmg = self.strength\r\n reduction = unit.strength * .5 + unit.wit * .5\r\n return max(base_dmg - reduction, 0)",
"def calculate_utility(self, boardstate):\n #return self.mycount_difference(boardstate)\n #diff = self.mycount_difference(boardstate)\n legMovs = len(boardstate.calculate_legal_moves())\n potMob = self.get_potential_mobility(boardstate)\n return legMovs + potMob",
"def validate_move(state, move):\n N = len(state)\n assert move.isdigit(), \"Move must be numeric (base 10)\"\n move = int(move)\n assert 0 < move <= N ** 2, f\"Move must be between 1 and {N ** 2} inclusive\"\n x, y = index(move, N=N)\n assert state[x][y] is None, \"This position is already marked\"\n return move",
"def get_spell_damage(self, amount: int) -> int:\n\t\tamount += self.spellpower\n\t\tamount <<= self.controller.spellpower_double\n\t\treturn amount",
"def is_move_valid(self, move, position=None):\r\n\r\n if position is None:\r\n position = self.position\r\n\r\n nx = position[0]\r\n ny = position[1]\r\n\r\n # If the transition is invalid, just return the current position\r\n # Note: move-1 because Python is 0-indexed\r\n if self.adjacent[nx, ny, move] == 0:\r\n return None\r\n\r\n # Move while checking potential collisions\r\n\r\n # moves north\r\n if move == 0:\r\n if ny > 0:\r\n ny -= 1\r\n else:\r\n return None\r\n\r\n # move east\r\n elif move == 1:\r\n if nx < self.x_max:\r\n nx += 1\r\n else:\r\n return None\r\n\r\n # move south\r\n elif move == 2:\r\n if ny < self.y_max:\r\n ny += 1\r\n else:\r\n return None\r\n\r\n # move west\r\n elif move == 3:\r\n if nx > 0:\r\n nx -= 1\r\n else:\r\n return None\r\n\r\n return (nx, ny)",
"def best_move(self):\n reward, move = self.move_by_code(0, True), 0\n for m in range(1, 4):\n if self.move_by_code(m, True) > reward: move = m\n return move",
"def get_move(self):\n return self.sdg(self.board, self.falling_piece)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
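
The attack document above re-reads move_damage.json on every call and wraps the lookup in try/except KeyError; a compact method sketch with the same behaviour is shown below. The file name and the "Mario" key come from the record and are not otherwise verified.

    import json
    import random

    def attack(self, move):
        # Unknown move or no uses left -> 0 damage, as in the record.
        if self.moves.get(move, 0) <= 0:
            return 0
        extra_damage = random.randint(1, 5)
        with open("move_damage.json") as damage_file:
            return json.load(damage_file)["Mario"][move] + extra_damage
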
Returns the damage an enemy will do after Mario's defence. enemy_damage => Int returns => Int | def defend(self, enemy_damage):
return enemy_damage - random.randint(0, 5) | [
"def _calc_damage(self, enemy_level):\n return ((((enemy_level + self.level - 1) // self.level) - 1) *\n enemy_level)",
"def take_damage(self, damage):\n # self.current_health -= self.defend(damage)\n # return self.current_health",
"def _calculate_damage(self, attack: int) -> int:\n adjusted_def = MAX_STATS_VAL - self.stats.defense\n def_buff = (attack - adjusted_def * attack / MAX_STATS_VAL)\n damage = attack - DamageMultiplier.NORMAL.value * def_buff\n return math.floor(damage)",
"def damage(self) -> float:\n operators_experience = sum([operator.experience\n for operator in self._operators])\n return 0.1 + operators_experience / 100",
"def character_death(self, enemy):\r\n # if the enemy of the instance caller dies\r\n if enemy.health <= 0:\r\n # if the enemy dies\r\n if self.__class__ == Player:\r\n print('') # space for terminal\r\n # for aesthetics in terminal\r\n print(constants.ENEMY_DEFEAT_BANNER)\r\n\r\n type_print_effect(enemy.name + \" has been slain by \" + self.name + '!\\n')\r\n\r\n # used to increase the level up bar by counting whenever the player defeats an enemy\r\n self.levelup_bar += self.levelup_per_defeated_enemy\r\n\r\n return self.levelup_bar\r\n\r\n # when the player dies by various enemies\r\n else:\r\n print('') # space for terminal\r\n # for aesthetics in terminal\r\n print(constants.PLAYER_DEFEAT_BANNER)\r\n\r\n type_print_effect(enemy.name + \" has been slain by \" + self.name + \" with no mercy!\\n\")\r\n type_print_effect(\"Game over! Thank you so much for playing The Journey\\n\")\r\n\r\n # end the game when the player dies\r\n text_effect_and_exit_function.system_exit()",
"def deal_damage(self, damage, priority_total):\n print(self.name + \" takes \" + str(damage) + \" damage!\")\n kills = 0\n for hero in self.heroes:\n if(hero.is_alive):\n #kills += hero.take_damage(damage)\n damage_portion = damage * hero.damage_priority//priority_total\n print(hero.name + \" takes \" + str(damage_portion) + \" damage!\")\n kill = hero.take_damage(damage_portion)\n self.living_heroes -= kill\n kills += kill\n\n return kills",
"def leeching_health(self, damage, enemy):\r\n # calculates the missing health to prevent over health in a character\r\n health_missing = self.health_max - self.health\r\n\r\n # for aesthetics in terminal\r\n print('')\r\n print(constants.LEECH_BANNER)\r\n\r\n # executed if the health missing is less than the leech or damage\r\n if health_missing <= self.leeching <= damage or health_missing <= self.leeching > damage:\r\n self.health += health_missing\r\n type_print_effect(self.name + \" successfully leeched \" + str(health_missing) +\r\n \" health from \" + enemy.name + \" and gained full health!\\n\")\r\n type_print_effect(self.name + \"'s health is currently at \" + str(self.health) + \".\\n\")\r\n\r\n # executed when the health missing is greater than the leech or damage\r\n elif health_missing > self.leeching <= damage:\r\n self.health += self.leeching\r\n type_print_effect(self.name + \" leeched \" + str(self.leeching) +\r\n \" health from \" + enemy.name + \".\\n\")\r\n type_print_effect(self.name + \"'s health is currently at \" + str(self.health) + \".\\n\")\r\n\r\n # executed when the health missing is greater than the leech but the leech is greater than the damage\r\n elif health_missing > self.leeching > damage or self.leeching > damage:\r\n self.health += damage\r\n type_print_effect(self.name + \" leeched \" + str(damage) +\r\n \" health from \" + enemy.name + \" with all possible damage given at this round.\\n\")\r\n type_print_effect(self.name + \"'s health is currently at \" + str(self.health) + \".\\n\")\r\n\r\n return self.health",
"def defend(self, damage_amt):\n print(self.name + \" prepares their defense!\")\n physical_defend_total = 0\n magical_defend_total = 0\n priority_total = 0\n for hero in self.heroes:\n if(hero.is_alive):\n defend_total = hero.defend()\n physical_defend_total += defend_total[0]\n magical_defend_total += defend_total[1]\n priority_total += hero.damage_priority\n\n #print(defend_total)\n\n physical_damage = damage_amt[0]\n magical_damage = damage_amt[1]\n\n if(physical_defend_total >= physical_damage):\n print(\"Physical damage was completely blocked!\")\n physical_damage = 0\n\n else:\n physical_damage -= physical_defend_total\n\n if(magical_defend_total >= magical_damage):\n print(\"Magical damage was completely blocked!\")\n magical_damage = 0\n\n else:\n magical_damage -= magical_defend_total\n\n remain_damage = physical_damage + magical_damage\n\n kills = self.deal_damage(remain_damage,priority_total)\n\n return kills",
"def attack(self):\n return random.randint((self.max_damage // 2), self.max_damage)",
"def damage(self):\n return self._damage",
"def damage_potential(attacker, defender):\n if attacker.damage_type in defender.weaknesses:\n return attacker.damage * attacker.units * 2\n elif attacker.damage_type in defender.immunities:\n return 0\n else:\n return attacker.damage * attacker.units",
"def damage_dealt(self, attack, defence, playersData, targetID=None):\n damage = (attack ** 2) / (attack + defence)\n print(damage)\n lowerBound = damage * 0.75\n upperBound = damage * 1.25\n damageDealt = random.randint(int(lowerBound), int(upperBound))\n \n if targetID is not None:\n totalDefence = 0\n for playerID in playersData:\n totalDefence += playersData[playerID][\"Statistics\"][7]\n print(damageDealt)\n percentageOfDefence = 1 - (playersData[targetID][\"Statistics\"][7] / totalDefence) if len(playersData) != 1 else 1\n return damageDealt * percentageOfDefence\n\n return damageDealt",
"def attack(self,enemy):\n enemy.takeDamage(self.damage)",
"def magic_damage_dealt(self):\r\n return self.data.totalMagicDamageDealt",
"def on_after_move_damage(self, battle, pokemon, damage, move, foe):",
"def physical_damage_dealt(self):\r\n return self.data.totalPhysicalDamageDealt",
"def attack(self):\n crit_modifier = 1\n if(random.randint(1,100) < self.crit_chance):\n crit_modifier = self.crit_strength\n print(self.name + \" deals a critical Hit!\")\n damage = random.randint(0,self.attack_strength)*crit_modifier\n print(self.name + \" deals \" + str(damage) + \" physical damage!\")\n return damage",
"def missile_damage(self):\n\n return Missile.get_damage()",
"def do_damage_with_shield_bubble(self, enemy):\r\n # damage of the character\r\n damage = self.double_damage_and_damage_generator()\r\n\r\n # computes the shield bubble of the enemy to the damage done by the character\r\n # to update damage to reflect shield bubble if the enemy has one\r\n damage = damage - enemy.shield_bubble\r\n\r\n # so it will not go negative and enemy shield bubble will increase due to it\r\n if damage <= 0:\r\n damage = 0\r\n\r\n # updating shield to input the damage\r\n enemy.shield_bubble = enemy.shield_bubble - damage\r\n\r\n # if statement if the shield bubble stat is non existent or if the shield bubble has broke\r\n if enemy.shield_bubble <= 0:\r\n # sets shield bubble to zero to avoid negative values\r\n enemy.shield_bubble = 0\r\n\r\n # it will go straight to attacking the character directly\r\n self.do_damage(enemy, damage)\r\n\r\n # if the shield bubble is still intact\r\n else:\r\n # checks the class of the caller for aesthetics\r\n if self.__class__ == Player:\r\n # for aesthetics purposes\r\n print(constants.PLAYER_TURN_BANNER)\r\n else:\r\n print(constants.ENEMY_TURN_BANNER)\r\n # for aesthetics purposes\r\n\r\n # message saying that the shield bubble is still intact\r\n type_print_effect(enemy.name + \"'s Shield Bubble has withstand the attack of \" + self.name + \".\\n\")\r\n type_print_effect(\"The remaining Shield Bubble of \" + enemy.name + \" is \"\r\n + str(enemy.shield_bubble) + \".\\n\")\r\n print('') # for terminal\r\n\r\n # returns enemy health to be analyzed in an if statement inside the caller in attack function\r\n return enemy.health"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
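
The defend document above can return a negative value when the random roll exceeds enemy_damage; clamping at zero, as sketched here, is an editorial assumption rather than the record's behaviour.

    import random

    def defend(self, enemy_damage):
        # The record's 0-5 defence roll, clamped so an attack never heals.
        return max(enemy_damage - random.randint(0, 5), 0)
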
Parse heading names from a file path | def extract_heading_name(file_path: str) -> Tuple[str, str]:
file_base = os.path.basename(file_path)
both_headings = os.path.splitext(file_base)[0]
heading1, heading2 = both_headings.split('-')
return heading1, heading2 | [
"def parse_header_path(header_path):\n\n (\n *_,\n radiative_transfer_code,\n model_photospheres,\n isotopes,\n folder,\n basename,\n ) = header_path.split(\"/\")\n\n parts = basename.split(\"_\")\n # p_apst{gd}{spectral_type}_{date}_lsf{lsf}_{aspcap}_012_075\n _ = 4\n gd, spectral_type = (parts[1][_], parts[1][_ + 1 :])\n # Special case for the BA grid with kurucz atmospheres. Sigh.\n if gd == \"B\" and spectral_type == \"A\":\n year, month, day = (2019, 11, 21)\n lsf = \"combo5\"\n lsf_telescope_model = \"lco25m\" if parts[2].endswith(\"s\") else \"apo25m\"\n is_giant_grid = False\n gd = \"\"\n spectral_type = \"BA\"\n\n else:\n date_str = parts[2]\n year, month, day = (\n 2000 + int(date_str[:2]),\n int(date_str[2:4]),\n int(date_str[4:6]),\n )\n lsf = parts[3][3]\n lsf_telescope_model = \"lco25m\" if parts[3][4:] == \"s\" else \"apo25m\"\n\n is_giant_grid = gd == \"g\"\n\n kwds = dict(\n radiative_transfer_code=radiative_transfer_code,\n model_photospheres=model_photospheres,\n isotopes=isotopes,\n gd=gd,\n lsf_telescope_model=lsf_telescope_model,\n spectral_type=spectral_type,\n grid_creation_date=datetime.date(year, month, day),\n lsf=lsf,\n )\n\n return kwds",
"def read_dat_names(path):\n names = None\n with open(path) as f:\n for line in f:\n if not line.startswith(\"#\"):\n # names in first row that is not a comment\n names = line.split()\n break\n \n if names is not None:\n # identify time key, check that there is only one\n timekeys = fnmatch.filter(names, '[Tt]ime*')\n if len(timekeys) < 1:\n raise KeyError(f\"The file '{path}' does not contain a time vector\")\n elif len(timekeys) > 1:\n raise KeyError(f\"The file '{path}' contain duplicate time vectors\")\n\n # skip the time array name assumed to be in the first column\n return names[1:]",
"def get_header(path: str) -> List[str]:\n with open(path) as fle:\n header = next(csv.reader(fle))\n\n return header",
"def parse(self, file_path):\r\n with open(file_path) as fh:\r\n lines = list()\r\n try:\r\n lines = fh.readlines()\r\n except:\r\n lines = list()\r\n print(\"Error in header parsing\")\r\n\r\n return self.parse_lines(lines)",
"def _parse_name(line):\n if '/' not in line:\n return line\n search = re.search(r'\\/(\\w+.\\w+$)', line)\n if search:\n return search.group(1)\n return \"\"",
"def get_info_from_file(filename):\n with open(filename, 'U') as f:\n header = f.readline()\n header_elements = header.split()\n\n path, filen = os.path.split(filename)\n filename_elements = filen.split('_')\n\n return header_elements, filename_elements",
"def read_star_header(star_file):\n with open(star_file, 'r') as file:\n lines = file.readlines()\n\n header = []\n for line in lines:\n line = line.strip()\n if line.startswith('_') or line.endswith('_'):\n header.append(line)\n\n return header",
"def split_hname(hname):\n lst = []\n cat = None\n for part in re.split(r\"/(?=[^/])\", hname):\n if cat:\n part = cat + part\n cat = None\n if part[-1] == '/':\n cat = part\n else:\n lst.append(part)\n return lst",
"def ParseElfHeader(path):\n try:\n return elf.ParseElfHeader(path)\n except elf.Error as e:\n raise Error(str(e))",
"def _parse_header(fh):\n headerConverters = {\n b'StartFontMetrics': float,\n b'FontName': _to_str,\n b'FullName': _to_str,\n b'FamilyName': _to_str,\n b'Weight': _to_str,\n b'ItalicAngle': float,\n b'IsFixedPitch': _to_bool,\n b'FontBBox': _to_list_of_ints,\n b'UnderlinePosition': _to_int,\n b'UnderlineThickness': _to_int,\n b'Version': _to_str,\n b'Notice': _to_str,\n b'EncodingScheme': _to_str,\n b'CapHeight': float, # Is the second version a mistake, or\n b'Capheight': float, # do some AFM files contain 'Capheight'? -JKS\n b'XHeight': float,\n b'Ascender': float,\n b'Descender': float,\n b'StdHW': float,\n b'StdVW': float,\n b'StartCharMetrics': _to_int,\n b'CharacterSet': _to_str,\n b'Characters': _to_int,\n }\n d = {}\n while 1:\n line = bytes(fh.readline(), 'ascii')\n if not line: break\n line = line.rstrip()\n if line.startswith(b'Comment'): continue\n lst = line.split(b' ', 1 )\n key = lst[0]\n if len( lst ) == 2:\n val = lst[1]\n else:\n val = b''\n #key, val = line.split(' ', 1)\n try: d[key] = headerConverters[key](val)\n except ValueError:\n continue\n except KeyError:\n continue\n if key==b'StartCharMetrics': return d\n raise RuntimeError('Bad parse')",
"def parse_header(self, line):\n bml.logger.debug(\"BssFile.parse_header(line=%s)\" % (line))\n # GJP 2021-04-16 Allow empty system names\n m = re.match(r\"(?P<file_type>.)00\\{(?P<system_name>[^\\}]*)\\}=NYYYYYY(?P<summary>.*$)\", line)\n assert m, \"line (%s) does not match header record\" % (line)\n self.file_type = m.group('file_type')\n self.system_name = m.group('system_name')\n self.summary = m.group('summary').rstrip()\n bml.logger.debug(\"file_type: %s; system_name: %s; summary: %s\" % (self.file_type, self.system_name, self.summary))\n self.state_nr = self.state_nr + 1 # only one header\n return True",
"def get_header(file_path):\n headers = []\n labels = []\n with codecs.open(file_path, encoding='utf-8') as fp:\n while True:\n line = fp.readline()\n if not line:\n print(\"Data loaded successfully!\")\n headers = [clean_str(str(header)) for header in headers]\n return [headers, np.array(labels)]\n tmp = line.strip().split('\\t')[-2:]\n header, label = tmp[0], int(tmp[1])\n if label == 1:\n labels.append([1, 0, 0, 0, 0])\n elif label == 2:\n labels.append([0, 1, 0, 0, 0])\n elif label == 3:\n labels.append([0, 0, 1, 0, 0])\n elif label == 4:\n labels.append([0, 0, 0, 1, 0])\n else:\n labels.append([0, 0, 0, 0, 1])\n headers.append(header)",
"def _process_header(config, infile):\n header, line = [], infile.readline()\n while line.startswith(\"@\"):\n header.append(line)\n line = infile.readline()\n\n if config.flag_as_sorted:\n _set_sort_order(header)\n _set_pg_tags(header, config.update_pg_tag)\n\n return header, line",
"def test_validate_file_headings():\n good_headings = \"NAME,ICAO,Latitude,Longitude\"\n # using typo's in heading names: NAMEE, Lattitude\n bad_headings = \"NAMEE,ICAO,Lattitude,Longitude\"\n\n assert validate_file_headings(good_headings)\n\n with pytest.raises(InvalidFileHeadingError):\n validate_file_headings(bad_headings)",
"def readHeader(lines):\n while 1:\n # skip blank line at top\n hd = lines.next().strip()\n if hd: break\n fieldNames = hd.split('\\t')\n\n while 1:\n bal = lines.next().strip()\n if bal: break\n dummy, dt, a = bal.split()\n dt = isoDate(dt)\n a = amt(a)\n\n hd = lines.next().strip() # skip blank line\n if hd: raise IOError, \"expected blank line; got\" + hd\n \n return fieldNames, dt, a",
"def parse_filename(filepath):\n\tfilename = os.path.split(filepath)[-1]\n\tfile_descriptor = filename[:4].upper()\n\tyear = int(filename[4:6])\n\tquarter = int(filename[7])\n\treturn file_descriptor, year, quarter",
"def read_header_namevalues(ms2_file):\n result = {}\n for line in ms2_file:\n chunks = line.rstrip().split('\\t')\n if chunks[0] == \"H\":\n if chunks[1].startswith(\"@\") and \"=\" in chunks[1]:\n name, value = chunks[1][1:].split(\"=\")\n result[name] = value\n else:\n # after the header lines, quit\n break\n return result",
"def __readHeaders(self, fh):\n fh.readline()\n fh.readline()\n \n headersStr = fh.readline()\n headers = [ s.strip() for s in headersStr[1:].split() ]\n unitsStr = fh.readline()\n units = [ s.strip() for s in unitsStr[1:].split() ]\n \n fh.readline()\n \n headers.pop(1)\n units[0] = 'mjd'\n units[1] = 'seconds'\n\n self.startDate = self.__getStartDate(fh)\n\n # Get a mapping of header names to column index\n headerDict = dict(list(zip(headers,list(range(len(headers))))))\n return (headerDict, units)",
"def check_headers(self: ProjectUpdater) -> None:\n for header_file_raw in self.header_files:\n assert header_file_raw[0] == '/'\n header_file = f'src/ballistica{header_file_raw}'\n if header_file.endswith('.h'):\n _check_header(self, header_file)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
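
A quick usage check for the extract_heading_name document above; the path is hypothetical, and, as in the record, a basename containing more than one hyphen would raise a ValueError on unpacking.

    import os
    from typing import Tuple

    def extract_heading_name(file_path: str) -> Tuple[str, str]:
        # As in the record: "<heading1>-<heading2>.<ext>" -> two headings.
        file_base = os.path.basename(file_path)
        both_headings = os.path.splitext(file_base)[0]
        heading1, heading2 = both_headings.split('-')
        return heading1, heading2

    # Hypothetical path purely for illustration.
    assert extract_heading_name("percentiles/Biology-Physics.pkl") == ("Biology", "Physics")
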
Load the dataframe containing papers' percentiles and pageranks | def load_percentile_data(heading1:str , heading2: str, base_dir='viz_dataframes') -> pd.DataFrame:
path = f'{base_dir}/percentiles/{heading1}-{heading2}.pkl'
if os.path.exists(path):
with open(path, 'rb') as in_file:
result_df = pkl.load(in_file)
else:
path = f'{base_dir}/percentiles/{heading2}-{heading1}.pkl'
with open(path, 'rb') as in_file:
result_df = pkl.load(in_file)
return result_df | [
"def load_precipout(url, year=None):\n df = table_to_frame(get_table(url))\n\n if year:\n df['year'] = year\n\n return df",
"def calculate_percentiles(self):\n self.percentile_low = np.percentile(self.data, 25)\n self.percentile_high = np.percentile(self.data, 75)",
"def create_df(self):\n alldf= pd.DataFrame()\n pdffiles= glob.glob(self.input_lib+'/**/*.pdf', recursive=True)\n for pdf_file in pdffiles:\n pdf_page_count= self.count_pages(pdf_file)\n for pg in range(1,pdf_page_count+1):\n pg = str(pg)\n cmd = ['pdftotext','-bbox-layout','-f', pg, pdf_file, pdf_file[:-4]+'_'+pg+'.html']\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \n o, e = proc.communicate()\n page = open(pdf_file[:-4]+'_'+pg+'.html',encoding=\"utf8\")\n soup = BeautifulSoup(page.read(),'html.parser')\n out_html_file_path=pdf_file[:-4]+'_'+pg+'.html'\n lines = soup.find_all('line')\n pdf_file= pdf_file.replace(\"\\\\\",\"/\")\n path= pdf_file.split(\"/\")[-1]\n path= path[:-4]+\"-from-pdf-page\"+pg+'.jpg'\n td_list = []\n for line in lines:\n req_td_dict = {}\n req_td_dict['path'] =path\n #req_td_dict['page']= int(pg)\n req_td_dict['xmin'] = round(float(line['xmin']))\n req_td_dict['ymin'] = round(float(line['ymin']))\n req_td_dict['xmax'] = round(float(line['xmax']))\n req_td_dict['ymax'] = round(float(line['ymax']))\n req_td_dict['label'] = line.text.replace('\\n',' ')\n td_list.append(req_td_dict)\n df1 = pd.DataFrame(td_list)\n alldf= alldf.append(df1)\n alldf_multi= self.apply_multiple(alldf, self.multiple)\n return alldf_multi,alldf",
"def pre_process_df(df: pd.DataFrame, percent: bool = False) -> pd.DataFrame:\n ret = df.copy()\n del ret['info']\n del ret[('Month', 'sum')]\n ret.drop('sum', inplace=True)\n ret.drop('None', inplace=True)\n\n ret.columns = ret.columns.droplevel(0)\n ret.columns.name = 'Month'\n\n pi_series = df[PI].iloc[:-2]\n idx_name = ret.index.name\n\n if percent:\n with pd.option_context('mode.use_inf_as_na', True):\n for key, series in ret.items():\n series.fillna(0.0, inplace=True)\n ret[key] = (100 * series).astype(int)\n iterator = zip(pi_series, ret.iterrows())\n ret.index = [f'{project} ({pi}): {np.nanmax(sbu)} %' for pi, (project, sbu) in iterator]\n else:\n iterator = zip(pi_series, ret.iterrows())\n ret.index = [f'{project} ({pi}): {np.nanmax(sbu):,.0f}' for pi, (project, sbu) in iterator]\n\n ret.index.name = idx_name\n return ret.T",
"def get_percentiles(self):\n self.percentiles = np.linspace(0,100,self.n_bins+1)[1:-1].tolist()\n return self",
"def prepare_dataframe(self):\n startURL = self.config.web_url\n df = waifu.DataFrame(self.dataframe, columns=[n for n in self.dataframe])\n\n print(\"Getting the available pages to scrape...\")\n all_pages = self.get_all_valid_pages(startURL)\n all_books_URLs = []\n\n chunky_size = 10\n processes = 10\n\n print(\"Getting all the books from each page...\")\n with Pool(processes=processes) as pool, tqdm.tqdm(total=len(all_pages)) as pbar:\n for data in pool.imap_unordered(self.get_all_books, all_pages , chunksize=chunky_size):\n all_books_URLs.extend(data)\n pbar.update()\n\n pool.terminate()\n pool.join()\n pbar.close()\n \n print(\"Getting each book's data\")\n with Pool(processes=processes) as pool, tqdm.tqdm(total=len(all_books_URLs)) as pbar:\n for book in pool.imap_unordered(self.get_book_meta, all_books_URLs, chunksize=chunky_size):\n df = df.append(book, ignore_index=True)\n pbar.update()\n\n pool.terminate()\n pool.join()\n pbar.close()\n\n return df",
"def percentile(data_set, p):\n if p > 1:\n raise ValueError(\"p must me 0 < p <= 1\")\n sorted_data_set = sorted(data_set)\n max_index = len(data_set) - 1\n\n p_index = int(p * max_index)\n\n return sorted_data_set[p_index]",
"def load_dataframe():\n # load data from csv file\n train_df = pd.read_csv(os.path.join(config.DATSET_PATH, \"train.csv\"))\n train_df.drop(columns=train_df.columns[0], inplace=True)\n\n # get class names\n with open(os.path.join(config.DATSET_PATH, \"label_map.json\"), \"r\") as file:\n class_names_encoded = json.load(file)\n class_names = list(class_names_encoded.keys())\n\n # stats: count number of class instances\n _, train_labels_count = np.unique(train_df['Label'], return_counts=True)\n\n # keep forest class, get the same amount of other classes\n df_forest = train_df.drop(np.where(train_df['Label'] != 1)[0], inplace=False)\n forest_count = train_labels_count[1]\n df_rest = train_df.drop(\n np.where(train_df['Label'] == 1)[0], inplace=False).iloc[0:forest_count]\n frames = [df_forest, df_rest]\n train_df_cleaned = pd.concat(frames, ignore_index=True)\n\n # getting the class distribution in the prepared dataset\n _, train_labels_count = np.unique(train_df_cleaned['Label'], return_counts=True)\n train_count_df = pd.DataFrame(data=train_labels_count)\n train_count_df['ClassName'] = class_names\n train_count_df.columns = ['Count', 'ClassName']\n train_count_df.set_index('ClassName', inplace=True)\n train_count_df.head()\n train_count_df.plot.bar()\n plt.title(\"Distribution of images per class\")\n plt.ylabel(\"Count\")\n plt.show()\n\n # set non forest class to alternative\n train_df_cleaned.loc[forest_count:(2 * forest_count), 'Label'] = 0\n return train_df_cleaned",
"def _calculatePercentilesInSlice(dataSlice, sourceColumnName, targetColumnName):\n values = dataSlice[sourceColumnName].values\n mn = values.mean()\n std = values.std()\n percentiles = []\n for value in values:\n percentiles.append(100.0*stats.norm.cdf((mn - value)/std))\n\n dataSlice.loc[:, targetColumnName] = pd.Series(\n data=np.array(percentiles),\n index=dataSlice.index)\n return dataSlice",
"def statcast_pitcher_percentile_ranks(year: int) -> pd.DataFrame:\n url = f\"https://baseballsavant.mlb.com/leaderboard/percentile-rankings?type=pitcher&year={year}&position=&team=&csv=true\"\n res = requests.get(url, timeout=None).content\n data = pd.read_csv(io.StringIO(res.decode('utf-8')))\n # URL returns a null player with player id 999999, which we want to drop\n return data.loc[data.player_name.notna()].reset_index(drop=True)",
"def process_raw(self, df, page, **partition):\n df = df.rename(\n columns=self._metadata.get_column_map(page, **partition))\n if 'report_year' not in df.columns:\n df['report_year'] = list(partition.values())[0]\n self.cols_added = ['report_year']\n # if this is one of the EIA860M pages, add data_source\n meta_eia860m = excel.Metadata('eia860m')\n pages_eia860m = meta_eia860m.get_all_pages()\n if page in pages_eia860m:\n df = df.assign(data_source='eia860')\n self.cols_added.append('data_source')\n df = fix_leading_zero_gen_ids(df)\n return df",
"def get_percentage(self, data, column_pct):\n papua = {\n 'groups': 'Papua',\n f'{column_pct}_pct': len(data[(data['groups'] == 1) & (data[column_pct] == True)]) / len(data[data['groups'] == 1])\n }\n non_papua = {\n 'groups': 'Non-Papua',\n f'{column_pct}_pct': len(data[(data['groups'] == 2) & (data[column_pct] == True)]) / len(data[data['groups'] == 2])\n }\n \n return pd.DataFrame([papua, non_papua])",
"def normalize_dataframes(grouped_df, month, total_docs_dict):\n # Get the no. of phrases and documents in the year in the arguments\n doc_count = total_docs_dict.get(month)\n grouped_df['percentage_docs'] = grouped_df['total_docs'] * 100 / doc_count\n # Drop the total occurrences and total docs columns, they are no longer necessary\n grouped_df.drop(['total_occurrences', 'total_docs'], axis=1, inplace=True)\n return grouped_df",
"def abstract_dataframe(filename):\n pmid_ab_dict = medline_parser(filename)\n df = pd.DataFrame.from_dict(pmid_ab_dict, orient='index').reset_index()\n df.columns = ['pmid', 'title']\n df.to_csv('../data/pmid_titles_metabolism_5years.csv', index=False, index_label=False)",
"def load_and_plot_data(filename):\n df = pd.load_csv(filename, index_col=0)\n df.hist()\n return df",
"def premier_quartile(data_frame,colonne):\n return data_frame[colonne].quantile(q=0.25)",
"def read_scaled_df():\n print('Reading scaled data ...')\n path = os.path.dirname(__file__)\n path = os.path.join(path, 'scaled_data.csv')\n return pd.read_csv(path)",
"def set_percentile(self, ulen, K=5):\n perc_gap = np.linspace(0, 100, K+1)\n _percent = np.percentile(ulen, list(perc_gap))\n self.percentile = np.zeros((K, 2))\n for i in range(K):\n self.percentile[i, 0] = int(_percent[i])+1\n self.percentile[i, 1] = int(_percent[i+1])\n if i == 0:\n self.percentile[i,0] = 0\n elif i==4:\n self.percentile[i,1] = float(\"inf\")",
"def set_percentiles(self, percentiles: [int]) -> None:\n self._percentiles = percentiles"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
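
The load_percentile_data document above falls back to the swapped-order filename when the first path is missing; the sketch below keeps that behaviour but raises a clearer error when neither file exists (the error message is an editorial addition).

    import os
    import pickle as pkl
    import pandas as pd

    def load_percentile_data(heading1: str, heading2: str, base_dir: str = 'viz_dataframes') -> pd.DataFrame:
        # Try "<h1>-<h2>.pkl", then the swapped order, as in the record.
        for h1, h2 in ((heading1, heading2), (heading2, heading1)):
            path = f'{base_dir}/percentiles/{h1}-{h2}.pkl'
            if os.path.exists(path):
                with open(path, 'rb') as in_file:
                    return pkl.load(in_file)
        raise FileNotFoundError(f'no percentile dataframe for {heading1}-{heading2}')
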
Load the dataframe containing information about journals across fields | def load_journal_data(heading1: str, heading2: str, base_dir='viz_dataframes') -> pd.DataFrame:
path = f'{base_dir}/journals/{heading1}-{heading2}.pkl'
if os.path.exists(path):
with open(path, 'rb') as in_file:
result_df = pkl.load(in_file)
else:
path = f'{base_dir}/journals/{heading2}-{heading1}.pkl'
with open(path, 'rb') as in_file:
result_df = pkl.load(in_file)
return result_df | [
"def handle_actors_df(self):\r\n credits_cp = self.credits.copy()\r\n credits_cp['cast'] = credits_cp['cast'].apply(lambda row: ast.literal_eval(row))\r\n\r\n actors_df = self.metadata_nested_transformer(\r\n df=credits_cp,\r\n main_column='cast',\r\n secondary_column='movie_id'\r\n )[['id', 'name', 'character', 'order', 'movie_id']]\r\n\r\n actors_df.loc[actors_df['character'] == '', 'character'] = 'undefined'\r\n transformed_actors_df = actors_df.astype({'id': int, 'order': int, 'movie_id': int})\r\n return transformed_actors_df",
"def load_docs(self, path):\n df = pd.read_csv(path)\n df['numero_PL'] = df['numero_fecha_PL'].apply(lambda x: x.split('-')[0][-4:]) # keeps only the last 4 digits of the PL number\n df['texto'] = df['texto'].apply(lambda x: self.cleanup_text(x))\n # print(df.head(10))\n return df",
"def journal_df(self):\n if self._journal_df is None:\n if self.keep_in_memory:\n self._journal_df = self.load_journals(show_progress=self.show_progress)\n else:\n return self.load_journals(show_progress=self.show_progress)\n\n return self._journal_df",
"def hospitals_from_data_frame(hospitals, h_id='HOSP_ID', h_level='Level', h_attributes=None):\r\n if h_attributes:\r\n h_attributes = [atr for atr in h_attributes if atr in hospitals.columns]\r\n else:\r\n h_attributes = [atr for atr in hospitals.columns if atr not in [h_id, h_level]]\r\n\r\n return [Hospital(h[h_id], h[h_level], h[h_attributes]) for _, h in hospitals.iterrows()]",
"def data_extract_employee_records(lg):\n try:\n client = MongoDBManager()\n collection = client.get_collection(dbname = 'SampleDatabase3',collection_name ='SampleCollection3')\n projection = {}\n query ={}\n projection[\"Designation\"] = u\"$Designation\"\n cursor = collection.find(query, projection = projection)\n df = pd.DataFrame(list(cursor))\n lg.print_log (\"Data extraction of employee_records complete\")\n df = df.rename(\n columns= {\n '_id' : 'user_id'\n }\n\n ) \n except Exception as e:\n lg.print_log(e)\n return df",
"def extract_historic_missions() -> pd.DataFrame:\n return extract(db_name=\"fmc\", query_filepath=\"fmc/historic_missions.sql\")",
"def data_extract_organizations(lg):\n try:\n client = MongoDBManager()\n collection = client.get_collection(dbname = 'SampleDatabase',collection_name ='SampleCollectionName')\n projection = {}\n query = {}\n projection[\"_id\"] = 1\n projection[\"OrganizationName\"] = 1\n projection[\"FinancialPartnerOrgConfig.FinancialPartnerOrgId\"] = 1\n cursor = collection.find(query, projection = projection)\n df = pd.DataFrame(list(cursor))\n lg.print_log (\"Data extraction of organizations complete\")\n df['FinancialPartnerOrgConfig'] = df['FinancialPartnerOrgConfig'].apply(lambda x: x['FinancialPartnerOrgId'])\n df =df.rename(\n columns ={\n '_id' : 'OrganizationId'\n }\n )\n\n except Exception as e:\n lg.print_log(e)\n return df",
"def get_journey_data(self):\n df_directions = self.get_directions()\n df_places = self.get_places(df_directions)\n post_codes = JourneyStation.generate_station_post_codes(df_places)\n today = Utility.get_today_date()\n try:\n today = Utility.get_today_date()\n data = DatabaseModel().read(\n \"journey_fuel_prices\",\n f\"{today}-{self.origin}-{self.fuel_type}-{self.destination}\",\n )\n df = Utility.to_dataframe(data)\n\n except (TypeError, pymongo.errors.ServerSelectionTimeoutError) as e: # [3]\n df = self.save(post_codes)\n return df",
"def lithology_info_to_frame(data):\n result=json_normalize(data,['data','drilling','lithology'])\n return result",
"def load_data(name: str, location: str = SAVE_LOCATION) -> pd.DataFrame:\n df = pd.read_feather(location + name + '.feather')\n if 'date' in df.columns.values:\n df = df.set_index('date')\n return df",
"def _load_goals(self):\n self.results_df['goals'] = self.results_df.team.apply(self.team_total_goals)",
"def read_news_dataframe(requested_fields, requested_categories, readable_dataframe):\n \n total_main_news_df = readable_dataframe\n total_main_news_df[\"posted_date\"] = total_main_news_df[\"posted_date\"].astype(str) #converting data column to string\n output_category_list = []\n \n for category in requested_categories:\n category_wise_df = total_main_news_df[total_main_news_df[\"category\"] == category]\n category_wise_df_with_requested_fields = category_wise_df[requested_fields]\n category_wise_df_with_requested_fields = (\n category_wise_df_with_requested_fields.replace({np.nan: None})\n )\n \n news_list = []\n for index, row in category_wise_df_with_requested_fields.iterrows():\n response_dict = (\n {i: row[i] for i in category_wise_df_with_requested_fields.columns}\n )\n news_list.append(response_dict)\n \n category_dictionary = {\n \"category\": category,\n \"total_results\": len(category_wise_df_with_requested_fields),\n \"articles\": news_list,\n }\n \n output_category_list.append(category_dictionary)\n \n return {\"news\": output_category_list}",
"def insert_location_details(logger, lobj, dataframe):\n if lobj.location_type == 'state':\n column_array = ['state_code', 'state_name']\n elif lobj.location_type == 'district':\n column_array = ['state_code', 'state_name',\n 'district_code', 'district_name']\n elif lobj.location_type == 'block':\n column_array = ['state_code', 'state_name',\n 'district_code', 'district_name',\n 'block_code', 'block_name']\n elif lobj.location_type == 'panchayat':\n column_array = ['state_code', 'state_name',\n 'district_code', 'district_name',\n 'block_code', 'block_name',\n 'panchayat_name', 'panchayat_code']\n else:\n column_array = []\n for column_name in column_array:\n dataframe[column_name] = getattr(lobj, column_name)\n return dataframe",
"def sl_to_df(fl):\n\n\t# convert the .log to a pandas table\n\tdf = pd.read_table(fl, sep = \"|\", names = headers)\n\n\t# change everything to uniform data type\n\tdf['job-id'] = df['job-id'].astype(str)\n\n\t# filter out zero cpus used for figuring out factors\n\tdf = df[(df['num-cpus'] != 0)]\n\n\treturn df",
"def load_data():\n url = \"https://fullfact.org/media/claim_conclusion.json\"\n filename = CACHE_DIR + \"/claim_conclusion.json\"\n if not os.path.isfile(filename):\n r = requests.get(url, allow_redirects=True)\n open(filename, 'wb').write(r.content)\n with open(filename) as file_in:\n data = file_in.readlines()\n checks = json.loads(\"\".join(data[1:])) # skip first line and parse JSON\n\n def get_topic(row):\n return(row['url'].split(\"/\")[0])\n\n df = pd.DataFrame(checks)\n df['topic'] = df.apply(lambda row: get_topic(row), axis=1)\n return df",
"def get_journals(self):\n journals = Counter()\n for ctree in self.get_ctrees():\n if 'journalInfo' in ctree.metadata:\n #print(ctree.metadata['authorList'][0]['author'])\n ctree_journals = ctree.metadata['journalInfo'][0]['journal']\n for ctree_journal in ctree_journals:\n if 'title' in ctree_journal:\n journals.update(ctree_journal['title'])\n return journals",
"def readInPersonRecords(self):\n self.currentRowId = self.startingRow\n previousFullName = \"\"\n\n while not self.isEmptyRow(self.currentRowId):\n fullName = self.getCellValue(self.currentRowId, 2)\n \n # take only if new person \n if fullName != previousFullName:\n workDistrict = str(self.getCellValue(self.currentRowId, 4)).upper()\n proffesion = str(self.getCellValue(self.currentRowId, 5)).lower()\n \n docNr = self.getCellValue(self.currentRowId, self.relativeCol + 4)\n sentCountry = mapCodeToCountry(self.getCellValue(self.currentRowId, self.relativeCol + 1))\n dateFrom = self.getCellDate(self.currentRowId, self.relativeCol + 5)\n dateTo = self.getCellDate(self.currentRowId, self.relativeCol + 6)\n docDate = calculateLastWorkingDate(dateFrom)\n \n self.savePersonData(fullName, workDistrict, proffesion, docNr, docDate, sentCountry, dateFrom, dateTo)\n \n self.currentRowId += 1\n previousFullName = fullName",
"def _load_labor_data(self):\n\n labor = pd.read_csv(DATA_PATH +\n 'Labor/API_SL_TLF_TOTL_IN_DS2_en_csv_v2_59582.csv',\n header=2)\n labor = labor.drop([\n 'Country Code', 'Indicator Name', 'Indicator Code', 'Unnamed: 63'\n ],\n axis=1)\n labor.set_index('Country Name', inplace=True)\n labor = labor.transpose().astype(float)\n labor.drop(list(labor.index.values[0:5]), inplace=True)\n labor['Total'] = labor.sum(axis=1) / 10.\n labor.index = labor.index.astype(int)\n\n self.datasets['labor'] = labor",
"def import_pris(pris_link):\n pris = pd.read_csv(pris_link,\n delimiter=',',\n encoding='iso-8859-1',\n skiprows=20,\n )\n\n pris = pris.rename(columns={pris.columns[2]: 'Country'})\n pris = pris[['Country', 'Unit', 'Current Status', 'Type',\n 'Model', 'Operator', 'Reactor Supplier', 'Const. Date',\n 'Grid Date', 'Shutdown Date', 'RUP [MWe]']]\n pris.insert(11, 'Latitude', np.nan)\n pris.insert(12, 'Longitude', np.nan)\n pris = pris[pris.Unit.notnull()]\n pris = pris[pris.Unit != 'Unit']\n pris = pris.replace(np.nan, '')\n return pris"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
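
load_journal_data repeats the load_percentile_data pattern with only the sub-directory changed; a shared helper, sketched here with a hypothetical subdir parameter, would remove the duplication.

    import os
    import pickle as pkl

    def _load_pair_dataframe(subdir, heading1, heading2, base_dir='viz_dataframes'):
        # Same fallback loop as the sketch above, parameterised on the sub-directory.
        for h1, h2 in ((heading1, heading2), (heading2, heading1)):
            path = f'{base_dir}/{subdir}/{h1}-{h2}.pkl'
            if os.path.exists(path):
                with open(path, 'rb') as in_file:
                    return pkl.load(in_file)
        raise FileNotFoundError(f'no {subdir} dataframe for {heading1}-{heading2}')

    def load_journal_data(heading1, heading2, base_dir='viz_dataframes'):
        return _load_pair_dataframe('journals', heading1, heading2, base_dir)
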
List all dishes (CSV version) | def get(self):
if not current_user.is_host():
abort(403, "Forbidden")
all_dishes = Dish.query.all()
rows = []
for dish in all_dishes:
invitation = Invitation.query.filter_by(id=dish.invitation_id).first()
if invitation is None:
continue
rows.append({
'id': dish.id,
'dish_type': dish.dish_type,
'name': dish.name,
'desc': dish.desc,
'invitation_id': dish.invitation_id,
'invitation_name': invitation.name,
'allergens': ", ".join([x.name for x in dish.allergens]),
'special_preparation': ", ".join([x.name for x in dish.special_preparations]),
})
si = io.StringIO()
keys = rows[0].keys() if len(rows) > 0 else []
cw = csv.DictWriter(si, fieldnames=keys)
cw.writeheader()
cw.writerows(rows)
output = make_response(si.getvalue())
output.headers["Content-Disposition"] = "attachment; filename=food.csv"
output.headers["Content-type"] = "text/csv"
return output | [
"def csv():\n\n print \"HOST,GUEST,PERSISTENT,ACTIVE,LUN,LV,MASK,SNAP,POOL,SIZE\"\n\n for host in config.HOSTS:\n doms = guests(host, alldoms=True)\n\n for dom in doms:\n printcsv(host, dom)",
"def get_dishes(restaurant_id):\n db_session = current_app.config[\"DB_SESSION\"]\n dishes = (\n db_session.query(MenuDish)\n .filter(restaurant_id == MenuDish.restaurant_id)\n .all()\n )\n return dishes",
"def printcsv(host, dom):\n for el in __prepare_to_print(host, dom):\n print \"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\" % el",
"def db_stories_csv():\n if request.method == 'GET':\n rows = (str(s) for s in models.Story.query.order_by(\n models.Story.language_id).all())\n return '<br>'.join(rows)",
"def all_decks_printer(cls):\r\n logger.info(\"Request to show all decks.\")\r\n with Connection() as con:\r\n logger.debug(\"Connected to database successfully. Preparing all decks...\")\r\n cursor = con.cursor()\r\n cursor.execute(\"SELECT name from decks\")\r\n decks = cursor.fetchall()\r\n decks = [i[0] for i in decks]\r\n print(\"\"\"DECKS (with number of cards left for today shown):\r\n----------\"\"\")\r\n for i in decks:\r\n print(f\"* {i} ({Deck(i).daily_count_printer()})\")\r\n print(\"----------\")\r\n logger.info(\"Decks printed out.\")\r\n return decks",
"def csvToList(self):\n dataFrame = pd.read_csv(\"zacks_custom_screen.csv\")\n res = []\n for item in dataFrame:\n res.append(item)\n return res",
"def getAllClientes(self):\n database = self.database\n sql = f\"SELECT * FROM hermes.clientes;\"\n data = database.executeQuery(sql)\n lista = {}\n final = []\n if len(data) > 0:\n for x in data:\n lista = self.convertTuplaToDicc(x, True)\n final.append(lista)\n return final",
"def get_all() -> list:\n clientes = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM clientes\")\n for row in cursor:\n cliente = Cliente(row[1], row[2], row[3], row[4], row[5], row[0])\n clientes.append(cliente)\n if debug:\n print(str(cliente))\n conn.close()\n return clientes",
"def list_datasets():\n # TODO: Query datasets in database\n return []",
"def ls(manager: WebManager):\n manager.sanitize()\n click.echo(tabulate(\n [\n (n.id, n.name, n.version, n.report.user, n.report.public, n.report.number_nodes, n.report.number_edges)\n for n in manager.list_networks()\n ],\n headers=['id', 'name', 'version', 'owner', 'public', 'nodes', 'edges'],\n ))",
"def _GetAllShowList(self):\n today = datetime.date.today().strftime(\"%Y%m%d\")\n saveFile = '_epguides_' + today + '.csv'\n saveFilePath = os.path.join(self._saveDir, saveFile)\n if os.path.exists(saveFilePath):\n # Load data previous saved to file\n with open(saveFilePath, 'r') as allShowsFile:\n self._allShowList = allShowsFile.read()\n else:\n # Download new list from EPGUIDES and strip any leading or trailing whitespace\n self._allShowList = util.WebLookup(self.ALLSHOW_IDLIST_URL).strip()\n\n if self._ParseShowList(checkOnly=True):\n # Save to file to avoid multiple url requests in same day\n with open(saveFilePath, 'w') as allShowsFile:\n goodlogging.Log.Info(\"EPGUIDE\", \"Adding new EPGUIDES file: {0}\".format(saveFilePath), verbosity=self.logVerbosity)\n allShowsFile.write(self._allShowList)\n\n # Delete old copies of this file\n globPattern = '_epguides_????????.csv'\n globFilePath = os.path.join(self._saveDir, globPattern)\n for filePath in glob.glob(globFilePath):\n if filePath != saveFilePath:\n goodlogging.Log.Info(\"EPGUIDE\", \"Removing old EPGUIDES file: {0}\".format(filePath), verbosity=self.logVerbosity)\n os.remove(filePath)",
"def data():\n \n with urlopen(url) as myFile:\n readable_file = []\n for line in myFile:\n line = line[:-1]\n readable_file.append(line.decode('utf-8').split(','))\n del readable_file[0]\n\n\n\n count = 1\n for row in readable_file:\n print(\"Reading Record\", count,\":\")\n print(\"Left dashboard switch (0 or 1): \", row[1])\n print(\"Right dashboard switch (0 or 1): \", row[2])\n print(\"Child lock switch (0 or 1): \", row[3])\n print(\"Master unlock switch (0 or 1): \", row[4])\n print(\"Left inside handle (0 or 1): \", row[5])\n print(\"Left outside handle (0 or 1): \", row[6])\n print(\"Right inside handle (0 or 1): \", row[7])\n print(\"Right outside handle (0 or 1): \", row[8])\n print(\"Gear shift position (P,N,D,1,2,3,or R):\", row[9])\n\n\n print(module1.doors(row[0], row[1], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[9]))\n \n print(\"\")\n count = count + 1",
"def download_as_csv(self, request, queryset):\n field_names = ['date', 'quantity', 'description', 'city', 'park', 'country_code', 'country_name', 'food_type_id', 'food_type_name']\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename={}.csv'.format('feed_entries')\n writer = csv.writer(response)\n\n writer.writerow(field_names)\n for entry in queryset:\n writer.writerow(self._format_entry_as_list(entry))\n\n return response",
"def csv(self):\r\n reader = csv.reader(self.text.splitlines())\r\n return [l for l in reader]",
"def hs_list(args):\n for hs in get_hidden_services():\n print args.fmt.replace(r'\\t', '\\t') % hs",
"def readFromDtb(self) -> list:\n\n self.cursor.execute('SELECT Expense, Price, MoreInfo FROM ' + self.table)\n return self.cursor.fetchall()",
"def getModelList(dataset='ShapeNetCore', synsetId='*'):\n encodedQuery = 'datasets:%s AND wnhypersynsets:%s' % (dataset, synsetId)\n url = '{}?q={}&rows=10000000&fl=fullId&wt=csv&csv.header=false'\n solrQueryURL = url.format(SOLR_URL, encodedQuery)\n response = urllib.urlopen(solrQueryURL)\n return response.read().splitlines()",
"def csv(self, request):\n buffer = io.BytesIO()\n filename = 'all_covid_history_data_{date}.csv'.format(date=datetime.date.today())\n GeneralData.objects.to_csv(buffer)\n response = HttpResponse(\n content_type='text/csv',\n status=200,\n )\n response.write(buffer.getvalue())\n response['Content-Disposition'] = 'attachment; filename={name}'.format(name=filename)\n return response",
"def ls(manager: WebManager):\n click.echo('\\t'.join(('id', 'name', 'description')))\n for omic in manager.session.query(Omic).all():\n click.echo('\\t'.join((str(omic.id), omic.source_name, omic.description)))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Number of main arguments of Fx (t,x,u,xdot), not counting parameters | def nArgs(self):
n=2 # x and t
if self.has_input:
n+=1 #u
if self.implicit:
n+=1 # xdot
return n | [
"def nArgsOutput(self):\n return self.nArgsImplicit-1",
"def _num_arguments(func: Callable) -> int:\n sig = signature(func)\n return len(sig.parameters)",
"def arguments():\n if len(sys.argv) == 3:\n cov = 50\n iden = 25\n\n elif len(sys.argv) == 5:\n if sys.argv[3].isnumeric() and sys.argv[4].isnumeric():\n cov = sys.argv[3]\n iden = sys.argv[4]\n else:\n print(\"Error. Coverage cut-off and identity cut-off must be numbers.\")\n help_msg()\n sys.exit()\n\n else:\n print(\"Error. Incorrect number of arguments.\")\n help_msg()\n sys.exit() \n\n return (cov, iden)",
"def number_of_arguments(func):\n if isinstance(func, functools.partial):\n total_args = len(inspect.signature(func.func).parameters)\n return total_args - len(func.args) - len(func.keywords)\n return len(inspect.signature(func).parameters)",
"def getArgumentCount(self):\n return len(self.__rawArgs__)",
"def len_arguments(self):\n return self.arguments.len()",
"def topic_args(self, n):\n raise NotImplementedError",
"def main(args):\r\n\tprint args",
"def get_used_args():\n return USED_ARGS",
"def args():\n return []",
"def argsfunc(*args):",
"def msg_args(self, n):\n raise NotImplementedError",
"def tail_args(self) -> Namespace:\n ...",
"def head_args(self) -> Namespace:\n ...",
"def test_Tucker_args():\n testing_function_with_args('tucker')",
"def test_arg_types(self):\n for fac, off in itertools.product(self.possible_args, repeat=2):\n factor = fac if fac else 1\n offset = off if off else 0\n ck_utils.convert_unit(10, factor, offset)",
"def test_NTN_args():\n testing_function_with_args('ntn')",
"def print_arguments_given(args):\r\n\tprint('=' * 100)\r\n\tprint('Uai file : {}'.format(args.uai_file))\r\n\tprint('Task number : {}'.format(args.task_id))\r\n\tprint('Training data file : {}'.format(args.training_data))\r\n\tprint('Test data file : {}'.format(args.test_data))\r\n\tprint('=' * 100)",
"def test_z(self):\r\n list.extend(sys.argv, ['-z', 'count_txt'])\r\n self.assertEqual(len(count.og_function()), 26*2) # Check length of z:\r"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Number of main arguments of the output equation Fx (t,x,u), not counting parameters | def nArgsOutput(self):
return self.nArgsImplicit-1 | [
"def nArgs(self):\n n=2 # x and t\n if self.has_input:\n n+=1 #u\n if self.implicit:\n n+=1 # xdot\n return n",
"def n_independent_parameters(self):\n return",
"def num_sol( at, y0, params):\r\n nSteps = at.shape[0]# this is to define the start of our time vector and setting it to 1-D \r\n #We will create a displacement vector and velocity vector \r\n au_hat = np.zeros(nSteps)\r\n av_hat = np.zeros(at.shape[0])\r\n #Next we will set initial conditions \r\n au_hat[0] = y0[0]\r\n av_hat[0] = y0[1]\r\n for i in range(nSteps - 1):\r\n # slope at previos time steps, i \r\n fn1 = av_hat[i]\r\n fn2 = -dPar['w0']**2*au_hat[i]\r\n #Eulers Formula y(n+1) = yn +h*f(n)\r\n au_hat[i+1] = au_hat[i] + dPar['h']*fn1\r\n av_hat[i+1] = av_hat[i] + dPar['h']*fn2\r\n return au_hat, av_hat",
"def arguments():\n if len(sys.argv) == 3:\n cov = 50\n iden = 25\n\n elif len(sys.argv) == 5:\n if sys.argv[3].isnumeric() and sys.argv[4].isnumeric():\n cov = sys.argv[3]\n iden = sys.argv[4]\n else:\n print(\"Error. Coverage cut-off and identity cut-off must be numbers.\")\n help_msg()\n sys.exit()\n\n else:\n print(\"Error. Incorrect number of arguments.\")\n help_msg()\n sys.exit() \n\n return (cov, iden)",
"def _num_arguments(func: Callable) -> int:\n sig = signature(func)\n return len(sig.parameters)",
"def msg_args(self, n):\n raise NotImplementedError",
"def get_quad_args():\r\n epsa, epsr, subdiv = 1e-6, 1e-6, 250\r\n quad_args = {'limit':subdiv,'epsrel':epsr,'epsabs':epsa}\r\n return quad_args",
"def number_of_arguments(func):\n if isinstance(func, functools.partial):\n total_args = len(inspect.signature(func.func).parameters)\n return total_args - len(func.args) - len(func.keywords)\n return len(inspect.signature(func).parameters)",
"def print_arguments_given(args):\r\n\tprint('=' * 100)\r\n\tprint('Uai file : {}'.format(args.uai_file))\r\n\tprint('Task number : {}'.format(args.task_id))\r\n\tprint('Training data file : {}'.format(args.training_data))\r\n\tprint('Test data file : {}'.format(args.test_data))\r\n\tprint('=' * 100)",
"def n_params(model):\n return len(model[\"parameters\"])",
"def topic_args(self, n):\n raise NotImplementedError",
"def test_frequencies_default_multi_param(self):\n\n class DummyOp(qml.operation.Operation):\n r\"\"\"Dummy custom operation\"\"\"\n num_params = 3\n num_wires = 1\n grad_method = \"A\"\n\n x = [0.654, 2.31, 0.1]\n op = DummyOp(*x, wires=0)\n with pytest.raises(\n qml.operation.OperatorPropertyUndefined, match=\"DummyOp does not have parameter\"\n ):\n op.parameter_frequencies",
"def tail_args(self) -> Namespace:\n ...",
"def len_arguments(self):\n return self.arguments.len()",
"def variables_num(self):\n raise NotImplementedError()",
"def num_vars(expr):\r\n return num_terms(expr) - count_ops(expr)",
"def test_measure_arg_postselect(self):\n # create a test program\n sf_prog = Program(1)\n\n with sf_prog.context as q:\n ops.MeasureHomodyne(0.43, select=0.543) | q[0]\n\n xir_prog = io.to_xir(sf_prog)\n\n expected = [(\"MeasureHomodyne\", {\"phi\": 0.43, \"select\": 0.543}, (0,))]\n assert [(stmt.name, stmt.params, stmt.wires) for stmt in xir_prog.statements] == expected\n\n # repeat with kwargs only\n sf_prog = Program(1)\n\n with sf_prog.context as q:\n ops.MeasureHomodyne(phi=0.43, select=0.543) | q[0]\n\n xir_prog = io.to_xir(sf_prog)\n assert [(stmt.name, stmt.params, stmt.wires) for stmt in xir_prog.statements] == expected",
"def foptions():\n \n \n opt_vect = np.zeros(18)\n opt_vect[1] = 1e-4\n opt_vect[2] = 1e-4\n opt_vect[3] = 1e-6\n opt_vect[15] = 1e-8\n opt_vect[16] = 0.1\n return opt_vect",
"def numTriangles(*args, **kwargs):\n \n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Adds a user to the database | def add_user(self, user: User) -> None:
pass | [
"def add_user(self):\n db.session.flush()\n db.session.add(self)\n db.session.commit()",
"def add_user(self, id, name):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\n\t\tcursor.execute(\"insert into users values (%s, %s, %s, %s);\", (id,name, '{}', '{}'))\n\t\tconnection.commit()\n\t\tconnection.close()",
"def insert_new_user(self, user_data):\n return self.mongo_toolbox.add_user(user_data)",
"def add_user():\n\n username = request.form.get(\"username\")\n password = request.form.get(\"password\")\n\n new_user = User(email=username,\n password=password)\n\n db.session.add(new_user)\n db.session.commit()\n\n flash('Logged in')\n return redirect(\"/\")",
"def add_user(db, username, email, password, name, suburb, skills):\n cursor = db.cursor()\n # check username is not in use\n sql = \"SELECT 1 FROM users WHERE username=?\"\n data = cursor.execute(sql, (username,))\n if data.fetchone():\n return False\n else:\n salt = generate_salt()\n sql = \"INSERT INTO users (username, email, password, name, suburb, rand, skills) VALUES (?,?,?,?,?,?,?)\"\n cursor.execute(sql, [username, email, firstPassword_hash(\n password, salt), name, suburb, salt, skills])\n db.commit()\n return True",
"def adduser(username, accesskey, secretkey, pkname=None):\n settings.add_user(username, accesskey, secretkey, pkname)",
"def save_user (self):\n User.user_list.append(self)",
"def add_user(self, user_id: int, group_name: str):\n self.cursor.execute(\n f\"INSERT INTO public.users (id, user_id, group_name) VALUES (DEFAULT, {user_id}, '{group_name}');\")\n self.conn.commit()",
"def handle_add_user():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n profile_image_url = request.form['profile_image_url']\n new_user = User(first_name=first_name, last_name=last_name, profile_image_url=profile_image_url)\n db.session.add(new_user)\n db.session.commit()\n return redirect(url_for('list_users'))",
"def save(self):\n\t\tdbmanager = db.DBManager()\n\t\tif dbmanager.userExists(self.username):\n\t\t\traise UserExistsException(self.username)\n\t\telse:\n\t\t\tdbmanager.insertUser(self)",
"def add_SignUp(user_data):\n data = get_User(user_data['email'])\n if not data:\n session = DBSession()\n user = User(\n name = user_data['name'],\n email = user_data['email']\n )\n user.hash_password(user_data['password'])\n session.add(user)\n session.commit()\n session.close_all()\n return True\n else:\n return False",
"def put_user(self, user):\n\t_result = self.connection.query(\n\t \"\"\"INSERT INTO users (email, password, title, first_name,\n\t last_name, affiliation, phone_number, fax_number, street,\n\t postal_code, city, state, country, sys_role) VALUES (%s)\"\"\" %\n\t (user.as_query_string()))",
"def add_user(self, server_id, user_id):\n query = (\"INSERT INTO `%s` VALUES (%s, 0, 0, false)\" \n % (server_id, \"%s\"))\n self._update_query(query, user_id)",
"def create_user():\n user_record = request.get_json(force=True)\n\n add_user_to_db(user_record)\n\n return \"Successfully added user.\", 200",
"def add_local_user() -> None:\n from getpass import getpass\n\n login = input(\"Username (login): \").strip()\n password = getpass()\n if not all([login, password]):\n print(\"Both username and password are required.\")\n return\n app = make_app() # type: ignore\n with app.app_context():\n user = User(name=login, password=password)\n DB.session.add(user)\n DB.session.commit()",
"def add_user(self, langid, username, password, realname, email):\n if self.check_language(langid) == False:\n self.die(\"Language does not exist\")\n return\n\n if self.check_username(username) != False:\n self.die(\"Username already exists\")\n return\n\n self.__add_user_to_database(langid, username, password, realname, email)",
"def test_add_user(self):\r\n _pword = \"test\"\r\n user = User(email='test@adduser.com', password=_pword)\r\n check = user.add_user()\r\n self.assertTrue(check, \"User should be added\")\r\n self.assertTrue(\r\n user.id,\r\n \"User doesnot contain id so he is not added to the db\"\r\n )",
"def add_user(self, username, vec):\n self.__add_row_to_data(username, vec)\n self.__save_current_user_data()\n self.build_annoy_index()",
"def addUser(self, muName, fName, lName, affil, email):\n conn = getEngineFromFile(self._msMysqlAuthF).connect()\n cmd = \"SELECT instId FROM Institution WHERE instName = %s\"\n instId = conn.execute(cmd, (affil,)).scalar()\n if instId is None:\n raise MetaBException(MetaBException.INST_NOT_FOUND, affil)\n cmd = \"INSERT INTO User(mysqlUserName, firstName, lastName, email, instId) \"\n cmd += \"VALUES(%s, %s, %s, %s, %s)\"\n conn.execute(cmd, (muName, fName, lName, email, instId))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Edits a user, setting a new attribute given as a dictionary. The dictionary key must be an attribute of the User dataclass | def edit_user(self, user: User, attribute: dict[str, Any]) -> None:
pass | [
"def set_attr(self, user, key, value):\n query1 = \"\"\"SELECT attr_value FROM attributes WHERE attr_uid = ?\n AND attr_key = ?\"\"\"\n query2 = \"INSERT INTO attributes VALUES (?, ?, ?)\"\n query3 = \"\"\"UPDATE attributes SET attr_value = ? WHERE attr_uid = ?\n AND attr_key = ?\"\"\"\n with self._db_access_lock, sqlite.connect(self._dbfile) as conn:\n if conn.execute(query1, (user, key)).fetchone():\n conn.execute(query3, (value, user, key))\n else:\n conn.execute(query2, (user, key, value))\n try:\n self._attributes[user][key] = value\n except KeyError:\n self.attributes[user] = {key: value}",
"def set_user_attr(u,attr,val):\n\n\t# sanity check the attribute we were asked to set\n\tif attr not in users_schema:\n\t\tprint(\"That attribute does not exist!\")\n\t\treturn 400\n\n\t# try to set the value\n\ttry:\n\t\tusers.execute('''\n\t\t\t\tUPDATE users\n\t\t\t\tSET '''+attr+'''=?\n\t\t\t\tWHERE UUUID=?;\n\t\t\t''',(val,u)\n\t\t\t#| doing string catenation in SQL would normally be insecure,\n\t\t\t#| but we validate the attribute requested againt a list of valid attributes so it's hopefully fine\n\t\t\t#| (also this is literally the only way to have a variable field be substituted, otherwise we get a syntax error)\n\t\t)\n\texcept BaseException as e:\n\t\tprint(e)\n\t\tprint(\"A fatal error occured while trying to set the value\")\n\t\treturn 500\n\n\t# save our changes\n\tusers_conn.commit()\n\n\t# http 200 okay\n\treturn 200",
"def edit_user(user_id, data):\n\n try:\n user = Stakeholder.objects.all().filter(pk=user_id)[0]\n user.update(**data)\n user.save()\n return user\n except LookupError:\n return",
"def set_account_information(self, user_id, req):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n UPDATE Users\n SET \n username = ?,\n email = ?,\n fName = ?,\n lName = ?,\n streetAddress = ?,\n city = ?,\n state = ?,\n postCode = ?\n WHERE\n id = ?\n \"\"\",\n (\n req['username'],\n req['email'],\n req['fName'],\n req['lName'],\n req['streetAddress'],\n req['city'],\n req['state'],\n req['postCode'],\n user_id\n )\n )\n self.db.commit()\n except sqlite3.Error as e:\n log.error(e)\n raise Exception",
"def add_user_to_user_dict(u_dict,user):\n key = user[ID][0]\n if key in u_dict:\n pass\n else:\n u_dict[key] = 0",
"def modify_user(self, pycl_object=None, name=None, data=None,\n metadata=None, json_string=None):\n return self.user_manager.modify_object(\n pycl_object=pycl_object, name=name, data=data,\n metadata=metadata, json_string=json_string)",
"def update_user(\n self,\n user_id,\n bio=\"\",\n first_name=\"\",\n last_name=\"\",\n profile_pic=\"\",\n new_password=\"\",\n new_email=\"\",\n ):\n new_info = {}\n if bio:\n new_info[\"bio\"] = bio\n if first_name:\n new_info[\"first_name\"] = first_name\n if last_name:\n new_info[\"last_name\"] = last_name\n if profile_pic:\n new_info[\"profile_pic\"] = profile_pic\n if new_password:\n new_info[\"new_password\"] = new_password\n if new_email:\n new_info[\"new_email\"] = new_email\n\n self.client.Users.users.update_one(\n {\"user_id\": user_id}, {\"$set\": new_info}, upsert=True\n )\n return new_info",
"def updateUser(userId, name, description):\n\n user = getUser(userId)\n user.name = name\n user.description = description\n user.put()",
"def put(self, user_id):\n user = AdminUser.query.filter(AdminUser.id == user_id).first()\n if user is None:\n raise UserDoesNotExist\n user.update_from_dict(**api.payload)\n db.session.merge(user)\n db.session.commit()\n return user",
"def set_user_detail_extra(user_id: UserID, key: str, value: str) -> None:\n detail = _get_db_user_detail(user_id)\n\n if detail.extras is None:\n detail.extras = {}\n\n detail.extras[key] = value\n\n db.session.commit()",
"def put(self):\n data = User.parser.parse_args()\n\n user = UserModel.find_by_id(current_identity.id)\n if user is None:\n return {'message': \"There is no user with this ID, or your access_token is invalid.\"}\n else:\n user.bank_account = data['bank_account']\n\n user.save_to_db()\n\n return user.json()",
"def update_user(user_id, update_key, update_value, connection):\n\n database = connection['test']\n collection = database['users']\n\n collection.update({\"_id\" : ObjectId(user_id)},\n {\"$set\" : {update_key : update_value}})\n \n # Update the screen_name to the lowercase version of the display name.\n if (update_key == \"display_name\"):\n collection.update({\"_id\" : ObjectId(user_id)},\n {\"$set\" : {\"screen_name\" : update_value.lower()}})\n \n return",
"def modify(self, sdi_id: str, data: Dict[str, Any]) -> APIResponse:\n return self._put(\"user_detail\", {\"pk\": self.user_pk, \"sdi_id\": sdi_id}, data)",
"def set_user_session_datum(self, user_id, key, value):\n logger = logging.getLogger(\"UserSessionManager.set_user_session_datum\")\n logger.debug(\"Entry. user_id: %s, key: %s, value: %s\" % (user_id, key, value)) \n assert(self.is_user_authorized(user_id))\n self.r.hset(user_id, key, value)",
"def update_user(id, name, password, email):\n return u.update(id, name, password, email)",
"def update_user_fname(user_id, name):\r\n\r\n user = User.query.get(user_id)\r\n user.fname = name\r\n db.session.commit()\r\n\r\n return user",
"def personUpdate(id):\n #get updated user info\n person = Person.query.get(id)\n name = request.json[\"name\"]\n email = request.json[\"email\"]\n username = request.json[\"username\"]\n users_roles_roleid = request.json[\"roleid\"]\n \n #set userinfo to the given user\n person.name = name\n person.email = email\n person.username = username\n person.users_roles_roleid = users_roles_roleid\n\n db.session.commit()\n\n return personSchema.jsonify(person)",
"def update_user(db: Session, user_id: int, user: UserUpdate) -> User:\n db_user = retrieve_user(db, user_id)\n db_user.email = user.email\n db_user.first_name = user.first_name\n db_user.last_name = user.last_name\n db_user.is_active = user.is_active\n db.commit()\n return db_user",
"def setUserInfo(*args, **kwargs):\n \n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Converts a dict of attributes to a user object | def convert_attributes_user(attributes: dict[str, Any]) -> User:
attributes.pop("id")
return User().set(attributes) | [
"def create_user(dct):\n return User.dict2user(dct)",
"def create_user_from_tunnistamo_data(payload):\n return User(\n id=get_user_id(payload),\n first_name=payload.get('given_name'),\n last_name=payload.get('family_name'),\n email=payload.get('email'),\n is_staff=False,\n is_active=True,\n date_joined=None,\n )",
"def _MakeBasicUser(self, user):\n return user['primaryEmail'], user['id'], user['name']['fullName']",
"def from_json(obj: Dict[str, str]) -> Any:\n if \"type\" in obj and obj[\"type\"] == \"User\":\n return User(\n type=obj[\"type\"],\n id=obj[\"id\"],\n first_name=obj[\"firstName\"],\n last_name=obj[\"lastName\"],\n email=obj[\"email\"],\n username=obj[\"username\"],\n password=obj[\"password\"],\n external_reference=obj[\"externalReference\"],\n active=obj[\"active\"]\n )\n\n return obj",
"def _makeUser(row):\n id_, email = row\n return User(id_, email)",
"def from_dict(cls, user_dict):\n new_dict = user_dict.copy()\n resource_options = {}\n options = new_dict.pop('options', {})\n password_expires_at_key = 'password_expires_at'\n if password_expires_at_key in user_dict:\n del new_dict[password_expires_at_key]\n for opt in cls.resource_options_registry.options:\n if opt.option_name in options:\n opt_value = options[opt.option_name]\n # NOTE(notmorgan): None is always a valid type\n if opt_value is not None:\n opt.validator(opt_value)\n resource_options[opt.option_id] = opt_value\n user_obj = super(User, cls).from_dict(new_dict)\n setattr(user_obj, '_resource_options', resource_options)\n return user_obj",
"def create_custom_user(user_dict):\n user_dict['user'] = create_djago_user(user_dict)\n user_dict['access_token'] = _generate_key(90)\n user = CustomUser.objects.create(\n user=user_dict['user'],\n access_token=user_dict['access_token'],\n phone=user_dict['mobile']\n )\n return (user)",
"def _process_user_data(guid,\n email,\n first_name,\n middle_initial,\n last_name,\n email_validated,\n is_nyc_employee,\n has_nyc_account,\n active,\n terms_of_use_accepted,\n is_anonymous_requester):\n mailbox, _ = email.split('@')\n\n if first_name is None:\n first_name = mailbox\n\n user = Users.query.filter_by(guid=guid).first()\n if user is None:\n user = find_user_by_email(email)\n\n # update or create user\n if user is not None:\n _update_user_data(\n user,\n guid,\n email,\n first_name,\n middle_initial,\n last_name,\n email_validated,\n is_nyc_employee,\n has_nyc_account,\n active,\n terms_of_use_accepted,\n is_anonymous_requester\n )\n else:\n user = Users(\n guid=guid,\n email=email,\n first_name=first_name,\n middle_initial=middle_initial,\n last_name=last_name,\n email_validated=email_validated,\n is_nyc_employee=is_nyc_employee,\n has_nyc_account=has_nyc_account,\n active=active,\n terms_of_use_accepted=terms_of_use_accepted,\n is_anonymous_requester=is_anonymous_requester\n )\n create_object(user)\n\n return user",
"def output_user(user):\n return model_to_dict(user, fields=[\"id\", \"username\", \"first_name\", \"last_name\", \"email\"])",
"def user_to_dict(self, user):\r\n\r\n from giveaminute.project import userNameDisplay\r\n from giveaminute.project import isFullLastName\r\n\r\n user_dict = super(EventModelRestController, self).instance_to_dict(user)\r\n\r\n # Add in some of that non-orm goodness\r\n user_dict['avatar_path'] = user.avatar_path\r\n user_dict['display_name'] = userNameDisplay(\r\n user.first_name, user.last_name, user.affiliation,\r\n isFullLastName(user.group_membership_bitmask))\r\n\r\n # Remove sensitive information\r\n del user_dict['password']\r\n del user_dict['salt']\r\n\r\n return user_dict",
"def alternative_user_model(username, claimname, **kwargs):\n return {claimname: username, **kwargs}",
"def get_user(fields):\n return User(fields['username'])",
"def _create_usm_user_obj(snmp_cred):\n auth_protocol = snmp_cred.get('auth_protocol')\n priv_protocol = snmp_cred.get('priv_protocol')\n auth_user = snmp_cred.get('auth_user')\n auth_prot_pp = snmp_cred.get('auth_prot_pp')\n auth_priv_pp = snmp_cred.get('auth_priv_pp')\n\n if ((not auth_protocol) and priv_protocol):\n priv_protocol = (\n MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])\n usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,\n auth_priv_pp,\n privProtocol=priv_protocol)\n elif ((not priv_protocol) and auth_protocol):\n auth_protocol = (\n MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])\n usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,\n auth_priv_pp,\n authProtocol=auth_protocol)\n elif not all([priv_protocol and auth_protocol]):\n usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,\n auth_priv_pp)\n else:\n auth_protocol = (\n MAPPED_SNMP_ATTRIBUTES['authProtocol'][auth_protocol])\n priv_protocol = (\n MAPPED_SNMP_ATTRIBUTES['privProtocol'][priv_protocol])\n usm_user_obj = hlapi.UsmUserData(auth_user, auth_prot_pp,\n auth_priv_pp,\n authProtocol=auth_protocol,\n privProtocol=priv_protocol)\n return usm_user_obj",
"def get_user_attribute():\n return UserAttribute",
"def make_user_data(self, custom={}):\n raw_json = self.make_response_body()\n user_data = json.loads(raw_json)\n for key, value in custom.items():\n user_data[key] = value\n return user_data",
"def from_json(self, user_doc: dict):\n self.id = user_doc['email']\n self.name = user_doc['name']\n self.first_name = user_doc['name'].split(', ')[0]\n self.email = user_doc['email']\n return self",
"def fromTweepyJSON(json_in: Dict):\n id = json_in.get(\"id\")\n screen_name = json_in.get(\"screen_name\")\n name = json_in.get(\"name\")\n created_at = json_in.get(\"created_at\")\n followers_count = json_in.get(\"followers_count\")\n friends_count = json_in.get(\"friends_count\")\n listed_count = json_in.get(\"listed_count\")\n favourites_count = json_in.get(\"favourites_count\")\n statuses_count = json_in.get(\"statuses_count\")\n default_profile = json_in.get(\"default_profile\")\n default_profile_image = json_in.get(\"default_profile_image\")\n\n user = User(id=id, name=name, screen_name=screen_name,\n created_at=created_at, followers_count=followers_count,\n friends_count=friends_count, listed_count=listed_count,\n favourites_count=favourites_count, statuses_count=statuses_count,\n default_profile=default_profile,\n default_profile_image=default_profile_image)\n\n return user",
"def from_map(name, server, inobj):\n obj = UserMapping(name, inobj.pop('options', {}), server.wrapper,\n server.name)\n obj.set_oldname(inobj)\n return obj",
"def as_dto(self, logged_in_username: str) -> UserDTO:\n user_dto = UserDTO()\n user_dto.id = self.id\n user_dto.username = self.username\n user_dto.role = UserRole(self.role).name\n user_dto.mapping_level = MappingLevel(self.mapping_level).name\n user_dto.projects_mapped = (\n len(self.projects_mapped) if self.projects_mapped else None\n )\n user_dto.is_expert = self.is_expert or False\n user_dto.date_registered = self.date_registered\n user_dto.twitter_id = self.twitter_id\n user_dto.linkedin_id = self.linkedin_id\n user_dto.facebook_id = self.facebook_id\n user_dto.skype_id = self.skype_id\n user_dto.slack_id = self.slack_id\n user_dto.irc_id = self.irc_id\n user_dto.city = self.city\n user_dto.country = self.country\n user_dto.name = self.name\n user_dto.picture_url = self.picture_url\n user_dto.default_editor = self.default_editor\n user_dto.mentions_notifications = self.mentions_notifications\n user_dto.projects_notifications = self.projects_notifications\n user_dto.projects_comments_notifications = self.projects_comments_notifications\n user_dto.tasks_notifications = self.tasks_notifications\n user_dto.tasks_comments_notifications = self.tasks_comments_notifications\n user_dto.teams_announcement_notifications = (\n self.teams_announcement_notifications\n )\n\n if self.username == logged_in_username:\n # Only return email address and gender information when logged in user is looking at their own profile\n user_dto.email_address = self.email_address\n user_dto.is_email_verified = self.is_email_verified\n gender = None\n if self.gender is not None:\n gender = UserGender(self.gender).name\n user_dto.gender = gender\n user_dto.self_description_gender = self.self_description_gender\n return user_dto"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the fileId from a URL. | def fileId_from_url(url):
raw_fileId = re.findall("~[A-z.]+/[0-9]+", url)[0][1: ]
return raw_fileId.replace('/', ':') | [
"def fileid_from_url(url):\r\n raw_fileid = re.findall(\"~[A-z.]+/[0-9]+\", url)[0][1:]\r\n return raw_fileid.replace('/', ':')",
"def get_file_by_url(self, url):\r\n parts = urlsplit(url)\r\n query = parse_qs(parts.query)\r\n keys = query.keys()\r\n if \"sourcedoc\" in keys:\r\n uid = query['sourcedoc'][0][1:-1]\r\n return self.get_file_by_id(uid)\r\n elif \"SourceUrl\" in keys:\r\n path = query['SourceUrl'][0] \r\n path = '/' + '/'.join(path.split('/')[3:])\r\n # Check for invalid .xlsf extension\r\n base, ext = os.path.splitext(path)\r\n if ext == '.xlsf':\r\n path = base + '.xls'\r\n return self.get_file_by_path(path)\r\n else: # Assume sharepoint_url is valid and remove all query items\r\n return self.get_file_by_path(parts.path)",
"def get_filename_from_url(url):\n split = url.split('/') # this splits a string on a character and returns a list\n name = split[-1]\n return name",
"def parse_file_name_from_url(response):\r\n split_url = urlsplit(response.url)\r\n filename = split_url.path.split(\"/\")[-1:][0]\r\n return filename",
"def get_by_url(self, url):\n return File(self.context, ServiceOperationPath(\"GetByUrl\", [url], self.resource_path))",
"def url2file(url):\n import urllib\n url = \"_\".join(url.split(\"/\"))\n url = \"__\".join(url.split(\":\"))\n filename = urllib.parse.quote_plus(url, '')\n return filename",
"def hf_url_to_filename(url: str, etag: Optional[str] = None) -> str:\n url_bytes = url.encode(\"utf-8\")\n filename = sha256(url_bytes).hexdigest()\n\n if etag:\n etag_bytes = etag.encode(\"utf-8\")\n filename += \".\" + sha256(etag_bytes).hexdigest()\n\n return filename",
"def parse_url(url, warning=True):\r\n\tparsed = urllib_parse.urlparse(url)\r\n\tquery = urllib_parse.parse_qs(parsed.query)\r\n\tis_gdrive = parsed.hostname == \"drive.google.com\"\r\n\tis_download_link = parsed.path.endswith(\"/uc\")\r\n\r\n\tfile_id = None\r\n\tif is_gdrive and \"id\" in query:\r\n\t\tfile_ids = query[\"id\"]\r\n\t\tif len(file_ids) == 1:\r\n\t\t\tfile_id = file_ids[0]\r\n\tmatch = re.match(r\"^/file/d/(.*?)/view$\", parsed.path)\r\n\tif match:\r\n\t\tfile_id = match.groups()[0]\r\n\r\n\tif is_gdrive and not is_download_link:\r\n\t\twarnings.warn(\r\n\t\t\t\"You specified Google Drive Link but it is not the correct link \"\r\n\t\t\t\"to download the file. Maybe you should try: {url}\".format(\r\n\t\t\t\turl=\"https://drive.google.com/uc?id={}\".format(file_id)\r\n\t\t\t)\r\n\t\t)\r\n\r\n\treturn file_id, is_download_link",
"def get_file_from_network(self, url, filename):\n encoded_url = urllib.parse.quote(url, safe='/:')\n req = Request(encoded_url)\n response = urlopen(req)\n with open(filename, 'wb') as return_file:\n return_file.write(response.read())",
"def _download_from_url(self, url):\n ext = get_file_extension(url)\n if \"?\" in url:\n ext = get_file_extension(os.path.splitext(url.split(\"?\")[0]))\n filepath = \"/tmp/%s.%s\" % (uuid.uuid4().hex, ext)\n request.urlretrieve(url, filepath)\n return filepath",
"def get_file_url(self):\n return self.raw['url']",
"def retrieve_file(url):\n doc = urlopen(url)\n lines = doc.read().decode()\n doc.close()\n return lines",
"def extract_filename_from_url(log, url):\n ## > IMPORTS ##\n import re\n # EXTRACT THE FILENAME FROM THE URL\n try:\n log.debug(\"extracting filename from url \" + url)\n reEoURL = re.compile('([\\w\\.]*)$')\n filename = reEoURL.findall(url)[0]\n # log.debug(filename)\n if(len(filename) == 0):\n filename = 'untitled.html'\n if not (re.search('\\.', filename)):\n filename = filename + '.html'\n except Exception as e:\n filename = None\n # print url\n log.warning(\"could not extracting filename from url : \" + str(e) + \"\\n\")\n\n return filename",
"def filename(self):\n return self.url.filename",
"def get_files_from_github_folder(url):\n\n r = requests.get(url, auth=('', ''))\n html = r.text\n\n # Parse it and check the latest file by it's name\n parser = github_parser()\n parser.feed(html)\n\n return parser.files_dict",
"def _get_file_by_url(url, attempts=5):\n\n\twhile attempts > 0:\n\t\ttry:\n\t\t\tresponse = urlopen(url)\n\t\texcept URLError:\n\t\t\traise IOError('urllib2 failed to open URL.')\n\t\tif response.getcode() < 201:\n\t\t\tbreak\n\t\telse:\n\t\t\tattempts -= 1\n\telse:\n\t\traise IOError('urllib2 failed 5x to download the file.')\n\treturn response",
"def _getFileUrl(rooturl, filename):\n return rooturl + \"/\" + filename",
"def _get_datacenter_id_by_url(self, url):\n if url and isinstance(url, list):\n url = url[0]\n if isinstance(url, vim.vm.ConfigInfo.DatastoreUrlPair):\n url = str(url.url)\n if not url:\n return\n\n try:\n url = url.split('/')\n volume_id = url[url.index('volumes') + 1]\n except (ValueError, IndexError):\n raise LibcloudError(\"Unexpected URL format: {}\".format(url))\n\n datastore_url = 'ds:///vmfs/volumes/{}/'.format(volume_id)\n for info, datacenter in self._get_datastores_info_map().items():\n if info.url == datastore_url:\n return datacenter._moId",
"def get_id(self, url):\n return self.get_ids([url])[0]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Configure Pig with the correct classpath. If Hadoop is available, use HADOOP_CONF_DIR, otherwise use PIG_HOME. | def update_config(self, mode):
with utils.environment_edit_in_place('/etc/environment') as env:
key = 'HADOOP_CONF_DIR' if mode == 'mapreduce' else 'PIG_HOME'
env['PIG_CLASSPATH'] = env[key] | [
"def setup_environment():\n\n add_pyspark_path()\n\n current_location = path.dirname(path.realpath(__file__))\n cwd = os.getcwd()\n\n local_prefixes = [\n path.abspath(path.join(current_location, 'jars')),\n path.abspath(path.join(cwd, 'jars')),\n path.abspath(path.join(cwd, '../geopyspark/jars'))\n ]\n possible_jars = [path.join(prefix, '*.jar') for prefix in local_prefixes]\n configuration = path.join(current_location, 'command', 'geopyspark.conf')\n\n if path.isfile(configuration):\n with open(path.join(configuration)) as conf:\n possible_jars.append(path.relpath(conf.read(), cwd))\n\n jar = path.abspath(resource_filename('geopyspark.jars', JAR))\n jar_dir = os.path.dirname(jar)\n if jar_dir not in local_prefixes:\n possible_jars.append(jar)\n\n returned = [glob.glob(jar_files) for jar_files in possible_jars]\n jars = [jar for sublist in returned for jar in sublist]\n\n if len(jars) == 0:\n raise IOError(\"Failed to find any jars. Looked at these paths {}\".format(possible_jars))\n\n jar_string = str(jars[0])\n\n os.environ['JARS'] = jar_string\n os.environ[\"PYSPARK_PYTHON\"] = \"python3\"\n os.environ[\"PYSPARK_DRIVER_PYTHON\"] = \"python3\"\n if 'TRAVIS' in os.environ:\n os.environ[\"PYSPARK_SUBMIT_ARGS\"] = \"--jars {} \\\n --conf spark.ui.enabled=false \\\n --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \\\n --conf spark.kyro.registrator=geotrellis.spark.io.kyro.KryoRegistrator \\\n --driver-memory 2G \\\n --executor-memory 2G \\\n pyspark-shell\".format(jar_string)\n else:\n os.environ[\"PYSPARK_SUBMIT_ARGS\"] = \"--jars {} \\\n --conf spark.ui.enabled=false \\\n --conf spark.serializer=org.apache.spark.serializer.KryoSerializer \\\n --conf spark.kyro.registrator=geotrellis.spark.io.kyro.KryoRegistrator \\\n --driver-memory 8G \\\n --executor-memory 8G \\\n pyspark-shell\".format(jar_string)",
"def setconfpath(confpath):\n global CONF_PATH\n CONF_PATH = confpath\n config.set_option('CONF_PATH', confpath)",
"def run(self, config):\n SetupHadoopCluster(config)",
"def __init__(self, hadoop_dir=''):\n ShellWrapper.__init__(self, \"hadoop_proc\")\n self._hadoop_dir = hadoop_dir",
"def SetupHadoopCluster(config):\n logging.debug('Start Hadoop cluster: %s', str(config))\n\n # Required parameters.\n try:\n project = config['project']\n prefix = config['prefix']\n except KeyError as e:\n raise HadoopSetupError('Hadoop Setup: Missing required parameter: %s' %\n str(e))\n\n # Optional parameters with default values.\n zone = config.get('zone', DEFAULT_ZONE)\n image = config.get('image', DEFAULT_IMAGE)\n network = config.get('network', DEFAULT_NETWORK)\n machinetype = config.get('machineType', DEFAULT_MACHINETYPE)\n num_workers = int(config.get('numWorkers', DEFAULT_NUM_WORKERS))\n\n # GCE API object used to convert parameters to resource URLs.\n gce = gce_api.GceApi(project, zone)\n\n cluster = hadoop_cluster.HadoopCluster(\n auth.Service.HttpFromServiceAccount(SCOPE),\n project=project,\n name=prefix, # Use prefix as a cluster name.\n prefix=prefix,\n zone=zone,\n machinetype=gce.ResourceUrl('machineTypes', machinetype),\n image=gce.ResourceUrlFromPath(image),\n network=gce.ResourceUrl('networks', network,\n gce_api.ResourceZoning.GLOBAL),\n num_workers=num_workers)\n cluster.StartHadoopCluster()",
"def classpath_prepend(self):",
"def configure(self, command='./configure', args=None, cwd=None, env=None):\n pass",
"def setDefaults(self):\n\t\tself.user = 'hdfs'\n\t\tself.releaseDir = '/usr'\n\t\tself.configLocal = '/etc/sysconfig/hadoop'",
"def add_pyspark_path():\n\n try:\n spark_home = os.environ['SPARK_HOME']\n\n sys.path.append(os.path.join(spark_home, 'python'))\n py4j_src_zip = glob(os.path.join(spark_home, 'python',\n 'lib', 'py4j-*-src.zip'))\n if len(py4j_src_zip) == 0:\n raise ValueError('py4j source archive not found in %s'\n % os.path.join(spark_home, 'python', 'lib'))\n else:\n py4j_src_zip = sorted(py4j_src_zip)[::-1]\n sys.path.append(py4j_src_zip[0])\n except KeyError:\n logging.error(\"\"\"SPARK_HOME was not set. please set it. e.g.\n SPARK_HOME='/home/...' ./bin/pyspark [program]\"\"\")\n except ValueError as e:\n logging.error(str(e))",
"def HadoopSetup(self, hadoop, master, jobscript):\n \n randomnum = str(randrange(999999999))\n randir = os.getenv('HOME') + '/hadoopjob' + randomnum\n randfile = randomnum + \"-fg-hadoop.job_\"\n \n \n #do we need that two directories? or should I remove from all machines? \n randhadooptempdir = '/tmp/hadoop-' + randomnum # this is for hadoop.tmp.dir in core-site.xml \n randhadoophdfsdir = randomnum + \"-fg-hadoop/\" # this is for mapred.local.dir in mapred-site.xml\n \n #gen config script\n genConf_script = hadoop.generate_config_hadoop(randfile, randir, randhadooptempdir, randhadoophdfsdir)\n genConf_script_name = hadoop.save_job_script(randfile + \"genconf\", genConf_script)\n #start script\n start_script = hadoop.generate_start_hadoop()\n start_script_name = hadoop.save_job_script(randfile + \"start\", start_script)\n #runjob script\n #TODO: GET hadoopCmd from jobscript \n if jobscript != \"interactive\" and jobscript != \"background\":\n jobscript = \"/\" + jobscript.lstrip(\"/tmp\")\n f = open(jobscript, 'r')\n for line in f:\n if not re.search('^#', line) and not line.strip() == \"\": \n hadoopCmd = line.rstrip('\\n')\n break\n run_script = hadoop.generate_runjob(hadoopCmd)\n run_script_name = hadoop.save_job_script(randfile + \"jobscript\", run_script)\n else:\n run_script_name = \"\"\n #stop script\n shutdown_script = hadoop.generate_shutdown()\n shutdown_script_name = hadoop.save_job_script(randfile + \"shutdown\", shutdown_script)\n \n #create script\n #Master and slaves have to have the hadoop directory in the same path\n f = open(randfile + \"setup.sh\", \"w\")\n msg = \"#!/bin/bash \\n \" + \\\n \"\\n wget \" + self.http_server + \"/software/hadoop.tgz -O \" + randir + \"/hadoop.tgz\" + \\\n \"\\n cd \" + randir + \\\n \"\\n tar vxfz \" + randir + \"/hadoop.tgz > .hadoop.tgz.log\" + \\\n \"\\n DIR=`head -n 1 .hadoop.tgz.log`\"\n if hadoop.getHpc():\n msg += \"\\n cp $HOME/.bash_profile $HOME/.bash_profile.\" + randomnum + \\\n \"\\n cp $HOME/.bashrc $HOME/.bashrc.\" + randomnum\n \n f1 = open(shutdown_script_name, \"a\")\n f1.write(\"\\n mv -f $HOME/.bash_profile.\" + randomnum + \" $HOME/.bash_profile\" + \\\n \"\\n mv -f $HOME/.bashrc.\" + randomnum + \" $HOME/.bashrc\")\n f1.close()\n \n msg += \"\\n echo export PATH=\" + randir + \"/$DIR/bin/:'$PATH' | tee -a $HOME/.bash_profile > /dev/null\" + \\\n \"\\n echo export PATH=\" + randir + \"/$DIR/bin/:'$PATH' | tee -a $HOME/.bashrc > /dev/null\" + \\\n \"\\n JAVA=`which java | head -n 1`\" + \\\n \"\\n echo export JAVA_HOME=${JAVA/bin\\/java/} | tee -a \" + randir + \"/$DIR/conf/hadoop-env.sh > /dev/null\" + \\\n \"\\n echo export HADOOP_CONF_DIR=\" + randir + \"/$DIR/conf/ | tee -a $HOME/.bash_profile > /dev/null\" + \\\n \"\\n echo export HADOOP_CONF_DIR=\" + randir + \"/$DIR/conf/ | tee -a $HOME/.bashrc > /dev/null\"\n if hadoop.getHpc():\n msg += \"\\n export HADOOP_CONF_DIR=\" + randir + \"/$DIR/conf/\" + \\\n \"\\n export PATH=\" + randir + \"/$DIR/bin/:$PATH\"\n f.write(msg) \n f.close()\n \n \n \n if not hadoop.getHpc():\n f = open(genConf_script_name, \"a\")\n msg = \"\\n DIR=`head -n 1 .hadoop.tgz.log`\" + \\\n \"\\n MACHINES=`tail -n +2 $HOME/machines` \" + \\\n \"\\n for i in $MACHINES;do \" + \\\n \"\\n if [ $i != \\\"\\\" ]; then\" + \\\n \"\\n scp -r -q -oBatchMode=yes -oStrictHostKeyChecking=no \" + randir + \"/$DIR $i:\" + os.path.basename(randir.rstrip(\"/\")) + \"\" + \\\n \"\\n fi\" + \\\n \"\\n done\" + \\\n \"\\n rm -f .hadoop.tgz.log\"\n f.write(msg) \n f.close()\n \n os.system(\"chmod +x \" + \" \" + 
start_script_name + \" \" + run_script_name + \" \" + shutdown_script_name + \\\n \" \" + randfile + \"setup.sh\" + \" \" + genConf_script_name)\n \n \n if not hadoop.getHpc(): #cloud\n \n cmd = \"ssh -q -oStrictHostKeyChecking=no \" + str(master) + \" mkdir -p \" + randir \n self._log.debug(cmd) \n p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: creating directory \" + randir + \" in \" + master + \". failed, status: \" + str(p.returncode) + \" --- \" + std[1]\n self._log.error(msg)\n if self.verbose:\n print msg\n #copy RainHadoopSetupScript.py and scripts\n rainhadoopsetupscript = os.path.expanduser(os.path.dirname(__file__)) + \"/RainHadoopSetupScript.py\"\n cmd = \"scp -q -oBatchMode=yes \" + rainhadoopsetupscript + \" \" + str(master) + \":\" + randir + \"/\" + randfile + \"RainHadoopSetupScript.py\" \n self._log.debug(cmd)\n p = Popen(cmd.split())\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: sending scripts to \" + master + \". failed, status: \" + str(p.returncode) \n self._log.error(msg)\n if self.verbose:\n print msg \n \n cmd = \"scp -q -oBatchMode=yes \" + start_script_name + \" \" + run_script_name + \" \" + shutdown_script_name + \\\n \" \" + genConf_script_name + \" \" + randfile + \"setup.sh\" + \" \" + str(master) + \":\" + randir + \"/\"\n self._log.debug(cmd)\n p = Popen(cmd.split())\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: sending scripts to \" + master + \". failed, status: \" + str(p.returncode) \n self._log.error(msg)\n if self.verbose:\n print msg \n \n #remove files created local \n cmd = \"rm -f \" + start_script_name + \" \" + shutdown_script_name + \" \" + \\\n run_script_name + \" \" + randfile + \"setup.sh\" + \" \" + genConf_script_name \n self._log.debug(cmd)\n p = Popen(cmd.split())\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: sending scripts to \" + master + \". failed, status: \" + str(p.returncode) \n self._log.error(msg)\n if self.verbose:\n print msg\n \n #setting up hadoop\n msg = \"Setting up Hadoop environment in the \" + self.user + \" home directory\"\n self._log.info(msg) \n if self.verbose:\n print msg \n #setting up hadoop cluster\n cmd = \"ssh -q -oStrictHostKeyChecking=no \" + str(master) + \" \" + randir + \"/\" + randfile + \"setup.sh\" \n self._log.debug(cmd) \n p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: setting up hadoop in \" + master + \". failed, status: \" + str(p.returncode) + \" --- \" + std[1]\n self._log.error(msg)\n if self.verbose:\n print msg\n \n msg = \"Configure Hadoop cluster in the \" + self.user + \" home directory\"\n self._log.info(msg) \n if self.verbose:\n print msg\n #configuing hadoop cluster\n cmd = \"ssh -q -oStrictHostKeyChecking=no \" + str(master) + \" \" + randir + \"/\" + genConf_script_name \n self._log.debug(cmd) \n p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: starting hadoop cluster in \" + master + \". 
failed, status: \" + str(p.returncode) + \" --- \" + std[1]\n self._log.error(msg)\n if self.verbose:\n print msg\n \n msg = \"Starting Hadoop cluster in the \" + self.user + \" home directory\"\n self._log.info(msg) \n if self.verbose:\n print msg\n #starting hadoop cluster\n cmd = \"ssh -q -oStrictHostKeyChecking=no \" + str(master) + \" \" + randir + \"/\" + start_script_name \n self._log.debug(cmd) \n p = Popen(cmd.split())\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: starting hadoop cluster in \" + master + \". failed, status: \" + str(p.returncode) \n self._log.error(msg)\n if self.verbose:\n print msg\n \n else:#HPC\n \n #create dir\n cmd = \"mkdir -p \" + os.path.expandvars(os.path.expanduser(randir)) \n self._log.debug(cmd) \n p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: creating dir \" + os.path.expandvars(os.path.expanduser(randir)) + \". failed, status: \" + str(p.returncode) + \" --- \" + std[1]\n self._log.error(msg)\n if self.verbose:\n print msg\n \n #script to set up and config hadoop cluster all in one\n all_script_name = randfile + \"all\"\n f = open(all_script_name, 'w')\n \n f.write(\"echo \\\"Setting up Hadoop environment in the \" + self.user + \" home directory\\\" \\n\")\n f.write(\". \" + randir + \"/\" + randfile + \"setup.sh \\n\")\n f.write(\"echo \\\"Configure Hadoop cluster in the \" + self.user + \" home directory\\\" \\n\")\n f.write(randir + \"/\" + genConf_script_name + \" \\n\") \n f.write(\"echo \\\"Starting Hadoop cluster in the \" + self.user + \" home directory\\\" \\n\")\n f.write(randir + \"/\" + start_script_name + \" \\n\")\n if jobscript != \"interactive\" and jobscript != \"background\":\n f.write(\"echo \\\"Executing Job \" + self.user + \" home directory\\\" \\n\")\n f.write(randir + \"/\" + run_script_name + \" \\n\")\n f.write(\"echo \\\"Stopping Hadoop Cluster\\\" \\n\")\n f.write(randir + \"/\" + shutdown_script_name + \" \\n\")\n else:\n f.write(\"bash \\n\")\n \n f.close()\n os.system(\"chmod +x \" + all_script_name)\n \n #copy RainHadoopSetupScript.py and scripts\n rainhadoopsetupscript = os.path.expanduser(os.path.dirname(__file__)) + \"/RainHadoopSetupScript.py\"\n cmd = \"cp \" + rainhadoopsetupscript + \" \" + os.path.expandvars(os.path.expanduser(randir)) + \"/\" + randfile + \"RainHadoopSetupScript.py\" \n self._log.debug(cmd)\n p = Popen(cmd.split())\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: copying scripts to \" + os.path.expandvars(os.path.expanduser(randir)) + \". failed, status: \" + str(p.returncode) \n self._log.error(msg)\n if self.verbose:\n print msg \n \n if os.path.expandvars(os.path.expanduser(randir)).rstrip(\"/\") != os.getenv('HOME'):\n f = open(shutdown_script_name, 'a')\n cmd = \"\\n rm -rf \" + randir + \" &\" \n f.write(cmd) \n f.close()\n \n cmd = \"mv \" + start_script_name + \" \" + run_script_name + \" \" + shutdown_script_name + \\\n \" \" + genConf_script_name + \" \" + randfile + \"setup.sh\" + \" \" + all_script_name + \" \" + randir \n self._log.debug(cmd)\n p = Popen(cmd.split())\n std = p.communicate()\n if p.returncode != 0:\n msg = \"ERROR: moving scripts to \" + randir + \". failed, status: \" + str(p.returncode) \n self._log.error(msg)\n if self.verbose:\n print msg\n \n return randir, randfile",
"def setup():\n\n here = os.path.abspath(__file__)\n here = os.path.join(os.path.dirname(here), '..')\n here = os.path.normpath(here)\n\n appengine_location = os.path.join(here, 'thirdparty', 'google_appengine')\n\n extra_paths = [here,\n os.path.join(appengine_location, 'lib', 'django'),\n os.path.join(appengine_location, 'lib', 'webob_0_9'),\n os.path.join(appengine_location, 'lib', 'yaml', 'lib'),\n os.path.join(appengine_location, 'lib', 'fancy_urllib'),\n os.path.join(appengine_location, 'lib', 'simplejson'),\n os.path.join(appengine_location, 'lib', 'protorpc-1.0'),\n appengine_location,\n os.path.join(here, 'app'),\n ]\n\n sys.path = extra_paths + sys.path\n\n os.environ['SERVER_SOFTWARE'] = 'Development Interactive Shell'",
"def configure_spark_base(cx):\n spark_home = '/usr/local/spark-2.1.0-bin-hadoop2.4'\n spark_log_dir = '/var/log/spark'\n remote_commands(cx, [\n 'sudo adduser --firstuid 1001 --disabled-password --gecos \"\" spark',\n 'wget --progress=dot:mega http://www-eu.apache.org/dist/spark/spark-2.1.0/spark-2.1.0-bin-hadoop2.4.tgz',\n 'sudo tar xfz spark-2.1.0-bin-hadoop2.4.tgz -C /usr/local',\n 'sudo mkdir %s' % spark_log_dir,\n 'rm spark-2.1.0-bin-hadoop2.4.tgz'\n\n ])\n print \"configure_spark_base\"",
"def _configure_mpi(self):\n if GetOption('help'):\n return None\n\n env = self.Clone()\n\n env['CXX'] = None\n\n if env.subst(\"$MPI_PKG\") != \"\":\n _configure_mpi_pkg(env)\n return env\n\n for mpi in ['openmpi', 'mpich']:\n if not load_mpi(mpi):\n continue\n if _find_mpicc(env):\n print(f'{mpi} is installed')\n return env\n print(f'No {mpi} installed and/or loaded')\n print(\"No MPI installed\")\n return None",
"def __configure(self):\n\n if hpccm.config.g_cpu_arch == cpu_arch.X86_64:\n if not self.__configure_opts:\n self.__configure_opts = ['--enable-shared', '--enable-openmp',\n '--enable-threads', '--enable-sse2']\n\n if hpccm.config.test_cpu_feature_flag('avx'):\n self.__configure_opts.append('--enable-avx')\n\n if hpccm.config.test_cpu_feature_flag('avx2'):\n self.__configure_opts.append('--enable-avx2')\n\n if hpccm.config.test_cpu_feature_flag('avx512'):\n self.__configure_opts.append('--enable-avx512')\n else:\n if not self.__configure_opts:\n self.__configure_opts = ['--enable-shared', '--enable-openmp',\n '--enable-threads']\n\n if self.__mpi:\n self.__configure_opts.append('--enable-mpi')",
"def _resolve_python_path(self):\n\n application_root_path = application_services.get_application_root_path()\n env_utils.set_python_path(self.get_working_directory(application_root_path))",
"def _configure_mpi_pkg(env):\n if _find_mpicc(env):\n return\n try:\n env.ParseConfig('pkg-config --cflags --libs $MPI_PKG')\n except OSError as error:\n print('\\n**********************************')\n print(f'Could not find package MPI_PKG={env.subst(\"$MPI_PKG\")}\\n')\n print('Unset it or update PKG_CONFIG_PATH')\n print('**********************************')\n raise error\n\n return",
"def setup_paths(binaryDir, appDir):\r\n\r\n paths = [\r\n binaryDir,\r\n os.path.join(binaryDir, 'lib'),\r\n os.path.join(binaryDir, 'scripts'),\r\n ] \r\n sys.path.extend([os.path.normpath(p) for p in paths])\r\n\r\n find_eggs(paths[0])\r\n find_eggs(appDir)",
"def setup_python_path(self):\n self.prepare_environment_variables()\n fixed_paths = self.prepare_libraries()\n fixed_paths += self.prepare_code_directories()\n return fixed_paths",
"def load_config() -> Mapping[str, Mapping[str, str]]:\n repo_root = Path(__file__).parent.parent.parent\n\n predictrip_config_dir = path.join(repo_root, 'config', 'predictrip')\n\n config = ConfigParser(dict_type=dict, empty_lines_in_values=False)\n config.read_file(open(path.join(predictrip_config_dir, 'predictrip-defaults.ini')))\n config.read(path.join(predictrip_config_dir, 'predictrip-site.ini'))\n\n # TODO: if not specified in predictrip-site.ini, check AWS credential sources in order checked by aws jars and boto\n # (see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#configuring-credentials),\n # then add whatever options to spark_conf-generation needed to distribute the first one present to workers. pass\n # credentials as params to boto's client method\n\n # TODO: to avoid configuration duplication, get from other packages' config files and remove from predictrip files:\n # config/hadoop/core-site.xml: name_node_host and name_node_port\n\n return config"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Calculates realized vol using historical spot data | def calculate_realized_vol(spot_df, tenor='6m'):
weighting = (22-1)/(22+1)
short_vol, long_vol, robust_vol = {}, {}, {}
for ccy, row in spot_df.iteritems():
long_vol[ccy] = volatility(row, tenor, returns_type=Returns.LOGARITHMIC)
short_vol[ccy] = annualize(returns(row,1, Returns.LOGARITHMIC).ewm(alpha=1-weighting, adjust=True).std())*100
robust_vol[ccy] = robust_volatility(row, tenor)
return pd.DataFrame.from_dict(long_vol), pd.DataFrame.from_dict(short_vol), pd.DataFrame.from_dict(robust_vol) | [
"def vat_rate():",
"def inVolts(mv):\r\n return mv/1000.0",
"def phase_space_volume(self) -> float:",
"def getOBV(currency):\n temp = exeSqlSelect(\"SELECT close, volume FROM \" + str(currency) + \"_Candles ORDER BY PID desc limit 100\")\n\n prices = []\n for i in range(len(temp)):\n prices.append(temp[-1 - i][0])\n\n volume = []\n for i in range(len(temp)):\n volume.append(temp[-1 - i][1])\n\n # On-balance volume indicator\n obv = 0\n OBV = []\n OBV_mov = []\n OBV_pred = []\n for i in range(len(prices)):\n if (i > 0):\n if ((prices[i] - prices[i - 1]) > 0):\n obv += volume[i]\n if ((prices[i] - prices[i - 1]) < 0):\n obv -= volume[i]\n\n OBV.append(obv)\n\n if (i < 14):\n OBV_mov.append(OBV[len(OBV) - 1])\n\n if (i == 14):\n total = 0\n j = 1\n while (j < 15):\n total += OBV[len(OBV) - j]\n j += 1\n OBV_mov.append(total / 14)\n\n if (i > 14):\n OBV_mov.append((OBV[len(OBV) - 1] - OBV_mov[len(OBV_mov) - 1]) * (2 / 15) + OBV_mov[len(OBV_mov) - 1])\n\n if (OBV[len(OBV) - 1] > OBV_mov[len(OBV_mov) - 1]):\n OBV_pred.append(1)\n else:\n OBV_pred.append(-1)\n return OBV_pred[len(OBV_pred) - 1]",
"def calc_volga(d1,d2,t, sigma, S, K):\n #https://quant.stackexchange.com/questions/7025/how-to-calculate-vomma-of-black-scholes-model\n volga = S*np.sqrt(t)*d1*d2*norm.cdf(d1)/sigma\n return volga",
"def get_volume(self):\n\t\treturn abs(inner(cross(self.a, self.b), self.h))/2",
"def annualize_vol(r,periods_per_year):\r\n return r.std()*(periods_per_year**0.5)",
"def get_volume(self, vtu):\r\n\r\n self.warped_mesh(vtu)\r\n self.left_ventricle_volume = 0\r\n for e in self.left_ventricle_cavity.cells():\r\n x1, y1, z1 = self.left_ventricle_cavity.coordinates()[e][0]\r\n x2, y2, z2 = self.left_ventricle_cavity.coordinates()[e][1]\r\n x3, y3, z3 = self.left_ventricle_cavity.coordinates()[e][2]\r\n x4, y4, z4 = self.left_ventricle_cavity.coordinates()[e][3]\r\n v14 = np.array([x1 - x4, y1 - y4, z1 - z4])\r\n v24 = np.array([x2 - x4, y2 - y4, z2 - z4])\r\n v34 = np.array([x3 - x4, y3 - y4, z3 - z4])\r\n ve = 1 / 6 * abs(np.dot(v14, np.cross(v24, v34)))\r\n self.left_ventricle_volume = self.left_ventricle_volume + ve\r\n\r\n self.right_ventricle_volume = 0\r\n for e in self.right_ventricle_cavity.cells():\r\n x1, y1, z1 = self.right_ventricle_cavity.coordinates()[e][0]\r\n x2, y2, z2 = self.right_ventricle_cavity.coordinates()[e][1]\r\n x3, y3, z3 = self.right_ventricle_cavity.coordinates()[e][2]\r\n x4, y4, z4 = self.right_ventricle_cavity.coordinates()[e][3]\r\n v14 = np.array([x1 - x4, y1 - y4, z1 - z4])\r\n v24 = np.array([x2 - x4, y2 - y4, z2 - z4])\r\n v34 = np.array([x3 - x4, y3 - y4, z3 - z4])\r\n ve = 1 / 6 * abs(np.dot(v14, np.cross(v24, v34)))\r\n self.right_ventricle_volume = self.right_ventricle_volume + ve\r\n\r\n print('left ventricular volume : {} [mm3]'.format(self.left_ventricle_volume.round(0)))\r\n print('right ventricular volume : {} [mm3]'.format(self.right_ventricle_volume.round(0)))\r\n print(\r\n 'ventricular volume : {} [mm3]'.format((self.left_ventricle_volume + self.right_ventricle_volume).round(0)))\r\n return self.left_ventricle_volume",
"def calc_volume_kernel(solar_az,solar_zn,sensor_az,sensor_zn,kernel):\n\n relative_az = sensor_az - solar_az\n\n # Eq 2. Schlapfer et al. IEEE-TGARS 2015\n phase = np.arccos(np.cos(solar_zn)*np.cos(sensor_zn) + np.sin(solar_zn)*np.sin(sensor_zn)* np.cos(relative_az))\n\n if kernel == 'ross_thin':\n # Eq 13. Wanner et al. JGRA 1995\n k_vol = ((np.pi/2 - phase)*np.cos(phase) + np.sin(phase))/ (np.cos(sensor_zn)*np.cos(solar_zn)) - (np.pi/2)\n elif kernel == 'ross_thick':\n # Eq 7. Wanner et al. JGRA 1995\n k_vol = ((np.pi/2 - phase)*np.cos(phase) + np.sin(phase))/ (np.cos(sensor_zn)*np.cos(solar_zn)) - (np.pi/4)\n elif kernel in ('hotspot','roujean'):\n # Eq 8 Roujean et al. JGR 1992\n k_vol1 = (4/(3*np.pi)) * (1/(np.cos(solar_zn) + np.cos(sensor_zn)))\n k_vol2 = (((np.pi/2) - phase) * np.cos(phase) + np.sin(phase))\n k_vol = k_vol1*(k_vol2- (1/3))\n if kernel == 'hotspot':\n # Eq. 12 Maignan et al. RSE 2004\n k_vol = k_vol1* k_vol2 * (1 + (1 + (phase/np.radians(1.5)))**-1) - (1/3)\n else:\n print(\"Unrecognized kernel type: %s\" % kernel)\n k_vol = None\n return k_vol",
"def compute_historical_volatility(self,n_days, shift=0):\n print(f\"Computing historical volatility {n_days} days\")\n\n # Set the list of volatilities\n volatilities = []\n \n # Go through each rows and return the volatility of n range\n for idx, row in self.data.iterrows():\n current_idx = int(row['idx'])\n\n if current_idx <= len(self.data['returns']) - n_days:\n current_range = self.data.iloc[current_idx:n_days+current_idx,:]['returns']\n volatilities.append(current_range.std())\n \n else: \n volatilities.append(np.nan)\n \n self.data['HV_{0}_days'.format(n_days)] = volatilities\n self.data['hvol{0}'] = EOS.data['hvol21'] = EOS.data['stdev21'] * (365**0.5) # Annualize.\n print(\"Done!\")",
"def volts(self, value):\n volt = ((value - 2048) * 10.) / 2048.\n return volt",
"def volatility(self,\n strike: types.FloatTensor,\n expiry_dates: Optional[types.DateTensor] = None,\n expiry_times: Optional[types.FloatTensor] = None,\n term: Optional[types.Period] = None) -> types.FloatTensor:\n pass",
"def getHistoricalVolatility(idx):\n # returns the historical volatility at any index from the dataset\n return data.loc[idx, 'historical_volatility']",
"def calculateHistoricalVolatility(dataset_size, rolling_wind_size):\n # calculated the historical volatility by rolling window standard deviation on ln(daily_returns)\n # daily_return = [0] * dataset_size\n # for i in range (0, dataset_size - 1):\n # daily_return[i] = np.log(data.loc[i, 'future_avg'] / data.loc[i+1, 'future_avg'])\n # daily_return[i] = (data.loc[i, 'future_avg'] / data.loc[i+1, 'future_avg']) - 1\n \n # data['daily_return'] = daily_return\n # data['historical_volatility'] = data['daily_return'].rolling(rolling_wind_size).std() * np.sqrt(252 / (rolling_wind_size / (12 * 24 * 60))) # converted to annual\n\n # data['historical_volatility'] = (data['implied_volatility']).rolling(rolling_wind_size).median() \n data['historical_volatility'] = (data['implied_volatility']).ewm(span = rolling_wind_size).mean()",
"def calculate_vol_swap_screen(bbids=['USDJPY', 'AUDUSD'], long_tenor='6m', \n end=business_day_offset(date.today(), -1, roll='forward'), \n start=RelativeDate('-5y').apply_rule()):\n \n vol_df, spot_df = get_data(bbids, long_tenor, end, start)\n long_rvol, short_rvol, robust_vol = calculate_realized_vol(spot_df, long_tenor)\n results = pd.DataFrame(columns=['crosses', '6m Implied spread', 'Beta', f'Entry vs {long_tenor} Z-score', \n 'Avg Carry Z-score', 'Score', f'{long_tenor} Realized Vol', f'{long_tenor} Carry', f'{long_tenor} 5y Avg', f'{long_tenor} 10th', \n f'{long_tenor} 90th', '1m Realized Vol', '1m Carry'])\n pairs = itertools.combinations(crosses, 2)\n for pair in pairs:\n short, long = pair[0], pair[1]\n beta = LinearRegression(vol_df[short], vol_df[long], fit_intercept=False).coefficient(1)\n iv_spread = vol_df.iloc[-1][long] - beta*vol_df.iloc[-1][short]\n rv_long_spread = long_rvol[long] - beta*long_rvol[short] \n rv_short_spread = short_rvol[long] - beta*short_rvol[short]\n robust_spread = robust_vol[long] - beta*robust_vol[short]\n z_score = (robust_spread.mean() - iv_spread)/robust_spread.std()\n carry_long = rv_long_spread[-1] - iv_spread\n carry_short = rv_short_spread[-1] - iv_spread\n carry_avg = (carry_long + carry_short)/2\n carry_zscore = carry_avg / robust_spread.std()\n results = results.append({'crosses': f'{long} vs. {short}', '6m Implied spread': iv_spread, 'Beta': beta, f'Entry vs {long_tenor} Z-score': z_score,\n 'Avg Carry Z-score': carry_zscore, 'Score': z_score + carry_zscore, f'{long_tenor} Realized Vol': rv_long_spread[-1], \n f'{long_tenor} Carry': carry_long,f'{long_tenor} 5y Avg': robust_spread.mean(), \n f'{long_tenor} 10th': rv_long_spread.quantile(0.1), f'{long_tenor} 90th': rv_long_spread.quantile(0.9), \n '1m Realized Vol': rv_short_spread[-1], '1m Carry': carry_short}, ignore_index=True)\n return results.set_index('crosses').sort_values('6m Implied spread')",
"def schedule_variance(self):\n \n ev = self.apc * self.budget\n pv = self.ppc * self.budget\n \n return ev - pv",
"def _vol_fn(t, x):\n # Get parameter values at time `t`\n volatility = _get_parameters(tf.expand_dims(t, -1), self._volatility)[0]\n volatility = tf.transpose(volatility)\n if self._corr_matrix is not None:\n corr_matrix = _get_parameters(tf.expand_dims(t, -1), self._corr_matrix)\n corr_matrix = corr_matrix[0]\n corr_matrix = tf.linalg.cholesky(corr_matrix)\n else:\n corr_matrix = tf.eye(self._dim, dtype=volatility.dtype)\n\n return volatility * corr_matrix + tf.zeros(\n x.shape.as_list()[:-1] + [self._dim, self._dim],\n dtype=volatility.dtype)",
"def volume(self, timestamp: int):\n return self.prices.iloc[numpy.searchsorted(self.prices.index, timestamp, side='right') - 1]['volume']",
"def total_vol(self):\n\t\treturn self._total_vol"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
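A minimal usage sketch for the calculate_realized_vol entry above. It assumes the gs_quant timeseries helpers it calls (volatility, robust_volatility, returns, annualize, Returns) are importable, that the code runs under a pandas version that still provides DataFrame.iteritems (pre-2.0), and that spot_df is a DataFrame of daily spot fixings with one column per currency pair; the data below is synthetic and illustrative only.

import numpy as np
import pandas as pd

# Hypothetical spot history: business-day index, one column per currency pair.
dates = pd.bdate_range('2019-01-01', periods=260)
spot_df = pd.DataFrame({
    'USDJPY': 110 + np.random.randn(len(dates)).cumsum() * 0.2,
    'AUDUSD': 0.70 + np.random.randn(len(dates)).cumsum() * 0.002,
}, index=dates)

long_rvol, short_rvol, robust_vol = calculate_realized_vol(spot_df, tenor='6m')
print(long_rvol.tail())  # trailing 6m log-return realized vol per pair, in vol points
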
Calculates volatility swap spread screen | def calculate_vol_swap_screen(bbids=['USDJPY', 'AUDUSD'], long_tenor='6m',
end=business_day_offset(date.today(), -1, roll='forward'),
start=RelativeDate('-5y').apply_rule()):
vol_df, spot_df = get_data(bbids, long_tenor, end, start)
long_rvol, short_rvol, robust_vol = calculate_realized_vol(spot_df, long_tenor)
results = pd.DataFrame(columns=['crosses', '6m Implied spread', 'Beta', f'Entry vs {long_tenor} Z-score',
'Avg Carry Z-score', 'Score', f'{long_tenor} Realized Vol', f'{long_tenor} Carry', f'{long_tenor} 5y Avg', f'{long_tenor} 10th',
f'{long_tenor} 90th', '1m Realized Vol', '1m Carry'])
pairs = itertools.combinations(crosses, 2)
for pair in pairs:
short, long = pair[0], pair[1]
beta = LinearRegression(vol_df[short], vol_df[long], fit_intercept=False).coefficient(1)
iv_spread = vol_df.iloc[-1][long] - beta*vol_df.iloc[-1][short]
rv_long_spread = long_rvol[long] - beta*long_rvol[short]
rv_short_spread = short_rvol[long] - beta*short_rvol[short]
robust_spread = robust_vol[long] - beta*robust_vol[short]
z_score = (robust_spread.mean() - iv_spread)/robust_spread.std()
carry_long = rv_long_spread[-1] - iv_spread
carry_short = rv_short_spread[-1] - iv_spread
carry_avg = (carry_long + carry_short)/2
carry_zscore = carry_avg / robust_spread.std()
results = results.append({'crosses': f'{long} vs. {short}', '6m Implied spread': iv_spread, 'Beta': beta, f'Entry vs {long_tenor} Z-score': z_score,
'Avg Carry Z-score': carry_zscore, 'Score': z_score + carry_zscore, f'{long_tenor} Realized Vol': rv_long_spread[-1],
f'{long_tenor} Carry': carry_long,f'{long_tenor} 5y Avg': robust_spread.mean(),
f'{long_tenor} 10th': rv_long_spread.quantile(0.1), f'{long_tenor} 90th': rv_long_spread.quantile(0.9),
'1m Realized Vol': rv_short_spread[-1], '1m Carry': carry_short}, ignore_index=True)
return results.set_index('crosses').sort_values('6m Implied spread') | [
"def set_driftless_volatility(self):\n day_range = self.day_ranges.get('volatility', [20, 40, 60, 80, 100])\n dict = {}\n\n o = np.log(self.opens / self.closes.shift(1))\n u = np.log(self.highs / self.opens)\n d = np.log(self.lows / self.opens)\n c = np.log(self.closes / self.opens)\n\n for day in day_range:\n k = 0.34 / (1.34 + (day + 1) / (day - 1))\n v_o = o.rolling(window=day).std() ** 2\n v_c = c.rolling(window=day).std() ** 2\n v_rs = (u * (u - c) + d * (d - c)).rolling(window=day).mean()\n v = v_o + k * v_c + (1 - k) * v_rs\n dict[day] = np.sqrt(256 * v)\n self.driftless_volatility = self.get_dataframe_from_dict(dict, 'driftless_volatility')",
"def calc_volatility(sgp_dict, final_stats, stat, factor, reverse=True):\n stats_title = \"Stats\" + stat\n up_vol_title = \"UpVol_\" + stat\n down_vol_title = \"DownVol_\" + stat\n sgp_title = stat + \" SGP\"\n sgp = abs(sgp_dict[sgp_title] * factor)\n final_stats.sort(key=operator.itemgetter(stats_title), reverse=reverse)\n list_length = len(final_stats)\n\n for i in range(list_length):\n up_counter = 0\n down_counter = 0\n j = i - 1\n k = i + 1\n current_team_stat = final_stats[i][stats_title]\n while j > 0 and (abs(final_stats[j][stats_title] - current_team_stat) <= abs(sgp)):\n if final_stats[j][stats_title] - current_team_stat == sgp:\n up_counter -= .5\n j -= 1\n up_counter += 1\n while (k < list_length and\n (abs(current_team_stat - final_stats[k][stats_title]) <= abs(sgp))):\n if current_team_stat - final_stats[k][stats_title] == sgp:\n down_counter -= .5\n k += 1\n down_counter += 1\n final_stats[i][up_vol_title] = up_counter\n final_stats[i][down_vol_title] = down_counter",
"def calculate_realized_vol(spot_df, tenor='6m'):\n weighting = (22-1)/(22+1)\n short_vol, long_vol, robust_vol = {}, {}, {}\n \n for ccy, row in spot_df.iteritems():\n long_vol[ccy] = volatility(row, tenor, returns_type=Returns.LOGARITHMIC)\n short_vol[ccy] = annualize(returns(row,1, Returns.LOGARITHMIC).ewm(alpha=1-weighting, adjust=True).std())*100\n robust_vol[ccy] = robust_volatility(row, tenor)\n \n return pd.DataFrame.from_dict(long_vol), pd.DataFrame.from_dict(short_vol), pd.DataFrame.from_dict(robust_vol)",
"def chainsaw(self):\n if self.gui.B(\"Voltage sweep/Normal_Order\"):\n voltage_ramp=np.arange(self.startvolt, self.maxim,self.step)\n voltage_ramp=np.append(voltage_ramp,np.arange(self.maxim,self.minim,-self.step)) \n voltage_ramp=np.append(voltage_ramp,np.arange(self.minim,self.startvolt,self.step)) \n else:\n voltage_ramp=np.arange(self.startvolt, self.minim,-self.step)\n voltage_ramp=np.append(voltage_ramp,np.arange(self.minim,self.maxim,self.step)) \n voltage_ramp=np.append(voltage_ramp,np.arange(self.maxim,self.startvolt,-self.step))\n voltage_ramp=np.tile(voltage_ramp, int(self.number_of_ramps))\n voltage_ramp=np.append(self.ramp, self.startvolt)\n return voltage_ramp, preramp",
"def vat_rate():",
"def adjust_water_vol(self, ratio=(0.5, 0.25)):\n satoms = sorted( self.atoms, key= lambda x: x.id)\n assert( len( satoms ) % 3 == 0 )\n assert( ratio[0] + 2 * ratio[1] == 1.0)\n for idx in xrange( len(satoms) / 3):\n o = satoms[ idx * 3 ]\n h1 = satoms[ idx * 3 + 1 ]\n h2 = satoms[ idx * 3 + 2 ]\n\n vsum = sum( ele.voro_volume for ele in [o, h1, h2])\n vo = ratio[0] * vsum\n vh = ratio[1] * vsum\n\n o.adj_vol = vo\n h1.adj_vol = vh\n h2.adj_vol = vh",
"def compute_sharpe_ratio(returns, vol, riskfree_rate): \n\n sharperatio = (returns - riskfree_rate) / vol\n \n return sharperatio",
"def volatility_surface(self, asset: str) -> VolatilitySurface:\n pass",
"def league_volatility(sgp_dict, final_stats, factor=1):\n calc_volatility(sgp_dict, final_stats, \"R\", factor)\n calc_volatility(sgp_dict, final_stats, \"HR\", factor)\n calc_volatility(sgp_dict, final_stats, \"RBI\", factor)\n calc_volatility(sgp_dict, final_stats, \"SB\", factor)\n calc_volatility(sgp_dict, final_stats, \"OPS\", factor)\n calc_volatility(sgp_dict, final_stats, \"W\", factor)\n calc_volatility(sgp_dict, final_stats, \"SV\", factor)\n calc_volatility(sgp_dict, final_stats, \"K\", factor)\n calc_volatility(sgp_dict, final_stats, \"ERA\", factor, False)\n calc_volatility(sgp_dict, final_stats, \"WHIP\", factor, False)\n for team in final_stats:\n team['Total_Upward_Volatility'] = sum([value for key, value in team.items()\n if 'UpVol' in key])\n team['Total_Downward_Volatility'] = sum([value for key, value in team.items()\n if 'DownVol' in key])\n return final_stats",
"def compute_historical_volatility(self,n_days, shift=0):\n print(f\"Computing historical volatility {n_days} days\")\n\n # Set the list of volatilities\n volatilities = []\n \n # Go through each rows and return the volatility of n range\n for idx, row in self.data.iterrows():\n current_idx = int(row['idx'])\n\n if current_idx <= len(self.data['returns']) - n_days:\n current_range = self.data.iloc[current_idx:n_days+current_idx,:]['returns']\n volatilities.append(current_range.std())\n \n else: \n volatilities.append(np.nan)\n \n self.data['HV_{0}_days'.format(n_days)] = volatilities\n self.data['hvol{0}'] = EOS.data['hvol21'] = EOS.data['stdev21'] * (365**0.5) # Annualize.\n print(\"Done!\")",
"def get_volume(self):\n\t\treturn abs(inner(cross(self.a, self.b), self.h))/2",
"def test_volume_weighted_stock_price(self):\n\n self.record_trades(True)\n now = self.t2 # epoch + 10 minutes\n five_minutes = now / 2\n self.mock_time(now)\n last_five_minutes = (now - five_minutes, now)\n\n # TEA\n # vwsp = sum_i(price_i * quantity_i) / sum_i(quantity_i)\n # = 500000 / 4000\n # = 125\n\n self.assertEqual(\n self.market.calculate_vwsp(\"TEA\", last_five_minutes), 125\n )\n\n # POP\n # vwsp = sum_i(price_i * quantity_i) / sum_i(quantity_i)\n # = 100000 / 1000\n # = 100\n\n self.assertEqual(\n self.market.calculate_vwsp(\"POP\", last_five_minutes), 100\n )\n\n # ALE\n # vwsp = None (no trades)\n self.assertEqual(\n self.market.calculate_vwsp(\"ALE\", last_five_minutes), None\n )",
"def measureMomentumSpreadCalc(self):\n #if self.view.checkBox_4_s.isChecked()==True:\n self.pSpread = self.func.calcMomSpread(self.Cmagnets,'DIP01',self.Is,self.I)",
"def phase_space_volume(self) -> float:",
"def getVoltageShunt_mV(self):\n self.createCurrentPowerMonitor()\n return self.currentPowerMonitor.shunt_voltage()",
"def UpdateBunchEnergySpread(self):\r\n spreadArray = np.array([]) # dummy variable\r\n\r\n for i in range(len(self.listOfParticles)):\r\n spreadArray = np.append(spreadArray, self.listOfParticles[i].TotalEnergy())\r\n\r\n self.bunchEnergySpread = np.std(spreadArray)",
"def calc_vswr(ant,f,r):\n\n# fwdcalfac = get_calfac(f)\n# revcalfac = get_calfac(r)\n\n f=abs(DAQC.getADC(0,f))\n r=abs(DAQC.getADC(0,r))\n# TODO For normalizing elements when true test rig is ready\n# f=f-fwdcalfac\n# r=r-revcalfac\n# Need to divide voltage by 50 ohm to get current, multiply current times voltage to get watts\n x=abs(1 + math.sqrt(rm_utils.safe_div(r,f)))\n y=abs(1 - math.sqrt(rm_utils.safe_div(r,f)))\n swr=round(rm_utils.safe_div(x,y), 3)\n if swr > 3.0:\n logger.warning(\"calc_vswr: Ant Height: {} SWR: \\033[91m {} \\033[0m\".format(ant,swr))\n if DEBUG:\n print(\"Ant Height: {} SWR: \\033[91m {} \\033[0m\".format(ant,swr))\n else:\n if DEBUG:\n print(\"Ant Height: {} SWR: \\033[92m {} \\033[0m\".format(ant,swr))\n return swr",
"def volumen_esfera(radio):\n volumen = (4 / 3) * pi * radio ** 3\n return volumen",
"def volts(self, value):\n volt = ((value - 2048) * 10.) / 2048.\n return volt"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
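A hypothetical invocation of the calculate_vol_swap_screen entry above. It assumes an authenticated gs_quant session, that get_data and calculate_realized_vol are defined as in the surrounding entries, and a pandas version where DataFrame.append still exists (pre-2.0). Note that the function iterates over a module-level name `crosses` rather than its bbids argument, so the sketch defines `crosses` to match the list it passes in; the universe shown is illustrative.

# Illustrative only: hypothetical currency universe; requires gs_quant data access.
crosses = ['USDJPY', 'AUDUSD', 'EURUSD']
screen = calculate_vol_swap_screen(bbids=crosses, long_tenor='6m')
print(screen[['Beta', 'Score', '6m Implied spread']].head())
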
CJspeed2 Calculates CJ detonation velocity and CJ state FUNCTION SYNTAX [gas,cj_speed] = CJspeed2(P1,T1,q,mech) INPUT P1 = initial pressure (Pa) T1 = initial temperature (K) q = string of reactant species mole fractions mech = cti file containing mechanism data (e.g. 'gri30.cti') OUTPUT gas = gas object at CJ state cj_speed = CJ detonation speed (m/s) | def CJspeed2(P1, T1, q, mech):
gas2 = Solution(mech)
gas1 = Solution(mech)
gas = Solution(mech)
    #INITIAL CONDITIONS
gas.TPX = T1, P1, q;
gas1.TPX = T1, P1, q;
gas2.TPX = T1, P1, q;
#INITIALIZE ERROR VALUES & CHANGE VALUES
ERRFT = 1.0*10**-4; ERRFV = 1.0*10**-4;
r1 = gas1.density; V1 = 1/r1;
P1 = gas1.P; T1 = gas1.T;
i = 0;
#PRELIMINARY GUESS
Vg = V1/10; rg = 1/Vg;
gas.TD = T1,rg;
gas.equilibrate('UV')
Tg = gas.T;
gas2.TDX = Tg, rg, gas.X
#SAVE STATE
V = Vg; r = rg;
T = Tg;
deltaT = 1000; deltaV = 1000; cj_speed = 0;
#START LOOP
while(abs(deltaT) > ERRFT*T or abs(deltaV) > ERRFV*V):
i = i + 1
if i == 500:
print "CJ speed 2 calc did not converge"
return gas
#CALCULATE FH & FP FOR GUESS 1
[FH,FP,cj_speed] = FHFP_CJ2(gas,gas1,gas2)
#TEMPERATURE PERTURBATION
DT = T*0.01; Tper = T + DT;
Vper = V; Rper = 1/Vper;
gas.TD = Tper, Rper
gas.equilibrate('TV',2)
gas2.TDX = Tper, Rper, gas.X
#CALCULATE FHX & FPX FOR "IO" STATE
[FHX,FPX,cj_speed] = FHFP_CJ2(gas,gas1,gas2)
#ELEMENTS OF JACOBIAN
DFHDT = (FHX-FH)/DT; DFPDT = (FPX-FP)/DT;
#VOLUME PERTURBATION
DV = 0.01*V; Vper = V + DV;
Tper = T; Rper = 1/Vper;
gas.TD = Tper, Rper
gas.equilibrate('TV',2)
gas2.TDX = Tper, Rper, gas.X
#CALCULATE FHX & FPX FOR "IO" STATE
[FHX,FPX,cj_speed] = FHFP_CJ2(gas,gas1,gas2)
#ELEMENTS OF JACOBIAN
DFHDV = (FHX-FH)/DV; DFPDV = (FPX-FP)/DV;
#INVERT MATRIX
J = DFHDT*DFPDV - DFPDT*DFHDV
b = [DFPDV, -DFHDV, -DFPDT, DFHDT]
a = [-FH, -FP]
deltaT = (b[0]*a[0]+b[1]*a[1])/J; deltaV = (b[2]*a[0]+b[3]*a[1])/J;
#CHECK & LIMIT CHANGE VALUES
#TEMPERATURE
DTM = 0.2*T
if abs(deltaT) > DTM:
deltaT = DTM*deltaT/abs(deltaT)
#VOLUME
V2X = V + deltaV
if V2X > V1:
DVM = 0.5*(V1 - V)
else:
DVM = 0.2*V
if abs(deltaV) > DVM:
deltaV = DVM*deltaV/abs(deltaV)
#MAKE THE CHANGES
T = T + deltaT; V = V + deltaV; r = 1/V;
gas.TD = T, r
gas.equilibrate('TV',2)
gas2.TDX = T, r, gas.X
[FH,FP,cj_speed] = FHFP_CJ2(gas,gas1,gas2)
return [gas,cj_speed] | [
"def FHFP_CJ2(gas,gas1,gas2):\n \n P1 = gas1.P\n H1 = gas1.enthalpy_mass\n r1 = gas1.density\n P2 = gas.P\n H2 = gas.enthalpy_mass\n r2 = gas.density\n \n speeds = equilSoundSpeeds(gas2)\n w2s=(speeds[0])**2\n w1s = w2s*(r2/r1)**2\n FH = H2 + 0.5*w2s - (H1 + 0.5*w1s)\n FP = P2 + r2*w2s - (P1 + r1*w1s)\n return [FH, FP, sqrt(w1s)]",
"def bmc_j2(t=2001):\n x = time.time()\n tt = min(5,max(1,.05*t))\n abc('bmc3 -r -T %0.2f'%tt)\n if is_sat():\n## print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())\n return RESULT[get_status()]\n## abc('bmc3 -T 1')\n N = n_bmc_frames()\n N = max(1,N)\n## print bmc_depth()\n## abc('bmc3 -C 1000000 -T %f -S %d'%(t,int(1.5*max(3,max_bmc))))\n## cmd = 'bmc3 -J 2 -D 4000 -C 1000000 -T %f -S %d'%(t,2*N)\n cmd = 'bmc3 -r -C 2000 -J %d'%(2*N+2)\n## print cmd\n abc(cmd)\n## if is_sat():\n## print 'cex found in %0.2f sec at frame %d'%((time.time()-x),cex_frame())\n gs = prob_status()\n if not gs in [0,1,-1]:\n print 'bmc_j2 returned %s'%str(gs)\n return RESULT[get_status()]",
"def __simulate_speed_shift(self, kph_1_value, speed_1_time_ms, kph_2_value, speed_2_time_ms):\n kph_msg_1 = self.get_CCVS1_message(kph_1_value) # Vehicle Speed signal is sent inside CCVS1 J1939 message\n kph_msg_2 = self.get_CCVS1_message(kph_2_value)\n kph_1_time_s = self.__ms_to_seconds(speed_1_time_ms)\n kph_2_time_s = self.__ms_to_seconds(speed_2_time_ms)\n\n print('Sending Vehicle Speed value: {0} for {1} second(s)'.format(kph_1_value, kph_1_time_s))\n self.__keep_sending_message_for_max_time(kph_msg_1, 100, speed_1_time_ms) # Cycle time 100ms (see J1939 std.)\n print('Sending Vehicle Speed value: {0} for {1} second(s)'.format(kph_2_value, kph_2_time_s))\n self.__keep_sending_message_for_max_time(kph_msg_2, 100, speed_2_time_ms) # Cycle time 100ms (see J1939 std.)",
"def PCO2_calculate_new(self,T0,P0=3.0e4,buf=0):\n #First calculate the temperature\n P=P0 #Pressure in bar\n T=T0+273.0 #self.adiabat(r) #Temperature at the freezing front\n #Calculate log10K1\n logK1=40.07639-T*2.53932*1.0e-2+T*T*5.27096*1.0e-6 + 0.0267*(P-1.0)/T\n #calculate log10K2\n logK2=-6.24763-282.56/T-0.119242*(P-1.0e3)/T\n #Calculate the equilibrium constant Kp from P and T\n #Calculate the equilibrium constant K' eq. A1 of Holloway et al. 1992\n logKp=33.82876-(163.291+0.092542*P)/T-T*2.53932*1.0e-2+T*T*5.27096*1.0e-6\n logKP2=logK1+logK2\n \n #-(163.291+0.092542*P)/T-(2.53932*1.0e-2)*T + T*T*(5.27096*1.0e-6)\n #Calculate fO2 from O'neil 1987, need to provide pressure in bars\n # and temperature in Kelvin\n \n # equation for log10fO2 for IW buffer\n # Eq A8 of Holloway et al., 1992\n logfO2=self.fO2_IW(T,P0,buf)\n #Calculate log10Kf\n #p. 112 of Holloway et al. 1992\n logKf=logKp+logfO2\n Kf=np.exp(2.303*logKf)\n #mole fraction of CO3 in melt\n XCO3_melt=Kf/(1.0+Kf)\n #wt% of CO3 dissolved in melt\n wco2=(120.2656*XCO3_melt)/(1.0-0.202656*XCO3_melt)\n #concentration in fraction\n c=wco2/100.0\n #To calculate CO2 partial pressure calculate K1\n #Equation A1(a) of Holloway et al.\n \n \n #40.07639-2.53932e-2*T+(5.27096e-6)*T*T+0.0267*(P-1000.0)/T\n K1=np.exp(2.303*logK1)\n fO2=np.exp(2.303*logfO2) #Eq 3 from Holloway et al 1992\n PCO2=K1*fO2 #fCO2=PCO2 in bars\n #Calculate PCO2 from Pawley et al. 1992, page 223\n #XCO2_melt(ppm)= 0.492*fCO2\n # Assuming PCO2 is fCO2\n #PCO2=(XCO3_melt*1.0e6)/0.492\n return(PCO2,c,fO2)",
"def ret_motion_jacobian(prev_th, trans_speed):\n motion_jacobian = np.zeros((3,3))\n I = np.eye(3)\n term_2 = np.zeros((3,3))\n term_2[0,2] = -trans_speed * np.sin(prev_th)\n term_2[1,2] = trans_speed * np.cos(prev_th)\n dt = 0.1\n motion_jacobian = I + dt * term_2\n \n return motion_jacobian",
"def _thcompton(tempbb, theta, gamma):\n #c use internally Thomson optical depth\n tautom = np.sqrt(2.250 + 3.0 / (theta * ((gamma + .50)**2 - 2.250))) - 1.50\n\n # Initialise arrays\n dphdot = np.zeros(900); rel = np.zeros(900); c2 = np.zeros(900)\n sptot = np.zeros(900); bet = np.zeros(900); x = np.zeros(900)\n\n #c JMAX - # OF PHOTON ENERGIES\n #c delta is the 10 - log interval of the photon array.\n delta = 0.02\n deltal = delta * np.log(10.0)\n xmin = 1e-4 * tempbb\n xmax = 40.0 * theta\n jmax = min(899, int(np.log10(xmax / xmin) / delta) + 1)\n\n #c X - ARRAY FOR PHOTON ENERGIES\n # Energy array is normalized by 511 keV, the rest energy of an electron\n x[:(jmax + 1)] = xmin * 10.0**(np.arange(jmax + 1) * delta)\n\n #c compute c2(x), and rel(x) arrays\n #c c2(x) is the relativistic correction to Kompaneets equation\n #c rel(x) is the Klein - Nishina cross section divided by the\n #c Thomson crossection\n for j in range(0, jmax):\n w = x[j]\n #c c2 is the Cooper's coefficient calculated at w1\n #c w1 is x(j + 1 / 2) (x(i) defined up to jmax + 1)\n w1 = np.sqrt(x[j] * x[j + 1])\n c2[j] = (w1**4 / (1.0 + 4.60 * w1 + 1.1 * w1 * w1))\n if (w <= 0.05):\n #c use asymptotic limit for rel(x) for x less than 0.05\n rel[j] = (1.0 - 2.0 * w + 26.0 * w * w * 0.2)\n else:\n z1 = (1.0 + w) / w**3\n z2 = 1.0 + 2.0 * w\n z3 = np.log(z2)\n z4 = 2.0 * w * (1.0 + w) / z2\n z5 = z3 / 2.0 / w\n z6 = (1.0 + 3.0 * w) / z2 / z2\n rel[j] = (0.75 * (z1 * (z4 - z3) + z5 - z6))\n\n #c the thermal emission spectrum\n jmaxth = min(900, int(np.log10(50 * tempbb / xmin) / delta))\n if (jmaxth > jmax):\n jmaxth = jmax\n planck = 15.0 / (np.pi * tempbb)**4\n dphdot[:jmaxth] = planck * x[:jmaxth]**2 / (np.exp(x[:jmaxth] / tempbb)-1)\n\n #c compute beta array, the probability of escape per Thomson time.\n #c bet evaluated for spherical geometry and nearly uniform sources.\n #c Between x = 0.1 and 1.0, a function flz modifies beta to allow\n #c the increasingly large energy change per scattering to gradually\n #c eliminate spatial diffusion\n jnr = int(np.log10(0.10 / xmin) / delta + 1)\n jnr = min(jnr, jmax - 1)\n jrel = int(np.log10(1 / xmin) / delta + 1)\n jrel = min(jrel, jmax)\n xnr = x[jnr - 1]\n xr = x[jrel - 1]\n for j in range(0, jnr - 1):\n taukn = tautom * rel[j]\n bet[j] = 1.0 / tautom / (1.0 + taukn / 3.0)\n for j in range(jnr - 1, jrel):\n taukn = tautom * rel[j]\n arg = (x[j] - xnr) / (xr - xnr)\n flz = 1 - arg\n bet[j] = 1.0 / tautom / (1.0 + taukn / 3.0 * flz)\n for j in range(jrel, jmax):\n bet[j] = 1.0 / tautom\n\n dphesc = _thermlc(tautom, theta, deltal, x, jmax, dphdot, bet, c2)\n\n #c the spectrum in E F_E\n for j in range(0, jmax - 1):\n sptot[j] = dphesc[j] * x[j]**2\n\n return x, jmax, sptot",
"def calculate_speed(ds):\n grid = xgcm.Grid(ds, periodic=[\"Y\", \"X\"])\n\n U_cc = grid.interp(ds.vozocrtx, \"X\", to=\"center\")\n V_cc = grid.interp(ds.vomecrty, \"Y\", to=\"center\")\n\n speed = (U_cc**2 + V_cc**2)**0.5\n\n return speed",
"def _generate_C(self, mute=True):\n\n tstart = clock()\n\n omega = self.omega\n c = self.unit_system['c']\n self.C = np.empty((self.ny, self.nx), dtype='complex')\n\n if self.polarization == 'O':\n self.C = omega*omega/(c*c) * self.deps[2,2]\n\n else:\n S = np.real(self.eps0[0,0])\n D = np.imag(self.eps0[1,0])\n S2 = S*S\n D2 = D*D\n self.C = omega*omega/(c*c) * ( D2*self.deps[0,0] + \\\n 1j*D*S*(self.deps[1,0]-self.deps[0,1]) + S2*self.deps[1,1] ) / S2\n\n tend = clock()\n\n if not mute:\n print('Operator C generated. Time used: {:.3}'.format(tend-tstart),\n file=sys.stdout)",
"def _thermlc(tautom, theta, deltal, x, jmax, dphdot, bet, c2):\n dphesc = np.zeros(900) # Initialise the output\n a = np.zeros(900); b = np.zeros(900); c = np.zeros(900)\n d = np.zeros(900); alp = np.zeros(900); u = np.zeros(900)\n g = np.zeros(900); gam = np.zeros(900)\n\n #c u(x) is the dimensionless photon occupation number\n c20 = tautom / deltal\n\n #c determine u\n #c define coefficients going into equation\n #c a(j) * u(j + 1) + b(j) * u(j) + c(j) * u(j - 1) = d(j)\n for j in range(1, jmax - 1):\n w1 = np.sqrt( x[j] * x[j + 1] )\n w2 = np.sqrt( x[j - 1] * x[j] )\n #c w1 is x(j + 1 / 2)\n #c w2 is x(j - 1 / 2)\n a[j] = -c20 * c2[j] * (theta / deltal / w1 + 0.5)\n t1 = -c20 * c2[j] * (0.5 - theta / deltal / w1)\n t2 = c20 * c2[j - 1] * (theta / deltal / w2 + 0.5)\n t3 = x[j]**3 * (tautom * bet[j])\n b[j] = t1 + t2 + t3\n c[j] = c20 * c2[j - 1] * (0.5 - theta / deltal / w2)\n d[j] = x[j] * dphdot[j]\n\n #c define constants going into boundary terms\n #c u(1) = aa * u(2) (zero flux at lowest energy)\n #c u(jx2) given from region 2 above\n x32 = np.sqrt(x[0] * x[1])\n aa = (theta / deltal / x32 + 0.5) / (theta / deltal / x32 - 0.5)\n\n #c zero flux at the highest energy\n u[jmax - 1] = 0.0\n\n #c invert tridiagonal matrix\n alp[1] = b[1] + c[1] * aa\n gam[1] = a[1] / alp[1]\n for j in range(2, jmax - 1):\n alp[j] = b[j] - c[j] * gam[j - 1]\n gam[j] = a[j] / alp[j]\n g[1] = d[1] / alp[1]\n for j in range(2, jmax - 2):\n g[j] = (d[j] - c[j] * g[j - 1]) / alp[j]\n g[jmax - 2] = (d[jmax - 2] - a[jmax - 2] * u[jmax - 1] \n - c[jmax - 2] * g[jmax - 3]) / alp[jmax - 2]\n u[jmax - 2] = g[jmax - 2]\n for j in range(2, jmax + 1):\n jj = jmax - j\n u[jj] = g[jj] - gam[jj] * u[jj + 1]\n u[0] = aa * u[1]\n #c compute new value of dph(x) and new value of dphesc(x)\n dphesc[:jmax] = x[:jmax] * x[:jmax] * u[:jmax] * bet[:jmax] * tautom\n\n return dphesc",
"def speed_cmd(self, vc):\n self.v_c = vc",
"def _CoM_Velocity(field, data):\n try:\n TM = np.sum(data['gas', 'mass'].in_units('g'))\n x_top = np.sum(data['gas', 'mass'].in_units('g')*data['flash','velx'].in_units('cm/s'))\n y_top = np.sum(data['gas', 'mass'].in_units('g')*data['flash','vely'].in_units('cm/s'))\n z_top = np.sum(data['gas', 'mass'].in_units('g')*data['flash','velz'].in_units('cm/s'))\n if ('all', 'particle_mass') in data.ds.field_list:\n TM = TM + np.sum(data['particle_mass'].in_units('g'))\n x_top = x_top + np.sum(data['all', 'particle_mass'].in_units('g')*data['all', 'particle_velx'].in_units('cm/s'))\n y_top = y_top + np.sum(data['all', 'particle_mass'].in_units('g')*data['all', 'particle_vely'].in_units('cm/s'))\n z_top = z_top + np.sum(data['all', 'particle_mass'].in_units('g')*data['all', 'particle_velz'].in_units('cm/s'))\n com = [(x_top/TM), (y_top/TM), (z_top/TM)]\n com = yt.YTArray(com, 'cm/s')\n del x_top\n del y_top\n del z_top\n del TM\n except:\n com = yt.YTArray([0.0, 0.0, 0.0], 'cm/s')\n return com",
"def kmh_to_mph(speed_kmh):\n velocity = speed_kmh * 1.60934\n return velocity",
"def calculate_center_mass_speed_1(reduced_mass, temperature=dfl[\n 'values']['temperature']):\n\n return (8 * dfl['conversions']['kg2g'] * const.R * temperature / (\n const.pi * reduced_mass))** 0.5",
"def get_coupling(self, imol1, imol2, mol_crds, mol_cols,\n\t\t\t\t\t config_filepath=False, xyz_filepath=False):\n\t\tt1 = time.time()\n\t\t# Get mol crds\n\t\tdimer_crds = mol_crds[[imol1, imol2]]\n\t\tdimer_cols = mol_cols[[imol1, imol2]]\n\n\t\tnatom = np.shape(dimer_crds)[1] * 2\n\t\tt2 = time.time()\n\n\t\t# Write dimer as xyz file\n\t\tif xyz_filepath is False: xyz_filepath = \"tmp_dimer.xyz\"\n\t\tself._write_xyz_dimer_(dimer_crds, dimer_cols, xyz_filepath)\n\t\tt3 = time.time()\n\n\t\t# Create the config file\n\t\tif config_filepath is False:\n\t\t\tconfig_filepath = \"tmp_config.txt\"\n\t\t\tunique_elm = self.__get_unique_elements__(mol_cols)\n\t\t\tself._create_config_file_(xyz_filepath, config_filepath, unique_elm)\n\n\t\texe_filepath = self.metadata['coupling_calc_exe']\n\n\t\texe_cmd = [exe_filepath, config_filepath]\n\t\tres = subprocess.run(exe_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tif res.stderr:\n\t\t\traise SystemExit(f\"Bad mol {imol1} {imol2}\")\n\t\tt4 = time.time()\n\n\t\tself.times.setdefault('getCrds', []).append(t2 - t1)\n\t\tself.times.setdefault('writeCrds', []).append(t3 - t2)\n\t\tself.times.setdefault('calcCoupl', []).append(t4 - t3)\n\n\t\treturn float(res.stdout.decode(\"utf-8\"))",
"def test_car_speed2(self):\n man = Car('Mercedes', 'SLR500')\n parked_speed = man.speed\n moving_speed = man.drive(3).speed\n\n self.assertListEqual([parked_speed, moving_speed], [0, 1000],\n msg='The Mercedes should have speed 0 km/h until '+\n 'you put `the pedal to the metal`')",
"def calculate_C_p(tip_speed_ratio):\n a_min = get_induction_factor(0.0)\n a_max = get_induction_factor(tip_speed_ratio)\n\n # Calculate integral\n integral = lambda a: ((1 - a) * (1 - 2 * a) * (1 - 4 * a) / (1 - 3 * a)) ** 2\n a = np.linspace(a_min, a_max, 100000)\n da = a[1] - a[0]\n dCp = integral(a) * da\n\n Cp = np.sum(dCp) * 24.0 / tip_speed_ratio ** 2\n return Cp",
"def C_and_Z_path(A, N1, N2, Z0, Ct, MKMH_option=False):\n Z_path = np.zeros_like(Z0)\n Z_path = np.hstack([Z_path, Z0])\n \n for t in range(T):\n Z = np.matmul(A,Z0)\n Z_path = np.hstack([Z_path,Z])\n Z0 = Z\n \n Z_path = Z_path[:,1:] \n C_path = np.zeros(T)\n KH_path = Z_path[:2,:] \n X_path = Z_path[2:,:] \n MKMH_path = np.matmul(N1,KH_path) + np.matmul(N2,X_path) \n \n for t in range(T-1):\n MK1 = MKMH_path[0,t+1]\n MH1 = MKMH_path[1,t+1]\n H = KH_path[1,t]\n X11 = X_path[0,t+1]\n X2 = X_path[2,t+1]\n MKt1, MHt1, Ht, X1t1, X2t = symbols('MKt1 MHt1 Ht X1t1 X2t')\n \"\"\"\n Ct is the explicit formula of consumption ratio process \n imported from function: solve_habit_persistence\n \"\"\"\n C = Ct.subs([(MKt1,MK1),\n (MHt1,MH1),\n (Ht,H),\n (X1t1,X11),\n (X2t,X2)])\n C_path[t] = C\n \n if MKMH_option: \n return C_path, Z_path, MKMH_path\n else:\n return C_path, Z_path",
"def CalculateChi2(mol):\r\n return _CalculateChinp(mol, NumPath=2)",
"def p2c(self, p):\n return complex(p[0] * self.delta.real, p[1] * self.delta.imag) + self.start"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
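A short usage sketch for the CJspeed2 entry above, assuming Cantera is installed, that the named mechanism file is available (older Cantera releases ship gri30.cti, newer ones gri30.yaml), and that equilSoundSpeeds and FHFP_CJ2 from the neighbouring entries are in scope. The mixture and initial conditions are illustrative.

# Illustrative only: stoichiometric H2/O2 detonation from ambient conditions.
P1 = 101325.0          # initial pressure, Pa
T1 = 300.0             # initial temperature, K
q = 'H2:2.0, O2:1.0'   # reactant mole fractions
mech = 'gri30.cti'     # Cantera mechanism file

gas_cj, cj_speed = CJspeed2(P1, T1, q, mech)
print('CJ speed: %.1f m/s' % cj_speed)
print('CJ temperature: %.1f K' % gas_cj.T)
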
FHFP_CJ2 Uses the momentum and energy conservation equations and the equilibrium sound speed to calculate error in current pressure and enthalpy guesses. In this case, state 2 is in equilibrium. FUNCTION SYNTAX [FH,FP,cj_speed] = FHFP_CJ2(gas,gas1,gas2) INPUT gas = working gas object gas1 = gas object at initial state gas2 = dummy gas object (for calculating numerical derivatives) OUTPUT FH,FP = error in enthalpy and pressure cj_speed = CJ detonation speed (m/s) | def FHFP_CJ2(gas,gas1,gas2):
P1 = gas1.P
H1 = gas1.enthalpy_mass
r1 = gas1.density
P2 = gas.P
H2 = gas.enthalpy_mass
r2 = gas.density
speeds = equilSoundSpeeds(gas2)
w2s=(speeds[0])**2
w1s = w2s*(r2/r1)**2
FH = H2 + 0.5*w2s - (H1 + 0.5*w1s)
FP = P2 + r2*w2s - (P1 + r1*w1s)
return [FH, FP, sqrt(w1s)] | [
"def CJspeed2(P1, T1, q, mech):\n \n gas2 = Solution(mech)\n gas1 = Solution(mech)\n gas = Solution(mech)\n\n #INTIAL CONDITIONS\n gas.TPX = T1, P1, q;\n gas1.TPX = T1, P1, q;\n gas2.TPX = T1, P1, q; \n \n #INITIALIZE ERROR VALUES & CHANGE VALUES\n ERRFT = 1.0*10**-4; ERRFV = 1.0*10**-4;\n\n r1 = gas1.density; V1 = 1/r1;\n P1 = gas1.P; T1 = gas1.T;\n i = 0;\n #PRELIMINARY GUESS\n Vg = V1/10; rg = 1/Vg; \n \n gas.TD = T1,rg; \n gas.equilibrate('UV')\n Tg = gas.T; \n gas2.TDX = Tg, rg, gas.X\n \n #SAVE STATE\n V = Vg; r = rg;\n T = Tg;\n deltaT = 1000; deltaV = 1000; cj_speed = 0;\n #START LOOP\n while(abs(deltaT) > ERRFT*T or abs(deltaV) > ERRFV*V):\n i = i + 1\n if i == 500:\n print \"CJ speed 2 calc did not converge\"\n return gas\n \n #CALCULATE FH & FP FOR GUESS 1\n [FH,FP,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n\n\n #TEMPERATURE PERTURBATION\n DT = T*0.01; Tper = T + DT;\n Vper = V; Rper = 1/Vper;\n \n gas.TD = Tper, Rper\n gas.equilibrate('TV',2)\n gas2.TDX = Tper, Rper, gas.X\n\n #CALCULATE FHX & FPX FOR \"IO\" STATE\n [FHX,FPX,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n #ELEMENTS OF JACOBIAN\n DFHDT = (FHX-FH)/DT; DFPDT = (FPX-FP)/DT;\n\n #VOLUME PERTURBATION\n DV = 0.01*V; Vper = V + DV;\n Tper = T; Rper = 1/Vper;\n \n gas.TD = Tper, Rper\n gas.equilibrate('TV',2)\n gas2.TDX = Tper, Rper, gas.X\n \n #CALCULATE FHX & FPX FOR \"IO\" STATE\n [FHX,FPX,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n #ELEMENTS OF JACOBIAN\n DFHDV = (FHX-FH)/DV; DFPDV = (FPX-FP)/DV;\n\n #INVERT MATRIX\n J = DFHDT*DFPDV - DFPDT*DFHDV\n b = [DFPDV, -DFHDV, -DFPDT, DFHDT]\n a = [-FH, -FP]\n deltaT = (b[0]*a[0]+b[1]*a[1])/J; deltaV = (b[2]*a[0]+b[3]*a[1])/J;\n\n #CHECK & LIMIT CHANGE VALUES\n #TEMPERATURE\n DTM = 0.2*T\n if abs(deltaT) > DTM:\n deltaT = DTM*deltaT/abs(deltaT)\n #VOLUME\n V2X = V + deltaV\n if V2X > V1:\n DVM = 0.5*(V1 - V)\n else:\n DVM = 0.2*V\n if abs(deltaV) > DVM:\n deltaV = DVM*deltaV/abs(deltaV)\n #MAKE THE CHANGES\n T = T + deltaT; V = V + deltaV; r = 1/V;\n gas.TD = T, r\n gas.equilibrate('TV',2)\n gas2.TDX = T, r, gas.X\n\n [FH,FP,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n \n return [gas,cj_speed]",
"def PCO2_calculate_new(self,T0,P0=3.0e4,buf=0):\n #First calculate the temperature\n P=P0 #Pressure in bar\n T=T0+273.0 #self.adiabat(r) #Temperature at the freezing front\n #Calculate log10K1\n logK1=40.07639-T*2.53932*1.0e-2+T*T*5.27096*1.0e-6 + 0.0267*(P-1.0)/T\n #calculate log10K2\n logK2=-6.24763-282.56/T-0.119242*(P-1.0e3)/T\n #Calculate the equilibrium constant Kp from P and T\n #Calculate the equilibrium constant K' eq. A1 of Holloway et al. 1992\n logKp=33.82876-(163.291+0.092542*P)/T-T*2.53932*1.0e-2+T*T*5.27096*1.0e-6\n logKP2=logK1+logK2\n \n #-(163.291+0.092542*P)/T-(2.53932*1.0e-2)*T + T*T*(5.27096*1.0e-6)\n #Calculate fO2 from O'neil 1987, need to provide pressure in bars\n # and temperature in Kelvin\n \n # equation for log10fO2 for IW buffer\n # Eq A8 of Holloway et al., 1992\n logfO2=self.fO2_IW(T,P0,buf)\n #Calculate log10Kf\n #p. 112 of Holloway et al. 1992\n logKf=logKp+logfO2\n Kf=np.exp(2.303*logKf)\n #mole fraction of CO3 in melt\n XCO3_melt=Kf/(1.0+Kf)\n #wt% of CO3 dissolved in melt\n wco2=(120.2656*XCO3_melt)/(1.0-0.202656*XCO3_melt)\n #concentration in fraction\n c=wco2/100.0\n #To calculate CO2 partial pressure calculate K1\n #Equation A1(a) of Holloway et al.\n \n \n #40.07639-2.53932e-2*T+(5.27096e-6)*T*T+0.0267*(P-1000.0)/T\n K1=np.exp(2.303*logK1)\n fO2=np.exp(2.303*logfO2) #Eq 3 from Holloway et al 1992\n PCO2=K1*fO2 #fCO2=PCO2 in bars\n #Calculate PCO2 from Pawley et al. 1992, page 223\n #XCO2_melt(ppm)= 0.492*fCO2\n # Assuming PCO2 is fCO2\n #PCO2=(XCO3_melt*1.0e6)/0.492\n return(PCO2,c,fO2)",
"def _thcompton(tempbb, theta, gamma):\n #c use internally Thomson optical depth\n tautom = np.sqrt(2.250 + 3.0 / (theta * ((gamma + .50)**2 - 2.250))) - 1.50\n\n # Initialise arrays\n dphdot = np.zeros(900); rel = np.zeros(900); c2 = np.zeros(900)\n sptot = np.zeros(900); bet = np.zeros(900); x = np.zeros(900)\n\n #c JMAX - # OF PHOTON ENERGIES\n #c delta is the 10 - log interval of the photon array.\n delta = 0.02\n deltal = delta * np.log(10.0)\n xmin = 1e-4 * tempbb\n xmax = 40.0 * theta\n jmax = min(899, int(np.log10(xmax / xmin) / delta) + 1)\n\n #c X - ARRAY FOR PHOTON ENERGIES\n # Energy array is normalized by 511 keV, the rest energy of an electron\n x[:(jmax + 1)] = xmin * 10.0**(np.arange(jmax + 1) * delta)\n\n #c compute c2(x), and rel(x) arrays\n #c c2(x) is the relativistic correction to Kompaneets equation\n #c rel(x) is the Klein - Nishina cross section divided by the\n #c Thomson crossection\n for j in range(0, jmax):\n w = x[j]\n #c c2 is the Cooper's coefficient calculated at w1\n #c w1 is x(j + 1 / 2) (x(i) defined up to jmax + 1)\n w1 = np.sqrt(x[j] * x[j + 1])\n c2[j] = (w1**4 / (1.0 + 4.60 * w1 + 1.1 * w1 * w1))\n if (w <= 0.05):\n #c use asymptotic limit for rel(x) for x less than 0.05\n rel[j] = (1.0 - 2.0 * w + 26.0 * w * w * 0.2)\n else:\n z1 = (1.0 + w) / w**3\n z2 = 1.0 + 2.0 * w\n z3 = np.log(z2)\n z4 = 2.0 * w * (1.0 + w) / z2\n z5 = z3 / 2.0 / w\n z6 = (1.0 + 3.0 * w) / z2 / z2\n rel[j] = (0.75 * (z1 * (z4 - z3) + z5 - z6))\n\n #c the thermal emission spectrum\n jmaxth = min(900, int(np.log10(50 * tempbb / xmin) / delta))\n if (jmaxth > jmax):\n jmaxth = jmax\n planck = 15.0 / (np.pi * tempbb)**4\n dphdot[:jmaxth] = planck * x[:jmaxth]**2 / (np.exp(x[:jmaxth] / tempbb)-1)\n\n #c compute beta array, the probability of escape per Thomson time.\n #c bet evaluated for spherical geometry and nearly uniform sources.\n #c Between x = 0.1 and 1.0, a function flz modifies beta to allow\n #c the increasingly large energy change per scattering to gradually\n #c eliminate spatial diffusion\n jnr = int(np.log10(0.10 / xmin) / delta + 1)\n jnr = min(jnr, jmax - 1)\n jrel = int(np.log10(1 / xmin) / delta + 1)\n jrel = min(jrel, jmax)\n xnr = x[jnr - 1]\n xr = x[jrel - 1]\n for j in range(0, jnr - 1):\n taukn = tautom * rel[j]\n bet[j] = 1.0 / tautom / (1.0 + taukn / 3.0)\n for j in range(jnr - 1, jrel):\n taukn = tautom * rel[j]\n arg = (x[j] - xnr) / (xr - xnr)\n flz = 1 - arg\n bet[j] = 1.0 / tautom / (1.0 + taukn / 3.0 * flz)\n for j in range(jrel, jmax):\n bet[j] = 1.0 / tautom\n\n dphesc = _thermlc(tautom, theta, deltal, x, jmax, dphdot, bet, c2)\n\n #c the spectrum in E F_E\n for j in range(0, jmax - 1):\n sptot[j] = dphesc[j] * x[j]**2\n\n return x, jmax, sptot",
"def JacInv_CF(self) -> ngsolve.fem.CoefficientFunction:",
"def CalculateChi2(mol):\r\n return _CalculateChinp(mol, NumPath=2)",
"def fo2_2F(cm,t,p,lnfo2,model=\"kc1991\"):\n\n if model == \"kc1991\":\n return ferric.kc91_fo2(cm,t,p,lnfo2)\n elif model == \"r2013\":\n return ferric.r2013_fo2(cm, t, p, lnfo2)",
"def ret_motion_jacobian(prev_th, trans_speed):\n motion_jacobian = np.zeros((3,3))\n I = np.eye(3)\n term_2 = np.zeros((3,3))\n term_2[0,2] = -trans_speed * np.sin(prev_th)\n term_2[1,2] = trans_speed * np.cos(prev_th)\n dt = 0.1\n motion_jacobian = I + dt * term_2\n \n return motion_jacobian",
"def compute_coupling_coefficients(self, fla1, fla2,\n flb1=None, flb2=None,\n lmax=None, n_iter=3,\n l_toeplitz=-1,\n l_exact=-1, dl_band=-1,\n spin0_only=False):\n if flb1 is None:\n flb1 = fla1\n if flb2 is None:\n flb2 = fla2\n\n if self.wsp is not None:\n lib.covar_workspace_free(self.wsp)\n self.wsp = None\n\n ns = fla1.fl.cs.n_eq\n if (fla2.fl.cs.n_eq != ns) or (flb1.fl.cs.n_eq != ns) or \\\n (flb2.fl.cs.n_eq != ns):\n raise ValueError(\"Everything should have the same resolution!\")\n\n if lmax is None:\n lmax = lib.get_lmax_from_cs_py(fla1.fl.cs)\n\n _toeplitz_sanity(l_toeplitz, l_exact, dl_band, lmax, fla1, flb1)\n self.wsp = lib.covar_workspace_init_py(fla1.fl, fla2.fl, flb1.fl,\n flb2.fl, lmax, n_iter,\n l_toeplitz, l_exact, dl_band,\n int(spin0_only))",
"def ccor(ts1, ts2):\r\n f1 = nfft.fft(list(iter(ts1)))\r\n f2 = nfft.fft(np.flipud(list(iter(ts2))))\r\n cc = np.real(nfft.ifft(f1 * f2)) / (abs(ts1) * abs(ts2))\r\n return cc",
"def _hyp2f1_recurrence(a, b, c, z):\n # TODO\n # fails with (200.0, 101.0, 401.6, 1.1)\n assert b % 1.0 == 0.0 and b >= 0\n assert np.abs(c) >= np.abs(a)\n assert 2.0 > z > 1.0 # TODO: generalize\n f0 = 1.0\n f1 = 1 - a * z / c\n s0 = 1.0\n s1 = np.sign(f1)\n g0 = np.zeros(4) # df/da df/db df/dc df/dz\n g1 = np.array([-z / c, 0.0, a * z / c**2, -a / c]) / f1\n p0 = 0.0 # d2f/dz2\n p1 = 0.0\n f0 = np.log(np.abs(f0))\n f1 = np.log(np.abs(f1))\n if b == 0:\n return f0, s0, g0[0], g0[1], g0[2], g0[3], p0\n if b == 1:\n return f1, s1, g1[0], g1[1], g1[2], g1[3], p1\n for n in range(1, int(b)):\n ak = n * (z - 1) / (c + n)\n dak = np.array([0.0, 0.0, -ak / (c + n), ak / (z - 1)])\n bk = (2 * n + c - z * (a + n)) / (c + n)\n dbk = np.array([-z / (c + n), 0.0, (1 - bk) / (c + n), -(a + n) / (c + n)])\n u = s0 * np.exp(f0 - f1)\n v = s1 * bk + u * ak\n s = np.sign(v)\n f = np.log(np.abs(v)) + f1\n g = (g1 * bk * s1 + g0 * u * ak + dbk * s1 + dak * u) / v\n p = (\n p1 * bk * s1\n + p0 * u * ak\n + 2 / (c + n) * (u * g0[3] * n - s1 * g1[3] * (a + n))\n ) / v\n f1, f0 = f, f1\n s1, s0 = s, s1\n g1, g0 = g, g1\n p1, p0 = p, p1\n if not _is_valid_2f1(g[3], p, a, -b, c, z):\n raise Invalid2F1(\"Hypergeometric series did not converge\")\n da, db, dc, dz = g\n return f, s, da, db, dc, dz, p",
"def calc_mph_chl(mph_0, mph_1, sipf, sicf, bair, ndvi, rmax_1, lambda_rmax_1, lambda_rmax_0, rmax_0, mph_floatthres, mph_cyanomax):\n \n if lambda_rmax_1 == 753:\n print('Right side of if-condition.')\n # MPH >= 0.02 or NDVI >0.2\n if (mph_1 >= 0.02 or ndvi >= 0.2):\n float_flag = 1\n adj_flag = 0\n # SICF < 0 and SIPF > 0\n if (sicf < 0 and sipf > 0):\n cyano_flag=1\n print('Flag: floating cyanobacteria true')\n chl_mph = 22.44 * math.exp(35.79 * mph_1)\n print('CHL MPH is: ' + str(chl_mph))\n if chl_mph > mph_floatthres:\n float_flag=1\n print('Floating cyanobacteria')\n else:\n print('Immersed cyanobacteria')\n # SICF >=0 or SIPF <=0 \n elif (sicf >= 0 or sipf <= 0):\n cyano_flag = 0\n chl_mph = np.nan\n print('Floating vegetation')\n \n # Continuation right side\n elif (mph_1 < 0.02 and ndvi < 0.2):\n float_flag = 0\n adj_flag = 1\n print('Flag: adjacent true')\n cyano_flag = 0\n print('Immersed eukaryotes')\n \n chl_mph = 5.24 * 10 ** 9 * mph_0 ** 4 - 1.95 * 10 ** 8 * mph_0 ** 3 + 2.46 * 10 ** 6 * mph_0 ** 2 + 4.02 * 10 ** 3 * mph_0 + 1.97\n \n # Left side of if-condition\n else:\n print('Left side of if-condition.')\n float_flag = 0\n adj_flag = 0\n \n # Left side of 2nd if-condition\n if (sicf >= 0 or sipf <= 0 or bair <= 0.002):\n print('Left 2nd if-condition')\n cyano_flag=0\n print('Immersed eukaryotes')\n chl_mph = 5.24 * 10 ** 9 * mph_0 ** 4 - 1.95 * 10 ** 8 * mph_0 ** 3 + 2.46 * 10 ** 6 * mph_0 ** 2 + 4.02 * 10 ** 3 * mph_0 + 1.97\n \n # Right side of 2nd if-condition\n elif (sicf <= 0 and sipf > 0 and bair > 0.002):\n print('Right 2nd if-condition')\n cyano_flag = 1\n print('Flag: cyanobacteria true')\n chl_mph = 22.44 * math.exp(35.79 * mph_1)\n if chl_mph > mph_floatthres:\n float_flag = 1\n print('Floating cyanobacteria')\n if chl_mph > mph_cyanomax:\n chl_mph = mph_cyanomax\n print('MPH chl maximum reached.')\n\n return chl_mph, cyano_flag, float_flag, adj_flag",
"def calc_proposed_HF_cost (self):\n self.proposed_HF_cost = np.zeros(self.project_life)\n fuel_cost = self.diesel_prices + self.cd['heating fuel premium']# $/gal\n wood_price = self.cd['cordwood price']\n # are there ever o&m costs\n # $/gal * gal/yr = $/year\n self.proposed_HF_cost += \\\n self.proposed_fuel_Hoil_consumption * fuel_cost +\\\n self.proposed_fuel_biomass_consumption * wood_price",
"def calc_coulomb_cck(g_psi0, g_chi, ir, Z, ene):\n\n mat_00 = calc_mat_complex(g_psi0, False)\n v0 = calc_v_mat(g_psi0, g_psi0, [0,0,0], Z)\n\n c0_chi = calc_mat(g_psi0, g_chi, False)[\"s\"][ir, ir].col(0)\n\n res = calc_coulomb_cck_mat(mat_00[\"s\"][ir, ir],\n mat_00[\"t\"][ir, ir] + v0[ir, ir],\n ene, c0_chi)\n return res\n \n \"\"\"\n L0 = (mat_00[\"s\"][irrep,irrep]*ene\n -mat_00[\"t\"][irrep,irrep]\n -v0[irrep, irrep])\n\n # compute coefficient of psi0\n c0_psi0 = la.solve(L0, c0_chi)\n\n # see the comment in the below function \"one_e_pi\"\n im_psi0_chi = np.dot(c0_psi0, c0_chi).imag\n\n k = np.sqrt(ene*2.0)\n sign = im_psi0_chi / abs(im_psi0_chi) \n c0_npsi = sign * np.sqrt(k/2.0) / np.sqrt(sign * im_psi0_chi) * c0_psi0\n #c0_npsi = -np.sqrt(k/2.0) / np.sqrt(-im_psi0_chi) * c0_psi0\n return c0_npsi\n \"\"\"",
"def step_BDF2(self, tres, yres, h):\n alpha = [3./2., -2., 1./2]\n f = self.problem.rhs\n\n t_np1 = tres[-1] + h\n result = fsolve(lambda y: alpha[0] * y +\n alpha[1] * yres[-1] +\n alpha[2] * yres[-2] -\n h * f(t_np1, y),\n yres[-1],\n xtol=self.tol,\n full_output=1)\n if result[2] == 1:\n y_np1 = result[0]\n self.statistics[\"nfcns\"] += result[1]['nfev']\n return t_np1, y_np1\n else:\n raise Explicit_ODE_Exception('fsolve did not find a solution')",
"def planckqf(frequency, temperature):\n return pconst.c1nf * frequency**2 / (numpy.exp(pconst.c2f * frequency \\\n / temperature)-1);",
"def calc_r2eff(self):\n\n # Assemble param vector.\n self.params = self.assemble_param_vector(r2=self.r2, r2a=self.r2a, r2b=self.r2b, dw=self.dw, pA=self.pA, kex=self.kex, spins_params=self.spins_params)\n\n # Make nested list arrays of data. And return them.\n values, errors, cpmg_frqs, missing, frqs, exp_types, relax_times, offsets = self.return_r2eff_arrays()\n\n # Unpack the parameter values.\n # Initialise the post spin parameter indices.\n end_index = []\n # The spin and frequency dependent R2 parameters.\n end_index.append(len(self.exp_type) * self.num_spins * len(self.fields))\n if self.model in [\"CR72 full\"]:\n end_index.append(2 * len(self.exp_type) * self.num_spins * len(self.fields))\n # The spin and dependent parameters (phi_ex, dw, padw2).\n end_index.append(end_index[-1] + self.num_spins)\n\n # Unpack the parameter values.\n R20 = self.params[:end_index[1]].reshape(self.num_spins*2, len(self.fields))\n R20A = R20[::2].flatten()\n R20B = R20[1::2].flatten()\n dw = self.params[end_index[1]:end_index[2]]\n pA = self.params[end_index[2]]\n kex = self.params[end_index[2]+1]\n\n # Copy value structure\n self.back_calc = deepcopy(values)\n\n # Setup special numpy array structures, for higher dimensional computation.\n # Get the shape of back_calc structure.\n back_calc_shape = list( asarray(self.back_calc).shape )[:4]\n\n # Find which frequency has the maximum number of disp points.\n # To let the numpy array operate well together, the broadcast size has to be equal for all shapes.\n self.max_num_disp_points = max(self.num_disp_points)\n\n # Create numpy arrays to pass to the lib function.\n # All numpy arrays have to have same shape to allow to multiply together.\n # The dimensions should be [ei][si][mi][oi][di]. [Experiment][spins][spec. 
frq][offset][disp points].\n # The number of disp point can change per spectrometer, so we make the maximum size.\n self.R20A_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.R20B_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.dw_frq_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.cpmg_frqs_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.num_disp_points_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.back_calc_a = ones(back_calc_shape + [self.max_num_disp_points])\n\n # Loop over the spins.\n for si in range(self.num_spins):\n # Loop over the spectrometer frequencies.\n for mi in range(len(self.fields)):\n # Extract number of dispersion points.\n num_disp_points = self.num_disp_points[mi]\n\n # Extract cpmg_frqs and num_disp_points from lists.\n self.cpmg_frqs_a[0][si][mi][0][:num_disp_points] = cpmg_frqs[0][mi][0]\n self.num_disp_points_a[0][si][mi][0][:num_disp_points] = self.num_disp_points[mi]\n\n # Now calculate.\n\n # Loop over the spins.\n for si in range(self.num_spins):\n # Loop over the spectrometer frequencies.\n for mi in range(len(self.fields)):\n # Extract number of dispersion points.\n num_disp_points = len(cpmg_frqs[0][mi][0])\n\n # The R20 index.\n r20_index = mi + si*len(self.fields)\n\n # Store r20a and r20b values per disp point.\n self.R20A_a[0][si][mi][0] = array( [R20A[r20_index]] * self.max_num_disp_points, float64)\n self.R20B_a[0][si][mi][0] = array( [R20B[r20_index]] * self.max_num_disp_points, float64)\n\n # Convert dw from ppm to rad/s.\n dw_frq = dw[si] * frqs[0][si][mi]\n\n # Store dw_frq per disp point.\n self.dw_frq_a[0][si][mi][0] = array( [dw_frq] * self.max_num_disp_points, float64)\n\n ## Back calculate the R2eff values.\n r2eff_CR72(r20a_orig=self.R20A_a, r20b_orig=self.R20B_a, dw_orig=self.dw_frq_a, r20a=self.R20A_a, r20b=self.R20B_a, pA=pA, dw=self.dw_frq_a, kex=kex, cpmg_frqs=self.cpmg_frqs_a, back_calc=self.back_calc_a)\n\n # Now return the values back to the structure of self.back_calc object.\n ## For all missing data points, set the back-calculated value to the measured values so that it has no effect on the chi-squared value.\n # Loop over the spins.\n for si in range(self.num_spins):\n # Loop over the spectrometer frequencies.\n for mi in range(len(self.fields)):\n # Extract number of dispersion points.\n num_disp_points = self.num_disp_points[mi]\n\n # Extract the value\n self.back_calc[0][si][mi][0][:] = self.back_calc_a[0][si][mi][0][:num_disp_points]\n\n # Check values.\n for di in range(num_disp_points):\n self.assertAlmostEqual(self.back_calc[0][si][mi][0][di], self.R20A_a[0][si][mi][0][di])",
"def elastic_energy(c1, c2, R, D):\n\n import numpy as np\n\n rho = R*R / D*D # Volume density of BZO. ** For further calculations, assume rho << 1. **\n\n lambda_1 = (c_1[(1,1)] + c_1[(1,2)]) / c_1[(1,3)]\n lambda_2 = (c_2[(1,1)] + c_2[(1,2)]) / c_2[(1,3)]\n\n # TODO: Does f_3 == f_z?? #####\n f_3 = (C2 - C1) / C1\n f_theta = np.sqrt(2)*f_x\n\n a_0 = (f_3(1+f_theta)+lambda_2*f_theta*(1+f3))/(c_1[(1,3)]*(lambda_1*(1+f_theta) - lambda_2*(1+f_3)))\n # b_0 = # DNE\n\n a_1 = a_0 / (np.ln(D) - np.ln(R))\n b_1 = ( f3 + lambda_1*f_theta ) / (c_2[(1,3)] * ( lambda_1*(1+f_theta)-lambda_2*(1+f_3) ))\n\n\n v_1 = 0.5 * ( c1[(1,1)] + c1[(1,2)] ) - ( 1/c1[(3,3)] ) * ( c1[(1,3)] * c1[(1,3)] )\n v_2 = 0.5 * ( c2[(1,2)] + c2[(1,2)] ) - ( 1/c2[(3,3)] ) * ( c2[(1,3)] * c2[(1,3)] )\n\n E = (c1[(1,1)] + c1[(1,2)])*c[(3,3)]*v_1*((2*(1-rho)+rho*np.ln(rho*(2-np.ln(rho))))/(np.ln(rho)*np.ln(rho)))*a_0*a_0 + (c_2[(1,1)]+c_2[(1,2)])*c_2[(3,3)]*v_2*b_1*b_1*rho",
"def gauss2d_convolve ((bmaj1, bmin1, theta1, bmaj2, bmin2, theta2), ang='deg'):\n from scipy import pi, cos, sin, arctan2, sqrt, log\n #\n # check the ang keyword, if deg, go over to radians from deg\n if ang=='deg':\n theta1 *= pi/180\n theta2 *= pi/180\n else:\n pass\n \n cospa1 = cos(theta1)\n cospa2 = cos(theta2)\n sinpa1 = sin(theta1)\n sinpa2 = sin(theta2)\n \n alpha = (bmaj1*cospa1)**2 + (bmin1*sinpa1)**2 + (bmaj2*cospa2)**2 + (bmin2*sinpa2)**2\n beta = (bmaj1*sinpa1)**2 + (bmin1*cospa1)**2 + (bmaj2*sinpa2)**2 + (bmin2*cospa2)**2\n gamma = 2 * ((bmin1**2-bmaj1**2)*sinpa1*cospa1 + (bmin2**2-bmaj2**2)*sinpa2*cospa2)\n s = alpha + beta\n t = sqrt( (alpha-beta)**2 + gamma**2 )\n bmaj = sqrt( 0.5*(s+t) )\n bmin = sqrt( 0.5*(s-t) )\n if not (abs(gamma)+abs(alpha-beta)):\n bpa = 0.0\n else:\n bpa = 0.5 * arctan2(-gamma,alpha-beta) * R2D\n\n \n \n fac = pi / (4.0*log(2.0)) * bmaj1*bmin1 * bmaj2*bmin2 / sqrt(alpha*beta - 0.25 * gamma*gamma)\n\n success = 0\n \n #~ #\n #~ # define some calculations\n #~ alpha = (bmaj1*cos(theta1))**2 + (bmin1*sin(theta1))**2 - \\\n #~ (bmaj2*cos(theta2))**2 - (bmin2*sin(theta2))**2\n #~ beta = (bmaj1*sin(theta1))**2 + (bmin1*cos(theta1))**2 - \\\n #~ (bmaj2*sin(theta2))**2 - (bmin2*cos(theta2))**2\n #~ gamma = 2 * ( (bmin1**2-bmaj1**2)*sin(theta1)*cos(theta1) -\\\n #~ (bmin2**2-bmaj2**2)*sin(theta2)*cos(theta2) )\n #~ #\n #~ # calculate the intermediate results\n #~ s = alpha + beta\n #~ t = sqrt((alpha-beta)**2 + gamma**2)\n #~ limit = 0.1*min(bmaj1,bmin1, bmaj2, bmin2)**2\n #~ #\n #~ # now check if result is illigal/close to a point source\n #~ if alpha < 0 or beta < 0 or s < t:\n #~ bmaj, bmin, bpa = [0, 0, 0]\n #~ #\n #~ # now check if result is close to a point source\n #~ tmp_par =.5*(s-t)\n #~ if tmp_par < limit and alpha > -limit and beta > -limit:\n #~ success = 1\n #~ #\n #~ # it was not close to point source, but results are thus illigal\n #~ else:\n #~ success = 2\n #~ #\n #~ # since (if) everything is ok, go ahead and calculate the bmaj, bmin & bpa\n #~ else:\n #~ bmaj = sqrt(.5*(s+t))\n #~ bmin = sqrt(.5*(s-t))\n #~ #\n #~ # bpa\n #~ if (abs(gamma)+abs(alpha-beta)) == 0:\n #~ bpa = 0\n #~ else:\n #~ bpa = 0.5 * arctan2(-gamma,(alpha-beta))\n #\n # go back to degrees if asked for\n if ang=='deg':\n bpa *= 180/pi\n #\n # send back the results\n return (bmaj, bmin, bpa, fac, success)",
"def calc_gof(num_dof, chi2):\n\n gof = gammaincc(num_dof/2, chi2/2)\n\n return gof"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
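For reference, the residuals returned by the FHFP_CJ2 entry above are the steady one-dimensional energy and momentum jump conditions, evaluated with the post-wave velocity pinned to the equilibrium sound speed of state 2 (the CJ condition) and the pre-wave velocity fixed by mass conservation:

    F_H = h_2 + \tfrac{1}{2} w_2^2 - \bigl(h_1 + \tfrac{1}{2} w_1^2\bigr), \qquad
    F_P = P_2 + \rho_2 w_2^2 - \bigl(P_1 + \rho_1 w_1^2\bigr), \qquad
    w_2 = a_{eq}, \quad w_1 = w_2 \, \rho_2 / \rho_1,

and the cj_speed it returns is w_1 = sqrt(w1s).
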
equilSoundSpeeds Calculates equilibrium and frozen sound speeds. For the equilibrium sound speed, the gas is equilibrated holding entropy and specific volume constant. FUNCTION SYNTAX [aequil,afrozen] = equilSoundSpeeds(gas) INPUT gas = working gas object (modified inside function) OUTPUT aequil = equilibrium sound speed (m/s) afrozen = frozen sound speed (m/s) | def equilSoundSpeeds(gas):
# set the gas to equilibrium at its current T and P
gas.equilibrate('TP')
# save properties
s0 = gas.entropy_mass
p0 = gas.P
r0 = gas.density
# perturb the density
r1 = r0*1.0001
# set the gas to a state with the same entropy and composition but
# the perturbed density
gas.SV = s0, 1.0/r1
# save the pressure for this case for the frozen sound speed
pfrozen = gas.P
# now equilibrate the gas holding S and V constant
gas.equilibrate("SV")
p1 = gas.P
# equilibrium sound speed
aequil = math.sqrt((p1 - p0)/(r1 - r0));
# frozen sound speed
afrozen = math.sqrt((pfrozen - p0)/(r1 - r0));
return (aequil, afrozen) | [
"def getmixspeeds(self):\n assert not self.refillable # Mixing of refillable wells not supported (unknown volume)\n ptype=self.plate.plateType\n\n if self.isMixed():\n minspeed=0\n elif self.wellMixed:\n minspeed=1000 # Was already mixed, but may have settled or have condensation\n else:\n minspeed=interpolate(ptype.minspeeds,self.volume)\n if minspeed is None:\n assumeSpeed=1900\n logging.notice(\"No shaker min speed data for volume of %.0f ul, assuming %.0f rpm\"%(self.volume,assumeSpeed))\n minspeed=assumeSpeed\n\n maxspeed=interpolate(ptype.maxspeeds,self.volume)\n if maxspeed is None:\n assumeSpeed=1200\n logging.warning(\"No shaker max speed data for volume of %.0f ul, assuming %.0f rpm\"%(self.volume,assumeSpeed))\n maxspeed=assumeSpeed\n \n glycerol=self.glycerolfrac()\n if glycerol>0:\n gmaxspeed=interpolate(ptype.glycerolmaxspeeds,self.volume)\n if gmaxspeed is None:\n logging.warning(\"No shaker max speed data for glycerol with volume of %.0f ul, using no-glycerol speed of %.0f rpm\"%(self.volume,maxspeed))\n gmaxspeed=maxspeed\n\n if glycerol>ptype.glycerol:\n logging.notice(\"Sample %s contains %.1f%% Glycerol (more than tested of %.1f%%)\"%(self.name,glycerol*100,ptype.glycerol*100))\n maxspeed=gmaxspeed\n else:\n maxspeed=maxspeed+(gmaxspeed-maxspeed)*(glycerol/ptype.glycerol)\n if maxspeed<minspeed:\n if maxspeed<minspeed-1:\n logging.notice(\"%s with %.1ful and %.1f%% glycerol has minspeed of %.0f greater than maxspeed of %.0f\"%(self.name,self.volume,glycerol*100,minspeed,maxspeed))\n minspeed=maxspeed\t# Glycerol presence should also reduce minspeed\n return minspeed, maxspeed",
"def calc_sound_speed(temperature=27, salinity=35, pressure=10, formula_source=\"Mackenzie\"):\n if formula_source == \"Mackenzie\":\n ss = 1448.96 + 4.591 * temperature - 5.304e-2 * temperature ** 2 + 2.374e-4 * temperature ** 3\n ss += 1.340 * (salinity - 35) + 1.630e-2 * pressure + 1.675e-7 * pressure ** 2\n ss += -1.025e-2 * temperature * (salinity - 35) - 7.139e-13 * temperature * pressure ** 3\n elif formula_source == \"AZFP\":\n z = temperature / 10\n ss = (1449.05\n + z * (45.7 + z * (-5.21 + 0.23 * z))\n + (1.333 + z * (-0.126 + z * 0.009)) * (salinity - 35.0)\n + (pressure / 1000) * (16.3 + 0.18 * (pressure / 1000)))\n else:\n ValueError(\"Unknown formula source\")\n return ss",
"def airspeedMultiplier(s, obj):\n\n speed = WUps2kts(obj.V.norm())\n return 2.25 / (1 + exp(-0.024 * (speed - 212)))",
"def sound_velocity(C_eff, rho):\n return C_eff / rho",
"def speed_of_sound(self, altitude):\n\n return sqrt(1.4 * 1716.56 * self.temperature(altitude))",
"def energy_avionics(variables):\n # Power [W] requirements by avionics\n # See spreadsheet \"List of EBS items\"\n pfd_mfd = 66\n autopilot = 14\n intercom = 8.4\n portible_instrument_panel = 12\n engine_computer = 6.1\n lights = 28\n cooling_pump = 50\n\n total_power = pfd_mfd + autopilot + intercom + portible_instrument_panel + engine_computer + lights + cooling_pump\n total_time = 2.5*3600 + 2*330 + (914.4/2 + 30) # 2.5h endurance + 2*taxi phase + climb phase\n avionics_energy = total_power * total_time\n return avionics_energy/variables.eff_batt",
"def phonon_quasiharmonic(lammps_command: str,\n ucell: am.System,\n potential: lmp.Potential,\n mpi_command: Optional[str] = None,\n a_mult: int = 2,\n b_mult: int = 2,\n c_mult: int = 2,\n distance: float = 0.01,\n symprec: float = 1e-5,\n strainrange: float = 0.01,\n numstrains: int = 5) -> dict:\n # Get lammps units\n lammps_units = lmp.style.unit(potential.units)\n \n # Get lammps version date\n lammps_date = lmp.checkversion(lammps_command)['date']\n \n # Convert ucell to a primitive cell\n ucell = ucell.dump('primitive_cell', symprec=symprec)\n\n # Get unstrained box vectors\n vects = ucell.box.vects\n\n # Generate the range of strains\n if numstrains == 1:\n zerostrain = phononcalc(lammps_command, ucell, potential,\n mpi_command=mpi_command,\n a_mult=a_mult, b_mult=b_mult, c_mult=c_mult,\n distance=distance, symprec=symprec,\n lammps_date=lammps_date)\n phonons = [zerostrain['phonon']]\n qha = None\n\n elif numstrains % 2 == 0 or numstrains < 5:\n raise ValueError('Invalid number of strains: must be odd and 1 or >= 5')\n else:\n strains = np.linspace(-strainrange, strainrange, numstrains)\n istrains = np.linspace(-(numstrains-1)/2, (numstrains-1)/2, numstrains, dtype=int)\n\n volumes = []\n energies = []\n phonons = []\n temperatures = None\n free_energy = None\n heat_capacity = None\n entropy = None\n\n # Loop over all strains\n for i in range(numstrains):\n strain = strains[i]\n if numstrains != 1:\n istrain = f'_{istrains[i]}'\n else:\n istrain = ''\n\n # Identify the zero strain run\n if istrains[i] == 0:\n zerostrainrun = True\n else:\n zerostrainrun = False\n \n # Generate system at the strain\n newvects = vects * (1 + strain)\n ucell.box_set(vects=newvects, scale=True)\n volumes.append(ucell.box.volume)\n system = ucell.supersize(a_mult, b_mult, c_mult)\n\n # Define lammps variables\n lammps_variables = {}\n system_info = system.dump('atom_data', f='disp.dat',\n potential=potential)\n lammps_variables['atomman_system_pair_info'] = system_info\n\n # Set dump_modify_format based on lammps_date\n if lammps_date < datetime.date(2016, 8, 3):\n lammps_variables['dump_modify_format'] = '\"%d %d %.13e %.13e %.13e %.13e %.13e %.13e\"'\n else:\n lammps_variables['dump_modify_format'] = 'float %.13e'\n\n # Write lammps input script\n lammps_script = 'phonon.in'\n template = read_calc_file('iprPy.calculation.phonon', 'phonon.template')\n with open(lammps_script, 'w') as f:\n f.write(filltemplate(template, lammps_variables, '<', '>'))\n\n # Run LAMMPS\n output = lmp.run(lammps_command, script_name='phonon.in',\n mpi_command=mpi_command)\n\n # Extract system energy\n thermo = output.simulations[0]['thermo']\n energy = uc.set_in_units(thermo.PotEng.values[-1], lammps_units['energy'])\n\n # Scale energy by sizemults and append to list\n energies.append(energy / (a_mult * b_mult * c_mult))\n\n # Compute phonon info for ucell\n phononinfo = phononcalc(lammps_command, ucell, potential, mpi_command=mpi_command,\n a_mult=a_mult, b_mult=b_mult, c_mult=c_mult,\n distance=distance, symprec=symprec, istrain=istrain,\n plot=zerostrainrun, lammps_date=lammps_date)\n phonons.append(phononinfo['phonon'])\n \n # Extract temperature values from the first run\n if temperatures is None:\n temperatures = phononinfo['thermal_properties']['temperatures']\n \n # Initialize QHA input arrays\n free_energy = np.empty((len(temperatures), len(strains)))\n heat_capacity = np.empty((len(temperatures), len(strains)))\n entropy = np.empty((len(temperatures), len(strains)))\n \n # Get values for zerostrainrun\n if 
zerostrainrun is True:\n zerostrain = phononinfo\n \n # Copy values to qha input arrays\n free_energy[:, i] = phononinfo['thermal_properties']['free_energy']\n entropy[:, i] = phononinfo['thermal_properties']['entropy']\n heat_capacity[:, i] = phononinfo['thermal_properties']['heat_capacity']\n \n # Try to compute qha\n try:\n eos = 'vinet'\n qha = phonopy.PhonopyQHA(volumes=volumes,\n electronic_energies=energies,\n temperatures=temperatures,\n free_energy=free_energy,\n cv=heat_capacity, eos=eos,\n entropy=entropy)\n except:\n try:\n eos = 'birch_murnaghan'\n qha = phonopy.PhonopyQHA(volumes=volumes,\n electronic_energies=energies,\n temperatures=temperatures,\n free_energy=free_energy,\n cv=heat_capacity, eos=eos,\n entropy=entropy)\n except:\n qha = None\n \n results = {} \n \n # Add phonopy objects\n results['phonon_objects'] = phonons\n results['qha_object'] = qha\n \n # Extract zerostrain properties\n results['band_structure'] = zerostrain['band_structure']\n results['density_of_states'] = zerostrain['dos']\n\n # Convert units on thermal properties\n results['thermal_properties'] = zerostrain['thermal_properties']\n results['thermal_properties']['temperature'] = results['thermal_properties'].pop('temperatures')\n results['thermal_properties']['Helmholtz'] = uc.set_in_units(results['thermal_properties'].pop('free_energy'), 'kJ/mol')\n results['thermal_properties']['entropy'] = uc.set_in_units(results['thermal_properties'].pop('entropy'), 'J/K/mol')\n results['thermal_properties']['heat_capacity_v'] = uc.set_in_units(results['thermal_properties'].pop('heat_capacity'), 'J/K/mol')\n \n if qha is not None:\n\n results['qha_eos'] = eos\n\n # Create QHA plots\n qha.plot_bulk_modulus()\n plt.xlabel('Volume ($Å^3$)', size='large')\n plt.ylabel('Energy ($eV$)', size='large')\n plt.savefig('bulk_modulus.png', dpi=400, bbox_inches='tight')\n plt.close()\n\n qha.plot_helmholtz_volume()\n plt.savefig('helmholtz_volume.png', dpi=400)\n plt.close()\n\n # Package volume vs energy scans\n results['volume_scan'] = {}\n results['volume_scan']['volume'] = np.array(volumes)\n results['volume_scan']['strain'] = strains\n results['volume_scan']['energy'] = np.array(energies)\n \n # Compute and add QHA properties\n properties = qha.get_bulk_modulus_parameters()\n results['E0'] = uc.set_in_units(properties[0], 'eV')\n results['B0'] = uc.set_in_units(properties[1], 'eV/angstrom^3')\n results['B0prime'] = uc.set_in_units(properties[2], 'eV/angstrom^3')\n results['V0'] = uc.set_in_units(properties[3], 'angstrom^3')\n \n results['thermal_properties']['volume'] = uc.set_in_units(np.hstack([qha.volume_temperature, np.nan]), 'angstrom^3')\n results['thermal_properties']['thermal_expansion'] = np.hstack([qha.thermal_expansion, np.nan])\n results['thermal_properties']['Gibbs'] = uc.set_in_units(np.hstack([qha.gibbs_temperature, np.nan]), 'eV')\n results['thermal_properties']['bulk_modulus'] = uc.set_in_units(np.hstack([qha.bulk_modulus_temperature, np.nan]), 'GPa')\n results['thermal_properties']['heat_capacity_p_numerical'] = uc.set_in_units(np.hstack([qha.heat_capacity_P_numerical, np.nan]), 'J/K/mol')\n results['thermal_properties']['heat_capacity_p_polyfit'] = uc.set_in_units(np.hstack([qha.heat_capacity_P_polyfit, np.nan]), 'J/K/mol')\n results['thermal_properties']['gruneisen'] = np.hstack([qha.gruneisen_temperature, np.nan])\n \n return results",
"def update_transfer_speed(self, upload_times: Iterable[float]) -> None:\n # update Exponential Moving Average (EMA) of upload time\n for t in upload_times:\n self.avg_ul_time = ((self.avg_ul_time or t) * 0.8) + (t * 0.2)",
"def rickerwave(f = 25, length = 0.512, dt = 0.004): \n time = np.arange(-length/2, (length-dt)/2, dt)\n amplitude = (1.0 - 2.0*(np.pi**2)*(f**2)*(time**2))* \\\n np.exp(-(np.pi**2)*(f**2)*(time**2))\n return (time, amplitude)",
"def calc_speedofsound(self, p, rho):\n from math import sqrt\n ga = self.ga\n return sqrt(ga*p/rho)",
"def max_speed_of_sound(self, plot=False):\n mmax = self.max_mass()\n\n # Value of h at the core of the maximum mass NS.\n h_max = lalsimulation.SimNeutronStarEOSMaxPseudoEnthalpy(self.eos)\n\n # Calculate speed of sound at a list of h's up to h_max,\n # then take the maximum value.\n hs = np.logspace(np.log10(h_max)-1.0, np.log10(h_max), 100)\n vs = np.array([lalsimulation.SimNeutronStarEOSSpeedOfSoundGeometerized(h, self.eos) for h in hs])\n v_max = np.max(vs)\n if plot:\n fig, ax = plt.subplots()\n ax.plot(hs, vs)\n ax.axhline(1.0, c='k')\n ax.axvline(h_max)\n ax.axhline(v_max)\n ax.set_xlabel(r'$h$')\n ax.set_ylabel(r'$v/c$')\n ax.set_xlim(0, 1.1*h_max)\n ax.set_ylim(0, 1.1*v_max)\n self.v_max = v_max\n return self.v_max",
"def pure_dephasing_evolution(tlist, coup_strength, cav_broad, cav_freq, beta, w0):\n integrand = lambda t: quad(pure_dephasing_integrand, 0.0, np.inf,\n args=(coup_strength, cav_broad, cav_freq, beta, t))\n evolution = np.array([np.exp(1j*w0*t + integrand(t)[0]/np.pi) for t in tlist])\n return evolution",
"def wavespeeds_x(W: PrimitiveState) -> [np.ndarray]:\n\n # Check if PrimitiveState\n if isinstance(W, PrimitiveState):\n # Speed of sound\n a = W.a()\n # Compute wavespeeds\n slow, fast = W.u - a, W.u + a\n return slow, fast\n else:\n raise TypeError('Input is not PrimitiveState.')",
"def ionchamber_fluxes(gas='nitrogen', volts=1.0, length=100.0,\n energy=10000.0, sensitivity=1.e-6,\n sensitivity_units='A/V'):\n from .materials import material_mu\n\n fin = fout = fphoto = 0.0\n\n units = sensitivity_units.replace('Volts', 'V').replace('Volt', 'V')\n units = units.replace('Amperes', 'A').replace('Ampere', 'A')\n units = units.replace('Amps', 'A').replace('Amp', 'A')\n units = units.replace('A/V', '')\n sensitivity *= SI_PREFIXES.get(units, 1)\n\n if isinstance(gas, str):\n gas = {gas: 1.0}\n\n gas_total = 0.0\n gas_comps = []\n for gname, frac in gas.items():\n ionpot = ionization_potential(gname)\n if gname == 'N2': gname = 'nitrogen'\n if gname == 'O2': gname = 'oxygen'\n gas_total += frac\n gas_comps.append((gname, frac, ionpot))\n\n\n # note on Photo v Total attenuation:\n # the current is from the photo-electric cross-section, so that\n # flux_photo = flux_in * [1 - exp(-t*mu_photo)]\n # while total attenuation means\n # flux_out = flux_in * exp(-t*mu_total)\n\n for gas, frac, ionpot in gas_comps:\n mu_photo = material_mu(gas, energy=energy, kind='photo')\n mu_total = material_mu(gas, energy=energy, kind='total')\n\n flux_photo = volts * sensitivity * ionpot / (2 * QCHARGE * energy)\n flux_photo *= (frac/gas_total)\n flux_in = flux_photo / (1.0 - np.exp(-length*mu_photo))\n flux_out = flux_in * np.exp(-length*mu_total)\n\n fphoto += flux_photo\n fin += flux_in\n fout += flux_out\n\n return fluxes(photo=fphoto, incident=fin,transmitted=fout)",
"def spectral_velocities(data,wcs=None,fqs=None,fqis=None,restfrq=None):\n if wcs is None:\n log.error(\"A world coordinate system (WCS) is needed\")\n return None\n if restfrq is None:\n restfrq=wcs.wcs.restfrq*u.Hz\n if fqs is None:\n if fqis is None:\n return None\n dim=wcs.wcs.spec\n idx=np.zeros((fqis.size,data.ndim))\n idx[:,dim]=fqis\n vals=wcs.all_pix2world(idx,0)\n fqs=vals[:,dim]*u.Hz\n eq=u.doppler_radio(restfrq)\n return fqs.to(u.km/u.s, equivalencies=eq)",
"def calculate_atmospheric(\n a38, a36, k38, ca38, ca36, decay_time, production_ratios=None, arar_constants=None\n):\n if production_ratios is None:\n production_ratios = {}\n\n if arar_constants is None:\n arar_constants = ArArConstants()\n\n pr = production_ratios\n\n m = pr.get(\"Cl3638\", 0) * nominal_value(arar_constants.lambda_Cl36) * decay_time\n atm3836 = nominal_value(arar_constants.atm3836)\n atm36 = (a36 - ca36 - m * (a38 - k38 - ca38)) / (1 - m * atm3836)\n atm38 = atm3836 * atm36\n cl38 = a38 - atm38 - k38 - ca38\n cl36 = cl38 * m\n\n return atm36, atm38, cl36, cl38",
"def get_hfs_rates(w,orient_vecs,sphere_raduis):\n u_vec, f_vec, s_vec = orient_vecs\n fly_pos = sphere_raduis*u_vec\n vel_vec = numpy.cross(w,fly_pos)\n head_rate = numpy.dot(u_vec,w)\n forw_rate = numpy.dot(vel_vec, f_vec)\n side_rate = numpy.dot(vel_vec, s_vec)\n return head_rate, forw_rate, side_rate",
"def GM_interpolateSounding(self, model, weName, levels, timeRange,\n modelInventory):\n \n prevTR, nextTR = self.GM_getPrevNextModelTimes(modelInventory, \n timeRange)\n if prevTR is None or nextTR is None:\n return None\n\n prevGHCube, prevCube = self.makeNumericSounding(model, weName, levels,\n prevTR, noDataError=0)\n nextGHCube, nextCube = self.makeNumericSounding(model, weName, levels,\n nextTR, noDataError=0)\n # calculate weights for a time-weighted average\n t1 = timeRange.startTime().unixTime() - prevTR.startTime().unixTime()\n t2 = nextTR.startTime().unixTime() - timeRange.startTime().unixTime()\n prevWt = float(t2) / float(t1 + t2)\n nextWt = float(t1) / float(t1 + t2)\n \n interpGHCube = (prevGHCube * prevWt) + (nextGHCube * nextWt)\n \n # If this is a cube of scalars\n if re.search(\"(?i)wind\", weName) is None:\n interpCube = (prevCube * prevWt) + (nextCube * nextWt)\n else:\n\n # Break up the wind into u and v components\n (prevU, prevV) = self.MagDirToUV(prevCube[0], prevCube[1])\n (nextU, nextV) = self.MagDirToUV(nextCube[0], nextCube[1])\n\n # Interpolate the wind components \n interpU = (prevU * prevWt) + (nextU * nextWt)\n interpV = (prevV * prevWt) + (nextV * nextWt)\n \n # Now compute the final wind magnitude and direction \n interpCube = self.UVToMagDir(interpU, interpV)\n \n return interpGHCube, interpCube",
"def FHFP_CJ2(gas,gas1,gas2):\n \n P1 = gas1.P\n H1 = gas1.enthalpy_mass\n r1 = gas1.density\n P2 = gas.P\n H2 = gas.enthalpy_mass\n r2 = gas.density\n \n speeds = equilSoundSpeeds(gas2)\n w2s=(speeds[0])**2\n w1s = w2s*(r2/r1)**2\n FH = H2 + 0.5*w2s - (H1 + 0.5*w1s)\n FP = P2 + r2*w2s - (P1 + r1*w1s)\n return [FH, FP, sqrt(w1s)]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
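A minimal usage sketch for the equilSoundSpeeds document above. It assumes Cantera is installed, that the function lives in a module with a top-level import math, and that the 'gri30.yaml' mechanism and the methane/air composition are illustrative choices only, not part of the dataset row.

import cantera as ct

# Illustrative only: any Cantera Solution object exposing the attributes used
# above (equilibrate, entropy_mass, P, density, SV) will work.
gas = ct.Solution('gri30.yaml')
gas.TPX = 300.0, ct.one_atm, 'CH4:1.0, O2:2.0, N2:7.52'

aequil, afrozen = equilSoundSpeeds(gas)
print(f'equilibrium sound speed: {aequil:.1f} m/s, frozen sound speed: {afrozen:.1f} m/s')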
Generates a speckle image of given shape, speckle diameter etc. and saves it to specified path as JPEG or TIFF, configured for printing. | def generate_and_save(height, width, dpi, speckle_diameter, path, size_randomness=0.5,
position_randomness=0.5, speckle_blur=1, grid_step=1.2):
ppmm = dpi / 25.4
w = int(np.round((width * ppmm)))
h = int(np.round((height * ppmm)))
D = np.ceil(speckle_diameter*ppmm)
im = speckle_image((h, w), D, size_randomness, position_randomness, speckle_blur, grid_step)
if path is None:
path = f'speckle_{width}x{height}mm_D{speckle_diameter}mm_{dpi}DPI.tiff'
# Add exif comment to image:
image_comment = f'height: {height} mm\nwidth: {width} mm\ndpi: {dpi}\nD: {speckle_diameter} mm\n'\
f'size_randomness: {size_randomness}\nposition_randomness: {position_randomness}\n'\
f'speckle_blur: {speckle_blur}\ngrid_step: {grid_step}'
save_image(path, im, dpi, comment=image_comment)
print(f'Image saved to {path}.')
return im | [
"def generate_image(self) -> None:",
"def make_image(sim, scenario_file, scenario_fn, output_path='./img.png'):\n scenario = sim.getScenario()\n img = scenario_fn(scenario)\n dpi = 100\n height, width, depth = img.shape\n figsize = width / dpi, height / dpi\n plt.figure(figsize=figsize, dpi=dpi)\n plt.axis('off')\n plt.imshow(img)\n plt.savefig(output_path, bbox_inches='tight', pad_inches=0)\n print('>', output_path)",
"def test_export_stp(self):\n\n test_shape = ExtrudeMixedShape(\n points=[\n (10, 20, \"straight\"),\n (10, 10, \"straight\"),\n (20, 10, \"circle\"),\n (22, 15, \"circle\"),\n (20, 20, \"straight\"),\n ],\n distance=10,\n )\n os.system(\"rm tests/test.stp\")\n test_shape.export_stp(\"tests/test.stp\")\n assert Path(\"tests/test.stp\").exists() is True\n os.system(\"rm tests/test.stp\")\n\n test_shape.stp_filename = \"tests/test.stp\"\n test_shape.export_stp()\n assert Path(\"tests/test.stp\").exists() is True\n os.system(\"rm tests/test.stp\")",
"def save(turtle, file_name: str):\n turtle_screen = turtle.getscreen()\n turtle_screen.getcanvas().postscript(file=f\"images/{file_name}.eps\")",
"def save_figure(folder_path, name, fig):\n if not os.path.isdir(folder_path):\n os.makedirs(folder_path)\n\n fig.savefig(folder_path + \"/\" + name)",
"def create_snfg_img(self, filepath, **kwargs):\n # set up the parameters for visualizing\n params = {\n \"width\": 100,\n \"height\": 100,\n \"stroke\": 2,\n \"line\": 3,\n }\n params.update(**kwargs)\n params[\"box\"] = min(params[\"width\"], params[\"height\"])\n\n # generate the visualization tree representation of the glycan\n tree = Tree(self).assign_coords()\n width, height = tree.get_bounds()[2:]\n\n # create and draw the image\n img = Image.new(\n mode=\"RGB\",\n size=(int((width + 1) * params[\"width\"]), int((height + 1) * params[\"height\"])),\n color=(255, 255, 255)\n )\n draw_img = ImageDraw.Draw(img)\n tree.draw_edges(draw_img, **params)\n tree.draw_nodes(draw_img, self, **params)\n\n # save the image and return is for further operations\n img.save(filepath)\n return img",
"def write_png(self, fname):\n im = self.make_image()\n _png.write_png(im, fname)",
"def create_jpg(im_ms, cloud_mask, date, satname, filepath):\n\n # rescale image intensity for display purposes\n im_RGB = rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)\n# im_NIR = rescale_image_intensity(im_ms[:,:,3], cloud_mask, 99.9)\n# im_SWIR = rescale_image_intensity(im_ms[:,:,4], cloud_mask, 99.9)\n\n # make figure (just RGB)\n fig = plt.figure()\n fig.set_size_inches([18,9])\n fig.set_tight_layout(True)\n ax1 = fig.add_subplot(111)\n ax1.axis('off')\n ax1.imshow(im_RGB)\n ax1.set_title(date + ' ' + satname, fontsize=16)\n\n# if im_RGB.shape[1] > 2*im_RGB.shape[0]:\n# ax1 = fig.add_subplot(311)\n# ax2 = fig.add_subplot(312)\n# ax3 = fig.add_subplot(313)\n# else:\n# ax1 = fig.add_subplot(131)\n# ax2 = fig.add_subplot(132)\n# ax3 = fig.add_subplot(133)\n# # RGB\n# ax1.axis('off')\n# ax1.imshow(im_RGB)\n# ax1.set_title(date + ' ' + satname, fontsize=16)\n# # NIR\n# ax2.axis('off')\n# ax2.imshow(im_NIR, cmap='seismic')\n# ax2.set_title('Near Infrared', fontsize=16)\n# # SWIR\n# ax3.axis('off')\n# ax3.imshow(im_SWIR, cmap='seismic')\n# ax3.set_title('Short-wave Infrared', fontsize=16)\n\n # save figure\n plt.rcParams['savefig.jpeg_quality'] = 100\n fig.savefig(os.path.join(filepath,\n date + '_' + satname + '.jpg'), dpi=150)\n plt.close()",
"def save_images(figs, save_path):\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n\n for fig in figs:\n filename = fig.layout.title.text.lower().replace(' ','_')\n file = save_path + '/' + filename + '.webp'\n\n fig.write_image(file)\n\n im = Image.open(file)\n im.show()",
"def save_tiff_output(data: dispim.base.Volume, path: str, name: str, b_8bit: bool = False) -> None:\n generate_output_dir(path)\n out_path = os.path.join(path, f\"{name}.tif\")\n\n save_tiff(data, out_path)",
"def tiff_to_jpeg(self, path, n_images):\n\t\timg = Image.open(path, 'r')\n\t\toutfile = path[:-4] + '.jpeg'\n\t\tprint(path)\n\t\ttry:\n\t\t\tim = Image.open(path, 'r')\n\t\t\t#print \"Generating jpeg for %s\" % name\n\t\t\tim.thumbnail(im.size)\n\t\t\tim.save(outfile, \"JPEG\", quality=100)\n\t\texcept Exception, e:\n\t\t\tprint e",
"def savefig(filename):\n plt.savefig('{}.pgf'.format(filename))\n plt.savefig('{}.pdf'.format(filename))",
"def OutputImage(self, session):\n session.handler.send_header(\"Content-type\", \"image/png\")\n session.handler.end_headers()\n self.canvas.save(file=session.handler.wfile, format='png')",
"def dumpTiling(tiling, filename, fov=None, path = None):\n\ttiling.setTileDimension([fov,fov])\n\tdt=displayTiling(tiling, dim=2048, fov=fov)\n\tpath = (path if path else defaultPath)\n\twrite(dt.getImage(),filename, path)",
"def generate_html_page_for_png_view(self, filepath):\n html_content = '<html> \\\n <body> \\\n <img border=\"0\" src=\"{}\" alt=\"name\" width=\"{}\" height=\"{}\" /> \\\n </body> \\\n </html>'\n\n path = os.path.normpath(filepath.split(\".\")[0] + \".html\")\n width, height = self.get_image_size(filepath)\n with open(path, \"w+\") as f:\n f.write(html_content.format(filepath, width, height))\n return path",
"def write_pfd(self, fname=None):\n infilename = os.path.join(this_file_dir(), \"steam_turbine_template.svg\")\n with open(infilename, \"r\") as f:\n s = svg_tag(svg=f, tag_group=self.tags_steam_streams, outfile=fname)\n if fname is None:\n return s",
"def make_tiles(self, x_size, y_size, x_step, y_step, output_path, verbose=True):\n\n fig, ax = self.make_figure()\n x = self.doc.header['$EXTMIN'][0]\n y = self.doc.header['$EXTMIN'][1]\n\n # Slide until the bottom edge of the window is above the top of\n # the elements in the doc\n while y < self.doc.header['$EXTMAX'][1]:\n\n # Get window into document\n xlim = (x, x + x_size)\n ylim = (y, y + y_size)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n # to check if image is empty\n # import cv2\n # im = cv2.imread('2.jpg')\n # if im is None:\n # Print(\"Image is empty\")\n\n # to get percentage of empty space in image\n # from PIL import Image\n # image = Image.open(\"pepper.png\")\n # bg = image.getpixel((0,0))\n # width, height = image.size\n # bg_count = next(n for n,c in image.getcolors(width*height) if c==bg)\n # img_count = width*height - bg_count\n # img_percent = img_count*100.0/width/height\n\n filename = \"%s_x_%s_%s_y_%s_%s.png\" % (\"tile_\", xlim[0], xlim[1], ylim[0], ylim[1])\n if verbose:\n print('Writing: %s' % filename)\n fig.savefig(os.path.join(output_path, filename), dpi=self.dpi)\n\n # Step\n x += x_step\n if x > self.doc.header['$EXTMAX'][0]:\n x = self.doc.header['$EXTMIN'][0]\n y += y_step",
"def specklize_image(image, spekle_size, bruit=None, verbose=0):\n # n linear dimension of the n x n array to be used\n # k number of samples per speckle\n # the paraneter k must be used when defining the bruit, since is entangled with that, \n # and so the bruit must be already setted as the bruit passed through the pupil\n # if (image.shape != bruit.shape) or (image.shape[0] != image.shape[1]):\n # print('image and noise must have the same size')\n # return 0\n # radius of the lens pupil function in pixels\n # smaller the pupil, bigger the speckle grain\n # n = bruit.shape[0]\n # r0 = float(n)/k\n\n # k = spekle_size\n\n # make a new bruit if needed\n if np.shape(bruit) == ():\n if verbose:\n print('creating a new pupil noise')\n # bruit = np.random.rand(np.shape(image)[0], np.shape(image)[1])\n bruit = make_bruit_like(image)\n elif np.shape(bruit) != np.shape(image):\n eprint('shapes of image and bruit does not match: building a new bruit matrix')\n # bruit = np.random.rand(np.shape(image)[0], np.shape(image)[1])\n bruit = make_bruit_like(image)\n\n # scale the pupil of the added bruit depending to the wanted speckle size\n rpupil = np.int16(np.min(np.shape(image))/spekle_size)\n bruit = pupil(bruit, rpupil)\n\n # \n scatteredfield = np.fft.fft2(np.sqrt(image))\n # scatteredfield = np.fft.fft2(image)\n # scatteredfield = np.fft.fftshift(np.fft.fft2(np.sqrt(image)))\n\n # calculate the field trasmitted by the lens pupil\n # randomfield = np.multiply( (bruit!=0)*np.exp(1j*2*np.pi*bruit), scatteredfield)\n\n # propagate trhough scattering\n randomfield = scatteredfield*np.exp(1j*2*np.pi*bruit)\n # propagate trhough pupil\n randomfield = pupil(randomfield, rpupil)\n # pupilfield = pupil(np.multiply(scatteredfield, randomfield),r0)\n # return back into the image field\n imagefield = np.fft.ifft2(randomfield)\n imageintensity = np.abs(imagefield)**2\n\n # # directly from goodman:\n # but here the illumination is structured\n # scatteredfield = np.multiply(\n # np.sqrt(image),\n # np.exp(1j*2*np.pi*bruit)\n # )\n # pupilfield = pupil(np.fft.fft2(scatteredfield), rpupil)\n # imagefield = np.fft.ifft2(pupilfield)\n # imageintensity = np.abs(imagefield)**2\n\n return imageintensity",
"def write_image(**kwargs):\r\n\r\n # create the image and draw objects, and set the initial vertical spacing\r\n image = Image.new(\r\n 'RGB', (int(kwargs['image_width']), int(kwargs['image_height'])),\r\n kwargs['image_color']\r\n )\r\n draw = ImageDraw.Draw(image)\r\n pixels_from_top = 0\r\n\r\n # if there is a logo, paste it on to the base image\r\n if 'logo_image_path' in kwargs:\r\n image, pixels_from_top = write_image_paste(\r\n image=image, pixels_from_top=pixels_from_top, mode='logo', **kwargs\r\n )\r\n\r\n # write each possible text type, write it to the image if it is available\r\n for mode in ['text_label', 'text_hint']:\r\n if mode in kwargs:\r\n pixels_from_top = write_image_text(\r\n draw=draw, pixels_from_top=pixels_from_top, mode=mode, **kwargs\r\n )\r\n\r\n # if there is a nested image, paste it on to the base image\r\n if 'nest_image_path' in kwargs:\r\n image, pixels_from_top = write_image_paste(\r\n image=image, pixels_from_top=pixels_from_top, mode='nest', **kwargs\r\n )\r\n\r\n # write the file to an 'out' subdirectory as png file\r\n item_filename_ext = os.path.join(\r\n kwargs['file_path'], 'out', '{0}.png'.format(kwargs['file_name']))\r\n image.save(item_filename_ext, 'PNG', dpi=[300, 300])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
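A hypothetical call of the generate_and_save document above; it assumes the surrounding module (with its speckle_image and save_image helpers and NumPy imported as np) is importable, and the dimensions, DPI and file name are made-up example values.

# 100 x 50 mm speckle pattern with 0.5 mm speckles at 1200 DPI (example values).
im = generate_and_save(height=50, width=100, dpi=1200, speckle_diameter=0.5,
                       path='speckle_100x50mm.tiff')
# `im` is the rendered image array returned by the speckle_image helper.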
Generates a pattern of lines and saves it to specified path as JPEG or TIFF, configured for printing. | def generate_lines(height, width, dpi, line_width, path, orientation='vertical', N_lines=None):
ppmm = dpi / 25.4
w = int(np.round((width * ppmm)))
h = int(np.round((height * ppmm)))
if N_lines is not None:
if orientation == 'vertical':
line_width = width // (2*N_lines)
else:
line_width = height // (2*N_lines)
D = int(np.round(line_width * ppmm))
im = np.full((h, w), 255, dtype=np.uint8)
if orientation == 'vertical':
black_id = np.hstack( [np.arange(i*D, i*D+D) for i in range(0, w//D, 2)] )
if black_id[-1] + D < w:
black_id = np.hstack([black_id, np.arange(w//D*D, w)])
im[:, black_id] = 0
else:
black_id = np.hstack( [np.arange(i*D, i*D+D) for i in range(0, h//D, 2)] )
if black_id[-1] + D < h:
black_id = np.hstack([black_id, np.arange(h//D*D, h)])
im[black_id] = 0
image_comment = f'{orientation} lines\nline width: {line_width}\n DPI: {dpi}'
save_image(path, im, dpi, comment=image_comment)
print(f'Image saved to {path}.')
return im | [
"def write_line_segmentation(file,seg_):\n seg = iulib.intarray()\n seg.copy(seg_)\n ocropus.make_line_segmentation_white(seg)\n iulib.write_image_packed(file,seg)",
"def _save_line_image(image: np.ndarray, line_pts: np.ndarray, save_prefix: os.path):\n save_image = copy.deepcopy(image)\n for (x1, y1, x2, y2) in line_pts:\n save_image = cv2.line(save_image, (x1, y1),\n (x2, y2), (255, 0, 0), thickness=2)\n save_image_file = save_prefix + '.jpg'\n cv2.imwrite(save_image_file, save_image)",
"def export_pattern(points, width, height, filename):\n\n tri = Delaunay(points)\n num_points = points.shape[0]\n num_tris = tri.simplices.shape[0]\n\n with open(filename, \"w\") as f:\n f.write(\"%d %d\\n\"%(width, height))\n f.write(\"%d %d\\n\"%(num_points, num_tris))\n for i in range(num_points):\n f.write(\"%f %f\\n\"%(points[i,0], points[i,1]))\n for i in range(num_tris):\n f.write(\"%d %d %d\\n\"%(tri.simplices[i,0], tri.simplices[i,1], tri.simplices[i,2]))",
"def graph(self, kind, lines):\n\n code = \"\\n\".join(lines)\n name = self.crc64(code)\n\n assert(kind in self.formatters)\n filepath = \"%s%s.png\" % (self.ditaa.config[\"WRITE_IMGS_DIR\"], name)\n if not os.path.exists(filepath):\n tmp = tempfile.NamedTemporaryFile()\n tmp.write(code)\n tmp.flush()\n cmd = \"%s %s %s\" % (\n os.path.join(self.ditaa.config[\"BINARY_PATH\"], kind),\n self.ditaa.config[\"ARGUMENTS\"], tmp.name)\n p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, close_fds=True)\n p.wait()\n shutil.copyfile(tmp.name + \".png\", filepath)\n\n output_path = \"%s%s.png\" % (self.ditaa.config[\"BASE_IMG_LINK_DIR\"],\n name)\n return \"\" % (name, output_path)",
"def generate_and_save(height, width, dpi, speckle_diameter, path, size_randomness=0.5, \n position_randomness=0.5, speckle_blur=1, grid_step=1.2):\n ppmm = dpi / 25.4\n w = int(np.round((width * ppmm)))\n h = int(np.round((height * ppmm)))\n D = np.ceil(speckle_diameter*ppmm)\n\n im = speckle_image((h, w), D, size_randomness, position_randomness, speckle_blur, grid_step)\n\n if path is None:\n path = f'speckle_{width}x{height}mm_D{speckle_diameter}mm_{dpi}DPI.tiff'\n\n # Add exif comment to image:\n image_comment = f'height: {height} mm\\nwidth: {width} mm\\ndpi: {dpi}\\nD: {speckle_diameter} mm\\n'\\\n f'size_randomness: {size_randomness}\\nposition_randomness: {position_randomness}\\n'\\\n f'speckle_blur: {speckle_blur}\\ngrid_step: {grid_step}'\n \n save_image(path, im, dpi, comment=image_comment)\n print(f'Image saved to {path}.')\n return im",
"def processImage(self):\n\t\n\t\tfor _line in self._lst:\n\t\t\tslpid = _line;\n\t\t\tself.fillFormat(slpid);\n\t\t\t_fileRead = self._readDir + slpid + \".\" + self._fext;\n\t\t\t_fileWrite = self._writeDir + slpid + self._sfx + \".\" + self._fext;\n\t\t\t\n\t\t\t\"\"\" Create an image object passing\n\t\t\t\t_fileRead, _fileWrite, self._bgImg; self._authStr, self._illusStr\n\t\t\t\"\"\"\n\t\t\tGI_generateImg(_fileRead, _fileWrite, self._bgImg, self._authStr, self._illusStr);",
"def generate_report(path):\n\n file_list = [file_name for file_name in os.listdir(path) if file_name.startswith(NAME_START)]\n file_list.sort()\n results = []\n for file_name in file_list:\n inside = open(file_name).readlines()\n values = re.findall(\"\\d+\\.?\\d*%\", inside[-2])\n caption = file_name[7:].replace(SPACE_CHAR, \" \")\n results.append(LINE_PATTERN.format(caption, values[0], values[1], values[2]))\n return \"\\n\".join(results)",
"def make_tiles(self, x_size, y_size, x_step, y_step, output_path, verbose=True):\n\n fig, ax = self.make_figure()\n x = self.doc.header['$EXTMIN'][0]\n y = self.doc.header['$EXTMIN'][1]\n\n # Slide until the bottom edge of the window is above the top of\n # the elements in the doc\n while y < self.doc.header['$EXTMAX'][1]:\n\n # Get window into document\n xlim = (x, x + x_size)\n ylim = (y, y + y_size)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n # to check if image is empty\n # import cv2\n # im = cv2.imread('2.jpg')\n # if im is None:\n # Print(\"Image is empty\")\n\n # to get percentage of empty space in image\n # from PIL import Image\n # image = Image.open(\"pepper.png\")\n # bg = image.getpixel((0,0))\n # width, height = image.size\n # bg_count = next(n for n,c in image.getcolors(width*height) if c==bg)\n # img_count = width*height - bg_count\n # img_percent = img_count*100.0/width/height\n\n filename = \"%s_x_%s_%s_y_%s_%s.png\" % (\"tile_\", xlim[0], xlim[1], ylim[0], ylim[1])\n if verbose:\n print('Writing: %s' % filename)\n fig.savefig(os.path.join(output_path, filename), dpi=self.dpi)\n\n # Step\n x += x_step\n if x > self.doc.header['$EXTMAX'][0]:\n x = self.doc.header['$EXTMIN'][0]\n y += y_step",
"def generateOutput(self):\n if not hasattr(self, 'xcms'):\n self.getCenterOfMass()\n\n fh = open(self.settings['output'], 'w')\n rg = open(self.settings['output'].split('.')[0]+'.reg', 'w')\n fh.write('#X coordinate in pixels [starts from 1]\\n')\n fh.write('#Y coordinate in pixels [starts from 1]\\n')\n rg.write('#File written on {0:>s}\\n'.format(datetime.datetime.isoformat(datetime.datetime.now())))\n for x, y in zip(self.xcms, self.ycms):\n fh.write('%10.3f %10.3f\\n' % (x + 1, y + 1))\n rg.write('circle({0:.3f},{1:.3f},5)\\n'.format(x + 1, y + 1))\n fh.close()\n rg.close()",
"def annotateImages(path = None):\n\tif not path:\n\t\tpath = defaultPath\n\tf = open(\"%s/images.txt\" % path)\n\ttry:\n\t\thdr = f.readline()\n\t\titems = hdr.strip(\"\\n\").split(\"\\t\")\n\t\tfirst=True\n\t\tfor tmp in f:\n\t\t\tdata = tmp.strip(\"\\n\").split(\"\\t\")\n\t\t\tm = {}\n\t\t\tfor i in range(0, len(items)):\n\t\t\t\tm[items[i].upper()] = data[i]\n\t\t\timg = []\n\t\t\tfor i in range(0, 4):\n\t\t\t\tif m[\"TYPE\"]==\"SI\":\n\t\t\t\t\tfpt=jio.File(path, m[\"NAME\"])\n\t\t\t\t\ti0 = jio.File(fpt, \"Image[0][[%d]].png\" % (i, ))\n\t\t\t\t\toutFile = jio.File(fpt, \"Image[0][[%d]][SC].png\" % (i, ))\n\t\t\t\telse:\n\t\t\t\t\ti0 = jio.File(path, \"%s[%d].png\" % (m[\"NAME\"], i))\n\t\t\t\t\toutFile = jio.File(path, \"%s[%d][SC].png\" % (m[\"NAME\"], i))\n\t\t\t\tif i0.isFile() and (not outFile.isFile()):\n\t\t\t\t\tbi = iio.ImageIO.read(i0)\n\t\t\t\t\tfov = float(m[\"FOV\"])\n\t\t\t\t\tsc = epq.StageCoordinate()\n\t\t\t\t\tsc.set(X_AXIS, float(m[\"X\"]))\n\t\t\t\t\tsc.set(Y_AXIS, float(m[\"Y\"]))\n\t\t\t\t\tsc.set(Z_AXIS, float(m[\"Z\"]))\n\t\t\t\t\tif first:\n\t\t\t\t\t\tprint \"Filename\\tField-of-View\"\n\t\t\t\t\t\tfirst=False\n\t\t\t\t\tprint \"%s\\t%3.1f um\" % (i0.name, 1000.0*fov )\n\t\t\t\t\tsi = ept.ScaledImage(bi, fov * 0.001, fov * 0.001, 0.0, sc, str(i))\n\t\t\t\t\tsi.applyMicronBar()\n\t\t\t\t\tiio.ImageIO.write(si, \"png\", outFile)\n\tfinally:\n\t\tf.close()",
"def lines_to_gcode(paths, target_width = 180., rate=250.0,\n min_travel = 0.25,\n power=400., outfile = 'plot.gcode'):\n bnds = paths_bounds(paths)\n width = abs(bnds[2]-bnds[0])\n scale_factor = target_width/width\n # Reflect the x-coordinate first\n def map_x(x):\n return -1.*x*scale_factor\n def map_y(y):\n return (y-bnds[1])*scale_factor\n # Write the boundary size to screen.\n print(\"Plot Size (cm): {:f}x{:f}\".format(abs(map_x(bnds[2])-map_x(bnds[0])), abs(map_y(bnds[3])-map_y(bnds[1]))))\n with open(\"bounds_\"+outfile,'w') as f:\n for line in preamble:\n f.write(line)\n f.write(\"G1 X{:0.3f} Y{:0.3f} S0 F{:0.3f}\\n\".format(map_x(bnds[0]),\n map_y(bnds[1]),rate))\n f.write(\"G1 X{:0.3f} Y{:0.3f} S{:0.3f} F{:0.3f}\\n\".format(map_x(bnds[2]),\n map_y(bnds[1]),\n power, rate))\n f.write(\"G1 X{:0.3f} Y{:0.3f} S{:0.3f} F{:0.3f}\\n\".format(map_x(bnds[2]),\n map_y(bnds[3]),\n power, rate))\n f.write(\"G1 X{:0.3f} Y{:0.3f} S{:0.3f} F{:0.3f}\\n\".format(map_x(bnds[0]),\n map_y(bnds[3]),\n power, rate))\n f.write(\"G1 X{:0.3f} Y{:0.3f} S{:0.3f} F{:0.3f}\\n\".format(map_x(bnds[0]),\n map_y(bnds[1]),\n power, rate))\n f.write(\"G1 X{:0.3f} Y{:0.3f} S0 F{:0.3f}\\n\".format(0,0,rate))\n with open(outfile,'w') as f:\n for line in preamble:\n f.write(line)\n for path in paths:\n if len(path) < 2:\n continue\n X,Y = path[0]\n xm = map_x(X)\n ym = map_y(Y)\n # print(\"Plot Starts At \",xm,ym)\n f.write(\"G1 X{:0.3f} Y{:0.3f} S0 F{:0.3f}\\n\".format(map_x(X), map_y(Y), rate))\n for i,p in enumerate(path[1:]):\n xm = map_x(X)\n ym = map_y(Y)\n p0m = map_x(p[0])\n p1m = map_y(p[1])\n if np.sqrt((xm-p0m)*(xm-p0m) + (ym-p1m)*(ym-p1m))<min_travel:\n continue\n X,Y = p\n f.write(\"G1 X{:0.3f} Y{:0.3f} S{:0.3f} F{:0.3f}\\n\".format(map_x(X),\n map_y(Y), power, rate))\n f.write(\"G1 X{:d} Y{:d} S0 F{:0.3f}\\n\".format(0,0,rate))\n return",
"def write_images():\n dataset = NTU_RGB_D(DATASETS_PATH, filetype='pt', preprocess=False)\n dataset.save_images(DATASETS_PATH + 'raw/all/')",
"def write_png(self, fname):\n im = self.make_image()\n _png.write_png(im, fname)",
"def saveMap(filename, paths, images, faces, years, places):\n f = open(filename, 'w+')\n nodes = list(set(cbook.flatten(paths)))\n pathInd = {} #easier form to work with here\n for i in range(len(paths)):\n for j in paths[i]:\n if j in pathInd.keys():\n pathInd[j].append(i+1)\n else:\n pathInd[j] = [i+1]\n strs = []\n\n # Write nodes\n f.write('{ \"nodes\": [\\n')\n for node in nodes:\n imgPath = 'images/' + str(node) + '.png'\n #misc.imsave(websitePath + imgPath, images[node]) #XXX suspect don't need this anymore\n s = '{\"id\": ' + str(node) + ', \"line\": ' + str(pathInd[node])\n s += ', \"faces\": [' + ','.join([str(x) for x in np.nonzero(faces[node])[0]]) + ']'\n p = np.nonzero(places[node])[0]\n s += ', \"time\": ' + str(years[node]) + ', \"place\": ' + str(p[0] if len(p) > 0 else -1)\n s += '}'\n strs.append(s)\n f.write(',\\n'.join(strs) + '],\\n\"links\": [\\n')\n strs = []\n\n # Write links\n for i in range(len(paths)):\n p = paths[i]\n for j in range(0, len(p)-1):\n strs.append('{\"source\": ' + str(nodes.index(p[j])) + ', \"target\": ' + str(nodes.index(p[j+1])) + ', \"line\": ' + str(i+1) + '}')\n f.write(',\\n'.join(strs) + ']}')\n f.close()",
"def make_sketches(path_in, path_sketch):\r\n \r\n os.chdir(path_in)\r\n file_list = glob.glob(\"*\")\r\n file_list.sort()\r\n# os.chdir(path_sketch)\r\n\r\n for file in file_list:\r\n a = Image.open(file)\r\n a = a.filter(ImageFilter.GaussianBlur(radius = 2))\r\n a = a.filter(ImageFilter.GaussianBlur(radius = 4))\r\n a = a.filter(ImageFilter.GaussianBlur(radius = 10))\r\n a = ImageOps.posterize(a, 4)\r\n b = np.asarray(a).copy() # Returns a view, not the array!! Need a copy to assign and play with it.\r\n b[b[:, :, 1] < 100] = 0\r\n im = Image.fromarray(np.uint8(b))\r\n os.chdir(path_sketch)\r\n im.save(file)\r\n os.chdir(path_in)\r\n# print(b.shape)\r\n \r\n \r\n \r\n return None",
"def dumpTiling(tiling, filename, fov=None, path = None):\n\ttiling.setTileDimension([fov,fov])\n\tdt=displayTiling(tiling, dim=2048, fov=fov)\n\tpath = (path if path else defaultPath)\n\twrite(dt.getImage(),filename, path)",
"def process_file(input_filename, lines):\n\n\t# TODO: this function should be made into two functions. One tha processes\n\t# the file and generates all of the data structures and one that calls all\n\t# of the backend specific functions that outputs the code.\n\n\t# open the output files\n#\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\" or g.OUTPUT == \"afl\" or g.OUTPUT == \"ocr\":\n#\t\tg.header_file_name = \"pil.h\"\n#\telif g.OUTPUT == \"swarm\":\n#\t\tg.header_file_name = \"pil.swh\"\n#\telse:\n#\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\tg.header_file_name = \"pil.h\"\n\n\tg.h_file = open(g.header_file_name, \"w\")\n\tg.h_file.write(\"#ifndef PIL_H\\n\")\n\tg.h_file.write(\"#define PIL_H\\n\")\n\tg.h_file.write(\"\\n\")\n\n\tg.h_file.write(\"#include <stdint.h>\\n\")\n\tg.h_file.write(\"\\n\")\n\n\tg.h_file.write(\"#ifdef PIL2OCR\\n\")\n\tg.h_file.write(\"#include \\\"ocr.h\\\"\\n\")\n\tg.h_file.write(\"typedef ocrGuid_t guid_t;\\n\")\n\tg.h_file.write(\"#else\\n\")\n\tg.h_file.write(\"#define NULL_GUID NULL\\n\")\n\tg.h_file.write(\"typedef void* guid_t;\\n\")\n\tg.h_file.write(\"#endif // PIL2OCR\\n\")\n\tg.h_file.write(\"\\n\")\n\n\tg.h_file.write(\"typedef struct {\\n\")\n\tg.h_file.write(\"\\tguid_t guid;\\n\")\n\tg.h_file.write(\"\\tvoid *ptr;\\n\")\n\tg.h_file.write(\"} gpp_t;\\n\")\n\tg.h_file.write(\"\\n\")\n\n#\tg.h_file.write(\"struct _pil_communication_buffers {\\n\")\n#\tg.h_file.write(\"\\tvoid *ptr;\\n\")\n#\tg.h_file.write(\"\\tint volatile full;\\n\")\n#\tg.h_file.write(\"\\tsize_t size;\\n\")\n#\tg.h_file.write(\"};\\n\")\n#\tg.h_file.write(\"struct _pil_communication_buffers **_pil_send_buf;\\n\")\n#\tg.h_file.write(\"\\n\")\n\n\t# data structure to store nodes we encounter in so that we can process them\n\t# all together later\n\tnodes = []\n\n\t# 1) print the header\n\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n\t\tpil2c.process_header()\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.process_header(input_filename)\n\telif g.OUTPUT == \"afl\":\n\t\tpil2afl.process_header()\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.process_header()\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\t# 2) process the file\n\tlineno = -1\n\twhile (lineno < len(lines)-1):\n\n\t\tlineno += 1\n\t\tl = lines[lineno]\n\n\t\t#line = re.split('\\s+', l)\n\t\tl = strip(l)\n\n\t\t# the line is empty\n\t\t#e = re.match('\\B', l)\n\t\t#if e:\n\t\tif l == '':\n\t\t\tprint l\n\t\t\tcontinue\n\n\t\t# the line is a comment\n\t\tc = re.match('#(.*)', l)\n\t\t# c.group(1) - the comment text\n\t\tif c:\n\t\t\td = re.match('#ifdef(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#ifndef(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#endif(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#else(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#include(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#undef(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\t\tcontinue\n\t\t\td = re.match('#define(.*)', l)\n\t\t\tif d:\n\t\t\t\tprint l\n\t\t\telse:\n\t\t\t\tprint \"//\" + c.group(1)\n\t\t\tcontinue\n\n\t\tc = re.match('//(.*)', l)\n\t\t# c.group(1) - the comment text\n\t\tif c:\n\t\t\tprint \"//\" + c.group(1)\n\t\t\tcontinue\n\n\t\t# the line is a C style block comment on a single line\n\t\t# TODO: still don't account for multi-line block comments\n\t\tc = re.match('/\\*(.*)\\*/', l)\n\t\t# c.group(1) - the comment text\n\t\tif 
c:\n\t\t\tprint \"/*\" + c.group(1) + \"*/\"\n\t\t\tcontinue\n\n\t\t# TODO: make a function to handle this\n\t\t# the line is a variable\n\t\tv = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\s*(=\\s*(NULL)\\s*){0,1});', l) # NULL initialization\n\t\t#v = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\s*);', l)\n\t\t# v.group(1) - the whole statement\n\t\t# v.group(2) - the variable type\n\t\t# v.group(3) - the variable modifier\n\t\t# v.group(4) - the variable name\n\t\t# v.group(5) - the assignment\n\t\t# v.group(6) - the variable initialization\n\t\tva = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\(\\*(\\w+)\\))\\[(\\w+)\\]s*);', l)\n\t\t# va.group(1) - the whole statement\n\t\t# va.group(2) - the variable type\n\t\t# va.group(3) - the variable modifier\n\t\t# va.group(4) - the variable name as (*name)\n\t\t# va.group(5) - the variable name\n\t\t# va.group(6) - the variable size\n\n\t\tvas1 = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\[(\\w+)\\]s*);', l)\n\t\t# va.group(1) - the whole statement\n\t\t# va.group(2) - the variable type\n\t\t# va.group(3) - the variable modifier\n\t\t# va.group(4) - the variable name\n\t\t# va.group(5) - the variable size\n\n\t\tvas2 = re.match('(\\s*(\\w+)\\s*([*&]*)\\s*(\\w+)\\[(\\w+)\\]\\[(\\w+)\\]s*);', l)\n\t\t# va.group(1) - the whole statement\n\t\t# va.group(2) - the variable type\n\t\t# va.group(3) - the variable modifier\n\t\t# va.group(4) - the variable name\n\t\t# va.group(5) - the variable size\n\t\t# va.group(6) - the variable size\n\n\t\tif v:\n\t\t\tvar_type = v.group(2)\n\t\t\tvar_modifier = v.group(3)\n\t\t\tvar_name = v.group(4)\n\t\t\tvar_init = v.group(6)\n\t\t\tg.variables[var_name] = var_type\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tif var_init:\n\t\t\t\t#warning(\"Caught a NULL initialized pointer that won't be NULL initialized: '\" + var_name + \"'\")\n\t\t\t\tg.initial_values[var_name] = var_init\n\t\t\tcontinue\n\t\tif va:\n\t\t\tvar_type = va.group(2)\n\t\t\tvar_modifier = va.group(3)\n\t\t\tvar_sname = va.group(4)\n\t\t\tvar_name = va.group(5)\n\t\t\tvar_size = va.group(6)\n\t\t\tg.variables[var_name] = var_type\n\t\t\tg.arrays[var_name] = var_size\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tcontinue\n\t\tif vas1:\n\t\t\tvar_type = vas1.group(2)\n\t\t\tvar_modifier = vas1.group(3)\n\t\t\tvar_name = vas1.group(4)\n\t\t\tvar_sizex = vas1.group(5)\n\t\t\tdebug(4, \"VAS1 match: \" + var_name + \"\\n\")\n\t\t\tg.variables[var_name] = var_type\n\t\t\tg.arrays[var_name] = [var_sizex]\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tcontinue\n\t\tif vas2:\n\t\t\tvar_type = vas2.group(2)\n\t\t\tvar_modifier = vas2.group(3)\n\t\t\tvar_name = vas2.group(4)\n\t\t\tvar_sizex = vas2.group(5)\n\t\t\tvar_sizey = vas2.group(6)\n\t\t\tdebug(4, \"VAS2 match: \" + var_name + \"\\n\")\n\t\t\tg.variables[var_name] = var_type\n\t\t\tg.arrays[var_name] = [var_sizex, var_sizey]\n\t\t\tif var_modifier:\n\t\t\t\tg.variables[var_name] += \" \" + var_modifier\n\t\t\tcontinue\n\n\t\t# the line is a function declaration\n\t\tf = re.match('void\\s+\\w+\\s*\\(', l)\n\t\tif f:\n\t\t\t#debug(3, v.group(0))\n\t\t\tlineno = process_function(lines, lineno)\n\t\t\tcontinue\n\n\t\tpn = re.match('_pil_node\\s+(\\w+).*', l)\n\t\tif pn:\n\t\t\tlabel = pn.group(1);\n\t\t\tlineno = process_function(lines, lineno)\n\t\t\tcontinue\n\n\t\tpnl = re.match('_pil_nodelet\\s+(\\w+).*', l)\n\t\tif pnl:\n\t\t\tlabel = pnl.group(1);\n\t\t\tlineno = 
process_function(lines, lineno)\n\t\t\tcontinue\n\n\t\t# the line is a pil_send\n\t\ts = re.match(g.NW_SEND_RE, l)\n\t\tif s:\n\t\t\tg.nw_calls.append(process_nw_call(lines, lineno))\n\t\t\tcontinue\n\n\t\t# the line is a pil_send\n\t\tr = re.match(g.NW_RECV_RE, l)\n\t\tif r:\n\t\t\tg.nw_calls.append(process_nw_call(lines, lineno))\n\t\t\tcontinue\n\n\t\t# the line is a node\n\t\tm = re.match(g.MAP_RE, l)\n\t\tif m:\n\t\t\t# add the node to the nodes list for later processing\n\t\t\tnodes.append(process_node(lines, lineno))\n\t\t\tcontinue\n\n\t\t# if we have made it this far, the line is invalid\n\t\twarning(\"invalid line: \" + l)\n\n\t# 3) create the global data structure\n\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n\t\tpil2c.process_variables()\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.process_variables()\n\telif g.OUTPUT == \"afl\":\n\t\tpil2afl.process_variables()\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.process_variables()\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\t# 4) now that the globals are available, we can output pil_main and the body functions\n\tfor prototype in g.prototypes:\n\t\tprint prototype\n\n#\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n#\t\tpil2c.print_main_func()\n#\t\tpil2c.print_funcs()\n#\telif g.OUTPUT == \"swarm\":\n#\t\tpil2swarm.print_main_func()\n#\t\tpil2swarm.print_funcs()\n#\telif g.OUTPUT == \"afl\":\n#\t\tpil2afl.print_main_func()\n#\t\tpil2afl.print_funcs()\n#\telif g.OUTPUT == \"ocr\":\n#\t\tpil2ocr.print_main_func()\n#\t\tpil2ocr.print_funcs()\n#\telse:\n#\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\t# 5) process all of the nodes\n\tsplit_nodes = handle_nodes(nodes)\n\n\t# 6) output pil_enter()\n\tprocess_pil_enter(split_nodes)\n\n\t# 7) print the main function\n\tif g.OUTPUT == \"C\" or g.OUTPUT == \"omp\":\n\t\tpil2c.process_main()\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.process_main()\n\telif g.OUTPUT == \"afl\":\n\t\tpil2afl.process_main()\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.process_main(split_nodes)\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\tg.h_file.write(\"#endif // PIL_H\\n\")\n\tg.h_file.close()",
"def output_pattern(self, pattern):\n self._clear()\n for i, line in enumerate(pattern.split(\"\\n\")):\n # for each row in the given pattern.\n for j, c in enumerate(line):\n # for each cell inside that row\n if c == \"*\":\n # If the cell is alive, set the coordinates of that cell on the display to be drawn\n self._set(j, i)\n # Draw the new configuration to the display.\n self._draw()",
"def output_pattern(self, pattern):\n self._clear()\n for i, line in enumerate(pattern.split(\"\\n\")):\n # for each row in the pattern\n for j, c in enumerate(line):\n # for each cell in that row\n if c == \"*\":\n # If the cell is alive, set that cell's coordinates, ready to be drawn.\n self._set(j, i)\n # Draws to the grid, turning all of the set cells on.\n self._draw()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
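A hypothetical call of the generate_lines document above, under the same assumption that its module (NumPy as np and the save_image helper) is importable; the dimensions, DPI and line width are illustrative.

# 0.5 mm wide vertical black/white stripes on a 100 x 50 mm target at 600 DPI.
im_v = generate_lines(height=50, width=100, dpi=600, line_width=0.5,
                      path='lines_vertical.tiff', orientation='vertical')
# Alternatively, let the function derive the line width from a line count:
im_h = generate_lines(height=50, width=100, dpi=600, line_width=None,
                      path='lines_horizontal.tiff', orientation='horizontal',
                      N_lines=10)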
Generates a checkerboard pattern and saves it to specified path as JPEG or TIFF, configured for printing. | def generate_checkerboard(height, width, dpi, path, line_width=1, N_rows=None):
ppmm = dpi / 25.4
w = int(np.round((width * ppmm)))
h = int(np.round((height * ppmm)))
if N_rows is not None:
line_width = height // (2*N_rows)
D = int(np.round(line_width * ppmm))
im = np.ones((h, w), dtype=np.uint8)
black_id = np.hstack( [np.clip(np.arange(i*D, i*D+D), 0, h-1) for i in range(0, h//D+1, 2)] )
im[black_id] = 0
# invert values in every other column
invert_id = np.hstack( [np.clip(np.arange(i*D, i*D+D), 0, w-1) for i in range(0, w//D+1, 2)] )
im[:, invert_id] = 1 - im[:, invert_id]
im = im * 255
image_comment = f'checkerboard\nline width: {line_width}\n DPI: {dpi}'
save_image(path, im, dpi, comment=image_comment)
print(f'Image saved to {path}.')
return im | [
"def generate_png_mask(tiff_path):\n color = (255, 255, 0)\n\n def convert_to_color(data):\n print(\"converting...\")\n for i in range(0, len(data)):\n for j in range(0, len(data[i])):\n if data[i][j][3] != 0:\n data[i][j][0], data[i][j][1], data[i][j][2] = color\n data[i][j][3] = 100 # Leave Alpha band\n print(\"done.\")\n return data\n\n tiff = io.imread(tiff_path)\n png = convert_to_color(tiff)\n # Save to file\n png_out_path = \"/\".join(tiff_path.split(\"/\")[:-1]) + \"/png/\"\n # if not os.path.exists(png_out_path):\n # os.makedirs(png_out_path)\n imsave(png_out_path, png, format='png')\n return png_out_path",
"def export_pattern(points, width, height, filename):\n\n tri = Delaunay(points)\n num_points = points.shape[0]\n num_tris = tri.simplices.shape[0]\n\n with open(filename, \"w\") as f:\n f.write(\"%d %d\\n\"%(width, height))\n f.write(\"%d %d\\n\"%(num_points, num_tris))\n for i in range(num_points):\n f.write(\"%f %f\\n\"%(points[i,0], points[i,1]))\n for i in range(num_tris):\n f.write(\"%d %d %d\\n\"%(tri.simplices[i,0], tri.simplices[i,1], tri.simplices[i,2]))",
"def write_labeled_frames(self, root):\n write_dir = Path(root) / f\"{self.video_name}\"\n os.makedirs(write_dir, exist_ok=True)\n for frame, i in self.frames:\n i_str = str(i).zfill(5)\n frame.save(write_dir / f\"{i_str}.png\",\"PNG\")\n return True",
"def dump_trial_as_jpg(trial_array, file_path):\n import imageio\n num_frames = trial_array.shape[2]\n for i in range(num_frames):\n try:\n imageio.imwrite(file_path + 'frame-' + str(i) + '.jpg',\n trial_array[:, :, i].transpose().astype(int))\n except ValueError:\n imageio.imwrite(file_path + 'frame-' + str(i) + '.jpg',\n trial_array[:, :, i])",
"def to_png(board, square_size=DEFAULT_SQUARE_SIZE):\n png_size = (board.size + 1)*square_size\n writer = png.Writer(png_size, png_size, greyscale=True, bitdepth=1)\n\n lines = board.scale(square_size)\n board.frame_num += 1\n frame_name = '{}.png'.format(board.frame_num)\n with open(frame_name, 'wb') as frame:\n writer.write(frame, lines)",
"def dumpTiling(tiling, filename, fov=None, path = None):\n\ttiling.setTileDimension([fov,fov])\n\tdt=displayTiling(tiling, dim=2048, fov=fov)\n\tpath = (path if path else defaultPath)\n\twrite(dt.getImage(),filename, path)",
"def write_png(self, fname):\n im = self.make_image()\n _png.write_png(im, fname)",
"def make_tiles(self, x_size, y_size, x_step, y_step, output_path, verbose=True):\n\n fig, ax = self.make_figure()\n x = self.doc.header['$EXTMIN'][0]\n y = self.doc.header['$EXTMIN'][1]\n\n # Slide until the bottom edge of the window is above the top of\n # the elements in the doc\n while y < self.doc.header['$EXTMAX'][1]:\n\n # Get window into document\n xlim = (x, x + x_size)\n ylim = (y, y + y_size)\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n # to check if image is empty\n # import cv2\n # im = cv2.imread('2.jpg')\n # if im is None:\n # Print(\"Image is empty\")\n\n # to get percentage of empty space in image\n # from PIL import Image\n # image = Image.open(\"pepper.png\")\n # bg = image.getpixel((0,0))\n # width, height = image.size\n # bg_count = next(n for n,c in image.getcolors(width*height) if c==bg)\n # img_count = width*height - bg_count\n # img_percent = img_count*100.0/width/height\n\n filename = \"%s_x_%s_%s_y_%s_%s.png\" % (\"tile_\", xlim[0], xlim[1], ylim[0], ylim[1])\n if verbose:\n print('Writing: %s' % filename)\n fig.savefig(os.path.join(output_path, filename), dpi=self.dpi)\n\n # Step\n x += x_step\n if x > self.doc.header['$EXTMAX'][0]:\n x = self.doc.header['$EXTMIN'][0]\n y += y_step",
"def generate_and_save(height, width, dpi, speckle_diameter, path, size_randomness=0.5, \n position_randomness=0.5, speckle_blur=1, grid_step=1.2):\n ppmm = dpi / 25.4\n w = int(np.round((width * ppmm)))\n h = int(np.round((height * ppmm)))\n D = np.ceil(speckle_diameter*ppmm)\n\n im = speckle_image((h, w), D, size_randomness, position_randomness, speckle_blur, grid_step)\n\n if path is None:\n path = f'speckle_{width}x{height}mm_D{speckle_diameter}mm_{dpi}DPI.tiff'\n\n # Add exif comment to image:\n image_comment = f'height: {height} mm\\nwidth: {width} mm\\ndpi: {dpi}\\nD: {speckle_diameter} mm\\n'\\\n f'size_randomness: {size_randomness}\\nposition_randomness: {position_randomness}\\n'\\\n f'speckle_blur: {speckle_blur}\\ngrid_step: {grid_step}'\n \n save_image(path, im, dpi, comment=image_comment)\n print(f'Image saved to {path}.')\n return im",
"def generate_image(self) -> None:",
"def save_pattern(self):\n _dir = os.path.join(PATTERN_DIR, self.get_name()+'.pattern')\n save(self.pattern, _dir)",
"def graymatrix2png(img_matrix, path):\n data = img_matrix.getRowSpace()\n image2file(data, path)",
"def to_png(dfa: DFA, filename: str, **kwargs):\n\n tmp_file = filename + \".tmp\"\n with open(tmp_file, \"w\") as file:\n file.write(to_dot(dfa, **kwargs))\n\n call((\"dot -Tpng \" + tmp_file + \" -o \" + filename).split(\" \"))\n call((\"rm \" + tmp_file).split(\" \"))",
"def save_chessboard_image(self, frame, corners) -> str:\n # draw chessboard on image\n chessboard_image = cv2.drawChessboardCorners(frame, CHESSBOARD_SIZE, corners, True)\n\n # save file\n file_name = str(int(time())) + '.jpg'\n IMAGE_PATH.mkdir(exist_ok=True)\n cv2.imwrite(str(IMAGE_PATH / file_name), chessboard_image)\n\n return file_name",
"def make_sketches(path_in, path_sketch):\r\n \r\n os.chdir(path_in)\r\n file_list = glob.glob(\"*\")\r\n file_list.sort()\r\n# os.chdir(path_sketch)\r\n\r\n for file in file_list:\r\n a = Image.open(file)\r\n a = a.filter(ImageFilter.GaussianBlur(radius = 2))\r\n a = a.filter(ImageFilter.GaussianBlur(radius = 4))\r\n a = a.filter(ImageFilter.GaussianBlur(radius = 10))\r\n a = ImageOps.posterize(a, 4)\r\n b = np.asarray(a).copy() # Returns a view, not the array!! Need a copy to assign and play with it.\r\n b[b[:, :, 1] < 100] = 0\r\n im = Image.fromarray(np.uint8(b))\r\n os.chdir(path_sketch)\r\n im.save(file)\r\n os.chdir(path_in)\r\n# print(b.shape)\r\n \r\n \r\n \r\n return None",
"def _generate_frame(drive_date, drive_number, frame):\n input_path = paths.rgb.external_frame(drive_date, drive_number, frame)\n image = skimage.io.imread(input_path)\n\n results = mrcnn.get_results(image)\n\n output_path = paths.mask.mrcnn_pickle(drive_date, drive_number, frame)\n output_path.parent.mkdir(exist_ok=True, parents=True) # ensure directory exists\n\n with open(output_path, 'wb') as handle:\n pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)",
"def OutputImage(self, session):\n session.handler.send_header(\"Content-type\", \"image/png\")\n session.handler.end_headers()\n self.canvas.save(file=session.handler.wfile, format='png')",
"def save_image(self,path):\n image = PIL.Image.new(\"RGB\",(self.width,self.height))\n image.putdata([piedit.colors.hex_to_rgb(p) for p in self.pixels])\n image.save(path, \"PNG\")\n self.message_handler.handle_message(\"FILE_SAVED\")\n self.set_current_file(path)\n self.set_changes_made(False)\n self.set_window_title(os.path.basename(path))",
"def make_image(sim, scenario_file, scenario_fn, output_path='./img.png'):\n scenario = sim.getScenario()\n img = scenario_fn(scenario)\n dpi = 100\n height, width, depth = img.shape\n figsize = width / dpi, height / dpi\n plt.figure(figsize=figsize, dpi=dpi)\n plt.axis('off')\n plt.imshow(img)\n plt.savefig(output_path, bbox_inches='tight', pad_inches=0)\n print('>', output_path)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
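A hypothetical call of the generate_checkerboard document above, again assuming its module is importable; the 10-row board size and file name are example values.

# 50 x 50 mm checkerboard with 10 rows of squares at 600 DPI (example values).
im = generate_checkerboard(height=50, width=50, dpi=600,
                           path='checkerboard_50mm.tiff', N_rows=10)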
Returns the first matching line in a list of lines. See match() | def matching_line(lines, keyword):
    for line in lines:
        matching = match(line, keyword)
        if matching is not None:
            return matching
    return None
"def find_first_line_matching(lines, text, start=0):\r\n for i in range(start, len(lines)):\r\n line = lines[i].strip()\r\n if line == text:\r\n return i\r\n return -1",
"def get_line(self, line_name):\n for line in self.line_list:\n if line.name == line_name:\n return line\n return None",
"def first_regex_match(l, pat):\n\n if isinstance(pat, str):\n pat = re.compile(pat)\n matches = [i for i in l if pat.search(i)]\n return matches[0] if matches else None",
"def find_first_line_containing(lines, text, start=0):\r\n for i in range(start, len(lines)):\r\n line = lines[i].strip()\r\n if text in line:\r\n return i\r\n return -1",
"def find(self, line):\r\n for anEntry in self.db.get_ncf_entries():\r\n if anEntry.isMatch(line):\r\n return anEntry\r\n \r\n return None",
"def match(self, line, pattern):\n m = pattern.match(line)\n return m.groups()[0].strip() if m else None",
"def _get_line(self, regex):\n return self._match(regex).group(1)",
"def search_re_lines(self, regexp):\n rec = re.compile(regexp, re.IGNORECASE)\n for l in self.lines:\n rem = rec.match(l)\n if rem:\n return rem.group(1)\n else:\n return ''",
"def first_substr_match(l, substr):\n\n matches = [i for i in l if substr in i]\n return matches[0] if matches else None",
"def _match(self, regex):\n cregex = re.compile(regex)\n for line in self.content.splitlines():\n match = cregex.match(line)\n if match:\n return match\n raise Exception('No \"{0}\" line in {1}.cpp'.format(\n regex_to_error_msg(regex),\n self.name\n ))",
"def _get_line_number(file_lines, pattern):\n return next(i for i, line in enumerate(file_lines) if pattern in line) + 1",
"def find_line(file_path, search_str):\n found = None\n stream_read = open(file_path, 'r')\n line = stream_read.readline()\n # Loop until EOF\n while line != '':\n # Search for string in line\n index = re.findall(search_str, line)\n if index > 0:\n return line\n # Read next line\n line = stream_read.readline()\n # Close the files\n stream_read.close()\n return found",
"def first_match(predicate, list):\n for item in list:\n val = predicate(item)\n if val is not None:\n return val\n \n return None",
"def line_search(self, regex, lineno=None):\n return regex.search(self.line_text(lineno))",
"def return_match(self, line, regexp):\n parser = re.compile(regexp)\n match = parser.search(line)\n return match",
"def match_line(self, pattern, text) -> str:\n self.assertRegex(text, pattern)\n (ret,) = [line for line in text.split('\\n') if re.match(pattern, line)]\n return ret",
"def _FindFileLine(outbuffer, line, fname, regex):\n match = regex.findall(outbuffer.GetLine(line))\n ifile = None\n if len(match):\n ifile = match[0][0]\n try:\n line = max(int(match[0][1]) - 1, 0)\n except (IndexError, TypeError):\n line = 0\n\n # If not an absolute path then the error is relative to the\n # script that produced this error message.\n if ifile is not None and not os.path.isabs(ifile):\n dname = os.path.split(fname)[0]\n ifile = os.path.join(dname, ifile)\n\n return (ifile, line)",
"def __find_first_error_line(log_lines: typing.List[str]) -> typing.Optional[int]:\n\n index = None\n for index in range(len(log_lines) - 1, -1, -1):\n if (\n ('ERROR' in log_lines[index])\n or\n ('Collected exception:' in log_lines[index])\n ):\n break\n else:\n # we looked all the way back but never found an error line\n return None\n return index",
"def pattern_finder(line: str) -> Optional[str]:\n for pattern, regex in zip(['ARGUMENT', 'NO_ARGUMENT', 'ZERO_1', 'ZERO_2'], regex_list):\n if re.search(regex, line):\n return pattern"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
If the first part of line (modulo blanks) matches keyword, returns the end of that line. Otherwise returns None | def match(line,keyword):
line=line.lstrip()
length=len(keyword)
if line[:length] == keyword:
return line[length:]
else:
return None | [
"def matching_line(lines, keyword):\r\n for line in lines:\r\n matching=match(line,keyword)\r\n if matching!=None:\r\n return matching\r\n return None",
"def find_line(content: str, keyword: str) -> str:\n for line in content.splitlines():\n if keyword.lower() in line.lower():\n return line\n return \"\"",
"def match(self, line, pattern):\n m = pattern.match(line)\n return m.groups()[0].strip() if m else None",
"def _next_nonempty_line(self):\n line = \"\"\n while not line:\n line = self._next_line()\n return line",
"def findAndReturnRestOfLine(sHaystack, sNeedle):\n if sHaystack is None:\n return None;\n off = sHaystack.find(sNeedle);\n if off < 0:\n return None;\n off += len(sNeedle)\n offEol = sHaystack.find('\\n', off);\n if offEol < 0:\n offEol = len(sHaystack);\n return sHaystack[off:offEol]",
"def readline(self) -> Optional[str]:\n # N-Triples lines end in either CRLF, CR, or LF\n # Therefore, we can't just use f.readline()\n if not self.buffer:\n # type error: Item \"None\" of \"Union[TextIO, StreamReader, None]\" has no attribute \"read\"\n buffer = self.file.read(bufsiz) # type: ignore[union-attr]\n if not buffer:\n return None\n self.buffer = buffer\n\n while True:\n m = r_line.match(self.buffer)\n if m: # the more likely prospect\n self.buffer = self.buffer[m.end() :]\n return m.group(1)\n else:\n # type error: Item \"None\" of \"Union[TextIO, StreamReader, None]\" has no attribute \"read\"\n buffer = self.file.read(bufsiz) # type: ignore[union-attr]\n if not buffer and not self.buffer.isspace():\n # Last line does not need to be terminated with a newline\n buffer += \"\\n\"\n elif not buffer:\n return None\n self.buffer += buffer",
"def test_ends_at(line):\n return TEST_END_RE.match(line)",
"def testcase_ends_at(line):\n return TESTCASE_END_RE.match(line)",
"def _is_empty_line(self, line):\r\n return re.match('\\s*$', line) is not None",
"def find_word(f, w):\n while (True):\n line = f.readline()\n if line == \"\":\n print(\"Error: end of file reached in find_word\")\n sys.exit()\n fields = line.split()\n if (len(fields) > 0 and fields[1] == w):\n break\n return line",
"def find_last_line_matching(lines, text, end):\r\n for i in range(end, 0, -1):\r\n line = lines[i].strip()\r\n if line == text:\r\n return i\r\n return -1",
"def find_last_line_containing(lines, text, end):\r\n for i in range(end, 0, -1):\r\n line = lines[i].strip()\r\n if text in line:\r\n return i\r\n return -1",
"def get_next_whole_line():\n line = \"\"\n\n # Skip blank lines and comment lines\n while True:\n line = f.readline()\n if line == \"\":\n return None\n\n line = line.strip()\n\n # Remove comments\n if \"#\" in line:\n comment_start = line.index(\"#\")\n line = line[:comment_start]\n\n if line == \"\":\n continue\n\n break\n\n # Fully build the line, ignoring comments\n while \"#\" not in line[:-1] and line[-1] == \"\\\\\":\n line = line[:-1] + \" \" + f.readline().strip()\n\n # Remove comments\n if \"#\" in line:\n comment_start = line.index(\"#\")\n line = line[:comment_start]\n\n return line",
"def isEnd(self, line):\r\n return self.startsWithAttribute(line)",
"def does_end_token_exist(self) -> bool:",
"def next_line_start_or_here(text, pos):\n\tif pos == 0 or (pos-1 < len(text) and text[pos-1] == \"\\n\"):\n\t\treturn pos\n\treturn next_line_start(text, pos)",
"def cut_reserved_word(self, line):\r\n word = self.read_identifier(line)\r\n if word in RESERVED_WORDS:\r\n return word, line[len(word):]\r\n else:\r\n return None",
"def find(self, line):\r\n for anEntry in self.db.get_ncf_entries():\r\n if anEntry.isMatch(line):\r\n return anEntry\r\n \r\n return None",
"def fpm_parse(keyword):\n if keyword not in ('FPMA', 'FPMB'):\n return False\n else:\n return keyword[-1]",
"def is_end_marker(line):\n assert False, \"Unimplemented!\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
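Note: the two records above, matching_line and its helper match, form a small keyword-based line scanner. Below is a minimal, self-contained usage sketch (not part of the dataset); the sample lines and keywords are hypothetical and only illustrate the call pattern.

def match(line, keyword):
    # Return the remainder of the line if it starts with keyword (leading blanks ignored), else None.
    line = line.lstrip()
    if line[:len(keyword)] == keyword:
        return line[len(keyword):]
    return None

def matching_line(lines, keyword):
    # Return the tail of the first line starting with keyword, or None if nothing matches.
    for line in lines:
        tail = match(line, keyword)
        if tail is not None:
            return tail
    return None

sample = ['  ESSID:"office"', '  Quality=70/70', '  Channel:11']  # hypothetical input lines
print(matching_line(sample, 'Quality='))  # -> '70/70'
print(matching_line(sample, 'Mode:'))     # -> None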
Applies the rules to the bunch of text describing a cell and returns the corresponding dictionary | def parse_cell(cell):
parsed_cell={}
for key in rules:
rule=rules[key]
parsed_cell.update({key:rule(cell)})
return parsed_cell | [
"def extract_dictionary(df):\n word_dict = {}\n \n # TODO: Implement this function\n index=0\n for text in df[\"text\"]:\n for p in string.punctuation:\n text=text.replace(p,\" \")\n text=text.lower()\n spl=text.split()\n for word in spl:\n if word not in word_dict:\n word_dict[word]=index\n index=index+1\n return word_dict",
"def rules(terms_dataframe, text_dataframe):\n new_terms = []\n for terms in terms_dataframe['lemma']:\n # Get the same structure of terms as in text dataframe\n tmp = ' '.join(terms.split('-'))\n new_terms.append(tmp.split(' '))\n for i, token in enumerate(text_dataframe['lemma']):\n for j, t in enumerate(new_terms):\n # Case 1: term of size 3 seperated by dashes (ex: text-to-speech) and followed by 1, 2 Nouns or 1 Adj and 1 Noun is a term \n if len(t) == 3 and len(text_dataframe['lemma']) >= i + 5:\n if token == t[0] and text_dataframe['lemma'][i + 1] == '-' and (\n text_dataframe['lemma'][i + 2] == 'to' or text_dataframe['lemma'][i + 2] == 'of' or\n text_dataframe['lemma'][i + 2] == 'by' or text_dataframe['pattern'][i + 2] == 'N') and \\\n text_dataframe['lemma'][i + 3] == '-' and text_dataframe['lemma'][i + 4] == t[2]:\n # followed by 2 nouns (ex: text-to-speech modal synthesis)\n if (text_dataframe['pattern'][i + 5] == 'N' or text_dataframe['pattern'][i + 4] == 'A') and \\\n text_dataframe['pattern'][i + 6] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 6] = text_dataframe['tokens'][i + 6] + ']'\n elif text_dataframe['pattern'][i + 5] == 'N' or text_dataframe['pattern'][i + 5] == 'A':\n # followed by 1 noun (ex: text-to-speech system)\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 5] = text_dataframe['tokens'][i + 5] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 4] = text_dataframe['tokens'][i + 4] + ']'\n # Case 2: term of size 2 separated by dashes (ex: encoder-decoder) and followed by 0,1,2 or 3 nouns is a term\n if len(t) >= 2 and len(text_dataframe['lemma']) >= i + 3 and i != 0:\n if token == 'front' and text_dataframe['lemma'][i + 1] == '-' and text_dataframe['lemma'][\n i + 2] == 'end':\n if text_dataframe['pattern'][i - 1] == 'N':\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n if token == t[0] and text_dataframe['lemma'][i + 1] == '-' and text_dataframe['lemma'][i + 2] == t[1]:\n # followed by 3 nouns (ex: HMM-based generation synthesis approach)\n if text_dataframe['pattern'][i + 3] == 'N' and text_dataframe['pattern'][i + 4] == 'N' and \\\n text_dataframe['pattern'][i + 5] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 5] = text_dataframe['tokens'][i + 5] + ']'\n # followed by 2 nouns (ex: HMM-based generation synthesis)\n elif (text_dataframe['pattern'][i + 3] == 'N' or text_dataframe['pattern'][i + 3] == 'A' or\n text_dataframe['pattern'][i + 3] == 'V') and text_dataframe['pattern'][i + 4] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 4] = text_dataframe['tokens'][i + 4] + ']'\n # followed by 1 noun (ex: cross-lingual adaptation)\n elif text_dataframe['pattern'][i + 3] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 3] = text_dataframe['tokens'][i + 3] + ']'\n # followed by nothing (ex: mel-spectrogram)\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n if (\n token == 'data' or token == 'voice' or token == 'datum' or token == 'speaker' or token == 'dataset' or token == 'database' or token == 'feature' or token == 'corpus') and i != 0 
and len(\n text_dataframe['lemma']) >= i + 1:\n if text_dataframe['pattern'][i - 1] == 'N' or text_dataframe['pattern'][i - 1] == 'A':\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1]\n text_dataframe['tokens'][i] = text_dataframe['tokens'][i] + ']'\n elif text_dataframe['pattern'][i + 1] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 1] = text_dataframe['tokens'][i + 1] + ']'\n if i != 0:\n if text_dataframe['lemma'][i - 1] in rule_adj and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1] + ']'\n elif i >= 3 and text_dataframe['lemma'][i - 1] in rule_adj and text_dataframe['lemma'][\n i - 3] == 'non' and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 3] = '[' + text_dataframe['tokens'][i - 3]\n text_dataframe['tokens'][i - 3] = text_dataframe['tokens'][i - 1] + ']'",
"def parse(self, text):\n rules = {}\n tokens = self.__parse_into_tokens(text)\n rules_unrefined = self.__get_rules_with_lazy_body(tokens)\n # make the first pass adding rules to dictionary\n self.__merge_duplicates(rules, rules_unrefined)\n return rules",
"def scores_with_pattern(self, text):\n # TODO: \":D\" is not matched\n words = self.find_all(text)\n scores = [self._dict[word] for word in words]\n return scores",
"def process_string(text):\n\n frequency = get_char_frequency(text)\n transition_matrix = get_transition_probability_matrix(text)\n chars = get_unique_chars(text)\n char_count = len(chars)\n\n frequency_list = [frequency[x] for x in frequency] # puts frequencies from a dict to a list\n H_X = 0 # H(X)\n for f in frequency_list:\n if(f>0):\n H_X += - f * np.log2(f)\n\n H_XbarY = 0 # H(X|Y)\n for i in range(char_count): # X\n for j in range(char_count): # Y\n P_XbarY = transition_matrix[j][i]\n if (P_XbarY != 0):\n H_XbarY += -frequency[chars[j]] * P_XbarY * np.log2(P_XbarY)\n # else: 0 * np.log2(0) = 0 (definition)\n\n H_XY = 0 # H(X,Y)\n for i in range(char_count): # X\n for j in range(char_count): # Y\n P_XY = frequency[chars[j]] * transition_matrix[j][i]\n if P_XY != 0:\n H_XY += - P_XY * np.log2(P_XY)\n # else: 0 * np.log2(0) = 0 (definition)\n\n I_XY = H_X - H_XbarY # I(X,Y)\n\n return { \"H_X\": H_X, \"H_XbarY\": H_XbarY, \"H_XY\": H_XY, \"I_XY\": I_XY, \"symbol_count\": char_count }",
"def transcripts_to_dict(df, topic_list):\n ted_dict = {}\n for topic in topic_list:\n # filter DataFrame to specific series and convert it to a list\n filter_string = 'is_' + str(topic)\n text_list = df.loc[(df[filter_string] == 1), 'transcript'].to_list()\n\n # call combine_transcripts function to return combined text\n combined_text = combine_transcripts(text_list)\n\n # add combined text to dict\n ted_dict[topic] = combined_text\n return ted_dict",
"def _strD1CellRules(self):\n\n cellRule = {'conds': {'cellModel': 'StrD1', 'cellType': 'StrD1'},\n 'secs': {}}\n\n cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}}\n cellRule['secs']['soma']['geom'] = {'diam': 5.642, 'L': 5.642, 'Ra': 1,\n 'nseg': 1}\n\n cellRule['secs']['soma']['mechs']['Str'] = {\n 'gmbar': (2.6e-3 - self.pd * 1.1e-3)}\n\n cellRule['secs']['soma']['vinit'] = random.gauss(-63.8, 5)\n cellRule['secs']['soma']['threshold'] = -10\n\n self.netParams.cellParams['StrD1'] = cellRule",
"def _rsCellRules(self):\n\n cellRule = {'conds': {'cellModel': 'CTX_RS', 'cellType': 'CTX_RS'},\n 'secs': {}} # cell rule dict\n\n cellRule['secs']['soma'] = {'geom': {}, 'pointps': {}}\n cellRule['secs']['soma']['geom'] = {'diam': 5.642, 'L': 5.642, 'Ra': 1,\n 'nseg': 1, 'cm': 1}\n\n cellRule['secs']['soma']['pointps']['Izhi'] = {'mod': 'Izhi2003b',\n 'a': 0.02, 'b': 0.2, 'c': -65, 'd': 8, 'f': 5, 'g': 140, 'thresh': 30} \n\n cellRule['secs']['soma']['vinit'] = -65\n cellRule['secs']['soma']['threshold'] = 30\n \n # add dict to list of cell params\n self.netParams.cellParams['CTX_RS'] = cellRule",
"def load_text(basepath: str, exclude_indices=False, exclude_keys=False) -> Mapping[int, Mapping[str, str]]:\n results = {}\n for ext_lang, lang in lang_map.items():\n data = load_schema(gmd.Gmd, f\"{basepath}_{ext_lang}.gmd\")\n for idx, value_obj in enumerate(data.items):\n if idx not in results and not exclude_indices:\n results[idx] = {}\n if value_obj.key not in results and not exclude_keys:\n results[value_obj.key] = {}\n\n value = value_obj.value\n\n # For german, treat - as a linebreak join if between lowercase characters\n if lang == 'de':\n value = re.sub(r\"(\\p{Ll})-( )*\\r?\\n( )*(\\p{Ll})\", r\"\\1\\4\", value)\n\n value = re.sub(r\"-()*\\r?\\n( )*\", \"-\", value)\n value = re.sub(r\"( )*\\r?\\n( )*\", \" \", value)\n value = re.sub(r\"( )?<ICON ALPHA>\", \" α\", value)\n value = re.sub(r\"( )?<ICON BETA>\", \" β\", value)\n value = re.sub(r\"( )?<ICON GAMMA>\", \" γ\", value)\n value = (value\n .replace(\"<STYL MOJI_YELLOW_DEFAULT>[1]</STYL>\", \"[1]\")\n .replace(\"<STYL MOJI_YELLOW_DEFAULT>[2]</STYL>\", \"[2]\")\n .replace(\"<STYL MOJI_YELLOW_DEFAULT>[3]</STYL>\", \"[3]\")\n .replace(\"<STYL MOJI_YELLOW_DEFAULT>\", \"\")\n .replace(\"<STYL MOJI_LIGHTBLUE_DEFAULT>\", \"\")\n .replace(\"</STYL>\", \"\")).strip()\n\n if not exclude_indices: results[idx][lang] = value\n if not exclude_keys: results[value_obj.key][lang] = value\n\n return results",
"def get_dict(dict_file_name):\n theme_words = pd.read_excel(dict_file_name)\n theme_word_dict = {}\n\n for column_name in list(theme_words.columns):\n result = []\n word_list = list(theme_words[column_name])\n for word in word_list:\n if word is np.nan:\n continue\n else:\n word_one_list = word.lower().strip().split(\"/\")\n result.extend(word_one_list)\n word_others = []\n for word_one_one in word_one_list:\n if word_one_one.find(\"#\"):\n word_others.append(word_one_one.replace(\"#\", ' '))\n word_others.append(word_one_one.replace(\"#\", ' and '))\n result.extend(word_others)\n\n theme_word_dict[column_name] = result\n\n # TODO: Drug Dictionary(从上面抄的,改了一下,所以不要觉得奇怪)\n drugs_dict = {}\n column_name = '药物(Chemicals and Drugs)'\n word_list = list(theme_words[column_name])\n for word in word_list:\n if word is np.nan:\n continue\n else:\n word_one_list = word.lower().strip().split(\"/\")\n drug_head = word_one_list[0]\n for drug_tail in word_one_list:\n drugs_dict[drug_tail] = drug_head\n word_others = []\n for word_one_one in word_one_list:\n if word_one_one.find(\"#\"):\n word_others.append(word_one_one.replace(\"#\", ' '))\n word_others.append(word_one_one.replace(\"#\", ' and '))\n for drug_tail in word_others:\n drugs_dict[drug_tail] = drug_head\n\n return theme_word_dict, drugs_dict",
"def gen_color_dict(self) -> collections.defaultdict:\n\n color_dict = collections.defaultdict(list)\n for line in self.data:\n left, right = line.split(\" contain \")\n container = clean_text(left)\n\n for contents in right.split(\",\"):\n\n if not self.include_counts:\n contained = clean_text(contents[2:])\n if self.inverted:\n color_dict[contained].append(container)\n else:\n color_dict[container].append(contained)\n\n else:\n contained = clean_text(contents)\n if self.inverted:\n color_dict[contained].append(container)\n else:\n color_dict[container].append(contained)\n\n return color_dict",
"def annotate(terms_dataframe, text_dataframe):\n rules(terms_dataframe, text_dataframe) # apply rules\n for i, token in enumerate(text_dataframe['lemma']):\n for term in terms_dataframe['lemma']:\n term = term.split(' ')\n # Case 1: if terms of length 4, we check if each word from text corresponds to each word in the term\n if len(term) == 4:\n term_1 = term[0]\n if token == term_1 and len(text_dataframe['lemma']) >= i + 4:\n if text_dataframe['lemma'][i + 1] == term[1] and text_dataframe['lemma'][i + 2] == term[2] and \\\n text_dataframe['lemma'][i + 3] == term[3]:\n if text_dataframe['lemma'][i + 4] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 4] = text_dataframe['tokens'][i + 4] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 3] = text_dataframe['tokens'][i + 3] + ']'\n # Case 2: terms of length 3\n elif len(term) == 3:\n term_1 = term[0]\n if token == term_1 and len(text_dataframe['lemma']) > i + 3:\n if text_dataframe['lemma'][i + 1] == term[1] and text_dataframe['lemma'][i + 2] == term[2]:\n if text_dataframe['lemma'][i + 3] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 3] = text_dataframe['tokens'][i + 3] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n # Case 3: terms of length 2\n elif len(term) == 2:\n if token == term[0] and len(text_dataframe['lemma']) > i + 2:\n if text_dataframe['lemma'][i + 1] == term[1]:\n if text_dataframe['lemma'][i + 2] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 1] = text_dataframe['tokens'][i + 1] + ']'\n # Case 4: term of length 1\n elif token == term[0] and i > 1 and text_dataframe['lemma'][i - 1] == 'of' and text_dataframe['lemma'][\n i - 2] == 'sequence':\n text_dataframe['tokens'][i - 2] = '[' + text_dataframe['tokens'][i - 2]\n text_dataframe['tokens'][i] = text_dataframe['tokens'][i] + ']'\n elif token == term[0] and len(term) == 1 and len(text_dataframe['lemma']) >= i + 2 and \\\n text_dataframe['lemma'][i + 1] == ')':\n if text_dataframe['lemma'][i + 2] in rule_4:\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i] + ']'\n elif token == term[0] and len(term) == 1 and len(text_dataframe['lemma']) >= i + 1:\n if text_dataframe['lemma'][i + 1] in rule_4:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 1] = text_dataframe['tokens'][i + 1] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i] + ']'\n if i != 0:\n if text_dataframe['lemma'][i - 1] in rule_adj and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1] + ']'\n elif i >= 3 and text_dataframe['lemma'][i - 1] in rule_adj and text_dataframe['lemma'][\n i - 3] == 'non' and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 3] = '[' + text_dataframe['tokens'][i - 3]\n text_dataframe['tokens'][i - 3] = text_dataframe['tokens'][i - 1] + ']'\n return text_dataframe",
"def theorize_text(s, classifier, data, dict_result = True):\n\n\tpredictions = classifier.decision_function([s]) #we want to know probabilities! this returns a list of lists of values\n\tguess_values = defaultdict()\n\t\n\t#populate dictionary with decisiion function per author\n\tfor index1, prediction in enumerate(predictions): #loop through predictions (f there are multiple )\n\t\tfor index2, value in enumerate(prediction): #loop through each guess and the probability\n\t\t\tguess_values[data.target_names[index2]] = value #save prediction to dictionary, getting name of author corresponding to index in prediction \n\tif dict_result == True:\n\t\treturn guess_values #return dictionary of guesses for the given string\n\telse:\n\t\toutput = \"\"\n\t\tfor author, value in guess_values.items():\n\t\t\toutput += author + \": \" + str(value)+\"\\n\\n\"\n\treturn output",
"def _strD2CellRules(self):\n\n cellRule = {'conds': {'cellModel': 'StrD2', 'cellType': 'StrD2'},\n 'secs': {}}\n\n cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}}\n cellRule['secs']['soma']['geom'] = {'diam': 5.642, 'L': 5.642, 'Ra': 1,\n 'nseg': 1}\n\n cellRule['secs']['soma']['mechs']['Str'] = {\n 'gmbar': (2.6e-3 - self.pd * 1.1e-3)}\n\n cellRule['secs']['soma']['vinit'] = random.gauss(-63.8, 5)\n cellRule['secs']['soma']['threshold'] = -10\n\n self.netParams.cellParams['StrD2'] = cellRule",
"def parse_rule( rule ):\n key, _sep, tail = rule.partition(' bags contain ')\n colors = RE_FIND_COUNTS.findall( tail )\n return key, dict(map(lambda tup: (tup[1],int(tup[0])), colors))",
"def construct_cell_map(self):\n\n # Below tuple is indicating (name of key in dictionary, row, column, section name in excel sheet, row header,\n # column header). Only the first three keys will be used. Rest are for reference\n row_col_tuple_list = [(\"cycles\",\n {'2019': (204, 4), '2013|2016': (200, 4)},\n 'Megacycle calculations',\n ('Active Mailbox CPU Requirements (Mcycles) / Pri-DC Server', 'Double Failure')),\n\n (\"num_servers_per_dag\",\n {'2019': (227, 2), '2013|2016': (228, 2)},\n 'Environment Configuration',\n ('Total Number of Servers / DAG', '/ Primary Datacentre')),\n\n (\"num_dag\",\n {'2019': (97, 2), '2013|2016': (95, 2)},\n 'Server Calculations',\n ('Total DAGs in the Environment', 'Value')),\n\n (\"read_percentage\",\n {'2019': (285, 2), '2013|2016': (284, 2)},\n 'Host IO and Throughput Requirements',\n ('Database Read I/O Percentage', '/ Database')),\n\n (\"iops_server_DB\",\n {'2019': (283, 3), '2013|2016': (282, 3)},\n 'Host IO and Throughput Requirements',\n ('Total Database Required IOPS', '/ Server')),\n\n (\"iops_required_Log\",\n {'2019': (284, 3), '2013|2016': (283, 3)},\n 'Host IO and Throughput Requirements',\n ('Total Log Required IOPS', '/ Server')),\n\n (\"maintenance_throughput\",\n {'2019': (286, 3), '2013|2016': (285, 3)},\n 'Host IO and Throughput Requirements',\n ('Background Database Maintenance Throughput Requirements', '/ Server')),\n\n (\"ram_per_server\",\n {'2019': (143, 2), '2013|2016': (141, 2)},\n 'Memory Calculations (Primary Datacenter)',\n ('Calculated Amount of Server RAM (Total)', 'Value')),\n\n (\"min_GC_cores\",\n {'2019': (220, 2), '2013|2016': (216, 2)},\n 'Processor Core Ratio Requirements',\n ('Recommended Minimum Number of Global Catalog Cores', '/ Primary Datacentre')),\n\n (\"transport_DB_space\",\n {'2019': (273, 3), '2013|2016': (274, 3)},\n 'Disk Space Requirements',\n ('Transport Database Space Required', '/ Server')),\n\n (\"DB_space\",\n {'2019': (274, 3), '2013|2016': (275, 3)},\n 'Disk Space Requirements',\n ('Database Space Required', '/ Server')),\n\n (\"log_space\",\n {'2019': (275, 3), '2013|2016': (276, 3)},\n 'Disk Space Requirements',\n ('Log Space Required', '/ Server'))\n ]\n\n if self.year == '2019':\n\n # This is spec rate for N #cores required by system\n row_col_tuple_list.append((\"spec_2017\",\n {'2019': (136, 2)},\n 'Processor Configuration',\n ('Mailbox Servers', 'Server SPECint2017 Rate Value')))\n\n map_data_dict = dict()\n for item in row_col_tuple_list:\n\n map_data_dict[item[0]] = \\\n {\"position\": {\n \"row\": item[1][self.year][0],\n \"col\": item[1][self.year][1]\n },\n \"data_type\": float,\n \"data\": None}\n\n return map_data_dict",
"def _formatGeneric(self, key, value, logic, cell):\n value = re.split(r'(?si)\\s*\"\\s*', value.strip())\n for i in range(0, len(value), 2):\n # every other one is in \" \"\n value[i] = re.sub(r\"\\s+\", \" and \", value[i].strip())\n # join phrases with and\n value = \" and \".join(value)\n # now too many and's\n value = re.sub(r\"(and|\\s)+or(and|\\s)+\", \" or \", value)\n value = re.sub(r\"(and|\\s)+not(and|\\s)+\", \" not \", value)\n value = re.sub(r\"(and|\\s)+and(and|\\s)+\", \" and \", value)\n value = re.sub(r\"^\\s*(and|or)\\s*|\\s*(and|or|not)\\s*$\", \"\", value)\n sc = str(cell)\n return [(\"pg\"+sc, key), (\"co\"+sc, logic), (\"s\"+sc, value)]",
"def _thCellRules(self):\n\n cellRule = {'conds': {'cellModel': 'TH', 'cellType': 'Thal'}, 'secs': {}}\n cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}}\n cellRule['secs']['soma']['geom'] = {'diam': 5.642, 'L': 5.642, 'Ra': 1,\n 'nseg': 1}\n\n cellRule['secs']['soma']['mechs']['thalamus'] = {}\n cellRule['secs']['soma']['vinit'] = random.gauss(-62, 5)\n cellRule['secs']['soma']['threshold'] = -10\n\n self.netParams.cellParams['TH'] = cellRule",
"def parse_map(map_text):\n width = height = None\n\n # Used to count the rows in the map\n row = 0\n\n # Used to map treasure data to the proper treasure\n current_treasure_id = 0\n\n score = None\n num_players = None\n\n params = {}\n\n # A list of chars representing the letters that are translated into pirates from the map file. It is assigned\n # after the number of players is known, to know how many letters should it know to parse into pirates.\n pirate_keys_list = None\n treasures_data = dict()\n # This dictionary uses the player's id as key to a value which is a list of all the locations of that player's\n # pirates\n player_pirate_locations = defaultdict(list)\n treasures = []\n powerups = []\n scripts = []\n anti_scripts = []\n\n for line in map_text.split('\\n'):\n line = line.strip()\n\n # ignore blank lines and comments\n if not line or line[0] == '#':\n continue\n\n row_key, row_data = line.split(' ', 1)\n row_key = row_key.lower()\n if row_key == 'cols':\n width = int(row_data)\n\n elif row_key == 'rows':\n height = int(row_data)\n\n elif row_key == 'players':\n num_players = int(row_data)\n if num_players < 2 or num_players > 10:\n raise Exception(\"map\", \"player count must be between 2 and 10\")\n\n elif row_key == 'score':\n score = list(map(int, row_data.split()))\n\n elif row_key == 'treasure':\n treasure_params = row_data.split()\n treasure_id = int(treasure_params[0])\n treasure_value = int(treasure_params[1])\n treasures_data[treasure_id] = treasure_value\n\n elif row_key == 'powerup':\n powerup = row_data.split()\n powerup = [powerup[0]] + map(int, powerup[1:])\n powerups.append(powerup)\n\n elif row_key == 'script':\n script = row_data.split()\n script = map(int, script)\n scripts.append(script)\n\n elif row_key == 'anti_script':\n anti_script = row_data.split()\n anti_script = map(int, anti_script)\n anti_scripts.append(anti_script)\n\n elif row_key == 'm':\n # Initiate the pirate_keys_list\n if pirate_keys_list is None:\n if num_players is None:\n raise Exception(\"map\",\n \"players count expected before map lines\")\n # pirates of team 'a'/'b'/'c'...\n pirate_keys_list = [chr(ord('a') + i) for i in range(num_players)]\n\n # row is too short - map must be a full rectangle!\n if len(row_data) != width:\n raise Exception(\"map\",\n \"Incorrect number of cols in row %s. \"\n \"Got %s, expected %s.\"\n % (row, len(row_data), width))\n # parse the row\n for col, char in enumerate(row_data):\n # A pirate\n if char in pirate_keys_list:\n player_pirate_locations[pirate_keys_list.index(char)].append(Location(row, col))\n\n # A treasure\n elif char == MAP_OBJECTS[TREASURE]:\n treasure_value = 1\n if current_treasure_id in treasures_data.keys():\n treasure_value = treasures_data[current_treasure_id]\n treasures.append((current_treasure_id, Location(row, col), treasure_value))\n current_treasure_id += 1\n\n # unknown object\n elif char != MAP_OBJECTS[LAND]:\n raise Exception(\"map\", \"Invalid character in map: %s\" % char)\n\n row += 1\n\n else:\n # default collect all other parameters\n params[row_key] = row_data\n\n if score and len(score) != num_players:\n raise Exception(\"map\",\n \"Incorrect score count. Expected %s, got %s\"\n % (num_players, len(score)))\n if height != row:\n raise Exception(\"map\",\n \"Incorrect number of rows. 
Expected %s, got %s\"\n % (height, row))\n return {\n 'size': (height, width),\n 'num_players': num_players,\n 'treasures': treasures,\n 'powerups': powerups,\n 'scripts': scripts,\n 'anti_scripts': anti_scripts,\n 'pirate_locations': player_pirate_locations,\n 'params': params\n }"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
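Note: parse_cell above depends on a module-level rules dictionary that maps field names to extractor functions. The sketch below is a self-contained illustration under assumed rules; the regexes, field names, and cell text are invented for the example.

import re

def _first(pattern, text):
    # Return the first capture group of pattern in text, or None when it does not occur.
    m = re.search(pattern, text)
    return m.group(1) if m else None

# Hypothetical rules: each key maps to a function applied to the raw cell text.
rules = {
    'essid':   lambda cell: _first(r'ESSID:"([^"]*)"', cell),
    'quality': lambda cell: _first(r'Quality=(\S+)', cell),
}

def parse_cell(cell):
    # Apply every rule to the cell text and collect the results in a dictionary.
    return {key: rules[key](cell) for key in rules}

cell_text = 'Cell 01 - Address: 00:11:22:33:44:55\nESSID:"office"\nQuality=70/70'
print(parse_cell(cell_text))  # -> {'essid': 'office', 'quality': '70/70'}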
uses a mask to get cropping indices, so that images can be cropped to minimum rectangle covering mask | def get_crop_indices_from_mask(mask):
ys, xs = np.where(mask == 255)
return min(ys), max(ys), min(xs), max(xs) | [
"def crop_image_with_masks(image,\n masks,\n max_area=8000,\n min_area=500,\n width_height_ratio=0.9):\n cropped_images = []\n\n for mask_data in masks:\n # Extract mask and bounding box data\n bbox = mask_data['bbox']\n seg = mask_data['segmentation']\n x, y, w, h = bbox\n\n # Crop the image based on the bounding box\n cropped_image = image[y:y+h, x:x+w]\n\n # Create an 8-bit mask from the segmentation data\n mask = np.asarray(seg[y:y+h, x:x+w], dtype=np.uint8) * 255\n # Apply the mask to the cropped image\n cropped_image = cv2.bitwise_and(\n cropped_image, cropped_image, mask=mask)\n cropped_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB)\n if (mask_data['area'] >= min_area and\n mask_data['area'] <= max_area and\n w/h >= width_height_ratio):\n cropped_images.append(cropped_image)\n\n return cropped_images",
"def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding)\n y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding)\n\n np.arange(w, dtype=x1.dtype)\n rows = np.arange(w, dtype=x1.dtype).reshape((1, -1, 1)).repeat(repeats=h, axis=0).repeat(repeats=n, axis=2)\n cols = np.arange(h, dtype=x1.dtype).reshape((-1, 1, 1)).repeat(repeats=w, axis=1).repeat(repeats=n, axis=2)\n\n masks_left = rows >= x1.reshape((1, 1, -1))\n masks_right = rows < x2.reshape((1, 1, -1))\n masks_up = cols >= y1.reshape((1, 1, -1))\n masks_down = cols < y2.reshape((1, 1, -1))\n\n crop_mask = masks_left * masks_right * masks_up * masks_down\n\n return masks * crop_mask.astype(np.float32)",
"def cropImage():",
"def autocrop_to_mask(self, all_images,mask, thr=0):\n mask = mask>thr\n rows = np.any(mask, axis=1)\n cols = np.any(mask, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n for image in all_images.keys():\n all_images[image]= all_images[image][rmin:rmax,cmin:cmax]\n return all_images",
"def _image_crop(self, crop_limits):\n n, rows, cols = self.images.shape\n [(col_min,row_min),(col_max,row_max)] = crop_limits\n #xmin, xmax, ymin, ymax = crop_limits\n self.images = self.images[:, row_min : row_max, col_min : col_max]",
"def multi_crop(path_in, path_out, input_shape=(1292, 968), target_shape=(644, 644), bottom_right=False,\n random_crop=0):\n\n print('Starting multi_crop')\n # Create the folder that will hold all images:\n if os.path.exists(path_out):\n shutil.rmtree(path_out, ignore_errors=True)\n os.makedirs(path_out)\n\n # get the classes\n folders = os.listdir(path_in)\n\n # get center point\n x_c = np.int(input_shape[0] / 2.)\n\n # create dictionary to be used in cropping loop:\n # values define the cropping position\n new_imgs = {'tl': (0, 0, target_shape[0], target_shape[1]),\n 'tc': (x_c - np.int(target_shape[0] / 2.), 0,\n x_c + np.int(target_shape[0] / 2.), target_shape[1]),\n 'tr': (input_shape[0] - target_shape[0], 0,\n input_shape[0], target_shape[1]),\n 'bl': (0, input_shape[1] - target_shape[1],\n target_shape[0], input_shape[1]),\n 'bc': (x_c - np.int(target_shape[0] / 2.), input_shape[1] - target_shape[1],\n x_c + np.int(target_shape[0] / 2.), input_shape[1])}\n\n if bottom_right:\n # if user wants to keep bottom right crop, we add it to the dictionary\n new_imgs['br'] = (input_shape[0] - target_shape[0], input_shape[1] - target_shape[1],\n input_shape[0], input_shape[1])\n for i in range(0, random_crop):\n # if user wants extra randomly centered crops\n # starting point can range from 0 to size of the image - target size\n xi = np.random.randint(0, input_shape[0] - target_shape[0])\n yi = np.random.randint(0, input_shape[1] - target_shape[1])\n new_imgs['r{}'.format(i)] = (xi, yi,\n xi + target_shape[0], yi + target_shape[1])\n\n # uses the path_in and walks in folders to crop images\n for folder in folders:\n print('----{}'.format(folder))\n os.mkdir(path_out + os.sep + folder)\n lst = os.listdir(path_in + os.sep + folder)\n\n images = [item for item in lst if item.lower().endswith(('.png', '.jpg', '.jpeg', '.tif'))]\n\n for file in images:\n\n # open image\n ori = Image.open(path_in + os.sep + folder + os.sep + file)\n\n for k in new_imgs:\n new_name = '{}_{}{}'.format(os.path.splitext(file)[0], k, os.path.splitext(file)[1])\n # crop image\n cropped = ori.crop(new_imgs[k])\n # save cropped image with new resolution\n img = cropped.resize(target_shape, Image.ANTIALIAS)\n img.save(path_out + os.sep + folder + os.sep + new_name)\n print('multi_crop complete\\n')",
"def max_width(mask):\r\n # mask_img = cv2.imread(mask, cv2.IMREAD_GRAYSCALE)\r\n mask_img = mask\r\n # cv2.imwrite(\"mask_img.jpg\", mask_img)\r\n # print(\"pixel:\", mask[0, 0])\r\n ret, mask_img = cv2.threshold(mask_img, 30, 255, cv2.THRESH_BINARY)\r\n # print(\"shape\", mask_img.shape)\r\n height, width = mask_img.shape\r\n\r\n # count max width\r\n max_wid = 0\r\n for i in range(height):\r\n # initialize leftend and rightend of mask area as -1\r\n leftend = -1\r\n rightend = -1\r\n for j in range(width-1):\r\n if mask_img[i, j] > 127 and leftend == -1:\r\n leftend = j\r\n if mask_img[i, j] == 0 and mask_img[i, j-1] > 0 and j > 0:\r\n rightend = j\r\n cv2.imwrite(\"mask_img.png\", branding(mask_img, (i, j), 1))\r\n print(\"leftend:({}, {}); rightedn:({}, {})\\n\".format(i, leftend, i, rightend))\r\n break\r\n max_wid = max(max_wid, rightend-leftend)\r\n # for col in range(width):\r\n # # initialize leftend and rightend of mask area as -1\r\n # leftend = -1\r\n # rightend = -1\r\n # for row in range(height-1):\r\n # if mask_img[row, col] > 30 and leftend == -1:\r\n # leftend = row\r\n # if mask_img[row, col] == 0 and mask_img[row-1, col] > 0 and row > 0:\r\n # rightend = row\r\n # # cv2.imwrite(\"mask_img.png\", branding(mask_img, (i, j), 2))\r\n # # print(\"leftend:({}, {}); rightedn:({}, {})\\n\".format(i, leftend, i, rightend))\r\n # break\r\n # max_wid = max(max_wid, rightend-leftend)\r\n \r\n # print(\"max width: {}\".format(max_wid))\r\n return max_wid",
"def crop_mask_in_target_box(masks,\n boxes,\n target_boxes,\n output_size,\n sample_offset=0,\n use_einsum=True):\n with tf.name_scope('crop_mask_in_target_box'):\n batch_size, num_masks, height, width = masks.get_shape().as_list()\n if batch_size is None:\n batch_size = tf.shape(masks)[0]\n masks = tf.reshape(masks, [batch_size * num_masks, height, width, 1])\n # Pad zeros on the boundary of masks.\n masks = tf.image.pad_to_bounding_box(masks, 2, 2, height + 4, width + 4)\n masks = tf.reshape(masks, [batch_size, num_masks, height+4, width+4, 1])\n\n # Projects target box locations and sizes to corresponding cropped\n # mask coordinates.\n gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(\n value=boxes, num_or_size_splits=4, axis=2)\n bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(\n value=target_boxes, num_or_size_splits=4, axis=2)\n y_transform = (bb_y_min - gt_y_min) * height / (\n gt_y_max - gt_y_min + _EPSILON) + 2\n x_transform = (bb_x_min - gt_x_min) * height / (\n gt_x_max - gt_x_min + _EPSILON) + 2\n h_transform = (bb_y_max - bb_y_min) * width / (\n gt_y_max - gt_y_min + _EPSILON)\n w_transform = (bb_x_max - bb_x_min) * width / (\n gt_x_max - gt_x_min + _EPSILON)\n\n boundaries = tf.concat(\n [tf.to_float(tf.ones_like(y_transform) * ((height + 4) - 1)),\n tf.to_float(tf.ones_like(x_transform) * ((width + 4) - 1))],\n axis=-1)\n\n # Reshape tensors to have the right shape for selective_crop_and_resize.\n trasnformed_boxes = tf.concat(\n [y_transform, x_transform, h_transform, w_transform], -1)\n levels = tf.tile(tf.reshape(tf.range(num_masks), [1, num_masks]),\n [batch_size, 1])\n\n cropped_masks = selective_crop_and_resize(\n masks,\n trasnformed_boxes,\n levels,\n boundaries,\n output_size,\n sample_offset=sample_offset,\n use_einsum_gather=use_einsum)\n cropped_masks = tf.squeeze(cropped_masks, axis=-1)\n\n return cropped_masks",
"def get_crop(mask_image, scale_ratio=1.2):\n mask_image = mask_image.numpy()\n xs, ys = (mask_image[:, :, ::3].sum(2) > 0).nonzero()\n x_min = xs.min()\n x_max = xs.max()\n y_min = ys.min()\n y_max = ys.max()\n radius = max((x_max - x_min), (y_max - y_min)) // 2 * scale_ratio\n x_c = (x_max + x_min) / 2\n y_c = (y_max + y_min) / 2\n x_min = max(int((x_c - radius).item()), 0)\n y_min = max(int((y_c - radius).item()), 0)\n x_max = int((x_c + radius).item())\n y_max = int((y_c + radius).item())\n return x_min, y_min, x_max, y_max",
"def map_mask_2_img_coordinates(mask_size=6, scale=8, start=4):\n X , Y = np.meshgrid(range(mask_size), range(mask_size))\n X = X.reshape(-1)\n Y = Y.reshape(-1)\n indices = np.vstack([Y, X])\n indices = start + scale*indices\n return indices",
"def resize_mask(mask):\n mask = np.maximum(mask, 0)\n mask = cv2.resize(mask, (120, 120))\n mask = mask - np.min(mask)\n if np.max(mask) > 0:\n mask = mask / np.max(mask)\n return mask",
"def CropImageByShpMask(self):\n \n in_shp_file = os.path.join(self.base_path, 'InputFiles', 'ShapeMask', self.StudyAreaShp)\n # with rasterio.open(InputImage) as ImageObj:\n # out_image, out_transform = rasterio.mask.mask(ImageObj, gpd.GeoSeries(Polygon(CrownBuffer)), crop=True, filled=True, nodata = 0)\n\n with fiona.open(in_shp_file, 'r') as shapefile:\n ShapeMask = [feature[\"geometry\"] for feature in shapefile]\n # Crop dem\n RasterOperators.CropImage(os.path.join(self.base_path,'InputFiles',self.dem), ShapeMask, os.path.join(self.base_path,'InputFiles',self.dem))\n # Crop dsm\n RasterOperators.CropImage(os.path.join(self.base_path,'InputFiles',self.dsm), ShapeMask, os.path.join(self.base_path,'InputFiles',self.dsm))",
"def innerRect(img, rects, maskedImg, mask):\n maskedImg = cv.cvtColor(maskedImg, cv.COLOR_BGR2GRAY)\n for rect in rects:\n x1, y1, x2, y2 = makeTuple(rect)\n maskedImg[y1:y2, x1:x2] = 0\n mask[maskedImg == 0] = 0\n mask[maskedImg == 255] = 1\n cutImg = grabCut(img, None, mask)\n return cutImg",
"def crop_boxes(boxes, crop_shape):\n\n crop_x1 = crop_shape[0]\n crop_y1 = crop_shape[1]\n crop_x2 = crop_shape[2]\n crop_y2 = crop_shape[3]\n\n l0 = boxes[:, 0] >= crop_x1\n l1 = boxes[:, 1] >= crop_y1\n l2 = boxes[:, 2] <= crop_x2\n l3 = boxes[:, 3] <= crop_y2\n\n L = l0 * l1 * l2 * l3\n cropped_boxes = boxes[L, :]\n\n cropped_boxes[:, 0] = cropped_boxes[:, 0] - crop_x1\n cropped_boxes[:, 1] = cropped_boxes[:, 1] - crop_y1\n cropped_boxes[:, 2] = cropped_boxes[:, 2] - crop_x1\n cropped_boxes[:, 3] = cropped_boxes[:, 3] - crop_y1\n\n return cropped_boxes",
"def crop_images_wcs(self, ra, dec, size):\n topfile = re.sub(\".*/\", \"\", self.data_dir) # for file /a/b/c, extract c\n\n # crop_dir encodes the detector number, instrument, date\n crop_dir = f'{os.path.abspath(self.data_dir+\"/..\")}/cropped_{topfile}'\n run(f\"mkdir -p {crop_dir}\", shell=True) # make crop_dir\n \n crop_counter = 0\n for fi in self.files:\n hdr = fits.getheader(f\"{self.data_dir}/{fi}\")\n img = fits.getdata(f\"{self.data_dir}/{fi}\")\n y_size, x_size = img.shape # total image dims in pix \n w = wcs.WCS(hdr)\n \n # compute the bounds \n pix_scale = hdr[\"PIXSCAL1\"] # scale of image in arcsec per pix\n size_wcs = pix_scale*size/3600.0 # size of desired box in degrees\n pix_x1 = np.array(w.all_world2pix(ra-size_wcs/2.0, dec, 1))[0]\n pix_x2 = np.array(w.all_world2pix(ra+size_wcs/2.0, dec, 1))[0]\n pix_y1 = np.array(w.all_world2pix(ra, dec-size_wcs/2.0, 1))[1]\n pix_y2 = np.array(w.all_world2pix(ra, dec+size_wcs/2.0, 1))[1]\n x_bounds = np.array(sorted([pix_x1, pix_x2])) # sorted arrays of \n y_bounds = np.array(sorted([pix_y1, pix_y2])) # pixel boundaries\n # truncate bounds if needed\n x_bounds[x_bounds<0] = 0 \n x_bounds[x_bounds>x_size] = x_size\n y_bounds[y_bounds<0] = 0 \n y_bounds[y_bounds>y_size] = y_size\n # convert to horizontal & vertical fractions, pass to __get_crop()\n frac_hori = x_bounds/x_size\n frac_vert = y_bounds/y_size\n \n # if the crop does not contain the bounds, skip it\n # if the crop's aspect ratio is more skew than 4:1 or 1:4, skip\n # if the crop is < 50% the width/height of the desired box, skip\n if np.all(frac_hori==0) or np.all(frac_hori==1.0) or np.all(\n frac_vert==0.0) or np.all(frac_vert==1.0):\n continue \n if not(0.25 < ((frac_hori[1]-frac_hori[0])/\n (frac_vert[1]-frac_vert[0])) < 4.0):\n continue\n if not((x_bounds[1]-x_bounds[0] > size/2.0) and \n (y_bounds[1]-y_bounds[0] > size/2.0) ):\n continue\n \n crop_counter += 1\n cropped_hdu = self.__get_crop(f\"{self.data_dir}/{fi}\", \n frac_hori, frac_vert)\n new_f = fi.replace(\".fits\",\"_cropped.fits\")\n cropped_hdu.writeto(f\"{crop_dir}/{new_f}\", overwrite=True, \n output_verify=\"ignore\") # write them\n \n print(f\"{crop_counter}/{len(self.files)} images were cropped.\\n\", \n flush=True)",
"def get_mask_from_bounding_box(bounding_box_coordinates,shape):\n #unwrap bouding box coordinates\n x,y,w,h = bounding_box_coordinates\n #create blank image with corresponding shape\n blank_image = np.zeros(shape, np.uint8)\n #create corrected mask\n corrected_mask = cv2.rectangle(blank_image,(x,y),(x+w,y+h),(255,255,255),-1)\n return corrected_mask",
"def image_spotselect(CS_mask,N_min = 2):\n S_mask = (CS_mask > 0) \n \n N_spots = sum(S_mask)\n X0,Y0 = where(S_mask)\n close = zeros(N_spots)\n for i in range(N_spots):\n for j in range(N_spots):\n if (i <> j) & (close[i] == 0):\n close[i] = sqrt((X0[i]-X0[j])**2+(Y0[i]-Y0[j])**2) < 4\n S_mask[X0[where(close == 1)],Y0[where(close == 1)]] = 0\n \n S_mask &= (CS_mask >= N_min) # Select spots found in N_min+ images\n \n return S_mask",
"def grabCut(img, rect, mask):\n bgdModel = np.zeros((1,65),np.float64)\n fgdModel = np.zeros((1,65),np.float64)\n\n if rect is None:\n mask, bgdModel, fgdModel = \\\n cv.grabCut(inImg, mask, None, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_MASK)\n else:\n cv.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_RECT)\n\n mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')\n maskedImg = img*mask2[:,:,np.newaxis]\n return maskedImg, mask",
"def bg_mask(self, anns, width_height, *, crowd_margin):\n anns1, anns2 = anns\n\n mask = np.ones((\n (width_height[1] - 1) // self.stride + 1,\n (width_height[0] - 1) // self.stride + 1,\n ), dtype=np.bool)\n crowd_bbox = [np.inf, np.inf, 0, 0]\n for ann in anns1 + anns2:\n if not ann['iscrowd']:\n valid_keypoints = 'keypoints' in ann and np.any(ann['keypoints'][:, 2] > 0)\n if valid_keypoints:\n continue\n\n if 'mask' not in ann:\n bb = ann['bbox'].copy()\n bb /= self.stride\n bb[2:] += bb[:2] # convert width and height to x2 and y2\n\n # left top\n left = np.clip(int(bb[0] - crowd_margin), 0, mask.shape[1] - 1)\n top = np.clip(int(bb[1] - crowd_margin), 0, mask.shape[0] - 1)\n\n # right bottom\n # ceil: to round up\n # +1: because mask upper limit is exclusive\n right = np.clip(int(np.ceil(bb[2] + crowd_margin)) + 1,\n left + 1, mask.shape[1])\n bottom = np.clip(int(np.ceil(bb[3] + crowd_margin)) + 1,\n top + 1, mask.shape[0])\n\n crowd_bbox[0] = min(crowd_bbox[0], left)\n crowd_bbox[1] = min(crowd_bbox[1], top)\n crowd_bbox[2] = max(crowd_bbox[2], right)\n crowd_bbox[3] = max(crowd_bbox[3], bottom)\n continue\n\n assert False # because code below is not tested\n mask[ann['mask'][::self.stride, ::self.stride]] = 0\n\n if crowd_bbox[1] < crowd_bbox[3] and crowd_bbox[0] < crowd_bbox[2]:\n LOG.debug('crowd_bbox: %s', crowd_bbox)\n mask[crowd_bbox[1]:crowd_bbox[3], crowd_bbox[0]:crowd_bbox[2]] = 0\n\n return mask"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
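Note: get_crop_indices_from_mask above returns inclusive min/max row and column indices of the 255-valued region, so a crop needs +1 on the upper bounds. A short self-contained sketch with a synthetic image and mask:

import numpy as np

def get_crop_indices_from_mask(mask):
    # Rows and columns where the mask is set; their extremes give the tight bounding rectangle.
    ys, xs = np.where(mask == 255)
    return min(ys), max(ys), min(xs), max(xs)

img = np.zeros((100, 100, 3), dtype=np.uint8)
mask = np.zeros((100, 100), dtype=np.uint8)
mask[20:40, 30:70] = 255  # synthetic rectangular mask region

y_min, y_max, x_min, x_max = get_crop_indices_from_mask(mask)
cropped = img[y_min:y_max + 1, x_min:x_max + 1]  # +1 because the returned maxima are inclusive
print(cropped.shape)  # -> (20, 40, 3)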
get centre coords of polygon | def get_polygon_centre(polygon):
if type(polygon) != np.ndarray:
polygon = np.array(polygon)
polygon = np.squeeze(polygon, axis=1)
min_y = np.min(polygon[:,0])
min_x = np.min(polygon[:,1])
max_y = np.max(polygon[:,0])
max_x = np.max(polygon[:,1])
centre_x = max_x - (max_x - min_x) / 2
centre_y = max_y - (max_y - min_y) / 2
return np.array([centre_y, centre_x]) | [
"def compute_center(points: list) -> list:\n\n\tpolygon = numpy.array(points)\n\n\tlength = polygon.shape[0]\n\tsum_lon = numpy.sum(polygon[:, 0])\n\tsum_lat = numpy.sum(polygon[:, 1])\n\n\treturn [sum_lon / length, sum_lat / length]",
"def get_center(self):\n\t\treturn sum([self.xyz0, self.get_diagonal/2], axis=0)",
"def get_center(self):\n center = np.mean(self.helix_axis_coords, axis=0) \n return center",
"def get_center(self):\n ra, dec = sphericalFromCartesian(self.bounding_circle[0])\n return np.degrees(ra), np.degrees(dec)",
"def get_center(self):\n x, y = self.pos\n ox, oy = self.origin\n w, h = self.size\n return (x - ox + w / 2, y - oy + h / 2)",
"def centre_point(self):\n x = (self.pnta.x+self.pntb.x)/2\n y = (self.pnta.y+self.pntb.y)/2\n z = (self.pnta.z+self.pntb.z)/2\n return Point(x, y, z)",
"def centre_point(xarray, yarray):\r\n return (xarray.max() + xarray.min())/2., (yarray.max() + yarray.min())/2.",
"def centroid(vertices):\n return (vertices[0] + vertices[1] + vertices[2]) / 3",
"def getCenter(self):\n (left, top), (right, bottom) = self.getCoords()\n x = left + (right - left) / 2\n y = top + (bottom - top) / 2\n return x, y",
"def get_center(self):\n\t\thx = self.h[0]\n\t\thy = self.h[1]\n\t\thz = self.h[2]\n\n\t\treturn sum([self.xyz0, [hx/2, hy/2, hz/2]], axis=0)",
"def findcenter(peice):\r\n xsum = 0\r\n ysum = 0\r\n for point in peice.outline:\r\n xsum += point[0]\r\n ysum += point[1]\r\n return xsum//len(peice.outline), ysum//len(peice.outline)",
"def centroid(pos_1, pos_2, pos_3):\n x1, y1 = pos_1[0], pos_1[1]\n x2, y2 = pos_2[0], pos_2[1]\n x3, y3 = pos_3[0], pos_3[1]\n x = (x1 + x2 + x3) / 3\n y = (y1 + y2 + y3) / 3\n return (x, y)",
"def geometric_center(self):\n geometric_center = np.array([0.0, 0.0, 0.0])\n for atom in self.atoms:\n geometric_center += atom.position\n geometric_center /= len(self.atoms)\n return geometric_center",
"def get_center(box):\n x1,y1,x2,y2 = box\n return [(x1+x2)/2, (y1+y2)/2]",
"def find_centre(image: sitk.Image) -> np.ndarray:\n centre_idx = np.array(image.GetSize()) / 2.\n centre_coords = image.TransformContinuousIndexToPhysicalPoint(centre_idx)\n return np.array(centre_coords)",
"def tile_centre_shape(z, x, y):\n\n from shapely.geometry import Point\n\n lon, lat = tile_centre(z, x, y)\n return Point(lon, lat)",
"def centrepoint(contour):\n return contour.mean(axis=(0, 1))",
"def getCenter(self) -> \"SbVec3f\":\n return _coin.SbXfBox3f_getCenter(self)",
"def computePolygonCentroid(pPolygon):\n return _almathinternal.computePolygonCentroid(pPolygon)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
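Note: get_polygon_centre above expects an OpenCV-style contour of shape (N, 1, 2) and returns the centre of the axis-aligned bounding box of the corners, not the centroid. A quick self-contained check with a made-up square contour; the (min + max) / 2 form below is algebraically identical to the record's max - (max - min) / 2.

import numpy as np

def get_polygon_centre(polygon):
    pts = np.squeeze(np.asarray(polygon), axis=1)  # drop the (N, 1, 2) middle axis
    # Centre of the axis-aligned bounding box of the corners.
    return (pts.min(axis=0) + pts.max(axis=0)) / 2

square = np.array([[[10, 20]], [[10, 60]], [[50, 60]], [[50, 20]]])  # (4, 1, 2) contour
print(get_polygon_centre(square))  # -> [30. 40.]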
takes a polygon and expands it by increasing length of vectors from centre to all corners | def enlargen_polygon(polygon, ratio):
centre = get_polygon_centre(polygon)
polygon = polygon.astype(np.int)
enlargened_poly = []
for corner in polygon:
diff = corner - centre
enlargened_poly.append((diff * ratio) + centre)
return np.array(enlargened_poly).astype(np.int32) | [
"def regular_polygon(length, num_sides):\n interior_angle = 360 / num_sides\n for count in range(num_sides):\n forward(length)\n left(interior_angle)",
"def centroid_as_polygon(rectangle, relative_size=0.05):\n w, h = size_of_rotated_rectangle(rectangle)\n c = max(h, w) * relative_size\n return rectangle.centroid.buffer(c)",
"def polygon(sides:int, length:int):\n \n # Make sure the arguments are of type int\n if not isinstance(sides, int):\n raise TypeError('Please provide an int argument')\n if not isinstance(length, int):\n raise TypeError('Please provide an int argument')\n \n # (sides - 2) * 180 -- defines sum of angles in any polygon\n angle = ((sides - 2) * 180) / sides\n\n for x in range(sides):\n turtle.forward(length)\n turtle.left(180 - angle)",
"def polygon(n):\n return circle(360./n)",
"def make_polygon(N,sides):\n # define the end-points\n theta = numpy.linspace(0, -2*numpy.pi, N+1) # equally spaced theta\n r = numpy.cos(numpy.pi/sides)/numpy.cos( # r(theta)\n theta % (2.*numpy.pi/sides)-numpy.pi/sides)\n x,y = r*numpy.cos(theta), r*numpy.sin(theta) # get the coordinates\n return panelize(x,y)",
"def _create_polygon(klass, poly):\n polygon = Polygon.from_serializable(poly)\n polygon.absolutize()\n return polygon",
"def add_polygon(self):\n for i, r in enumerate(self.rows):\n p = Polygon(self.start_pos, r, self.points)\n self.polygons[i].insert(0, p)\n self.space.add(p.body, p.poly)",
"def polygon(s, l):\r\n degree = 360 / s # determines the number of degrees between each side to create a polygon\r\n if s < 3:\r\n pass\r\n else:\r\n while s > 0:\r\n turtle.forward(l)\r\n turtle.left(degree)\r\n s = s - 1\r\n turtle.forward(l)",
"def irregularPolygonShape(pos, points, in_fill = None, in_stroke = None):\n assert False, \"This isn't working yet!\"\n pushMatrix()\n translate(*pos)\n poly = createShape()\n poly.beginShape()\n if in_fill is not None:\n poly.setFill(in_fill)\n if in_stroke is not None:\n poly.setStroke(in_stroke)\n for p in points:\n poly.vertex(*p)\n poly.endShape(CLOSE)\n popMatrix()\n return poly",
"def __transform_base_polygon__(self, path, polyline3D_contur):\n pnt1 = path.GetPoint(0)\n pnt2 = path.GetPoint(1)\n\n diff_x = pnt2.X - pnt1.X\n diff_y = pnt2.Y - pnt1.Y\n diff_z = pnt2.Z - pnt1.Z\n\n tmp_plane = AllplanGeo.Plane3D(AllplanGeo.Point3D(0,0,0),AllplanGeo.Vector3D(diff_x, diff_y, diff_z))\n\n matrix = AllplanGeo.Matrix3D()\n matrix = tmp_plane.GetTransformationMatrix()\n result = AllplanGeo.Transform(polyline3D_contur, matrix)\n matrix2 = AllplanGeo.Matrix3D()\n matrix2.SetTranslation(AllplanGeo.Vector3D(pnt1.X, pnt1.Y, pnt1.Z))\n result = AllplanGeo.Transform(result, matrix2)\n return result",
"def irregularPolygon(pos, points, in_fill = None, in_stroke = None):\n beginShape()\n if in_fill is not None:\n fill(in_fill)\n if in_stroke is not None:\n stroke(in_stroke)\n for p in points:\n vertex(p[0] + pos[0], p[1] + pos[1])\n endShape(CLOSE)",
"def polygon(self, speed_deg_per_second, sides, edge_length):\n degrees_to_turn = (180 - ((sides - 2) * 180) / sides)\n time_s = 1\n while time_s != 0:\n for k in range(sides):\n self.drive_inches(edge_length, speed_deg_per_second)\n self.turn_degrees(degrees_to_turn, speed_deg_per_second)\n time_s = 0",
"def makeDirectedPolygon(self, poly):\n\n last_posn = poly[0]\n result = [last_posn]\n\n for posn in poly[1:]:\n result.extend(self.makeArrowhead(last_posn, posn))\n last_posn = posn\n\n return result",
"def compute_shrinked_polygons(source_poly, limit):\n polygons_at_d = []\n\n # the original MultiPolygon is extended by 1 um to capture any EV in initial state\n poly_at_d = source_poly.buffer(1)\n polygons_at_d.append(poly_at_d)\n\n # compute the shrinked down shapely polygons\n for d in range(1, limit):\n poly_at_d = source_poly.buffer(-d)\n polygons_at_d.append(poly_at_d)\n print('Computed', len(polygons_at_d), 'shrinked polygons')\n return polygons_at_d",
"def polygon(t, n=60, _length=170, angle=80):\n for x in range(n):\n t.fd(_length)\n t.lt(angle)\n turtle.mainloop() # this is for the window to wait until closed",
"def rhombus_polygon(self, X, Y, str_id, hor_size, vert_size):\r\n x0, y0 = X - hor_size, Y # mid_west\r\n x1, y1 = X, Y - vert_size # mid_north\r\n x2, y2 = X + hor_size, Y # mid_east\r\n x3, y3 = X, Y + vert_size # mid_south\r\n\r\n polygon = SvgPolygon(4)\r\n polygon.set_stroke(width=2, color='black')\r\n poly_name = gui.SvgText(X, Y + 5, str_id)\r\n poly_name.attributes['text-anchor'] = 'middle'\r\n self.sheet.append([polygon, poly_name])\r\n\r\n mid_north = [x1, y1]\r\n mid_south = [x3, y3]\r\n mid_east = [x2, y2]\r\n mid_west = [x0, y0]\r\n \r\n polygon.add_coord(*mid_north)\r\n polygon.add_coord(*mid_east)\r\n polygon.add_coord(*mid_south)\r\n polygon.add_coord(*mid_west)\r\n\r\n return mid_north, mid_south, mid_east, mid_west",
"def constrainedDelaunayMap(polygons, imageSize, extraPoints = [],\n onlyInner = True):\n\n assert triangle, \"\"\"For correct CDT, you need to compile the\n triangle module (vigra/experiments/triangle). You might want to\n try the home-made fakedConstrainedDelaunayMap instead, which will\n give a correct result if possible, but may just throw an exception\n if it has to give up or even go into an infinite loop.\"\"\"\n\n points = list(extraPoints)\n segments = []\n holes = []\n for polygon in polygons:\n l = len(points)\n partPoints = list(polygon)\n partSegments = [(l+i, l+i+1) for i in range(len(partPoints)-1)]\n if polygon[-1] == polygon[0]:\n del partPoints[-1]\n if partPoints[0] in points:\n for i, (s, e) in enumerate(partSegments):\n partSegments[i] = (s-1, e-1) # partPoints[0] will be deleted\n partSegments[0] = (points.index(partPoints[0]), partSegments[0][1])\n del partPoints[0]\n partSegments[-1] = (partSegments[-1][0], partSegments[0][0])\n if onlyInner and polygon.partialArea() < 0:\n holes.append(_pointInHole(polygon))\n else:\n if partPoints[-1] in points:\n partSegments[-1] = (partSegments[-1][0], points.index(partPoints[-1]))\n del partPoints[-1]\n if partPoints[0] in points:\n for i, (s, e) in enumerate(partSegments):\n partSegments[i] = (s-1, e-1) # partPoints[0] will be deleted\n partSegments[0] = (points.index(partPoints[0]), partSegments[0][1])\n del partPoints[0]\n points.extend(partPoints)\n segments.extend(partSegments)\n\n print \"- performing Constrained Delaunay Triangulation...\"\n print \" (%d points, %s segments, %d holes)\" % (\n len(points), len(segments), len(holes))\n nodePositions, edgeData = triangle.constrainedDelaunay(\n points, segments, onlyInner, holes)\n\n print \"- storing result in a GeoMap...\"\n result = _delaunayMapFromData(nodePositions, edgeData, imageSize)\n\n for edge in result.edgeIter():\n if edgeData[edge.label()][2]:\n edge.setFlag(CONTOUR_SEGMENT)\n\n result.face(0).setFlag(OUTER_FACE)\n for holePoint in holes:\n result.faceAt(holePoint).setFlag(OUTER_FACE)\n\n return result",
"def simple_polygon(points):\n \n # Firstly swap the bottommost (and if necessary leftmost) point to the\n # 0th position in the list. The first line finds the bottommost point,\n # and the next line finds its index, so it can be swapped to the front.\n bottommost = min(points, key=lambda p: (p.y, p.x))\n index = points.index(bottommost)\n points[0], points[index] = points[index], points[0]\n \n # Now just sort the rest by angle from points[0]\n rest = points[1:]\n # **** FIXME by inserting the missing line of code here ****\n # print('in func:')\n \n \n rest = sorted(rest, key=lambda x: points[0] - x)\n \n \n #print(rest)\n \n \n \n return [points[0]] + rest",
"def polyShape2PipeSegment(shape, type = 'normal', part = []):\n \n parts = shape.parts\n vec = shape.points\n X_coor1 = []\n Y_coor1 = []\n X_coor2 = []\n Y_coor2 = []\n werte = []\n if len(part) == 0:\n startPart = 0\n endPart = len(parts)\n else:\n startPart = part\n endPart = part\n \n if type == 'normal':\n if len(parts) == 1:\n X_coor1 = vec[0][0]\n Y_coor1 = vec[0][1]\n X_coor2 = vec[-1][0]\n Y_coor2 = vec[-1][1]\n else:\n parts.append(len(vec))\n for ii in range(startPart, endPart):\n werte.append([vec[parts[ii]][0], vec[parts[ii + 1] - 1][0]])\n \n diffVal = M_MatLab.grad_Vector(parts)\n pos = M_FindPos.find_pos_ValInVector(max(diffVal), diffVal, \"==\")\n \n X_coor1 = vec[parts[pos[0]]][0]\n Y_coor1 = vec[parts[pos[0]]][1]\n X_coor2 = vec[parts[pos[0] + 1] - 1][0]\n Y_coor2 = vec[parts[pos[0] + 1] - 1][1]\n return [X_coor1, Y_coor1, X_coor2, Y_coor2]\n \n \n elif type == 'mean':\n area = area_for_shape(vec)\n imax = len(vec) - 1#len(polygon) \n \n result_x = 0\n result_y = 0\n for ii in range(0, imax):\n result_x += (vec[ii][0] + vec[ii+1][0]) * ((vec[ii][0] * vec[ii+1][1]) - (vec[ii+1][0] * vec[ii][1]))\n result_y += (vec[ii][1] + vec[ii+1][1]) * ((vec[ii][0] * vec[ii+1][1]) - (vec[ii+1][0] * vec[ii][1]))\n result_x += (vec[imax][0] + vec[0][0]) * ((vec[imax][0] * vec[0][1]) - (vec[0][0] * vec[imax][1]))\n result_y += (vec[imax][1] + vec[0][1]) * ((vec[imax][0] * vec[0][1]) - (vec[0][0] * vec[imax][1]))\n result_x /= (area * 6.0)\n result_y /= (area * 6.0)\n\n return [result_x, result_y]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
uses the enlarged polygon to add a white border around the masked portion of the original image | def get_padded_polygon_image(enlargened_poly, img, mask, color=255):
# mask to extract area of interest
extracted_img = cv2.bitwise_and(img, img, mask=mask)
# invert mask
mask_inv = cv2.bitwise_not(mask)
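    # rasterize the enlarged polygon into its own mask; it covers the original region plus the desired border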
padded_mask = np.zeros(mask.shape, dtype=np.uint8)
cv2.fillPoly(padded_mask, [np.int32(enlargened_poly)], (color))
padding = cv2.bitwise_and(padded_mask, padded_mask, mask=mask_inv)
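    # replicate the single-channel padding across 3 channels so it can be added to the 3-channel (BGR) image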
padding = np.expand_dims(padding, 2)
padding = np.repeat(padding, 3, 2)
padded_img = cv2.add(padding, extracted_img)
return padded_img, padded_mask | [
"def poly_mask(self):\n result, mapped = self._roiItem.getArrayRegion(\n np.ones_like(self.imageItem.image), self.imageItem, returnMappedCoords=True\n )\n\n # TODO -- move this code to own function and test\n # Reverse the result array to make indexing calculations easier, then revert back\n result = result[::-1, ::-1]\n mapped = mapped[::-1, ::-1]\n\n # Pad result mask rect into bounding rect of mask and image\n floorRow = np.floor(mapped[0]).astype(int)\n floorCol = np.floor(mapped[1]).astype(int)\n\n # Return empty mask if ROI bounding box does not intersect image bounding box\n resultRect = QRectF(QPointF(np.min(floorRow), np.min(floorCol)), QPointF(np.max(floorRow), np.max(floorCol)))\n if not self._intersectsImage(resultRect):\n # TODO -- is zeros(shape) the right return value for a non-intersecting polygon?\n return np.zeros(self.imageItem.image.shape)\n\n # Find the bounds of the ROI polygon\n minX = np.min(floorRow)\n maxX = np.max(floorRow)\n minY = np.min(floorCol)\n maxY = np.max(floorCol)\n\n width = self.imageItem.width()\n height = self.imageItem.height()\n # Pad the ROI polygon into the image shape\n # Don't need padding if a polygon boundary is outside of the image shape\n padXBefore = minX\n if minX < 0:\n padXBefore = 0\n padXAfter = height - maxX\n if padXAfter < 0:\n padXAfter = 0\n padYBefore = minY\n if minY < 0:\n padYBefore = 0\n padYAfter = width - maxY\n if padYAfter < 0:\n padYAfter = 0\n\n boundingBox = np.pad(result, ((padYBefore, padYAfter), (padXBefore, padXAfter)), \"constant\")\n\n # For trimming, any negative minimums need to be shifted into the image shape\n offsetX = 0\n offsetY = 0\n if minX < 0:\n offsetX = abs(minX)\n if minY < 0:\n offsetY = abs(minY)\n trimmed = boundingBox[abs(offsetY): abs(offsetY) + height, abs(offsetX): abs(offsetX) + width]\n\n # Reorient the trimmed mask array\n trimmed = trimmed[::-1, ::-1]\n\n # # TODO remove plotting code below\n # from matplotlib import pyplot as plt\n # plt.figure('bounding_box, origin=\"lower\"')\n # plt.imshow(boundingBox, origin='lower')\n # plt.show()\n #\n #\n # plt.figure(f'trimmed, origin=\"lower\", [{abs(offsetY)}:{abs(offsetY)+height}, {abs(offsetX)}:{abs(offsetX)+width}]')\n # plt.imshow(trimmed, origin='lower')\n # plt.show()\n # # TODO remove the plotting code above\n return trimmed",
"def poly_to_mask(polygon, width, height):\n\n # http://stackoverflow.com/a/3732128/1410871\n img = Image.new(mode='L', size=(width, height), color=0)\n ImageDraw.Draw(img).polygon(xy=polygon, outline=0, fill=1)\n mask = np.array(img).astype(bool)\n return mask",
"def poly_to_mask(polygon, width, height):\n\n\t# http://stackoverflow.com/a/3732128/1410871\n\timg = Image.new(mode='L', size=(width, height), color=0)\n\tImageDraw.Draw(img).polygon(xy=polygon, outline=0, fill=1)\n\tmask = np.array(img).astype(bool)\n\treturn mask",
"def brush_stroke_mask(W, H):\n min_num_vertex = 4\n max_num_vertex = 12\n mean_angle = 2*math.pi / 5\n angle_range = 2*math.pi / 15\n min_width = 12\n max_width = 40\n def generate_mask(W, H):\n average_radius = math.sqrt(H*H+W*W) / 8\n mask = Image.new('L', (W, H), 0)\n\n for _ in range(np.random.randint(1, 4)):\n num_vertex = np.random.randint(min_num_vertex, max_num_vertex)\n angle_min = mean_angle - np.random.uniform(0, angle_range)\n angle_max = mean_angle + np.random.uniform(0, angle_range)\n angles = []\n vertex = []\n for i in range(num_vertex):\n if i % 2 == 0:\n angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))\n else:\n angles.append(np.random.uniform(angle_min, angle_max))\n\n h, w = mask.size\n vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))\n for i in range(num_vertex):\n r = np.clip(\n np.random.normal(loc=average_radius, scale=average_radius//2),\n 0, 2*average_radius)\n new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)\n new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)\n vertex.append((int(new_x), int(new_y)))\n\n draw = ImageDraw.Draw(mask)\n width = int(np.random.uniform(min_width, max_width))\n draw.line(vertex, fill=1, width=width)\n for v in vertex:\n draw.ellipse((v[0] - width//2,\n v[1] - width//2,\n v[0] + width//2,\n v[1] + width//2),\n fill=1)\n\n if np.random.normal() > 0:\n mask.transpose(Image.FLIP_LEFT_RIGHT)\n if np.random.normal() > 0:\n mask.transpose(Image.FLIP_TOP_BOTTOM)\n mask = np.asarray(mask, np.float32)\n mask = np.reshape(mask, (W, H, 1))\n return mask\n\n return generate_mask(W, H)",
"def _wrap_image(self, im, border=7):\n # We should throw an exception if the image is smaller than 'border', since at this point\n # this process doesn't make sense.\n if im.bounds.xmax - im.bounds.xmin < border:\n raise RuntimeError(\"Periodic wrapping does not work with images this small!\")\n expanded_bounds = galsim.BoundsI(im.bounds.xmin-border, im.bounds.xmax+border,\n im.bounds.ymin-border, im.bounds.xmax+border)\n # Make new image with those bounds.\n im_new = galsim.ImageD(expanded_bounds)\n # Make the central subarray equal to what we want.\n im_new[im.bounds] = galsim.Image(im)\n # Set the empty bits around the center properly. There are four strips around the edge, and\n # 4 corner squares that need to be filled in. Surely there must be a smarter python-y way\n # of doing this, but I'm not clever enough to figure it out. This is basically the grossest\n # code I've ever written, but it works properly. Anyone who wants is welcome to fix it.\n #\n # Mike suggested a way to optimize it slightly, if we find that speed is an issue later on:\n # We can make just 4 copies, corresponding to\n # * Strip along left side.\n # * Upper left and strip along top can be done together.\n # * Lower left and strip along bottom can be done together.\n # * Upper right, strip along right, and lower right can be done together.\n # The code will also be a bit neater this way.\n #\n ## Strip along left-hand side\n b1 = border-1\n im_new[galsim.BoundsI(expanded_bounds.xmin, im.bounds.xmin-1,\n im.bounds.ymin, im.bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmax-b1,im.bounds.xmax,\n im.bounds.ymin, im.bounds.ymax)])\n ## Strip along right-hand side\n im_new[galsim.BoundsI(im.bounds.xmax+1, expanded_bounds.xmax,\n im.bounds.ymin, im.bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmin+b1,\n im.bounds.ymin, im.bounds.ymax)])\n ## Strip along the bottom\n im_new[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n expanded_bounds.ymin, im.bounds.ymin-1)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n im.bounds.ymax-b1, im.bounds.ymax)])\n ## Strip along the top\n im_new[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n im.bounds.ymax+1, expanded_bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmax,\n im.bounds.ymin, im.bounds.ymin+b1)])\n ## Lower-left corner\n im_new[galsim.BoundsI(expanded_bounds.xmin, im.bounds.xmin-1,\n expanded_bounds.ymin, im.bounds.ymin-1)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmax-b1, im.bounds.xmax,\n im.bounds.ymax-b1, im.bounds.ymax)])\n ## Upper-right corner\n im_new[galsim.BoundsI(im.bounds.xmax+1, expanded_bounds.xmax,\n im.bounds.ymax+1, expanded_bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmin+b1,\n im.bounds.ymin, im.bounds.ymin+b1)])\n ## Upper-left corner\n im_new[galsim.BoundsI(expanded_bounds.xmin, im.bounds.xmin-1,\n im.bounds.ymax+1, expanded_bounds.ymax)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmax-b1, im.bounds.xmax,\n im.bounds.ymin, im.bounds.ymin+b1)])\n ## Lower-right corner\n im_new[galsim.BoundsI(im.bounds.xmax+1, expanded_bounds.xmax,\n expanded_bounds.ymin, im.bounds.ymin-1)] = \\\n galsim.Image(im[galsim.BoundsI(im.bounds.xmin, im.bounds.xmin+b1,\n im.bounds.ymax-b1, im.bounds.ymax)])\n return im_new",
"def mask_outside_polygon(poly_verts, ax=None):\n import matplotlib.patches as mpatches\n import matplotlib.path as mpath\n\n if ax is None:\n ax = plt.gca()\n\n # Get current plot limits\n xlim = ax.get_xlim()\n ylim = ax.get_ylim()\n\n # Verticies of the plot boundaries in clockwise order\n bound_verts = [(xlim[0], ylim[0]), (xlim[0], ylim[1]), \n (xlim[1], ylim[1]), (xlim[1], ylim[0]), \n (xlim[0], ylim[0])]\n\n # A series of codes (1 and 2) to tell matplotlib whether to draw a line or \n # move the \"pen\" (So that there's no connecting line)\n bound_codes = [mpath.Path.MOVETO] + (len(bound_verts) - 1) * [mpath.Path.LINETO]\n poly_codes = [mpath.Path.MOVETO] + (len(poly_verts) - 1) * [mpath.Path.LINETO]\n\n print(\"bound_verts + poly_verts:\",\n list(zip(bound_verts + poly_verts,\n [num_to_command[code] for code in bound_codes + poly_codes])))\n\n # Plot the masking patch\n path = mpath.Path(bound_verts + poly_verts, bound_codes + poly_codes)\n patch = mpatches.PathPatch(path, facecolor='white', edgecolor='none', alpha=0.8)\n patch = ax.add_patch(patch)\n\n # Reset the plot limits to their original extents\n ax.set_xlim(xlim)\n ax.set_ylim(ylim)\n\n return patch",
"def fill_poly(shape2d, poly, outline=True, fill=True):\n poly = list(map(tuple, poly)) # PIL...\n\n img = Image.new('L', shape2d, 0)\n ImageDraw.Draw(img).polygon(poly, outline=outline, fill=fill)\n mask = np.array(img).T # PIL works with x,y and not row,col\n\n return mask",
"def clip_raster(polygon, in_raster, out_raster):\n gdal.Warp(out_raster, in_raster, cutlineDSName=polygon)",
"def create_mask(src_img_size):\r\n\r\n h, w, c = src_img_size\r\n mask = np.zeros(src_img_size, np.uint8)\r\n\r\n # definirea coordonatelor hexagonului inscris in piesa candidat\r\n hexa_coord = np.array([[w / 4, 0], [3 * w / 4, 0], [w, h / 2], [3 * w / 4, h], [w / 4, h], [0, h / 2]], np.int32)\r\n cv.fillPoly(mask, [hexa_coord], (255, 255, 255))\r\n\r\n return mask",
"def add_region(mask, poly_line):\n\n c, r = masked_points(poly_line, mask.shape)\n mask[r, c] = 1\n\n return mask",
"def generate_wedge_mask(img, angle):\n\n # Obtain parameters for scaling\n height, width = img.shape[0], img.shape[1]\n\n # Creating a right triangle with the given angle and coordinates\n op_side = math.tan(math.radians(angle)) * img.shape[1]\n c = [0, width, width]\n r = [height, height, int(height - op_side)]\n\n # Generate map of three points that compose the right triangle\n rc = np.array((c, r)).T\n\n # Create a mask\n mask = np.zeros_like(img)\n\n # Draw wedge on the mask\n cv2.drawContours(mask, [rc], 0, 255, -1)\n\n # return mask\n return mask",
"def convert_mask_to_polygon(mask, xy_offset=(0, 0), simplify_tol=0.5,\n conf_thresh=0.5):\n\n # Create padded mask\n padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.float)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, conf_thresh) # Gives n (row, col) coordinates\n\n gdal_ring = ogr.Geometry(ogr.wkbLinearRing)\n\n if len(contours) > 1:\n lens = [len(cont) for cont in contours]\n argmax = np.argmax(lens)\n contours = [contours[argmax]]\n logging.warning(f'Found multiple mask contours for 1 crater, selecting ind {argmax}.')\n\n # Remove the padding and transition from (row, col) to (x, y)\n contour = np.fliplr(contours[0] - 1)\n\n # Convert predicted mask to polygon\n for verts in contour:\n x = verts[0] + xy_offset[0]\n y = verts[1] + xy_offset[1]\n gdal_ring.AddPoint_2D(x, y)\n x0 = contour[0, 0] + xy_offset[0]\n y0 = contour[0, 1] + xy_offset[1]\n gdal_ring.AddPoint_2D(x0, y0) # Add 0th point to close the polygon\n\n # Create polygon\n gdal_poly = ogr.Geometry(ogr.wkbPolygon)\n gdal_poly.AddGeometry(gdal_ring)\n\n # Attempt simplification\n gdal_poly_simp = gdal_poly.Clone()\n gdal_poly_simp.SimplifyPreserveTopology(simplify_tol)\n\n # Return original polygon if there is an issue during simplification\n if gdal_poly_simp.IsEmpty():\n logging.warning('Simplified polygon is empty. Using original polygon.')\n return gdal_poly\n\n return gdal_poly_simp",
"def draw_mask_only(image, box, mask, label=None, color=None, binarize_threshold=0.5):\n\n from keras_retinanet.utils.colors import label_color\n\n # import miscellaneous modules\n import cv2\n import numpy as np\n\n if label is not None:\n color = label_color(label)\n if color is None:\n color = (255, 255, 255)\n\n # resize to fit the box\n mask = mask.astype(np.float32)\n mask = cv2.resize(mask, (box[2] - box[0], box[3] - box[1]))\n\n # binarize the mask\n mask = (mask > binarize_threshold).astype(np.uint8)\n\n # draw the mask in the image\n mask_image = np.zeros((image.shape[0], image.shape[1]), np.uint8)\n mask_image[box[1]:box[3], box[0]:box[2]] = mask\n mask = mask_image\n\n # compute a nice border around the mask\n border = mask - cv2.erode(mask, np.ones((5, 5), np.uint8), iterations=1)\n\n # apply color to the mask and border\n mask = (np.stack([mask] * 3, axis=2) * color).astype(np.uint8)\n border = (np.stack([border] * 3, axis=2) * (255, 255, 255)).astype(np.uint8)\n # this is how you look into the mask\n # for i in mask:\n # \tfor j in i:\n # \t\tb = False\n # \t\tfor k in i:\n # \t\t\tfor l in k:\n # \t\t\t\tif l != 0:\n # \t\t\t\t\tb = True\n # \t\t\t\tif b:\n # \t\t\t\t\tbreak\n # \t\t\tif b:\n # \t\t\t\tbreak\n # \t\tif b:\n # \t\t\tprint (j)\n\n # draw the mask\n indices = np.where(mask != color)\n image[indices[0], indices[1], :] = 0 * image[indices[0], indices[1], :]\n\n return mask",
"def create_triangle_mask(height, width, side=1.5, centre=0, bottom=1):\n # Set mask points\n centre_y = height / 2 + height / 2 * centre\n centre_x = width / 2 \n left_pt = [(centre_x - (width / 2) * side), \\\n (centre_y - height / 2)]\n right_pt = [(centre_x + (width / 2 * side)), \\\n (centre_y - height / 2)]\n bottom_pt = [centre_x, centre_y + (height / 2) * bottom]\n pts = np.array([bottom_pt, left_pt, right_pt], np.int32)\n # Black image\n result = np.zeros((height, width, 3), np.uint8)\n # Create traiangle\n result = cv2.fillConvexPoly(result, pts, (255, 255, 255), 1)\n # Convert to GRAY\n result = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)\n return result",
"def polygonize(obj, np_img, np_mask, outshpFile=None):\n\n\tif outshpFile is None:\n\t\toutshpFile = obj.tmpshpFile1\n\t#--- open tmp file (GDAL) -----------------------------------------\n\togr_shp = ogr.GetDriverByName(\"ESRI Shapefile\").CreateDataSource( outshpFile )\n\n\t#--- get band (GDAL) -------------------------------------\n\tdatatype = 1 # uint8\n\tdrv = gdal.GetDriverByName('MEM')\n\tds_img = drv.Create( '', obj.cols, obj.rows, 1, datatype )\n\tds_img.SetProjection( obj.proj )\n\tds_img.SetGeoTransform( obj.geo )\n\tds_imgband = ds_img.GetRasterBand(1)\n\t#ds_imgband.WriteArray(np_img)\n\n\t#--- mask band (GDAL) -----------------------------------\n\tdatatype = 1 # uint8\n\tdrv = gdal.GetDriverByName('MEM')\n\tds_mask = drv.Create( '', obj.cols, obj.rows, 1, datatype )\n\tds_mask.SetProjection( obj.proj )\n\tds_mask.SetGeoTransform( obj.geo )\n\tds_maskband = ds_img.GetRasterBand(1)\n\tds_maskband.WriteArray(np_mask)\n\n\t#--- masking -------------------------------------------\n\tnp_img = np_img * np_mask\n\tds_imgband.WriteArray(np_img)\n\n\t#--- create layer (ogr) -------------------------------\n\togr_layer = ogr_shp.CreateLayer(\"polygonized\")\n\n\t#--- exec raster to polygon (GDAL) ----------------------------------\n\tgdal.Polygonize( ds_imgband, ds_maskband, ogr_layer, 0, [], callback=None )\n\n\n\t#--- number of features -----\n\tfeatureCount = ogr_layer.GetFeatureCount()\n\n\togr_shp = None\n\n\n\tprint(\"--, finished, polygonize()\")\n\n\treturn featureCount",
"def mask_region(grayscale_image, region):\n \n mask = np.zeros_like(grayscale_image)\n \n cv2.fillPoly(mask, region, 255)\n \n masked_image = cv2.bitwise_and(grayscale_image, mask)\n \n return masked_image",
"def image_mask_overlay(img, mask):\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n if mask[i, j] == 1:\n img[i, j, :] = [255, 0, 255]\n\n return img",
"def generate_opaque_mask(self, img):\n\n alpha = img.split()[3]\n return alpha.point(lambda a: int(min(a, 25.5) * 10))",
"def makeEdgemask(self,im_mask): #WZ\n width = 15 # 20 25 10\n sn0= 7.5 # 10\n npt=0L\n # im_mask=self.parlists[0][i]['in_mask']\n # maskfits = pyfits.open(im_mask,mode='update')\n maskfits = pyfits.open(im_mask)\n im_sci=string.replace(im_mask,'inmask','SCI_')\n scifits = pyfits.open(im_sci)\n data = scifits[0].data.copy()\n naxis1 = scifits[0].header.get('NAXIS1')\n naxis2 = scifits[0].header.get('NAXIS2')\n sky = scifits[0].header.get('ALIGNSKY')\n mask = maskfits[0].data.copy()\n for j in range(0,width-1): # y\n for k in range(0,naxis1-1): # x\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print mask[10,1000],' 10,1000'\n #print npt\n #npt=0\n for j in range(0,naxis2-1):\n for k in range(0,width-1):\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print npt\n #print mask[1000,10],' 100,10'\n #npt=0\n for j in range(naxis2-width-1,naxis2-1):\n for k in range(0,naxis1-1):\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print mask[2040,1000], ' 2040,1000'\n #print npt\n #npt=0\n for j in range(0,naxis2-1):\n for k in range(naxis1-width-1,naxis1-1):\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print mask[1000,4090] ,' 1000,4090'\n #print npt\n maskfits[0].data = mask.copy()\n self.logfile.write(str(npt) + \" pixels masked near the edges in image: \" + im_mask)\n newfits = pyfits.HDUList()\n newfits.append(pyfits.PrimaryHDU())\n newfits[0].header = maskfits[0].header\n newfits[0].data = mask.copy()\n # pdb.set_trace()\n scifits.close() \n if os.path.isfile(im_mask):\n os.remove(im_mask)\n newfits.writeto(im_mask)\n # maskfits.flush()\n del npt,scifits,maskfits,newfits\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
uses a mask to extract an image portion and pad it with a white border (set ratio to 0 for no padding); uses cv2.findContours to locate separate polygons in a single mask if more than one is present; added padding may overlap, and nothing is implemented to handle this. | def extract_and_pad_mask(fg_img, fg_mask, bg_mask, padding_ratio, transform=True):
# threshold to make binary
# if transform:
# tmp_fg_mask = np.zeros(fg_img.shape, dtype=np.uint8)
# fg_img, fg_mask = transforms(fg_img, fg_mask, bg_mask)
# fg_mask = fg_mask.draw_on_image(tmp_fg_mask)[0]
# print(fg_img.shape, fg_img.dtype, fg_mask.shape, fg_mask.dtype)
    _, binary_mask = cv2.threshold(fg_mask, 110, 255, cv2.THRESH_BINARY)
    # find contours on the binarized mask
    contours, _ = cv2.findContours(binary_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
results = []
for cnt in contours:
# convert contour to polygon
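        # epsilon of 0.9% of the contour perimeter controls how strongly the contour is simplified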
poly = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True), True)
# create new mask with only current polygon
this_poly_mask = np.zeros(fg_img.shape[:2], dtype=np.uint8)
cv2.fillPoly(this_poly_mask, [poly], (255))
# enlargen polygon for padding
enlargened_poly = np.squeeze(enlargen_polygon(poly, padding_ratio), axis=1)
# get image of original polygon + added padding
padded_poly_img, padded_mask = \
get_padded_polygon_image(enlargened_poly, fg_img, this_poly_mask)
# get indices to crop from original fg_img into smallest region possible
min_y, max_y, min_x, max_x = get_crop_indices_from_mask(padded_mask)
padded_poly_img = padded_poly_img[min_y:max_y,min_x:max_x,:]
padded_mask = padded_mask[min_y:max_y,min_x:max_x]
this_poly_mask = this_poly_mask[min_y:max_y, min_x:max_x]
results.append({"padded_img":padded_poly_img,
"padded_mask": padded_mask, "annotations_mask": this_poly_mask})
return results | [
"def bg_mask(self, anns, width_height, *, crowd_margin):\n anns1, anns2 = anns\n\n mask = np.ones((\n (width_height[1] - 1) // self.stride + 1,\n (width_height[0] - 1) // self.stride + 1,\n ), dtype=np.bool)\n crowd_bbox = [np.inf, np.inf, 0, 0]\n for ann in anns1 + anns2:\n if not ann['iscrowd']:\n valid_keypoints = 'keypoints' in ann and np.any(ann['keypoints'][:, 2] > 0)\n if valid_keypoints:\n continue\n\n if 'mask' not in ann:\n bb = ann['bbox'].copy()\n bb /= self.stride\n bb[2:] += bb[:2] # convert width and height to x2 and y2\n\n # left top\n left = np.clip(int(bb[0] - crowd_margin), 0, mask.shape[1] - 1)\n top = np.clip(int(bb[1] - crowd_margin), 0, mask.shape[0] - 1)\n\n # right bottom\n # ceil: to round up\n # +1: because mask upper limit is exclusive\n right = np.clip(int(np.ceil(bb[2] + crowd_margin)) + 1,\n left + 1, mask.shape[1])\n bottom = np.clip(int(np.ceil(bb[3] + crowd_margin)) + 1,\n top + 1, mask.shape[0])\n\n crowd_bbox[0] = min(crowd_bbox[0], left)\n crowd_bbox[1] = min(crowd_bbox[1], top)\n crowd_bbox[2] = max(crowd_bbox[2], right)\n crowd_bbox[3] = max(crowd_bbox[3], bottom)\n continue\n\n assert False # because code below is not tested\n mask[ann['mask'][::self.stride, ::self.stride]] = 0\n\n if crowd_bbox[1] < crowd_bbox[3] and crowd_bbox[0] < crowd_bbox[2]:\n LOG.debug('crowd_bbox: %s', crowd_bbox)\n mask[crowd_bbox[1]:crowd_bbox[3], crowd_bbox[0]:crowd_bbox[2]] = 0\n\n return mask",
"def cropwithpad(image, contours, ratio, crop_box):\n img_box = Box.create_default(image.shape)\n if crop_box.is_in(img_box):\n image, contours, _ = Proc.crop(image, contours, 1.0, crop_box)\n else:\n pl = int(max(0, img_box[0] - floor(crop_box[0]))) # calculating the padding\n pr = int(max(0, ceil(crop_box[1]) - img_box[1]))\n pu = int(max(0, img_box[2] - floor(crop_box[2])))\n pb = int(max(0, ceil(crop_box[3]) - img_box[3]))\n image, contours, _ = Proc.pad(image, contours, 1.0, (pu, pb, pl, pr))\n crop_box.shift_box((pl, pu)) # the padded image might have a new origo\n image, contours, _ = Proc.crop(image, contours, 1.0, crop_box)\n return image, contours, ratio",
"def mark_contours(mask):\n padded = 2\n mask = mask.astype(np.uint8) * 255\n\n background = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.uint8)\n padded_background = np.pad(background.copy(), ((padded, padded), (padded, padded)), 'edge')\n background_rgb = cv2.cvtColor(padded_background, cv2.COLOR_GRAY2RGB)\n\n padded_mask = np.pad(mask.copy(), ((padded, padded), (padded, padded), (0, 0)), 'edge')\n\n _, thresh = cv2.threshold(padded_mask, 127, 255, cv2.THRESH_BINARY)\n _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n contoured_rgb = cv2.drawContours(background_rgb, contours, -1, (255, 255, 255), 1)\n contoured_gray = cv2.cvtColor(contoured_rgb, cv2.COLOR_RGB2GRAY)\n # workaround due to OpenCV issue, the contour starts from the 1st pixel\n contoured_mask = contoured_gray[padded:-padded, padded:-padded]\n\n return contoured_mask",
"def mask_to_border(mask):\n h, w = mask.shape\n border = np.zeros((h, w))\n\n contours = find_contours(mask, 0.5) # since the input range is [0, 1], the threshold is 0.5\n for contour in contours:\n for c in contour:\n x = int(c[0])\n y = int(c[1])\n border[x][y] = 1 # since the input is binary, the value is 1\n\n return border",
"def fixMasks(image, table_mask, column_mask):\r\n table_mask = table_mask.reshape(1024,1024).astype(np.uint8)\r\n column_mask = column_mask.reshape(1024,1024).astype(np.uint8)\r\n \r\n #get contours of the mask to get number of tables\r\n contours, table_heirarchy = cv2.findContours(table_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n \r\n table_contours = []\r\n #ref: https://www.pyimagesearch.com/2015/02/09/removing-contours-image-using-python-opencv/\r\n #remove bad contours\r\n\r\n #print(contours)\r\n\r\n for c in contours:\r\n # if the contour is bad, draw it on the mask\r\n\r\n\r\n #if not is_contour_bad(c):\r\n if cv2.contourArea(c) > 2000:\r\n table_contours.append(c)\r\n \r\n if len(table_contours) == 0:\r\n return None\r\n\r\n #ref : https://docs.opencv.org/4.5.2/da/d0c/tutorial_bounding_rects_circles.html\r\n #get bounding box for the contour\r\n \r\n table_boundRect = [None]*len(table_contours)\r\n for i, c in enumerate(table_contours):\r\n polygon = cv2.approxPolyDP(c, 3, True)\r\n table_boundRect[i] = cv2.boundingRect(polygon)\r\n \r\n #table bounding Box\r\n table_boundRect.sort()\r\n \r\n col_boundRects = []\r\n for x,y,w,h in table_boundRect:\r\n \r\n col_mask_crop = column_mask[y:y+h,x:x+w]\r\n \r\n #get contours of the mask to get number of tables\r\n contours, col_heirarchy = cv2.findContours(col_mask_crop, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n #get bounding box for the contour\r\n boundRect = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n polygon = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(polygon)\r\n \r\n #adjusting columns as per table coordinates\r\n boundRect[i] = (boundRect[i][0] + x ,\r\n boundRect[i][1] + y ,\r\n boundRect[i][2],\r\n boundRect[i][3])\r\n \r\n col_boundRects.append(boundRect)\r\n \r\n image = image[...,0].reshape(1024, 1024).astype(np.uint8)\r\n \r\n #draw bounding boxes\r\n color = (0,255,0)\r\n thickness = 4\r\n \r\n for x,y,w,h in table_boundRect:\r\n image = cv2.rectangle(image, (x,y),(x+w,y+h), color, thickness)\r\n \r\n return image, table_boundRect, col_boundRects",
"def get_mask_M3(image):\n\n # Tunning parameters. We can put this as input to the function as well\n CANNY_THRESH_1 = 30\n CANNY_THRESH_2 = 130\n\n # load the input image\n image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n blurred = cv.GaussianBlur(image, (5, 5), 0)\n\n # obtain the edges of the image\n edges = cv.Canny(blurred, CANNY_THRESH_1, CANNY_THRESH_2)\n edges = cv.dilate(edges, None)\n edges = cv.erode(edges, None)\n\n # find contours in the edged image\n _,cnts,_ = cv.findContours(edges.copy(), cv.RETR_LIST,\n cv.CHAIN_APPROX_NONE)\n cnts = imutils.grab_contours(cnts)\n\n # sort from biggest area to smallest and take the top5\n cnts = sorted(cnts, key = cv.contourArea, reverse = True)[:5]\n\n\n mask = np.zeros(edges.shape)\n cmax, max_extent=[],0\n # loop over the contours from bigger to smaller, and find the biggest one with the right orientation\n for c in cnts:\n # # approximate to the hull.\n hull = cv.convexHull(c)\n\n # find the contour with the highest extent compared to the bounding rectangle\n area = cv.contourArea(hull)\n x,y,w,h = cv.boundingRect(c)\n rect_area = w*h\n extent = float(area)/rect_area\n\n # get the contour with max extent (area covered, approximation area)\n if max_extent<extent:\n max_extent=extent\n cmax=hull\n\n cv.fillConvexPoly(mask, cmax, (255)) # fill the mask\n\n return mask",
"def circularcrop(img, border=200, threshold=20000, threshold1=100):\n s = np.sum(img, axis=2)\n cols = np.sum(s, axis=0) > threshold \n rows = np.sum(s, axis=1) > threshold\n\n height = rows.shape[0]\n width = cols.shape[0]\n\n x_min = np.argmax(cols[0:width])\n x_max = width/2 + np.argmin(cols[width/2:width-1])\n y_min = np.argmax(rows[0:height/2])\n y_max = np.argmin(cols[height/2:height-1])\n y_max = height/2 + y_max if y_max > 0 else height\n\n radius = (x_max - x_min)/2\n center_x = x_min + radius\n center_y = y_min + radius # the default case (if y_min != 0)\n if y_min == 0: # the upper side is cropped\n if height - y_max > 0: # lower border is not 0\n center_y = y_max - radius\n else:\n upper_line_width = np.sum(s[0,:] > threshold1) # threshold for single line\n center_y = math.sqrt( radius**2 - (upper_line_width/2)**2)\n radius1 = radius - border \n \n mask = np.zeros(img.shape[0:2])\n rr, cc = circle(center_y, center_x, radius1, img.shape)\n mask[rr, cc] = 1\n img[:,:,0] *= mask\n img[:,:,1] *= mask\n img[:,:,2] *= mask \n \n x_borders = (center_x - radius1, img.shape[1] - center_x - radius1)\n y_borders = (max(center_y - radius1,0), max(img.shape[0] - center_y - radius1, 0))\n\n imgres = util.crop(img, (y_borders, x_borders, (0,0)))\n maskT = util.crop(mask, (y_borders, x_borders))\n\n border_pixels = np.sum(1 - maskT)\n \n return imgres, maskT, center_x, center_y, radius",
"def max_width(mask):\r\n # mask_img = cv2.imread(mask, cv2.IMREAD_GRAYSCALE)\r\n mask_img = mask\r\n # cv2.imwrite(\"mask_img.jpg\", mask_img)\r\n # print(\"pixel:\", mask[0, 0])\r\n ret, mask_img = cv2.threshold(mask_img, 30, 255, cv2.THRESH_BINARY)\r\n # print(\"shape\", mask_img.shape)\r\n height, width = mask_img.shape\r\n\r\n # count max width\r\n max_wid = 0\r\n for i in range(height):\r\n # initialize leftend and rightend of mask area as -1\r\n leftend = -1\r\n rightend = -1\r\n for j in range(width-1):\r\n if mask_img[i, j] > 127 and leftend == -1:\r\n leftend = j\r\n if mask_img[i, j] == 0 and mask_img[i, j-1] > 0 and j > 0:\r\n rightend = j\r\n cv2.imwrite(\"mask_img.png\", branding(mask_img, (i, j), 1))\r\n print(\"leftend:({}, {}); rightedn:({}, {})\\n\".format(i, leftend, i, rightend))\r\n break\r\n max_wid = max(max_wid, rightend-leftend)\r\n # for col in range(width):\r\n # # initialize leftend and rightend of mask area as -1\r\n # leftend = -1\r\n # rightend = -1\r\n # for row in range(height-1):\r\n # if mask_img[row, col] > 30 and leftend == -1:\r\n # leftend = row\r\n # if mask_img[row, col] == 0 and mask_img[row-1, col] > 0 and row > 0:\r\n # rightend = row\r\n # # cv2.imwrite(\"mask_img.png\", branding(mask_img, (i, j), 2))\r\n # # print(\"leftend:({}, {}); rightedn:({}, {})\\n\".format(i, leftend, i, rightend))\r\n # break\r\n # max_wid = max(max_wid, rightend-leftend)\r\n \r\n # print(\"max width: {}\".format(max_wid))\r\n return max_wid",
"def get_mask_M0(image):\n\n image_hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n\n h,s,v = cv.split(image_hsv)\n\n # 0s --> contours\n mask = cv.adaptiveThreshold(s, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv.THRESH_BINARY, 51, 10)\n\n # 1s --> contours\n mask = 255-mask\n\n # Denoising with \"opening\" morphology operator\n dilatation_size = 1\n element = cv.getStructuringElement(cv.MORPH_RECT, (2*dilatation_size+1, 2*dilatation_size+1),\n (int(dilatation_size/2), int(dilatation_size/2)))\n mask_open = cv.morphologyEx(mask, cv.MORPH_OPEN, element, iterations=3)\n\n # Coordinates of non-black pixels (picture contours)\n coords = np.argwhere(mask_open)\n\n # First and last non-black pixel\n x0, y0 = coords.min(axis=0)\n x1, y1 = coords.max(axis=0)\n\n # Bounding box of non-black pixels\n pnts = np.asarray([[y0,x0], [y0,x1], [y1,x1], [y1,x0]], dtype=np.int32)\n final_mask = np.zeros(mask.shape)\n cv.fillConvexPoly(final_mask, pnts, 255)\n\n return final_mask",
"def innerRect(img, rects, maskedImg, mask):\n maskedImg = cv.cvtColor(maskedImg, cv.COLOR_BGR2GRAY)\n for rect in rects:\n x1, y1, x2, y2 = makeTuple(rect)\n maskedImg[y1:y2, x1:x2] = 0\n mask[maskedImg == 0] = 0\n mask[maskedImg == 255] = 1\n cutImg = grabCut(img, None, mask)\n return cutImg",
"def get_padded_polygon_image(enlargened_poly, img, mask, color=255):\n\n # mask to extract area of interest\n extracted_img = cv2.bitwise_and(img, img, mask=mask)\n # invert mask\n mask_inv = cv2.bitwise_not(mask)\n\n padded_mask = np.zeros(mask.shape, dtype=np.uint8)\n cv2.fillPoly(padded_mask, [np.int32(enlargened_poly)], (color))\n\n padding = cv2.bitwise_and(padded_mask, padded_mask, mask=mask_inv)\n padding = np.expand_dims(padding, 2)\n padding = np.repeat(padding, 3, 2)\n\n padded_img = cv2.add(padding, extracted_img)\n\n return padded_img, padded_mask",
"def _crop(self, img, hm, padding, crop_box):\n img = np.pad(img, padding, mode = 'constant')\n hm = np.pad(hm, padding, mode = 'constant')\n max_lenght = max(crop_box[2], crop_box[3])\n img = img[crop_box[1] - max_lenght //2:crop_box[1] + max_lenght //2, crop_box[0] - max_lenght // 2:crop_box[0] + max_lenght //2]\n hm = hm[crop_box[1] - max_lenght //2:crop_box[1] + max_lenght//2, crop_box[0] - max_lenght // 2:crop_box[0] + max_lenght // 2]\n return img, hm",
"def draw_mask_only(image, box, mask, label=None, color=None, binarize_threshold=0.5):\n\n from keras_retinanet.utils.colors import label_color\n\n # import miscellaneous modules\n import cv2\n import numpy as np\n\n if label is not None:\n color = label_color(label)\n if color is None:\n color = (255, 255, 255)\n\n # resize to fit the box\n mask = mask.astype(np.float32)\n mask = cv2.resize(mask, (box[2] - box[0], box[3] - box[1]))\n\n # binarize the mask\n mask = (mask > binarize_threshold).astype(np.uint8)\n\n # draw the mask in the image\n mask_image = np.zeros((image.shape[0], image.shape[1]), np.uint8)\n mask_image[box[1]:box[3], box[0]:box[2]] = mask\n mask = mask_image\n\n # compute a nice border around the mask\n border = mask - cv2.erode(mask, np.ones((5, 5), np.uint8), iterations=1)\n\n # apply color to the mask and border\n mask = (np.stack([mask] * 3, axis=2) * color).astype(np.uint8)\n border = (np.stack([border] * 3, axis=2) * (255, 255, 255)).astype(np.uint8)\n # this is how you look into the mask\n # for i in mask:\n # \tfor j in i:\n # \t\tb = False\n # \t\tfor k in i:\n # \t\t\tfor l in k:\n # \t\t\t\tif l != 0:\n # \t\t\t\t\tb = True\n # \t\t\t\tif b:\n # \t\t\t\t\tbreak\n # \t\t\tif b:\n # \t\t\t\tbreak\n # \t\tif b:\n # \t\t\tprint (j)\n\n # draw the mask\n indices = np.where(mask != color)\n image[indices[0], indices[1], :] = 0 * image[indices[0], indices[1], :]\n\n return mask",
"def crop_border(img):\n mask = img > 0\n # Keeps rows and columns of images if they are not completely black\n return img[np.ix_(mask.any(1), mask.any(0))]",
"def crop(masks, boxes, padding: int = 1):\n h, w, n = masks.shape\n x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding)\n y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding)\n\n np.arange(w, dtype=x1.dtype)\n rows = np.arange(w, dtype=x1.dtype).reshape((1, -1, 1)).repeat(repeats=h, axis=0).repeat(repeats=n, axis=2)\n cols = np.arange(h, dtype=x1.dtype).reshape((-1, 1, 1)).repeat(repeats=w, axis=1).repeat(repeats=n, axis=2)\n\n masks_left = rows >= x1.reshape((1, 1, -1))\n masks_right = rows < x2.reshape((1, 1, -1))\n masks_up = cols >= y1.reshape((1, 1, -1))\n masks_down = cols < y2.reshape((1, 1, -1))\n\n crop_mask = masks_left * masks_right * masks_up * masks_down\n\n return masks * crop_mask.astype(np.float32)",
"def get_contour_sample(img_orig, sample_interval=5):\n img = img_orig.copy()\n img[img > 10] = 255\n element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))\n mask = pad_image_cnt(img, None, (0, 0), (300, 300), bg=(1500,1500))\n# print mask\n for j in range(99):\n mask = cv2.dilate(mask, element)\n# cv2.imshow(\"mask_dilate\", mask)\n# cv2.waitKey()\n cnts, hier = cv2.findContours(mask.copy(), cv2.cv.CV_RETR_EXTERNAL, \n cv2.cv.CV_CHAIN_APPROX_TC89_L1)\n if len(cnts) == 1:\n break\n# cv2.imshow(\"mask_dilate\", mask)\n# cv2.waitKey()\n \n mask = cv2.erode(mask, element, iterations=j+1)\n# cv2.imshow(\"mask\", mask)\n# cv2.waitKey()\n# cv2.destroyWindow(\"mask\")\n cnts, hier = cv2.findContours(mask.copy(), cv2.cv.CV_RETR_EXTERNAL, \n cv2.cv.CV_CHAIN_APPROX_NONE)\n img_contour_pts = np.squeeze(np.vstack(cnts))\n img_contour_sample = img_contour_pts[range(0, img_contour_pts.shape[0], sample_interval), :]\n img_contour_sample = img_contour_sample - np.matlib.repmat((300,300),\n img_contour_sample.shape[0], 1)\n# draw_contours(img_contour_sample, (800,800), show=True)\n return img_contour_sample",
"def get_mask_from_bounding_box(bounding_box_coordinates,shape):\n #unwrap bouding box coordinates\n x,y,w,h = bounding_box_coordinates\n #create blank image with corresponding shape\n blank_image = np.zeros(shape, np.uint8)\n #create corrected mask\n corrected_mask = cv2.rectangle(blank_image,(x,y),(x+w,y+h),(255,255,255),-1)\n return corrected_mask",
"def analyze_cells(img,pwd,character):\n TARGET = 100 #number of cells\n percentage = 15\n percentage = percentage / 200\n \n kernels = [x for x in range(3,249) if x%2 != 0]\n kernel = kernels[round(len(kernels)/2)]\n \n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n heirarchy = [[],[]]\n while (len(heirarchy[0]) != TARGET + 1):\n blur = cv2.GaussianBlur(gray, (kernel,kernel), 0)\n thresh = cv2.threshold(blur,127,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n cnts, heirarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n \n if (len(heirarchy[0]) < TARGET + 1):\n kernels = [x for x in range(kernels[0], kernel) if x%2 !=0]\n kernel = kernels[round(len(kernels)/2)]\n else:\n kernels = [x for x in range(kernel, kernels[-1])]\n kernel = kernels[round(len(kernels)/2)]\n \n \n count = 0\n for i in range(len(cnts)):\n if (heirarchy[0][i][3] != -1):\n x,y,w,h = cv2.boundingRect(cnts[i])\n cropped = gray[y:y+h, x:x+w]\n thresh = cv2.threshold(cropped, 127,255,cv2.THRESH_BINARY_INV)[1]\n mask = np.zeros((cropped.shape[0], cropped.shape[1]))\n x1 = cropped.shape[0]\n x2 = round(x1 * percentage)\n y1 = cropped.shape[1]\n y2 = round(y1 * percentage)\n mask[x2:x1-x2, y2:y1-y2] = 1\n masked_image = thresh * mask\n \n masked_image = cv2.resize(masked_image, (28,28))\n try:\n os.remove(pwd + '/cell_images/cell' + str(count) + '.jpg')\n except:\n pass\n cv2.imwrite(pwd+'/cell_images/cell' + str(count) + '.jpg',masked_image)\n count +=1\n \n cells_to_csv(masked_image, pwd, character)",
"def extractWithMask(self,source,i,grow=0,bg=0,margin=None,dtype=None):\n if dtype is None: dtype = source.dtype\n if self.isEmpty(i): return None\n if margin is None: margin=grow\n box,labels = self.groups[i]\n image = sl.cut(source,box,margin=margin,bg=bg,dtype=dtype)\n mask = sl.cut(self.segmentation,box,margin=grow,bg=0)\n mask = in1d(mask,array(labels,'i')).reshape(image.shape)\n mask = morphology.binary_dilation(mask,iterations=grow)\n return where(mask,image,bg),mask"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
given a mask indicating the positions of previous overlays, find a new position at which to overlay the current fg | def randomly_choose_overlay_location(fg, bg_mask, step=50):
# get height, width of img mask
rows, cols = bg_mask.shape
    # get height, width of the current foreground to overlay
h, w, _ = fg.shape
# adjust max row, col by subtracting foreground dimensions
rows = rows - h
cols = cols - w
# get list of possible starting coordinates
possible_starting_points = list(product([i for i in range(0, rows, step)], [i for i in range(0, cols, step)]))
starting_indices = [i for i in range(len(possible_starting_points))]
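    # indices into possible_starting_points that have not been tried yet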
# until a good region is found, randomly sample from possible overlay regions
# and check to see if any previous overlays intersect with that position
    while len(starting_indices):
        # pick a random position in the remaining candidate list and remove it
        pick = np.random.randint(len(starting_indices))
        start = starting_indices.pop(pick)
        row, col = possible_starting_points[start]
        # the candidate region is free if no previously overlaid pixels fall inside it
        region = bg_mask[row:row+h, col:col+w]
        if region.sum() == 0:
            return row, col
return None | [
"def overlay(start_coords, padded_fg_img, padded_fg_mask, fg_anno_mask, bg_img, bg_mask):\n row, col = start_coords\n h, w = padded_fg_mask.shape\n\n # create new mask of same dims as bg and place padded_fg_mask there at proper location\n tmp_bg_mask = np.zeros(shape= bg_mask.shape, dtype=np.uint8)\n tmp_bg_mask[row:row+h, col:col+w] = padded_fg_mask\n tmp_bg_mask_inv = cv2.bitwise_not(tmp_bg_mask)\n\n # create new img of same dims as bg, place padded_fg_img there\n tmp_fg_img = np.zeros(bg_img.shape, dtype=np.uint8)\n tmp_fg_img[row:row+h, col:col+w] = padded_fg_img\n\n # use mask to combine bg_img, tmp_fg_img\n bg_img = cv2.bitwise_and(bg_img, bg_img, mask=tmp_bg_mask_inv)\n tmp_fg_img = cv2.bitwise_and(tmp_fg_img, tmp_fg_img, mask=tmp_bg_mask)\n bg_img = cv2.add(bg_img, tmp_fg_img)\n\n # update bg_mask with annos\n bg_mask[row:row+h, col:col+w] += fg_anno_mask\n\n return bg_img, bg_mask",
"def extract_and_pad_mask(fg_img, fg_mask, bg_mask, padding_ratio, transform=True):\n # threshold to make binary\n # if transform:\n # tmp_fg_mask = np.zeros(fg_img.shape, dtype=np.uint8)\n # fg_img, fg_mask = transforms(fg_img, fg_mask, bg_mask)\n # fg_mask = fg_mask.draw_on_image(tmp_fg_mask)[0]\n # print(fg_img.shape, fg_img.dtype, fg_mask.shape, fg_mask.dtype)\n\n _, threshold = cv2.threshold(fg_mask, 110, 255,\n cv2.THRESH_BINARY)\n # find contours\n contours, _ = cv2.findContours(fg_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n\n results = []\n for cnt in contours:\n # convert contour to polygon\n poly = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True), True)\n # create new mask with only current polygon\n this_poly_mask = np.zeros(fg_img.shape[:2], dtype=np.uint8)\n cv2.fillPoly(this_poly_mask, [poly], (255))\n # enlargen polygon for padding\n enlargened_poly = np.squeeze(enlargen_polygon(poly, padding_ratio), axis=1)\n # get image of original polygon + added padding\n padded_poly_img, padded_mask = \\\n get_padded_polygon_image(enlargened_poly, fg_img, this_poly_mask)\n # get indices to crop from original fg_img into smallest region possible\n min_y, max_y, min_x, max_x = get_crop_indices_from_mask(padded_mask)\n padded_poly_img = padded_poly_img[min_y:max_y,min_x:max_x,:]\n padded_mask = padded_mask[min_y:max_y,min_x:max_x]\n this_poly_mask = this_poly_mask[min_y:max_y, min_x:max_x]\n results.append({\"padded_img\":padded_poly_img,\n \"padded_mask\": padded_mask, \"annotations_mask\": this_poly_mask})\n\n return results",
"def overlay_images(back, fore, x, y):\n fore = cv2.cvtColor(fore, cv2.COLOR_BGR2BGRA)\n rows, cols, channels = fore.shape \n trans_indices = fore[...,3] != 0 # Where not transparent\n overlay_copy = back[y:y+rows, x:x+cols] \n overlay_copy[trans_indices] = fore[trans_indices]\n back[y:y+rows, x:x+cols] = overlay_copy",
"def _find_middle_coordinates_pip(bg: tuple, fg: tuple):\n bg_width, bg_height = bg\n fg_width, fg_height = fg\n\n bg_middle_x = bg_width // 2\n bg_middle_y = bg_height // 2\n\n fg_middle_x = fg_width // 2\n fg_middle_y = fg_height // 2\n\n return bg_middle_x - fg_middle_x, bg_middle_y - fg_middle_y",
"def image_mask_overlay(img, mask):\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n if mask[i, j] == 1:\n img[i, j, :] = [255, 0, 255]\n\n return img",
"def make_cloud_mask( inv, rs_mean,processing_defaults, rs_constants):\n \n np.set_printoptions(threshold=np.NaN)\n cloud_threshhold = processing_defaults.get_value('cloud_mask','backscat_threshhold')\n full_mask = processing_defaults.get_value('cloud_mask','mask_entire_profile')\n buffer_bins = np.int(processing_defaults.get_value('cloud_mask','cloud_buffer_zone') \n /(rs_mean.msl_altitudes[2]-rs_mean.msl_altitudes[1]))\n [ntimes, nalts] = inv.beta_a_backscat_par.shape\n temp = inv.beta_a_backscat_par.copy()\n temp = temp +inv.beta_a_backscat_perp\n temp[:, 0] = 0.0\n temp[temp > cloud_threshhold] = np.NaN\n \n\n \n # does not allow for shift from ground-based (i.e. no GPS)\n # to airborne within one record\n if ('installation' not in rs_constants\n or rs_constants['installation'] == 'ground' \n or rs_constants['installation'] == 'shipborne'): # lidar is on the ground looking up\n start_alt = rs_constants['lidar_altitude'] + 300\n start_alt = np.max([start_alt, rs_mean.msl_altitudes[0]])\n temp[:, rs_mean.msl_altitudes < start_alt] = 0.0\n mask = np.isfinite(np.cumsum(temp, 1)).astype('uint16')\n #apply a pre-trigger buffer on nbuf data points to mask\n mask[:,:(nalts-buffer_bins)] = np.bitwise_and(mask[:,:(nalts-buffer_bins)]\n , mask[:,buffer_bins:])\n if full_mask == 1:\n max_cloud_alt = np.float(processing_defaults.get_value('cloud_mask','max_cloud_alt')) \n index = len(rs_mean.msl_altitudes[rs_mean.msl_altitudes < max_cloud_alt*1000.0])\n for i in range(ntimes):\n if any(mask[i,:index] == False):\n mask[i,:] = False\n \n else:\n # lidar is airborne\n indices = np.arange(nalts)\n mask = np.zeros_like(temp).astype('uint16')\n for i in range(ntimes):\n if rs_mean.telescope_pointing[i] > 0.9: # telescope pointing up\n ix = indices[rs_mean.msl_altitudes <= rs_mean.GPS_MSL_Alt[i] + 250]\n if len(ix) > 0:\n start_index = np.max(ix)\n mask[i, start_index:] = \\\n np.isfinite(np.cumsum(temp[i, start_index:]))\\\n .astype('uint16')\n #apply a pre-trigger buffer on nbuf data points to mask\n mask[i,:(nalts-buffer_bins)] = np.bitwise_and(mask[i,:(nalts-buffer_bins)]\n , mask[i,buffer_bins:]) \n elif rs_mean.telescope_pointing[i] < 0.1:\n # telescope is pointing down\n ix = indices[rs_mean.msl_altitudes <= rs_mean.GPS_MSL_Alt[i] - 250]\n if len(ix) > 0:\n start_index = np.max(ix)\n mask[i,start_index:0:-1] = \\\n np.isfinite(np.cumsum(temp[i, start_index:0: -1]))\\\n .astype('uint16')\n #apply a pre-trigger buffer on nbuf data points to mask\n if buffer_bins:\n print 'buffer bins not implemented for nadir viewing'\n \n \n #mask[i,buffer_bins:nalts] = \\\n # np.bitwise_and(mask[i,:(nalts-buffer_bins)]\\\n # , mask[i,buffer_bins:nalts])\n \n \n #mask for bits 0 and 7 \n mask = ~(129*(mask==0)).astype('uint16')\n inv.qc_mask &= mask\n \n \n #rs_mean.qc_mask =inv.qc_mask.copy()\n\n return",
"def shift_mask(action, xmin, xmax, ymin, ymax, original_shape, bb_shape):\n offset_size = 50\n img_height, img_width = original_shape\n bb_height, bb_width = bb_shape\n\n if action == 1: # UP\n ymin = ymin - offset_size\n ymax = ymax - offset_size\n elif action == 2: # UP-RIGHT\n ymin = ymin - offset_size\n ymax = ymax - offset_size\n xmin = xmin + offset_size\n xmax = xmax + offset_size\n elif action == 3: # RIGHT\n xmin = xmin + offset_size\n xmax = xmax + offset_size\n elif action == 4: # DOWN-RIGHT\n ymin = ymin + offset_size\n ymax = ymax + offset_size\n xmin = xmin + offset_size\n xmax = xmax + offset_size\n elif action == 5: # DOWN\n ymin = ymin + offset_size\n ymax = ymax + offset_size\n elif action == 6: # DOWN-LEFT\n ymin = ymin + offset_size\n ymax = ymax + offset_size\n xmin = xmin - offset_size\n xmax = xmax - offset_size\n elif action == 7: # LEFT\n xmin = xmin - offset_size\n xmax = xmax - offset_size\n elif action == 8: # UP-LEFT\n ymin = ymin - offset_size\n ymax = ymax - offset_size\n xmin = xmin - offset_size\n xmax = xmax - offset_size\n\n # Correction for action that forces bboxes out of frame\n if ymax >= img_height:\n ymax = img_height\n ymin = img_height - bb_height\n if ymin <= 0:\n ymin = 0\n ymax = bb_height\n if xmax >= img_width:\n xmax = img_width\n xmin = img_width - bb_width\n if xmin <= 0:\n xmin = 0\n xmax = bb_width\n\n mask = np.zeros(original_shape)\n mask[ymin:ymax,xmin:xmax] = 1\n return mask, xmin, xmax, ymin, ymax",
"def dilationUnknownFgBgNeighbor(unknown_mask, kernal_size, fg_mask, bg_mask):\n kernel = np.ones((kernal_size,kernal_size),np.uint8)\n dilation_alpha = cv2.dilate(unknown_mask, kernel, iterations = 1)\n \n dila_fg_mask = np.logical_and(fg_mask, dilation_alpha)\n dila_bg_mask = np.logical_and(bg_mask, dilation_alpha)\n \n return dila_fg_mask, dila_bg_mask",
"def makeEdgemask(self,im_mask): #WZ\n width = 15 # 20 25 10\n sn0= 7.5 # 10\n npt=0L\n # im_mask=self.parlists[0][i]['in_mask']\n # maskfits = pyfits.open(im_mask,mode='update')\n maskfits = pyfits.open(im_mask)\n im_sci=string.replace(im_mask,'inmask','SCI_')\n scifits = pyfits.open(im_sci)\n data = scifits[0].data.copy()\n naxis1 = scifits[0].header.get('NAXIS1')\n naxis2 = scifits[0].header.get('NAXIS2')\n sky = scifits[0].header.get('ALIGNSKY')\n mask = maskfits[0].data.copy()\n for j in range(0,width-1): # y\n for k in range(0,naxis1-1): # x\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print mask[10,1000],' 10,1000'\n #print npt\n #npt=0\n for j in range(0,naxis2-1):\n for k in range(0,width-1):\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print npt\n #print mask[1000,10],' 100,10'\n #npt=0\n for j in range(naxis2-width-1,naxis2-1):\n for k in range(0,naxis1-1):\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print mask[2040,1000], ' 2040,1000'\n #print npt\n #npt=0\n for j in range(0,naxis2-1):\n for k in range(naxis1-width-1,naxis1-1):\n if (numpy.abs(data[j,k]/sky) > sn0 and mask[j,k]==1):\n # print j,k\n mask[j,k]=0\n npt = npt + 1\n #print mask[1000,4090] ,' 1000,4090'\n #print npt\n maskfits[0].data = mask.copy()\n self.logfile.write(str(npt) + \" pixels masked near the edges in image: \" + im_mask)\n newfits = pyfits.HDUList()\n newfits.append(pyfits.PrimaryHDU())\n newfits[0].header = maskfits[0].header\n newfits[0].data = mask.copy()\n # pdb.set_trace()\n scifits.close() \n if os.path.isfile(im_mask):\n os.remove(im_mask)\n newfits.writeto(im_mask)\n # maskfits.flush()\n del npt,scifits,maskfits,newfits\n return",
"def nicepos(mask,validr=[250,600],step=[25,25]):\n #check the pilmask,it should be the set all the valid pixel value as 1\n #and blind pixel value as zero.\n #In principle, the beam position should be in the detector for the saxs.\n #herein, we use (x,y) stands for the beam position.\n #the (0,0) point is in the top-left-most corner\n #i,j=0,0\n #write good positions into an ascii file\n fidw=open('mirror1m.dat','w')\n fidw.write('#step,[25,25]\\n')\n pilmask=deepcopy(mask)\n #determine the size of pilmask\n pily,pilx=mask['map'].shape\n #get the position in the loop\n for j in np.arange(validr[0],validr[1],step[0]):\n for i in np.arange(validr[0],validr[1],step[1]):\n #determine the size of array constructed\n print 'position of beam(x,y) ',j,' ',i\n tmpmask=np.zeros((pily*2,pilx*2),dtype=np.int)\n #print tmpmask.shape\n #put pilmask into the center of tmpmask\n tmpmask[pily-i:pily-i+pily,pilx-j:pilx-j+pilx]=mask['map']\n pilmask['map']=tmpmask\n res=flipharmony(pilmask)\n #set the filename of res\n res['filename']='BeamPos1M'+'_'+str(j)+'_'+str(i)\n #show the image\n sf_show(res,win=1,auto=1)\n yn=raw_input('Input y or n to save pos: ')\n if yn == 'y':\n mmax=pilmask['map'].max()\n msum=np.sum(pilmask['map'])\n print 'Mask: max, ',mmax,' sum, ',msum\n fidw.write(str(j)+' '+str(i)+'\\n')\n sf_show(res,win=1,auto=1,svg=1)\n pklwrite(res,res['filename'])\n fidw.close()",
"def average_maskdef_offset(calib_slits, platescale, list_detectors):\n\n calib_slits = np.array(calib_slits)\n if list_detectors is None:\n msgs.warn('No average slitmask offset computed')\n return calib_slits\n\n # unpack list_detectors\n blue_and_red = list_detectors.ndim > 1\n spectrograph_dets = list_detectors if blue_and_red else np.expand_dims(list_detectors, 0)\n\n # determine if a slitmask offset exist and use the average offset over all the detectors\n # grab slitmask offsets from slits calibrations\n slitmask_offsets = np.array([ss.maskdef_offset for ss in calib_slits])\n # grab corresponding detectors\n calib_dets = np.array([ss.detname for ss in calib_slits])\n\n # remove eventual None and zeros (zero is assigned when no offset could be measured.)\n calib_dets = calib_dets[(slitmask_offsets != None) & (slitmask_offsets != 0)]\n slitmask_offsets = slitmask_offsets[(slitmask_offsets != None) & (slitmask_offsets != 0)].astype('float')\n\n if slitmask_offsets.size == 0:\n # If all detectors have maskdef_offset=0 give a warning\n msgs.warn('No slitmask offset could be measured. Assumed to be zero. ')\n msgs.warn('RA, DEC, OBJNAME assignment and forced extraction of undetected objects MAY BE WRONG! '\n 'Especially for dithered observations!')\n msgs.warn('To provide a value set `slitmask_offset` in `SlitMaskPar`')\n\n return calib_slits\n\n # are there dets from calib_slits that are blue?\n indx_b = np.where(np.in1d(calib_dets, spectrograph_dets[0]))[0]\n # if this spectrograph is not split into blue and red detectors\n # or if it is but there are no available offsets in the blue\n if not blue_and_red or indx_b.size == 0:\n # use all the available offsets to compute the median\n _, median_off, _ = sigma_clipped_stats(slitmask_offsets, sigma=2.)\n for cs in calib_slits:\n # assign median to each det\n cs.maskdef_offset = median_off\n msgs.info('Average Slitmask offset: {:.2f} pixels ({:.2f} arcsec).'.format(median_off, median_off * platescale))\n\n return calib_slits\n\n if indx_b.size > 0:\n # compute median if these blue dets have values of slitmask_offsets\n _, median_off, _ = sigma_clipped_stats(slitmask_offsets[indx_b], sigma=2.)\n for cs in calib_slits:\n if cs.detname in spectrograph_dets[0]:\n # assign median to each blue det\n cs.maskdef_offset = median_off\n msgs.info('Average Slitmask offset for the blue detectors: '\n '{:.2f} pixels ({:.2f} arcsec).'.format(median_off, median_off * platescale))\n\n # which dets from calib_slits are red?\n indx_r = np.where(np.in1d(calib_dets, spectrograph_dets[1]))[0]\n if indx_r.size > 0:\n # compute median if these red dets have values of slitmask_offsets\n _, median_off, _ = sigma_clipped_stats(slitmask_offsets[indx_r], sigma=2.)\n\n # assign median to each red det (median would be the one computed for red dets if exists\n # or the median computed for blue dets)\n for cs in calib_slits:\n if cs.detname in spectrograph_dets[1]:\n cs.maskdef_offset = median_off\n msgs.info('Average Slitmask offset for the red detectors: '\n '{:.2f} pixels ({:.2f} arcsec).'.format(median_off, median_off * platescale))\n\n return calib_slits",
"def create_transparent_overlay(foreground, background, pos=(0,0)):\n h, w, _ = foreground.shape\n rows, cols, _ = background.shape\n y, x = pos[0], pos[1]\n \n for i in range(h):\n for j in range(w):\n if x + i >= rows or y + j >= cols:\n continue\n alpha = float(foreground[i][j][3] / 255.0)\n background[x + i][y + j] = alpha * foreground[i][j][:3] + (1 - alpha) * background[x + i][y + j]\n return background",
"def flag_loc(image, skier_y):\n # find the nearest flags below the top of the skier's head\n flag_slice = image[int(skier_y):FOG_Y, SCREEN_X[0]:SCREEN_X[1]]\n\n flag_pixels = np.argwhere(flag_slice == COLORS['flag'])\n\n if flag_pixels.size == 0:\n return end_flag_loc(image, skier_y)\n\n # Flag pixels are (y, x) pairs. That's hard to think about. Flip\n # the matrix horizontally so that they're (x, y) pairs\n flag_pixels = np.fliplr(flag_pixels)\n x, y = 0, 1\n\n # what is the left most pixel?\n left_tip_x, left_tip_y = flag_pixels[flag_pixels[:, x].argmin()]\n\n # is this the nearest flag?\n nearest_y = flag_pixels[:, y].min()\n\n # this assumes flags are separated vertically by at least a flag's worth\n # of space. That's a safe assumption for this game\n if left_tip_y - FLAG_HEIGHT > nearest_y:\n # throw out any pixels from the further flags\n flag_pixels = flag_pixels[flag_pixels[:, y] < left_tip_y - FLAG_HEIGHT]\n\n # re-get left most pixel\n left_tip_x, left_tip_y = flag_pixels[flag_pixels[:, x].argmin()]\n\n # now we can get flag_y, flag_left, and flag_right\n flag_y = left_tip_y + skier_y\n flag_left = left_tip_x + TIP_TO_POLE + SCREEN_X[0]\n flag_mid = flag_left + (POLE_TO_POLE / 2.0) - 3\n\n return np.array([flag_mid, flag_y], dtype=np.int)",
"def makeForegroundExtractionMask(self, img, mask, hull):\n\n # no processing?\n # ATTN: in future we might want to so some minimal img cropping\n if (self.get_useFullDieImage()):\n # just return img and mask\n return (img, mask)\n\n\n mask = dicerfuncs.copyCvImage(mask)\n centroid = self.computeFaceCenter(hull)\n\n (height, width) = mask.shape[:2]\n maxside = max(height, width)\n\n # starting and mask\n #imgAnd = dicerfuncs.makeBinaryImageMaskForImg(mask)\n\n # the mask we make may be dependent on self.shape\n if (self.shape is None) or (self.shape == \"circle\"):\n # circular shape\n radiusAll = min(centroid[0], centroid[1])\n # ATTN: 2/24/16 this possibly should be a bit smaller circle like / 1.6, but tht can mess with some 2-digit extractions\n #radius = int(radiusAll / 1.5)\n # ATTN: 2/25/16 1.5 worked on our old die, 1.4 needed on new one\n radius = int(radiusAll / 1.4)\n\n # mask it\n (img, mask) = self.applyForegroundExtractionMask_Circle(img,mask,centroid,radius)\n #color = 255\n #cv2.circle(imgAnd, centroid, radius, color, thickness=-1)\n #mask = cv2.bitwise_and(imgAnd, mask)\n\n # other parameters we can be queried\n # was 16 as of 2/5/16 but this was rejected periods near 9s\n # self.maxDistanceContourAdd = maxside / 1.0\n\n\n # 2/24/16:\n #self.maxDistanceContourAdd = maxside / 12\n self.maxDistanceContourAdd = maxside / 12\n # 2/25/16 had to change this from 5 to 4 for new die\n self.maxDistanceContourAddFar = maxside / 5\n\n # was 52 as of 2/24/16\n #self.maxDistanceFaceCentroidAdd = maxside / 52\n # ATTN: 2/25/16 -- needed for new die\n #self.maxDistanceFaceCentroidAdd = maxside / 12\n self.maxDistanceFaceCentroidAdd = maxside / 18\n\n\n elif (self.shape == \"square\"):\n # simplify hull to square\n hull = dicerfuncs.reduceHullPoints(hull, 4)\n\n # the entire thing\n rotatedRect = cv2.minAreaRect(hull)\n #\n #marginAdjust = 0.8\n marginAdjust = 0.9\n\n # mask it\n (img, mask) = self.applyForegroundExtractionMask_Square(img, mask, centroid, rotatedRect, marginAdjust)\n #rotatedRect2 = (rotatedRect[0], (rotatedRect[1][0] * marginAdjust, rotatedRect[1][1] * marginAdjust), rotatedRect[2])\n #color = 255\n #boxpoints = cv2.boxPoints(rotatedRect2)\n #boxpoints = boxpoints.astype(int)\n #cv2.fillConvexPoly(imgAnd, boxpoints, color)\n #mask = cv2.bitwise_and(imgAnd, mask)\n\n # other parameters\n self.maxDistanceContourAdd = maxside / 2.0\n self.maxDistanceContourAddFar = maxside / 2.0\n self.maxDistanceFaceCentroidAdd = maxside / 2\n\n\n\n # the mask we make may be dependent on self.shape\n elif (self.shape == \"d10\"):\n # circular shape\n radiusAll = min(centroid[0], centroid[1])\n radius = int(radiusAll / 1)\n\n # mask it\n (img, mask) = self.applyForegroundExtractionMask_Circle(img,mask,centroid,radius)\n #color = 255\n #cv2.circle(imgAnd, centroid, radius, color, thickness=-1)\n #mask = cv2.bitwise_and(imgAnd, mask)\n\n # other parameters we can be queried\n self.maxDistanceContourAdd = maxside / 40.0\n self.maxDistanceFaceCentroidAdd = maxside / 8.0\n\n elif (self.shape == \"tri\"):\n # circular shape\n radiusAll = min(centroid[0], centroid[1])\n radius = int(radiusAll / 1.1)\n\n # mask it\n (img, mask) = self.applyForegroundExtractionMask_Circle(img,mask,centroid,radius)\n\n # other parameters we can be queried\n self.maxDistanceContourAdd = maxside / 12.0\n self.maxDistanceFaceCentroidAdd = maxside / 8.0\n\n\n else:\n print \"UNKNOWN DIE SHAPE PASSED: \" + self.shape\n\n # see http://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html\n return (img, mask)",
"def score_foreground_displacement(self, fg_masks):\n bg_masks = [np.bitwise_xor(True, fg_mask) for fg_mask in fg_masks]\n union_mask = bg_masks[0].copy()\n for bg_mask in bg_masks[1:]:\n union_mask += bg_mask\n union_percentage = union_mask.sum() / union_mask.size\n\n return union_percentage",
"def end_flag_loc(image, skier_y):\n # find the nearest flags below the top of the skier's head\n flag_slice = image[int(skier_y):FOG_Y, SCREEN_X[0]:SCREEN_X[1]]\n\n flag_pixels = np.argwhere(flag_slice == COLORS['end_flag'])\n\n # Flag pixels are (y, x) pairs. That's hard to think about. Flip\n # the matrix horizontally so that they're (x, y) pairs\n flag_pixels = np.fliplr(flag_pixels)\n x = 0\n\n # what is the left most pixel?\n left_tip_x, left_tip_y = flag_pixels[flag_pixels[:, x].argmin()]\n\n # now we can get flag_y, flag_left, and flag_right\n flag_y = left_tip_y + skier_y\n flag_left = left_tip_x + TIP_TO_POLE + SCREEN_X[0]\n flag_mid = flag_left + (POLE_TO_POLE / 2.0) - 6\n\n return np.array([flag_mid, flag_y], dtype=np.int)",
"def add_refine_local_mask_blobs(blobs, sampled_boxes, roidb, im_scale, batch_idx, data):\n # Prepare the mask targets by associating one gt mask to each training roi\n # that has a fg (non-bg) class label.\n M = cfg.REFINENET.RESOLUTION\n up_scale = cfg.REFINENET.UP_SCALE\n polys_gt_inds = np.where(\n (roidb['gt_classes'] > 0) & (roidb['is_crowd'] == 0)\n )[0]\n gt_classes = roidb['gt_classes'][polys_gt_inds]\n polys_gt = [roidb['segms'][i] for i in polys_gt_inds]\n boxes_from_polys = segm_utils.polys_to_boxes(polys_gt)\n fg_inds = np.where(blobs['labels_int32'] > 0)[0]\n roi_has_mask = blobs['labels_int32'].copy()\n roi_has_mask[roi_has_mask > 0] = 1\n\n # Define size variables\n inp_h, inp_w = data.shape[2], data.shape[3]\n pad_img_h, pad_img_w = inp_h / im_scale, inp_w / im_scale\n\n if fg_inds.shape[0] > 0:\n # Class labels for the foreground rois\n mask_class_labels = blobs['labels_int32'][fg_inds]\n masks = blob_utils.zeros((fg_inds.shape[0], M**2), int32=True)\n\n # Find overlap between all foreground rois and the bounding boxes\n # enclosing each segmentation\n rois_fg = sampled_boxes[fg_inds]\n overlaps_bbfg_bbpolys = box_utils.bbox_overlaps(\n rois_fg.astype(np.float32, copy=False),\n boxes_from_polys.astype(np.float32, copy=False)\n )\n # Map from each fg rois to the index of the mask with highest overlap\n # (measured by bbox overlap)\n fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1)\n\n # Expand the foreground rois by a factor of up_scale and\n # clip by the padded image boundary\n pad_rois_fg = box_utils.expand_boxes(rois_fg, up_scale)\n pad_rois_fg = box_utils.clip_boxes_to_image(pad_rois_fg, pad_img_h, pad_img_w)\n\n if cfg.REFINENET.ONLY_USE_CROWDED_SAMPLES:\n # Only use crowded samples to train the RefineNet\n THRES = cfg.REFINENET.OVERLAP_THRESHOLD\n for i in range(rois_fg.shape[0]):\n overlap = overlaps_bbfg_bbpolys[i]\n if np.sum(overlap > THRES) > 1:\n # if has multiple instances overlapped, use it for training\n fg_polys_ind = fg_polys_inds[i]\n poly_gt = polys_gt[fg_polys_ind]\n pad_roi_fg = pad_rois_fg[i]\n # Rasterize the portion of the polygon mask within the given fg roi\n # to an M x M binary image\n mask = segm_utils.polys_to_mask_wrt_box(poly_gt, pad_roi_fg, M)\n mask = np.array(mask > 0, dtype=np.int32) # Ensure it's binary\n masks[i, :] = np.reshape(mask, M**2)\n\n else: # Only one instance, then set label to be -1 (ignored)\n masks[i, :] = -1\n mask_class_labels[i] = 0\n elif cfg.REFINENET.ASSIGN_LARGER_WEIGHT_FOR_CROWDED_SAMPLES:\n loss_weights = blob_utils.ones((rois_fg.shape[0], ))\n for i in range(rois_fg.shape[0]):\n fg_polys_ind = fg_polys_inds[i]\n poly_gt = polys_gt[fg_polys_ind]\n pad_roi_fg = pad_rois_fg[i]\n class_label = mask_class_labels[i]\n\n # Rasterize the portion of the polygon mask within the given\n # fg roi to an M x M binary image\n mask = segm_utils.polys_to_mask_wrt_box(poly_gt, pad_roi_fg, M)\n mask = np.array(mask > 0, dtype=np.int32) # Ensure it's binary\n masks[i, :] = np.reshape(mask, M**2)\n\n # And now determine the weight for each roi. 
If any instance\n # that is of the same class as the RoI, then we expect it to\n # be a hard sample and assigns a larger weight for this RoI\n for j in range(len(polys_gt)):\n if j == fg_polys_ind:\n continue\n if gt_classes[j] == class_label: # only same class is valid\n mask = segm_utils.polys_to_mask_wrt_box(\n polys_gt[j], pad_roi_fg, M\n )\n # and check if has anypart fall inside the bbox\n is_inside_bbox = (np.sum(mask) > 0)\n if is_inside_bbox:\n loss_weights[i] = cfg.REFINENET.WEIGHT_LOSS_CROWDED\n break # early stop\n\n else:\n # add fg targets\n for i in range(rois_fg.shape[0]):\n fg_polys_ind = fg_polys_inds[i]\n poly_gt = polys_gt[fg_polys_ind]\n pad_roi_fg = pad_rois_fg[i]\n # Rasterize the portion of the polygon mask within the given fg roi\n # to an M x M binary image\n mask = segm_utils.polys_to_mask_wrt_box(poly_gt, pad_roi_fg, M)\n mask = np.array(mask > 0, dtype=np.int32) # Ensure it's binary\n masks[i, :] = np.reshape(mask, M**2)\n\n else: # If there are no fg masks (it does happen)\n # The network cannot handle empty blobs, so we must provide a mask\n # We simply take the first bg roi, given it an all -1's mask (ignore\n # label), and label it with class zero (bg).\n bg_inds = np.where(blobs['labels_int32'] == 0)[0]\n # pad_rois_fg is actually one background roi, but that's ok because ...\n pad_rois_fg = sampled_boxes[bg_inds[0]].reshape((1, -1))\n # We give it an -1's blob (ignore label)\n masks = -blob_utils.ones((1, M**2), int32=True)\n # We label it with class = 0 (background)\n mask_class_labels = blob_utils.zeros((1, ))\n # Mark that the first roi has a mask\n roi_has_mask[0] = 1\n\n if cfg.MRCNN.CLS_SPECIFIC_MASK:\n masks = _expand_to_class_specific_mask_targets(masks, mask_class_labels)\n\n # Scale rois_fg and format as (batch_idx, x1, y1, x2, y2)\n pad_rois_fg = (pad_rois_fg.astype(np.float32))*im_scale\n repeated_batch_idx = batch_idx * blob_utils.ones((pad_rois_fg.shape[0], 1))\n pad_rois_fg = np.hstack((repeated_batch_idx, pad_rois_fg)).astype(np.int32)\n\n # Update blobs dict with Refine-Net blobs\n blobs['refined_mask_rois'] = pad_rois_fg\n blobs['roi_has_refined_mask_int32'] = roi_has_mask\n blobs['refined_masks_int32'] = masks\n\n if cfg.REFINENET.ASSIGN_LARGER_WEIGHT_FOR_CROWDED_SAMPLES:\n blobs['loss_weights'] = loss_weights",
"def tune_mask_expand(self, show_masks=True):\n\t\tmask_pixels = list(zip(*np.where(self.mask > 0)))\n\t\tif show_masks:\n\t\t\timgplot = plt.imshow(self.original_image)\n\t\t\tplt.title(\"Image\")\n\t\t\tplt.show()\n\t\t\timgplot = plt.imshow(self.mask)\n\t\t\tplt.title(\"Before Enhancement\")\n\t\t\tplt.show()\n\t\t\n\t\t#find segments with parts in mask\n\t\tpresent_segments = set()\n\t\tfor r,c in mask_pixels:\n\t\t\tif self.segments_slic[r][c] not in present_segments:\n\t\t\t\tpresent_segments.add(self.segments_slic[r][c])\n\n\t\t#fill in the mask with those segments\n\t\tfor r in range(len(self.segments_slic)):\n\t\t\tfor c in range(len(self.segments_slic[0])):\n\t\t\t\tif self.segments_slic[r][c] in present_segments:\n\t\t\t\t\tself.mask[r][c] = 100\n\t\t\t\t\t\n\t\tif show_masks:\n\t\t\timgplot = plt.imshow(self.mask)\n\t\t\tplt.title(\"After Enhancement\")\n\t\t\tplt.show()\n\t\treturn self.mask, mask_pixels",
"def _get_local_gain_mask(\n img, mask, min_midpoint, min_slope, max_midpoint, max_slope\n ):\n\n # Piecewise logistic function\n def piecewise_logistic(x):\n # Negative side logistic function, user defined midpoint and slope\n def logistic_negative(x):\n x = (x - min_midpoint) * min_slope\n return 1 / (1 + np.exp(-x))\n\n # Positive side logistic function, user defined midpoint and slope\n def logistic_positive(x):\n x = (x - max_midpoint) * max_slope\n return 1 - (1 / (1 + np.exp(-x)))\n\n return np.piecewise(\n x, [x < 0, x >= 0], [logistic_negative, logistic_positive]\n )\n\n # Apply the piecewise logistic transformation\n img_transformed = piecewise_logistic(img)\n if 0:\n # Visualize the function between -1 and 3 with 1000 points\n x = np.linspace(-1, 3, 1000)\n y = piecewise_logistic(x)\n plt.plot(x, y)\n # Put vertical lines to min_ret_value and max_ret_value\n plt.axvline(x=min_ret_value, color=\"r\", linestyle=\"--\")\n plt.axvline(x=max_ret_value, color=\"r\", linestyle=\"--\")\n plt.show()\n exit()\n\n # Apply mask. Necessary to avoid the rectangular grid to start evolving into RFs\n img_transformed_masked = img_transformed * mask\n\n return img_transformed_masked"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
use padded foreground mask to overlay padded fg onto bg update the bg_mask with annotations mask of fg that is currently overlayed | def overlay(start_coords, padded_fg_img, padded_fg_mask, fg_anno_mask, bg_img, bg_mask):
row, col = start_coords
h, w = padded_fg_mask.shape
# create new mask of same dims as bg and place padded_fg_mask there at proper location
tmp_bg_mask = np.zeros(shape= bg_mask.shape, dtype=np.uint8)
tmp_bg_mask[row:row+h, col:col+w] = padded_fg_mask
tmp_bg_mask_inv = cv2.bitwise_not(tmp_bg_mask)
# create new img of same dims as bg, place padded_fg_img there
tmp_fg_img = np.zeros(bg_img.shape, dtype=np.uint8)
tmp_fg_img[row:row+h, col:col+w] = padded_fg_img
# use mask to combine bg_img, tmp_fg_img
bg_img = cv2.bitwise_and(bg_img, bg_img, mask=tmp_bg_mask_inv)
tmp_fg_img = cv2.bitwise_and(tmp_fg_img, tmp_fg_img, mask=tmp_bg_mask)
bg_img = cv2.add(bg_img, tmp_fg_img)
# update bg_mask with annos
bg_mask[row:row+h, col:col+w] += fg_anno_mask
return bg_img, bg_mask | [
"def extract_and_pad_mask(fg_img, fg_mask, bg_mask, padding_ratio, transform=True):\n # threshold to make binary\n # if transform:\n # tmp_fg_mask = np.zeros(fg_img.shape, dtype=np.uint8)\n # fg_img, fg_mask = transforms(fg_img, fg_mask, bg_mask)\n # fg_mask = fg_mask.draw_on_image(tmp_fg_mask)[0]\n # print(fg_img.shape, fg_img.dtype, fg_mask.shape, fg_mask.dtype)\n\n _, threshold = cv2.threshold(fg_mask, 110, 255,\n cv2.THRESH_BINARY)\n # find contours\n contours, _ = cv2.findContours(fg_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n\n results = []\n for cnt in contours:\n # convert contour to polygon\n poly = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True), True)\n # create new mask with only current polygon\n this_poly_mask = np.zeros(fg_img.shape[:2], dtype=np.uint8)\n cv2.fillPoly(this_poly_mask, [poly], (255))\n # enlargen polygon for padding\n enlargened_poly = np.squeeze(enlargen_polygon(poly, padding_ratio), axis=1)\n # get image of original polygon + added padding\n padded_poly_img, padded_mask = \\\n get_padded_polygon_image(enlargened_poly, fg_img, this_poly_mask)\n # get indices to crop from original fg_img into smallest region possible\n min_y, max_y, min_x, max_x = get_crop_indices_from_mask(padded_mask)\n padded_poly_img = padded_poly_img[min_y:max_y,min_x:max_x,:]\n padded_mask = padded_mask[min_y:max_y,min_x:max_x]\n this_poly_mask = this_poly_mask[min_y:max_y, min_x:max_x]\n results.append({\"padded_img\":padded_poly_img,\n \"padded_mask\": padded_mask, \"annotations_mask\": this_poly_mask})\n\n return results",
"def create_transparent_overlay(foreground, background, pos=(0,0)):\n h, w, _ = foreground.shape\n rows, cols, _ = background.shape\n y, x = pos[0], pos[1]\n \n for i in range(h):\n for j in range(w):\n if x + i >= rows or y + j >= cols:\n continue\n alpha = float(foreground[i][j][3] / 255.0)\n background[x + i][y + j] = alpha * foreground[i][j][:3] + (1 - alpha) * background[x + i][y + j]\n return background",
"def generate_foreground_masks(self):\n depth_params = copy(self.base_params)\n depth_params.update(\n {\n \"app\": \"GenerateForegroundMasks\",\n \"level\": 0,\n \"dst_level\": None,\n \"dst_image_type\": \"foreground_masks\",\n }\n )\n self.run_halted_queue(depth_params, self.frame_chunks)",
"def dilationUnknownFgBgNeighbor(unknown_mask, kernal_size, fg_mask, bg_mask):\n kernel = np.ones((kernal_size,kernal_size),np.uint8)\n dilation_alpha = cv2.dilate(unknown_mask, kernel, iterations = 1)\n \n dila_fg_mask = np.logical_and(fg_mask, dilation_alpha)\n dila_bg_mask = np.logical_and(bg_mask, dilation_alpha)\n \n return dila_fg_mask, dila_bg_mask",
"def randomly_choose_overlay_location(fg, bg_mask, step=50):\n # get height, width of img mask\n rows, cols = bg_mask.shape\n # get height, with of current foreground to overlay\n h, w, _ = fg.shape\n # adjust max row, col by subtracting foreground dimensions\n rows = rows - h\n cols = cols - w\n # get list of possible starting coordinates\n possible_starting_points = list(product([i for i in range(0, rows, step)], [i for i in range(0, cols, step)]))\n starting_indices = [i for i in range(len(possible_starting_points))]\n\n # until a good region is found, randomly sample from possible overlay regions\n # and check to see if any previous overlays intersect with that position\n while len(starting_indices):\n start = np.random.choice(starting_indices, 1)[0]\n start = starting_indices.pop(start)\n row, col = possible_starting_points[start]\n slice = bg_mask[row:row+h, col:col+w]\n if slice.sum() == 0:\n return row, col\n\n return None",
"def score_foreground_displacement(self, fg_masks):\n bg_masks = [np.bitwise_xor(True, fg_mask) for fg_mask in fg_masks]\n union_mask = bg_masks[0].copy()\n for bg_mask in bg_masks[1:]:\n union_mask += bg_mask\n union_percentage = union_mask.sum() / union_mask.size\n\n return union_percentage",
"def _compute_background(self):\n\n if not hasattr(self, 'background_img'):\n # need to scale the mask, as Collage class does NOT automatically rescale\n self.foreground_mask = mask_image(self.current_img, out_dtype=bool)\n temp_background_img = np.copy(self.current_img)\n temp_background_img[self.foreground_mask] = 0.0\n self.background_img = scale_0to1(temp_background_img,\n exclude_outliers_below=1,\n exclude_outliers_above=1)",
"def bg_mask(self, anns, width_height, *, crowd_margin):\n anns1, anns2 = anns\n\n mask = np.ones((\n (width_height[1] - 1) // self.stride + 1,\n (width_height[0] - 1) // self.stride + 1,\n ), dtype=np.bool)\n crowd_bbox = [np.inf, np.inf, 0, 0]\n for ann in anns1 + anns2:\n if not ann['iscrowd']:\n valid_keypoints = 'keypoints' in ann and np.any(ann['keypoints'][:, 2] > 0)\n if valid_keypoints:\n continue\n\n if 'mask' not in ann:\n bb = ann['bbox'].copy()\n bb /= self.stride\n bb[2:] += bb[:2] # convert width and height to x2 and y2\n\n # left top\n left = np.clip(int(bb[0] - crowd_margin), 0, mask.shape[1] - 1)\n top = np.clip(int(bb[1] - crowd_margin), 0, mask.shape[0] - 1)\n\n # right bottom\n # ceil: to round up\n # +1: because mask upper limit is exclusive\n right = np.clip(int(np.ceil(bb[2] + crowd_margin)) + 1,\n left + 1, mask.shape[1])\n bottom = np.clip(int(np.ceil(bb[3] + crowd_margin)) + 1,\n top + 1, mask.shape[0])\n\n crowd_bbox[0] = min(crowd_bbox[0], left)\n crowd_bbox[1] = min(crowd_bbox[1], top)\n crowd_bbox[2] = max(crowd_bbox[2], right)\n crowd_bbox[3] = max(crowd_bbox[3], bottom)\n continue\n\n assert False # because code below is not tested\n mask[ann['mask'][::self.stride, ::self.stride]] = 0\n\n if crowd_bbox[1] < crowd_bbox[3] and crowd_bbox[0] < crowd_bbox[2]:\n LOG.debug('crowd_bbox: %s', crowd_bbox)\n mask[crowd_bbox[1]:crowd_bbox[3], crowd_bbox[0]:crowd_bbox[2]] = 0\n\n return mask",
"def image_mask_overlay(img, mask):\n for i in range(mask.shape[0]):\n for j in range(mask.shape[1]):\n if mask[i, j] == 1:\n img[i, j, :] = [255, 0, 255]\n\n return img",
"def overlay_images(back, fore, x, y):\n fore = cv2.cvtColor(fore, cv2.COLOR_BGR2BGRA)\n rows, cols, channels = fore.shape \n trans_indices = fore[...,3] != 0 # Where not transparent\n overlay_copy = back[y:y+rows, x:x+cols] \n overlay_copy[trans_indices] = fore[trans_indices]\n back[y:y+rows, x:x+cols] = overlay_copy",
"def exchangerbg(four_channel_img, trhee_channel_img, coord=None):\n mask2 = four_channel_img.copy()\n four_channel_img2=four_channel_img.copy()\n four_channel_img2.paste(trhee_channel_img, coord)#into 4 channel\n array_m = np.array(mask2)[:,:,3]\n array_b = np.array(four_channel_img2)\n array_b[:,:,3] = array_m\n final = Image.fromarray(array_b)\n return final",
"def fore_back_region(beam_centre, beam_sd):\n _b_centre = np.array(beam_centre)\n _b_sd = np.array(beam_sd)\n\n lopx = np.floor(_b_centre - _b_sd * EXTENT_MULT).astype(\"int\")\n hipx = np.ceil(_b_centre + _b_sd * EXTENT_MULT).astype(\"int\")\n\n background_pixels = []\n\n # limit of background regions\n # from refnx.reduce.platypusnexus\n y1 = np.atleast_1d(np.round(lopx - PIXEL_OFFSET).astype(\"int\"))\n y0 = np.atleast_1d(\n np.round(lopx - PIXEL_OFFSET - (EXTENT_MULT * _b_sd)).astype(\"int\")\n )\n\n y2 = np.atleast_1d(np.round(hipx + PIXEL_OFFSET).astype(\"int\"))\n y3 = np.atleast_1d(\n np.round(hipx + PIXEL_OFFSET + (EXTENT_MULT * _b_sd)).astype(\"int\")\n )\n\n # now generate background pixels\n for i in range(np.size(y0)):\n background_pixels.append(\n np.r_[np.arange(y0[i], y1[i] + 1), np.arange(y2[i], y3[i] + 1)]\n )\n\n return lopx, hipx, background_pixels",
"def extract_foreground(image):\n img = image.copy()\n \n #kernel for closing edges\n kernel = np.ones((5,5))\n \n #Perform color quantizization\n quantized = quantize_color(img)\n\n #Threshold the image to segment it. Best results for individual images are\n #labeled as such below\n \n #SWANS\n #ret, threshold = cv2.threshold(quantized, 50, 100, cv2.THRESH_BINARY)\n \n #POLICEMAN\n #ret, threshold = cv2.threshold(quantized, 50, 100, cv2.THRESH_BINARY)\n \n #BIG BEN\n ret, threshold = cv2.threshold(quantized, 100, 125, cv2.THRESH_BINARY)\n\n #LONDON SCENE\n #ret, threshold = cv2.threshold(quantized, 152, 247, cv2.THRESH_BINARY)\n\n #De-noise image before edge detection\n blur = cv2.GaussianBlur(threshold, (11,11), 9)\n\n\n #Blur edges\n edges = cv2.Canny(blur, 50, 55, 7)\n \n #Close edges to create cohesive edge\n edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)\n \n #Find the external contours of the edge image\n img, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, \n cv2.CHAIN_APPROX_NONE)\n\n #initial max values for finding largest rectangle in contours\n w_max = 0\n h_max = 0\n \n #iterate through each contour found in the image\n for c in contours:\n #find the bounding rectangles in the contours\n x,y,w,h = cv2.boundingRect(c)\n \n #Identify largest rectangle as foreground component\n if (h >= h_max and w >= w_max):\n r = (x, y, w, h)\n \n #Copy to preserve original\n foreground_extracted = image.copy()\n \n #Create initial mask of zeros and foreground and background of zeros\n mask = np.zeros(foreground_extracted.shape[:2], np.uint8)\n background = np.zeros((1, 65), np.float64)\n foreground = np.zeros((1, 65), np.float64)\n \n #Extract the area bounded by rectangle r and create mask\n cv2.grabCut(foreground_extracted, mask, r, background, foreground, 5, \n cv2.GC_INIT_WITH_RECT) \n \n mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')\n foreground_extracted = foreground_extracted * mask2[:,:,np.newaxis]\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) \n\n return foreground_extracted",
"def merge_images(background, foregrounds, masks):\n bg_i = Image.fromarray(background)\n for fg, m in zip(foregrounds, masks):\n fg_i = Image.fromarray(fg)\n m_i = Image.fromarray(m)\n bg_i = Image.composite(fg_i, bg_i, m_i)\n return bg_i",
"def get_true_mask(image: np.ndarray) -> np.ndarray:\n\n mask = np.zeros(image.shape[:2], dtype=\"uint8\")\n backgroundModel = np.zeros((1, 65), np.float64)\n foregroundModel = np.zeros((1, 65), np.float64)\n\n rect = get_rect(image)\n known_foreground = get_face_mask(mask, image)\n\n mask = np.zeros(image.shape[:2], dtype=\"uint8\")\n cv2.grabCut(image, mask, rect, backgroundModel, foregroundModel, 10,\n cv2.GC_INIT_WITH_RECT)\n outputMask = np.where((mask == cv2.GC_BGD) | (mask == cv2.GC_PR_BGD), 0, 1)\n outputMask = (outputMask * 255).astype(\"uint8\")\n output = cv2.bitwise_and(image, image, mask=outputMask)\n keep_me = cv2.bitwise_xor(image, output, mask=known_foreground)\n output = cv2.bitwise_or(output, keep_me)\n cv2.imshow(\"output\", output)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n cv2.destroyAllWindows()\n\n return output",
"def test_background_mask(self, box_size):\n\n data = np.copy(DATA)\n data[25:50, 25:50] = 100.\n mask = np.zeros_like(DATA, dtype=np.bool)\n mask[25:50, 25:50] = True\n b = Background2D(data, box_size, filter_size=(1, 1), mask=mask,\n bkg_estimator=MeanBackground())\n assert_allclose(b.background, DATA)\n assert_allclose(b.background_rms, BKG_RMS)\n\n # test edge crop with\n b2 = Background2D(data, box_size, filter_size=(1, 1), mask=mask,\n bkg_estimator=MeanBackground(), edge_method='crop')\n assert_allclose(b2.background, DATA)",
"def add_overlay_over_background(self, background, overlay, offset=(0, 0)):\n background_image = self.get_image(background)\n overlay_image = self.get_image(overlay)\n background_image.paste(overlay_image, offset, mask=overlay_image)\n return background_image",
"def _sample_fg_bg(self, max_overlaps):\n # Split proposals into foreground and background based on overlap\n fg_inds = np.where(max_overlaps >= self.fg_thresh)[0]\n fg_rois_per_image = np.round(self.fg_fraction * self.num_rois)\n\n # Guard against the case when an image has fewer than fg_rois_per_image foreground RoIs\n fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)\n\n # Sample foreground regions without replacement\n if fg_inds.size > 0 and not self.deterministic:\n fg_inds = self.be.rng.choice(fg_inds, size=int(fg_rois_per_this_image), replace=False)\n elif fg_inds.size > 0:\n fg_inds = fg_inds[:fg_rois_per_this_image]\n\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((max_overlaps < self.bg_thresh_hi) &\n (max_overlaps >= self.bg_thresh_lo))[0]\n\n # Compute number of background RoIs to take from this image (guarding\n # against there being fewer than desired)\n bg_rois_per_this_image = self.num_rois - fg_rois_per_this_image\n bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)\n\n # Sample background regions without replacement\n if bg_inds.size > 0 and not self.deterministic:\n bg_inds = self.be.rng.choice(bg_inds, size=int(bg_rois_per_this_image), replace=False)\n elif bg_inds.size > 0:\n bg_inds = bg_inds[:bg_rois_per_this_image]\n\n # The indices that we're selecting (both fg and bg)\n keep_inds = np.append(fg_inds, bg_inds)\n return keep_inds, int(fg_rois_per_this_image)",
"def make_cloud_mask( inv, rs_mean,processing_defaults, rs_constants):\n \n np.set_printoptions(threshold=np.NaN)\n cloud_threshhold = processing_defaults.get_value('cloud_mask','backscat_threshhold')\n full_mask = processing_defaults.get_value('cloud_mask','mask_entire_profile')\n buffer_bins = np.int(processing_defaults.get_value('cloud_mask','cloud_buffer_zone') \n /(rs_mean.msl_altitudes[2]-rs_mean.msl_altitudes[1]))\n [ntimes, nalts] = inv.beta_a_backscat_par.shape\n temp = inv.beta_a_backscat_par.copy()\n temp = temp +inv.beta_a_backscat_perp\n temp[:, 0] = 0.0\n temp[temp > cloud_threshhold] = np.NaN\n \n\n \n # does not allow for shift from ground-based (i.e. no GPS)\n # to airborne within one record\n if ('installation' not in rs_constants\n or rs_constants['installation'] == 'ground' \n or rs_constants['installation'] == 'shipborne'): # lidar is on the ground looking up\n start_alt = rs_constants['lidar_altitude'] + 300\n start_alt = np.max([start_alt, rs_mean.msl_altitudes[0]])\n temp[:, rs_mean.msl_altitudes < start_alt] = 0.0\n mask = np.isfinite(np.cumsum(temp, 1)).astype('uint16')\n #apply a pre-trigger buffer on nbuf data points to mask\n mask[:,:(nalts-buffer_bins)] = np.bitwise_and(mask[:,:(nalts-buffer_bins)]\n , mask[:,buffer_bins:])\n if full_mask == 1:\n max_cloud_alt = np.float(processing_defaults.get_value('cloud_mask','max_cloud_alt')) \n index = len(rs_mean.msl_altitudes[rs_mean.msl_altitudes < max_cloud_alt*1000.0])\n for i in range(ntimes):\n if any(mask[i,:index] == False):\n mask[i,:] = False\n \n else:\n # lidar is airborne\n indices = np.arange(nalts)\n mask = np.zeros_like(temp).astype('uint16')\n for i in range(ntimes):\n if rs_mean.telescope_pointing[i] > 0.9: # telescope pointing up\n ix = indices[rs_mean.msl_altitudes <= rs_mean.GPS_MSL_Alt[i] + 250]\n if len(ix) > 0:\n start_index = np.max(ix)\n mask[i, start_index:] = \\\n np.isfinite(np.cumsum(temp[i, start_index:]))\\\n .astype('uint16')\n #apply a pre-trigger buffer on nbuf data points to mask\n mask[i,:(nalts-buffer_bins)] = np.bitwise_and(mask[i,:(nalts-buffer_bins)]\n , mask[i,buffer_bins:]) \n elif rs_mean.telescope_pointing[i] < 0.1:\n # telescope is pointing down\n ix = indices[rs_mean.msl_altitudes <= rs_mean.GPS_MSL_Alt[i] - 250]\n if len(ix) > 0:\n start_index = np.max(ix)\n mask[i,start_index:0:-1] = \\\n np.isfinite(np.cumsum(temp[i, start_index:0: -1]))\\\n .astype('uint16')\n #apply a pre-trigger buffer on nbuf data points to mask\n if buffer_bins:\n print 'buffer bins not implemented for nadir viewing'\n \n \n #mask[i,buffer_bins:nalts] = \\\n # np.bitwise_and(mask[i,:(nalts-buffer_bins)]\\\n # , mask[i,buffer_bins:nalts])\n \n \n #mask for bits 0 and 7 \n mask = ~(129*(mask==0)).astype('uint16')\n inv.qc_mask &= mask\n \n \n #rs_mean.qc_mask =inv.qc_mask.copy()\n\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A function that logs the metadata from Optuna Study to Neptune. | def log_study_metadata(study: optuna.Study,
run: neptune.Run,
base_namespace='',
log_plots=True,
log_study=True,
log_all_trials=True,
log_distributions=True,
visualization_backend='plotly',
log_plot_contour=True,
log_plot_edf=True,
log_plot_parallel_coordinate=True,
log_plot_param_importances=True,
log_plot_pareto_front=True,
log_plot_slice=True,
log_plot_intermediate_values=True,
log_plot_optimization_history=True):
run = run[base_namespace]
_log_study_details(run, study)
run['best'] = _stringify_keys(_log_best_trials(study))
if log_all_trials:
_log_trials(run, study.trials)
if log_distributions:
run['study/distributions'].log(list(trial.distributions for trial in study.trials))
if log_plots:
_log_plots(run, study,
visualization_backend=visualization_backend,
log_plot_contour=log_plot_contour,
log_plot_edf=log_plot_edf,
log_plot_parallel_coordinate=log_plot_parallel_coordinate,
log_plot_param_importances=log_plot_param_importances,
log_plot_pareto_front=log_plot_pareto_front,
log_plot_slice=log_plot_slice,
log_plot_optimization_history=log_plot_optimization_history,
log_plot_intermediate_values=log_plot_intermediate_values,
)
if log_study:
_log_study(run, study) | [
"def printMetadata(self):\n print (\"************COMMONDATA************\")\n print (\"Setname:\", self.setname, \"PROC:\", self.proc)\n print (\"NDAT:\", self.ndata,\"NSYS:\",self.nsys)",
"def _log_in_neptune(self, logs: List[LogData]):\n # log scalar metrics\n try:\n for log in logs:\n if log.type == \"scalar\":\n self._neptune_run[log.name].log(log.value)\n if log.type == \"image\":\n if (log.value.ndim == 3) and (log.value.shape[-1] != 3):\n self._neptune_run[log.name].log(\n File.as_image(log.value.transpose(1, 2, 0))\n )\n else:\n self._neptune_run[log.name].log(File.as_image(log.value))\n\n # log also csv and pkl files\n # self._neptune_run[\"logs.csv\"].upload(self._save_dir + \"/logs.csv\")\n # self._neptune_run[\"logs.pkl\"].upload(self._save_dir + \"/logs.pkl\")\n # neptune.log_artifact(self._save_dir + \"/logs.csv\")\n # neptune.log_artifact(self._save_dir + \"/logs.pkl\")\n except RuntimeError:\n print(\"WARNING: failed to log in Neptune\")",
"def log_phase_info(self, phase=None):\n onset = self.session.clock.getTime()\n\n if phase is None:\n phase = self.phase\n\n if phase == 0:\n self.start_trial = onset\n\n if self.verbose:\n print(f'Starting trial {self.trial_nr}')\n\n msg = f\"\\tPhase {phase} start: {onset:.5f}\"\n\n if self.verbose:\n print(msg)\n\n if self.eyetracker_on: # send msg to eyetracker\n msg = f'start_type-stim_trial-{self.trial_nr}_phase-{phase}'\n self.session.tracker.sendMessage(msg)\n # Should be log more to the eyetracker? Like 'parameters'?\n\n # add to global log\n idx = self.session.global_log.shape[0]\n self.session.global_log.loc[idx, 'onset'] = onset\n self.session.global_log.loc[idx, 'trial_nr'] = self.trial_nr\n self.session.global_log.loc[idx, 'event_type'] = self.phase_names[phase]\n self.session.global_log.loc[idx, 'phase'] = phase\n self.session.global_log.loc[idx, 'nr_frames'] = self.session.nr_frames\n\n for param, val in self.parameters.items(): # add parameters to log\n if type(val) == np.ndarray or type(val) == list:\n for i, x in enumerate(val):\n self.session.global_log.loc[idx, param+'_%4i'%i] = str(x) \n else: \n self.session.global_log.loc[idx, param] = val\n\n # add to trial_log\n #idx = self.trial_log.shape[0]\n #self.trial_log.loc[idx, 'onset'][self.phase].append(onset)\n\n self.session.nr_frames = 0",
"def record_summary(self, t):\n\n fd = {\n self.avg_reward_placeholder: self.avg_reward,\n self.avg_collsions_placeholder: self.avg_collisions,\n self.avg_distance_placeholder: self.avg_distance,\n #self.eval_reward_placeholder: self.eval_reward,\n }\n summary = self.sess.run(self.merged, feed_dict=fd)\n # tensorboard stuff\n self.file_writer.add_summary(summary, t)",
"def on_train_begin(self, logs=None):\n print('******** HISTORY starting a training session...')",
"def view_metadata(self):\n if isinstance(self.metadata, dict):\n print_nested_dict(self.metadata)",
"def _log_data(context: MLClientCtx, table: str):\n df = pd.read_parquet(table)\n context.log_dataset(key=\"dataset\", db_key=\"dataset\", stats=True, df=df)",
"def logData(self):\n pass",
"def execute_and_log_overview(s):\n logging.info(\"Executing the basic multi match query\")\n s.execute()\n logging.info(s)\n if LOG_LEVEL == \"DEBUG\":\n logging.info(\"Search query qui a été appliqué\")\n logging.info(json.dumps(s.to_dict(), indent=2))\n logging.info(\"Printing des top hits\")\n for hit in s:\n try:\n logging.info(f\"Titre du film trouvé : {hit.title}\")\n except KeyError:\n logging.warning(\"There is currently not the title key indexed\")\n logging.info(f\"Synopsis du film trouvé : {hit.overview}\")\n logging.info(\"Search finished executing in \")",
"def store_metadata(self, drs):\n self.metadata = open(f'{drs.new_dir}{drs.phrase_num}.txt', \"a\")\n self.metadata.write(self.inputs)",
"def log_model_summary(experiment: LightningModule):\n\n summary = str(pl.core.memory.ModelSummary(experiment, max_depth=-1))\n tempdir = tempfile.mkdtemp()\n try:\n summary_file = os.path.join(tempdir, \"model_summary.txt\")\n with open(summary_file, \"w\") as f:\n f.write(summary)\n mlflow.log_artifact(summary_file)\n finally:\n shutil.rmtree(tempdir)",
"def _log_metadata(self, metadata: Dict[str, Any]) -> None:\n if self._enabled:\n for key, val in metadata.items():\n if self.ignore_keys and any(fnmatch.fnmatch(key, pattern) for pattern in self.ignore_keys):\n continue\n self.buffered_metadata[f'mosaicml/{key}'] = format_data_to_json_serializable(val)\n self._flush_metadata()",
"def print_meta(self):\n self.logger.handle('Author: {}'.format(self.meta['author']), Logger.HEADER)\n self.logger.handle('Module name: {}, version {}'.format(self.meta['name'], self.meta['version']), Logger.HEADER)\n self.logger.handle('Description: {}'.format(self.meta['description']), Logger.HEADER)",
"def describe_source_record(self):",
"def __save_tuning_summary(self, path, experiment_title, dataset_name, training_size, noise, test_accuracy):\n\n # We open the file\n f = open(path+'tuning_summary.txt', \"w+\")\n\n # We write the highlights\n f.write(\"Experiment title: %s \\n\\n\" % experiment_title)\n f.write(\"Total budget (in epochs): %s \\n\\n\" % self.total_budget)\n f.write(\"Max budget per config (in epochs): %s \\n\\n\" % self.max_budget_per_config)\n f.write(\"Model name : %s \\n\\n\" % self.model_name)\n f.write(\"Nbr. of cross validation done in each iteration : %g \\n\\n\" % self.nbr_of_cross_validation)\n f.write(\"Validation size in cross validation : %g \\n\\n\" % self.validation_size)\n f.write(\"Dataset name : %s \\n\\n\" % dataset_name)\n f.write(\"Size of training set : %g \\n\\n\" % training_size)\n\n if noise is not None:\n f.write(\"Noise : %g \\n\\n\" % noise)\n\n f.write(\"Number of unique configurations tested : %g \\n\\n\" % max(len(self.hyperparameters_history),\n self.total_unique_sampled))\n f.write(\"Number of configurations tested with full budget : %g \\n\\n\" % len(self.best_accuracy_history))\n f.write(\"Best accuracy obtained in tuning : %g \\n\\n\" % self.actual_best_accuracy)\n f.write(\"Best hyper-parameters found : %s \\n\\n\" % str(self.best_hyperparameters))\n f.write(\"Test accuracy : %g\" % test_accuracy)\n\n # We close the file\n f.close()",
"def log_parameters(self):\n for tag, value in self.named_parameters():\n tag = tag.replace(\".\", \"/\")\n self.logger.experiment.add_histogram(tag, value, self.current_epoch)",
"def addMetadata(streamName=\"string\", scene=bool, channelName=\"string\", indexType=\"string\", channelType=\"string\", structure=\"string\"):\n pass",
"def info(self):\n print(\"RAW DATA\")\n print(\"==================\")\n print(\"File name:\\t\" + str(self.filename))\n print(\"------------------\")\n print(\"Source name:\\t\" + (self.meta[\"source\"] or \"Unknown\"))\n print(\"Observed date:\\t\" + self.meta[\"OBSDATE\"].iso)\n print(\"Description:\\t\" + self.meta[\"obstype\"] or \"Unknown\")\n print(\"Scan number:\\t\" + str(self.meta[\"scan\"] or \"Unknown\"))\n\n print(\"------------------\")\n print(\"No. of KIDS detectors:\\t\", self.ndet)\n print(\"No. of time samples:\\t\", self.nsamples)\n print(\n \"Typical size of fully sampled data (GiB):\\t{:3.1f}\".format(self.nsamples * self.ndet * 32 / 8 / 1024 ** 3)\n )",
"def log_hparams(self, hparams: dict):\n with self.summary_writer.as_default():\n tf.summary.text(name='Hyper Parameters', data=self.generate_hparams_md_table(hparams), step=0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
A function that loads Optuna Study from an existing Neptune Run. | def load_study_from_run(run: neptune.Run):
if run['study/storage_type'].fetch() == 'InMemoryStorage':
return _get_pickle(path='study/study', run=run)
else:
return optuna.load_study(study_name=run['study/study_name'].fetch(), storage=run['study/storage_url'].fetch()) | [
"def load_study(owner, title, info):\n study = load_study_from_cmd(owner, title, info)\n click.echo(\"Study successfully added to the database with id %s\"\n % study.id)",
"def load_run(cls, run_id, out_dir=None, phase_indices=None, verbose=True):\n out_dir = out_dir if out_dir is not None else DEFAULT_OUT_DIR\n config_path = cls.get_config_path(cls.get_base_dir(out_dir, cls.experiment_id, run_id))\n config = cls.load_config(config_path)\n exp = cls(config, out_dir=out_dir)\n if 'master_only' in config and config['master_only']:\n exp.load_summary()\n elif phase_indices is not None:\n exp.load_phases(phase_indices, verbose=verbose)\n else:\n exp.load_results(verbose=verbose)\n return exp",
"def load_from_file(filename):\n f = open(filename, 'r') #@type f file\n datas = f.read()\n f.close()\n #working with old files\n datas=datas.replace('enthought.','')\n the_experiment = loads(datas)\n return the_experiment",
"def load_vae(self):\n vae_name = self.opt['vae_name']\n path_to_pretrained = 'models/{0}/vae_model.pt'.format(vae_name)\n vae_config_file = os.path.join('configs', vae_name + '.py')\n vae_config = SourceFileLoader(vae_name, vae_config_file).load_module().config \n\n vae_opt = vae_config['vae_opt']\n vae_opt['device'] = self.device\n vae_opt['vae_load_checkpoint'] = False\n \n vae_module = importlib.import_module(\"architectures.{0}\".format(vae_opt['model']))\n print(' *- Imported module: ', vae_module)\n \n # Initialise the model\n try:\n class_ = getattr(vae_module, vae_opt['model'])\n vae_instance = class_(vae_opt).to(self.device)\n print(' *- Loaded {0}.'.format(class_))\n except: \n raise NotImplementedError(\n 'Model {0} not recognized'.format(vae_opt['model']))\n \n # Load the weights\n if vae_opt['vae_load_checkpoint']:\n checkpoint = torch.load(path_to_pretrained, map_location=self.device)\n vae_instance.load_state_dict(checkpoint['model_state_dict'])\n print(' *- Loaded checkpoint.')\n else:\n vae_instance.load_state_dict(torch.load(path_to_pretrained, map_location=self.device))\n vae_instance.eval()\n assert(not vae_instance.training)\n self.vae = vae_instance",
"def load_seviri_nat(indir, in_time, comp_type, timedelt):\n files = ffar(start_time=in_time,\n end_time=in_time + timedelta(minutes=timedelt),\n base_dir=indir,\n reader='seviri_l1b_native')\n\n scn = Scene(reader='seviri_l1b_native', filenames=files)\n scn.load([comp_type])\n\n return scn",
"def load_seviri_hrit(indir, in_time, comp_type, timedelt):\n files = ffar(start_time=in_time,\n end_time=in_time + timedelta(minutes=timedelt - 1),\n base_dir=indir,\n reader='seviri_l1b_hrit')\n\n scn = Scene(reader='seviri_l1b_hrit', filenames=files)\n scn.load([comp_type])\n\n return scn",
"def load_trained_DQN(self, path):\r\n\r\n trained_file = pickle.load(open(path, 'rb'))\r\n model = trained_file['model']\r\n print \"Trained DQN Parameters:\", json.dumps(trained_file['params'], indent=2)\r\n return model",
"def load_trained_DQN(self, path):\r\n\r\n trained_file = pickle.load(open(path, 'rb'))\r\n model = trained_file['model']\r\n\r\n print(\"trained DQN Parameters:\", json.dumps(trained_file['params'], indent=2))\r\n return model",
"def test_researchstudy_1(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"]\n / \"researchstudy-example-ctgov-study-record.json\"\n )\n inst = researchstudy.ResearchStudy.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ResearchStudy\" == inst.resource_type\n\n impl_researchstudy_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ResearchStudy\" == data[\"resourceType\"]\n\n inst2 = researchstudy.ResearchStudy(**data)\n impl_researchstudy_1(inst2)",
"def loadRun(path_to_run):\n run = {}\n with open(path_to_run, 'r') as f:\n for line in f:\n split_line = line.strip().split()\n topic = split_line[0]\n docid = split_line[2]\n score = float(split_line[4])\n if topic not in run:\n run[topic] = []\n run[topic].append((docid, score))\n return run",
"def explore():\n modelpath = util.getEnv('gqcnn_train_stable_model', predicates=[os.path.exists], failIfNot=True)\n dataset = DatasetSlice(0)\n dataset.inspect()",
"def _localTakeAstepRun(self,inDictionary):\n #Train the ROM... It is not needed to add the trainingSet since it's already been added in the initialization method\n for ROM in inDictionary['Output']:\n ROM.train(inDictionary['Input'][0])",
"def load_existing (self, existing_train):\n\n self._name = copy.deepcopy (existing_train.name)\n self._direction_id = copy.deepcopy (existing_train.direction_id)\n self._direction_name = copy.deepcopy (existing_train.direction_name)\n self._stops = copy.deepcopy (existing_train.stops)\n self._tracks = copy.deepcopy (existing_train.tracks)\n self._set_start ()\n self._current = self._start\n self._set_end ()\n self._total_travel_time = copy.deepcopy (\n existing_train._total_travel_time)",
"def load_summary(self):\n summary_path = os.path.join(self.base_dir, 'result_summary.npz')\n if os.path.exists(summary_path):\n summary_results = self.load_phase_result(summary_path)\n self.R.update(summary_results)",
"def load_dataset(self):",
"def load_or_estimate_variogram(dataset: DataUpload, api: API, expander_container=st.sidebar) -> Variogram:\r\n # the user has to make a decision - load available params if any\r\n available_vario_names = {vparam.id: vparam.name for vparam in api.filter_vario_params(data_id=dataset.id)}\r\n \r\n # check the amount of available variograms\r\n if len(available_vario_names) > 0:\r\n # check if there is a vario_id in the session\r\n if 'vario_id' in st.session_state:\r\n if st.session_state.vario_id not in available_vario_names:\r\n del st.session_state.vario_id\r\n expander_container.selectbox('Select variogram', options=list(available_vario_names.keys()), format_func=lambda k: f\"{available_vario_names.get(k)} <ID={k}>\", key='vario_id')\r\n vparam = api.get_vario_params(id=st.session_state.vario_id)\r\n return vparam.variogram\r\n else:\r\n omit_estimation = st.checkbox('Use existing variogram parameters', value=False)\r\n else:\r\n omit_estimation = False\r\n \r\n # check if an estimation is needed\r\n if not omit_estimation:\r\n emp_expander = st.sidebar.expander('VARIOGRAM HYPER-PARAMETERS', expanded=True)\r\n variogram = empirical_variogram_estimation(dataset=dataset, container=emp_expander)\r\n else:\r\n # otherwise select a model\r\n left, right = st.columns((7,2))\r\n vario_id = left.selectbox('Select existing empirical variogram', options=list(available_vario_names.keys()), format_func=lambda k: f\"{available_vario_names.get(k)} <ID={k}>\")\r\n \r\n # add a preview\r\n vparam = api.get_vario_params(id=vario_id)\r\n variogram = vparam.variogram\r\n\r\n fig = plot_variogram_params(variogram)\r\n st.plotly_chart(fig, use_container_width=True)\r\n\r\n # add the load button\r\n right.markdown(\"\"\"<br>\"\"\", unsafe_allow_html=True)\r\n load = right.button('LOAD')\r\n if load:\r\n st.session_state.vario_id = vario_id\r\n st.experimental_rerun()\r\n else:\r\n st.stop()\r\n\r\n # finally return\r\n return variogram",
"def _load_expert_models(scenario_name, base_model, run_id, len_stream):\n # base_dir = f'/raid/carta/EXML_CLVISION_PRETRAINED_EXPERTS/{scenario_name}'\n base_dir = default_dataset_location(\n f\"EXML_CLVISION22_PRETRAINED_EXPERTS/{scenario_name}/run{run_id}\"\n )\n\n weburl = (\n f\"http://131.114.50.174/data/EXML_CLVISION22_PRETRAINED_EXPERTS\"\n f\"/{scenario_name}/run{run_id}\"\n )\n\n experts_stream = []\n for i in range(len_stream):\n fname_i = f\"{base_dir}/model_e{i}.pth\"\n weburl_i = f\"{weburl}/model_e{i}.pth\"\n\n if not os.path.exists(fname_i):\n os.makedirs(base_dir, exist_ok=True)\n print(f\"Downloading expert model {i}\")\n urllib.request.urlretrieve(weburl_i, fname_i)\n\n model = copy.deepcopy(base_model)\n state_d = torch.load(fname_i)\n model.load_state_dict(state_d)\n model.to(\"cpu\").eval()\n experts_stream.append(model)\n return experts_stream",
"def load_scene_robot(self):\n\t\tif self.config['scene'] == 'relocate':\n\t\t\tscene_id = self.config['scene_id']\n\t\t\tn_interactive_objects = self.config.get('obj_num', 1)\n\t\t\tif \"multi_band\" in scene_id:\n\t\t\t\tscene = RelocateScene(scene_id=scene_id, n_interactive_objects=n_interactive_objects, multi_band=True)\n\t\t\telse:\t\n\t\t\t\tscene = RelocateScene(scene_id=scene_id, n_interactive_objects=n_interactive_objects)\n\n\t\t\tself.simulator.import_scene(scene, load_texture=self.config.get('load_texture', True))\n\t\t\tself.scene = scene\n\t\telif self.config['scene'] == 'relocate_different_objects':\n\t\t\tscene_id = self.config['scene_id']\n\t\t\tn_interactive_objects = self.config.get('obj_num', 2)\n\t\t\tscene = RelocateSceneDifferentObjects(scene_id=scene_id, n_interactive_objects=n_interactive_objects, \n\t\t\t\tmaterial_names=self.config.get('obj_material', ['Material__wood_hemlock', 'Material__steel_oxydized_bright']))\n\t\t\tself.simulator.import_scene(scene, load_texture=self.config.get('load_texture', True))\n\t\t\tself.scene = scene\n\t\telse:\n\t\t\traise Exception(\n\t\t\t\t'unknown scene type: {}'.format(self.config['scene']))\n\n\t\tself.load_robot()",
"def _load_turicreate_model(self, path):\n return tc.load_model(path)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens the front door. | async def open_front_door(self):
self.status = enums.OvenStatus.OPEN.value
self.set_pizza_status(enums.PizzaStatus.OVEN_OPEN.value)
await self.execute_task('open_front_door') | [
"def open_door(self):\n if not self.door_open:\n self.do(2, \"Opening door\")\n self.door_open = True",
"def open(self):\n self.servo.set(robotmap.positionList.openGearDoorPosition)",
"def __open_door(self):\n\n self.status = StatusEnum.STOP.value\n print(\"Elevator {}: current Position -> {}\".format(self.id_elv, self.current_floor))\n print(\"Elevator {}: status -> {}, direction -> {}\".format(self.id_elv, self.status, self.direction))\n print(\"Elevator {}: open door \".format(self.id_elv))",
"def open_blueprint_scene():\n blueprint = get_editor_blueprint()\n if not blueprint:\n return\n\n if not blueprint.scene_path:\n LOG.warning(\"No scene path associated with the opened Blueprint.\")\n return\n\n LOG.info(\"Opening blueprint: %s\", blueprint.scene_path)\n save_cameras()\n pm.openFile(blueprint.scene_path, force=True)\n restore_cameras()",
"def open(self):\n self.state = _OpenState(self)\n logger.debug(\"Opened\")",
"def open_adobe(self):\n self.driver.start_activity(const.PACKAGE.ADOBE,const.LAUNCH_ACTIVITY.ADOBE, wait_activity=const.PACKAGE.ADOBE + \"*\")\n if self.driver.wait_for_object(\"welcome_screen_exit_button\", timeout=10, raise_e=False):\n self.driver.click(\"welcome_screen_exit_button\")\n if self.has_overlay_ui():\n self.turn_off_overlay_ui_guide()",
"def doOpen(self):\n if self.isOpen():\n return\n\n global _DirFromWhichToRunDS9, _DS9Path\n _Popen(\n args = (_DS9Path, \"-title\", self.template, \"-port\", \"0\"),\n cwd = _DirFromWhichToRunDS9,\n )\n\n startTime = time.time()\n while True:\n time.sleep(_OpenCheckInterval)\n if self.isOpen():\n break\n if time.time() - startTime > _MaxOpenTime:\n raise RuntimeError(\"Could not open ds9 window %r; timeout\" % (self.template,))",
"def open(self): #opens the camera and returns the handle\n if not self.isOpen: \n a = self.call(lib.OpenCamera, self.name, lib.Handle())\n self.isOpen = True\n #self.call(lib.SetStreaming, a[1], 1)\n return a[1]",
"def performOpen(self, **kw):\n # connect through deviceID\n apilevel = 6 # The API level supported by this driver\n (daq, device, props) = zhinst.utils.create_api_session(self.deviceID, apilevel, \n required_devtype='UHF', \n required_options=['AWG'])\n zhinst.utils.api_server_version_check(daq)\n # Create a base configuration: Disable all available outputs, awgs, demods, scopes,...\n zhinst.utils.disable_everything(daq, device)\n self.daq = daq\n self.device = device\n self.props = props",
"async def open_back_door(self):\n \n await self.execute_task('open_back_door')\n self.status = enums.OvenStatus.WAITING_FOR_ROBOT_AFTER.value",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is open.\")",
"def on_action_open(self, content):\n self.widget().open()",
"def _open_device(self):\n pass",
"def openMenu(self):\n root = tk.Tk()\n menu = Menu(self, master=root)\n menu.mainloop()",
"def open_back_gate(self):\r\n self.send_line('2')",
"def opening_screen():\r\n\r\n HANGMAN_ASCII_ART = f\"\"\"{COL_3} _ _ \r\n | | | | \r\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \r\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \r\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\r\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\r\n __/ | \r\n |___/ {END_COL}\"\"\"\r\n\r\n USER_NAME = os.getenv('username')\r\n\r\n print(HANGMAN_ASCII_ART, f\"\\n{COL_1}{MAX_TRIES} \\n\\nHello {COL_4}{USER_NAME}!{END_COL} \\n\")",
"def openAE(self):\n self.aeCom.openAE()",
"def try_open(self):\n logger.info(\"Open a merchant\")\n self.game.create_dim()\n self.game.merchant_open = True\n self.game.opened_merchant = self",
"def open_expense_window(self):\n self.expense_window.show()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Closes the front door. | async def close_front_door(self):
await self.execute_task('close_front_door') | [
"def close_door(self):\n if self.door_open:\n self.do(2, \"Closing door\")\n self.door_open = False",
"def close(self):\n self.servo.set(robotmap.positionList.closeGearDoorPosition)",
"def close(self):\r\n pygame.quit()",
"def close_board(self):\n self.ctrl.close()\n return",
"def close(self):\n try:\n _close_camera(self.id)\n finally:\n self.closed = True",
"def close_window(self):\r\n Window.close()",
"def close(self):\n close_command = StandardSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00)\n self._send_method(close_command, self._close_message_received)",
"def closeSkyPanel(self):\n content = dict(\n event='closeSkyPanel'\n )\n self._sendToFrontEnd(content)",
"def close(self):\n\t\tif self.serial.isOpen():\n\t\t\tself.serial.close()",
"def __close_door(self):\n self.status = StatusEnum.STOP.value\n print(\"Elevator {}: current Position -> {}\".format(self.id_elv, self.current_floor))\n print(\"Elevator {}: status -> {}, direction -> {}\".format(self.id_elv, self.status, self.direction))\n print(\"Elevator {}: close door \".format(self.id_elv))",
"def _close_wid(self, wid):\n pass",
"def close(self):\n self.state = _ClosedState(self)\n logger.debug(\"Closed\")",
"def close(self):\n\n self.__camera.release()",
"def close(self) -> None:\n if hasattr(self, '_wandb'):\n self._wandb.join()",
"def close(self, delay=0.5):\n self.log_msg('SYS', 'Shutting Down!')\n if self.controller is None:\n self.log_msg('WARN', 'Controller already off!')\n else:\n try:\n self.log_msg('CTRL', 'Closing Controller ...')\n self.controller.close() ## Disable controller\n except Exception as error:\n self.log_msg('CTRL', 'ERROR: %s' % str(error), important=True)\n for i in range(len(self.cameras)):\n if self.cameras[i] is None:\n self.log_msg('CAM', 'WARN: Camera %d already off!' % i)\n else:\n try:\n self.log_msg('CAM', 'WARN: Closing Camera %d ...' % i)\n self.cameras[i].release() ## Disable cameras\n except Exception as error:\n self.log_msg('CAM', 'ERROR: %s' % str(error), important=True)\n if self.config['DISPLAY_ON']:\n cv2.destroyAllWindows() ## Close windows",
"def close(self):\n self.channel.close()",
"def close(self):\n if self.channel:\n self.channel.close()\n self.channel = None",
"def close_animation(self):\n\n self.env.gym_env.close()",
"def close(self):\n pygame.quit()\n self._setup = not self._setup"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Opens the back door. | async def open_back_door(self):
await self.execute_task('open_back_door')
self.status = enums.OvenStatus.WAITING_FOR_ROBOT_AFTER.value | [
"def open_door(self):\n if not self.door_open:\n self.do(2, \"Opening door\")\n self.door_open = True",
"def open_back_gate(self):\r\n self.send_line('2')",
"def open(self):\n self.servo.set(robotmap.positionList.openGearDoorPosition)",
"def __open_door(self):\n\n self.status = StatusEnum.STOP.value\n print(\"Elevator {}: current Position -> {}\".format(self.id_elv, self.current_floor))\n print(\"Elevator {}: status -> {}, direction -> {}\".format(self.id_elv, self.status, self.direction))\n print(\"Elevator {}: open door \".format(self.id_elv))",
"async def open_front_door(self):\n \n self.status = enums.OvenStatus.OPEN.value\n self.set_pizza_status(enums.PizzaStatus.OVEN_OPEN.value)\n await self.execute_task('open_front_door')",
"def open(self):\n self.state = _OpenState(self)\n logger.debug(\"Opened\")",
"def open_blueprint_scene():\n blueprint = get_editor_blueprint()\n if not blueprint:\n return\n\n if not blueprint.scene_path:\n LOG.warning(\"No scene path associated with the opened Blueprint.\")\n return\n\n LOG.info(\"Opening blueprint: %s\", blueprint.scene_path)\n save_cameras()\n pm.openFile(blueprint.scene_path, force=True)\n restore_cameras()",
"def open_flipper(self):\n self._fiber_shooting_logic.open_flipper()\n return",
"def open_restaurant(self):\n print(f\"{self.restaurant_name} is open.\")",
"def open(self):\n console.output(\"The doors seem to be sealed shut. \"\n \"Hopefully you'll be able to open them when you leave.\")\n return False",
"def try_open(self):\n logger.info(\"Open a merchant\")\n self.game.create_dim()\n self.game.merchant_open = True\n self.game.opened_merchant = self",
"def doOpen(self):\n if self.isOpen():\n return\n\n global _DirFromWhichToRunDS9, _DS9Path\n _Popen(\n args = (_DS9Path, \"-title\", self.template, \"-port\", \"0\"),\n cwd = _DirFromWhichToRunDS9,\n )\n\n startTime = time.time()\n while True:\n time.sleep(_OpenCheckInterval)\n if self.isOpen():\n break\n if time.time() - startTime > _MaxOpenTime:\n raise RuntimeError(\"Could not open ds9 window %r; timeout\" % (self.template,))",
"def openMenu(self):\n root = tk.Tk()\n menu = Menu(self, master=root)\n menu.mainloop()",
"def enter(self, player):\n if self.is_open:\n other_room = self.other_side_from(player.current_room)\n other_room.enter(player)\n else:\n super(Door, self).enter(player)",
"def open(self):\n return self.connector.reopen()",
"def on_action_open(self, content):\n self.widget().open()",
"def open_lid(self):\r\n\r\n if self.db.lid_open:\r\n return\r\n desc = self.db.desc_lid_open\r\n if not desc:\r\n desc = \"This is a large red button, inviting yet evil-looking. \"\r\n desc += \"Its glass cover is open and the button exposed.\"\r\n self.db.desc = desc\r\n self.db.lid_open = True\r\n\r\n # with the lid open, we validate scripts; this will clean out\r\n # scripts that depend on the lid to be closed.\r\n self.scripts.validate()\r\n # now add new scripts that define the open-lid state\r\n self.scripts.add(scriptexamples.OpenLidState)\r\n # we also add a scripted event that will close the lid after a while.\r\n # (this one cleans itself after being called once)\r\n self.scripts.add(scriptexamples.CloseLidEvent)",
"def performOpen(self, **kw):\n # connect through deviceID\n apilevel = 6 # The API level supported by this driver\n (daq, device, props) = zhinst.utils.create_api_session(self.deviceID, apilevel, \n required_devtype='UHF', \n required_options=['AWG'])\n zhinst.utils.api_server_version_check(daq)\n # Create a base configuration: Disable all available outputs, awgs, demods, scopes,...\n zhinst.utils.disable_everything(daq, device)\n self.daq = daq\n self.device = device\n self.props = props",
"def open_hand(self, close=False):\n action = HAND_OPEN\n if close:\n action = HAND_CLOSE\n self.motion.setAngles(R_HAND, action, 0.2)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
3 APs, two roots, one mesh, 3 APs can mesh each other. R1)))M2 R3. Reboot from M2 and check heartbeat host/reboot info. | def test_map_reboot(self):
logging.info('Facilitate all AP to right status.')
self._setup_env()
logging.info('Wait for 90 seconds to trigger statistic report')
time.sleep(90)
xml = STR.get_xml_data(self.zdipaddr, self.zduser, self.zdpasswd)
self.xmlhnd.update_xml(xml)
aptwo_ul_num = self.xmlhnd.get_ap_mesh_uplink_acquired(self.testaptwo)
self.testaptwo.apins.reboot()
elipsetime = self._check_aps_on_zd()
logging.info('Wait for 90 seconds to trigger statistic report')
time.sleep(90 - elipsetime)
xml_next = STR.get_xml_data(self.zdipaddr, self.zduser, self.zdpasswd)
self.xmlhnd.update_xml(xml_next)
aptwo_ul_num_n = self.xmlhnd.get_ap_mesh_uplink_acquired(self.testaptwo)
res1 = self.TestCaseResulter("Reboot MAP")
res2 = self.TestCaseResulter("MAP heartbeat lost")
if aptwo_ul_num_n != aptwo_ul_num + 1:
msg = "AP%s Expected mesh-num-uplink-acquired=%s, actual mesh-num-uplink-acquired=%s" \
% (self.testaptwo.get_ap_mac(), aptwo_ul_num + 1, aptwo_ul_num_n)
res1.update_result("FAIL", msg)
res2.update_result("FAIL", msg)
else:
res1.update_result("PASS", "Correct Behavior")
res2.update_result("PASS", "Correct Behavior")
return [res1, res2] | [
"def multinic_bootstrap_booting(self):\n self.env.revert_snapshot(\"ready\")\n\n slave = self.env.nodes().slaves[0]\n mac_addresses = [interface.mac_address for interface in\n slave.interfaces.filter(network__name='internal')]\n try:\n for mac in mac_addresses:\n Ebtables.block_mac(mac)\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)\n slave.destroy(verbose=False)\n self.env.nodes().admins[0].revert(\"ready\")\n nailgun_slave = self.env.bootstrap_nodes([slave])[0]\n assert_equal(mac.upper(), nailgun_slave['mac'].upper())\n Ebtables.block_mac(mac)\n finally:\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)",
"def reboot(self, update, context):\n\n\t\tlogger.info(\"/Reboot Triggered, Rebooting Router ZTE F609\")\n\n\t\ttry:\n\t\t\tcontext.bot.sendMessage(\n\t\t\t\tchat_id=self.chat_id_telegram,\n\t\t\t\ttext=\"<code>Rebooting Router ZTE F609</code>\",\n\t\t\t\tparse_mode=ParseMode.HTML,\n\t\t\t\treply_markup=self.choices_keyboard_remove)\n\n\t\t\trouter = zte_.RouterZteF609(self.router_ip_address, self.router_user, self.router_password)\n\n\t\t\tif router.reboot() == 'SUCCESS':\n\t\t\t\tlogger.info(\"Rebooting Router ZTE F609 Completed Successfully, Waiting 120 Seconds\")\n\n\t\t\t\ttime.sleep(120)\n\t\t\t\tcontext.bot.sendMessage(\n\t\t\t\t\tchat_id=self.chat_id_telegram,\n\t\t\t\t\ttext=\"`Rebooting Router ZTE F609 Completed Successfully, Do You Want Reboot Again ?`\",\n\t\t\t\t\tparse_mode=ParseMode.MARKDOWN_V2,\n\t\t\t\t\treply_markup=self.choices_keyboard_markup)\n\n\t\t\telse:\n\t\t\t\tlogger.info(\"ERROR Rebooting Router ZTE F609\")\n\t\t\t\tcontext.bot.sendMessage(\n\t\t\t\t\tchat_id=self.chat_id_telegram,\n\t\t\t\t\ttext=\"`ERROR Rebooting Router ZTE F609`\",\n\t\t\t\t\tparse_mode=ParseMode.MARKDOWN_V2,\n\t\t\t\t\treply_markup=self.choices_keyboard_remove)\n\n\t\texcept Exception:\n\t\t\tlogger.exception('ERROR on Rebooting Router ZTE F609')",
"def test_10_reboot_router_forced(self):\n\n list_router_response = list_routers(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.assertEqual(\n isinstance(list_router_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n router = list_router_response[0]\n\n public_ip = router.publicip\n\n self.debug(\"Force rebooting the router with ID: %s\" % router.id)\n # Reboot the router\n cmd = rebootRouter.rebootRouterCmd()\n cmd.id = router.id\n cmd.forced = True\n self.apiclient.rebootRouter(cmd)\n\n # List routers to check state of router\n retries_cnt = 10\n while retries_cnt >= 0:\n router_response = list_routers(\n self.apiclient,\n id=router.id\n )\n if self.verifyRouterResponse(router_response, public_ip):\n self.debug(\"Router is running successfully after force reboot\")\n return\n time.sleep(10)\n retries_cnt = retries_cnt - 1\n self.fail(\n \"Router response after force reboot is either invalid\\\n or router in stopped state\")\n return",
"def test_04_reboot_instance_in_network(self):\n\n # Validate the following\n # 1. Reboot the virtual machines.\n # 2. Vm should be started successfully.\n # 3. Make sure that all the PF,LB and Static NAT rules on this VM\n # works as expected.\n # 3. Make sure that we are able to access google.com from this user Vm\n\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n self.debug(\"Starting the virtual machines in account: %s\" %\n self.account.name)\n try:\n self.vm_1.reboot(self.apiclient)\n self.vm_2.reboot(self.apiclient)\n except Exception as e:\n self.fail(\"Failed to reboot the virtual instances, %s\" % e)\n\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n return",
"def test_04_reboot_instance_in_network(self):\n\n # Validate the following\n # 1. Reboot the virtual machines.\n # 2. Vm should be started successfully.\n # 3. Make sure that all the PF,LB and Static NAT rules on this VM\n # works as expected.\n # 3. Make sure that we are able to access google.com from this user Vm\n\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n\n self.debug(\"Starting the virtual machines in account: %s\" %\n self.account.name)\n try:\n self.vm_1.reboot(self.apiclient)\n self.vm_2.reboot(self.apiclient)\n except Exception as e:\n self.fail(\"Failed to reboot the virtual instances, %s\" % e)\n\n # Wait until vms are up\n time.sleep(120)\n self.debug(\"Validating if the network rules work properly or not?\")\n self.validate_network_rules()\n return",
"def reboot(self):\n if appuifw.query(u\"Reboot phone\", 'query'):\n try:\n e32.start_exe(u'Z:\\\\System\\\\Programs\\\\Starter.exe', '', 0)\n except:\n appuifw.note(u\"Not supported in this model.\", 'error')",
"def rebootServer():\r\n id = getServerId()\r\n serverToReboot = serverManager.find(id)\r\n if not serverToReboot: # find() returns None on failure to find server\r\n print \"Server not found %s\" % id\r\n return\r\n\r\n print \"Hard or Soft (h/S): \"\r\n hard_soft = stdin.readline().strip()\r\n if hard_soft in \"Hh\":\r\n rType = rebootType.hard\r\n else:\r\n rType = rebootType.soft\r\n\r\n sleepTime = getSleepTime() # Get sleep time to avoid overlimit fault\r\n serverManager.reboot(serverToReboot, rType)\r\n status = serverToReboot.status\r\n while status != u\"ACTIVE\":\r\n status = serverToReboot.status\r\n print \"Status : \", serverToReboot.status\r\n print \"Progress : \", serverToReboot.progress\r\n print \"Sleeping : \", sleepTime\r\n sleep(sleepTime) # pacing to avoid overlimit fault\r\n\r\n print \"Rebooted!\"",
"def test_standby_tor_reboot_upstream(\n upper_tor_host, lower_tor_host, send_server_to_t1_with_action, # noqa F811\n toggle_all_simulator_ports_to_upper_tor, toggle_lower_tor_pdu, # noqa F811\n wait_for_device_reachable, wait_for_mux_container # noqa F811\n):\n send_server_to_t1_with_action(\n upper_tor_host, verify=True,\n action=toggle_lower_tor_pdu, stop_after=60\n )\n wait_for_device_reachable(lower_tor_host)\n wait_for_mux_container(lower_tor_host)\n verify_tor_states(\n expected_active_host=upper_tor_host,\n expected_standby_host=lower_tor_host\n )",
"def reboot(target):\n print(\"Rebooting to {target}.\".format(target=target))\n subprocess.check_call([\"adb\", \"reboot\", target])",
"def restart_modem():\n LOG.warn(\"Restarting modem\")\n s0 = run_command(['querymodem', 'run', 'AT+CFUN=4'])\n s1 = run_command(['querymodem', 'run', 'AT+CFUN=6'])\n LOG.warn(\"Restart modem status|%r|%r\", s0, s1)",
"def test_04_restart_network_wo_cleanup(self):\n\n # Validate the following\n # 1. When cleanup = false, router is restarted and\n # all services inside the router are restarted\n # 2. check 'uptime' to see if the actual restart happened\n\n timeout = 10\n # Network should be in Implemented or Setup stage before restart\n while True:\n networks = list_networks(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check list response returns a valid list\"\n )\n network = networks[0]\n if network.state in [\"Implemented\", \"Setup\"]:\n break\n elif timeout == 0:\n break\n else:\n time.sleep(self.services[\"sleep\"])\n timeout = timeout - 1\n\n self.debug(\n \"Restarting network with ID: %s, Network state: %s\" % (\n network.id,\n network.state\n ))\n cmd = restartNetwork.restartNetworkCmd()\n cmd.id = network.id\n cmd.cleanup = False\n self.apiclient.restartNetwork(cmd)\n\n # Get router details after restart\n list_router_response = list_routers(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.assertEqual(\n isinstance(list_router_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n router = list_router_response[0]\n\n hosts = list_hosts(\n self.apiclient,\n zoneid=router.zoneid,\n type='Routing',\n state='Up',\n id=router.hostid\n )\n self.assertEqual(\n isinstance(hosts, list),\n True,\n \"Check list response returns a valid list\"\n )\n host = hosts[0]\n\n if self.hypervisor.lower() in ('vmware', 'hyperv'):\n res = get_process_status(\n self.apiclient.connection.mgtSvr,\n 22,\n self.apiclient.connection.user,\n self.apiclient.connection.passwd,\n router.linklocalip,\n \"uptime\",\n hypervisor=self.hypervisor\n )\n else:\n try:\n host.user, host.passwd = get_host_credentials(\n self.config, host.ipaddress)\n res = get_process_status(\n host.ipaddress,\n 22,\n host.user,\n host.passwd,\n router.linklocalip,\n \"uptime\"\n )\n except KeyError:\n self.skipTest(\n \"Marvin configuration has no host credentials\\\n to check router services\")\n # res = 12:37:14 up 1 min, 0 users, load average: 0.61, 0.22, 0.08\n # Split result to check the uptime\n result = res[0].split()\n self.debug(\"Router Uptime: %s\" % result)\n self.assertEqual(\n str(result[1]),\n 'up',\n \"Check router is running or not\"\n )\n if str(result[3]) == \"min,\":\n self.assertEqual(\n (int(result[2]) < 20),\n True,\n \"Check uptime is less than 20 mins or not\"\n )\n else:\n self.assertEqual(\n str(result[3]),\n 'sec,',\n \"Check uptime is in seconds\"\n )\n return",
"def test_oobm_issue_power_cycle(self):\n self.configureAndStartMgmtServer()\n self.assertIssueCommandState('CYCLE', 'On')\n global apiRequests\n self.assertTrue('rebootVirtualMachine' in apiRequests)",
"def host_reboot_failed(self, host):\n if self._sw_update is not None:\n self._sw_update.handle_event(\n strategy.STRATEGY_EVENT.HOST_REBOOT_FAILED, host)",
"def reboot(self):\n LOG.info('Reboot nodes: %s', self)\n task = {'command': 'reboot now'}\n self.cloud_management.execute_on_cloud(self.get_ips(), task)",
"def restart():\n if not ui_lib.wait_for_element(FusionSettingsPage.ID_PAGE_LABEL):\n navigate()\n\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_APPLIANCE_LINK)\n\n logger._log_to_console_and_log_file(\"Restart Appliance\")\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_MENU_ACTION_MAIN_BTN)\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_MENU_ACTION_RESTART)\n if ui_lib.wait_for_element_visible(FusionSettingsPage.ID_LABEL_RESTART):\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_BTN_YES_CONFIRM_SHUTDOWN)\n else:\n ui_lib.fail_test('Failed: The message while restarting the appliance is not displayed')\n # Check whether the fusion appliance is restarting\n if ui_lib.wait_for_element_visible(FusionSettingsPage.ID_TEXT_RESTARTING, PerfConstants.RESTART_LABEL_VISIBLE):\n logger._log_to_console_and_log_file(\"The fusion appliance is restarting \")\n else:\n ui_lib.fail_test('Failed: The fusion appliance failed to display restarting label')\n # Checking whether the fusion appliance started to coming up\n if ui_lib.wait_for_element_visible(FusionSettingsPage.ID_LABEL_PROGRESS, PerfConstants.STARTING_PROGRESS_VISIBLE):\n logger._log_to_console_and_log_file(\"The fusion appliance is powered on successfully and waiting for the appliance\")\n else:\n ui_lib.fail_test('Failed: The fusion appliance dint get powered on successfully and appliance couldnot start')\n # Checking whether the fusion appliance home page\n if ui_lib.wait_for_element_notvisible(FusionSettingsPage.ID_LABEL_PROGRESS, PerfConstants.STARTING_PROGRESS_VISIBLE):\n if ui_lib.wait_for_element_visible(FusionDashboardPage.ID_PAGE_LABEL, PerfConstants.FUSION_LOGIN_TIME):\n logger._log_to_console_and_log_file(\"The fusion appliance is restarted successfully\")\n else:\n ui_lib.fail_test('Failed: The fusion appliance did not come up after restart')",
"def load_ceph_partitions_cold_reboot(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"load_ceph_ha\")\n\n self.show_step(2)\n self.fuel_web.wait_mysql_galera_is_up(['slave-01'])\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(3)\n self.fuel_web.check_ceph_status(cluster_id)\n\n self.show_step(4)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(5)\n for node in ['slave-0{0}'.format(slave) for slave in xrange(1, 4)]:\n with self.fuel_web.get_ssh_for_node(node) as remote:\n file_name = \"test_data\"\n file_dir = remote.execute(\n 'mount | grep -m 1 ceph')['stdout'][0].split()[2]\n file_path = os.path.join(file_dir, file_name)\n result = remote.execute(\n 'fallocate -l 30G {0}'.format(file_path))['exit_code']\n assert_equal(result, 0, \"The file {0} was not \"\n \"allocated\".format(file_name))\n\n self.show_step(6)\n self.fuel_web.check_ceph_status(cluster_id)\n\n self.show_step(7)\n assert_true(settings.PATCHING_RUN_RALLY,\n 'PATCHING_RUN_RALLY was not set in true')\n rally_benchmarks = {}\n for tag in set(settings.RALLY_TAGS):\n rally_benchmarks[tag] = RallyBenchmarkTest(\n container_repo=settings.RALLY_DOCKER_REPO,\n environment=self.env,\n cluster_id=cluster_id,\n test_type=tag\n )\n rally_benchmarks[tag].run(result=False)\n\n self.show_step(8)\n self.fuel_web.cold_restart_nodes(\n self.env.d_env.get_nodes(name__in=[\n 'slave-01',\n 'slave-02',\n 'slave-03',\n 'slave-04',\n 'slave-05']))\n\n for tag in rally_benchmarks:\n task_id = rally_benchmarks[tag].current_task.uuid\n rally_benchmarks[tag].current_task.abort(task_id)\n\n self.show_step(9)\n self.fuel_web.assert_ha_services_ready(cluster_id)\n\n self.fuel_web.assert_os_services_ready(cluster_id)\n\n self.show_step(10)\n self.fuel_web.wait_mysql_galera_is_up(['slave-01'])\n\n try:\n self.fuel_web.run_single_ostf_test(\n cluster_id, test_sets=['smoke'],\n test_name=map_ostf.OSTF_TEST_MAPPING.get(\n 'Create volume and attach it to instance'))\n except AssertionError:\n logger.debug(\"Test failed from first probe,\"\n \" we sleep 180 seconds and try one more time \"\n \"and if it fails again - test will fail \")\n time.sleep(180)\n self.fuel_web.run_single_ostf_test(\n cluster_id, test_sets=['smoke'],\n test_name=map_ostf.OSTF_TEST_MAPPING.get(\n 'Create volume and attach it to instance'))\n self.show_step(11)\n # LB 1519018\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n self.env.make_snapshot(\"load_ceph_partitions_cold_reboot\")",
"def check_reboot(remote, args):\n if args.check:\n print('[i] Checking if vulnerable with \"reboot\"')\n else:\n print(f'[*] Checking remote \"{args.rhost}:{args.rport}\" with \"reboot\"')\n remote.send(query_args='reboot')\n time.sleep(2)\n if not remote.send(url='/', query_args=None):\n print('[!] Remote is vulnerable')\n return True\n else:\n print('[+] Remote is not vulnerable')\n return False",
"def reboot_system(system_name, remote):\n\n env.disable_known_hosts = True\n\n interface, ip_address = get_ip(remote, system_name)\n\n if ip_address != \"No IP\":\n env.host_string = ip_address \n run('/sbin/reboot')\n else:\n print \"Issue getting IP address for %s\" % (system)",
"def reboot(self):\n self.write(\"miner_reboot\")\n response = self.read()\n\n if response:\n return True\n else:\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
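The test record above follows a generic poll-reboot-poll pattern: read the mesh-uplink-acquired counter, reboot the mesh AP, wait for it to rejoin and for the next statistics report, then assert the counter went up by exactly one. The sketch below illustrates only that pattern; `zd_client`, `ap`, and their methods are hypothetical placeholders, not part of the controller/AP framework used in the record.

```python
# Minimal sketch of the poll-reboot-poll verification pattern (all object
# methods here are assumed placeholders, not a real vendor API).
import time

def verify_uplink_counter_after_reboot(zd_client, ap, report_interval=90):
    before = zd_client.get_ap_mesh_uplink_acquired(ap)   # assumed accessor
    ap.reboot()                                          # assumed AP method
    elapsed = zd_client.wait_until_ap_rejoins(ap)        # assumed helper, returns seconds
    time.sleep(max(0, report_interval - elapsed))        # wait for the next statistics report
    after = zd_client.get_ap_mesh_uplink_acquired(ap)
    if after != before + 1:
        return "FAIL", f"expected mesh-num-uplink-acquired={before + 1}, got {after}"
    return "PASS", "Correct Behavior"
```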
draws a line in an image 'Img' given 'rho' and 'theta' | def draw_line(Img, rho, theta):
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(Img, (x1, y1), (x2, y2), (0, 0, 255), 1) | [
"def draw_line(theta, rho, **args):\n def clamp(a, b, a_min, a_max, rho, A, B):\n if a < a_min or a > a_max:\n a = np.fmax(a_min, np.fmin(a_max, a))\n b = (rho-a*A)/B\n return a, b\n\n x_min,x_max = np.sort(plt.xlim())\n y_min,y_max = np.sort(plt.ylim())\n c = np.cos(theta)\n s = np.sin(theta)\n if np.fabs(s) > np.fabs(c):\n x1 = x_min\n x2 = x_max\n y1 = (rho-x1*c)/s\n y2 = (rho-x2*c)/s\n y1,x1 = clamp(y1, x1, y_min, y_max, rho, s, c)\n y2,x2 = clamp(y2, x2, y_min, y_max, rho, s, c)\n else:\n y1 = y_min\n y2 = y_max\n x1 = (rho-y1*s)/c\n x2 = (rho-y2*s)/c\n x1,y1 = clamp(x1, y1, x_min, x_max, rho, c, s)\n x2,y2 = clamp(x2, y2, x_min, x_max, rho, c, s)\n plt.plot([x1, x2], [y1, y2], **args)",
"def HoughLines(\n image,\n rho,\n theta,\n threshold,\n lines=...,\n srn=...,\n stn=...,\n min_theta=...,\n max_theta=...,\n) -> lines:\n ...",
"def draw_hough_line(img, lines, color=[255,0,0], thickness=2):\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(line_img, (x1,y1), (x2,y2),[255, 0, 0],2)\n return line_img",
"def hough_lines(self, img):\n lines = cv2.HoughLinesP(img,\n self.parameters.hough.rho,\n self.parameters.hough.theta,\n self.parameters.hough.threshold,\n np.array([]),\n minLineLength=self.parameters.hough.min_line_length,\n maxLineGap=self.parameters.hough.max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n self.draw_lines(line_img, lines)\n return line_img",
"def hough_lines(img):\n # Define the Hough transform parameters\n # Make a blank the same size as our image to draw on\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi / 180 # angular resolution in radians of the Hough grid\n threshold = 1 # minimum number of votes (intersections in Hough grid cell)\n min_line_length = 7 # minimum number of pixels making up a line\n max_line_gap = 3 # maximum gap in pixels between connectible line segments\n vertices = make_vertices(img)\n masked_image = region_of_interest(img, vertices)\n\n lines = cv2.HoughLinesP(masked_image, rho, theta, threshold, np.array([]), min_line_length,\n max_line_gap)\n line_img = np.copy(image) * 0 # creating a blank to draw lines on\n draw_lines(line_img, lines, vertices)\n return line_img",
"def drawLine(turtle, angle, startX, startY, lineLength):\n\tturtle.x = startX\n\tturtle.y = startY\n\tturtle.heading = angle\n\tturtle.fd(lineLength)",
"def draw_line(Irgb, line, color=(0, 255, 0)):\n if len(Irgb.shape) != 3:\n Irgb = cv2.cvtColor(Irgb, cv.CV_GRAY2BGR)\n \n Irgb = Irgb.copy()\n h, w = Irgb.shape[0:2]\n pts = []\n for x in xrange(w):\n y = compute_line_y(line, x)\n if y > 0 and y < h:\n pts.append((x,y))\n cv.Line(cv.fromarray(Irgb), tuple(intrnd(*pts[0])), tuple(intrnd(*pts[-1])), color)\n return Irgb",
"def find_line(image):\n greyscale_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return_value, thresholds = cv2.threshold(greyscale_img, 20, 255, cv2.THRESH_BINARY)\n\n ro = 1\n theta = np.pi / 180\n min_line_length = 150\n max_line_gap = 5\n lines = cv2.HoughLinesP(thresholds, ro, theta, min_line_length, min_line_length, max_line_gap)\n\n # remember coordinates of line ends\n x1 = min(lines[:, 0, 0])\n x2 = max(lines[:, 0, 2])\n y1 = max(lines[:, 0, 1])\n y2 = min(lines[:, 0, 3])\n\n # calculate line formula in y=k*x+n format\n k = (y1 - y2) / (x1 - x2)\n n = y1 - k * x1\n\n # calculate length of the line\n length = math.sqrt(pow((y2-y1), 2) + pow((x2-x1), 2))\n\n return Line(x1=x1, y1=y1, x2=x2, y2=y2, k=k, n=n, length=length)",
"def add_line(line,pauli_pos,pauli):\n\n unhidden = see_if_unhidden(pauli)\n p = (1-self.rho[pauli])/2 # prob of 1 output\n # in the following, white lines goes from a to b, and black from b to c\n if unhidden:\n if line=='X':\n \n a = ( self.box[pauli_pos][0]-length/2, self.box[pauli_pos][1]-width/2 )\n c = ( self.box[pauli_pos][0]+length/2, self.box[pauli_pos][1]-width/2 )\n b = ( p*a[0] + (1-p)*c[0] , p*a[1] + (1-p)*c[1] )\n \n self.ax.add_patch( Rectangle( a, length*(1-p), width, angle=0, color=(0.0,0.0,0.0)) )\n self.ax.add_patch( Rectangle( b, length*p, width, angle=0, color=(1.0,1.0,1.0)) )\n \n elif line=='Z':\n \n a = ( self.box[pauli_pos][0]-width/2, self.box[pauli_pos][1]-length/2 )\n c = ( self.box[pauli_pos][0]-width/2, self.box[pauli_pos][1]+length/2 )\n b = ( p*a[0] + (1-p)*c[0] , p*a[1] + (1-p)*c[1] )\n \n self.ax.add_patch( Rectangle( a, width, length*(1-p), angle=0, color=(0.0,0.0,0.0)) )\n self.ax.add_patch( Rectangle( b, width, length*p, angle=0, color=(1.0,1.0,1.0)) )\n \n else:\n \n \n a = ( self.box[pauli_pos][0]-length/(2*np.sqrt(2)), self.box[pauli_pos][1]-length/(2*np.sqrt(2)) )\n c = ( self.box[pauli_pos][0]+length/(2*np.sqrt(2)), self.box[pauli_pos][1]+length/(2*np.sqrt(2)) )\n b = ( p*a[0] + (1-p)*c[0] , p*a[1] + (1-p)*c[1] )\n \n self.ax.add_patch( Rectangle( a, width, length*(1-p), angle=-45, color=(0.0,0.0,0.0)) )\n self.ax.add_patch( Rectangle( b, width, length*p, angle=-45, color=(1.0,1.0,1.0)) )\n \n return p",
"def draw_lines(img, lines, color=[1, 0, 0], thickness=2):\n for line in lines:\n p1 = line[0]\n p2 = line[1]\n cv2.line(img, (p1[0], p1[1]), (p2[0], p2[1]), color, thickness)",
"def _hough_line(*args, **kwargs): # real signature unknown\n pass",
"def plot_rho(self,LAXIS,xbl,xbr,ybu,ybd,ilg): \n\t\t\n # load x GRID\n grd1 = self.xzn0\n\t\n # load DATA to plot\n plt1 = self.dd\n\t\t\n # create FIGURE\n plt.figure(figsize=(7,6))\n\t\t\n # format AXIS, make sure it is exponential\n plt.gca().yaxis.get_major_formatter().set_powerlimits((0,0))\t\t\n\t\t\n # set plot boundaries \n to_plot = [plt1]\t\t\n self.set_plt_axis(LAXIS,xbl,xbr,ybu,ybd,to_plot)\t\n\t\t\n # plot DATA \n plt.title('density')\n plt.plot(grd1,plt1,color='brown',label = r'$\\overline{\\rho}$')\n\n # define and show x/y LABELS\n setxlabel = r\"r (cm)\"\n setylabel = r\"$\\overline{\\rho}$ (g cm$^{-3}$)\"\n\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n\t\t\n # show LEGEND\n plt.legend(loc=ilg,prop={'size':18})\n\n # display PLOT\n plt.show(block=False)\n\n # save PLOT\n plt.savefig('RESULTS/'+self.data_prefix+'mean_rho.png')",
"def set_line_image_data(image, line_id, image_file_name, image_fh):\n\n base_name = os.path.splitext(os.path.basename(image_file_name))[0]\n line_id = '_' + line_id.zfill(4)\n line_image_file_name = base_name + line_id + '.png'\n image_path = os.path.join(args.out_dir, line_image_file_name)\n imgray = image.convert('L')\n imgray_rev_arr = np.fliplr(imgray)\n imgray_rev = toimage(imgray_rev_arr)\n imgray_rev.save(image_path)\n image_fh.write(image_path + '\\n')",
"def plot_rho(self, LAXIS, xbl, xbr, ybu, ybd, ilg):\n\n # load x GRID\n grd1 = self.xzn0\n\n # load DATA to plot\n plt1 = self.dd\n\n # create FIGURE\n plt.figure(figsize=(7, 6))\n\n # format AXIS, make sure it is exponential\n plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))\n\n # set plot boundaries \n to_plot = [plt1]\n self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)\n\n # plot DATA \n plt.title('density')\n plt.plot(grd1, plt1, color='brown', label=r'$\\overline{\\rho}$')\n\n # define and show x/y LABELS\n if self.ig == 1:\n setxlabel = r'x (cm)'\n elif self.ig == 2:\n setxlabel = r'r (cm)'\n else:\n print(\"ERROR(HsseContinuityEquation.py):\" + self.errorGeometry(self.ig))\n sys.exit()\n\n setylabel = r\"$\\overline{\\rho}$ (g cm$^{-3}$)\"\n\n plt.xlabel(setxlabel)\n plt.ylabel(setylabel)\n\n # show LEGEND\n plt.legend(loc=ilg, prop={'size': 18})\n\n # display PLOT\n plt.show(block=False)\n\n # save PLOT\n if self.fext == 'png':\n plt.savefig('RESULTS/' + self.data_prefix + 'mean_rho.png')\n elif self.fext == 'eps':\n plt.savefig('RESULTS/' + self.data_prefix + 'mean_rho.eps')",
"def line(self, start, end, color, width=None, title='image', destroy=True):\n line = cv2.line(self.img, start, end, color, width)\n if destroy == False:\n cv2.imshow(title, self.img)\n if destroy == True:\n cv2.imshow(title, self.img)\n cv2.waitKey(0)\n cv2.destroyAllWindows",
"def draw_horizontal_lines(img):\n row, col = img.shape\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n interval = row / 10\n for i in range(1, 10):\n (x0, y0) = map(int, [0, i * interval])\n (x1, y1) = map(int, [col, i * interval])\n img = cv2.line(img, (x0, y0), (x1, y1), (0, 255, 0), 1)\n\n return img",
"def add(self, line):\n\n r, theta = line_helper.get_r_and_theta(line)\n low, high = line_helper.get_low_point_and_high_point(line)\n\n if low[1] < self.ymin:\n self.ymin = low[1]\n self.low_point = low\n if high[1] > self.ymax:\n self.ymin = high[1]\n self.high_point = high\n\n self.radii.append(r)\n self.thetas.append(theta)\n\n self.update_means()",
"def draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n\n left_lane, right_lane = seggregate_and_average_lane_lines(lines)\n\n extrapolated_lines = []\n\n if left_lane.size != 1: # Make sure left_lane is not 'nan'\n left_line = extrapolate_lines(img.shape, left_lane)\n extrapolated_lines.append([left_line])\n\n if right_lane.size != 1: # Make sure left_lane is not 'nan'\n right_line = extrapolate_lines(img.shape, right_lane)\n extrapolated_lines.append([right_line])\n\n for line in extrapolated_lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)",
"def draw_lines(img, lines, color=[255, 0, 0], thickness=10, weights=(0.5, 0.5)): \n img2 = np.zeros_like(img)\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(img2, (x1, y1), (x2, y2), color, thickness)\n return cv2.addWeighted(img, weights[0], img2, weights[1], 0)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
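The document above converts Hough-space coordinates to image-space endpoints: (x0, y0) = (rho·cos θ, rho·sin θ) is the point on the line closest to the origin, and (−sin θ, cos θ) is the direction along the line, so the endpoints are obtained by walking a fixed distance in both directions. A self-contained sketch of the same conversion, assuming OpenCV and NumPy are installed:

```python
# Same (rho, theta) -> endpoint math as the document above, wrapped as a helper.
import numpy as np
import cv2

def hough_line_endpoints(rho, theta, length=1000):
    a, b = np.cos(theta), np.sin(theta)        # unit normal of the line
    x0, y0 = a * rho, b * rho                  # closest point to the origin
    # (-b, a) is the direction vector along the line
    p1 = (int(x0 - length * b), int(y0 + length * a))
    p2 = (int(x0 + length * b), int(y0 - length * a))
    return p1, p2

canvas = np.zeros((400, 400, 3), dtype=np.uint8)
p1, p2 = hough_line_endpoints(rho=200, theta=np.pi / 4)
cv2.line(canvas, p1, p2, (0, 0, 255), 1)       # draw in red (BGR); cv2 clips off-canvas parts
```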
Override the default register function to automagically register submodules at once. | def register(self, app, options, first_registration=False):
if first_registration:
self.submodules = list(app.find_submodules(self.import_name))
super(PgAdminModule, self).register(app, options, first_registration)
for module in self.submodules:
app.register_blueprint(module) | [
"def call_register(root_dir):\n for mod in imported_modules:\n if hasattr(mod, \"register\"):\n mod.register()",
"def register_by_module(module):\n\n # Get a list of all user specified modules attached to module\n module_names = module.__all__\n\n # Add in package preamble\n module_names = [module.__name__ + '.' + mod for mod in module_names]\n\n # Register all of the sub-modules\n register(module_names)\n\n return",
"def __register_self(self):\n\n # Register this mod in the unified mod list.\n if self.register_to and isinstance(self.register_to, list):\n logger.info(f'Self-registering as mod #{len(self.register_to) + 1}...')\n self.register_to.append(self)",
"def register_submodule_factory(module_name, submodule_names):\n\n module = None\n submodules = []\n\n def register():\n nonlocal module\n module = __import__(name=module_name, fromlist=submodule_names)\n submodules[:] = [getattr(module, name) for name in submodule_names]\n for mod in submodules:\n mod.register()\n\n def unregister():\n from sys import modules\n for mod in reversed(submodules):\n mod.unregister()\n name = mod.__name__\n delattr(module, name.partition(\".\")[2])\n del modules[name]\n submodules.clear()\n\n return register, unregister",
"def register(self) -> None:\n if self not in sys.meta_path:\n sys.meta_path.append(self)",
"def register_for_new_hierarchies(self):\n pass",
"def register():\n\n reload_modules_main()\n \"\"\"\n importlib.reload(locals()[utils])\n bpy.utils.register_class(utils.BlenderUtils)\n \"\"\"\n bpy.utils.register_class(add_platonic_solids)\n bpy.utils.register_class(add_tetrahedron)\n\n bpy.utils.register_class(OBJECT_OT_mandelbox)\n bpy.utils.register_class(OBJECT_OT_revolution)\n bpy.utils.register_class(OBJECT_OT_moebius)\n bpy.utils.register_class(OBJECT_OT_genus)\n bpy.utils.register_class(OBJECT_OT_heart)\n bpy.utils.register_class(OBJECT_OT_mandelbulb)\n bpy.utils.register_class(OBJECT_OT_mengersponge)\n bpy.utils.register_class(OBJECT_OT_planet)\n bpy.utils.register_class(OBJECT_OT_simplenoiseterrain)\n bpy.utils.register_class(OBJECT_OT_torus)\n bpy.utils.register_class(OBJECT_OT_sphere)\n\n bpy.utils.register_class(OBJECT_MT_fractals)\n bpy.types.VIEW3D_MT_object_context_menu.append(menu_func)",
"def register_module(self, mod, prefix='', what=None, **routing_info):\n if what is None:\n what = mod.__all__\n for func_identifier in what:\n func = getattr(mod, func_identifier)\n func_name = prefix + func.__name__\n self.register_task(func, func_name, **routing_info)",
"def _register_extensions(self, namespace):\n\n # Register any extension classes for this class.\n extmanager = ExtensionManager(\n 'extensions.classes.{}'.format(namespace),\n propagate_map_exceptions=True\n )\n\n if extmanager.extensions:\n extmanager.map(util.register_extension_class, base=self)\n\n # Register any extension methods for this class.\n extmanager = ExtensionManager(\n 'extensions.methods.{}'.format(namespace),\n propagate_map_exceptions=True\n )\n if extmanager.extensions:\n extmanager.map(util.register_extension_method, base=self)",
"def register(module_names, overwrite=False):\n\n for mod_name in module_names:\n # First, ensure module string directs to something importable\n try:\n inst_module = importlib.import_module(mod_name)\n except Exception:\n # Log then preserve trace and propagate error\n estr = ' '.join(('There was a problem trying to import', mod_name))\n pysat.logger.error(estr)\n raise\n\n # Second, check that module is itself pysat compatible\n validate = itc.InstTestClass()\n\n # Work with test code, create dummy structure to make things work\n class Foo(object):\n pass\n validate.inst_loc = Foo()\n\n # Parse string to get package part and instrument module part\n parse = mod_name.split('.')\n\n # Module name without package\n mod_part = parse[-1]\n\n # The package preamble\n pack_part = parse[:-1]\n\n # Assign package info to Test class\n validate.inst_loc.__name__ = '.'.join(pack_part)\n\n # Run tests\n validate.test_modules_standard(mod_part)\n validate.test_standard_function_presence(mod_part)\n\n # Registry is a dict of dicts with platform, name, and module string.\n # Get the platform and name identifiers from imported module\n platform = inst_module.platform\n name = inst_module.name\n\n # Only register module if not already present. Multiple names are\n # allowed for a single platform\n if platform not in pysat.params['user_modules']:\n # setup `of dict` part of dict of dicts\n pysat.params.data['user_modules'][platform] = {}\n\n # Only register name if it is not present under platform\n if name not in pysat.params['user_modules'][platform]:\n pysat.logger.info('Registering user module {}'.format(mod_name))\n # Add to current user modules structure and store it to disk\n pysat.params.data['user_modules'][platform][name] = mod_name\n store()\n else:\n # Platform/name combination already registered. Check to see if\n # this is a new package or just a redundant assignment\n if mod_name != pysat.params['user_modules'][platform][name]:\n # New assignment, check for overwrite flag\n if not overwrite:\n estr = ' '.join(('An instrument has already been ',\n 'registered for platform:', platform,\n 'and name:', name,\n 'which maps to:', mod_name, 'To assign',\n 'a new module the overwrite flag',\n 'must be enabled.'))\n raise ValueError(estr)\n else:\n # Overwrite with new module information\n pysat.params.data['user_modules'][platform][name] = mod_name\n store()\n\n return",
"def populate_registry():\n # We import the register_classes modules as a direct submodule of labscript_devices.\n # But they cannot all have the same name, so we import them as\n # labscript_devices._register_classes_script_<num> with increasing number.\n module_num = 0\n for devices_dir in LABSCRIPT_DEVICES_DIRS:\n for folder, _, filenames in os.walk(devices_dir):\n if 'register_classes.py' in filenames:\n # The module name is the path to the file, relative to the labscript suite\n # install directory:\n # Open the file using the import machinery, and import it as module_name.\n fp, pathname, desc = imp.find_module('register_classes', [folder])\n module_name = 'labscript_devices._register_classes_%d' % module_num\n _ = imp.load_module(module_name, fp, pathname, desc)\n module_num += 1",
"def register(func):\n print('running register(%s)' % func)\n registry.append(func)\n return func",
"def register(registry:list):\n def decorate(func):\n registry.append(func)\n return func\n return decorate",
"def register_core_plugins():\n\n for cls in _builtin_sys_plugins:\n obj = cls()\n obj.activate()",
"def register_ioc(self):\n IoC.register()",
"def register_module(path):\n from .detail import RegisterPackageFromZip\n from .detail import RegisterPackageFromDir\n from .detail import RegisterModuleFromFile\n from paraview.vtk.vtkParallelCore import vtkPSystemTools\n\n from . import importers\n # plug into Python import machinery to help import pipeline modules\n # seamlessly.\n importers.install_pathfinder()\n\n if vtkPSystemTools.FileIsDirectory(path):\n return RegisterPackageFromDir(path)\n elif path.lower().endswith(\".zip\"):\n return RegisterPackageFromZip(path)\n else:\n return RegisterModuleFromFile(path)",
"def _register(self, hack, recursively=True):\n hack = self._resolve(hack)\n\n if hasattr(hack, '__hacks_on_top_of__'):\n for h in hack.__hacks_on_top_of__:\n if not self._already_registered(h):\n self._register(h)\n\n if hasattr(hack, '__hacks_into__'):\n for p in hack.__hacks_into__:\n self._hacks_into[p].append(hack)\n\n if hasattr(hack, '__hacks_around__'):\n for p in hack.__hacks_around__:\n self._hacks_around[p].append(hack)\n\n if hasattr(hack, '__hacks_up__'):\n for p in hack.__hacks_up__:\n self._hacks_up[p].append(hack)\n\n # if it was an instance or a class, register methods + inner classes\n if recursively:\n if inspect.isclass(hack): # class; instantiate and register\n hack = hack()\n self._register_attributes(hack)\n\n if hack not in (self._prev_hacks_list + self._new_hacks_list):\n self._new_hacks_list.append(hack)",
"def register(self, name, func):\n with self.lock:\n if name not in self.hooks:\n self.hooks[name] = set([func])\n else:\n self.hooks[name].add(func)\n\n # Module is already loaded, call hook right away\n if name in sys.modules:\n func(sys.modules[name])",
"def __init_subclass__(self):\n ex_registry.append(self)\n\n if not hasattr(self, \"extensions\") or len(getattr(self, \"extensions\")) == 0:\n setattr(self, \"extensions\", [\".\" + self.__name__.lower()])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
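The document above depends on a pgAdmin-specific helper (`app.find_submodules`) to register each discovered submodule as a Flask blueprint. A minimal, framework-generic sketch of the same idea using `pkgutil` to discover submodules; the `blueprint` attribute convention is an assumption made for illustration and is not pgAdmin's actual layout.

```python
# Discover submodule blueprints with pkgutil and register them in one pass.
# The "blueprint" attribute name is an assumed convention for this sketch.
import importlib
import pkgutil
from flask import Flask

def register_submodule_blueprints(app: Flask, package_name: str) -> None:
    package = importlib.import_module(package_name)
    for info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(f"{package_name}.{info.name}")
        blueprint = getattr(module, "blueprint", None)
        if blueprint is not None:
            app.register_blueprint(blueprint)
```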
A small addition to pprint that converts any Django model objects to dictionaries so they print prettier. h3. Example usage >>> from toolbox.dprint import dprint >>> from app.models import Dummy >>> dprint(Dummy.objects.all().latest()) | def dprint(object, stream=None, indent=1, width=80, depth=None):
# Catch any singleton Django model object that might get passed in
if getattr(object, '__metaclass__', None):
if object.__metaclass__.__name__ == 'ModelBase':
# Convert it to a dictionary
object = object.__dict__
# Catch any Django QuerySets that might get passed in
elif isinstance(object, QuerySet):
# Convert it to a list of dictionaries
object = [i.__dict__ for i in object]
# Pass everything through pprint in the typical way
printer = PrettyPrinter(stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object) | [
"def pprint(obj):\n print(json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': ')))",
"def pp(object):\n return pprint.PrettyPrinter(indent=2, width=200).pprint(object)",
"def pprint(self, fh=None):\n if fh is None:\n fh = sys.stdout\n for doc in self:\n print(\"id:\", doc[\"id\"], file=fh)\n print(\"tag:\", \", \".join(doc[\"tag\"]), file=fh)\n timestr = util.utc_to_local(doc[\"updated\"]).strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"updated: %s\" % timestr, file=fh)\n print(doc[\"content\"], file=fh)\n print(\"\\n\", file=fh)",
"def pretty_print(self): \n data = json.dumps(self.data, sort_keys=True, indent=4 * ' ')\n print(data)",
"def pprint(self, resource):\n pretty_print = pprint.PrettyPrinter(indent=4)\n if (isinstance(resource, dict) and\n 'object' in resource and\n 'resource' in resource):\n if SOURCE_RE.match(resource['resource']):\n print \"%s (%s bytes)\" % (resource['object']['name'],\n resource['object']['size'])\n elif DATASET_RE.match(resource['resource']):\n print \"%s (%s bytes)\" % (resource['object']['name'],\n resource['object']['size'])\n elif MODEL_RE.match(resource['resource']):\n print \"%s (%s bytes)\" % (resource['object']['name'],\n resource['object']['size'])\n elif PREDICTION_RE.match(resource['resource']):\n objective_field_name = (\n resource['object']['fields']\n [resource['object']['objective_fields'][0]]['name'])\n input_data = dict(\n [[resource['object']['fields'][key]['name'], value]\n for key, value in\n resource['object']['input_data'].items()])\n prediction = (\n resource['object']['prediction']\n [resource['object']['objective_fields'][0]])\n print(\"%s for %s is %s\" % (objective_field_name, input_data,\n prediction))\n else:\n pretty_print.pprint(resource)",
"def printr(obj: Any, *args, **kwargs) -> None:\n\n\tprint(repr(obj), *args, **kwargs)",
"def formatted_flat_dict(model):\n return pretty_print_format(to_dict(model))",
"def pretty_print(item):\n print(prettify(item))",
"def dict_prettyprint(dic, title):\n print title\n for k in dic.keys():\n print \"\\t%s : %s\" % (k, dic[k])\n print \"\"",
"def print_embed(embed: discord.Embed) -> None:\n pprint((embed.title, embed.description, embed.footer, embed.color, embed.fields, embed.author, embed.timestamp))",
"def print(self, indent: int = 2):\n print(json.dumps(self.json, indent=indent, sort_keys=True))",
"def pprint_job(\n daemon : Daemon,\n nopretty : bool = False,\n ):\n from meerschaum.utils.warnings import info\n if not nopretty:\n info(f\"Command for job '{daemon.daemon_id}':\")\n print('\\n' + daemon.label + '\\n')\n else:\n print(daemon.daemon_id)",
"def print_model_map() -> None:\n print(_MODEL_MAP)",
"def prettify(self, object_: Union[list, dict, tuple], indent: int = 4, quiet: bool = False) -> str:\n import pprint\n pretty_printer = pprint.PrettyPrinter(indent=indent)\n pretty_string = pretty_printer.pformat(object=object_)\n if not quiet:\n self.multithread_safe(pretty_string)\n return pretty_string",
"def pretty_print_model(devicemodel):\n PRETTY_PRINT_MODEL = \"\"\"Device Model ID: %(deviceModelId)s\n Project ID: %(projectId)s\n Device Type: %(deviceType)s\"\"\"\n logging.info(PRETTY_PRINT_MODEL % devicemodel)\n if 'traits' in devicemodel:\n for trait in devicemodel['traits']:\n logging.info(' Trait %s' % trait)\n else:\n logging.info('No traits')\n logging.info('') # Newline",
"def enablePrettyPrint(self):\n self._sendCommand(ManagementProtocol.PPRINT, (True,))",
"def dump_instance(instance):\n return json.dumps(model_to_dict(instance), cls=DjangoJSONEncoder)",
"def podx(obj, tag=0, all_members=False, f_intro=0, deep=2, maxd=20):\n od = ObjectDumper()\n od.all_members = all_members\n od.f_introspect = f_intro\n od.deep = deep\n print col_tag(tag)\n print od.dwrap(obj)",
"def pprint(self):\n print('Package:')\n pprint(self.name)\n pprint(self.child_packages)\n pprint(self.child_files)\n for p in self.child_packages:\n p.pprint()\n for f in self.child_files:\n f.pprint()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
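The document above detects Django model instances through `__metaclass__`, which only applies to Python 2-era Django. A hedged sketch of the same helper for modern Django/Python 3 (assuming Django is installed) uses `isinstance` checks and `model_to_dict` instead:

```python
# Modern-Django variant of the dprint helper: convert models/querysets to dicts
# with model_to_dict, then hand everything to PrettyPrinter as before.
from pprint import PrettyPrinter

from django.db.models import Model, QuerySet
from django.forms.models import model_to_dict

def dprint(obj, stream=None, indent=1, width=80, depth=None):
    if isinstance(obj, Model):
        obj = model_to_dict(obj)
    elif isinstance(obj, QuerySet):
        obj = [model_to_dict(item) for item in obj]
    PrettyPrinter(stream=stream, indent=indent, width=width, depth=depth).pprint(obj)
```

Usage mirrors the docstring's example, e.g. `dprint(Dummy.objects.all())`.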
Replace missing info fields with mean values | def replace_mean(df):
df['Glucose'] = df['Glucose'].fillna(df['Glucose'].mean())
df['BloodPressure'] = df['BloodPressure'].fillna(df['BloodPressure'].mean())
df['SkinThickness'] = df['SkinThickness'].fillna(df['SkinThickness'].mean())
df['Insulin'] = df['Insulin'].fillna(df['Insulin'].mean())
df['BMI'] = df['BMI'].fillna(df['BMI'].mean())
return df | [
"def replace_nan(self):\n for column in self.X.columns:\n self.calc_and_fill_mean(column)",
"def fill_empty_values(dataset):\n for f in dataset.get_feature_names():\n if dataset.feature_is_continuous(f):\n f_analysis = dataset.analyse_continuous_feature(f)\n if f_analysis is not None:\n mean = f_analysis[1]\n # Impute missing values with mean\n c = dataset.impute_feature_value(f, mean,\n lambda val, t: val==None)\n if c>0:\n print \"Imputed {0} values for feature {1}\".format(c, f)\n else:\n # Analyse categorical features\n f_analysis = dataset.analyse_categorical_feature(f)\n if f_analysis is not None:\n mode1 = f_analysis[2]\n # Impute missing values with mean\n c = dataset.impute_feature_value(f, mode1,\n lambda val, t: val==None)\n if c>0:\n print \"Imputed {0} values for feature {1}\".format(c, f)",
"def _fill_null(self, df):\n invalid_jobs = df.index[df.isnull().sum(axis=1).gt(0)].values\n print(\"Fill %d missing values with feature mean\" % len(invalid_jobs))\n df.fillna(df.mean(), inplace=True)",
"def impute_missing_values(X):\n col_means=np.nanmean(X,axis=0)\n inds=np.where(np.isnan(X))\n X[inds]=np.take(col_means,inds[1])\n return X",
"def replace_missing_values(data):\n pixel_grid_dim = data.shape\n missing_coords = missing_coordinates(data)\n original_data = np.copy(data) \n for pixel_loc in zip(missing_coords[0], missing_coords[1]):\n neighbours = find_pixel_neighbourhood(pixel_loc, pixel_grid_dim)\n data[pixel_loc] = pixel_average(pixel_loc, neighbours, original_data)\n return data",
"def fill_naCool(data):\n num_var, categ_var = get_info_datasetPrint(data, True, False)\n \n data[num_var] = data[num_var].fillna(data[num_var].mean())\n data[categ_var] = data[categ_var].fillna(data[categ_var].mode()[0]) \n\n print('Number of missing values on your dataset are')\n print()\n print(data.isnull().sum())\n return data",
"def missing_values(games):\n games[\"FT%\"].fillna(games[\"FT%\"].mean(), inplace=True)\n games = games.reset_index()\n return games",
"def imputeNaN(data, newValue):\n\tdata[np.isnan(data)] = newValue; # Se asigno este valor de manera arbitraria para que no marcara un error de validacion por valores muy grandes",
"def replace_nan(input_data, undefined_value, mean=True):\n input_data[input_data == -999] = np.nan\n #the use of np.where provides us with the indices of the nan values\n indices_nan = np.where(np.isnan(input_data))\n if(mean):\n #means is a vector where each element corresponds to the mean of a feature from the input\n means=np.nanmean(input_data, axis=0, keepdims = True)\n #we replace the -999 values with the means of the respective columns\n input_data[indices_nan] = np.take(means, indices_nan[1])\n else:\n medians = np.nanmedian(input_data, axis=0, keepdims = True)\n input_data[indices_nan] = np.take(medians, indices_nan[1])\n return input_data",
"def replace_missing(data):\n\n def fix_missing(data):\n X = data.squareform().copy()\n x, y = np.where(np.isnan(X))\n for i, j in zip(x, y):\n if i != j:\n X[i, j] = (np.nanmean(X[i, :]) + np.nanmean(X[:, j])) / 2\n X = Adjacency(X, matrix_type=data.matrix_type)\n return (X, (x, y))\n\n if data.is_single_matrix:\n X, coord = fix_missing(data)\n else:\n X = []\n coord = []\n for d in data:\n m, c = fix_missing(d)\n X.append(m)\n coord.append(c)\n X = Adjacency(X)\n return (X, coord)",
"def fillMissingValues(self, data, structure):\n for column in structure.values():\n if column == structure[\"class\"]:\n pass\n else:\n if str(column['values'][0]).upper() == \"NUMERIC\":\n self.fillNumericValuesInColumn(data, structure, column['index'])\n else:\n self.fillCategorialValuesInColumn(data, structure, column['index'])",
"def replace_middle_NaNs(filtered_logratios):\n \n for chrom in filtered_logratios:\n for lst1 in NaN_ranges(filtered_logratios[chrom].values):\n if lst1[0]!=0 and lst1[-1] != len(filtered_logratios[chrom].values)-1 and (lst1[-1] - lst1[0]) < 1000:\n avg = (filtered_logratios[chrom].values[lst1[0]-1] + filtered_logratios[chrom].values[lst1[-1]+1]) / 2\n filtered_logratios[chrom].values[lst1[0]:lst1[-1]+1] = avg\n \n return filtered_logratios",
"def avg_incomplete_cols(id, y, X, incomplete_field_value):\r\n\r\n # copy the dataset because we are going to modify them (want to maintaint\r\n # \"functional\" programming-style interface.\r\n X_clean = np.copy(X)\r\n\r\n for idx, col in enumerate(X.T):\r\n X_clean[:, idx][col == incomplete_field_value] = np.mean(col[col != incomplete_field_value])\r\n\r\n id_clean = id\r\n y_clean = y\r\n\r\n return (id_clean, y_clean, X_clean)",
"def process_fares(combined_data):\n # there's one missing fare value - replacing it with the mean.\n combined_data.head(891).Fare.fillna(combined_data.head(891).Fare.mean(), inplace=True)\n\n # Do it separately for test set\n combined_data.iloc[891:].Fare.fillna(combined_data.iloc[891:].Fare.mean(), inplace=True)\n\n status('fare')\n return combined_data",
"def data_cleansing_class_mean(df):\n # Perform null value replacement using mean value\n df = calculate_mean_and_fill(df)\n\n # Output data to a csv\n output_data_cleansing_result(df)\n\n return df",
"def mean_when_defined(data):\n # Get lengths of each row of data\n lens = np.array([len(i) for i in data])\n\n # Mask of valid places in each row\n mask = np.arange(lens.max()) < lens[:, None]\n\n # Setup output array and put elements from data into masked positions\n padded_data = np.ones(mask.shape, dtype=float)*np.nan\n padded_data[mask] = np.concatenate(data)\n\n return np.nanmean(padded_data, axis=0)",
"def _impute_mean(genotypes):\n m = genotypes == -9\n if genotypes.ndim == 1 and any(m):\n genotypes[m] = genotypes[~m].mean()\n else: # genotypes.ndim == 2\n ix = np.nonzero(m)[0]\n if len(ix) > 0:\n a = genotypes.sum(1)\n b = m.sum(1)\n mu = (a + 9*b) / (genotypes.shape[1] - b)\n genotypes[m] = mu[ix]",
"def fill_na_with_mean(self) -> Tuple[pd.DataFrame, np.ndarray]:\n # get arr: all indices of rows with NaN values\n rows_with_nan, _ = np.where(pd.isnull(self.data.loc[:,'q1':'q5']))\n arr = np.unique(rows_with_nan)\n\n # create a series with means by row\n m = self.data.loc[:,'q1':'q5'].mean(axis=1)\n # loop over columns q1:q5, fill NaN with mean\n for quest in self.data.loc[:,'q1':'q5']:\n self.data.loc[:,quest].fillna(m,inplace=True)\n \n return self.data, arr",
"def imputeAge(train, test):\n for df in [train, test]:\n df['Age_Null_Flag'] = df['Age'].apply(lambda x: 1 if pd.isnull(x) else 0)\n train['mean'] = train.groupby(['Name_Title', 'Pclass'])['Age'].transform('mean')\n train['Age'] = train['Age'].fillna(train['mean'])\n merged_data = test.merge(train, on=['Name_Title', 'Pclass'], how='left').drop_duplicates(['PassengerId_x'])\n test['Age'] = np.where(test['Age'].isnull(), merged_data['mean'], test['Age'])\n test['Age'] = test['Age'].fillna(test['Age'].mean())\n del train['mean']\n return train, test"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
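The document above repeats `fillna` once per hard-coded column. An equivalent, more generic sketch (assuming pandas) fills every numeric column's NaNs with that column's mean in a single pass:

```python
# Mean imputation over all numeric columns at once; fillna with a Series of
# column means fills each column with its own mean.
import pandas as pd

def replace_mean(df: pd.DataFrame, columns=None) -> pd.DataFrame:
    if columns is None:
        columns = df.select_dtypes(include="number").columns
    df[columns] = df[columns].fillna(df[columns].mean())
    return df
```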
Find local maximum of image. Starting point is (x, y) and the algorithm looks in all possible directions. Values other than the maximum will be set to 0. | def find_local_maximum(y, x, img, last_max, max_y, max_x):
if x == 147 and y == 156:
cv.imshow(img)
cv.waitKey(0)
last_max = img[y][x]
max_y = y
max_x = x
# * * *
# * x *
# * * *
for spaceing in range(1, 100, 1):
treshhold_area = True
max_has_changed = True
while max_has_changed:
max_has_changed = False
for tmp_y in range(max_y-spaceing, max_y + 2*spaceing + 1, 1):
# check vertical lines of pixels
# out of bounds
if tmp_y < 0 or tmp_y >= img.shape[0] or max_x-spaceing < 0 or max_x+spaceing >= img.shape[1]:
continue
if img[tmp_y][max_x-spaceing] != 0:
treshhold_area = False
if img[tmp_y][max_x-spaceing] > last_max:
last_max = img[tmp_y][max_x-spaceing]
max_y = tmp_y
max_x = max_x-spaceing
max_has_changed = True
break
else:
img[tmp_y][max_x-spaceing] = 0
if img[tmp_y][max_x+spaceing] != 0:
treshhold_area = False
if img[tmp_y][max_x+spaceing] > last_max:
last_max = img[tmp_y][max_x+spaceing]
max_y = tmp_y
max_x = max_x+spaceing
max_has_changed = True
break
else:
img[tmp_y][max_x+spaceing] = 0
for tmp_x in range(max_x-spaceing, max_x+2*spaceing + 1, 1):
# check horizontal lines of pixels
if tmp_x < 0 or tmp_x >= img.shape[1] or max_y-spaceing < 0 or max_y+spaceing >= img.shape[0]:
continue
if img[max_y-spaceing][tmp_x] != 0:
treshhold_area = False
if img[max_y-spaceing][tmp_x] > last_max:
last_max = img[max_y-spaceing][tmp_x]
max_y = max_y-spaceing
max_x = tmp_x
max_has_changed = True
break
else:
img[max_y-spaceing][tmp_x] = 0
if img[max_y+spaceing][tmp_x] != 0:
treshhold_area = False
if img[max_y+spaceing][tmp_x] > last_max:
last_max = img[max_y+spaceing][tmp_x]
max_y = max_y+spaceing
max_x = tmp_x
max_has_changed = True
break
else:
img[max_y+spaceing][tmp_x] = 0
if treshhold_area:
break
return max_y, max_x, last_max | [
"def __franjaMax (img, alto, ancho, x_ini, y_ini, y_fin):\n\t\t\n\tmax=0\n\ty_max_franja=y_ini\n\n\tfor i in range(y_ini, y_fin-alto):\n\t\tvalor_franja = cvSum(cvGetSubRect(img, cvRect(x_ini,i,ancho,alto)))\n\t\tif valor_franja[0]>=max:\n\t\t\tmax = valor_franja[0]\n\t\t\ty_max_franja = i\n\n\treturn x_ini,y_max_franja,ancho,alto",
"def _calc_max(self):\n return np.max(self.get_points()) + 1",
"def find_local_maxima(d):\n\n comp_func = lambda x, y: (y - x >= 0)\n return find_local_minima(d, comp_func=comp_func)",
"def maxvalue(x: ee.Image, scale: Optional[float] = None) -> float:\n\n if scale is None:\n scale = x.geometry().projection().nominalScale()\n\n # Create a clean geometry i.e. geodesic = FALSE\n img_geom_local = x.geometry().getInfo()[\"coordinates\"]\n ee_geom = ee.Geometry.Polygon(\n coords=img_geom_local,\n proj=\"EPSG:4326\",\n evenOdd=True,\n maxError=1.0,\n geodesic=False,\n )\n\n # get max values\n maxval = ee.Image.reduceRegion(\n image=x,\n reducer=ee.Reducer.max(),\n scale=scale,\n geometry=ee_geom,\n bestEffort=True,\n ).getInfo()\n\n return maxval",
"def _max(self):\n mat = self._unweight()\n mat = self._factorize(mat, self.xdef)\n mat = self._rdc_x(mat, self.xdef)\n if 0 not in self.xdef:\n np.place(mat[:, 0], mat[:, 0] == 0, np.NaN)\n ysects = self._by_ysect(mat, self.ydef)\n return np.expand_dims([np.nanmax(mat[:, 0]) for mat in ysects], 1).T",
"def getMaxPixelValue(self) -> retval:\n ...",
"def localmax(x,y,xval,lookwithin=1):\n l,r = bl(x,xval-lookwithin),br(x,xval+lookwithin)\n try:\n return max(y[l:r])\n except ValueError:\n raise ValueError(\"No maximum value found in range, perhaps wrong window region?\")",
"def get_maximum_position(self):\n max_position = (0, 0)\n max_prob = 0.0\n\n for x in range(self.width):\n for y in range(self.height):\n if self[y][x] > max_prob:\n max_prob = self[y][x]\n max_position = (y, x)\n\n return max_position",
"def find_local_max(arr, kernel_width=15):\n arr_convolved = np.convolve(arr, generate_pulse_kernel(kernel_width=kernel_width), mode=\"same\")\n # find local max using scipy.signal.argrelextrema\n ind_local_max = argrelextrema(arr_convolved, np.greater_equal, order=kernel_width, mode='clip')[0]\n logging.info(f\"{len(ind_local_max)} maxima found\")\n # interpolate for local min\n ind_local_max_delta = np.diff(ind_local_max) / 2\n ind_local_min_derived = np.hstack(\n (\n ind_local_max[:-1] - ind_local_max_delta,\n ind_local_max[-1] - ind_local_max_delta[-1],\n ind_local_max[-1] + ind_local_max_delta[-1],\n )\n ).astype(int)\n # calculate SNR for local max\n ind_two_sides = np.array([\n ind_local_min_derived[:-1],\n ind_local_min_derived[1:]\n ])\n ind_two_sides_mask = np.logical_or(ind_two_sides < 0, ind_two_sides > len(arr) - 1)\n ind_two_sides_valid = np.where(ind_two_sides_mask, 0, ind_two_sides) # do not go out of bounds\n # estimate SNR of local max, clip out-of-bounds values\n interp_val_local_max = np.ma.MaskedArray(\n data=arr_convolved[ind_two_sides_valid],\n mask=ind_two_sides_mask,\n ).mean(axis=0)\n assert interp_val_local_max.mask.sum() == 0\n interp_val_local_max = interp_val_local_max.data\n val_local_max = arr_convolved[ind_local_max]\n snr_local_max = val_local_max / interp_val_local_max\n return LocalMax(\n num=len(ind_local_max),\n max_ind=ind_local_max,\n max_val=val_local_max,\n max_val_interp=interp_val_local_max,\n max_snr=snr_local_max,\n min_ind=ind_local_min_derived,\n side_ind=ind_two_sides,\n side_mask=ind_two_sides_mask,\n )",
"def detect_local_maxima(self, arr):\n # https://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array/3689710#3689710\n # arr = np.abs(arr)\n avg = np.average(arr)\n # arr[(arr > avg * 2)] = 0\n arr[(arr < avg)] = 0\n # define an connected neighborhood\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#generate_binary_structure\n # neighborhood = morphology.generate_binary_structure(rank=len(arr.shape), connectivity=2)\n # apply the local minimum filter; all locations of minimum value\n # in their neighborhood are set to 1\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.filters.html#minimum_filter\n neighborhood = np.ones(shape=(3, 3, 3))\n local_max = (ndimage.maximum_filter(arr, footprint=neighborhood, mode='constant') == arr)\n # local_min is a mask that contains the peaks we are\n # looking for, but also the background.\n # In order to isolate the peaks we must remove the background from the mask.\n #\n # we create the mask of the background\n background = (arr == 0)\n #\n # a little technicality: we must erode the background in order to\n # successfully subtract it from local_min, otherwise a line will\n # appear along the background border (artifact of the local minimum filter)\n # http://www.scipy.org/doc/api_docs/SciPy.ndimage.morphology.html#binary_erosion\n eroded_background = morphology.binary_erosion(\n background, structure=neighborhood, border_value=1)\n #\n # we obtain the final mask, containing only peaks,\n # by removing the background from the local_min mask\n detected_maxima = local_max ^ eroded_background\n return np.where(detected_maxima)",
"def get_maxima(self):\n return self._get_min_max(self.curve_df, maxima=True)",
"def max(x):\n\treturn np.max(x)",
"def GetMaxPoint(self):\n ...",
"def regionalmaximum(I):\n h = 1\n rec = morphology.reconstruction(I, I+h)\n maxima = I + h - rec\n return maxima",
"def localMaxima(sequence) -> Tuple[List, List]:\n localmaxima = ([], [])\n if sequence[0] > sequence[1]: # boundary value\n localmaxima[0].append(0)\n localmaxima[1].append(sequence[0])\n for index, center in enumerate(sequence[1:-1], 1):\n before = sequence[index - 1]\n after = sequence[index + 1]\n if before < center > after or numpy.isnan(before) and center > after:\n localmaxima[0].append(index)\n localmaxima[1].append(center)\n if sequence[-1] > sequence[-2]: # boundary value\n localmaxima[0].append(len(sequence) - 1)\n localmaxima[1].append(sequence[-1])\n return localmaxima",
"def max(self):\n #-- output spatial object\n temp = spatial(nlon=self.shape[0],nlat=self.shape[1],\n fill_value=self.fill_value)\n #-- copy dimensions\n temp.lon = self.lon.copy()\n temp.lat = self.lat.copy()\n #-- create output maximum spatial object\n temp.data = np.max(self.data,axis=2)\n temp.mask = np.any(self.mask,axis=2)\n #-- get spacing and dimensions\n temp.update_spacing()\n temp.update_extents()\n temp.update_dimensions()\n #-- update mask\n temp.update_mask()\n return temp",
"def sp_maximum_2D ( fun ,\n xmin , xmax ,\n ymin , ymax , x0 = () , *args ) :\n funmin = lambda x , y , *a : -1.0 * ( float ( fun ( x , y , *a ) ) )\n return sp_minimum_2D ( funmin ,\n xmin , xmax ,\n ymin , ymax , x0 , *args )",
"def find_maximum(self, distribution):\n max_idx = np.argmax(distribution)\n return self.latitudes[max_idx], self.longitudes[max_idx]",
"def find_max_value(self):\n max_val = -1\n if self.nodes:\n for node in self.nodes:\n if max_val < node.value:\n max_val = node.value\n return max_val"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Produces a formatted printout of the given sentence number, comparing the correct POS tags with the predicted POS tags. | def get_sent(self, num):
assert num in self.res.sent_num.values, "Sentence index is out of range for this dataset"
this_sentence = self.res[self.res.sent_num == num]
star_if_true = lambda boolean: '*' if boolean else ''
check_if_true = lambda boolean: '✓' if boolean else ''
printout = pd.DataFrame({'true': self.tagset[this_sentence.y_true],
'predict': self.tagset[this_sentence.y_predict],
'correct?': (this_sentence.y_true == this_sentence.y_predict) \
.map(check_if_true).values,
'oov?': this_sentence.oov.map(star_if_true).values,
'ambiguous?': this_sentence.ambig.map(star_if_true).values},
index = this_sentence.token,)
print(printout) | [
"def show_numbers(text):\n for paragraph in text:\n for sentence in paragraph.split('. '):\n if re.findall(r'\\d+', sentence):\n print sentence",
"def print_intermediate_results(n, tagged, census):\n\n print('\\n')\n print('****** total sentences tested = {} ******'.format(n))\n print('named entity recognition')\n print(' truth names : {}\\n'.format(tagged['truth_names']))\n print(' test names : {}'.format(tagged['test_names']))\n print(' test minus tagged : {}'.format(tagged['test_minus_tagged']))\n print(' tagged minus test : {}'.format(tagged['tagged_minus_test']))\n print(' test names correct : {}\\n'.format(\n tagged['test_names'] - tagged['test_minus_tagged'] - tagged['tagged_minus_test'])\n )\n print(' no names : {}\\n'.format(tagged['no_names']))\n print('census data')\n print(' truth_names : {}'.format(census['truth_names']))\n print(' difference : {}'.format(census['difference']))\n print(' no names : {}'.format(census['no_names']))",
"def print_pos_neg(num):\n \n if num > 0:\n return \"positive\"\n elif num == 0: \n return \"neutral\"\n else:\n return \"negative\"",
"def print_prediction(obs_seq, tag_idx, best_path, fname, trace=False):\n tag_dict = {v: k for k, v in tag_idx.items()}\n with open(fname, 'a') as f:\n for i, e in enumerate(obs_seq):\n f.write('\\t'.join([e, tag_dict[best_path[i]]]))\n f.write('\\n')\n if trace:\n print('\\t'.join([e, tag_dict[best_path[i]]]))\n f.write('\\n') # add a new line for end of sentence",
"def BuildSentencePrintString(self):\n self.matchedsentence.BuildHighlightedPrintString(self.matchedword)",
"def print_toxicity_report(preds, threshold=0.6, classes=None):\n if classes is None:\n classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n\n if sum(preds > threshold) > 0:\n precent_classes = [classes[i] for i in range(len(classes))\n if preds[i] > threshold]\n toxicity_levels = [preds[i] for i in range(len(classes))\n if preds[i] > threshold]\n print('Based on a toxicity threshold of ', str(threshold), 'the sentence',\n 'is predicted to contain toxic language of the following types;')\n for i in range(len(precent_classes)):\n print('\\t- %s, with probability %.2f' % (precent_classes[i], toxicity_levels[i]))\n else:\n print('Based on a toxicity threshold of %.1f, ' % threshold,\n 'the sentence is predicted to contain no toxic language!')",
"def print_model(self):\n for tokens, gram in self.grams.iteritems():\n print('{}'.format(tokens))\n for successor, count in gram.successors.iteritems():\n probability = count / gram.count\n print('\\t{} {:.3f}'.format(successor, probability))",
"def textrank_summary(self, number_of_sentences, sentences=True,\n postprocessing=False, post_sentences=1, post_factor=1.1):\n if sentences:\n # better to start with a new weights list everytime the summary is called \n self.sentence_weights_graph = []\n # creating the graph\n graph = self.create_graph(self.sentences, self.sentences_vect)\n # calculate Pagerank\n rank = nx.pagerank(graph, weight='weight')\n \n # convert the rank of sentences to an array\n for v in rank.values():\n self.sentence_weights_graph.append(v[0][0])\n \n if postprocessing:\n self.sentence_weights_graph = self.post_processing(self.sentence_weights_graph, \n post_sentences, post_factor)\n \n # sorting by Rank value\n order = np.array(self.sentence_weights_graph).argsort()[::-1]\n \n else:\n self.paragraph_weights_tfidf = [] \n graph = self.create_graph(self.paragraphs, self.paragraph_vect)\n rank = nx.pagerank(graph, weight='weight')\n \n for v in rank.values():\n self.paragraph_weights_graph.append(v[0][0])\n \n if postprocessing:\n self.paragraph_weights_graph = self.post_processing(self.paragraph_weights_graph, \n post_sentences, post_factor)\n \n # sorting by Rank value\n order = np.array(self.paragraph_weights_graph).argsort()[::-1] \n \n self.print_summary(order, number_of_sentences, sentences)",
"def tag_sentence(self, sentence):\n fp_lapos = os.path.expanduser('~/cltk_data/multilingual/software/lapos')\n fp_model = os.path.expanduser('~/cltk_data/{0}/model/{1}_models_cltk/taggers/pos'.format(self.language, self.language)) # rel from Lapos dir\n try:\n lapos_command = 'cd {0} && echo \"{1}\" | ./lapos -t -m {2}'.format(fp_lapos, sentence, fp_model)\n p_out = subprocess.check_output(lapos_command,\n shell=True,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n except subprocess.CalledProcessError as cp_err:\n logger.error('Lapos call failed. Check installation.')\n logger.error(sentence)\n print(cp_err)\n raise\n\n # Parse output from Lapos\n # TODO: Make this cleaner/faster\n output_list = p_out.split('\\n')\n output_list_filtered = [l for l in output_list if not l.startswith('loading the models')]\n output_list_filtered = [l for l in output_list_filtered if not l == 'done']\n output_list_filtered = [l for l in output_list_filtered if l]\n\n for line in output_list_filtered:\n word_tags = line.split(' ')\n tagged_sentence = []\n for word_tag in word_tags:\n word, tag = word_tag.split('/')\n word_tag_tuple = (word, tag)\n tagged_sentence.append(word_tag_tuple)\n\n return tagged_sentence",
"def generate_text(style, size=500, prime='The', top_k=5, cuda=False):\r\n \r\n if style == \"poem\":\r\n net = load_model(\"./models/inferno_1000.net\")\r\n elif style == \"positive\":\r\n \r\n net = load_model(\"./models/positive_100.net\")\r\n elif style == \"negative\":\r\n \r\n net = load_model(\"./models/negative_60.net\")\r\n \r\n \r\n if cuda:\r\n net.cuda()\r\n else:\r\n net.cpu()\r\n\r\n net.eval()\r\n \r\n \r\n chars = [ch for ch in prime] \r\n h = net.init_hidden(1)\r\n \r\n for ch in prime:\r\n char, h = net.predict(ch, h, cuda=cuda, top_k=top_k)\r\n\r\n chars.append(char)\r\n \r\n \r\n for ii in range(size):\r\n \r\n char, h = net.predict(chars[-1], h, cuda=cuda, top_k=top_k)\r\n chars.append(char)\r\n\r\n return print(''.join(chars))",
"def print_to_file(self):\n\t\tfor index, char in enumerate(self.reduced_sentence):\n\t\t\tif char==10:\n\t\t\t\tself.file.write(\"\\n\")\n\t\t\telif index+1<len(self.reduced_sentence) and self.reduced_sentence[index+1]==10:\n\t\t\t\tself.file.write(\"%s\" %char)\n\t\t\telse:\n\t\t\t\tself.file.write(\"%s \" %char)",
"def print_incorrect_classified():\n parser = argparse.ArgumentParser(description='Print incorrectly classified sentences')\n\n # Relative paths to PE_PATH\n parser.add_argument('true_labels_path', type=str, help='Path to file with ground-truth relevance labels')\n parser.add_argument('predicted_labels_path', type=str, help='Path to file with predicted relevance labels')\n args = parser.parse_args()\n\n true_labels_path = os.path.join(os.environ['PE_PATH'], args.true_labels_path) \n predicted_labels_path = os.path.join(os.environ['PE_PATH'], args.predicted_labels_path)\n\n true_labels = pd.read_csv(true_labels_path)\n predicted_labels = pd.read_csv(predicted_labels_path)\n\n for index, row in true_labels.iterrows():\n if row['relevant'] != predicted_labels['relevant'][index]:\n print(\"{},{},\\\"{}\\\"\".format(row['relevant'], predicted_labels['relevant'][index], row['sentence']))",
"def print_top_misclassified(test_docs, test_labels, X_test, clf, n):\n ###TODO\n predic_prob = clf.predict_proba(X_test)\n prediction = clf.predict(X_test)\n\n prob = []\n for ind in np.where(prediction!=test_labels)[0]:\n prob.append((ind, predic_prob[ind][prediction[ind]]))\n for ind in sorted(prob, key=lambda x:-x[1])[:n]:\n print('truth= %d predict=%d proba=%.6f\\n' %(test_labels[ind[0]],prediction[ind[0]],ind[1]))\n print(test_docs[ind[0]])\n pass",
"def write_to_text(self):\n results = self.average_score()\n output_txt = open(f\"/home/ubuntu/data/output/{self.file}.txt\", \"a+\")\n for i in range(len(self.prediction_arr)):\n predict = [\n \"Prediction {}: {} (1 means no surgery, -1 means needs surgery)\\n\".format(i,\n str(self.prediction_arr[i])),\n \"Decision Function Score {} of {}\\n\".format(i, self.score_arr[i]),\n \"Score sample {} of {}\\n\\n\".format(i, self.sample_arr[i])]\n output_txt.writelines(predict)\n averages = [f\"Average Prediction {results[0]}\\n\",\n f\"Average Decision Function Score {results[1]}\\n\",\n f\"Average Score Sample {results[2]}\"]\n output_txt.writelines(averages)\n output_txt.close()\n\n return -1 if results[0] < 0.1 else 0",
"def sentence_logprob(self, sentence):\n res = 0\n temp = get_ngrams(sentence, 3)\n for item in temp:\n \tp = self.smoothed_trigram_probability(item)\n \tif p > 0:\n \t\tres = res + math.log2(p)\n return res",
"def page100() :\n print(\"\"\"\nAt the change of tide, Dr. Vivaldi leaves for her\ninterview with the Grand Akpar. Only one Raka is\nleft to guard you. You hand him the gold bracelet.\nTaking it, he smiles broadly. You hurry past him,\nbut another guard is standing outside the agon.\nYou wheel past him and run for it. The surprised\nArchpod yells; you soon hear others chasing you.\nBut in a few moments you reach the shelter of the\ncluster-leaf groves, and as you go deeper into the\nwoods, you are relieved that you no longer hear\nthe Archpods behind you. It's strange,though,\nthat they didn't follow you into the woods.\nSuddenly, you feel a presence. Looking around,\nyou see pairs of bright blue lights staring at you.\nThen you see brown bristly faces, iron fangs, and\nlong curled claws. Kota beasts! The last sounds\nyou hear are their unearthly shrieks of triumph\"\"\", the_end)",
"def get_text_rank_summary(self, doc, limit_sentences=20, verbose = True):\n result = doc._.textrank.summary(limit_sentences=limit_sentences)\n res = ''\n \n for sent in result:\n res+='{} '.format(sent)\n if verbose:\n print(sent)\n return res",
"def get_pos_tags(data, export=False):\n nlp = spacy.load(\"en_core_web_sm\")\n pos_tags = []\n \n for review in data:\n sentence = nlp(' '.join(review))\n tags = [token.pos_ for token in sentence]\n pos_tags.append(tags)\n \n tokens = [\"ADJ\", \"ADP\", \"ADV\", \"AUX\", \"CONJ\", \"CCONJ\", \"DET\",\n \"INTJ\", \"NOUN\", \"NUM\", \"PART\", \"PRON\", \"PROPN\", \"PUNCT\",\n \"SCONJ\", \"SYM\", \"VERB\", \"X\", \"EOL\", \"SPACE\"]\n token2id = {token: i for i, token in enumerate(tokens)}\n \n tmp_list = np.zeros((len(data), len(tokens)))\n \n for idx, tags in enumerate(pos_tags):\n for tag in tags:\n tmp_list[idx, token2id[tag]] += 1\n \n if export:\n with open('../data/pos_count.txt','w') as pc:\n for line in tmp_list:\n #line = line.replace(\"[\", \"\")\n #line = line.replace(\"]\", \"\")\n #pc.write(str(line)+'\\n')\n pc.write(', '.join(map(repr, line))+'\\n')\n \n return tmp_list",
"def emit_tagged_file(file_path, model, sentences):\n with open(file_path, \"w\") as f:\n for sentence in sentences:\n for (segment, tag) in sentence:\n print(segment + '\\t' + model['tags'][tag], file=f)\n print(\"\", file=f)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Return the top guesses for this page | def question_top_guesses(text, deep, guess_connection, id, page, num_guesses=4):
c = guess_connection.cursor()
command = ('select page from guesses where sentence = 2 and token = 0 and question = %i ' +
'order by score desc limit %i') % (id, num_guesses+1)
c.execute(command)
choices = set([page])
for ii, in c:
if len(choices) < num_guesses and not ii in choices:
choices.add(ii)
# If we don't have enough guesses, generate more
new_guesses = deep.text_guess(text)
# sort the guesses and add them
for guess, score in sorted(new_guesses.items(), key=operator.itemgetter(1), reverse=True):
if len(choices) < num_guesses and not guess in choices:
choices.add(guess)
return choices | [
"def get_top(self, entries):\n def calc_score(k, v):\n for tag in get_artist_genres(k):\n v += self.genres_scores[tag]\n return v\n\n def get_reason(artist):\n similar_to = []\n for similar_artist, sugg in self.similar.items():\n if artist in sugg:\n similar_to.append(similar_artist)\n return similar_to\n\n sug = [{'artist': k, 'score': round(calc_score(k, v)),\n 'reason': get_reason(k)}\n for k, v in self.scores.items()\n if k not in self.names and k not in self.blacklist]\n\n print(sug)\n top = tuple(sorted(\n sug, key=itemgetter('score'), reverse=True)[:entries])\n return top",
"def top_user_decks(pages):\n top_decks = []\n main_url = \"https://www.hearthpwn.com/\"\n search_url = \"decks?filter-deck-tag=1&filter-show-constructed-only=y&filter-show-standard=1&page=\"\n deck_link_re = re.compile('^\\/decks\\/[0-9].*')\n for i in range(pages):\n raw_html = simple_get(main_url+search_url+str(i))\n if raw_html is not None:\n html = BeautifulSoup(raw_html, 'html.parser')\n top_decks = get_links(html, deck_link_re, top_decks)\n \n else:\n log(\"error: top_user_decks simple_get returned None\")\n log(\"Found {0} user decks over {1} pages\".format(len(top_decks), pages))\n return top_decks",
"def _get_top_games(self):\n _top_games = dict()\n for entry in self._client.games.get_top():\n _top_games[int(entry['game']['id'])] = entry['game']['name']\n logging.debug('>> Found the following games: ' + ', '.join(_top_games.values()))\n return _top_games",
"def _get_bm_top(self, query: List[str]) -> List[List[str]]:\n # sort titles according to score and return indices\n scores = [(score, title) for score, title in zip(self.bm25.get_scores(query), self.corpus)]\n scores = sorted(scores, key=itemgetter(0), reverse=True)\n\n # Return top 2048 for evaluation purpose, cut to half for recommendations to prevent memory errors\n if self.eval:\n try:\n return [title for score, title in scores][:256]\n except IndexError:\n return [title for score, title in scores]\n else:\n try:\n return [title for score, title in scores if score > 0][:1028]\n except IndexError:\n return [title for score, title in scores if score > 0]",
"def get_gt_top_stories(webpage_text):",
"async def games_top(self, ctx):\n async with ctx.typing():\n conn = self.bot.pool\n games = await self.get_current_games()\n if games:\n sql = (\"SELECT player_tag, current_points \"\n \"FROM uw_clan_games \"\n \"WHERE event_id = $1 \"\n \"ORDER BY current_points DESC \"\n \"LIMIT 10\")\n fetch = await conn.fetch(sql, games['games_id'])\n data = []\n for row in fetch:\n player = await self.bot.coc.get_player(row['player_tag'])\n data.append([row['current_points'], f\"{player.name} ({player.clan.name})\"])\n title = \"UW Top Ten for Clan Games\"\n ctx.icon = \"https://cdn.discordapp.com/emojis/635642869750824980.png\"\n p = formats.TablePaginator(ctx, data=data, title=title, page_count=1)\n await p.paginate()",
"def getGuesses(self):\n return self.guesses",
"def _get_top_results(self):\n return Counter(self.pkg_files).most_common(TOP_N)",
"def top_python_questions(url=cached_so_url):\n content = requests.get(url).content\n \n soup = BeautifulSoup(content, 'html.parser')\n \n questions = soup.find_all('div', class_='question-summary')\n \n question_list = []\n \n for q in questions:\n question = q.find('a', class_='question-hyperlink').text\n votes = int(q.find('span', class_='vote-count-post').text)\n views = q.find('div', class_='views').text.strip()\n \n if views.split()[0][-1] == 'k':\n continue\n \n #views = float(views.split()[0][:-1])\n \n question_list.append((question, votes))\n \n return sorted(question_list, key=lambda x: x[1], reverse=True)\n pass",
"def personal_top_three(self) -> int:\n return sorted(self._scores, reverse=True)[:3]",
"def topMatches(prefs,person,n=10,similarity=sim_distance):\n scores=[(similarity(prefs,person,other),other) for other in prefs if other!=person]\n # Sort the list so the highest scores appear at the top\n scores.sort()\n scores.reverse()\n return scores[0:n]\n #return scores",
"def top_general_decks(pages):\n top_decks = []\n main_url = \"https://www.hearthpwn.com/\"\n page_1_url = \"top-decks?page=1&sort=-rating\"\n page_2_url = \"top-decks?page=2&sort=-rating\"\n deck_link_re = re.compile('^\\/top-decks\\/[0-9].*')\n\n for i in range (1, pages+1):\n page_url = \"top-decks?page={0}&sort=-rating\".format(i)\n raw_html = simple_get(main_url+page_url)\n if raw_html is not None:\n html = BeautifulSoup(raw_html, 'html.parser')\n top_decks = get_links(html, deck_link_re, top_decks)\n else:\n log(\"error: top_general_decks simple get returned None on page {0}.\".format(i))\n log(\"Found {0} general decks over {1} pages\".format(len(top_decks), pages))\n\n return top_decks",
"def get_top_n(n, people, points):\n pass # add your code here",
"def get_top_answers(self, num_answers_to_return):\n return sorted(\n self.answers.iteritems(), key=operator.itemgetter(1),\n reverse=True)[:num_answers_to_return]",
"def topAlgs(self,sort=False):\n self._printDict('TopAlg',sort)",
"def get_top(self, num: int=10) -> List[Tuple[str, int]]:\n self.db.execute(\"SELECT discord_id, score FROM players ORDER BY score DESC LIMIT ?;\", (num,))\n return self.db.fetchall()",
"def guess_probable_keysizes(message, top=3):\n # KEYSIZE between 2 and 40 is an educated/suggested guess\n distances = []\n for keysize in range(2, 41):\n local_distances = []\n part1 = message[0:keysize]\n part2 = message[keysize:keysize * 2]\n part3 = message[keysize * 2:keysize * 3]\n part4 = message[keysize * 3:keysize * 4]\n\n local_distances.extend([\n get_hamming_distance(part1, part2),\n get_hamming_distance(part1, part3),\n get_hamming_distance(part1, part4),\n get_hamming_distance(part2, part3),\n get_hamming_distance(part2, part4),\n get_hamming_distance(part3, part4),\n ])\n\n distance = sum(local_distances) / len(local_distances)\n distances.append((keysize, distance / keysize))\n return sorted(distances, key=lambda x: x[1])[:top]",
"def get_top(case='Confirmed', num=10):\n case = case.title()\n data = load_data()\n top = {}\n for country in data[list(data)[-1]]:\n top[country['Country_Region']]=country[case]\n return {k:v for k, v in\n sorted(top.items(), key=lambda x: x[1], reverse=True)[:num]}",
"def top_match(self):\n\n # If no matches return empty list\n if len([x for x in self.matches().keys()]) == 0:\n return []\n\n # get and sort the list of matches previously used\n mtch_lst = [(k, v) for k, v in self.matches().items()]\n srtd = sorted(mtch_lst, reverse=True, key=lambda x: x[1])\n\n # check if there are any ties\n top_score = srtd[0][1]\n return [x[0] for x in srtd if x[1] == top_score]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the last activity date is min date, when an http error occurs during tasks retrieval. | def test_datetime_http_error_tasks(self, mock_url_read, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
urllib.error.HTTPError(None, None, None, None, None)]
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
last_activity_date = planner.datetime('plan_id_xx')
mock_write_json.assert_called_once()
self.assertEqual(last_activity_date, datetime.datetime.min) | [
"def test_datetime_on_error(self):\n self.contents = 'raise'\n self.assertEqual(datetime.datetime.min, self.__report.datetime('url'))",
"def test_datetime_json_error_tasks(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n 'non-json']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n last_activity_date = planner.datetime('plan_id_xx')\n\n mock_write_json.assert_called_once()\n self.assertEqual(last_activity_date, datetime.datetime.min)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], ValueError)",
"def test_improper_date(self):\r\n url = \"http://%s:%s/uniques?d=%s\" % (ip, port, improper_date)\r\n response = requests.get(url)\r\n if response.status_code == 406:\r\n assert True\r\n else:\r\n assert False",
"def test_validate_date_range_failure():\n from FireEyeNX import validate_date_range\n\n fetch_time = '49 hours'\n\n with pytest.raises(ValueError) as e:\n validate_date_range(fetch_time)\n\n assert (\n str(e.value)\n == 'The First fetch time interval should be up to 48 hour as per API limitation.'\n )",
"def test_parse_future_dated(self):\n for article in self.site.articles:\n self.assert_(article.headers['date'] <= datetime.today())",
"def check_if_last_run_reached(last_run: dict[str, date], earliest_fetched_event: dict[str, Any]):\n last_run_time = last_run.get('latest_event_time')\n return last_run_time >= parse_date_string(earliest_fetched_event.get('occurred_date'))",
"def test_deadline():\n task = Task()\n assert task.deadline == datetime.today().strftime(\"%Y-%m-%d\")",
"def test_date_valid_init(generic_task):\n assert generic_task.get_date_valid() == '1970-01-01'",
"def test_date_valid_change(generic_task):\n generic_task.set_date_valid('2018-01-01')\n assert generic_task.get_date_valid() == '2018-01-01'",
"def test_device_readings_get_past_dates(self):\n\n #When we make valid request with a startdate > 10\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid), data=\n json.dumps({\n 'start': 10,\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive five values\n self.assertEqual(len(json.loads(request.data)), 5)\n\n #When we make valid request with a startdate < 40\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid), data=\n json.dumps({\n 'end': 40,\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive five values\n self.assertEqual(len(json.loads(request.data)), 5)\n\n #When we make valid request with startdate >= 10 and enddate <= 20\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid), data=\n json.dumps({\n 'start': 10,\n 'end': 20\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive two values\n self.assertEqual(len(json.loads(request.data)), 2)\n\n #When we make valid request with dates in the future\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid),\n data=json.dumps({\n 'start': 10000000,\n 'end': 20000000\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive an empty dict\n self.assertFalse(json.loads(request.data))",
"def test_invalid_date(self):\n with pytest.raises(ValueError):\n model.Report(case='19-123456', date=datetime.date.min)",
"def test_week_last_completed(self):\n pass",
"def test_nr_of_over_due_actions_invalid_time(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"non-value\": [{\"completedDateTime\": null, '\n '\"createdDateTime\":\"2018-02-28T11:01:08.8386828Z\",\"assignments\": {}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError)",
"def test_Bridge_getNetworkstatusLastPublished(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n\n published = self.bridge.getNetworkstatusLastPublished()\n self.assertIsNotNone(published)\n self.assertIsInstance(published, datetime.datetime)\n self.assertEqual(str(published), '2014-12-22 21:51:27')",
"def test_invalid_start(self):\n\n timerange = '@timestamp__range={\"gte\":\"%s\"}' % \\\n self.invalid_start\n interval = \"interval=%s\" % self.valid_interval\n\n for entry in self.URLS_START:\n url = \"%s?%s&%s\" % (entry, timerange, interval)\n url = url.replace('\"', '%22')\n self._assert_bad_request(urllib.quote(url))",
"def test_last_ot(self):\n onboarding_device = OnboardingDevice.objects.get(device=self.device)\n self.assertEqual(onboarding_device.last_ot, self.failed_task2)",
"def recent_failure(self):\n return timezone.now() < self.timestamp + timedelta(minutes=BB_BLOCK_INTERVAL)",
"def _max_time_expired_error(exc):\n return isinstance(exc, pymongo.errors.OperationFailure) and exc.code == 50",
"def test_error_insertSleepEntry_missing_data(self):\n method = 'insertSleepEntry'\n dateTo = datetime.utcnow()\n dateFrom = dateTo - timedelta(hours=6) - timedelta(minutes=27)\n\n data1 = {'token':self.token,\n 'dateTo': dateTo,\n 'nap': False}\n resp = requests.post(self.url + method, data1)\n self.assertNotEqual(resp.status_code, 200)\n\n dateTo2 = datetime.now()\n dateFrom2 = dateTo2 - timedelta(hours=6) - timedelta(minutes=27)\n data2 = {'token': \"no token\",\n 'dateFrom': dateFrom2,\n 'dateTo': dateTo2,\n 'nap': False}\n resp = requests.post(self.url + method, data2)\n self.assertNotEqual(resp.status_code, 200)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the last activity date is min date, when invalid json retrieved during tasks retrieval. | def test_datetime_json_error_tasks(self, mock_url_read, mock_error, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
'non-json']
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
last_activity_date = planner.datetime('plan_id_xx')
mock_write_json.assert_called_once()
self.assertEqual(last_activity_date, datetime.datetime.min)
self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])
self.assertIsInstance(mock_error.call_args_list[0][0][1], ValueError) | [
"def test_datetime_http_error_tasks(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n last_activity_date = planner.datetime('plan_id_xx')\n\n mock_write_json.assert_called_once()\n self.assertEqual(last_activity_date, datetime.datetime.min)",
"def test_get_nearest_future_key_date_past_dates_only(self):\n with open(f\"{project_root}/test_data/collection_exercise/closest_past_collection_exercise.json\") as json_data:\n collection_exercise = json.load(json_data)\n\n expected_output = {}\n output = get_nearest_future_key_date(collection_exercise[\"events\"])\n self.assertEqual(output, expected_output)",
"def test_datetime_on_error(self):\n self.contents = 'raise'\n self.assertEqual(datetime.datetime.min, self.__report.datetime('url'))",
"def test_get_current_collection_exercise_past_dates_only(self):\n with open(f\"{project_root}/test_data/collection_exercise/only_past_collection_exercises.json\") as json_data:\n collection_exercise_list = json.load(json_data)\n\n expected_output = {}\n\n output = get_current_collection_exercise(collection_exercise_list)\n self.assertEqual(output, expected_output)",
"def test_date_valid_init(generic_task):\n assert generic_task.get_date_valid() == '1970-01-01'",
"def test_parse_future_dated(self):\n for article in self.site.articles:\n self.assert_(article.headers['date'] <= datetime.today())",
"def test_status_start_date_no_history(self, mock_read_json):\n mock_read_json.return_value = COMPACT_HISTORY\n history = CompactHistory('history_file_name.json')\n\n self.assertEqual(datetime.datetime(2013, 1, 1, 0, 0, 0),\n history.status_start_date('Foo', 'red',\n now=lambda: datetime.datetime(2013, 1, 1, 0, 0, 0)))",
"def check_if_last_run_reached(last_run: dict[str, date], earliest_fetched_event: dict[str, Any]):\n last_run_time = last_run.get('latest_event_time')\n return last_run_time >= parse_date_string(earliest_fetched_event.get('occurred_date'))",
"def test_get_current_collection_exercise_future_dates_only(self):\n with open(f\"{project_root}/test_data/collection_exercise/only_future_collection_exercises.json\") as json_data:\n collection_exercise_list = json.load(json_data)\n\n with open(f\"{project_root}/test_data/collection_exercise/closest_future_collection_exercise.json\") as json_data:\n expected_output = json.load(json_data)\n\n output = get_current_collection_exercise(collection_exercise_list)\n self.assertEqual(output, expected_output)",
"def test_invalid_date(self):\n with pytest.raises(ValueError):\n model.Report(case='19-123456', date=datetime.date.min)",
"def test_date_valid_change(generic_task):\n generic_task.set_date_valid('2018-01-01')\n assert generic_task.get_date_valid() == '2018-01-01'",
"def test_status_start_date(self, mock_read_json):\n mock_read_json.return_value = COMPACT_HISTORY\n history = CompactHistory('history_file_name.json')\n\n self.assertEqual(datetime.datetime(DT_3AGO.year, DT_3AGO.month, DT_3AGO.day, 18, 20, 45),\n history.status_start_date('OpenBugsNone', 'red'))",
"def test_api_meetings_create_authenticated_invalid_date(self):\n user = UserFactory()\n other_user = UserFactory()\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.post(\n \"/api/meetings/\",\n {\n \"name\": \"my meeting\",\n \"owner\": str(other_user.id),\n \"start\": \"2022-07-07T09:00:00Z\",\n \"end\": \"2022-07-07T010:00:00Z\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json(),\n {\n \"end\": [\n \"Datetime has wrong format. Use one of these formats instead: \"\n \"YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].\"\n ]\n },\n )",
"def test_device_readings_get_past_dates(self):\n\n #When we make valid request with a startdate > 10\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid), data=\n json.dumps({\n 'start': 10,\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive five values\n self.assertEqual(len(json.loads(request.data)), 5)\n\n #When we make valid request with a startdate < 40\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid), data=\n json.dumps({\n 'end': 40,\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive five values\n self.assertEqual(len(json.loads(request.data)), 5)\n\n #When we make valid request with startdate >= 10 and enddate <= 20\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid), data=\n json.dumps({\n 'start': 10,\n 'end': 20\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive two values\n self.assertEqual(len(json.loads(request.data)), 2)\n\n #When we make valid request with dates in the future\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid),\n data=json.dumps({\n 'start': 10000000,\n 'end': 20000000\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive an empty dict\n self.assertFalse(json.loads(request.data))",
"def test_deadline():\n task = Task()\n assert task.deadline == datetime.today().strftime(\"%Y-%m-%d\")",
"def test_due_date_in_past(self):\n time = timezone.now() - datetime.timedelta(days=1)\n past_task = Task(date_due = time.date())\n self.assertEqual(past_task.to_show(), False)",
"def test_get_current_collection_exercise_past_and_future_dates(self):\n with open(\n f\"{project_root}/test_data/collection_exercise/mixed_past_and_future_collection_exercises.json\"\n ) as json_data:\n collection_exercise_list = json.load(json_data)\n\n with open(f\"{project_root}/test_data/collection_exercise/closest_future_collection_exercise.json\") as json_data:\n expected_output = json.load(json_data)\n\n output = get_current_collection_exercise(collection_exercise_list)\n self.assertEqual(output, expected_output)",
"def test_get_current_collection_exercise_duplicate_start_dates(self):\n with open(\n f\"{project_root}/test_data/collection_exercise/multiple_same_start_collection_exercises.json\"\n ) as json_data:\n collection_exercise_list = json.load(json_data)\n\n with open(f\"{project_root}/test_data/collection_exercise/closest_future_collection_exercise.json\") as json_data:\n expected_output = json.load(json_data)\n\n output = get_current_collection_exercise(collection_exercise_list)\n self.assertEqual(output, expected_output)",
"def test_latest(self):\n # Assumes that each day there are at least 50 Tweets about \"trump\".\n yesterday = datetime.now(timezone.utc) - timedelta(days=1)\n query = Query('trump', filter=Query.Filter.LATEST)\n tweets = list(search(query, max_tweets=50))\n self.assertEqual(50, len(tweets))\n for tweet in tweets:\n self.assertLess(yesterday, tweet.created_at)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the number of overdue tasks returns -1 when http error occurs. | def test_nr_of_over_due_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
urllib.error.HTTPError(None, None, None, None, None)]
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')
self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1) | [
"def test_nr_of_over_due_actions_invalid_time(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"non-value\": [{\"completedDateTime\": null, '\n '\"createdDateTime\":\"2018-02-28T11:01:08.8386828Z\",\"assignments\": {}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError)",
"def test_basic_get_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.get(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None",
"def test_status_forcelist_2(self):\n\n url = 'http://localhost:7654/status_code=500'\n \n # start counting the number of requests received\n self.http_server.reset_counter()\n\n res = obstinate.oget(url, o_status_forcelist=['501'],\n o_max_attempts=2)\n\n self.assertEqual(1, self.http_server.counter())",
"def assert_limit_works(client, limit):\n for i in range(limit + 1):\n response = client.get(\"/\")\n assert response.status_code == 200, f\"Response of the request \" \\\n f\"number {i} should be 200\"\n # wait for 0.125 as the original ruby tests waits after making request\n time.sleep(0.125)\n\n for i in range(2):\n response = client.get(\"/\")\n assert response.status_code == 429, f\"Response of the request {limit + 1 + i} \" \\\n f\"should be 429\"\n # wait for 0.125 as the original ruby tests waits after making request\n time.sleep(0.125)",
"def test_nr_of_inactive_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)",
"def test_get_planning_wrong_request(client, truck_sheet_id, order_sheet_id):\r\n for i in range(1, 3):\r\n rv = publish_planning(client, i, i)\r\n assert rv.status_code == 200\r\n\r\n rv = get_planning(client, truck_sheet_id, order_sheet_id)\r\n\r\n assert rv.status_code == 404",
"def test_call_raises_http_error_after_max_retries_when_status_code_in_retry_list(self):\n max_retries = 3\n self.make_retry_call_with_error_code(503, max_retries=max_retries)\n # Check that the request call was made max_retries + 1 times. The +1 is\n # to account for the initial request call.\n self.assertEqual(max_retries + 1, self.session.request.call_count,\n \"Call should have been made 'max_retries' + 1 times\")",
"def test_basic_head_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.head(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None",
"def test_call_raises_http_error_immediately_when_status_code_not_in_retry_list(self):\n self.make_retry_call_with_error_code(404, max_retries=3)\n self.assertEqual(1, self.session.request.call_count,\n \"Request call should have been made only once\")",
"def test_async_requests_manager_num_returns(self):\n workers = [RemoteRLlibActor.remote(sleep_time=0.1) for _ in range(2)]\n workers += [RemoteRLlibActor.remote(sleep_time=5) for _ in range(2)]\n manager = AsyncRequestsManager(\n workers, max_remote_requests_in_flight_per_worker=1\n )\n for _ in range(4):\n manager.call(lambda w: w.task())\n time.sleep(3)\n if not len(manager.get_ready()) == 2:\n raise Exception(\n \"We should return the 2 ready requests in this case from the actors\"\n \" that have shorter tasks\"\n )\n time.sleep(7)\n if not len(manager.get_ready()) == 2:\n raise Exception(\n \"We should return the 2 ready requests in this case from the actors\"\n \" that have longer tasks\"\n )",
"def test_start_too_many_requests(self):\n self.fake_worker.handled_tasks = 1\n self.fake_worker.start()\n self.fake_worker.task.fetch.assert_not_called()\n self.fake_worker.task.execute.assert_not_called()\n self.assertEqual(1, self.fake_worker.handled_tasks)",
"def test_basic_post_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.post(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None",
"def increment_content_failures(self, count: int = 1):",
"def _fail_for_n_calls(self, n, status=400):\n self.num_calls += 1\n if self.num_calls <= n:\n e = EC2ResponseError(status, None)\n e.error_code = 'InvalidInstanceID.NotFound'\n raise e",
"def test_nr_of_over_due_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n 'non-json']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], ValueError)",
"def testShortURLErrors(self):\n # ------------------------------\n # Non-existent short URL.\n # ------------------------------\n url = self.get_url('/test/1234567890')\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 404)\n\n # ------------------------------\n # Malformed short URL.\n # ------------------------------\n url = self.get_url('/test/1')\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 400)\n\n # ------------------------------\n # Expired short URL.\n # ------------------------------\n util._TEST_TIME += constants.SECONDS_PER_DAY\n response = self._RunAsync(self.http_client.fetch, self._url, method='GET')\n self.assertEqual(response.code, 403)\n\n # ------------------------------\n # Unique key cannot be found.\n # ------------------------------\n with mock.patch.object(ShortURL, '_KEY_GEN_TRIES', 0):\n self.assertRaises(TooManyRetriesError,\n self._RunAsync,\n ShortURL.Create,\n self._client,\n group_id='test/abcd',\n timestamp=util._TEST_TIME,\n expires=util._TEST_TIME + constants.SECONDS_PER_DAY,\n arg1=1,\n arg2='foo')",
"def test_process_schedule_client(self):\n error = self.process_schedule_client()\n for err in error: assert err == 0",
"def test_limit_exceeded(silver_client, gold_client):\n for i in range(15):\n assert gold_client.get(\"/\").status_code == 200, f\"Response of the request \" \\\n f\"number {i} should be 200\"\n # wait for 0.125 as the original ruby tests waits after making request\n time.sleep(0.125)\n\n wait_interval()\n\n assert_limit_works(silver_client, limit=10)\n\n wait_until_next_minute()\n\n assert_limit_works(silver_client, limit=10)",
"def test_get_task_status(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the number of overdue tasks returns -1 when tasks json is invalid. | def test_nr_of_over_due_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
'non-json']
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')
self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)
self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])
self.assertIsInstance(mock_error.call_args_list[0][0][1], ValueError) | [
"def test_nr_of_over_due_actions_invalid_time(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"non-value\": [{\"completedDateTime\": null, '\n '\"createdDateTime\":\"2018-02-28T11:01:08.8386828Z\",\"assignments\": {}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError)",
"def test_nr_of_over_due_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)",
"def test_nr_of_over_due_actions_empty_refresh_json(self, mock_url_read, mock_info, mock_write_json, mock_read_json):\n mock_read_json.return_value = ''\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_not_called()\n mock_info.assert_called_once_with(\n 'No refresh token could be loaded. Please, generate one using the script refresh_token_generator.py.')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n mock_url_read.assert_not_called()",
"def test_get_task_res_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_tasks_res = state.num_task_res()\n\n # Assert\n assert num_tasks_res == 0",
"def test_read_incompleted_task_list():\n response = client.get('/task/?completed=false')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_nr_of_over_due_actions_url(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"value\": [{\"completedDateTime\": null, \"createdDateTime\":\"2018-02-28T13:01:08.828Z\", \"id\": \"id_VbPAGNM\", '\n '\"title\": \"Title!\", \"dueDateTime\": \"' + (datetime.datetime.utcnow() - relativedelta(days=7)\n ).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\") + '\",'\n '\"bucketId\": \"1a\", \"assignments\": {\"ecf0xx\": {\"assignedDateTime\": \"2018-02-28T13:01:08.8386828Z\"}}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='https://planner_home/', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.over_due_actions_url('plan_id_xx'),\n [('https://planner_home/Home/Task/id_VbPAGNM', 'Title!', '7 dagen')])",
"def test_past_due(self):\n def assert_past_due_count(expected):\n actual = len(list(model.Person.past_due_records(repo='haiti')))\n assert actual == expected\n\n assert_past_due_count(0)\n set_utcnow_for_test(datetime(2010, 2, 15))\n assert_past_due_count(1)\n set_utcnow_for_test(datetime(2010, 3, 15))\n assert_past_due_count(2)",
"def test_create_invalid_pending_status(client):\n resp = client.post(f\"{URL_PREFIX}/todo\", json={\n \"task\": \"Test sample task\",\n \"is_pending\": \"True\"\n })\n assert 400 == resp.status_code\n json_data = resp.get_json()\n assert \"Incorrect input format\" in json_data[\"error\"]",
"def test_nr_of_inactive_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n 'non-json']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], json.decoder.JSONDecodeError)",
"def test_get_task_status(self):\n pass",
"def num_not_done_tasks(self):\n return Task.objects.filter(labels=self, completed=False).count()",
"def test_num_task_res(self) -> None:\n # Prepare\n state: State = self.state_factory()\n task_0 = create_task_res(producer_node_id=0, anonymous=True, ancestry=[\"1\"])\n task_1 = create_task_res(producer_node_id=0, anonymous=True, ancestry=[\"1\"])\n\n # Store two tasks\n state.store_task_res(task_0)\n state.store_task_res(task_1)\n\n # Execute\n num = state.num_task_res()\n\n # Assert\n assert num == 2",
"def test_completed(self):\n expected = [\n ('scheduled', 0),\n ('in_queue', 0),\n ('started', 0),\n ('completed', 1),\n ('completed_successful', 1),\n ('completed_with_error', 0),\n ]\n\n run = Run.objects.get(pk=1)\n run.enqueue_dts = timezone.now()\n run.start_dts = timezone.now()\n run.return_dts = timezone.now()\n run.return_success = True\n run.save()\n\n for argument, expected in expected:\n json_data = self.get_json(\n '/api/v1/run/?state={0}'.format(argument))\n self.assertEqual(expected, len(json_data['objects']))",
"def test_get_task_ins_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_task_ins = state.num_task_ins()\n\n # Assert\n assert num_task_ins == 0",
"def assert_ok_async_response(response: requests.Response) -> int:\n assert_proper_response(response)\n data = response.json()\n assert data['message'] == ''\n assert len(data['result']) == 1\n return int(data['result']['task_id'])",
"def test_get_planning_wrong_request(client, truck_sheet_id, order_sheet_id):\r\n for i in range(1, 3):\r\n rv = publish_planning(client, i, i)\r\n assert rv.status_code == 200\r\n\r\n rv = get_planning(client, truck_sheet_id, order_sheet_id)\r\n\r\n assert rv.status_code == 404",
"def test_milestone_due_ok(self):\n self.execute('milestone due milestone2 \"%s\"' % self._test_date)\n rv, output = self.execute('milestone list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)",
"def test_nr_of_inactive_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)",
"def test_get_user_list_of_tasks(self, app_client, random_user):\n response = app_client.get('{}/?owner={}'.format(ROOT_URL, random_user['id']),\n headers={})\n assert response.status_code == 200\n\n tasks = response.get_json()\n nbr_of_tasks_in_response = len(tasks) \n assert nbr_of_tasks_in_response == NBR_TEST_TASKS_PER_USER\n assert choice(tasks)['owner_user_id'] == random_user['id']"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the number of overdue tasks returns -1 when refresh token file is empty. | def test_nr_of_over_due_actions_empty_refresh_json(self, mock_url_read, mock_info, mock_write_json, mock_read_json):
mock_read_json.return_value = ''
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_not_called()
mock_info.assert_called_once_with(
'No refresh token could be loaded. Please, generate one using the script refresh_token_generator.py.')
self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)
mock_url_read.assert_not_called() | [
"def test_exhaustion_refresh():\n clock = MockedTime()\n tb = RateLimitTokenBucket('key', 5, 40, clock=clock)\n tb.decrease_tokens(2)\n tb.compute_current_tokens()\n assert tb.current_tokens == 3 \n clock.tick(8)\n tb.compute_current_tokens()\n assert tb.current_tokens == 4",
"def test_nr_of_over_due_actions_invalid_time(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"non-value\": [{\"completedDateTime\": null, '\n '\"createdDateTime\":\"2018-02-28T11:01:08.8386828Z\",\"assignments\": {}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError)",
"def test_nr_of_over_due_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)",
"def test_nr_of_over_due_actions_url(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"value\": [{\"completedDateTime\": null, \"createdDateTime\":\"2018-02-28T13:01:08.828Z\", \"id\": \"id_VbPAGNM\", '\n '\"title\": \"Title!\", \"dueDateTime\": \"' + (datetime.datetime.utcnow() - relativedelta(days=7)\n ).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\") + '\",'\n '\"bucketId\": \"1a\", \"assignments\": {\"ecf0xx\": {\"assignedDateTime\": \"2018-02-28T13:01:08.8386828Z\"}}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='https://planner_home/', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.over_due_actions_url('plan_id_xx'),\n [('https://planner_home/Home/Task/id_VbPAGNM', 'Title!', '7 dagen')])",
"def test_get_task_res_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_tasks_res = state.num_task_res()\n\n # Assert\n assert num_tasks_res == 0",
"def test_get_task_ins_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_task_ins = state.num_task_ins()\n\n # Assert\n assert num_task_ins == 0",
"def test_exhaustion_reset():\n clock = MockedTime()\n tb = RateLimitTokenBucket('key', 5, 40, clock=clock)\n tb.decrease_tokens(2)\n assert tb.current_tokens == 3 \n clock.tick(100)\n tb.compute_current_tokens()\n assert tb.current_tokens == 5",
"def test_nr_of_over_due_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n 'non-json']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], ValueError)",
"def test_nr_of_inactive_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)",
"def test_transaction_exhaustion():\n redis_store.flushall()\n for i in range(0,7):\n transact_request_token(\"general\", 7, 30, 'foo', redis_store)\n assert transact_request_token(\"general\", 7, 30, 'foo', redis_store)[0] == None",
"def test_RefreshTokensView(self):\n\n # Creating the default user\n User.objects.create_user(email='testuser1@gmail.com', password='password')\n\n # Executing all the requests\n for x in self.REQUESTS['RefreshTokenView']['tests']:\n request = self.client.post(\n self.REQUESTS['RefreshTokenView']['route'],\n json.dumps(x['body']),\n content_type='application/json'\n )\n assert request.status_code == x['assert']",
"def testOpenTasksList(self):\n task_prop = {'status': 'Open', 'program': self.gci, 'org': self.org}\n seeder_logic.seed(GCITask, task_prop)\n seeder_logic.seed(GCITask, task_prop)\n response = self.get(self.url)\n self.assertResponseOK(response)\n self.assertTemplatesUsed(response)\n idx = 0\n list_data = self.getListData(self.url, idx)\n self.assertEqual(len(list_data), 2)",
"def test_invalid_tokens(self):\n # TODO: Get a new refresh_token to revoke here instead\n self.authorizer.refresh_token = None\n self.nac.oauth2_revoke_token(self.access_token)\n\n # confirm irrecoverable\n with self.assertRaises(GlobusAPIError) as apiErr:\n self.tc.get_endpoint(GO_EP1_ID)\n self.assertEqual(apiErr.exception.http_status, 400)\n self.assertEqual(apiErr.exception.code, \"Error\")",
"def test_read_incompleted_task_list():\n response = client.get('/task/?completed=false')\n assert response.status_code == 200\n assert response.json() == {}",
"def todo_done_count():\n done_count = 0\n try:\n with open('done.txt') as done_file:\n done_list = done_file.readlines()\n except FileNotFoundError:\n pass\n else:\n done_count = len(done_list)\n return done_count",
"def test_get_task_status(self):\n pass",
"def test_notification_stale_cluster(self, file_list, _):\n url = reverse(\"notification\")\n params = {\"stale_ocp_check\": \"\"}\n response = self.client.get(url, params)\n body = response.json()\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Notification Request Task ID\", body)\n\n url_w_params = reverse(\"notification\") + \"?stale_ocp_check&provider_uuid=dc350f15-ffc7-4fcb-92d7-2a9f1275568e\"\n response = self.client.get(url_w_params)\n body = response.json()\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Notification Request Task ID\", body)",
"def test_future(st: SpaceTime):\n for n in st.nodes:\n assert len(st.node_future[n]) >= 1",
"def checkToken(self):\r\n try:\r\n token_f = open(self.drive_token_file, 'r+')\r\n except FileNotFoundError:\r\n self.refreshToken()\r\n else:\r\n token = token_f.read()\r\n if not token:\r\n self.refreshToken()\r\n return"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the number of overdue tasks returns -1 when the json is invalid. | def test_nr_of_over_due_actions_invalid_time(self, mock_url_read, mock_error, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
'{"non-value": [{"completedDateTime": null, '
'"createdDateTime":"2018-02-28T11:01:08.8386828Z","assignments": {}}]}']
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')
self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)
self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])
self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError) | [
"def test_nr_of_over_due_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n 'non-json']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], ValueError)",
"def test_nr_of_over_due_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)",
"def test_nr_of_inactive_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n 'non-json']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], json.decoder.JSONDecodeError)",
"def test_nr_of_over_due_actions_empty_refresh_json(self, mock_url_read, mock_info, mock_write_json, mock_read_json):\n mock_read_json.return_value = ''\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_not_called()\n mock_info.assert_called_once_with(\n 'No refresh token could be loaded. Please, generate one using the script refresh_token_generator.py.')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n mock_url_read.assert_not_called()",
"def test_nr_of_over_due_actions_url(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"value\": [{\"completedDateTime\": null, \"createdDateTime\":\"2018-02-28T13:01:08.828Z\", \"id\": \"id_VbPAGNM\", '\n '\"title\": \"Title!\", \"dueDateTime\": \"' + (datetime.datetime.utcnow() - relativedelta(days=7)\n ).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\") + '\",'\n '\"bucketId\": \"1a\", \"assignments\": {\"ecf0xx\": {\"assignedDateTime\": \"2018-02-28T13:01:08.8386828Z\"}}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='https://planner_home/', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.over_due_actions_url('plan_id_xx'),\n [('https://planner_home/Home/Task/id_VbPAGNM', 'Title!', '7 dagen')])",
"def test_create_invalid_pending_status(client):\n resp = client.post(f\"{URL_PREFIX}/todo\", json={\n \"task\": \"Test sample task\",\n \"is_pending\": \"True\"\n })\n assert 400 == resp.status_code\n json_data = resp.get_json()\n assert \"Incorrect input format\" in json_data[\"error\"]",
"def test_nr_of_inactive_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)",
"def assert_ok_async_response(response: requests.Response) -> int:\n assert_proper_response(response)\n data = response.json()\n assert data['message'] == ''\n assert len(data['result']) == 1\n return int(data['result']['task_id'])",
"def test_read_incompleted_task_list():\n response = client.get('/task/?completed=false')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_completed(self):\n expected = [\n ('scheduled', 0),\n ('in_queue', 0),\n ('started', 0),\n ('completed', 1),\n ('completed_successful', 1),\n ('completed_with_error', 0),\n ]\n\n run = Run.objects.get(pk=1)\n run.enqueue_dts = timezone.now()\n run.start_dts = timezone.now()\n run.return_dts = timezone.now()\n run.return_success = True\n run.save()\n\n for argument, expected in expected:\n json_data = self.get_json(\n '/api/v1/run/?state={0}'.format(argument))\n self.assertEqual(expected, len(json_data['objects']))",
"def test_past_due(self):\n def assert_past_due_count(expected):\n actual = len(list(model.Person.past_due_records(repo='haiti')))\n assert actual == expected\n\n assert_past_due_count(0)\n set_utcnow_for_test(datetime(2010, 2, 15))\n assert_past_due_count(1)\n set_utcnow_for_test(datetime(2010, 3, 15))\n assert_past_due_count(2)",
"def test_get_planning_wrong_request(client, truck_sheet_id, order_sheet_id):\r\n for i in range(1, 3):\r\n rv = publish_planning(client, i, i)\r\n assert rv.status_code == 200\r\n\r\n rv = get_planning(client, truck_sheet_id, order_sheet_id)\r\n\r\n assert rv.status_code == 404",
"def test_get_task_res_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_tasks_res = state.num_task_res()\n\n # Assert\n assert num_tasks_res == 0",
"def test_guids_invalid_count(app, client, user):\n response = client.get(f\"/guid/mint?count=foobar\")\n assert response.status_code == 400",
"def test_milestone_due_ok(self):\n self.execute('milestone due milestone2 \"%s\"' % self._test_date)\n rv, output = self.execute('milestone list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)",
"def test_create_invalid_data(client):\n resp = client.post(f\"{URL_PREFIX}/todo\", json={\n \"item\": \"Test sample task\",\n \"is_pending\": \"Yes\"\n })\n assert 400 == resp.status_code\n json_data = resp.get_json()\n assert \"Incorrect input format\" in json_data[\"error\"]",
"def test_get_task_status(self):\n pass",
"def test_get_single_todo(client):\n resp = client.get(f\"{URL_PREFIX}/todo/1\")\n assert 200 == resp.status_code\n json_data = resp.get_json()\n assert len(json_data[\"task\"]) != 0\n assert len(json_data[\"is_pending\"]) != 0",
"def test_index_out_of_bounds(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n self.assertRaises(IndexError, lambda: self.plist[self.total])\n self.assertEqual(len(responses.calls), 1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the URLs of overdue tasks match. | def test_nr_of_over_due_actions_url(self, mock_url_read, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
'{"value": [{"completedDateTime": null, "createdDateTime":"2018-02-28T13:01:08.828Z", "id": "id_VbPAGNM", '
'"title": "Title!", "dueDateTime": "' + (datetime.datetime.utcnow() - relativedelta(days=7)
).strftime("%Y-%m-%dT%H:%M:%S.%fZ") + '",'
'"bucketId": "1a", "assignments": {"ecf0xx": {"assignedDateTime": "2018-02-28T13:01:08.8386828Z"}}}]}']
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='https://planner_home/', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')
self.assertEqual(planner.over_due_actions_url('plan_id_xx'),
[('https://planner_home/Home/Task/id_VbPAGNM', 'Title!', '7 dagen')]) | [
"def test_check_url():\n airbnb = Airbnb()\n\n assert airbnb.check_url(\n 'https://www.airbnb.co.uk/rooms/111?guests=2&adults=1')\n\n assert airbnb.check_url('https://www.airbnb.com/rooms/12')\n assert airbnb.check_url('http://www.airbnb.com/rooms/12')\n assert airbnb.check_url('http://airbnb.ru/rooms/12')\n\n assert not airbnb.check_url('http://booking.com')\n assert not airbnb.check_url(\n 'https://www.airbnb.co.uk/rooms/plus/4950937?guests=1&adults=1')",
"def _assert_parsed_urls(self, event_url, urls_to_parse):\n if 'edit' not in event_url:\n\n # checking URLs after event creation/edition\n response = self.client.get(\n event_url\n )\n for url in urls_to_parse:\n if ('http' not in url) and ('https' not in url):\n self.assertIn('href=\"http://%s\"' % url, response.content)\n else:\n self.assertIn('href=\"%s\"' % url, response.content)\n else:\n # edit event and assess there is no HTML code for urls\n response = self.client.get(\n event_url\n )\n\n for url in urls_to_parse:\n self.assertNotIn(\n 'href=\"%s\"' % url,\n response.context['form']['event_description']\n )",
"def test_xml_url_matches(self):\n\n for url in self.__urls:\n request = Request(self.__host)\n response = Response(self.__host)\n response.text = url[\"test\"]\n\n finder = JSONRegexLinkScraper(Options(), QueueItem(request, response))\n requests = finder.get_requests()\n\n if url[\"must_pass\"]:\n self.assertEqual(requests[0].url, url[\"url\"])\n self.assertEqual(len(requests), 1)\n else:\n self.assertEqual(len(requests), 0)",
"def test_nr_of_over_due_actions_invalid_time(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"non-value\": [{\"completedDateTime\": null, '\n '\"createdDateTime\":\"2018-02-28T11:01:08.8386828Z\",\"assignments\": {}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError)",
"def test_common_urls(self):\n urls = (\n (reverse('join'), 200),\n (reverse('login'), 200),\n (reverse('logout'), 302),\n (reverse('password_reset'), 200),\n (reverse('terms-and-conditions'), 200),\n ('/post/post-1/', 200),\n ('/sitemap.xml', 200),\n )\n for url, code in urls:\n print \"Checking path %s\" % url\n response = self.client.get(url)\n self.assertEqual(response.status_code, code)",
"def test_inactive_actions_url(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"value\": [{\"completedDateTime\": null, \"createdDateTime\":\"2018-02-28T13:01:08.8386828Z\", '\n '\"id\": \"id_VbPAGNM\", \"title\": \"Title!\", \"dueDateTime\": null,'\n '\"bucketId\": \"1a\", \"assignments\": {\"ecf0xx\": {\"assignedDateTime\": \"' + (\n datetime.datetime.utcnow() - relativedelta(days=19)).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\") + '\"}}}]}']\n\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='https://planner_home/', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.inactive_actions_url('plan_id_xx'),\n [('https://planner_home/Home/Task/id_VbPAGNM', 'Title!', '19 dagen')])",
"def test_invalid_ad_rep_url(self):\n try:\n self.client.get(reverse('ad-rep-home', args=['name$is+invalid.']))\n self.fail('Invalid ad_rep_url accepted.')\n except NoReverseMatch:\n pass",
"def testShortURLErrors(self):\n # ------------------------------\n # Non-existent short URL.\n # ------------------------------\n url = self.get_url('/test/1234567890')\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 404)\n\n # ------------------------------\n # Malformed short URL.\n # ------------------------------\n url = self.get_url('/test/1')\n response = self._RunAsync(self.http_client.fetch, url, method='GET')\n self.assertEqual(response.code, 400)\n\n # ------------------------------\n # Expired short URL.\n # ------------------------------\n util._TEST_TIME += constants.SECONDS_PER_DAY\n response = self._RunAsync(self.http_client.fetch, self._url, method='GET')\n self.assertEqual(response.code, 403)\n\n # ------------------------------\n # Unique key cannot be found.\n # ------------------------------\n with mock.patch.object(ShortURL, '_KEY_GEN_TRIES', 0):\n self.assertRaises(TooManyRetriesError,\n self._RunAsync,\n ShortURL.Create,\n self._client,\n group_id='test/abcd',\n timestamp=util._TEST_TIME,\n expires=util._TEST_TIME + constants.SECONDS_PER_DAY,\n arg1=1,\n arg2='foo')",
"def test_full_url(self):\n url = \"http://example.com/\"\n self.assertEqual(url, resolve_url(url))",
"def test_nr_of_over_due_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)",
"def test_urls_are_valid():\n for key in eio.DATA_URLS:\n dataset = eio.DATA_URLS[key]\n if not isinstance(dataset, list):\n dataset = [dataset]\n for url, name, kind in dataset:\n r = requests.get(\"http://www.example.com\")\n assert r.status_code == 200",
"def test_exchange_additional_URL_doesnotexist(self):\n log_new_case(\"test_exchange_additional_URL_doesnotexist\")\n\n # Test variables\n testhandle = self.handle_withloc\n inexistent_old = 'http://sodohfasdkfjhanwikfhbawkedfhbawe.foo'\n new = 'http://newfirst.foo'\n\n # Precondition: URL must not be there yet.\n contained = self.inst.is_URL_contained_in_10320LOC(testhandle, inexistent_old)\n self.assertFalse(contained,\n 'Precondition for test failed! The URL should not be present at the start of the test.')\n\n # Run the code to be tested:\n log_start_test_code()\n self.inst.exchange_additional_URL(testhandle, inexistent_old, new)\n log_end_test_code()\n\n # Check desired effects on handle:\n contained = self.inst.is_URL_contained_in_10320LOC(testhandle, new)\n self.assertFalse(contained,\n 'After replacing a nonexistent URL, the replacement was there.')",
"def test_get_nonexistent_task(self, client):\n\n nonexistent_id = '0e4fac17-f367-4807-8c28-8a059a2f82ac'\n url = f'http://127.0.0.1:8000/check/?id={nonexistent_id}'\n response = client.get(url)\n assert response.status_code == 404",
"def test_duplicate_task(self):\n pass",
"def test_read_incompleted_task_list():\n response = client.get('/task/?completed=false')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_get_promotion_urls(self):\n pass",
"def test_reverse_urls(self):\n #\n # Parts Reports\n #\n url = reverse('report_parts')\n pattern = '^/crm_test/reports/parts/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n url = reverse('report_parts_all_time')\n pattern = '^/crm_test/reports/parts_all_time/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n url = reverse('report_parts_by_part_number')\n pattern = '^/crm_test/reports/parts_by_part_number/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n #\n url = reverse('report_parts_by_quarter_by_site')\n pattern = '^/crm_test/reports/report_parts_by_quarter_by_site/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n #\n url = reverse('report_parts_in_life')\n pattern = '^/crm_test/reports/report_parts_in_life/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n #\n url = reverse('report_parts_out_of_life')\n pattern = '^/crm_test/reports/report_parts_out_of_life/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n #\n url = reverse('report_unapproved_rmas')\n\n pattern = '^/crm_test/reports/report_unapproved_rmas/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n #\n url = reverse('report_return_inventory')\n\n pattern = '^/crm_test/reports/report_return_inventory/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n url = reverse('report_customer_rma')\n pattern = '^/crm_test/reports/report_customer_rma/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n\n url = reverse('report_customer_sites')\n pattern = '^/crm_test/reports/report_customer_sites/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n\n url = reverse('report_user_roles')\n pattern = '^/crm_test/reports/report_user_roles/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n\n url = reverse('show_sites_rmas', args=(), kwargs={'id': 1})\n pattern = '^/crm_test/reports/show_sites_rmas/1$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n url = reverse('show_rma', args=(), kwargs={'id': 1})\n pattern = '^/crm_test/reports/show_rma/1$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)\n\n url = reverse('operation_not_allowed')\n pattern = '^/crm_test/reports/operation_not_allowed/$'\n matched = re.search(pattern, url)\n self.assertTrue(matched)",
"def test_add_additional_URL_several_toempty(self):\n log_new_case(\"test_add_additional_URL_several_toempty\")\n\n # Test variables\n testhandle = self.handle_withoutloc\n url1 = 'http://one'\n url2 = 'http://two'\n url3 = 'http://three'\n\n # Run code to be tested:\n log_start_test_code()\n self.inst.add_additional_URL(testhandle, url1, url2, url3)\n log_end_test_code()\n\n # Check desired effects on handle:\n contained1 = self.inst.is_URL_contained_in_10320LOC(testhandle, url1)\n contained2 = self.inst.is_URL_contained_in_10320LOC(testhandle, url2)\n contained3 = self.inst.is_URL_contained_in_10320LOC(testhandle, url3)\n self.assertTrue(contained1,\n 'The first added URL was not added.')\n self.assertTrue(contained2,\n 'The second added URL was not added.')\n self.assertTrue(contained3,\n 'The third added URL was not added.')",
"def test_ad_rep_url_does_not_exist(self):\n response = self.client.get(reverse('ad-rep-home',\n args=['IfAnAdRepPicksThisUrlHeOrSheIsCrazy']))\n self.assertEqual(response.status_code, 404)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the number of inactive tasks returns -1 when an HTTP error occurs. | def test_nr_of_inactive_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
urllib.error.HTTPError(None, None, None, None, None)]
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')
self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1) | [
"def test_get_task_status(self):\n pass",
"def test_status_forcelist_2(self):\n\n url = 'http://localhost:7654/status_code=500'\n \n # start counting the number of requests received\n self.http_server.reset_counter()\n\n res = obstinate.oget(url, o_status_forcelist=['501'],\n o_max_attempts=2)\n\n self.assertEqual(1, self.http_server.counter())",
"def test_basic_get_failure():\n start_time = time.perf_counter()\n\n horde = RequestsStampede.horde.RetryRequest()\n\n response = horde.get(\"https://httpstat.us/500\")\n\n end_time = time.perf_counter()\n elapsed_time = end_time - start_time\n\n # The default retry configuration attempts 5 retries with fibonacci backoff\n # delays. As such, the elapsed time should be greater than 4 seconds:\n # sum([0, 1, 1, 2, 0])\n assert elapsed_time > 4.0\n assert response is None",
"def test_get_task_res_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_tasks_res = state.num_task_res()\n\n # Assert\n assert num_tasks_res == 0",
"def test_async_requests_manager_num_returns(self):\n workers = [RemoteRLlibActor.remote(sleep_time=0.1) for _ in range(2)]\n workers += [RemoteRLlibActor.remote(sleep_time=5) for _ in range(2)]\n manager = AsyncRequestsManager(\n workers, max_remote_requests_in_flight_per_worker=1\n )\n for _ in range(4):\n manager.call(lambda w: w.task())\n time.sleep(3)\n if not len(manager.get_ready()) == 2:\n raise Exception(\n \"We should return the 2 ready requests in this case from the actors\"\n \" that have shorter tasks\"\n )\n time.sleep(7)\n if not len(manager.get_ready()) == 2:\n raise Exception(\n \"We should return the 2 ready requests in this case from the actors\"\n \" that have longer tasks\"\n )",
"def update_error():\n requests[\"error\"] += 1",
"def test_service_unavailable_result(self):\n process_result = process_response(self.resp_service_unavailable)\n self.assertEqual(process_result[\"result\"], 4)",
"def test_start_too_many_requests(self):\n self.fake_worker.handled_tasks = 1\n self.fake_worker.start()\n self.fake_worker.task.fetch.assert_not_called()\n self.fake_worker.task.execute.assert_not_called()\n self.assertEqual(1, self.fake_worker.handled_tasks)",
"def test_status_request(self):\n pass",
"def test_read_incompleted_task_list():\n response = client.get('/task/?completed=false')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_call_raises_http_error_immediately_when_status_code_not_in_retry_list(self):\n self.make_retry_call_with_error_code(404, max_retries=3)\n self.assertEqual(1, self.session.request.call_count,\n \"Request call should have been made only once\")",
"def test_five_failures(self):\n function = aws_service.retry_boto(\n self._fail_for_n_calls,\n r'InvalidInstanceID\\.NotFound',\n initial_sleep_seconds=0.0\n )\n function(5)",
"def can_start_new_task(results, max_count):\n\n awaiting_tasks = 0\n\n for r in results:\n try:\n r.successful()\n except AssertionError:\n awaiting_tasks += 1\n\n return awaiting_tasks < max_count",
"def test_nr_of_over_due_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)",
"def test_get_task_ins_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_task_ins = state.num_task_ins()\n\n # Assert\n assert num_task_ins == 0",
"def test_is_healthy_bad_route():\n client = meilisearch.Client(\"http://wrongurl:1234\", timeout=1)\n response = client.is_healthy()\n assert response is False",
"def test_get_unusual_activity_intraday(self):\n pass",
"def _fail_for_n_calls(self, n, status=400):\n self.num_calls += 1\n if self.num_calls <= n:\n e = EC2ResponseError(status, None)\n e.error_code = 'InvalidInstanceID.NotFound'\n raise e",
"def test_process_get_schedule_server(self):\n error, out = self.process_get_schedule_server()\n for err in error: assert err == 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the number of inactive tasks returns -1 when the json is invalid. | def test_nr_of_inactive_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
'non-json']
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='/home', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')
self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)
self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])
self.assertIsInstance(mock_error.call_args_list[0][0][1], json.decoder.JSONDecodeError) | [
"def test_read_incompleted_task_list():\n response = client.get('/task/?completed=false')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_nr_of_inactive_actions_http_error(self, mock_url_read, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n urllib.error.HTTPError(None, None, None, None, None)]\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_inactive_actions('plan_id_xx'), -1)",
"def test_get_task_status(self):\n pass",
"def test_get_task_res_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_tasks_res = state.num_task_res()\n\n # Assert\n assert num_tasks_res == 0",
"def test_create_invalid_pending_status(client):\n resp = client.post(f\"{URL_PREFIX}/todo\", json={\n \"task\": \"Test sample task\",\n \"is_pending\": \"True\"\n })\n assert 400 == resp.status_code\n json_data = resp.get_json()\n assert \"Incorrect input format\" in json_data[\"error\"]",
"def test_read_completed_task_list():\n response = client.get('/task/?completed=true')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_nr_of_over_due_actions_invalid_time(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n '{\"non-value\": [{\"completedDateTime\": null, '\n '\"createdDateTime\":\"2018-02-28T11:01:08.8386828Z\",\"assignments\": {}}]}']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], KeyError)",
"def task_is_active(task_json: dict) -> bool:\n return (True if task_json.get(\"start\", None)\n and task_json[\"status\"] == \"pending\" else False)",
"def test_nr_of_over_due_actions_invalid_json(self, mock_url_read, mock_error, mock_write_json, mock_read_json):\n mock_url_read.side_effect = [\n '{\"access_token\": \"ey_xx\", \"refresh_token\": \"new_refresh_token\"}',\n 'non-json']\n mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}\n planner = SharepointPlanner(url='/home', client_id='client_id_xx',\n client_secret='client_secret_k=',\n refresh_token_location='file_location_of_token.json')\n\n mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')\n self.assertEqual(planner.nr_of_over_due_actions('plan_id_xx'), -1)\n self.assertEqual('Invalid json retrieved for tasks. Reason: %s.', mock_error.call_args_list[0][0][0])\n self.assertIsInstance(mock_error.call_args_list[0][0][1], ValueError)",
"def test_completed(self):\n expected = [\n ('scheduled', 0),\n ('in_queue', 0),\n ('started', 0),\n ('completed', 1),\n ('completed_successful', 1),\n ('completed_with_error', 0),\n ]\n\n run = Run.objects.get(pk=1)\n run.enqueue_dts = timezone.now()\n run.start_dts = timezone.now()\n run.return_dts = timezone.now()\n run.return_success = True\n run.save()\n\n for argument, expected in expected:\n json_data = self.get_json(\n '/api/v1/run/?state={0}'.format(argument))\n self.assertEqual(expected, len(json_data['objects']))",
"def assert_ok_async_response(response: requests.Response) -> int:\n assert_proper_response(response)\n data = response.json()\n assert data['message'] == ''\n assert len(data['result']) == 1\n return int(data['result']['task_id'])",
"def test_get_task_ins_empty(self) -> None:\n # Prepare\n state = self.state_factory()\n\n # Execute\n num_task_ins = state.num_task_ins()\n\n # Assert\n assert num_task_ins == 0",
"def test_get_checks_in_pending(self):\n filters = {\n 'status': CheckStatus.pending,\n }\n\n auth = self.get_http_authorization_for_user(self._get_authorised_user())\n response = self.client.get(\n reverse('security-check-list'),\n filters,\n format='json',\n HTTP_AUTHORIZATION=auth,\n )\n\n self.assertEqual(response.status_code, http_status.HTTP_200_OK)\n\n response_data = response.json()\n self.assertEqual(\n response_data['count'],\n Check.objects.filter(status=CheckStatus.pending.value).count(),\n )\n for item in response_data['results']:\n self.assertEqual(item['status'], CheckStatus.pending.value)",
"def test_get_problem_statuses(self):\n\n problem_id = self.problem_id\n ids = [problem_id] * 3\n statuses = self.api.get_problem_statuses(ids)\n\n self.assertEqual(len(statuses), len(ids))\n for status in statuses:\n self.assertIsInstance(status, models.ProblemStatus)\n self.verify_problem_status(status, solved=True)",
"def test_no_other_active_entries(self):\r\n response = self.client.get(self.url)\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(len(response.context['others_active_entries']), 0)",
"def test_statusHistoryMining() -> json:\r\n\r\n # Action\r\n status, result = u.statusHistoryMining()\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)",
"def test_read_task_list():\n response = client.get('/task/')\n assert response.status_code == 200\n assert response.json() == {}",
"def get_incomplete(task_number):\n\n while True:\n try:\n user_id = int(input(\"Enter your User Id number: \"))\n break\n except ValueError:\n print(\"Please enter a number.\")\n\n params = {\n \"userId\": user_id,\n \"complete\": \"false\"\n }\n\n incomplete_get = requests.get(task_url, params=params)\n print(f\"Response code for reading incomplete tasks: {incomplete_get.status_code}\")\n pprint(incomplete_get.json())\n return",
"def test_service_unavailable_result(self):\n process_result = process_response(self.resp_service_unavailable)\n self.assertEqual(process_result[\"result\"], 4)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that the task URLs of inactive actions are retrieved correctly. | def test_inactive_actions_url(self, mock_url_read, mock_write_json, mock_read_json):
mock_url_read.side_effect = [
'{"access_token": "ey_xx", "refresh_token": "new_refresh_token"}',
'{"value": [{"completedDateTime": null, "createdDateTime":"2018-02-28T13:01:08.8386828Z", '
'"id": "id_VbPAGNM", "title": "Title!", "dueDateTime": null,'
'"bucketId": "1a", "assignments": {"ecf0xx": {"assignedDateTime": "' + (
datetime.datetime.utcnow() - relativedelta(days=19)).strftime("%Y-%m-%dT%H:%M:%S.%fZ") + '"}}}]}']
mock_read_json.return_value = {'refresh_token': 'refresh_token_content_xx'}
planner = SharepointPlanner(url='https://planner_home/', client_id='client_id_xx',
client_secret='client_secret_k=',
refresh_token_location='file_location_of_token.json')
mock_write_json.assert_called_once_with({'refresh_token': 'new_refresh_token'}, 'file_location_of_token.json')
self.assertEqual(planner.inactive_actions_url('plan_id_xx'),
[('https://planner_home/Home/Task/id_VbPAGNM', 'Title!', '19 dagen')]) | [
"def test_get_task_status(self):\n pass",
"def test_web_task_anonymous_cant_edit(webapp, new_task):\n webapp.homepage()\n for task in webapp.taskboard.tasks():\n assert \"edit\" not in task.options",
"def test_read_incompleted_task_list():\n response = client.get('/task/?completed=false')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_get_task_instances(self):\n pass",
"def test_url_endpoint(self):\n url = url_for('view_activities')\n assert url == '/activities/'",
"def test_get_tasks_for_user_task_list(self):\n pass",
"def test_read_completed_task_list():\n response = client.get('/task/?completed=true')\n assert response.status_code == 200\n assert response.json() == {}",
"def test_get_subtasks_for_task(self):\n pass",
"def test_get_unusual_activity_intraday(self):\n pass",
"def test_api_activities_get(self):\n pass",
"def test_get_tasks_for_project(self):\n pass",
"def test_get_unusual_activity(self):\n pass",
"def test_get_tasks_for_tag(self):\n pass",
"def testOpenTasksList(self):\n task_prop = {'status': 'Open', 'program': self.gci, 'org': self.org}\n seeder_logic.seed(GCITask, task_prop)\n seeder_logic.seed(GCITask, task_prop)\n response = self.get(self.url)\n self.assertResponseOK(response)\n self.assertTemplatesUsed(response)\n idx = 0\n list_data = self.getListData(self.url, idx)\n self.assertEqual(len(list_data), 2)",
"def test_get_nonexistent_task(self, client):\n\n nonexistent_id = '0e4fac17-f367-4807-8c28-8a059a2f82ac'\n url = f'http://127.0.0.1:8000/check/?id={nonexistent_id}'\n response = client.get(url)\n assert response.status_code == 404",
"def test_read_available_resource_actions(self):\n pass",
"def test_no_other_active_entries(self):\r\n response = self.client.get(self.url)\r\n self.assertEqual(response.status_code, 200)\r\n self.assertEqual(len(response.context['others_active_entries']), 0)",
"def test_api_v3_stories_story_public_id_tasks_task_public_id_get(self):\n pass",
"def test_controllertag_filtering(self):\r\n irc_urls = sorted(list(self.all_urls - self.http_only_urls))\r\n generated = sorted(list(self.manifest.get_urls(controllers=['irc'])))\r\n self.assertEquals(generated, irc_urls)\r\n\r\n http_urls = self.all_urls - self.irc_only_urls\r\n self.assertEquals(self.manifest.get_urls(controllers=['http-get']), http_urls)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Merges the first and second halves of an array. | def merge(first_half, second_half, array):
i = 0
j = 0
k = 0
while i < len(first_half) and j < len(second_half):
if first_half[i] < second_half[j]:
array[k] = first_half[i]
i += 1
else:
array[k] = second_half[j]
j += 1
k += 1
while i < len(first_half):
array[k] = first_half[i]
i += 1
k += 1
while j < len(second_half):
array[k] = second_half[j]
j += 1
k += 1 | [
"def merge(array, lo, mid, hi):\n # merge array[lo..mid] and array[mid..hi]\n i = lo\n j = mid + 1\n\n aux = array[:] # copy the array to aux\n\n for k in range(lo, hi+1): # actually we need to change at least 2 times!\n # run out of the left side array\n if i > mid:\n array[k] = aux[j]\n j += 1\n # run out of the right side array\n elif j > hi:\n array[k] = aux[i]\n i += 1\n # put the less one to the array\n elif aux[i] > aux[j]:\n array[k] = aux[j]\n j += 1\n else:\n array[k] = aux[i]\n i += 1\n return array",
"def merge_without_sentinel(array, left, mid, right):\n left_part = array[left: mid + 1]\n right_part = array[mid + 1: right + 1]\n i = 0\n j = 0\n k = left\n left_length = mid - left + 1\n right_length = right - mid\n while i < left_length and j < right_length:\n if left_part[i] <= right_part[j]:\n array[k] = left_part[i]\n k = k + 1\n i = i + 1\n else:\n array[k] = right_part[j]\n k = k + 1\n j = j + 1\n if i < left_length:\n array[k: right + 1] = left_part[i: left_length]\n else:\n array[k: right + 1] = right_part[j: right_length]",
"def merge(left_arr, right_arr):\n\n\tleft_index = 0\n\tright_index = 0\n\n\t# initialize array to store results\n\treturn_arr = []\n\n\t# loop through both arrays\n\twhile left_index < len(left_arr) and right_index < len(right_arr):\n\t\tif left_arr[left_index] < right_arr[right_index]:\n\t\t\treturn_arr.append(left_arr[left_index])\n\t\t\tleft_index += 1\n\t\telse:\n\t\t\treturn_arr.append(right_arr[right_index])\n\t\t\tright_index += 1\n\n\t# finish out whatever remains in the other array\n\twhile left_index < len(left_arr):\n\t\treturn_arr.append(left_arr[left_index])\n\t\tleft_index += 1\n\n\twhile right_index < len(right_arr):\n\t\treturn_arr.append(right_arr[right_index])\n\t\tright_index += 1\n\n\treturn return_arr",
"def _merge(arr, aux, lo, mid, high):\n\n aux[lo:high] = arr[lo:high]\n left_indx = lo\n right_indx = mid\n for k in range(lo, high):\n if left_indx == mid:\n arr[k] = aux[right_indx]\n right_indx += 1\n elif right_indx == high:\n arr[k] = aux[left_indx]\n left_indx += 1\n elif aux[left_indx] <= aux[right_indx]:\n arr[k] = aux[left_indx]\n left_indx += 1\n else:\n arr[k] = aux[right_indx]\n right_indx += 1\n\n return",
"def merge(temp_array1, temp_array2, array):\n # To do\n res = []\n while temp_array1 and temp_array2:\n if temp_array1[0] < temp_array2[0]:\n res.append(temp_array1[0])\n temp_array1.pop(0)\n else:\n res.append(temp_array2[0])\n temp_array2.pop(0)\n res += temp_array1 + temp_array2\n return res",
"def merge(left_array, right_array, original_array):\n left_array_length = len(left_array)\n right_array_length = len(right_array)\n\n i, j, k = 0, 0, 0\n while i < left_array_length and j < right_array_length:\n if left_array[i] < right_array[j]:\n original_array[k] = left_array[i]\n i += 1\n else:\n original_array[k] = right_array[j]\n j += 1\n k += 1\n\n while i < left_array_length:\n original_array[k] = left_array[i]\n i += 1\n k += 1\n\n while j < right_array_length:\n original_array[k] = right_array[j]\n j += 1\n k += 1",
"def _merge(self, array, array1, array2):\n if (isinstance(array, np.ndarray) and isinstance(array1,np.ndarray)\n and isinstance(array2,np.ndarray)):\n \n i = 0\n j = 0\n k = 0\n while i < array1.size and j < array2.size:\n if array1[i] < array2[j]:\n array[k] = array1[i]\n i += 1\n else:\n array[k] = array2[j]\n j += 1\n k += 1\n \n while i < array1.size:\n array[k] = array1[i]\n i += 1\n k += 1\n \n while j < array2.size:\n array[k] = array2[j]\n j += 1 \n k += 1\n return array\n \n else:\n raise ValueError(\"input array must be numpy arrays\")",
"def merge_sort(values):\n if not values or len(values) == 1:\n return\n\n mid = len(values) / 2\n left_half = values[:mid]\n right_half = values[mid:]\n\n merge_sort(left_half)\n merge_sort(right_half)\n\n merge(values, left_half, right_half)",
"def mergeSort2(self, array):\n if isinstance(array,np.ndarray):\n #print(\"Splitting \",array)\n n = array.size\n threshold = 30\n if n <= threshold:\n #print(\"Merging \",array)\n return self.insertionSort(array)\n else:\n size = n // 2\n left = self.mergeSort(array[:size].copy())\n right = self.mergeSort(array[size:].copy())\n array = self._merge(array, left, right)\n return array\n else:\n raise ValueError(\"input must be a numpy array\")",
"def _right_extend(array):\n ones = np.ones((array.shape[0], 1), dtype=array.dtype)\n return np.concatenate((array, ones), axis=1)",
"def merge_arrays(first, second):\n if first.size != second.size:\n msg = \"array sizes not equal, {} != {}\".format(first.size, second.size)\n raise ValueError(msg)\n dtype = np.dtype([('first', first.dtype), ('second', second.dtype)])\n merged = np.empty(first.size, dtype=dtype)\n for name in first.dtype.names:\n merged['first'][name] = first[name]\n for name in second.dtype.names:\n merged['second'][name] = second[name]\n return merged",
"def cat_arrays(arr1, arr2):\n arr3 = arr1[:]\n arr3.extend(arr2[:])\n return arr3",
"def __merge2sorted__(arr1, arr2):\n m, n = len(arr1), len(arr2)\n aux_arr = [None] * (m + n)\n p1 = 0\n p2 = 0\n c = 0\n while p1 < m and p2 < n:\n if arr1[p1] < arr2[p2]:\n aux_arr[c] = arr1[p1]\n p1 += 1\n else:\n aux_arr[c] = arr2[p2]\n p2 += 1\n c += 1\n if p1 == m: # arr1 exhausted\n while p2 < n:\n aux_arr[c] = arr2[p2]\n p2 += 1\n c += 1\n elif p2 == n: # arr2 exhausted\n while p1 < m:\n aux_arr[c] = arr1[p1]\n p1 += 1\n c += 1\n return aux_arr",
"def mergesort(array):\n # check array length less than 2\n if len(array) <= 1:\n return array\n # get mid index and create left, right sublists\n mid = len(array) // 2\n left = array[:mid]\n right = array[mid:]\n # recursively call mergesort to left and right sublists\n left = mergesort(left)\n right = mergesort(right)\n # return the merged left and right sublists\n return merge(left, right)",
"def join_arrays(array):\r\n out = []\r\n for i in array:\r\n for j in i:\r\n out.append(j)\r\n\r\n out.append(None)\r\n return out",
"def mergeSort(self):\n\n\t # 1. Divide problem in smaller problems through recursion\n\t # Recursion stop point\n\t lst = self.array\n\t if len(lst) == 1:\n\t return lst\n\t # Recursion\n\t else: # len(array) != 1\n\t n = len(lst)\n\t middlePoint = n//2\n\n\t leftArray = MergeSort()\n\t leftArray.array = lst[:middlePoint]\n\t \n\t rightArray = MergeSort()\n\t rightArray.array = lst[middlePoint:n]\n\t \n\t left = leftArray.mergeSort()\n\t right = rightArray.mergeSort()\n\n\t # 2. Conquer problem through merge\n\t nL = len(left)\n\t nR = len(right)\n\t n = nL + nR\n\t # Compare elements in both arrays and sort them\n\t i = 0\n\t j = 0\n\t index = 0\n\t mergedArray = []\n\n\t while (i < nL) and (j < nR):\n\t\t\t if left[i] < right[j]:\n\t\t\t mergedArray.append(left[i])\n\t\t\t i += 1\n\t\t\t index += 1\n\t\t\t else: # right[j] < left[i]\n\t\t\t mergedArray.append(right[j])\n\t\t\t j += 1\n\t\t\t index += 1\n\t\t\t\n\t # Some elements are left out in arrayA or arrayB. Let's insert them\n\t while (i < nL):\n\t\t\t mergedArray.append(left[i])\n\t\t\t i += 1\n\t\t\t index += 1\n\n\t while (j < nR):\n\t\t\t mergedArray.append(right[j])\n\t\t\t j += 1\n\t\t\t index += 1\n\n\t return mergedArray",
"def mergesort_iterative(arr):\n s = 1\n while s < len(arr):\n # print(f\"Merging sorted subarrays of size {s}\")\n i = 0\n while i < len(arr):\n # print(f\"merge {arr[i:i+s]} and {arr[i+s:i+2*s]}\")\n arr[i : i + 2 * s] = __merge2sorted__(\n arr[i : i + s], arr[i + s : i + 2 * s]\n )\n i = i + 2 * s\n # print(arr)\n s = s * 2\n return arr",
"def merge(arrayA, arrayB):\n\t nA = len(arrayA)\n\t nB = len(arrayB)\n\t n = nA + nB\n\n\t # Compare elements in both arrays and sort them\n\t i = 0\n\t j = 0\n\t index = 0\n\t mergedArray = []\n\n\t while (i < nA) and (j < nB):\n\t if arrayA[i] < arrayB[j]:\n\t mergedArray.append(arrayA[i])\n\t i += 1\n\t index += 1\n\t else: # arrayB[j] < arrayA[i]\n\t mergedArray.append(arrayB[j])\n\t j += 1\n\t index += 1\n\n\t # Some elements are left out in arrayA or arrayB\n\t # Insert them in mergedArray\n\t while (i < nA):\n\t mergedArray.append(arrayA[i])\n\t i += 1\n\t index += 1\n\n\t while (j < nB):\n\t mergedArray.append(arrayB[j])\n\t j += 1\n\t index += 1\n\n\t return mergedArray",
"def merge(self, nums1, nums2):\n nums1 = nums1[0:len(nums1) - len(nums2)]\n nums1 += nums2\n nums1.sort()\n return nums1"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Saves loss plot to output directory | def save_loss_plot(out_dir, loss, log=False, tag=''):
if log:
filename = os.path.join(out_dir, 'loss_log_plot'+tag+'.pdf')
else:
filename = os.path.join(out_dir, 'loss_plot'+tag+'.pdf')
plt.figure()
if log:
plt.plot(range(len(loss)), np.log(loss), 'r')
else:
plt.plot(range(len(loss)), loss, 'r')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.tight_layout()
plt.savefig(filename)
plt.close() | [
"def plot_save_losses(train_loss, test_loss):\n\t#save the losses\n\tnp.savetxt(f'{output_loc}/Losses_v{version}_c{target_label}.txt', np.array([train_loss, test_loss]), header = 'train_loss test_loss')\n\n\t#plot\n\tplt.plot(np.arange(1, len(train_loss)+1), train_loss, label = 'Train loss')\n\tplt.plot(np.arange(1, len(test_loss)+1), test_loss, label = 'Test loss')\n\n\t#set ylims starting at 0\n\tylims = plt.ylim()\n\tplt.ylim((0, ylims[1]))\n\n\t# plt.yscale('log')\n\n\tplt.xlabel('Epoch')\n\tplt.ylabel('Loss')\n\tplt.title(f'Loss progression of cropping network with cluster label {target_label}')\n\tplt.legend(loc = 'best')\n\tplt.grid(alpha = 0.4)\n\n\tplt.savefig(f'{output_loc}/Cropping_losses_c{target_label}.png', dpi = 300, bbox_inches = 'tight')\n\tplt.close()",
"def plot_loss(items, output_path):\n\n df = pd.DataFrame(items, columns=[\"epoch\", \"batch_id\", \"value\"])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n seaborn.lineplot(x=\"epoch\", y=\"value\", data=df, ax=ax)\n fig.savefig(output_path)\n plt.close(fig)",
"def plot_model_loss(self):\n # All losses\n plt_data = np.stack(self.epoch_losses)\n plt_labels = ['loss', 'inputX', 'inputY', 'inputH', 'outputX', 'outputY']\n for i in range(len(plt_labels)):\n plt.subplot(len(plt_labels),1,i+1)\n plt.plot(np.arange(self.current_epoch+1),\n plt_data[:, i], \n label=plt_labels[i])\n plt.ylabel(plt_labels[i])\n plt.xlabel('# epochs')\n plt.legend()\n plt.savefig(self.save_path + '_Losses')\n plt.clf()\n plt.close()\n \n # Losses on the input coordinates\n fig, ax = plt.subplots()\n ax.plot(plt_data[:, 1], 'g-', linewidth=2, label='inputX loss')\n ax.plot(plt_data[:, 2], 'r-', linewidth=2, label='inputY loss')\n ax.plot()\n ax.legend()\n ax.set_xlim(0, self.epochs)\n ax.set(xlabel='# epochs', ylabel='loss', title='Input Coordinate loss')\n plt.savefig(self.save_path + '_InputCoordLoss')\n plt.close()\n \n # Losses on the output coordinates\n fig, ax = plt.subplots()\n ax.plot(plt_data[:, 4], 'g-', linewidth=2, label='outputX loss')\n ax.plot(plt_data[:, 5], 'r-', linewidth=2, label='outputY loss')\n ax.plot()\n ax.legend()\n ax.set_xlim(0, self.epochs)\n ax.set(xlabel='# epochs', ylabel='loss', title='Output Coordinate loss')\n plt.savefig(self.save_path + '_OutputCoordLoss')\n plt.close()\n \n # Total model loss\n fig2, ax2 = plt.subplots()\n ax2.plot(plt_data[:, 0], 'go-', linewidth=3, label='Model loss')\n ax2.plot()\n ax2.set_xlim(0, self.epochs)\n ax2.set(xlabel='# epochs', ylabel='loss', title='Model loss')\n plt.savefig(self.save_path + '_Loss')\n plt.close()",
"def draw_loss(self):\n if not self.iters or not self.losses:\n return\n\n plt.plot(self.iters, self.losses, '-b')\n plt.xlabel(\"iterations\")\n plt.ylabel(\"loss\")\n\n #plt.legend(loc=\"upper left\")\n plt.title(\"Loss\")\n plt.savefig(\"Loss\" + \".png\")\n\n #plt.show()",
"def plot_loss(models,outdir):\r\n checkdir('gallery')\r\n\r\n for model in models:\r\n print(model)\r\n history=np.load('%s/%s.npy'%(outdir,model), allow_pickle=True).item()\r\n keys=history.keys()\r\n\r\n loss=history.get('loss')\r\n print(np.asarray(loss).shape)\r\n val_loss=history.get('val_loss')\r\n py.yscale(\"log\")\r\n py.plot(loss,label=r'$\\rm training$')\r\n py.plot(val_loss,label=r'$\\rm validation$')\r\n\r\n py.ylabel(r'$\\rm Loss$',size=20)\r\n py.text(0.2,0.8,r'$\\rm %s$'%model,size=20)\r\n py.legend(loc=1,fontsize=20,frameon=False)\r\n py.xlabel(r'$\\rm Epoch$',size=20)\r\n\r\n py.tight_layout()\r\n py.savefig('gallery/%s-loss.pdf'%model)\r\n py.close()",
"def save_loss(epoch, batch, how_many_batches, loss, acc, name):\n file = open('{}/loss_{}.txt'.format(args.folder_name, name), 'a')\n file.write(\n \"[Epoch %d/%d] [Batch %d/%d] [Loss: %f] [Accuracy: %f]\" % (epoch, args.n_epochs, batch, how_many_batches, loss, acc) + \"\\n\")\n file.close()",
"def plot_loss(directory, start):\n plt.figure()\n for path in glob.glob(directory+'val_loss_*'):\n with open(path, 'rb') as f:\n val_loss = pickle.load(f)\n plt.plot(val_loss[start:], color='orange')\n for path in glob.glob(directory+'train_loss_*'):\n with open(path, 'rb') as f:\n train_loss = pickle.load(f)\n plt.plot(train_loss[start:], color='blue') \n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Val', 'Train'], loc='upper left')",
"def save(self, fn):\n plt.imsave(fn, self.display)",
"def savefig(filename):\n cur_fig = plt.gcf()\n\n if 'SAVE_FIGURES' in os.environ:\n filename = os.path.join(TARGET_DIR, filename)\n cur_fig.savefig(filename)\n else:\n plt.show()",
"def save_loss_to_file(self, train_loss: any, val_loss: any):\n # Code for printing to a file\n if self.path:\n loss_file = open(self.loss_file_path, 'a')\n print(str(train_loss) + ',' + str(val_loss), file=loss_file)\n loss_file.close()",
"def PlotToFileName(self) -> str:",
"def PlotToFilePath(self) -> str:",
"def plot_loss(losses):\n plt.figure()\n plt.title(\"Training Loss\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"binary cross-entropy loss\")\n plt.plot(losses)\n plt.show()",
"def save_plots(self):\n plt.figure(self.ax_words)\n plt.title(\"Histogram of the number of words per sample\")\n plt.legend()\n save_path = os.path.join(self.histograms_path, self.name + \"_words.png\")\n plt.savefig(save_path)\n\n plt.figure(self.ax_characters)\n plt.title(\"Histogram of the number of characters per word\")\n plt.legend()\n save_path = os.path.join(self.histograms_path, self.name + \"_characters.png\")\n plt.savefig(save_path)",
"def save(name, g):\n if not os.path.exists(\"graphs//\"):\n os.mkdir(\"graphs//\")\n write_graphml(g, \"graphs//\" + name + \".graphml\")",
"def save_hue(data, filename):\n fig, ax = plt.subplots()\n x = np.linspace(0, 2*np.pi, 201)\n ax.plot(x, data)\n fig.savefig(filename)\n plt.close()",
"def plot_loss(loss_history):\n plt.title('Loss history')\n plt.xlabel('Iteration')\n plt.ylabel('Loss')\n plt.plot(loss_history)\n plt.show()",
"def learning_curve_per_train_steps(Loss_list):\n print(Loss_list)\n fig = plt.figure()\n plt.title('Learning Curve : Diatom Dataset')\n plt.plot(Loss_list)\n plt.yscale('log')\n plt.xlabel('training_steps')\n plt.ylabel('Loss : Cross Entropy')\n fig.savefig('Learning_curve_plot_diatom_per_training_steps.png')",
"def plot_graph(style_loss_list, content_loss_list):\n with open('/scratch/jv1589/Project/perceptual-losses/content_loss.txt', 'w') as f:\n for item in content_loss_list:\n f.write(\"%s\\n\" % item)\n\n with open('/scratch/jv1589/Project/perceptual-losses/style_loss.txt', 'w') as f2:\n for item in style_loss_list:\n f2.write(\"%s\\n\" % item)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns a random subset of the input data, reshaped to (batch_size, num_cells_per_input, num_markers) | def generate_subset(inputs,
num_cells_per_input,
batch_size,
weights=None,
return_indices=False):
num_cells_total = inputs.shape[0]
if weights is not None:
indices = np.random.choice(
num_cells_total,
size=batch_size * num_cells_per_input,
replace=True,
p=weights)
else:
indices = np.random.choice(
num_cells_total,
size=batch_size * num_cells_per_input,
replace=True)
subset = inputs[indices, ]
subset = np.reshape(subset, newshape=(batch_size, num_cells_per_input, -1))
if return_indices:
return subset, indices
else:
return subset | [
"def get_data_subset(dataset, n_samples=1000):\n idcs = np.arange(len(dataset))\n n_samples = min(n_samples, len(dataset))\n np.random.shuffle(idcs) # shuffles inplace\n new_idcs = idcs[:n_samples]\n return torch.utils.data.Subset(dataset, new_idcs)",
"def random_subset(self, perc=0.5):",
"def sample(self, batch_size):\n sample_idxs = self.random.choice(np.arange(int(self.num_samples)), size=(int(batch_size),), replace=False)\n samples = self.dataset[torch.as_tensor(sample_idxs).long()]\n if self.intervention:\n masks = self.convert_masks(sample_idxs)\n regimes = self.regimes[torch.as_tensor(sample_idxs).long()]\n else:\n masks = torch.ones_like(samples)\n regimes = None\n return samples, masks, regimes",
"def get_batch(data, config, num_batch):\r\n\r\n batch_cubes = []\r\n for _ in range(num_batch):\r\n # starting indices\r\n x = random.randint(0, config['x']-config['batchX'])\r\n y = random.randint(0, config['y']-config['batchY'])\r\n z = random.randint(0, config['z']-config['batchZ'])\r\n\r\n curr_data = []\r\n for i in range(config['n_set']):\r\n x_idx = x + config['x']*i\r\n curr_data.append(data[x_idx:x_idx+config['batchX'],y:y+config['batchY'],z:z+config['batchZ']])\r\n\r\n batch_cubes.append(curr_data)\r\n\r\n return np.array(batch_cubes)",
"def sample_batch(self, batch_size=64):\n\n # Retrieve the root.\n root = self.sub_left if (len(self.parents) == 0) else self.parents[0]\n ss = np.random.uniform(0, root.value, batch_size)\n return retrieve_leaf_vec(root, ss)",
"def seed_generator(mask_coords, nr_seeds):\n nr_voxels = mask_coords.shape[0]\n random_indices = np.random.choice(nr_voxels, nr_seeds, replace=True)\n res = np.take(mask_coords, random_indices, axis=0)\n return res",
"def random_pick_subset(X, row_percent, col_percent):\n X = random_pick_rows(X, row_percent)\n\n T_X: sp.csr_matrix = X.transpose()\n T_X = random_pick_rows(T_X, col_percent)\n\n X = T_X.transpose()\n X = remove_zero_rows(X)\n return X",
"def get_random_batch(dataset, chunk_size, batch_size):\n # not very efficient algo but OK in this setup:\n # 1. get BATCH_SIZE random labels, and from each label a respective wav ID\n labels = [random.choice(dataset.keys()) for _ in xrange(batch_size)]\n max_per_class = {cl : len(wavlist)-1 for cl, wavlist in dataset.iteritems()}\n # if CHUNK_SIZE<wav_len, the exact chunk position is also randomized:\n wav_ids = [random.randint(0, max_per_class[l]) for l in labels]\n lengths = [dataset[labels[x]][wav_ids[x]].shape[0]\n for x in xrange(batch_size)]\n start_idxs = [random.randint(0, lengths[x]-chunk_size)\n for x in xrange(batch_size)]\n # now that we know class, id and start_idx for each chunk, collect tensor:\n data = np.stack([dataset[labels[x]][wav_ids[x]][start_idxs[x]:\n start_idxs[x]+chunk_size]\n for x in xrange(batch_size)])\n return data, labels",
"def random_batch_sampling(batch_size, epoch, *argv):\n lengths = [len(arg) for arg in argv]\n assert len(set(lengths))<=1\n size = lengths[0]\n num_batch = epoch * (size // batch_size)\n for _ in range(num_batch):\n rand_ids = np.random.randint(0, size, batch_size)\n yield tuple(arg[rand_ids] for arg in argv)",
"def random_sample_partial_neighborhood (self, x, num_samples): \n\n vnc = self.vertices_no_cut(x)\n samples = []\n for _ in range(num_samples):\n i = random.randint(0, self.n-1)\n j = random.randint(0, self.n-1)\n while x[i] == x[j] and not(any(i in l for l in vnc) or any(j in l for l in vnc)):\n j = random.randint(0, self.n-1)\n y = x.copy()\n y[i], y[j] = y[j], y[i]\n samples.append(y)\n\n return samples",
"def random_subset(indicator_arr, sample_prob):\n subset_arr = (np.random.random(indicator_arr.shape) < sample_prob) & indicator_arr\n return subset_arr",
"def select_random(x):\n def to_float(x):\n return tf.cast(x, tf.float32)\n def to_int(x):\n return tf.cast(x, tf.int64)\n batch_size = tf.shape(x)[0]\n rn = tf.range(batch_size)\n nnz = to_float(tf.count_nonzero(x >= 0, axis=1))\n rnd = tf.random_uniform([batch_size])\n ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)\n return to_int(tf.gather_nd(x, ids))",
"def select_random_chunk(dataset: tf.data.Dataset,\n output_features: Mapping[str, seqio.Feature],\n max_length: Optional[int] = None,\n feature_key: str = 'targets',\n additional_feature_keys: Optional[Sequence[str]] = None,\n passthrough_feature_keys: Optional[\n Sequence[str]] = None,\n sequence_length: Optional[Mapping[str, int]] = None,\n uniform_random_start: bool = False,\n min_length: Optional[int] = None,\n **unused_kwargs) -> tf.data.Dataset:\n\n @seqio.map_over_dataset(num_seeds=1)\n def _my_fn(x, seed):\n return single_example_select_random_chunk(\n x,\n seed,\n output_features=output_features,\n max_length=max_length,\n feature_key=feature_key,\n additional_feature_keys=additional_feature_keys,\n passthrough_feature_keys=passthrough_feature_keys,\n sequence_length=sequence_length,\n uniform_random_start=uniform_random_start,\n min_length=min_length)\n\n # Filter empty examples.\n dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))\n return _my_fn(dataset)",
"def random_sample(data, N):\n rng = np.random.default_rng()\n def func(arr):\n return rng.choice(arr, N, replace=False)\n\n result = xr.apply_ufunc(\n func,\n data.chunk(dict(n=-1)),\n input_core_dims=[['n', 'lev']],\n output_core_dims=[['M', 'lev']],\n dask='parallelized',\n output_dtypes=('float64',),\n vectorize=True,\n dask_gufunc_kwargs={\n 'output_sizes' : {'M' : N}\n }\n \n )\n \n result = result.stack(N=('M', 'time'))\n result = result.transpose('N', 'lev')\n return result",
"def get_sample(data, labels, cell_type):\n\n # Subset the cell types\n cells = data[labels == cell_type]\n cells_n = cells.shape[0]\n \n # Get a random sample\n random_int = np.random.choice(cells_n, size=1, replace=False)\n cell = cells[random_int, :]\n\n # Return sample\n return cell",
"def randomSubset(self, subsetSize):\n\n if subsetSize <= self.numberOfClusters:\n raise TypeError(\"subsetSize must be strictly greater than the numberOfClusters\")\n\n indexes = random.sample(range(self.dataset.shape[0]), subsetSize)\n dataset = self.dataset[indexes,:]\n if self.weights is None:\n weights = None\n else:\n weights = self.weights[indexes]\n\n return dataset, weights",
"def randomGrid(N):\n return numpy.random.choice(vals, N*N, p=[0.2, 0.8]).reshape(N, N)",
"def sample_random_minibatch(\n self, k: int\n ) -> List[Tuple[np.ndarray, np.ndarray, int, float]]:\n k = min(k, len(self.memory))\n return random.sample(self.memory, k)",
"def slice_inputs(inputs, answer_len, pos_mask, seed=None):\n ans_start_pos = tf.cast(tf.where(pos_mask)[0][0], tf.int32)\n inputs_len = tf.shape(inputs)[0]\n start_range_min = tf.maximum(\n 0, ans_start_pos - (max_input_tokens - answer_len))\n start_range_max = tf.minimum(ans_start_pos,\n inputs_len - max_input_tokens) + 1\n\n start_pos = tf.random.stateless_uniform(\n [],\n minval=start_range_min,\n maxval=start_range_max,\n dtype=tf.int32,\n seed=seed)\n return inputs[start_pos:start_pos + max_input_tokens]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
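A minimal usage sketch for the generate_subset entry above, assuming the function is in scope and using small synthetic NumPy data; the sizes and the uniform weights are illustrative only.

import numpy as np

# Synthetic single-cell-style matrix: 1000 cells x 10 markers (placeholder sizes).
num_cells_total, num_markers = 1000, 10
inputs = np.random.rand(num_cells_total, num_markers)

# Uniform sampling with replacement (weights=None).
subset = generate_subset(inputs, num_cells_per_input=50, batch_size=4)
print(subset.shape)  # (4, 50, 10)

# Weighted sampling; weights must be per-cell probabilities summing to 1.
weights = np.ones(num_cells_total) / num_cells_total
subset, indices = generate_subset(
    inputs, num_cells_per_input=50, batch_size=4,
    weights=weights, return_indices=True)
print(indices.shape)  # (200,) == batch_size * num_cells_per_input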
Extracts an array of face features from input images using a VGGFace model. | def extract_face_feature(self, input_face, save = False):
model = self.config['model']
layer = self.config['layer']
# build model to specify the layer of feature extraction
vggface = VGGFace(model = model, input_shape=(224, 224, 3))
vggface = Model(vggface.input, vggface.get_layer(layer).output)
# extract face feature
face_array = {}
# for single image
if os.path.isfile(input_face):
img = image.load_img(input_face, target_size=(224, 224))
res = vggface.predict(process_image(img, model))[0,:].reshape(-1)
face_array[input_face.split('/')[-1]] = res
# for image directory
if os.path.isdir(input_face):
for i in tqdm(os.listdir(input_face)):
img = image.load_img('%s/%s'%(input_face,i), target_size=(224, 224))
res = vggface.predict(process_image(img, model))[0,:].reshape(-1)
face_array[i] = res
if save:
save_object(face_array, self.config['face_array_path'])
return face_array | [
"def faces(self):\n return self.face.values()",
"def faces(self):\n return [self.face(i) for i in range(self.dimension() + 1)]",
"def vgg19_feature_extraction(dataset_path):\n base_model = VGG19(weights='imagenet')\n model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_pool').output)\n\n # Get features of all images using VGG19\n X = []\n Y = []\n model_cnt = 0\n model_index = {}\n img_list = os.listdir(dataset_path)\n img_list.sort()\n temp_cnt = 0\n for img_file in img_list:\n if temp_cnt % 100 == 0:\n print(\"VGG19 \", round(temp_cnt/len(img_list)*100,3), \"% complete\", end='\\r')\n temp_cnt = temp_cnt + 1\n img_path = dataset_path + '/' + img_file\n img = image.load_img(img_path, target_size=(224,224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n block5_pool_features = model.predict(x).flatten()\n\n X.append(block5_pool_features) \n if 'aug' in dataset_path:\n model_id = img_file.split('_')[1]\n else:\n model_id = img_file.split('_')[0]\n\n if model_id in model_index:\n Y.append(model_index[model_id])\n else:\n model_index[model_id] = model_cnt\n Y.append(model_cnt)\n model_cnt = model_cnt + 1 \n\n X = np.asarray(X)\n Y_lab = np.asarray(Y)\n Y_cat = to_categorical(Y_lab)\n\n return X, Y_lab, Y_cat",
"def extract_wav2vec_feats(audio_path, model):\n x = load_audio(audio_path, device=get_device(model))\n with torch.no_grad():\n z = model.feature_extractor(x)\n c = model.feature_aggregator(z)\n return c.cpu().numpy()",
"def get_faces(self):\n\n return self.faces",
"def as_array(self):\n def _reader(frame):\n # Each frame is assumed to be an image here. We make it a single frame\n # video here by expanding its dimensions. This way it can be used with\n # the vstack_features function.\n return frame[1][None, ...]\n return bob.bio.base.vstack_features(_reader, self._frames, same_size=True)",
"def load_face(path):\n if not path.endswith(FaceStore.EXTENSION):\n raise ValueError(\"Wrong file extension for \" + str(path))\n name = os.path.splitext(os.path.basename(path))[0]\n representation = np.fromfile(path, np.float32).reshape(-1, OpenFace.FEATURE_DIM)\n return name, representation",
"def face_detect(face_detector, img):\n test_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n grayed_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n face_coordinates = face_detector.detectMultiScale(grayed_img, 1.1, 5)\n return grayed_img, face_coordinates",
"def extract_faces(img, boxes):\r\n num_faces = boxes.shape[0]\r\n faces = []\r\n \r\n for i in range(num_faces):\r\n ymin, xmin, ymax, xmax = boxes[i].astype('int')\r\n face = img[ymin:ymax, xmin:xmax]\r\n faces.append(face)\r\n \r\n return faces",
"def faces_extraction(dataset_name, img_size=(96, 96)):\n # Dataset path\n path = os.path.join(base_dir, dataset_name)\n dataset_directory = os.path.join(path, 'img')\n # Create the name of the data_directory to return\n data_directory = '{}_faces'.format(path)\n # Create directory for extracted faces images\n faces_directory = os.path.join(data_directory, 'img')\n Path(faces_directory).mkdir(parents=True, exist_ok=True)\n # copy the labels.csv file into the new folder\n copy2(os.path.join(path, labels_filename), data_directory)\n # List of all the images available\n files = sorted(os.listdir(dataset_directory), key=lambda x: int(x.split(\".\")[0]))\n # Extract face for each image in the directory\n counter = 0\n files_not_detected = []\n for file in files:\n image_path = os.path.join(dataset_directory, file)\n # Load the jpg file into a numpy array\n image = cv2.imread(image_path)\n # convert to gray\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Find faces with a model based on HOG algorithm\n face_places = face_locations(gray, number_of_times_to_upsample=0, model=\"hog\")\n if len(face_places) == 0:\n # Find faces with a pre-trained CNN. It is more accurate than the default HOG method but it is slower.\n # With GPU and dlib compiled with CUDA extensions it will perform faster\n face_places = face_locations(gray, number_of_times_to_upsample=0, model=\"cnn\")\n if len(face_places) == 0:\n # If no faces are detected save the name of the file in a dedicated list\n counter += 1\n print(\"In {0}, no faces found!! --------------- counter: {1}\".format(file, counter))\n files_not_detected.append(file)\n else:\n # instead of ...for face_place in face_places\n # For each image only one detected face will be considered\n # Print the location of the face\n bottom, right, top, left = face_places[0]\n # Select the region of interest in the original rgb image\n face_image = image[bottom:top, left:right]\n # Resize the region of interest and save in the created directory\n resized = cv2.resize(face_image, img_size)\n cv2.imwrite(os.path.join(faces_directory, file), resized)\n return data_directory.split(os.sep)[-1], files_not_detected",
"def _detect_face(self, frame):\n face_coords = list()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = self.detector(gray, 0)\n print(rects)\n # get bounding box for every face in the frame\n for i, d in enumerate(rects):\n x1 = d.left()-consts.PADDING\n y1 = d.top()-consts.PADDING\n x2 = d.right()+consts.PADDING\n y2 = d.bottom()+consts.PADDING\n face_coords.append((x1, y1, x2, y2))\n return face_coords",
"def crop_face(img):\r\n try:\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n face_cascade = cv2.CascadeClassifier('xml/haarcascade_frontalface_alt2.xml') \r\n faces = face_cascade.detectMultiScale(gray, 1.05, 5)\r\n face = np.array(0)\r\n # if face found\r\n if len(faces) > 0:\r\n (x, y, w, h) = faces[0]\r\n \r\n # extend the size of the face detected\r\n ext = int(abs(h-y) * 0.5)\r\n \r\n # test if extension fits on image, if not ext maximum amount\r\n if (y+h+ext) > img.shape[0]:\r\n ext = img.shape[0] - h\r\n face = img[y:y + h + ext, x:x + w]\r\n \r\n # if problem with extracting face, print error and raise FaceNotFound\r\n except Exception as e:\r\n print(\"Error1: \", e)\r\n raise FaceNotFound\r\n \r\n return face",
"def read(self):\n ret, frame = self.capture.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = self.face_detector.detectMultiScale(gray, 1.1, 5)\n return frame, faces",
"def locate_faces(input_image):\n face_cascade = cv2.CascadeClassifier(CASCADE_FILE_PATH)\n gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)\n # detect the faces\n faces = face_cascade.detectMultiScale(gray, 1.2, 5)\n print(faces)\n return faces",
"def load_faces(dirname, face, variable, period, chunk):\n ds = xarray.open_zarr(dirname)\n ds = ds.transpose(\"face\", \"j\", \"i\", \"time\")\n return ds.isel(face=face, j=chunk, i=chunk, time=period)[variable].data",
"def classify_face(im):\r\n #get_encoded_faces()\r\n faces = shelve.open('trainingData.yml')\r\n #faces = faces1.read()\r\n #print(faces)\r\n faces_encoded = list(faces.values())\r\n known_face_names = list(faces.keys())\r\n\r\n img = cv2.imread(im, 1)\r\n #img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)\r\n #img = img[:,:,::-1]\r\n \r\n face_locations = face_recognition.face_locations(img)\r\n unknown_face_encodings = face_recognition.face_encodings(img, face_locations)\r\n\r\n face_names = []\r\n for face_encoding in unknown_face_encodings:\r\n # See if the face is a match for the known face(s)\r\n matches = face_recognition.compare_faces(faces_encoded, face_encoding)\r\n name = \"Unknown\"\r\n #print(\"face_names\",face_names)\r\n #print(\"faces_encoded\",faces_encoded)\r\n #print(\"known_fac_names:\",known_face_names)\r\n\r\n # use the known face with the smallest distance to the new face\r\n face_distances = face_recognition.face_distance(faces_encoded, face_encoding)\r\n best_match_index = np.argmin(face_distances)\r\n if matches[best_match_index]:\r\n name = known_face_names[best_match_index]\r\n\r\n face_names.append(name)\r\n\r\n for (top, right, bottom, left), name in zip(face_locations, face_names):\r\n # Draw a box around the face\r\n cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), 2)\r\n\r\n # Draw a label with a name below the face\r\n cv2.rectangle(img, (left-20, bottom -15), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)\r\n font = cv2.FONT_HERSHEY_COMPLEX_SMALL\r\n cv2.putText(img, name, (left -20, bottom + 15), font, 1.0, (255, 255, 255), 2)\r\n\r\n\r\n # Display the resulting image\r\n while True:\r\n\r\n cv2.imshow('Video', img)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n return face_names",
"def faces(self):\n upper = [self.face(i,True) for i in range(self.dimension())]\n lower = [self.face(i,False) for i in range(self.dimension())]\n return upper + lower",
"def getFaces(faceType, nodeTags):",
"def faces(self):\n #Get cell connettivity ids as a 1D array. The vtk format is:\n # [nids1, id0 ... idn, niids2, id0 ... idm, etc].\n arr1d = vtk_to_numpy(self._polydata.GetPolys().GetData())\n if len(arr1d) == 0:\n arr1d = vtk_to_numpy(self._polydata.GetStrips().GetData())\n\n #conn = arr1d.reshape(ncells, int(len(arr1d)/len(arr1d)))\n #return conn[:, 1:]\n # instead of:\n\n i = 0\n conn = []\n n = len(arr1d)\n for idummy in range(n):\n # cell = []\n # for k in range(arr1d[i]):\n # cell.append(arr1d[i+k+1])\n cell = [arr1d[i+k+1] for k in range(arr1d[i])]\n conn.append(cell)\n i += arr1d[i]+1\n if i >= n:\n break\n return conn # cannot always make a numpy array of it!"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
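A minimal standalone sketch of the same VGGFace feature-extraction steps used in the entry above, assuming keras_vggface and Keras are installed; the layer name 'fc7', the preprocess_input version, and the image path are assumptions standing in for the unshown config dict and process_image helper.

import numpy as np
from keras.models import Model
from keras.preprocessing import image
from keras_vggface.vggface import VGGFace
from keras_vggface.utils import preprocess_input

# Build the truncated VGGFace model ('vgg16' and 'fc7' are assumed config values).
base = VGGFace(model='vgg16', input_shape=(224, 224, 3))
extractor = Model(base.input, base.get_layer('fc7').output)

# Load and preprocess a single image ('face.jpg' is a placeholder path).
img = image.load_img('face.jpg', target_size=(224, 224))
x = np.expand_dims(image.img_to_array(img), axis=0)
x = preprocess_input(x, version=1)  # stands in for the unshown process_image helper

feature = extractor.predict(x)[0, :].reshape(-1)
print(feature.shape)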
Constructs an environment patch for the Airflow Database Retention feature. | def _ConstructAirflowDatabaseRetentionDaysPatch(airflow_database_retention_days,
release_track):
messages = api_util.GetMessagesModule(release_track=release_track)
config = messages.EnvironmentConfig()
config.dataRetentionConfig = messages.DataRetentionConfig(
airflowDatabaseRetentionDays=airflow_database_retention_days)
return ('config.data_retention_configuration.airflow_database_retention_days',
messages.Environment(config=config)) | [
"def create():\n return _DynamicEnvironment()",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def test_runtime_envs_update(self):\n pass",
"def setup_environment():",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def build_env():\r\n\r\n retro_env = retro.make(game='SpaceInvaders-Atari2600')\r\n\r\n # Build an one hot encoding of the actions\r\n actions = np.array(np.identity(\r\n retro_env.action_space.n, dtype=int).tolist())\r\n\r\n return retro_env, actions",
"def prep_env(env, data_type=np.float32, if_print=True): # preprocess environment\n if not all([hasattr(env, attr) for attr in (\n 'env_name', 'state_dim', 'action_dim', 'target_reward', 'if_discrete')]):\n (env_name, state_dim, action_dim, action_max, if_discrete, target_reward) = get_gym_env_info(env, if_print)\n setattr(env, 'env_name', env_name)\n setattr(env, 'state_dim', state_dim)\n setattr(env, 'action_dim', action_dim)\n setattr(env, 'if_discrete', if_discrete)\n setattr(env, 'target_reward', target_reward)\n else:\n action_max = 1\n\n if action_max != 1:\n def decorator_step(env_step):\n def new_env_step(action):\n state, reward, done, info = env_step(action * action_max)\n return state.astype(data_type), reward, done, info\n\n return new_env_step\n else:\n def decorator_step(env_step):\n def new_env_step(action):\n state, reward, done, info = env_step(action)\n return state.astype(data_type), reward, done, info\n\n return new_env_step\n env.step = decorator_step(env.step)\n\n def decorator_reset(env_reset):\n def new_env_reset():\n state = env_reset()\n return state.astype(data_type)\n\n return new_env_reset\n\n env.reset = decorator_reset(env.reset)\n return env",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def __allocate_environment__(cls, options, test_driver):",
"def create_environment(env_name):\n env = gym.make(env_name)\n\n # Fix for certain OpenAI Gym environments,\n # requiring to be reset prior to initial rendering\n if env_name in GYM_ENVS['classical_control']:\n env.reset()\n\n return env",
"def environment_created(self):\n return False\n with self.env.db_transaction as db:\n self.upgrade_environment()",
"def environment_created(self):\n if self.environment_needs_upgrade():\n self.upgrade_environment()",
"def app_env():\n if not self._app_env:\n # TODO: we need to pass this parameter to api, unfortunately\n # in current api framework it is not trivial.\n approot = os.environ['TREADMILL_APPROOT']\n _LOGGER.info('Using approot: %s', approot)\n self._app_env = appmgr.AppEnvironment(approot)\n\n return self._app_env",
"def prepare_environment(self) -> None:\n pass",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def make_energyplus_env(env_id, seed):\n env = gym.make(env_id)\n env = Monitor(env, logger.get_dir())\n env.seed(seed)\n return env",
"def _build_rodent_escape_env():\n walker = walkers.Rat(\n observable_options={'egocentric_camera': dict(enabled=True)}, )\n arena = arenas.bowl.Bowl(size=(20., 20.), aesthetic='outdoor_natural')\n locomotion_task = tasks.escape.Escape(walker=walker,\n arena=arena,\n physics_timestep=0.001,\n control_timestep=.02)\n raw_env = composer.Environment(time_limit=20,\n task=locomotion_task,\n strip_singleton_obs_buffer_dim=True)\n\n return raw_env",
"def fixture_env_object(env_manager):\n env = Environment(\n env_id=COMMIT_HASH,\n created=multiprocessing.Event(),\n creating=multiprocessing.Event(),\n location=os.path.join(env_manager.base_dir, COMMIT_HASH),\n site_packages=os.path.join(env_manager.base_dir, COMMIT_HASH, VENV_SITE_PKGS),\n )\n return env",
"def GetEnvironmentalBG2(self):\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Patches an Environment, optionally waiting for the operation to complete. This function is intended to perform the common work of an Environment patching command's Run method. That is, calling the patch API method and waiting for the result or immediately returning the Operation. | def Patch(env_resource,
field_mask,
patch,
is_async,
release_track=base.ReleaseTrack.GA):
operation = environments_api_util.Patch(
env_resource, patch, field_mask, release_track=release_track)
details = 'with operation [{0}]'.format(operation.name)
if is_async:
log.UpdatedResource(
env_resource.RelativeName(),
kind='environment',
is_async=True,
details=details)
return operation
try:
operations_api_util.WaitForOperation(
operation,
'Waiting for [{}] to be updated with [{}]'.format(
env_resource.RelativeName(), operation.name),
release_track=release_track)
except command_util.Error as e:
raise command_util.Error('Error updating [{}]: {}'.format(
env_resource.RelativeName(), six.text_type(e))) | [
"def __try_set_patch_mode(self):\n try:\n self.status_handler.set_current_operation(Constants.CONFIGURE_PATCHING)\n self.current_auto_os_patch_state = self.package_manager.get_current_auto_os_patch_state()\n\n # disable auto OS updates if VM is configured for platform updates only.\n # NOTE: this condition will be false for Assessment operations, since patchMode is not sent in the API request\n if self.current_auto_os_patch_state != Constants.AutomaticOSPatchStates.DISABLED and self.execution_config.patch_mode == Constants.PatchModes.AUTOMATIC_BY_PLATFORM:\n self.package_manager.disable_auto_os_update()\n\n self.current_auto_os_patch_state = self.package_manager.get_current_auto_os_patch_state()\n\n if self.execution_config.patch_mode == Constants.PatchModes.AUTOMATIC_BY_PLATFORM and self.current_auto_os_patch_state == Constants.AutomaticOSPatchStates.UNKNOWN:\n # NOTE: only sending details in error objects for customer visibility on why patch state is unknown, overall configurepatching status will remain successful\n self.configure_patching_exception_error = \"Could not disable one or more automatic OS update services. Please check if they are configured correctly\"\n\n self.composite_logger.log_debug(\"Completed processing patch mode configuration.\")\n except Exception as error:\n self.composite_logger.log_error(\"Error while processing patch mode configuration. [Error={0}]\".format(repr(error)))\n self.configure_patching_exception_error = error\n self.configure_patching_successful &= False",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f\"patch -p1 < {self.project.patch}/{self.ver}/{patch}\")",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f'patch -p1 < {self.project.patch}/{self.ver}/{patch}')",
"def patch_machine(self, name_or_id, patch):\n return self.baremetal.patch_node(name_or_id, patch)",
"def CmdPkgPatch(package, options):\n package.Patch()",
"def _patch_env(**environs: str):\n # Adapted loosely from https://stackoverflow.com/a/34333710\n # Capture the original environ values\n original_environs = {k: os.environ.get(k) for k in environs}\n\n # Patch the environment\n for k, v in environs.items():\n os.environ[k] = v\n try:\n # Run the context manager\n yield\n finally:\n # Restore the original environ values\n for k, v in original_environs.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v",
"def do_environment_model_edit(mc, args):\n jp_obj = None\n if not args.filename:\n jp_obj = json.load(sys.stdin)\n else:\n with open(args.filename) as fpatch:\n jp_obj = json.load(fpatch)\n\n if not isinstance(jp_obj, list):\n raise exceptions.CommandError('JSON-patch must be a list of changes')\n for change in jp_obj:\n if 'op' not in change or 'path' not in change:\n raise exceptions.CommandError('Every change in JSON-patch must '\n 'contain \"op\" and \"path\" keys')\n op = change['op']\n if op not in ['add', 'replace', 'remove']:\n raise exceptions.CommandError('The value of \"op\" item must be '\n '\"add\", \"replace\" or \"remove\", '\n 'got {0}'.format(op))\n if op != 'remove' and 'value' not in change:\n raise exceptions.CommandError('\"add\" or \"replace\" change in '\n 'JSON-patch must contain \"value\" '\n 'key')\n session_id = args.session_id\n new_model = mc.environments.update_model(args.id, jp_obj, session_id)\n print(utils.json_formatter(new_model))",
"def patch_machine(self, name_or_id, patch):\n\n try:\n return meta.obj_to_dict(\n self.manager.submitTask(\n _tasks.MachinePatch(node_id=name_or_id,\n patch=patch,\n http_method='PATCH')))\n except Exception as e:\n self.log.debug(\n \"Machine patch update failed\", exc_info=True)\n raise OpenStackCloudException(\n \"Error updating machine via patch operation. node: %s. \"\n \"%s\" % (name_or_id, str(e)))",
"def do_environment_apps_edit(mc, args):\n\n jp_obj = None\n if not args.filename:\n jp_obj = json.load(sys.stdin)\n else:\n with open(args.filename) as fpatch:\n jp_obj = json.load(fpatch)\n\n jpatch = jsonpatch.JsonPatch(jp_obj)\n\n environment_id = args.id\n session_id = args.session_id\n environment = mc.environments.get(environment_id, session_id)\n\n object_model = jpatch.apply(environment.services)\n utils.traverse_and_replace(object_model)\n\n mc.services.put(\n environment_id,\n path='/',\n data=jpatch.apply(environment.services),\n session_id=session_id)",
"def test_patch_run(self):\n pass",
"def patch_status(self) -> Optional['outputs.VirtualMachinePatchStatusResponse']:\n return pulumi.get(self, \"patch_status\")",
"def patch():\n if getattr(httplib, PATCH_FLAG, False):\n return\n # we set an attribute to avoid multiple wrapping\n setattr(httplib, PATCH_FLAG, True)\n\n wrapt.wrap_function_wrapper(\n httplib_client_module,\n 'HTTPConnection._send_request',\n _send_request\n )\n\n wrapt.wrap_function_wrapper(\n httplib_client_module,\n 'HTTPConnection.getresponse',\n _xray_traced_http_getresponse\n )\n\n wrapt.wrap_function_wrapper(\n httplib_client_module,\n 'HTTPResponse.read',\n _xray_traced_http_client_read\n )",
"def patch_all(**patch_modules):\n modules = PATCH_MODULES.copy()\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)",
"def patch(self, endpoint=None, data=None, json=None, callback=None, callback_kwargs=None):\n return self._call(\"PATCH\",\n endpoint=endpoint,\n data=data,\n json=json,\n callback=callback,\n callback_kwargs=callback_kwargs)",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def update_compute_environment(self, computeEnvironment: str, state: str = None, computeResources: Dict = None, serviceRole: str = None) -> Dict:\n pass",
"def CmdPkgUpdatePatch(package, options):\n package.UpdatePatch()",
"def test_runWithPatchesRestores(self):\n self.monkeyPatcher.addPatch(self.testObject, 'foo', 'haha')\n self.assertEqual(self.originalObject.foo, self.testObject.foo)\n self.monkeyPatcher.runWithPatches(lambda: None)\n self.assertEqual(self.originalObject.foo, self.testObject.foo)",
"def modify_pe(self, pycl_object=None, name=None, data=None,\n metadata=None, json_string=None):\n return self.parallel_environment_manager.modify_object(\n pycl_object=pycl_object, name=name, data=data,\n metadata=metadata, json_string=json_string)",
"def method_patch(self, uri, **kwargs):\r\n return self._api_request(uri, \"PATCH\", **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
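A minimal sketch of how a patch tuple produced by one of the constructors above might be passed to Patch, written as if inside the same gcloud Composer command module (so api_util, base, and friends are already imported there); env_resource and the retention value are placeholders, and this is a wiring sketch rather than a runnable standalone script.

# Hypothetical wiring inside an update command's Run method; values are placeholders.
field_mask, patch = _ConstructAirflowDatabaseRetentionDaysPatch(
    airflow_database_retention_days=30,
    release_track=base.ReleaseTrack.GA)
# env_resource would be the environment resource parsed from the command's arguments.
operation = Patch(
    env_resource, field_mask, patch,
    is_async=True, release_track=base.ReleaseTrack.GA)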
Constructs an environment patch for a private environment. | def _ConstructPrivateEnvironmentPatch(
enable_private_environment,
release_track=base.ReleaseTrack.GA,
):
messages = api_util.GetMessagesModule(release_track=release_track)
private_environment_config = messages.PrivateEnvironmentConfig()
config = messages.EnvironmentConfig(
privateEnvironmentConfig=private_environment_config
)
update_mask = 'config.private_environment_config.enable_private_environment'
private_environment_config.enablePrivateEnvironment = bool(
enable_private_environment
)
return (
update_mask,
messages.Environment(config=config),
) | [
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def create():\n return _DynamicEnvironment()",
"def patch_using_env(self):\n if self.cred_properties:\n credentials_config = self.cred_properties\n\n user = getenv(\"HERE_USER_ID\") or credentials_config[\"user\"]\n client = getenv(\"HERE_CLIENT_ID\") or credentials_config[\"client\"]\n key = (\n getenv(\"HERE_ACCESS_KEY_ID\")\n or getenv(\"HERE_ACCESS_KEY\")\n or credentials_config[\"key\"]\n )\n secret = (\n getenv(\"HERE_ACCESS_KEY_SECRET\")\n or getenv(\"HERE_ACCESS_SECRET\")\n or credentials_config[\"secret\"]\n )\n endpoint = (\n getenv(\"HERE_TOKEN_ENDPOINT_URL\")\n or getenv(\"HERE_TOKEN_ENDPOINT\")\n or credentials_config[\"endpoint\"]\n )\n credentials_config[\"user\"] = user\n credentials_config[\"client\"] = client\n credentials_config[\"key\"] = key\n credentials_config[\"secret\"] = secret\n credentials_config[\"endpoint\"] = endpoint",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def _monkeypatch_environment(monkeypatch):\n with tempfile.TemporaryDirectory() as d:\n monkeypatch.setitem(os.environ, 'HOME', d)\n yield d",
"def setup_environment():",
"def fixture_fake_env(monkeypatch):\n environ = {}\n monkeypatch.setattr(os, \"environ\", environ)\n return environ",
"def create_environment(env_name):\n env = gym.make(env_name)\n\n # Fix for certain OpenAI Gym environments,\n # requiring to be reset prior to initial rendering\n if env_name in GYM_ENVS['classical_control']:\n env.reset()\n\n return env",
"def __free_environment__(cls, environment):",
"def _prepare_environment(self):\n env = {'HOME': self._make_mapping(HOME)}\n\n return env",
"def _patch_env(**environs: str):\n # Adapted loosely from https://stackoverflow.com/a/34333710\n # Capture the original environ values\n original_environs = {k: os.environ.get(k) for k in environs}\n\n # Patch the environment\n for k, v in environs.items():\n os.environ[k] = v\n try:\n # Run the context manager\n yield\n finally:\n # Restore the original environ values\n for k, v in original_environs.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v",
"def expanded_env_dict():\n return generate_expanded_env_dict()",
"def test__EnvGetter__new():\n env = EnvGetter()\n vampytest.assert_instance(env, EnvGetter)\n vampytest.assert_instance(env._captured, list, nullable = True)\n vampytest.assert_instance(env._entered, int)",
"def fixture_env_object(env_manager):\n env = Environment(\n env_id=COMMIT_HASH,\n created=multiprocessing.Event(),\n creating=multiprocessing.Event(),\n location=os.path.join(env_manager.base_dir, COMMIT_HASH),\n site_packages=os.path.join(env_manager.base_dir, COMMIT_HASH, VENV_SITE_PKGS),\n )\n return env",
"def create_proxied_env_from_spec(env_spec: study_pb2.EnvironmentSpec,\n create_env_fn: CreateEnvFn,\n mp_context=None) -> environment.Environment:\n if mp_context:\n parent_conn, child_conn = mp_context.Pipe()\n p = mp_context.Process(\n target=_proxy_handler, args=(child_conn, env_spec, create_env_fn))\n else:\n parent_conn, child_conn = multiprocessing.Pipe()\n p = multiprocessing.Process(\n target=_proxy_handler, args=(child_conn, env_spec, create_env_fn))\n p.start()\n return EnvironmentProxy(parent_conn, p)",
"def create_dev_environment(self, svn=False, git=True):\n\n package_list = []\n if svn:\n package_list.append('svn')\n if git:\n package_list.append('git')\n package_list.append('git-lfs')\n\n prefix = os.path.join(self.root_dir, 'dev_env')\n command = 'create'\n text_messages = ['Installing', 'installation into']\n if prefix in self.environments:\n command = 'update'\n text_messages = ['Updating', 'update of']\n\n command_list = [self.conda_exe, command, '-y', '-c', 'conda-forge',\n '--prefix', prefix] + package_list\n\n print('-'*79, file=self.log)\n print('{text} extra development environment containing:'.format(text=text_messages[0]),\n file=self.log)\n for package in package_list:\n print(' -', package, file=self.log)\n\n self._retry_command(command_list, text_messages[1], prefix, verbose=True)\n\n print('-'*79, file=self.log)\n return prefix",
"def _env():\n home = _os.environ['HOME']\n root_dir = _os.path.realpath(\n _os.path.join(_os.environ['CLOUDSDK_CONFIG'], '../..'))\n inet_family = 'IPV4_ONLY'\n dev = '/dev/fuse'\n path = '/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:.'\n if len(root_dir) > 1 and not root_dir.startswith('/usr/local/google/'):\n home = _os.path.join(root_dir, home)\n inet_family = 'IPV6_ONLY'\n fum = _os.environ['HOME'].split('mount')[0] + '/mount/alloc/fusermount'\n dev = fum + '/dev/fuse'\n path = path + ':' + fum + '/bin'\n config_dir = _os.path.join(home, '.config', 'Google')\n return _Environment(\n home=home,\n root_dir=root_dir,\n inet_family=inet_family,\n dev=dev,\n path=path,\n config_dir=config_dir)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for VPC connectivity. Used only in Composer 3. | def _ConstructVpcConnectivityPatch(
disable_vpc_connectivity,
network,
subnetwork,
network_attachment,
release_track=base.ReleaseTrack.GA,
):
messages = api_util.GetMessagesModule(release_track=release_track)
node_config = messages.NodeConfig()
config = messages.EnvironmentConfig(nodeConfig=node_config)
update_mask = None
if disable_vpc_connectivity:
update_mask = 'config.node_config.network,config.node_config.subnetwork'
elif network_attachment:
update_mask = 'config.node_config.network_attachment'
node_config.composerNetworkAttachment = network_attachment
elif network and subnetwork:
update_mask = 'config.node_config.network,config.node_config.subnetwork'
node_config.network = network
node_config.subnetwork = subnetwork
return (
update_mask,
messages.Environment(config=config),
) | [
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def create_eb_environment():\n creation_response = client.create_environment(\n ApplicationName=app_name,\n EnvironmentName=environment_name,\n Description=\"Manheim test deployment\",\n CNAMEPrefix=environment_name,\n Tier={\n 'Name': 'WebServer',\n 'Type': 'Standard'\n },\n SolutionStackName=solution_stack,\n OptionSettings=[\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'Custom Availability Zones',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'us-east-1a'\n },\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'MaxSize',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': '3'\n },\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'MinSize',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:launchconfiguration',\n 'OptionName': 'InstanceType',\n 'Value': 't2.micro'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'BreachDuration',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': '1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'EvaluationPeriods',\n u'ResourceName': 'AWSEBCloudwatchAlarmLow',\n u'Value': '1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'LowerBreachScaleIncrement',\n u'ResourceName': 'AWSEBAutoScalingScaleDownPolicy',\n u'Value': '-1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'LowerThreshold',\n u'ResourceName': 'AWSEBCloudwatchAlarmLow',\n u'Value': '25'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'MeasureName',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'CPUUtilization'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Period',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Statistic',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'Average'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Unit',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'Percent'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'UpperBreachScaleIncrement',\n 'ResourceName': 'AWSEBAutoScalingScaleUpPolicy',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'UpperThreshold',\n 'ResourceName': 'AWSEBCloudwatchAlarmHigh',\n 'Value': '85'\n },\n {\n 'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',\n 'OptionName': 'RollingUpdateEnabled',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'false'\n },\n {\n 'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',\n 'OptionName': 'RollingUpdateType',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'Time'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'BatchSize',\n 'Value': '50'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'BatchSizeType',\n 'Value': 'Percentage'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'DeploymentPolicy',\n 'Value': 'Rolling'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'IgnoreHealthCheck',\n 'Value': 'false'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'Timeout',\n 'Value': '600'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:container:python',\n 'OptionName': 'WSGIPath',\n 'Value': application_path\n }\n ]\n )\n return creation_response",
"def __init__(self, name, local_api, description=\"\",\n default_attributes=None, override_attributes=None,\n cookbook_versions=None):\n super(Environment, self).__init__(name=name, description=description)\n self.local_api_dict = {\"url\": local_api.url,\n \"key\": local_api.key.raw,\n \"client\": local_api.client}\n\n self.default_attributes = default_attributes or {}\n self.override_attributes = override_attributes or {}\n self.cookbook_versions = cookbook_versions or {}\n self.json_class = \"Chef::Environment\"\n self.chef_type = \"environment\"",
"def create_vpc(DryRun=None, CidrBlock=None, InstanceTenancy=None, AmazonProvidedIpv6CidrBlock=None):\n pass",
"def create_environment(env_name):\n env = gym.make(env_name)\n\n # Fix for certain OpenAI Gym environments,\n # requiring to be reset prior to initial rendering\n if env_name in GYM_ENVS['classical_control']:\n env.reset()\n\n return env",
"def _env():\n home = _os.environ['HOME']\n root_dir = _os.path.realpath(\n _os.path.join(_os.environ['CLOUDSDK_CONFIG'], '../..'))\n inet_family = 'IPV4_ONLY'\n dev = '/dev/fuse'\n path = '/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:.'\n if len(root_dir) > 1 and not root_dir.startswith('/usr/local/google/'):\n home = _os.path.join(root_dir, home)\n inet_family = 'IPV6_ONLY'\n fum = _os.environ['HOME'].split('mount')[0] + '/mount/alloc/fusermount'\n dev = fum + '/dev/fuse'\n path = path + ':' + fum + '/bin'\n config_dir = _os.path.join(home, '.config', 'Google')\n return _Environment(\n home=home,\n root_dir=root_dir,\n inet_family=inet_family,\n dev=dev,\n path=path,\n config_dir=config_dir)",
"def setup_environment():",
"def env_creator(config: dict):\n \n from gridworld import MultiAgentEnv\n\n return MultiAgentEnv(**config)",
"def main(azs, region, keyid, secret, cidr, owner, env):\n\n # Validate the region\n myregion = boto.ec2.get_region(region_name=region)\n if myregion == None:\n print(\"Unknown region.\")\n exit(1)\n\n # Establish a VPC service connection\n try:\n conn = boto.vpc.VPCConnection(aws_access_key_id=keyid, aws_secret_access_key=secret, region=myregion)\n except boto.exception.EC2ResponseError as e:\n print(e.message)\n exit(1)\n\n # Grab the availability-zones\n zones = []\n all_zones = conn.get_all_zones()\n for zone in all_zones:\n if zone.state != 'available':\n continue\n zones.append(zone.name)\n\n subnets = subnet_sizes(azs, cidr) # Calculate the subnet sizes\n name = owner.lower() + '-' + env.lower() + '-' # Used for tagging\n\n vpc_id = create_vpc(conn, name, region, cidr)\n igw_id = create_igw(conn, name, region, vpc_id)\n sub_ids = create_sub(conn, name, region, vpc_id, azs, subnets, zones)\n rtb_ids = create_rtb(conn, name, region, vpc_id, azs, sub_ids, igw_id)\n acl_ids = create_acl(conn, name, region, vpc_id, azs, sub_ids, cidr)\n flow_id = create_flows(vpc_id, keyid, secret, region)",
"def create():\n return _DynamicEnvironment()",
"def __allocate_environment__(cls, options, test_driver):",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def create_or_resume(name, spec, **_):\n\n # deploy mysql for placement\n utils.ensure_mysql_cluster(\"placement\", spec[\"mysql\"])\n\n # deploy placement api\n utils.create_or_update('placement/daemonset.yml.j2', spec=spec)\n utils.create_or_update('placement/service.yml.j2', spec=spec)\n\n # Create application credential\n identity.ensure_application_credential(name=\"placement\")\n\n url = None\n if \"ingress\" in spec:\n utils.create_or_update('placement/ingress.yml.j2',\n name=name, spec=spec)\n url = spec[\"ingress\"][\"host\"]\n\n if \"endpoint\" not in spec:\n spec[\"endpoint\"] = True\n if spec[\"endpoint\"]:\n identity.ensure_service(name=\"placement\", service_type=\"placement\",\n url=url, desc=\"Placement Service\")",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def fixture_env_object(env_manager):\n env = Environment(\n env_id=COMMIT_HASH,\n created=multiprocessing.Event(),\n creating=multiprocessing.Event(),\n location=os.path.join(env_manager.base_dir, COMMIT_HASH),\n site_packages=os.path.join(env_manager.base_dir, COMMIT_HASH, VENV_SITE_PKGS),\n )\n return env",
"def test_prepare_environment(self):\n pass",
"def do_env_template_create_env(mc, args):\n try:\n data = {}\n data[\"name\"] = args.name\n if args.region:\n data[\"region\"] = args.region\n template = mc.env_templates.create_env(args.id, data)\n except common_exceptions.HTTPNotFound:\n raise exceptions.CommandError(\"Environment template %s not found\"\n % args.id)\n else:\n formatters = {\n \"environment_id\": utils.text_wrap_formatter,\n \"session_id\": utils.text_wrap_formatter\n }\n utils.print_dict(template.to_dict(), formatters=formatters)",
"def _get_environment(\n self,\n cluster_kwargs: Dict[str, Any],\n adapt_kwargs: Dict[str, Any],\n ):\n\n local_tmp_file = \"/tmp/prefect-flow-run.yaml\"\n with open(local_tmp_file, \"w\") as f:\n YAML().dump(self._flow_run_job_spec, stream=f)\n\n # saturn_flow_id is used by Saturn's custom Prefect agent\n k8s_environment = KubernetesJobEnvironment(\n metadata={\"saturn_flow_id\": self.flow_id, \"image\": self.image},\n executor=DaskExecutor(\n cluster_class=\"dask_saturn.SaturnCluster\",\n cluster_kwargs=cluster_kwargs,\n adapt_kwargs=adapt_kwargs,\n ),\n job_spec_file=local_tmp_file,\n labels=self._saturn_flow_labels,\n unique_job_name=True,\n )\n\n # patch command and args to run the user's start script\n new_command = [\"/bin/bash\", \"-ec\"]\n k8s_environment._job_spec[\"spec\"][\"template\"][\"spec\"][\"containers\"][0][\n \"command\"\n ] = new_command\n\n args_from_prefect = k8s_environment._job_spec[\"spec\"][\"template\"][\"spec\"][\"containers\"][\n 0\n ].get(\"args\", [])\n args_from_prefect = \" \".join(args_from_prefect)\n new_args = f\"source /home/jovyan/.saturn/start_wrapper.sh; {args_from_prefect}\"\n k8s_environment._job_spec[\"spec\"][\"template\"][\"spec\"][\"containers\"][0][\"args\"] = [new_args]\n\n return k8s_environment"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
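The same field-mask-plus-partial-Environment pattern is returned by the remaining constructors below (node count, environment size), so one wiring sketch covers them; the VPC connectivity helper above differs only in that exactly one of its three modes applies per call, as this hypothetical sketch shows (resource names are placeholders).

# 1) Disable customer VPC connectivity.
mask, patch = _ConstructVpcConnectivityPatch(
    disable_vpc_connectivity=True, network=None, subnetwork=None,
    network_attachment=None)
# 2) Connect through a network attachment (placeholder resource name).
mask, patch = _ConstructVpcConnectivityPatch(
    disable_vpc_connectivity=False, network=None, subnetwork=None,
    network_attachment='projects/p/regions/r/networkAttachments/a')
# 3) Connect to an explicit network/subnetwork pair (placeholder resource names).
mask, patch = _ConstructVpcConnectivityPatch(
    disable_vpc_connectivity=False,
    network='projects/p/global/networks/n',
    subnetwork='projects/p/regions/r/subnetworks/s',
    network_attachment=None)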
Constructs an environment patch for node count. | def _ConstructNodeCountPatch(node_count, release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
config = messages.EnvironmentConfig(nodeCount=node_count)
return 'config.node_count', messages.Environment(config=config) | [
"def create_num_bases(self):\r\n if self.dimension==1:\r\n KV_xi=self.knot_vector(self.number_elements,self.order,self.mp)\r\n self.num_bases=len(KV_xi)-(self.order+1)\r\n return\r\n \"\"\"\r\n Generates knot vectors for each patch\r\n \"\"\"\r\n# print(self.number_elements)\r\n KV_xi=lambda patch_num: self.knot_vector(self.number_elements[patch_num,0],self.order[patch_num,0],self.mp[patch_num,0])\r\n KV_eta=lambda patch_num: self.knot_vector(self.number_elements[patch_num,1],self.order[patch_num,1],self.mp[patch_num,1])\r\n \r\n \"\"\"\r\n Finds number of bases in knot vectors\r\n \"\"\"\r\n num_basis_xi=lambda patch_num: len(KV_xi(patch_num))-(self.order[patch_num,0]+1)\r\n num_basis_eta=lambda patch_num: len(KV_eta(patch_num))-(self.order[patch_num,1]+1)\r\n \r\n if np.array_equal(self.order[1,:],np.ones(2)*-1)!=1:\r\n self.num_bases=np.array([ (num_basis_xi(patch_num),num_basis_eta(patch_num)) \\\r\n for patch_num in range(len(self.order))]) \r\n else:\r\n self.num_bases=np.vstack((np.array([num_basis_xi(0),num_basis_eta(0)]),np.zeros(2)))",
"def set_ps_count(self, count: int) -> \"TFClusterConfig.Builder\":\n self.add_node_type(\"ps\", count)\n return self",
"def _ConstructSoftwareConfigurationSchedulerCountPatch(\n scheduler_count, release_track=base.ReleaseTrack.GA):\n messages = api_util.GetMessagesModule(release_track=release_track)\n\n return 'config.software_config.scheduler_count', messages.Environment(\n config=messages.EnvironmentConfig(\n softwareConfig=messages.SoftwareConfig(\n schedulerCount=scheduler_count)))",
"def reset_macs_count(self):\n add_batch_counter_variables_or_reset(self)\n self.apply(add_macs_counter_variable_or_reset)",
"def installed_patch_count(self) -> int:\n return pulumi.get(self, \"installed_patch_count\")",
"def update_counter(self):\n self.node_counter += 1",
"def set_worker_count(self, count: int) -> \"TFClusterConfig.Builder\":\n self.add_node_type(\"worker\", count)\n return self",
"def _ConstructEnvironmentSizePatch(environment_size,\n release_track=base.ReleaseTrack.GA):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(environmentSize=environment_size)\n return 'config.environment_size', messages.Environment(config=config)",
"def number_of_nodes():\n return 3",
"def number_of_nodes(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.node\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)",
"def number_of_nodes(self) -> int:\n return pulumi.get(self, \"number_of_nodes\")",
"def pending_patch_count(self) -> int:\n return pulumi.get(self, \"pending_patch_count\")",
"def node_size(rank_spec):",
"def __init__(self, env, num_replicas):\n self.env = env\n self.id = env.id\n self.action_space = env.action_space\n self.observation_space = env.observation_space\n self.max_episode_steps = env.max_episode_steps\n data = env.data\n num_replicas = min(len(data), num_replicas)\n self.num_envs = num_replicas\n data_split = map(list, np.array_split(data, num_replicas))\n\n envs = []\n for x in data_split:\n test_env = copy(env)\n test_env.data = x\n test_env.num_repeats = env.num_repeats\n envs.append(test_env)\n self.envs = np.array(envs, dtype=np.object)\n self.env_alive_mask = np.ones(len(envs), dtype=np.int)\n self.done_mask = np.zeros(len(envs), dtype=np.int)\n self.next_round_actions = np.arange(len(envs))\n self.rng = np.random.default_rng(0)",
"def __init__(self, env_make_fn, num_envs=32):\n self.num_envs = num_envs\n self.envs = [env_make_fn() for _ in range(self.num_envs)]\n self.num_actions = self.envs[0].action_space.n",
"def critical_and_security_patch_count(self) -> int:\n return pulumi.get(self, \"critical_and_security_patch_count\")",
"def compute_atom_count(self):\n self.frame['atom_count'] = self.atom.cardinal_groupby().size()",
"def number_of_nodes_in_shell(self):\n try:\n return self._nnodes_inshell\n except AttributeError:\n n_pts_in_shell = numpy.round(2. * numpy.pi * (\n numpy.arange(self.number_of_shells, dtype=float) + 1.))\n self._nnodes_inshell = n_pts_in_shell.astype(int)\n return self._nnodes_inshell",
"def increasing_nodes(self):\n self.na = self.n * 3\n self.nb = self.n * 9",
"def __init__(\n self,\n observation_space,\n action_space,\n parallel_envs,\n cfg,\n **kwargs,\n ):\n super(DictCount, self).__init__(observation_space, action_space, parallel_envs, cfg)\n self.count_table = defaultdict(int)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for environment size. | def _ConstructEnvironmentSizePatch(environment_size,
release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
config = messages.EnvironmentConfig(environmentSize=environment_size)
return 'config.environment_size', messages.Environment(config=config) | [
"def getPatchSize(self) -> retval:\n ...",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def set_app_ring_size(self, ring_size):\n\n sed_command = (r\"sed -i 's/^\\(.*RING_SIZE\\)\\s*[[:digit:]]*/\\1 %d/' \" +\n r\"examples/quota_watermark/include/conf.h\")\n self.dut.send_expect(sed_command % int(ring_size), '# ')",
"def __free_environment__(cls, environment):",
"def __allocate_environment__(cls, options, test_driver):",
"def test_winsize_IOError_returns_environ():\n @as_subprocess\n def child():\n def side_effect(fd):\n raise IOError\n\n term = TestTerminal()\n term._winsize = side_effect\n os.environ['COLUMNS'] = '1984'\n os.environ['LINES'] = '1888'\n assert term._height_and_width() == (1888, 1984, None, None)\n\n child()",
"def create():\n return _DynamicEnvironment()",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def make_energyplus_env(env_id, seed):\n env = gym.make(env_id)\n env = Monitor(env, logger.get_dir())\n env.seed(seed)\n return env",
"def training_patch_size(self):\n return self._training_patch_size",
"def __init__(self, env_config, paddle_length_factor):\n self.env_config = env_config\n self.frameskip = (2, 5)\n self.env = EnvPongDraft_Surface_Headless.EnvPong(render_screen = False, screen_scale = 1.0)\n self.scale_paddle_height(paddle_length_factor)\n self.seed()\n self._action_set = self.env.getMinimalActionSet()\n self.action_space = spaces.Discrete(len(self._action_set))\n screen_width = screen_height = 42\n self.zoom_val = 42 / 400\n self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width), dtype=np.float)",
"def __init__(self, env_make_fn, num_envs=32):\n self.num_envs = num_envs\n self.envs = [env_make_fn() for _ in range(self.num_envs)]\n self.num_actions = self.envs[0].action_space.n",
"def set_size_from_terminal(self):\n self.rows, self.columns = os.popen('stty size', 'r').read().split()\n self.width = int(self.columns)\n self.height = int(self.rows) - 2",
"def validation_patch_size(self):\n return self._validation_patch_size",
"def test_environment_specs_roundtrip(self):\n # Each spec has a different shape, type and name\n observation_spec = specs.Array((1, 2, 3), np.float32, 'spec1')\n action_spec = specs.Array((4, 5), np.float64, 'spec2')\n reward_spec = specs.Array((1,), np.int32, 'spec3')\n discount_spec = specs.Array((2,), np.int64, 'spec4')\n\n env = CustomSpecsEnvironment(observation_spec, action_spec, reward_spec,\n discount_spec)\n\n env_specs = spec_codec.encode_environment_specs(env)\n\n decoded_specs = spec_codec.decode_environment_specs(env_specs)\n self.assertEqual(decoded_specs['observation_spec'], observation_spec)\n self.assertEqual(decoded_specs['action_spec'], action_spec)\n self.assertEqual(decoded_specs['reward_spec'], reward_spec)\n self.assertEqual(decoded_specs['discount_spec'], discount_spec)",
"def create_initial_image(self, size):\n module_path = globals()[\"__file__\"]\n # user-defined image is assumed to reside in the same location as the attack module\n patch_base_image_path = os.path.abspath(\n os.path.join(os.path.join(module_path, \"../\"), self.patch_base_image)\n )\n\n im = cv2.imread(patch_base_image_path)\n im = cv2.resize(im, size)\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n\n patch_base = np.transpose(im, (2, 0, 1))\n patch_base = patch_base / 255.0\n return patch_base",
"def pr152_wcs_set_size() -> Experiment:\n tag = \"pr152_wcs_set_size\"\n data_size = 1_000_000_000\n repeat = 10\n pattern = \"Performance.KySync\"\n\n return Experiment([\n TestInstance(\n tag=f\"{tag}-after\",\n commitish='ab76e33ba92a52fe8a042b7becae6402d8b1fe6d',\n data_size=data_size,\n similarity=0,\n gtest_filter=pattern,\n gtest_repeat=repeat\n ),\n TestInstance(\n tag=f\"{tag}-before\",\n commitish='e626f10f0b11aa6c8956dec18ef962c2277c79e2',\n data_size=data_size,\n similarity=0,\n gtest_filter=pattern,\n gtest_repeat=repeat\n ),\n ], analyze=_analyze)",
"def _create_env(self, gymenv: Union[str, Env], random_seed: Optional[int]):\n if isinstance(gymenv, Env):\n self.env = gymenv\n self.env_name = gymenv.unwrapped.spec.id\n else:\n if gymenv not in [e.id for e in gym.envs.registry.all()]:\n raise Exception(\"Env {} not found in OpenAI Gym.\".format(gymenv))\n self.env = gym.make(gymenv)\n self.env_name = gymenv\n if random_seed is not None:\n self.env.seed(random_seed)\n\n supports_state = isinstance(self.env.observation_space, gym.spaces.Box) and len(\n self.env.observation_space.shape\n ) in [1, 3]\n supports_action = type(self.env.action_space) in (\n gym.spaces.Discrete,\n gym.spaces.Box,\n )\n\n if not supports_state and supports_action:\n raise Exception(\n \"Unsupported environment state or action type: {}, {}\".format(\n self.env.observation_space, self.env.action_space\n )\n )\n\n self.action_space = self.env.action_space\n if isinstance(self.env.action_space, gym.spaces.Discrete):\n self.action_type = EnvType.DISCRETE_ACTION\n self.action_dim = self.env.action_space.n\n elif isinstance(self.env.action_space, gym.spaces.Box):\n self.action_type = EnvType.CONTINUOUS_ACTION\n self.action_dim = self.env.action_space.shape[0] # type: ignore\n\n if len(self.env.observation_space.shape) == 1: # type: ignore\n self.state_dim = self.env.observation_space.shape[0] # type: ignore\n self.img = False\n elif len(self.env.observation_space.shape) == 3: # type: ignore\n self.height, self.width, self.num_input_channels = (\n self.env.observation_space.shape # type: ignore\n )\n self.img = True",
"def __ge__(self, size):\n return GeSpacer(size, self.strength)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for partially updating PyPI packages. | def _ConstructPyPiPackagesPatch(clear_pypi_packages,
remove_pypi_packages,
update_pypi_packages,
release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
env_cls = messages.Environment
pypi_packages_cls = (messages.SoftwareConfig.PypiPackagesValue)
entry_cls = pypi_packages_cls.AdditionalProperty
def _BuildEnv(entries):
software_config = messages.SoftwareConfig(
pypiPackages=pypi_packages_cls(additionalProperties=entries))
config = messages.EnvironmentConfig(softwareConfig=software_config)
return env_cls(config=config)
return command_util.BuildPartialUpdate(
clear_pypi_packages, remove_pypi_packages, update_pypi_packages,
'config.software_config.pypi_packages', entry_cls, _BuildEnv) | [
"def update_pipenv_env():\n subprocess.call([\"pipenv\", \"sync\", \"--dev\"])",
"def CmdPkgPatch(package, options):\n package.Patch()",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def update_changed_requirements():\n reqs_path = join(env.proj_path, env.reqs_path)\n get_reqs = lambda: run(\"cat %s\" % reqs_path)\n old_reqs = get_reqs() if env.reqs_path else \"\"\n yield\n if old_reqs:\n new_reqs = get_reqs()\n if old_reqs == new_reqs:\n # Unpinned requirements should always be checked.\n for req in new_reqs.split(\"\\n\"):\n if req.startswith(\"-e\"):\n if \"@\" not in req:\n # Editable requirement without pinned commit.\n break\n elif req.strip() and not req.startswith(\"#\"):\n if not set(\">=<\") & set(req):\n # PyPI requirement without version.\n break\n else:\n # All requirements are pinned.\n return\n pip(\"-r %s/%s\" % (env.proj_path, env.reqs_path))",
"def CmdPkgUpdatePatch(package, options):\n package.UpdatePatch()",
"def update_fetch(self):\n Popen([\"mount\", \"-t\", \"devfs\", \"devfs\",\n \"{}/releases/{}/root/dev\".format(self.iocroot,\n self.release)]).communicate()\n copy(\"/etc/resolv.conf\",\n \"{}/releases/{}/root/etc/resolv.conf\".format(self.iocroot,\n self.release))\n\n # TODO: Check for STABLE/PRERELEASE/CURRENT/BETA if we support those.\n # TODO: Fancier.\n self.lgr.info(\"\\n* Updating {} to the latest patch level... \".format(\n self.release))\n\n os.environ[\"UNAME_r\"] = self.release\n os.environ[\"PAGER\"] = \"/bin/cat\"\n new_root = \"{}/releases/{}/root\".format(self.iocroot, self.release)\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(new_root)):\n # 10.1-RELEASE and under have a interactive check\n if float(self.release.partition(\"-\")[0][:5]) <= 10.1:\n with NamedTemporaryFile(delete=False) as tmp_conf:\n conf = \"{}/usr/sbin/freebsd-update\".format(new_root)\n with open(conf) as update_conf:\n for line in update_conf:\n tmp_conf.write(re.sub(\"\\[ ! -t 0 \\]\", \"false\",\n line))\n\n os.chmod(tmp_conf.name, 0o755)\n Popen([tmp_conf.name, \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n os.remove(tmp_conf.name)\n else:\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"install\"], stdout=PIPE, stderr=PIPE).communicate()\n\n try:\n # Why this sometimes doesn't exist, we may never know.\n os.remove(\"{}/releases/{}/root/etc/resolv.conf\".format(\n self.iocroot, self.release))\n except OSError:\n pass\n\n Popen([\"umount\", \"{}/releases/{}/root/dev\".format(\n self.iocroot, self.release)]).communicate()",
"def unpatch():\n # Only patch once\n global _patched\n if not _patched:\n return\n _patched = False\n\n # 3.4 -> 3.8\n # DEV: Explicitly stop at 3.8 in case the functions we are patching change in any way,\n # we need to validate them before adding support here\n if (3, 4) <= sys.version_info <= (3, 8):\n import importlib\n\n if isinstance(importlib._bootstrap._find_and_load_unlocked, wrapt.FunctionWrapper):\n setattr(\n importlib._bootstrap,\n \"_find_and_load_unlocked\",\n importlib._bootstrap._find_and_load_unlocked.__wrapped__,\n )\n if isinstance(importlib.reload, wrapt.FunctionWrapper):\n setattr(importlib, \"reload\", importlib.reload.__wrapped__)\n\n # 2.7\n # DEV: Slightly more direct approach\n elif sys.version_info >= (2, 7):\n __builtins__[\"__import__\"] = ORIGINAL_IMPORT\n if isinstance(__builtins__[\"reload\"], wrapt.FunctionWrapper):\n __builtins__[\"reload\"] = __builtins__[\"reload\"].__wrapped__",
"def version_XYZ(monkeypatch):\n non_patched_fetch_metadata = resolve_config.fetch_metadata\n\n def fetch_metadata(user=None, user_args=None, user_script_config=None):\n metadata = non_patched_fetch_metadata(user, user_args, user_script_config)\n metadata[\"orion_version\"] = \"XYZ\"\n return metadata\n\n monkeypatch.setattr(resolve_config, \"fetch_metadata\", fetch_metadata)\n\n non_patched_update_metadata = resolve_config.update_metadata\n\n def update_metadata(metadata):\n metadata = non_patched_update_metadata(metadata)\n metadata[\"orion_version\"] = \"XYZ\"\n return metadata\n\n monkeypatch.setattr(resolve_config, \"update_metadata\", update_metadata)",
"def apply_patches():\n with open(os.path.join(os.getcwd(), 'utils', 'sdk.patch'), 'r') as fin:\n subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)\n with open(os.path.join(SRCDIR, 's-video_sgx.patch'), 'r') as fin:\n subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)",
"def test_create_empty_patch():\n _p = Patch('some_patch_name')",
"def patch(ctx, tool, dir, remove):\n from nf_core.modules import ModulePatch\n\n try:\n module_patch = ModulePatch(\n dir,\n ctx.obj[\"modules_repo_url\"],\n ctx.obj[\"modules_repo_branch\"],\n ctx.obj[\"modules_repo_no_pull\"],\n )\n if remove:\n module_patch.remove(tool)\n else:\n module_patch.patch(tool)\n except (UserWarning, LookupError) as e:\n log.error(e)\n sys.exit(1)",
"def setup_package():\n setup(**bootstrap_cfg())",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def patch():\n # This should never cause their application to not load\n try:\n _patch()\n except Exception:\n log.warning(\"Failed to patch module importing, import hooks will not work\", exc_info=True)",
"def update_reqs():\n req_path = os.path.join(site_path, 'setup', 'requirements.txt')\n run('%s/bin/pip install -E %s -r %s' % (venv, venv, req_path))",
"def pkgdir(tmpdir, monkeypatch):\n cfile = tmpdir.mkdir('debian').join('changelog')\n text = \"\"\"\ntestpkg (1.1.0-1) stable; urgency=medium\n\n * update to 1.1.0\n * other rad packaging updates\n * even more cool packaging updates that take a lot of text to describe so\n the change wraps on multiple lines\n\n -- Ken Dreyer <kdreyer@redhat.com> Tue, 06 Jun 2017 14:46:37 -0600\n\ntestpkg (1.0.0-2redhat1) stable; urgency=medium\n\n * update to 1.0.0 (rhbz#123)\n\n -- Ken Dreyer <kdreyer@redhat.com> Mon, 05 Jun 2017 13:45:36 -0600\n\"\"\".lstrip(\"\\n\")\n cfile.write(text)\n monkeypatch.chdir(tmpdir)\n return tmpdir",
"def _monkeypatch_environment(monkeypatch):\n with tempfile.TemporaryDirectory() as d:\n monkeypatch.setitem(os.environ, 'HOME', d)\n yield d",
"def base_pkg():\r\n pkg = importlib.import_module(\"{{ cookiecutter.project_slug }}\")\r\n return importlib.reload(pkg)",
"def prepare():\n packager = get_packager()\n if packager == APT:\n sudo('apt-get update')\n elif package == YUM:\n sudo('yum update')\n else:\n raise Exception, 'Unknown packager: %s' % (packager,)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for updating labels. | def _ConstructLabelsPatch(clear_labels,
remove_labels,
update_labels,
release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
env_cls = messages.Environment
entry_cls = env_cls.LabelsValue.AdditionalProperty
def _BuildEnv(entries):
return env_cls(labels=env_cls.LabelsValue(additionalProperties=entries))
return command_util.BuildPartialUpdate(clear_labels, remove_labels,
update_labels, 'labels', entry_cls,
_BuildEnv) | [
"def UpdateLabels(unused_ref, args, patch_request):\n labels_diff = labels_util.Diff.FromUpdateArgs(args)\n if labels_diff.MayHaveUpdates():\n patch_request = command_util.AddFieldToUpdateMask('labels', patch_request)\n messages = api_util.GetMessagesModule(args.calliope_command.ReleaseTrack())\n app_connector_msg = GetVersionedConnectorReq(args, patch_request)\n if app_connector_msg is None:\n app_connector_msg = GetVersionedConnectorMsg(args, messages)()\n new_labels = labels_diff.Apply(\n GetVersionedConnectorMsg(args, messages).LabelsValue,\n app_connector_msg.labels).GetOrNone()\n if new_labels:\n app_connector_msg.labels = new_labels\n return patch_request",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def _update_labels_on_rep(self, ensure_labels, repo):\n current_labels = {}\n for label in repo.iter_labels():\n current_labels[label.name] = label\n\n for label_name, label_colour in ensure_labels:\n try:\n if label_name not in current_labels.keys():\n print(f\" label add {label_name}\")\n if not self.dry_run:\n label = repo.create_label(label_name, label_colour)\n if label is None:\n raise UserError(\"Unknown error creating label on repository.\")\n current_labels[label_name] = label\n elif current_labels[label_name].color != label_colour:\n current_labels[label_name].update(name=label_name, color=label_colour)\n except GitHubError as ex:\n if ex.code == 422:\n raise UserError(\"Validation filed on create/modify label\")",
"def do_host_device_label_assign(cc, args):\n attributes = utils.args_array_to_list_dict(args.attributes[0])\n parameters = [\"overwrite=\" + str(args.overwrite)]\n host = ihost_utils._find_ihost(cc, args.hostnameorid)\n device = pci_device.find_device(cc, host, args.nameorpciaddr)\n attributes.append({'pcidevice_uuid': device.uuid})\n new_device_labels = cc.device_label.assign(attributes, parameters)\n for p in new_device_labels.device_labels:\n uuid = p['uuid']\n if uuid is not None:\n try:\n device_label = cc.device_label.get(uuid)\n except exc.HTTPNotFound:\n raise exc.CommandError('Host device label not found: %s' % uuid)\n _print_device_label_show(device_label)",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def test_update_labels():\n allure.dynamic.label('user_label', 'very cool')\n pass",
"def patch(self, nml_patch):\n for sec in nml_patch:\n if sec not in self:\n self[sec] = Namelist()\n self[sec].update(nml_patch[sec])",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def test_label_service_replace_labels(self):\n pass",
"def update_labels_of_buildings(\n self,\n add_label_names: list,\n remove_label_names: list,\n building_ids: list,\n inventory_type: str = \"property\",\n ) -> dict:\n if inventory_type == \"property\":\n endpoint = \"labels_property\"\n elif inventory_type == \"tax_lot\":\n endpoint = \"labels_taxlot\"\n else:\n raise ValueError(\"inventory_type must be either property or tax_lot\")\n\n # first make sure that the labels exist\n labels = self.client.list(endpoint=\"labels\")\n # create a label id look up\n label_id_lookup = {label[\"name\"]: label[\"id\"] for label in labels}\n\n # now find the IDs of the labels that we want to add and remove\n add_label_ids = []\n remove_label_ids = []\n for label_name in add_label_names:\n if label_name in label_id_lookup:\n add_label_ids.append(label_id_lookup[label_name])\n else:\n logger.warning(f\"label name {label_name} not found in SEED, skipping\")\n\n for label_name in remove_label_names:\n if label_name in label_id_lookup:\n remove_label_ids.append(label_id_lookup[label_name])\n else:\n logger.warning(f\"label name {label_name} not found in SEED, skipping\")\n\n payload = {\n \"inventory_ids\": building_ids,\n \"add_label_ids\": add_label_ids,\n \"remove_label_ids\": remove_label_ids,\n }\n result = self.client.put(\n None, required_pk=False, endpoint=endpoint, json=payload\n )\n return result",
"def new_labels(self, labels):\n updated_labels = copy(self.labels)\n updated_labels.update(labels)\n return self.__class__(\n key=self.metric_id,\n measurement_unit=self.measurement_unit,\n labels=updated_labels,\n *self.init_args,\n **self.init_kwargs\n )",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def set_pr_labels(pull, labels):\n if not labels or Config().DRY_RUN:\n return\n print(\"Set PR labels:\", labels)\n # set_labels() should accept list but fails with empty \"AssertionError:\"\n pull.set_labels(labels)",
"def test_update_notification(self):\n # The new label should not yet exist.\n with self.assertRaisesRegexp(\n InvalidLabelError, 'The provided label, \".*\", does not match'\n ):\n self.lm.lookupIRI('new test class')\n\n # Create a new class and give it a label.\n newclass = self.ont.createNewClass('OBTO:0013')\n newclass.addLabel('new test class')\n\n # The LabelMap should automatically have the new label.\n self.assertEqual(\n 'http://purl.obolibrary.org/obo/OBTO_0013',\n str(self.lm.lookupIRI('new test class'))\n )",
"def test_create_empty_patch():\n _p = Patch('some_patch_name')",
"def update_label():\n \n # add code here to update the label_var variable (which is displayed in our label)",
"def _ConstructEnvironmentSizePatch(environment_size,\n release_track=base.ReleaseTrack.GA):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(environmentSize=environment_size)\n return 'config.environment_size', messages.Environment(config=config)",
"def update_labels(y, old_cli2new_cli):\n for i in range(len(y)):\n y[i][0] = old_cli2new_cli[y[i][0]]\n return y",
"def create_labels(mkt: MarketData, labelizer: Labelizer) -> Label:\n lbl = labelizer.create(mkt)\n\n return lbl",
"def init_patch(args, val_loader, model, n_channels):\r\n print('Initializing patch detectors...')\r\n model.eval()\r\n labels_set = set()\r\n\r\n for batches in tqdm(val_loader):\r\n if len(labels_set) >= model.nclass:\r\n break\r\n\r\n data, target = batches\r\n if target.item() in labels_set:\r\n continue\r\n else:\r\n labels_set.add(target.item())\r\n idx = target.item()\r\n\r\n if args.gpu is not None:\r\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n data = Variable(data.to(device))\r\n target = Variable(target.to(device))\r\n\r\n result = torch.zeros(n_channels, model.k * model.nclass)\r\n for j, d in enumerate(data): # data [batchsize, 3, 448, 448]\r\n d = d.unsqueeze(0) # d [1, 3, 448, 448]\r\n center = model(d)\r\n result[:, idx*model.k : idx*model.k + model.k] = center\r\n\r\n return result.view(-1, n_channels)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for updating Airflow configs. | def _ConstructAirflowConfigsPatch(clear_airflow_configs,
remove_airflow_configs,
update_airflow_configs,
release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
env_cls = messages.Environment
airflow_config_overrides_cls = (
messages.SoftwareConfig.AirflowConfigOverridesValue)
entry_cls = airflow_config_overrides_cls.AdditionalProperty
def _BuildEnv(entries):
software_config = messages.SoftwareConfig(
airflowConfigOverrides=airflow_config_overrides_cls(
additionalProperties=entries))
config = messages.EnvironmentConfig(softwareConfig=software_config)
return env_cls(config=config)
return command_util.BuildPartialUpdate(
clear_airflow_configs, remove_airflow_configs, update_airflow_configs,
'config.software_config.airflow_config_overrides', entry_cls, _BuildEnv) | [
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def test_runtime_envs_update(self):\n pass",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def setup_environment():",
"def patch_using_env(self):\n if self.cred_properties:\n credentials_config = self.cred_properties\n\n user = getenv(\"HERE_USER_ID\") or credentials_config[\"user\"]\n client = getenv(\"HERE_CLIENT_ID\") or credentials_config[\"client\"]\n key = (\n getenv(\"HERE_ACCESS_KEY_ID\")\n or getenv(\"HERE_ACCESS_KEY\")\n or credentials_config[\"key\"]\n )\n secret = (\n getenv(\"HERE_ACCESS_KEY_SECRET\")\n or getenv(\"HERE_ACCESS_SECRET\")\n or credentials_config[\"secret\"]\n )\n endpoint = (\n getenv(\"HERE_TOKEN_ENDPOINT_URL\")\n or getenv(\"HERE_TOKEN_ENDPOINT\")\n or credentials_config[\"endpoint\"]\n )\n credentials_config[\"user\"] = user\n credentials_config[\"client\"] = client\n credentials_config[\"key\"] = key\n credentials_config[\"secret\"] = secret\n credentials_config[\"endpoint\"] = endpoint",
"def _patch_env(**environs: str):\n # Adapted loosely from https://stackoverflow.com/a/34333710\n # Capture the original environ values\n original_environs = {k: os.environ.get(k) for k in environs}\n\n # Patch the environment\n for k, v in environs.items():\n os.environ[k] = v\n try:\n # Run the context manager\n yield\n finally:\n # Restore the original environ values\n for k, v in original_environs.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def do_environment_apps_edit(mc, args):\n\n jp_obj = None\n if not args.filename:\n jp_obj = json.load(sys.stdin)\n else:\n with open(args.filename) as fpatch:\n jp_obj = json.load(fpatch)\n\n jpatch = jsonpatch.JsonPatch(jp_obj)\n\n environment_id = args.id\n session_id = args.session_id\n environment = mc.environments.get(environment_id, session_id)\n\n object_model = jpatch.apply(environment.services)\n utils.traverse_and_replace(object_model)\n\n mc.services.put(\n environment_id,\n path='/',\n data=jpatch.apply(environment.services),\n session_id=session_id)",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def apply_env():\n\n for node, val in SUPPORTED_ENV.iteritems():\n for param in val:\n env_var = (CONFIG['env_pfx'] + '_' + node + '_' + param).upper()\n env_value = os.environ.get(env_var)\n if env_value is not None:\n CONFIG[node][param] = env_value",
"def expanded_env_dict():\n return generate_expanded_env_dict()",
"def setup_env(app_dir, app_id, version, module_id, remote_api=False):\n # GCS library behaves differently when running under remote_api. It uses\n # SERVER_SOFTWARE to figure this out. See cloudstorage/common.py, local_run().\n if remote_api:\n os.environ['SERVER_SOFTWARE'] = 'remote_api'\n else:\n os.environ['SERVER_SOFTWARE'] = 'Development yo dawg/1.0'\n if app_dir:\n app_id = app_id or Application(app_dir).app_id\n version = version or 'default-version'\n if app_id:\n os.environ['APPLICATION_ID'] = app_id\n if version:\n os.environ['CURRENT_VERSION_ID'] = '%s.%d' % (\n version, int(time.time()) << 28)\n if module_id:\n os.environ['CURRENT_MODULE_ID'] = module_id",
"def _GetEnvChanges(args):\n return config_changes.EnvVarLiteralChanges(\n updates=_StripKeys(\n getattr(args, 'update_env_vars', None)\n or args.set_env_vars\n or args.env_vars_file\n or {}\n ),\n removes=_MapLStrip(getattr(args, 'remove_env_vars', None) or []),\n clear_others=bool(\n args.set_env_vars or args.env_vars_file or args.clear_env_vars\n ),\n )",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def _get_environment(\n self,\n cluster_kwargs: Dict[str, Any],\n adapt_kwargs: Dict[str, Any],\n ):\n\n local_tmp_file = \"/tmp/prefect-flow-run.yaml\"\n with open(local_tmp_file, \"w\") as f:\n YAML().dump(self._flow_run_job_spec, stream=f)\n\n # saturn_flow_id is used by Saturn's custom Prefect agent\n k8s_environment = KubernetesJobEnvironment(\n metadata={\"saturn_flow_id\": self.flow_id, \"image\": self.image},\n executor=DaskExecutor(\n cluster_class=\"dask_saturn.SaturnCluster\",\n cluster_kwargs=cluster_kwargs,\n adapt_kwargs=adapt_kwargs,\n ),\n job_spec_file=local_tmp_file,\n labels=self._saturn_flow_labels,\n unique_job_name=True,\n )\n\n # patch command and args to run the user's start script\n new_command = [\"/bin/bash\", \"-ec\"]\n k8s_environment._job_spec[\"spec\"][\"template\"][\"spec\"][\"containers\"][0][\n \"command\"\n ] = new_command\n\n args_from_prefect = k8s_environment._job_spec[\"spec\"][\"template\"][\"spec\"][\"containers\"][\n 0\n ].get(\"args\", [])\n args_from_prefect = \" \".join(args_from_prefect)\n new_args = f\"source /home/jovyan/.saturn/start_wrapper.sh; {args_from_prefect}\"\n k8s_environment._job_spec[\"spec\"][\"template\"][\"spec\"][\"containers\"][0][\"args\"] = [new_args]\n\n return k8s_environment",
"def _make_pod_envconfig(\n config: Dict[str, Any], relation_state: Dict[str, Any]\n) -> Dict[str, Any]:\n envconfig = {\n # General configuration\n \"ALLOW_ANONYMOUS_LOGIN\": \"yes\",\n \"OSMLCM_GLOBAL_LOGLEVEL\": config[\"log_level\"],\n # RO configuration\n \"OSMLCM_RO_HOST\": relation_state[\"ro_host\"],\n \"OSMLCM_RO_PORT\": relation_state[\"ro_port\"],\n \"OSMLCM_RO_TENANT\": \"osm\",\n # Kafka configuration\n \"OSMLCM_MESSAGE_DRIVER\": \"kafka\",\n \"OSMLCM_MESSAGE_HOST\": relation_state[\"message_host\"],\n \"OSMLCM_MESSAGE_PORT\": relation_state[\"message_port\"],\n # Database configuration\n \"OSMLCM_DATABASE_DRIVER\": \"mongo\",\n \"OSMLCM_DATABASE_URI\": relation_state[\"database_uri\"],\n \"OSMLCM_DATABASE_COMMONKEY\": config[\"database_commonkey\"],\n # Storage configuration\n \"OSMLCM_STORAGE_DRIVER\": \"mongo\",\n \"OSMLCM_STORAGE_PATH\": \"/app/storage\",\n \"OSMLCM_STORAGE_COLLECTION\": \"files\",\n \"OSMLCM_STORAGE_URI\": relation_state[\"database_uri\"],\n # VCA configuration\n \"OSMLCM_VCA_HOST\": config[\"vca_host\"],\n \"OSMLCM_VCA_PORT\": config[\"vca_port\"],\n \"OSMLCM_VCA_USER\": config[\"vca_user\"],\n \"OSMLCM_VCA_PUBKEY\": config[\"vca_pubkey\"],\n \"OSMLCM_VCA_SECRET\": config[\"vca_password\"],\n \"OSMLCM_VCA_CACERT\": config[\"vca_cacert\"],\n \"OSMLCM_VCA_CLOUD\": config[\"vca_cloud\"],\n \"OSMLCM_VCA_K8S_CLOUD\": config[\"vca_k8s_cloud\"],\n }\n\n if \"vca_apiproxy\" in config and config[\"vca_apiproxy\"]:\n envconfig[\"OSMLCM_VCA_APIPROXY\"] = config[\"vca_apiproxy\"]\n\n return envconfig",
"def update(env, zname, **fter):\n a = _amod_settings(env, zname, update_one, fter)\n logger.info('update(): %r', a)",
"def init_environ(self):\n\t\t#workdir = wpre + projectname + '/' + setname + '/'\n\t\tself.config['pdict'] = {}\n\t\t#self.config['workdir'] = workdir\n\n\t\tself.config['solvent'] = 'water'\n\t\tself.config['interface'] = 'electronic'\n\t\tself.config['diffuse'] = 'none'",
"def extension_environ(env_config_path, monkeypatch):\n monkeypatch.setattr(serverextension, \"ENV_CONFIG_PATH\", [str(env_config_path)])",
"def create_dev_environment(self, svn=False, git=True):\n\n package_list = []\n if svn:\n package_list.append('svn')\n if git:\n package_list.append('git')\n package_list.append('git-lfs')\n\n prefix = os.path.join(self.root_dir, 'dev_env')\n command = 'create'\n text_messages = ['Installing', 'installation into']\n if prefix in self.environments:\n command = 'update'\n text_messages = ['Updating', 'update of']\n\n command_list = [self.conda_exe, command, '-y', '-c', 'conda-forge',\n '--prefix', prefix] + package_list\n\n print('-'*79, file=self.log)\n print('{text} extra development environment containing:'.format(text=text_messages[0]),\n file=self.log)\n for package in package_list:\n print(' -', package, file=self.log)\n\n self._retry_command(command_list, text_messages[1], prefix, verbose=True)\n\n print('-'*79, file=self.log)\n return prefix"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for updating environment variables. Note that environment variable updates do not support partial update masks unlike other map updates due to comments in (b/78298321). For this reason, we need to retrieve the Environment, apply an update on EnvVariable dictionary, and patch the entire dictionary. The potential race condition here (environment variables being updated between when we retrieve them and when we send patch request) is not a concern since environment variable updates take 5 mins to complete, and environments cannot be updated while already in the updating state. | def _ConstructEnvVariablesPatch(env_ref,
clear_env_variables,
remove_env_variables,
update_env_variables,
release_track=base.ReleaseTrack.GA):
env_obj = environments_api_util.Get(env_ref, release_track=release_track)
initial_env_var_value = env_obj.config.softwareConfig.envVariables
initial_env_var_list = (
initial_env_var_value.additionalProperties
if initial_env_var_value else [])
messages = api_util.GetMessagesModule(release_track=release_track)
env_cls = messages.Environment
env_variables_cls = messages.SoftwareConfig.EnvVariablesValue
entry_cls = env_variables_cls.AdditionalProperty
def _BuildEnv(entries):
software_config = messages.SoftwareConfig(
envVariables=env_variables_cls(additionalProperties=entries))
config = messages.EnvironmentConfig(softwareConfig=software_config)
return env_cls(config=config)
return ('config.software_config.env_variables',
command_util.BuildFullMapUpdate(clear_env_variables,
remove_env_variables,
update_env_variables,
initial_env_var_list, entry_cls,
_BuildEnv)) | [
"def get_updated_env(env, update):\n # If an update env var references itself, merge the original env value\n # into the update value.\n # (This is to somewhat generalize the \"PATH=/foo:$PATH\" special case.)\n for k in update:\n if k not in env or not isinstance(update[k], str) or \\\n not isinstance(env[k], str):\n continue\n update[k] = re.sub(r\"\\${?\" + k + r\"}?(:|\\s|$)\", env[k] + r\"\\1\",\n update[k])\n env_ = updated(env, update)\n # pop those explicitly set to None\n for e in list(env_):\n if env_[e] is None:\n del env_[e]\n return env_",
"def _patch_env(**environs: str):\n # Adapted loosely from https://stackoverflow.com/a/34333710\n # Capture the original environ values\n original_environs = {k: os.environ.get(k) for k in environs}\n\n # Patch the environment\n for k, v in environs.items():\n os.environ[k] = v\n try:\n # Run the context manager\n yield\n finally:\n # Restore the original environ values\n for k, v in original_environs.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def update_env(*remove, **update):\n orig_env = copy.deepcopy(os.environ)\n try:\n [os.environ.pop(r) for r in remove]\n os.environ.update(update)\n yield\n finally:\n os.environ = copy.deepcopy(orig_env)",
"def apply_environment_substitutions(env):\n vardefs = VariableSubstitution.apply_variable_substitutions_and_merge_repeatedly(env.variables)\n\n deps = apply_substitutions_to_dependencies(env.dependencies, vardefs)\n\n for d in deps:\n assert \"$\" not in d, \"Environment %s produced malformed dependency %s\" % (env.environment_name, d)\n\n if env.matches.Environment:\n return env._withReplacement(\n image=substitute_variables_in_image(env.image, vardefs),\n variables=vardefs,\n dependencies=deps\n )\n else:\n return env._withReplacement(\n setup_script_contents=VariableSubstitution.substitute_variables(env.setup_script_contents, vardefs),\n variables=vardefs,\n dependencies=deps\n )",
"def do_environment_apps_edit(mc, args):\n\n jp_obj = None\n if not args.filename:\n jp_obj = json.load(sys.stdin)\n else:\n with open(args.filename) as fpatch:\n jp_obj = json.load(fpatch)\n\n jpatch = jsonpatch.JsonPatch(jp_obj)\n\n environment_id = args.id\n session_id = args.session_id\n environment = mc.environments.get(environment_id, session_id)\n\n object_model = jpatch.apply(environment.services)\n utils.traverse_and_replace(object_model)\n\n mc.services.put(\n environment_id,\n path='/',\n data=jpatch.apply(environment.services),\n session_id=session_id)",
"def test_runtime_envs_update(self):\n pass",
"def update_compute_environment(self, computeEnvironment: str, state: str = None, computeResources: Dict = None, serviceRole: str = None) -> Dict:\n pass",
"def update_environment(self, environment_update_parameter, project, environment_id):\n route_values = {}\n if project is not None:\n route_values['project'] = self._serialize.url('project', project, 'str')\n if environment_id is not None:\n route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')\n content = self._serialize.body(environment_update_parameter, 'EnvironmentUpdateParameter')\n response = self._send(http_method='PATCH',\n location_id='8572b1fc-2482-47fa-8f74-7e3ed53ee54b',\n version='6.0-preview.1',\n route_values=route_values,\n content=content)\n return self._deserialize('EnvironmentInstance', response)",
"def do_environment_model_edit(mc, args):\n jp_obj = None\n if not args.filename:\n jp_obj = json.load(sys.stdin)\n else:\n with open(args.filename) as fpatch:\n jp_obj = json.load(fpatch)\n\n if not isinstance(jp_obj, list):\n raise exceptions.CommandError('JSON-patch must be a list of changes')\n for change in jp_obj:\n if 'op' not in change or 'path' not in change:\n raise exceptions.CommandError('Every change in JSON-patch must '\n 'contain \"op\" and \"path\" keys')\n op = change['op']\n if op not in ['add', 'replace', 'remove']:\n raise exceptions.CommandError('The value of \"op\" item must be '\n '\"add\", \"replace\" or \"remove\", '\n 'got {0}'.format(op))\n if op != 'remove' and 'value' not in change:\n raise exceptions.CommandError('\"add\" or \"replace\" change in '\n 'JSON-patch must contain \"value\" '\n 'key')\n session_id = args.session_id\n new_model = mc.environments.update_model(args.id, jp_obj, session_id)\n print(utils.json_formatter(new_model))",
"def update_environ(variables):\n for name, value in variables.items():\n compliant_name = name.upper().replace('-', '_')\n # logger.debug(\"set env var {} = {}\".format(compliant_name, value))\n os.environ.setdefault(compliant_name, value)",
"def env_update():\n if request.forms.get('confirmed') == 'no':\n env_name = request.forms.get('update_env')\n env_data_raw = requests.get(url='http://localhost:8080/v1.0/env/%s' % env_name)\n env_data_filtered = env_data_raw.json()[env_name]\n mano_list_raw = requests.get(url='http://localhost:8080/v1.0/mano')\n vim_list_raw = requests.get(url='http://localhost:8080/v1.0/vim')\n em_list_raw = requests.get(url='http://localhost:8080/v1.0/em')\n traffic_list_raw = requests.get(url='http://localhost:8080/v1.0/traffic')\n vnf_list_raw = requests.get(url='http://localhost:8080/v1.0/vnf')\n env_list = {\n 'mano': mano_list_raw.json().keys(),\n 'vim': vim_list_raw.json().keys(),\n 'em': em_list_raw.json().keys(),\n 'traffic': traffic_list_raw.json().keys(),\n 'vnf': vnf_list_raw.json().keys()\n }\n for element in ['mano', 'vim', 'em', 'traffic', 'vnf']:\n env_list[element].insert(0, '')\n if element in env_data_filtered.keys():\n if env_data_filtered[element] in env_list[element]:\n env_list[element].remove(env_data_filtered[element])\n env_list[element].insert(0, env_data_filtered[element])\n else:\n continue\n return template('env_update.html', env_name=env_name, env_list=env_list)\n else:\n env_name = request.forms.get('env_name')\n new_env = {}\n for element in ['mano', 'vim', 'em', 'traffic', 'vnf']:\n if request.forms.get(element) != '':\n new_env[element] = request.forms.get(element)\n requests.put(url='http://localhost:8080/v1.0/env/%s' % env_name, json=new_env)\n return index()",
"def expand_vars(self, in_table=None, old_key=None, update=True):\n if in_table:\n t = in_table\n else:\n t = self.data\n if not update:\n t = deepcopy(t)\n for key, value in t.items():\n # If we get a dict, recurse\n if isinstance(value, dict):\n if old_key:\n new_key = '%s.%s' % (old_key, key)\n else:\n new_key = key\n self.expand_vars(in_table=value, old_key=new_key)\n elif isinstance(value, str):\n # If we get string, first replace environment variables\n value = re.sub('\\$([A-z0-9-_]+)', get_env_variable, value)\n t[key] = value\n return t",
"def do_env_template_update(mc, args):\n try:\n env_template = mc.env_templates.update(args.id, args.name)\n except common_exceptions.HTTPNotFound:\n raise exceptions.CommandError(\"Environment template %s not found\"\n % args.id)\n _print_env_template_list([env_template])",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def update_environment(environment_id, file):\n _confirm_account()\n\n evolv_client = EvolvClient(EVOLV_CONFIG)\n environment = evolv_client.get_environment(environment_id, account_id=EVOLV_ACCOUNT_ID)\n if not environment:\n raise Exception(\"Failed to retrieve the previous environments.\")\n\n response = evolv_client.update_environment(environment_id=environment_id, name=environment['name'],\n content=file.read().decode('utf-8'),\n content_type=APPLICATION_YAML\n if '.yml' in file.name else APPLICATION_JSON,\n account_id=EVOLV_ACCOUNT_ID)\n _print_dict(response)",
"def fix_env_conf(env, root_path=None):\n for name, value in env.items():\n if isinstance(value, dict):\n # if value is dict, think of it as of a (sub)environment\n # within current environment\n # since it can also contain envvars/relative paths,\n # recursively update (sub)environment as well\n env[name] = fix_env_conf(value, root_path=root_path)\n else:\n env[name] = fix_path(value, name, root_path=root_path)\n return env",
"def update(env, zname, **fter):\n a = _amod_settings(env, zname, update_one, fter)\n logger.info('update(): %r', a)",
"def update_environment(self, environment_id, new_name):\n\n return self.murano_client.environments.update(environment_id, new_name)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for the web server plugins mode. | def _ConstructWebServerPluginsModePatch(
support_web_server_plugins, release_track=base.ReleaseTrack.GA
):
messages = api_util.GetMessagesModule(release_track=release_track)
software_config = messages.SoftwareConfig()
if support_web_server_plugins:
software_config.webServerPluginsMode = (
messages.SoftwareConfig.WebServerPluginsModeValueValuesEnum.PLUGINS_ENABLED
)
else:
software_config.webServerPluginsMode = (
messages.SoftwareConfig.WebServerPluginsModeValueValuesEnum.PLUGINS_DISABLED
)
config = messages.EnvironmentConfig(softwareConfig=software_config)
return 'config.software_config.web_server_plugins_mode', messages.Environment(
config=config) | [
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def extension_environ(env_config_path, monkeypatch):\n monkeypatch.setattr(serverextension, \"ENV_CONFIG_PATH\", [str(env_config_path)])",
"def setup_environment():",
"def _ConstructWebServerMachineTypePatch(web_server_machine_type, release_track):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(\n webServerConfig=messages.WebServerConfig(\n machineType=web_server_machine_type))\n return 'config.web_server_config.machine_type', messages.Environment(\n config=config)",
"def populate_jinja_environment(self, env):\n env.filters['registry'] = self.registry\n env.globals['flattened_url'] = self.flattened_url\n env.globals['new_etcd_discovery_token'] = self.new_etcd_discovery_token\n env.globals['load_coreos_ami'] = self.load_coreos_ami_id\n env.globals['dockersystemd'] = self._dockersystemd_template",
"def create():\n return _DynamicEnvironment()",
"def env_creator(config: dict):\n \n from gridworld import MultiAgentEnv\n\n return MultiAgentEnv(**config)",
"def inject_env():\n\n return dict(site.config, current_menu=current_menu)",
"def environment_info(self):\n\n return {\n \"application_environment\": {\n \"framework\": \"pylons\",\n \"env\": dict(os.environ),\n \"language\": \"python\",\n \"language_version\": sys.version.replace('\\n', ''),\n \"application_root_directory\": self.project_root()\n },\n \"client\": {\n \"name\": \"pylons-exceptional\",\n \"version\": __version__,\n \"protocol_version\": EXCEPTIONAL_PROTOCOL_VERSION\n }\n }",
"def production():\n env.root = root = '/opt/www.commcarehq.org_project'\n env.virtualenv_root = _join(root, 'env/cchq_www')\n env.code_root = _join(root, 'src/commcare-hq')\n env.pre_code_root = _join(root, 'src/_commcare-hq')\n env.code_branch = 'master'\n env.sudo_user = 'cchqwww'\n env.hosts = ['10.84.168.241']\n env.environment = 'production'\n env.user = prompt(\"Username: \", default=env.user)\n env.restart_server = True",
"def _monkeypatch_environment(monkeypatch):\n with tempfile.TemporaryDirectory() as d:\n monkeypatch.setitem(os.environ, 'HOME', d)\n yield d",
"def get_env():\n\n obj = getattr(world, 'env', None) or world.webenv_class(world)\n\n if not obj:\n raise Warning(u\"Lettuce-Web environment not initialized\")\n return None\n\n world.env = obj\n return obj",
"def make_env(args):\r\n scenario = scenarios.load(args.env_name + \".py\").Scenario()\r\n world = scenario.make_world()\r\n done_callback = None\r\n\r\n env = MultiAgentEnv(\r\n world,\r\n reset_callback=scenario.reset_world,\r\n reward_callback=scenario.reward,\r\n observation_callback=scenario.observation,\r\n done_callback=done_callback)\r\n\r\n assert env.discrete_action_space is False, \"For cont. action, this flag must be False\"\r\n\r\n return env",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def apply_patches():\n with open(os.path.join(os.getcwd(), 'utils', 'sdk.patch'), 'r') as fin:\n subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)\n with open(os.path.join(SRCDIR, 's-video_sgx.patch'), 'r') as fin:\n subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def fixture_fake_env(monkeypatch):\n environ = {}\n monkeypatch.setattr(os, \"environ\", environ)\n return environ"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for environment image version. | def _ConstructImageVersionPatch(update_image_version,
release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
software_config = messages.SoftwareConfig(imageVersion=update_image_version)
config = messages.EnvironmentConfig(softwareConfig=software_config)
return 'config.software_config.image_version', messages.Environment(
config=config) | [
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def _ConstructEnvironmentSizePatch(environment_size,\n release_track=base.ReleaseTrack.GA):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(environmentSize=environment_size)\n return 'config.environment_size', messages.Environment(config=config)",
"def _get_version_from_container_config_env(self, attrs: dict) -> str:\n environment = attrs.get(\"Config\").get(\"Env\")\n for var in environment:\n if \"\".join(var).split(\"=\")[0] == self.version_var:\n version = \"\".join(var).split(\"=\")[1]\n return version\n return \"\"",
"def set_image_tagged_version(image, version='latest'):\n return DOCKER_IMAGE_VERSION_DELIM.join([image, version])",
"def new_env_name(self):\n pkg_name_version = self.package + '-' + self.package_version\n if self.environments:\n if self.package not in self.environments.values():\n env_name = self.package\n elif pkg_name_version not in self.environments.values():\n env_name = pkg_name_version\n else:\n for i in range(1, 1000):\n new_pkg_name = pkg_name_version + '_' + str(i)\n if new_pkg_name not in self.environments.values():\n env_name = new_pkg_name\n break\n else:\n env_name = self.package\n return env_name",
"def create_dev_environment(self, svn=False, git=True):\n\n package_list = []\n if svn:\n package_list.append('svn')\n if git:\n package_list.append('git')\n package_list.append('git-lfs')\n\n prefix = os.path.join(self.root_dir, 'dev_env')\n command = 'create'\n text_messages = ['Installing', 'installation into']\n if prefix in self.environments:\n command = 'update'\n text_messages = ['Updating', 'update of']\n\n command_list = [self.conda_exe, command, '-y', '-c', 'conda-forge',\n '--prefix', prefix] + package_list\n\n print('-'*79, file=self.log)\n print('{text} extra development environment containing:'.format(text=text_messages[0]),\n file=self.log)\n for package in package_list:\n print(' -', package, file=self.log)\n\n self._retry_command(command_list, text_messages[1], prefix, verbose=True)\n\n print('-'*79, file=self.log)\n return prefix",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def environment_artifact(self, env_name):\n\n raise NotImplementedError",
"def python_version(self):\n if self.environment_yaml:\n return super(PythonBuildPack, self).python_version\n else:\n return super(RBuildPack, self).python_version",
"def create_new_patch_release():\n try:\n last_version_number = get_last_version()\n except subprocess.CalledProcessError as err:\n if err.stderr.decode(\"utf8\").startswith(\"HTTP 404:\"):\n # The project doesn't have any releases yet.\n new_version_number = \"0.0.1\"\n else:\n raise\n else:\n new_version_number = bump_patch_number(last_version_number)\n\n subprocess.run(\n [\"gh\", \"release\", \"create\", \"--generate-notes\", new_version_number],\n check=True,\n )",
"def create_docker_image_language_version(version):\n if version == PythonPackageConfigSection.UNIVERSAL_LANGUAGE_VERSION:\n image_version = \"3\"\n elif version in (\"2\", \"3\"):\n image_version = version\n else:\n raise Exception(\"Unexpected version passed into \" +\n \"create_docker_image_language_version function\")\n return image_version",
"def get_env_over(env):\n fname = env.config['tripleo']['overcloud_env']\n return util.parse_env_file(fname, '^OS_|_VERSION=')",
"def _get_version_from_image_config(self, conf: ImageConfig) -> str:\r\n env: List[str] = conf.config.get(\"Env\")\r\n for var in env:\r\n if \"\".join(var).split(\"=\")[0] == self.version_var:\r\n version = \"\".join(var).split(\"=\")[1]\r\n return version\r\n return \"\"",
"def create_environment(self, builder='cctbx', filename=None, python=None,\n copy=False, offline=False):\n\n # handles check for choices in case parser is not available\n if builder not in self.env_locations:\n raise RuntimeError(\"\"\"\nThe builder, {builder}, is not recognized. The available builders are,\n{builders}\n\"\"\".\\\nformat(builder=builder, builders=', '.join(sorted(self.env_locations.keys()))))\n\n if self.conda_base is None:\n raise RuntimeError(\"\"\"A conda installation is not available.\"\"\")\n\n if builder == \"dials\" and python in (\"27\", \"36\"):\n builder = \"dials-old\"\n\n if filename is None:\n filename = os.path.join(\n self.root_dir, 'modules', self.env_locations[builder])\n if python is not None:\n if python not in ['27', '37', '38', '39', '310']:\n raise RuntimeError(\n \"\"\"Only Python 2.7, 3.7, 3.8, 3.9, and 3.10 are currently supported.\"\"\")\n filename = filename.replace('PYTHON_VERSION', python)\n else:\n filename = os.path.abspath(filename)\n\n if not os.path.isfile(filename):\n raise RuntimeError(\"\"\"\\\nThe file, {filename}, is not available. Please contact the developers to make \\\nsure that the requested version of Python is supported for the {builder} \\\nbuilder.\"\"\".format(filename=filename, builder=builder))\n\n yaml_format = False\n if filename.endswith('yml') or filename.endswith('yaml'):\n yaml_format = True\n\n # make a new environment directory\n if self.conda_env is None:\n name = 'conda_base'\n prefix = os.path.join(self.root_dir, name)\n # or use the existing one\n else:\n prefix = os.path.abspath(self.conda_env)\n\n # compare time stamps of the filename and environment directory\n # only install/update if the time stamp of the filename is more recent\n file_stats = None\n env_stats = None\n if os.path.exists(filename):\n file_stats = os.stat(filename)\n if os.path.exists(prefix):\n env_stats = os.stat(prefix)\n\n if env_stats is not None and file_stats is not None:\n if env_stats.st_mtime > file_stats.st_mtime:\n print('The environment is newer than the environment file. 
Skipping update.',\n file=self.log)\n return\n\n # install a new environment or update and existing one\n if prefix in self.environments:\n command = 'install'\n if yaml_format:\n command = 'update'\n text_messages = ['Updating', 'update of']\n else:\n command = 'create'\n text_messages = ['Installing', 'installation into']\n command_list = [self.conda_exe, command, '--prefix', prefix,\n '--file', filename]\n if yaml_format:\n command_list.insert(1, 'env')\n if self.system == 'Windows':\n command_list = [os.path.join(self.conda_base, 'Scripts', 'activate'),\n 'base', '&&'] + command_list\n if copy and not yaml_format:\n command_list.append('--copy')\n if offline and not yaml_format:\n command_list.append('--offline')\n if builder in (\"dials\", \"dials-old\", \"xfel\", \"labelit\") and not yaml_format:\n command_list.append(\"-y\")\n if builder in self.env_without_python:\n python_version = tuple(int(i) for i in (python or \"36\"))\n python_requirement = '\"conda-forge::python>=%s.%s,<%s.%s\"' % (\n python_version[0],\n python_version[1],\n python_version[0],\n python_version[1] + 1,\n )\n command_list.append(python_requirement)\n # RuntimeError is raised on failure\n print('{text} {builder} environment with:\\n {filename}'.format(\n text=text_messages[0], builder=builder, filename=filename),\n file=self.log)\n\n self._retry_command(command_list, text_messages[1], prefix, verbose=True)\n\n # on Windows, also download the Visual C++ 2008 Redistributable\n # use the same version as conda-forge\n # https://github.com/conda-forge/vs2008_runtime-feedstock\n if self.system == 'Windows' and prefix.endswith('conda_base'):\n download_file(\n url='https://download.microsoft.com/download/5/D/8/5D8C65CB-C849-4025-8E95-C3966CAFD8AE/vcredist_x64.exe',\n filename=os.path.join(prefix, 'vcredist_x64.exe'))",
"def versioned_mender_image(\n request, prepared_test_build, latest_mender_image, bitbake_variables, bitbake_image\n):\n\n global LAST_BUILD_VERSION\n\n version = request.param\n\n if version == 1:\n pytest.fail()\n\n if (\n version >= 2\n and not version_is_minimum(bitbake_variables, \"mender-artifact\", \"2.0.0\")\n ) or (\n version >= 3\n and not version_is_minimum(bitbake_variables, \"mender-artifact\", \"3.0.0\")\n ):\n pytest.skip(\"Requires version %d of mender-artifact format.\" % version)\n\n if version_is_minimum(bitbake_variables, \"mender-artifact\", \"3.0.0\"):\n default_version = 3\n elif version_is_minimum(bitbake_variables, \"mender-artifact\", \"2.0.0\"):\n default_version = 2\n else:\n default_version = 2\n\n if LAST_BUILD_VERSION != version:\n # Run a separate build for this artifact. This doesn't conflict with the\n # above version because the non-default version ends up in a different\n # directory.\n if version != default_version:\n build_image(\n prepared_test_build[\"build_dir\"],\n prepared_test_build[\"bitbake_corebase\"],\n bitbake_image,\n ['MENDER_ARTIFACT_EXTRA_ARGS = \"-v %d\"' % version],\n )\n else:\n build_image(\n prepared_test_build[\"build_dir\"],\n prepared_test_build[\"bitbake_corebase\"],\n bitbake_image,\n )\n\n LAST_BUILD_VERSION = version\n return (\n version,\n latest_build_artifact(\n request, prepared_test_build[\"build_dir\"], \"core-image*.mender\"\n ),\n )",
"def environment_info(self):\n\n return {\n \"application_environment\": {\n \"framework\": \"pylons\",\n \"env\": dict(os.environ),\n \"language\": \"python\",\n \"language_version\": sys.version.replace('\\n', ''),\n \"application_root_directory\": self.project_root()\n },\n \"client\": {\n \"name\": \"pylons-exceptional\",\n \"version\": __version__,\n \"protocol_version\": EXCEPTIONAL_PROTOCOL_VERSION\n }\n }",
"def create():\n return _DynamicEnvironment()",
"def build_env_name(task, shared_memory, use_image_obs):\n del task\n env_name = \"BlockPushDiscontinuous\"\n\n if use_image_obs:\n env_name = env_name + \"Rgb\"\n\n if shared_memory:\n env_name = \"Shared\" + env_name\n\n env_name = env_name + \"-v0\"\n\n return env_name",
"def test_runtime_envs_update(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
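Editor's note: two of the negatives in the row above (_get_version_from_container_config_env and _get_version_from_image_config) look up a version variable in a Docker-style Env list of KEY=value strings by splitting on '='. The standalone sketch below shows the same lookup using str.partition, which keeps values that themselves contain '=' intact; the example Env list and variable name are made up for illustration and are not part of the dataset.

def get_env_value(env, name):
    """Return the value for name from a list of 'KEY=value' strings, or ''."""
    for entry in env:
        key, sep, value = entry.partition('=')
        if sep and key == name:
            return value
    return ''

# Hypothetical container config, matching the shape used in the snippets above.
env = ['PATH=/usr/bin', 'APP_VERSION=1.4.2=rc1']
print(get_env_value(env, 'APP_VERSION'))  # prints: 1.4.2=rc1
print(get_env_value(env, 'MISSING'))      # prints an empty line
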
Constructs an environment patch for web server network access control. | def _ConstructWebServerAccessControlPatch(web_server_access_control,
release_track):
messages = api_util.GetMessagesModule(release_track=release_track)
config = messages.EnvironmentConfig(
webServerNetworkAccessControl=environments_api_util
.BuildWebServerNetworkAccessControl(web_server_access_control,
release_track))
return 'config.web_server_network_access_control', messages.Environment(
config=config) | [
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def setup_permission(self) :\n cmd = IPNETNSEXEC % ( self.hostname, PermCMD % (ConfigDIR, ConfigDIR))\n runOS(cmd)",
"def _base_environ(self, **request):\n environ = {\n 'HTTP_COOKIE': self.cookies.output(header='', sep='; '),\n 'PATH_INFO': '/',\n 'QUERY_STRING': '',\n 'REMOTE_ADDR': '127.0.0.1',\n 'REQUEST_METHOD': 'GET',\n 'SCRIPT_NAME': '',\n 'SERVER_NAME': 'testserver',\n 'SERVER_PORT': '80',\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'wsgi.version': (1, 0),\n 'wsgi.url_scheme': 'http',\n 'wsgi.errors': self.errors,\n 'wsgi.multiprocess': True,\n 'wsgi.multithread': False,\n 'wsgi.run_once': False,\n }\n environ.update(self.defaults)\n environ.update(request)\n return environ",
"def setup_environment():",
"def create():\n return _DynamicEnvironment()",
"def test_patch_compute_server_setting(self):\n pass",
"def getNetServerConfig(self):\n fqdn, shortName = self.fqdn, self.shortName\n\n if not self.hasDatabase:\n raise RuntimeError(\"Cannot create a netserver for \"\n \"external project %r\" % (shortName,))\n\n cfg = netserver.ServerConfig()\n cfg.authCacheTimeout = self._cfg.authCacheTimeout\n cfg.changesetCacheDir = os.path.join(self._cfg.dataPath, 'cscache')\n cfg.externalPasswordURL = self._cfg.externalPasswordURL\n cfg.logFile = (self._cfg.reposLog\n and os.path.join(self._cfg.logPath, 'repository.log') or None)\n cfg.repositoryDB = None # We open databases ourselves\n cfg.readOnlyRepository = self._cfg.readOnlyRepositories\n # FIXME: Until there is a per-project signature requirement flag, this\n # will have to do.\n if self.isLocalMirror or shortName == 'rmake-repository':\n cfg.requireSigs = False\n else:\n # FIXME: Shim clients will eventually need to sign packages, and\n # all repository traffic will eventually go through this interface\n # as well. But for now we don't and won't sign shim commits, and\n # the primary consumer of this interface is shim clients, so\n # disable signature requirements.\n cfg.requireSigs = False\n #cfg.requireSigs = self._cfg.requireSigs\n cfg.serializeCommits = True\n cfg.memCache = self._cfg.memCache\n cfg.memCacheTimeout = self._cfg.memCacheTimeout\n # Make sure cached info is partitioned by role.\n cfg.memCacheUserAuth = True\n\n cfg.serverName = [fqdn]\n cfg.configLine('contentsDir ' + ' '.join(self.contentsDirs))\n cfg.tmpDir = os.path.join(self._cfg.dataPath, 'tmp')\n\n action = (\"%(executable)s -mconary.server.commitaction\"\n \" --repmap='%(repMap)s'\"\n \" --build-label=%(buildLabel)s \"\n \" --username=%(authUser)s \"\n \" --password=%(authPass)s \"\n \" --module='mint.rbuilderaction --config=%(config)s\"\n \" --user=%%(user)s --hostname=%(fqdn)s'\")\n if self.commitEmail:\n action += (\" --module='conary.changemail --user=%%(user)s\"\n \" --from=%(commitFromEmail)s --email=%(commitEmail)s'\")\n for module in self._cfg.commitActionModule:\n action += \" --module='%s'\" % module\n actionDict = {\n 'executable': sys.executable,\n 'config': config.RBUILDER_CONFIG,\n 'repMap': '%s %s' % (fqdn, self.getURL()),\n 'buildLabel': '%s@rpl:1' % (fqdn,),\n 'projectName': shortName,\n 'fqdn': fqdn,\n 'commitFromEmail': self._cfg.commitEmail,\n 'commitEmail': self.commitEmail,\n 'authUser': self._cfg.authUser,\n 'authPass': self._cfg.authPass,\n }\n cfg.commitAction = action % actionDict\n\n return cfg",
"def init_environ():\n\n ENV_VAR_HELP = \"\"\"\n Environment variables explanation:\n - DOCUMENT_ROOT: full filesystem path to html files\n ex: /srv/http/imageboard.example.com/\n - SCRIPT_NAME: url to wakarimasen.py without host part\n ex: /wakarimasen.py\n - SERVER_NAME: hostname of the webserver\n ex: imageboard.example.com\n - SERVER_PORT: port of the webserver (optional)\n ex: 80\n \"\"\"\n\n local.environ.update(os.environ)\n werkzeug.BaseRequest(local.environ)\n\n local.environ.setdefault('waka.rootpath',\n os.path.join('/', config.BOARD_DIR, ''))\n local.environ.setdefault('wsgi.url_scheme', 'http')\n local.environ.setdefault('SERVER_PORT', '80')\n\n required_vars = ['DOCUMENT_ROOT', 'SCRIPT_NAME', 'SERVER_NAME']\n\n for var in required_vars:\n if var not in local.environ:\n print \"Error: %s not in environment\" % (var,)\n print ENV_VAR_HELP\n sys.exit(1)",
"def _create_environ(self, url, method, data, refer, content_type=None):\n environ_args = dict(self._wsgi_server, method=method)\n base_url = self._referrer if refer else self._base_url\n environ_args.update(self._canonicalize_url(url, base_url))\n environ_args.update(self._prep_input(method, data, content_type))\n environ = create_environ(**environ_args)\n if refer and self._referrer:\n environ['HTTP_REFERER'] = self._referrer\n environ.setdefault('REMOTE_ADDR', '127.0.0.1')\n self._cookie_jar.export_to_environ(environ)\n return environ",
"def extension_environ(env_config_path, monkeypatch):\n monkeypatch.setattr(serverextension, \"ENV_CONFIG_PATH\", [str(env_config_path)])",
"def _ConstructVpcConnectivityPatch(\n disable_vpc_connectivity,\n network,\n subnetwork,\n network_attachment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n node_config = messages.NodeConfig()\n config = messages.EnvironmentConfig(nodeConfig=node_config)\n update_mask = None\n if disable_vpc_connectivity:\n update_mask = 'config.node_config.network,config.node_config.subnetwork'\n elif network_attachment:\n update_mask = 'config.node_config.network_attachment'\n node_config.composerNetworkAttachment = network_attachment\n elif network and subnetwork:\n update_mask = 'config.node_config.network,config.node_config.subnetwork'\n node_config.network = network\n node_config.subnetwork = subnetwork\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def _ConstructWebServerMachineTypePatch(web_server_machine_type, release_track):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(\n webServerConfig=messages.WebServerConfig(\n machineType=web_server_machine_type))\n return 'config.web_server_config.machine_type', messages.Environment(\n config=config)",
"def make_environ(extra=None, **kwds):\n environ = {}\n if extra is not None:\n environ.update(extra)\n environ[\"wsgi.version\"] = (1, 0)\n environ[\"wsgi.url_scheme\"] = \"http\"\n environ[\"SERVER_NAME\"] = \"localhost\"\n environ[\"SERVER_PORT\"] = \"80\"\n environ[\"REQUEST_METHOD\"] = \"GET\"\n environ[\"SCRIPT_NAME\"] = \"\"\n environ[\"PATH_INFO\"] = \"/\"\n environ.update(kwds)\n return environ",
"def test_environ(self):\n return create_environ('/test', None)",
"def _env():\n home = _os.environ['HOME']\n root_dir = _os.path.realpath(\n _os.path.join(_os.environ['CLOUDSDK_CONFIG'], '../..'))\n inet_family = 'IPV4_ONLY'\n dev = '/dev/fuse'\n path = '/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:.'\n if len(root_dir) > 1 and not root_dir.startswith('/usr/local/google/'):\n home = _os.path.join(root_dir, home)\n inet_family = 'IPV6_ONLY'\n fum = _os.environ['HOME'].split('mount')[0] + '/mount/alloc/fusermount'\n dev = fum + '/dev/fuse'\n path = path + ':' + fum + '/bin'\n config_dir = _os.path.join(home, '.config', 'Google')\n return _Environment(\n home=home,\n root_dir=root_dir,\n inet_family=inet_family,\n dev=dev,\n path=path,\n config_dir=config_dir)",
"def mock_webserver_service_environment(\n monkeypatch: MonkeyPatch,\n mock_env_makefile: EnvVarsDict,\n mock_env_devel_environment: EnvVarsDict,\n mock_env_Dockerfile_build: EnvVarsDict,\n mock_env_auto_deployer_agent: EnvVarsDict,\n) -> EnvVarsDict:\n # @docker compose config (overrides)\n # TODO: get from docker compose config\n # r'- ([A-Z2_]+)=\\$\\{\\1:-([\\w-]+)\\}'\n\n # - .env-devel + docker-compose service environs\n # hostname: \"{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}\"\n\n # environment:\n # - CATALOG_HOST=${CATALOG_HOST:-catalog}\n # - CATALOG_PORT=${CATALOG_PORT:-8000}\n # - DIAGNOSTICS_MAX_AVG_LATENCY=10\n # - DIAGNOSTICS_MAX_TASK_DELAY=30\n # - DIRECTOR_HOST=${DIRECTOR_HOST:-director}\n # - DIRECTOR_PORT=${DIRECTOR_PORT:-8080}\n # - DIRECTOR_V2_HOST=${DIRECTOR_V2_HOST:-director-v2}\n # - DIRECTOR_V2_PORT=${DIRECTOR_V2_PORT:-8000}\n # - STORAGE_HOST=${STORAGE_HOST:-storage}\n # - STORAGE_PORT=${STORAGE_PORT:-8080}\n # - SWARM_STACK_NAME=${SWARM_STACK_NAME:-simcore}\n # - WEBSERVER_LOGLEVEL=${LOG_LEVEL:-WARNING}\n # env_file:\n # - ../.env\n mock_envs_docker_compose_environment = setenvs_from_dict(\n monkeypatch,\n {\n # Emulates MYVAR=${MYVAR:-default}\n \"CATALOG_HOST\": os.environ.get(\"CATALOG_HOST\", \"catalog\"),\n \"CATALOG_PORT\": os.environ.get(\"CATALOG_PORT\", \"8000\"),\n \"DIAGNOSTICS_MAX_AVG_LATENCY\": \"30\",\n \"DIRECTOR_HOST\": os.environ.get(\"DIRECTOR_HOST\", \"director\"),\n \"DIRECTOR_PORT\": os.environ.get(\"DIRECTOR_PORT\", \"8080\"),\n \"DIRECTOR_V2_HOST\": os.environ.get(\"DIRECTOR_V2_HOST\", \"director-v2\"),\n \"DIRECTOR_V2_PORT\": os.environ.get(\"DIRECTOR_V2_PORT\", \"8000\"),\n \"STORAGE_HOST\": os.environ.get(\"STORAGE_HOST\", \"storage\"),\n \"STORAGE_PORT\": os.environ.get(\"STORAGE_PORT\", \"8080\"),\n \"SWARM_STACK_NAME\": os.environ.get(\"SWARM_STACK_NAME\", \"simcore\"),\n \"WEBSERVER_LOGLEVEL\": os.environ.get(\"LOG_LEVEL\", \"WARNING\"),\n },\n )\n\n return (\n mock_env_makefile\n | mock_env_devel_environment\n | mock_env_Dockerfile_build\n | mock_env_auto_deployer_agent\n | mock_envs_docker_compose_environment\n )",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def secure_website():\r\n if env.webserver == 'apache':\r\n # Symlink the .htaccess file to the webserver root\r\n run ('if [ ! -e %(etc_path)s/htpasswd ];then touch %(etc_path)s/htpasswd; fi && htpasswd -b %(etc_path)s/htpasswd %(webserver_auth_user)s %(webserver_auth_password)s ' % env)\r\n for path in env.get('protected_folders'):\r\n env.temp = path\r\n run('ln -snf %(etc_path)s/%(webserver)s/htaccess %(webserver_docroot)s/%(temp)s/.htaccess' % env)\r\n else:\r\n raise \"Cannot set security parameters for webserver %(webserver)s yet. Please contact the developer.\" % env"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
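Editor's note: the query/document rows in this part of the dataset (web server access control above, and the Cloud SQL and web server machine-type rows below) all follow the same pattern: build a partial Environment message containing only the field being changed and return it together with a dotted update-mask string, leaving the caller to apply the mask. The self-contained sketch below illustrates that pattern with plain dataclasses; the class names, snake_case fields, and apply_patch helper are simplified stand-ins for illustration, not the actual Cloud Composer API messages or the gcloud implementation.

from dataclasses import dataclass, field
from typing import List, Optional

# Simplified stand-ins for the real API message classes (illustration only).
@dataclass
class WebServerNetworkAccessControl:
    allowed_ip_ranges: List[str] = field(default_factory=list)

@dataclass
class EnvironmentConfig:
    web_server_network_access_control: Optional[WebServerNetworkAccessControl] = None
    environment_size: Optional[str] = None

@dataclass
class Environment:
    config: EnvironmentConfig = field(default_factory=EnvironmentConfig)

def construct_access_control_patch(allowed_ip_ranges):
    """Mirror the helpers in the dataset: return (update_mask, partial Environment)."""
    config = EnvironmentConfig(
        web_server_network_access_control=WebServerNetworkAccessControl(
            allowed_ip_ranges=list(allowed_ip_ranges)))
    return 'config.web_server_network_access_control', Environment(config=config)

def apply_patch(current, patch, update_mask):
    """Copy only the masked leaf field from the partial patch onto current."""
    *parents, leaf = update_mask.split('.')
    src, dst = patch, current
    for name in parents:
        src = getattr(src, name)
        dst = getattr(dst, name)
    setattr(dst, leaf, getattr(src, leaf))
    return current

live = Environment(config=EnvironmentConfig(environment_size='ENVIRONMENT_SIZE_SMALL'))
mask, partial = construct_access_control_patch(['203.0.113.0/24'])
patched = apply_patch(live, partial, mask)
print(mask)                                                                 # config.web_server_network_access_control
print(patched.config.web_server_network_access_control.allowed_ip_ranges)  # ['203.0.113.0/24']
print(patched.config.environment_size)                                      # ENVIRONMENT_SIZE_SMALL (untouched)

Returning the mask alongside the partial object keeps each helper independent; a caller can combine several helpers into one update by joining their masks and applying the partial objects in turn.
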
Constructs an environment patch for Cloud SQL machine type. | def _ConstructCloudSqlMachineTypePatch(cloud_sql_machine_type, release_track):
messages = api_util.GetMessagesModule(release_track=release_track)
config = messages.EnvironmentConfig(
databaseConfig=messages.DatabaseConfig(
machineType=cloud_sql_machine_type))
return 'config.database_config.machine_type', messages.Environment(
config=config) | [
"def _ConstructWebServerMachineTypePatch(web_server_machine_type, release_track):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(\n webServerConfig=messages.WebServerConfig(\n machineType=web_server_machine_type))\n return 'config.web_server_config.machine_type', messages.Environment(\n config=config)",
"def create():\n return _DynamicEnvironment()",
"def setup_environment(config: Dict[str, Any], environment_type: Environment) -> Environment:\n # interpret the provided string argument\n if environment_type == Environment.PRODUCTION:\n # Safe configuration: restrictions for mainnet apply and matrix rooms have to be private\n config['environment_type'] = Environment.PRODUCTION\n config['transport']['matrix']['private_rooms'] = True\n else:\n config['environment_type'] = Environment.DEVELOPMENT\n\n print(f'Raiden is running in {environment_type.value.lower()} mode')\n return environment_type",
"def __init__(self, name=None, zone_name=None):\n\n super(MachineType, self).__init__('machineType', 'zonal')\n self.name = name\n self.zone = Zone(zone_name)",
"def setup_environment():",
"def _ConstructEnvironmentSizePatch(environment_size,\n release_track=base.ReleaseTrack.GA):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(environmentSize=environment_size)\n return 'config.environment_size', messages.Environment(config=config)",
"def generate_cloudsql_instance(self):\n self.cloudsql_instance = '{}-{}-db-{}'.format('forseti',\n self.installation_type,\n self.identifier)",
"def __init__(self, name, local_api, description=\"\",\n default_attributes=None, override_attributes=None,\n cookbook_versions=None):\n super(Environment, self).__init__(name=name, description=description)\n self.local_api_dict = {\"url\": local_api.url,\n \"key\": local_api.key.raw,\n \"client\": local_api.client}\n\n self.default_attributes = default_attributes or {}\n self.override_attributes = override_attributes or {}\n self.cookbook_versions = cookbook_versions or {}\n self.json_class = \"Chef::Environment\"\n self.chef_type = \"environment\"",
"def create_eb_environment():\n creation_response = client.create_environment(\n ApplicationName=app_name,\n EnvironmentName=environment_name,\n Description=\"Manheim test deployment\",\n CNAMEPrefix=environment_name,\n Tier={\n 'Name': 'WebServer',\n 'Type': 'Standard'\n },\n SolutionStackName=solution_stack,\n OptionSettings=[\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'Custom Availability Zones',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'us-east-1a'\n },\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'MaxSize',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': '3'\n },\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'MinSize',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:launchconfiguration',\n 'OptionName': 'InstanceType',\n 'Value': 't2.micro'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'BreachDuration',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': '1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'EvaluationPeriods',\n u'ResourceName': 'AWSEBCloudwatchAlarmLow',\n u'Value': '1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'LowerBreachScaleIncrement',\n u'ResourceName': 'AWSEBAutoScalingScaleDownPolicy',\n u'Value': '-1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'LowerThreshold',\n u'ResourceName': 'AWSEBCloudwatchAlarmLow',\n u'Value': '25'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'MeasureName',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'CPUUtilization'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Period',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Statistic',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'Average'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Unit',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'Percent'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'UpperBreachScaleIncrement',\n 'ResourceName': 'AWSEBAutoScalingScaleUpPolicy',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'UpperThreshold',\n 'ResourceName': 'AWSEBCloudwatchAlarmHigh',\n 'Value': '85'\n },\n {\n 'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',\n 'OptionName': 'RollingUpdateEnabled',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'false'\n },\n {\n 'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',\n 'OptionName': 'RollingUpdateType',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'Time'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'BatchSize',\n 'Value': '50'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'BatchSizeType',\n 'Value': 'Percentage'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'DeploymentPolicy',\n 'Value': 'Rolling'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'IgnoreHealthCheck',\n 'Value': 'false'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'Timeout',\n 'Value': '600'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:container:python',\n 'OptionName': 'WSGIPath',\n 'Value': application_path\n }\n ]\n )\n return creation_response",
"def __init__(self, ext=False, pret=False, verb=False, dbg=False, ip='0.0.0.0', timeout=60, stampfile='mjf.stamp', force=False):\n self.varnames = ['MACHINEFEATURES', 'JOBFEATURES'] # names of machine features environment variables\n self.varnameslower = map(lambda x: x.lower(), self.varnames) # lower case versions of the env variable name\n self.httpip = ip # ip address for metat data in case of IaaS (openstack)\n if self.httpip == '0.0.0.0' : self.httpip = '169.254.169.254'\n self.httpport = 80\n self.data = {} # the machine / job features data structure\n self.ext = ext # is the module called from the command line (True) or imported (False)\n self.verb = verb\n self.pret = pret\n self.dbg = dbg\n self.stampfile = stampfile\n self.lastcollect = datetime(1970,1,1,0,0,0,1)\n self.timeout = int(timeout) # if information retrieved from http, return cache info if called within timeout minutes\n self.indent = None\n if self.pret : self.indent = 2\n self.force = force",
"def _create_env(self, gymenv: Union[str, Env], random_seed: Optional[int]):\n if isinstance(gymenv, Env):\n self.env = gymenv\n self.env_name = gymenv.unwrapped.spec.id\n else:\n if gymenv not in [e.id for e in gym.envs.registry.all()]:\n raise Exception(\"Env {} not found in OpenAI Gym.\".format(gymenv))\n self.env = gym.make(gymenv)\n self.env_name = gymenv\n if random_seed is not None:\n self.env.seed(random_seed)\n\n supports_state = isinstance(self.env.observation_space, gym.spaces.Box) and len(\n self.env.observation_space.shape\n ) in [1, 3]\n supports_action = type(self.env.action_space) in (\n gym.spaces.Discrete,\n gym.spaces.Box,\n )\n\n if not supports_state and supports_action:\n raise Exception(\n \"Unsupported environment state or action type: {}, {}\".format(\n self.env.observation_space, self.env.action_space\n )\n )\n\n self.action_space = self.env.action_space\n if isinstance(self.env.action_space, gym.spaces.Discrete):\n self.action_type = EnvType.DISCRETE_ACTION\n self.action_dim = self.env.action_space.n\n elif isinstance(self.env.action_space, gym.spaces.Box):\n self.action_type = EnvType.CONTINUOUS_ACTION\n self.action_dim = self.env.action_space.shape[0] # type: ignore\n\n if len(self.env.observation_space.shape) == 1: # type: ignore\n self.state_dim = self.env.observation_space.shape[0] # type: ignore\n self.img = False\n elif len(self.env.observation_space.shape) == 3: # type: ignore\n self.height, self.width, self.num_input_channels = (\n self.env.observation_space.shape # type: ignore\n )\n self.img = True",
"def __allocate_environment__(cls, options, test_driver):",
"def prep_env(env, data_type=np.float32, if_print=True): # preprocess environment\n if not all([hasattr(env, attr) for attr in (\n 'env_name', 'state_dim', 'action_dim', 'target_reward', 'if_discrete')]):\n (env_name, state_dim, action_dim, action_max, if_discrete, target_reward) = get_gym_env_info(env, if_print)\n setattr(env, 'env_name', env_name)\n setattr(env, 'state_dim', state_dim)\n setattr(env, 'action_dim', action_dim)\n setattr(env, 'if_discrete', if_discrete)\n setattr(env, 'target_reward', target_reward)\n else:\n action_max = 1\n\n if action_max != 1:\n def decorator_step(env_step):\n def new_env_step(action):\n state, reward, done, info = env_step(action * action_max)\n return state.astype(data_type), reward, done, info\n\n return new_env_step\n else:\n def decorator_step(env_step):\n def new_env_step(action):\n state, reward, done, info = env_step(action)\n return state.astype(data_type), reward, done, info\n\n return new_env_step\n env.step = decorator_step(env.step)\n\n def decorator_reset(env_reset):\n def new_env_reset():\n state = env_reset()\n return state.astype(data_type)\n\n return new_env_reset\n\n env.reset = decorator_reset(env.reset)\n return env",
"def GetEnvironmentalBG2(self):\n ...",
"def create_environment(env_name):\n env = gym.make(env_name)\n\n # Fix for certain OpenAI Gym environments,\n # requiring to be reset prior to initial rendering\n if env_name in GYM_ENVS['classical_control']:\n env.reset()\n\n return env",
"def docker_machine(machine):\n _env = local('docker-machine env {}'.format(machine), capture=True)\n # Reorganize into a string that could be used with prefix().\n _env = re.sub(r'^#.*$', '', _env, flags=re.MULTILINE) # Remove comments\n _env = re.sub(r'^export ', '', _env, flags=re.MULTILINE) # Remove `export `\n _env = re.sub(r'\\n', ' ', _env, flags=re.MULTILINE) # Merge to a single line\n return _env",
"def __init__(self, vm_spec):\n super(WindowsGceVirtualMachine, self).__init__(vm_spec)\n self.boot_metadata['windows-startup-script-ps1'] = (\n windows_virtual_machine.STARTUP_SCRIPT\n )",
"def fork_sim_env_visuals() -> 'ExecEnv':\n from tc2.env.ExecEnv import ExecEnv\n from tc2.env.EnvType import EnvType\n from tc2.env.TimeEnv import TimeEnv\n from tc2.data.data_storage.redis.RedisManager import RedisManager\n from tc2.data.data_storage.mongo.MongoManager import MongoManager\n from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector\n\n if shared.sim_env_visuals is None:\n shared.sim_env_visuals = ExecEnv(shared.program.logfeed_program, shared.program.logfeed_visuals)\n sim_time = TimeEnv(datetime.now())\n shared.sim_env_visuals.setup_first_time(env_type=EnvType.VISUAL_GENERATION,\n time=sim_time,\n data_collector=PolygonDataCollector(\n logfeed_program=shared.program.logfeed_program,\n logfeed_process=shared.program.logfeed_visuals,\n time_env=sim_time\n ),\n mongo=MongoManager(shared.program.logfeed_visuals,\n EnvType.VISUAL_GENERATION),\n redis=RedisManager(shared.program.logfeed_visuals,\n EnvType.VISUAL_GENERATION))\n return shared.sim_env_visuals\n\n # Wipe databases\n shared.sim_env_visuals.reset_dbs()\n\n shared.sim_env_visuals.fork_new_thread(creator_env=shared.sim_env_visuals)\n return shared.sim_env_visuals",
"def env_creator(config: dict):\n \n from gridworld import MultiAgentEnv\n\n return MultiAgentEnv(**config)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for Airflow web server machine type. | def _ConstructWebServerMachineTypePatch(web_server_machine_type, release_track):
messages = api_util.GetMessagesModule(release_track=release_track)
config = messages.EnvironmentConfig(
webServerConfig=messages.WebServerConfig(
machineType=web_server_machine_type))
return 'config.web_server_config.machine_type', messages.Environment(
config=config) | [
"def _ConstructCloudSqlMachineTypePatch(cloud_sql_machine_type, release_track):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(\n databaseConfig=messages.DatabaseConfig(\n machineType=cloud_sql_machine_type))\n return 'config.database_config.machine_type', messages.Environment(\n config=config)",
"def mock_webserver_service_environment(\n monkeypatch: MonkeyPatch,\n mock_env_makefile: EnvVarsDict,\n mock_env_devel_environment: EnvVarsDict,\n mock_env_Dockerfile_build: EnvVarsDict,\n mock_env_auto_deployer_agent: EnvVarsDict,\n) -> EnvVarsDict:\n # @docker compose config (overrides)\n # TODO: get from docker compose config\n # r'- ([A-Z2_]+)=\\$\\{\\1:-([\\w-]+)\\}'\n\n # - .env-devel + docker-compose service environs\n # hostname: \"{{.Node.Hostname}}-{{.Service.Name}}-{{.Task.Slot}}\"\n\n # environment:\n # - CATALOG_HOST=${CATALOG_HOST:-catalog}\n # - CATALOG_PORT=${CATALOG_PORT:-8000}\n # - DIAGNOSTICS_MAX_AVG_LATENCY=10\n # - DIAGNOSTICS_MAX_TASK_DELAY=30\n # - DIRECTOR_HOST=${DIRECTOR_HOST:-director}\n # - DIRECTOR_PORT=${DIRECTOR_PORT:-8080}\n # - DIRECTOR_V2_HOST=${DIRECTOR_V2_HOST:-director-v2}\n # - DIRECTOR_V2_PORT=${DIRECTOR_V2_PORT:-8000}\n # - STORAGE_HOST=${STORAGE_HOST:-storage}\n # - STORAGE_PORT=${STORAGE_PORT:-8080}\n # - SWARM_STACK_NAME=${SWARM_STACK_NAME:-simcore}\n # - WEBSERVER_LOGLEVEL=${LOG_LEVEL:-WARNING}\n # env_file:\n # - ../.env\n mock_envs_docker_compose_environment = setenvs_from_dict(\n monkeypatch,\n {\n # Emulates MYVAR=${MYVAR:-default}\n \"CATALOG_HOST\": os.environ.get(\"CATALOG_HOST\", \"catalog\"),\n \"CATALOG_PORT\": os.environ.get(\"CATALOG_PORT\", \"8000\"),\n \"DIAGNOSTICS_MAX_AVG_LATENCY\": \"30\",\n \"DIRECTOR_HOST\": os.environ.get(\"DIRECTOR_HOST\", \"director\"),\n \"DIRECTOR_PORT\": os.environ.get(\"DIRECTOR_PORT\", \"8080\"),\n \"DIRECTOR_V2_HOST\": os.environ.get(\"DIRECTOR_V2_HOST\", \"director-v2\"),\n \"DIRECTOR_V2_PORT\": os.environ.get(\"DIRECTOR_V2_PORT\", \"8000\"),\n \"STORAGE_HOST\": os.environ.get(\"STORAGE_HOST\", \"storage\"),\n \"STORAGE_PORT\": os.environ.get(\"STORAGE_PORT\", \"8080\"),\n \"SWARM_STACK_NAME\": os.environ.get(\"SWARM_STACK_NAME\", \"simcore\"),\n \"WEBSERVER_LOGLEVEL\": os.environ.get(\"LOG_LEVEL\", \"WARNING\"),\n },\n )\n\n return (\n mock_env_makefile\n | mock_env_devel_environment\n | mock_env_Dockerfile_build\n | mock_env_auto_deployer_agent\n | mock_envs_docker_compose_environment\n )",
"def create():\n return _DynamicEnvironment()",
"def envname(self):\n return \"LIVEWEB_\" + self.name.upper()",
"def setup_environment():",
"def _get_execution_env(environment):\n if environment is None:\n typename = 'LocalAsync'\n else:\n typename = type(environment).__name__\n\n tracker = _mt._get_metric_tracker()\n tracker.track('deploy.job.create.%s' % typename.lower(), value=1)\n\n if typename == 'Local':\n exec_env = LocalExecutionEnvironment\n elif typename == 'LocalAsync':\n exec_env = LocalAsynchronousEnvironment\n elif typename in ['EC2', 'Ec2Cluster']:\n exec_env = Ec2ExecutionEnvironment\n elif typename in ['Hadoop', 'HadoopCluster']:\n exec_env = HadoopExecutionEnvironment\n else:\n raise Exception(\"Validation Failed: Unknown execution environment.\")\n\n return exec_env",
"def _ConstructWebServerAccessControlPatch(web_server_access_control,\n release_track):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(\n webServerNetworkAccessControl=environments_api_util\n .BuildWebServerNetworkAccessControl(web_server_access_control,\n release_track))\n return 'config.web_server_network_access_control', messages.Environment(\n config=config)",
"def create_eb_environment():\n creation_response = client.create_environment(\n ApplicationName=app_name,\n EnvironmentName=environment_name,\n Description=\"Manheim test deployment\",\n CNAMEPrefix=environment_name,\n Tier={\n 'Name': 'WebServer',\n 'Type': 'Standard'\n },\n SolutionStackName=solution_stack,\n OptionSettings=[\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'Custom Availability Zones',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'us-east-1a'\n },\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'MaxSize',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': '3'\n },\n {\n 'Namespace': 'aws:autoscaling:asg',\n 'OptionName': 'MinSize',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:launchconfiguration',\n 'OptionName': 'InstanceType',\n 'Value': 't2.micro'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'BreachDuration',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': '1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'EvaluationPeriods',\n u'ResourceName': 'AWSEBCloudwatchAlarmLow',\n u'Value': '1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'LowerBreachScaleIncrement',\n u'ResourceName': 'AWSEBAutoScalingScaleDownPolicy',\n u'Value': '-1'\n },\n {\n u'Namespace': 'aws:autoscaling:trigger',\n u'OptionName': 'LowerThreshold',\n u'ResourceName': 'AWSEBCloudwatchAlarmLow',\n u'Value': '25'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'MeasureName',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'CPUUtilization'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Period',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Statistic',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'Average'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'Unit',\n 'ResourceName': 'AWSEBCloudwatchAlarmLow',\n 'Value': 'Percent'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'UpperBreachScaleIncrement',\n 'ResourceName': 'AWSEBAutoScalingScaleUpPolicy',\n 'Value': '1'\n },\n {\n 'Namespace': 'aws:autoscaling:trigger',\n 'OptionName': 'UpperThreshold',\n 'ResourceName': 'AWSEBCloudwatchAlarmHigh',\n 'Value': '85'\n },\n {\n 'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',\n 'OptionName': 'RollingUpdateEnabled',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'false'\n },\n {\n 'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',\n 'OptionName': 'RollingUpdateType',\n 'ResourceName': 'AWSEBAutoScalingGroup',\n 'Value': 'Time'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'BatchSize',\n 'Value': '50'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'BatchSizeType',\n 'Value': 'Percentage'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'DeploymentPolicy',\n 'Value': 'Rolling'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'IgnoreHealthCheck',\n 'Value': 'false'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:command',\n 'OptionName': 'Timeout',\n 'Value': '600'\n },\n {\n 'Namespace': 'aws:elasticbeanstalk:container:python',\n 'OptionName': 'WSGIPath',\n 'Value': application_path\n }\n ]\n )\n return creation_response",
"def cli_env(mlflow_client):\n return {\n \"LC_ALL\": \"en_US.UTF-8\",\n \"LANG\": \"en_US.UTF-8\",\n \"MLFLOW_TRACKING_URI\": mlflow_client.tracking_uri,\n }",
"def init_environ():\n\n ENV_VAR_HELP = \"\"\"\n Environment variables explanation:\n - DOCUMENT_ROOT: full filesystem path to html files\n ex: /srv/http/imageboard.example.com/\n - SCRIPT_NAME: url to wakarimasen.py without host part\n ex: /wakarimasen.py\n - SERVER_NAME: hostname of the webserver\n ex: imageboard.example.com\n - SERVER_PORT: port of the webserver (optional)\n ex: 80\n \"\"\"\n\n local.environ.update(os.environ)\n werkzeug.BaseRequest(local.environ)\n\n local.environ.setdefault('waka.rootpath',\n os.path.join('/', config.BOARD_DIR, ''))\n local.environ.setdefault('wsgi.url_scheme', 'http')\n local.environ.setdefault('SERVER_PORT', '80')\n\n required_vars = ['DOCUMENT_ROOT', 'SCRIPT_NAME', 'SERVER_NAME']\n\n for var in required_vars:\n if var not in local.environ:\n print \"Error: %s not in environment\" % (var,)\n print ENV_VAR_HELP\n sys.exit(1)",
"def get_env():\n\n obj = getattr(world, 'env', None) or world.webenv_class(world)\n\n if not obj:\n raise Warning(u\"Lettuce-Web environment not initialized\")\n return None\n\n world.env = obj\n return obj",
"def environment_info(self):\n\n return {\n \"application_environment\": {\n \"framework\": \"pylons\",\n \"env\": dict(os.environ),\n \"language\": \"python\",\n \"language_version\": sys.version.replace('\\n', ''),\n \"application_root_directory\": self.project_root()\n },\n \"client\": {\n \"name\": \"pylons-exceptional\",\n \"version\": __version__,\n \"protocol_version\": EXCEPTIONAL_PROTOCOL_VERSION\n }\n }",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def _ConstructWebServerPluginsModePatch(\n support_web_server_plugins, release_track=base.ReleaseTrack.GA\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n software_config = messages.SoftwareConfig()\n\n if support_web_server_plugins:\n software_config.webServerPluginsMode = (\n messages.SoftwareConfig.WebServerPluginsModeValueValuesEnum.PLUGINS_ENABLED\n )\n else:\n software_config.webServerPluginsMode = (\n messages.SoftwareConfig.WebServerPluginsModeValueValuesEnum.PLUGINS_DISABLED\n )\n\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n\n return 'config.software_config.web_server_plugins_mode', messages.Environment(\n config=config)",
"def create_environment(env_name):\n env = gym.make(env_name)\n\n # Fix for certain OpenAI Gym environments,\n # requiring to be reset prior to initial rendering\n if env_name in GYM_ENVS['classical_control']:\n env.reset()\n\n return env",
"def setup_environment(config: Dict[str, Any], environment_type: Environment) -> Environment:\n # interpret the provided string argument\n if environment_type == Environment.PRODUCTION:\n # Safe configuration: restrictions for mainnet apply and matrix rooms have to be private\n config['environment_type'] = Environment.PRODUCTION\n config['transport']['matrix']['private_rooms'] = True\n else:\n config['environment_type'] = Environment.DEVELOPMENT\n\n print(f'Raiden is running in {environment_type.value.lower()} mode')\n return environment_type",
"def app_env():\n if not self._app_env:\n # TODO: we need to pass this parameter to api, unfortunately\n # in current api framework it is not trivial.\n approot = os.environ['TREADMILL_APPROOT']\n _LOGGER.info('Using approot: %s', approot)\n self._app_env = appmgr.AppEnvironment(approot)\n\n return self._app_env",
"def get_environment():\n if os.environ.get('SERVER_NAME', '').startswith('production'):\n return 'production'\n elif os.environ.get('SERVER_NAME', '').startswith('staging'):\n return 'staging'\n elif os.environ.get('SERVER_NAME', '').startswith('v') and os.environ.get(\n 'SERVER_NAME', ''\n ).endswith('appspot.com'):\n return 'testing'\n elif os.environ.get('SERVER_NAME', '').startswith('test'):\n return 'test'\n return 'development'",
"def env_creator(config: dict):\n \n from gridworld import MultiAgentEnv\n\n return MultiAgentEnv(**config)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs an environment patch for Master authorized networks feature. | def _ConstructMasterAuthorizedNetworksTypePatch(enabled, networks,
release_track):
messages = api_util.GetMessagesModule(release_track=release_track)
config = messages.EnvironmentConfig()
networks = [] if networks is None else networks
config.masterAuthorizedNetworksConfig = messages.MasterAuthorizedNetworksConfig(
enabled=enabled,
cidrBlocks=[
messages.CidrBlock(cidrBlock=network) for network in networks
])
return 'config.master_authorized_networks_config', messages.Environment(
config=config) | [
"def env_creator(config: dict):\n \n from gridworld import MultiAgentEnv\n\n return MultiAgentEnv(**config)",
"def make_env(args):\r\n scenario = scenarios.load(args.env_name + \".py\").Scenario()\r\n world = scenario.make_world()\r\n done_callback = None\r\n\r\n env = MultiAgentEnv(\r\n world,\r\n reset_callback=scenario.reset_world,\r\n reward_callback=scenario.reward,\r\n observation_callback=scenario.observation,\r\n done_callback=done_callback)\r\n\r\n assert env.discrete_action_space is False, \"For cont. action, this flag must be False\"\r\n\r\n return env",
"def activate_local():\n print 'Activating local master'\n command = 'chown -R apache:nagios %s %s && /usr/bin/check_mk -O' % (CMK_CONF_PATH, CMK_EXTRA_CONF_PATH)\n communicate_cli(command)",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def build_master(config, token):\n LOG.debug('Building master config')\n master_config = {}\n\n # Fix the hostname\n master_config['hostname'] = \"{0}-master\".format(config['host-prefix'])\n\n # Add users\n master_config['users'] = build_users(config)\n\n # Add base run comands\n master_config['runcmd'] = build_base_commands(config)\n\n # Set the master interface\n if config['network']['wlan']['mesh']['enabled']:\n master_iface = \"$(ip addr show bat0 | grep -Po 'inet \\K[\\d.]+')\"\n LOG.debug(\"Used mesh configuration for master_iface\")\n else:\n master_iface = '0.0.0.0'\n LOG.debug(\"Set master_iface to 0.0.0.0\")\n\n # Add the commands to init the master\n if config['network']['wlan']['mesh']['enabled']:\n master_config['runcmd'].append(r'apt-get install -o Dpkg::Options::='\n '\"--force-confold\" --force-yes -y '\n 'isc-dhcp-server')\n master_config['runcmd'].append(r'kubeadm init --token {0} '\n '--feature-gates=SelfHosting={1} '\n '--apiserver-advertise-address {2}'\n .format(token, config['kubeadm']\n ['selfHosted'], master_iface\n .strip()))\n master_config['runcmd'].append(\n r'export KUBECONFIG=/etc/kubernetes/admin.conf')\n if config['kubeadm']['network'] == 'weavenet':\n master_config['runcmd'].append(\n r'export kubever=$(kubectl version | base64 | tr -d \"\\n\")')\n master_config['runcmd'].append(\n r'kubectl apply -f \"https://cloud.weave.works/k8s/net?'\n 'k8s-version=$kubever\"')\n master_config['runcmd'].append(\n r'kubectl apply -f https://raw.githubusercontent.com/'\n 'kubernetes/dashboard/master/src/deploy/alternative/'\n 'kubernetes-dashboard-arm.yaml')\n master_config['runcmd'].append(\n r'mkdir -p /root/.kube')\n master_config['runcmd'].append(\n r'cp /etc/kubernetes/admin.conf /root/.kube/config')\n\n # Add the other config options\n master_config['locale'] = \"en_US.UTF-8\"\n master_config['manage_etc_hosts'] = True\n\n # Add the network config\n master_config['write_files'] = build_network_config(config, 200)\n\n # If batman is selected, then add it to the writefiles\n if config['network']['wlan']['mesh']['enabled']:\n master_config['write_files'].append(configure_alfred())\n master_config['write_files'].append(configure_batvis())\n master_config['write_files'].append(dhcp_default())\n master_config['write_files'].append(configure_dhcp())\n\n # Write the file\n filename = \"{0}-master.yaml\".format(config['host-prefix'])\n with open(filename, \"w\") as file:\n yaml.dump(master_config, file, default_flow_style=False)\n line_prepender(filename, \"#cloud-config\")\n return None",
"def determine_new_master(self):\n self.master_host = determine_host_address()",
"def __set_master(name='localhost'):\n \n # TODO: rospy needs to have built-in multimaster support for this\n # to actually work, or we need to get rid of the node singleton\n \n if name.startswith('http://'):\n ctx.master._reinit(name)\n else:\n # assume its a hostname\n ctx.master._reinit('http://%s:11311'%name)\n \n # update the system-wide environment \n os.environ[roslib.rosenv.ROS_MASTER_URI] = ctx.master.master_uri\n return ctx.master.is_online()",
"def __set_minion_master(self):\n master_id = self.master_remote.hostname\n for rem in self.remotes.iterkeys():\n # remove old master public key if present. Minion will refuse to\n # start if master name changed but old key is present\n delete_file(rem, '/etc/salt/pki/minion/minion_master.pub',\n sudo=True, check=False)\n\n # set master id\n sed_cmd = ('echo master: {} > '\n '/etc/salt/minion.d/master.conf').format(master_id)\n rem.run(args=[\n 'sudo',\n 'sh',\n '-c',\n sed_cmd,\n ])",
"def set_master(self, master):\n self.master_host = master",
"def configure_master_zonegroup_and_zones(ctx, config, master_zonegroup, master_zone, realm, master_clients):\n global access_key, secret\n access_key = gen_access_key()\n secret = gen_secret()\n\n zone_endpoint = extract_endpoints(ctx, master_clients)\n log.info('client {}'.format(master_clients[0]))\n zg_endpoint = extract_endpoints(ctx, master_clients[0])\n\n log.info('creating master zonegroup and zone on {}'.format(master_clients))\n rgwadmin(ctx, master_clients[0],\n cmd=['realm', 'create', '--rgw-realm', realm, '--default'],\n check_status=True)\n\n rgwadmin(ctx, master_clients[0],\n cmd=['zonegroup', 'create', '--rgw-zonegroup', master_zonegroup, '--master', '--endpoints', zg_endpoint,\n '--default'], check_status=True)\n\n rgwadmin(ctx, master_clients[0],\n cmd=['zone', 'create', '--rgw-zonegroup', master_zonegroup,\n '--rgw-zone', master_zone, '--endpoints', zone_endpoint, '--access-key',\n access_key, '--secret',\n secret, '--master', '--default'],\n check_status=True)\n\n rgwadmin(ctx, master_clients[0],\n cmd=['period', 'update', '--commit'],\n check_status=True)\n\n #zone_to_conf(ctx, master_clients, master_zone)\n\n yield",
"def ImportMasterConfigs(master_name=None):\n for master in chromium_utils.ListMasters():\n path = os.path.join(master, 'master_site_config.py')\n if os.path.exists(path):\n local_vars = {}\n try:\n execfile(path, local_vars)\n # pylint: disable=W0703\n except Exception, e:\n # Naked exceptions are banned by the style guide but we are\n # trying to be resilient here.\n print >> sys.stderr, 'WARNING: cannot exec ' + path\n print >> sys.stderr, e\n for (symbol_name, symbol) in local_vars.items():\n if inspect.isclass(local_vars[symbol_name]):\n setattr(config.Master, symbol_name, symbol)\n # If we have a master_name and it matches, set\n # config.Master.active_master.\n if master_name and master_name == symbol_name:\n setattr(config.Master, 'active_master', symbol)",
"def start_master(environ, port=DEFAULT_MASTER_PORT):\n global _local_master_uri\n master = rospy.msnode.ROSNode(rospy.core.MASTER_NAME, port, ROSMasterHandlerSD())\n master.start()\n while not master.uri and not rospy.core.is_shutdown():\n time.sleep(0.0001) #poll for init\n _local_master_uri = master.uri\n\n # TODO: Figure out if there is a way to query the launching process for completion state\n # (e.g. determine when roslaunch has finished starting nodes, reading parameters)\n while time.time() - master.handler.last_master_activity_time < 3.0:\n time.sleep(0.1) # Poll until master is resting\n\n # start service discovery on ROSMasterHandlerSD\n master.handler.start_service_discovery(master.uri)\n\n return master",
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def _ConstructWebServerAccessControlPatch(web_server_access_control,\n release_track):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(\n webServerNetworkAccessControl=environments_api_util\n .BuildWebServerNetworkAccessControl(web_server_access_control,\n release_track))\n return 'config.web_server_network_access_control', messages.Environment(\n config=config)",
"def puppet_master(self):\n self.install(\"puppet augeas-tools\")\n self.install(\"puppetmaster sqlite3 libsqlite3-ruby git rake\")\n self.install(\"libactiverecord-ruby\")\n self.install(\"puppetlabs_spec_helper\", gem=True)\n self.install(\"puppetmaster-common\")\n\n puppet_master = self.template(\"puppet-master\").substitute()\n sudo(puppet_master)\n\n sudo(\"cd /etc/puppet/modules; git clone %s; \"\\\n \"cd openstack; rake modules:clone\" % PUPPETLABS_OPENSTACK)\n\n self.manifest()\n self.puppet_restart()",
"def create_environment(env_name):\n env = gym.make(env_name)\n\n # Fix for certain OpenAI Gym environments,\n # requiring to be reset prior to initial rendering\n if env_name in GYM_ENVS['classical_control']:\n env.reset()\n\n return env",
"def __init__(__self__, *,\n enable_private_endpoint: Optional[pulumi.Input[bool]] = None,\n enable_private_nodes: Optional[pulumi.Input[bool]] = None,\n master_global_access_config: Optional[pulumi.Input['PrivateClusterMasterGlobalAccessConfigArgs']] = None,\n master_ipv4_cidr_block: Optional[pulumi.Input[str]] = None):\n if enable_private_endpoint is not None:\n pulumi.set(__self__, \"enable_private_endpoint\", enable_private_endpoint)\n if enable_private_nodes is not None:\n pulumi.set(__self__, \"enable_private_nodes\", enable_private_nodes)\n if master_global_access_config is not None:\n pulumi.set(__self__, \"master_global_access_config\", master_global_access_config)\n if master_ipv4_cidr_block is not None:\n pulumi.set(__self__, \"master_ipv4_cidr_block\", master_ipv4_cidr_block)",
"def __deepcopy__(self, memo):\n unique_name = datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n environment_model_copy = MultilayerNNEnvModel('env_copy_'+unique_name,\n self.sess,\n self.observation_space,\n self.action_space,\n self.learning_rate,\n self.env_model_save_path,\n False)\n env_model_weights = []\n for param in self.env_model_params:\n env_model_weights.append(param.eval(session = self.sess))\n environment_model_copy.set_env_model_weights(env_model_weights)\n return environment_model_copy",
"def build_env():\r\n\r\n retro_env = retro.make(game='SpaceInvaders-Atari2600')\r\n\r\n # Build an one hot encoding of the actions\r\n actions = np.array(np.identity(\r\n retro_env.action_space.n, dtype=int).tolist())\r\n\r\n return retro_env, actions"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
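Editor's note: the master-authorized-networks document above normalizes a possibly-None list of CIDR strings to an empty list and wraps each entry in a CidrBlock message. Below is a small, hedged sketch of that normalization step that additionally validates each block with the standard-library ipaddress module; the CidrBlock namedtuple is a stand-in for the real message class, and the validation is an extra step added for illustration, not something the original helper does.

import ipaddress
from collections import namedtuple

# Stand-in for the API's CidrBlock message (illustration only).
CidrBlock = namedtuple('CidrBlock', ['cidr_block'])

def normalize_authorized_networks(networks):
    """Treat None as an empty list, validate each CIDR string, and wrap it."""
    networks = [] if networks is None else networks
    blocks = []
    for network in networks:
        # Syntax check only; raises ValueError for malformed blocks such as '10.0.0.0/33'.
        ipaddress.ip_network(network, strict=False)
        blocks.append(CidrBlock(cidr_block=network))
    return blocks

print(normalize_authorized_networks(None))            # []
print(normalize_authorized_networks(['10.0.0.0/8']))  # [CidrBlock(cidr_block='10.0.0.0/8')]
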
Constructs an environment patch for updating maintenance window. | def _ConstructMaintenanceWindowPatch(maintenance_window_start,
maintenance_window_end,
maintenance_window_recurrence,
release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
window_value = messages.MaintenanceWindow(
startTime=maintenance_window_start.isoformat(),
endTime=maintenance_window_end.isoformat(),
recurrence=maintenance_window_recurrence)
config = messages.EnvironmentConfig(maintenanceWindow=window_value)
return 'config.maintenance_window', messages.Environment(config=config) | [
"def _ConstructEnvVariablesPatch(env_ref,\n clear_env_variables,\n remove_env_variables,\n update_env_variables,\n release_track=base.ReleaseTrack.GA):\n env_obj = environments_api_util.Get(env_ref, release_track=release_track)\n initial_env_var_value = env_obj.config.softwareConfig.envVariables\n initial_env_var_list = (\n initial_env_var_value.additionalProperties\n if initial_env_var_value else [])\n\n messages = api_util.GetMessagesModule(release_track=release_track)\n env_cls = messages.Environment\n env_variables_cls = messages.SoftwareConfig.EnvVariablesValue\n entry_cls = env_variables_cls.AdditionalProperty\n\n def _BuildEnv(entries):\n software_config = messages.SoftwareConfig(\n envVariables=env_variables_cls(additionalProperties=entries))\n config = messages.EnvironmentConfig(softwareConfig=software_config)\n return env_cls(config=config)\n\n return ('config.software_config.env_variables',\n command_util.BuildFullMapUpdate(clear_env_variables,\n remove_env_variables,\n update_env_variables,\n initial_env_var_list, entry_cls,\n _BuildEnv))",
"def maintenance_window(self) -> Optional[pulumi.Input['InstanceMaintenanceWindowArgs']]:\n return pulumi.get(self, \"maintenance_window\")",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def maintenance_window(self) -> pulumi.Output[Optional['outputs.InstanceMaintenanceWindow']]:\n return pulumi.get(self, \"maintenance_window\")",
"def maintenance_options(self) -> Optional[pulumi.Input['LaunchTemplateMaintenanceOptionsArgs']]:\n return pulumi.get(self, \"maintenance_options\")",
"def patch_jinja2_env():\n from pyinfra.api import util\n\n util.Environment = Jinja2Environment",
"def dt_patch(self):\r\n \r\n if self.os.lower() == \"windowsxp\":\r\n idt = WINXP_2003_IDT\r\n ldt = WINXP_2003_LDT\r\n gdt = WINXP_2003_GDT\r\n else:\r\n idt = WIN2000_IDT\r\n ldt = WIN2000_LDT\r\n gdt = WIN2000_GDT\r\n \r\n # Iterate through the addresses, and write \r\n # the trickery\r\n for address in self.hook_addrs:\r\n \r\n # Create the stub where we will detour\r\n stub_address = self.imm.remoteVirtualAlloc(size=50)\r\n\r\n opcode = self.hook_addrs[address][1]\r\n \r\n # Determine what we have to patch\r\n if self.hook_addrs[address][0] == \"idt\":\r\n patch_value = idt\r\n elif self.hook_addrs[address][0] == \"gdt\":\r\n patch_value = gdt\r\n elif self.hook_addrs[address][0] == \"ldt\":\r\n patch_value = ldt\r\n \r\n # First we need to determine the length of the original\r\n # opcode, if the length is greater than 5 bytes, we need to \r\n # preserve the instruction that follows it, the reason is\r\n # our JMP [ALLOC_ADDRESS] is 5 bytes long\r\n saved_instructions = None\r\n nop_sled = None\r\n \r\n if opcode.getSize() < 5:\r\n new_opcode = self.imm.disasmForward(address)\r\n saved_instructions = new_opcode.getDisasm()\r\n self.imm.Log(\"The length is going ot be an issue: %d\" % opcode.getSize(), address = address)\r\n self.imm.Log(\"About to clobber: %s\" % new_opcode.getDisasm(),address = new_opcode.getAddress())\r\n \r\n # It seems to be easier to NOP everything out first before\r\n # we write out the patch\r\n nop_sled = \"\\x90\" * (opcode.getSize() + new_opcode.getSize())\r\n self.imm.writeMemory(address,nop_sled)\r\n \r\n # Now let's write the detour JMP to our allocated memory page\r\n detour_jmp = self.imm.Assemble(\"JMP 0x%08x\" % stub_address,address = address)\r\n detour_jmp_len = len(detour_jmp)\r\n \r\n self.imm.writeMemory(address,detour_jmp)\r\n \r\n # Now write out the patch header, we are just going to do a MOV on\r\n # the original SIDT/SLDT/SGDT instruction, directly into it's operand\r\n operand = opcode.getDisasm()[5:]\r\n register = operand.split(\"[\")[1].split(\"]\")[0]\r\n \r\n # We need to put a WORD ptr check in here, but \r\n # for now just assemble as a DWORD manipulation\r\n patch_header = \"MOV DWORD PTR [%s],0x%08x \\n\" % (register,patch_value)\r\n \r\n # Now if we need to preserve some instructions from the original\r\n # basic block (because we clobbered them with our detour JMP)\r\n # write them out after the patch header\r\n if saved_instructions is not None:\r\n patch_body = new_opcode.getDisasm() + \"\\n\"\r\n else:\r\n patch_body = \"\\n\"\r\n\r\n # Write the return address\r\n # Now we want to do the JMP back to the original function\r\n # plus the size of our detour JMP instruction\r\n if nop_sled is not None:\r\n detour_ret = address + len(nop_sled)\r\n else:\r\n detour_ret = address + detour_jmp_len\r\n \r\n self.imm.Log(\"Detour Return: 0x%08x\" % detour_ret)\r\n \r\n # We are going to use a PUSH/RET sequence to get\r\n # back from our detour page\r\n jmp_label = \"%08x\" % detour_ret\r\n ret_jmp = \"PUSH %s\\n RET\" % jmp_label\r\n \r\n # Assemble the final patch\r\n final_patch = self.imm.Assemble(patch_header + patch_body + ret_jmp,address = stub_address)\r\n self.imm.Log(\"Final Patch: %s\" % final_patch.encode(\"HEX\"))\r\n self.imm.writeMemory(stub_address,final_patch)",
"def _update_maint_window(self, new_config: MaintenanceWindow) -> bool:\n\n uri = Settings.api_resources[\"Maintenance Windows\"][\"Update Maintenance Window\"].format(\n GROUP_ID=self.atlas.group)\n self.atlas.network.patch(Settings.BASE_URL + uri, payload=new_config.as_update_dict())\n\n return True",
"def _ConstructPrivateEnvironmentPatch(\n enable_private_environment,\n release_track=base.ReleaseTrack.GA,\n):\n messages = api_util.GetMessagesModule(release_track=release_track)\n private_environment_config = messages.PrivateEnvironmentConfig()\n config = messages.EnvironmentConfig(\n privateEnvironmentConfig=private_environment_config\n )\n update_mask = 'config.private_environment_config.enable_private_environment'\n private_environment_config.enablePrivateEnvironment = bool(\n enable_private_environment\n )\n\n return (\n update_mask,\n messages.Environment(config=config),\n )",
"def create_env(self):\n return lambda: VizDoom(self.cfg_path, number_maps=self.number_maps, scaled_resolution=self.scaled_resolution,\n action_frame_repeat=self.action_frame_repeat)",
"def do_environment_apps_edit(mc, args):\n\n jp_obj = None\n if not args.filename:\n jp_obj = json.load(sys.stdin)\n else:\n with open(args.filename) as fpatch:\n jp_obj = json.load(fpatch)\n\n jpatch = jsonpatch.JsonPatch(jp_obj)\n\n environment_id = args.id\n session_id = args.session_id\n environment = mc.environments.get(environment_id, session_id)\n\n object_model = jpatch.apply(environment.services)\n utils.traverse_and_replace(object_model)\n\n mc.services.put(\n environment_id,\n path='/',\n data=jpatch.apply(environment.services),\n session_id=session_id)",
"def cic_maintenance_mode_env(self):\n self.check_run('cic_maintenance_mode')\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n data = {\n 'ceilometer': True,\n }\n\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE_HA,\n settings=data)\n\n self.fuel_web.update_nodes(\n cluster_id,\n {\n 'slave-01': ['controller', 'mongo'],\n 'slave-02': ['controller', 'mongo'],\n 'slave-03': ['controller', 'mongo'],\n 'slave-04': ['compute', 'cinder'],\n 'slave-05': ['compute', 'cinder']\n }\n )\n\n # Cluster deploy\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n # Check network\n self.fuel_web.verify_network(cluster_id)\n\n # Run ostf\n self.fuel_web.run_ostf(cluster_id, test_sets=['ha', 'smoke', 'sanity'])\n\n self.env.make_snapshot(\"cic_maintenance_mode\", is_make=True)",
"def create_dev_environment(self, svn=False, git=True):\n\n package_list = []\n if svn:\n package_list.append('svn')\n if git:\n package_list.append('git')\n package_list.append('git-lfs')\n\n prefix = os.path.join(self.root_dir, 'dev_env')\n command = 'create'\n text_messages = ['Installing', 'installation into']\n if prefix in self.environments:\n command = 'update'\n text_messages = ['Updating', 'update of']\n\n command_list = [self.conda_exe, command, '-y', '-c', 'conda-forge',\n '--prefix', prefix] + package_list\n\n print('-'*79, file=self.log)\n print('{text} extra development environment containing:'.format(text=text_messages[0]),\n file=self.log)\n for package in package_list:\n print(' -', package, file=self.log)\n\n self._retry_command(command_list, text_messages[1], prefix, verbose=True)\n\n print('-'*79, file=self.log)\n return prefix",
"def update_fetch(self):\n Popen([\"mount\", \"-t\", \"devfs\", \"devfs\",\n \"{}/releases/{}/root/dev\".format(self.iocroot,\n self.release)]).communicate()\n copy(\"/etc/resolv.conf\",\n \"{}/releases/{}/root/etc/resolv.conf\".format(self.iocroot,\n self.release))\n\n # TODO: Check for STABLE/PRERELEASE/CURRENT/BETA if we support those.\n # TODO: Fancier.\n self.lgr.info(\"\\n* Updating {} to the latest patch level... \".format(\n self.release))\n\n os.environ[\"UNAME_r\"] = self.release\n os.environ[\"PAGER\"] = \"/bin/cat\"\n new_root = \"{}/releases/{}/root\".format(self.iocroot, self.release)\n if os.path.isfile(\"{}/etc/freebsd-update.conf\".format(new_root)):\n # 10.1-RELEASE and under have a interactive check\n if float(self.release.partition(\"-\")[0][:5]) <= 10.1:\n with NamedTemporaryFile(delete=False) as tmp_conf:\n conf = \"{}/usr/sbin/freebsd-update\".format(new_root)\n with open(conf) as update_conf:\n for line in update_conf:\n tmp_conf.write(re.sub(\"\\[ ! -t 0 \\]\", \"false\",\n line))\n\n os.chmod(tmp_conf.name, 0o755)\n Popen([tmp_conf.name, \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n os.remove(tmp_conf.name)\n else:\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"fetch\"], stdout=PIPE, stderr=PIPE).communicate()\n\n Popen([\"freebsd-update\", \"-b\", new_root, \"-d\",\n \"{}/var/db/freebsd-update/\".format(new_root), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(new_root),\n \"install\"], stdout=PIPE, stderr=PIPE).communicate()\n\n try:\n # Why this sometimes doesn't exist, we may never know.\n os.remove(\"{}/releases/{}/root/etc/resolv.conf\".format(\n self.iocroot, self.release))\n except OSError:\n pass\n\n Popen([\"umount\", \"{}/releases/{}/root/dev\".format(\n self.iocroot, self.release)]).communicate()",
"def setup_workdir(self, env, t, scomm):\r\n # create the working directory first before calling the base class prerequisites\r\n avg_BEGYR = (int(env['ENDYR_'+t]) - int(env['YRS_TO_AVG'])) + 1\r\n subdir = '{0}.{1}-{2}/{3}.{4}_{5}'.format(env['CASE_TO_'+t], avg_BEGYR, env['ENDYR_'+t],self._name.lower(), str(avg_BEGYR), env['ENDYR_'+t])\r\n workdir = '{0}/{1}'.format(env['PATH_CLIMO_'+t], subdir)\r\n env['CLIMO_'+t] = workdir\r\n\r\n if (scomm.is_manager()):\r\n if env['CLEANUP_FILES'].lower() in ['t','true'] and os.path.exists(workdir):\r\n shutil.rmtree(workdir)\r\n try:\r\n os.makedirs(workdir)\r\n except OSError as exception:\r\n if exception.errno != errno.EEXIST:\r\n err_msg = 'ERROR: {0} problem accessing the working directory {1}'.format(self.__class__.__name__, workdir)\r\n raise OSError(err_msg)\r\n\r\n # create symbolic links between the old and new workdir and get the real names of the files\r\n old_workdir = env['PATH_CLIMO_'+t]+'/'+env['CASE_TO_'+t]+'.'+str(avg_BEGYR)+'-'+env['ENDYR_'+t]\r\n env['PATH_CLIMO_'+t] = workdir\r\n\r\n if (scomm.is_manager()):\r\n print('calling name = {0}'.format(self._name))\r\n print('subdir = {0}'.format(subdir))\r\n print('workdir = {0}'.format(workdir))\r\n print('old_workdir = {0}'.format(old_workdir))\r\n\r\n # Add links to the new wkdir that use the expected file names (existing climos have dates, the NCL do not like dates)\r\n if (scomm.is_manager()):\r\n climo_files = glob.glob(old_workdir+'/*.nc') \r\n for climo_file in climo_files:\r\n if ('ice_vol_' in climo_file):\r\n new_fn = workdir + '/' + os.path.basename(climo_file)\r\n## if (scomm.is_manager()):\r\n## print('1. ice_diags_bc.py: new_fn = {0}'.format(new_fn))\r\n else:\r\n name_split = climo_file.split('.') # Split on '.'\r\n if ('-' in name_split[-3]):\r\n fn = str.join('.',name_split[:len(name_split)-3] + name_split[-2:]) #Piece together w/o the date, but still has old path \r\n if (scomm.is_manager()):\r\n print('1. fn = {0}'.format(fn))\r\n path_split = fn.split('/') # Remove the path\r\n if ('jfm_climo' in path_split[-1]):\r\n s = 'jfm'\r\n elif ('amj_climo' in path_split[-1]):\r\n s = 'amj'\r\n elif ('jas_climo' in path_split[-1]):\r\n s = 'jas'\r\n elif ('ond_climo' in path_split[-1]):\r\n s = 'ond'\r\n elif ('fm_climo' in path_split[-1]):\r\n s = 'fm'\r\n elif ('on_climo' in path_split[-1]):\r\n s = 'on'\r\n elif ('_ANN_climo' in path_split[-1]):\r\n s = 'ann'\r\n else:\r\n s = None\r\n if s is not None:\r\n new_fn = workdir + '/' + s + '_avg_' + str(avg_BEGYR).zfill(4) + '-' + env['ENDYR_'+t].zfill(4) + '.nc' \r\n## if (scomm.is_manager()):\r\n## print('2. ice_diags_bc.py s = {0}: new_fn = {1}'.format(s, new_fn))\r\n else:\r\n new_fn = workdir + '/' +path_split[-1] # Take file name and add it to new path\r\n## if (scomm.is_manager()):\r\n## print('3. ice_diags_bc.py: new_fn = {0}'.format(new_fn))\r\n else:\r\n new_fn = workdir + '/' + os.path.basename(climo_file)\r\n## if (scomm.is_manager()):\r\n## print('4. ice_diags_bc.py: new_fn = {0}'.format(new_fn))\r\n rc1, err_msg1 = cesmEnvLib.checkFile(new_fn, 'read')\r\n if not rc1:\r\n os.symlink(climo_file,new_fn)\r\n else:\r\n print('ice_diags_bc.py: unable to create link to file {0}'.format(new_fn))\r\n return env",
"def _ConstructWebServerMachineTypePatch(web_server_machine_type, release_track):\n messages = api_util.GetMessagesModule(release_track=release_track)\n config = messages.EnvironmentConfig(\n webServerConfig=messages.WebServerConfig(\n machineType=web_server_machine_type))\n return 'config.web_server_config.machine_type', messages.Environment(\n config=config)",
"def test_runtime_envs_update(self):\n pass",
"def testMaintenanceMode(self):\n self.site.latest_gsoc = self.gsoc_program.key().name()\n self.site.latest_gci = self.gci_program.key().name()\n self.site.put()\n\n # the page should is in maintenance mode\n self.site.maintenance_mode = True\n self.site.put()\n\n # check that page is not accessible for non-developers\n response = self.get('/')\n self.assertResponseCode(response, httplib.SERVICE_UNAVAILABLE)\n\n # log in as a developer\n profile_utils.loginNDB(self.user, is_admin=True)\n response = self.get('/')\n self.assertResponseOK(response)",
"def test_full_update_system(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a patch for updating scheduler count for Composer 1... | def _ConstructSoftwareConfigurationSchedulerCountPatch(
scheduler_count, release_track=base.ReleaseTrack.GA):
messages = api_util.GetMessagesModule(release_track=release_track)
return 'config.software_config.scheduler_count', messages.Environment(
config=messages.EnvironmentConfig(
softwareConfig=messages.SoftwareConfig(
schedulerCount=scheduler_count))) | [
"def scheduled(self, scheduler):",
"def test_patch_with_reschedule(self):\n Run.objects.update(enqueue_dts=timezone.now())\n response = self.patch(\n '/api/v1/run/1/',\n {\n 'return_dts': timezone.now().isoformat(' '),\n 'return_success': True,\n }\n )\n\n self.assertEqual(202, response.status_code)\n self.assertEqual(2, Job.objects.get(pk=1).run_set.count())\n self.assertEqual(1, Job.objects.get(pk=3).run_set.count())",
"def getPatch(self) -> int:\n ...",
"def test_patch_with_reschedule(self):\n return_dts = timezone.now()\n Run.objects.update(enqueue_dts=timezone.now())\n response = self.patch(\n '/api/v1/run/1/',\n {\n 'return_dts': return_dts.isoformat(' '),\n 'return_success': True,\n }\n )\n\n self.assertEqual(202, response.status_code)\n self.assertEqual(2, Run.objects.filter(job_id=1).count())\n self.assertEqual(\n return_dts, Run.objects.filter(job_id=1)[0].return_dts)",
"def pending_patch_count(self) -> int:\n return pulumi.get(self, \"pending_patch_count\")",
"def semver_incr_patch(ver: str) -> str:\n parts = ver.split(\".\")\n patch = str(int(parts[-1]) + 1)\n\n parts = parts[:-1]\n parts.append(patch)\n\n return \".\".join(parts)",
"def test_scheduled_action_count(friends):\n # original\n dispatcher, scheduler, action = friends()\n dispatcher.add_action(\"foo\", action)\n dispatcher.add_scheduler(\"bar\", scheduler)\n dispatcher.schedule_action(action_name=\"foo\", scheduler_name=\"bar\")\n assert 1 == dispatcher.get_scheduled_action_count()\n assert 1 == dispatcher.job_count()",
"def test_patch_with_reschedule_no_children_reschedule(self):\n Run.objects.update(\n enqueue_dts=timezone.now(),\n schedule_children=False,\n )\n response = self.patch(\n '/api/v1/run/1/',\n {\n 'return_dts': timezone.now().isoformat(' '),\n 'return_success': True,\n }\n )\n\n self.assertEqual(202, response.status_code)\n self.assertEqual(2, Job.objects.get(pk=1).run_set.count())\n self.assertEqual(0, Job.objects.get(pk=3).run_set.count())",
"def update_pending_tuples_count(self, count):\n self.update_reduced_metric(self.PENDING_ACKED_COUNT, count)",
"def increment_new_content_updates(self, count: int = 1):",
"def get_number_of_agents_for_scheduling(self, context):\n return 1",
"def update(self):\n self.logger.info('update CronService')\n self.cancel_alarm()\n self.setup_alarm()",
"def test_update_counts(self):\n conn = self.reporter.conn\n\n (build,) = list(conn.execute(self.reporter.Builds.select()))\n\n assert_equal(build['method_count'], None)\n\n self.reporter.test_counts(3, 50)\n (updated_build,) = list(conn.execute(self.reporter.Builds.select()))\n\n assert_equal(updated_build['method_count'], 50)",
"def worker():\n global PREV_WORKER_TIME # pylint: disable=global-statement\n global NEXT_WORKER_TIME # pylint: disable=global-statement\n PREV_WORKER_TIME = NEXT_WORKER_TIME # pylint: disable=used-before-assignment\n NEXT_WORKER_TIME = time()\n\n running_jobs_count = 0\n\n inventory = Collection(\"inventory\")\n\n for prcuuid in inventory.find_objuuids(type=\"procedure\"):\n procedure = inventory.get_object(prcuuid)\n\n if \"enabled\" not in procedure.object:\n logging.warning('setting \"enabled\" to false')\n procedure.object[\"enabled\"] = False\n procedure.set()\n\n if \"seconds\" not in procedure.object:\n logging.warning('setting \"seconds\" to \"0\"')\n procedure.object[\"seconds\"] = \"0\"\n procedure.set()\n\n if \"minutes\" not in procedure.object:\n logging.warning('setting \"minutes\" to \"*\"')\n procedure.object[\"minutes\"] = \"*\"\n procedure.set()\n\n if \"hours\" not in procedure.object:\n logging.warning('setting \"hours\" to \"*\"')\n procedure.object[\"hours\"] = \"*\"\n procedure.set()\n\n if \"dayofmonth\" not in procedure.object:\n logging.warning('setting \"dayofmonth\" to \"*\"')\n procedure.object[\"dayofmonth\"] = \"*\"\n procedure.set()\n\n if \"dayofweek\" not in procedure.object:\n logging.warning('setting \"dayofweek\" to \"*\"')\n procedure.object[\"dayofweek\"] = \"*\"\n procedure.set()\n\n if \"year\" not in procedure.object:\n logging.warning('setting \"year\" to \"*\"')\n procedure.object[\"year\"] = \"*\"\n procedure.set()\n\n if procedure.object[\"enabled\"] in (True, \"true\"):\n for epoch_time in range(int(PREV_WORKER_TIME), int(NEXT_WORKER_TIME)):\n now = datetime.fromtimestamp(epoch_time).now()\n # pylint: disable=too-many-boolean-expressions\n if (\n eval_cron_field(procedure.object[\"seconds\"], now.second) and\n eval_cron_field(procedure.object[\"minutes\"], now.minute) and\n eval_cron_field(procedure.object[\"hours\"], now.hour) and\n eval_cron_field(procedure.object[\"dayofmonth\"], now.day) and\n eval_cron_field(procedure.object[\"dayofweek\"], now.weekday()) and\n eval_cron_field(procedure.object[\"year\"], now.year)\n ):\n for hstuuid in procedure.object[\"hosts\"]:\n queue_procedure(hstuuid, procedure.objuuid, None)\n break\n\n try:\n JOB_LOCK.acquire()\n\n # Concurrency conditioning\n for key in list(JOBS.keys()):\n try:\n assert int(JOBS[key][\"host\"][\"concurrency\"]) > 0\n except (AssertionError, KeyError, ValueError):\n logging.warning('host concurrency defaulting to 1')\n JOBS[key][\"host\"][\"concurrency\"] = \"1\"\n\n try:\n assert int(JOBS[key][\"console\"][\"concurrency\"]) > 0\n except (AssertionError, KeyError, ValueError):\n logging.warning('console concurrency defaulting to 1')\n JOBS[key][\"console\"][\"concurrency\"] = \"1\"\n\n running_jobs_counts = {}\n for key in list(JOBS.keys()):\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] = 0\n running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] = 0\n\n for key in list(JOBS.keys()):\n if JOBS[key][\"process\"] is not None:\n if JOBS[key][\"process\"].is_alive():\n running_jobs_count += 1\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] += 1\n running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] += 1\n else:\n release_display_row(JOBS[key][\"display row\"])\n del JOBS[key]\n\n for key in list(JOBS.keys()):\n if running_jobs_count < int(get_config()[\"concurrency\"]):\n if JOBS[key][\"process\"] is None:\n if (\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] < \\\n int(JOBS[key][\"host\"][\"concurrency\"]) and\n 
running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] < \\\n int(JOBS[key][\"console\"][\"concurrency\"])\n ):\n\n JOBS[key][\"process\"] = Process(\n target=run_procedure,\n args=(\n JOBS[key][\"host\"],\n JOBS[key][\"procedure\"],\n JOBS[key][\"console\"],\n JOBS[key][\"ctruuid\"],\n JOBS[key][\"display row\"]\n )\n )\n\n JOBS[key][\"start time\"] = time()\n JOBS[key][\"process\"].start()\n\n running_jobs_count += 1\n running_jobs_counts[JOBS[key][\"host\"][\"objuuid\"]] += 1\n running_jobs_counts[JOBS[key][\"console\"][\"objuuid\"]] += 1\n\n kvstore.touch(\"queueState\")\n except Exception as exception: # pylint: disable=broad-except\n logging.error(exception)\n finally:\n JOB_LOCK.release()\n start_timer()",
"def critical_and_security_patch_count(self) -> int:\n return pulumi.get(self, \"critical_and_security_patch_count\")",
"def update_submission_comment_count(sender, instance, **kwargs):\n obj = instance.content_object\n if isinstance(obj, Submission):\n new_total = ThreadedComment.public.all_for_object(obj).count() \n Submission.objects.filter(pk=obj.pk).update(comments_total=new_total)",
"def set_trigger_count(self, count):\n self.count = count",
"def update_pb(self):\n self.pb_run = min(\n self.past_runs, key=lambda run: run.total\n )",
"def add_cache_increment_parameter(tasks):\n denom = len(tasks) or 1\n increment = 1.0 / denom * 100\n # This is kind of terrible. Once we know how much progress each task\n # yeilds, we must pass that value into the Signature for the sub tassks.\n for _task in tasks:\n _task.args = _task.args + (increment,)\n\n return tasks"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a patch for updating Cloud Data Lineage integration config. | def _ConstructSoftwareConfigurationCloudDataLineageIntegrationPatch(
enabled, release_track):
messages = api_util.GetMessagesModule(release_track=release_track)
return 'config.software_config.cloud_data_lineage_integration', messages.Environment(
config=messages.EnvironmentConfig(
softwareConfig=messages.SoftwareConfig(
cloudDataLineageIntegration=messages.CloudDataLineageIntegration(
enabled=enabled)))) | [
"def apply_patch_to_config(\n config: dict,\n patch: dict\n ):\n from meerschaum.utils.packages import cascadict\n base = cascadict.CascaDict(config)\n new = base.cascade(patch)\n return new.copy_flat()",
"def patch_gcloud(monkeypatch, gcloud_run, gcloud_config):\n monkeypatch.setattr(\"hailtop.hailctl.dataproc.gcloud.run\", gcloud_run)\n monkeypatch.setattr(\"hailtop.hailctl.dataproc.gcloud.get_version\", Mock(return_value=MINIMUM_REQUIRED_GCLOUD_VERSION))\n\n def mock_gcloud_get_config(setting):\n return gcloud_config.get(setting, None)\n\n monkeypatch.setattr(\"hailtop.hailctl.dataproc.gcloud.get_config\", mock_gcloud_get_config)\n\n yield\n\n monkeypatch.undo()",
"def update_config():\n g.config = app.config",
"def update_drbd_config(self, context):\n return self.call(context, self.make_msg('update_drbd_config'))",
"def modify_infra_cfg(config_infr, cloud, template):\n print \"[+]Modifing infra.cfg....\"\n for i in config_infr:\n config_infr[i]['cloud'] = cloud\n config_infr[i]['template'] = template\n config_infr.write()\n return config_infr",
"def _update_runtime_properties():\n # Override any values in `config` with values in `additional_config`\n config = inputs['config']\n additional_config = inputs['additional_config']\n _dict_merge(config, additional_config)\n\n ctx.instance.runtime_properties['config'] = config\n ctx.instance.update()\n ctx.logger.debug('Updated {0}: {1}'.format(ctx.instance.id, config))",
"def test_patch(self):\n url = reverse(\"users-api:userconfig-list\")\n\n data = {\n \"a\": {\n \"a1\": \"X\",\n \"a2\": \"Y\",\n },\n \"b\": {\n \"b1\": \"Z\",\n },\n }\n response = self.client.patch(url, data=data, format=\"json\", **self.header)\n self.assertDictEqual(response.data, data)\n self.user.refresh_from_db()\n self.assertDictEqual(self.user.config_data, data)\n\n update_data = {\"c\": 123}\n response = self.client.patch(url, data=update_data, format=\"json\", **self.header)\n new_data = deepmerge(data, update_data)\n self.assertDictEqual(response.data, new_data)\n self.user.refresh_from_db()\n self.assertDictEqual(self.user.config_data, new_data)",
"def __init__(self, patch_dict, remote, url_prefix):\n self.patch_dict = patch_dict\n self.url_prefix = url_prefix\n current_patch_set = patch_dict.get('currentPatchSet', {})\n # id - The CL's ChangeId\n # revision - The CL's SHA1 hash.\n # number - The CL's gerrit number.\n super(GerritPatch, self).__init__(\n os.path.join(url_prefix, patch_dict['project']),\n patch_dict['project'],\n current_patch_set.get('ref'),\n patch_dict['branch'],\n remote,\n current_patch_set.get('revision'),\n patch_dict['id'],\n ParseGerritNumber(str(patch_dict['number'])),\n current_patch_set.get('number'),\n owner_email=patch_dict['owner']['email'])\n\n prefix_str = site_config.params.CHANGE_PREFIX[self.remote]\n self.gerrit_number_str = '%s%s' % (prefix_str, self.gerrit_number)\n self.url = patch_dict['url']\n # status - Current state of this change. Can be one of\n # ['NEW', 'SUBMITTED', 'MERGED', 'ABANDONED'].\n self.status = patch_dict['status']\n self._approvals = []\n if 'currentPatchSet' in self.patch_dict:\n self._approvals = self.patch_dict['currentPatchSet'].get('approvals', [])\n self.commit_timestamp = current_patch_set.get('date', 0)\n self.approval_timestamp = max(\n self.commit_timestamp,\n max(x['grantedOn'] for x in self._approvals) if self._approvals else 0)\n self._commit_message = None\n self.commit_message = patch_dict.get('commitMessage')",
"def test_patch_asset_device_configuration(self):\n pass",
"def updated(self, newConfiguration):",
"def modify_cconf(self, pycl_object=None, data=None,\n metadata=None, json_string=None):\n return self.complex_configuration_manager.modify_object(\n pycl_object=pycl_object, name='', data=data,\n metadata=metadata, json_string=json_string)",
"def __patch_jenkins_config( self ):\n config_file = StringIO( )\n if run( 'test -f ~/config.xml', quiet=True ).succeeded:\n fresh_instance = False\n get( remote_path='~/config.xml', local_path=config_file )\n else:\n # Get the in-memory config as the on-disk one may be absent on a fresh instance.\n # Luckily, a fresh instance won't have any configured security.\n fresh_instance = True\n config_url = 'http://localhost:8080/computer/(master)/config.xml'\n with hide( 'output' ):\n config_file.write( run( 'curl \"%s\"' % config_url ) )\n config_file.seek( 0 )\n config = ElementTree.parse( config_file )\n\n yield config\n\n config_file.truncate( 0 )\n config.write( config_file, encoding='utf-8', xml_declaration=True )\n if fresh_instance:\n self.__service_jenkins( 'stop' )\n try:\n put( local_path=config_file, remote_path='~/config.xml' )\n finally:\n if fresh_instance:\n self.__service_jenkins( 'start' )\n else:\n log.warn( 'Visit the Jenkins web UI and click Manage Jenkins - Reload '\n 'Configuration from Disk' )",
"def patch_runway_config(\n request: FixtureRequest, monkeypatch: MonkeyPatch, runway_config: MockRunwayConfig\n) -> MockRunwayConfig:\n patch_path = getattr(cast(\"Module\", request.module), \"PATCH_RUNWAY_CONFIG\", None)\n if patch_path:\n monkeypatch.setattr(patch_path, runway_config)\n return runway_config",
"def update_config_with_dvc_params(base_config):\n params = yaml.safe_load(open(\"params.yaml\"))\n\n if params is None:\n return base_config\n\n def _update(config, params):\n for key, value in params.items():\n if isinstance(value, dict):\n config[key] = _update(config.get(key, {}), value)\n else:\n config[key] = value\n return config\n\n return _update(base_config, params)",
"def config():\n update_config_cli()",
"def admin_update_third_party_config(\n body: ModelsUpdateConfigRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = AdminUpdateThirdPartyConfig.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)",
"def update_package_config():\n try:\n import importlib\n import sys\n import json\n\n path = importlib.machinery.PathFinder().find_spec('sentinelhub', sys.path[1:]).submodule_search_locations[0]\n old_config_filename = os.path.join(path, 'config.json')\n\n with open(old_config_filename, 'r') as file:\n old_config = json.load(file)\n\n from sentinelhub.config import SHConfig\n\n config = SHConfig()\n for attr, value in old_config.items():\n if hasattr(config, attr) and not getattr(config, attr):\n setattr(config, attr, value)\n\n config.save()\n\n except BaseException:\n pass",
"def _updated_config(self):\n from .. import __version__ as keras_version\n\n config = self.get_config()\n model_config = {\n 'class_name': self.__class__.__name__,\n 'config': config,\n 'keras_version': keras_version,\n 'backend': K.backend()\n }\n return model_config",
"def test_patch_compute_server_setting(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Constructs a patch for updating high resilience. | def _ConstructHighResiliencePatch(
enabled, release_track):
messages = api_util.GetMessagesModule(release_track=release_track)
if not enabled:
return 'config.resilience_mode', messages.Environment(
config=messages.EnvironmentConfig()
)
return 'config.resilience_mode', messages.Environment(
config=messages.EnvironmentConfig(
resilienceMode=(
messages.EnvironmentConfig.ResilienceModeValueValuesEnum.HIGH_RESILIENCE
)
)
) | [
"def _patch_update():\n def patched_update(self, *args, **kwargs):\n \"\"\"\n Patched version of Resource.update which send update requests\n containing only the properties specified as arguments to the\n method. If no properties are specified all of them are sent in the\n request.\n \"\"\"\n # pylint: disable=protected-access\n orig_props = self._properties\n\n # user specified which properties to update: set properties dict\n # to contain only them so that the update request do not update\n # unwanted fields\n if args or kwargs:\n self._properties = dict()\n if '$uri' in orig_props:\n self._properties['$uri'] = orig_props['$uri']\n\n # perform the request\n self._properties.update(*args, **kwargs)\n self.save()\n\n # restore all properties\n if args or kwargs:\n orig_props.update(self._properties)\n self._properties = orig_props\n # patched_update()\n potion_resource.Resource.update = patched_update",
"def __init__(__self__,\n resource_name: str,\n args: Optional[PatchBaselineArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def getPatch(self) -> int:\n ...",
"def test_create_empty_patch():\n _p = Patch('some_patch_name')",
"def test_patch_apiextensions_v1beta1_custom_resource_definition(self):\n pass",
"def test_patch_compute_rack_unit(self):\n pass",
"def update(cls, client, resource) :\n try :\n if type(resource) is not list :\n updateresource = lldpparam()\n updateresource.holdtimetxmult = resource.holdtimetxmult\n updateresource.timer = resource.timer\n updateresource.mode = resource.mode\n return updateresource.update_resource(client)\n except Exception as e :\n raise e",
"def __init__(self, patch_dict, remote, url_prefix):\n self.patch_dict = patch_dict\n self.url_prefix = url_prefix\n current_patch_set = patch_dict.get('currentPatchSet', {})\n # id - The CL's ChangeId\n # revision - The CL's SHA1 hash.\n # number - The CL's gerrit number.\n super(GerritPatch, self).__init__(\n os.path.join(url_prefix, patch_dict['project']),\n patch_dict['project'],\n current_patch_set.get('ref'),\n patch_dict['branch'],\n remote,\n current_patch_set.get('revision'),\n patch_dict['id'],\n ParseGerritNumber(str(patch_dict['number'])),\n current_patch_set.get('number'),\n owner_email=patch_dict['owner']['email'])\n\n prefix_str = site_config.params.CHANGE_PREFIX[self.remote]\n self.gerrit_number_str = '%s%s' % (prefix_str, self.gerrit_number)\n self.url = patch_dict['url']\n # status - Current state of this change. Can be one of\n # ['NEW', 'SUBMITTED', 'MERGED', 'ABANDONED'].\n self.status = patch_dict['status']\n self._approvals = []\n if 'currentPatchSet' in self.patch_dict:\n self._approvals = self.patch_dict['currentPatchSet'].get('approvals', [])\n self.commit_timestamp = current_patch_set.get('date', 0)\n self.approval_timestamp = max(\n self.commit_timestamp,\n max(x['grantedOn'] for x in self._approvals) if self._approvals else 0)\n self._commit_message = None\n self.commit_message = patch_dict.get('commitMessage')",
"def vulnerability_patch(self, vulnerability_patch):\n self._vulnerability_patch = vulnerability_patch",
"def update(cls, client, resource) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tupdateresource = icalatencyprofile()\n\t\t\t\tupdateresource.name = resource.name\n\t\t\t\tupdateresource.l7latencymonitoring = resource.l7latencymonitoring\n\t\t\t\tupdateresource.l7latencythresholdfactor = resource.l7latencythresholdfactor\n\t\t\t\tupdateresource.l7latencywaittime = resource.l7latencywaittime\n\t\t\t\tupdateresource.l7latencynotifyinterval = resource.l7latencynotifyinterval\n\t\t\t\tupdateresource.l7latencymaxnotifycount = resource.l7latencymaxnotifycount\n\t\t\t\treturn updateresource.update_resource(client)\n\t\t\telse :\n\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\tupdateresources = [ icalatencyprofile() for _ in range(len(resource))]\n\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\tupdateresources[i].name = resource[i].name\n\t\t\t\t\t\tupdateresources[i].l7latencymonitoring = resource[i].l7latencymonitoring\n\t\t\t\t\t\tupdateresources[i].l7latencythresholdfactor = resource[i].l7latencythresholdfactor\n\t\t\t\t\t\tupdateresources[i].l7latencywaittime = resource[i].l7latencywaittime\n\t\t\t\t\t\tupdateresources[i].l7latencynotifyinterval = resource[i].l7latencynotifyinterval\n\t\t\t\t\t\tupdateresources[i].l7latencymaxnotifycount = resource[i].l7latencymaxnotifycount\n\t\t\t\tresult = cls.update_bulk_request(client, updateresources)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e",
"def __init__(self, source, spec):\n\n # Source image is now blurred, then encoded to be a HLS-encoded array.\n logging.debug(\"Converting to HLS color space.\")\n self.img = source\n self.img_size = source.shape\n self.img_blurred = ops.blur(source, BLUR_SIGMA)\n self.img_luminosity = ops.rgb2hls(self.img_blurred)[:,:,1]\n\n # Now we make a histogram of the blurred luminosities, each in bins.\n logging.debug(\"Preparing first version of output.\")\n L = window(self.img_luminosity)\n hist, bins = np.histogram(L, density=True, bins=BIN_COUNT)\n L_indices = np.digitize(L.flatten(), bins)\n\n # Store the center of all patches by using the luminosity bins. \n coordinates = np.indices((source.shape[0]-PATCH_SIZE, source.shape[1]-PATCH_SIZE)).swapaxes(0,2).swapaxes(0,1)\n coordinates += [PATCH_HALF, PATCH_HALF]\n self.c_coords = self.createBins(L_indices, coordinates)\n\n # For each bin we calculate the average color, per-luminosity which assumes\n # the image patterns don't have too much hue variation.\n c_buckets = self.createBins(L_indices, window(self.img_blurred))\n c_averages = [np.average(bucket, axis=0) for bucket in c_buckets]\n\n # Normalize the specification image based on what our luminosity can provide.\n ml = min(L.flatten())\n sl = max(L.flatten()) - ml\n self.spec = ml + spec * sl\n\n # Apply the same binning process to the spec image....\n S_indices = np.digitize(self.spec.flatten(), bins)\n self.spec_bins = {}\n for i, bn in enumerate(S_indices):\n # Check coordinates and discard if it's out of bounds.\n ty, tx = i//self.spec.shape[1], i%self.spec.shape[1]\n if ty+PATCH_START < 0 or ty+PATCH_FINISH > self.spec.shape[0]:\n continue\n if tx+PATCH_START < 0 or tx+PATCH_FINISH > self.spec.shape[1]:\n continue\n self.spec_bins[(ty, tx)] = min(bn-1, BIN_COUNT-1)\n\n # Generate a first version of the output based on the average given the luminosity\n # of the specification. There are no interesting patterns, just colors.\n self.output = np.array([c_averages[min(bn-1, BIN_COUNT-1)] for bn in S_indices], dtype=np.float32)\\\n .reshape(self.spec.shape[0], self.spec.shape[1], 3)\n self.coverage = np.zeros(self.output.shape[:2], dtype=np.float32)\n\n # Prepare a masking array used for blending and feathering out the edges of patches.\n self.createMask()",
"def test_rirs_partial_update(self):\n pass",
"def create_patches_and_update_specfile(self, upstream_ref) -> None:\n if self.with_action(action=ActionName.create_patches):\n patches = self.create_patches(\n upstream=upstream_ref, destination=str(self.absolute_specfile_dir)\n )\n self.specfile_add_patches(patches)",
"def newFixedResonance(self, **attrlinks):\n return FixedResonance(self, **attrlinks)",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f'patch -p1 < {self.project.patch}/{self.ver}/{patch}')",
"def rf(monkeypatch, rf):\n def rf_patch(self, path, data='', content_type='application/octet-stream',\n **extra):\n \"\"\"Prepare PATCH request\"\"\"\n return self.generic('PATCH', path, data, content_type, **extra)\n\n from django.test.client import RequestFactory\n monkeypatch.setattr(RequestFactory, 'patch', rf_patch, raising=False)\n return rf",
"def apply_patch(self, patch=None):\n if not any([patch, self.patch]):\n return\n if not patch:\n patch = self.patch\n self.cmd(f\"patch -p1 < {self.project.patch}/{self.ver}/{patch}\")",
"def apply_patch(cls, api):\r\n\r\n\r\n ##BRANCH_ADDR = {\r\n ## \"NTSC-U\" : 0x9ea5c,\r\n ##}[api.VERSION]\r\n\r\n CURRENT_AMMO_SET_A1 = {\r\n \"NTSC-U\" : 0x9eac0,\r\n }[api.VERSION]\r\n\r\n CURRENT_AMMO_DRAW_CALL = {\r\n \"NTSC-U\" : 0x9eadc\r\n }[api.VERSION]\r\n\r\n IF_ADDR = {\r\n \"NTSC-U\" : 0x9eaec,\r\n }[api.VERSION]\r\n\r\n EXTRA_AMMO_SET_A1 = {\r\n \"NTSC-U\" : 0x9eb68,\r\n }[api.VERSION]\r\n\r\n JMP_OVER_EXTRA_AMMO = {\r\n \"NTSC-U\" : 0x9eb90,\r\n }[api.VERSION]\r\n\r\n\r\n guardDelayMem = MemoryAddress(0x801DB33C + 0x008) # guard_0x15.frames_until_update in adjusted setup\r\n guardDelayLoad = \"lb\"\r\n\r\n api.MemConst.global_timer_delta.offset_term(\"reg\")\r\n\r\n\r\n # [2] Correct extra ammo first\r\n # Clear the if\r\n for i in range(7):\r\n api.nop_quietly(hex(IF_ADDR + i*4))\r\n \r\n # Use just the last to set up our a1..\r\n # It moves very fast so we may divide it.. we end up with 3 nops above this so we have room\r\n api.asm(hex(IF_ADDR + 0x6 * 0x4), guardDelayMem.lui_instr(\"a1\"))\r\n\r\n # .. then finish it off where they actually set a1\r\n api.asm(hex(EXTRA_AMMO_SET_A1), \"{} a1, {}\".format(guardDelayLoad, guardDelayMem.offset_term(\"a1\")))\r\n\r\n\r\n # [1]\r\n # Set up the a1 where they set it ..\r\n api.asm(hex(CURRENT_AMMO_SET_A1), api.MemConst.global_timer_delta.lui_instr(\"a1\"))\r\n\r\n \"\"\"\r\n End of CURRENT_AMMO section precedes the EXTRA_AMMO if statement\r\n We need to make a bit more space\r\n 7f069fac 0f c1 a7 23 jal FUN_7f069c8c undefined FUN_7f069c8c()\r\n 7f069fb0 00 00 38 25 _or a3,zero,zero\r\n 7f069fb4 af a2 00 68 sw v0,local_res0(sp)\r\n 7f069fb8 8f ad 00 50 lw t5,extraAmmo(sp)\r\n\r\n \"\"\"\r\n\r\n instrs = [\r\n # .. then make room to finish it off\r\n \"lw a1, {}\".format(api.MemConst.global_timer_delta.offset_term(\"a1\")),\r\n \"jal 0xf069c8c\",\r\n \"or a3,zero,zero\",\r\n \"sw v0, 0x68(sp)\",\r\n # lw t5 bumped off\r\n ]\r\n\r\n for i, instr in enumerate(instrs):\r\n api.asm(hex(CURRENT_AMMO_DRAW_CALL + i*0x4), instr)\r\n \r\n\r\n # Now tidy up the if statement - restore the extraAmmo > 0 test for safety\r\n instrs = [\r\n \"lw t5, 0x50(sp)\",\r\n \"lw a0, 0x60(sp)\",\r\n \"blez t5, 0x{:x}\".format(JMP_OVER_EXTRA_AMMO),\r\n \"nop\",\r\n \"nop\",\r\n \"nop\",\r\n # guardDelayMem.lui_instr(\"a1\")\r\n ]\r\n \r\n for i, instr in enumerate(instrs):\r\n api.asm(hex(IF_ADDR + i*0x4), instr)",
"def apply_patches():\n with open(os.path.join(os.getcwd(), 'utils', 'sdk.patch'), 'r') as fin:\n subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)\n with open(os.path.join(SRCDIR, 's-video_sgx.patch'), 'r') as fin:\n subprocess.call(['patch', '-p2'], stdin=fin, cwd=DESTDIR)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Download the reference genome from the S3 bucket to an EC2 instance, run alignment jobs with kallisto, then generate loom files based on the alignment results, and upload the loom files to the S3 bucket. logger: Logger object that exposes the interface the code directly uses | def main(logger):
parser = get_parser()
args = parser.parse_args()
root_dir = pathlib.Path(args.root_dir)
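    # when running on AWS Batch, nest the working directory under this job's ID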
if os.environ.get("AWS_BATCH_JOB_ID"):
root_dir = root_dir / os.environ["AWS_BATCH_JOB_ID"]
# local directories on the EC2 instance
if args.s3_input_path.endswith("/"):
args.s3_input_path = args.s3_input_path[:-1]
run_dir = root_dir / "data"
run_dir.mkdir(parents=True)
# extract sample name(s) and technology from the metadata tsv file
metadata_name = os.path.basename(args.metadata)
metadata_dir = run_dir / "metadata"
metadata_dir.mkdir(parents=True)
metadata_dir = metadata_dir / metadata_name
s3_metadata_bucket, s3_metadata_prefix = s3u.s3_bucket_and_key(args.metadata)
s3c.download_file(
Bucket=s3_metadata_bucket, # just always download this from us-west-2...
Key=s3_metadata_prefix,
Filename=str(metadata_dir),
)
technology, sample_name = "", ""
with open(metadata_dir) as fd:
rd = csv.reader(fd, delimiter="\t", quotechar='"')
file_content = list()
for row in rd:
file_content.append(row)
file_content = file_content[1:]
sample_name = file_content[0][0]
technology = file_content[0][
1
] # need to fix this later to fit tsv file with multiple samples
# check if the input genome is valid
if args.taxon in reference_genomes_indexes:
genome_name = reference_genomes_indexes[args.taxon]
else:
raise ValueError(f"unknown taxon {args.taxon}")
if "10x" in technology:
genome_dir = root_dir / "genome" / "10X" / genome_name
elif (
"smartseq2" in technology
): # may need to update these after confirming what technology name looks like for smartseq2 data
genome_dir = (
root_dir / "genome" / "smartseq2" / genome_name
) # necessary to separate the reference genome location path for 10x and smartseq2?
genome_dir.mkdir(parents=True)
s3_input_bucket, s3_input_prefix = s3u.s3_bucket_and_key(args.s3_input_path)
logger.info(
f"""Run Info: partition {args.partition_id} out of {args.num_partitions}
genome_dir:\t{genome_dir}
taxon:\t{args.taxon}
s3_input_path:\t{args.s3_input_path}"""
)
s3 = boto3.resource("s3")
# download the reference genome index data from s3 bucket to an EC2 instance
logger.info("Downloading reference genome index files of {}".format(genome_name))
if "10x" in technology:
s3_genome_index = f"s3://{S3_REFERENCE['west']}/loompy/10X/{genome_name}"
elif "smartseq2" in technology:
s3_genome_index = f"s3://{S3_REFERENCE['west']}/loompy/smartseq2/{genome_name}"
s3_genome_index_bucket, s3_genome_index_prefix = s3u.s3_bucket_and_key(
s3_genome_index
)
s3_genome_files_prefix = list(
s3u.get_files(s3_genome_index_bucket, s3_genome_index_prefix)
)
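    # drop the first listed key; the remaining keys are the genome index files to download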
s3_genome_files_prefix = s3_genome_files_prefix[1:]
file_names = list(
os.path.basename(file_path) for file_path in s3_genome_files_prefix
)
genome_name_to_prefix = dict(zip(file_names, s3_genome_files_prefix))
for file in genome_name_to_prefix.keys():
s3c.download_file(
Bucket=s3_genome_index_bucket,
Key=genome_name_to_prefix[file],
Filename=str(genome_dir / file),
)
# extract valid fastq files
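    # 10x file names include a lane token (_L###) before the read number; smartseq2 names carry only the read number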
sample_re_smartseq2 = re.compile("([^/]+)_R\d(?:_\d+)?.fastq.gz$")
sample_re_10x = re.compile("([^/]+)_L\d+_R\d(?:_\d+)?.fastq.gz$")
s3_output_bucket, s3_output_prefix = s3u.s3_bucket_and_key(args.s3_output_path)
logger.info(
"Running partition {} of {}".format(args.partition_id, args.num_partitions)
)
# fastq files are either stored directly under the s3 input folder, or in sample sub-folders under the s3 input folder
if list(s3u.get_files(s3_input_bucket, s3_input_prefix)):
fastq_key_and_size = [
(fn, s)
for fn, s in s3u.get_size(s3_input_bucket, s3_input_prefix)
if fn.endswith("fastq.gz")
]
else:
sample_folder_paths = s3u.get_folders(s3_input_bucket, s3_input_prefix + "/")
fastq_key_and_size = []
for sample in sample_folder_paths:
files = [
(fn, s)
                for fn, s in s3u.get_size(s3_input_bucket, sample)
if fn.endswith("fastq.gz")
]
fastq_key_and_size += files
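    # group fastq keys and sizes by the sample name parsed from each file name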
sample_name_to_fastq_keys = defaultdict(list)
fastq_sizes = defaultdict(list)
fastqs_key_to_name = dict()
for fn, s in fastq_key_and_size:
matched = False
if "10x" in technology:
matched = sample_re_10x.search(os.path.basename(fn))
elif "smartseq2" in technology:
matched = sample_re_smartseq2.search(os.path.basename(fn))
if matched:
sample_name_to_fastq_keys[matched.group(1)].append(fn)
fastq_sizes[matched.group(1)].append(s)
fastqs_key_to_name[fn] = os.path.basename(fn)
logger.info(f"number of samples: {len(sample_name_to_fastq_keys)}")
# download input fastqs from S3 to an EC2 instance
fastq_dir = run_dir / "fastqs"
fastq_dir.mkdir(parents=True)
for key in fastqs_key_to_name.keys():
s3c.download_file(
Bucket=s3_input_bucket,
Key=key,
Filename=str(fastq_dir / fastqs_key_to_name[key]),
)
# run kallisto alignment and RNA velocity analysis on the valid fastq files
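    # each partition handles every num_partitions-th sample, starting at index partition_id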
for sample in sorted(sample_name_to_fastq_keys)[
args.partition_id :: args.num_partitions
]:
result_path = run_dir / "results"
result_path.mkdir(parents=True)
command = [
"loompy",
"fromfq",
str(result_path / f"{sample_name}.loom"),
sample_name,
str(genome_dir),
str(metadata_dir),
]
fastq_names = [
fastqs_key_to_name[key] for key in sample_name_to_fastq_keys[sample]
]
fastq_dirs = [str(fastq_dir / fastq) for fastq in fastq_names]
command += fastq_dirs
        print(command)  # for testing purposes
failed = ut_log.log_command(
logger,
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
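        # single-threaded S3 transfer config used for the loom upload below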
t_config = TransferConfig(use_threads=False)
if failed:
raise RuntimeError("loompy failed")
else:
logger.info(f"Uploading {sample_name}.loom")
s3c.upload_file(
Filename=str(result_path / f"{sample_name}.loom"),
Bucket=s3_output_bucket,
Key=os.path.join(s3_output_prefix, f"{sample_name}.loom"),
Config=t_config,
)
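        # remove this sample's local results before the next iteration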
command = ["rm", "-rf", str(result_path)]
ut_log.log_command(logger, command, shell=True)
time.sleep(30)
logger.info("Job completed") | [
"def download_files(\n self, mcg_obj, awscli_pod, bucket_to_read, download_dir, s3_creds=None\n ):\n ns_bucket_path = f\"s3://{bucket_to_read}\"\n\n if s3_creds:\n # Read data directly from target bucket (uls) to result dir\n sync_object_directory(\n awscli_pod,\n ns_bucket_path,\n download_dir,\n signed_request_creds=s3_creds,\n )\n else:\n # Read data from NS bucket to result dir\n sync_object_directory(awscli_pod, ns_bucket_path, download_dir, mcg_obj)",
"def download_genomes():\n\n log = open(LOG_PATH, 'w', buffering=1)\n\n os.chdir('./data')\n\n os.system('wget {}'.format(FTP_LINK + ORGANISM + '/assembly_summary.txt'))\n assembly = pd.read_csv('assembly_summary.txt', sep='\\t', header=1)\n assembly = assembly[assembly['assembly_level'].isin(['Complete Genome', 'Chromosome'])]\n\n is_downloaded = np.zeros(assembly.shape[0], dtype=bool)\n\n for i, link in enumerate(tqdm(assembly['ftp_path'])):\n\n if link == '-' or is_downloaded[i]:\n log.write('No link found or the genome is downloaded already\\n')\n log.write('End of the iteration. {}/{} done\\n\\n'.format(i + 1, assembly.shape[0]))\n continue\n\n try:\n log.write('Working with {}\\n'.format(link))\n\n name = link.split('/')[-1]\n os.makedirs(name, exist_ok=True)\n os.chdir(name)\n log.write('Working in dir {}\\n'.format(os.getcwd()))\n\n for file in ('protein.faa.gz', 'feature_table.txt.gz', 'genomic.fna.gz', 'protein.gpff.gz'):\n file_link = link + '/' + name + '_' + file\n log.write('Trying to download {}\\n'.format(file_link))\n\n wget = sp.run(['wget', file_link], cwd=os.getcwd(), capture_output=True)\n if wget.returncode:\n log.write('Error\\n')\n else:\n log.write('Success\\n')\n is_downloaded[i] = 1\n\n log.write('Trying to extract archive\\n')\n os.system('gunzip ' + name + '_' + file)\n\n os.chdir('..') # Out of genome directory\n\n except Exception:\n log.write('Error occured\\n')\n finally:\n log.write('End of the iteration. {}/{} done\\n\\n'.format(i + 1, assembly.shape[0]))\n\n os.chdir('..') # Out of data directory",
"def download_from_s3():\n s3 = boto3.resource(\n service_name='s3',\n region_name=os.environ[\"AWS_DEFAULT_REGION\"],\n aws_access_key_id=os.environ[\"AWS_ACCESS_KEY_ID\"],\n aws_secret_access_key=os.environ[\"AWS_SECRET_ACCESS_KEY\"]\n )\n s3.Bucket('rossmann-mynt').download_file(Key='models/model.joblib', Filename='../model/model.joblib')",
"def main():\n \n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = f\"s3a://{OUTPUT_BUCKET}/\"\n \n song_df = read_song_data(spark, input_data)\n process_song_data(spark, song_df, input_data, output_data) \n process_log_data(spark, song_df, input_data, output_data)\n spark.stop()",
"def run(input_data_directory):\n if not os.path.exists(input_data_directory):\n print(f\"Creating directory: {input_data_directory}\")\n os.makedirs(input_data_directory)\n\n print(f\"Downloading {GEMPAK_TO_CF_URL} into {input_data_directory}\")\n\n cmd = f\"curl -L -O {GEMPAK_TO_CF_URL}\"\n try:\n subprocess.run(shlex.split(cmd),\n check=True,\n cwd=input_data_directory)\n except subprocess.CalledProcessError as err:\n print(f\"ERROR: Download failed: {os.path.basename(GEMPAK_TO_CF_URL)}\")\n return False\n\n return True",
"def process(job):\n # download and set vars\n bands, input_path, scene_id = download_and_set(job)\n\n # resize bands\n delete_me, rename_me = resize_bands(bands, input_path, scene_id)\n\n # remove original band files and rename downsized to correct name\n remove_and_rename(delete_me, rename_me)\n\n # call landsat-util to merge images\n merge_images(input_path, bands)\n\n # construct the file names\n file_location, file_name, file_tif = name_files(bands,\n input_path,\n scene_id)\n\n # convert from TIF to png\n file_png = tif_to_png(file_location, file_name, file_tif)\n\n # upload to s3\n upload_to_s3(file_location, file_png, job)\n\n # delete files\n delete_files(input_path)\n\n return True",
"def main():\n\n global args, summaryInstance\n from Bio import SeqIO\n import pysam\n import logging\n\n\n\n configureLogging('info')\n\n readArgs() # Argument parsing\n logging.info('Arguments read successfully')\n\n summaryInstance = Summary()\n\n # Generates a dictionary from concatenated .clstr file (stores in Summary instance)\n # dict1 = consensus barcode representing the cluster : non-consensus barcode in the same cluster\n # dict2 = master dictionary, concatenated from dict1\n\n readAndProcessClusters(args.input_clstr)\n\n logging.info('Cluster file processed successfully')\n\n infile = pysam.AlignmentFile(args.input_mapped_bam, 'rb')\n out = pysam.AlignmentFile(args.output_tagged_bam+'.bam', 'wb', template=infile)\n\n for read in infile.fetch(until_eof=True):\n read_bc = read.query_name.split()[0].split('_')[-1]\n consensus_seq = summaryInstance.master_barcode_dict[read_bc]\n read.set_tag('BC', str(consensus_seq),value_type='Z') # Stores as string, makes duplicate removal possible. Can do it as integer as well.\n read.query_name = (read.query_name + '_@BC:Z:' + str(consensus_seq))\n out.write(read)\n\n infile.close()\n out.close()\n\n logging.info('Tagging completed')",
"def download(self, outdir='results'):\n\n bucket = self._s3_conn.get_bucket(self._s3_bucket)\n keys = bucket.list(prefix=self._get_keyname())\n for key in keys:\n keyname = key.name\n # Ignore folder keys\n if '$' not in keyname:\n suffix = keyname.split('/')[1:] # removes team identifier\n filename = os.path.join(outdir, *suffix)\n dirname = os.path.dirname(filename)\n\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n key.get_contents_to_filename(filename)",
"def _s3_download(self):\n print(\"Info : Starting to download from s3 %s ...\" %\n (self._data_requirement_file[\"src\"]))\n try:\n subprocess.check_call(['aws', 's3', 'sync', '--no-sign-request',\n self._data_requirement_file[\"src\"], self._dst_path])\n except FileNotFoundError:\n print(\"Error: aws does not appear to be installed\")\n raise",
"def download(source_bucket, source_object_key, tmp):\n # TODO\n pass",
"def main():\n (opts,path) = options()\n real_path = None\n uploaded_files = []\n #Determine argument is file or url\n if isfile(path):\n real_path = path\n else:\n #If url specified download to local\n try:\n f = urllib2.urlopen(path)\n except:\n sys.stderr.write(\"An error occured while downloading image file: %s\\n\" % path)\n sys.exit(2)\n tmpfile = open(get_filename_from_url(path),\"w+b\");\n real_path = tmpfile.name\n tmpfile.write(f.read())\n tmpfile.close()\n \n urlbase = \"https://s3.amazonaws.com/%s/\" % opts.bucket\n #Check whether file is correct\n if real_path == None:\n sys.stderr.write(\"Specified file couldn't be found: %s\\n\" %path);\n sys.exit(3);\n\n #Create a connection object to S3. If an error occured terminate.\n try:\n conn, bucket = create_s3_conn(opts.key,opts.secret,opts.bucket) \n except:\n sys.stderr.write(\"An error occured while uploading files to S3. Check your key/secret and bucket name\")\n sys.exit(3);\n \n #Upload original if it is specified from command line\n if opts.upload_original:\n im = Image.open(real_path)\n data = { 'width':im.size[0], \n 'height':im.size[1],\n 'filename':basename(real_path),\n 'url':urlbase+basename(real_path) }\n if( upload_file( real_path, bucket ) != None):\n uploaded_files.append(data)\n \n #If there is parameters specified, create thumbnails accordingly\n if opts.thumbnails != None:\n for t in opts.thumbnails:\n #Parse size parameters WxH => (w,h)\n size = tuple([ int(x) for x in t.split(\"x\")])\n tmpfile = \"thumb-%s-%d-%d.jpg\" % ( slugify(basename(real_path)), size[0], size[1] )\n actual_size = create_thumbnail(real_path, tmpfile, size)\n data = { 'width': actual_size[0],\n 'height': actual_size[1],\n 'filename': tmpfile,\n 'url':urlbase+tmpfile }\n if( upload_file( tmpfile, bucket ) != None):\n uploaded_files.append(data)\n \n #Finally print or send information of successfully uploaded files\n print_upload_data( opts, uploaded_files )",
"def download_from_aws(scene_id: str, destination: str, compressed_path: str = None, chunk_size: int = 512*1024):\n from .publish import BAND_MAP_DN\n\n if compressed_path == None:\n compressed_path = destination\n\n os.makedirs(compressed_path, exist_ok=True)\n\n compressed_path = os.path.join(compressed_path, '{}.tar.gz'.format(scene_id))\n\n files = ['{}_{}.TIF'.format(scene_id, b) for b in BAND_MAP_DN.values()]\n files.append('{}_MTL.txt'.format(scene_id))\n files.append('{}_ANG.txt'.format(scene_id))\n\n pathrow = scene_id.split('_')[2]\n\n path, row = pathrow[:3], pathrow[3:]\n\n os.makedirs(destination, exist_ok=True)\n\n url = 'https://landsat-pds.s3.amazonaws.com/c1/L8/{}/{}/{}'.format(path, row, scene_id)\n\n for f in files:\n stream = requests.get('{}/{}'.format(url, os.path.basename(f)), timeout=90, stream=True)\n\n # Throw for any HTTP error code\n stream.raise_for_status()\n\n logging.debug('Downloading {}...'.format(f))\n\n digital_number_file_path = os.path.join(destination, f)\n _download_file(stream, digital_number_file_path, byte_size=chunk_size)\n\n if f.lower().endswith('.tif'):\n # Remove compression and Tiled order from AWS files in order\n # to espa-science work properly.\n # https://github.com/USGS-EROS/espa-surface-reflectance/issues/76\n remove_tile_compression(digital_number_file_path)\n\n try:\n logging.debug('Compressing {}'.format(compressed_path))\n # Create compressed file and make available\n with tarfile.open(compressed_path, 'w:gz') as compressed_file:\n with working_directory(destination):\n for f in files:\n compressed_file.add(f)\n\n except BaseException:\n logging.error('Could not compress {}.tar.gz'.format(scene_id), exc_info=True)\n\n raise\n\n return compressed_path, url",
"def main(clargs):\n\n\n exporter = ExportAligned()\n args = exporter.parse_args(clargs)\n writer = AlignedWriter(args.output)\n writer.start()\n exporter.setup(args)\n exporter.open_output(writer.queue)\n exporter.process_regions()\n log.info(\"Finished processing regions\")\n exporter.close_output()\n writer.join()",
"def __processAlignment(self):\n self.__ifVerbose(\"Filtering alignment with GATK and Picard-Tools.\")\n self.__logFH.write(\"########## Filtering alignment with GATK and Picard-Tools. ##########\\n\")\n GATKdir = self.outdir + \"/GATK\"\n self.__CallCommand('mkdir', ['mkdir', '-p', GATKdir])\n\n \"\"\" Convert SAM to BAM\"\"\"\n if (self.__ranBWA):\n self.__ifVerbose(\" Running SamFormatConverter.\")\n self.__CallCommand('SamFormatConverter', ['java', '-Xmx4g', '-jar', self.__picard, 'SamFormatConverter', \n 'INPUT='+ self.__alnSam, 'VALIDATION_STRINGENCY=LENIENT', \n 'OUTPUT='+ GATKdir +'/GATK.bam', ])\n else:\n self.__CallCommand('cp', ['cp', self.__alnSam, GATKdir +'/GATK.bam'])\n\n\n \"\"\" Run mapping Report and Mark duplicates using Picard-Tools\"\"\"\n self.__ifVerbose(\" Running SortSam.\")\n self.__CallCommand('SortSam', ['java', '-Xmx8g', '-Djava.io.tmpdir=' + self.tmp, '-jar', self.__picard, 'SortSam', \n 'INPUT='+ GATKdir +'/GATK.bam', 'SORT_ORDER=coordinate', 'OUTPUT='+ GATKdir +'/GATK_s.bam', \n 'VALIDATION_STRINGENCY=LENIENT', 'TMP_DIR=' + self.tmp])\n self.__ifVerbose(\" Running MarkDuplicates.\")\n self.__CallCommand('MarkDuplicates', ['java', '-Xmx8g', '-jar', self.__picard, 'MarkDuplicates', \n 'INPUT='+ GATKdir +'/GATK_s.bam', 'OUTPUT='+ GATKdir +'/GATK_sdr.bam',\n 'METRICS_FILE='+ GATKdir +'/MarkDupes.metrics', 'ASSUME_SORTED=true', \n 'REMOVE_DUPLICATES=false', 'VALIDATION_STRINGENCY=LENIENT'])\n self.__ifVerbose(\" Running BuildBamIndex.\")\n self.__CallCommand('BuildBamIndex', ['java', '-Xmx8g', '-jar', self.__picard, 'BuildBamIndex', \n 'INPUT='+ GATKdir +'/GATK_sdr.bam', 'VALIDATION_STRINGENCY=LENIENT'])\n\n \"\"\" Re-alignment around InDels using GATK \"\"\"\n self.__ifVerbose(\" Running RealignerTargetCreator.\")\n self.__CallCommand('RealignerTargetCreator', ['java', '-Xmx32g', '-jar', self.__gatk, '-T', \n 'RealignerTargetCreator', '-I', GATKdir +'/GATK_sdr.bam', '-R', self.reference, \n '-o', GATKdir +'/GATK.intervals', '-nt', '12'])\n self.__ifVerbose(\" Running IndelRealigner.\")\n self.__CallCommand('IndelRealigner', ['java', '-Xmx4g', '-jar', self.__gatk, '-T', 'IndelRealigner', '-l', \n 'INFO', '-I', GATKdir +'/GATK_sdr.bam', '-R', self.reference, '-targetIntervals', \n GATKdir +'/GATK.intervals', '-o', GATKdir +'/GATK_sdrc.bam'])\n self.__ifVerbose(\" Running BaseRecalibrator.\")\n self.__CallCommand('BaseRecalibrator', ['java', '-Xmx16g', '-jar', self.__gatk, '-T', 'BaseRecalibrator', \n '-I', GATKdir +'/GATK_sdrc.bam', '-R', self.reference, '--knownSites', \n self.snplist,'--maximum_cycle_value', '1600', '-o', GATKdir +'/GATK_Resilist.grp','-nct', '8'])\n self.__ifVerbose(\" Running PrintReads.\")\n self.__CallCommand('PrintReads', ['java', '-Xmx4g', '-jar', self.__gatk, '-T', 'PrintReads', \n '-I', GATKdir +'/GATK_sdrc.bam', '-R', self.reference, '-BQSR', \n GATKdir +'/GATK_Resilist.grp', '-o', GATKdir +'/GATK_sdrcr.bam','-nct', '8'])\n self.__ifVerbose(\" Running SortSam.\")\n self.__CallCommand('SortSam', ['java', '-Xmx8g', '-Djava.io.tmpdir=' + self.tmp, '-jar', self.__picard,'SortSam', \n 'INPUT='+ GATKdir +'/GATK_sdrcr.bam', 'SORT_ORDER=coordinate', 'TMP_DIR=' + self.tmp, \n 'OUTPUT='+ GATKdir +'/GATK_sdrcs.bam', 'VALIDATION_STRINGENCY=LENIENT'])\n self.__ifVerbose(\" Running BuildBamIndex.\")\n self.__CallCommand('BuildBamIndex', ['java', '-Xmx8g', '-jar', self.__picard, 'BuildBamIndex', \n 'INPUT='+ GATKdir +'/GATK_sdrcs.bam', 'VALIDATION_STRINGENCY=LENIENT'])\n\n \"\"\" Filter out unmapped reads \"\"\"\n self.__finalBam = self.fOut + '/'+ 
self.name + '_sdrcsm.bam'\n self.__ifVerbose(\" Running samtools view.\")\n self.__CallCommand('samtools view', [self.__samtools, 'view', '-bhF', '4', '-o', self.__finalBam, \n GATKdir +'/GATK_sdrcs.bam'])\n self.__ifVerbose(\" Running BuildBamIndex.\")\n self.__CallCommand('BuildBamIndex', ['java', '-Xmx8g', '-jar', self.__picard, 'BuildBamIndex', 'INPUT='+ self.__finalBam, \n 'VALIDATION_STRINGENCY=LENIENT'])\n self.__ifVerbose(\"\")\n self.__CallCommand('rm', ['rm', '-r', self.tmp])",
"def download_shared_files(job, input_args):\n shared_ids = {}\n for fname in ['ref.fasta', 'phase.vcf', 'mills.vcf', 'dbsnp.vcf', 'cosmic.vcf']:\n shared_ids[fname] = job.addChildJobFn(download_from_url, url=input_args[fname], name=fname).rv()\n job.addFollowOnJobFn(reference_preprocessing, input_args, shared_ids)",
"def reference_mapping(self):\n logging.info('Extracting paths to reference genomes')\n self.ref_file()\n logging.info('Running bowtie2 build')\n strain_bowtie2_index_dict, self.strain_reference_abs_path_dict, self.strain_reference_dep_path_dict = \\\n VCFMethods.index_ref_genome(reference_link_path_dict=self.reference_strain_dict,\n dependency_path=self.ref_path,\n logfile=self.logfile,\n reference_mapper='bowtie2')\n logging.info('Creating .fai index file of {ref}'.format(ref=self.ref_strain))\n VCFMethods.faidx_ref_genome(reference_link_path_dict=self.reference_strain_dict,\n dependency_path=self.ref_path,\n logfile=self.logfile)\n logging.info('Running bowtie2 reference mapping')\n self.strain_sorted_bam_dict = VCFMethods.map_ref_genome(\n strain_fastq_dict=self.strain_fastq_dict,\n strain_name_dict=self.strain_name_dict,\n strain_mapper_index_dict=strain_bowtie2_index_dict,\n threads=self.threads,\n logfile=self.logfile,\n reference_mapper='bowtie2')\n logging.debug('Sorted BAM files: \\n{files}'.format(\n files='\\n'.join(['{strain_name}: {bam_file}'.format(strain_name=sn, bam_file=bf)\n for sn, bf in self.strain_sorted_bam_dict.items()])))\n logging.info('Indexing sorted BAM files')\n VCFMethods.samtools_index(strain_sorted_bam_dict=self.strain_sorted_bam_dict,\n strain_name_dict=self.strain_name_dict,\n threads=self.threads,\n logfile=self.logfile)\n logging.info('Extracting unmapped reads')\n strain_unmapped_reads_dict = VCFMethods.extract_unmapped_reads(\n strain_sorted_bam_dict=self.strain_sorted_bam_dict,\n strain_name_dict=self.strain_name_dict,\n threads=self.threads,\n logfile=self.logfile)\n logging.info('Attempting to assemble unmapped reads with SKESA')\n strain_skesa_output_fasta_dict = VCFMethods.assemble_unmapped_reads(\n strain_unmapped_reads_dict=strain_unmapped_reads_dict,\n strain_name_dict=self.strain_name_dict,\n threads=self.threads,\n logfile=self.logfile)\n logging.debug('SKESA assemblies: \\n{files}'.format(\n files='\\n'.join(['{strain_name}: {assembly}'.format(strain_name=sn, assembly=af)\n for sn, af in strain_skesa_output_fasta_dict.items()])))\n logging.info('Running Quast on SKESA assemblies')\n quast_report_dict = VCFMethods \\\n .quast(strain_skesa_output_fasta_dict=strain_skesa_output_fasta_dict,\n strain_unmapped_reads_dict=strain_unmapped_reads_dict,\n strain_sorted_bam_dict=self.strain_sorted_bam_dict,\n threads=self.threads,\n logfile=self.logfile)\n VCFMethods.parse_quast_report(quast_report_dict=quast_report_dict,\n summary_path=self.summary_path)",
"def test_sagemaker_java_jar_multinode(tag, role, image_uri, configuration, sagemaker_session, sagemaker_client):\n spark = SparkJarProcessor(\n base_job_name=\"sm-spark-java\",\n framework_version=tag,\n image_uri=image_uri,\n role=role,\n instance_count=2,\n instance_type=\"ml.c5.xlarge\",\n max_runtime_in_seconds=1200,\n sagemaker_session=sagemaker_session,\n )\n\n bucket = spark.sagemaker_session.default_bucket()\n with open(\"test/resources/data/files/data.jsonl\") as data:\n body = data.read()\n input_data_uri = \"s3://{}/spark/input/data.jsonl\".format(bucket)\n S3Uploader.upload_string_as_file_body(\n body=body, desired_s3_uri=input_data_uri, sagemaker_session=sagemaker_session\n )\n output_data_uri = \"s3://{}/spark/output/sales/{}\".format(bucket, datetime.now().isoformat())\n\n java_project_dir = \"test/resources/code/java/hello-java-spark\"\n spark.run(\n submit_app=\"{}/target/hello-java-spark-1.0-SNAPSHOT.jar\".format(java_project_dir),\n submit_class=\"com.amazonaws.sagemaker.spark.test.HelloJavaSparkApp\",\n arguments=[\"--input\", input_data_uri, \"--output\", output_data_uri],\n configuration=configuration,\n )\n processing_job = spark.latest_job\n\n waiter = sagemaker_client.get_waiter(\"processing_job_completed_or_stopped\")\n waiter.wait(\n ProcessingJobName=processing_job.job_name,\n # poll every 15 seconds. timeout after 15 minutes.\n WaiterConfig={\"Delay\": 15, \"MaxAttempts\": 60},\n )\n\n output_contents = S3Downloader.list(output_data_uri, sagemaker_session=sagemaker_session)\n assert len(output_contents) != 0",
"def secret_op_pipeline(\n url='gs://ml-pipeline/sample-data/shakespeare/shakespeare1.txt'):\n\n gcs_list_items_task = gcs_list_items_op(url)\n gcs_list_buckets_task = gcs_list_buckets_op()",
"def To_S3(all_urls):\n files_list =[]\n client = boto3.client(\n 's3',\n aws_access_key_id='A******************N',\n aws_secret_access_key='y****************************************')\n \n try:\n for obj in client.list_objects_v2(Bucket= 'dataelection')['Contents']:\n FileName = obj['Key']\n files_list.append(FileName)\n except:\n pass \n now = datetime.datetime.now()\n current_year=now.year\n #current_year='2002'\n if len(files_list)==0:\n\n for yr_link in all_urls:\n req = urllib2.urlopen(yr_link)\n zip_file = zipfile.ZipFile(BytesIO(req.read()))\n zip_file.namelist()\n zip_file.extractall('files_to_upload') \n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n if 'files_to_upload/itcont.txt'in glob.glob(\"files_to_upload/*.txt\"):\n #os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n files_list = os.listdir('files_to_upload')\n for file in files_list:\n if file.endswith(\".txt\"):\n print(os.path.join('files_to_upload', file))\n client.upload_file(os.path.join('files_to_upload', file),'dataelection',file)\n os.remove(os.path.join('files_to_upload', file))\n else:\n for url in all_urls:\n if current_year in url:\n req = urllib2.urlopen(url)\n zip_file = zipfile.ZipFile(BytesIO(req.read()))\n files_to_check=zip_file.namelist()\n files_to_extract=list(set(files_to_check)-set(files_list))\n if files_to_extract:\n for cnt in range(0,len(files_to_extract)):\n zip_file.extract(files_to_extract[cnt],'files_to_upload')\n if 'files_to_upload/itcont.txt'in glob.glob(\"files_to_upload/*.txt\"):\n #os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n os.rename('files_to_upload/itcont.txt','files_to_upload/itcont'+timestr+'.txt')\n files_list = os.listdir('files_to_upload')\n for file in files_list:\n if file.endswith(\".txt\"):\n print(os.path.join('files_to_upload', file))\n client.upload_file(os.path.join('files_to_upload', file),'dataelection',file)\n os.remove(os.path.join('files_to_upload', file))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deduplicate Whitehall statistics announcement | def dedupe_stats_announcement(duplicate_slug, authoritative_slug):
command = 'govuk_setenv whitehall ./script/dedupe_stats_announcement {} {}'
with cd('/var/apps/whitehall'):
sudo(command.format(duplicate_slug, authoritative_slug),
user='deploy') | [
"def clear_summaries(self):\n\n\t\tself.count = 0\n\t\tmemset(self.counts, 0, self.n*sizeof(double))",
"async def reset_weekly_stats(self):\n await self.insert_data(\"UPDATE staff SET weekly_tickets = 0\")\n await self.insert_data(\"UPDATE staff SET weekly_Started = 0\")",
"def truncate_dist_reported(self):\n self.stopping_prob_schedule.append(\n sum(self.distribution_reported_tally[self.min_winner_ballots[-1]:]))\n self.distribution_reported_tally = self.distribution_reported_tally[\n :self.min_winner_ballots[-1]]",
"async def _prune(self, ctx):\n tokens = []\n playerdb = []\n settings = await self.config.guild(ctx.guild).all()\n for member in settings[\"playerstats\"]:\n if \"xuid\" in settings[\"playerstats\"][member]:\n xuid = settings[\"playerstats\"][member][\"xuid\"]\n playerdb.append(xuid)\n for cname in settings[\"clusters\"]:\n for sname in settings[\"clusters\"][cname][\"servers\"]:\n if \"api\" in settings[\"clusters\"][cname][\"servers\"][sname]:\n api = settings[\"clusters\"][cname][\"servers\"][sname][\"api\"]\n gt = settings[\"clusters\"][cname][\"servers\"][sname][\"gamertag\"]\n tokens.append((api, gt))\n if tokens:\n embed = discord.Embed(\n description=f\"Gathering Data...\"\n )\n embed.set_footer(text=\"This may take a while, sit back and relax.\")\n embed.set_thumbnail(url=LOADING)\n msg = await ctx.send(embed=embed)\n friendreq = \"https://xbl.io/api/v2/friends\"\n for host in tokens:\n purgelist = []\n key = host[0]\n gt = host[1]\n embed = discord.Embed(\n description=f\"Gathering data for {gt}...\"\n )\n embed.set_thumbnail(url=LOADING)\n await msg.edit(embed=embed)\n data, status = await self.apicall(friendreq, key)\n if status == 200:\n embed = discord.Embed(\n description=f\"Pruning players from {gt}...\"\n )\n embed.set_footer(text=\"This may take a while, sit back and relax.\")\n embed.set_thumbnail(url=LOADING)\n await msg.edit(embed=embed)\n async with ctx.typing():\n for friend in data[\"people\"]:\n xuid = friend[\"xuid\"]\n playertag = friend[\"gamertag\"]\n if xuid not in playerdb:\n purgelist.append((xuid, playertag))\n trash = len(purgelist)\n cur_member = 1\n for xuid in purgelist:\n status, remaining = await self._purgewipe(xuid[0], key)\n if int(remaining) < 30:\n await ctx.send(f\"`{gt}` low on remaining API calls `(30)`. Skipping for now.\")\n break\n elif int(status) != 200:\n await msg.edit(f\"`{gt}` failed to unfriend `{xuid[1]}`.\")\n continue\n else:\n embed = discord.Embed(\n description=f\"Pruning `{xuid[1]}` from {gt}...\\n\"\n f\"`{cur_member}/{trash}` pruned.\"\n )\n embed.set_footer(text=\"This may take a while, sit back and relax.\")\n embed.set_thumbnail(url=LOADING)\n await msg.edit(embed=embed)\n cur_member += 1\n\n embed = discord.Embed(\n description=f\"Purge Complete\",\n color=discord.Color.green()\n )\n embed.set_thumbnail(url=SUCCESS)\n await msg.edit(embed=embed)",
"def _generateDelayStats(self):\n\n def computeDelay(messages):\n data = messages[['datetime', 'sender']]\n delay = np.insert(data['datetime'].iloc[1:].values -data['datetime'].iloc[:-1].values, 0, 0)\n data['delay'] = delay\n return data\n\n #sender of current message must be different from sender previous message\n def removeConsecutiveMessages(messages):\n return messages[messages['sender'] != messages['sender'].shift()]\n\n res = removeConsecutiveMessages(computeDelay(self.df))\n res = res.groupby('sender').sum()\n return res",
"def summarize(self):\n for stats in self.stats_pool.values():\n _ = stats.summarize()",
"def stats_singularity(data):\n \n # Prepare the result set\n result = {'total' : 0}\n # Crunch\n for game in data:\n if game['turn'] == 36:\n dead = True\n for key in game['players']:\n if dead:\n dead = game['players'][key][-1] == 0\n if dead:\n result['total'] += 1\n # Print\n helper_print(\"Dead games\", result)",
"def reset(self):\n # reserve only those that has been around this time\n new_stats_data={}\n for c in self.stats_diff.keys():\n # but carry over all the users... should not change that often\n new_stats_data[c]=self.current_stats_data[c]\n\n self.old_stats_data=new_stats_data\n self.current_stats_data={}\n\n # and flush out the differences\n self.stats_diff={}",
"def user_statistics_cleanup(context: CallbackContext, session: scoped_session) -> None:\n threshold = date.today() - timedelta(days=7)\n session.query(UserStatistic).filter(UserStatistic.date < threshold).delete()",
"def DelAllTrafficShaper(self):\n req = self.ApiGet('cmdb/firewall.shaper/traffic-shaper/')\n data = json.loads(req.text)\n for y in range(0, len(data['results'])):\n traffic_shaper_name = data['results'][y]['name']\n return_code = self.DelTrafficShaper(traffic_shaper_name)\n print('del traffic shaper:', traffic_shaper_name, '(', return_code, ')')\n if return_code != 200: return return_code\n return 200",
"def strip_out_internal_stats(record, stats_text):\n delete = 0\n lines = record.split('\\n')\n for i, line in enumerate(lines):\n if stats_text in line:\n delete = i\n break\n if delete:\n lines = lines[:delete]\n return '\\n'.join(lines)",
"def Histogram(self):\n\n hist = {}\n\n hunt = aff4.FACTORY.Open(\"aff4:/hunts/%s\" % self.session_id,\n age=aff4.ALL_TIMES, token=self.token)\n\n log = hunt.GetValuesForAttribute(hunt.Schema.LOG)\n\n client_ids = [l.client_id for l in log]\n\n to_read = []\n\n while client_ids:\n clients = aff4.FACTORY.MultiOpen(\n [\"aff4:/%s\" % client_id for client_id in client_ids[:1000]])\n client_ids = client_ids[1000:]\n\n for client in clients:\n for user in client.Get(client.Schema.USER):\n to_read.append(\"aff4:/%s/analysis/RunKeys/%s/RunOnce\" %\n (client.client_id, user.username))\n to_read.append(\"aff4:/%s/analysis/RunKeys/%s/Run\" %\n (client.client_id, user.username))\n to_read.append(\"aff4:/%s/analysis/RunKeys/System/RunOnce\" %\n client.client_id)\n to_read.append(\"aff4:/%s/analysis/RunKeys/System/Run\" %\n client.client_id)\n\n print \"Processing %d collections.\" % len(to_read)\n collections_done = 0\n\n while to_read:\n # Only do 1000 at a time.\n collections_done += len(to_read[:1000])\n collections = aff4.FACTORY.MultiOpen(to_read[:1000], token=self.token)\n to_read = to_read[1000:]\n\n for collection in collections:\n try:\n for runkey in collection:\n key = runkey.filepath.replace(\"\\\"\", \"\")\n key = re.sub(r\"Users\\\\[^\\\\]+\\\\\", r\"Users\\\\USER\\\\\", key)\n hist.setdefault(key, set()).add(str(collection.urn)[6:6+18])\n except AttributeError:\n pass\n\n print \"%d collections done.\" % collections_done\n\n rk_list = sorted(hist.iteritems(), reverse=True, key=lambda (k, v): len(v))\n for rk, freq in rk_list:\n print \"%d %s\" % (len(freq), rk)\n\n return rk_list",
"def prune_totals(stats):\n\tstats.pop('fgm')\n\tstats.pop('fga')\n\tstats.pop('ftm')\n\tstats.pop('fta')\n\tstats.pop('minutes')\n\tstats.pop('games_played')\n\n\treturn stats",
"async def _wipestats(self, ctx: commands.Context):\n async with self.config.guild(ctx.guild).all() as data:\n async with ctx.typing():\n del data[\"playerstats\"]\n await ctx.send(embed=discord.Embed(description=\"Player Stats have been wiped.\"))",
"def _prune_message_counts(self, time_now_s: float) -> None:\n # We create a copy of the key list here as the dictionary is modified during\n # the loop\n for key in list(self.actions.keys()):\n action_count, time_start, rate_hz = self.actions[key]\n\n # Rate limit = \"seconds since we started limiting this action\" * rate_hz\n # If this limit has not been exceeded, wipe our record of this action\n time_delta = time_now_s - time_start\n if action_count - time_delta * rate_hz > 0:\n continue\n else:\n del self.actions[key]",
"def clear_report_results(self):",
"def flush_stats(self):\n self.stored_stats = []\n self.last_stats = []",
"def drop_tweetsduplicates(self):\n dates = self.tweets[\"date\"].unique()\n # define a dataframe which will contain the cleaned tweets\n clean_df1 = pd.DataFrame(columns=[\"date\", \"text\"])\n for d in dates:\n # for each day we drop all the duplicated tweets\n df_ = self.tweets[self.tweets[\"date\"] == d]\n # append the slice of cleaned tweets for the dat d in the the clean dataframe\n clean_df1 = clean_df1.append(self.tweets_sim(df_))\n return clean_df1",
"def purge():"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
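The dedupe_stats_announcement document above depends on Fabric's cd and sudo helpers but shows no imports; a minimal self-contained sketch, assuming Fabric 1.x, would look like the following (the imports and the @task decorator are assumptions, only the function body comes from the row):

from fabric.api import cd, sudo, task

@task
def dedupe_stats_announcement(duplicate_slug, authoritative_slug):
    # Run the Whitehall dedupe script on the remote host as the 'deploy' user.
    command = 'govuk_setenv whitehall ./script/dedupe_stats_announcement {} {}'
    with cd('/var/apps/whitehall'):
        sudo(command.format(duplicate_slug, authoritative_slug), user='deploy')

Invoked with Fabric 1.x task-argument syntax this would be roughly fab -H <whitehall-host> dedupe_stats_announcement:duplicate-slug,authoritative-slug, where the host name is an assumption.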
Allocates item_count into groups of bin_count. If we cannot divide evenly, we find the minimum which should be allocated to each bin and the rest we distribute evenly starting from the first bin till all items are accounted for | def allocate(bin_count: int, item_count: int) -> list[int]:
assert bin_count > 0 and item_count >= 0
per_bin, left_over = divmod(item_count, bin_count)
allocations = []
for _ in range(bin_count):
this_allocation = per_bin
if left_over > 0:
this_allocation += 1
left_over -= 1
allocations.append(this_allocation)
return allocations | [
"def first_fit(items, bin_size):\n #check input: if an item is greater than bin_size, terminate program, let user know\n for item in items:\n if item > bin_size:\n return print (\"unidentified object in bagging area: your item is too big for our bins.\")\n #sort items into reverse order to save runtime\n items.sort(reverse = True)\n #Set up the list of lists\n list_of_bins = [[]]\n \n #For each integer in the list of items\n for item in items:\n item_not_stored = True\n \n while item_not_stored:\n \n #Check to see if it will fit into the first bin, if not, traverse the list of bins until space\n for list_ in list_of_bins:\n #if there is space in the bin, store the item there\n if item + sum(list_) <= bin_size:\n list_.append(item)\n item_not_stored = False\n #stop item from repeated storing in later bins:\n break\n #if there is no space remaining in any extant bins, create a new bin and add to list_of_bins:\n if item_not_stored:\n new_bin = [item]\n list_of_bins.append(new_bin)\n item_not_stored = False\n \n return list_of_bins",
"def partition_items(count, bin_size):\n num_bins = int(math.ceil(count / float(bin_size)))\n bins = [0] * num_bins\n for i in range(count):\n bins[i % num_bins] += 1\n return bins",
"def computeBins(self, state, cpu_id):\n import pulp\n import time\n\n #https://www.linkedin.com/pulse/bin-packing-python-pulp-michael-basilyan\n #This code modified from https://github.com/mbasilyan/binpacking/blob/master/binpacker.py\n items = [(i, i.getCost()) for i in self._queue]\n itemCount = len(items)\n if itemCount == 0:\n return\n\n # Max number of bins allowed.\n maxBins = 10\n\n # Bin Size\n binCapacity = state.getVar(\"binsize\")\n\n # Indicator variable assigned 1 when the bin is used.\n y = pulp.LpVariable.dicts('BinUsed', range(maxBins),\n lowBound = 0,\n upBound = 1,\n cat = pulp.LpInteger)\n\n # An indicator variable that is assigned 1 when item is placed into binNum\n possible_ItemInBin = [(itemTuple[0], binNum) for itemTuple in items\n for binNum in range(maxBins)]\n x = pulp.LpVariable.dicts('itemInBin', possible_ItemInBin,\n lowBound = 0,\n upBound = 1,\n cat = pulp.LpInteger)\n\n # Initialize the problem\n prob = pulp.LpProblem(\"Bin Packing Problem\", pulp.LpMinimize)\n\n # Add the objective function.\n prob += (\n pulp.lpSum([y[i] for i in range(maxBins)]),\n \"Objective: Minimize Bins Used\"\n )\n\n #\n # This is the constraints section.\n #\n\n # First constraint: For every item, the sum of bins in which it appears must be 1\n for j in items:\n prob += (\n pulp.lpSum([x[(j[0], i)] for i in range(maxBins)]) == 1,\n (\"An item can be in only 1 bin -- \" + str(j[0]))\n )\n\n # Second constraint: For every bin, the number of items in the bin cannot exceed the bin capacity\n for i in range(maxBins):\n prob += (\n pulp.lpSum([items[j][1] * x[(items[j][0], i)] for j in range(itemCount)]) <= binCapacity*y[i],\n (\"The sum of item sizes must be smaller than the bin -- \" + str(i))\n )\n\n # Write the model to disk\n #prob.writeLP(\"BinPack.lp\")\n\n # Solve the optimization.\n start_time = time.time()\n try:\n prob.solve()\n except:\n #Sometimes the solver crashes?! Better luck next time.\n return\n #print(\"Solved in %s seconds.\" % (time.time() - start_time))\n\n # Bins used\n bin_count = int(sum([y[i].value() for i in range(maxBins)]))\n #print(\"Bins used: {}\".format(bin_count))\n\n # The rest of this is some unpleasent massaging to get pretty results.\n bins = {}\n\n for itemBinPair in x.keys():\n if(x[itemBinPair].value() == 1):\n itemNum = itemBinPair[0]\n binNum = itemBinPair[1]\n if binNum not in bins:\n bins[binNum] = Bin()\n\n bins[binNum].addTask(itemNum)\n\n self._binqueue = sorted(bins.values(), key=lambda b:b.getCost(), reverse=True)\n split = int(len(self._binqueue) * 0.75)\n if split > 1:\n (self._binqueue, dropped) = (self._binqueue[:split], self._binqueue[split:])\n self._queue = sum(map(lambda d: d.getTasks(), dropped), [])\n else:\n self._queue = []\n\n #Apparently, if the maxBins is exceeded it just throws all remaining tasks into one big task\n # which is probably cheating.\n large = filter(lambda b : b.getCost() > binCapacity, self._binqueue)\n self._binqueue = list(filter(lambda b : b.getCost() <= binCapacity, self._binqueue))\n\n self._queue.extend(sum([l.getTasks() for l in large], []))\n\n #print (list(map(lambda b : b.getCost(), self._binqueue)))",
"def get_optimal_binning(hist, min_count=10, default_width=0.1, bin_prec=3, smooth_bkg=False):\n new_bins = [1]\n last_bin_edge = 1\n count = 0\n bin_numbers = list(range(1, hist.GetNbinsX() + 1))\n bin_numbers.reverse()\n first_merge = True\n last_count = 0\n for i in bin_numbers:\n count += hist.GetBinContent(i)\n bin_low_edge = round(hist.GetBinLowEdge(i), bin_prec)\n if count >= min_count and (not smooth_bkg or count >= last_count):\n current_bin_width = round(abs(last_bin_edge - bin_low_edge), bin_prec)\n if smooth_bkg or (first_merge or current_bin_width >= default_width):\n new_bins.append(bin_low_edge)\n last_bin_edge = bin_low_edge\n last_count = count\n count = 0\n first_merge = False\n if new_bins[-1] != -1: # last bin has to few entries\n new_bins[-1] = -1 # merge with previous bin\n new_bins.reverse()\n return new_bins",
"def allocate(chunks, bin_size, chunk_overhead=False, allow_overflow=False):\n chunks = sorted(chunks)\n binsets = []\n while chunks:\n still_virgin = True\n for counter in range(len(chunks) - 1, -1, -1):\n chunk = chunks[counter]\n if still_virgin:\n subset = []\n if chunk_overhead + chunk[0] <= bin_size:\n bin_count = 1\n used_units = chunk_overhead + chunk[0]\n elif allow_overflow:\n quotient, remainder = divmod(chunk_overhead + chunk[0],\n bin_size)\n bin_count = 1 + quotient\n used_units = chunk_overhead + remainder\n else:\n raise BinOverflow()\n subset.append(chunk)\n del chunks[counter]\n still_virgin = False\n elif used_units + chunk_overhead + chunk[0] <= bin_size:\n used_units += chunk_overhead + chunk[0]\n subset.append(chunk)\n del chunks[counter]\n subset.reverse()\n binsets.append((bin_count, subset))\n return binsets",
"def rebin_mincount(self,min_count,max_bins=None):\n\n bins = [0]\n c = np.concatenate(([0],np.cumsum(self._counts)))\n\n for ibin in range(self._axes[0].nbins+1):\n\n nbin = ibin-bins[-1] \n if not max_bins is None and nbin > max_bins:\n bins.append(ibin)\n elif c[ibin] - c[bins[-1]] >= min_count or \\\n ibin == self._axes[0].nbins:\n bins.append(ibin)\n\n return self.rebin(bins)",
"def create_discrete_binning(policy_data, out):\n discretized_data = np.floor((policy_data - mins) / bin_sizes).astype(np.uint)\n # Stackoverflow #2004364\n np.add.at(out, tuple([discretized_data[:, d] for d in range(ndim)]), 1)\n out /= len(policy_data)",
"def _split_into_subsets(items: List[str], bin_count: int):\n bin_size = len(items) // bin_count\n for idx in range(bin_count - 1):\n yield items[bin_size * idx : bin_size * (idx + 1)]\n # The last bin might have up to `bin_size - 1` additional items\n yield items[bin_size * (bin_count - 1) :]",
"def NEWsplitRingsIntoBins(bin_size, distances, si_objects): #Could possibly improve this to speed up a bit\n bin_list = [] #2-d array of each bin of ring types (types are ints)\n bin_mids = [] #midpoint distance of each ring/bin from edge of hole\n \n #Get maximum distance away that a ring is from the hole or axis (in nm)\n max_dist = float(numpy.max(distances))\n bin_start = 0\n while bin_start < max_dist:\n #creates the bins\n bin_mids.append(bin_start + (bin_size/2)) #save each midpoint in a list\n \n #put bins in the bin_list\n #for each bin, set up a list of frequencies for each ring type\n bin_list.append([0, 0, 0, 0, 0, 0]) \n bin_start += bin_size\n \n for i in range(len(si_objects)):\n si = si_objects[i] #This is the Si that we are looking at\n si_dist = distances[i] #Here is its distance from the hole or axis\n si_rings = si.get_rings() #Here is a list of 3 ring objects for each Si\n \n #cycle through bins to find which one to put these 3 ring types in\n bin_start = 0\n bin_num = 0\n while bin_start < max_dist:\n if bin_start <= si_dist < bin_start + bin_size:\n #If the distance for the si fits in the bin,\n for ring in si_rings:\n #increment each ring type for the si's rings\n ring_type = ring.get_type()\n #only add a fraction to the frequency,\n #ex: 1 Si in a 7-mem ring is only 1/7 of the ring\n bin_list[bin_num][ring_type - 4] += (1/ring_type)\n \n bin_start += bin_size\n bin_num += 1\n \n return bin_list, bin_mids",
"def make_buckets(x, bucket_size):\n check_numeric(x)\n return Counter([bucket_size * math.floor(i / bucket_size) for i in x if not math.isnan(i)])",
"def _partition_into_bins(x, width):\n x = np.atleast_1d(x)\n bins, bin_start = [], 0\n ind = np.argsort(x)\n if width is None:\n return [ind], np.array([x.mean()])\n elif width <= 0.0:\n return [[n] for n in ind], x[ind]\n relative_x = x[ind] - x[ind[0]]\n while bin_start < len(relative_x):\n relative_x -= relative_x[bin_start]\n bin_inds = ind[bin_start + (relative_x[bin_start:] < width).nonzero()[0]]\n bins.append(bin_inds)\n bin_start += len(bin_inds)\n return bins, np.hstack([x[bin].mean() for bin in bins])",
"def binarize(i, bins):\n\n hist, edges = np.histogram(i, bins=bins, range=[10, 2000], normed=True)\n edges = (edges[:-1] + edges[1:])/2\n hist *= edges\n\n return hist",
"def arrangeBuckets(self, counter, areas, bucketarea, sample, N):\n boundaries = sorted(bucketarea.items(), key=operator.itemgetter(1))\n low = self.min\n values = bucketarea.values()\n values = list(itertools.chain(*values))\n values = sorted(values)\n for i in range(0, len(values)):\n self.buckets[i]['low'] = low\n highindex = values[i]\n self.buckets[i]['high'] = sample[highindex]\n self.buckets[i]['size'] = sample[highindex] - low\n if sample[highindex] == self.buckets[i]['low']:\n self.buckets[i]['high'] = sample[highindex + 1]\n self.buckets[i]['size'] = sample[highindex + 1] - low\n if low == self.min:\n self.buckets[i]['frequency'] = counter[sample[0]] * N / len(sample) * 2\n else:\n self.buckets[i]['frequency'] = counter[low] * N / len(sample) * 2\n low = self.buckets[i]['high']\n self.buckets[self.numbuckets - 1]['high'] = self.max + 1\n self.buckets[self.numbuckets - 1]['low'] = self.buckets[self.numbuckets - 2]['high']\n self.buckets[self.numbuckets - 1]['frequency'] = counter[self.buckets[self.numbuckets - 1]['low']] * N / len(sample) * 2\n self.buckets[self.numbuckets - 1]['size'] = self.buckets[self.numbuckets - 1]['high'] - self.buckets[self.numbuckets - 1]['low']\n f = 0\n for i in range(len(self.buckets)):\n f += self.buckets[i]['frequency']\n #assert np.isclose(f, N)",
"def _calc_histogram_bins(count):\n max_bins, max_per_bin = 90, 10\n\n if not count:\n return 1\n if count <= 5:\n return 2\n if count <= 10:\n return 3\n if count <= 880:\n # note that math.ceil(881/10) + 1 equals 90\n return count // max_per_bin + 1\n\n return max_bins",
"def fits_in_bin(item, list, bin_size):\n if item + sum(list) <= bin_size:\n return True\n else:\n return False",
"def get_normal_distribution_buckets(self, num_items, num_segments):\n assert(num_segments > 0,\n \"number of segments is less than 1: {}\".format(num_segments))\n\n integrator = integration.Integrator(20, 1E-10)\n func = lambda x: integrator.integrate_minus_infinity_to(\n statistics.normal_distribution, x)\n\n results = []\n cumulative_probability = 0\n previous_upper_bound = None\n segment_allocation = self.get_segment_allocation(\n num_items, num_segments)\n\n for items_in_segment in segment_allocation[:-1]:\n cumulative_probability += items_in_segment / float(num_items)\n upper_bound = integration.approximate_inverse(\n func, cumulative_probability)\n results.append(SegmentRange(previous_upper_bound, upper_bound))\n previous_upper_bound = upper_bound\n\n results.append(SegmentRange(previous_upper_bound, None))\n return results",
"def __calculate_number_of_buckets(self, expected_item_count, target_total_size):\n return max(\n math.floor(target_total_size / self.__bucket_size),\n math.ceil(expected_item_count / self.__max_items_per_bucket)\n )",
"def _create_digit_splits(bins, base, cumlative_splits):\n\tbins_total = sum(bins)\n\tdigit_splits_width = float(bins_total) / base\n\tdigit_splits = []\n\trunning_total = 0.0\n\tfor i in range(base):\n\t\t# round lower digit_split to the max of the cumlative_splits bin in which it resides\n\t\t_,lower = cumlative_splits[_lies_at_index_range(cumlative_splits,running_total)]\n\t\trunning_total += digit_splits_width\n\t\tassert lower < running_total\n\t\tdigit_splits.append((lower, running_total))\n\n\t# ensure the last max is rounded up\n\tlow,_ = digit_splits[-1]\n\tdigit_splits[-1] = low,bins_total\n\n\treturn digit_splits",
"def pigeonhole_bounded(num_balls, num_bins, max_balls_per_bin):\n lim = int(min(num_bins, 1.*num_balls / (max_balls_per_bin+1)))\n val = 0\n for q in range(lim+1):\n sgn = (-1)**q\n term_1 = comb(num_bins, q)\n term_2 = comb(num_balls - q*(max_balls_per_bin + 1) + num_bins - 1, num_bins - 1)\n val += sgn * term_1 * term_2\n return val"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
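The allocate document above lends itself to a quick usage check; the sketch below repeats the function so it runs standalone, and the asserted outputs are illustrative examples rather than values taken from the dataset.

def allocate(bin_count: int, item_count: int) -> list[int]:
    # Even share per bin, plus the remainder spread one item at a time
    # over the leading bins until everything is accounted for.
    assert bin_count > 0 and item_count >= 0
    per_bin, left_over = divmod(item_count, bin_count)
    allocations = []
    for _ in range(bin_count):
        this_allocation = per_bin
        if left_over > 0:
            this_allocation += 1
            left_over -= 1
        allocations.append(this_allocation)
    return allocations

assert allocate(3, 10) == [4, 3, 3]        # remainder of 1 goes to the first bin
assert allocate(4, 8) == [2, 2, 2, 2]      # even split, no remainder
assert allocate(5, 3) == [1, 1, 1, 0, 0]   # more bins than items
assert sum(allocate(7, 123)) == 123        # items are always fully accounted for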
Return a list of the days between start and end, excluding the current (start) and prev (end) days themselves | def get_missing_days(start: datetime, end: datetime) -> list[datetime]:
assert start > end
one_day = timedelta(days=-1)
missing_days, current = [], start + one_day
while current > end:
print(current)
missing_days.append(current)
current += one_day
return missing_days | [
"def get_dates(self):\n\t\tdates = []\n\t\tif self.end_date==self.start_date:\n\t\t\tdates.append(self.start_date)\n\t\telse:\n\t\t\tdelta = self.end_date - self.start_date\n\t\t\tfor day in range(0, delta.days+1):\n\t\t\t\tdates.append(self.start_date + timedelta(days=day))\n\t\treturn dates",
"def get_date_range(self):\n\n date_list = []\n if self.start and self.end:\n day = self.start\n delta = datetime.timedelta(days=1)\n stop = self.end + delta\n while day < stop:\n date_list.append(day)\n day += delta\n\n return date_list",
"def get_dates(start, end):\n\n files = []\n\n while start <= end:\n p = start\n start += timedelta(days=1)\n files.append(p)\n\n return sorted(files)",
"def getDateList(start, end=None):\n\n start_date_time = datetime.strptime(start, \"%Y%m%d\")\n if end is None:\n oneday = timedelta(days=1)\n end_date_time = start_date_time + oneday\n end = end_date_time.strftime(\"%Y%m%d\")\n return start, end\n else:\n end_date_time = datetime.strptime(end, \"%Y%m%d\")\n delta = (end_date_time - start_date_time).days\n return [(start_date_time + timedelta(days=ii)).strftime(\"%Y%m%d\") for ii in xrange(0, delta + 1)][:-1]",
"def list_days_in_future(self, end_date=None, reverse: bool = False, count: int = -1) -> List[WrappedDate]:\n return self.list_days(start_date=date.today(), end_date=end_date, reverse=reverse, count=count)",
"def possible_no_trade_days(self, start_date, end_date):\r\n current = start_date\r\n result = []\r\n\r\n while current <= end_date:\r\n # for each day, count the number of points with filler values\r\n dt_list = get_datetime_from_date(current) # hour-steps between 9 and 16\r\n day_list = [self.__edited_data.get(t, []) for t in dt_list] # elements are tuples\r\n\r\n count = len([point for point in day_list if len(point) == 2]) # a tuple of length 2 indicates added data\r\n\r\n # more than half missing? possible no-trade day!\r\n if count > 4:\r\n result += [current]\r\n\r\n current = get_next_day(current)\r\n\r\n return result",
"def generate_date_series(start_date, end_date):\n days_diff = (end_date - start_date).days\n return [start_date + timedelta(days=x) for x in range(0, days_diff + 1)]",
"def produceDateList(startDate, endDate): \n dateList=[]\n delta = endDate - startDate\n for i in range(delta.days+1):\n day = startDate + dt.timedelta(days=i)\n dateList.append(dt.datetime.strftime(day,'%Y%-m%d'))\n return dateList",
"def _date_range(start_date, end_date):\n\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)",
"def get_holidays_between_dates(self, start_date, end_date):",
"def days_list(self):\n return self._days_list",
"def daterangelist(start_date, end_date, skip):\r\n l = np.array([])\r\n for n in range(0, int((end_date - start_date).days), skip):\r\n l = np.append(l, (start_date + datetime.timedelta(n)))\r\n return l.astype('datetime64')",
"def create_calendar_bus_days(self, start_date, end_date, cal = 'FX'):\n hols = self.get_holidays(start_date, end_date, cal)\n index = pandas.bdate_range(start=start_date, end=end_date, freq='D')\n\n return [x for x in index if x not in hols]",
"def daterange(start_date, end_date):\n\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(n)",
"def create_date_list(start_date, end_date, del_t='day'):\n times = []\n if del_t == 'day':\n delta = dt.timedelta(hours=24)\n elif del_t == 'year':\n delta = dt.timedelta(years=1)\n elif del_t == 'half_hour':\n delta = dt.timedelta(minutes=30)\n date = start_date\n while date <= end_date:\n times.append(date)\n date = date + delta\n return times",
"def get_diary_list(self, start=0, end=10, order='-publish_time'):\n size = end - start\n prev = next = False\n diaries = Diary.objects.order_by(order)[start:end + 1]\n if len(diaries) - size > 0:\n next = True\n if start != 0:\n prev = True\n\n return prev, next, diaries[start:end]",
"def generate_dt_list(dt_start, dt_end):\n\n l = []\n for day in range((dt_end - dt_start).days + 1):\n dt_day = dt_start + timedelta(days=day)\n for hour in range(24):\n dt_hour = dt_day + timedelta(hours=hour)\n l.append(dt_hour)\n return l",
"def get_datedeltas(cubepathname=settings.ACCESS_G_AGG, end_date=yesterday):\n refcube = Dataset(cubepathname, mode='a', format='NETCDF4')\n time = refcube.get_variables_by_attributes(long_name='time')\n if len(time) == 0:\n print('error: no time variable found')\n return False, False\n delta = datetime.timedelta(int(time[0][0]))\n startdelta = delta.days\n startbase = datetime.date(1900, 1, 1)\n datedelta = (end_date - startbase).days\n\n return range(startdelta, datedelta)",
"def _iter_days(start=None):\r\n return _iter_time(start, days=1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
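The get_missing_days document above walks backwards from start to end one day at a time; a self-contained usage sketch follows, where the example dates are assumptions and the debug print from the row is omitted.

from datetime import datetime, timedelta

def get_missing_days(start: datetime, end: datetime) -> list[datetime]:
    assert start > end
    one_day = timedelta(days=-1)
    missing_days, current = [], start + one_day
    # Collect every day strictly between start and end, newest first.
    while current > end:
        missing_days.append(current)
        current += one_day
    return missing_days

start = datetime(2023, 3, 10)   # e.g. the current record's date (excluded)
end = datetime(2023, 3, 6)      # e.g. the previous record's date (excluded)
assert get_missing_days(start, end) == [
    datetime(2023, 3, 9), datetime(2023, 3, 8), datetime(2023, 3, 7)]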
Receives the current ProductSales and the last ProductSales we have in our database and posts new cup sales objects to our API. | def interpolate_and_post(current: ProductSales, prev: ProductSales) -> None:
assert current.date > prev.date
results = get_interpolated_list(current, prev)
json_payload = [obj.to_json() for obj in results]
return requests.post(api_url, json=json_payload) | [
"def update_deliveries():\n with db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n \n shift_id = request.form.get(\"shift\")\n shift = Shift.query.get(shift_id)\n document= request.form.get(\"document\")\n supplier_id=request.form.get(\"suppliers\")\n cost_price= float(request.form.get(\"cost_price\"))\n product_id = request.form.get(\"product\")\n qty= float(request.form.get(\"qty\"))\n product = Product.query.get(product_id)\n price= Price.query.filter(and_(Price.product_id == product.id,Price.shift_id ==shift_id)).first()\n inventory_acc = Account.query.get(product.account_id)\n amount = qty * cost_price\n amount = round(amount,2)\n new_cost = ((product.cost_price*product.qty) + amount)/(qty +product.qty)\n product.cost_price = cost_price\n product.avg_price = round(new_cost,2)\n price.avg_price = round(new_cost,2)\n price.cost_price = cost_price\n supplier = Supplier.query.get(supplier_id)\n post_balance = supplier_txn_opening_balance(shift.date,supplier.id) + amount\n txn = SupplierTxn(date=shift.date,txn_type=\"Delivery\",supplier_id=supplier.id,amount=amount,post_balance=post_balance)\n db.session.add(txn)\n db.session.flush()\n update_supplier_balances(shift.date,amount,supplier.id,txn.txn_type)\n if product.product_type ==\"Fuels\":\n tank_id= request.form.get(\"tank\")\n try:\n tank = Tank.query.get(tank_id)\n tank_id = tank.id\n except:\n flash('Please select valid tank','warning')\n return redirect(url_for('readings_entry'))\n \n delivery = Delivery(date=shift.date,shift_id=shift_id,tank_id=tank.id,qty=qty,product_id=product_id,document_number=document,supplier=supplier_id,cost_price=cost_price,supplier_txn_id=txn.id)\n else:\n delivery = Delivery(date=shift.date,shift_id=shift_id,qty=qty,product_id=product_id,document_number=document,supplier=supplier_id,cost_price=cost_price,supplier_txn_id=txn.id)\n \n db.session.add(delivery)\n db.session.flush()\n details = \"Delivery {}\".format(delivery.id)\n delivery_journal=Journal(date=shift.date,details=details,amount=amount,dr=inventory_acc.id,cr=supplier.account_id,created_by=session['user_id'],updated=False)\n db.session.add(delivery_journal)\n db.session.commit()\n \n return redirect(url_for('readings_entry'))",
"def get_sales_forecast(self, cr, uid, ids, context=None):\n\n\n if context is None:\n context = {}\n\n amount = 0.0\n\n new_id = False\n\n products = {}\n value = {}\n\n invoice_ids = []\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',\n 'sep', 'oct', 'nov', 'dec']\n\n inv_obj = self.pool.get('account.invoice')\n forecast_obj = self.pool.get('sales.forecast')\n forecast_line_obj = self.pool.get('sales.forecast.line')\n user_obj = self.pool.get('res.users')\n product_obj = self.pool.get('product.product')\n\n company_id = user_obj.browse(cr, uid, uid).company_id.id\n\n for form in self.browse(cr, uid, ids):\n #create forecast sales without lines\n new_id = forecast_obj.create(cr, uid, {'name': form.name,\n 'analytic_id': form.account_id.id,\n 'commercial_id': uid,\n 'date': time.strftime('%d-%m-%Y'),\n 'company_id': company_id,\n 'state': 'draft'\n })\n for month in range(0,11):\n #I find all the invoices in for each month last year.\n domain = \\\n [('date_invoice','>',str('01-' + str(month + 1) +\n '-' + str(int(time.strftime('%d-%m-%Y')[6:]) - 1))),\n ('date_invoice','<',\n str((calendar.monthrange((int(time.strftime('%d-%m-%Y')[6:]) - 1),\n (month + 1))[1])) + '-' + str(month + 1) + '-' +\n str(int(time.strftime('%d-%m-%Y')[6:]) - 1)),\n ('company_id','=', company_id)]\n\n invoice_ids = inv_obj.search(cr, uid, domain)\n if invoice_ids:\n\n #If invoices, step through lines that share the selected\n #analytic account and save them in a dictionary, with the\n #id of product of the line like key:\n #{Product_Id: [(amount, benefits)]}\n for inv in inv_obj.browse(cr, uid, invoice_ids):\n for line in inv.invoice_line:\n if line.account_analytic_id and \\\n line.account_analytic_id.id == form.account_id.id and \\\n line.product_id:\n\n quantity = self.pool.get('product.uom')._compute_qty(cr, uid, line.uos_id.id,line.quantity, line.product_id.uom_id.id)\n if products.get(line.product_id.id):\n new_val = (products[line.product_id.id][0][0] + quantity,\n products[line.product_id.id][0][1] + line.price_subtotal)\n products[line.product_id.id][0] = new_val\n else:\n products[line.product_id.id] = []\n products[line.product_id.id].append((quantity,\n line.price_subtotal))\n if products:\n for product in products:\n if form.percent_increase:\n #Calculation percentage increase\n qty = products[product][0][0] + \\\n ((form.percent_increase / 100) * \\\n products[product][0][0])\n else:\n qty = products[product][0][0]\n\n cur_forecast = forecast_obj.browse(cr, uid, new_id)\n l_products = forecast_line_obj.search(cr, uid,\n [('product_id','=', product),\n ('sales_forecast_id', '=', cur_forecast.id)])\n #If there are already lines created for the same product,\n #update the quantities. Else, I create a new line\n if l_products:\n l = forecast_line_obj.browse(cr, uid, l_products[0])\n if l.product_id.id == product:\n forecast_line_obj.write(cr, uid, l.id,\n {months[month] + '_qty': (qty + \\\n (eval('o.' + (months[month] + '_qty'),{'o': l})))})\n else:\n forecast_line_obj.create(cr, uid, {\n 'sales_forecast_id': new_id,\n 'product_id': product,\n months[month] + '_qty': qty})\n\n products = {}\n\n value = {\n 'domain': str([('id', 'in', [new_id])]),\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'sales.forecast',\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'res_id': new_id\n }\n\n return value",
"def coupon_sales():\n \n with db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n #current_shift = Shift.query.order_by(Shift.id.desc()).first()\n shift_underway = Shift_Underway.query.all()\n current_shift = shift_underway[0].current_shift\n current_shift = Shift.query.get(current_shift)\n date = current_shift.date\n shift_id = current_shift.id\n product = Product.query.get(request.form.get(\"product_id\"))\n coupon = Coupon.query.get(request.form.get(\"coupon_id\"))\n number_of_coupons = request.form.get(\"number_of_coupons\")\n total_litres = float(coupon.coupon_qty) * float(number_of_coupons)\n customer = Customer.query.get(coupon.customer_id)\n price = Price.query.filter(and_(Price.shift_id==shift_id,Price.product_id==product.id)).first()\n coupon_sale = CouponSale(date=date,shift_id=shift_id,product_id=product.id,coupon_id=coupon.id,qty=number_of_coupons)\n amount = round(total_litres * price.selling_price,2)\n post_balance=customer_txn_opening_balance(date,customer.id) +amount\n txn = CustomerTxn(date=date,txn_type=\"Coupon Sales\",customer_id=customer.id,amount=amount,post_balance=post_balance)\n db.session.add(txn)\n db.session.flush()\n update_customer_balances(date,amount,customer.id,txn.txn_type)\n invoice = Invoice(date=date,product_id=product.id,shift_id=shift_id,customer_id=customer.id,qty=total_litres,price=price.selling_price,customer_txn_id=txn.id)\n \n db.session.add(coupon_sale)\n db.session.add(invoice)\n db.session.commit()\n \n return redirect(url_for('driveway_invoices'))",
"def create_sale():\n #store the request data in user_input variable\n user_input = request.get_json(force=True)\n\n #validate user input\n attendant_name = user_input.get(\"attendant_name\")\n if not attendant_name or attendant_name.isspace():\n raise InvalidUsage('Attendant Name is required', status_code=400)\n charset = re.compile('[A-Za-z]')\n checkmatch = charset.match(attendant_name)\n if not checkmatch:\n raise InvalidUsage('Attendant Name must be letters', status_code=400)\n\n no_of_pdts = user_input.get(\"no_of_products\")\n if not no_of_pdts:\n raise InvalidUsage('Number of products is required', status_code=400)\n \n if not isinstance(no_of_pdts, int):\n raise InvalidUsage('Number of products must be a number', status_code=400)\n\n ttl_profit = user_input.get(\"total_profit\")\n if not ttl_profit:\n raise InvalidUsage('Total profit is required', status_code=400)\n\n if not isinstance(ttl_profit, int):\n raise InvalidUsage('Total profit must be a number', status_code=400)\n\n #auto generate the sales ID\n sales_id = len(Sales.sales) + 1\n \n sale_object = Sales(sales_id, attendant_name, no_of_pdts, ttl_profit)\n sale = sale_object.create_sale()\n if Sales.sales:\n return sale, 201\n else:\n raise InvalidUsage('Insertion failed', status_code=400)",
"def post(self):\n data = SalesListResource.parser.parse_args()\n\n # validate all inputs not to be empty\n for k, v in data.items():\n if v == \"\":\n return {\"message\": \"{} cannot be an empty\".format(k)}\n\n # get the attendant details\n\n current_user = get_jwt_identity()\n user = current_user[\"email\"]\n\n # increment sale by id\n sales_id = len(sales_list) + 1\n\n # custom message for missing product\n message = \"no product with id {}\".format(data[\"product_id\"])\n\n # get the category name by id\n product = productModel.get_by_id(data[\"product_id\"],\n productModel.get_products())\n\n if product:\n # get category name via its key name\n product_name = product['name']\n\n # calculate the price\n price = product[\"price\"]\n total = salesModel.calculate_total(price, data['quantity'])\n\n # prodct item to be saved\n sale_input = {\n \"id\": sales_id, \"product\": product_name,\n \"quantity\": data['quantity'],\n \"attendant\": user,\n \"total\": total}\n\n salesModel.add_sales(sale_input)\n sale = salesModel.get_by_id(sales_id, sales_list)\n return sale, 201\n return {\"message\": message}, 404",
"def register_sale():\n\n sale = {\n \"date\": today()\n }\n\n print(\"--- Register sale ---\\n\")\n print(\"Who is selling the product?\")\n\n # display employees to the user\n employees = get_employees()\n for e in employees:\n print(\"- %s (%s)\" % (e['name'], e['id']))\n print()\n\n employee = select_by_id_or_name(employees, 'employee')\n sale['employee_id'] = employee['id']\n\n print(\"Selected: %s (%s)\\n\" % (employee['name'], employee['id']))\n print(\"Which product is it?\")\n products = get_products()\n\n # display products to the user\n for p in products:\n print(\"- %s (%s) (%s in stock)\" %\n (p['name'], p['id'], p['quantity']))\n print()\n\n product = select_by_id_or_name(products, 'product')\n\n sale['product_id'] = product['id']\n\n print(\"Selected: %s (%s) (%s in stock)\\n\" %\n (product['name'], product['id'], (product['quantity'])))\n\n quantity = 0\n while True:\n quantity = safe_input(\"int_positive\", \"How many items? \")\n # check if there are enough items in stock\n if quantity > 0 and quantity <= product['quantity']:\n print(\"The order is valid. Calculating total price...\")\n break\n else:\n print(\n \"The order is invalid. Please choose a number that is not greater than the quantity in stock\")\n\n # we are updating the reference, so this dictionary is also modified\n # on the products list\n product['quantity'] -= quantity\n sale['num_products'] = quantity\n sale['total_price'] = quantity * product['price']\n\n print(\"\\nTotal price: $%s (+ $%s tax)\" % (\n sale['total_price'],\n sale['total_price'] * 0.16\n ))\n\n sale['id'] = len(get_sales())\n\n print(\"\\nThis order's id is\", sale['id'])\n\n update_products(products)\n add_sale(sale)",
"def ingest_products(stores_obj):\n try:\n session = shopify.Session(stores_obj.store_name, stores_obj.permanent_token)\n shopify.ShopifyResource.activate_session(session)\n product_listings = shopify.Product.find()\n collection_listings = shopify.Collect.find()\n\n for product_listing in product_listings:\n product_id = product_listing.id\n product_name = product_listing.title\n product_image = product_listing.image\n handle = product_listing.handle\n tags = product_listing.tags\n\n vendor = product_listing.vendor if product_listing.vendor else ''\n product_type = product_listing.product_type if product_listing.product_type else ''\n main_image_url = product_image.attributes['src'] if product_image else ''\n\n Product.objects.update_or_create(product_id=product_id, store__store_name=stores_obj.store_name,\n defaults={'product_name': product_name, 'store': stores_obj,\n 'main_image_url': main_image_url,\n 'handle': handle,\n 'vendor': vendor,\n 'tags': tags,\n 'product_type': product_type})\n\n for collection_listing in collection_listings:\n collection_id = collection_listing.attributes['collection_id']\n product_id = collection_listing.attributes['product_id']\n\n product = Product.objects.get(product_id=product_id)\n Collection.objects.update_or_create(product=product, collection_id=collection_id,\n defaults={'collection_id': collection_id})\n\n except Exception as e:\n logger.error('Exception caught for {}. {}'.format(stores_obj.store_name, e))",
"def update_sales_receipts():\n with db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n \n shift_id = request.form.get(\"shift\")\n shift = Shift.query.get(shift_id)\n date = shift.date\n vehicle_number= request.form.get(\"vehicle_number\")\n driver_name= request.form.get(\"driver_name\")\n customer_id=request.form.get(\"customers\")\n sales_price= float(request.form.get(\"sales_price\"))\n product_id = request.form.get(\"product\")\n qty= float(request.form.get(\"qty\"))\n try:\n sales_acc = Account.query.filter_by(account_name=\"Fuel Sales\").first()\n amount = qty * sales_price\n amount = round(amount,2)\n post_balance = customer_txn_opening_balance(date,customer_id) + amount\n txn = CustomerTxn(date=date,txn_type=\"Invoice\",customer_id=customer_id,amount=amount,post_balance=post_balance)\n db.session.add(txn)\n db.session.flush()\n update_customer_balances(date,amount,customer_id,txn.txn_type)\n customer = Customer.query.get(customer_id)\n invoice = Invoice(date=date,shift_id=shift_id,product_id=product_id,customer_id=customer_id,qty=qty,price=sales_price,vehicle_number=vehicle_number,driver_name=driver_name,customer_txn_id=txn.id)\n db.session.add(invoice)\n db.session.flush()\n details = \"Invoice {}\".format(invoice.id)\n sales_journal=Journal(date=shift.date,details=details,amount=amount,dr=customer.account_id,cr=sales_acc.id,created_by=session['user_id'],updated=False)\n db.session.add(sales_journal)\n db.session.commit()\n \n return redirect(url_for('readings_entry'))\n except:\n flash(\"Something is wrong\",'warning')\n return redirect(url_for('readings_entry'))",
"def predict_future_purchase_sales(self):\r\n # Reload the csv file to clear unnecessary columns\r\n self.read_csv_data()\r\n\r\n # Rename columns\r\n self.rename_columns()\r\n\r\n # Creates a dataset exclusing Date Margin and Tax, because that will be predicted by model\r\n train = self.all_data.drop([self._date, self._net_purchase, self._gross_sale], axis=1)\r\n\r\n # Creates a test dataset to test the trained model\r\n test = self.all_data[[self._net_purchase, self._gross_sale]]\r\n\r\n # Creates different training and testing dataset\r\n # test_size = 0.3 signifies, 30% data will be used for testing and 70% data will be used for training\r\n x_train, x_test, y_train, y_test = train_test_split(train, test, test_size=0.3, random_state=2)\r\n\r\n # Create LinearRegression object\r\n simple_regr = LinearRegression()\r\n\r\n # Train the model\r\n simple_regr.fit(x_train, y_train)\r\n\r\n # Receive input from the user\r\n tax_assume = float(input('Enter Tax: '))\r\n margin_assume = float(input('Enter Margin: '))\r\n\r\n # Convert the data to dataframe\r\n predict_data = pd.DataFrame(np.array([[tax_assume, margin_assume]]), columns=[self._tax, self._margin])\r\n\r\n # Predict the input\r\n predicted_purchase_sale = simple_regr.predict(predict_data)\r\n\r\n # Get the accuracy of the trained model\r\n accuracy = simple_regr.score(x_test, y_test) * 100\r\n\r\n # Display the predicted tax with accuracy\r\n print(f'The predicted net purchase is {predicted_purchase_sale[0][0]:.2f} and predicted gross sale is {predicted_purchase_sale[0][1]:.2f} with {accuracy:.2f}% accuracy')",
"def rest_sales(request):\n # Check if the user is a developer\n if not request.user.has_perm('gamestore.developer'):\n return permission_denied(request, PermissionDenied)\n\n # Allow only requests for the developers own sales statistics\n orders = request.user.developer.sales.all()\n\n if request.method == 'GET':\n # Filter by order id\n if 'order' in request.GET:\n orders = orders.filter(pk=request.GET['order'])\n # Filter by game name\n if 'game' in request.GET:\n if Game.objects.filter(name=request.GET['game']).exists():\n game = Game.objects.get(name=request.GET['game'])\n orders = orders.filter(game=game)\n else:\n orders = []\n # Filter by buyer (username in this case!)\n if 'buyer' in request.GET:\n if User.objects.filter(username=request.GET['buyer']).exists():\n user = User.objects.get(username=request.GET['buyer'])\n if hasattr(user, 'player'):\n orders = orders.filter(buyer=user.player)\n else:\n orders = []\n else:\n orders = []\n # Search by status (paid or not paid orders)\n if 'status' in request.GET:\n if request.GET['status'] == 'paid':\n orders = orders.filter(status=True)\n elif request.get['status'] == 'not_paid':\n orders = orders.filter(status=False)\n else:\n orders = []\n\n # Convert buyer and seller ids to usernames and game ids to game names and status to paid/not_paid\n data = serializers.serialize('json', orders)\n data_json = json.loads(data)\n for d in data_json:\n d['fields']['buyer'] = Player.objects.get(pk=d['fields']['buyer']).user.username\n d['fields']['seller'] = Developer.objects.get(pk=d['fields']['seller']).user.username\n d['fields']['game'] = Game.objects.get(pk=d['fields']['game']).name\n d['fields']['status'] = 'paid' if Order.objects.get(pk=d['pk']).status else 'not_paid'\n # Remove model information from the returned data\n d.pop('model')\n data = json.dumps(data_json)\n\n return render(request, 'rest_sales.html', {'data': data})",
"def create_sale_record(self):\n self.cursor.execute(\n \"INSERT INTO sales(user_id, product_id, sales_quantity, prod_price)\"\n \" VALUES(%s,%s,%s,%s)\",\n (self.user_id, self.prod_id, self.quantity, self.price), )",
"def sale_list_create_new(current_user):\n data = request.get_json()\n\n new_sale_list = SaleList(\n created_on=str(datetime.date.today()),\n customer_name=data[\"customer_details\"][\"customer_name\"],\n customer_contact=data[\"customer_details\"][\"customer_contact\"],\n business_id=data[\"business_id\"],\n )\n db.session.add(new_sale_list)\n db.session.commit()\n\n for sale in data[\"sale_list\"]:\n new_sale = Sale(\n quantity=sale[\"quantity\"],\n selling_price=sale[\"selling_price\"],\n created_on=str(datetime.date.today()),\n product_id=sale[\"product_id\"],\n sale_list_id=new_sale_list.id,\n )\n db.session.add(new_sale)\n db.session.commit()\n return jsonify({\"message\": \"Sale created successfully\"}), 201",
"def set_orders(self):\n new_buy_orders, new_sell_orders = api.get_orders(self.currency_pair)\n\n # check if the sell book isn't empty\n if new_sell_orders != []:\n log = 'new_sell_orders : ', new_sell_orders # number of new sell orders\n logging.info(log)\n # remove all sell orders under sell_price_min\n if new_sell_orders[0][2] < self.sell_price_min: # order[2] => rate\n for order in new_sell_orders:\n if order[2] < self.sell_price_min:\n resp = api.cancel_order(self.currency_pair, order[0]) # order[0] => order_number\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n # remove orders if there too much of them\n # checking if the rate of the last order is too big than the\n # supposed right rate relatively to both the increment and nb_order_to_display variables\n if new_sell_orders[-1][2] > self.sell_price_min + self.increment * self.nb_orders_to_display:\n # if so, defining a variable corresponding to the right rate\n price_target = self.sell_price_min + self.increment * self.nb_orders_to_display\n\n # removing the order if greater than the supposed right price\n for order in new_sell_orders:\n if order[2] > price_target:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n # if it remain sells orders\n if new_sell_orders != []:\n i = 0\n target = len(new_sell_orders)\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n log = 'new_sell_orders : ', new_sell_orders\n logging.info(log)\n # check if the first item in new_sell_orders is at sell_price_min\n # or add it\n if new_sell_orders[0][2] != self.sell_price_min:\n # api.set_sell_order is not better?\n order = api.set_sell_order(self.currency_pair, self.sell_price_min, self.amount)\n\n new_sell_orders.insert(0, order)\n\n log = 'Sell order added : ', order\n logging.warning(log)\n\n # incrementing target for the while loop? 
=> because the exclusion of the last integer if not?\n target += 1\n # browse sell_orders to add or removes orders\n while i < target:\n # check for overflow\n if new_sell_orders[i][2] + self.increment > self.sell_price_max:\n i = target\n logging.warning('sell_price_max reached')\n\n else:\n # add a sell order if there is no higher sell in sell_orders\n if i + 1 >= len(new_sell_orders): # possible change : less than sign instead of 'greater than'\n order = api.set_sell_order(self.currency_pair, \\\n (new_sell_orders[i][2] + self.increment), self.amount)\n\n new_sell_orders.insert((i + 1), order)\n\n log = 'Added sell order : ', order\n logging.warning(log)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # remove sell order if there is less than increment between them\n elif new_sell_orders[i + 1][2] - new_sell_orders[i][2] \\\n < self.increment:\n\n resp = api.cancel_order(self.currency_pair, new_sell_orders[i + 1][0])\n\n log = 'Sell order removed : ', order\n logging.warning(log)\n\n new_sell_orders.remove(order)\n\n target -= 1\n # add sell order if there is more than increment between them\n elif new_sell_orders[i + 1][2] - new_sell_orders[i][2] \\\n > self.increment:\n\n order = api.set_sell_order(self.currency_pair, \\\n (new_sell_orders[i][2] + self.increment), self.amount)\n\n new_sell_orders.insert((i + 1), order)\n\n log = 'Added sell order : ', order\n logging.warning(log)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # increment ok, next round\n else:\n i += 1\n\n self.sell_orders = new_sell_orders[:]\n\n if new_sell_orders == []:\n price_start = self.sell_price_min\n\n logging.warning('no active sell orders')\n\n # set the number of sell orders to execute and check if no more than nb_orders_to_display\n # personal note : recheck the meaning of that condition\n if (self.sell_price_max - self.sell_price_min) / self.increment > self.nb_orders_to_display:\n\n i = int(self.nb_orders_to_display) + 1\n\n else:\n i = int((self.sell_price_max - self.sell_price_min) / self.increment)\n\n log = i, 'sell order to add from : ', price_start, 'to', (price_start + i * self.increment)\n logging.warning(log)\n\n sell_orders_executed = api.set_several_sell_orders(self.currency_pair, price_start, \\\n self.amount, i, self.increment)\n\n self.sell_orders = sell_orders_executed[:]\n\n # When there is orders(s) in new_buy_orders\n if new_buy_orders != []:\n log = 'new_buy_orders : ', new_buy_orders\n logging.info(log)\n # Remove orders with price superior to buy_price_max.\n if new_buy_orders[-1][2] > self.buy_price_max:\n for order in new_buy_orders:\n if order[2] > self.buy_price_max:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n # Remove orders with price under our target\n # Why not set 'buy_price_min'? 
for the comparison\n if new_buy_orders[0][2] < self.buy_price_max - self.increment * self.nb_orders_to_display:\n\n price_target = self.buy_price_max - self.increment * self.nb_orders_to_display\n\n for order in new_buy_orders:\n if order[2] < price_target:\n resp = api.cancel_order(self.currency_pair, order[0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n # If it remain buy(s) order(s)\n if new_buy_orders != []:\n i = 0\n target = len(new_buy_orders)\n # Add a buy order when the price of the first item in new_buy_orders\n # is not good\n # Why not set 'buy_price_min' for the comparison ?\n if new_buy_orders[0][2] != self.buy_price_max - self.increment \\\n * self.nb_orders_to_display:\n order = api.set_buy_order(self.currency_pair, (self.buy_price_max \\\n - self.increment * self.nb_orders_to_display),\n self.amount)\n\n new_buy_orders.insert(0, order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n target += 1\n # Browse buy_orders to add or remove orders\n while i < target:\n # Add buy orders when there is no higher buy in buy_orders\n if i + 1 >= len(new_buy_orders):\n order = api.set_buy_order(self.currency_pair, (new_buy_orders[i][2] \\\n + self.increment), self.amount)\n\n new_buy_orders.insert((i + 1), order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # Remove buy order where there is less than increment between them.\n elif new_buy_orders[i + 1][2] - new_buy_orders[i][2] < self.increment:\n resp = api.cancel_order(self.currency_pair, new_buy_orders[i + 1][0])\n\n log = 'Buy order removed : ', order\n logging.warning(log)\n\n new_buy_orders.remove(order)\n\n target -= 1\n # Add buy order when there is more than increment between them.\n elif new_buy_orders[i + 1][2] - new_buy_orders[i][2] > self.increment:\n order = api.set_buy_order(self.currency_pair, (new_buy_orders[i][2] \\\n + self.increment), self.amount)\n\n new_buy_orders.insert((i + 1), order)\n\n log = 'Added buy order : ', order\n logging.warning(log)\n\n nb_orders_to_display_tmp = int(self.nb_orders_to_display)\n\n if target < nb_orders_to_display_tmp:\n target += 1\n\n i += 1\n # Increment ok, next round.\n else:\n i += 1\n\n self.buy_orders = new_buy_orders[:]\n\n # Add buy orders when new_buy_orders is empty\n if new_buy_orders == []:\n price_start = self.buy_price_max\n logging.warning('No active buy orders')\n # set the number of buy orders to execute and check if no more than\n # nb_orders_to_display\n if (self.buy_price_max - self.buy_price_min) / self.increment \\\n > self.nb_orders_to_display:\n\n i = int(self.nb_orders_to_display) + 1\n\n else:\n i = int((self.buy_price_max - self.buy_price_min) / self.increment)\n\n # change: simplifying because i is an integer => Decimal(str(i)) should not be needed\n log = i, 'add buy orders from', price_start, 'to', (price_start + i * self.increment)\n logging.warning(log)\n\n buy_orders_executed = api.set_several_buy_orders(self.currency_pair, price_start, \\\n self.amount, i, self.increment)\n\n self.buy_orders = buy_orders_executed[:]",
"async def tp_current(self, ctx, buys_sells):\n\t\tuser = ctx.message.author\n\t\tcolor = self.getColor(user)\n\t\tstate = buys_sells.lower()\n\t\tscopes = [\"tradingpost\"]\n\t\tendpoint = \"commerce/transactions/current/{0}\".format(state)\n\t\tkeydoc = await self.fetch_key(user)\n\t\tif state == \"buys\" or state == \"sells\":\n\t\t\ttry:\n\t\t\t\tawait self._check_scopes_(user, scopes)\n\t\t\t\tkey = keydoc[\"key\"]\n\t\t\t\theaders = self.construct_headers(key)\n\t\t\t\taccountname = keydoc[\"account_name\"]\n\t\t\t\tresults = await self.call_api(endpoint, headers)\n\t\t\texcept APIKeyError as e:\n\t\t\t\tawait self.bot.say(e)\n\t\t\t\treturn\n\t\t\texcept APIError as e:\n\t\t\t\tawait self.bot.say(\"{0.mention}, API has responded with the following error: \"\n\t\t\t\t\t\t\t\t \"`{1}`\".format(user, e))\n\t\t\t\treturn\n\t\telse:\n\t\t\tawait self.bot.say(\"{0.mention}, Please us either 'sells' or 'buys' as parameter\".format(user))\n\t\t\treturn\n\t\tdata = discord.Embed(description='Current ' + state, colour=color)\n\t\tdata.set_author(name='Transaction overview of {0}'.format(accountname))\n\t\tdata.set_thumbnail(\n\t\t\turl=\"https://wiki.guildwars2.com/images/thumb/d/df/Black-Lion-Logo.png/300px-Black-Lion-Logo.png\")\n\t\tdata.set_footer(text=\"Black Lion Trading Company\")\n\t\tresults = results[:20] # Only display 20 most recent transactions\n\t\titem_id = \"\"\n\t\tdup_item = {}\n\t\titemlist = []\n\t\t# Collect listed items\n\t\tfor result in results:\n\t\t\titemdoc = await self.fetch_item(result[\"item_id\"])\n\t\t\titemlist.append(itemdoc)\n\t\t\titem_id += str(result[\"item_id\"]) + \",\"\n\t\t\tif result[\"item_id\"] not in dup_item:\n\t\t\t\tdup_item[result[\"item_id\"]] = len(dup_item)\n\t\t# Get information about all items, doesn't matter if string ends with ,\n\t\tendpoint_items = \"items?ids={0}\".format(str(item_id))\n\t\tendpoint_listing = \"commerce/listings?ids={0}\".format(str(item_id))\n\t\t# Call API once for all items\n\t\ttry:\n\t\t\tlistings = await self.call_api(endpoint_listing)\n\t\texcept APIError as e:\n\t\t\tawait self.bot.say(\"{0.mention}, API has responded with the following error: \"\n\t\t\t\t\t\t\t \"`{1}`\".format(user, e))\n\t\t\treturn\n\t\tfor result in results:\n\t\t\t# Store data about transaction\n\t\t\tindex = dup_item[result[\"item_id\"]]\n\t\t\tquantity = result[\"quantity\"]\n\t\t\tprice = result[\"price\"]\n\t\t\titem_name = itemlist[index][\"name\"]\n\t\t\toffers = listings[index][state]\n\t\t\tmax_price = offers[0][\"unit_price\"]\n\t\t\tdata.add_field(name=item_name, value=str(quantity) + \" x \" + self.gold_to_coins(price)\n\t\t\t\t\t\t + \" | Max. offer: \" + self.gold_to_coins(max_price), inline=False)\n\t\ttry:\n\t\t\tawait self.bot.say(embed=data)\n\t\texcept discord.HTTPException:\n\t\t\tawait self.bot.say(\"Need permission to embed links\")",
"def get_sales(start_date: datetime.datetime, end_date: datetime.datetime, seller_skus: set) -> List:\n\n print(\"getting sales data...\")\n interval = create_date_interval(start_date, end_date)\n\n return _get_sales(interval, Granularity.HOUR, seller_skus)",
"def test_multiple_purchases_update_product_price(self):\n\n # Generate timestamps for correct timing of purchases and updates\n t1 = datetime.datetime.now() - datetime.timedelta(seconds=30)\n t2 = datetime.datetime.now() - datetime.timedelta(seconds=25)\n t3 = datetime.datetime.now() - datetime.timedelta(seconds=20)\n t4 = datetime.datetime.now() - datetime.timedelta(seconds=15)\n t5 = datetime.datetime.now() - datetime.timedelta(seconds=10)\n t6 = datetime.datetime.now() - datetime.timedelta(seconds=5)\n # Update product price\n pp = ProductPrice(product_id=1, price=300, admin_id=1, timestamp=t1)\n db.session.add(pp)\n db.session.commit()\n # Get the first product price\n product = Product.query.filter_by(id=1).first()\n pr_1 = copy(product.price)\n # Do first purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t2)\n db.session.add(purchase)\n db.session.commit()\n # Update product price\n pp = ProductPrice(product_id=1, price=100, admin_id=1, timestamp=t3)\n db.session.add(pp)\n db.session.commit()\n # Get the second product price\n product = Product.query.filter_by(id=1).first()\n pr_2 = copy(product.price)\n # Do second purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t4)\n db.session.add(purchase)\n # Update product price\n pp = ProductPrice(product_id=1, price=600, admin_id=1, timestamp=t5)\n db.session.add(pp)\n db.session.commit()\n # Get the third product price\n product = Product.query.filter_by(id=1).first()\n pr_3 = copy(product.price)\n # Do third purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t6)\n db.session.add(purchase)\n db.session.commit()\n\n # Check the product prices\n self.assertEqual(pr_1, 300)\n self.assertEqual(pr_2, 100)\n self.assertEqual(pr_3, 600)\n\n # Check user credit\n user = User.query.filter_by(id=1).first()\n self.assertEqual(len(user.purchases.all()), 3)\n self.assertEqual(user.credit, -(pr_1 + pr_2 + pr_3))\n\n # Check purchase prices\n purchases = Purchase.query.all()\n self.assertEqual(purchases[0].price, 300)\n self.assertEqual(purchases[1].price, 100)\n self.assertEqual(purchases[2].price, 600)",
"def req_new(self, startdate_, enddate_):\r\n request_ = requests.get('https://finnhub.io/api/v1/company-news?symbol=' + self.ticker_request + '&from=' +\r\n startdate_ + '&to=' + enddate_ + '&token=' + self.finhub_key)\r\n self.js_data += request_.json()\r\n for indice in range(0, len(self.js_data)):\r\n self.summary = self.summary + self.js_data[indice]['summary']\r\n self.scores.append(self.weighted_sentiment_score(self.js_data[indice]['summary'], 1))\r\n epoch_time = self.js_data[indice]['datetime']\r\n time_formatted = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(epoch_time))\r\n self.time.append(time_formatted)",
"def get_user_product_sales(self, current_user):\n try:\n conn = open_connection()\n cur = conn.cursor()\n cur.execute(\"SELECT products.product_id, products.product_name,\\\n products.product_model, sales.quantity, sales.total_price FROM \\\n products INNER JOIN sales ON products.product_id = \\\n sales.product_id WHERE sales.created_by = %s\", (current_user,))\n product_sales = cur.fetchall()\n close_connection(conn)\n return product_sales\n except Exception as e:\n print(e)",
"def store_results(transactions):\r\n\r\n server='LAPTOP-N3JOPONO'\r\n database='TD_Ameritrade'\r\n data_connection=pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};\\\r\n SERVER=' + server + ';\\\r\n DATABASE=' + database + ';\\\r\n Trusted_Connection=yes;')\r\n\r\n data_cursor=data_connection.cursor()\r\n\r\n symbol = transactions['Stock Symbol']\r\n #Add buy history to SQL table\r\n for (orderID,price,quantity,placed_time,filled_time) in transactions['Buy History']:\r\n\r\n #Insert query to insert new data into Buy_Orders table\r\n insert_query_buy = '''INSERT INTO Buy_Orders(Buy_Order_ID,Stock_Ticker,Price,Quantity,Time_Placed,Time_Filled)\r\n VALUES(?,?,?,?,?,?);'''\r\n\r\n #Information on buy transactions\r\n values_buy=(orderID,symbol,price,quantity,placed_time,filled_time)\r\n data_cursor.execute(insert_query_buy,values_buy)\r\n\r\n #Add sell history to SQL Table\r\n for (orderID,price,quantity,placed_time,filled_time,parentID) in transactions['Sell History']:\r\n\r\n #Insert query to insert new data into Sell_Orders table\r\n insert_query_sell = '''INSERT INTO Sell_Orders(Sell_Order_ID,Stock_Ticker,Price,Quantity,Time_Placed,Time_Filled,Buy_Order_ID_link)\r\n VALUES(?,?,?,?,?,?,?);'''\r\n\r\n #Information on sell transactions\r\n values_sell=(orderID,symbol,price,quantity,placed_time,filled_time,parentID)\r\n data_cursor.execute(insert_query_sell,values_sell)\r\n\r\n #Add current open sell orders to SQL Table\r\n for (orderID,price,parentID) in transactions['Limit Sells']:\r\n\r\n #Insert query to insert new data into Open_Sell_Orders table\r\n insert_query_sell_open = '''INSERT INTO Open_Sell_Orders(Sell_Order_ID,Stock_Ticker,Price,Date,Buy_Order_ID_link)\r\n VALUES(?,?,?,?,?);'''\r\n\r\n #Information on sell transactions\r\n values_sell_open=(orderID,symbol,price,datetime.datetime.now().date(),parentID)\r\n data_cursor.execute(insert_query_sell_open,values_sell_open)\r\n\r\n\r\n data_connection.commit()\r\n data_cursor.close()\r\n data_connection.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if the info we're getting from the page is being scraped with the right format | def test_page_info_format(self):
url = self.refs_list[random.randint(0, len(self.refs_list)-1)]
test_page = self.scraper.get_url_page(url) # exchange this for a copy of an html file
url_info = self.scraper.get_info_itemprop('a', 'url', test_page)
self.assertTrue(re.search(r'^http://www.', url_info) or url_info == "Not found")
email_info = self.scraper.get_info_itemprop('a', 'email', test_page)
self.assertTrue(re.search(r'^\S+@\S+', email_info) or email_info == "Not found") | [
"def _good_response(self, resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)",
"def check_url_format(self):\n m = re.match('^https://www.att.com/.*\\.html\\??(#sku=sku\\d+)?$', self.product_page_url)\n return not not m",
"def test_good_page_url():\n page_html = site_parser._get_page_html(\n \"https://www.smashingmagazine.com/category/wallpapers/\",\n )\n assert type(page_html) == BeautifulSoup",
"def _is_page_valid(self):\n try:\n error_box = self.driver.find_element_by_class_name(\"block\")\n if error_box.text == \"We are sorry but we could not find the property you have requested.\":\n self.logger.error(\"{} not a valid page\".format(self.url))\n return False\n else:\n self.logger.info(\"{} exists\".format(self.url))\n return True\n except:\n self.logger.info(\"{} exists\".format(self.url))\n return True",
"def check_url_format(self):\r\n #m = re.match(\"^http://www.amazon.com/dp/[a-zA-Z0-9]+$\", self.product_page_url)\r\n m = re.match(r\"^http://www.statelinetack.com/.*?$\", self.product_page_url)\r\n return not not m",
"def test_html_is_not_valid(self):\n url = \"\"\n single_date = date(2019, 3, 4)\n\n coins = {}\n with patch.object(\n BCRASMLScraper,\n 'fetch_content',\n return_value=' '\n ):\n scraper = BCRASMLScraper(url, coins, intermediate_panel_path=None, use_intermediate_panel=False)\n content = scraper.fetch_content(single_date)\n\n soup = BeautifulSoup(content, \"html.parser\")\n\n table = soup.find('table')\n head = table.find('thead') if table else None\n body = table.find('tbody') if table else None\n\n assert table is None\n assert head is None\n assert body is None",
"def is_scraped(self) -> bool:\n title_set: bool = self.title not in ['', None]\n body_set: bool = self.body not in ['', None]\n return title_set and body_set",
"def __parse_stats_page(self, html, year, url):\n return None",
"def test__parse_page_info(page_info_data):\n return parse_page_info(page_info_data)",
"def test_html_dont_have_other_user_visitors(self):\n contents = ['21/02/2018', '18:00 h', 'Visita de outro morador']\n for content in contents:\n with self.subTest():\n self.assertNotContains(self.resp, content)",
"async def page_has_scrape(vendor,page_URL):\n try:\n content = await request_lib.GET_request_async(vendor,page_URL)\n\n if(content != None):\n soup = BeautifulSoup(content, \"html.parser\")\n website = scrape_elements.websites[vendor]\n\n if website[\"product-scope\"][\"name\"]:\n regex_class_name = re.compile(website[\"product-scope\"][\"name\"])\n else:\n regex_class_name = ''\n \n productElements = soup.find_all(website[\"product-scope\"][\"element\"], class_= regex_class_name )\n \n if (productElements):\n #print(\" 000> Page is scrapable.\")\n return True\n else:\n #print(\" +++> Page is not scrapable because there is no product scope in its dom elements.\")\n #print(\"Product scope can be find at corresponding vendor's scrape_elements.py mapping. \")\n return False\n else:\n #print(\" +++> Page is not scrapable because there is no content !\")\n return False\n except Exception as e:\n print(\"\\n0000 ERROR IN page_has_scrape 000 \\nMESSAGE : \"+ str(e))\n return False",
"def test_scrape_stock_specific_info():\n\n stock = \"MSFT\"\n\n info = scraper.scrape_stock_info(stock)\n\n assert info is not None\n assert info[\"sector\"] == \"Technology\"\n assert info[\"country\"] == \"United States\"\n assert info[\"city\"] == \"Redmond\"",
"def test_beautiful_soup_can_parse_html_from_returned_content(self):\n soup = self.soupify(self.response)\n self.assertIsNotNone(soup)",
"def does_page_exist(self, resp_json):\n if resp_json is not None:\n if 'results' in resp_json:\n if len(resp_json['results']) > 0:\n if 'type' in resp_json['results'][0]:\n return True\n elif 'by' in resp_json['results'][0]:\n return True\n elif 'type' in resp_json:\n if resp_json['type'] == 'page':\n return True\n elif resp_json['type'] == 'url':\n return True\n\n return False",
"def check_url_format(self):\n if re.match('^https?://www.walgreens.com/store/c/.+/ID=prod\\d+-product$', self.product_page_url):\n return True\n return False",
"def test_html_has_user_future_visitors_planned(self):\n contents = ['20/02/2018', '17:00 h', 'Fogás entrega de gás']\n for content in contents:\n with self.subTest():\n self.assertContains(self.resp, content)",
"def valid_rinfo(rinfo):\n if '\\\\n' in rinfo:\n return False\n elif '76,78,81,83,85,88,90,92,94,96grid_definition' in rinfo:\n return False\n elif 'startDate' in rinfo:\n return False\n else:\n return True",
"def isWorldUp() :\n parser = WorldParser()\n with urlopen(Configs['worldstatusURL']) as response :\n html = str(response.read()) #TODO c'est pas propre il vaudrait mieux faire un decode et retirer le littleStrip\n try :\n parser.feed(html)\n except Exception :\n pass\n return parser.state;",
"def has_page_error():\n elements = find_elements_by_css(\"p[class='error-description']\")\n return len(elements) > 0"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
get spec_sdr and spec_sensor info | def get_spec_data(self):
print "Start get spec threshold data..."
# get spec threshold data
spec_threshold_sensor, spec_threshold_sdr = self.get_spec_threshold_discrete_data("Threshold Sensors",
conf.Start_SpecFile,
conf.End_SpecFile)
# get spec discrete data
spec_discrete_sensor, spec_discrete_sdr = self.get_spec_threshold_discrete_data("Discrete Sensors",
conf.Start_Discrete_SpecFile,
conf.End_Discrete_SpecFile)
return spec_threshold_sensor, spec_threshold_sdr, spec_discrete_sdr | [
"def sdrandsensor_compare(spec_sdr, ipmi_sdr, spec_sensor, ipmi_sensor):\n if spec_sensor and ipmi_sensor:\n if len(ipmi_sdr) != len(ipmi_sensor):\n print \"get ipmi sdr/sensor data info error\"\n logger.error(\"get ipmi data error\")\n return 0\n print \"Start compare spec/ipmi sdr/sensor data...\"\n sensor_index = 0\n # read ipmi list\n for ipmi_fru_each in ipmi_sdr:\n sensor_name = ipmi_fru_each[0]\n spec_fru_each = []\n spec_threshold_each = []\n # use sensor name get sdr values in spec\n try:\n spec_fru_each = spec_sdr[sensor_name]\n if spec_sensor and ipmi_sensor:\n spec_threshold_each = spec_sensor[sensor_name]\n except KeyError as e:\n try:\n # judge sensor name whether only because capitalized not same (all upper or all lower in spec)\n if spec_sdr[sensor_name.upper()] or spec_sdr[sensor_name.lower()]:\n logger.warning(sensor_name + \"'s field ( Sensor name ): warning.\")\n fru_compare(ipmi_fru_each, spec_fru_each, sensor_name)\n if spec_sensor and ipmi_sensor:\n threshold_compare(ipmi_sensor[sensor_index], spec_threshold_each, sensor_name)\n sensor_index += 1\n except KeyError:\n logger.error(\"spec don't have \" + sensor_name + \" sensor_name. \\n\\\n sensor number/entity id/sensor type/threshold not compare.\\n\")\n continue\n else:\n logger.info(sensor_name + \"'s field ( Sensor name ): pass.\")\n\n # fru compare\n fru_compare(ipmi_fru_each, spec_fru_each, sensor_name)\n\n if spec_sensor and ipmi_sensor:\n # compare threshold\n threshold_compare(ipmi_sensor[sensor_index], spec_threshold_each, sensor_name)\n sensor_index += 1",
"def get_sdr_info():\n\n status, ret_values = \\\n grk.run_key_u(\"Run IPMI Standard Command sdr info\")\n result = vf.key_value_outbuf_to_dict(ret_values, process_indent=1)\n\n return result",
"def device_info(self) -> Dict[str, Any]:\n return {\n \"identifiers\": {(LITTERROBOT_DOMAIN, self.robot.serial)},\n \"name\": self.robot.name,\n \"manufacturer\": \"Litter-Robot\",\n \"model\": \"Litter-Robot 3 Connect\"\n if self.robot.serial.startswith(\"LR3C\")\n else \"unknown\",\n }",
"def cris_sensor_info(EngPktFile=None):\n para = dict(normBins= [717, 437, 163], \\\n normRes = [0.625, 1.25, 2.5], \\\n wvLow = [650.0, 1210.0, 2155.0], \\\n wvHigh = [1095.0, 1750.0, 2550.0], \\\n fullBins= [717, 869, 637], \\\n fullRes = [0.625, 0.625, 0.625]) \n \n wvNorm = []\n wvFull = []\n wvNormReal = []\n wvFullReal = []\n \n ## produce wavenumber for CrIS spectra \n for i in np.arange(0,3): \n wv=np.linspace(para['wvLow'][i], para['wvHigh'][i], num=para['normBins'][i]-4)\n wvNorm.append(wv)\n\n wv=np.linspace(para['wvLow'][i], para['wvHigh'][i], num=para['fullBins'][i]-4)\n wvFull.append(wv)\n\n wv=np.linspace(para['wvLow'][i]-2*para['normRes'][i], \\\n para['wvHigh'][i]+2*para['normRes'][i], \\\n num=para['normBins'][i])\n wvNormReal.append(wv)\n \n wv=np.linspace(para['wvLow'][i]-2*para['normRes'][i], \\\n para['wvHigh'][i]+2*para['normRes'][i], \\\n num=para['normBins'][i])\n wvFullReal.append(wv)\n\n \n para['wvNorm'] = wvNorm\n para['wvFull'] = wvFull\n para['wvNormReal'] = wvNormReal\n para['wvFullReal'] = wvFullReal\n \n \n if EngPktFile is None: EngPktFile = './EngPkt/JPSS1_side1_V115_EngPkt.xml'\n \n if isinstance(EngPktFile, str): \n \n with open(EngPktFile) as f: \n xml = f.read()\n \n x = xmltodict.parse(xml)\n \n InstrumentId = int(x['EngPkt']['InstrumentId'])\n PktVersion = int(x['EngPkt']['PktVersion'])\n \n lw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Lw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n lw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Lw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n lw_losRelativeYaw = float(x['EngPkt']['FovParam']['Lw']['LosRelativeYaw'])\n lw_losRelativePitch = float(x['EngPkt']['FovParam']['Lw']['LosRelativePitch'])\n lw_fovSize = np.asarray(x['EngPkt']['FovParam']['Lw']['Size'].split(), dtype=np.float64)\n\n mw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Mw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n mw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Mw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n mw_losRelativeYaw = float(x['EngPkt']['FovParam']['Mw']['LosRelativeYaw'])\n mw_losRelativePitch = float(x['EngPkt']['FovParam']['Mw']['LosRelativePitch'])\n mw_fovSize = np.asarray(x['EngPkt']['FovParam']['Mw']['Size'].split(), dtype=np.float64)\n \n sw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Sw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n sw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Sw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n sw_losRelativeYaw = float(x['EngPkt']['FovParam']['Sw']['LosRelativeYaw'])\n sw_losRelativePitch = float(x['EngPkt']['FovParam']['Sw']['LosRelativePitch'])\n sw_fovSize = np.asarray(x['EngPkt']['FovParam']['Sw']['Size'].split(), dtype=np.float64)\n \n actualCrosstrackAngle = np.asarray(x['EngPkt']['MappingParameters']['ActualCrosstrackAngleRoll'].split(), dtype=np.float64)\n actualIntrackAngle = np.asarray(x['EngPkt']['MappingParameters']['ActualIntrackAnglePitch'].split(), dtype=np.float64)\n \n SsmrToSsmf = x['EngPkt']['MappingParameters']['SsmrToSsmf']\n SSMRtoSSMF_roll, SSMRtoSSMF_pitch, SSMRtoSSMF_yaw = [float(v) for k, v in SsmrToSsmf.items()]\n \n IarToSsmr = x['EngPkt']['MappingParameters']['IarToSsmr']\n IARtoSSMR_roll , IARtoSSMR_pitch, IARtoSSMR_yaw = [float(v) for k, v in IarToSsmr.items()]\n \n IfrBoresightToSsmf = x['EngPkt']['MappingParameters']['IfrBoresightToSsmf']\n IFRboresighttoSSMF_yaw, IFRboresighttoSSMF_pitch = [float(v) for k, v in 
IfrBoresightToSsmf.items()]\n \n SbfToIar = x['EngPkt']['MappingParameters']['SbfToIar']\n SBFtoIAR_roll, SBFtoIAR_pitch, SBFtoIAR_yaw = [float(v) for k, v in SbfToIar.items()]\n \n ### millisecond == > microsecond \n TimeStampBias = int(x['EngPkt']['MappingParameters']['TimeStampBias'])*1000\n \n \n # PCT mounting matrix\n ### NPP Case \n if InstrumentId == 1: SCtoSBF_roll, SCtoSBF_pitch, SCtoSBF_yaw = [-518.45683, -77.760702, 46.109524]\n if InstrumentId == 4: SCtoSBF_roll, SCtoSBF_pitch, SCtoSBF_yaw = [ -145.84994, 267.42417, 594.61832]\n ### J1\n \n \n # putting into dictionary\n para['InstrumentId'] = InstrumentId\n para['PktVersion'] = PktVersion \n \n para['lw_crosstrackOffsetAngle'] = lw_crosstrackOffsetAngle\n para['mw_crosstrackOffsetAngle'] = mw_crosstrackOffsetAngle\n para['sw_crosstrackOffsetAngle'] = sw_crosstrackOffsetAngle\n \n para['lw_intrackOffsetAngle'] = lw_intrackOffsetAngle\n para['mw_intrackOffsetAngle'] = mw_intrackOffsetAngle\n para['sw_intrackOffsetAngle'] = sw_intrackOffsetAngle\n \n para['lw_losRelativeYaw'] = lw_losRelativeYaw\n para['mw_losRelativeYaw'] = mw_losRelativeYaw\n para['sw_losRelativeYaw'] = sw_losRelativeYaw\n \n para['lw_losRelativePitch'] = lw_losRelativePitch\n para['mw_losRelativePitch'] = mw_losRelativePitch\n para['sw_losRelativePitch'] = sw_losRelativePitch\n \n para['lw_fovSize'] = lw_fovSize\n para['mw_fovSize'] = mw_fovSize\n para['sw_fovSize'] = sw_fovSize\n \n para['actualCrosstrackAngle'] = actualCrosstrackAngle\n para['actualIntrackAngle'] = actualIntrackAngle\n \n para['SSMRtoSSMF_roll'] = SSMRtoSSMF_roll\n para['SSMRtoSSMF_pitch'] = SSMRtoSSMF_pitch\n para['SSMRtoSSMF_yaw'] = SSMRtoSSMF_yaw\n \n para['IARtoSSMR_roll'] = IARtoSSMR_roll\n para['IARtoSSMR_pitch'] = IARtoSSMR_pitch\n para['IARtoSSMR_yaw'] = IARtoSSMR_yaw\n \n para['IFRboresighttoSSMF_yaw'] = IFRboresighttoSSMF_yaw\n para['IFRboresighttoSSMF_pitch'] = IFRboresighttoSSMF_pitch\n \n para['SBFtoIAR_roll'] = SBFtoIAR_roll\n para['SBFtoIAR_pitch'] = SBFtoIAR_pitch\n para['SBFtoIAR_yaw'] = SBFtoIAR_yaw\n \n para['SCtoSBF_roll'] = SCtoSBF_roll\n para['SCtoSBF_pitch'] = SCtoSBF_pitch\n para['SCtoSBF_yaw'] = SCtoSBF_yaw\n \n para['TimeStampBias'] = TimeStampBias\n \n return para",
"def get_sensor_spec(sensor_id: int) -> Tuple[Functionalisations_t, WorkingChannels_t]:\n functionalisations: np.ndarray = np.array([])\n failures: np.ndarray = np.array([])\n\n if sensor_id == 4:\n functionalisations = np.array(\n [2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4,\n 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2]\n )\n failures = np.array(\n [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n )\n elif sensor_id == 5:\n functionalisations = load_sensor_preset('LasVegas.preset')\n # Channel 15, 16 & 23 disabled as it gives huge numbers (but it kinda works..?)\n failures_huge = [15, 16, 23]\n # Channel 22, 31 are shorts and always stuck to the lower bound (347.9)\n failures_shorts = [22, 31]\n # Channels are IN SOME MEASUREMENTS stuck to the lower bound\n failures_mid_low = [3, 4, 22, 25, 26, 27, 28, 29, 31, 35, 36, 38, 39, 60]\n # More channels that are stuck somewhere\n failures_more = [2, 3, 4, 5, 22, 25, 26, 27, 28, 29, 31, 35, 36, 38, 39, 56, 59, 60, 61]\n failures_too_many = [0, 1, 2, 3, 4, 5, 6, 7,\n 22, 24, 25, 26, 27, 28, 29, 30, 31, 35, 36, 37, 38, 39, 56, 58, 59, 60, 61, 62]\n '''failures = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1,\n 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n )'''\n failures = np.zeros(64, bool)\n #failures[failures_huge] = True\n failures[failures_shorts] = True\n #failures[failures_mid_low] = True\n #failures[failures_more] = True\n else:\n print('Unknown Sensor ID %i! No functionalisation and channel failure data available' % sensor_id)\n correct_channels = np.invert(np.array(failures).astype(bool))\n print('using sensor %i specification' % sensor_id)\n\n return functionalisations, correct_channels",
"def gather_chassis_details(self):",
"def get_sensor_descriptions(self):\n return ()",
"def getsysinfo(self):\n\t\tst=self._req_rdsingle(1,1,0x18)\n\t\tif st[\"len\"]==0x12:\n\t\t\tself.sysinfo=dict(zip(['addinfo','maxaxis','cnctype','mttype','series','version','axes'],\n\t\t\tunpack(\">HH2s2s4s4s2s\",st[\"data\"])))",
"def sense_device():\n\n title = 'SENSE.DEVICE'\n\n content = {\n\n title: {\n\n 'device_name': '',\n 'device_local_ip': '',\n 'has_cam': 'False'\n\n },\n }\n\n return content",
"def get_terror_waves_info(self):",
"def get_info(self):\r\n print(\"Getting info\")\r\n info = \"Controller:\\n%s\\n\" % self.controller.GetDeviceInfo().BuildDeviceDescription()\r\n sortedMap = sorted(self.axis_chan_mapping.items(), key=operator.itemgetter(1))\r\n for axis, channelno in sortedMap:\r\n channel = self.__get_chan(axis)\r\n chaninfo = channel.GetDeviceInfo().BuildDeviceDescription()\r\n piezoConfig = channel.GetPiezoConfiguration(self.deviceID)\r\n curDevSet = channel.PiezoDeviceSettings\r\n piezoInfo = \"Piezo Configuration Name: %s, Piezo Max Voltage: %s\" % (\r\n piezoConfig.DeviceSettingsName,\r\n curDevSet.OutputVoltageRange.MaxOutputVoltage.ToString())\r\n info += \"Channel %d (%s axis):\\n%s%s\\n\\n\" % (channelno,\r\n axis,\r\n chaninfo,\r\n piezoInfo)\r\n info += \"\\n\"\r\n return info",
"async def test_several_sensors(hass: HomeAssistant, rfxtrx) -> None:\n entry_data = create_rfx_test_cfg(\n devices={\n \"0a52080705020095220269\": {},\n \"0a520802060100ff0e0269\": {},\n }\n )\n mock_entry = MockConfigEntry(domain=\"rfxtrx\", unique_id=DOMAIN, data=entry_data)\n\n mock_entry.add_to_hass(hass)\n\n await hass.config_entries.async_setup(mock_entry.entry_id)\n await hass.async_block_till_done()\n await hass.async_start()\n\n state = hass.states.get(\"sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02_temperature\")\n assert state\n assert state.state == \"unknown\"\n assert (\n state.attributes.get(\"friendly_name\")\n == \"WT260,WT260H,WT440H,WT450,WT450H 05:02 Temperature\"\n )\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n\n state = hass.states.get(\"sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_temperature\")\n assert state\n assert state.state == \"unknown\"\n assert (\n state.attributes.get(\"friendly_name\")\n == \"WT260,WT260H,WT440H,WT450,WT450H 06:01 Temperature\"\n )\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UnitOfTemperature.CELSIUS\n\n state = hass.states.get(\"sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_humidity\")\n assert state\n assert state.state == \"unknown\"\n assert (\n state.attributes.get(\"friendly_name\")\n == \"WT260,WT260H,WT440H,WT450,WT450H 06:01 Humidity\"\n )\n assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE",
"def get_device_info(self, node_id):\n\n values = {}\n # dummy data\n values[\"module_id\"] = 0\n values[\"os_version\"] = \"Unknown\"\n values[\"tr_mcu_type\"] = \"Unknown\"\n values[\"os_build\"] = 0\n values[\"rssi\"] = \"-130 dBm\"\n values[\"supply_voltage\"] = \"0 V\"\n values[\"flags\"] = 0\n values[\"online\"] = False\n values[\"rssi_numerical\"] = -131\n values[\"supply_voltage_numerical\"] = 0\n\n bstr = self.cmd_byte_string(node_id, 0x02, 0x00)\n ret = self.dpa_request(bstr)\n if ret['status'] != 'STATUS_NO_ERROR':\n return values\n\n ret = self.dpabytes2bytes(ret['response'])[8:]\n\n # get module id\n values[\"module_id\"] = ret[3] << 24 | ret[2] << 16 | ret[1] << 8 | ret[0]\n\n # get os version\n if ret[5] & 0x7 == 4:\n postfix = \"D\"\n else:\n postfix = \"?\"\n values[\"os_version\"] = \"%d.%02d%s\" % (ret[4] >> 4, ret[4] & 0xf, postfix)\n\n # get TR and MCU type\n if values[\"module_id\"] & 0x80000000 > 0:\n dctr = \"DCTR\"\n else:\n dctr = \"TR\"\n\n val = (ret[5] >> 4) & 0x7\n tr = \"-?%d\" % val\n if val == 0:\n tr = \"-52D\"\n if val == 1:\n tr = \"58D-RJ\"\n if val == 2:\n tr = \"-72D\"\n if val == 3:\n tr = \"-53D\"\n if val == 8:\n tr = \"-54D\"\n if val == 9:\n tr = \"-55D\"\n if val == 10:\n tr = \"-56D\"\n if val == 11:\n tr = \"-76D\"\n\n val = (ret[5] >> 3) & 0x1\n fcc = \"FCC not certified\"\n if val == 1:\n fcc = \"FCC certified\"\n\n val = ret[5] & 0x7\n mcu = \"unknown MCU\"\n if val == 4:\n mcu = \"PIC16F1938\"\n\n values['tr_mcu_type'] = \"%s%s, %s, %s\" % (dctr, tr, fcc, mcu)\n\n # get OS build\n values[\"os_build\"] = ret[7] << 8 | ret[6]\n\n # get RSSI\n val = ret[8] - 130\n values[\"rssi_numerical\"] = val\n values[\"rssi\"] = \"%d dBm\" % (val)\n\n # get supply voltage\n val = 261.12 / (127.0 - ret[9])\n values[\"supply_voltage\"] = \"%f V\" % (val)\n values[\"supply_voltage_numerical\"] = val\n\n values[\"flags\"] = ret[10]\n values[\"online\"] = True\n\n return values",
"def drive_info(drive):\n sysfs_dir = '/sys/block/' + drive\n if not os.path.isdir(sysfs_dir):\n sys.stderr.write(\"Error: '\" + sysfs_dir + \"' does not exist.\\n\")\n return None\n\n command=['/sbin/udevadm','info','-q','path','-p',sysfs_dir]\n p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=None)\n dir = p.communicate(None)[0].rstrip()\n sas_device_dir = '/sys' + dir + '/../../../../sas_device'\n end_device_dir = sas_device_dir + '/' + os.listdir(sas_device_dir)[0]\n ret = []\n with open(end_device_dir + '/enclosure_identifier') as f:\n ret.append(f.read().rstrip())\n with open(end_device_dir + '/bay_identifier') as f:\n ret.append(f.read().rstrip())\n \n print ret\n return ret",
"def _read_sensor(self):\n pass",
"def get_diskinfo(self):\n (self.__serial, self.__fw) = get_disk_info('/dev/%s' % self.devname)[0].split(' ')",
"def deviceInfo(self):\n getusbs = usb.core.find(find_all=True)\n devices = dict(enumerate(str(dev.manufacturer) + \":\" + str(dev.idProduct) + \":\" + str(dev.idVendor) for dev in getusbs))\n for key, value in devices.items():\n print(key, \":\", value)\n hook = input(\"---> Select a device: \")\n idProd, idVen = devices[int(hook)].split(':')[1:]\n device = usb.core.find(idVendor=int(idVen), idProduct=int(idProd))\n print(device)",
"def get_details(self) -> None:\n body = helpers.req_body(self.manager, 'devicedetail')\n body['uuid'] = self.uuid\n r, _ = helpers.call_api(\n '/SmartBulb/v1/device/devicedetail',\n 'post',\n headers=helpers.req_headers(self.manager),\n json_object=body,\n )\n if helpers.code_check(r):\n self.connection_status = r.get('connectionStatus')\n self.device_status = r.get('deviceStatus')\n if self.dimmable_feature:\n self._brightness = int(r.get('brightNess'))\n else:\n logger.debug('Error getting %s details', self.device_name)",
"def test_sensor_configs():\n\n s = DigitalSensorConfigs(bcm_pin=17, header='random')\n assert s == DigitalSensorConfigs(**s.dict())\n\n s = RandomSensorConfigs(header='random')\n assert s == RandomSensorConfigs(**s.dict())\n\n s = AirSensorConfigs(i2c_address=0x12)\n assert s == AirSensorConfigs(**s.dict())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if this container is set to the compact view mode. | def is_compact (self):
if self.container:
return self.container.is_compact ( )
else:
return False | [
"def is_compact (self):\n return self.grid.is_compact ( )",
"def isCompact(self) -> \"SbBool\":\n return _coin.SoOutput_isCompact(self)",
"def __nonzero__(self): # pragma: no cover\n return self._panels is not None",
"def is_display_active(self):\n return self.op is not None",
"def is_short_view_applied_in_folder_content(self):\n return is_element_present(self._driver, *self.short_view_locator_template)",
"def __bool__(self): # pragma: no cover\n return self._panels is not None",
"def IsAutoLayout(self) -> bool:",
"def _get_isFullScreen(self) -> \"bool\" :\n return _core.Viewport__get_isFullScreen(self)",
"def is_fullscreen(self) -> bool:\n return self._props['fullscreen']",
"def IsModelLayout(self) -> bool:",
"def has_solidify(obj):\n if not hasattr(obj, \"modifiers\"):\n return False\n for mod in obj.modifiers:\n if mod.type == 'SOLIDIFY' and mod.show_viewport:\n return True\n return False",
"def isBoxMode(self):\n\t\treturn False",
"def set_compact2D(value = True):\n global _compact2D\n _compact2D = value",
"def is_view_3d(self):\n ret_val = self._is_view_3d()\n return ret_val",
"def is_docked(self):\n if self.docking_status != Ship.DockingStatus.UNDOCKED:\n return True\n else:\n return False",
"def dimensionless(self):\n tmp = copy.copy(self).convert_to_reference()\n\n return not bool(tmp.dimensionality)",
"def isShown(self):\n \n parent=self.parent\n if parent is not None:\n if parent.expanded == False: # collapsed\n return False\n else:\n return self.parent.isShown()\n else:\n # root node is always shown\n return True",
"def is_cusp_collapsed(self, cusp):\n return self.is_cusp_collapsed(cusp)",
"def hasRenderSettingsCollectionInstance(self):\n \n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks if this container is set to the full span view mode. | def is_full_span (self):
return self.container.is_full_span ( ) | [
"def is_full_span (self):\n return self.grid.is_full_span ( )",
"def _get_isFullScreen(self) -> \"bool\" :\n return _core.Viewport__get_isFullScreen(self)",
"def _is_full(self):\n\t\t # the number of round can't be superior to the number of case of the grid\n\t\treturn self._round > self.width * self.height",
"def is_fullscreen(self) -> bool:\n return self._props['fullscreen']",
"def isinview(self):\n term = getsession().terminal\n return (self.xloc > 0 and self.xloc +self.width -1 <= term.width\n and self.yloc > 0 and self.yloc +self.height -1 <= term.height)",
"def is_spanned(self):\n return self._tc.is_spanned",
"def box_at_full_size(self):\n return self.current_width == self.width and self.current_height == self.height",
"def full_window():\n global window\n True if (len(window) >= MWS / MSS) else False",
"def has_fixed_region_size(self) -> bool:\n return self._fixed_region_size is not None",
"def is_full(self):\n for i in xrange(self.start, self.board_end + 1):\n if not self.pos_full(i):\n return False\n return True",
"def isShown(self):\n \n parent=self.parent\n if parent is not None:\n if parent.expanded == False: # collapsed\n return False\n else:\n return self.parent.isShown()\n else:\n # root node is always shown\n return True",
"def contained_in_full_array(self, full_array):\n sh = full_array.shape\n if (self.sly_parent.start < 0) | (self.slx_parent.start < 0):\n return False\n\n if (self.sly_parent.stop >= sh[0]) | (self.slx_parent.stop >= sh[1]):\n return False\n\n return True",
"def _set_isFullWidth(self, *args) -> \"bool\" :\n return _core.TextBoxCommandInput__set_isFullWidth(self, *args)",
"def in_view(self):\n \n bbox = self.bbox()\n area = self.parent.canvas.get_visible_area()\n\n y1, y2 = bbox[1], bbox[3]\n v1, v2 = area[1], area[3]\n\n return (y1 > v1 and y2 < v2)",
"def isBound(self):\n return self.__bound > 0",
"def in_screen(self, coord):\n\t\treturn coord.x >= 0 and coord.x < self.width and coord.y >= 0 and coord.y < self.height",
"def isBoxMode(self):\n\t\treturn False",
"def IsAutoLayout(self) -> bool:",
"def is_full(self) -> bool:\n return self.get_size() >= self.size"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Event listener for a single cell selection. | def __cell_selected (self, event):
if event.Selecting ( ):
self.SelectBlock (event.GetRow ( ), event.GetCol ( ), event.GetRow ( ), event.GetCol ( ))
dates = self.container.get_dates ([event.GetCol ( )])
people = self.container.get_people ([event.GetRow ( )])
wx.PostEvent (self.GetEventHandler ( ), custom_events.ComplexSelectEvent (self.GetId ( ), dates=dates, people=people))
event.Skip ( ) | [
"def cell_selected(self):\n\n self.file_selection(self.ui.tableWidget.currentRow())",
"def get_selected_row(event):",
"def on_select_menuitem(self, event, grid, row, col, selection):\n if self.grid.changes: # if user selects a menuitem, that is an edit\n self.grid.changes.add(row)\n else:\n self.grid.changes = {row}\n\n item_id = event.GetId()\n item = event.EventObject.FindItemById(item_id)\n label = item.ItemLabelText #DEBUG\n #label = item.Label\n cell_value = grid.GetCellValue(row, col)\n if str(label) == \"CLEAR cell of all values\":\n label = \"\"\n\n col_label = grid.GetColLabelValue(col).strip('\\nEDIT ALL').strip('**').strip('^^')\n if col_label in self.colon_delimited_lst and label:\n if not label.lower() in cell_value.lower():\n label += (\":\" + cell_value).rstrip(':')\n else:\n label = cell_value\n\n if self.selected_col and self.selected_col == col:\n for row in range(self.grid.GetNumberRows()):\n grid.SetCellValue(row, col, label)\n if self.grid.changes:\n self.grid.changes.add(row)\n else:\n self.grid.changes = {row}\n\n #self.selected_col = None\n else:\n grid.SetCellValue(row, col, label)\n\n if selection:\n for cell in selection:\n row = cell[0]\n grid.SetCellValue(row, col, label)\n return",
"def _on_row_selection(self, added, removed):\n self._no_update = True\n try:\n indexes = self.control.selectionModel().selectedRows()\n index = self.proxyModel.mapToSource(indexes[0])\n\n if index:\n self.selected_row = index.row()\n self.selected = self.adapter.get_item(\n self.object, self.name, self.selected_row\n )\n else:\n self.selected_row = -1\n self.selected = None\n finally:\n self._no_update = False",
"def on_selectionChanged(self):\n columnHeader = self.parent().columnHeader\n indexHeader = self.parent().indexHeader\n\n # The two blocks below check what columns or rows are selected in the data table and highlights the\n # corresponding ones in the two headers. The if statements check for focus on headers, because if the user\n # clicks a header that will auto-select all cells in that row or column which will trigger this function\n # and cause and infinite loop\n\n if not columnHeader.hasFocus():\n selection = self.selectionModel().selection()\n columnHeader.selectionModel().select(\n selection,\n QtCore.QItemSelectionModel.Columns\n | QtCore.QItemSelectionModel.ClearAndSelect,\n )\n\n if not indexHeader.hasFocus():\n selection = self.selectionModel().selection()\n indexHeader.selectionModel().select(\n selection,\n QtCore.QItemSelectionModel.Rows\n | QtCore.QItemSelectionModel.ClearAndSelect,\n )",
"def selectCell(self, x: int, y: int):\n pass",
"def _colSelectionEnterPressed(self):\n global selected_col\n old_col_select = selected_col\n try:\n new_col = int(self.col_select_field.text())\n if new_col < 0:\n self.col_select_field.setText(str(old_col_select))\n else:\n selected_col = (new_col) % NUM_ROWS\n self.col_select_field.setText(str(selected_col))\n\n # UNUSED\n # The following block tries to update the selection grid accordingly but it's not working.\n # if self.grid_layout is not None:\n # self.grid_layout.itemAtPosition(selected_row,old_col_select).widget().setChecked(False)\n # self.grid_layout.itemAtPosition(selected_row,selected_col).widget().setChecked(True)\n # TODO Update corresponding check boxes\n except:\n self.col_select_field.setText(str(old_col_select))\n print(f\"Selected col: {selected_col}\")",
"def column_selected(self, event):\n\n selected_column_id = self.dropdown_menu.GetCurrentSelection()\n selcted_column = self.hue_columns[selected_column_id]\n\n self.draw_pair(selcted_column)",
"def selection_notify(self, func):\n return self._subscribe(\"selection_notify\", func)",
"def on_selectionChanged(self):\n # Check focus so we don't get recursive loop, since headers trigger selection of data cells and vice versa\n if self.hasFocus():\n dataView = self.parent().dataView\n\n # Set selection mode so selecting one row or column at a time adds to selection each time\n if (\n self.orientation == Qt.Horizontal\n ): # This case is for the horizontal header\n # Get the header's selected columns\n selection = self.selectionModel().selection()\n\n # Removes the higher levels so that only the lowest level of the header affects the data table selection\n last_row_ix = self.pgdf.dataframe.columns.nlevels - 1\n last_col_ix = self.model().columnCount() - 1\n higher_levels = QtCore.QItemSelection(\n self.model().index(0, 0),\n self.model().index(last_row_ix - 1, last_col_ix),\n )\n selection.merge(higher_levels, QtCore.QItemSelectionModel.Deselect)\n\n # Select the cells in the data view\n dataView.selectionModel().select(\n selection,\n QtCore.QItemSelectionModel.Columns\n | QtCore.QItemSelectionModel.ClearAndSelect,\n )\n if self.orientation == Qt.Vertical:\n selection = self.selectionModel().selection()\n\n last_row_ix = self.model().rowCount() - 1\n last_col_ix = self.pgdf.dataframe.index.nlevels - 1\n higher_levels = QtCore.QItemSelection(\n self.model().index(0, 0),\n self.model().index(last_row_ix, last_col_ix - 1),\n )\n selection.merge(higher_levels, QtCore.QItemSelectionModel.Deselect)\n\n dataView.selectionModel().select(\n selection,\n QtCore.QItemSelectionModel.Rows\n | QtCore.QItemSelectionModel.ClearAndSelect,\n )\n\n self.selectAbove()",
"def item_selection_changed(self):\n pass",
"def selectionChanged(self, selected, deselected):\r\n\r\n try:\r\n item_status = self.model()[self.selected_index]\r\n except IndexError:\r\n pass\r\n else:\r\n common.print_item_status(item_status)",
"def _selection_changed_slot(self, selected, deselected):\n # self._test_sel_index(selected, deselected)\n self._sel_index_2(selected, deselected)",
"def _rowSelectionEnterPressed(self):\n global selected_row\n old_row_select = selected_row\n try:\n new_row = int(self.row_select_field.text())\n if new_row < 0:\n self.row_select_field.setText(str(old_row_select))\n else:\n selected_row = (new_row) % NUM_ROWS\n self.row_select_field.setText(str(selected_row))\n\n # UNUSED\n # The following block tries to update the selection grid accordingly but it's not working.\n # if self.grid_layout is not None:\n # self.grid_layout.itemAtPosition(old_row_select,selected_col).widget().setChecked(False)\n # self.grid_layout.itemAtPosition(selected_row,selected_col).widget().setChecked(True)\n # TODO Update corresponding check boxes\n except:\n self.row_select_field.setText(str(old_row_select))\n print(f\"Selected row: {selected_row}\")",
"def on_selection_change_callback(self,attr,old,new):\n\n # (un)lock Save button\n if len(self.cds.selected.indices) > 0:\n self.save.disabled = False\n else:\n self.save.disabled = True\n\n # make selection in the heatmap\n dates = []\n for i in self.cds.selected.indices:\n dates.append(self.cds.data[\"datetime_date\"][i])\n selection = []\n i = 0\n for d in self.cds_OxCGRTHeatmap.data[\"datetime_date\"]:\n if d in dates:\n selection.append(i)\n i += 1\n self.cds_OxCGRTHeatmap.selected.indices = selection",
"def _on_rows_selection(self, added, removed):\n self._no_update = True\n try:\n indexes = self.control.selectionModel().selectedRows()\n selected_rows = []\n selected = []\n for index in indexes:\n index = self.proxyModel.mapToSource(index)\n row = index.row()\n selected_rows.append(row)\n selected.append(self.adapter.get_item(self.object, self.name, row))\n self.multi_selected_rows = selected_rows\n self.multi_selected = selected\n finally:\n self._no_update = False",
"def SelectCell(self, vtkPointSet, p_int, vtkCell, vtkGenericCell):\n ...",
"def on_label_click(self, event):\n col = event.GetCol()\n color = self.grid.GetCellBackgroundColour(0, col)\n if color != (191, 216, 216, 255): # light blue\n self.col_color = color\n if col not in (-1, 0):\n # if a new column was chosen without de-selecting the previous column, deselect the old selected_col\n if self.selected_col is not None and self.selected_col != col:\n col_label_value = self.grid.GetColLabelValue(self.selected_col)\n self.grid.SetColLabelValue(self.selected_col, col_label_value[:-10])\n if not self.huge_grid:\n for row in range(self.grid.GetNumberRows()):\n self.grid.SetCellBackgroundColour(row, self.selected_col, self.col_color)# 'white'\n self.grid.ForceRefresh()\n # deselect col if user is clicking on it a second time\n if col == self.selected_col:\n col_label_value = self.grid.GetColLabelValue(col)\n self.grid.SetColLabelValue(col, col_label_value[:-10])\n if not self.huge_grid:\n for row in range(self.grid.GetNumberRows()):\n self.grid.SetCellBackgroundColour(row, col, self.col_color) # 'white'\n self.grid.ForceRefresh()\n self.selected_col = None\n # otherwise, select (highlight) col\n else:\n self.selected_col = col\n col_label_value = self.grid.GetColLabelValue(col)\n self.grid.SetColLabelValue(col, col_label_value + \" \\nEDIT ALL\")\n if not self.huge_grid:\n for row in range(self.grid.GetNumberRows()):\n self.grid.SetCellBackgroundColour(row, col, 'light blue')\n self.grid.ForceRefresh()\n has_dropdown = False\n if col in list(self.choices.keys()):\n has_dropdown = True\n\n # if the column has no drop-down list, allow user to edit all cells in the column through text entry\n if not has_dropdown and col != 0:\n if self.selected_col == col:\n col_label = self.grid.GetColLabelValue(col)\n if col_label.endswith(\" \\nEDIT ALL\"):\n col_label = col_label[:-10]\n default_value = self.grid.GetCellValue(0, col)\n data = None\n dialog = wx.TextEntryDialog(None, \"Enter value for all cells in the column {}\\nNote: this will overwrite any existing cell values\".format(col_label_value), \"Edit All\", default_value, style=wx.OK|wx.CANCEL)\n dialog.Centre()\n if dialog.ShowModal() == wx.ID_OK:\n data = dialog.GetValue()\n # with HugeMagicGrid, you can add a new value to a column\n # all at once\n if self.huge_grid:\n self.grid.SetColumnValues(col, str(data))\n if self.grid.changes:\n self.grid.changes.add(0)\n else:\n self.grid.changes = {0}\n # with MagicGrid, you must add a new value\n # one row at a time\n else:\n for row in range(self.grid.GetNumberRows()):\n self.grid.SetCellValue(row, col, str(data))\n if self.grid.changes:\n self.grid.changes.add(row)\n else:\n self.grid.changes = {row}\n dialog.Destroy()\n # then deselect column\n col_label_value = self.grid.GetColLabelValue(col)\n self.grid.SetColLabelValue(col, col_label_value[:-10])\n if not self.huge_grid:\n for row in range(self.grid.GetNumberRows()):\n self.grid.SetCellBackgroundColour(row, col, self.col_color) # 'white'\n self.grid.ForceRefresh()\n self.selected_col = None",
"def reacttoselection(self,e):\r\n \r\n if self._lastselectedfriend == None: #only initiates refreshing once\r\n self.refreshloop()\r\n\r\n if self._friendlist.size() == 0:\r\n return\r\n \r\n self._lastselectedfriend=self._friendlist.get(\\\r\n self._friendlist.curselection()[0])\r\n self._title2.config(text='Chatting with ' + self._lastselectedfriend)\r\n self.list_messages()\r\n self.download_notes()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |